Samesite - proxy that can cache partial transfers

Artifact 873110a573b70aeeb1696d56a723941d5a3dc77fe6481695775f6265759ccf1b:


#!/usr/bin/env python3.1
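# Samesite mirrors the files that clients request through a web server: it
# replays the server's access log against an upstream root and keeps local
# copies in sync, committing a download only once it completes.  A typical
# invocation might look like this (paths and URL are illustrative):
#
#   samesite.py -d /var/cache/mirror -r http://upstream.example.com -l /var/log/httpd/access_log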

import datetime, optparse, os, re, shelve, sys, urllib.error, urllib.request

parser = optparse.OptionParser()
parser.add_option('-v', '--verbose', action = 'store_true', dest = 'verbose', help = 'turns on verbose status notifications', metavar = 'bool', default = False)
parser.add_option('-d', '--dir', action = 'store', dest = 'dir', help = 'specify directory where the files should be stored', metavar = 'string', default = None)
parser.add_option('-r', '--root', action = 'store', dest = 'root', help = 'specify a site from which data should be mirrored', metavar = 'string', default = None)
parser.add_option('-l', '--log', action = 'store', dest = 'log', help = 'specify a log file to process', metavar = 'string', default = None)
(options, args) = parser.parse_args()

if not options.dir:
	print('Directory not specified', file = sys.stderr)
	sys.exit(1)

if not options.root:
	print('Server not specified', file = sys.stderr)
	sys.exit(1)

if not options.log:
	print('Log file not specified', file = sys.stderr)
	sys.exit(1)

if not os.access(options.log, os.R_OK):
	print('Log file unreadable', file = sys.stderr)
	sys.exit(1)

# the file index - per-URL header metadata for every mirrored file is stored in this shelve file
index = shelve.open(options.dir + '/.index')
desc_fields = ('Content-Length', 'ETag', 'Pragma', 'Last-Modified')
ignore_fields = ('Accept-Ranges', 'Age', 'Cache-Control', 'Connection', 'Content-Type', 'Date', 'Expires', 'Server', 'Via', 'X-Cache', 'X-Cache-Lookup')
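# desc_fields are recorded in the index and compared on every pass;
# ignore_fields are headers we recognise but never act on; any other header is
# reported as undefined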

while True:
	unchecked_files = set()
	checked_files = 0

	# reading the log and collecting the requested urls for processing
	# XXX: could check the log file's mtime to avoid re-reading an unchanged log
	with open(options.log, 'r') as log_file:
		log_line = re.compile(r'^[^ ]+ - - \[.*\] "GET (.*?)(\?.*)? HTTP/1\.1" (\d+) \d+ "(.*)" "(.*)"$')
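		# an illustrative line this pattern matches (combined log format, values made up):
		# 192.0.2.1 - - [01/Jan/2010:12:00:00 +0300] "GET /files/a%20b.iso?mirror=1 HTTP/1.1" 200 4096 "-" "Wget/1.12"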
		for line in log_file:
			this_line = log_line.match(line.strip())
			if this_line:
				unchecked_files.add(this_line.group(1))

	for url in unchecked_files:

		# creating empty placeholder in index
		if url not in index:
			index[url] = {}
		reload = False

		# creating file name from url
		file_name = options.dir + url.replace('%20', ' ')
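		# note: only %20 is decoded; urllib.parse.unquote could handle the other
		# percent-escapes, at the cost of renaming already-mirrored files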

		# forcibly checking file if no file present
		if not os.access(file_name, os.R_OK):
			reload = True

		# forcibly checking the file if its size doesn't match the size recorded in the index
		elif 'Content-Length' in index[url] and os.stat(file_name).st_size != int(index[url]['Content-Length']):
			print('File size is', os.stat(file_name).st_size, 'but the indexed size is', index[url]['Content-Length'])
			reload = True

		# forcibly checking the file if the index holds a no-cache Pragma header
		if 'Pragma' in index[url] and index[url]['Pragma'] == 'no-cache':
			reload = True

		# skipping the file if nothing forces a recheck and it was last checked less than 4 hours ago
		if not reload and '__time__' in index[url] and datetime.datetime.now() - index[url]['__time__'] < datetime.timedelta(hours = 4):
			continue
		print('Checking file:', url)
		try:
			with urllib.request.urlopen(options.root + url) as source:
				new_headers = {}
				headers = source.info()

				# keeping only the tracked headers, reporting anything unrecognised (XXX make this in-place?)
				for header in headers:
					if header in desc_fields:
						if header == 'Pragma' and headers[header] != 'no-cache':
							print('Pragma:', headers[header])
						new_headers[header] = headers[header]
					elif header not in ignore_fields:
						print('Undefined header', header, ':', headers[header])

				# comparing headers with the data found in the index:
				# if any header except Pragma has changed, or if headers were added
				# or removed, the file is downloaded again in full
				old_keys = set(index[url].keys())
				old_keys.discard('__time__')
				old_keys.discard('Pragma')
				more_keys = set(new_headers.keys()) - old_keys
				more_keys.discard('Pragma')
				less_keys = old_keys - set(new_headers.keys())
				if more_keys:
					print('New headers appeared:', more_keys)
					reload = True
				elif less_keys:
					print('Headers disappeared:', less_keys)
					reload = True
				else:
					for key in index[url]:
						if key not in ('__time__', 'Pragma') and index[url][key] != new_headers[key]:
							print('Header', key, 'changed from', index[url][key], 'to', new_headers[key])
							reload = True

				# downloading file
				if reload:
					if 'Content-Length' in headers:
						print('Downloading', headers['Content-Length'], 'bytes [', end='')
					else:
						print('Downloading [', end='')
					sys.stdout.flush()

					# the file is written to a temporary location and moved into place only when the download completes
					with open(options.dir + '/.tmp', 'wb') as temp_file:
						buffer = source.read(4096)
						while buffer:
							temp_file.write(buffer)
							print('.', end='')
							sys.stdout.flush()
							buffer = source.read(4096)
					print(']')
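					# os.renames also creates any missing intermediate directories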
					os.renames(options.dir + '/.tmp', file_name)

				checked_files += 1

				# storing new time mark and storing new headers
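				# (a shelve entry is persisted only on assignment, hence the wholesale replace)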
				new_headers['__time__'] = datetime.datetime.now()
				index[url] = new_headers

		except urllib.error.URLError as error:
			# URLError also covers HTTPError; on a failed fetch there is nothing to
			# undo, since a stalled or broken download never gets moved into place
			print(error)

	print('[', len(unchecked_files), '/', checked_files, ']')

	# if no files were rechecked on this pass the mirror is up to date and we
	# can stop; otherwise the log is replayed once more
	if checked_files == 0:
		break

index.close()