Samesite - a proxy that can cache partial transfers

Annotation For samesite.py

Lines of samesite.py from check-in 53dcfdb8f7 that are changed by the sequence of edits moving toward check-in 083ec707ea:

                         1: #!/usr/bin/env python3.1
                         2: 
                         3: import datetime, http.cookiejar, optparse, os, sys, shelve, re, urllib.error, urllib.request
                         4: 
                         5: parser = optparse.OptionParser()
                         6: parser.add_option('-v', '--verbose', action = 'store_true', dest = 'verbose', help = 'turns on verbose status notifications', metavar = 'bool', default = False)
                         7: parser.add_option('-d', '--dir', action = 'store', dest = 'dir', help = 'specify directory where the files should be stored', metavar = 'string', default = None)
                         8: parser.add_option('-r', '--root', action = 'store', dest = 'root', help = 'specify a site from which data should be mirrored', metavar = 'string', default = None)
                         9: parser.add_option('-l', '--log', action = 'store', dest = 'log', help = 'specify a log file to process', metavar = 'string', default = None)
                        10: (options, args) = parser.parse_args()
                        11: 
                        12: if not options.dir:
                        13: 	print('Directory not specified')
                        14: 	exit(1)
                        15: 
                        16: if not options.root:
                        17: 	print('Server not specified')
                        18: 	exit(1)
                        19: 
                        20: if not options.log:
                        21: 	print('Log file not specified')
                        22: 	exit(1)
                        23: 
                        24: if not os.access(options.log, os.R_OK):
                        25: 	print('Log file unreadable')
                        26: 	exit(1)
                        27: 
                        28: # this is the file index - everything is stored in this file
                        29: index = shelve.open(options.dir + '/.index')
                        30: desc_fields = ('Content-Length', 'ETag', 'Pragma', 'Last-Modified')
                        31: ignore_fields = ('Accept-Ranges', 'Age', 'Cache-Control', 'Connection', 'Content-Type', 'Date', 'Expires', 'Server', 'Via', 'X-Cache', 'X-Cache-Lookup')
                        32: 
                        33: block_size = 32768
                        34: 
                        35: while True:
                        36: 	unchecked_files = set()
                        37: 	checked_files = 0
                        38: 
                        39: 	# reading log and storing found urls for processing
                        40: 	# check file mtime XXX
                        41: 	with open(options.log, 'r') as log_file:
                        42: 		log_line = re.compile(r'^[^ ]+ - - \[.*] "(GET|HEAD) (.*?)(\?.*)? HTTP/1\.1" (\d+) \d+ "(.*)" "(.*)"$')
                        43: 		for line in log_file:
                        44: 			this_line = log_line.match(line.strip())
                        45: 			if this_line:
                        46: 				unchecked_files.add(this_line.group(2))
                        47: 
                        48: 	for url in unchecked_files:
                        49: 
                        50: 		# creating empty placeholder in index
                        51: 		if url not in index:
                        52: 			index[url] = {}
53dcfdb8f7 2010-07-05   53: 		reload = False
                        54: 
                        55: 		# creating file name from url
                        56: 		file_name = options.dir + url.replace('%20', ' ')
53dcfdb8f7 2010-07-05   57: 		print('Checking file:', url)
                        58: 
                        59: 		# forcibly checking the file if it is not present on disk
53dcfdb8f7 2010-07-05   60: 		if not os.access(file_name, os.R_OK):
                        61: 			print('File not found or inaccessible.')
                        62: 			reload = True
                        63: 
                        64: 		# forcibly checking the file if its size doesn't match the index data
53dcfdb8f7 2010-07-05   65: 		elif 'Content-Length' in index[url] and os.stat(file_name).st_size != int(index[url]['Content-Length']):
53dcfdb8f7 2010-07-05   66: 			print('File size is', os.stat(file_name).st_size, 'and stored file size is', index[url]['Content-Length'])
                        67: 			reload = True
                        68: 
                        69: 		# forcibly checking the file if the index holds a Pragma header
53dcfdb8f7 2010-07-05   70: 		if 'Pragma' in index[url] and index[url]['Pragma'] == 'no-cache':
53dcfdb8f7 2010-07-05   71: 			print('Pragma on: recheck imminent.')
53dcfdb8f7 2010-07-05   72: 			reload = True
                        73: 
                        74: 		# skipping the file if there's no need to recheck it and we checked it within the last 4 hours
53dcfdb8f7 2010-07-05   75: 		if not reload and '__time__' in index[url] and (datetime.datetime.now() - datetime.timedelta(hours = 4) - index[url]['__time__']).days < 0:
                        76: 			continue
                        77: 		try:
53dcfdb8f7 2010-07-05   78: 			print('Contacting website.')
                        79: 			with urllib.request.urlopen(options.root + url) as source:
                        80: 				new_headers = {}
                        81: 				headers = source.info()
                        82: 
                        83: 				# stripping unneeded headers (XXX make this in place?)
                        84: 				for header in headers:
                        85: 					if header in desc_fields:
                        86: 						if header == 'Pragma' and headers[header] != 'no-cache':
                        87: 							print('Pragma:', headers[header])
                        88: 						new_headers[header] = headers[header]
                        89: 					elif header not in ignore_fields:
                        90: 						print('Undefined header "', header, '": ', headers[header], sep='')
                        91: 
                        92: 				# comparing headers with the data found in the index
                        93: 				# if any header (except Pragma) has changed the file is downloaded in full
                        94: 				# the same happens if we get more or fewer headers than before
                        95: 				old_keys = set(index[url].keys())
                        96: 				old_keys.discard('__time__')
                        97: 				old_keys.discard('Pragma')
                        98: 				more_keys = set(new_headers.keys()) - old_keys
                        99: 				more_keys.discard('Pragma')
                       100: 				less_keys = old_keys - set(new_headers.keys())
                       101: 				if len(more_keys) > 0:
53dcfdb8f7 2010-07-05  102: 					if len(old_keys) == 0:
53dcfdb8f7 2010-07-05  103: 						print('No data on that file yet.')
53dcfdb8f7 2010-07-05  104: 					else:
                       105: 						print('More headers appear:', more_keys)
                       106: 					reload = True
                       107: 				elif len(less_keys) > 0:
                        108: 					print('Fewer headers appear:', less_keys)
                       109: 					reload = True
                       110: 				else:
                       111: 					for key in index[url].keys():
                        112: 						if key not in ('__time__', 'Pragma') and index[url][key] != new_headers[key]:
                       113: 							print('Header "', key, '" changed from [', index[url][key], '] to [', new_headers[key], ']', sep='')
                       114: 							reload = True
                       115: 
                       116: 				# downloading file
                       117: 				if reload:
                       118: 					if 'Content-Length' in headers:
                       119: 						print('Downloading', headers['Content-Length'], 'bytes [', end='')
                       120: 					else:
                       121: 						print('Downloading [', end='')
                       122: 					sys.stdout.flush()
                       123: 
                       124: 					# file is created at temporary location and moved in place only when download completes
                       125: 					temp_file = open(options.dir + '/.tmp', 'wb')
                       126: 					buffer = source.read(block_size)
                       127: 					blocks = 0
                       128: 					megs = 0
                       129: 					while len(buffer) > 0:
                       130: 						temp_file.write(buffer)
                       131: 						print('.', end='')
                       132: 						sys.stdout.flush()
                       133: 						buffer = source.read(block_size)
                       134: 						blocks += 1
                        135: 						if blocks > 1024*1024//block_size:
                        136: 							blocks = blocks - 1024*1024//block_size
                       137: 							megs += 1
                       138: 							print('{}Mb'.format(megs), end='')
                       139: 					temp_file.close()
                       140: 					print(']')
                       141: 					os.renames(options.dir + '/.tmp', file_name)
                       142: 
                       143: 				checked_files += 1
                       144: 
                       145: 				# storing new time mark and storing new headers
                       146: 				new_headers['__time__'] = datetime.datetime.now()
                       147: 				index[url] = new_headers
                       148: 				index.sync()
                       149: 
                       150: 		except urllib.error.HTTPError as error:
                        151: 			# in case of an error we don't actually need to do anything:
                        152: 			# if the download stalls or fails the file is not moved into its place
                       153: 			print(error)
                       154: 
                       155: 	print('[', len(unchecked_files), '/', checked_files, ']')
                       156: 
                        157: 	# checking whether any files were processed this pass; if so, restarting the sequence
                       158: 	if checked_files == 0:
                       159: 		break
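
For reference, the log_line pattern above expects Apache combined-log entries. A minimal sketch of what it extracts, using a made-up entry (the address, path and sizes are illustrative, not taken from a real log):

    import re

    log_line = re.compile(r'^[^ ]+ - - \[.*] "(GET|HEAD) (.*?)(\?.*)? HTTP/1\.1" (\d+) \d+ "(.*)" "(.*)"$')

    # hypothetical combined-log entry; the script reads real ones from options.log
    sample = '192.0.2.1 - - [05/Jul/2010:12:00:00 +0000] "GET /files/song.ogg?id=42 HTTP/1.1" 200 1048576 "-" "Mozilla/5.0"'

    m = log_line.match(sample)
    print(m.group(2))  # -> /files/song.ogg (the query string lands in group 3 and is discarded)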
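
The recheck test on line 75 leans on timedelta normalization: a negative timedelta always reports days == -1, so the expression is true exactly when the stored __time__ falls within the last four hours. A small sketch (the helper name is mine, not the script's):

    import datetime

    def checked_recently(last_checked, hours=4):
        # hypothetical helper mirroring the script's test: a negative timedelta
        # normalizes to days == -1, so this is True precisely when last_checked
        # is less than `hours` hours old
        return (datetime.datetime.now() - datetime.timedelta(hours=hours) - last_checked).days < 0

    now = datetime.datetime.now()
    print(checked_recently(now - datetime.timedelta(hours=2)))  # True  -> the URL is skipped
    print(checked_recently(now - datetime.timedelta(hours=5)))  # False -> the website is contacted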
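
The header comparison on lines 92-114 is plain set arithmetic over header names, with Pragma and the bookkeeping key __time__ excluded on both sides. Roughly (the dictionaries below are made up for illustration):

    # a stored index entry and freshly received headers (illustrative values)
    stored = {'Content-Length': '100', 'ETag': '"abc"', '__time__': 'a datetime'}
    received = {'Content-Length': '100', 'Last-Modified': 'Mon, 05 Jul 2010 12:00:00 GMT'}

    old_keys = set(stored) - {'__time__', 'Pragma'}
    more_keys = set(received) - old_keys - {'Pragma'}  # headers that appeared
    less_keys = old_keys - set(received)               # headers that disappeared

    # either set being non-empty forces a full reload of the file
    print(more_keys, less_keys)  # {'Last-Modified'} {'ETag'}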