Samesite - proxy that can cache partial transfers

File samesite.py from the latest check-in


#!/usr/bin/env python3

import argparse, os
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', dest = 'config', help = 'config file location', metavar = 'FILE', default = 'samesite.conf')
args = parser.parse_args()
assert os.access(args.config, os.R_OK), "Fatal error: can't read {}".format(args.config)

import configparser
config = configparser.ConfigParser({
	'port': '8008',
	'verbose': 'no',
	'noetag': 'no',
	'noparts': 'no',
	'strip': '',
	'sub': '',
	'proto': 'http',
})
config.read(args.config)
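
# An illustrative samesite.conf (hostnames and paths are hypothetical; the
# option names match the defaults declared above):
#
#   [DEFAULT]
#   port = 8008
#
#   [ftp.example.org]
#   proto = http
#   root = ftp.example.org
#   dir = /var/cache/samesite/ftp.example.org
#   noetag = no
#   noparts = no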

cache_dir = os.path.realpath(os.path.dirname(args.config))

import re
for section in config.sections():
	# a missing or relative cache directory defaults to <config dir>/<section>
	if 'dir' not in config[section] or not os.path.isabs(config[section]['dir']):
		config[section]['dir'] = cache_dir + os.sep + section
	# strip any trailing slash so paths compose cleanly with os.sep
	config[section]['dir'] = config[section]['dir'].rstrip('/')
	if 'root' not in config[section]:
		config[section]['root'] = section


const_desc_fields = set(['Content-Length', 'Last-Modified', 'Pragma'])
const_ignore_fields = set([
	'Accept-Ranges', 'Age',
	'Cache-Control', 'Connection', 'Content-Type',
	'Date',
	'Expires',
	'Referer',
	'Server',
	'Via',
	'X-CCC', 'X-CID', 'X-Cache', 'X-Cache-Lookup', 'X-Livetool', 'X-Powered-By',
])

block_size = 8192

import bsddb3.dbshelve, copy, datetime, http.server, spacemap, urllib.request, urllib.error

class MyRequestHandler(http.server.BaseHTTPRequestHandler):
	def __process(self):
		# reload means the file must be downloaded again to serve this request
		reload = False
		# recheck means the file must be revalidated upstream; if it has not
		# been modified we can still serve the copy we already have
		recheck = False
		# file_stat is set only if the file (full or partial) actually exists
		file_stat = None
		# requested_ranges holds data about any Range header in the request
		requested_ranges = None
		# record caches this file's index entry locally; it is written back
		# to the index upon successful completion
		record = None

		# strip the query string from the request path
		my_path_match = re.compile(r'^(.*?)(\?.*)$').match(self.path)
		if my_path_match:
			my_path = my_path_match.group(1)
		else:
			my_path = self.path

		if not config.has_section(self.headers['Host']):
			config.add_section(self.headers['Host'])
			config[self.headers['Host']]['root'] = self.headers['Host']
			config[self.headers['Host']]['dir'] = cache_dir + os.sep + self.headers['Host']
		config_host = config[self.headers['Host']]

		# apply the configured strip/sub rewrite to the path, if any
		if config_host['strip']:
			my_path = re.sub(config_host['strip'], config_host['sub'], my_path)
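		# e.g. with strip = ^/mirror\d+ and an empty sub (illustrative values
		# from a hypothetical samesite.conf), /mirror3/pub/file.iso would be
		# fetched and cached as /pub/file.iso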

		my_path_b = my_path.encode('utf-8')
		info = 'Checking file: ' + my_path

		if not os.access(config_host['dir'], os.X_OK):
			os.mkdir(config_host['dir'])
		# this is file index - everything is stored in this file
		# _parts - list of stored parts of file
		# _time - last time the file was checked
		# everything else is just the headers
		index = bsddb3.dbshelve.open(config_host['dir'] + os.sep + '.index')
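		# An entry might look like this (illustrative):
		#   index[b'/pub/file.iso'] = {
		#       '_parts': spacemap.SpaceMap({131072: 262144}),  # bytes still missing
		#       '_time': datetime.datetime(2014, 1, 1, 0, 0),
		#       'Content-Length': 262144,
		#       'Last-Modified': 'Wed, 01 Jan 2014 00:00:00 GMT',
		#   }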

		desc_fields = const_desc_fields.copy()
		ignore_fields = const_ignore_fields.copy()
		if config_host['noetag'] == 'no':
			desc_fields.add('ETag')
		else:
			ignore_fields.add('ETag')

		proxy_ignored = set([
			'Accept', 'Accept-Charset', 'Accept-Encoding', 'Accept-Language',
			'Cache-Control', 'Connection', 'Content-Length', 'Cookie',
			'DNT',
			'Host',
			'If-Modified-Since', 'If-None-Match', 'If-Unmodified-Since',
			'Referer',
			'UA-CPU', 'User-Agent',
			'Via',
			'X-Forwarded-For', 'X-Last-HR', 'X-Last-HTTP-Status-Code', 'X-Old-UID', 'X-Removed', 'X-Real-IP', 'X-Retry-Count',
		])

		print('===============[ {} request ]==='.format(self.command))

		for header in self.headers:
			if header in proxy_ignored:
				pass
			elif header == 'Range':
				is_range = re.compile(r'bytes=(\d+)-(\d+)').match(self.headers[header])
				if is_range:
					requested_ranges = spacemap.SpaceMap({int(is_range.group(1)): int(is_range.group(2)) + 1})
				else:
					# more complex range requests are not supported
					return
			elif header == 'Pragma':
				if my_path_b in index:
					index[my_path_b][header] = self.headers[header]
			else:
				print('Unknown header - ', header, ': ', self.headers[header], sep='')
				return
			print(header, self.headers[header])
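		# A request header such as 'Range: bytes=1024-2047' (illustrative) is
		# parsed above into spacemap.SpaceMap({1024: 2048}); SpaceMap bounds are
		# half-open, hence the +1 here and the -1 when ranges go back on the wire.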

		# map the URL path to a file name, decoding %20 to spaces
		file_name = config_host['dir'] + os.sep + my_path.replace('%20', ' ')
		# partial or unfinished downloads live under .parts
		temp_name = config_host['dir'] + os.sep + '.parts' + my_path.replace('%20', ' ')

		# creating an empty placeholder in the index
		# if there's no space map and no file in the real directory, we have no file
		# if there's an empty space map, the file is complete
		# in general the space map covers every byte of the file we don't yet possess
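		# For example (illustrative): a freshly allocated 100-byte file has
		# _parts == spacemap.SpaceMap({0: 100}); once bytes 0-49 have been
		# written it shrinks to SpaceMap({50: 100}); an empty SpaceMap() means
		# the file is complete.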
		if my_path_b not in index:
			info += '\nThis one is new.'
			reload = True
			record = {}
		else:
			# forcibly checking file if no file present
			record = index[my_path_b]
			if os.access(file_name, os.R_OK):
				info += '\nFull file found.'
				file_stat = os.stat(file_name)
			elif '_parts' in index[my_path_b] and os.access(temp_name, os.R_OK):
				info += '\nPartial file found.'
				file_stat = os.stat(temp_name)
				recheck = True
			else:
				info += '\nFile not found or inaccessible.'
				record['_parts'] = None
				reload = True

		if '_parts' not in record:
			record['_parts'] = None

		if record['_parts'] is None:
			recheck = True

		# forcibly recheck the file if its size doesn't match the index data
		if not reload:
			if '_parts' in record and record['_parts'] == spacemap.SpaceMap():
				if 'Content-Length' in record and file_stat and file_stat.st_size != int(record['Content-Length']):
					info += '\nFile size is {} and stored file size is {}.'.format(file_stat.st_size, record['Content-Length'])
					record['_parts'] = None
					reload = True

		# forcibly recheck the file if the index holds a no-cache Pragma header
		if not reload and 'Pragma' in record and record['Pragma'] == 'no-cache':
			info += '\nPragma on: recheck imminent.'
			recheck = True

		# recheck the file if the last check happened more than 4 hours ago
		if not recheck and not reload and '_time' in record and (record['_time'] - datetime.datetime.now() + datetime.timedelta(hours = 4)).days < 0:
			info += '\nFile is old - rechecking.'
			recheck = True
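		# The timedelta arithmetic above is negative exactly when
		# now - record['_time'] exceeds 4 hours, and any negative timedelta
		# has .days < 0, so only stale entries trigger the recheck.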

		print(info)
		if reload or recheck:

			try:
				request = config_host['proto'] + '://' + config_host['root'] + self.path
				my_headers = {}
				for header in ('Accept', 'Cache-Control', 'Cookie', 'Referer', 'User-Agent'):
					if header in self.headers:
						my_headers[header] = self.headers[header]

				needed = None
				if self.command != 'HEAD':
					if '_parts' in record and record['_parts'] is not None:
						if config_host['noparts'] != 'no' or requested_ranges is None or requested_ranges == spacemap.SpaceMap():
							needed = record['_parts']
						else:
							needed = record['_parts'] & requested_ranges
					elif config_host['noparts'] == 'no' and requested_ranges is not None and requested_ranges != spacemap.SpaceMap():
						needed = requested_ranges
					ranges = ()
					print('Missing ranges: {}, requested ranges: {}, needed ranges: {}.'.format(record['_parts'], requested_ranges, needed))
					if needed is not None and len(needed) > 0:
						needed.rewind()
						while True:
							missing = needed.pop()
							if missing[0] is None:
								break
							ranges += '{}-{}'.format(missing[0], missing[1] - 1),
						my_headers['Range'] = 'bytes=' + ','.join(ranges)
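						# e.g. (illustrative) needed = SpaceMap({0: 100, 500: 600})
						# yields the upstream header 'Range: bytes=0-99,500-599'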

				#my_headers['Accept-Encoding'] = 'gzip, compress, deflate, identity; q=0'
				request = urllib.request.Request(request, headers = my_headers)

				source = urllib.request.urlopen(request, timeout = 60)
				new_record = {}
				new_record['_parts'] = record['_parts']
				headers = source.info()

				if 'Content-Encoding' in headers and headers['Content-Encoding'] == 'gzip':
					import gzip
					source = gzip.GzipFile(fileobj=source)

				# stripping unneeded headers (XXX make this inplace?)
				for header in headers:
					if header in desc_fields:
						#if header == 'Pragma' and headers[header] != 'no-cache':
						if header == 'Content-Length':
							if 'Content-Range' not in headers:
								new_record[header] = int(headers[header])
						else:
							new_record[header] = headers[header]
					elif header == 'Content-Range':
						content_range = re.compile(r'^bytes (\d+)-(\d+)/(\d+)$').match(headers[header])
						if content_range:
							new_record['Content-Length'] = int(content_range.group(3))
						else:
							assert False, 'Content-Range unrecognized.'
					elif header not in ignore_fields:
						print('Undefined header "', header, '": ', headers[header], sep='')
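					# e.g. (illustrative) an upstream 'Content-Range: bytes 0-499/1234'
					# reply records a total Content-Length of 1234 for the cached file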

				# comparing headers with the data found in the index:
				# if any header except Pragma has changed, the file is downloaded
				# again from scratch; the same applies when headers were gained or lost
				old_keys = set(record.keys())
				old_keys.discard('_time')
				old_keys.discard('Pragma')
				more_keys = set(new_record.keys()) - old_keys
				more_keys.discard('Pragma')
				less_keys = old_keys - set(new_record.keys())
				if len(more_keys) > 0:
					if len(old_keys) != 0:
						print('New headers appeared:', more_keys)
					reload = True
				elif len(less_keys) > 0:
					print('Headers disappeared:', less_keys)
					reload = True
				else:
					for key in record.keys():
						if key[0] != '_' and key != 'Pragma' and record[key] != new_record[key]:
							print('Header "', key, '" changed from [', record[key], '] to [', new_record[key], ']', sep='')
							reload = True

				if reload:
					print('Reloading.')
					if os.access(temp_name, os.R_OK):
						os.unlink(temp_name)
					if os.access(file_name, os.R_OK):
						os.unlink(file_name)
					if 'Content-Length' in new_record:
						new_record['_parts'] = spacemap.SpaceMap({0: int(new_record['Content-Length'])})
				if not new_record['_parts']:
					new_record['_parts'] = spacemap.SpaceMap()
				print(new_record)

				# downloading the whole file or a single segment
				if 'Content-Length' in new_record:
					if needed is None:
						needed = new_record['_parts']
					else:
						if len(needed) > 1:
							print('Multipart requests are currently not supported.')
							assert False, 'Skip this one for now.'
				#else:
					#assert False, 'No content-length or Content-Range header.'

				new_record['_time'] = datetime.datetime.now()
				if self.command != 'HEAD':
					# file is created at temporary location and moved in place only when download completes
					if not os.access(temp_name, os.R_OK):
						empty_name = config_host['dir'] + os.sep + '.tmp'
						with open(empty_name, 'w+b') as some_file:
							pass
						os.renames(empty_name, temp_name)
					temp_file = open(temp_name, 'r+b')
					if requested_ranges is None and needed is None:
						needed = new_record['_parts']
					needed.rewind()
					countdown = 16
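					# progress is synced to the index every 16 blocks so that an
					# interrupted download can resume from the last saved space map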
					while True:
						# XXX can make this implicit - one request per range
						(start, end) = needed.pop()
						if start is None:
							break
						stream_last = start
						old_record = copy.copy(new_record)
						if end - start < block_size:
							req_block_size = end - start
						else:
							req_block_size = block_size
						buffer = source.read(req_block_size)
						length = len(buffer)
						while length > 0 and stream_last < end:
							stream_pos = stream_last + length
							assert stream_pos <= end, 'Received more data than requested: pos:{} start:{} end:{}.'.format(stream_pos, start, end)
							temp_file.seek(stream_last)
							temp_file.write(buffer)
							new_record['_parts'] = new_record['_parts'] - spacemap.SpaceMap({stream_last: stream_pos})
							countdown -= 1
							if countdown == 0:
								index[my_path_b] = old_record
								index.sync()
								countdown = 16
							old_record = copy.copy(new_record)
							stream_last = stream_pos
							if end - stream_last < block_size:
								req_block_size = end - stream_last
							buffer = source.read(req_block_size)
							length = len(buffer)
					# close the temporary file; it is moved into place further
					# below, once the space map shows the download is complete
					temp_file.close()

				index[my_path_b] = new_record
				index.sync()

			except urllib.error.HTTPError as error:
				# in case of an error we don't actually need to do anything:
				# if the download stalls or fails, the file is simply never
				# moved to its final location
				self.send_response(error.code)
				self.end_headers()
				print(error, repr(my_headers))
				return

		#print(index[my_path_b])

		if not os.access(file_name, os.R_OK) and os.access(temp_name, os.R_OK) and '_parts' in index[my_path_b] and index[my_path_b]['_parts'] == spacemap.SpaceMap():
			# just moving
			# drop old dirs XXX
			print('Moving temporary file to new destination.')
			os.renames(temp_name, file_name)

		if my_path_b not in index:
			self.send_response(502)
			self.end_headers()
			return

		if self.command == 'HEAD':
			self.send_response(200)
			if 'Content-Length' in index[my_path_b]:
				self.send_header('Content-Length', index[my_path_b]['Content-Length'])
			self.send_header('Accept-Ranges', 'bytes')
			self.send_header('Content-Type', 'application/octet-stream')
			if 'Last-Modified' in index[my_path_b]:
				self.send_header('Last-Modified', index[my_path_b]['Last-Modified'])
			self.end_headers()
		else:
			if ('_parts' in index[my_path_b] and index[my_path_b]['_parts'] != spacemap.SpaceMap()) or not os.access(file_name, os.R_OK):
				file_name = temp_name

			with open(file_name, 'rb') as real_file:
				file_stat = os.stat(file_name)
				if 'Range' in self.headers:
					self.send_response(206)
					ranges = ()
					requested_ranges.rewind()
					while True:
						pair = requested_ranges.pop()
						if pair[0] is None:
							break
						ranges += '{}-{}'.format(pair[0], pair[1] - 1),
					self.send_header('Content-Range', 'bytes {}/{}'.format(','.join(ranges), index[my_path_b]['Content-Length']))
				else:
					self.send_response(200)
					self.send_header('Content-Length', str(file_stat.st_size))
					requested_ranges = spacemap.SpaceMap({0: file_stat.st_size})
				if 'Last-Modified' in index[my_path_b]:
					self.send_header('Last-Modified', index[my_path_b]['Last-Modified'])
				self.send_header('Content-Type', 'application/octet-stream')
				self.end_headers()
				if self.command == 'GET':
					if len(requested_ranges) > 0:
						requested_ranges.rewind()
						(start, end) = requested_ranges.pop()
					else:
						start = 0
						# XXX ugly hack
						if 'Content-Length' in index[my_path_b]:
							end = index[my_path_b]['Content-Length']
						else:
							end = 0
					real_file.seek(start)
					if block_size > end - start:
						req_block_size = end - start
					else:
						req_block_size = block_size
					buffer = real_file.read(req_block_size)
					length = len(buffer)
					while length > 0:
						self.wfile.write(buffer)
						start += len(buffer)
						if req_block_size > end - start:
							req_block_size = end - start
						if req_block_size == 0:
							break
						buffer = real_file.read(req_block_size)
						length = len(buffer)
				
	def do_HEAD(self):
		return self.__process()
	def do_GET(self):
		return self.__process()

server = http.server.HTTPServer(('127.0.0.1', int(config['DEFAULT']['port'])), MyRequestHandler)
server.serve_forever()
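
# A minimal usage sketch (paths illustrative): keep samesite.conf next to the
# cache directories and start the proxy with
#
#   ./samesite.py -c /var/cache/samesite/samesite.conf
#
# The upstream host for every request is taken from its Host header, so
# clients are expected to reach this listener as if it were the origin server.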
