Lines of
samesite.py
from check-in cab908195f
that are changed by the sequence of edits moving toward
check-in 439e1753a4:
1: #!/usr/bin/env python3.1
2:
3: import datetime, http.cookiejar, os, sys, shelve, spacemap, re, urllib.request
4:
class Config:
	"""Runtime configuration: command-line options plus an INI-style file.

	The -c/--config option names the configuration file (default:
	samesite.conf).  Option lookups go through __getitem__, falling back
	to the _default table when the file does not define a value.
	"""
	__slots__ = frozenset(['_config', '_default', '_section', 'options', 'root'])
	# built-in defaults: the 'general' entry backs the [general] section,
	# '_other' backs every per-site section
	_default = {
		'general': {
			'port': '8008',
		},
		'_other': {
			'verbose': 'no',
			'noetag': 'no',
			'noparts': 'no',
			'strip': '',
			'sub': '',
		},}

	# function to read in config file
	def __init__(self):
		import configparser, optparse

		parser = optparse.OptionParser()
		parser.add_option('-c', '--config', dest = 'config', help = 'config file location', metavar = 'FILE', default = 'samesite.conf')
		(self.options, args) = parser.parse_args()

		assert os.access(self.options.config, os.R_OK), "Fatal error: can't read {}".format(self.options.config)

		# root is the directory part of the config file path, or the current
		# directory when the path has no directory component; relative cache
		# directories are resolved against it
		configDir = re.compile('^(.*)/[^/]+$').match(self.options.config)
		if configDir:
			self.root = configDir.group(1)
		else:
			self.root = os.getcwd()

		self._config = configparser.ConfigParser()
		self._config.readfp(open(self.options.config))

		# normalize every per-site section: make 'dir' absolute (anchored at
		# self.root), strip one trailing slash, and default 'root' to the
		# section name; the steps below are order-dependent
		for section in self._config.sections():
			if section != 'general':
				if self._config.has_option(section, 'dir'):
					# a bare '/' is replaced with <root>/<section>
					if re.compile('^/$').match(self._config.get(section, 'dir')):
						self._config.set(section, 'dir', self.root + os.sep + section)
					# drop a trailing slash, then anchor relative paths
					thisDir = re.compile('^(.*)/$').match(self._config.get(section, 'dir'))
					if thisDir:
						self._config.set(section, 'dir', thisDir.group(1))
					if not re.compile('^/(.*)$').match(self._config.get(section, 'dir')):
						self._config.set(section, 'dir', self.root + os.sep + self._config.get(section, 'dir'))
				else:
					self._config.set(section, 'dir', self.root + os.sep + section)

				if not self._config.has_option(section, 'root'):
					self._config.set(section, 'root', section)

	# function to select config file section or create one
	def section(self, section):
		if not self._config.has_section(section):
			self._config.add_section(section)
		self._section = section

	# function to get config parameter, if parameter doesn't exist the default
	# value or None is substituted
	def __getitem__(self, name):
		# resolved values are cached back into the parser so later lookups
		# find them directly
		# NOTE(review): storing None via ConfigParser.set matches the Python
		# 3.1 target (see shebang); newer configparser versions require string
		# values — confirm before upgrading the interpreter
		if not self._config.has_option(self._section, name):
			if self._section in self._default:
				if name in self._default[self._section]:
					self._config.set(self._section, name, self._default[self._section][name])
				else:
					self._config.set(self._section, name, None)
			elif name in self._default['_other']:
				self._config.set(self._section, name, self._default['_other'][name])
			else:
				self._config.set(self._section, name, None)
		return(self._config.get(self._section, name))
74:
# parse the command line and configuration file up front
config = Config()

#assert options.port or os.access(options.log, os.R_OK), 'Log file unreadable'

# response headers that describe the cached entity and get stored in the index
const_desc_fields = {'Content-Length', 'Last-Modified', 'Pragma'}
# response headers that are recognized but deliberately not recorded
const_ignore_fields = {
	'Accept-Ranges', 'Age',
	'Cache-Control', 'Connection', 'Content-Type',
	'Date',
	'Expires',
	'Server',
	'Via',
	'X-Cache', 'X-Cache-Lookup', 'X-Powered-By',
}

# granularity for streaming reads/writes while downloading and serving
block_size = 4096
91:
# NOTE(review): everything inside this triple-quoted string is dead code — an
# unfinished kqueue-based single-threaded server prototype.  As a module-level
# string literal it is evaluated and discarded at import time; it never runs.
'''
# later, kqueue would be good but later
class Connection:
	__slots__ = frozenset(('__address', '__input', '__socket', '__status', 'error', 'method', 'url', 'http_version'))

	def __init__(self, socket, address):
		self.__address = address
		self.__input = b''
		self.__socket = socket
		self.__status = 0

	def read(self, kev):
		buffer = self.__socket.recv(kev.data)
		exhausted = False
		if len(buffer) == 0:
			eof = True
		else:
			self.__input += buffer
		while not exhausted:
			if self.__status == -1:
				exhausted = True
			elif self.__status == 0:
				endstring = self.__input.find(b'\n')
				if endstring > 0:
					print('Processing request line.')
					line = self.__input[:endstring].decode('ascii')
					self.__input = self.__input[endstring + 1:]
					isRequest = re.compile('(GET) ([^ ]+) HTTP/(1\.0)').match(line)
					if not isRequest:
						self.error = 'Not a HTTP connection.'
						self.__status = -1
					else:
						self.method = isRequest.group(1)
						self.url = isRequest.group(2)
						self.http_version = isRequest.group(3)
						self.__status = 1
				else:
					exhausted = True
			elif self.__status == 1:
				endstring = self.__input.find(b'\n')
				if endstring > 0:
					print('Processing header line.' + repr(self.__input))
					line = self.__input[:endstring].decode('ascii')
					self.__input = self.__input[endstring + 1:]
					isHeader = re.compile('([^:]*): +(.*)').match(line)
					if not isHeader:
						self.error = 'Bad header.'
						return(False)
					# process header here
				elif endstring == 0:
					self.__status = 2
				else:
					exhausted = True

	def write(self, kev):
		pass

if options.port:
	import select, socket

	sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
	try:
		sock.bind(('127.0.0.1', int(options.port)))
		sock.listen(-1)

		kq = select.kqueue()
		assert kq.fileno() != -1, "Fatal error: can't initialise kqueue."

		kq.control([select.kevent(sock, select.KQ_FILTER_READ, select.KQ_EV_ADD)], 0)
		timeout = None

		connections = {sock.fileno(): None}

		while True:
			kevs = kq.control(None, 1, timeout)

			for kev in kevs:
				if type(connections[kev.ident]) == Connection:
					print(kev.ident, kev.data, kev.filter, kev.flags)
					assert kev.data != 0, 'No data available.'
					if kev.filter == select.KQ_FILTER_READ:
						connections[kev.ident].read(kev)
					elif kev.filter == select.KQ_FILTER_WRITE:
						connections[kev.ident].write(kev)
					else:
						assert kev.filter in (select.KQ_FILTER_READ, select.KQ_FILTER_WRITE), 'Do we support other filters?'
				else:
					(conn, addr) = sock.accept()
					print('Connection from ' + repr(addr))
					kq.control([select.kevent(conn, select.KQ_FILTER_READ, select.KQ_EV_ADD)], 0)
					connections[conn.fileno()] = Connection(conn, addr)

				if kev.flags >> 15 == 1:
					kq.control([select.kevent(kev.ident, select.KQ_FILTER_READ, select.KQ_EV_DELETE)], 0)
					kq.control([select.kevent(kev.ident, select.KQ_FILTER_WRITE, select.KQ_EV_DELETE)], 0)
					del(connections[kev.ident])
	finally:
		sock.close()
'''
191:
# XXX how about rechecking files?
if True:
	import http.server

	class MyRequestHandler(http.server.BaseHTTPRequestHandler):
		"""Caching-proxy request handler.

		Serves GET/HEAD from a local per-site cache directory, (re)fetching
		from the upstream named by the config 'root' option for the request's
		Host header.  Partial downloads are tracked in a shelve index as
		spacemap.SpaceMap objects.
		"""

		def __process(self):
			"""Shared GET/HEAD path: validate/refresh the cached file, then reply."""
			# reload means file needs to be reloaded to serve request
			reload = False
			# recheck means file needs to be checked, this also means that if file has been modified we can serve older copy
			recheck = False
			# file_stat means file definitely exists
			file_stat = None
			# requested_ranges holds data about any range requested
			requested_ranges = None
			# record holds data from index locally, should be written back upon successful completion
			record = None

			# strip any query string from the request path
			myPath = re.compile('^(.*?)(\?.*)$').match(self.path)
			if myPath:
				my_path = myPath.group(1)
			else:
				my_path = self.path

			# per-site settings are keyed by the Host header
			config.section(self.headers['Host'])

			# optional path rewriting: 'strip' is a regex, 'sub' its replacement
			if config['sub'] != None and config['strip'] != None and len(config['strip']) > 0:
				string = re.compile(config['strip']).sub(config['sub'], my_path)
				my_path = string

			info = 'Checking file: ' + my_path

			if not os.access(config['dir'], os.X_OK):
				os.mkdir(config['dir'])
			# this is file index - everything is stored in this file
			# _parts - list of stored parts of file
			# _time - last time the file was checked
			# everything else is just the headers
			index = shelve.open(config['dir'] + os.sep + '.index')

			desc_fields = const_desc_fields.copy()
			ignore_fields = const_ignore_fields.copy()
			# NOTE(review): config['noetag'] is the string 'no' by default,
			# which is truthy — so this condition looks inverted relative to
			# the option name; confirm intended semantics
			if not config['noetag']:
				desc_fields.add('ETag')
			else:
				ignore_fields.add('ETag')

			# request headers we silently drop rather than forward upstream
			proxy_ignored = set([
				'Accept', 'Accept-Charset', 'Accept-Encoding', 'Accept-Language',
				'Cache-Control', 'Connection', 'Content-Length', 'Cookie',
				'Host',
				'If-Modified-Since', 'If-Unmodified-Since',
				'Referer',
				'User-Agent',
				'Via',
				'X-Forwarded-For', 'X-REMOVED',
			])

			print('===============[ {} request ]==='.format(self.command))

			for header in self.headers:
				if header in proxy_ignored:
					pass
				# NOTE(review): ('Range') is a plain string, not a tuple, so
				# this is a substring test — likely meant ('Range',); the same
				# applies to the ('Pragma'), ('HEAD') and ('GET') tests below
				elif header in ('Range'):
					isRange = re.compile('bytes=(\d+)-(\d+)').match(self.headers[header])
					if isRange:
						# SpaceMap end bound is exclusive, HTTP's is inclusive
						requested_ranges = spacemap.SpaceMap({int(isRange.group(1)): int(isRange.group(2)) + 1})
					else:
						return()
				elif header in ('Pragma'):
					if my_path in index:
						index[my_path][header] = self.headers[header]
				else:
					# unknown header: bail out rather than proxy blindly
					print('Unknown header - ', header, ': ', self.headers[header], sep='')
					return()
				print(header, self.headers[header])

			# creating file name from my_path
			file_name = config['dir'] + os.sep + re.compile('%20').sub(' ', my_path)
			# partial file or unfinished download
			temp_name = config['dir'] + os.sep + '.parts' + re.compile('%20').sub(' ', my_path)

			# creating empty placeholder in index
			# if there's no space map and there's no file in real directory - we have no file
			# if there's an empty space map - file is full
			# space map generally covers every bit of file we don't possess currently
			if not my_path in index:
				info += '\nThis one is new.'
				reload = True
				record = {}
			else:
				# forcibly checking file if no file present
				if os.access(file_name, os.R_OK):
					file_stat = os.stat(file_name)
				elif '_parts' in index[my_path] and os.access(temp_name, os.R_OK):
					file_stat = os.stat(temp_name)
				else:
					info += '\nFile not found or inaccessible.'
					index[my_path]['_parts'] = None
					reload = True
				record = index[my_path]

			if not '_parts' in record:
				record['_parts'] = None

			if record['_parts'] == None:
				recheck = True

			# forcibly checking file if file size doesn't match with index data
			if not reload:
				# an empty SpaceMap means the file is supposed to be complete
				if '_parts' in record and record['_parts'] == spacemap.SpaceMap():
					if 'Content-Length' in record and file_stat and file_stat.st_size != int(record['Content-Length']):
						info += '\nFile size is {} and stored file size is {}.'.format(file_stat.st_size, record['Content-Length'])
						record['_parts'] = None
						reload = True

			# forcibly checking file if index holds Pragma header
			if not reload and 'Pragma' in record and record['Pragma'] == 'no-cache':
				info += '\nPragma on: recheck imminent.'
				recheck = True

			# skipping file processing if there's no need to recheck it and we have checked it at least 4 hours ago
			# NOTE(review): this sets recheck when _time is within the last 4
			# hours, which contradicts the comment above — confirm whether the
			# comparison is inverted
			if not recheck and not reload and '_time' in record and (datetime.datetime.now() - datetime.timedelta(hours = 4) - record['_time']).days < 0:
				recheck = True

			print(info)
			if reload or recheck:

				try:
					request = 'http://' + config['root'] + self.path
					# forward only a small whitelist of client headers upstream
					my_headers = {}
					for header in ('Cache-Control', 'Cookie', 'Referer', 'User-Agent'):
						if header in self.headers:
							my_headers[header] = self.headers[header]

					needed = None
					# XXX and if we specify full file we don't go partial?
					if requested_ranges != None:
						if '_parts' in record and record['_parts'] != None:
							# NOTE(review): config['noparts'] defaults to the
							# truthy string 'no' — this branch is effectively
							# always taken; confirm intended semantics
							if config['noparts']:
								needed = record['_parts']
							else:
								needed = record['_parts'] | requested_ranges
						elif not config['noparts']:
							needed = requested_ranges
						ranges = ()
						print('Missing ranges: {}, requested ranges: {}, needed ranges: {}.'.format(record['_parts'], requested_ranges, needed))
						if needed != None and len(needed) > 0:
							# build an upstream Range header from the needed map
							needed.rewind()
							while True:
								range = needed.pop()
								if range[0] == None:
									break
								ranges += '{}-{}'.format(range[0], range[1] - 1),
							my_headers['Range'] = 'bytes=' + ','.join(ranges)

					request = urllib.request.Request(request, headers = my_headers)

					with urllib.request.urlopen(request) as source:
						new_record = {}
						new_record['_parts'] = record['_parts']
						headers = source.info()

						# stripping unneeded headers (XXX make this inplace?)
						for header in headers:
							if header in desc_fields:
								#if header == 'Pragma' and headers[header] != 'no-cache':
								if header == 'Content-Length':
									# for partial answers the total size comes
									# from Content-Range instead
									if 'Content-Range' not in headers:
										new_record[header] = int(headers[header])
								else:
									new_record[header] = headers[header]
							elif header == 'Content-Range':
								range = re.compile('^bytes (\d+)-(\d+)/(\d+)$').match(headers[header])
								if range:
									new_record['Content-Length'] = int(range.group(3))
								else:
									assert False, 'Content-Range unrecognized.'
							elif not header in ignore_fields:
								print('Undefined header "', header, '": ', headers[header], sep='')

						# comparing headers with data found in index
						# if any header has changed (except Pragma) file is fully downloaded
						# same if we get more or less headers
						old_keys = set(record.keys())
						old_keys.discard('_time')
						old_keys.discard('Pragma')
						more_keys = set(new_record.keys()) - old_keys
						more_keys.discard('Pragma')
						less_keys = old_keys - set(new_record.keys())
						if len(more_keys) > 0:
							if not len(old_keys) == 0:
								print('More headers appear:', more_keys)
							reload = True
						elif len(less_keys) > 0:
							# NOTE(review): unlike the other branches this one
							# only logs and does not force a reload — confirm
							print('Less headers appear:', less_keys)
						else:
							for key in record.keys():
								if key[0] != '_' and key != 'Pragma' and not record[key] == new_record[key]:
									print('Header "', key, '" changed from [', record[key], '] to [', new_record[key], ']', sep='')
									print(type(record[key]), type(new_record[key]))
									reload = True

						if reload:
							# full reload: drop any cached data and start a
							# space map covering the whole file
							print('Reloading.')
							if os.access(temp_name, os.R_OK):
								os.unlink(temp_name)
							if os.access(file_name, os.R_OK):
								os.unlink(file_name)
							new_record['_parts'] = spacemap.SpaceMap({0: int(new_record['Content-Length'])})
						print(new_record)

						# downloading file or segment
						if 'Content-Length' in new_record:
							if needed == None:
								needed = new_record['_parts']
							else:
								if len(needed) > 1:
									print("Multipart requests currently not supported.")
									assert False, 'Skip this one for now.'
						else:
							assert False, 'No Content-Length or Content-Range header.'

						new_record['_time'] = datetime.datetime.now()
						if self.command not in ('HEAD'):
							# file is created at temporary location and moved in place only when download completes
							if not os.access(temp_name, os.R_OK):
								empty_name = config['dir'] + os.sep + '.tmp'
								with open(empty_name, 'w+b') as some_file:
									pass
								os.renames(empty_name, temp_name)
							temp_file = open(temp_name, 'r+b')
							needed.rewind()
							while True:
								(start, end) = needed.pop()
								if start == None:
									break
								stream_last = start
								# old_record lags one write behind so the index
								# never claims bytes that are not on disk yet
								old_record = new_record
								if end - start < block_size:
									req_block_size = end - start
								else:
									req_block_size = block_size
								buffer = source.read(req_block_size)
								length = len(buffer)
								while length > 0 and stream_last < end:
									stream_pos = stream_last + length
									assert not stream_pos > end, 'Received more data then requested: pos:{} start:{} end:{}.'.format(stream_pos, start, end)
									temp_file.seek(stream_last)
									temp_file.write(buffer)
									# mark the received span as no longer needed
									new_record['_parts'] = new_record['_parts'] - spacemap.SpaceMap({stream_last: stream_pos})
									index[my_path] = old_record
									index.sync()
									old_record = new_record
									stream_last = stream_pos
									if end - stream_last < block_size:
										req_block_size = end - stream_last
									buffer = source.read(req_block_size)
									length = len(buffer)
							# moving downloaded data to real file
							temp_file.close()

					print(new_record)
					index[my_path] = new_record
					index.sync()

				except urllib.error.HTTPError as error:
					# in case of error we don't need to do anything actually,
					# if file download stalls or fails the file would not be moved to its location
					print(error)

				# download complete (empty space map): promote the temp file
				if not os.access(file_name, os.R_OK) and os.access(temp_name, os.R_OK) and '_parts' in index[my_path] and index[my_path]['_parts'] == spacemap.SpaceMap():
					# just moving
					# drop old dirs XXX
					print('Moving temporary file to new destination.')
					os.renames(temp_name, file_name)

			# nothing known about this path even after the fetch attempt
			if not my_path in index:
				self.send_response(502)
				self.end_headers()
				return

			if self.command == 'HEAD':
				self.send_response(200)
				if 'Content-Length' in index[my_path]:
					self.send_header('Content-Length', index[my_path]['Content-Length'])
				self.send_header('Accept-Ranges', 'bytes')
				self.send_header('Content-Type', 'application/octet-stream')
				if 'Last-Modified' in index[my_path]:
					self.send_header('Last-Modified', index[my_path]['Last-Modified'])
				self.end_headers()
			else:
				# incomplete file (non-empty space map) or missing final file:
				# serve from the partial-download copy
				if ('_parts' in index[my_path] and index[my_path]['_parts'] != spacemap.SpaceMap()) or not os.access(file_name, os.R_OK):
					file_name = temp_name

				with open(file_name, 'rb') as real_file:
					file_stat = os.stat(file_name)
					if 'Range' in self.headers:
						self.send_response(206)
						ranges = ()
						requested_ranges.rewind()
						while True:
							pair = requested_ranges.pop()
							if pair[0] == None:
								break
							ranges += '{}-{}'.format(pair[0], str(pair[1] - 1)),
						self.send_header('Content-Range', 'bytes {}/{}'.format(','.join(ranges), index[my_path]['Content-Length']))
					else:
						self.send_response(200)
						self.send_header('Content-Length', str(file_stat.st_size))
						# no Range requested: serve the whole file
						requested_ranges = spacemap.SpaceMap({0: file_stat.st_size})
					if 'Last-Modified' in index[my_path]:
						self.send_header('Last-Modified', index[my_path]['Last-Modified'])
					self.send_header('Content-Type', 'application/octet-stream')
					self.end_headers()
					if self.command in ('GET'):
						if len(requested_ranges) > 0:
							requested_ranges.rewind()
							(start, end) = requested_ranges.pop()
						else:
							start = 0
							end = index[my_path]['Content-Length']
						real_file.seek(start)
						if block_size > end - start:
							req_block_size = end - start
						else:
							req_block_size = block_size
						buffer = real_file.read(req_block_size)
						length = len(buffer)
						while length > 0:
							self.wfile.write(buffer)
							start += len(buffer)
							if req_block_size > end - start:
								req_block_size = end - start
							if req_block_size == 0:
								break
							buffer = real_file.read(req_block_size)
							length = len(buffer)

		def do_HEAD(self):
			return self.__process()
		def do_GET(self):
			return self.__process()

	# single-threaded loopback server on the configured port
	config.section('general')
	server = http.server.HTTPServer(('127.0.0.1', int(config['port'])), MyRequestHandler)
	server.serve_forever()
538:
else:
	# NOTE(review): dead branch — the condition above is the literal True, so
	# this legacy batch mode never runs.  It also references names that no
	# longer exist at this scope (options, index, desc_fields, ignore_fields:
	# options now lives on the Config instance, index is opened inside
	# MyRequestHandler.__process), so it would raise NameError if reached.
	while True:
		unchecked_files = set()
		checked_files = 0

		# reading log and storing found urls for processing
		# check file mtime XXX
		with open(options.log, 'r') as log_file:
			log_line = re.compile('^[^ ]+ - - \[.*] "(GET|HEAD) (.*?)(\?.*)? HTTP/1.1" (\d+) \d+ "(.*)" "(.*)"$')
			for line in log_file:
				this_line = log_line.match(line.strip())
				if this_line:
					unchecked_files.add(this_line.group(2))

		for url in unchecked_files:
			reload = False
			recheck = False
			info = 'Checking file: ' + url

			# creating empty placeholder in index
			if not url in index:
				info += '\nThis one is new.'
				index[url] = {}
				reload = True

			# creating file name from url
			file_name = options.dir + re.compile('%20').sub(' ', url)

			# forcibly checking file if no file present
			if not reload and not os.access(file_name, os.R_OK):
				info += '\nFile not found or inaccessible.'
				reload = True

			# forcibly checking file if file size doesn't match with index data
			elif not reload and 'Content-Length' in index[url] and os.stat(file_name).st_size != int(index[url]['Content-Length']):
				info += '\nFile size is ' + os.stat(file_name).st_size + ' and stored file size is ' + index[url]['Content-Length'] + '.'
				reload = True

			# forcibly checking file if index holds Pragma header
			if not reload and 'Pragma' in index[url] and index[url]['Pragma'] == 'no-cache':
				info += '\nPragma on: recheck imminent.'
				recheck = True

			# skipping file processing if there's no need to recheck it and we have checked it at least 4 hours ago
			if not recheck and not reload and (options.noupdate or ('_time' in index[url] and (datetime.datetime.now() - datetime.timedelta(hours = 4) - index[url]['_time']).days < 0)):
				if options.verbose:
					print(info)
				continue
			else:
				print(info)

			try:
				with urllib.request.urlopen(options.root + url) as source:
					new_headers = {}
					headers = source.info()

					# stripping unneeded headers (XXX make this inplace?)
					for header in headers:
						if header in desc_fields:
							if header == 'Pragma' and headers[header] != 'no-cache':
								print('Pragma:', headers[header])
							new_headers[header] = headers[header]
						elif not header in ignore_fields:
							print('Undefined header "', header, '": ', headers[header], sep='')

					# comparing headers with data found in index
					# if any header has changed (except Pragma) file is fully downloaded
					# same if we get more or less headers
					old_keys = set(index[url].keys())
					old_keys.discard('_time')
					old_keys.discard('Pragma')
					more_keys = set(new_headers.keys()) - old_keys
					more_keys.discard('Pragma')
					less_keys = old_keys - set(new_headers.keys())
					if len(more_keys) > 0:
						if not len(old_keys) == 0:
							print('More headers appear:', more_keys)
						reload = True
					elif len(less_keys) > 0:
						print('Less headers appear:', less_keys)
					else:
						for key in index[url].keys():
							if key[0] != '_' and key != 'Pragma' and not index[url][key] == new_headers[key]:
								print('Header "', key, '" changed from [', index[url][key], '] to [', new_headers[key], ']', sep='')
								reload = True

					# downloading file
					if reload:
						if 'Content-Length' in headers:
							print('Downloading', headers['Content-Length'], 'bytes [', end='')
						else:
							print('Downloading [', end='')
						sys.stdout.flush()

						# file is created at temporary location and moved in place only when download completes
						temp_file = open(options.dir + os.sep + '.tmp', 'wb')
						buffer = source.read(block_size)
						# progress indicator: one dot per ~100Kb, megabyte
						# markers every ten dots
						megablocks = 0
						blocks = 0
						megs = 0
						while len(buffer) > 0:
							temp_file.write(buffer)
							buffer = source.read(block_size)
							blocks += 1
							if blocks > 102400/block_size:
								megablocks += 1
								if megablocks > 10:
									megablocks = megablocks - 10
									megs += 1
									print('{}Mb'.format(megs), end='')
								else:
									print('.', end='')
								blocks = blocks - 102400/block_size
							sys.stdout.flush()
						temp_file.close()
						print(']')
						os.renames(options.dir + os.sep + '.tmp', file_name)

						checked_files += 1

					# storing new time mark and storing new headers
					new_headers['_time'] = datetime.datetime.now()
					index[url] = new_headers
					index.sync()

			except urllib.error.HTTPError as error:
				# in case of error we don't need to do anything actually,
				# if file download stalls or fails the file would not be moved to its location
				print(error)

		if options.verbose:
			print('[', len(unchecked_files), '/', checked_files, ']')

		# checking if there were any files downloaded, if yes - restarting sequence
		if checked_files == 0:
			break