Lines of
samesite.py
from check-in 439e1753a4
that are changed by the sequence of edits moving toward
check-in c3db1a007e:
1: #!/usr/bin/env python3.1
2:
3: import datetime, http.cookiejar, os, sys, shelve, spacemap, re, urllib.request
4:
class Config:
	"""Command-line and config-file handling for the samesite proxy.

	Reads the config file location from the command line (-c/--config,
	default 'samesite.conf'), loads the ini-style file and normalises
	every non-'general' section: 'dir' becomes an absolute storage
	directory and 'root' defaults to the section name (upstream host).
	"""

	# BUGFIX: '_default' was previously listed in __slots__ while also
	# being assigned as a class variable below; that combination raises
	# ValueError at class-creation time, so it must not appear in slots.
	__slots__ = frozenset(['_config', '_section', 'options', 'root'])

	# fallback values used by __getitem__ when an option is missing;
	# '_other' holds defaults shared by every non-'general' section
	_default = {
		'general': {
			'port': '8008',
		},
		'_other': {
			'verbose': 'no',
			'noetag': 'no',
			'noparts': 'no',
			'strip': '',
			'sub': '',
	},}

	# function to read in config file
	def __init__(self):
		import configparser, optparse

		parser = optparse.OptionParser()
		parser.add_option('-c', '--config', dest = 'config', help = 'config file location', metavar = 'FILE', default = 'samesite.conf')
		(self.options, args) = parser.parse_args()

		assert os.access(self.options.config, os.R_OK), "Fatal error: can't read {}".format(self.options.config)

		# the directory containing the config file doubles as the data root
		configDir = re.compile('^(.*)/[^/]+$').match(self.options.config)
		if configDir:
			self.root = configDir.group(1)
		else:
			self.root = os.getcwd()

		self._config = configparser.ConfigParser()
		# BUGFIX: the original passed open() directly and leaked the file
		# descriptor; a context manager closes it deterministically.
		# (readfp is kept for the python3.1 target this file declares.)
		with open(self.options.config) as config_file:
			self._config.readfp(config_file)

		for section in self._config.sections():
			if section != 'general':
				if self._config.has_option(section, 'dir'):
					# a 'dir' of exactly "/" means: use <root>/<section>
					if re.compile('^/$').match(self._config.get(section, 'dir')):
						self._config.set(section, 'dir', self.root + os.sep + section)
					# strip one trailing slash, if any
					thisDir = re.compile('^(.*)/$').match(self._config.get(section, 'dir'))
					if thisDir:
						self._config.set(section, 'dir', thisDir.group(1))
					# relative paths are anchored at the config root
					if not re.compile('^/(.*)$').match(self._config.get(section, 'dir')):
						self._config.set(section, 'dir', self.root + os.sep + self._config.get(section, 'dir'))
				else:
					self._config.set(section, 'dir', self.root + os.sep + section)

				if not self._config.has_option(section, 'root'):
					self._config.set(section, 'root', section)

	# function to select config file section or create one
	def section(self, section):
		if not self._config.has_section(section):
			self._config.add_section(section)
		self._section = section

	# function to get config parameter, if parameter doesn't exists the default
	# value or None is substituted
	def __getitem__(self, name):
		if not self._config.has_option(self._section, name):
			if self._section in self._default:
				if name in self._default[self._section]:
					self._config.set(self._section, name, self._default[self._section][name])
				else:
					# NOTE(review): configparser expects string values;
					# storing None here may break a later get() - confirm
					self._config.set(self._section, name, None)
			elif name in self._default['_other']:
				self._config.set(self._section, name, self._default['_other'][name])
			else:
				self._config.set(self._section, name, None)
		return(self._config.get(self._section, name))
74:
config = Config()

#assert options.port or os.access(options.log, os.R_OK), 'Log file unreadable'

# response headers that describe the cached entity and are stored in the
# index record for each file (set literals: these are copied and mutated
# per-request, so they must stay plain mutable sets)
const_desc_fields = {'Content-Length', 'Last-Modified', 'Pragma'}
# response headers that are recognised but deliberately not stored
const_ignore_fields = {
	'Accept-Ranges', 'Age',
	'Cache-Control', 'Connection', 'Content-Type',
	'Date',
	'Expires',
	'Referer',
	'Server',
	'Via',
	'X-Cache', 'X-Cache-Lookup', 'X-Powered-By',
}

# chunk size for copying between upstream, disk and the client
block_size = 4096
92:
# NOTE(review): the block below is an abandoned kqueue-based server
# prototype kept disabled inside a module-level string literal; it
# references names ('options', 'Connection') that are not defined at
# module level any more, so it cannot simply be re-enabled.
'''
# later, kqueue would be good but later
class Connection:
	__slots__ = frozenset(('__address', '__input', '__socket', '__status', 'error', 'method', 'url', 'http_version'))

	def __init__(self, socket, address):
		self.__address = address
		self.__input = b''
		self.__socket = socket
		self.__status = 0

	def read(self, kev):
		buffer = self.__socket.recv(kev.data)
		exhausted = False
		if len(buffer) == 0:
			eof = True
		else:
			self.__input += buffer
		while not exhausted:
			if self.__status == -1:
				exhausted = True
			elif self.__status == 0:
				endstring = self.__input.find(b'\n')
				if endstring > 0:
					print('Processing request line.')
					line = self.__input[:endstring].decode('ascii')
					self.__input = self.__input[endstring + 1:]
					isRequest = re.compile('(GET) ([^ ]+) HTTP/(1\.0)').match(line)
					if not isRequest:
						self.error = 'Not a HTTP connection.'
						self.__status = -1
					else:
						self.method = isRequest.group(1)
						self.url = isRequest.group(2)
						self.http_version = isRequest.group(3)
						self.__status = 1
				else:
					exhausted = True
			elif self.__status == 1:
				endstring = self.__input.find(b'\n')
				if endstring > 0:
					print('Processing header line.' + repr(self.__input))
					line = self.__input[:endstring].decode('ascii')
					self.__input = self.__input[endstring + 1:]
					isHeader = re.compile('([^:]*): +(.*)').match(line)
					if not isHeader:
						self.error = 'Bad header.'
						return(False)
					# process header here
				elif endstring == 0:
					self.__status = 2
				else:
					exhausted = True

	def write(self, kev):
		pass

if options.port:
	import select, socket

	sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
	try:
		sock.bind(('127.0.0.1', int(options.port)))
		sock.listen(-1)

		kq = select.kqueue()
		assert kq.fileno() != -1, "Fatal error: can't initialise kqueue."

		kq.control([select.kevent(sock, select.KQ_FILTER_READ, select.KQ_EV_ADD)], 0)
		timeout = None

		connections = {sock.fileno(): None}

		while True:
			kevs = kq.control(None, 1, timeout)

			for kev in kevs:
				if type(connections[kev.ident]) == Connection:
					print(kev.ident, kev.data, kev.filter, kev.flags)
					assert kev.data != 0, 'No data available.'
					if kev.filter == select.KQ_FILTER_READ:
						connections[kev.ident].read(kev)
					elif kev.filter == select.KQ_FILTER_WRITE:
						connections[kev.ident].write(kev)
					else:
						assert kev.filter in (select.KQ_FILTER_READ, select.KQ_FILTER_WRITE), 'Do we support other filters?'
				else:
					(conn, addr) = sock.accept()
					print('Connection from ' + repr(addr))
					kq.control([select.kevent(conn, select.KQ_FILTER_READ, select.KQ_EV_ADD)], 0)
					connections[conn.fileno()] = Connection(conn, addr)

				if kev.flags >> 15 == 1:
					kq.control([select.kevent(kev.ident, select.KQ_FILTER_READ, select.KQ_EV_DELETE)], 0)
					kq.control([select.kevent(kev.ident, select.KQ_FILTER_WRITE, select.KQ_EV_DELETE)], 0)
					del(connections[kev.ident])
	finally:
		sock.close()
'''
192:
# XXX how about rechecking files?
if True:
	import http.server

	class MyRequestHandler(http.server.BaseHTTPRequestHandler):
		"""Caching proxy handler: serves files from the local store,
		(re)fetching whole files or byte ranges from the configured
		upstream ('root') on demand and tracking per-file metadata
		(headers, downloaded parts, last check time) in a shelve index."""

		def __process(self):
			"""Shared implementation behind do_GET/do_HEAD."""
			# reload means file needs to be reloaded to serve request
			reload = False
			# recheck means file needs to be checked, this also means that if file hav been modified we can serve older copy
			recheck = False
			# file_stat means file definitely exists
			file_stat = None
			# requested_ranges holds data about any range requested
			requested_ranges = None
			# records holds data from index locally, should be written back upon successfull completion
			record = None

			# drop any query string from the requested path
			myPath = re.compile('^(.*?)(\?.*)$').match(self.path)
			if myPath:
				my_path = myPath.group(1)
			else:
				my_path = self.path

			# per-site configuration is keyed by the Host header
			config.section(self.headers['Host'])

			if config['sub'] != None and config['strip'] != None and len(config['strip']) > 0:
				string = re.compile(config['strip']).sub(config['sub'], my_path)
				my_path = string

			info = 'Checking file: ' + my_path

			if not os.access(config['dir'], os.X_OK):
				os.mkdir(config['dir'])
			# this is file index - everything is stored in this file
			# _parts - list of stored parts of file
			# _time - last time the file was checked
			# everything else is just the headers
			index = shelve.open(config['dir'] + os.sep + '.index')

			desc_fields = const_desc_fields.copy()
			ignore_fields = const_ignore_fields.copy()
			if not config['noetag']:
				desc_fields.add('ETag')
			else:
				ignore_fields.add('ETag')

			# request headers that are never forwarded upstream
			proxy_ignored = set([
				'Accept', 'Accept-Charset', 'Accept-Encoding', 'Accept-Language',
				'Cache-Control', 'Connection', 'Content-Length', 'Cookie',
				'Host',
				'If-Modified-Since', 'If-Unmodified-Since',
				'Referer',
				'User-Agent',
				'Via',
				'X-Forwarded-For', 'X-REMOVED',
			])

			print('===============[ {} request ]==='.format(self.command))

			for header in self.headers:
				if header in proxy_ignored:
					pass
				# NOTE(review): ('Range') is a plain string, not a tuple, so
				# this is a substring test; it matches the exact 'Range'
				# header but would also match e.g. 'Ran' - confirm intent
				elif header in ('Range'):
					isRange = re.compile('bytes=(\d+)-(\d+)').match(self.headers[header])
					if isRange:
						requested_ranges = spacemap.SpaceMap({int(isRange.group(1)): int(isRange.group(2)) + 1})
					else:
						return()
				elif header in ('Pragma'):
					if my_path in index:
						# NOTE(review): shelve returns a fresh copy here, so
						# this in-place assignment is likely lost - verify
						index[my_path][header] = self.headers[header]
				else:
					print('Unknown header - ', header, ': ', self.headers[header], sep='')
					return()
				print(header, self.headers[header])

			# creating file name from my_path
			file_name = config['dir'] + os.sep + re.compile('%20').sub(' ', my_path)
			# partial file or unfinished download
			temp_name = config['dir'] + os.sep + '.parts' + re.compile('%20').sub(' ', my_path)

			# creating empty placeholder in index
			# if there's no space map and there's no file in real directory - we have no file
			# if there's an empty space map - file is full
			# space map generally covers every bit of file we don't posess currently
			if not my_path in index:
				info += '\nThis one is new.'
				reload = True
				record = {}
			else:
				# forcibly checking file if no file present
				if os.access(file_name, os.R_OK):
					info += '\nFull file found.'
					file_stat = os.stat(file_name)
				elif '_parts' in index[my_path] and os.access(temp_name, os.R_OK):
					info += '\nPartial file found.'
					file_stat = os.stat(temp_name)
				else:
					info += '\nFile not found or inaccessible.'
					# NOTE(review): mutates a copy returned by shelve; the
					# effective reset happens through 'record' below - verify
					index[my_path]['_parts'] = None
					reload = True
				record = index[my_path]

			if not '_parts' in record:
				record['_parts'] = None

			if record['_parts'] == None:
				recheck = True

			# forcibly checking file if file size doesn't match with index data
			if not reload:
				if '_parts' in record and record['_parts'] == spacemap.SpaceMap():
					if 'Content-Length' in record and file_stat and file_stat.st_size != int(record['Content-Length']):
						info += '\nFile size is {} and stored file size is {}.'.format(file_stat.st_size, record['Content-Length'])
						record['_parts'] = None
						reload = True

			# forcibly checking file if index holds Pragma header
			if not reload and 'Pragma' in record and record['Pragma'] == 'no-cache':
				info +='\nPragma on: recheck imminent.'
				recheck = True

			# skipping file processing if there's no need to recheck it and we have checked it at least 4 hours ago
			# NOTE(review): the comment and the condition disagree - a check
			# older than 4 hours sets recheck instead of skipping; confirm
			if not recheck and not reload and '_time' in record and (datetime.datetime.now() - datetime.timedelta(hours = 4) - record['_time']).days < 0:
				recheck = True

			print(info)
			if reload or recheck:

				try:
					request = 'http://' + config['root'] + self.path
					my_headers = {}
					for header in ('Cache-Control', 'Cookie', 'Referer', 'User-Agent'):
						if header in self.headers:
							my_headers[header] = self.headers[header]

					# 'needed' is the set of byte ranges we must fetch upstream
					needed = None
					if '_parts' in record and record['_parts'] != None:
						if config['noparts'] or requested_ranges == None:
							needed = record['_parts']
						else:
							needed = record['_parts'] | requested_ranges
					elif not config['noparts']:
						needed = requested_ranges
					ranges = ()
					print('Missing ranges: {}, requested ranges: {}, needed ranges: {}.'.format(record['_parts'], requested_ranges, needed))
					if needed != None and len(needed) > 0:
						needed.rewind()
						while True:
							range = needed.pop()
							if range[0] == None:
								break
							ranges += '{}-{}'.format(range[0], range[1] - 1),
						my_headers['Range'] = 'bytes=' + ','.join(ranges)

					request = urllib.request.Request(request, headers = my_headers)

					with urllib.request.urlopen(request) as source:
						new_record = {}
						new_record['_parts'] = record['_parts']
						headers = source.info()

						# stripping unneeded headers (XXX make this inplace?)
						for header in headers:
							if header in desc_fields:
								#if header == 'Pragma' and headers[header] != 'no-cache':
								if header == 'Content-Length':
									if 'Content-Range' not in headers:
										new_record[header] = int(headers[header])
								else:
									new_record[header] = headers[header]
							elif header == 'Content-Range':
								range = re.compile('^bytes (\d+)-(\d+)/(\d+)$').match(headers[header])
								if range:
									new_record['Content-Length'] = int(range.group(3))
								else:
									assert False, 'Content-Range unrecognized.'
							elif not header in ignore_fields:
								print('Undefined header "', header, '": ', headers[header], sep='')

						# comparing headers with data found in index
						# if any header has changed (except Pragma) file is fully downloaded
						# same if we get more or less headers
						old_keys = set(record.keys())
						old_keys.discard('_time')
						old_keys.discard('Pragma')
						more_keys = set(new_record.keys()) - old_keys
						more_keys.discard('Pragma')
						less_keys = old_keys - set(new_record.keys())
						if len(more_keys) > 0:
							if not len(old_keys) == 0:
								print('More headers appear:', more_keys)
							reload = True
						elif len(less_keys) > 0:
							print('Less headers appear:', less_keys)
						else:
							for key in record.keys():
								if key[0] != '_' and key != 'Pragma' and not record[key] == new_record[key]:
									print('Header "', key, '" changed from [', record[key], '] to [', new_record[key], ']', sep='')
									print(type(record[key]), type(new_record[key]))
									reload = True

						if reload:
							print('Reloading.')
							if os.access(temp_name, os.R_OK):
								os.unlink(temp_name)
							if os.access(file_name, os.R_OK):
								os.unlink(file_name)
							# full refetch: mark the whole file as missing
							new_record['_parts'] = spacemap.SpaceMap({0: int(new_record['Content-Length'])})
						print(new_record)

						# downloading file or segment
						if 'Content-Length' in new_record:
							if needed == None:
								needed = new_record['_parts']
							else:
								if len(needed) > 1:
									print("Multipart requests currently not supported.")
									assert False, 'Skip this one for now.'
						else:
							assert False, 'No Content-Length or Content-Range header.'

						new_record['_time'] = datetime.datetime.now()
						# NOTE(review): ('HEAD') is a string, so this is a
						# substring test; works for the GET/HEAD commands used
						if self.command not in ('HEAD'):
							# file is created at temporary location and moved in place only when download completes
							if not os.access(temp_name, os.R_OK):
								empty_name = config['dir'] + os.sep + '.tmp'
								with open(empty_name, 'w+b') as some_file:
									pass
								os.renames(empty_name, temp_name)
							temp_file = open(temp_name, 'r+b')
							if requested_ranges == None and needed == None:
								needed = new_record['_parts']
							needed.rewind()
							while True:
								(start, end) = needed.pop()
								if start == None:
									break
								stream_last = start
								old_record = new_record
								if end - start < block_size:
									req_block_size = end - start
								else:
									req_block_size = block_size
								buffer = source.read(req_block_size)
								length = len(buffer)
								while length > 0 and stream_last < end:
									stream_pos = stream_last + length
									assert not stream_pos > end, 'Received more data then requested: pos:{} start:{} end:{}.'.format(stream_pos, start, end)
									temp_file.seek(stream_last)
									temp_file.write(buffer)
									# shrink the missing-parts map and persist progress
									new_record['_parts'] = new_record['_parts'] - spacemap.SpaceMap({stream_last: stream_pos})
									index[my_path] = old_record
									index.sync()
									old_record = new_record
									stream_last = stream_pos
									if end - stream_last < block_size:
										req_block_size = end - stream_last
									buffer = source.read(req_block_size)
									length = len(buffer)
							# moving downloaded data to real file
							temp_file.close()

						print(new_record)
						index[my_path] = new_record
						index.sync()

				except urllib.error.HTTPError as error:
					# in case of error we don't need to do anything actually,
					# if file download stalls or fails the file would not be moved to it's location
					print(error)

			# an empty SpaceMap means no parts are missing: download complete
			if not os.access(file_name, os.R_OK) and os.access(temp_name, os.R_OK) and '_parts' in index[my_path] and index[my_path]['_parts'] == spacemap.SpaceMap():
				# just moving
				# drop old dirs XXX
				print('Moving temporary file to new destination.')
				os.renames(temp_name, file_name)

			if not my_path in index:
				self.send_response(502)
				self.end_headers()
				return

			if self.command == 'HEAD':
				self.send_response(200)
				if 'Content-Length' in index[my_path]:
					self.send_header('Content-Length', index[my_path]['Content-Length'])
				self.send_header('Accept-Ranges', 'bytes')
				self.send_header('Content-Type', 'application/octet-stream')
				if 'Last-Modified' in index[my_path]:
					self.send_header('Last-Modified', index[my_path]['Last-Modified'])
				self.end_headers()
			else:
				# serve the partial copy when the full file is absent or incomplete
				if ('_parts' in index[my_path] and index[my_path]['_parts'] != spacemap.SpaceMap()) or not os.access(file_name, os.R_OK):
					file_name = temp_name

				with open(file_name, 'rb') as real_file:
					file_stat = os.stat(file_name)
					if 'Range' in self.headers:
						self.send_response(206)
						ranges = ()
						requested_ranges.rewind()
						while True:
							pair = requested_ranges.pop()
							if pair[0] == None:
								break
							ranges += '{}-{}'.format(pair[0], str(pair[1] - 1)),
						self.send_header('Content-Range', 'bytes {}/{}'.format(','.join(ranges), index[my_path]['Content-Length']))
					else:
						self.send_response(200)
						self.send_header('Content-Length', str(file_stat.st_size))
						requested_ranges = spacemap.SpaceMap({0: file_stat.st_size})
					if 'Last-Modified' in index[my_path]:
						self.send_header('Last-Modified', index[my_path]['Last-Modified'])
					self.send_header('Content-Type', 'application/octet-stream')
					self.end_headers()
					if self.command in ('GET'):
						if len(requested_ranges) > 0:
							requested_ranges.rewind()
							(start, end) = requested_ranges.pop()
						else:
							start = 0
							end = index[my_path]['Content-Length']
						real_file.seek(start)
						if block_size > end - start:
							req_block_size = end - start
						else:
							req_block_size = block_size
						buffer = real_file.read(req_block_size)
						length = len(buffer)
						while length > 0:
							self.wfile.write(buffer)
							start += len(buffer)
							if req_block_size > end - start:
								req_block_size = end - start
							if req_block_size == 0:
								break
							buffer = real_file.read(req_block_size)
							length = len(buffer)

		def do_HEAD(self):
			return self.__process()
		def do_GET(self):
			return self.__process()

	config.section('general')
	server = http.server.HTTPServer(('127.0.0.1', int(config['port'])), MyRequestHandler)
	server.serve_forever()
541:
# NOTE(review): this branch is unreachable - the guard above is a literal
# 'if True:' - and it still references pre-rewrite globals ('options',
# 'index', 'desc_fields', 'ignore_fields') that are no longer defined at
# module level, so it would raise NameError if it ever ran. Kept verbatim.
else:
	while True:
		unchecked_files = set()
		checked_files = 0

		# reading log and storing found urls for processing
		# check file mtime XXX
		with open(options.log, 'r') as log_file:
			log_line = re.compile('^[^ ]+ - - \[.*] "(GET|HEAD) (.*?)(\?.*)? HTTP/1.1" (\d+) \d+ "(.*)" "(.*)"$')
			for line in log_file:
				this_line = log_line.match(line.strip())
				if this_line:
					unchecked_files.add(this_line.group(2))

		for url in unchecked_files:
			reload = False
			recheck = False
			info = 'Checking file: ' + url

			# creating empty placeholder in index
			if not url in index:
				info += '\nThis one is new.'
				index[url] = {}
				reload = True

			# creating file name from url
			file_name = options.dir + re.compile('%20').sub(' ', url)

			# forcibly checking file if no file present
			if not reload and not os.access(file_name, os.R_OK):
				info += '\nFile not found or inaccessible.'
				reload = True

			# forcibly checking file if file size doesn't match with index data
			elif not reload and 'Content-Length' in index[url] and os.stat(file_name).st_size != int(index[url]['Content-Length']):
				info += '\nFile size is ' + os.stat(file_name).st_size + ' and stored file size is ' + index[url]['Content-Length'] + '.'
				reload = True

			# forcibly checking file if index hods Pragma header
			if not reload and 'Pragma' in index[url] and index[url]['Pragma'] == 'no-cache':
				info +='\nPragma on: recheck imminent.'
				recheck = True

			# skipping file processing if there's no need to recheck it and we have checked it at least 4 hours ago
			if not recheck and not reload and (options.noupdate or ('_time' in index[url] and (datetime.datetime.now() - datetime.timedelta(hours = 4) - index[url]['_time']).days < 0)):
				if options.verbose:
					print(info)
				continue
			else:
				print(info)

			try:
				with urllib.request.urlopen(options.root + url) as source:
					new_headers = {}
					headers = source.info()

					# stripping unneeded headers (XXX make this inplace?)
					for header in headers:
						if header in desc_fields:
							if header == 'Pragma' and headers[header] != 'no-cache':
								print('Pragma:', headers[header])
							new_headers[header] = headers[header]
						elif not header in ignore_fields:
							print('Undefined header "', header, '": ', headers[header], sep='')

					# comparing headers with data found in index
					# if any header has changed (except Pragma) file is fully downloaded
					# same if we get more or less headers
					old_keys = set(index[url].keys())
					old_keys.discard('_time')
					old_keys.discard('Pragma')
					more_keys = set(new_headers.keys()) - old_keys
					more_keys.discard('Pragma')
					less_keys = old_keys - set(new_headers.keys())
					if len(more_keys) > 0:
						if not len(old_keys) == 0:
							print('More headers appear:', more_keys)
						reload = True
					elif len(less_keys) > 0:
						print('Less headers appear:', less_keys)
					else:
						for key in index[url].keys():
							if key[0] != '_' and key != 'Pragma' and not index[url][key] == new_headers[key]:
								print('Header "', key, '" changed from [', index[url][key], '] to [', new_headers[key], ']', sep='')
								reload = True

					# downloading file
					if reload:
						if 'Content-Length' in headers:
							print('Downloading', headers['Content-Length'], 'bytes [', end='')
						else:
							print('Downloading [', end='')
						sys.stdout.flush()

						# file is created at temporary location and moved in place only when download completes
						temp_file = open(options.dir + os.sep + '.tmp', 'wb')
						buffer = source.read(block_size)
						megablocks = 0
						blocks = 0
						megs = 0
						while len(buffer) > 0:
							temp_file.write(buffer)
							buffer = source.read(block_size)
							blocks += 1
							if blocks > 102400/block_size:
								megablocks += 1
								if megablocks > 10:
									megablocks = megablocks - 10
									megs += 1
									print('{}Mb'.format(megs), end='')
								else:
									print('.', end='')
								blocks = blocks - 102400/block_size
								sys.stdout.flush()
						temp_file.close()
						print(']')
						os.renames(options.dir + os.sep + '.tmp', file_name)

						checked_files += 1

					# storing new time mark and storing new headers
					new_headers['_time'] = datetime.datetime.now()
					index[url] = new_headers
					index.sync()

			except urllib.error.HTTPError as error:
				# in case of error we don't need to do anything actually,
				# if file download stalls or fails the file would not be moved to it's location
				print(error)

		if options.verbose:
			print('[', len(unchecked_files), '/', checked_files, ']')

		# checking if there were any files downloaded, if yes - restarting sequence
		if checked_files == 0:
			break