Dataset fields:

    id                 int32    0 .. 252k
    repo               string   length 7 .. 55
    path               string   length 4 .. 127
    func_name          string   length 1 .. 88
    original_string    string   length 75 .. 19.8k
    language           string   1 distinct value
    code               string   length 75 .. 19.8k
    code_tokens        list
    docstring          string   length 3 .. 17.3k
    docstring_tokens   list
    sha                string   length 40
    url                string   length 87 .. 242
id: 29,800
repo: binux/pyspider
path: pyspider/scheduler/scheduler.py
func_name: Scheduler._check_cronjob
language: python
code:

    def _check_cronjob(self):
        """Check projects cronjob tick, return True when a new tick is sent"""
        now = time.time()
        self._last_tick = int(self._last_tick)
        if now - self._last_tick < 1:
            return False
        self._last_tick += 1
        for project in itervalues(self.projects):
            if not project.active:
                continue
            if project.waiting_get_info:
                continue
            if int(project.min_tick) == 0:
                continue
            if self._last_tick % int(project.min_tick) != 0:
                continue
            self.on_select_task({
                'taskid': '_on_cronjob',
                'project': project.name,
                'url': 'data:,_on_cronjob',
                'status': self.taskdb.SUCCESS,
                'fetch': {
                    'save': {
                        'tick': self._last_tick,
                    },
                },
                'process': {
                    'callback': '_on_cronjob',
                },
            })
        return True

docstring: Check projects cronjob tick, return True when a new tick is sent
sha: 3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
url: https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/scheduler/scheduler.py#L419-L449
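The tick mechanism above advances an integer counter at most once per second and fires a project's cronjob whenever the counter is divisible by that project's min_tick. A minimal standalone sketch of the same logic (the class and names here are illustrative, not pyspider API):

    import time

    class TickScheduler:
        """Toy model of the _check_cronjob tick logic."""

        def __init__(self, intervals):
            self.intervals = intervals          # e.g. {'proj_a': 5, 'proj_b': 30}
            self.last_tick = int(time.time())

        def check(self):
            now = time.time()
            if now - self.last_tick < 1:        # at most one tick per second
                return []
            self.last_tick += 1
            # fire every project whose interval divides the current tick
            return [name for name, tick in self.intervals.items()
                    if tick and self.last_tick % tick == 0]

    sched = TickScheduler({'proj_a': 2, 'proj_b': 3})
    time.sleep(1.1)
    print(sched.check())  # projects due at this tick, possibly []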
id: 29,801
repo: binux/pyspider
path: pyspider/scheduler/scheduler.py
func_name: Scheduler._check_select
language: python
code:

    def _check_select(self):
        '''Select task to fetch & process'''
        while self._send_buffer:
            _task = self._send_buffer.pop()
            try:
                # use force=False here to prevent automatic send_buffer append and get exception
                self.send_task(_task, False)
            except Queue.Full:
                self._send_buffer.append(_task)
                break

        if self.out_queue.full():
            return {}

        taskids = []
        cnt = 0
        cnt_dict = dict()
        limit = self.LOOP_LIMIT

        # dynamically assign a select limit to each project, using queue size as weight
        project_weights, total_weight = dict(), 0
        for project in itervalues(self.projects):  # type:Project
            if not project.active:
                continue
            # only check project pause when selecting new tasks; cronjob and new requests still work
            if project.paused:
                continue
            if project.waiting_get_info:
                continue

            # task queue
            task_queue = project.task_queue  # type:TaskQueue
            pro_weight = task_queue.size()
            total_weight += pro_weight
            project_weights[project.name] = pro_weight

        min_project_limit = int(limit / 10.)  # ensure a minimum select limit for each project
        max_project_limit = int(limit / 3.0)  # ensure a maximum select limit for each project

        for pro_name, pro_weight in iteritems(project_weights):
            if cnt >= limit:
                break

            project = self.projects[pro_name]  # type:Project

            # task queue
            task_queue = project.task_queue
            task_queue.check_update()
            project_cnt = 0

            # calculate select limit for project
            if total_weight < 1 or pro_weight < 1:
                project_limit = min_project_limit
            else:
                project_limit = int((1.0 * pro_weight / total_weight) * limit)
                if project_limit < min_project_limit:
                    project_limit = min_project_limit
                elif project_limit > max_project_limit:
                    project_limit = max_project_limit

            # check send_buffer here: when it is not empty, out_queue may be blocked; do not send more tasks
            while cnt < limit and project_cnt < project_limit:
                taskid = task_queue.get()
                if not taskid:
                    break

                taskids.append((project.name, taskid))
                if taskid != 'on_finished':
                    project_cnt += 1
                cnt += 1

            cnt_dict[project.name] = project_cnt
            if project_cnt:
                project._selected_tasks = True
                project._send_finished_event_wait = 0

            # check and send finished event to project
            if not project_cnt and len(task_queue) == 0 and project._selected_tasks:
                # wait for self.FAIL_PAUSE_NUM steps to make sure all tasks in queue have been processed
                if project._send_finished_event_wait < self.FAIL_PAUSE_NUM:
                    project._send_finished_event_wait += 1
                else:
                    project._selected_tasks = False
                    project._send_finished_event_wait = 0

                    self._postpone_request.append({
                        'project': project.name,
                        'taskid': 'on_finished',
                        'url': 'data:,on_finished',
                        'process': {
                            'callback': 'on_finished',
                        },
                        "schedule": {
                            "age": 0,
                            "priority": 9,
                            "force_update": True,
                        },
                    })

        for project, taskid in taskids:
            self._load_put_task(project, taskid)

        return cnt_dict

docstring: Select task to fetch & process
sha: 3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
url: https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/scheduler/scheduler.py#L463-L566
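_check_select spreads the per-loop budget (LOOP_LIMIT) across projects in proportion to their queue sizes, clamped between limit/10 and limit/3 per project. The clamping logic in isolation, as a sketch (function name and output are illustrative, not pyspider API):

    def project_limits(queue_sizes, limit):
        """Split `limit` across projects proportionally to queue size,
        clamped to [limit/10, limit/3] per project."""
        total = sum(queue_sizes.values())
        lo, hi = int(limit / 10.), int(limit / 3.)
        limits = {}
        for name, weight in queue_sizes.items():
            if total < 1 or weight < 1:
                limits[name] = lo
            else:
                limits[name] = min(max(int(1.0 * weight / total * limit), lo), hi)
        return limits

    print(project_limits({'a': 90, 'b': 9, 'c': 1}, 100))
    # {'a': 33, 'b': 10, 'c': 10} -- big queues are capped, small ones floored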
id: 29,802
repo: binux/pyspider
path: pyspider/scheduler/scheduler.py
func_name: Scheduler._try_dump_cnt
language: python
code:

    def _try_dump_cnt(self):
        '''Dump counters every 60 seconds'''
        now = time.time()
        if now - self._last_dump_cnt > 60:
            self._last_dump_cnt = now
            self._dump_cnt()
            self._print_counter_log()

docstring: Dump counters every 60 seconds
sha: 3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
url: https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/scheduler/scheduler.py#L622-L628
id: 29,803
repo: binux/pyspider
path: pyspider/scheduler/scheduler.py
func_name: Scheduler._check_delete
language: python
code:

    def _check_delete(self):
        '''Check project delete'''
        now = time.time()
        for project in list(itervalues(self.projects)):
            if project.db_status != 'STOP':
                continue
            if now - project.updatetime < self.DELETE_TIME:
                continue
            if 'delete' not in self.projectdb.split_group(project.group):
                continue

            logger.warning("deleting project: %s!", project.name)
            del self.projects[project.name]
            self.taskdb.drop(project.name)
            self.projectdb.drop(project.name)
            if self.resultdb:
                self.resultdb.drop(project.name)
            for each in self._cnt.values():
                del each[project.name]

docstring: Check project delete
sha: 3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
url: https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/scheduler/scheduler.py#L630-L648
id: 29,804
repo: binux/pyspider
path: pyspider/scheduler/scheduler.py
func_name: Scheduler.quit
language: python
code:

    def quit(self):
        '''Set quit signal'''
        self._quit = True
        # stop xmlrpc server
        if hasattr(self, 'xmlrpc_server'):
            self.xmlrpc_ioloop.add_callback(self.xmlrpc_server.stop)
            self.xmlrpc_ioloop.add_callback(self.xmlrpc_ioloop.stop)

docstring: Set quit signal
sha: 3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
url: https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/scheduler/scheduler.py#L653-L659
id: 29,805
repo: binux/pyspider
path: pyspider/scheduler/scheduler.py
func_name: Scheduler.run_once
language: python
code:

    def run_once(self):
        '''Consume queues and feed tasks to fetcher, once'''
        self._update_projects()
        self._check_task_done()
        self._check_request()
        while self._check_cronjob():
            pass
        self._check_select()
        self._check_delete()
        self._try_dump_cnt()

docstring: Consume queues and feed tasks to fetcher, once
sha: 3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
url: https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/scheduler/scheduler.py#L661-L671
id: 29,806
repo: binux/pyspider
path: pyspider/scheduler/scheduler.py
func_name: Scheduler.run
language: python
code:

    def run(self):
        '''Start scheduler loop'''
        logger.info("scheduler starting...")

        while not self._quit:
            try:
                time.sleep(self.LOOP_INTERVAL)
                self.run_once()
                self._exceptions = 0
            except KeyboardInterrupt:
                break
            except Exception as e:
                logger.exception(e)
                self._exceptions += 1
                if self._exceptions > self.EXCEPTION_LIMIT:
                    break
                continue

        logger.info("scheduler exiting...")
        self._dump_cnt()

docstring: Start scheduler loop
sha: 3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
url: https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/scheduler/scheduler.py#L673-L692
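run() tolerates up to EXCEPTION_LIMIT consecutive failures before giving up, and any successful iteration resets the budget. The skeleton of that pattern, extracted as a standalone sketch (resilient_loop is an illustrative name, not pyspider API):

    import logging, time

    def resilient_loop(step, interval=0.1, exception_limit=3):
        """Run `step` repeatedly; exit after too many consecutive failures."""
        exceptions = 0
        while True:
            try:
                time.sleep(interval)
                step()
                exceptions = 0              # any success resets the budget
            except KeyboardInterrupt:
                break
            except Exception:
                logging.exception("step failed")
                exceptions += 1
                if exceptions > exception_limit:
                    break                   # persistent failure: stop the loop

    # demo: a step that always fails exits after exception_limit + 1 attempts
    resilient_loop(lambda: 1 / 0, interval=0.01, exception_limit=2)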
id: 29,807
repo: binux/pyspider
path: pyspider/scheduler/scheduler.py
func_name: Scheduler.on_new_request
language: python
code:

    def on_new_request(self, task):
        '''Called when a new request arrives'''
        task['status'] = self.taskdb.ACTIVE
        self.insert_task(task)
        self.put_task(task)

        project = task['project']
        self._cnt['5m'].event((project, 'pending'), +1)
        self._cnt['1h'].event((project, 'pending'), +1)
        self._cnt['1d'].event((project, 'pending'), +1)
        self._cnt['all'].event((project, 'pending'), +1)
        logger.info('new task %(project)s:%(taskid)s %(url)s', task)
        return task

docstring: Called when a new request arrives
sha: 3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
url: https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/scheduler/scheduler.py#L825-L837
id: 29,808
repo: binux/pyspider
path: pyspider/scheduler/scheduler.py
func_name: Scheduler.on_old_request
language: python
code:

    def on_old_request(self, task, old_task):
        '''Called when a task that has been crawled before arrives'''
        now = time.time()

        _schedule = task.get('schedule', self.default_schedule)
        old_schedule = old_task.get('schedule', {})

        if _schedule.get('force_update') and self.projects[task['project']].task_queue.is_processing(task['taskid']):
            # when a task is being processed, a modify may conflict with the running task;
            # postpone the modify until the task has finished
            logger.info('postpone modify task %(project)s:%(taskid)s %(url)s', task)
            self._postpone_request.append(task)
            return

        restart = False
        schedule_age = _schedule.get('age', self.default_schedule['age'])
        if _schedule.get('itag') and _schedule['itag'] != old_schedule.get('itag'):
            restart = True
        elif schedule_age >= 0 and schedule_age + (old_task.get('lastcrawltime', 0) or 0) < now:
            restart = True
        elif _schedule.get('force_update'):
            restart = True

        if not restart:
            logger.debug('ignore newtask %(project)s:%(taskid)s %(url)s', task)
            return

        if _schedule.get('cancel'):
            logger.info('cancel task %(project)s:%(taskid)s %(url)s', task)
            task['status'] = self.taskdb.BAD
            self.update_task(task)
            self.projects[task['project']].task_queue.delete(task['taskid'])
            return task

        task['status'] = self.taskdb.ACTIVE
        self.update_task(task)
        self.put_task(task)

        project = task['project']
        if old_task['status'] != self.taskdb.ACTIVE:
            self._cnt['5m'].event((project, 'pending'), +1)
            self._cnt['1h'].event((project, 'pending'), +1)
            self._cnt['1d'].event((project, 'pending'), +1)
        if old_task['status'] == self.taskdb.SUCCESS:
            self._cnt['all'].event((project, 'success'), -1).event((project, 'pending'), +1)
        elif old_task['status'] == self.taskdb.FAILED:
            self._cnt['all'].event((project, 'failed'), -1).event((project, 'pending'), +1)
        logger.info('restart task %(project)s:%(taskid)s %(url)s', task)
        return task

docstring: Called when a task that has been crawled before arrives
sha: 3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
url: https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/scheduler/scheduler.py#L839-L887
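The restart decision above re-crawls a known task when its itag changed, its age has expired, or force_update is set. Reduced to a pure function for clarity (should_restart is an illustrative name; the field names follow the task dicts above):

    import time

    def should_restart(schedule, old_schedule, last_crawl_time, default_age=-1):
        """Mirror of the three restart checks in on_old_request."""
        age = schedule.get('age', default_age)
        if schedule.get('itag') and schedule['itag'] != old_schedule.get('itag'):
            return True                     # content-version tag changed
        if age >= 0 and age + (last_crawl_time or 0) < time.time():
            return True                     # last crawl is older than `age` seconds
        if schedule.get('force_update'):
            return True
        return False

    print(should_restart({'age': 60}, {}, time.time() - 120))  # True: 120s > 60s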
id: 29,809
repo: binux/pyspider
path: pyspider/scheduler/scheduler.py
func_name: Scheduler.on_task_status
language: python
code:

    def on_task_status(self, task):
        '''Called when a status pack arrives'''
        try:
            procesok = task['track']['process']['ok']
            if not self.projects[task['project']].task_queue.done(task['taskid']):
                logging.error('not processing pack: %(project)s:%(taskid)s %(url)s', task)
                return None
        except KeyError as e:
            logger.error("Bad status pack: %s", e)
            return None

        if procesok:
            ret = self.on_task_done(task)
        else:
            ret = self.on_task_failed(task)

        if task['track']['fetch'].get('time'):
            self._cnt['5m_time'].event((task['project'], 'fetch_time'),
                                       task['track']['fetch']['time'])
        if task['track']['process'].get('time'):
            self._cnt['5m_time'].event((task['project'], 'process_time'),
                                       task['track']['process'].get('time'))
        self.projects[task['project']].active_tasks.appendleft((time.time(), task))
        return ret

docstring: Called when a status pack arrives
sha: 3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
url: https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/scheduler/scheduler.py#L889-L912
id: 29,810
repo: binux/pyspider
path: pyspider/scheduler/scheduler.py
func_name: Scheduler.on_task_done
language: python
code:

    def on_task_done(self, task):
        '''Called when a task is done and succeeded; called by `on_task_status`'''
        task['status'] = self.taskdb.SUCCESS
        task['lastcrawltime'] = time.time()

        if 'schedule' in task:
            if task['schedule'].get('auto_recrawl') and 'age' in task['schedule']:
                task['status'] = self.taskdb.ACTIVE
                next_exetime = task['schedule'].get('age')
                task['schedule']['exetime'] = time.time() + next_exetime
                self.put_task(task)
            else:
                del task['schedule']
        self.update_task(task)

        project = task['project']
        self._cnt['5m'].event((project, 'success'), +1)
        self._cnt['1h'].event((project, 'success'), +1)
        self._cnt['1d'].event((project, 'success'), +1)
        self._cnt['all'].event((project, 'success'), +1).event((project, 'pending'), -1)
        logger.info('task done %(project)s:%(taskid)s %(url)s', task)
        return task

docstring: Called when a task is done and succeeded; called by `on_task_status`
sha: 3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
url: https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/scheduler/scheduler.py#L914-L935
id: 29,811
repo: binux/pyspider
path: pyspider/scheduler/scheduler.py
func_name: Scheduler.on_task_failed
language: python
code:

    def on_task_failed(self, task):
        '''Called when a task has failed; called by `on_task_status`'''

        if 'schedule' not in task:
            old_task = self.taskdb.get_task(task['project'], task['taskid'], fields=['schedule'])
            if old_task is None:
                logging.error('unknown status pack: %s' % task)
                return
            task['schedule'] = old_task.get('schedule', {})

        retries = task['schedule'].get('retries', self.default_schedule['retries'])
        retried = task['schedule'].get('retried', 0)

        project_info = self.projects[task['project']]
        retry_delay = project_info.retry_delay or self.DEFAULT_RETRY_DELAY
        next_exetime = retry_delay.get(retried, retry_delay.get('', self.DEFAULT_RETRY_DELAY['']))

        if task['schedule'].get('auto_recrawl') and 'age' in task['schedule']:
            next_exetime = min(next_exetime, task['schedule'].get('age'))
        else:
            if retried >= retries:
                next_exetime = -1
            elif 'age' in task['schedule'] and next_exetime > task['schedule'].get('age'):
                next_exetime = task['schedule'].get('age')

        if next_exetime < 0:
            task['status'] = self.taskdb.FAILED
            task['lastcrawltime'] = time.time()
            self.update_task(task)

            project = task['project']
            self._cnt['5m'].event((project, 'failed'), +1)
            self._cnt['1h'].event((project, 'failed'), +1)
            self._cnt['1d'].event((project, 'failed'), +1)
            self._cnt['all'].event((project, 'failed'), +1).event((project, 'pending'), -1)
            logger.info('task failed %(project)s:%(taskid)s %(url)s' % task)
            return task
        else:
            task['schedule']['retried'] = retried + 1
            task['schedule']['exetime'] = time.time() + next_exetime
            task['lastcrawltime'] = time.time()
            self.update_task(task)
            self.put_task(task)

            project = task['project']
            self._cnt['5m'].event((project, 'retry'), +1)
            self._cnt['1h'].event((project, 'retry'), +1)
            self._cnt['1d'].event((project, 'retry'), +1)
            # self._cnt['all'].event((project, 'retry'), +1)
            logger.info('task retry %d/%d %%(project)s:%%(taskid)s %%(url)s' % (
                retried, retries), task)
            return task

docstring: Called when a task has failed; called by `on_task_status`
sha: 3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
url: https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/scheduler/scheduler.py#L937-L988
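The retry schedule is a mapping from retry count to delay in seconds, with the empty-string key acting as the catch-all default: `retry_delay.get(retried, retry_delay.get(''))` picks the delay for the current attempt. A self-contained demonstration with a delay table shaped like the scheduler's DEFAULT_RETRY_DELAY (the concrete values here are illustrative):

    # integer keys map a specific retry count to a delay; '' is the catch-all
    retry_delay = {0: 30, 1: 1 * 60 * 60, 2: 6 * 60 * 60, 3: 12 * 60 * 60, '': 24 * 60 * 60}

    for retried in range(6):
        next_exetime = retry_delay.get(retried, retry_delay.get(''))
        print(retried, next_exetime)
    # 0 -> 30s, 1 -> 1h, 2 -> 6h, 3 -> 12h, 4 and beyond -> 24h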
id: 29,812
repo: binux/pyspider
path: pyspider/scheduler/scheduler.py
func_name: Scheduler.on_select_task
language: python
code:

    def on_select_task(self, task):
        '''Called when a task is selected to fetch & process'''
        # inject information about the project
        logger.info('select %(project)s:%(taskid)s %(url)s', task)

        project_info = self.projects.get(task['project'])
        assert project_info, 'no such project'
        task['type'] = self.TASK_PACK
        task['group'] = project_info.group
        task['project_md5sum'] = project_info.md5sum
        task['project_updatetime'] = project_info.updatetime

        # lazy join project.crawl_config
        if getattr(project_info, 'crawl_config', None):
            task = BaseHandler.task_join_crawl_config(task, project_info.crawl_config)

        project_info.active_tasks.appendleft((time.time(), task))
        self.send_task(task)
        return task

docstring: Called when a task is selected to fetch & process
sha: 3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
url: https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/scheduler/scheduler.py#L990-L1008
id: 29,813
repo: binux/pyspider
path: pyspider/scheduler/scheduler.py
func_name: OneScheduler._check_select
language: python
code:

    def _check_select(self):
        """
        Interactive mode of selecting tasks
        """
        if not self.interactive:
            return super(OneScheduler, self)._check_select()

        # waiting for running tasks
        if self.running_task > 0:
            return

        is_crawled = []

        def run(project=None):
            return crawl('on_start', project=project)

        def crawl(url, project=None, **kwargs):
            """
            Crawl given url, same parameters as BaseHandler.crawl

            url - url or taskid, parameters will be used if in taskdb
            project - can be ignored if only one project exists.
            """

            # looking up the project instance
            if project is None:
                if len(self.projects) == 1:
                    project = list(self.projects.keys())[0]
                else:
                    raise LookupError('You need to specify the project: %r'
                                      % list(self.projects.keys()))
            project_data = self.processor.project_manager.get(project)
            if not project_data:
                raise LookupError('no such project: %s' % project)

            # get task package
            instance = project_data['instance']
            instance._reset()
            task = instance.crawl(url, **kwargs)
            if isinstance(task, list):
                raise Exception('url list is not allowed in interactive mode')

            # check task in taskdb
            if not kwargs:
                dbtask = self.taskdb.get_task(task['project'], task['taskid'],
                                              fields=self.request_task_fields)
                if not dbtask:
                    dbtask = self.taskdb.get_task(task['project'], task['url'],
                                                  fields=self.request_task_fields)
                if dbtask:
                    task = dbtask

            # select the task
            self.on_select_task(task)
            is_crawled.append(True)

            shell.ask_exit()

        def quit_interactive():
            '''Quit interactive mode'''
            is_crawled.append(True)
            self.interactive = False
            shell.ask_exit()

        def quit_pyspider():
            '''Close pyspider'''
            is_crawled[:] = []
            shell.ask_exit()

        shell = utils.get_python_console()
        banner = (
            'pyspider shell - Select task\n'
            'crawl(url, project=None, **kwargs) - same parameters as BaseHandler.crawl\n'
            'quit_interactive() - Quit interactive mode\n'
            'quit_pyspider() - Close pyspider'
        )
        if hasattr(shell, 'show_banner'):
            shell.show_banner(banner)
            shell.interact()
        else:
            shell.interact(banner)
        if not is_crawled:
            self.ioloop.add_callback(self.ioloop.stop)

docstring: Interactive mode of selecting tasks
sha: 3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
url: https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/scheduler/scheduler.py#L1022-L1104
id: 29,814
repo: binux/pyspider
path: pyspider/scheduler/scheduler.py
func_name: OneScheduler.on_task_status
language: python
code:

    def on_task_status(self, task):
        """Ignore "not processing" errors in interactive mode"""
        if not self.interactive:
            super(OneScheduler, self).on_task_status(task)

        try:
            procesok = task['track']['process']['ok']
        except KeyError as e:
            logger.error("Bad status pack: %s", e)
            return None

        if procesok:
            ret = self.on_task_done(task)
        else:
            ret = self.on_task_failed(task)
        if task['track']['fetch'].get('time'):
            self._cnt['5m_time'].event((task['project'], 'fetch_time'),
                                       task['track']['fetch']['time'])
        if task['track']['process'].get('time'):
            self._cnt['5m_time'].event((task['project'], 'process_time'),
                                       task['track']['process'].get('time'))
        self.projects[task['project']].active_tasks.appendleft((time.time(), task))
        return ret

docstring: Ignore "not processing" errors in interactive mode
sha: 3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
url: https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/scheduler/scheduler.py#L1112-L1134
id: 29,815
repo: binux/pyspider
path: pyspider/processor/project_module.py
func_name: ProjectManager.build_module
language: python
code:

    def build_module(project, env=None):
        '''Build project script as module'''
        from pyspider.libs import base_handler
        assert 'name' in project, 'need name of project'
        assert 'script' in project, 'need script of project'

        if env is None:
            env = {}
        # fix for old non-package version scripts
        pyspider_path = os.path.join(os.path.dirname(__file__), "..")
        if pyspider_path not in sys.path:
            sys.path.insert(1, pyspider_path)

        env = dict(env)
        env.update({
            'debug': project.get('status', 'DEBUG') == 'DEBUG',
        })

        loader = ProjectLoader(project)
        module = loader.load_module(project['name'])

        # logger inject
        module.log_buffer = []
        module.logging = module.logger = logging.Logger(project['name'])
        if env.get('enable_stdout_capture', True):
            handler = SaveLogHandler(module.log_buffer)
            handler.setFormatter(LogFormatter(color=False))
        else:
            handler = logging.StreamHandler()
            handler.setFormatter(LogFormatter(color=True))
        module.logger.addHandler(handler)

        if '__handler_cls__' not in module.__dict__:
            BaseHandler = module.__dict__.get('BaseHandler', base_handler.BaseHandler)
            for each in list(six.itervalues(module.__dict__)):
                if inspect.isclass(each) and each is not BaseHandler \
                        and issubclass(each, BaseHandler):
                    module.__dict__['__handler_cls__'] = each
        _class = module.__dict__.get('__handler_cls__')
        assert _class is not None, "need BaseHandler in project module"

        instance = _class()
        instance.__env__ = env
        instance.project_name = project['name']
        instance.project = project

        return {
            'loader': loader,
            'module': module,
            'class': _class,
            'instance': instance,
            'exception': None,
            'exception_log': '',
            'info': project,
            'load_time': time.time(),
        }

docstring: Build project script as module
sha: 3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
url: https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/processor/project_module.py#L32-L87
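build_module locates the user's handler by scanning the loaded module's namespace for a class that subclasses BaseHandler (other than BaseHandler itself). The same discovery pattern on a plain module object, as an illustrative sketch (find_handler_cls and the synthetic module are not pyspider API):

    import inspect, types

    def find_handler_cls(module, base_cls):
        """Return the first class in `module` that subclasses `base_cls`."""
        for each in list(vars(module).values()):
            if inspect.isclass(each) and each is not base_cls and issubclass(each, base_cls):
                return each
        return None

    # usage on a synthetic module
    mod = types.ModuleType('project')
    class Base(object): pass
    class Handler(Base): pass
    mod.Base, mod.Handler = Base, Handler
    print(find_handler_cls(mod, Base))  # <class '__main__.Handler'>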
id: 29,816
repo: binux/pyspider
path: pyspider/processor/project_module.py
func_name: ProjectManager._need_update
language: python
code:

    def _need_update(self, project_name, updatetime=None, md5sum=None):
        '''Check whether project_name needs an update'''
        if project_name not in self.projects:
            return True
        elif md5sum and md5sum != self.projects[project_name]['info'].get('md5sum'):
            return True
        elif updatetime and updatetime > self.projects[project_name]['info'].get('updatetime', 0):
            return True
        elif time.time() - self.projects[project_name]['load_time'] > self.RELOAD_PROJECT_INTERVAL:
            return True
        return False

docstring: Check whether project_name needs an update
sha: 3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
url: https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/processor/project_module.py#L96-L106
id: 29,817
repo: binux/pyspider
path: pyspider/processor/project_module.py
func_name: ProjectManager._check_projects
language: python
code:

    def _check_projects(self):
        '''Check projects by last update time'''
        for project in self.projectdb.check_update(self.last_check_projects,
                                                   ['name', 'updatetime']):
            if project['name'] not in self.projects:
                continue
            if project['updatetime'] > self.projects[project['name']]['info'].get('updatetime', 0):
                self._update_project(project['name'])
        self.last_check_projects = time.time()

docstring: Check projects by last update time
sha: 3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
url: https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/processor/project_module.py#L108-L116
id: 29,818
repo: binux/pyspider
path: pyspider/processor/project_module.py
func_name: ProjectManager._update_project
language: python
code:

    def _update_project(self, project_name):
        '''Update one project from database'''
        project = self.projectdb.get(project_name)
        if not project:
            return None
        return self._load_project(project)

docstring: Update one project from database
sha: 3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
url: https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/processor/project_module.py#L118-L123
id: 29,819
repo: binux/pyspider
path: pyspider/processor/project_module.py
func_name: ProjectManager._load_project
language: python
code:

    def _load_project(self, project):
        '''Load project into self.projects from project info dict'''
        try:
            project['md5sum'] = utils.md5string(project['script'])
            ret = self.build_module(project, self.env)
            self.projects[project['name']] = ret
        except Exception as e:
            logger.exception("load project %s error", project.get('name', None))
            ret = {
                'loader': None,
                'module': None,
                'class': None,
                'instance': None,
                'exception': e,
                'exception_log': traceback.format_exc(),
                'info': project,
                'load_time': time.time(),
            }
            self.projects[project['name']] = ret
            return False
        logger.debug('project: %s updated.', project.get('name', None))
        return True

docstring: Load project into self.projects from project info dict
sha: 3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
url: https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/processor/project_module.py#L125-L146
id: 29,820
repo: binux/pyspider
path: pyspider/processor/project_module.py
func_name: ProjectManager.get
language: python
code:

    def get(self, project_name, updatetime=None, md5sum=None):
        '''Get project data object; return None if it does not exist'''
        if time.time() - self.last_check_projects > self.CHECK_PROJECTS_INTERVAL:
            self._check_projects()
        if self._need_update(project_name, updatetime, md5sum):
            self._update_project(project_name)
        return self.projects.get(project_name, None)

docstring: Get project data object; return None if it does not exist
sha: 3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
url: https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/processor/project_module.py#L148-L154
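get() combines two staleness checks: a periodic sweep of all projects (_check_projects) and a per-entry check (_need_update) that also forces a reload after RELOAD_PROJECT_INTERVAL. The underlying reload-on-read cache pattern in miniature (TTLCache is an illustrative name, not pyspider API):

    import time

    class TTLCache:
        """Reload-on-read cache, similar in spirit to ProjectManager.get."""

        def __init__(self, loader, ttl=600):
            self.loader, self.ttl = loader, ttl
            self.entries = {}                     # name -> (value, load_time)

        def get(self, name):
            entry = self.entries.get(name)
            if entry is None or time.time() - entry[1] > self.ttl:
                self.entries[name] = (self.loader(name), time.time())
            return self.entries[name][0]

    cache = TTLCache(loader=lambda name: name.upper(), ttl=600)
    print(cache.get('demo'))  # 'DEMO', loaded once, then reused for 600s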
id: 29,821
repo: binux/pyspider
path: pyspider/fetcher/cookie_utils.py
func_name: MockResponse.get_all
language: python
code:

    def get_all(self, name, default=None):
        """Python 3's cookie handling calls this instead of getheaders"""
        if default is None:
            default = []
        return self._headers.get_list(name) or default

docstring: Python 3's cookie handling calls this instead of getheaders
sha: 3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
url: https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/fetcher/cookie_utils.py#L23-L27
id: 29,822
repo: binux/pyspider
path: pyspider/database/elasticsearch/taskdb.py
func_name: TaskDB.refresh
language: python
code:

    def refresh(self):
        """
        Explicitly refresh one or more indices, making all operations performed
        since the last refresh available for search.
        """
        self._changed = False
        self.es.indices.refresh(index=self.index)

docstring: Explicitly refresh one or more indices, making all operations performed since the last refresh available for search.
sha: 3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
url: https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/database/elasticsearch/taskdb.py#L119-L125
id: 29,823
repo: binux/pyspider
path: pyspider/fetcher/tornado_fetcher.py
func_name: Fetcher.send_result
language: python
code:

    def send_result(self, type, task, result):
        '''Send fetch result to processor'''
        if self.outqueue:
            try:
                self.outqueue.put((task, result))
            except Exception as e:
                logger.exception(e)

docstring: Send fetch result to processor
sha: 3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
url: https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/fetcher/tornado_fetcher.py#L108-L114
id: 29,824
repo: binux/pyspider
path: pyspider/fetcher/tornado_fetcher.py
func_name: Fetcher.async_fetch
language: python
code:

    def async_fetch(self, task, callback=None):
        '''Do one fetch'''
        url = task.get('url', 'data:,')
        if callback is None:
            callback = self.send_result

        type = 'None'
        start_time = time.time()
        try:
            if url.startswith('data:'):
                type = 'data'
                result = yield gen.maybe_future(self.data_fetch(url, task))
            elif task.get('fetch', {}).get('fetch_type') in ('js', 'phantomjs'):
                type = 'phantomjs'
                result = yield self.phantomjs_fetch(url, task)
            elif task.get('fetch', {}).get('fetch_type') in ('splash', ):
                type = 'splash'
                result = yield self.splash_fetch(url, task)
            elif task.get('fetch', {}).get('fetch_type') in ('puppeteer', ):
                type = 'puppeteer'
                result = yield self.puppeteer_fetch(url, task)
            else:
                type = 'http'
                result = yield self.http_fetch(url, task)
        except Exception as e:
            logger.exception(e)
            result = self.handle_error(type, url, task, start_time, e)

        callback(type, task, result)
        self.on_result(type, task, result)
        raise gen.Return(result)

docstring: Do one fetch
sha: 3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
url: https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/fetcher/tornado_fetcher.py#L123-L153
id: 29,825
repo: binux/pyspider
path: pyspider/fetcher/tornado_fetcher.py
func_name: Fetcher.sync_fetch
language: python
code:

    def sync_fetch(self, task):
        '''Synchronous fetch, usually used in the xmlrpc thread'''
        if not self._running:
            return self.ioloop.run_sync(functools.partial(self.async_fetch, task, lambda t, _, r: True))

        wait_result = threading.Condition()
        _result = {}

        def callback(type, task, result):
            wait_result.acquire()
            _result['type'] = type
            _result['task'] = task
            _result['result'] = result
            wait_result.notify()
            wait_result.release()

        wait_result.acquire()
        self.ioloop.add_callback(self.fetch, task, callback)
        while 'result' not in _result:
            wait_result.wait()
        wait_result.release()
        return _result['result']

docstring: Synchronous fetch, usually used in the xmlrpc thread
sha: 3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
url: https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/fetcher/tornado_fetcher.py#L155-L176
id: 29,826
repo: binux/pyspider
path: pyspider/fetcher/tornado_fetcher.py
func_name: Fetcher.data_fetch
language: python
code:

    def data_fetch(self, url, task):
        '''A fake fetcher for dataurl'''
        self.on_fetch('data', task)
        result = {}
        result['orig_url'] = url
        result['content'] = dataurl.decode(url)
        result['headers'] = {}
        result['status_code'] = 200
        result['url'] = url
        result['cookies'] = {}
        result['time'] = 0
        result['save'] = task.get('fetch', {}).get('save')
        if len(result['content']) < 70:
            logger.info("[200] %s:%s %s 0s", task.get('project'), task.get('taskid'), url)
        else:
            logger.info(
                "[200] %s:%s data:,%s...[content:%d] 0s",
                task.get('project'), task.get('taskid'),
                result['content'][:70],
                len(result['content'])
            )

        return result

docstring: A fake fetcher for dataurl
sha: 3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
url: https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/fetcher/tornado_fetcher.py#L178-L200
29,827
binux/pyspider
pyspider/fetcher/tornado_fetcher.py
Fetcher.xmlrpc_run
def xmlrpc_run(self, port=24444, bind='127.0.0.1', logRequests=False): '''Run xmlrpc server''' import umsgpack from pyspider.libs.wsgi_xmlrpc import WSGIXMLRPCApplication try: from xmlrpc.client import Binary except ImportError: from xmlrpclib import Binary application = WSGIXMLRPCApplication() application.register_function(self.quit, '_quit') application.register_function(self.size) def sync_fetch(task): result = self.sync_fetch(task) result = Binary(umsgpack.packb(result)) return result application.register_function(sync_fetch, 'fetch') def dump_counter(_time, _type): return self._cnt[_time].to_dict(_type) application.register_function(dump_counter, 'counter') import tornado.wsgi import tornado.ioloop import tornado.httpserver container = tornado.wsgi.WSGIContainer(application) self.xmlrpc_ioloop = tornado.ioloop.IOLoop() self.xmlrpc_server = tornado.httpserver.HTTPServer(container, io_loop=self.xmlrpc_ioloop) self.xmlrpc_server.listen(port=port, address=bind) logger.info('fetcher.xmlrpc listening on %s:%s', bind, port) self.xmlrpc_ioloop.start()
python
def xmlrpc_run(self, port=24444, bind='127.0.0.1', logRequests=False): '''Run xmlrpc server''' import umsgpack from pyspider.libs.wsgi_xmlrpc import WSGIXMLRPCApplication try: from xmlrpc.client import Binary except ImportError: from xmlrpclib import Binary application = WSGIXMLRPCApplication() application.register_function(self.quit, '_quit') application.register_function(self.size) def sync_fetch(task): result = self.sync_fetch(task) result = Binary(umsgpack.packb(result)) return result application.register_function(sync_fetch, 'fetch') def dump_counter(_time, _type): return self._cnt[_time].to_dict(_type) application.register_function(dump_counter, 'counter') import tornado.wsgi import tornado.ioloop import tornado.httpserver container = tornado.wsgi.WSGIContainer(application) self.xmlrpc_ioloop = tornado.ioloop.IOLoop() self.xmlrpc_server = tornado.httpserver.HTTPServer(container, io_loop=self.xmlrpc_ioloop) self.xmlrpc_server.listen(port=port, address=bind) logger.info('fetcher.xmlrpc listening on %s:%s', bind, port) self.xmlrpc_ioloop.start()
[ "def", "xmlrpc_run", "(", "self", ",", "port", "=", "24444", ",", "bind", "=", "'127.0.0.1'", ",", "logRequests", "=", "False", ")", ":", "import", "umsgpack", "from", "pyspider", ".", "libs", ".", "wsgi_xmlrpc", "import", "WSGIXMLRPCApplication", "try", ":"...
Run xmlrpc server
[ "Run", "xmlrpc", "server" ]
3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/fetcher/tornado_fetcher.py#L792-L825
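For reference, a client-side sketch for the endpoint above, assuming a fetcher is already listening on the default 127.0.0.1:24444. Per the registered `sync_fetch` wrapper, the `fetch` method returns an XML-RPC Binary whose payload is a umsgpack-packed result dict:

```python
import umsgpack
from xmlrpc.client import ServerProxy

proxy = ServerProxy('http://127.0.0.1:24444/')
packed = proxy.fetch({'taskid': 'demo', 'project': 'demo', 'url': 'data:,hello'})
result = umsgpack.unpackb(packed.data)  # Binary.data holds the packed bytes
print(result['status_code'], result['content'])  # 200 hello
```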
29,828
binux/pyspider
pyspider/fetcher/tornado_fetcher.py
Fetcher.on_result
def on_result(self, type, task, result): '''Called after task fetched''' status_code = result.get('status_code', 599) if status_code != 599: status_code = (int(status_code) // 100 * 100) self._cnt['5m'].event((task.get('project'), status_code), +1) self._cnt['1h'].event((task.get('project'), status_code), +1) if type in ('http', 'phantomjs') and result.get('time'): content_len = len(result.get('content', '')) self._cnt['5m'].event((task.get('project'), 'speed'), float(content_len) / result.get('time')) self._cnt['1h'].event((task.get('project'), 'speed'), float(content_len) / result.get('time')) self._cnt['5m'].event((task.get('project'), 'time'), result.get('time')) self._cnt['1h'].event((task.get('project'), 'time'), result.get('time'))
python
def on_result(self, type, task, result): '''Called after task fetched''' status_code = result.get('status_code', 599) if status_code != 599: status_code = (int(status_code) // 100 * 100) self._cnt['5m'].event((task.get('project'), status_code), +1) self._cnt['1h'].event((task.get('project'), status_code), +1) if type in ('http', 'phantomjs') and result.get('time'): content_len = len(result.get('content', '')) self._cnt['5m'].event((task.get('project'), 'speed'), float(content_len) / result.get('time')) self._cnt['1h'].event((task.get('project'), 'speed'), float(content_len) / result.get('time')) self._cnt['5m'].event((task.get('project'), 'time'), result.get('time')) self._cnt['1h'].event((task.get('project'), 'time'), result.get('time'))
[ "def", "on_result", "(", "self", ",", "type", ",", "task", ",", "result", ")", ":", "status_code", "=", "result", ".", "get", "(", "'status_code'", ",", "599", ")", "if", "status_code", "!=", "599", ":", "status_code", "=", "(", "int", "(", "status_cod...
Called after task fetched
[ "Called", "after", "task", "fetched" ]
3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/fetcher/tornado_fetcher.py#L831-L846
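The bucketing in on_result floors each status onto its hundreds class, so the counters track 2xx/4xx/5xx groups rather than individual codes; 599 (used by the fetcher for its own fetch errors) is kept as-is. For example:

```python
for code in (200, 206, 404, 503, 599):
    bucket = code if code == 599 else code // 100 * 100
    print(code, '->', bucket)
# 200 -> 200, 206 -> 200, 404 -> 400, 503 -> 500, 599 -> 599
```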
29,829
binux/pyspider
pyspider/libs/counter.py
CounterManager.value
def value(self, key, value=1): """Set value of a counter by counter key""" if isinstance(key, six.string_types): key = (key, ) # assert all(isinstance(k, six.string_types) for k in key) assert isinstance(key, tuple), "event key type error" if key not in self.counters: self.counters[key] = self.cls() self.counters[key].value(value) return self
python
def value(self, key, value=1): """Set value of a counter by counter key""" if isinstance(key, six.string_types): key = (key, ) # assert all(isinstance(k, six.string_types) for k in key) assert isinstance(key, tuple), "event key type error" if key not in self.counters: self.counters[key] = self.cls() self.counters[key].value(value) return self
[ "def", "value", "(", "self", ",", "key", ",", "value", "=", "1", ")", ":", "if", "isinstance", "(", "key", ",", "six", ".", "string_types", ")", ":", "key", "=", "(", "key", ",", ")", "# assert all(isinstance(k, six.string_types) for k in key)", "assert", ...
Set value of a counter by counter key
[ "Set", "value", "of", "a", "counter", "by", "counter", "key" ]
3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/libs/counter.py#L355-L364
29,830
binux/pyspider
pyspider/libs/counter.py
CounterManager.trim
def trim(self): """Clear not used counters""" for key, value in list(iteritems(self.counters)): if value.empty(): del self.counters[key]
python
def trim(self): """Clear not used counters""" for key, value in list(iteritems(self.counters)): if value.empty(): del self.counters[key]
[ "def", "trim", "(", "self", ")", ":", "for", "key", ",", "value", "in", "list", "(", "iteritems", "(", "self", ".", "counters", ")", ")", ":", "if", "value", ".", "empty", "(", ")", ":", "del", "self", ".", "counters", "[", "key", "]" ]
Clear not used counters
[ "Clear", "not", "used", "counters" ]
3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/libs/counter.py#L366-L370
29,831
binux/pyspider
pyspider/libs/counter.py
CounterManager.load
def load(self, filename): """Load counters from file""" try: with open(filename, 'rb') as fp: self.counters = cPickle.load(fp) except Exception: logging.debug("can't load counter from file: %s", filename) return False return True
python
def load(self, filename): """Load counters from file""" try: with open(filename, 'rb') as fp: self.counters = cPickle.load(fp) except Exception: logging.debug("can't load counter from file: %s", filename) return False return True
[ "def", "load", "(", "self", ",", "filename", ")", ":", "try", ":", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "fp", ":", "self", ".", "counters", "=", "cPickle", ".", "load", "(", "fp", ")", "except", ":", "logging", ".", "debug", "...
Load counters from file
[ "Load", "counters", "from", "file" ]
3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/libs/counter.py#L433-L441
29,832
binux/pyspider
pyspider/run.py
scheduler
def scheduler(ctx, xmlrpc, xmlrpc_host, xmlrpc_port, inqueue_limit, delete_time, active_tasks, loop_limit, fail_pause_num, scheduler_cls, threads, get_object=False): """ Run Scheduler, only one scheduler is allowed. """ g = ctx.obj Scheduler = load_cls(None, None, scheduler_cls) kwargs = dict(taskdb=g.taskdb, projectdb=g.projectdb, resultdb=g.resultdb, newtask_queue=g.newtask_queue, status_queue=g.status_queue, out_queue=g.scheduler2fetcher, data_path=g.get('data_path', 'data')) if threads: kwargs['threads'] = int(threads) scheduler = Scheduler(**kwargs) scheduler.INQUEUE_LIMIT = inqueue_limit scheduler.DELETE_TIME = delete_time scheduler.ACTIVE_TASKS = active_tasks scheduler.LOOP_LIMIT = loop_limit scheduler.FAIL_PAUSE_NUM = fail_pause_num g.instances.append(scheduler) if g.get('testing_mode') or get_object: return scheduler if xmlrpc: utils.run_in_thread(scheduler.xmlrpc_run, port=xmlrpc_port, bind=xmlrpc_host) scheduler.run()
python
def scheduler(ctx, xmlrpc, xmlrpc_host, xmlrpc_port, inqueue_limit, delete_time, active_tasks, loop_limit, fail_pause_num, scheduler_cls, threads, get_object=False): """ Run Scheduler, only one scheduler is allowed. """ g = ctx.obj Scheduler = load_cls(None, None, scheduler_cls) kwargs = dict(taskdb=g.taskdb, projectdb=g.projectdb, resultdb=g.resultdb, newtask_queue=g.newtask_queue, status_queue=g.status_queue, out_queue=g.scheduler2fetcher, data_path=g.get('data_path', 'data')) if threads: kwargs['threads'] = int(threads) scheduler = Scheduler(**kwargs) scheduler.INQUEUE_LIMIT = inqueue_limit scheduler.DELETE_TIME = delete_time scheduler.ACTIVE_TASKS = active_tasks scheduler.LOOP_LIMIT = loop_limit scheduler.FAIL_PAUSE_NUM = fail_pause_num g.instances.append(scheduler) if g.get('testing_mode') or get_object: return scheduler if xmlrpc: utils.run_in_thread(scheduler.xmlrpc_run, port=xmlrpc_port, bind=xmlrpc_host) scheduler.run()
[ "def", "scheduler", "(", "ctx", ",", "xmlrpc", ",", "xmlrpc_host", ",", "xmlrpc_port", ",", "inqueue_limit", ",", "delete_time", ",", "active_tasks", ",", "loop_limit", ",", "fail_pause_num", ",", "scheduler_cls", ",", "threads", ",", "get_object", "=", "False",...
Run Scheduler, only one scheduler is allowed.
[ "Run", "Scheduler", "only", "one", "scheduler", "is", "allowed", "." ]
3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/run.py#L192-L220
29,833
binux/pyspider
pyspider/run.py
fetcher
def fetcher(ctx, xmlrpc, xmlrpc_host, xmlrpc_port, poolsize, proxy, user_agent, timeout, phantomjs_endpoint, puppeteer_endpoint, splash_endpoint, fetcher_cls, async_mode=True, get_object=False, no_input=False): """ Run Fetcher. """ g = ctx.obj Fetcher = load_cls(None, None, fetcher_cls) if no_input: inqueue = None outqueue = None else: inqueue = g.scheduler2fetcher outqueue = g.fetcher2processor fetcher = Fetcher(inqueue=inqueue, outqueue=outqueue, poolsize=poolsize, proxy=proxy, async_mode=async_mode) fetcher.phantomjs_proxy = phantomjs_endpoint or g.phantomjs_proxy fetcher.puppeteer_proxy = puppeteer_endpoint or g.puppeteer_proxy fetcher.splash_endpoint = splash_endpoint if user_agent: fetcher.user_agent = user_agent if timeout: fetcher.default_options = copy.deepcopy(fetcher.default_options) fetcher.default_options['timeout'] = timeout g.instances.append(fetcher) if g.get('testing_mode') or get_object: return fetcher if xmlrpc: utils.run_in_thread(fetcher.xmlrpc_run, port=xmlrpc_port, bind=xmlrpc_host) fetcher.run()
python
def fetcher(ctx, xmlrpc, xmlrpc_host, xmlrpc_port, poolsize, proxy, user_agent, timeout, phantomjs_endpoint, puppeteer_endpoint, splash_endpoint, fetcher_cls, async_mode=True, get_object=False, no_input=False): """ Run Fetcher. """ g = ctx.obj Fetcher = load_cls(None, None, fetcher_cls) if no_input: inqueue = None outqueue = None else: inqueue = g.scheduler2fetcher outqueue = g.fetcher2processor fetcher = Fetcher(inqueue=inqueue, outqueue=outqueue, poolsize=poolsize, proxy=proxy, async_mode=async_mode) fetcher.phantomjs_proxy = phantomjs_endpoint or g.phantomjs_proxy fetcher.puppeteer_proxy = puppeteer_endpoint or g.puppeteer_proxy fetcher.splash_endpoint = splash_endpoint if user_agent: fetcher.user_agent = user_agent if timeout: fetcher.default_options = copy.deepcopy(fetcher.default_options) fetcher.default_options['timeout'] = timeout g.instances.append(fetcher) if g.get('testing_mode') or get_object: return fetcher if xmlrpc: utils.run_in_thread(fetcher.xmlrpc_run, port=xmlrpc_port, bind=xmlrpc_host) fetcher.run()
[ "def", "fetcher", "(", "ctx", ",", "xmlrpc", ",", "xmlrpc_host", ",", "xmlrpc_port", ",", "poolsize", ",", "proxy", ",", "user_agent", ",", "timeout", ",", "phantomjs_endpoint", ",", "puppeteer_endpoint", ",", "splash_endpoint", ",", "fetcher_cls", ",", "async_m...
Run Fetcher.
[ "Run", "Fetcher", "." ]
3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/run.py#L237-L269
29,834
binux/pyspider
pyspider/run.py
processor
def processor(ctx, processor_cls, process_time_limit, enable_stdout_capture=True, get_object=False): """ Run Processor. """ g = ctx.obj Processor = load_cls(None, None, processor_cls) processor = Processor(projectdb=g.projectdb, inqueue=g.fetcher2processor, status_queue=g.status_queue, newtask_queue=g.newtask_queue, result_queue=g.processor2result, enable_stdout_capture=enable_stdout_capture, process_time_limit=process_time_limit) g.instances.append(processor) if g.get('testing_mode') or get_object: return processor processor.run()
python
def processor(ctx, processor_cls, process_time_limit, enable_stdout_capture=True, get_object=False): """ Run Processor. """ g = ctx.obj Processor = load_cls(None, None, processor_cls) processor = Processor(projectdb=g.projectdb, inqueue=g.fetcher2processor, status_queue=g.status_queue, newtask_queue=g.newtask_queue, result_queue=g.processor2result, enable_stdout_capture=enable_stdout_capture, process_time_limit=process_time_limit) g.instances.append(processor) if g.get('testing_mode') or get_object: return processor processor.run()
[ "def", "processor", "(", "ctx", ",", "processor_cls", ",", "process_time_limit", ",", "enable_stdout_capture", "=", "True", ",", "get_object", "=", "False", ")", ":", "g", "=", "ctx", ".", "obj", "Processor", "=", "load_cls", "(", "None", ",", "None", ",",...
Run Processor.
[ "Run", "Processor", "." ]
3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/run.py#L277-L294
29,835
binux/pyspider
pyspider/run.py
result_worker
def result_worker(ctx, result_cls, get_object=False): """ Run result worker. """ g = ctx.obj ResultWorker = load_cls(None, None, result_cls) result_worker = ResultWorker(resultdb=g.resultdb, inqueue=g.processor2result) g.instances.append(result_worker) if g.get('testing_mode') or get_object: return result_worker result_worker.run()
python
def result_worker(ctx, result_cls, get_object=False): """ Run result worker. """ g = ctx.obj ResultWorker = load_cls(None, None, result_cls) result_worker = ResultWorker(resultdb=g.resultdb, inqueue=g.processor2result) g.instances.append(result_worker) if g.get('testing_mode') or get_object: return result_worker result_worker.run()
[ "def", "result_worker", "(", "ctx", ",", "result_cls", ",", "get_object", "=", "False", ")", ":", "g", "=", "ctx", ".", "obj", "ResultWorker", "=", "load_cls", "(", "None", ",", "None", ",", "result_cls", ")", "result_worker", "=", "ResultWorker", "(", "...
Run result worker.
[ "Run", "result", "worker", "." ]
3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/run.py#L301-L314
29,836
binux/pyspider
pyspider/run.py
phantomjs
def phantomjs(ctx, phantomjs_path, port, auto_restart, args): """ Run phantomjs fetcher if phantomjs is installed. """ args = args or ctx.default_map and ctx.default_map.get('args', []) import subprocess g = ctx.obj _quit = [] phantomjs_fetcher = os.path.join( os.path.dirname(pyspider.__file__), 'fetcher/phantomjs_fetcher.js') cmd = [phantomjs_path, # this may cause memory leak: https://github.com/ariya/phantomjs/issues/12903 #'--load-images=false', '--ssl-protocol=any', '--disk-cache=true'] + list(args or []) + [phantomjs_fetcher, str(port)] try: _phantomjs = subprocess.Popen(cmd) except OSError: logging.warning('phantomjs not found, continue running without it.') return None def quit(*args, **kwargs): _quit.append(1) _phantomjs.kill() _phantomjs.wait() logging.info('phantomjs exited.') if not g.get('phantomjs_proxy'): g['phantomjs_proxy'] = '127.0.0.1:%s' % port phantomjs = utils.ObjectDict(port=port, quit=quit) g.instances.append(phantomjs) if g.get('testing_mode'): return phantomjs while True: _phantomjs.wait() if _quit or not auto_restart: break _phantomjs = subprocess.Popen(cmd)
python
def phantomjs(ctx, phantomjs_path, port, auto_restart, args): """ Run phantomjs fetcher if phantomjs is installed. """ args = args or ctx.default_map and ctx.default_map.get('args', []) import subprocess g = ctx.obj _quit = [] phantomjs_fetcher = os.path.join( os.path.dirname(pyspider.__file__), 'fetcher/phantomjs_fetcher.js') cmd = [phantomjs_path, # this may cause memory leak: https://github.com/ariya/phantomjs/issues/12903 #'--load-images=false', '--ssl-protocol=any', '--disk-cache=true'] + list(args or []) + [phantomjs_fetcher, str(port)] try: _phantomjs = subprocess.Popen(cmd) except OSError: logging.warning('phantomjs not found, continue running without it.') return None def quit(*args, **kwargs): _quit.append(1) _phantomjs.kill() _phantomjs.wait() logging.info('phantomjs exited.') if not g.get('phantomjs_proxy'): g['phantomjs_proxy'] = '127.0.0.1:%s' % port phantomjs = utils.ObjectDict(port=port, quit=quit) g.instances.append(phantomjs) if g.get('testing_mode'): return phantomjs while True: _phantomjs.wait() if _quit or not auto_restart: break _phantomjs = subprocess.Popen(cmd)
[ "def", "phantomjs", "(", "ctx", ",", "phantomjs_path", ",", "port", ",", "auto_restart", ",", "args", ")", ":", "args", "=", "args", "or", "ctx", ".", "default_map", "and", "ctx", ".", "default_map", ".", "get", "(", "'args'", ",", "[", "]", ")", "im...
Run phantomjs fetcher if phantomjs is installed.
[ "Run", "phantomjs", "fetcher", "if", "phantomjs", "is", "installed", "." ]
3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/run.py#L402-L443
29,837
binux/pyspider
pyspider/run.py
puppeteer
def puppeteer(ctx, port, auto_restart, args): """ Run puppeteer fetcher if puppeteer is installed. """ import subprocess g = ctx.obj _quit = [] puppeteer_fetcher = os.path.join( os.path.dirname(pyspider.__file__), 'fetcher/puppeteer_fetcher.js') cmd = ['node', puppeteer_fetcher, str(port)] try: _puppeteer = subprocess.Popen(cmd) except OSError: logging.warning('puppeteer not found, continue running without it.') return None def quit(*args, **kwargs): _quit.append(1) _puppeteer.kill() _puppeteer.wait() logging.info('puppeteer exited.') if not g.get('puppeteer_proxy'): g['puppeteer_proxy'] = '127.0.0.1:%s' % port puppeteer = utils.ObjectDict(port=port, quit=quit) g.instances.append(puppeteer) if g.get('testing_mode'): return puppeteer while True: _puppeteer.wait() if _quit or not auto_restart: break _puppeteer = subprocess.Popen(cmd)
python
def puppeteer(ctx, port, auto_restart, args): """ Run puppeteer fetcher if puppeteer is installed. """ import subprocess g = ctx.obj _quit = [] puppeteer_fetcher = os.path.join( os.path.dirname(pyspider.__file__), 'fetcher/puppeteer_fetcher.js') cmd = ['node', puppeteer_fetcher, str(port)] try: _puppeteer = subprocess.Popen(cmd) except OSError: logging.warning('puppeteer not found, continue running without it.') return None def quit(*args, **kwargs): _quit.append(1) _puppeteer.kill() _puppeteer.wait() logging.info('puppeteer exited.') if not g.get('puppeteer_proxy'): g['puppeteer_proxy'] = '127.0.0.1:%s' % port puppeteer = utils.ObjectDict(port=port, quit=quit) g.instances.append(puppeteer) if g.get('testing_mode'): return puppeteer while True: _puppeteer.wait() if _quit or not auto_restart: break _puppeteer = subprocess.Popen(cmd)
[ "def", "puppeteer", "(", "ctx", ",", "port", ",", "auto_restart", ",", "args", ")", ":", "import", "subprocess", "g", "=", "ctx", ".", "obj", "_quit", "=", "[", "]", "puppeteer_fetcher", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ...
Run puppeteer fetcher if puppeteer is installed.
[ "Run", "puppeteer", "fetcher", "if", "puppeteer", "is", "installed", "." ]
3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/run.py#L450-L486
29,838
binux/pyspider
pyspider/run.py
all
def all(ctx, fetcher_num, processor_num, result_worker_num, run_in): """ Run all the components in subprocess or thread """ ctx.obj['debug'] = False g = ctx.obj # FIXME: py34 cannot run components with threads if run_in == 'subprocess' and os.name != 'nt': run_in = utils.run_in_subprocess else: run_in = utils.run_in_thread threads = [] try: # phantomjs if not g.get('phantomjs_proxy'): phantomjs_config = g.config.get('phantomjs', {}) phantomjs_config.setdefault('auto_restart', True) threads.append(run_in(ctx.invoke, phantomjs, **phantomjs_config)) time.sleep(2) if threads[-1].is_alive() and not g.get('phantomjs_proxy'): g['phantomjs_proxy'] = '127.0.0.1:%s' % phantomjs_config.get('port', 25555) # puppeteer if not g.get('puppeteer_proxy'): puppeteer_config = g.config.get('puppeteer', {}) puppeteer_config.setdefault('auto_restart', True) threads.append(run_in(ctx.invoke, puppeteer, **puppeteer_config)) time.sleep(2) if threads[-1].is_alive() and not g.get('puppeteer_proxy'): g['puppeteer_proxy'] = '127.0.0.1:%s' % puppeteer_config.get('port', 22222) # result worker result_worker_config = g.config.get('result_worker', {}) for i in range(result_worker_num): threads.append(run_in(ctx.invoke, result_worker, **result_worker_config)) # processor processor_config = g.config.get('processor', {}) for i in range(processor_num): threads.append(run_in(ctx.invoke, processor, **processor_config)) # fetcher fetcher_config = g.config.get('fetcher', {}) fetcher_config.setdefault('xmlrpc_host', '127.0.0.1') for i in range(fetcher_num): threads.append(run_in(ctx.invoke, fetcher, **fetcher_config)) # scheduler scheduler_config = g.config.get('scheduler', {}) scheduler_config.setdefault('xmlrpc_host', '127.0.0.1') threads.append(run_in(ctx.invoke, scheduler, **scheduler_config)) # running webui in main thread to make it exitable webui_config = g.config.get('webui', {}) webui_config.setdefault('scheduler_rpc', 'http://127.0.0.1:%s/' % g.config.get('scheduler', {}).get('xmlrpc_port', 23333)) ctx.invoke(webui, **webui_config) finally: # exit components run in threading for each in g.instances: each.quit() # exit components run in subprocess for each in threads: if not each.is_alive(): continue if hasattr(each, 'terminate'): each.terminate() each.join()
python
def all(ctx, fetcher_num, processor_num, result_worker_num, run_in): """ Run all the components in subprocess or thread """ ctx.obj['debug'] = False g = ctx.obj # FIXME: py34 cannot run components with threads if run_in == 'subprocess' and os.name != 'nt': run_in = utils.run_in_subprocess else: run_in = utils.run_in_thread threads = [] try: # phantomjs if not g.get('phantomjs_proxy'): phantomjs_config = g.config.get('phantomjs', {}) phantomjs_config.setdefault('auto_restart', True) threads.append(run_in(ctx.invoke, phantomjs, **phantomjs_config)) time.sleep(2) if threads[-1].is_alive() and not g.get('phantomjs_proxy'): g['phantomjs_proxy'] = '127.0.0.1:%s' % phantomjs_config.get('port', 25555) # puppeteer if not g.get('puppeteer_proxy'): puppeteer_config = g.config.get('puppeteer', {}) puppeteer_config.setdefault('auto_restart', True) threads.append(run_in(ctx.invoke, puppeteer, **puppeteer_config)) time.sleep(2) if threads[-1].is_alive() and not g.get('puppeteer_proxy'): g['puppeteer_proxy'] = '127.0.0.1:%s' % puppeteer_config.get('port', 22222) # result worker result_worker_config = g.config.get('result_worker', {}) for i in range(result_worker_num): threads.append(run_in(ctx.invoke, result_worker, **result_worker_config)) # processor processor_config = g.config.get('processor', {}) for i in range(processor_num): threads.append(run_in(ctx.invoke, processor, **processor_config)) # fetcher fetcher_config = g.config.get('fetcher', {}) fetcher_config.setdefault('xmlrpc_host', '127.0.0.1') for i in range(fetcher_num): threads.append(run_in(ctx.invoke, fetcher, **fetcher_config)) # scheduler scheduler_config = g.config.get('scheduler', {}) scheduler_config.setdefault('xmlrpc_host', '127.0.0.1') threads.append(run_in(ctx.invoke, scheduler, **scheduler_config)) # running webui in main thread to make it exitable webui_config = g.config.get('webui', {}) webui_config.setdefault('scheduler_rpc', 'http://127.0.0.1:%s/' % g.config.get('scheduler', {}).get('xmlrpc_port', 23333)) ctx.invoke(webui, **webui_config) finally: # exit components run in threading for each in g.instances: each.quit() # exit components run in subprocess for each in threads: if not each.is_alive(): continue if hasattr(each, 'terminate'): each.terminate() each.join()
[ "def", "all", "(", "ctx", ",", "fetcher_num", ",", "processor_num", ",", "result_worker_num", ",", "run_in", ")", ":", "ctx", ".", "obj", "[", "'debug'", "]", "=", "False", "g", "=", "ctx", ".", "obj", "# FIXME: py34 cannot run components with threads", "if", ...
Run all the components in subprocess or thread
[ "Run", "all", "the", "components", "in", "subprocess", "or", "thread" ]
3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/run.py#L498-L570
29,839
binux/pyspider
pyspider/run.py
one
def one(ctx, interactive, enable_phantomjs, enable_puppeteer, scripts): """ One mode not only means all-in-one: it runs everything in one process over tornado.ioloop, for debugging purposes """ ctx.obj['debug'] = False g = ctx.obj g['testing_mode'] = True if scripts: from pyspider.database.local.projectdb import ProjectDB g['projectdb'] = ProjectDB(scripts) if g.get('is_taskdb_default'): g['taskdb'] = connect_database('sqlite+taskdb://') if g.get('is_resultdb_default'): g['resultdb'] = None if enable_phantomjs: phantomjs_config = g.config.get('phantomjs', {}) phantomjs_obj = ctx.invoke(phantomjs, **phantomjs_config) if phantomjs_obj: g.setdefault('phantomjs_proxy', '127.0.0.1:%s' % phantomjs_obj.port) else: phantomjs_obj = None if enable_puppeteer: puppeteer_config = g.config.get('puppeteer', {}) puppeteer_obj = ctx.invoke(puppeteer, **puppeteer_config) if puppeteer_obj: g.setdefault('puppeteer_proxy', '127.0.0.1:%s' % puppeteer_obj.port) else: puppeteer_obj = None result_worker_config = g.config.get('result_worker', {}) if g.resultdb is None: result_worker_config.setdefault('result_cls', 'pyspider.result.OneResultWorker') result_worker_obj = ctx.invoke(result_worker, **result_worker_config) processor_config = g.config.get('processor', {}) processor_config.setdefault('enable_stdout_capture', False) processor_obj = ctx.invoke(processor, **processor_config) fetcher_config = g.config.get('fetcher', {}) fetcher_config.setdefault('xmlrpc', False) fetcher_obj = ctx.invoke(fetcher, **fetcher_config) scheduler_config = g.config.get('scheduler', {}) scheduler_config.setdefault('xmlrpc', False) scheduler_config.setdefault('scheduler_cls', 'pyspider.scheduler.OneScheduler') scheduler_obj = ctx.invoke(scheduler, **scheduler_config) scheduler_obj.init_one(ioloop=fetcher_obj.ioloop, fetcher=fetcher_obj, processor=processor_obj, result_worker=result_worker_obj, interactive=interactive) if scripts: for project in g.projectdb.projects: scheduler_obj.trigger_on_start(project) try: scheduler_obj.run() finally: scheduler_obj.quit() if phantomjs_obj: phantomjs_obj.quit() if puppeteer_obj: puppeteer_obj.quit()
python
def one(ctx, interactive, enable_phantomjs, enable_puppeteer, scripts): """ One mode not only means all-in-one: it runs everything in one process over tornado.ioloop, for debugging purposes """ ctx.obj['debug'] = False g = ctx.obj g['testing_mode'] = True if scripts: from pyspider.database.local.projectdb import ProjectDB g['projectdb'] = ProjectDB(scripts) if g.get('is_taskdb_default'): g['taskdb'] = connect_database('sqlite+taskdb://') if g.get('is_resultdb_default'): g['resultdb'] = None if enable_phantomjs: phantomjs_config = g.config.get('phantomjs', {}) phantomjs_obj = ctx.invoke(phantomjs, **phantomjs_config) if phantomjs_obj: g.setdefault('phantomjs_proxy', '127.0.0.1:%s' % phantomjs_obj.port) else: phantomjs_obj = None if enable_puppeteer: puppeteer_config = g.config.get('puppeteer', {}) puppeteer_obj = ctx.invoke(puppeteer, **puppeteer_config) if puppeteer_obj: g.setdefault('puppeteer_proxy', '127.0.0.1:%s' % puppeteer_obj.port) else: puppeteer_obj = None result_worker_config = g.config.get('result_worker', {}) if g.resultdb is None: result_worker_config.setdefault('result_cls', 'pyspider.result.OneResultWorker') result_worker_obj = ctx.invoke(result_worker, **result_worker_config) processor_config = g.config.get('processor', {}) processor_config.setdefault('enable_stdout_capture', False) processor_obj = ctx.invoke(processor, **processor_config) fetcher_config = g.config.get('fetcher', {}) fetcher_config.setdefault('xmlrpc', False) fetcher_obj = ctx.invoke(fetcher, **fetcher_config) scheduler_config = g.config.get('scheduler', {}) scheduler_config.setdefault('xmlrpc', False) scheduler_config.setdefault('scheduler_cls', 'pyspider.scheduler.OneScheduler') scheduler_obj = ctx.invoke(scheduler, **scheduler_config) scheduler_obj.init_one(ioloop=fetcher_obj.ioloop, fetcher=fetcher_obj, processor=processor_obj, result_worker=result_worker_obj, interactive=interactive) if scripts: for project in g.projectdb.projects: scheduler_obj.trigger_on_start(project) try: scheduler_obj.run() finally: scheduler_obj.quit() if phantomjs_obj: phantomjs_obj.quit() if puppeteer_obj: puppeteer_obj.quit()
[ "def", "one", "(", "ctx", ",", "interactive", ",", "enable_phantomjs", ",", "enable_puppeteer", ",", "scripts", ")", ":", "ctx", ".", "obj", "[", "'debug'", "]", "=", "False", "g", "=", "ctx", ".", "obj", "g", "[", "'testing_mode'", "]", "=", "True", ...
One mode not only means all-in-one: it runs everything in one process over tornado.ioloop, for debugging purposes
[ "One", "mode", "not", "only", "means", "all", "-", "in", "-", "one", ":", "it", "runs", "everything", "in", "one", "process", "over", "tornado", ".", "ioloop", "for", "debugging", "purposes" ]
3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/run.py#L723-L793
29,840
binux/pyspider
pyspider/run.py
send_message
def send_message(ctx, scheduler_rpc, project, message): """ Send Message to project from command line """ if isinstance(scheduler_rpc, six.string_types): scheduler_rpc = connect_rpc(ctx, None, scheduler_rpc) if scheduler_rpc is None and os.environ.get('SCHEDULER_NAME'): scheduler_rpc = connect_rpc(ctx, None, 'http://%s/' % ( os.environ['SCHEDULER_PORT_23333_TCP'][len('tcp://'):])) if scheduler_rpc is None: scheduler_rpc = connect_rpc(ctx, None, 'http://127.0.0.1:23333/') return scheduler_rpc.send_task({ 'taskid': utils.md5string('data:,on_message'), 'project': project, 'url': 'data:,on_message', 'fetch': { 'save': ('__command__', message), }, 'process': { 'callback': '_on_message', } })
python
def send_message(ctx, scheduler_rpc, project, message): """ Send Message to project from command line """ if isinstance(scheduler_rpc, six.string_types): scheduler_rpc = connect_rpc(ctx, None, scheduler_rpc) if scheduler_rpc is None and os.environ.get('SCHEDULER_NAME'): scheduler_rpc = connect_rpc(ctx, None, 'http://%s/' % ( os.environ['SCHEDULER_PORT_23333_TCP'][len('tcp://'):])) if scheduler_rpc is None: scheduler_rpc = connect_rpc(ctx, None, 'http://127.0.0.1:23333/') return scheduler_rpc.send_task({ 'taskid': utils.md5string('data:,on_message'), 'project': project, 'url': 'data:,on_message', 'fetch': { 'save': ('__command__', message), }, 'process': { 'callback': '_on_message', } })
[ "def", "send_message", "(", "ctx", ",", "scheduler_rpc", ",", "project", ",", "message", ")", ":", "if", "isinstance", "(", "scheduler_rpc", ",", "six", ".", "string_types", ")", ":", "scheduler_rpc", "=", "connect_rpc", "(", "ctx", ",", "None", ",", "sche...
Send Message to project from command line
[ "Send", "Message", "to", "project", "from", "command", "line" ]
3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/run.py#L801-L823
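On the receiving side, the task above is dispatched through the project script's `_on_message` callback. A sketch of a project-side handler, under the assumption that pyspider's BaseHandler follows its documented convention of forwarding the payload to `on_message(self, project, msg)` (the crawl target below is purely illustrative):

```python
from pyspider.libs.base_handler import BaseHandler

class Handler(BaseHandler):
    def on_message(self, project, msg):
        # `msg` is the message string passed on the command line
        self.crawl('http://example.com/search?q=%s' % msg,
                   callback=self.index_page)

    def index_page(self, response):
        return {'url': response.url}
```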
29,841
binux/pyspider
pyspider/libs/pprint.py
pformat
def pformat(object, indent=1, width=80, depth=None): """Format a Python object into a pretty-printed representation.""" return PrettyPrinter(indent=indent, width=width, depth=depth).pformat(object)
python
def pformat(object, indent=1, width=80, depth=None): """Format a Python object into a pretty-printed representation.""" return PrettyPrinter(indent=indent, width=width, depth=depth).pformat(object)
[ "def", "pformat", "(", "object", ",", "indent", "=", "1", ",", "width", "=", "80", ",", "depth", "=", "None", ")", ":", "return", "PrettyPrinter", "(", "indent", "=", "indent", ",", "width", "=", "width", ",", "depth", "=", "depth", ")", ".", "pfor...
Format a Python object into a pretty-printed representation.
[ "Format", "a", "Python", "object", "into", "a", "pretty", "-", "printed", "representation", "." ]
3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/libs/pprint.py#L61-L63
29,842
binux/pyspider
pyspider/libs/pprint.py
PrettyPrinter.format
def format(self, object, context, maxlevels, level): """Format object for a specific context, returning a string and flags indicating whether the representation is 'readable' and whether the object represents a recursive construct. """ return _safe_repr(object, context, maxlevels, level)
python
def format(self, object, context, maxlevels, level): """Format object for a specific context, returning a string and flags indicating whether the representation is 'readable' and whether the object represents a recursive construct. """ return _safe_repr(object, context, maxlevels, level)
[ "def", "format", "(", "self", ",", "object", ",", "context", ",", "maxlevels", ",", "level", ")", ":", "return", "_safe_repr", "(", "object", ",", "context", ",", "maxlevels", ",", "level", ")" ]
Format object for a specific context, returning a string and flags indicating whether the representation is 'readable' and whether the object represents a recursive construct.
[ "Format", "object", "for", "a", "specific", "context", "returning", "a", "string", "and", "flags", "indicating", "whether", "the", "representation", "is", "readable", "and", "whether", "the", "object", "represents", "a", "recursive", "construct", "." ]
3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/libs/pprint.py#L243-L248
29,843
binux/pyspider
pyspider/scheduler/token_bucket.py
Bucket.get
def get(self): '''Get the number of tokens in bucket''' now = time.time() if self.bucket >= self.burst: self.last_update = now return self.bucket bucket = self.rate * (now - self.last_update) self.mutex.acquire() if bucket > 1: self.bucket += bucket if self.bucket > self.burst: self.bucket = self.burst self.last_update = now self.mutex.release() return self.bucket
python
def get(self): '''Get the number of tokens in bucket''' now = time.time() if self.bucket >= self.burst: self.last_update = now return self.bucket bucket = self.rate * (now - self.last_update) self.mutex.acquire() if bucket > 1: self.bucket += bucket if self.bucket > self.burst: self.bucket = self.burst self.last_update = now self.mutex.release() return self.bucket
[ "def", "get", "(", "self", ")", ":", "now", "=", "time", ".", "time", "(", ")", "if", "self", ".", "bucket", ">=", "self", ".", "burst", ":", "self", ".", "last_update", "=", "now", "return", "self", ".", "bucket", "bucket", "=", "self", ".", "ra...
Get the number of tokens in bucket
[ "Get", "the", "number", "of", "tokens", "in", "bucket" ]
3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/scheduler/token_bucket.py#L33-L47
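The refill logic in Bucket.get is the classic token-bucket rule: tokens accrue at `rate` per second since the last update and are capped at `burst`. A standalone sketch of just that rule (the Bucket constructor is not shown in the record, so this does not reuse its API):

```python
import time

rate, burst = 2.0, 5.0            # tokens per second, capacity
tokens, last_update = 0.0, time.time()

def available():
    global tokens, last_update
    now = time.time()
    # accrue rate * elapsed tokens, never exceeding the burst capacity
    tokens = min(burst, tokens + rate * (now - last_update))
    last_update = now
    return tokens

time.sleep(1.0)
print(available())  # ~2.0 tokens accrued after one second
```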
29,844
binux/pyspider
tools/migrate.py
migrate
def migrate(pool, from_connection, to_connection): """ Migrate tool for pyspider """ f = connect_database(from_connection) t = connect_database(to_connection) if isinstance(f, ProjectDB): for each in f.get_all(): each = unicode_obj(each) logging.info("projectdb: %s", each['name']) t.drop(each['name']) t.insert(each['name'], each) elif isinstance(f, TaskDB): pool = Pool(pool) pool.map( lambda x, f=from_connection, t=to_connection: taskdb_migrating(x, f, t), f.projects) elif isinstance(f, ResultDB): pool = Pool(pool) pool.map( lambda x, f=from_connection, t=to_connection: resultdb_migrating(x, f, t), f.projects)
python
def migrate(pool, from_connection, to_connection): """ Migrate tool for pyspider """ f = connect_database(from_connection) t = connect_database(to_connection) if isinstance(f, ProjectDB): for each in f.get_all(): each = unicode_obj(each) logging.info("projectdb: %s", each['name']) t.drop(each['name']) t.insert(each['name'], each) elif isinstance(f, TaskDB): pool = Pool(pool) pool.map( lambda x, f=from_connection, t=to_connection: taskdb_migrating(x, f, t), f.projects) elif isinstance(f, ResultDB): pool = Pool(pool) pool.map( lambda x, f=from_connection, t=to_connection: resultdb_migrating(x, f, t), f.projects)
[ "def", "migrate", "(", "pool", ",", "from_connection", ",", "to_connection", ")", ":", "f", "=", "connect_database", "(", "from_connection", ")", "t", "=", "connect_database", "(", "to_connection", ")", "if", "isinstance", "(", "f", ",", "ProjectDB", ")", ":...
Migrate tool for pyspider
[ "Migrate", "tool", "for", "pyspider" ]
3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/tools/migrate.py#L43-L65
29,845
binux/pyspider
pyspider/libs/dataurl.py
encode
def encode(data, mime_type='', charset='utf-8', base64=True): """ Encode data to DataURL """ if isinstance(data, six.text_type): data = data.encode(charset) else: charset = None if base64: data = utils.text(b64encode(data)) else: data = utils.text(quote(data)) result = ['data:', ] if mime_type: result.append(mime_type) if charset: result.append(';charset=') result.append(charset) if base64: result.append(';base64') result.append(',') result.append(data) return ''.join(result)
python
def encode(data, mime_type='', charset='utf-8', base64=True): """ Encode data to DataURL """ if isinstance(data, six.text_type): data = data.encode(charset) else: charset = None if base64: data = utils.text(b64encode(data)) else: data = utils.text(quote(data)) result = ['data:', ] if mime_type: result.append(mime_type) if charset: result.append(';charset=') result.append(charset) if base64: result.append(';base64') result.append(',') result.append(data) return ''.join(result)
[ "def", "encode", "(", "data", ",", "mime_type", "=", "''", ",", "charset", "=", "'utf-8'", ",", "base64", "=", "True", ")", ":", "if", "isinstance", "(", "data", ",", "six", ".", "text_type", ")", ":", "data", "=", "data", ".", "encode", "(", "char...
Encode data to DataURL
[ "Encode", "data", "to", "DataURL" ]
3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/libs/dataurl.py#L14-L38
29,846
binux/pyspider
pyspider/libs/dataurl.py
decode
def decode(data_url): """ Decode DataURL data """ metadata, data = data_url.rsplit(',', 1) _, metadata = metadata.split('data:', 1) parts = metadata.split(';') if parts[-1] == 'base64': data = b64decode(data) else: data = unquote(data) for part in parts: if part.startswith("charset="): data = data.decode(part[8:]) return data
python
def decode(data_url): """ Decode DataURL data """ metadata, data = data_url.rsplit(',', 1) _, metadata = metadata.split('data:', 1) parts = metadata.split(';') if parts[-1] == 'base64': data = b64decode(data) else: data = unquote(data) for part in parts: if part.startswith("charset="): data = data.decode(part[8:]) return data
[ "def", "decode", "(", "data_url", ")", ":", "metadata", ",", "data", "=", "data_url", ".", "rsplit", "(", "','", ",", "1", ")", "_", ",", "metadata", "=", "metadata", ".", "split", "(", "'data:'", ",", "1", ")", "parts", "=", "metadata", ".", "spli...
Decode DataURL data
[ "Decode", "DataURL", "data" ]
3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/libs/dataurl.py#L41-L56
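A round trip through the two dataurl helpers above (assuming pyspider is importable): for text input, encode defaults to utf-8 plus base64, and decode reverses both steps using the `;`-separated metadata parts:

```python
from pyspider.libs import dataurl

url = dataurl.encode(u'hello, world', mime_type='text/plain')
print(url)  # data:text/plain;charset=utf-8;base64,aGVsbG8sIHdvcmxk
assert dataurl.decode(url) == u'hello, world'
```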
29,847
binux/pyspider
pyspider/libs/url.py
quote_chinese
def quote_chinese(url, encoding="utf-8"): """Quote non-ascii characters""" if isinstance(url, six.text_type): return quote_chinese(url.encode(encoding)) if six.PY3: res = [six.int2byte(b).decode('latin-1') if b < 128 else '%%%02X' % b for b in url] else: res = [b if ord(b) < 128 else '%%%02X' % ord(b) for b in url] return "".join(res)
python
def quote_chinese(url, encoding="utf-8"): """Quote non-ascii characters""" if isinstance(url, six.text_type): return quote_chinese(url.encode(encoding)) if six.PY3: res = [six.int2byte(b).decode('latin-1') if b < 128 else '%%%02X' % b for b in url] else: res = [b if ord(b) < 128 else '%%%02X' % ord(b) for b in url] return "".join(res)
[ "def", "quote_chinese", "(", "url", ",", "encodeing", "=", "\"utf-8\"", ")", ":", "if", "isinstance", "(", "url", ",", "six", ".", "text_type", ")", ":", "return", "quote_chinese", "(", "url", ".", "encode", "(", "encodeing", ")", ")", "if", "six", "."...
Quote non-ascii characters
[ "Quote", "non", "-", "ascii", "characters" ]
3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/libs/url.py#L62-L70
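Usage sketch for quote_chinese: only bytes >= 0x80 are percent-encoded, so ASCII (including reserved URL characters such as `/` and `:`) passes through untouched:

```python
from pyspider.libs.url import quote_chinese

print(quote_chinese(u'http://example.com/中文'))
# http://example.com/%E4%B8%AD%E6%96%87
```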
29,848
lanpa/tensorboardX
examples/demo_caffe2.py
DownloadResource
def DownloadResource(url, path): '''Downloads resources from s3 by url and unzips them to the provided path''' import requests from six import BytesIO import zipfile print("Downloading... {} to {}".format(url, path)) r = requests.get(url, stream=True) z = zipfile.ZipFile(BytesIO(r.content)) z.extractall(path) print("Completed download and extraction.")
python
def DownloadResource(url, path): '''Downloads resources from s3 by url and unzips them to the provided path''' import requests from six import BytesIO import zipfile print("Downloading... {} to {}".format(url, path)) r = requests.get(url, stream=True) z = zipfile.ZipFile(BytesIO(r.content)) z.extractall(path) print("Completed download and extraction.")
[ "def", "DownloadResource", "(", "url", ",", "path", ")", ":", "import", "requests", "from", "six", "import", "BytesIO", "import", "zipfile", "print", "(", "\"Downloading... {} to {}\"", ".", "format", "(", "url", ",", "path", ")", ")", "r", "=", "requests", ...
Downloads resources from s3 by url and unzips them to the provided path
[ "Downloads", "resources", "from", "s3", "by", "url", "and", "unzips", "them", "to", "the", "provided", "path" ]
0bf6c07d97b0745654fd9fab8ee3261ec707f253
https://github.com/lanpa/tensorboardX/blob/0bf6c07d97b0745654fd9fab8ee3261ec707f253/examples/demo_caffe2.py#L28-L37
29,849
lanpa/tensorboardX
examples/demo_caffe2.py
AddAccuracy
def AddAccuracy(model, softmax, label): """Adds an accuracy op to the model""" accuracy = brew.accuracy(model, [softmax, label], "accuracy") return accuracy
python
def AddAccuracy(model, softmax, label): """Adds an accuracy op to the model""" accuracy = brew.accuracy(model, [softmax, label], "accuracy") return accuracy
[ "def", "AddAccuracy", "(", "model", ",", "softmax", ",", "label", ")", ":", "accuracy", "=", "brew", ".", "accuracy", "(", "model", ",", "[", "softmax", ",", "label", "]", ",", "\"accuracy\"", ")", "return", "accuracy" ]
Adds an accuracy op to the model
[ "Adds", "an", "accuracy", "op", "to", "the", "model" ]
0bf6c07d97b0745654fd9fab8ee3261ec707f253
https://github.com/lanpa/tensorboardX/blob/0bf6c07d97b0745654fd9fab8ee3261ec707f253/examples/demo_caffe2.py#L130-L133
29,850
lanpa/tensorboardX
examples/demo_caffe2.py
AddTrainingOperators
def AddTrainingOperators(model, softmax, label): """Adds training operators to the model.""" xent = model.LabelCrossEntropy([softmax, label], 'xent') # compute the expected loss loss = model.AveragedLoss(xent, "loss") # track the accuracy of the model AddAccuracy(model, softmax, label) # use the average loss we just computed to add gradient operators to the # model model.AddGradientOperators([loss]) # do a simple stochastic gradient descent ITER = brew.iter(model, "iter") # set the learning rate schedule LR = model.LearningRate( ITER, "LR", base_lr=-0.1, policy="step", stepsize=1, gamma=0.999) # ONE is a constant value that is used in the gradient update. We only need # to create it once, so it is explicitly placed in param_init_net. ONE = model.param_init_net.ConstantFill([], "ONE", shape=[1], value=1.0) # Now, for each parameter, we do the gradient updates. for param in model.params: # Note how we get the gradient of each parameter - ModelHelper keeps # track of that. param_grad = model.param_to_grad[param] # The update is a simple weighted sum: param = param + param_grad * LR model.WeightedSum([param, ONE, param_grad, LR], param)
python
def AddTrainingOperators(model, softmax, label): """Adds training operators to the model.""" xent = model.LabelCrossEntropy([softmax, label], 'xent') # compute the expected loss loss = model.AveragedLoss(xent, "loss") # track the accuracy of the model AddAccuracy(model, softmax, label) # use the average loss we just computed to add gradient operators to the # model model.AddGradientOperators([loss]) # do a simple stochastic gradient descent ITER = brew.iter(model, "iter") # set the learning rate schedule LR = model.LearningRate( ITER, "LR", base_lr=-0.1, policy="step", stepsize=1, gamma=0.999) # ONE is a constant value that is used in the gradient update. We only need # to create it once, so it is explicitly placed in param_init_net. ONE = model.param_init_net.ConstantFill([], "ONE", shape=[1], value=1.0) # Now, for each parameter, we do the gradient updates. for param in model.params: # Note how we get the gradient of each parameter - ModelHelper keeps # track of that. param_grad = model.param_to_grad[param] # The update is a simple weighted sum: param = param + param_grad * LR model.WeightedSum([param, ONE, param_grad, LR], param)
[ "def", "AddTrainingOperators", "(", "model", ",", "softmax", ",", "label", ")", ":", "xent", "=", "model", ".", "LabelCrossEntropy", "(", "[", "softmax", ",", "label", "]", ",", "'xent'", ")", "# compute the expected loss", "loss", "=", "model", ".", "Averag...
Adds training operators to the model.
[ "Adds", "training", "operators", "to", "the", "model", "." ]
0bf6c07d97b0745654fd9fab8ee3261ec707f253
https://github.com/lanpa/tensorboardX/blob/0bf6c07d97b0745654fd9fab8ee3261ec707f253/examples/demo_caffe2.py#L136-L160
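The per-parameter update in AddTrainingOperators is plain SGD written as a weighted sum: the minus sign lives in the negative base learning rate (base_lr=-0.1), and the step policy (stepsize=1, gamma=0.999) decays the rate geometrically per iteration, i.e.

```latex
\theta \leftarrow \theta + \eta_t \, \nabla_\theta \mathcal{L},
\qquad \eta_t = -0.1 \times 0.999^{\,t}
```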
29,851
lanpa/tensorboardX
examples/demo_caffe2.py
AddBookkeepingOperators
def AddBookkeepingOperators(model): """This adds a few bookkeeping operators that we can inspect later. These operators do not affect the training procedure: they only collect statistics and print them to file or to logs. """ # Print basically prints out the content of the blob. to_file=1 routes the # printed output to a file. The file is going to be stored under # root_folder/[blob name] model.Print('accuracy', [], to_file=1) model.Print('loss', [], to_file=1) # Summarizes the parameters. Different from Print, Summarize gives some # statistics of the parameter, such as mean, std, min and max. for param in model.params: model.Summarize(param, [], to_file=1) model.Summarize(model.param_to_grad[param], [], to_file=1)
python
def AddBookkeepingOperators(model): """This adds a few bookkeeping operators that we can inspect later. These operators do not affect the training procedure: they only collect statistics and print them to file or to logs. """ # Print basically prints out the content of the blob. to_file=1 routes the # printed output to a file. The file is going to be stored under # root_folder/[blob name] model.Print('accuracy', [], to_file=1) model.Print('loss', [], to_file=1) # Summarizes the parameters. Different from Print, Summarize gives some # statistics of the parameter, such as mean, std, min and max. for param in model.params: model.Summarize(param, [], to_file=1) model.Summarize(model.param_to_grad[param], [], to_file=1)
[ "def", "AddBookkeepingOperators", "(", "model", ")", ":", "# Print basically prints out the content of the blob. to_file=1 routes the", "# printed output to a file. The file is going to be stored under", "# root_folder/[blob name]", "model", ".", "Print", "(", "'accuracy'", ",", "[...
This adds a few bookkeeping operators that we can inspect later. These operators do not affect the training procedure: they only collect statistics and print them to file or to logs.
[ "This", "adds", "a", "few", "bookkeeping", "operators", "that", "we", "can", "inspect", "later", "." ]
0bf6c07d97b0745654fd9fab8ee3261ec707f253
https://github.com/lanpa/tensorboardX/blob/0bf6c07d97b0745654fd9fab8ee3261ec707f253/examples/demo_caffe2.py#L163-L178
29,852
lanpa/tensorboardX
examples/chainer/plain_logger/net.py
VAE.get_loss_func
def get_loss_func(self, C=1.0, k=1): """Get loss function of VAE. The loss value is equal to ELBO (Evidence Lower Bound) multiplied by -1. Args: C (int): Usually this is 1.0. Can be changed to control the second term of ELBO bound, which works as regularization. k (int): Number of Monte Carlo samples used in encoded vector. """ def lf(x): mu, ln_var = self.encode(x) batchsize = len(mu.data) # reconstruction loss rec_loss = 0 for l in six.moves.range(k): z = F.gaussian(mu, ln_var) rec_loss += F.bernoulli_nll(x, self.decode(z, sigmoid=False)) \ / (k * batchsize) self.rec_loss = rec_loss self.loss = self.rec_loss + \ C * gaussian_kl_divergence(mu, ln_var) / batchsize return self.loss return lf
python
def get_loss_func(self, C=1.0, k=1): """Get loss function of VAE. The loss value is equal to ELBO (Evidence Lower Bound) multiplied by -1. Args: C (int): Usually this is 1.0. Can be changed to control the second term of ELBO bound, which works as regularization. k (int): Number of Monte Carlo samples used in encoded vector. """ def lf(x): mu, ln_var = self.encode(x) batchsize = len(mu.data) # reconstruction loss rec_loss = 0 for l in six.moves.range(k): z = F.gaussian(mu, ln_var) rec_loss += F.bernoulli_nll(x, self.decode(z, sigmoid=False)) \ / (k * batchsize) self.rec_loss = rec_loss self.loss = self.rec_loss + \ C * gaussian_kl_divergence(mu, ln_var) / batchsize return self.loss return lf
[ "def", "get_loss_func", "(", "self", ",", "C", "=", "1.0", ",", "k", "=", "1", ")", ":", "def", "lf", "(", "x", ")", ":", "mu", ",", "ln_var", "=", "self", ".", "encode", "(", "x", ")", "batchsize", "=", "len", "(", "mu", ".", "data", ")", ...
Get loss function of VAE. The loss value is equal to ELBO (Evidence Lower Bound) multiplied by -1. Args: C (int): Usually this is 1.0. Can be changed to control the second term of ELBO bound, which works as regularization. k (int): Number of Monte Carlo samples used in encoded vector.
[ "Get", "loss", "function", "of", "VAE", "." ]
0bf6c07d97b0745654fd9fab8ee3261ec707f253
https://github.com/lanpa/tensorboardX/blob/0bf6c07d97b0745654fd9fab8ee3261ec707f253/examples/chainer/plain_logger/net.py#L41-L65
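Written out, the loss returned by lf above is a k-sample Monte Carlo estimate of the negative ELBO, averaged over the batch (B = batchsize), with C weighting the KL term:

```latex
\mathcal{L}(x) = \frac{1}{kB} \sum_{l=1}^{k}
  \mathrm{NLL}_{\mathrm{Bern}}\!\big(x \mid \mathrm{decode}(z^{(l)})\big)
  + \frac{C}{B}\, D_{\mathrm{KL}}\!\big(\mathcal{N}(\mu,\sigma^{2}) \,\|\, \mathcal{N}(0, I)\big),
\qquad z^{(l)} \sim \mathcal{N}(\mu, \sigma^{2})
```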
29,853
keras-rl/keras-rl
rl/core.py
Processor.process_step
def process_step(self, observation, reward, done, info): """Processes an entire step by applying the processor to the observation, reward, and info arguments. # Arguments observation (object): An observation as obtained by the environment. reward (float): A reward as obtained by the environment. done (boolean): `True` if the environment is in a terminal state, `False` otherwise. info (dict): The debug info dictionary as obtained by the environment. # Returns The tuple (observation, reward, done, info) with all elements after being processed. """ observation = self.process_observation(observation) reward = self.process_reward(reward) info = self.process_info(info) return observation, reward, done, info
python
def process_step(self, observation, reward, done, info): """Processes an entire step by applying the processor to the observation, reward, and info arguments. # Arguments observation (object): An observation as obtained by the environment. reward (float): A reward as obtained by the environment. done (boolean): `True` if the environment is in a terminal state, `False` otherwise. info (dict): The debug info dictionary as obtained by the environment. # Returns The tuple (observation, reward, done, info) with all elements after being processed. """ observation = self.process_observation(observation) reward = self.process_reward(reward) info = self.process_info(info) return observation, reward, done, info
[ "def", "process_step", "(", "self", ",", "observation", ",", "reward", ",", "done", ",", "info", ")", ":", "observation", "=", "self", ".", "process_observation", "(", "observation", ")", "reward", "=", "self", ".", "process_reward", "(", "reward", ")", "i...
Processes an entire step by applying the processor to the observation, reward, and info arguments. # Arguments observation (object): An observation as obtained by the environment. reward (float): A reward as obtained by the environment. done (boolean): `True` if the environment is in a terminal state, `False` otherwise. info (dict): The debug info dictionary as obtained by the environment. # Returns The tuple (observation, reward, done, info) with all elements after being processed.
[ "Processes", "an", "entire", "step", "by", "applying", "the", "processor", "to", "the", "observation", "reward", "and", "info", "arguments", "." ]
e6efb0d8297ec38d704a3110b5d6ed74d09a05e3
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/core.py#L511-L526
29,854
keras-rl/keras-rl
rl/policy.py
LinearAnnealedPolicy.get_current_value
def get_current_value(self): """Return current annealing value # Returns Value to use in annealing """ if self.agent.training: # Linear annealed: f(x) = ax + b. a = -float(self.value_max - self.value_min) / float(self.nb_steps) b = float(self.value_max) value = max(self.value_min, a * float(self.agent.step) + b) else: value = self.value_test return value
python
def get_current_value(self): """Return current annealing value # Returns Value to use in annealing """ if self.agent.training: # Linear annealed: f(x) = ax + b. a = -float(self.value_max - self.value_min) / float(self.nb_steps) b = float(self.value_max) value = max(self.value_min, a * float(self.agent.step) + b) else: value = self.value_test return value
[ "def", "get_current_value", "(", "self", ")", ":", "if", "self", ".", "agent", ".", "training", ":", "# Linear annealed: f(x) = ax + b.", "a", "=", "-", "float", "(", "self", ".", "value_max", "-", "self", ".", "value_min", ")", "/", "float", "(", "self", ...
Return current annealing value # Returns Value to use in annealing
[ "Return", "current", "annealing", "value" ]
e6efb0d8297ec38d704a3110b5d6ed74d09a05e3
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/policy.py#L62-L75
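The schedule in get_current_value is a straight line from value_max down to value_min over nb_steps, clamped at value_min afterwards. A standalone sketch (not the keras-rl API) with a worked example:

```python
def current_value(step, value_max=1.0, value_min=0.1, nb_steps=10000):
    # f(step) = a * step + b, clamped at value_min
    a = -(value_max - value_min) / float(nb_steps)
    return max(value_min, a * step + value_max)

print(current_value(0))      # 1.0
print(current_value(5000))   # ~0.55 (halfway down the ramp)
print(current_value(20000))  # 0.1  (clamped after nb_steps)
```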
29,855
keras-rl/keras-rl
rl/policy.py
LinearAnnealedPolicy.select_action
def select_action(self, **kwargs): """Choose an action to perform # Returns Action to take (int) """ setattr(self.inner_policy, self.attr, self.get_current_value()) return self.inner_policy.select_action(**kwargs)
python
def select_action(self, **kwargs): """Choose an action to perform # Returns Action to take (int) """ setattr(self.inner_policy, self.attr, self.get_current_value()) return self.inner_policy.select_action(**kwargs)
[ "def", "select_action", "(", "self", ",", "*", "*", "kwargs", ")", ":", "setattr", "(", "self", ".", "inner_policy", ",", "self", ".", "attr", ",", "self", ".", "get_current_value", "(", ")", ")", "return", "self", ".", "inner_policy", ".", "select_actio...
Choose an action to perform # Returns Action to take (int)
[ "Choose", "an", "action", "to", "perform" ]
e6efb0d8297ec38d704a3110b5d6ed74d09a05e3
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/policy.py#L77-L84
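In practice the wrapper is pointed at one named attribute of an inner policy, as in keras-rl's DQN examples; the hyperparameter values below are illustrative:

from rl.policy import LinearAnnealedPolicy, EpsGreedyQPolicy

# Anneal the inner policy's `eps` from 1.0 to 0.1 over 10000 training steps.
policy = LinearAnnealedPolicy(EpsGreedyQPolicy(), attr='eps',
                              value_max=1., value_min=.1,
                              value_test=.05, nb_steps=10000)
# Each select_action() call first writes the current annealed value onto
# inner_policy.eps, then delegates the actual action choice.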
29,856
keras-rl/keras-rl
rl/policy.py
LinearAnnealedPolicy.get_config
def get_config(self): """Return configurations of LinearAnnealedPolicy # Returns Dict of config """ config = super(LinearAnnealedPolicy, self).get_config() config['attr'] = self.attr config['value_max'] = self.value_max config['value_min'] = self.value_min config['value_test'] = self.value_test config['nb_steps'] = self.nb_steps config['inner_policy'] = get_object_config(self.inner_policy) return config
python
def get_config(self): """Return configurations of LinearAnnealedPolicy # Returns Dict of config """ config = super(LinearAnnealedPolicy, self).get_config() config['attr'] = self.attr config['value_max'] = self.value_max config['value_min'] = self.value_min config['value_test'] = self.value_test config['nb_steps'] = self.nb_steps config['inner_policy'] = get_object_config(self.inner_policy) return config
[ "def", "get_config", "(", "self", ")", ":", "config", "=", "super", "(", "LinearAnnealedPolicy", ",", "self", ")", ".", "get_config", "(", ")", "config", "[", "'attr'", "]", "=", "self", ".", "attr", "config", "[", "'value_max'", "]", "=", "self", ".",...
Return configurations of LinearAnnealedPolicy # Returns Dict of config
[ "Return", "configurations", "of", "LinearAnnealedPolicy" ]
e6efb0d8297ec38d704a3110b5d6ed74d09a05e3
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/policy.py#L105-L118
29,857
keras-rl/keras-rl
rl/policy.py
EpsGreedyQPolicy.get_config
def get_config(self): """Return configurations of EpsGreedyQPolicy # Returns Dict of config """ config = super(EpsGreedyQPolicy, self).get_config() config['eps'] = self.eps return config
python
def get_config(self): """Return configurations of EpsGreedyQPolicy # Returns Dict of config """ config = super(EpsGreedyQPolicy, self).get_config() config['eps'] = self.eps return config
[ "def", "get_config", "(", "self", ")", ":", "config", "=", "super", "(", "EpsGreedyQPolicy", ",", "self", ")", ".", "get_config", "(", ")", "config", "[", "'eps'", "]", "=", "self", ".", "eps", "return", "config" ]
Return configurations of EpsGreedyQPolicy # Returns Dict of config
[ "Return", "configurations", "of", "EpsGreedyQPolicy" ]
e6efb0d8297ec38d704a3110b5d6ed74d09a05e3
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/policy.py#L171-L179
29,858
keras-rl/keras-rl
rl/policy.py
BoltzmannQPolicy.get_config
def get_config(self): """Return configurations of BoltzmannQPolicy # Returns Dict of config """ config = super(BoltzmannQPolicy, self).get_config() config['tau'] = self.tau config['clip'] = self.clip return config
python
def get_config(self): """Return configurations of BoltzmannQPolicy # Returns Dict of config """ config = super(BoltzmannQPolicy, self).get_config() config['tau'] = self.tau config['clip'] = self.clip return config
[ "def", "get_config", "(", "self", ")", ":", "config", "=", "super", "(", "BoltzmannQPolicy", ",", "self", ")", ".", "get_config", "(", ")", "config", "[", "'tau'", "]", "=", "self", ".", "tau", "config", "[", "'clip'", "]", "=", "self", ".", "clip", ...
Return configurations of BoltzmannQPolicy # Returns Dict of config
[ "Return", "configurations", "of", "BoltzmannQPolicy" ]
e6efb0d8297ec38d704a3110b5d6ed74d09a05e3
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/policy.py#L230-L239
29,859
keras-rl/keras-rl
rl/policy.py
MaxBoltzmannQPolicy.get_config
def get_config(self): """Return configurations of MaxBoltzmannQPolicy # Returns Dict of config """ config = super(MaxBoltzmannQPolicy, self).get_config() config['eps'] = self.eps config['tau'] = self.tau config['clip'] = self.clip return config
python
def get_config(self): """Return configurations of MaxBoltzmannQPolicy # Returns Dict of config """ config = super(MaxBoltzmannQPolicy, self).get_config() config['eps'] = self.eps config['tau'] = self.tau config['clip'] = self.clip return config
[ "def", "get_config", "(", "self", ")", ":", "config", "=", "super", "(", "MaxBoltzmannQPolicy", ",", "self", ")", ".", "get_config", "(", ")", "config", "[", "'eps'", "]", "=", "self", ".", "eps", "config", "[", "'tau'", "]", "=", "self", ".", "tau",...
Return configurations of MaxBoltzmannQPolicy # Returns Dict of config
[ "Return", "configurations", "of", "MaxBoltzmannQPolicy" ]
e6efb0d8297ec38d704a3110b5d6ed74d09a05e3
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/policy.py#L280-L290
29,860
keras-rl/keras-rl
rl/policy.py
BoltzmannGumbelQPolicy.get_config
def get_config(self): """Return configurations of BoltzmannGumbelQPolicy # Returns Dict of config """ config = super(BoltzmannGumbelQPolicy, self).get_config() config['C'] = self.C return config
python
def get_config(self): """Return configurations of BoltzmannGumbelQPolicy # Returns Dict of config """ config = super(BoltzmannGumbelQPolicy, self).get_config() config['C'] = self.C return config
[ "def", "get_config", "(", "self", ")", ":", "config", "=", "super", "(", "BoltzmannGumbelQPolicy", ",", "self", ")", ".", "get_config", "(", ")", "config", "[", "'C'", "]", "=", "self", ".", "C", "return", "config" ]
Return configurations of BoltzmannGumbelQPolicy # Returns Dict of config
[ "Return", "configurations", "of", "BoltzmannGumbelQPolicy" ]
e6efb0d8297ec38d704a3110b5d6ed74d09a05e3
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/policy.py#L348-L356
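The get_config() methods above all follow one pattern: extend the parent's dict with the subclass's own hyperparameters so a policy can be serialized and rebuilt. A round-trip sketch, assuming the base Policy.get_config() contributes no extra keys:

from rl.policy import BoltzmannQPolicy

policy = BoltzmannQPolicy(tau=0.5)
config = policy.get_config()        # {'tau': 0.5, 'clip': (-500.0, 500.0)}
restored = BoltzmannQPolicy(**config)
assert restored.tau == policy.tau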
29,861
keras-rl/keras-rl
rl/callbacks.py
CallbackList._set_env
def _set_env(self, env): """ Set environment for each callback in callbackList """ for callback in self.callbacks: if callable(getattr(callback, '_set_env', None)): callback._set_env(env)
python
def _set_env(self, env): """ Set environment for each callback in callbackList """ for callback in self.callbacks: if callable(getattr(callback, '_set_env', None)): callback._set_env(env)
[ "def", "_set_env", "(", "self", ",", "env", ")", ":", "for", "callback", "in", "self", ".", "callbacks", ":", "if", "callable", "(", "getattr", "(", "callback", ",", "'_set_env'", ",", "None", ")", ")", ":", "callback", ".", "_set_env", "(", "env", "...
Set environment for each callback in callbackList
[ "Set", "environment", "for", "each", "callback", "in", "callbackList" ]
e6efb0d8297ec38d704a3110b5d6ed74d09a05e3
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/callbacks.py#L45-L49
29,862
keras-rl/keras-rl
rl/callbacks.py
CallbackList.on_episode_begin
def on_episode_begin(self, episode, logs={}): """ Called at beginning of each episode for each callback in callbackList""" for callback in self.callbacks: # Check if callback supports the more appropriate `on_episode_begin` callback. # If not, fall back to `on_epoch_begin` to be compatible with built-in Keras callbacks. if callable(getattr(callback, 'on_episode_begin', None)): callback.on_episode_begin(episode, logs=logs) else: callback.on_epoch_begin(episode, logs=logs)
python
def on_episode_begin(self, episode, logs={}): """ Called at beginning of each episode for each callback in callbackList""" for callback in self.callbacks: # Check if callback supports the more appropriate `on_episode_begin` callback. # If not, fall back to `on_epoch_begin` to be compatible with built-in Keras callbacks. if callable(getattr(callback, 'on_episode_begin', None)): callback.on_episode_begin(episode, logs=logs) else: callback.on_epoch_begin(episode, logs=logs)
[ "def", "on_episode_begin", "(", "self", ",", "episode", ",", "logs", "=", "{", "}", ")", ":", "for", "callback", "in", "self", ".", "callbacks", ":", "# Check if callback supports the more appropriate `on_episode_begin` callback.", "# If not, fall back to `on_epoch_begin` t...
Called at beginning of each episode for each callback in callbackList
[ "Called", "at", "beginning", "of", "each", "episode", "for", "each", "callback", "in", "callbackList" ]
e6efb0d8297ec38d704a3110b5d6ed74d09a05e3
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/callbacks.py#L51-L59
29,863
keras-rl/keras-rl
rl/callbacks.py
CallbackList.on_episode_end
def on_episode_end(self, episode, logs={}): """ Called at end of each episode for each callback in callbackList""" for callback in self.callbacks: # Check if callback supports the more appropriate `on_episode_end` callback. # If not, fall back to `on_epoch_end` to be compatible with built-in Keras callbacks. if callable(getattr(callback, 'on_episode_end', None)): callback.on_episode_end(episode, logs=logs) else: callback.on_epoch_end(episode, logs=logs)
python
def on_episode_end(self, episode, logs={}): """ Called at end of each episode for each callback in callbackList""" for callback in self.callbacks: # Check if callback supports the more appropriate `on_episode_end` callback. # If not, fall back to `on_epoch_end` to be compatible with built-in Keras callbacks. if callable(getattr(callback, 'on_episode_end', None)): callback.on_episode_end(episode, logs=logs) else: callback.on_epoch_end(episode, logs=logs)
[ "def", "on_episode_end", "(", "self", ",", "episode", ",", "logs", "=", "{", "}", ")", ":", "for", "callback", "in", "self", ".", "callbacks", ":", "# Check if callback supports the more appropriate `on_episode_end` callback.", "# If not, fall back to `on_epoch_end` to be c...
Called at end of each episode for each callback in callbackList
[ "Called", "at", "end", "of", "each", "episode", "for", "each", "callback", "in", "callbackList" ]
e6efb0d8297ec38d704a3110b5d6ed74d09a05e3
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/callbacks.py#L61-L69
29,864
keras-rl/keras-rl
rl/callbacks.py
CallbackList.on_step_begin
def on_step_begin(self, step, logs={}): """ Called at beginning of each step for each callback in callbackList""" for callback in self.callbacks: # Check if callback supports the more appropriate `on_step_begin` callback. # If not, fall back to `on_batch_begin` to be compatible with built-in Keras callbacks. if callable(getattr(callback, 'on_step_begin', None)): callback.on_step_begin(step, logs=logs) else: callback.on_batch_begin(step, logs=logs)
python
def on_step_begin(self, step, logs={}): """ Called at beginning of each step for each callback in callbackList""" for callback in self.callbacks: # Check if callback supports the more appropriate `on_step_begin` callback. # If not, fall back to `on_batch_begin` to be compatible with built-in Keras callbacks. if callable(getattr(callback, 'on_step_begin', None)): callback.on_step_begin(step, logs=logs) else: callback.on_batch_begin(step, logs=logs)
[ "def", "on_step_begin", "(", "self", ",", "step", ",", "logs", "=", "{", "}", ")", ":", "for", "callback", "in", "self", ".", "callbacks", ":", "# Check if callback supports the more appropriate `on_step_begin` callback.", "# If not, fall back to `on_batch_begin` to be comp...
Called at beginning of each step for each callback in callbackList
[ "Called", "at", "beginning", "of", "each", "step", "for", "each", "callback", "in", "callbackList" ]
e6efb0d8297ec38d704a3110b5d6ed74d09a05e3
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/callbacks.py#L71-L79
29,865
keras-rl/keras-rl
rl/callbacks.py
CallbackList.on_step_end
def on_step_end(self, step, logs={}): """ Called at end of each step for each callback in callbackList""" for callback in self.callbacks: # Check if callback supports the more appropriate `on_step_end` callback. # If not, fall back to `on_batch_end` to be compatible with built-in Keras callbacks. if callable(getattr(callback, 'on_step_end', None)): callback.on_step_end(step, logs=logs) else: callback.on_batch_end(step, logs=logs)
python
def on_step_end(self, step, logs={}): """ Called at end of each step for each callback in callbackList""" for callback in self.callbacks: # Check if callback supports the more appropriate `on_step_end` callback. # If not, fall back to `on_batch_end` to be compatible with built-in Keras callbacks. if callable(getattr(callback, 'on_step_end', None)): callback.on_step_end(step, logs=logs) else: callback.on_batch_end(step, logs=logs)
[ "def", "on_step_end", "(", "self", ",", "step", ",", "logs", "=", "{", "}", ")", ":", "for", "callback", "in", "self", ".", "callbacks", ":", "# Check if callback supports the more appropriate `on_step_end` callback.", "# If not, fall back to `on_batch_end` to be compatible...
Called at end of each step for each callback in callbackList
[ "Called", "at", "end", "of", "each", "step", "for", "each", "callback", "in", "callbackList" ]
e6efb0d8297ec38d704a3110b5d6ed74d09a05e3
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/callbacks.py#L81-L89
29,866
keras-rl/keras-rl
rl/callbacks.py
CallbackList.on_action_begin
def on_action_begin(self, action, logs={}): """ Called at beginning of each action for each callback in callbackList""" for callback in self.callbacks: if callable(getattr(callback, 'on_action_begin', None)): callback.on_action_begin(action, logs=logs)
python
def on_action_begin(self, action, logs={}): """ Called at beginning of each action for each callback in callbackList""" for callback in self.callbacks: if callable(getattr(callback, 'on_action_begin', None)): callback.on_action_begin(action, logs=logs)
[ "def", "on_action_begin", "(", "self", ",", "action", ",", "logs", "=", "{", "}", ")", ":", "for", "callback", "in", "self", ".", "callbacks", ":", "if", "callable", "(", "getattr", "(", "callback", ",", "'on_action_begin'", ",", "None", ")", ")", ":",...
Called at beginning of each action for each callback in callbackList
[ "Called", "at", "beginning", "of", "each", "action", "for", "each", "callback", "in", "callbackList" ]
e6efb0d8297ec38d704a3110b5d6ed74d09a05e3
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/callbacks.py#L91-L95
29,867
keras-rl/keras-rl
rl/callbacks.py
CallbackList.on_action_end
def on_action_end(self, action, logs={}): """ Called at end of each action for each callback in callbackList""" for callback in self.callbacks: if callable(getattr(callback, 'on_action_end', None)): callback.on_action_end(action, logs=logs)
python
def on_action_end(self, action, logs={}): """ Called at end of each action for each callback in callbackList""" for callback in self.callbacks: if callable(getattr(callback, 'on_action_end', None)): callback.on_action_end(action, logs=logs)
[ "def", "on_action_end", "(", "self", ",", "action", ",", "logs", "=", "{", "}", ")", ":", "for", "callback", "in", "self", ".", "callbacks", ":", "if", "callable", "(", "getattr", "(", "callback", ",", "'on_action_end'", ",", "None", ")", ")", ":", "...
Called at end of each action for each callback in callbackList
[ "Called", "at", "end", "of", "each", "action", "for", "each", "callback", "in", "callbackList" ]
e6efb0d8297ec38d704a3110b5d6ed74d09a05e3
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/callbacks.py#L97-L101
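The dispatch logic repeated through the CallbackList methods above (prefer the RL-specific hook, fall back to the Keras epoch/batch hook) can be exercised with a plain Keras callback; a minimal sketch:

from keras.callbacks import Callback
from rl.callbacks import CallbackList

class EpochPrinter(Callback):
    def on_epoch_begin(self, epoch, logs=None):
        print('epoch', epoch)

cbs = CallbackList([EpochPrinter()])
# EpochPrinter defines no on_episode_begin, so CallbackList falls back
# to on_epoch_begin and passes the episode index as the epoch:
cbs.on_episode_begin(3)  # prints: epoch 3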
29,868
keras-rl/keras-rl
rl/callbacks.py
TrainEpisodeLogger.on_train_begin
def on_train_begin(self, logs): """ Print training values at beginning of training """ self.train_start = timeit.default_timer() self.metrics_names = self.model.metrics_names print('Training for {} steps ...'.format(self.params['nb_steps']))
python
def on_train_begin(self, logs): """ Print training values at beginning of training """ self.train_start = timeit.default_timer() self.metrics_names = self.model.metrics_names print('Training for {} steps ...'.format(self.params['nb_steps']))
[ "def", "on_train_begin", "(", "self", ",", "logs", ")", ":", "self", ".", "train_start", "=", "timeit", ".", "default_timer", "(", ")", "self", ".", "metrics_names", "=", "self", ".", "model", ".", "metrics_names", "print", "(", "'Training for {} steps ...'", ...
Print training values at beginning of training
[ "Print", "training", "values", "at", "beginning", "of", "training" ]
e6efb0d8297ec38d704a3110b5d6ed74d09a05e3
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/callbacks.py#L133-L137
29,869
keras-rl/keras-rl
rl/callbacks.py
TrainEpisodeLogger.on_train_end
def on_train_end(self, logs): """ Print training time at end of training """ duration = timeit.default_timer() - self.train_start print('done, took {:.3f} seconds'.format(duration))
python
def on_train_end(self, logs): """ Print training time at end of training """ duration = timeit.default_timer() - self.train_start print('done, took {:.3f} seconds'.format(duration))
[ "def", "on_train_end", "(", "self", ",", "logs", ")", ":", "duration", "=", "timeit", ".", "default_timer", "(", ")", "-", "self", ".", "train_start", "print", "(", "'done, took {:.3f} seconds'", ".", "format", "(", "duration", ")", ")" ]
Print training time at end of training
[ "Print", "training", "time", "at", "end", "of", "training" ]
e6efb0d8297ec38d704a3110b5d6ed74d09a05e3
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/callbacks.py#L139-L142
29,870
keras-rl/keras-rl
rl/callbacks.py
TrainEpisodeLogger.on_episode_begin
def on_episode_begin(self, episode, logs): """ Reset episode statistics at beginning of each episode """ self.episode_start[episode] = timeit.default_timer() self.observations[episode] = [] self.rewards[episode] = [] self.actions[episode] = [] self.metrics[episode] = []
python
def on_episode_begin(self, episode, logs): """ Reset episode statistics at beginning of each episode """ self.episode_start[episode] = timeit.default_timer() self.observations[episode] = [] self.rewards[episode] = [] self.actions[episode] = [] self.metrics[episode] = []
[ "def", "on_episode_begin", "(", "self", ",", "episode", ",", "logs", ")", ":", "self", ".", "episode_start", "[", "episode", "]", "=", "timeit", ".", "default_timer", "(", ")", "self", ".", "observations", "[", "episode", "]", "=", "[", "]", "self", "....
Reset episode statistics at beginning of each episode
[ "Reset", "environment", "variables", "at", "beginning", "of", "each", "episode" ]
e6efb0d8297ec38d704a3110b5d6ed74d09a05e3
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/callbacks.py#L144-L150
29,871
keras-rl/keras-rl
rl/callbacks.py
TrainEpisodeLogger.on_episode_end
def on_episode_end(self, episode, logs): """ Compute and print training statistics of the episode when done """ duration = timeit.default_timer() - self.episode_start[episode] episode_steps = len(self.observations[episode]) # Format all metrics. metrics = np.array(self.metrics[episode]) metrics_template = '' metrics_variables = [] with warnings.catch_warnings(): warnings.filterwarnings('error') for idx, name in enumerate(self.metrics_names): if idx > 0: metrics_template += ', ' try: value = np.nanmean(metrics[:, idx]) metrics_template += '{}: {:f}' except Warning: value = '--' metrics_template += '{}: {}' metrics_variables += [name, value] metrics_text = metrics_template.format(*metrics_variables) nb_step_digits = str(int(np.ceil(np.log10(self.params['nb_steps']))) + 1) template = '{step: ' + nb_step_digits + 'd}/{nb_steps}: episode: {episode}, duration: {duration:.3f}s, episode steps: {episode_steps}, steps per second: {sps:.0f}, episode reward: {episode_reward:.3f}, mean reward: {reward_mean:.3f} [{reward_min:.3f}, {reward_max:.3f}], mean action: {action_mean:.3f} [{action_min:.3f}, {action_max:.3f}], mean observation: {obs_mean:.3f} [{obs_min:.3f}, {obs_max:.3f}], {metrics}' variables = { 'step': self.step, 'nb_steps': self.params['nb_steps'], 'episode': episode + 1, 'duration': duration, 'episode_steps': episode_steps, 'sps': float(episode_steps) / duration, 'episode_reward': np.sum(self.rewards[episode]), 'reward_mean': np.mean(self.rewards[episode]), 'reward_min': np.min(self.rewards[episode]), 'reward_max': np.max(self.rewards[episode]), 'action_mean': np.mean(self.actions[episode]), 'action_min': np.min(self.actions[episode]), 'action_max': np.max(self.actions[episode]), 'obs_mean': np.mean(self.observations[episode]), 'obs_min': np.min(self.observations[episode]), 'obs_max': np.max(self.observations[episode]), 'metrics': metrics_text, } print(template.format(**variables)) # Free up resources. del self.episode_start[episode] del self.observations[episode] del self.rewards[episode] del self.actions[episode] del self.metrics[episode]
python
def on_episode_end(self, episode, logs): """ Compute and print training statistics of the episode when done """ duration = timeit.default_timer() - self.episode_start[episode] episode_steps = len(self.observations[episode]) # Format all metrics. metrics = np.array(self.metrics[episode]) metrics_template = '' metrics_variables = [] with warnings.catch_warnings(): warnings.filterwarnings('error') for idx, name in enumerate(self.metrics_names): if idx > 0: metrics_template += ', ' try: value = np.nanmean(metrics[:, idx]) metrics_template += '{}: {:f}' except Warning: value = '--' metrics_template += '{}: {}' metrics_variables += [name, value] metrics_text = metrics_template.format(*metrics_variables) nb_step_digits = str(int(np.ceil(np.log10(self.params['nb_steps']))) + 1) template = '{step: ' + nb_step_digits + 'd}/{nb_steps}: episode: {episode}, duration: {duration:.3f}s, episode steps: {episode_steps}, steps per second: {sps:.0f}, episode reward: {episode_reward:.3f}, mean reward: {reward_mean:.3f} [{reward_min:.3f}, {reward_max:.3f}], mean action: {action_mean:.3f} [{action_min:.3f}, {action_max:.3f}], mean observation: {obs_mean:.3f} [{obs_min:.3f}, {obs_max:.3f}], {metrics}' variables = { 'step': self.step, 'nb_steps': self.params['nb_steps'], 'episode': episode + 1, 'duration': duration, 'episode_steps': episode_steps, 'sps': float(episode_steps) / duration, 'episode_reward': np.sum(self.rewards[episode]), 'reward_mean': np.mean(self.rewards[episode]), 'reward_min': np.min(self.rewards[episode]), 'reward_max': np.max(self.rewards[episode]), 'action_mean': np.mean(self.actions[episode]), 'action_min': np.min(self.actions[episode]), 'action_max': np.max(self.actions[episode]), 'obs_mean': np.mean(self.observations[episode]), 'obs_min': np.min(self.observations[episode]), 'obs_max': np.max(self.observations[episode]), 'metrics': metrics_text, } print(template.format(**variables)) # Free up resources. del self.episode_start[episode] del self.observations[episode] del self.rewards[episode] del self.actions[episode] del self.metrics[episode]
[ "def", "on_episode_end", "(", "self", ",", "episode", ",", "logs", ")", ":", "duration", "=", "timeit", ".", "default_timer", "(", ")", "-", "self", ".", "episode_start", "[", "episode", "]", "episode_steps", "=", "len", "(", "self", ".", "observations", ...
Compute and print training statistics of the episode when done
[ "Compute", "and", "print", "training", "statistics", "of", "the", "episode", "when", "done" ]
e6efb0d8297ec38d704a3110b5d6ed74d09a05e3
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/callbacks.py#L152-L203
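The warnings.catch_warnings block in on_episode_end is how the logger detects an all-NaN metric column: np.nanmean warns on an all-NaN slice, and promoting warnings to errors turns that into a catchable branch that prints '--' instead of a number. The idiom in isolation:

import warnings
import numpy as np

col = np.array([np.nan, np.nan])
with warnings.catch_warnings():
    warnings.filterwarnings('error')
    try:
        value = np.nanmean(col)  # raises RuntimeWarning: Mean of empty slice
    except Warning:
        value = '--'             # placeholder when the metric was never set
print(value)                     # --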
29,872
keras-rl/keras-rl
rl/callbacks.py
TrainEpisodeLogger.on_step_end
def on_step_end(self, step, logs): """ Update statistics of episode after each step """ episode = logs['episode'] self.observations[episode].append(logs['observation']) self.rewards[episode].append(logs['reward']) self.actions[episode].append(logs['action']) self.metrics[episode].append(logs['metrics']) self.step += 1
python
def on_step_end(self, step, logs): """ Update statistics of episode after each step """ episode = logs['episode'] self.observations[episode].append(logs['observation']) self.rewards[episode].append(logs['reward']) self.actions[episode].append(logs['action']) self.metrics[episode].append(logs['metrics']) self.step += 1
[ "def", "on_step_end", "(", "self", ",", "step", ",", "logs", ")", ":", "episode", "=", "logs", "[", "'episode'", "]", "self", ".", "observations", "[", "episode", "]", ".", "append", "(", "logs", "[", "'observation'", "]", ")", "self", ".", "rewards", ...
Update statistics of episode after each step
[ "Update", "statistics", "of", "episode", "after", "each", "step" ]
e6efb0d8297ec38d704a3110b5d6ed74d09a05e3
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/callbacks.py#L205-L212
29,873
keras-rl/keras-rl
rl/callbacks.py
TrainIntervalLogger.on_step_begin
def on_step_begin(self, step, logs): """ Print metrics if interval is over """ if self.step % self.interval == 0: if len(self.episode_rewards) > 0: metrics = np.array(self.metrics) assert metrics.shape == (self.interval, len(self.metrics_names)) formatted_metrics = '' if not np.isnan(metrics).all(): # not all values are NaN means = np.nanmean(self.metrics, axis=0) assert means.shape == (len(self.metrics_names),) for name, mean in zip(self.metrics_names, means): formatted_metrics += ' - {}: {:.3f}'.format(name, mean) formatted_infos = '' if len(self.infos) > 0: infos = np.array(self.infos) if not np.isnan(infos).all(): # not all values are NaN means = np.nanmean(self.infos, axis=0) assert means.shape == (len(self.info_names),) for name, mean in zip(self.info_names, means): formatted_infos += ' - {}: {:.3f}'.format(name, mean) print('{} episodes - episode_reward: {:.3f} [{:.3f}, {:.3f}]{}{}'.format(len(self.episode_rewards), np.mean(self.episode_rewards), np.min(self.episode_rewards), np.max(self.episode_rewards), formatted_metrics, formatted_infos)) print('') self.reset() print('Interval {} ({} steps performed)'.format(self.step // self.interval + 1, self.step))
python
def on_step_begin(self, step, logs): """ Print metrics if interval is over """ if self.step % self.interval == 0: if len(self.episode_rewards) > 0: metrics = np.array(self.metrics) assert metrics.shape == (self.interval, len(self.metrics_names)) formatted_metrics = '' if not np.isnan(metrics).all(): # not all values are NaN means = np.nanmean(self.metrics, axis=0) assert means.shape == (len(self.metrics_names),) for name, mean in zip(self.metrics_names, means): formatted_metrics += ' - {}: {:.3f}'.format(name, mean) formatted_infos = '' if len(self.infos) > 0: infos = np.array(self.infos) if not np.isnan(infos).all(): # not all values are NaN means = np.nanmean(self.infos, axis=0) assert means.shape == (len(self.info_names),) for name, mean in zip(self.info_names, means): formatted_infos += ' - {}: {:.3f}'.format(name, mean) print('{} episodes - episode_reward: {:.3f} [{:.3f}, {:.3f}]{}{}'.format(len(self.episode_rewards), np.mean(self.episode_rewards), np.min(self.episode_rewards), np.max(self.episode_rewards), formatted_metrics, formatted_infos)) print('') self.reset() print('Interval {} ({} steps performed)'.format(self.step // self.interval + 1, self.step))
[ "def", "on_step_begin", "(", "self", ",", "step", ",", "logs", ")", ":", "if", "self", ".", "step", "%", "self", ".", "interval", "==", "0", ":", "if", "len", "(", "self", ".", "episode_rewards", ")", ">", "0", ":", "metrics", "=", "np", ".", "ar...
Print metrics if interval is over
[ "Print", "metrics", "if", "interval", "is", "over" ]
e6efb0d8297ec38d704a3110b5d6ed74d09a05e3
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/callbacks.py#L241-L265
29,874
keras-rl/keras-rl
rl/callbacks.py
TrainIntervalLogger.on_step_end
def on_step_end(self, step, logs): """ Update progress bar at the end of each step """ if self.info_names is None: self.info_names = logs['info'].keys() values = [('reward', logs['reward'])] if KERAS_VERSION > '2.1.3': self.progbar.update((self.step % self.interval) + 1, values=values) else: self.progbar.update((self.step % self.interval) + 1, values=values, force=True) self.step += 1 self.metrics.append(logs['metrics']) if len(self.info_names) > 0: self.infos.append([logs['info'][k] for k in self.info_names])
python
def on_step_end(self, step, logs): """ Update progress bar at the end of each step """ if self.info_names is None: self.info_names = logs['info'].keys() values = [('reward', logs['reward'])] if KERAS_VERSION > '2.1.3': self.progbar.update((self.step % self.interval) + 1, values=values) else: self.progbar.update((self.step % self.interval) + 1, values=values, force=True) self.step += 1 self.metrics.append(logs['metrics']) if len(self.info_names) > 0: self.infos.append([logs['info'][k] for k in self.info_names])
[ "def", "on_step_end", "(", "self", ",", "step", ",", "logs", ")", ":", "if", "self", ".", "info_names", "is", "None", ":", "self", ".", "info_names", "=", "logs", "[", "'info'", "]", ".", "keys", "(", ")", "values", "=", "[", "(", "'reward'", ",", ...
Update progress bar at the end of each step
[ "Update", "progression", "bar", "at", "the", "end", "of", "each", "step" ]
e6efb0d8297ec38d704a3110b5d6ed74d09a05e3
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/callbacks.py#L267-L279
29,875
keras-rl/keras-rl
rl/callbacks.py
FileLogger.on_episode_begin
def on_episode_begin(self, episode, logs): """ Initialize metrics at the beginning of each episode """ assert episode not in self.metrics assert episode not in self.starts self.metrics[episode] = [] self.starts[episode] = timeit.default_timer()
python
def on_episode_begin(self, episode, logs): """ Initialize metrics at the beginning of each episode """ assert episode not in self.metrics assert episode not in self.starts self.metrics[episode] = [] self.starts[episode] = timeit.default_timer()
[ "def", "on_episode_begin", "(", "self", ",", "episode", ",", "logs", ")", ":", "assert", "episode", "not", "in", "self", ".", "metrics", "assert", "episode", "not", "in", "self", ".", "starts", "self", ".", "metrics", "[", "episode", "]", "=", "[", "]"...
Initialize metrics at the beginning of each episode
[ "Initialize", "metrics", "at", "the", "beginning", "of", "each", "episode" ]
e6efb0d8297ec38d704a3110b5d6ed74d09a05e3
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/callbacks.py#L305-L310
29,876
keras-rl/keras-rl
rl/callbacks.py
FileLogger.on_episode_end
def on_episode_end(self, episode, logs): """ Compute and store metrics at the end of each episode """ duration = timeit.default_timer() - self.starts[episode] metrics = self.metrics[episode] if np.isnan(metrics).all(): mean_metrics = np.array([np.nan for _ in self.metrics_names]) else: mean_metrics = np.nanmean(metrics, axis=0) assert len(mean_metrics) == len(self.metrics_names) data = list(zip(self.metrics_names, mean_metrics)) data += list(logs.items()) data += [('episode', episode), ('duration', duration)] for key, value in data: if key not in self.data: self.data[key] = [] self.data[key].append(value) if self.interval is not None and episode % self.interval == 0: self.save_data() # Clean up. del self.metrics[episode] del self.starts[episode]
python
def on_episode_end(self, episode, logs): """ Compute and store metrics at the end of each episode """ duration = timeit.default_timer() - self.starts[episode] metrics = self.metrics[episode] if np.isnan(metrics).all(): mean_metrics = np.array([np.nan for _ in self.metrics_names]) else: mean_metrics = np.nanmean(metrics, axis=0) assert len(mean_metrics) == len(self.metrics_names) data = list(zip(self.metrics_names, mean_metrics)) data += list(logs.items()) data += [('episode', episode), ('duration', duration)] for key, value in data: if key not in self.data: self.data[key] = [] self.data[key].append(value) if self.interval is not None and episode % self.interval == 0: self.save_data() # Clean up. del self.metrics[episode] del self.starts[episode]
[ "def", "on_episode_end", "(", "self", ",", "episode", ",", "logs", ")", ":", "duration", "=", "timeit", ".", "default_timer", "(", ")", "-", "self", ".", "starts", "[", "episode", "]", "metrics", "=", "self", ".", "metrics", "[", "episode", "]", "if", ...
Compute and store metrics at the end of each episode
[ "Compute", "and", "print", "metrics", "at", "the", "end", "of", "each", "episode" ]
e6efb0d8297ec38d704a3110b5d6ed74d09a05e3
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/callbacks.py#L312-L336
29,877
keras-rl/keras-rl
rl/callbacks.py
FileLogger.save_data
def save_data(self): """ Save metrics in a json file """ if len(self.data.keys()) == 0: return # Sort everything by episode. assert 'episode' in self.data sorted_indexes = np.argsort(self.data['episode']) sorted_data = {} for key, values in self.data.items(): assert len(self.data[key]) == len(sorted_indexes) # We convert to np.array() and then to list to convert from np datatypes to native datatypes. # This is necessary because json.dump cannot handle np.float32, for example. sorted_data[key] = np.array([self.data[key][idx] for idx in sorted_indexes]).tolist() # Overwrite already open file. We can simply seek to the beginning since the file will # grow strictly monotonically. with open(self.filepath, 'w') as f: json.dump(sorted_data, f)
python
def save_data(self): """ Save metrics in a json file """ if len(self.data.keys()) == 0: return # Sort everything by episode. assert 'episode' in self.data sorted_indexes = np.argsort(self.data['episode']) sorted_data = {} for key, values in self.data.items(): assert len(self.data[key]) == len(sorted_indexes) # We convert to np.array() and then to list to convert from np datatypes to native datatypes. # This is necessary because json.dump cannot handle np.float32, for example. sorted_data[key] = np.array([self.data[key][idx] for idx in sorted_indexes]).tolist() # Overwrite already open file. We can simply seek to the beginning since the file will # grow strictly monotonically. with open(self.filepath, 'w') as f: json.dump(sorted_data, f)
[ "def", "save_data", "(", "self", ")", ":", "if", "len", "(", "self", ".", "data", ".", "keys", "(", ")", ")", "==", "0", ":", "return", "# Sort everything by episode.", "assert", "'episode'", "in", "self", ".", "data", "sorted_indexes", "=", "np", ".", ...
Save metrics in a json file
[ "Save", "metrics", "in", "a", "json", "file" ]
e6efb0d8297ec38d704a3110b5d6ed74d09a05e3
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/callbacks.py#L342-L360
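The np.array(...).tolist() detour in save_data is load-bearing: json.dump rejects NumPy scalar types, and tolist() converts them to native Python numbers. A sketch of the failure and the fix:

import json
import numpy as np

values = [np.float32(1.5), np.float32(2.5)]
# json.dumps(values)  # TypeError: Object of type float32 is not JSON serializable
print(json.dumps(np.array(values).tolist()))  # [1.5, 2.5]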
29,878
keras-rl/keras-rl
rl/callbacks.py
ModelIntervalCheckpoint.on_step_end
def on_step_end(self, step, logs={}): """ Save weights at interval steps during training """ self.total_steps += 1 if self.total_steps % self.interval != 0: # Nothing to do. return filepath = self.filepath.format(step=self.total_steps, **logs) if self.verbose > 0: print('Step {}: saving model to {}'.format(self.total_steps, filepath)) self.model.save_weights(filepath, overwrite=True)
python
def on_step_end(self, step, logs={}): """ Save weights at interval steps during training """ self.total_steps += 1 if self.total_steps % self.interval != 0: # Nothing to do. return filepath = self.filepath.format(step=self.total_steps, **logs) if self.verbose > 0: print('Step {}: saving model to {}'.format(self.total_steps, filepath)) self.model.save_weights(filepath, overwrite=True)
[ "def", "on_step_end", "(", "self", ",", "step", ",", "logs", "=", "{", "}", ")", ":", "self", ".", "total_steps", "+=", "1", "if", "self", ".", "total_steps", "%", "self", ".", "interval", "!=", "0", ":", "# Nothing to do.", "return", "filepath", "=", ...
Save weights at interval steps during training
[ "Save", "weights", "at", "interval", "steps", "during", "training" ]
e6efb0d8297ec38d704a3110b5d6ed74d09a05e3
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/callbacks.py#L377-L387
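The filepath handed to this callback is a format template keyed by the running step count; a usage sketch (the weight-file naming pattern is illustrative):

from rl.callbacks import ModelIntervalCheckpoint

# Every 10000 training steps, write e.g. 'dqn_weights_10000.h5f'.
checkpoint = ModelIntervalCheckpoint('dqn_weights_{step}.h5f',
                                     interval=10000, verbose=1)
# Passed to training as: agent.fit(env, nb_steps=50000, callbacks=[checkpoint])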
29,879
keras-rl/keras-rl
rl/memory.py
zeroed_observation
def zeroed_observation(observation): """Return an array of zeros with same shape as given observation # Argument observation (list): List of observation # Returns A np.ndarray of zeros with observation.shape """ if hasattr(observation, 'shape'): return np.zeros(observation.shape) elif hasattr(observation, '__iter__'): out = [] for x in observation: out.append(zeroed_observation(x)) return out else: return 0.
python
def zeroed_observation(observation): """Return an array of zeros with same shape as given observation # Argument observation (list): List of observation # Returns A np.ndarray of zeros with observation.shape """ if hasattr(observation, 'shape'): return np.zeros(observation.shape) elif hasattr(observation, '__iter__'): out = [] for x in observation: out.append(zeroed_observation(x)) return out else: return 0.
[ "def", "zeroed_observation", "(", "observation", ")", ":", "if", "hasattr", "(", "observation", ",", "'shape'", ")", ":", "return", "np", ".", "zeros", "(", "observation", ".", "shape", ")", "elif", "hasattr", "(", "observation", ",", "'__iter__'", ")", ":...
Return an array of zeros with same shape as given observation # Argument observation (list): List of observation # Returns A np.ndarray of zeros with observation.shape
[ "Return", "an", "array", "of", "zeros", "with", "same", "shape", "as", "given", "observation" ]
e6efb0d8297ec38d704a3110b5d6ed74d09a05e3
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/memory.py#L85-L102
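Because zeroed_observation recurses into anything iterable, it mirrors arbitrarily nested observation structures; a quick sketch:

import numpy as np
from rl.memory import zeroed_observation

obs = [np.ones((2,)), [np.ones((3,)), 7.0]]
print(zeroed_observation(obs))
# [array([0., 0.]), [array([0., 0., 0.]), 0.0]]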
29,880
keras-rl/keras-rl
rl/memory.py
Memory.get_recent_state
def get_recent_state(self, current_observation): """Return list of last observations # Argument current_observation (object): Last observation # Returns A list of the last observations """ # This code is slightly complicated by the fact that subsequent observations might be # from different episodes. We ensure that an experience never spans multiple episodes. # This is probably not that important in practice but it seems cleaner. state = [current_observation] idx = len(self.recent_observations) - 1 for offset in range(0, self.window_length - 1): current_idx = idx - offset current_terminal = self.recent_terminals[current_idx - 1] if current_idx - 1 >= 0 else False if current_idx < 0 or (not self.ignore_episode_boundaries and current_terminal): # The previously handled observation was terminal, don't add the current one. # Otherwise we would leak into a different episode. break state.insert(0, self.recent_observations[current_idx]) while len(state) < self.window_length: state.insert(0, zeroed_observation(state[0])) return state
python
def get_recent_state(self, current_observation): """Return list of last observations # Argument current_observation (object): Last observation # Returns A list of the last observations """ # This code is slightly complicated by the fact that subsequent observations might be # from different episodes. We ensure that an experience never spans multiple episodes. # This is probably not that important in practice but it seems cleaner. state = [current_observation] idx = len(self.recent_observations) - 1 for offset in range(0, self.window_length - 1): current_idx = idx - offset current_terminal = self.recent_terminals[current_idx - 1] if current_idx - 1 >= 0 else False if current_idx < 0 or (not self.ignore_episode_boundaries and current_terminal): # The previously handled observation was terminal, don't add the current one. # Otherwise we would leak into a different episode. break state.insert(0, self.recent_observations[current_idx]) while len(state) < self.window_length: state.insert(0, zeroed_observation(state[0])) return state
[ "def", "get_recent_state", "(", "self", ",", "current_observation", ")", ":", "# This code is slightly complicated by the fact that subsequent observations might be", "# from different episodes. We ensure that an experience never spans multiple episodes.", "# This is probably not that important ...
Return list of last observations # Argument current_observation (object): Last observation # Returns A list of the last observations
[ "Return", "list", "of", "last", "observations" ]
e6efb0d8297ec38d704a3110b5d6ed74d09a05e3
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/memory.py#L120-L144
29,881
keras-rl/keras-rl
rl/memory.py
SequentialMemory.sample
def sample(self, batch_size, batch_idxs=None): """Return a randomized batch of experiences # Argument batch_size (int): Size of the batch batch_idxs (list): Indexes to extract # Returns A list of experiences randomly selected """ # It is not possible to tell whether the first state in the memory is terminal, because it # would require access to the "terminal" flag associated to the previous state. As a result # we will never return this first state (only using `self.terminals[0]` to know whether the # second state is terminal). # In addition we need enough entries to fill the desired window length. assert self.nb_entries >= self.window_length + 2, 'not enough entries in the memory' if batch_idxs is None: # Draw random indexes such that we have enough entries before each index to fill the # desired window length. batch_idxs = sample_batch_indexes( self.window_length, self.nb_entries - 1, size=batch_size) batch_idxs = np.array(batch_idxs) + 1 assert np.min(batch_idxs) >= self.window_length + 1 assert np.max(batch_idxs) < self.nb_entries assert len(batch_idxs) == batch_size # Create experiences experiences = [] for idx in batch_idxs: terminal0 = self.terminals[idx - 2] while terminal0: # Skip this transition because the environment was reset here. Select a new, random # transition and use this instead. This may cause the batch to contain the same # transition twice. idx = sample_batch_indexes(self.window_length + 1, self.nb_entries, size=1)[0] terminal0 = self.terminals[idx - 2] assert self.window_length + 1 <= idx < self.nb_entries # This code is slightly complicated by the fact that subsequent observations might be # from different episodes. We ensure that an experience never spans multiple episodes. # This is probably not that important in practice but it seems cleaner. state0 = [self.observations[idx - 1]] for offset in range(0, self.window_length - 1): current_idx = idx - 2 - offset assert current_idx >= 1 current_terminal = self.terminals[current_idx - 1] if current_terminal and not self.ignore_episode_boundaries: # The previously handled observation was terminal, don't add the current one. # Otherwise we would leak into a different episode. break state0.insert(0, self.observations[current_idx]) while len(state0) < self.window_length: state0.insert(0, zeroed_observation(state0[0])) action = self.actions[idx - 1] reward = self.rewards[idx - 1] terminal1 = self.terminals[idx - 1] # Okay, now we need to create the follow-up state. This is state0 shifted one timestep # to the right. Again, we need to be careful to not include an observation from the next # episode if the last state is terminal. state1 = [np.copy(x) for x in state0[1:]] state1.append(self.observations[idx]) assert len(state0) == self.window_length assert len(state1) == len(state0) experiences.append(Experience(state0=state0, action=action, reward=reward, state1=state1, terminal1=terminal1)) assert len(experiences) == batch_size return experiences
python
def sample(self, batch_size, batch_idxs=None): """Return a randomized batch of experiences # Argument batch_size (int): Size of the batch batch_idxs (list): Indexes to extract # Returns A list of experiences randomly selected """ # It is not possible to tell whether the first state in the memory is terminal, because it # would require access to the "terminal" flag associated to the previous state. As a result # we will never return this first state (only using `self.terminals[0]` to know whether the # second state is terminal). # In addition we need enough entries to fill the desired window length. assert self.nb_entries >= self.window_length + 2, 'not enough entries in the memory' if batch_idxs is None: # Draw random indexes such that we have enough entries before each index to fill the # desired window length. batch_idxs = sample_batch_indexes( self.window_length, self.nb_entries - 1, size=batch_size) batch_idxs = np.array(batch_idxs) + 1 assert np.min(batch_idxs) >= self.window_length + 1 assert np.max(batch_idxs) < self.nb_entries assert len(batch_idxs) == batch_size # Create experiences experiences = [] for idx in batch_idxs: terminal0 = self.terminals[idx - 2] while terminal0: # Skip this transition because the environment was reset here. Select a new, random # transition and use this instead. This may cause the batch to contain the same # transition twice. idx = sample_batch_indexes(self.window_length + 1, self.nb_entries, size=1)[0] terminal0 = self.terminals[idx - 2] assert self.window_length + 1 <= idx < self.nb_entries # This code is slightly complicated by the fact that subsequent observations might be # from different episodes. We ensure that an experience never spans multiple episodes. # This is probably not that important in practice but it seems cleaner. state0 = [self.observations[idx - 1]] for offset in range(0, self.window_length - 1): current_idx = idx - 2 - offset assert current_idx >= 1 current_terminal = self.terminals[current_idx - 1] if current_terminal and not self.ignore_episode_boundaries: # The previously handled observation was terminal, don't add the current one. # Otherwise we would leak into a different episode. break state0.insert(0, self.observations[current_idx]) while len(state0) < self.window_length: state0.insert(0, zeroed_observation(state0[0])) action = self.actions[idx - 1] reward = self.rewards[idx - 1] terminal1 = self.terminals[idx - 1] # Okay, now we need to create the follow-up state. This is state0 shifted one timestep # to the right. Again, we need to be careful to not include an observation from the next # episode if the last state is terminal. state1 = [np.copy(x) for x in state0[1:]] state1.append(self.observations[idx]) assert len(state0) == self.window_length assert len(state1) == len(state0) experiences.append(Experience(state0=state0, action=action, reward=reward, state1=state1, terminal1=terminal1)) assert len(experiences) == batch_size return experiences
[ "def", "sample", "(", "self", ",", "batch_size", ",", "batch_idxs", "=", "None", ")", ":", "# It is not possible to tell whether the first state in the memory is terminal, because it", "# would require access to the \"terminal\" flag associated to the previous state. As a result", "# we ...
Return a randomized batch of experiences # Argument batch_size (int): Size of the batch batch_idxs (list): Indexes to extract # Returns A list of experiences randomly selected
[ "Return", "a", "randomized", "batch", "of", "experiences" ]
e6efb0d8297ec38d704a3110b5d6ed74d09a05e3
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/memory.py#L171-L239
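The off-by-one bookkeeping in sample() follows from how append() lays out transitions (see the next record): entry idx - 1 holds the (observation, action, reward) triple, and terminals[idx - 1] flags whether the following state ends the episode. For window_length == 1 a sampled index decodes as below; this is a reading aid, not code from the source:

# state0    = [observations[idx - 1]]
# action    = actions[idx - 1]
# reward    = rewards[idx - 1]
# terminal1 = terminals[idx - 1]   # does state1 end the episode?
# state1    = [observations[idx]]
# Precondition: terminals[idx - 2] is False, otherwise observations[idx - 1]
# would be the first frame of a fresh episode and the pair would span a reset.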
29,882
keras-rl/keras-rl
rl/memory.py
SequentialMemory.append
def append(self, observation, action, reward, terminal, training=True): """Append an observation to the memory # Argument observation (dict): Observation returned by environment action (int): Action taken to obtain this observation reward (float): Reward obtained by taking this action terminal (boolean): Is the state terminal """ super(SequentialMemory, self).append(observation, action, reward, terminal, training=training) # This needs to be understood as follows: in `observation`, take `action`, obtain `reward` # and whether the next state is `terminal` or not. if training: self.observations.append(observation) self.actions.append(action) self.rewards.append(reward) self.terminals.append(terminal)
python
def append(self, observation, action, reward, terminal, training=True): """Append an observation to the memory # Argument observation (dict): Observation returned by environment action (int): Action taken to obtain this observation reward (float): Reward obtained by taking this action terminal (boolean): Is the state terminal """ super(SequentialMemory, self).append(observation, action, reward, terminal, training=training) # This needs to be understood as follows: in `observation`, take `action`, obtain `reward` # and whether the next state is `terminal` or not. if training: self.observations.append(observation) self.actions.append(action) self.rewards.append(reward) self.terminals.append(terminal)
[ "def", "append", "(", "self", ",", "observation", ",", "action", ",", "reward", ",", "terminal", ",", "training", "=", "True", ")", ":", "super", "(", "SequentialMemory", ",", "self", ")", ".", "append", "(", "observation", ",", "action", ",", "reward", ...
Append an observation to the memory # Argument observation (dict): Observation returned by environment action (int): Action taken to obtain this observation reward (float): Reward obtained by taking this action terminal (boolean): Is the state terminal
[ "Append", "an", "observation", "to", "the", "memory" ]
e6efb0d8297ec38d704a3110b5d6ed74d09a05e3
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/memory.py#L241-L258
29,883
keras-rl/keras-rl
rl/memory.py
SequentialMemory.get_config
def get_config(self): """Return configurations of SequentialMemory # Returns Dict of config """ config = super(SequentialMemory, self).get_config() config['limit'] = self.limit return config
python
def get_config(self): """Return configurations of SequentialMemory # Returns Dict of config """ config = super(SequentialMemory, self).get_config() config['limit'] = self.limit return config
[ "def", "get_config", "(", "self", ")", ":", "config", "=", "super", "(", "SequentialMemory", ",", "self", ")", ".", "get_config", "(", ")", "config", "[", "'limit'", "]", "=", "self", ".", "limit", "return", "config" ]
Return configurations of SequentialMemory # Returns Dict of config
[ "Return", "configurations", "of", "SequentialMemory" ]
e6efb0d8297ec38d704a3110b5d6ed74d09a05e3
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/memory.py#L269-L277
29,884
keras-rl/keras-rl
rl/memory.py
EpisodeParameterMemory.sample
def sample(self, batch_size, batch_idxs=None): """Return a randomized batch of params and rewards # Argument batch_size (int): Size of the batch batch_idxs (list): Indexes to extract # Returns A list of params randomly selected and a list of associated rewards """ if batch_idxs is None: batch_idxs = sample_batch_indexes(0, self.nb_entries, size=batch_size) assert len(batch_idxs) == batch_size batch_params = [] batch_total_rewards = [] for idx in batch_idxs: batch_params.append(self.params[idx]) batch_total_rewards.append(self.total_rewards[idx]) return batch_params, batch_total_rewards
python
def sample(self, batch_size, batch_idxs=None): """Return a randomized batch of params and rewards # Argument batch_size (int): Size of the batch batch_idxs (list): Indexes to extract # Returns A list of params randomly selected and a list of associated rewards """ if batch_idxs is None: batch_idxs = sample_batch_indexes(0, self.nb_entries, size=batch_size) assert len(batch_idxs) == batch_size batch_params = [] batch_total_rewards = [] for idx in batch_idxs: batch_params.append(self.params[idx]) batch_total_rewards.append(self.total_rewards[idx]) return batch_params, batch_total_rewards
[ "def", "sample", "(", "self", ",", "batch_size", ",", "batch_idxs", "=", "None", ")", ":", "if", "batch_idxs", "is", "None", ":", "batch_idxs", "=", "sample_batch_indexes", "(", "0", ",", "self", ".", "nb_entries", ",", "size", "=", "batch_size", ")", "a...
Return a randomized batch of params and rewards # Argument batch_size (int): Size of the batch batch_idxs (list): Indexes to extract # Returns A list of params randomly selected and a list of associated rewards
[ "Return", "a", "randomized", "batch", "of", "params", "and", "rewards" ]
e6efb0d8297ec38d704a3110b5d6ed74d09a05e3
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/memory.py#L289-L307
29,885
keras-rl/keras-rl
rl/memory.py
EpisodeParameterMemory.append
def append(self, observation, action, reward, terminal, training=True): """Append a reward to the memory # Argument observation (dict): Observation returned by environment action (int): Action taken to obtain this observation reward (float): Reward obtained by taking this action terminal (boolean): Is the state terminal """ super(EpisodeParameterMemory, self).append(observation, action, reward, terminal, training=training) if training: self.intermediate_rewards.append(reward)
python
def append(self, observation, action, reward, terminal, training=True): """Append a reward to the memory # Argument observation (dict): Observation returned by environment action (int): Action taken to obtain this observation reward (float): Reward obtained by taking this action terminal (boolean): Is the state terminal """ super(EpisodeParameterMemory, self).append(observation, action, reward, terminal, training=training) if training: self.intermediate_rewards.append(reward)
[ "def", "append", "(", "self", ",", "observation", ",", "action", ",", "reward", ",", "terminal", ",", "training", "=", "True", ")", ":", "super", "(", "EpisodeParameterMemory", ",", "self", ")", ".", "append", "(", "observation", ",", "action", ",", "rew...
Append a reward to the memory # Argument observation (dict): Observation returned by environment action (int): Action taken to obtain this observation reward (float): Reward obtained by taking this action terminal (boolean): Is the state terminal
[ "Append", "a", "reward", "to", "the", "memory" ]
e6efb0d8297ec38d704a3110b5d6ed74d09a05e3
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/memory.py#L309-L320
29,886
keras-rl/keras-rl
rl/memory.py
EpisodeParameterMemory.finalize_episode
def finalize_episode(self, params): """Closes the current episode, sums up rewards and stores the parameters # Argument params (object): Parameters associated with the episode to be stored and then retrieved back in sample() """ total_reward = sum(self.intermediate_rewards) self.total_rewards.append(total_reward) self.params.append(params) self.intermediate_rewards = []
python
def finalize_episode(self, params): """Closes the current episode, sums up rewards and stores the parameters # Argument params (object): Parameters associated with the episode to be stored and then retrieved back in sample() """ total_reward = sum(self.intermediate_rewards) self.total_rewards.append(total_reward) self.params.append(params) self.intermediate_rewards = []
[ "def", "finalize_episode", "(", "self", ",", "params", ")", ":", "total_reward", "=", "sum", "(", "self", ".", "intermediate_rewards", ")", "self", ".", "total_rewards", ".", "append", "(", "total_reward", ")", "self", ".", "params", ".", "append", "(", "p...
Closes the current episode, sums up rewards and stores the parameters # Argument params (object): Parameters associated with the episode to be stored and then retrieved back in sample()
[ "Closes", "the", "current", "episode", "sums", "up", "rewards", "and", "stores", "the", "parameters" ]
e6efb0d8297ec38d704a3110b5d6ed74d09a05e3
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/memory.py#L322-L331
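Taken together, the three EpisodeParameterMemory methods implement the episodic loop used by keras-rl's CEM agent: append() accumulates per-step rewards, finalize_episode() banks the episode total against the parameter vector that produced it, and sample() draws (params, total_reward) pairs. A sketch of that loop; sample_parameters() and run_episode() are hypothetical helpers standing in for the agent and environment:

from rl.memory import EpisodeParameterMemory

memory = EpisodeParameterMemory(limit=1000, window_length=1)
for episode in range(5):
    theta = sample_parameters()                           # hypothetical
    for obs, action, reward, done in run_episode(theta):  # hypothetical
        memory.append(obs, action, reward, done)
    memory.finalize_episode(theta)

params, total_rewards = memory.sample(batch_size=5)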
29,887
keras-rl/keras-rl
rl/common/cmd_util.py
make_gym_env
def make_gym_env(env_id, num_env=2, seed=123, wrapper_kwargs=None, start_index=0): """ Create a wrapped, SubprocVecEnv for Gym Environments. """ if wrapper_kwargs is None: wrapper_kwargs = {} def make_env(rank): # pylint: disable=C0111 def _thunk(): env = gym.make(env_id) env.seed(seed + rank) return env return _thunk set_global_seeds(seed) return SubprocVecEnv([make_env(i + start_index) for i in range(num_env)])
python
def make_gym_env(env_id, num_env=2, seed=123, wrapper_kwargs=None, start_index=0): """ Create a wrapped, SubprocVecEnv for Gym Environments. """ if wrapper_kwargs is None: wrapper_kwargs = {} def make_env(rank): # pylint: disable=C0111 def _thunk(): env = gym.make(env_id) env.seed(seed + rank) return env return _thunk set_global_seeds(seed) return SubprocVecEnv([make_env(i + start_index) for i in range(num_env)])
[ "def", "make_gym_env", "(", "env_id", ",", "num_env", "=", "2", ",", "seed", "=", "123", ",", "wrapper_kwargs", "=", "None", ",", "start_index", "=", "0", ")", ":", "if", "wrapper_kwargs", "is", "None", ":", "wrapper_kwargs", "=", "{", "}", "def", "mak...
Create a wrapped, SubprocVecEnv for Gym Environments.
[ "Create", "a", "wrapped", "SubprocVecEnv", "for", "Gym", "Environments", "." ]
e6efb0d8297ec38d704a3110b5d6ed74d09a05e3
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/common/cmd_util.py#L7-L22
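Usage is a single call; each subprocess builds and seeds its own copy of the environment (the environment id below is illustrative, and the import path follows the file path shown above):

from rl.common.cmd_util import make_gym_env

# Four CartPole-v1 workers in subprocesses, seeded 42, 43, 44, 45.
vec_env = make_gym_env('CartPole-v1', num_env=4, seed=42)
obs = vec_env.reset()  # one stacked observation per worker

Note that wrapper_kwargs is accepted and defaulted but never applied in the version shown, so any wrappers would have to be added inside the inner _thunk.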
29,888
awslabs/aws-sam-cli
samcli/commands/local/cli_common/options.py
invoke_common_options
def invoke_common_options(f): """ Common CLI options shared by "local invoke" and "local start-api" commands :param f: Callback passed by Click """ invoke_options = [ template_click_option(), click.option('--env-vars', '-n', type=click.Path(exists=True), help="JSON file containing values for Lambda function's environment variables."), parameter_override_click_option(), click.option('--debug-port', '-d', help="When specified, Lambda function container will start in debug mode and will expose this " "port on localhost.", envvar="SAM_DEBUG_PORT"), click.option('--debugger-path', help="Host path to a debugger that will be mounted into the Lambda container."), click.option('--debug-args', help="Additional arguments to be passed to the debugger.", envvar="DEBUGGER_ARGS"), click.option('--docker-volume-basedir', '-v', envvar="SAM_DOCKER_VOLUME_BASEDIR", help="Specifies the location basedir where the SAM file exists. If Docker is running on " "a remote machine, you must mount the path where the SAM file exists on the docker machine " "and modify this value to match the remote machine."), click.option('--log-file', '-l', help="logfile to send runtime logs to."), click.option('--layer-cache-basedir', type=click.Path(exists=False, file_okay=False), envvar="SAM_LAYER_CACHE_BASEDIR", help="Specifies the location basedir where the Layers your template uses will be downloaded to.", default=get_default_layer_cache_dir()), ] + docker_click_options() + [ click.option('--force-image-build', is_flag=True, help='Specify whether CLI should rebuild the image used for invoking functions with layers.', envvar='SAM_FORCE_IMAGE_BUILD', default=False), ] # Reverse the list to maintain ordering of options in help text printed with --help for option in reversed(invoke_options): option(f) return f
python
def invoke_common_options(f): """ Common CLI options shared by "local invoke" and "local start-api" commands :param f: Callback passed by Click """ invoke_options = [ template_click_option(), click.option('--env-vars', '-n', type=click.Path(exists=True), help="JSON file containing values for Lambda function's environment variables."), parameter_override_click_option(), click.option('--debug-port', '-d', help="When specified, Lambda function container will start in debug mode and will expose this " "port on localhost.", envvar="SAM_DEBUG_PORT"), click.option('--debugger-path', help="Host path to a debugger that will be mounted into the Lambda container."), click.option('--debug-args', help="Additional arguments to be passed to the debugger.", envvar="DEBUGGER_ARGS"), click.option('--docker-volume-basedir', '-v', envvar="SAM_DOCKER_VOLUME_BASEDIR", help="Specifies the location basedir where the SAM file exists. If the Docker is running on " "a remote machine, you must mount the path where the SAM file exists on the docker machine " "and modify this value to match the remote machine."), click.option('--log-file', '-l', help="logfile to send runtime logs to."), click.option('--layer-cache-basedir', type=click.Path(exists=False, file_okay=False), envvar="SAM_LAYER_CACHE_BASEDIR", help="Specifies the location basedir where the Layers your template uses will be downloaded to.", default=get_default_layer_cache_dir()), ] + docker_click_options() + [ click.option('--force-image-build', is_flag=True, help='Specify whether CLI should rebuild the image used for invoking functions with layers.', envvar='SAM_FORCE_IMAGE_BUILD', default=False), ] # Reverse the list to maintain ordering of options in help text printed with --help for option in reversed(invoke_options): option(f) return f
[ "def", "invoke_common_options", "(", "f", ")", ":", "invoke_options", "=", "[", "template_click_option", "(", ")", ",", "click", ".", "option", "(", "'--env-vars'", ",", "'-n'", ",", "type", "=", "click", ".", "Path", "(", "exists", "=", "True", ")", ","...
Common CLI options shared by "local invoke" and "local start-api" commands

:param f: Callback passed by Click
[ "Common", "CLI", "options", "shared", "by", "local", "invoke", "and", "local", "start", "-", "api", "commands" ]
c05af5e7378c6f05f7d82ad3f0bca17204177db6
https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/commands/local/cli_common/options.py#L73-L130
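A hedged sketch of how a Click command might consume the decorator above; the command name and body are assumptions. Note the design choice documented in the code: the option list is applied in reverse so that --help prints the options in declaration order.

# Hypothetical consumer of invoke_common_options.
import click

@click.command("invoke")
@invoke_common_options
def cli(**kwargs):
    # Every option gathered above arrives as a keyword argument,
    # e.g. env_vars, debug_port, log_file, force_image_build, ...
    click.echo(sorted(kwargs))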
29,889
awslabs/aws-sam-cli
samcli/commands/_utils/options.py
template_click_option
def template_click_option(include_build=True):
    """
    Click Option for template option
    """
    return click.option('--template', '-t',
                        default=_TEMPLATE_OPTION_DEFAULT_VALUE,
                        type=click.Path(),
                        envvar="SAM_TEMPLATE_FILE",
                        callback=partial(get_or_default_template_file_name, include_build=include_build),
                        show_default=True,
                        help="AWS SAM template file")
python
def template_click_option(include_build=True):
    """
    Click Option for template option
    """
    return click.option('--template', '-t',
                        default=_TEMPLATE_OPTION_DEFAULT_VALUE,
                        type=click.Path(),
                        envvar="SAM_TEMPLATE_FILE",
                        callback=partial(get_or_default_template_file_name, include_build=include_build),
                        show_default=True,
                        help="AWS SAM template file")
[ "def", "template_click_option", "(", "include_build", "=", "True", ")", ":", "return", "click", ".", "option", "(", "'--template'", ",", "'-t'", ",", "default", "=", "_TEMPLATE_OPTION_DEFAULT_VALUE", ",", "type", "=", "click", ".", "Path", "(", ")", ",", "en...
Click Option for template option
[ "Click", "Option", "for", "template", "option" ]
c05af5e7378c6f05f7d82ad3f0bca17204177db6
https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/commands/_utils/options.py#L73-L83
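A small assumed usage of the option factory above, attaching the --template option to a hypothetical command:

# Hypothetical command using template_click_option.
import click

@click.command("validate")
@template_click_option(include_build=False)
def cli(template):
    # 'template' is resolved through the get_or_default_template_file_name callback
    click.echo("Using template: {}".format(template))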
29,890
awslabs/aws-sam-cli
samcli/lib/utils/tar.py
create_tarball
def create_tarball(tar_paths):
    """
    Context Manager that creates the tarball of the Docker Context to use for building the image

    Parameters
    ----------
    tar_paths dict(str, str)
        Key representing a full path to the file or directory and the Value representing the path within the tarball

    Yields
    ------
    The tarball file
    """
    tarballfile = TemporaryFile()

    with tarfile.open(fileobj=tarballfile, mode='w') as archive:
        for path_on_system, path_in_tarball in tar_paths.items():
            archive.add(path_on_system, arcname=path_in_tarball)

    # Flush and seek to the beginning of the file
    tarballfile.flush()
    tarballfile.seek(0)

    try:
        yield tarballfile
    finally:
        tarballfile.close()
python
def create_tarball(tar_paths):
    """
    Context Manager that creates the tarball of the Docker Context to use for building the image

    Parameters
    ----------
    tar_paths dict(str, str)
        Key representing a full path to the file or directory and the Value representing the path within the tarball

    Yields
    ------
    The tarball file
    """
    tarballfile = TemporaryFile()

    with tarfile.open(fileobj=tarballfile, mode='w') as archive:
        for path_on_system, path_in_tarball in tar_paths.items():
            archive.add(path_on_system, arcname=path_in_tarball)

    # Flush and seek to the beginning of the file
    tarballfile.flush()
    tarballfile.seek(0)

    try:
        yield tarballfile
    finally:
        tarballfile.close()
[ "def", "create_tarball", "(", "tar_paths", ")", ":", "tarballfile", "=", "TemporaryFile", "(", ")", "with", "tarfile", ".", "open", "(", "fileobj", "=", "tarballfile", ",", "mode", "=", "'w'", ")", "as", "archive", ":", "for", "path_on_system", ",", "path_...
Context Manager that creates the tarball of the Docker Context to use for building the image

Parameters
----------
tar_paths dict(str, str)
    Key representing a full path to the file or directory and the Value representing the path within the tarball

Yields
------
The tarball file
[ "Context", "Manger", "that", "creates", "the", "tarball", "of", "the", "Docker", "Context", "to", "use", "for", "building", "the", "image" ]
c05af5e7378c6f05f7d82ad3f0bca17204177db6
https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/lib/utils/tar.py#L11-L37
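Since the function yields inside try/finally and its docstring calls it a context manager, it is presumably wrapped with contextlib.contextmanager in the source; a usage sketch under that assumption (the ./app path is illustrative):

# Hypothetical usage of create_tarball; assumes a local ./app directory.
with create_tarball({"./app": "/app"}) as tarball:
    context_bytes = tarball.read()  # raw bytes of the in-memory tar archive
    print(len(context_bytes), "bytes in the Docker build context")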
29,891
awslabs/aws-sam-cli
samcli/commands/local/lib/local_lambda_service.py
LocalLambdaService.start
def start(self):
    """
    Creates and starts the Local Lambda Invoke service. This method will block until the service is stopped
    manually using an interrupt. After the service is started, callers can make HTTP requests to the endpoint
    to invoke the Lambda function and receive a response.

    NOTE: This is a blocking call that will not return until the thread is interrupted with SIGINT/SIGTERM
    """

    # We care about passing only stderr to the Service and not stdout because stdout from Docker container
    # contains the response to the API which is sent out as HTTP response. Only stderr needs to be printed
    # to the console or a log file. stderr from Docker container contains runtime logs and output of print
    # statements from the Lambda function
    service = LocalLambdaInvokeService(lambda_runner=self.lambda_runner,
                                       port=self.port,
                                       host=self.host,
                                       stderr=self.stderr_stream)

    service.create()

    LOG.info("Starting the Local Lambda Service. You can now invoke your Lambda Functions defined in your template"
             " through the endpoint.")

    service.run()
python
def start(self):
    """
    Creates and starts the Local Lambda Invoke service. This method will block until the service is stopped
    manually using an interrupt. After the service is started, callers can make HTTP requests to the endpoint
    to invoke the Lambda function and receive a response.

    NOTE: This is a blocking call that will not return until the thread is interrupted with SIGINT/SIGTERM
    """

    # We care about passing only stderr to the Service and not stdout because stdout from Docker container
    # contains the response to the API which is sent out as HTTP response. Only stderr needs to be printed
    # to the console or a log file. stderr from Docker container contains runtime logs and output of print
    # statements from the Lambda function
    service = LocalLambdaInvokeService(lambda_runner=self.lambda_runner,
                                       port=self.port,
                                       host=self.host,
                                       stderr=self.stderr_stream)

    service.create()

    LOG.info("Starting the Local Lambda Service. You can now invoke your Lambda Functions defined in your template"
             " through the endpoint.")

    service.run()
[ "def", "start", "(", "self", ")", ":", "# We care about passing only stderr to the Service and not stdout because stdout from Docker container", "# contains the response to the API which is sent out as HTTP response. Only stderr needs to be printed", "# to the console or a log file. stderr from Dock...
Creates and starts the Local Lambda Invoke service. This method will block until the service is stopped manually using an interrupt. After the service is started, callers can make HTTP requests to the endpoint to invoke the Lambda function and receive a response. NOTE: This is a blocking call that will not return until the thread is interrupted with SIGINT/SIGTERM
[ "Creates", "and", "starts", "the", "Local", "Lambda", "Invoke", "service", ".", "This", "method", "will", "block", "until", "the", "service", "is", "stopped", "manually", "using", "an", "interrupt", ".", "After", "the", "service", "is", "started", "callers", ...
c05af5e7378c6f05f7d82ad3f0bca17204177db6
https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/commands/local/lib/local_lambda_service.py#L35-L58
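An assumed wiring sketch for the service above; the constructor arguments simply mirror the attributes the method reads (lambda_runner, port, host, stderr_stream) and are not confirmed by the record:

# Hypothetical construction; argument names are assumptions.
runner = ...  # placeholder for a local Lambda runner object (assumed)
service = LocalLambdaService(lambda_runner=runner,
                             port=3001,
                             host="127.0.0.1",
                             stderr_stream=None)
service.start()  # blocks until interrupted with SIGINT/SIGTERM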
29,892
awslabs/aws-sam-cli
samcli/commands/local/lib/sam_function_provider.py
SamFunctionProvider._extract_sam_function_codeuri
def _extract_sam_function_codeuri(name, resource_properties, code_property_key):
    """
    Extracts the SAM Function CodeUri from the Resource Properties

    Parameters
    ----------
    name str
        LogicalId of the resource
    resource_properties dict
        Dictionary representing the Properties of the Resource
    code_property_key str
        Property Key of the code on the Resource

    Returns
    -------
    str
        Representing the local code path
    """
    codeuri = resource_properties.get(code_property_key, SamFunctionProvider._DEFAULT_CODEURI)
    # CodeUri can be a dictionary of S3 Bucket/Key or a S3 URI, neither of which are supported
    if isinstance(codeuri, dict) or \
            (isinstance(codeuri, six.string_types) and codeuri.startswith("s3://")):
        codeuri = SamFunctionProvider._DEFAULT_CODEURI
        LOG.warning("Lambda function '%s' has specified S3 location for CodeUri which is unsupported. "
                    "Using default value of '%s' instead", name, codeuri)
    return codeuri
python
def _extract_sam_function_codeuri(name, resource_properties, code_property_key):
    """
    Extracts the SAM Function CodeUri from the Resource Properties

    Parameters
    ----------
    name str
        LogicalId of the resource
    resource_properties dict
        Dictionary representing the Properties of the Resource
    code_property_key str
        Property Key of the code on the Resource

    Returns
    -------
    str
        Representing the local code path
    """
    codeuri = resource_properties.get(code_property_key, SamFunctionProvider._DEFAULT_CODEURI)
    # CodeUri can be a dictionary of S3 Bucket/Key or a S3 URI, neither of which are supported
    if isinstance(codeuri, dict) or \
            (isinstance(codeuri, six.string_types) and codeuri.startswith("s3://")):
        codeuri = SamFunctionProvider._DEFAULT_CODEURI
        LOG.warning("Lambda function '%s' has specified S3 location for CodeUri which is unsupported. "
                    "Using default value of '%s' instead", name, codeuri)
    return codeuri
[ "def", "_extract_sam_function_codeuri", "(", "name", ",", "resource_properties", ",", "code_property_key", ")", ":", "codeuri", "=", "resource_properties", ".", "get", "(", "code_property_key", ",", "SamFunctionProvider", ".", "_DEFAULT_CODEURI", ")", "# CodeUri can be a ...
Extracts the SAM Function CodeUri from the Resource Properties

Parameters
----------
name str
    LogicalId of the resource
resource_properties dict
    Dictionary representing the Properties of the Resource
code_property_key str
    Property Key of the code on the Resource

Returns
-------
str
    Representing the local code path
[ "Extracts", "the", "SAM", "Function", "CodeUri", "from", "the", "Resource", "Properties" ]
c05af5e7378c6f05f7d82ad3f0bca17204177db6
https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/commands/local/lib/sam_function_provider.py#L138-L163
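Illustrative inputs (assumed) showing the S3 fallback described above:

# Hypothetical resource properties demonstrating both branches.
props_local = {"CodeUri": "./hello_world"}
props_s3 = {"CodeUri": "s3://my-bucket/fn.zip"}

SamFunctionProvider._extract_sam_function_codeuri("Fn", props_local, "CodeUri")  # -> "./hello_world"
SamFunctionProvider._extract_sam_function_codeuri("Fn", props_s3, "CodeUri")     # -> default CodeUri, logs a warning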
29,893
awslabs/aws-sam-cli
samcli/commands/local/lib/sam_function_provider.py
SamFunctionProvider._extract_lambda_function_code
def _extract_lambda_function_code(resource_properties, code_property_key):
    """
    Extracts the Lambda Function Code from the Resource Properties

    Parameters
    ----------
    resource_properties dict
        Dictionary representing the Properties of the Resource
    code_property_key str
        Property Key of the code on the Resource

    Returns
    -------
    str
        Representing the local code path
    """
    codeuri = resource_properties.get(code_property_key, SamFunctionProvider._DEFAULT_CODEURI)

    if isinstance(codeuri, dict):
        codeuri = SamFunctionProvider._DEFAULT_CODEURI

    return codeuri
python
def _extract_lambda_function_code(resource_properties, code_property_key):
    """
    Extracts the Lambda Function Code from the Resource Properties

    Parameters
    ----------
    resource_properties dict
        Dictionary representing the Properties of the Resource
    code_property_key str
        Property Key of the code on the Resource

    Returns
    -------
    str
        Representing the local code path
    """
    codeuri = resource_properties.get(code_property_key, SamFunctionProvider._DEFAULT_CODEURI)

    if isinstance(codeuri, dict):
        codeuri = SamFunctionProvider._DEFAULT_CODEURI

    return codeuri
[ "def", "_extract_lambda_function_code", "(", "resource_properties", ",", "code_property_key", ")", ":", "codeuri", "=", "resource_properties", ".", "get", "(", "code_property_key", ",", "SamFunctionProvider", ".", "_DEFAULT_CODEURI", ")", "if", "isinstance", "(", "codeu...
Extracts the Lambda Function Code from the Resource Properties

Parameters
----------
resource_properties dict
    Dictionary representing the Properties of the Resource
code_property_key str
    Property Key of the code on the Resource

Returns
-------
str
    Representing the local code path
[ "Extracts", "the", "Lambda", "Function", "Code", "from", "the", "Resource", "Properties" ]
c05af5e7378c6f05f7d82ad3f0bca17204177db6
https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/commands/local/lib/sam_function_provider.py#L195-L217
29,894
awslabs/aws-sam-cli
samcli/commands/local/lib/sam_function_provider.py
SamFunctionProvider._parse_layer_info
def _parse_layer_info(list_of_layers, resources):
    """
    Creates a list of Layer objects that are represented by the resources and the list of layers

    Parameters
    ----------
    list_of_layers List(str)
        List of layers that are defined within the Layers Property on a function
    resources dict
        The Resources dictionary defined in a template

    Returns
    -------
    List(samcli.commands.local.lib.provider.Layer)
        List of the Layer objects created from the template and layer list defined on the function.
        The order of the layers does not change.
        I.E: list_of_layers = ["layer1", "layer2"] the return would be [Layer("layer1"), Layer("layer2")]
    """
    layers = []
    for layer in list_of_layers:
        # If the layer is a string, assume it is the arn
        if isinstance(layer, six.string_types):
            layers.append(LayerVersion(layer, None))
            continue

        # In the list of layers that is defined within a template, you can reference a LayerVersion resource.
        # When running locally, we need to follow that Ref so we can extract the local path to the layer code.
        if isinstance(layer, dict) and layer.get("Ref"):
            layer_logical_id = layer.get("Ref")
            layer_resource = resources.get(layer_logical_id)
            if not layer_resource or \
                    layer_resource.get("Type", "") not in (SamFunctionProvider._SERVERLESS_LAYER,
                                                           SamFunctionProvider._LAMBDA_LAYER):
                raise InvalidLayerReference()

            layer_properties = layer_resource.get("Properties", {})
            resource_type = layer_resource.get("Type")
            codeuri = None

            if resource_type == SamFunctionProvider._LAMBDA_LAYER:
                codeuri = SamFunctionProvider._extract_lambda_function_code(layer_properties, "Content")

            if resource_type == SamFunctionProvider._SERVERLESS_LAYER:
                codeuri = SamFunctionProvider._extract_sam_function_codeuri(layer_logical_id,
                                                                            layer_properties,
                                                                            "ContentUri")

            layers.append(LayerVersion(layer_logical_id, codeuri))

    return layers
python
def _parse_layer_info(list_of_layers, resources):
    """
    Creates a list of Layer objects that are represented by the resources and the list of layers

    Parameters
    ----------
    list_of_layers List(str)
        List of layers that are defined within the Layers Property on a function
    resources dict
        The Resources dictionary defined in a template

    Returns
    -------
    List(samcli.commands.local.lib.provider.Layer)
        List of the Layer objects created from the template and layer list defined on the function.
        The order of the layers does not change.
        I.E: list_of_layers = ["layer1", "layer2"] the return would be [Layer("layer1"), Layer("layer2")]
    """
    layers = []
    for layer in list_of_layers:
        # If the layer is a string, assume it is the arn
        if isinstance(layer, six.string_types):
            layers.append(LayerVersion(layer, None))
            continue

        # In the list of layers that is defined within a template, you can reference a LayerVersion resource.
        # When running locally, we need to follow that Ref so we can extract the local path to the layer code.
        if isinstance(layer, dict) and layer.get("Ref"):
            layer_logical_id = layer.get("Ref")
            layer_resource = resources.get(layer_logical_id)
            if not layer_resource or \
                    layer_resource.get("Type", "") not in (SamFunctionProvider._SERVERLESS_LAYER,
                                                           SamFunctionProvider._LAMBDA_LAYER):
                raise InvalidLayerReference()

            layer_properties = layer_resource.get("Properties", {})
            resource_type = layer_resource.get("Type")
            codeuri = None

            if resource_type == SamFunctionProvider._LAMBDA_LAYER:
                codeuri = SamFunctionProvider._extract_lambda_function_code(layer_properties, "Content")

            if resource_type == SamFunctionProvider._SERVERLESS_LAYER:
                codeuri = SamFunctionProvider._extract_sam_function_codeuri(layer_logical_id,
                                                                            layer_properties,
                                                                            "ContentUri")

            layers.append(LayerVersion(layer_logical_id, codeuri))

    return layers
[ "def", "_parse_layer_info", "(", "list_of_layers", ",", "resources", ")", ":", "layers", "=", "[", "]", "for", "layer", "in", "list_of_layers", ":", "# If the layer is a string, assume it is the arn", "if", "isinstance", "(", "layer", ",", "six", ".", "string_types"...
Creates a list of Layer objects that are represented by the resources and the list of layers

Parameters
----------
list_of_layers List(str)
    List of layers that are defined within the Layers Property on a function
resources dict
    The Resources dictionary defined in a template

Returns
-------
List(samcli.commands.local.lib.provider.Layer)
    List of the Layer objects created from the template and layer list defined on the function.
    The order of the layers does not change.
    I.E: list_of_layers = ["layer1", "layer2"] the return would be [Layer("layer1"), Layer("layer2")]
[ "Creates", "a", "list", "of", "Layer", "objects", "that", "are", "represented", "by", "the", "resources", "and", "the", "list", "of", "layers" ]
c05af5e7378c6f05f7d82ad3f0bca17204177db6
https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/commands/local/lib/sam_function_provider.py#L220-L270
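An assumed example of the two accepted layer shapes, an ARN string and a Ref into a layer resource:

# Hypothetical template fragment with one local Lambda layer.
resources = {
    "MyLayer": {
        "Type": "AWS::Lambda::LayerVersion",
        "Properties": {"Content": "./layer_src"},
    }
}
layers = SamFunctionProvider._parse_layer_info(
    ["arn:aws:lambda:us-east-1:123456789012:layer:base:1", {"Ref": "MyLayer"}],
    resources)
# -> [LayerVersion(<arn>, None), LayerVersion("MyLayer", "./layer_src")]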
29,895
awslabs/aws-sam-cli
samcli/local/lambdafn/env_vars.py
EnvironmentVariables.resolve
def resolve(self):
    """
    Resolves the values from different sources and returns a dict of environment variables to use when running
    the function locally.

    :return dict: Dict where key is the variable name and value is the value of the variable. Both key and values
        are strings
    """

    # AWS_* variables must always be passed to the function, but user has the choice to override them
    result = self._get_aws_variables()

    # Default value for the variable gets lowest priority
    for name, value in self.variables.items():

        # Shell environment values, second priority
        if name in self.shell_env_values:
            value = self.shell_env_values[name]

        # Overridden values, highest priority
        if name in self.override_values:
            value = self.override_values[name]

        # Any value must be a string when passed to Lambda runtime.
        # Runtime expects a Map<String, String> for environment variables
        result[name] = self._stringify_value(value)

    return result
python
def resolve(self):
    """
    Resolves the values from different sources and returns a dict of environment variables to use when running
    the function locally.

    :return dict: Dict where key is the variable name and value is the value of the variable. Both key and values
        are strings
    """

    # AWS_* variables must always be passed to the function, but user has the choice to override them
    result = self._get_aws_variables()

    # Default value for the variable gets lowest priority
    for name, value in self.variables.items():

        # Shell environment values, second priority
        if name in self.shell_env_values:
            value = self.shell_env_values[name]

        # Overridden values, highest priority
        if name in self.override_values:
            value = self.override_values[name]

        # Any value must be a string when passed to Lambda runtime.
        # Runtime expects a Map<String, String> for environment variables
        result[name] = self._stringify_value(value)

    return result
[ "def", "resolve", "(", "self", ")", ":", "# AWS_* variables must always be passed to the function, but user has the choice to override them", "result", "=", "self", ".", "_get_aws_variables", "(", ")", "# Default value for the variable gets lowest priority", "for", "name", ",", "...
Resolves the values from different sources and returns a dict of environment variables to use when running
the function locally.

:return dict: Dict where key is the variable name and value is the value of the variable. Both key and values
    are strings
[ "Resolves", "the", "values", "from", "different", "sources", "and", "returns", "a", "dict", "of", "environment", "variables", "to", "use", "when", "running", "the", "function", "locally", "." ]
c05af5e7378c6f05f7d82ad3f0bca17204177db6
https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/local/lambdafn/env_vars.py#L77-L104
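A sketch of the three-level precedence the method implements; constructing EnvironmentVariables with these keyword arguments is an assumption about its API:

# Hypothetical construction showing the priority order.
env = EnvironmentVariables(
    variables={"TABLE": "default-table", "STAGE": "dev"},  # template defaults: lowest priority
    shell_env_values={"TABLE": "shell-table"},             # shell environment: middle priority
    override_values={"TABLE": "override-table"})           # explicit overrides: highest priority

env.resolve()["TABLE"]  # -> "override-table"; "STAGE" stays "dev"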
29,896
awslabs/aws-sam-cli
samcli/local/lambdafn/env_vars.py
EnvironmentVariables._stringify_value
def _stringify_value(self, value):
    """
    This method stringifies values of environment variables. If the value of the method is a list or dictionary,
    then this method will replace it with empty string. Values of environment variables in Lambda must be a string.
    List or dictionary usually means they are intrinsic functions which have not been resolved.

    :param value: Value to stringify
    :return string: Stringified value
    """

    # List/dict/None values are replaced with a blank
    if isinstance(value, (dict, list, tuple)) or value is None:
        result = self._BLANK_VALUE

    # str(True) will output "True". To maintain backwards compatibility we need to output "true" or "false"
    elif value is True:
        result = "true"
    elif value is False:
        result = "false"

    # value is a scalar type like int, str which can be stringified
    # do not stringify unicode in Py2, Py3 str supports unicode
    elif sys.version_info.major > 2:
        result = str(value)
    elif not isinstance(value, unicode):  # noqa: F821 pylint: disable=undefined-variable
        result = str(value)
    else:
        result = value

    return result
python
def _stringify_value(self, value):
    """
    This method stringifies values of environment variables. If the value of the method is a list or dictionary,
    then this method will replace it with empty string. Values of environment variables in Lambda must be a string.
    List or dictionary usually means they are intrinsic functions which have not been resolved.

    :param value: Value to stringify
    :return string: Stringified value
    """

    # List/dict/None values are replaced with a blank
    if isinstance(value, (dict, list, tuple)) or value is None:
        result = self._BLANK_VALUE

    # str(True) will output "True". To maintain backwards compatibility we need to output "true" or "false"
    elif value is True:
        result = "true"
    elif value is False:
        result = "false"

    # value is a scalar type like int, str which can be stringified
    # do not stringify unicode in Py2, Py3 str supports unicode
    elif sys.version_info.major > 2:
        result = str(value)
    elif not isinstance(value, unicode):  # noqa: F821 pylint: disable=undefined-variable
        result = str(value)
    else:
        result = value

    return result
[ "def", "_stringify_value", "(", "self", ",", "value", ")", ":", "# List/dict/None values are replaced with a blank", "if", "isinstance", "(", "value", ",", "(", "dict", ",", "list", ",", "tuple", ")", ")", "or", "value", "is", "None", ":", "result", "=", "se...
This method stringifies values of environment variables. If the value of the method is a list or dictionary,
then this method will replace it with empty string. Values of environment variables in Lambda must be a string.
List or dictionary usually means they are intrinsic functions which have not been resolved.

:param value: Value to stringify
:return string: Stringified value
[ "This", "method", "stringifies", "values", "of", "environment", "variables", ".", "If", "the", "value", "of", "the", "method", "is", "a", "list", "or", "dictionary", "then", "this", "method", "will", "replace", "it", "with", "empty", "string", ".", "Values",...
c05af5e7378c6f05f7d82ad3f0bca17204177db6
https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/local/lambdafn/env_vars.py#L175-L204
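Illustrative conversions implied by the branches above; the bare constructor call is an assumption:

env = EnvironmentVariables()            # assumed no-arg construction
env._stringify_value({"Ref": "Table"})  # -> "" (unresolved intrinsic is blanked)
env._stringify_value(None)              # -> ""
env._stringify_value(True)              # -> "true", not Python's "True"
env._stringify_value(8080)              # -> "8080"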
29,897
awslabs/aws-sam-cli
samcli/local/docker/container.py
Container.delete
def delete(self):
    """
    Removes a container that was created earlier.
    """
    if not self.is_created():
        LOG.debug("Container was not created. Skipping deletion")
        return

    try:
        self.docker_client.containers\
            .get(self.id)\
            .remove(force=True)  # Remove a container, even if it is running
    except docker.errors.NotFound:
        # Container is already not there
        LOG.debug("Container with ID %s does not exist. Skipping deletion", self.id)
    except docker.errors.APIError as ex:
        msg = str(ex)
        removal_in_progress = ("removal of container" in msg) and ("is already in progress" in msg)

        # When removal is already started, Docker API will throw an exception
        # Skip such exceptions.
        if not removal_in_progress:
            raise ex

    self.id = None
python
def delete(self):
    """
    Removes a container that was created earlier.
    """
    if not self.is_created():
        LOG.debug("Container was not created. Skipping deletion")
        return

    try:
        self.docker_client.containers\
            .get(self.id)\
            .remove(force=True)  # Remove a container, even if it is running
    except docker.errors.NotFound:
        # Container is already not there
        LOG.debug("Container with ID %s does not exist. Skipping deletion", self.id)
    except docker.errors.APIError as ex:
        msg = str(ex)
        removal_in_progress = ("removal of container" in msg) and ("is already in progress" in msg)

        # When removal is already started, Docker API will throw an exception
        # Skip such exceptions.
        if not removal_in_progress:
            raise ex

    self.id = None
[ "def", "delete", "(", "self", ")", ":", "if", "not", "self", ".", "is_created", "(", ")", ":", "LOG", ".", "debug", "(", "\"Container was not created. Skipping deletion\"", ")", "return", "try", ":", "self", ".", "docker_client", ".", "containers", ".", "get...
Removes a container that was created earlier.
[ "Removes", "a", "container", "that", "was", "created", "earlier", "." ]
c05af5e7378c6f05f7d82ad3f0bca17204177db6
https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/local/docker/container.py#L139-L163
29,898
awslabs/aws-sam-cli
samcli/local/docker/container.py
Container.start
def start(self, input_data=None):
    """
    Calls Docker API to start the container. The container must be created at the first place to run.
    It waits for the container to complete, fetches both stdout and stderr logs and returns through the
    given streams.

    Parameters
    ----------
    input_data
        Optional. Input data sent to the container through container's stdin.
    """

    if input_data:
        raise ValueError("Passing input through container's stdin is not supported")

    if not self.is_created():
        raise RuntimeError("Container does not exist. Cannot start this container")

    # Get the underlying container instance from Docker API
    real_container = self.docker_client.containers.get(self.id)

    # Start the container
    real_container.start()
python
def start(self, input_data=None):
    """
    Calls Docker API to start the container. The container must be created at the first place to run.
    It waits for the container to complete, fetches both stdout and stderr logs and returns through the
    given streams.

    Parameters
    ----------
    input_data
        Optional. Input data sent to the container through container's stdin.
    """

    if input_data:
        raise ValueError("Passing input through container's stdin is not supported")

    if not self.is_created():
        raise RuntimeError("Container does not exist. Cannot start this container")

    # Get the underlying container instance from Docker API
    real_container = self.docker_client.containers.get(self.id)

    # Start the container
    real_container.start()
[ "def", "start", "(", "self", ",", "input_data", "=", "None", ")", ":", "if", "input_data", ":", "raise", "ValueError", "(", "\"Passing input through container's stdin is not supported\"", ")", "if", "not", "self", ".", "is_created", "(", ")", ":", "raise", "Runt...
Calls Docker API to start the container. The container must be created at the first place to run.
It waits for the container to complete, fetches both stdout and stderr logs and returns through the given streams.

Parameters
----------
input_data
    Optional. Input data sent to the container through container's stdin.
[ "Calls", "Docker", "API", "to", "start", "the", "container", ".", "The", "container", "must", "be", "created", "at", "the", "first", "place", "to", "run", ".", "It", "waits", "for", "the", "container", "to", "complete", "fetches", "both", "stdout", "and", ...
c05af5e7378c6f05f7d82ad3f0bca17204177db6
https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/local/docker/container.py#L165-L187
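A hypothetical lifecycle sketch combining this record with Container.delete above; the constructor arguments are assumptions, while the create/start/delete ordering follows the docstrings:

# Hypothetical Container lifecycle; constructor arguments are assumed.
container = Container(image="lambci/lambda:python3.7",
                      cmd=["app.handler"],
                      working_dir="/var/task",
                      host_dir="./hello_world")
container.create()   # start() raises RuntimeError if this is skipped
container.start()    # input_data must be None: stdin passing is unsupported
container.delete()   # force-removes; tolerates "removal already in progress"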
29,899
awslabs/aws-sam-cli
samcli/local/docker/container.py
Container._write_container_output
def _write_container_output(output_itr, stdout=None, stderr=None):
    """
    Based on the data returned from the Container output, via the iterator, write it to the appropriate streams

    Parameters
    ----------
    output_itr: Iterator
        Iterator returned by the Docker Attach command
    stdout: samcli.lib.utils.stream_writer.StreamWriter, optional
        Stream writer to write stdout data from Container into
    stderr: samcli.lib.utils.stream_writer.StreamWriter, optional
        Stream writer to write stderr data from the Container into
    """

    # Iterator returns a tuple of (frame_type, data) where the frame type determines which stream we write output
    # to
    for frame_type, data in output_itr:

        if frame_type == Container._STDOUT_FRAME_TYPE and stdout:
            # Frame type 1 is stdout data.
            stdout.write(data)

        elif frame_type == Container._STDERR_FRAME_TYPE and stderr:
            # Frame type 2 is stderr data.
            stderr.write(data)

        else:
            # Either an unsupported frame type or stream for this frame type is not configured
            LOG.debug("Dropping Docker container output because of unconfigured frame type. "
                      "Frame Type: %s. Data: %s", frame_type, data)
python
def _write_container_output(output_itr, stdout=None, stderr=None):
    """
    Based on the data returned from the Container output, via the iterator, write it to the appropriate streams

    Parameters
    ----------
    output_itr: Iterator
        Iterator returned by the Docker Attach command
    stdout: samcli.lib.utils.stream_writer.StreamWriter, optional
        Stream writer to write stdout data from Container into
    stderr: samcli.lib.utils.stream_writer.StreamWriter, optional
        Stream writer to write stderr data from the Container into
    """

    # Iterator returns a tuple of (frame_type, data) where the frame type determines which stream we write output
    # to
    for frame_type, data in output_itr:

        if frame_type == Container._STDOUT_FRAME_TYPE and stdout:
            # Frame type 1 is stdout data.
            stdout.write(data)

        elif frame_type == Container._STDERR_FRAME_TYPE and stderr:
            # Frame type 2 is stderr data.
            stderr.write(data)

        else:
            # Either an unsupported frame type or stream for this frame type is not configured
            LOG.debug("Dropping Docker container output because of unconfigured frame type. "
                      "Frame Type: %s. Data: %s", frame_type, data)
[ "def", "_write_container_output", "(", "output_itr", ",", "stdout", "=", "None", ",", "stderr", "=", "None", ")", ":", "# Iterator returns a tuple of (frame_type, data) where the frame type determines which stream we write output", "# to", "for", "frame_type", ",", "data", "i...
Based on the data returned from the Container output, via the iterator, write it to the appropriate streams

Parameters
----------
output_itr: Iterator
    Iterator returned by the Docker Attach command
stdout: samcli.lib.utils.stream_writer.StreamWriter, optional
    Stream writer to write stdout data from Container into
stderr: samcli.lib.utils.stream_writer.StreamWriter, optional
    Stream writer to write stderr data from the Container into
[ "Based", "on", "the", "data", "returned", "from", "the", "Container", "output", "via", "the", "iterator", "write", "it", "to", "the", "appropriate", "streams" ]
c05af5e7378c6f05f7d82ad3f0bca17204177db6
https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/local/docker/container.py#L229-L258
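An illustrative demultiplexing run; the fake frames are assumptions, and the StreamWriter import follows the module path named in the docstring above:

import sys
from samcli.lib.utils.stream_writer import StreamWriter  # per the docstring above

# Fake (frame_type, payload) tuples as the Docker attach API would yield them:
frames = iter([(1, b'{"statusCode": 200}'),    # frame type 1 -> stdout
               (2, b"START RequestId: ...")])  # frame type 2 -> stderr

Container._write_container_output(frames,
                                  stdout=StreamWriter(sys.stdout.buffer),
                                  stderr=StreamWriter(sys.stderr.buffer))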