_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q42100
GroupsAPI.list_group_s_users
train
def list_group_s_users(self, group_id, include=None, search_term=None):
    """
    List group's users.

    Returns a list of users in the group.
    """
    path, data, params = {}, {}, {}

    # REQUIRED - PATH - group_id
    path["group_id"] = group_id

    # OPTIONAL - search_term: partial name or full ID to match; must be at
    # least 3 characters.
    if search_term is not None:
        params["search_term"] = search_term

    # OPTIONAL - include: "avatar_url" adds users' avatar_urls.
    if include is not None:
        self._validate_enum(include, ["avatar_url"])
        params["include"] = include

    self.logger.debug("GET /api/v1/groups/{group_id}/users with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/groups/{group_id}/users".format(**path), data=data, params=params, all_pages=True)
python
{ "resource": "" }
q42101
GroupsAPI.preview_processed_html
train
def preview_processed_html(self, group_id, html=None):
    """
    Preview processed html.

    Preview html content processed for this group.
    """
    path, data, params = {}, {}, {}

    # REQUIRED - PATH - group_id
    path["group_id"] = group_id

    # OPTIONAL - html: the html content to process.
    if html is not None:
        data["html"] = html

    self.logger.debug("POST /api/v1/groups/{group_id}/preview_html with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/groups/{group_id}/preview_html".format(**path), data=data, params=params, no_data=True)
python
{ "resource": "" }
q42102
GroupsAPI.list_group_memberships
train
def list_group_memberships(self, group_id, filter_states=None):
    """
    List group memberships.

    List the members of a group.
    """
    path, data, params = {}, {}, {}

    # REQUIRED - PATH - group_id
    path["group_id"] = group_id

    # OPTIONAL - filter_states: restrict to the given workflow_states;
    # all memberships are returned by default.
    if filter_states is not None:
        self._validate_enum(filter_states, ["accepted", "invited", "requested"])
        params["filter_states"] = filter_states

    self.logger.debug("GET /api/v1/groups/{group_id}/memberships with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/groups/{group_id}/memberships".format(**path), data=data, params=params, all_pages=True)
python
{ "resource": "" }
q42103
Clause.from_str
train
def from_str(cls, string):
    """
    Creates a clause from a given string.

    Parameters
    ----------
    string: str
        A string of the form `a+!b` which translates to `a AND NOT b`.

    Returns
    -------
    caspo.core.clause.Clause
        Created object instance
    """
    # Each '+'-separated token is parsed as one literal of the clause.
    return cls(list(map(Literal.from_str, string.split('+'))))
python
{ "resource": "" }
q42104
Clause.bool
train
def bool(self, state):
    """
    Returns the Boolean evaluation of the clause with respect to a given state.

    Parameters
    ----------
    state : dict
        Key-value mapping describing a Boolean state or assignment

    Returns
    -------
    boolean
        The evaluation of the clause (a conjunction of signed literals)
        with respect to the given state or assignment
    """
    result = 1
    for variable, sign in self:
        # A positive literal takes the state value, a negative one its negation.
        literal = state[variable] if sign == 1 else not state[variable]
        result = result and literal
        if not result:
            # Short-circuit: one false literal falsifies the conjunction.
            break
    return result
python
{ "resource": "" }
q42105
Compare.compare_dbs
train
def compare_dbs(self, db_x, db_y, show=True):
    """Compare the tables and row counts of two databases.

    Args:
        db_x: first database name
        db_y: second database name
        show: when True, print each database's unique table keys

    Returns:
        The diff of the two {table: row_count} mappings, or None when either
        database has no tables.
    """
    # TODO: Improve method
    self._printer("\tComparing database's {0} and {1}".format(db_x, db_y))

    # Row counts keyed by table name for each database
    x = self._compare_dbs_getter(db_x)
    y = self._compare_dbs_getter(db_y)
    x_tbl_count = len(x)
    y_tbl_count = len(y)

    # Check that database does not have zero tables
    if x_tbl_count == 0:
        self._printer('\tThe database {0} has no tables'.format(db_x))
        self._printer('\tDatabase differencing was not run')
        return None
    elif y_tbl_count == 0:
        self._printer('\tThe database {0} has no tables'.format(db_y))
        self._printer('\tDatabase differencing was not run')
        return None

    # Print comparisons
    if show:
        uniques_x = diff(x, y, x_only=True)
        if len(uniques_x) > 0:
            self._printer('\nUnique keys from {0} ({1} of {2}):'.format(db_x, len(uniques_x), x_tbl_count))
            self._printer('------------------------------')
            for k, v in sorted(uniques_x):
                self._printer('{0:25} {1}'.format(k, v))
            self._printer('\n')

        uniques_y = diff(x, y, y_only=True)
        if len(uniques_y) > 0:
            self._printer('Unique keys from {0} ({1} of {2}):'.format(db_y, len(uniques_y), y_tbl_count))
            self._printer('------------------------------')
            for k, v in sorted(uniques_y):
                self._printer('{0:25} {1}'.format(k, v))
            self._printer('\n')

        # BUG FIX: the original tested len(uniques_y) twice, so the databases
        # were reported identical whenever db_y alone had no unique keys.
        if len(uniques_x) == 0 and len(uniques_y) == 0:
            self._printer("Databases's {0} and {1} are identical:".format(db_x, db_y))
            self._printer('------------------------------')
    return diff(x, y)
python
{ "resource": "" }
q42106
Compare._compare_dbs_getter
train
def _compare_dbs_getter(self, db): """Retrieve a dictionary of table_name, row count key value pairs for a DB.""" # Change DB connection if needed if self.database != db: self.change_db(db) return self.count_rows_all()
python
{ "resource": "" }
q42107
Compare.compare_schemas
train
def compare_schemas(self, db_x, db_y, show=True):
    """
    Compare the structures of two databases.

    Analyses and compares the column definitions of each table in both
    databases. Identifies differences in column names, data types and keys.

    Returns:
        The diff of the two schema listings, or None when either database
        has no tables.
    """
    # TODO: Improve method
    self._printer("\tComparing database schema's {0} and {1}".format(db_x, db_y))

    # Flattened schema rows for each database
    x = self._schema_getter(db_x)
    y = self._schema_getter(db_y)
    x_count = len(x)
    y_count = len(y)

    # Check that database does not have zero tables
    if x_count == 0:
        self._printer('\tThe database {0} has no tables'.format(db_x))
        self._printer('\tDatabase differencing was not run')
        return None
    elif y_count == 0:
        self._printer('\tThe database {0} has no tables'.format(db_y))
        self._printer('\tDatabase differencing was not run')
        return None

    # Print comparisons
    if show:
        uniques_x = diff(x, y, x_only=True)
        if len(uniques_x) > 0:
            self._printer('\nUnique keys from {0} ({1} of {2}):'.format(db_x, len(uniques_x), x_count))
            self._printer('------------------------------')
            for k, v in sorted(uniques_x):
                self._printer('{0:25} {1}'.format(k, v))
            self._printer('\n')

        uniques_y = diff(x, y, y_only=True)
        if len(uniques_y) > 0:
            self._printer('Unique keys from {0} ({1} of {2}):'.format(db_y, len(uniques_y), y_count))
            self._printer('------------------------------')
            for k, v in sorted(uniques_y):
                self._printer('{0:25} {1}'.format(k, v))
            self._printer('\n')

        # BUG FIX: the original tested len(uniques_y) twice, so the schemas
        # were reported identical whenever db_y alone had no unique keys.
        if len(uniques_x) == 0 and len(uniques_y) == 0:
            self._printer("Databases's {0} and {1} are identical:".format(db_x, db_y))
            self._printer('------------------------------')
    return diff(x, y)
python
{ "resource": "" }
q42108
Compare._schema_getter
train
def _schema_getter(self, db): """Retrieve a dictionary representing a database's data schema.""" # Change DB connection if needed if self.database != db: self.change_db(db) schema_dict = {tbl: self.get_schema(tbl) for tbl in self.tables} schema_lst = [] for table, schema in schema_dict.items(): for col in schema: col.insert(0, table) schema_lst.append(col) return schema_lst
python
{ "resource": "" }
q42109
Memoize.put_cache_results
train
def put_cache_results(self, key, func_akw, set_cache_cb):
    """Compute the wrapped function's result and optionally cache it.

    *func_akw* is an (args, kwargs) pair for ``self.func``; *set_cache_cb*
    decides whether the computed result is stored under *key*.
    """
    args, kwargs = func_akw
    results = self.func(*args, **kwargs)
    # The callback gets the final say on whether this result is cacheable.
    if set_cache_cb(results):
        self[key] = results
    return results
python
{ "resource": "" }
q42110
service_param_string
train
def service_param_string(params):
    """Takes a param section from a metadata class and returns a param
    string for the service method.

    Required parameters come first, ordered shortest-name-first; optional
    parameters follow as keyword args with their default (or None), sorted
    alphabetically.
    """
    required = []
    keyword = []
    for param in params:
        name = fix_param_name(param['name'])
        if param.get('required') is True:
            required.append(name)
        elif 'default' in param:
            keyword.append('{name}={default}'.format(name=name, default=param['default']))
        else:
            keyword.append('{name}=None'.format(name=name))
    # BUG FIX: list.sort() no longer accepts a bare cmp function in Python 3;
    # the original `p.sort(lambda a, b: len(a) - len(b))` raised TypeError.
    required.sort(key=len)
    keyword.sort()
    return ', '.join(required + keyword)
python
{ "resource": "" }
q42111
build_metadata_class
train
def build_metadata_class(specfile):
    """Generate a metadata class module for the specified specfile."""
    with open(specfile) as f:
        spec = json.load(f)
    # Module name is the spec filename without its extension.
    name = os.path.basename(specfile).split('.')[0]
    spec['name'] = name
    template = get_jinja_env().get_template('metadata.py.jinja2')
    with open('pycanvas/meta/{}.py'.format(name), 'w') as out:
        out.write(template.render(spec=spec))
python
{ "resource": "" }
q42112
build_model_classes
train
def build_model_classes(metadata):
    """Generate a model class file for every model in the given metadata module."""
    meta = importlib.import_module(metadata)
    template = get_jinja_env().get_template('model.py.jinja2')
    for model in meta.models:
        with open(model_path(model.name.lower()), 'w') as out:
            out.write(template.render(model_md=model))
python
{ "resource": "" }
q42113
build_service_class
train
def build_service_class(metadata):
    """Generate the service class file for the service in the given metadata module."""
    meta = importlib.import_module(metadata)
    service = meta.service
    template = get_jinja_env().get_template('service.py.jinja2')
    with open(api_path(service.name.lower()), 'w') as out:
        out.write(template.render(service_md=service))
python
{ "resource": "" }
q42114
AccountReportsAPI.start_report
train
def start_report(self, report, account_id, _parameters=None):
    """
    Start a Report.

    Generates a report instance for the account.
    """
    path, data, params = {}, {}, {}

    # REQUIRED - PATH - account_id
    path["account_id"] = account_id
    # REQUIRED - PATH - report
    path["report"] = report

    # OPTIONAL - [parameters]: report-specific parameters.
    if _parameters is not None:
        data["[parameters]"] = _parameters

    self.logger.debug("POST /api/v1/accounts/{account_id}/reports/{report} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/accounts/{account_id}/reports/{report}".format(**path), data=data, params=params, single_item=True)
python
{ "resource": "" }
q42115
AccountReportsAPI.index_of_reports
train
def index_of_reports(self, report, account_id):
    """
    Index of Reports.

    Shows all reports that have been run for the account of a specific type.
    """
    path, data, params = {}, {}, {}

    # REQUIRED - PATH - account_id
    path["account_id"] = account_id
    # REQUIRED - PATH - report
    path["report"] = report

    self.logger.debug("GET /api/v1/accounts/{account_id}/reports/{report} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/accounts/{account_id}/reports/{report}".format(**path), data=data, params=params, all_pages=True)
python
{ "resource": "" }
q42116
BaseQueryMixin.clean_query
train
def clean_query(self):
    """Strip `None` entries from any list values in the elasticsearch query."""
    if not self.query:
        return
    for field, value in self.query.items():
        if isinstance(value, list) and None in value:
            self.query[field] = [item for item in value if item is not None]
python
{ "resource": "" }
q42117
BaseQueryMixin.get_recirc_content
train
def get_recirc_content(self, published=True, count=3):
    """Return the first *count* content objects in `included_ids`, falling
    back to a tag-boosted search when no ids are pinned."""
    query = self.get_query()
    # check if query has included_ids & if there are any ids in it,
    # in case the ids have been removed from the array
    if not query.get('included_ids'):
        # Fallback: score by shared tag slugs, excluding this object itself.
        qs = Content.search_objects.search()
        qs = qs.query(
            TagBoost(slugs=self.tags.values_list("slug", flat=True))
        ).filter(
            ~Ids(values=[self.id])
        ).sort(
            "_score"
        )
        return qs[:count]
    # NOTE: set included_ids to just be the first 3 ids,
    # otherwise search will return last 3 items
    query['included_ids'] = query['included_ids'][:count]
    search = custom_search_model(Content, query, published=published, field_map={
        "feature_type": "feature_type.slug",
        "tag": "tags.slug",
        "content-type": "_type"
    })
    return search
python
{ "resource": "" }
q42118
BaseQueryMixin.get_full_recirc_content
train
def get_full_recirc_content(self, published=True):
    """Perform the elasticsearch search and return all matching content objects."""
    field_map = {
        "feature_type": "feature_type.slug",
        "tag": "tags.slug",
        "content-type": "_type",
    }
    return custom_search_model(Content, self.get_query(),
                               published=published, field_map=field_map)
python
{ "resource": "" }
q42119
Timer.run_later
train
def run_later(self, callable_, timeout, *args, **kwargs):
    """Schedules the specified callable for delayed execution.

    Returns a TimerTask instance that can be used to cancel pending
    execution.
    """
    # `with` on the condition is equivalent to the acquire/try/finally-release
    # pattern and guarantees the lock is dropped even when we raise below.
    with self.lock:
        if self.die:
            raise RuntimeError('This timer has been shut down and '
                               'does not accept new jobs.')
        task = TimerTask(callable_, *args, **kwargs)
        self._jobs.append((task, time.time() + timeout))
        # Keep the queue ordered by scheduled execution time.
        self._jobs.sort(key=lambda entry: entry[1])
        self.lock.notify()
        return task
python
{ "resource": "" }
q42120
PagesAPI.show_front_page_groups
train
def show_front_page_groups(self, group_id):
    """
    Show front page.

    Retrieve the content of the front page.
    """
    path, data, params = {}, {}, {}

    # REQUIRED - PATH - group_id
    path["group_id"] = group_id

    self.logger.debug("GET /api/v1/groups/{group_id}/front_page with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/groups/{group_id}/front_page".format(**path), data=data, params=params, single_item=True)
python
{ "resource": "" }
q42121
ContentTypeField.to_representation
train
def to_representation(self, value):
    """Serialize a ContentType primary key to its underscored natural key."""
    content_type = ContentType.objects.get_for_id(value)
    # natural_key() yields (app_label, model); join as "app_model".
    return "_".join(content_type.natural_key())
python
{ "resource": "" }
q42122
ContentTypeField.to_internal_value
train
def to_internal_value(self, value):
    """Resolve an underscored natural key string back to a ContentType id."""
    parts = value.split("_")
    return ContentType.objects.get_by_natural_key(*parts).id
python
{ "resource": "" }
q42123
DefaultUserSerializer.to_internal_value
train
def to_internal_value(self, data):
    """Basically, each author dict must include either a username or id."""
    model = self.Meta.model
    if "id" in data:
        return model.objects.get(id=data["id"])
    if "username" not in data:
        raise ValidationError("Authors must include an ID or a username.")
    return model.objects.get(username=data["username"])
python
{ "resource": "" }
q42124
ChangeHandler.run
train
def run(self):
    """Called when a file is changed to re-run the tests with nose."""
    if self.auto_clear:
        # BUG FIX: the non-Windows branch ran the bogus command
        # 'auto_clear' instead of clearing the screen with 'clear'.
        os.system('cls' if os.name == 'nt' else 'clear')
    else:
        print()
    print('Running unit tests...')
    if self.auto_clear:
        print()
    subprocess.call('nosetests', cwd=self.directory)
python
{ "resource": "" }
q42125
write_tex
train
def write_tex():
    """
    Finds all of the output data files, and writes them out to .tex
    """
    datadir = livvkit.index_dir
    outdir = os.path.join(datadir, "tex")
    print(outdir)
    # BUG FIX: the output directory was never created (the mkdir_p call was
    # commented out), so the open() below failed on a fresh run.
    os.makedirs(outdir, exist_ok=True)
    data_files = glob.glob(datadir + "/**/*.json", recursive=True)
    for data_file in data_files:
        data = functions.read_json(data_file)
        tex = translate_page(data)
        outfile = os.path.join(outdir, os.path.basename(data_file).replace('json', 'tex'))
        with open(outfile, 'w') as f:
            f.write(tex)
python
{ "resource": "" }
q42126
Subprocess._invoke
train
def _invoke(self, *params): """ Invoke self.exe as a subprocess """ cmd = [self.exe] + list(params) proc = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.location, env=self.env) stdout, stderr = proc.communicate() if not proc.returncode == 0: raise RuntimeError(stderr.strip() or stdout.strip()) return stdout.decode('utf-8')
python
{ "resource": "" }
q42127
allow_bare_decorator
train
def allow_bare_decorator(cls):
    """
    Wrapper for a class decorator which allows for bare decorator and
    argument syntax
    """
    @wraps(cls)
    def wrapper(*args, **kwargs):
        """Dispatch between bare-class and argument invocation."""
        bare_class = len(args) == 1 and not kwargs and isclass(args[0])
        if bare_class:
            # @deco with no arguments: instantiate with defaults, then decorate.
            return cls()(args[0])
        # @deco(...) with arguments: build the configured decorator instance.
        return cls(*args, **kwargs)
    return wrapper
python
{ "resource": "" }
q42128
warp_image_by_corner_points_projection
train
def warp_image_by_corner_points_projection(corner_points, image):
    """Given corner points of a Sudoku, warps original selection to a square image.

    :param corner_points: four corners ordered top-left, top-right,
        bottom-left, bottom-right
    :type corner_points: list
    :param image: source image to warp
    :return: the warped selection, resized to 500x500
    """
    top_left, top_right, bottom_left, bottom_right = np.array(corner_points)

    # Use the longest side of the quadrilateral as the square's edge length.
    edges = [
        np.linalg.norm(top_right - top_left),
        np.linalg.norm(bottom_right - bottom_left),
        np.linalg.norm(top_left - bottom_left),
        np.linalg.norm(top_right - bottom_right),
    ]
    side = int(np.ceil(max(edges)))

    src = np.array([top_left, top_right, bottom_left, bottom_right])
    dst = np.array([[0, 0], [side - 1, 0], [0, side - 1], [side - 1, side - 1]])

    # Estimate the projective transform mapping destination onto source,
    # then warp into a side x side square.
    transform = ProjectiveTransform()
    transform.estimate(dst, src)
    warped = warp(image, transform, output_shape=(side, side))
    return resize(warped, (500, 500))
python
{ "resource": "" }
q42129
dict_get_path
train
def dict_get_path(data, path, default=None):
    """
    Returns the value inside nested structure of data located
    at period delimited path

    When traversing a list, as long as that list is containing objects of
    type dict, items in that list will have their "name" and "type" values
    tested against the current key in the path.

    Args:
        data (dict or list): data to traverse
        path (str): '.' delimited string

    Kwargs:
        default: value to return if path does not exist
    """
    node = data
    for key in path.split("."):
        # isinstance() instead of type() == so dict/list subclasses traverse too.
        if isinstance(node, list):
            for item in node:
                # Match the key against "name", falling back to "type".
                if item.get("name", item.get("type")) == key:
                    node = item
                    break
            else:
                # No list item matched this key.
                return default
        elif isinstance(node, dict):
            if key not in node:
                return default
            node = node[key]
        else:
            # Scalar reached before the path was exhausted.
            return default
    return node
python
{ "resource": "" }
q42130
HyperGraph.to_funset
train
def to_funset(self):
    """
    Converts the hypergraph to a set of `gringo.Fun`_ instances

    Returns
    -------
    set
        Representation of the hypergraph as a set of `gringo.Fun`_ instances

    .. _gringo.Fun: http://potassco.sourceforge.net/gringo.html#Fun
    """
    # NOTE(review): iteritems/itertuples suggest Python 2 and pandas-backed
    # self.hyper/self.edges — confirm before porting.
    fs = set()
    # node(name, index) facts
    for i, n in self.nodes.iteritems():
        fs.add(gringo.Fun('node', [n, i]))
    # hyper(node_idx, hyper_idx, size) facts; size counts the edges grouped
    # under this hyperedge
    for j, i in self.hyper.iteritems():
        fs.add(gringo.Fun('hyper', [i, j, len(self.edges[self.edges.hyper_idx == j])]))
    # edge(hyper_idx, variable, sign) facts
    for j, v, s in self.edges.itertuples(index=False):
        fs.add(gringo.Fun('edge', [j, v, s]))
    return fs
python
{ "resource": "" }
q42131
options
train
def options(f):
    """
    Shared options, used by all bartender commands
    """
    config_option = click.option(
        '--config',
        envvar='VODKA_HOME',
        default=click.get_app_dir('vodka'),
        help="location of config file")
    return config_option(f)
python
{ "resource": "" }
q42132
check_config
train
def check_config(config):
    """
    Check and validate configuration attributes, to help administrators
    quickly spot missing required configurations and invalid configuration
    values in general
    """
    cfg = vodka.config.Config(read=config)
    # Set up logging and load all apps from the config before validating,
    # so app-provided config schemas participate in validation.
    vodka.log.set_loggers(cfg.get("logging"))
    vodka.app.load_all(cfg)
    click.echo("Checking config at %s for errors ..." % config)
    num_crit, num_warn = vodka.config.InstanceHandler.validate(cfg)
    click.echo("%d config ERRORS, %d config WARNINGS" % (num_crit, num_warn))
python
{ "resource": "" }
q42133
config
train
def config(config, skip_defaults):
    """
    Generates configuration file from config specifications
    """
    configurator = ClickConfigurator(
        vodka.plugin,
        skip_defaults=skip_defaults
    )
    configurator.configure(vodka.config.instance, vodka.config.InstanceHandler)

    # presumably parse_url raises ValueError when `config` is a bare
    # directory path — TODO confirm; in that case target config.yaml inside it.
    try:
        dst = munge_config.parse_url(config)
    except ValueError:
        config = os.path.join(config, "config.yaml")
        dst = munge_config.parse_url(config)

    # Ensure the target directory exists before writing.
    config_dir = os.path.dirname(config)
    if not os.path.exists(config_dir) and config_dir:
        os.makedirs(config_dir)

    dst.cls().dumpu(vodka.config.instance, dst.url.path)

    # Some values cannot be filled automatically; tell the admin what to edit.
    if configurator.action_required:
        click.echo("")
        click.echo("not all required values could be set by this script, please manually edit the config and set the following values")
        click.echo("")
        for item in configurator.action_required:
            click.echo("- %s" % item)
        click.echo("")

    click.echo("Config written to %s" % dst.url.path)
python
{ "resource": "" }
q42134
newapp
train
def newapp(path):
    """
    Generates all files for a new vodka app at the specified location.
    Will generate to current directory if no path is specified
    """
    app_path = os.path.join(VODKA_INSTALL_DIR, "resources", "blank_app")
    if not os.path.exists(path):
        os.makedirs(path)
    elif os.path.exists(os.path.join(path, "application.py")):
        # BUG FIX: click has no `error` function, so the original raised an
        # AttributeError here — and even had it printed, execution fell
        # through and crashed on the makedirs below. Abort cleanly instead.
        raise click.ClickException(
            "There already exists a vodka app at %s, please specify a different path" % path)
    os.makedirs(os.path.join(path, "plugins"))
    shutil.copy(os.path.join(app_path, "application.py"), os.path.join(path, "application.py"))
    shutil.copy(os.path.join(app_path, "__init__.py"), os.path.join(path, "__init__.py"))
    shutil.copy(os.path.join(app_path, "plugins", "example.py"), os.path.join(path, "plugins", "example.py"))
    shutil.copy(os.path.join(app_path, "plugins", "__init__.py"), os.path.join(path, "plugins", "__init__.py"))
python
{ "resource": "" }
q42135
is_enabled
train
def is_enabled():
    """Returns ``True`` if bcrypt should be used."""
    if not getattr(settings, "BCRYPT_ENABLED", True):
        return False
    # A mail outbox attribute is only present under the test runner, where
    # bcrypt is disabled unless explicitly enabled for tests.
    if hasattr(mail, 'outbox'):
        return getattr(settings, "BCRYPT_ENABLED_UNDER_TEST", False)
    return True
python
{ "resource": "" }
q42136
open_file_with_default_program
train
def open_file_with_default_program(file_path, background=False, return_cmd=False):
    '''Opens a file with the default program for that type.

    Open the file with the user's preferred application.

    Args:
        file_path (str) : Path to the file to be opened.
        background (bool): Run the program in the background, instead of
            waiting for completion. Defaults to ``False``.
        return_cmd (bool): Returns the command to run the program (str)
            instead of running it. Defaults to ``False``.

    Returns:
        str: Only if ``return_cmd``, the command to run the program is
        returned instead of running it. Else returns nothing.
    '''
    desktop_env = system.get_name()
    if desktop_env == 'windows':
        open_file_cmd = 'explorer.exe ' + "'%s'" % file_path
    elif desktop_env == 'mac':
        open_file_cmd = 'open ' + "'%s'" % file_path
    else:
        # Resolve the default .desktop handler for the file's MIME type.
        file_mime_type = system.get_cmd_out(
            ['xdg-mime', 'query', 'filetype', file_path])
        desktop_file = system.get_cmd_out(
            ['xdg-mime', 'query', 'default', file_mime_type])
        open_file_cmd = desktopfile.execute(
            desktopfile.locate(desktop_file)[0],
            files=[file_path], return_cmd=True)

    if return_cmd:
        return open_file_cmd
    proc = sp.Popen(open_file_cmd, shell=True)
    if not background:
        proc.wait()
python
{ "resource": "" }
q42137
terminal
train
def terminal(exec_='', background=False, shell_after_cmd_exec=False,
             keep_open_after_cmd_exec=False, return_cmd=False):
    '''Start the default terminal emulator.

    Start the user's preferred terminal emulator, optionally running a
    command in it.

    **Order of starting**: Powershell (Windows); iTerm2 then Terminal.app
    (Mac); on Linux/Unix: ``$TERMINAL``, ``x-terminal-emulator``,
    Terminator, the desktop environment's terminal, then
    gnome-terminal / urxvt / rxvt / xterm.

    Args:
        exec_ (str): An optional command to run in the opened terminal
            emulator. Defaults to empty (no command).
        background (bool): Run the terminal in the background, instead of
            waiting for completion. Defaults to ``False``.
        shell_after_cmd_exec (bool): Start the user's shell after running
            the command (see exec_). Defaults to ``False``.
        keep_open_after_cmd_exec (bool): Keep the window open after the
            command finishes (ignored when shell_after_cmd_exec is set).
        return_cmd (bool): Returns the command used to start the terminal
            (str) instead of running it. Defaults to ``False``.

    Returns:
        str: Only if ``return_cmd``, returns the command to run the
        terminal instead of running it. Else returns nothing.
    '''
    desktop_env = system.get_name()
    if not exec_:
        # With no command to run, always leave the user at a shell.
        shell_after_cmd_exec = True

    if desktop_env == 'windows':
        terminal_cmd_str = 'start powershell.exe'
    # BUG FIX: this was a separate `if`, so on Windows the Linux/Unix
    # detection in the else branch below also ran and could overwrite
    # terminal_cmd_str.
    elif desktop_env == 'mac':
        # Try iTerm2 first, apparently most popular Mac Terminal
        if mac_app_exists('iTerm2'):
            terminal_cmd_str = 'open -a iTerm2'
        else:
            terminal_cmd_str = 'open -a Terminal'
    else:
        if os.getenv('TERMINAL'):
            # Not everywhere, but if user *really* has a preference, they
            # will set this
            terminal_cmd_str = os.getenv('TERMINAL')
        elif system.is_in_path('x-terminal-emulator'):
            # Convenience script that launches the terminal based on user
            # preferences. Not available on some distros (but most have it),
            # so try this first.
            terminal_cmd_str = 'x-terminal-emulator'
        elif system.is_in_path('terminator'):
            terminal_cmd_str = 'terminator'
        elif desktop_env in ['gnome', 'unity', 'cinnamon', 'gnome2']:
            terminal_cmd_str = 'gnome-terminal'
        elif desktop_env == 'xfce4':
            terminal_cmd_str = 'xfce4-terminal'
        elif desktop_env == 'kde' or desktop_env == 'trinity':
            terminal_cmd_str = 'konsole'
        elif desktop_env == 'mate':
            terminal_cmd_str = 'mate-terminal'
        elif desktop_env == 'i3':
            terminal_cmd_str = 'i3-sensible-terminal'
        elif desktop_env == 'pantheon':
            terminal_cmd_str = 'pantheon-terminal'
        elif desktop_env == 'enlightenment':
            terminal_cmd_str = 'terminology'
        elif desktop_env == 'lxde' or desktop_env == 'lxqt':
            terminal_cmd_str = 'lxterminal'
        else:
            if system.is_in_path('gnome-terminal'):
                terminal_cmd_str = 'gnome-terminal'
            elif system.is_in_path('urxvt'):
                terminal_cmd_str = 'urxvt'
            elif system.is_in_path('rxvt'):
                terminal_cmd_str = 'rxvt'
            elif system.is_in_path('xterm'):
                terminal_cmd_str = 'xterm'

    if exec_:
        if desktop_env == 'windows':
            if keep_open_after_cmd_exec and not shell_after_cmd_exec:
                exec_ += '; pause'
            if os.path.isfile(exec_):
                # BUG FIX: the original appended the script path with no
                # separating space after 'start powershell.exe'.
                terminal_cmd_str += ' ' + exec_
            else:
                terminal_cmd_str += ' -Command ' + '"' + exec_ + '"'
            if shell_after_cmd_exec:
                terminal_cmd_str += ' -NoExit'
        else:
            if keep_open_after_cmd_exec and not shell_after_cmd_exec:
                exec_ += '; read'
            if shell_after_cmd_exec:
                exec_ += '; ' + os.getenv('SHELL')
            if desktop_env == 'mac':
                terminal_cmd_str += ' sh -c {}'.format(shlex.quote(exec_))
            else:
                terminal_cmd_str += ' -e {}'.format(
                    shlex.quote('sh -c {}'.format(shlex.quote(exec_))))

    if return_cmd:
        return terminal_cmd_str
    terminal_proc = sp.Popen([terminal_cmd_str], shell=True, stdout=sp.PIPE)
    if not background:
        # Wait for process to complete
        terminal_proc.wait()
python
{ "resource": "" }
q42138
text_editor
train
def text_editor(file='', background=False, return_cmd=False):
    '''Starts the default graphical text editor.

    Start the user's preferred graphical text editor, optionally with a
    file.

    Args:
        file (str) : The file to be opened with the editor. Defaults to an
            empty string (i.e. no file).
        background (bool): Runs the editor in the background, instead of
            waiting for completion. Defaults to ``False``.
        return_cmd (bool): Returns the command (str) to run the editor
            instead of running it. Defaults to ``False``.

    Returns:
        str: Only if ``return_cmd``, the command to run the editor is
        returned. Else returns nothing.
    '''
    desktop_env = system.get_name()
    if desktop_env == 'windows':
        # 'ftype textfile' prints e.g. "textfile=notepad.exe %1"; keep the RHS.
        editor_cmd_str = system.get_cmd_out(
            ['ftype', 'textfile']).split('=', 1)[1]
    elif desktop_env == 'mac':
        # NOTE(review): 'open -a' lacks a trailing space, and the argument
        # list below is missing commas (the adjacent string literals
        # concatenate into a single argument) — this branch looks broken on
        # mac; confirm against a real system.
        editor_cmd_str = 'open -a' + system.get_cmd_out(
            ['def', 'read', 'com.apple.LaunchServices', 'LSHandlers'
             '-array'
             '{LSHandlerContentType=public.plain-text;}']
        )
    else:
        # Use def handler for MIME-type text/plain
        editor_cmd_str = system.get_cmd_out(
            ['xdg-mime', 'query', 'default', 'text/plain'])
    if '\n' in editor_cmd_str:
        # Sometimes locate returns multiple results
        # use first one
        editor_cmd_str = editor_cmd_str.split('\n')[0]
    if editor_cmd_str.endswith('.desktop'):
        # We don't use desktopfile.execute() in order to have working
        # return_cmd and background
        editor_cmd_str = desktopfile.parse(
            desktopfile.locate(editor_cmd_str)[0])['Exec']
        for i in editor_cmd_str.split():
            if i.startswith('%'):
                # Strip %-style formatters (%f, %u, ...) from the Exec line.
                editor_cmd_str = editor_cmd_str.replace(i, '')
            if i == '--new-document':
                # Gedit
                editor_cmd_str = editor_cmd_str.replace(i, '')
    if file:
        editor_cmd_str += ' {}'.format(shlex.quote(file))
    if return_cmd:
        return editor_cmd_str
    text_editor_proc = sp.Popen([editor_cmd_str], shell=True)
    if not background:
        text_editor_proc.wait()
python
{ "resource": "" }
q42139
run
train
def run(run_type, module, config):
    """
    Collects the analyses cases to be run and launches processes for each of
    them.

    Args:
        run_type: A string representation of the run type (eg. verification)
        module: The module corresponding to the run. Must have a run_suite
            function
        config: The configuration for the module
    """
    print(" -----------------------------------------------------------------")
    print(" Beginning {} test suite ".format(run_type.lower()))
    print(" -----------------------------------------------------------------")
    print()
    summary = run_quiet(module, config)
    print(" -----------------------------------------------------------------")
    print(" {} test suite complete ".format(run_type.capitalize()))
    print(" -----------------------------------------------------------------")
    print()
    return summary
python
{ "resource": "" }
q42140
launch_processes
train
def launch_processes(tests, run_module, group=True, **config):
    """
    Helper method to launch processes and sync output
    """
    manager = multiprocessing.Manager()
    shared_summaries = manager.dict()
    workers = [
        multiprocessing.Process(
            target=run_module.run_suite,
            args=(test, config[test], shared_summaries))
        for test in tests
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()

    if group:
        # One summary describes the whole group; per-test data hangs off "Data".
        summary = run_module.populate_metadata(tests[0], config[tests[0]])
        summary["Data"] = dict(shared_summaries)
        return summary

    results = dict(shared_summaries)
    summaries = []
    for idx, test in enumerate(tests):
        summaries.append(run_module.populate_metadata(test, config[test]))
        if summaries[idx]:
            summaries[idx]['Data'] = {test: results[test]}
    return summaries
python
{ "resource": "" }
q42141
on_resize
train
def on_resize(width, height):
    """Setup 3D projection for the new window size."""
    glViewport(0, 0, width, height)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    # 30-degree FOV, window aspect ratio, near/far clip planes at 0.1/1000.
    aspect = 1.0 * width / height
    gluPerspective(30, aspect, 0.1, 1000.0)
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
python
{ "resource": "" }
q42142
Sanji.register
train
def register(self, reg_data, retry=True, interval=1, timeout=3):
    """
    register function
        retry
            True, infinity retries
            False, no retries
            Number, retries times
        interval
            time period for retry
    return
        False if no success
        Tunnel if success
    """
    # NOTE(review): retry/interval/timeout are accepted but never read in
    # this body; the while-loop below retries forever — confirm intended.
    if len(reg_data["resources"]) == 0:
        _logger.debug("%s no need to register due to no resources" %
                      (reg_data["name"]))
        return

    def _register():
        # Returns the response on HTTP 200, False on timeout, and falls
        # through (None) on any other response code.
        try:
            resp = self.publish.direct.post(
                "/controller/registration", reg_data)
            if resp.code == 200:
                return resp
        except TimeoutError:
            _logger.debug("Register message is timeout")
            return False

    resp = _register()
    while resp is False:
        # Timed out: deregister and try again.
        _logger.debug("Register failed.")
        self.deregister(reg_data)
        resp = _register()

    if resp is None:
        # Controller answered with a non-200 code: give up and stop.
        _logger.error("Can\'t not register to controller")
        self.stop()
        return False

    # Registration succeeded: wire up the tunnel and update bookkeeping.
    self._conn.set_tunnel(
        reg_data["role"], resp.data["tunnel"], self.on_sanji_message)
    self.bundle.profile["currentTunnels"] = [
        tunnel for tunnel, callback in self._conn.tunnels.items()]
    self.bundle.profile["regCount"] = \
        self.bundle.profile.get("reg_count", 0) + 1
    _logger.debug("Register successfully %s tunnel: %s" %
                  (reg_data["name"], resp.data["tunnel"],))
python
{ "resource": "" }
q42143
MailListView.get_context
train
def get_context(self):
    """Extend the parent context with the registered mail names.

    Adds the list of registered mails under the ``mail_map`` key.
    """
    context = super(MailListView, self).get_context()
    context['mail_map'] = registered_mails_names()
    return context
python
{ "resource": "" }
q42144
open
train
def open(s3_url, mode='r', s3_connection=None, **kwargs):
    """Open S3 url, returning a File Object.

    S3 connection:
    1. Can be specified directly by `s3_connection`.
    2. `boto.connect_s3` will be used supplying all `kwargs`.
        - `aws_access_key_id` and `aws_secret_access_key`.
        - `profile_name` - recommended.
    See: http://boto.readthedocs.org/en/latest/boto_config_tut.html

    NOTE(review): this is a generator that yields exactly once -- it is
    presumably wrapped with ``contextlib.contextmanager`` at the decoration
    site (not visible here); confirm before calling it directly.
    """
    connection = s3_connection or boto.connect_s3(**kwargs)
    bucket_name, key_name = url_split(s3_url)
    try:
        bucket = connection.get_bucket(bucket_name)
    except boto.exception.S3ResponseError:
        raise BucketNotFoundError('Bucket "%s" was not found.' % bucket_name)
    # Stage all reads/writes through a local temp file.
    f = NamedTemporaryFile()
    try:
        if 'w' in mode.lower():
            # Write mode: hand the empty temp file to the caller, then
            # upload its contents to a new key on exit.
            s3_key = bucket.new_key(key_name)
            yield f
            f.seek(0)
            s3_key.set_contents_from_file(f)
        else:
            # Read mode: download the key into the temp file first.
            s3_key = bucket.get_key(key_name)
            if not s3_key:
                raise KeyNotFoundError('Key "%s" was not found.' % s3_url)
            s3_key.get_file(f)
            f.seek(0)
            yield f
    finally:
        # NamedTemporaryFile deletes itself on close.
        f.close()
python
{ "resource": "" }
q42145
Numeric.is_decimal
train
def is_decimal(self):
    """Determine if a data record is of the type float.

    On success, sets ``self.type`` to 'DECIMAL' and records the digit
    counts of the integer part (``self.len``) and the fractional part
    (``self.len_decimal``), then returns True.  Returns None implicitly
    when the data is not a decimal type.
    """
    dt = DATA_TYPES['decimal']
    if type(self.data) in dt['type']:
        self.type = 'DECIMAL'
        # NOTE(review): assumes str(self.data) always contains a '.';
        # values rendered in scientific notation (e.g. 1e-05) would make
        # num_split a single element and raise IndexError -- confirm the
        # input domain.
        num_split = str(self.data).split('.', 1)
        self.len = len(num_split[0])
        self.len_decimal = len(num_split[1])
        return True
python
{ "resource": "" }
q42146
EnrollmentTermsAPI.create_enrollment_term
train
def create_enrollment_term(self, account_id, enrollment_term_end_at=None, enrollment_term_name=None, enrollment_term_sis_term_id=None, enrollment_term_start_at=None):
    """
    Create enrollment term.

    Create a new enrollment term for the specified account.

    :returns: the created EnrollmentTerm (single item from the API response)
    """
    path = {}
    data = {}
    params = {}

    # REQUIRED - PATH - account_id
    """ID"""
    path["account_id"] = account_id

    # OPTIONAL - enrollment_term[name]
    """The name of the term."""
    if enrollment_term_name is not None:
        data["enrollment_term[name]"] = enrollment_term_name

    # OPTIONAL - enrollment_term[start_at]
    """The day/time the term starts.
    Accepts times in ISO 8601 format, e.g. 2015-01-10T18:48:00Z."""
    if enrollment_term_start_at is not None:
        data["enrollment_term[start_at]"] = enrollment_term_start_at

    # OPTIONAL - enrollment_term[end_at]
    """The day/time the term ends.
    Accepts times in ISO 8601 format, e.g. 2015-01-10T18:48:00Z."""
    if enrollment_term_end_at is not None:
        data["enrollment_term[end_at]"] = enrollment_term_end_at

    # OPTIONAL - enrollment_term[sis_term_id]
    """The unique SIS identifier for the term."""
    if enrollment_term_sis_term_id is not None:
        data["enrollment_term[sis_term_id]"] = enrollment_term_sis_term_id

    self.logger.debug("POST /api/v1/accounts/{account_id}/terms with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/accounts/{account_id}/terms".format(**path), data=data, params=params, single_item=True)
python
{ "resource": "" }
q42147
EnrollmentTermsAPI.list_enrollment_terms
train
def list_enrollment_terms(self, account_id, workflow_state=None):
    """
    List enrollment terms.

    Return all of the terms in the account.

    :returns: all pages of terms (extracted from the 'enrollment_terms'
        key of each API response page)
    """
    path = {}
    data = {}
    params = {}

    # REQUIRED - PATH - account_id
    """ID"""
    path["account_id"] = account_id

    # OPTIONAL - workflow_state
    """If set, only returns terms that are in the given state.
    Defaults to 'active'."""
    if workflow_state is not None:
        self._validate_enum(workflow_state, ["active", "deleted", "all"])
        params["workflow_state"] = workflow_state

    self.logger.debug("GET /api/v1/accounts/{account_id}/terms with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/accounts/{account_id}/terms".format(**path), data=data, params=params, data_key='enrollment_terms', all_pages=True)
python
{ "resource": "" }
q42148
AttributeFilter.from_model
train
def from_model(cls, model_instance, default_value=False, **kwargs):
    """
    wrapper for Model's get_attribute_filter

    :param model_instance: prestans DataCollection subclass to derive the filter from
    :param default_value: initial visibility applied to every attribute
    :param kwargs: per-attribute visibility overrides; each key must already
        exist on the generated filter, otherwise KeyError is raised
    :raises TypeError: if model_instance is not a DataCollection
    """
    if not isinstance(model_instance, DataCollection):
        raise TypeError("model_instance must be a subclass of \
        prestans.types.DataCollection, %s given" % (model_instance.__class__.__name__))
    # Scalar arrays have no child attributes to filter.
    elif isinstance(model_instance, Array) and model_instance.is_scalar:
        return AttributeFilter(is_array_scalar=True)

    attribute_filter_instance = model_instance.get_attribute_filter(default_value)

    # kwargs support: apply explicit per-attribute overrides.
    for name, value in iter(kwargs.items()):
        if name in attribute_filter_instance:
            setattr(attribute_filter_instance, name, value)
        else:
            raise KeyError(name)

    return attribute_filter_instance
python
{ "resource": "" }
q42149
AttributeFilter.conforms_to_template_filter
train
def conforms_to_template_filter(self, template_filter):
    """
    Check AttributeFilter conforms to the rules set by the template

    - If self, has attributes that template_filter does not contain, throw Exception
    - If sub list found, perform the first check
    - If self has a value for an attribute, assign to final AttributeFilter
    - If not found, assign value from template

    todo: rename as current name is mis-leading

    :raises TypeError: if template_filter is not an AttributeFilter
    :raises exception.AttributeFilterDiffers: if self has keys the template lacks
    :returns: a new AttributeFilter merging self's choices over the template
    """
    if not isinstance(template_filter, self.__class__):
        raise TypeError("AttributeFilter can only check conformance against \
        another template filter, %s provided" % template_filter.__class__.__name__)

    # keys from the template
    template_filter_keys = template_filter.keys()
    # Keys from the object itself
    this_filter_keys = self.keys()

    # 1. Check to see if the client has provided unwanted keys
    unwanted_keys = set(this_filter_keys) - set(template_filter_keys)
    if len(unwanted_keys) > 0:
        raise exception.AttributeFilterDiffers(list(unwanted_keys))

    # 2. Make a attribute_filter that we send back
    evaluated_attribute_filter = AttributeFilter()

    # 3. Evaluate the differences between the two, with template_filter as the standard
    for template_key in template_filter_keys:

        if template_key in this_filter_keys:

            value = getattr(self, template_key)

            # if sub filter and boolean provided with of true, create default filter with value of true
            if isinstance(value, bool) and \
                    value is True and \
                    isinstance(getattr(template_filter, template_key), AttributeFilter):
                setattr(evaluated_attribute_filter, template_key, getattr(template_filter, template_key))
            elif isinstance(value, bool):
                setattr(evaluated_attribute_filter, template_key, value)
            elif isinstance(value, self.__class__):
                # Attribute lists sort themselves out, to produce sub Attribute Filters
                template_sub_list = getattr(template_filter, template_key)
                this_sub_list = getattr(self, template_key)
                setattr(
                    evaluated_attribute_filter,
                    template_key,
                    this_sub_list.conforms_to_template_filter(template_sub_list)
                )
        else:
            # Key absent on self: fall back to the template's value.
            setattr(evaluated_attribute_filter, template_key, getattr(template_filter, template_key))

    return evaluated_attribute_filter
python
{ "resource": "" }
q42150
AttributeFilter.is_filter_at_key
train
def is_filter_at_key(self, key):
    """
    return True if attribute is a sub filter
    """
    # Present on the filter AND itself an AttributeFilter instance.
    return key in self and isinstance(getattr(self, key), self.__class__)
python
{ "resource": "" }
q42151
AttributeFilter.is_attribute_visible
train
def is_attribute_visible(self, key):
    """
    Returns True if an attribute is visible

    If attribute is an instance of AttributeFilter, it returns True if
    all attributes of the sub filter are visible.

    :param key: name of attribute to check
    :type key: str
    :return: whether attribute is visible
    :rtype: bool
    """
    if key not in self:
        return False
    status = getattr(self, key)
    if status is True:
        return True
    if isinstance(status, self.__class__):
        # Sub-filter: visible when any of its attributes is visible.
        return status.are_any_attributes_visible()
    return False
python
{ "resource": "" }
q42152
AttributeFilter.are_any_attributes_visible
train
def are_any_attributes_visible(self):
    """
    checks to see if any attributes are set to true

    Recurses into sub-filters: a sub-filter counts as visible when *any*
    of its own attributes is visible.

    :return: True if at least one attribute (at any depth) is visible
    :rtype: bool
    """
    for attribute_name, type_instance in inspect.getmembers(self):

        if attribute_name.startswith('__') or inspect.ismethod(type_instance):
            # Ignore dunders and bound methods.
            continue

        if isinstance(type_instance, bool) and type_instance is True:
            return True
        # BUG FIX: recurse with the "any" check (was are_all_attributes_visible,
        # which made a sub-filter with mixed visibility report nothing visible,
        # contradicting this method's contract and is_attribute_visible()).
        elif isinstance(type_instance, self.__class__) and type_instance.are_any_attributes_visible() is True:
            return True

    return False
python
{ "resource": "" }
q42153
AttributeFilter.set_all_attribute_values
train
def set_all_attribute_values(self, value):
    """
    sets all the attribute values to the value and propagate to any children
    """
    for name, member in inspect.getmembers(self):
        # Skip dunder attributes and bound methods.
        if name.startswith('__') or inspect.ismethod(member):
            continue
        if isinstance(member, bool):
            self.__dict__[name] = value
        elif isinstance(member, self.__class__):
            # Recurse into sub-filters.
            member.set_all_attribute_values(value)
python
{ "resource": "" }
q42154
AttributeFilter._init_from_dictionary
train
def _init_from_dictionary(self, from_dictionary, template_model=None):
    """
    Private helper to init values from a dictionary, wraps children into
    AttributeFilter objects

    :param from_dictionary: dictionary to get attribute names and visibility from
    :type from_dictionary: dict
    :param template_model: optional model used to validate keys and to map
        minified wire names back to real attribute names
    :type template_model: DataCollection
    :raises TypeError: for non-dict input or non-bool/non-dict values
    :raises exception.AttributeFilterDiffers: for keys absent from the template model
    """
    if not isinstance(from_dictionary, dict):
        raise TypeError("from_dictionary must be of type dict, %s \
        provided" % from_dictionary.__class__.__name__)

    rewrite_map = None
    if template_model is not None:

        if not isinstance(template_model, DataCollection):
            msg = "template_model should be a prestans model %s provided" % template_model.__class__.__name__
            raise TypeError(msg)

        # Map from minified wire names back to real attribute names.
        rewrite_map = template_model.attribute_rewrite_reverse_map()

    for key, value in iter(from_dictionary.items()):

        target_key = key

        # minify support
        if rewrite_map is not None:
            target_key = rewrite_map[key]

        # ensure that the key exists in the template model
        if template_model is not None and target_key not in template_model:
            unwanted_keys = list()
            unwanted_keys.append(target_key)
            raise exception.AttributeFilterDiffers(unwanted_keys)

        # check to see we can work with the value
        if not isinstance(value, (bool, dict)):
            raise TypeError("AttributeFilter input for key %s must be \
        boolean or dict, %s provided" % (key, value.__class__.__name__))

        # Either keep the value of wrap it up with AttributeFilter
        if isinstance(value, bool):
            setattr(self, target_key, value)
        elif isinstance(value, dict):

            sub_map = None
            if template_model is not None:

                sub_map = getattr(template_model, target_key)

                # prestans Array support: filter against the element template.
                if isinstance(sub_map, Array):
                    sub_map = sub_map.element_template

            setattr(self, target_key, AttributeFilter(from_dictionary=value, template_model=sub_map))
python
{ "resource": "" }
q42155
Predictor.predict
train
def predict(self):
    """
    Computes all possible weighted average predictions and their variances

    Example::

        >>> from caspo import core, predict

        >>> networks = core.LogicalNetworkList.from_csv('behaviors.csv')
        >>> setup = core.Setup.from_json('setup.json')

        >>> predictor = predict.Predictor(networks, setup)
        >>> df = predictor.predict()

        >>> df.to_csv('predictions.csv'), index=False)

    Returns
    --------
    `pandas.DataFrame`_
        DataFrame with the weighted average predictions and variance of all readouts for each possible clamping

    .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe
    """
    self._logger.info("Computing all predictions and their variance for %s logical networks...", len(self.networks))
    # Restrict the setup to the species actually present in the networks
    # before delegating the heavy lifting to the network list.
    return self.networks.predictions(self.setup.filter(self.networks))
python
{ "resource": "" }
q42156
LogFile.content
train
def content(self):
    """
    Returns raw CSV content of the log file.

    The download endpoint serves a ZIP archive that always contains
    exactly one file; that file's bytes are returned.
    """
    payload = self._manager.api.session.get(self.download_link).content
    archive = ZipFile(BytesIO(payload))
    # Always 1 file in the archive
    return archive.read(archive.filelist[0])
python
{ "resource": "" }
q42157
Validator.validate
train
def validate(self, read_tuple_name):
    """Check RNF validity of a read tuple.

    The name must match the overall RNF pattern; if it does, each of the
    four '__'-separated parts (prefix, id, segmental, suffix) is checked
    against its own regex, and finally the width profile is compared to
    the expected one (profile mismatches are reported as warnings).

    Args:
        read_tuple_name (str): Read tuple name to be checked.s
    """
    if reg_lrn.match(read_tuple_name) is None:
        # Whole-name pattern failed; no point checking the parts.
        self.report_error(
            read_tuple_name=read_tuple_name,
            error_name="wrong_read_tuple_name_structure",
            message="'{}' is not matched".format(reg_lrn),
        )
    else:
        parts = read_tuple_name.split("__")
        if reg_prefix_part.match(parts[0]) is None:
            self.report_error(
                read_tuple_name=read_tuple_name,
                error_name="wrong_prefix_part",
                message="'{}' is not matched".format(reg_prefix_part),
            )
        if reg_id_part.match(parts[1]) is None:
            self.report_error(
                read_tuple_name=read_tuple_name,
                error_name="wrong_id_part",
                message="'{}' is not matched".format(reg_id_part),
            )
        if reg_segmental_part.match(parts[2]) is None:
            self.report_error(
                read_tuple_name=read_tuple_name,
                error_name="wrong_segmental_part",
                message="'{}' is not matched".format(reg_segmental_part),
            )
        if reg_suffix_part.match(parts[3]) is None:
            self.report_error(
                read_tuple_name=read_tuple_name,
                error_name="wrong_suffix_part",
                message="'{}' is not matched".format(reg_suffix_part),
            )
    # Width-profile check is independent of the structural checks above.
    if not self.rnf_profile.check(read_tuple_name):
        self.report_error(
            read_tuple_name=read_tuple_name,
            error_name="wrong_profile",
            message="Read has a wrong profile (wrong widths). It should be: {} but it is: {}.".format(
                self.rnf_profile,
                rnftools.rnfformat.RnfProfile(read_tuple_name=read_tuple_name),
            ),
            warning=True,
        )
python
{ "resource": "" }
q42158
Validator.report_error
train
def report_error(self, read_tuple_name, error_name, wrong="", message="", warning=False):
    """Report an error.

    Prints a tab-separated diagnostic line (unless ``report_only_first``
    suppresses repeats of the same error type) and records that an
    error/warning has been reported.

    Args:
        read_tuple_name (): Name of the read tuple.
        error_name (): Name of the error.
        wrong (str): What is wrong.
        message (str): Additional msessage to be printed.
        warning (bool): Warning (not an error).
    """
    # Honour report_only_first: only the first occurrence of each error
    # type is printed; the flags below are still updated every time.
    if (not self.report_only_first) or (error_name not in self.reported_errors):
        # Idiom fix: use the boolean directly instead of ``warning == False``.
        print("\t".join(["warning" if warning else "error", read_tuple_name, error_name, wrong, message]))
    self.reported_errors.add(error_name)
    if warning:
        self.warning_has_been_reported = True
    else:
        self.error_has_been_reported = True
python
{ "resource": "" }
q42159
TraceSlowRequestsMiddleware._is_exempt
train
def _is_exempt(self, environ):
    """
    Returns True if this request's URL starts with one of the
    excluded paths.

    Robustness fix: a missing PATH_INFO no longer raises (defaults to '');
    also removes the redundant ``exemptions`` local that was assigned but
    never used in the loop.
    """
    path = environ.get('PATH_INFO', '')
    return any(path.startswith(prefix) for prefix in (self.exclude_paths or ()))
python
{ "resource": "" }
q42160
load_network_model
train
def load_network_model(model):
    '''Loads metabolic network models in metabolitics.

    :param model: model name (str) or an already-loaded ``cb.Model``
    :returns: the loaded model, or None when ``model`` matches neither a
        known model name nor a cobra Model instance
    '''
    # Idiom fix: isinstance instead of ``type(x) == str`` (also accepts
    # str subclasses).
    if isinstance(model, str):
        if model in ('ecoli', 'textbook', 'salmonella'):
            # Bundled cobra test models.
            return cb.test.create_test_model(model)
        if model == 'recon2':
            # Recon 2 ships with the package as a JSON network model.
            return cb.io.load_json_model('%s/network_models/%s.json' %
                                         (DATASET_PATH, model))
    if isinstance(model, cb.Model):
        return model
    # Explicit None for unrecognized inputs (was an implicit fall-through).
    return None
python
{ "resource": "" }
q42161
TokenQueryset.bulk_refresh
train
def bulk_refresh(self):
    """
    Refreshes all refreshable tokens in the queryset.
    Deletes any tokens which fail to refresh. Deletes any tokens which are expired and cannot refresh.
    Excludes tokens for which the refresh was incomplete for other reasons.

    :returns: queryset excluding tokens whose refresh was incomplete
    """
    session = OAuth2Session(app_settings.ESI_SSO_CLIENT_ID)
    auth = requests.auth.HTTPBasicAuth(app_settings.ESI_SSO_CLIENT_ID, app_settings.ESI_SSO_CLIENT_SECRET)
    incomplete = []
    for model in self.filter(refresh_token__isnull=False):
        try:
            model.refresh(session=session, auth=auth)
            # CONSISTENCY FIX: use the module-level ``logger`` (as the
            # except-branch below does) instead of the root logger via
            # ``logging.debug``, which bypasses this module's logger config.
            logger.debug("Successfully refreshed {0}".format(repr(model)))
        except TokenError:
            logger.info("Refresh failed for {0}. Deleting.".format(repr(model)))
            model.delete()
        except IncompleteResponseError:
            # Keep the token but exclude it from the returned queryset.
            incomplete.append(model.pk)
    # Tokens without a refresh token cannot be renewed: drop expired ones.
    self.filter(refresh_token__isnull=True).get_expired().delete()
    return self.exclude(pk__in=incomplete)
python
{ "resource": "" }
q42162
ibatch
train
def ibatch(iterable, size):
    """Yield a series of batches from iterable, each size elements long.

    BUG FIX (PEP 479): on Python 3.7+ a StopIteration raised by
    ``next(batch)`` inside a generator is converted to RuntimeError, so
    exhaustion of the source must be caught explicitly to end iteration
    cleanly.
    """
    source = iter(iterable)
    while True:
        batch = itertools.islice(source, size)
        try:
            # Probe for the first element; StopIteration means the source
            # is exhausted and the generator should simply finish.
            first = next(batch)
        except StopIteration:
            return
        yield itertools.chain([first], batch)
python
{ "resource": "" }
q42163
KVStore.put_many
train
def put_many(self, items):  # pragma: no cover
    """Put many key-value pairs.

    This method may take advantage of performance or atomicity features
    of the underlying store. It does not guarantee that all items will
    be set in the same transaction, only that transactions may be used
    for performance.

    :param items: An iterable producing (key, value) tuples.
    """
    # Default implementation: one put() call per pair.
    for k, v in items:
        self.put(k, v)
python
{ "resource": "" }
q42164
KVStore.prefix_keys
train
def prefix_keys(self, prefix, strip_prefix=False):
    """Get all keys that begin with ``prefix``.

    :param prefix: Lexical prefix for keys to search.
    :type prefix: bytes
    :param strip_prefix: True to strip the prefix from yielded items.
    :type strip_prefix: bool

    :yields: All keys in the store that begin with ``prefix``.
    """
    offset = len(prefix) if strip_prefix else 0
    # keys() iterates in order starting at the prefix, so we can stop at
    # the first key that no longer matches.
    for key in self.keys(key_from=prefix):
        if not key.startswith(prefix):
            return
        yield key[offset:]
python
{ "resource": "" }
q42165
GradebookHistoryAPI.details_for_given_date_in_gradebook_history_for_this_course
train
def details_for_given_date_in_gradebook_history_for_this_course(self, date, course_id):
    """
    Details for a given date in gradebook history for this course.

    Returns the graders who worked on this day, along with the assignments they worked on.
    More details can be obtained by selecting a grader and assignment and calling the
    'submissions' api endpoint for a given date.

    :returns: all pages of the API response (``all_pages=True``)
    """
    path = {}
    data = {}
    params = {}

    # REQUIRED - PATH - course_id
    """The id of the contextual course for this API call"""
    path["course_id"] = course_id

    # REQUIRED - PATH - date
    """The date for which you would like to see detailed information"""
    path["date"] = date

    self.logger.debug("GET /api/v1/courses/{course_id}/gradebook_history/{date} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/courses/{course_id}/gradebook_history/{date}".format(**path), data=data, params=params, all_pages=True)
python
{ "resource": "" }
q42166
GradebookHistoryAPI.lists_submissions
train
def lists_submissions(self, date, course_id, grader_id, assignment_id):
    """
    Lists submissions.

    Gives a nested list of submission versions

    :returns: all pages of the API response (``all_pages=True``)
    """
    path = {}
    data = {}
    params = {}

    # REQUIRED - PATH - course_id
    """The id of the contextual course for this API call"""
    path["course_id"] = course_id

    # REQUIRED - PATH - date
    """The date for which you would like to see submissions"""
    path["date"] = date

    # REQUIRED - PATH - grader_id
    """The ID of the grader for which you want to see submissions"""
    path["grader_id"] = grader_id

    # REQUIRED - PATH - assignment_id
    """The ID of the assignment for which you want to see submissions"""
    path["assignment_id"] = assignment_id

    self.logger.debug("GET /api/v1/courses/{course_id}/gradebook_history/{date}/graders/{grader_id}/assignments/{assignment_id}/submissions with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/courses/{course_id}/gradebook_history/{date}/graders/{grader_id}/assignments/{assignment_id}/submissions".format(**path), data=data, params=params, all_pages=True)
python
{ "resource": "" }
q42167
GradebookHistoryAPI.list_uncollated_submission_versions
train
def list_uncollated_submission_versions(self, course_id, ascending=None, assignment_id=None, user_id=None):
    """
    List uncollated submission versions.

    Gives a paginated, uncollated list of submission versions for all matching
    submissions in the context. This SubmissionVersion objects will not include
    the +new_grade+ or +previous_grade+ keys, only the +grade+; same for
    +graded_at+ and +grader+.

    :returns: all pages of the API response (``all_pages=True``)
    """
    path = {}
    data = {}
    params = {}

    # REQUIRED - PATH - course_id
    """The id of the contextual course for this API call"""
    path["course_id"] = course_id

    # OPTIONAL - assignment_id
    """The ID of the assignment for which you want to see submissions. If
    absent, versions of submissions from any assignment in the course are
    included."""
    if assignment_id is not None:
        params["assignment_id"] = assignment_id

    # OPTIONAL - user_id
    """The ID of the user for which you want to see submissions. If absent,
    versions of submissions from any user in the course are included."""
    if user_id is not None:
        params["user_id"] = user_id

    # OPTIONAL - ascending
    """Returns submission versions in ascending date order (oldest first). If
    absent, returns submission versions in descending date order (newest
    first)."""
    if ascending is not None:
        params["ascending"] = ascending

    self.logger.debug("GET /api/v1/courses/{course_id}/gradebook_history/feed with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/courses/{course_id}/gradebook_history/feed".format(**path), data=data, params=params, all_pages=True)
python
{ "resource": "" }
q42168
Section._save_percolator
train
def _save_percolator(self):
    """saves the query field as an elasticsearch percolator

    Registers this section's query under the ``.percolator`` doc type so
    new content can be matched back to this section.  Sections whose
    query contains no "query" clause are skipped entirely.
    """
    index = Content.search_objects.mapping.index
    query_filter = self.get_content().to_dict()

    q = {}

    if "query" in query_filter:
        q = {"query": query_filter.get("query", {})}
    else:
        # no query to percolate
        return

    es.index(
        index=index,
        doc_type=".percolator",
        body=q,
        id=self.es_id
    )
python
{ "resource": "" }
q42169
Section.get_content
train
def get_content(self):
    """performs es search and gets content objects

    Uses the section's "query" clause when present, otherwise the raw
    query dict, mapped through the section-specific field aliases.
    """
    query_body = self.query.get("query", self.query)
    return custom_search_model(Content, query_body, field_map={
        "feature-type": "feature_type.slug",
        "tag": "tags.slug",
        "content-type": "_type",
    })
python
{ "resource": "" }
q42170
sample
train
def sample(name, reads_in_tuple):
    """
    Create a new sample.

    :param name: unique sample name; a duplicate aborts with an error
    :param reads_in_tuple: number of reads per read tuple
    """
    # Sample names must be unique across the global sample registry.
    if name in [sample_x.get_name() for sample_x in __SAMPLES__]:
        rnftools.utils.error(
            "Multiple samples have the same name. Each sample must have a unique name.",
            program="RNFtools",
            subprogram="MIShmash",
            exception=ValueError,
        )
    # Sample() registers itself globally; then register its FASTQ outputs
    # as pipeline inputs.
    Sample(
        name=name,
        reads_in_tuple=reads_in_tuple,
    )
    add_input(current_sample().fq_fns())
python
{ "resource": "" }
q42171
get_schema_model
train
def get_schema_model():
    """
    Returns the schema model that is active in this project.

    :raises ImproperlyConfigured: when ``settings.POSTGRES_SCHEMA_MODEL``
        is malformed or names a model that is not installed
    """
    try:
        return django_apps.get_model(settings.POSTGRES_SCHEMA_MODEL, require_ready=False)
    except ValueError:
        # get_model raises ValueError for strings not shaped like
        # "app_label.model_name".
        raise ImproperlyConfigured("POSTGRES_SCHEMA_MODEL must be of the form 'app_label.model_name'")
    except LookupError:
        raise ImproperlyConfigured(
            "POSTGRES_SCHEMA_MODEL refers to model '%s' that has not been installed" % settings.POSTGRES_SCHEMA_MODEL
        )
python
{ "resource": "" }
q42172
SSLSocket.read
train
def read(self, len=1024):
    """Read up to LEN bytes and return them.
    Return zero-length string on EOF.

    Retries transparently on SSL_ERROR_WANT_READ / SSL_ERROR_WANT_WRITE
    by waiting on the cooperative I/O object; a zero timeout (non-blocking
    mode) re-raises immediately instead of waiting.
    """
    while True:
        try:
            return self._sslobj.read(len)
        except SSLError:
            ex = sys.exc_info()[1]
            if ex.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs:
                # Peer closed without close_notify; treat as clean EOF.
                return b''
            elif ex.args[0] == SSL_ERROR_WANT_READ:
                if self.timeout == 0.0:
                    raise
                six.exc_clear()
                self._io.wait_read(timeout=self.timeout, timeout_exc=_SSLErrorReadTimeout)
            elif ex.args[0] == SSL_ERROR_WANT_WRITE:
                # Renegotiation may require a write even during a read.
                if self.timeout == 0.0:
                    raise
                six.exc_clear()
                self._io.wait_write(timeout=self.timeout, timeout_exc=_SSLErrorReadTimeout)
            else:
                raise
python
{ "resource": "" }
q42173
SSLSocket.write
train
def write(self, data):
    """Write DATA to the underlying SSL channel.  Returns
    number of bytes of DATA actually transmitted.

    Retries transparently on SSL_ERROR_WANT_READ / SSL_ERROR_WANT_WRITE
    (renegotiation may require a read during a write); a zero timeout
    (non-blocking mode) re-raises immediately instead of waiting.
    """
    while True:
        try:
            return self._sslobj.write(data)
        except SSLError:
            ex = sys.exc_info()[1]
            if ex.args[0] == SSL_ERROR_WANT_READ:
                if self.timeout == 0.0:
                    raise
                six.exc_clear()
                self._io.wait_read(timeout=self.timeout, timeout_exc=_SSLErrorWriteTimeout)
            elif ex.args[0] == SSL_ERROR_WANT_WRITE:
                if self.timeout == 0.0:
                    raise
                six.exc_clear()
                self._io.wait_write(timeout=self.timeout, timeout_exc=_SSLErrorWriteTimeout)
            else:
                raise
python
{ "resource": "" }
q42174
SSLSocket.connect
train
def connect(self, addr): """Connects to remote ADDR, and then wraps the connection in an SSL channel.""" # Here we assume that the socket is client-side, and not # connected at the time of the call. We connect it, then wrap it. if self._sslobj: raise ValueError("attempt to connect already-connected SSLSocket!") socket.connect(self, addr) if six.PY3: self._sslobj = self.context._wrap_socket(self._sock, False, self.server_hostname) else: if self.ciphers is None: self._sslobj = _ssl.sslwrap(self._sock, False, self.keyfile, self.certfile, self.cert_reqs, self.ssl_version, self.ca_certs) else: self._sslobj = _ssl.sslwrap(self._sock, False, self.keyfile, self.certfile, self.cert_reqs, self.ssl_version, self.ca_certs, self.ciphers) if self.do_handshake_on_connect: self.do_handshake()
python
{ "resource": "" }
q42175
SSLSocket.accept
train
def accept(self):
    """Accepts a new connection from a remote client, and returns
    a tuple containing that new connection wrapped with a server-side
    SSL channel, and the address of the remote client."""
    newsock, addr = socket.accept(self)
    # Re-wrap the accepted plain socket with this listener's SSL settings.
    ssl_sock = SSLSocket(newsock._sock,
                         keyfile=self.keyfile,
                         certfile=self.certfile,
                         server_side=True,
                         cert_reqs=self.cert_reqs,
                         ssl_version=self.ssl_version,
                         ca_certs=self.ca_certs,
                         do_handshake_on_connect=self.do_handshake_on_connect,
                         suppress_ragged_eofs=self.suppress_ragged_eofs,
                         ciphers=self.ciphers)
    return ssl_sock, addr
python
{ "resource": "" }
q42176
sclient.send_request
train
def send_request(self, url):
    """ Send a request to given url.

    HTTP errors abort immediately with serror; any other error prompts
    the user on stdin whether to retry.
    """
    while True:
        try:
            return urllib.request.urlopen(url)
        except urllib.error.HTTPError as e:
            raise serror(
                "Request `%s` failed (%s:%s)."
                % (url, e.__class__.__name__, e.code))
        except Exception as e:
            # Interactive retry loop for transient (e.g. network) failures.
            choice = input(serror(
                "Error occured: %s - Retry? [yN]" % type(e)))
            if choice.strip().lower() != "y":
                raise serror(e)
python
{ "resource": "" }
q42177
sclient.get
train
def get(self, uri):
    """ Send a request to given uri.

    Builds the full URL from the protocol, host, port, uri and the
    client_id query suffix, then delegates to send_request.
    """
    url = "{0}://{1}:{2}{3}{4}".format(
        self.get_protocol(), self.host, self.port, uri, self.client_id)
    return self.send_request(url)
python
{ "resource": "" }
q42178
sclient.get_client_id
train
def get_client_id(self): """ Attempt to get client_id from soundcloud homepage. """ # FIXME: This method doesn't works id = re.search( "\"clientID\":\"([a-z0-9]*)\"", self.send_request(self.SC_HOME).read().decode("utf-8")) if not id: raise serror("Cannot retrieve client_id.") return id.group(1)
python
{ "resource": "" }
q42179
error_handler
train
def error_handler(task):
    """Handle and log RPC errors.

    Decorator for Bridge methods: runs the task, and on any exception
    marks the bridge as disconnected and (unless ``self.testing``) logs
    the failing task name, file and line.  The wrapped call returns None
    when an exception occurred.
    """
    @wraps(task)
    def wrapper(self, *args, **kwargs):
        try:
            return task(self, *args, **kwargs)
        except Exception as e:
            self.connected = False
            if not self.testing:
                _, _, exc_tb = sys.exc_info()
                fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
                error_message = (
                    "[" + str(datetime.now()) + "] Error in task \""
                    + task.__name__ + "\" ("
                    + fname + "/" + str(exc_tb.tb_lineno) + ")"
                    # BUG FIX: Exception.message was removed in Python 3;
                    # str(e) works on both 2 and 3.
                    + str(e)
                )
                self.logger.error("%s: RPC instruction failed" % error_message)
    return wrapper
python
{ "resource": "" }
q42180
Bridge.payment
train
def payment(self, origin, destination, amount):
    """Convenience method for sending Bitcoins.

    Send coins from origin to destination. Calls record_tx to log the
    transaction to database. Uses free, instant "move" transfers if
    addresses are both local (in the same wallet), and standard
    "sendfrom" transactions otherwise.  The sender is required to be
    specified by user_id (account label); however, the recipient can be
    specified either by Bitcoin address (anyone) or user_id (if the user
    is local).  Payment tries sending Bitcoins in this order:

    1. "move" from account to account (local)
    2. "move" from account to address (local)
    3. "sendfrom" account to address (broadcast)

    Args:
      origin (str): user_id of the sender
      destination (str): coin address or user_id of the recipient
      amount (str, Decimal, number): amount to send

    Returns:
      bool: True if successful, False otherwise
    """
    if type(amount) != Decimal:
        amount = Decimal(amount)
    if amount <= 0:
        raise Exception("Amount must be a positive number")
    # Check if the destination is within the same wallet;
    # if so, we can use the fast (and free) "move" command
    all_addresses = []  # NOTE(review): assigned but never used -- left as-is (doc-only pass)
    accounts = self.listaccounts()
    if origin in accounts:
        if destination in accounts:
            # Both parties are local account labels: free intra-wallet move.
            with self.openwallet():
                result = self.move(origin, destination, amount)
            return self.record_tx(origin, None, amount, result, destination)
        for account in accounts:
            addresses = self.getaddressesbyaccount(account)
            if destination in addresses:
                # Destination address belongs to a local account: still a move.
                with self.openwallet():
                    result = self.move(origin, account, amount)
                return self.record_tx(origin, destination, amount, result, account)
        # NOTE(review): a local origin whose destination is not found above
        # falls through and returns None -- confirm whether that is intended.
    # Didn't find anything, so use "sendfrom" instead
    else:
        with self.openwallet():
            txhash = self.sendfrom(origin, destination, amount)
        return self.record_tx(origin, destination, amount, txhash)
python
{ "resource": "" }
q42181
Bridge.record_tx
train
def record_tx(self, origin, destination, amount, outcome, destination_id=None): """Records a transaction in the database. Args: origin (str): user_id of the sender destination (str): coin address or user_id of the recipient amount (str, Decimal, number): amount to send outcome (str, bool): the transaction hash if this is a "sendfrom" transaction; for "move", True if successful, False otherwise destination_id (str): the destination account label ("move" only) Returns: str or bool: the outcome (input) argument """ # "move" commands if destination_id: tx = db.Transaction( txtype="move", from_user_id=origin, to_user_id=destination_id, txdate=datetime.now(), amount=amount, currency=COINS[self.coin]["ticker"], to_coin_address=destination, ) # "sendfrom" commands else: self.logger.debug(self.gettransaction(outcome)) confirmations = self.gettransaction(outcome)["confirmations"] last_confirmation = datetime.now() if confirmations else None tx = db.Transaction( txtype="sendfrom", from_user_id=origin, txhash=outcome, txdate=datetime.now(), amount=amount, currency=COINS[self.coin]["ticker"], to_coin_address=destination, confirmations=confirmations, last_confirmation=last_confirmation ) db.session.add(tx) db.session.commit() return outcome
python
{ "resource": "" }
q42182
Bridge.rpc_connect
train
def rpc_connect(self):
    """Connect to a coin daemon's JSON RPC interface.

    Builds the RPC URL from the COINS registry (testnet port when
    ``self.testnet`` is set) and stores the client on ``self.rpc``.

    Returns:
      bool: True if successfully connected, False otherwise.
    """
    if self.coin in COINS:
        rpc_url = COINS[self.coin]["rpc-url"] + ":"
        if self.testnet:
            rpc_url += COINS[self.coin]["rpc-port-testnet"]
        else:
            rpc_url += COINS[self.coin]["rpc-port"]
        self.rpc = pyjsonrpc.HttpClient(
            url=rpc_url,
            username=COINS[self.coin]["rpc-user"],
            password=COINS[self.coin]["rpc-password"]
        )
        # NOTE(review): multiple positional args to logger.debug -- stdlib
        # logging would treat the extras as %-format args; presumably a
        # print-style custom logger.  Confirm before changing.
        self.logger.debug(self.coin, "RPC connection ok")
        self.connected = True
    else:
        self.logger.debug(self.coin, "bridge not found")
    return self.connected
python
{ "resource": "" }
q42183
Bridge.getaccountaddress
train
def getaccountaddress(self, user_id=""):
    """Get the coin address associated with a user id.

    If the specified user id does not yet have an address for this
    coin, then generate one.

    Args:
      user_id (str): this user's unique identifier

    Returns:
      str: Base58Check address for this account
    """
    address = self.rpc.call("getaccountaddress", user_id)
    # NOTE(review): multi-arg debug call; assumes a print-style custom logger.
    self.logger.debug("Your", self.coin, "address is", address)
    return address
python
{ "resource": "" }
q42184
Bridge.getbalance
train
def getbalance(self, user_id="", as_decimal=True):
    """Calculate the total balance in all addresses belonging to this user.

    Args:
      user_id (str): this user's unique identifier
      as_decimal (bool): balance is returned as a Decimal if True (default)
                         or a string if False

    Returns:
      str or Decimal: this account's total coin balance
    """
    # NOTE(review): ``unicode`` is a Python 2 builtin -- this module
    # appears to target Python 2; confirm before porting.
    balance = unicode(self.rpc.call("getbalance", user_id))
    self.logger.debug("\"" + user_id + "\"", self.coin, "balance:", balance)
    if as_decimal:
        return Decimal(balance)
    else:
        return balance
python
{ "resource": "" }
q42185
Bridge.listtransactions
train
def listtransactions(self, user_id="", count=10, start_at=0):
    """Fetch the transactions recorded against an account.

    Args:
        user_id (str): this user's unique identifier
        count (int): number of transactions to return (default=10)
        start_at (int): start the list at this transaction (default=0)

    Returns:
        list [dict]: transactions associated with this user's account
    """
    transactions = self.rpc.call("listtransactions", user_id, count, start_at)
    self.logger.debug("Got transaction list for " + str(user_id))
    return transactions
python
{ "resource": "" }
q42186
Bridge.move
train
def move(self, fromaccount, toaccount, amount, minconf=1):
    """Send coins between accounts in the same wallet.

    If the receiving account does not exist, it is automatically
    created (but not automatically assigned an address).

    Args:
        fromaccount (str): origin account
        toaccount (str): destination account
        amount (str or Decimal): amount to send (8 decimal points)
        minconf (int): ensure the account has a valid balance using this
            many confirmations (default=1)

    Returns:
        bool: True if the coins are moved successfully, False otherwise
    """
    # Quantize to the coin's precision, then round-trip through str so
    # the daemon receives a clean float.
    quantized = Decimal(amount).quantize(self.quantum, rounding=ROUND_HALF_EVEN)
    coins = float(str(quantized))
    return self.rpc.call("move", fromaccount, toaccount, coins, minconf)
python
{ "resource": "" }
q42187
Bridge.sendfrom
train
def sendfrom(self, user_id, dest_address, amount, minconf=1):
    """Send coins from a user's account to an external address.

    Args:
        user_id (str): this user's unique identifier
        dest_address (str): address which is to receive coins
        amount (str or Decimal): amount to send (eight decimal points)
        minconf (int): ensure the account has a valid balance using this
            many confirmations (default=1)

    Returns:
        str: transaction ID
    """
    # Quantize to the coin's precision before handing the value to the
    # daemon as a float.
    quantized = Decimal(amount).quantize(self.quantum, rounding=ROUND_HALF_EVEN)
    txhash = self.rpc.call("sendfrom",
                           user_id,
                           dest_address,
                           float(str(quantized)),
                           minconf)
    self.logger.debug("Send %s %s from %s to %s" % (str(quantized),
                                                    self.coin,
                                                    str(user_id),
                                                    dest_address))
    self.logger.debug("Transaction hash: %s" % txhash)
    return txhash
python
{ "resource": "" }
q42188
Bridge.signmessage
train
def signmessage(self, address, message):
    """Sign a message with the private key of an address.

    Cryptographically signs a message using ECDSA. Since this requires
    an address's private key, the wallet must be unlocked first.

    Args:
        address (str): address used to sign the message
        message (str): plaintext message to which apply the signature

    Returns:
        str: ECDSA signature over the message
    """
    sig = self.rpc.call("signmessage", address, message)
    self.logger.debug("Signature: %s" % sig)
    return sig
python
{ "resource": "" }
q42189
Bridge.verifymessage
train
def verifymessage(self, address, signature, message):
    """Verify that a message has been signed by an address.

    Args:
        address (str): address claiming to have signed the message
        signature (str): ECDSA signature
        message (str): plaintext message which was signed

    Returns:
        bool: True if the address signed the message, False otherwise
    """
    is_valid = self.rpc.call("verifymessage", address, signature, message)
    self.logger.debug("Signature verified: %s" % str(is_valid))
    return is_valid
python
{ "resource": "" }
q42190
Bridge.call
train
def call(self, command, *args):
    """Relay an arbitrary command to the coin daemon.

    Args:
        command (str): command to be sent to the coin daemon
        *args: positional arguments forwarded verbatim to the daemon

    Returns:
        whatever the daemon's JSON-RPC interface returns for the command
    """
    method = str(command)
    return self.rpc.call(method, *args)
python
{ "resource": "" }
q42191
update_feature_type_rates
train
def update_feature_type_rates(sender, instance, created, *args, **kwargs):
    """Signal handler: seed default rates for a new FeatureType.

    On first save of a FeatureType, creates a zero-rate FeatureTypeRate
    for every existing ContributorRole; updates to an existing instance
    are ignored.
    """
    if not created:
        return
    for contributor_role in ContributorRole.objects.all():
        FeatureTypeRate.objects.create(
            role=contributor_role, feature_type=instance, rate=0)
python
{ "resource": "" }
q42192
update_contributions
train
def update_contributions(sender, instance, action, model, pk_set, **kwargs):
    """Signal handler: record a contribution for each author being added.

    Only the 'pre_add' phase of the m2m change is handled; every other
    action is a no-op.
    """
    if action == 'pre_add':
        for added_author in model.objects.filter(pk__in=pk_set):
            update_content_contributions(instance, added_author)
python
{ "resource": "" }
q42193
CalendarEventsAPI.create_calendar_event
train
def create_calendar_event(self, calendar_event_context_code, calendar_event_child_event_data_X_context_code=None, calendar_event_child_event_data_X_end_at=None, calendar_event_child_event_data_X_start_at=None, calendar_event_description=None, calendar_event_duplicate_append_iterator=None, calendar_event_duplicate_count=None, calendar_event_duplicate_frequency=None, calendar_event_duplicate_interval=None, calendar_event_end_at=None, calendar_event_location_address=None, calendar_event_location_name=None, calendar_event_start_at=None, calendar_event_time_zone_edited=None, calendar_event_title=None):
    """
    Create a calendar event.

    Create and return a new calendar event via
    POST /api/v1/calendar_events. Each keyword argument maps one-to-one
    onto a Canvas API form field (named in the comment above each
    assignment); only non-None optional arguments are sent.
    """
    # Buckets consumed by generic_request: URL path substitutions,
    # form-data fields, and query-string parameters.
    path = {}
    data = {}
    params = {}

    # REQUIRED - calendar_event[context_code]
    """Context code of the course/group/user whose calendar this event should be added to."""
    data["calendar_event[context_code]"] = calendar_event_context_code

    # OPTIONAL - calendar_event[title]
    """Short title for the calendar event."""
    if calendar_event_title is not None:
        data["calendar_event[title]"] = calendar_event_title

    # OPTIONAL - calendar_event[description]
    """Longer HTML description of the event."""
    if calendar_event_description is not None:
        data["calendar_event[description]"] = calendar_event_description

    # OPTIONAL - calendar_event[start_at]
    """Start date/time of the event."""
    if calendar_event_start_at is not None:
        data["calendar_event[start_at]"] = calendar_event_start_at

    # OPTIONAL - calendar_event[end_at]
    """End date/time of the event."""
    if calendar_event_end_at is not None:
        data["calendar_event[end_at]"] = calendar_event_end_at

    # OPTIONAL - calendar_event[location_name]
    """Location name of the event."""
    if calendar_event_location_name is not None:
        data["calendar_event[location_name]"] = calendar_event_location_name

    # OPTIONAL - calendar_event[location_address]
    """Location address"""
    if calendar_event_location_address is not None:
        data["calendar_event[location_address]"] = calendar_event_location_address

    # OPTIONAL - calendar_event[time_zone_edited]
    """Time zone of the user editing the event. Allowed time zones are {http://www.iana.org/time-zones IANA time zones} or friendlier {http://api.rubyonrails.org/classes/ActiveSupport/TimeZone.html Ruby on Rails time zones}."""
    if calendar_event_time_zone_edited is not None:
        data["calendar_event[time_zone_edited]"] = calendar_event_time_zone_edited

    # OPTIONAL - calendar_event[child_event_data][X][start_at]
    """Section-level start time(s) if this is a course event. X can be any identifier, provided that it is consistent across the start_at, end_at and context_code"""
    if calendar_event_child_event_data_X_start_at is not None:
        data["calendar_event[child_event_data][X][start_at]"] = calendar_event_child_event_data_X_start_at

    # OPTIONAL - calendar_event[child_event_data][X][end_at]
    """Section-level end time(s) if this is a course event."""
    if calendar_event_child_event_data_X_end_at is not None:
        data["calendar_event[child_event_data][X][end_at]"] = calendar_event_child_event_data_X_end_at

    # OPTIONAL - calendar_event[child_event_data][X][context_code]
    """Context code(s) corresponding to the section-level start and end time(s)."""
    if calendar_event_child_event_data_X_context_code is not None:
        data["calendar_event[child_event_data][X][context_code]"] = calendar_event_child_event_data_X_context_code

    # OPTIONAL - calendar_event[duplicate][count]
    """Number of times to copy/duplicate the event."""
    if calendar_event_duplicate_count is not None:
        data["calendar_event[duplicate][count]"] = calendar_event_duplicate_count

    # OPTIONAL - calendar_event[duplicate][interval]
    """Defaults to 1 if duplicate `count` is set. The interval between the duplicated events."""
    if calendar_event_duplicate_interval is not None:
        data["calendar_event[duplicate][interval]"] = calendar_event_duplicate_interval

    # OPTIONAL - calendar_event[duplicate][frequency]
    """Defaults to "weekly". The frequency at which to duplicate the event"""
    if calendar_event_duplicate_frequency is not None:
        # Reject anything outside the server-accepted frequency values
        # before building the request.
        self._validate_enum(calendar_event_duplicate_frequency, ["daily", "weekly", "monthly"])
        data["calendar_event[duplicate][frequency]"] = calendar_event_duplicate_frequency

    # OPTIONAL - calendar_event[duplicate][append_iterator]
    """Defaults to false. If set to `true`, an increasing counter number will be appended to the event title when the event is duplicated. (e.g. Event 1, Event 2, Event 3, etc)"""
    if calendar_event_duplicate_append_iterator is not None:
        data["calendar_event[duplicate][append_iterator]"] = calendar_event_duplicate_append_iterator

    self.logger.debug("POST /api/v1/calendar_events with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/calendar_events".format(**path), data=data, params=params, no_data=True)
python
{ "resource": "" }
q42194
CalendarEventsAPI.reserve_time_slot
train
def reserve_time_slot(self, id, cancel_existing=None, comments=None, participant_id=None):
    """Reserve a time slot.

    Issues POST /api/v1/calendar_events/{id}/reservations to reserve a
    particular time slot and returns the new reservation.

    Args:
        id: calendar event id (URL path parameter)
        cancel_existing: if true, cancel any previous reservation(s) for
            this participant and appointment group (server default: false)
        comments: comments to associate with this reservation
        participant_id: user or group id for whom the reservation is made
            (server defaults to the current user / candidate group)
    """
    path = {"id": id}
    data = {}
    params = {}

    # Only forward the optional form fields the caller actually supplied.
    optional_fields = (
        ("participant_id", participant_id),
        ("comments", comments),
        ("cancel_existing", cancel_existing),
    )
    for field_name, field_value in optional_fields:
        if field_value is not None:
            data[field_name] = field_value

    self.logger.debug("POST /api/v1/calendar_events/{id}/reservations with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/calendar_events/{id}/reservations".format(**path), data=data, params=params, no_data=True)
python
{ "resource": "" }
q42195
CalendarEventsAPI.update_calendar_event
train
def update_calendar_event(self, id, calendar_event_child_event_data_X_context_code=None, calendar_event_child_event_data_X_end_at=None, calendar_event_child_event_data_X_start_at=None, calendar_event_context_code=None, calendar_event_description=None, calendar_event_end_at=None, calendar_event_location_address=None, calendar_event_location_name=None, calendar_event_start_at=None, calendar_event_time_zone_edited=None, calendar_event_title=None):
    """
    Update a calendar event.

    Update and return a calendar event via PUT /api/v1/calendar_events/{id}.
    Each keyword argument maps one-to-one onto a Canvas API form field
    (named in the comment above each assignment); only non-None optional
    arguments are sent.
    """
    # Buckets consumed by generic_request: URL path substitutions,
    # form-data fields, and query-string parameters.
    path = {}
    data = {}
    params = {}

    # REQUIRED - PATH - id
    """ID"""
    path["id"] = id

    # OPTIONAL - calendar_event[context_code]
    """Context code of the course/group/user to move this event to. Scheduler appointments and events with section-specific times cannot be moved between calendars."""
    if calendar_event_context_code is not None:
        data["calendar_event[context_code]"] = calendar_event_context_code

    # OPTIONAL - calendar_event[title]
    """Short title for the calendar event."""
    if calendar_event_title is not None:
        data["calendar_event[title]"] = calendar_event_title

    # OPTIONAL - calendar_event[description]
    """Longer HTML description of the event."""
    if calendar_event_description is not None:
        data["calendar_event[description]"] = calendar_event_description

    # OPTIONAL - calendar_event[start_at]
    """Start date/time of the event."""
    if calendar_event_start_at is not None:
        data["calendar_event[start_at]"] = calendar_event_start_at

    # OPTIONAL - calendar_event[end_at]
    """End date/time of the event."""
    if calendar_event_end_at is not None:
        data["calendar_event[end_at]"] = calendar_event_end_at

    # OPTIONAL - calendar_event[location_name]
    """Location name of the event."""
    if calendar_event_location_name is not None:
        data["calendar_event[location_name]"] = calendar_event_location_name

    # OPTIONAL - calendar_event[location_address]
    """Location address"""
    if calendar_event_location_address is not None:
        data["calendar_event[location_address]"] = calendar_event_location_address

    # OPTIONAL - calendar_event[time_zone_edited]
    """Time zone of the user editing the event. Allowed time zones are {http://www.iana.org/time-zones IANA time zones} or friendlier {http://api.rubyonrails.org/classes/ActiveSupport/TimeZone.html Ruby on Rails time zones}."""
    if calendar_event_time_zone_edited is not None:
        data["calendar_event[time_zone_edited]"] = calendar_event_time_zone_edited

    # OPTIONAL - calendar_event[child_event_data][X][start_at]
    """Section-level start time(s) if this is a course event. X can be any identifier, provided that it is consistent across the start_at, end_at and context_code"""
    if calendar_event_child_event_data_X_start_at is not None:
        data["calendar_event[child_event_data][X][start_at]"] = calendar_event_child_event_data_X_start_at

    # OPTIONAL - calendar_event[child_event_data][X][end_at]
    """Section-level end time(s) if this is a course event."""
    if calendar_event_child_event_data_X_end_at is not None:
        data["calendar_event[child_event_data][X][end_at]"] = calendar_event_child_event_data_X_end_at

    # OPTIONAL - calendar_event[child_event_data][X][context_code]
    """Context code(s) corresponding to the section-level start and end time(s)."""
    if calendar_event_child_event_data_X_context_code is not None:
        data["calendar_event[child_event_data][X][context_code]"] = calendar_event_child_event_data_X_context_code

    self.logger.debug("PUT /api/v1/calendar_events/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("PUT", "/api/v1/calendar_events/{id}".format(**path), data=data, params=params, no_data=True)
python
{ "resource": "" }
q42196
CalendarEventsAPI.delete_calendar_event
train
def delete_calendar_event(self, id, cancel_reason=None):
    """Delete a calendar event.

    Issues DELETE /api/v1/calendar_events/{id} and returns the deleted
    event.

    Args:
        id: calendar event id (URL path parameter)
        cancel_reason: optional reason for deleting/canceling the event
            (sent as a query parameter)
    """
    path = {"id": id}
    data = {}
    params = {}

    if cancel_reason is not None:
        params["cancel_reason"] = cancel_reason

    self.logger.debug("DELETE /api/v1/calendar_events/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("DELETE", "/api/v1/calendar_events/{id}".format(**path), data=data, params=params, no_data=True)
python
{ "resource": "" }
q42197
tokens_required
train
def tokens_required(scopes='', new=False):
    """
    Decorator for views to request an ESI Token.

    Accepts required scopes as a space-delimited string or list of
    strings of scope names. Can require a new token to be retrieved by
    SSO. Returns a QueryDict of Tokens to the wrapped view as its second
    positional argument.
    """
    def decorator(view_func):
        @wraps(view_func, assigned=available_attrs(view_func))
        def _wrapped_view(request, *args, **kwargs):
            # if we're coming back from SSO for a new token, return it.
            # NOTE(review): _check_callback presumably exchanges the SSO
            # callback code in the request for a Token — confirm.
            token = _check_callback(request)
            if token and new:
                # Wrap the single fresh token in a queryset so the view
                # always receives the same (queryset) shape.
                tokens = Token.objects.filter(pk=token.pk)
                logger.debug("Returning new token.")
                return view_func(request, tokens, *args, **kwargs)
            if not new:
                # ensure user logged in to check existing tokens
                if not request.user.is_authenticated:
                    logger.debug(
                        "Session {0} is not logged in. Redirecting to login.".format(request.session.session_key[:5]))
                    # Imported lazily to avoid import-time side effects.
                    from django.contrib.auth.views import redirect_to_login
                    return redirect_to_login(request.get_full_path())
                # collect tokens in db, check if still valid, return if any
                tokens = Token.objects.filter(user__pk=request.user.pk).require_scopes(scopes).require_valid()
                if tokens.exists():
                    logger.debug("Retrieved {0} tokens for {1} session {2}".format(tokens.count(), request.user, request.session.session_key[:5]))
                    return view_func(request, tokens, *args, **kwargs)
            # trigger creation of new token via sso (reached when new=True
            # without a callback token, or when no valid tokens exist)
            logger.debug("No tokens identified for {0} session {1}. Redirecting to SSO.".format(request.user, request.session.session_key[:5]))
            from esi.views import sso_redirect
            return sso_redirect(request, scopes=scopes)
        return _wrapped_view
    return decorator
python
{ "resource": "" }
q42198
token_required
train
def token_required(scopes='', new=False):
    """
    Decorator for views which supplies a single, user-selected token for
    the view to process.

    Same parameters as tokens_required. The wrapped view receives the
    chosen Token as its second positional argument.
    """
    def decorator(view_func):
        @wraps(view_func, assigned=available_attrs(view_func))
        def _wrapped_view(request, *args, **kwargs):
            # if we're coming back from SSO for a new token, return it.
            # NOTE(review): _check_callback presumably exchanges the SSO
            # callback code in the request for a Token — confirm.
            token = _check_callback(request)
            if token and new:
                logger.debug("Got new token from {0} session {1}. Returning to view.".format(request.user, request.session.session_key[:5]))
                return view_func(request, token, *args, **kwargs)
            # if we're selecting a token, return it
            if request.method == 'POST':
                if request.POST.get("_add", False):
                    logger.debug("{0} has selected to add new token. Redirecting to SSO.".format(request.user))
                    # user has selected to add a new token
                    from esi.views import sso_redirect
                    return sso_redirect(request, scopes=scopes)
                token_pk = request.POST.get('_token', None)
                if token_pk:
                    logger.debug("{0} has selected token {1}".format(request.user, token_pk))
                    try:
                        token = Token.objects.get(pk=token_pk)
                        # ensure token belongs to this user and has required scopes
                        if ((token.user and token.user == request.user) or not token.user) and Token.objects.filter(
                                pk=token_pk).require_scopes(scopes).require_valid().exists():
                            logger.debug("Selected token fulfills requirements of view. Returning.")
                            return view_func(request, token, *args, **kwargs)
                    except Token.DoesNotExist:
                        # Stale/forged pk in the form: fall through to the
                        # selection/SSO flow below.
                        logger.debug("Token {0} not found.".format(token_pk))
                        pass
            if not new:
                # present the user with token choices
                tokens = Token.objects.filter(user__pk=request.user.pk).require_scopes(scopes).require_valid()
                if tokens.exists():
                    logger.debug("Returning list of available tokens for {0}.".format(request.user))
                    from esi.views import select_token
                    return select_token(request, scopes=scopes, new=new)
                else:
                    logger.debug("No tokens found for {0} session {1} with scopes {2}".format(request.user, request.session.session_key[:5], scopes))
                    # prompt the user to add a new token
            # Reached when a new token is required or none are available.
            logger.debug("Redirecting {0} session {1} to SSO.".format(request.user, request.session.session_key[:5]))
            from esi.views import sso_redirect
            return sso_redirect(request, scopes=scopes)
        return _wrapped_view
    return decorator
python
{ "resource": "" }
q42199
ContactFinder.find
train
def find(self, ip):
    '''
    Find the abuse contact for a IP address

    :param ip: IPv4 or IPv6 address to check
    :type ip: string
    :returns: emails associated with IP
    :rtype: list
    :returns: none if no contact could be found
    :rtype: None
    :raises: :py:class:`ValueError`: if ip is not properly formatted
    '''
    # Parse/validate the address first; ipaddr raises ValueError on junk.
    parsed = ipaddr.IPAddress(ip)
    reverse_name = reversename(parsed.exploded)
    # Drop the trailing reverse-DNS suffix labels and graft on the
    # provider's zone to form the TXT query name.
    reverse_prefix, _ = reverse_name.split(3)
    query_name = reverse_prefix.concatenate(self.provider).to_text()
    record = self._get_txt_record(query_name)
    if not record:
        return None
    return record.split(',')
python
{ "resource": "" }