_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q42600
parse_log
train
def parse_log(file_path): """ Parse a CISM output log and extract some information. Args: file_path: absolute path to the log file Return: A dictionary created by the elements object corresponding to the results of the bit for bit testing """ if not os.path.isfile(file_path): return elements.error("Output Log", "Could not open file: " + file_path.split(os.sep)[-1]) headers = ["Converged Iterations", "Avg. Iterations to Converge", "Processor Count", "Dycore Type"] with open(file_path, 'r') as f: dycore_types = {"0": "Glide", "1": "Glam", "2": "Glissade", "3": "Albany_felix", "4": "BISICLES"} curr_step = 0 proc_count = 0 iter_number = 0 converged_iters = [] iters_to_converge = [] for line in f: split = line.split() if ('CISM dycore type' in line): if line.split()[-1] == '=': dycore_type = dycore_types[next(f).strip()] else: dycore_type = dycore_types[line.split()[-1]] elif ('total procs' in line): proc_count += int(line.split()[-1]) elif ('Nonlinear Solver Step' in line): curr_step = int(line.split()[4]) elif ('Compute ice velocities, time = ' in line): converged_iters.append(curr_step) curr_step = float(line.split()[-1]) elif ('"SOLVE_STATUS_CONVERGED"' in line): split = line.split() iters_to_converge.append(int(split[split.index('"SOLVE_STATUS_CONVERGED"') + 2])) elif ("Compute dH/dt" in line): iters_to_converge.append(int(iter_number)) elif len(split) > 0 and split[0].isdigit(): iter_number = split[0] if iters_to_converge == []: iters_to_converge.append(int(iter_number)) data = { "Dycore Type": dycore_type, "Processor Count": proc_count, "Converged Iterations": len(converged_iters), "Avg. Iterations to Converge": np.mean(iters_to_converge) } return elements.table("Output Log", headers, data)
python
{ "resource": "" }
q42601
parse_config
train
def parse_config(file_path): """ Convert the CISM configuration file to a python dictionary Args: file_path: absolute path to the configuration file Returns: A dictionary representation of the given file """ if not os.path.isfile(file_path): return {} parser = ConfigParser() parser.read(file_path) # Strip out inline comments for s in parser._sections: for v in six.iterkeys(parser._sections[s]): parser._sections[s][v] = parser._sections[s][v].split("#")[0].strip() return parser._sections
python
{ "resource": "" }
q42602
MainWidget._selectedRepoRow
train
def _selectedRepoRow(self): """ Return the currently select repo """ # TODO - figure out what happens if no repo is selected selectedModelIndexes = \ self.reposTableWidget.selectionModel().selectedRows() for index in selectedModelIndexes: return index.row()
python
{ "resource": "" }
q42603
MetaboliticsAnalysis.set_objective
train
def set_objective(self, measured_metabolites): ''' Updates objective function for given measured metabolites. :param dict measured_metabolites: dict in which keys are metabolite names and values are float numbers represent fold changes in metabolites. ''' self.clean_objective() for k, v in measured_metabolites.items(): m = self.model.metabolites.get_by_id(k) total_stoichiometry = m.total_stoichiometry( self.without_transports) for r in m.producers(self.without_transports): update_rate = v * r.metabolites[m] / total_stoichiometry r.objective_coefficient += update_rate
python
{ "resource": "" }
q42604
Routers
train
def Routers(typ, share, handler=RoutersHandler): """ Pass the result of this function to the handler argument in your attribute declaration """ _sharing_id, _mode = tuple(share.split(":")) _router_cls = ROUTERS.get(typ) class _Handler(handler): mode=_mode sharing_id=_sharing_id router_cls=_router_cls return _Handler
python
{ "resource": "" }
q42605
Segment.stringize
train
def stringize( self, rnf_profile, ): """Create RNF representation of this segment. Args: rnf_profile (rnftools.rnfformat.RnfProfile): RNF profile (with widths). """ coor_width = max(rnf_profile.coor_width, len(str(self.left)), len(str(self.right))) return "({},{},{},{},{})".format( str(self.genome_id).zfill(rnf_profile.genome_id_width), str(self.chr_id).zfill(rnf_profile.chr_id_width), self.direction, str(self.left).zfill(coor_width), str(self.right).zfill(coor_width) )
python
{ "resource": "" }
q42606
Segment.destringize
train
def destringize(self, string): """Get RNF values for this segment from its textual representation and save them into this object. Args: string (str): Textual representation of a segment. """ m = segment_destr_pattern.match(string) self.genome_id = int(m.group(1)) self.chr_id = int(m.group(2)) self.direction = m.group(3) self.left = int(m.group(4)) self.right = int(m.group(5))
python
{ "resource": "" }
q42607
AssignmentsAPI.list_assignments
train
def list_assignments(self, course_id, assignment_ids=None, bucket=None, include=None, needs_grading_count_by_section=None, override_assignment_dates=None, search_term=None):
    """
    List assignments.

    Returns the list of assignments for the current context.

    :param course_id: (required) course to list assignments for
    :param assignment_ids: (optional) restrict to these assignment ids
    :param bucket: (optional) filter by due date / submission status
    :param include: (optional) associations to embed in each assignment
    :param needs_grading_count_by_section: (optional) split grading counts by section
    :param override_assignment_dates: (optional) apply overrides (defaults true server-side)
    :param search_term: (optional) partial title match
    """
    path = {}
    data = {}
    params = {}

    # REQUIRED - PATH - course_id
    """ID"""
    path["course_id"] = course_id

    # OPTIONAL - include
    """Associations to include with the assignment. The "assignment_visibility" option requires that the Differentiated Assignments course feature be turned on. If "observed_users" is passed, submissions for observed users will also be included as an array."""
    if include is not None:
        self._validate_enum(include, ["submission", "assignment_visibility", "all_dates", "overrides", "observed_users"])
        params["include"] = include

    # OPTIONAL - search_term
    """The partial title of the assignments to match and return."""
    if search_term is not None:
        params["search_term"] = search_term

    # OPTIONAL - override_assignment_dates
    """Apply assignment overrides for each assignment, defaults to true."""
    if override_assignment_dates is not None:
        params["override_assignment_dates"] = override_assignment_dates

    # OPTIONAL - needs_grading_count_by_section
    """Split up "needs_grading_count" by sections into the "needs_grading_count_by_section" key, defaults to false"""
    if needs_grading_count_by_section is not None:
        params["needs_grading_count_by_section"] = needs_grading_count_by_section

    # OPTIONAL - bucket
    """If included, only return certain assignments depending on due date and submission status."""
    if bucket is not None:
        self._validate_enum(bucket, ["past", "overdue", "undated", "ungraded", "unsubmitted", "upcoming", "future"])
        params["bucket"] = bucket

    # OPTIONAL - assignment_ids
    """if set, return only assignments specified"""
    if assignment_ids is not None:
        params["assignment_ids"] = assignment_ids

    self.logger.debug("GET /api/v1/courses/{course_id}/assignments with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/courses/{course_id}/assignments".format(**path), data=data, params=params, all_pages=True)
python
{ "resource": "" }
q42608
AssignmentsAPI.get_single_assignment
train
def get_single_assignment(self, id, course_id, all_dates=None, include=None, needs_grading_count_by_section=None, override_assignment_dates=None): """ Get a single assignment. Returns the assignment with the given id. "observed_users" is passed, submissions for observed users will also be included. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - id """ID""" path["id"] = id # OPTIONAL - include """Associations to include with the assignment. The "assignment_visibility" option requires that the Differentiated Assignments course feature be turned on. If""" if include is not None: self._validate_enum(include, ["submission", "assignment_visibility", "overrides", "observed_users"]) params["include"] = include # OPTIONAL - override_assignment_dates """Apply assignment overrides to the assignment, defaults to true.""" if override_assignment_dates is not None: params["override_assignment_dates"] = override_assignment_dates # OPTIONAL - needs_grading_count_by_section """Split up "needs_grading_count" by sections into the "needs_grading_count_by_section" key, defaults to false""" if needs_grading_count_by_section is not None: params["needs_grading_count_by_section"] = needs_grading_count_by_section # OPTIONAL - all_dates """All dates associated with the assignment, if applicable""" if all_dates is not None: params["all_dates"] = all_dates self.logger.debug("GET /api/v1/courses/{course_id}/assignments/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/assignments/{id}".format(**path), data=data, params=params, single_item=True)
python
{ "resource": "" }
q42609
randomize_es
train
def randomize_es(es_queryset): """Randomize an elasticsearch queryset.""" return es_queryset.query( query.FunctionScore( functions=[function.RandomScore()] ) ).sort("-_score")
python
{ "resource": "" }
q42610
Configurator.configure
train
def configure(self, cfg, handler, path=""): """ Start configuration process for the provided handler Args: cfg (dict): config container handler (config.Handler class): config handler to use path (str): current path in the configuration progress """ # configure simple value attributes (str, int etc.) for name, attr in handler.attributes(): if cfg.get(name) is not None: continue if attr.expected_type not in [list, dict]: cfg[name] = self.set(handler, attr, name, path, cfg) elif attr.default is None and not hasattr(handler, "configure_%s" % name): self.action_required.append(("%s.%s: %s" % (path, name, attr.help_text)).strip(".")) # configure attributes that have complex handlers defined # on the config Handler class (class methods prefixed by # configure_ prefix for name, attr in handler.attributes(): if cfg.get(name) is not None: continue if hasattr(handler, "configure_%s" % name): fn = getattr(handler, "configure_%s" % name) fn(self, cfg, "%s.%s"% (path, name)) if attr.expected_type in [list, dict] and not cfg.get(name): try: del cfg[name] except KeyError: pass
python
{ "resource": "" }
q42611
Configurator.set
train
def set(self, handler, attr, name, path, cfg): """ Obtain value for config variable, by prompting the user for input and substituting a default value if needed. Also does validation on user input """ full_name = ("%s.%s" % (path, name)).strip(".") # obtain default value if attr.default is None: default = None else: try: comp = vodka.component.Component(cfg) default = handler.default(name, inst=comp) if self.skip_defaults: self.echo("%s: %s [default]" % (full_name, default)) return default except Exception: raise # render explanation self.echo("") self.echo(attr.help_text) if attr.choices: self.echo("choices: %s" % ", ".join([str(c) for c in attr.choices])) # obtain user input and validate until input is valid b = False while not b: try: if type(attr.expected_type) == type: r = self.prompt(full_name, default=default, type=attr.expected_type) r = attr.expected_type(r) else: r = self.prompt(full_name, default=default, type=str) except ValueError: self.echo("Value expected to be of type %s"% attr.expected_type) try: b = handler.check({name:r}, name, path) except Exception as inst: if hasattr(inst, "explanation"): self.echo(inst.explanation) else: raise return r
python
{ "resource": "" }
q42612
_from_keras_log_format
train
def _from_keras_log_format(data, **kwargs): """Plot accuracy and loss from a panda's dataframe. Args: data: Panda dataframe in the format of the Keras CSV log. output_dir_path: The path to the directory where the resultings plots should end up. """ data_val = pd.DataFrame(data[['epoch']]) data_val['acc'] = data['val_acc'] data_val['loss'] = data['val_loss'] data_val['data'] = 'validation' data_training = pd.DataFrame(data[['acc', 'loss', 'epoch']]) data_training['data'] = 'training' result = pd.concat([data_training, data_val], sort=False) plot(result, **kwargs)
python
{ "resource": "" }
q42613
from_keras_log
train
def from_keras_log(csv_path, output_dir_path, **kwargs): """Plot accuracy and loss from a Keras CSV log. Args: csv_path: The path to the CSV log with the actual data. output_dir_path: The path to the directory where the resultings plots should end up. """ # automatically get seperator by using Python's CSV parser data = pd.read_csv(csv_path, sep=None, engine='python') _from_keras_log_format(data, output_dir_path=output_dir_path, **kwargs)
python
{ "resource": "" }
q42614
Component.get_config
train
def get_config(self, key_name): """ Return configuration value Args: key_name (str): configuration key Returns: The value for the specified configuration key, or if not found in the config the default value specified in the Configuration Handler class specified inside this component """ if key_name in self.config: return self.config.get(key_name) return self.Configuration.default(key_name, inst=self)
python
{ "resource": "" }
q42615
make_pagination_headers
train
def make_pagination_headers(request, limit, curpage, total, links=False): """Return Link Hypermedia Header.""" lastpage = math.ceil(total / limit) - 1 headers = {'X-Total-Count': str(total), 'X-Limit': str(limit), 'X-Page-Last': str(lastpage), 'X-Page': str(curpage)} if links: base = "{}?%s".format(request.path) links = {} links['first'] = base % urlencode(dict(request.query, **{VAR_PAGE: 0})) links['last'] = base % urlencode(dict(request.query, **{VAR_PAGE: lastpage})) if curpage: links['prev'] = base % urlencode(dict(request.query, **{VAR_PAGE: curpage - 1})) if curpage < lastpage: links['next'] = base % urlencode(dict(request.query, **{VAR_PAGE: curpage + 1})) headers['Link'] = ",".join(['<%s>; rel="%s"' % (v, n) for n, v in links.items()]) return headers
python
{ "resource": "" }
q42616
RESTHandler.bind
train
def bind(cls, app, *paths, methods=None, name=None, **kwargs): """Bind to the application. Generate URL, name if it's not provided. """ paths = paths or ['/%s(/{%s})?/?' % (cls.name, cls.name)] name = name or "api.%s" % cls.name return super(RESTHandler, cls).bind(app, *paths, methods=methods, name=name, **kwargs)
python
{ "resource": "" }
q42617
RESTHandler.dispatch
train
async def dispatch(self, request, view=None, **kwargs): """Process request.""" # Authorization endpoint self.auth = await self.authorize(request, **kwargs) # noqa # Load collection self.collection = await self.get_many(request, **kwargs) if request.method == 'POST' and view is None: return await super(RESTHandler, self).dispatch(request, **kwargs) # Load resource resource = await self.get_one(request, **kwargs) headers = {} if request.method == 'GET' and resource is None: # Filter resources if VAR_WHERE in request.query: self.collection = await self.filter(request, **kwargs) # Sort resources if VAR_SORT in request.query: sorting = [(name.strip('-'), name.startswith('-')) for name in request.query[VAR_SORT].split(',')] self.collection = await self.sort(*sorting, **kwargs) # Paginate resources per_page = request.query.get(VAR_PER_PAGE, self.meta.per_page) if per_page: try: per_page = int(per_page) if per_page: page = int(request.query.get(VAR_PAGE, 0)) offset = page * per_page self.collection, total = await self.paginate(request, offset, per_page) headers = make_pagination_headers( request, per_page, page, total, self.meta.page_links) except ValueError: raise RESTBadRequest(reason='Pagination params are invalid.') response = await super(RESTHandler, self).dispatch( request, resource=resource, view=view, **kwargs) response.headers.update(headers) return response
python
{ "resource": "" }
q42618
RESTHandler.get
train
async def get(self, request, resource=None, **kwargs): """Get resource or collection of resources. --- parameters: - name: resource in: path type: string """ if resource is not None and resource != '': return self.to_simple(request, resource, **kwargs) return self.to_simple(request, self.collection, many=True, **kwargs)
python
{ "resource": "" }
q42619
RESTHandler.load
train
async def load(self, request, resource=None, **kwargs): """Load resource from given data.""" schema = self.get_schema(request, resource=resource, **kwargs) data = await self.parse(request) resource, errors = schema.load( data, partial=resource is not None, many=isinstance(data, list)) if errors: raise RESTBadRequest(reason='Bad request', json={'errors': errors}) return resource
python
{ "resource": "" }
q42620
MergedPollDataView.render_to_response
train
def render_to_response(self, context, **response_kwargs): """ This endpoint sets very permiscuous CORS headers. Access-Control-Allow-Origin is set to the request Origin. This allows a page from ANY domain to make a request to this endpoint. Access-Control-Allow-Credentials is set to true. This allows requesting poll data in our authenticated test/staff environments. This particular combination of headers means this endpoint is a potential CSRF target. This enpoint MUST NOT write data. And it MUST NOT return any sensitive data. """ serializer = PollPublicSerializer(self.object) response = HttpResponse( json.dumps(serializer.data), content_type="application/json" ) if "HTTP_ORIGIN" in self.request.META: response["Access-Control-Allow-Origin"] = self.request.META["HTTP_ORIGIN"] response["Access-Control-Allow-Credentials"] = 'true' return response
python
{ "resource": "" }
q42621
resize
train
def resize(widthWindow, heightWindow): """Setup 3D projection for window""" glViewport(0, 0, widthWindow, heightWindow) glMatrixMode(GL_PROJECTION) glLoadIdentity() gluPerspective(70, 1.0*widthWindow/heightWindow, 0.001, 10000.0) glMatrixMode(GL_MODELVIEW) glLoadIdentity()
python
{ "resource": "" }
q42622
to_d
train
def to_d(l): """ Converts list of dicts to dict. """ _d = {} for x in l: for k, v in x.items(): _d[k] = v return _d
python
{ "resource": "" }
q42623
tailor
train
def tailor(pattern_or_root, dimensions=None, distributed_dim='time', read_only=False): """ Return a TileManager to wrap the root descriptor and tailor all the dimensions to a specified window. Keyword arguments: root -- a NCObject descriptor. pattern -- a filename string to open a NCObject descriptor. dimensions -- a dictionary to configurate the dimensions limits. """ return TileManager(pattern_or_root, dimensions=dimensions, distributed_dim=distributed_dim, read_only=read_only)
python
{ "resource": "" }
q42624
get_dependants
train
def get_dependants(project_name): """Yield dependants of `project_name`.""" for package in get_installed_distributions(user_only=ENABLE_USER_SITE): if is_dependant(package, project_name): yield package.project_name
python
{ "resource": "" }
q42625
is_dependant
train
def is_dependant(package, project_name): """Determine whether `package` is a dependant of `project_name`.""" for requirement in package.requires(): # perform case-insensitive matching if requirement.project_name.lower() == project_name.lower(): return True return False
python
{ "resource": "" }
q42626
Miner.get_global_rate_limit
train
def get_global_rate_limit(self): """Get the global rate limit per client. :rtype: int :returns: The global rate limit for each client. """ r = urllib.request.urlopen('https://archive.org/metadata/iamine-rate-limiter') j = json.loads(r.read().decode('utf-8')) return int(j.get('metadata', {}).get('rate_per_second', 300))
python
{ "resource": "" }
q42627
ItemMiner.mine_items
train
def mine_items(self, identifiers, params=None, callback=None): """Mine metadata from Archive.org items. :param identifiers: Archive.org identifiers to be mined. :type identifiers: iterable :param params: URL parameters to send with each metadata request. :type params: dict :param callback: A callback function to be called on each :py:class:`aiohttp.client.ClientResponse`. :type callback: func """ # By default, don't cache item metadata in redis. params = {'dontcache': 1} if not params else {} requests = metadata_requests(identifiers, params, callback, self) yield from self.mine(requests)
python
{ "resource": "" }
q42628
Luis.analyze
train
def analyze(self, text): """Sends text to LUIS for analysis. Returns a LuisResult. """ logger.debug('Sending %r to LUIS app %s', text, self._url) r = requests.get(self._url, {'q': text}) logger.debug('Request sent to LUIS URL: %s', r.url) logger.debug( 'LUIS returned status %s with text: %s', r.status_code, r.text) r.raise_for_status() json_response = r.json() result = LuisResult._from_json(json_response) logger.debug('Returning %s', result) return result
python
{ "resource": "" }
q42629
enum
train
def enum(**enums): """ A basic enum implementation. Usage: >>> MY_ENUM = enum(FOO=1, BAR=2) >>> MY_ENUM.FOO 1 >>> MY_ENUM.BAR 2 """ # Enum values must be hashable to support reverse lookup. if not all(isinstance(val, collections.Hashable) for val in _values(enums)): raise EnumConstructionException('All enum values must be hashable.') # Cheating by maintaining a copy of original dict for iteration b/c iterators are hard. # It must be a deepcopy because new.classobj() modifies the original. en = copy.deepcopy(enums) e = type('Enum', (_EnumMethods,), dict((k, v) for k, v in _items(en))) try: e.choices = [(v, k) for k, v in sorted(_items(enums), key=itemgetter(1))] # DEPRECATED except TypeError: pass e.get_id_by_label = e.__dict__.get e.get_label_by_id = dict((v, k) for (k, v) in _items(enums)).get return e
python
{ "resource": "" }
q42630
Manager.create_translation_tasks
train
def create_translation_tasks(self, instance): """ Creates the translations tasks from the instance and its translatable children :param instance: :return: """ langs = self.get_languages() result = [] # get the previous and actual values # in case it's and "add" operation previous values will be empty previous_values, actual_values = self.get_previous_and_current_values(instance) # extract the differences differences = self.extract_diferences(previous_values, actual_values) self.log('\nprev: {}\nactu:{}\ndiff:{}'.format(previous_values, actual_values, differences)) if len(differences) > 0: # there are differences in the main model, so we create the tasks for it result += self.create_from_item(langs, instance.master, differences, trans_instance=self.instance) else: # no differences so we do nothing to the main model self.log('No differences we do nothing CREATE {}:{}'.format(self.master_class, instance.language_code)) return result
python
{ "resource": "" }
q42631
Manager.update_task
train
def update_task(self, differences): """ Updates a task as done if we have a new value for this alternative language :param differences: :return: """ self.log('differences UPDATING: {}'.format(differences)) object_name = '{} - {}'.format(self.app_label, self.instance.master._meta.verbose_name) lang = self.instance.language_code object_pk = self.instance.master.pk for field in differences: value = getattr(self.instance, field) if value is None or value == '': continue try: TransTask.objects.filter( language__code=lang, object_field=field, object_name=object_name, object_pk=object_pk ).update(done=True, date_modification=datetime.now(), object_field_value_translation=value) self.log('MARKED TASK AS DONE') except TransTask.DoesNotExist: self.log('error MARKING TASK AS DONE: {} - {} - {} - {}'.format(lang, field, object_name, object_pk))
python
{ "resource": "" }
q42632
Manager.get_previous_and_current_values
train
def get_previous_and_current_values(self, instance): """ Obtain the previous and actual values and compares them in order to detect which fields has changed :param instance: :param translation: :return: """ translated_field_names = self._get_translated_field_names(instance.master) if instance.pk: try: previous_obj = instance._meta.model.objects.get(pk=instance.pk) previous_values = self.get_obj_values(previous_obj, translated_field_names) except ObjectDoesNotExist: previous_values = {} else: previous_values = {} current_values = self.get_obj_values(instance, translated_field_names) return previous_values, current_values
python
{ "resource": "" }
q42633
Manager.get_obj_values
train
def get_obj_values(obj, translated_field_names): """ get the translated field values from translatable fields of an object :param obj: :param translated_field_names: :return: """ # set of translated fields to list fields = list(translated_field_names) values = {field: getattr(obj, field) for field in fields} return values
python
{ "resource": "" }
q42634
Manager._get_translated_field_names
train
def _get_translated_field_names(model_instance): """ Get the instance translatable fields :return: """ hvad_internal_fields = ['id', 'language_code', 'master', 'master_id', 'master_id'] translated_field_names = set(model_instance._translated_field_names) - set(hvad_internal_fields) return translated_field_names
python
{ "resource": "" }
q42635
Manager.get_languages
train
def get_languages(self, include_main=False): """ Get all the languages except the main. Try to get in order: 1.- item languages 2.- model languages 3.- application model languages # 4.- default languages :param master: :param include_main: :return: """ if not self.master: raise Exception('TransManager - No master set') item_languages = self.get_languages_from_item(self.ct_master, self.master) languages = self.get_languages_from_model(self.ct_master.app_label, self.ct_master.model) if not languages: languages = self.get_languages_from_application(self.ct_master.app_label) # if not languages: # languages = self.get_languages_default() if not include_main: main_language = self.get_main_language() if main_language in languages: languages.remove(main_language) return list(set(item_languages + languages))
python
{ "resource": "" }
q42636
Manager.get_languages_from_model
train
def get_languages_from_model(app_label, model_label): """ Get the languages configured for the current model :param model_label: :param app_label: :return: """ try: mod_lan = TransModelLanguage.objects.filter(model='{} - {}'.format(app_label, model_label)).get() languages = [lang.code for lang in mod_lan.languages.all()] return languages except TransModelLanguage.DoesNotExist: return []
python
{ "resource": "" }
q42637
Manager.get_languages_from_application
train
def get_languages_from_application(app_label): """ Get the languages configured for the current application :param app_label: :return: """ try: mod_lan = TransApplicationLanguage.objects.filter(application=app_label).get() languages = [lang.code for lang in mod_lan.languages.all()] return languages except TransApplicationLanguage.DoesNotExist: return []
python
{ "resource": "" }
q42638
Manager.log
train
def log(self, msg): """ Log a message information adding the master_class and instance_class if available :param msg: :return: """ if self.master_class and self.instance_class: logger.info('{0} - {1} - {2} - {3} - lang: {4} msg: {5}'.format( self.ct_master.app_label, self.ct_master.model, self.instance_class, self.instance.language_code, self.instance.pk, msg) ) elif self.instance_class: logger.info('{} - {}: {}'.format(self.instance_class, self.instance.pk, msg)) else: logger.info('{}'.format(msg))
python
{ "resource": "" }
q42639
Manager.get_field_label
train
def get_field_label(self, trans, field): """ Get the field label from the _meta api of the model :param trans: :param field: :return: """ try: # get from the instance object_field_label = trans._meta.get_field_by_name(field)[0].verbose_name except Exception: try: # get from the class object_field_label = self.sender._meta.get_field_by_name(field)[0].verbose_name except Exception: # in the worst case we set the field name as field label object_field_label = field return object_field_label
python
{ "resource": "" }
q42640
Manager.get_translatable_children
train
def get_translatable_children(self, obj): """ Obtain all the translatable children from "obj" :param obj: :return: """ collector = NestedObjects(using='default') collector.collect([obj]) object_list = collector.nested() items = self.get_elements(object_list) # avoid first object because it's the main object return items[1:]
python
{ "resource": "" }
q42641
Manager.get_elements
train
def get_elements(self, object_list): """ Recursive method to iterate the tree of children in order to flatten it :param object_list: :return: """ result = [] for item in object_list: if isinstance(item, list): result += self.get_elements(item) elif isinstance(item, TranslatableModel): result.append(item) return result
python
{ "resource": "" }
q42642
Manager.update_model_languages
train
def update_model_languages(self, model_class, languages): """ Update the TransModelLanguages model with the selected languages :param model_class: :param languages: :return: """ # get the langs we have to add to the TransModelLanguage qs = TransLanguage.objects.filter(code__in=languages) new_langs = [lang for lang in qs] if not new_langs: return mod_lan, created = TransModelLanguage.objects.get_or_create( model='{} - {}'.format(model_class._meta.app_label, model_class._meta.model.__name__.lower()), ) exist_langs_codes = [lang.code for lang in mod_lan.languages.all()] for lang in new_langs: if lang.code not in exist_langs_codes: try: mod_lan.languages.add(lang) except IntegrityError: pass
python
{ "resource": "" }
q42643
Manager.add_item_languages
train
def add_item_languages(self, item, languages): """ Update the TransItemLanguage model with the selected languages :param item: :param languages: :return: """ # get the langs we have to add to the TransModelLanguage qs = TransLanguage.objects.filter(code__in=languages) new_langs = [lang for lang in qs] if not new_langs: return ct_item = ContentType.objects.get_for_model(item) item_lan, created = TransItemLanguage.objects.get_or_create(content_type_id=ct_item.id, object_id=item.id) item_lan.languages.add(*new_langs)
python
{ "resource": "" }
q42644
Manager.remove_item_languages
train
def remove_item_languages(self, item, languages): """ delete the selected languages from the TransItemLanguage model :param item: :param languages: :return: """ # get the langs we have to add to the TransModelLanguage qs = TransLanguage.objects.filter(code__in=languages) remove_langs = [lang for lang in qs] if not remove_langs: return ct_item = ContentType.objects.get_for_model(item) item_lan, created = TransItemLanguage.objects.get_or_create(content_type_id=ct_item.id, object_id=item.id) for lang in remove_langs: item_lan.languages.remove(lang) if item_lan.languages.count() == 0: item_lan.delete()
python
{ "resource": "" }
q42645
Manager.get_translation_from_instance
train
def get_translation_from_instance(instance, lang): """ Get the translation from the instance in a specific language, hits the db :param instance: :param lang: :return: """ try: translation = get_translation(instance, lang) except (AttributeError, ObjectDoesNotExist): translation = None return translation
python
{ "resource": "" }
q42646
Manager.create_translations_for_item_and_its_children
train
def create_translations_for_item_and_its_children(self, item, languages=None): """ Creates the translations from an item and defined languages and return the id's of the created tasks :param item: (master) :param languages: :return: """ if not self.master: self.set_master(item) if not languages: languages = self.get_languages() result_ids = [] # first process main object fields = self._get_translated_field_names(item) tasks = self.create_from_item(languages, item, fields) if tasks: result_ids += [task.pk for task in tasks] # then process child objects from main children = self.get_translatable_children(item) for child in children: fields = self._get_translated_field_names(child) tasks = self.create_from_item(languages, child, fields) if tasks: result_ids += [task.pk for task in tasks] return result_ids
python
{ "resource": "" }
q42647
RegistryHive.keys
train
def keys(self):
    """Iterates over the hive's keys.

    Yields WinRegKey namedtuples containing:
        path: path of the key "RootKey\\Key\\..."
        timestamp: date and time of last modification
        values: list of values (("ValueKey", "ValueType", ValueValue), ... )
    """
    # Walk every child of the hive root, recursing via _visit_registry
    # which yields one WinRegKey per visited node.
    for node in self.node_children(self.root()):
        yield from self._visit_registry(node, self._rootkey)
python
{ "resource": "" }
q42648
RegistryHive._value_data
train
def _value_data(self, value): """Parses binary and unidentified values.""" return codecs.decode( codecs.encode(self.value_value(value)[1], 'base64'), 'utf8')
python
{ "resource": "" }
q42649
intersection
train
def intersection(*args):
    """
    Return the intersection of lists, using the first list to determine
    item order.
    """
    if not args:
        return []
    # De-duplicate the first list while keeping first-seen order.
    ordered_unique = list(OrderedDict.fromkeys(args[0]))
    if len(args) == 1:
        return ordered_unique
    # Intersect the remaining lists into a single membership set.
    keep = set(args[1])
    for extra in args[2:]:
        keep &= set(extra)
    return [item for item in ordered_unique if item in keep]
python
{ "resource": "" }
q42650
union
train
def union(*args):
    """
    Return the union of lists, ordering by first seen in any list.

    Fix: the previous implementation called ``extend`` on ``args[0]``
    directly, mutating the caller's first list as a side effect. The
    returned value is unchanged; the inputs are now left intact.
    """
    if not args:
        return []
    merged = []
    for seq in args:
        merged.extend(seq)
    # OrderedDict.fromkeys de-duplicates while preserving first-seen order.
    return list(OrderedDict.fromkeys(merged))
python
{ "resource": "" }
q42651
random_string
train
def random_string(length):
    """Generate a random alphanumeric string of the given length."""
    # The alphabet omits glyphs that could be mistaken, e.g. 'I' and '1'.
    alphabet = "23456789ABCDEFGHJKLMNPQRSTUVWXYZ"
    return "".join(random.choice(alphabet) for _ in range(length))
python
{ "resource": "" }
q42652
filter_dict
train
def filter_dict(d, keys):
    """Create a new dict from an existing dict that only has the given keys."""
    wanted = set(keys)
    result = {}
    for key, value in d.items():
        if key in wanted:
            result[key] = value
    return result
python
{ "resource": "" }
q42653
get_cacheable
train
def get_cacheable(cache_key, cache_ttl, calculate, recalculate=False):
    """
    Get the result of a method call, using the given key and TTL as a cache.

    The cached value is stored JSON-serialized; a cache hit is decoded
    before being returned.  Passing ``recalculate=True`` bypasses the
    cache lookup and refreshes the stored value.
    """
    if not recalculate:
        hit = cache.get(cache_key)
        if hit is not None:
            return json.loads(hit)
    fresh = calculate()
    cache.set(cache_key, json.dumps(fresh), cache_ttl)
    return fresh
python
{ "resource": "" }
q42654
get_obj_cacheable
train
def get_obj_cacheable(obj, attr_name, calculate, recalculate=False):
    """
    Get the result of a method call, using the given object and attribute
    name as a cache.  ``calculate`` only runs when no cached attribute
    exists or ``recalculate`` is True.
    """
    if recalculate or not hasattr(obj, attr_name):
        setattr(obj, attr_name, calculate())
    return getattr(obj, attr_name)
python
{ "resource": "" }
q42655
datetime_to_ms
train
def datetime_to_ms(dt):
    """Convert a datetime to a millisecond accuracy timestamp."""
    # timegm interprets the struct_time as UTC; microseconds are truncated
    # (not rounded) to whole milliseconds.
    epoch_seconds = calendar.timegm(dt.utctimetuple())
    return epoch_seconds * 1000 + dt.microsecond // 1000
python
{ "resource": "" }
q42656
ms_to_datetime
train
def ms_to_datetime(ms):
    """Convert a millisecond accuracy timestamp to a (UTC-aware) datetime."""
    whole_seconds, millis = divmod(ms, 1000)
    dt = datetime.datetime.utcfromtimestamp(whole_seconds)
    return dt.replace(microsecond=millis * 1000, tzinfo=pytz.utc)
python
{ "resource": "" }
q42657
chunks
train
def chunks(iterable, size):
    """
    Split a very large iterable into evenly sized chunks.

    Returns an iterator of lists that are no more than ``size`` long;
    the final chunk may be shorter.
    """
    iterator = iter(iterable)
    while True:
        block = list(islice(iterator, size))
        if not block:
            return
        yield block
python
{ "resource": "" }
q42658
is_owner
train
def is_owner(package, abspath):
    """Determine whether `abspath` belongs to `package`.

    ``package`` is a mapping expected to carry ``files`` (relative paths)
    and ``location`` (base directory); missing keys mean "not an owner".
    """
    if 'files' not in package or 'location' not in package:
        return False
    base = package['location']
    return any(os.path.abspath(os.path.join(base, f)) == abspath
               for f in package['files'])
python
{ "resource": "" }
q42659
RestURL._get_subfolder
train
def _get_subfolder(self, foldername, returntype, params=None, file_data=None):
    """Return an object of the requested type with the path relative to the
    current object's URL. Optionally, query parameters may be set.

    :param foldername: path fragment joined onto this object's URL
    :param returntype: class instantiated with the resulting URL
    :param params: optional mapping merged into the query string
    :param file_data: optional mapping of files for multipart upload
    :return: an instance of ``returntype`` with ``parent`` set to ``self``
    """
    newurl = compat.urljoin(self.url, compat.quote(foldername), False)
    params = params or {}
    file_data = file_data or {}
    # Add the key-value pairs sent in params to query string if they
    # are so defined.
    query_dict = {}
    url_tuple = compat.urlsplit(newurl)
    urllist = list(url_tuple)
    if params:
        # Pull out first element from parse_qs' values (each value is a list).
        query_dict = dict((k, v[0]) for k, v in cgi.parse_qs(urllist[3]).items())
        for key, val in params.items():
            # Lowercase bool string
            if isinstance(val, bool):
                query_dict[key] = str(val).lower()
            # Special case: convert an envelope to .bbox in the bb parameter
            elif isinstance(val, geometry.Envelope):
                query_dict[key] = val.bbox
            # Another special case: strings can't be quoted/escaped at the
            # top level
            elif isinstance(val, gptypes.GPString):
                query_dict[key] = val.value
            # Just use the wkid of SpatialReferences
            elif isinstance(val, geometry.SpatialReference):
                query_dict[key] = val.wkid
            # If it's a list, make it a comma-separated string
            # NOTE(review): this branch (and the dict branch below) rebinds
            # ``val`` but never stores it in query_dict, so list/dict params
            # are silently dropped from the query string — confirm intent.
            elif isinstance(val, (list, tuple, set)):
                val = ",".join([str(v.id) if isinstance(v, Layer)
                                else str(v) for v in val])
            # If it's a dictionary, dump as JSON
            elif isinstance(val, dict):
                val = json.dumps(val)
            # Ignore null values, and coerce string values (hopefully
            # everything sent in to a query has a sane __str__)
            elif val is not None:
                query_dict[key] = str(val)
    # Append authentication token and a referer marker for the request layer.
    if self.__token__ is not None:
        query_dict['token'] = self.__token__
    query_dict[REQUEST_REFERER_MAGIC_NAME] = self._referer or self.url
    # Replace URL query component with newly altered component
    urllist[3] = compat.urlencode(query_dict)
    newurl = urllist
    # Instantiate new RestURL or subclass
    rt = returntype(newurl, file_data)
    # Remind the resource where it came from; some return types expose
    # ``parent`` as a read-only property, hence the fallback assignment.
    try:
        rt.parent = self
    except:
        rt._parent = self
    return rt
python
{ "resource": "" }
q42660
RestURL._contents
train
def _contents(self):
    """The raw contents of the URL as fetched, this is done lazily.
    For non-lazy fetching this is accessed in the object constructor.

    Builds either a multipart/form-data POST (when ``_file_data`` is set)
    or a plain GET/POST request, follows one server redirect, and caches
    the body unless ``__cache_request__`` is False.
    """
    if self.__urldata__ is Ellipsis or self.__cache_request__ is False:
        if self._file_data:
            # Special-case: do a multipart upload if there's file data
            self.__post__ = True
            boundary = "-"*12+str(uuid.uuid4())+"$"
            multipart_data = ''
            # Re-emit every existing query parameter as a form field.
            for k, v in cgi.parse_qs(self.query).items():
                if not isinstance(v, list):
                    v = [v]
                for val in v:
                    multipart_data += boundary + "\r\n"
                    multipart_data += ('Content-Disposition: form-data; '
                                      'name="%s"\r\n\r\n' % k)
                    multipart_data += val + "\r\n"
            # Append one part per uploaded file, guessing the MIME type
            # from the file name.
            for k, v in self._file_data.items():
                fn = os.path.basename(getattr(v, 'name', 'file'))
                ct = (mimetypes.guess_type(fn) or
                      ("application/octet-stream",))[0]
                multipart_data += boundary + "\r\n"
                multipart_data += ('Content-Disposition: form-data; '
                                  'name="%s"; filename="%s"\r\n'
                                  'Content-Type:%s\r\n\r\n' % (k, fn, ct))
                multipart_data += v.read() + "\r\n"
            multipart_data += boundary + "--\r\n\r\n"
            req_dict = {'User-Agent' : USER_AGENT,
                        'Content-Type':
                            'multipart/form-data; boundary='+boundary[2:],
                        'Content-Length': str(len(multipart_data))
                       }
            if self._referer:
                req_dict['Referer'] = self._referer
            request = compat.urllib2.Request(self.url, multipart_data,
                                             req_dict)
        else:
            req_dict = {'User-Agent' : USER_AGENT}
            if self._referer:
                req_dict['Referer'] = self._referer
            # Body is the query string only when this resource POSTs.
            request = compat.urllib2.Request(
                self.url, self.query if self.__post__ else None, req_dict)
        handle = compat.urllib2.urlopen(request)
        # Handle the special case of a redirect (only follow once) --
        # Note that only the first 3 components (protocol, hostname, path)
        # are altered as component 4 is the query string, which can get
        # clobbered by the server.
        fetched_url = list(compat.urlsplit(handle.url)[:3])
        if fetched_url != list(self._url[:3]):
            self._url[:3] = fetched_url
            return self._contents
        # No redirect, proceed as usual.
        self.__headers__ = compat.get_headers(handle)
        self.__urldata__ = handle.read()
    data = self.__urldata__
    # Non-caching resources discard the body immediately after returning it.
    if self.__cache_request__ is False:
        self.__urldata__ = Ellipsis
    return data
python
{ "resource": "" }
q42661
RestURL._json_struct
train
def _json_struct(self):
    """The json data structure in the URL contents, it will cache this
    if it makes sense so it doesn't parse over and over.

    Returns an empty dict for non-JSON resources (or when no contents are
    available yet) so callers never need to special-case None.
    """
    if self.__has_json__:
        if self.__cache_request__:
            # Ellipsis is the "not parsed yet" sentinel for the cache.
            if self.__json_struct__ is Ellipsis:
                if self._contents is not Ellipsis:
                    # An empty body parses as an empty object.
                    self.__json_struct__ = json.loads(
                        compat.ensure_string(self._contents)
                        .strip() or '{}')
                else:
                    return {}
            return self.__json_struct__
        else:
            # Caching disabled: re-parse the contents on every access.
            return json.loads(compat.ensure_string(self._contents))
    else:
        # Return an empty dict for things so they don't have to special
        # case against a None value or anything
        return {}
python
{ "resource": "" }
q42662
RestURL.parent
train
def parent(self):
    """Get this object's parent.

    Prefers an explicitly stored parent; otherwise, if a parent type is
    declared, derives it from this resource's URL ('..' for folder-style
    URLs, '.' otherwise). Raises AttributeError when neither is available.
    """
    if self._parent:
        return self._parent
    # auto-compute parent if needed
    elif getattr(self, '__parent_type__', None):
        return self._get_subfolder('..' if self._url[2].endswith('/')
                                        else '.',
                                   self.__parent_type__)
    else:
        raise AttributeError("%r has no parent attribute" % type(self))
python
{ "resource": "" }
q42663
Folder._register_service_type
train
def _register_service_type(cls, subclass):
    """Registers subclass handlers of various service-type-specific
    service implementations. Look for classes decorated with
    @Folder._register_service_type for hints on how this works.

    Returns ``subclass`` unchanged so it can be used as a decorator.
    """
    if hasattr(subclass, '__service_type__'):
        cls._service_type_mapping[subclass.__service_type__] = subclass
        # Expose the service type name as a self-returning property on the
        # subclass (e.g. ``service.MapServer is service``).
        if subclass.__service_type__:
            setattr(subclass, subclass.__service_type__,
                    property(lambda x: x))
    return subclass
python
{ "resource": "" }
q42664
Folder.servicenames
train
def servicenames(self):
    """Give the set of service names available in this folder.

    Names are derived from the last path component of each advertised
    service entry.
    """
    names = set()
    for service in self._json_struct.get('services', []):
        names.add(service['name'].rstrip('/').split('/')[-1])
    return names
python
{ "resource": "" }
q42665
Folder.services
train
def services(self):
    """Returns a list of Service objects available in this folder.

    Each entry is instantiated via ``_get_subfolder`` using the
    type-specific class registered in ``_service_type_mapping`` (falling
    back to the generic ``Service``).
    """
    return [self._get_subfolder("%s/%s/" %
                                (s['name'].rstrip('/').split('/')[-1],
                                 s['type']),
                                self._service_type_mapping.get(s['type'],
                                                               Service))
            for s in self._json_struct.get('services', [])]
python
{ "resource": "" }
q42666
MapLayer.QueryLayer
train
def QueryLayer(self, text=None, Geometry=None, inSR=None,
               spatialRel='esriSpatialRelIntersects', where=None,
               outFields=None, returnGeometry=None, outSR=None,
               objectIds=None, time=None, maxAllowableOffset=None,
               returnIdsOnly=None):
    """The query operation is performed on a layer resource. The result
    of this operation is a resultset resource. This resource provides
    information about query results including the values for the fields
    requested by the user. If you request geometry information, the
    geometry of each result is also returned in the resultset.

    B{Spatial Relation Options:}
        - esriSpatialRelIntersects
        - esriSpatialRelContains
        - esriSpatialRelCrosses
        - esriSpatialRelEnvelopeIntersects
        - esriSpatialRelIndexIntersects
        - esriSpatialRelOverlaps
        - esriSpatialRelTouches
        - esriSpatialRelWithin
    """
    # Default the input spatial reference to the geometry's own.
    if not inSR:
        if Geometry:
            inSR = Geometry.spatialReference
    out = self._get_subfolder("./query", JsonResult, {
        'text': text,
        # Fix: previously passed the module-level ``geometry`` module
        # instead of the ``Geometry`` argument, so the geometry filter
        # was never forwarded to the server.
        'geometry': Geometry,
        'inSR': inSR,
        'spatialRel': spatialRel,
        'where': where,
        'outFields': outFields,
        'returnGeometry': returnGeometry,
        'outSR': outSR,
        'objectIds': objectIds,
        'time': utils.pythonvaluetotime(time),
        'maxAllowableOffset': maxAllowableOffset,
        'returnIdsOnly': returnIdsOnly})
    return gptypes.GPFeatureRecordSetLayer.fromJson(out._json_struct)
python
{ "resource": "" }
q42667
MapLayer.timeInfo
train
def timeInfo(self):
    """Return the time info for this Map Service, or None when absent.

    A copy of the structure is returned so the cached JSON is never
    mutated; any 'timeExtent' entry is converted to Python values.
    """
    info = self._json_struct.get('timeInfo', {})
    if not info:
        return None
    info = info.copy()
    if 'timeExtent' in info:
        info['timeExtent'] = utils.timetopythonvalue(info['timeExtent'])
    return info
python
{ "resource": "" }
q42668
GPExecutionResult.results
train
def results(self):
    """Returns a dict of outputs from the GPTask execution.

    Each output value is converted via the ``fromJson`` hook of the
    datatype declared by the parent task's matching parameter; outputs
    with no declared datatype fall back to ``str``. The dict is computed
    once and cached on ``self._results``.
    """
    if self._results is None:
        results = self._json_struct['results']
        def result_iterator():
            # Yield (paramName, converted value) pairs for every result.
            for result in results:
                datatype = None
                conversion = None
                # Look up the declared datatype on the parent task.
                for param in self.parent.parameters:
                    if param['name'] == result['paramName']:
                        datatype = param['datatype']
                if datatype is None:
                    conversion = str
                else:
                    conversion = datatype.fromJson
                dt = result['paramName']
                val = conversion(result['value'])
                yield (dt, val)
        self._results = dict(res for res in result_iterator())
    return self._results
python
{ "resource": "" }
q42669
GPTask.Execute
train
def Execute(self, *params, **kw):
    """Synchronously execute the specified GP task. Parameters are
    passed in either in order or as keywords.

    :return: a GPExecutionResult resource for the finished run
    """
    # Normalize positional/keyword arguments into the form-parameter dict.
    fp = self.__expandparamstodict(params, kw)
    return self._get_subfolder('execute/', GPExecutionResult, fp)
python
{ "resource": "" }
q42670
GPTask.SubmitJob
train
def SubmitJob(self, *params, **kw):
    """Asynchronously execute the specified GP task. This will return
    a Geoprocessing Job object. Parameters are passed in either in
    order or as keywords.
    """
    # Normalize positional/keyword arguments into the form-parameter dict.
    fp = self.__expandparamstodict(params, kw)
    return self._get_subfolder('submitJob/', GPJob, fp)._jobstatus
python
{ "resource": "" }
q42671
NetworkLayer.SolveClosestFacility
train
def SolveClosestFacility(self, facilities=None, incidents=None,
                         barriers=None, polylineBarriers=None,
                         polygonBarriers=None,
                         attributeParameterValues=None,
                         returnDirections=None, directionsLanguage=None,
                         directionsStyleName=None,
                         directionsLengthUnits=None,
                         directionsTimeAttributeName=None,
                         returnCFRoutes=None, returnFacilities=None,
                         returnIncidents=None, returnBarriers=None,
                         returnPolylineBarriers=None,
                         returnPolygonBarriers=None,
                         facilityReturnType=None, outputLines=None,
                         defaultCutoff=None,
                         defaultTargetFacilityCount=None,
                         travelDirection=None, outSR=None,
                         impedanceAttributeName=None,
                         restrictionAttributeNames=None,
                         restrictUTurns=None, useHierarchy=None,
                         outputGeometryPrecision=None,
                         outputGeometryPrecisionUnits=None):
    """The solve operation is performed on a network layer resource
    of type closest facility.

    Placeholder: this operation is not implemented yet and always raises
    NotImplementedError.
    """
    raise NotImplementedError()
python
{ "resource": "" }
q42672
RouteNetworkLayer.Solve
train
def Solve(self, stops=None, barriers=None, returnDirections=None,
          returnRoutes=None, returnStops=None, returnBarriers=None,
          outSR=None, ignoreInvalidLocations=None, outputLines=None,
          findBestSequence=None, preserveFirstStop=None,
          preserveLastStop=None, useTimeWindows=None, startTime=None,
          accumulateAttributeNames=None, impedanceAttributeName=None,
          restrictionAttributeNames=None, restrictUTurns=None,
          useHierarchy=None, directionsLanguage=None,
          outputGeometryPrecision=None, directionsLengthUnits=None,
          directionsTimeAttributeName=None,
          attributeParameterValues=None, polylineBarriers=None,
          polygonBarriers=None):
    """The solve operation is performed on a network layer resource.
    At 9.3.1, the solve operation is supported only on the route layer.
    Or specifically, on a network layer whose layerType is
    esriNAServerRouteLayer.

    You can provide arguments to the solve route operation as query
    parameters defined in the parameters table below.
    """
    def ptlist_as_semilist(lst):
        # Serialize point lists into "x1,y1;x2,y2;..." form; a single
        # Point is first promoted to a one-element list, anything else
        # is passed through untouched.
        if isinstance(lst, geometry.Point):
            lst = [lst]
        if isinstance(lst, (list, tuple)):
            return ";".join(','.join(str(x) for x in pt)
                            for pt in lst)
        return lst
    # Only route layers support Solve at this server version.
    if self.layerType != "esriNAServerRouteLayer":
        raise TypeError("Layer is of type %s; Solve is not available."
                        % self.layerType)
    return self._get_subfolder('solve/', NetworkSolveResult,
                               {'stops': ptlist_as_semilist(stops),
                                'barriers': ptlist_as_semilist(barriers),
                                'returnDirections': returnDirections,
                                'returnRoutes': returnRoutes,
                                'returnStops': returnStops,
                                'returnBarriers': returnBarriers,
                                'outSR': outSR,
                                'ignoreInvalidLocations':
                                    ignoreInvalidLocations,
                                'outputLines': outputLines,
                                'findBestSequence': findBestSequence,
                                'preserveFirstStop': preserveFirstStop,
                                'preserveLastStop': preserveLastStop,
                                'useTimeWindows': useTimeWindows,
                                'startTime': startTime,
                                'accumulateAttributeNames':
                                    accumulateAttributeNames,
                                'impedanceAttributeName':
                                    impedanceAttributeName,
                                'restrictionAttributeNames':
                                    restrictionAttributeNames,
                                'restrictUTurns': restrictUTurns,
                                'useHierarchy': useHierarchy,
                                'directionsLanguage': directionsLanguage,
                                'outputGeometryPrecision':
                                    outputGeometryPrecision,
                                'directionsLengthUnits':
                                    directionsLengthUnits,
                                'directionsTimeAttributeName':
                                    directionsTimeAttributeName,
                                'attributeParameterValues':
                                    attributeParameterValues,
                                'polylineBarriers': polylineBarriers,
                                'polygonBarriers': polygonBarriers})
python
{ "resource": "" }
q42673
TaskBulksSerializer.save
train
def save(self, **kwargs):
    """
    Method that creates the translations tasks for every selected instance.

    Dispatches one Celery task per matched instance (asynchronous via
    ``.delay``), so the returned status only reports that scheduling
    succeeded, not that the translations exist yet.

    :param kwargs: unused, accepted for serializer-API compatibility
    :return: ``{'status': 'ok'}`` on successful scheduling
    :raises serializers.ValidationError: wrapping any error raised while
        querying or dispatching
    """
    try:
        # result_ids = []
        manager = Manager()
        # Iterate the selected instances in the main language only.
        for item in self.model_class.objects.language(manager.get_main_language()).filter(pk__in=self.ids).all():
            create_translations_for_item_and_its_children.delay(self.model_class, item.pk, self.languages,
                                                                update_item_languages=True)
        # return TransTaskSerializer(TransTask.objects.filter(pk__in=result_ids), many=True).data
        return {'status': 'ok'}
    except Exception as e:
        # Broad catch is intentional here: any failure is surfaced to the
        # API client as a validation error.
        raise serializers.ValidationError(detail=str(e))
python
{ "resource": "" }
q42674
get_version
train
def get_version():
    """
    Get the current version of the package.

    Reads ``deepgram/version.py`` next to this file and extracts the value
    assigned to ``__version__``.

    :raises ValueError: when no ``__version__`` line is found
    """
    version_py = os.path.join(os.path.dirname(__file__), 'deepgram', 'version.py')
    with open(version_py, 'r') as fh:
        for line in fh:
            if not line.startswith('__version__'):
                continue
            return line.split('=')[-1].strip().replace('"', '')
    raise ValueError('Failed to parse version from: {}'.format(version_py))
python
{ "resource": "" }
q42675
config
train
def config(env=DEFAULT_ENV, default=None, **overrides):
    """Returns configured REDIS dictionary from REDIS_URL.

    :param env: environment variable to read the URL from
    :param default: fallback URL when the variable is unset
    :param overrides: keyword settings that override parsed values
        (keys are upper-cased to match the parsed config)
    """
    config = {}
    s = os.environ.get(env, default)
    if s:
        config = parse(s)
    # Idiom fix: dict comprehension instead of dict([(k, v) ...]).
    config.update({k.upper(): v for k, v in overrides.items()})
    return config
python
{ "resource": "" }
q42676
hash_filesystem
train
def hash_filesystem(filesystem, hashtype='sha1'):
    """Utility function for running the files iterator at once.

    First attempts the bulk ``checksums('/')`` call; if that fails with a
    RuntimeError, falls back to walking every node and hashing regular
    files one by one, skipping anything that cannot be accessed.

    Returns a dictionary.

        {'/path/on/filesystem': 'file_hash'}
    """
    try:
        return dict(filesystem.checksums('/'))
    except RuntimeError:
        results = {}
        logging.warning("Error hashing disk %s contents, iterating over files.",
                        filesystem.disk_path)
        for path in filesystem.nodes('/'):
            try:
                regular = stat.S_ISREG(filesystem.stat(path)['mode'])
            except RuntimeError:
                continue  # unaccessible node
            if regular:
                try:
                    results[path] = filesystem.checksum(path,
                                                        hashtype=hashtype)
                except RuntimeError:
                    # Best effort: unreadable files are logged and skipped.
                    logging.debug("Unable to hash %s.", path)
        return results
python
{ "resource": "" }
q42677
FileSystem.fsroot
train
def fsroot(self):
    """Returns the file system root.

    On Windows guests this is the mapped drive letter (e.g. ``C:\\``);
    otherwise it is the first mountpoint of the inspected root.
    """
    if self.osname == 'windows':
        return '{}:\\'.format(
            self._handler.inspect_get_drive_mappings(self._root)[0][0])
    else:
        return self._handler.inspect_get_mountpoints(self._root)[0][0]
python
{ "resource": "" }
q42678
FileSystem.mount
train
def mount(self, readonly=True):
    """Mounts the given disk.
    It must be called before any other method.

    :param readonly: when True each partition is mounted read-only.
    """
    # NOTE(review): the drive itself is always added read-only regardless
    # of the ``readonly`` argument — presumably ``readonly=readonly`` was
    # intended; confirm before relying on read-write mounts.
    self._handler.add_drive_opts(self.disk_path, readonly=True)
    self._handler.launch()
    # Mount every detected partition in the order given by _inspect_disk.
    for mountpoint, device in self._inspect_disk():
        if readonly:
            self._handler.mount_ro(device, mountpoint)
        else:
            self._handler.mount(device, mountpoint)
    # Pick the path-joining helper matching the guest OS conventions.
    if self._handler.inspect_get_type(self._root) == 'windows':
        self.path = self._windows_path
    else:
        self.path = posix_path
python
{ "resource": "" }
q42679
FileSystem._inspect_disk
train
def _inspect_disk(self): """Inspects the disk and returns the mountpoints mapping as a list which order is the supposed one for correct mounting. """ roots = self._handler.inspect_os() if roots: self._root = roots[0] return sorted(self._handler.inspect_get_mountpoints(self._root), key=lambda m: len(m[0])) else: raise RuntimeError("No OS found on the given disk image.")
python
{ "resource": "" }
q42680
FileSystem.download
train
def download(self, source, destination):
    """Downloads the file on the disk at source into destination.

    :param source: path inside the guest disk (normalized to POSIX form)
    :param destination: path on the host file system
    """
    self._handler.download(posix_path(source), destination)
python
{ "resource": "" }
q42681
FileSystem.nodes
train
def nodes(self, path):
    """Iterates over the files and directories contained within
    the disk starting from the given path.

    Yields the path of the nodes, rendered with the guest OS path style.
    """
    path = posix_path(path)
    # find() returns paths relative to `path`; re-anchor them.
    yield from (self.path(path, e) for e in self._handler.find(path))
python
{ "resource": "" }
q42682
FileSystem.checksum
train
def checksum(self, path, hashtype='sha1'):
    """Returns the checksum of the given path.

    :param path: guest path of the file to hash
    :param hashtype: hashing algorithm name understood by the handler
    """
    return self._handler.checksum(hashtype, posix_path(path))
python
{ "resource": "" }
q42683
FileSystem.checksums
train
def checksums(self, path, hashtype='sha1'):
    """Iterates over the files hashes contained within the disk starting
    from the given path.

    The hashtype keyword allows to choose the file hashing algorithm.

    Yields the following values:

        "C:\\Windows\\System32\\NTUSER.DAT", "hash" for windows
        "/home/user/text.txt", "hash" for other FS
    """
    # checksums_out writes "hash  ./relative/path" lines to a file; we
    # stream them back through an unbuffered temporary file.
    with NamedTemporaryFile(buffering=0) as tempfile:
        self._handler.checksums_out(hashtype, posix_path(path),
                                    tempfile.name)
        # Strip the leading '.' from each relative path and re-render it
        # with the guest OS path style.
        yield from ((self.path(f[1].lstrip('.')), f[0])
                    for f in (l.decode('utf8').strip().split(None, 1)
                              for l in tempfile))
python
{ "resource": "" }
q42684
ListenCloselyApp.attend_pendings
train
def attend_pendings(self):
    """
    Check all chats created with no agent assigned yet.
    Schedule a timer timeout to call it.

    Assigns free agents to pending chats until agents run out, and
    returns the list of chats that were attended in this pass.
    """
    chats_attended = []
    pending_chats = Chat.pending.all()
    for pending_chat in pending_chats:
        free_agent = self.strategy.free_agent()
        if free_agent:
            pending_chat.attend_pending(free_agent, self)
            pending_chat.save()
            chats_attended.append(pending_chat)
        else:
            # No agent available: stop early, remaining chats stay pending.
            break
    return chats_attended
python
{ "resource": "" }
q42685
ListenCloselyApp.terminate_obsolete
train
def terminate_obsolete(self):
    """
    Check chats that can be considered obsolete and terminate them.

    :return: list of the chats terminated in this pass
    """
    terminated = []
    for chat in Chat.live.all():
        if not chat.is_obsolete(self.time_obsolete_offset):
            continue
        chat.terminate()
        chat.save()
        terminated.append(chat)
    return terminated
python
{ "resource": "" }
q42686
ListenCloselyApp.on_message
train
def on_message(self, message_id_service, contact_id_service, content):
    """
    To use as callback in message service backend.

    Routes the incoming message to the live chat the contact belongs to
    (as agent or asker); when none exists, starts new-chat processing.
    """
    try:
        # A contact may appear on either side of a live chat.
        live_chat = Chat.live.get(
            Q(agent__id_service=contact_id_service) |
            Q(asker__id_service=contact_id_service))
    except ObjectDoesNotExist:
        self._new_chat_processing(message_id_service, contact_id_service,
                                  content)
    else:
        live_chat.handle_message(message_id_service, contact_id_service,
                                 content, self)
python
{ "resource": "" }
q42687
decode_path
train
def decode_path(file_path):
    """Turn a path name into unicode.

    ``None`` passes through unchanged; byte strings are decoded with the
    file system encoding; anything else is returned as-is.
    """
    if file_path is None:
        return None
    if isinstance(file_path, six.binary_type):
        return file_path.decode(sys.getfilesystemencoding())
    return file_path
python
{ "resource": "" }
q42688
SocketConnector.handle_set_key
train
def handle_set_key(self):
    """Read incoming key from server.

    Wire format: track id (int), row (int), value (float), key
    interpolation type (byte). The value is stored on the matching track.
    """
    track_id = self.reader.int()
    row = self.reader.int()
    value = self.reader.float()
    kind = self.reader.byte()
    logger.info(" -> track=%s, row=%s, value=%s, type=%s",
                track_id, row, value, kind)
    # Add or update track value
    track = self.tracks.get_by_id(track_id)
    track.add_or_update(row, value, kind)
python
{ "resource": "" }
q42689
SocketConnector.handle_delete_key
train
def handle_delete_key(self):
    """Read incoming delete key event from server.

    Wire format: track id (int), row (int). Removes the key stored at
    that row on the matching track.
    """
    track_id = self.reader.int()
    row = self.reader.int()
    logger.info(" -> track=%s, row=%s", track_id, row)
    # Delete the actual track value
    track = self.tracks.get_by_id(track_id)
    track.delete(row)
python
{ "resource": "" }
q42690
SocketConnector.handle_set_row
train
def handle_set_row(self):
    """Read incoming row change from server and move the controller there."""
    row = self.reader.int()
    logger.info(" -> row: %s", row)
    self.controller.row = row
python
{ "resource": "" }
q42691
SocketConnector.handle_pause
train
def handle_pause(self):
    """Read pause signal from server.

    A non-zero flag byte pauses playback; zero resumes it.
    """
    paused = self.reader.byte() > 0
    if paused:
        logger.info(" -> pause: on")
        self.controller.playing = False
    else:
        logger.info(" -> pause: off")
        self.controller.playing = True
python
{ "resource": "" }
q42692
TransLanguage.save
train
def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
    """
    Overwrite of the save method in order that when setting the language
    as main we deactivate any other model selected as main before.

    :param force_insert: passed through to Django's ``Model.save``
    :param force_update: passed through to Django's ``Model.save``
    :param using: passed through to Django's ``Model.save``
    :param update_fields: passed through to Django's ``Model.save``
    :return: None
    """
    super().save(force_insert, force_update, using, update_fields)
    # Enforce a single main language: clear the flag on every other row.
    if self.main_language:
        TransLanguage.objects.exclude(pk=self.pk).update(main_language=False)
python
{ "resource": "" }
q42693
BaseTransformer.check_data_type
train
def check_data_type(self):
    """Check that the transformer type and the column metadata type match.

    Args:
        column_metadata(dict): Metadata of the column.

    Raises:
        ValueError: if the types don't match.
    """
    metadata_type = self.column_metadata.get('type')
    mismatch = (self.type != metadata_type
                and metadata_type not in self.type)
    if mismatch:
        raise ValueError('Types of transformer don\'t match')
python
{ "resource": "" }
q42694
DTTransformer.fit
train
def fit(self, col):
    """Prepare the transformer to convert data.

    Args:
        col(pandas.DataFrame): Data to transform.

    Returns:
        None
    """
    dates = self.safe_datetime_cast(col)
    # Default value in nanoseconds since the epoch.
    # NOTE(review): groupby(...).count().index[0] selects the first
    # (smallest) distinct date, not the most frequent one — confirm
    # whether the mode was intended here.
    self.default_val = dates.groupby(dates).count().index[0].timestamp() * 1e9
python
{ "resource": "" }
q42695
DTTransformer.safe_datetime_cast
train
def safe_datetime_cast(self, col):
    """Parses string values into datetime.

    Args:
        col(pandas.DataFrame): Data to transform.

    Returns:
        pandas.Series
    """
    casted_dates = pd.to_datetime(col[self.col_name], format=self.date_format, errors='coerce')
    if len(casted_dates[casted_dates.isnull()]):
        # This will raise an error for bad formatted data
        # but not for out of bonds or missing dates.
        slice_ = casted_dates.isnull() & ~col[self.col_name].isnull()
        # The result is intentionally discarded: strptime_format is only
        # applied to make badly formatted values raise.
        col[slice_][self.col_name].apply(self.strptime_format)
    return casted_dates
python
{ "resource": "" }
q42696
DTTransformer.to_timestamp
train
def to_timestamp(self, data):
    """Transform a datetime series into linux epoch (nanoseconds).

    Args:
        data(pandas.DataFrame): DataFrame containing a column named as
            `self.col_name`.

    Returns:
        pandas.Series: nanosecond timestamps, NaN where input was null.
    """
    out = pd.Series(index=data.index)
    mask = data[self.col_name].notnull()
    out[mask] = data.loc[mask, self.col_name].astype('int64')
    return out
python
{ "resource": "" }
q42697
Hsp.chop_sequence
train
def chop_sequence(sequence, limit_length):
    """Divide the input sequence into smaller non-overlapping sequences
    of at most ``limit_length`` items each.
    """
    pieces = []
    for offset in range(0, len(sequence), limit_length):
        pieces.append(sequence[offset:offset + limit_length])
    return pieces
python
{ "resource": "" }
q42698
Hsp.get_tabular_str
train
def get_tabular_str(self):
    """Create a table-like string ("label<TAB>value" per line) from the
    HSP fields.

    Returns an empty string when any of the fields is missing, mirroring
    the original all-or-nothing behaviour.

    Fix: the previous bare ``except: pass`` swallowed every exception
    (including KeyboardInterrupt); only a missing attribute is an
    expected condition, so the handler is narrowed to AttributeError.
    """
    pairs = (
        ("length", "align_length"),
        ("e-value", "expect"),
        ("score", "score"),
        ("identities", "identities"),
        ("positives", "positives"),
        ("bits", "bits"),
        ("query start", "query_start"),
        ("query end", "query_end"),
        ("subject start", "sbjct_start"),
        ("subject end", "sbjct_end"),
    )
    try:
        # Resolve every attribute up front so a missing one yields ""
        # (as before) rather than a partially built table.
        values = [(label, getattr(self, attr)) for label, attr in pairs]
    except AttributeError:
        return ""
    return "".join("{}\t{}\n".format(label, value)
                   for label, value in values)
python
{ "resource": "" }
q42699
Alignment.best_identities
train
def best_identities(self):
    """Return the percent identities (rounded to one decimal) of the
    best HSP in the alignment, or None when the alignment has no HSPs.
    """
    if not self.hsp_list:
        return None
    best = self.hsp_list[0]
    return round(float(best.identities) / float(best.align_length) * 100, 1)
python
{ "resource": "" }