_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q44100
cli
train
def cli(ctx):
    """
    This is a command line app to get useful stats from a trello board
    and report on them in useful ways.

    Requires the following environment variables:

        TRELLOSTATS_APP_KEY=<your key here>
        TRELLOSTATS_APP_TOKEN=<your token here>
    """
    # Stash the Trello credentials on the click context for subcommands.
    ctx.obj = dict()
    ctx.obj['app_key'] = os.environ.get('TRELLOSTATS_APP_KEY')
    ctx.obj['app_token'] = os.environ.get('TRELLOSTATS_APP_TOKEN')
    # NOTE(review): init_db/db_proxy are defined elsewhere in the module;
    # presumably this prepares the local stats database — confirm.
    init_db(db_proxy)
python
{ "resource": "" }
q44101
translation
train
def translation(language):
    """Return the (cached) translation object for *language* in the
    default 'django' domain, creating and memoizing it on first use."""
    global _translations
    try:
        return _translations[language]
    except KeyError:
        trans = Translations(language)
        _translations[language] = trans
        return trans
python
{ "resource": "" }
q44102
gettext
train
def gettext(message):
    """Translate *message*.

    Uses the current thread's active translation object if one is set;
    otherwise lazily initializes and uses the default-language translation.
    """
    global _default
    if not _default:
        _default = translation(DEFAULT_LANGUAGE)
    active = getattr(_active, 'value', _default)
    return active.gettext(message)
python
{ "resource": "" }
q44103
Translations._new_gnu_trans
train
def _new_gnu_trans(self, localedir, use_null_fallback=True): """ Return a mergeable gettext.GNUTranslations instance. A convenience wrapper. By default gettext uses 'fallback=False'. Using param `use_null_fallback` to avoid confusion with any other references to 'fallback'. """ use_null_fallback = False return gettext_module.translation( domain=self.domain, localedir=localedir, languages=[self.language], codeset='utf-8', fallback=use_null_fallback)
python
{ "resource": "" }
q44104
Translations.add_localedir_translations
train
def add_localedir_translations(self, localedir):
    """Merge translations from *localedir*, at most once per directory.

    Catalogs are looked up under '<localedir>/locale'; directories already
    registered in self.localedirs are skipped.
    """
    # Removed a stray `global _localedirs` declaration: the module-level
    # name was never read or assigned in this body, so it was dead code.
    if localedir in self.localedirs:
        return
    self.localedirs.append(localedir)
    full_localedir = os.path.join(localedir, 'locale')
    if os.path.exists(full_localedir):
        translation = self._new_gnu_trans(full_localedir)
        self.merge(translation)
python
{ "resource": "" }
q44105
Translations.merge
train
def merge(self, other):
    """Fold another translation catalog into this one.

    NullTranslations instances (no ``_catalog``) are ignored.  The first
    real catalog merged also supplies ``plural`` and ``_info``; later
    catalogs simply update the message dictionary.
    """
    other_catalog = getattr(other, '_catalog', None)
    if not other_catalog:
        return  # nothing to merge, e.g. NullTranslations()
    if self._catalog is None:
        # First catalog seen: adopt its plural rule and metadata too.
        self.plural = other.plural
        self._info = other._info.copy()
        self._catalog = other_catalog.copy()
    else:
        self._catalog.update(other_catalog)
python
{ "resource": "" }
q44106
decode_input
train
def decode_input(text_in):
    """Decode *text_in* from UTF-8.

    If text_in is a list of byte strings, decode each element and join
    them with single spaces into one output string; otherwise decode the
    single value.

    :param text_in: a UTF-8 byte string, or a list of them.
    :return: the decoded unicode string.
    """
    # isinstance is the idiomatic (and subclass-friendly) type test;
    # the original used `type(text_in) == list`.
    if isinstance(text_in, list):
        return u' '.join(t.decode('utf-8') for t in text_in)
    return text_in.decode('utf-8')
python
{ "resource": "" }
q44107
File._set_local_file_path
train
def _set_local_file_path(self): """ Take from environment variable, create dirs and create file if doesn' exist. """ self.FILE_LOCAL = self._transfer.get_env('FILE_LOCAL') if not self.FILE_LOCAL: filename = '{}_{}.{}'.format(str(self._transfer.prefix), str(self._transfer.namespace), str(self.file_extension)) self.FILE_LOCAL = os.path.join(os.path.expanduser("~"), filename) dirs = os.path.dirname(self.FILE_LOCAL) if not os.path.exists(dirs): os.makedirs(dirs) try: open(self.FILE_LOCAL, "rb+").close() except: open(self.FILE_LOCAL, "a").close()
python
{ "resource": "" }
q44108
gen_post_status
train
def gen_post_status():
    """
    Show only published posts outside debug.

    Returns a SQLAlchemy filter clause: PUBLISH-only when the Flask app
    is not in DEBUG mode, otherwise PUBLISH or DRAFT.
    """
    if not app.config["DEBUG"]:
        # NOTE(review): and_() with a single clause is effectively a no-op
        # wrapper; kept as-is in case callers rely on the clause type.
        post_status = and_(Post.status == PostStatus.PUBLISH)
    else:
        post_status = or_(Post.status == PostStatus.PUBLISH,
                          Post.status == PostStatus.DRAFT)
    return post_status
python
{ "resource": "" }
q44109
Subject.subscribe
train
def subscribe(self, observer):
    """Register *observer* with this subject.

    :return: a SubscribeID handle that can later be used to unsubscribe.
    """
    sid = self._sn
    self._sn = sid + 1
    self.observers[sid] = observer
    return SubscribeID(self, sid)
python
{ "resource": "" }
q44110
Subject.unsubscribe
train
def unsubscribe(self, sid):
    """Disconnect an observer from this subject.

    :param sid: subscription id previously returned by subscribe().
    :raises KeyError: if *sid* does not identify a registered observer.
    """
    if sid not in self.observers:
        # Fixed the garbled error message ("a observer does not connected").
        raise KeyError(
            'Cannot disconnect an observer that is not connected to this subject'
        )
    del self.observers[sid]
python
{ "resource": "" }
q44111
make_module_class
train
def make_module_class(name):
    """Replace the module *name* in sys.modules with a class instance
    exposing the same members.

    Descriptor members (anything non-class with a ``__get__``) go on the
    generated type so attribute access triggers the descriptor protocol;
    all other members are copied onto the instance.  The original module
    stays reachable via the ``__source`` attribute.
    """
    source = sys.modules[name]

    def _is_descriptor(obj):
        # A descriptor: not a class itself, but implements __get__.
        return not isinstance(obj, type) and hasattr(obj, '__get__')

    descriptors = {}
    plain = {}
    for key, value in vars(source).items():
        if _is_descriptor(value):
            descriptors[key] = value
        else:
            plain[key] = value
    descriptors['__source'] = source
    replacement = type(name, (types.ModuleType,), descriptors)(name)
    replacement.__dict__.update(plain)
    sys.modules[name] = replacement
python
{ "resource": "" }
q44112
build_ann
train
def build_ann(N_input=None, N_hidden=2, N_output=1, hidden_layer_type='Linear', verbosity=1):
    """Build a neural net with the indicated input, hidden, and outout dimensions

    Arguments:
        params (dict or PyBrainParams namedtuple):
            default: {'N_hidden': 6}
            (this is the only parameter that affects the NN build)

    Returns:
        FeedForwardNetwork with N_input + N_hidden + N_output nodes in 3 layers
    """
    # Normalize arguments: scalars become 1-tuples so hidden-layer sizes and
    # types can be zipped pairwise below.  NOTE: `basestring` => Python 2 code.
    N_input = N_input or 1
    N_output = N_output or 1
    N_hidden = N_hidden or tuple()
    if isinstance(N_hidden, (int, float, basestring)):
        N_hidden = (int(N_hidden),)
    hidden_layer_type = hidden_layer_type or tuple()
    hidden_layer_type = tuplify(normalize_layer_type(hidden_layer_type))
    if verbosity > 0:
        print(N_hidden, ' layers of type ', hidden_layer_type)
    assert(len(N_hidden) == len(hidden_layer_type))
    nn = pb.structure.FeedForwardNetwork()
    # layers
    nn.addInputModule(pb.structure.BiasUnit(name='bias'))
    nn.addInputModule(pb.structure.LinearLayer(N_input, name='input'))
    for i, (Nhid, hidlaytype) in enumerate(zip(N_hidden, hidden_layer_type)):
        Nhid = int(Nhid)
        # First hidden layer is named plain 'hidden'; later ones 'hidden-i'.
        nn.addModule(hidlaytype(Nhid, name=('hidden-{}'.format(i) if i else 'hidden')))
    nn.addOutputModule(pb.structure.LinearLayer(N_output, name='output'))
    # connections
    nn.addConnection(pb.structure.FullConnection(nn['bias'], nn['hidden'] if N_hidden else nn['output']))
    nn.addConnection(pb.structure.FullConnection(nn['input'], nn['hidden'] if N_hidden else nn['output']))
    # Chain consecutive hidden layers together.
    for i, (Nhid, hidlaytype) in enumerate(zip(N_hidden[:-1], hidden_layer_type[:-1])):
        Nhid = int(Nhid)
        nn.addConnection(pb.structure.FullConnection(nn[('hidden-{}'.format(i) if i else 'hidden')], nn['hidden-{}'.format(i + 1)]))
    # NOTE(review): with an empty N_hidden, i becomes -1 (truthy) and the
    # lookup below would request a nonexistent 'hidden--1' layer — confirm
    # callers never hit this path with zero hidden layers.
    i = len(N_hidden) - 1
    nn.addConnection(pb.structure.FullConnection(nn['hidden-{}'.format(i) if i else 'hidden'], nn['output']))
    nn.sortModules()
    if FAST:
        try:
            nn.convertToFastNetwork()
        except:  # NOTE(review): bare except silently keeps the slow network on any failure
            if verbosity > 0:
                print('Unable to convert slow PyBrain NN to a fast ARAC network...')
    if verbosity > 0:
        print(nn.connections)
    return nn
python
{ "resource": "" }
q44113
inputs_from_dataframe
train
def inputs_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=None, normalize=True, verbosity=1):
    """
    Build a sequence of vectors suitable for "activation" by a neural net.

    Identical to `dataset_from_dataframe`, except only the input vectors
    are returned (not a full DataSet instance) and `outputs` defaults to
    None.
    """
    dataset = input_dataset_from_dataframe(
        df=df,
        delays=delays,
        inputs=inputs,
        outputs=outputs,
        normalize=normalize,
        verbosity=verbosity,
    )
    return dataset['input']
python
{ "resource": "" }
q44114
build_trainer
train
def build_trainer(nn, ds, verbosity=1):
    """Configure an RProp- (resilient backprop) trainer for network *nn*
    from the pybrain dataset *ds*."""
    trainer = pb.supervised.trainers.rprop.RPropMinusTrainer(
        nn,
        dataset=ds,
        batchlearning=True,
        verbose=bool(verbosity),
    )
    return trainer
python
{ "resource": "" }
q44115
plot_network_results
train
def plot_network_results(network, ds=None, mean=0, std=1, title='', show=True, save=True):
    """Identical to plot_trainer except `network` and `ds` must be provided separately

    Simulates `network` on `ds`, denormalizes with mean/std, plots the
    result, and optionally shows and/or saves the figure.

    Returns:
        3-tuple: (network, mean, std)
    """
    df = sim_network(network=network, ds=ds, mean=mean, std=std)
    df.plot()
    plt.xlabel('Date')
    plt.ylabel('Threshold (kW)')
    plt.title(title)
    if show:
        try:
            # ipython notebook overrides plt.show and doesn't have a block kwarg
            plt.show(block=False)
        except TypeError:
            plt.show()
    if save:
        filename = 'ann_performance_for_{0}.png'.format(title).replace(' ', '_')
        # `save` may also be a directory path (NOTE: basestring => Python 2).
        if isinstance(save, basestring) and os.path.isdir(save):
            filename = os.path.join(save, filename)
        plt.savefig(filename)
    if not show:
        # Clear the figure so a non-displayed plot doesn't leak state.
        plt.clf()
    return network, mean, std
python
{ "resource": "" }
q44116
trainer_results
train
def trainer_results(trainer, mean=0, std=1, title='', show=True, save=True):
    """Plot the performance of the Network and SupervisedDataSet in a pybrain Trainer.

    DataSet target and output values are denormalized before plotting with
    ``output * std + mean``, which inverts the normalization
    ``(output - mean) / std``.

    Args:
        trainer (Trainer): pybrain Trainer holding a valid Network and DataSet
        mean (float): mean of the denormalized dataset (default: 0);
            only affects the scale of the plot
        std (float): standard deviation of the denormalized dataset (default: 1)
        title (str): title to display on the plot

    Returns:
        3-tuple: (trainer, mean, std), a trainer/dataset plus denormalization info
    """
    network = trainer.module
    dataset = trainer.ds
    return plot_network_results(network=network, ds=dataset, mean=mean,
                                std=std, title=title, show=show, save=save)
python
{ "resource": "" }
q44117
lex
train
def lex(args):
    """
    Lex input and return a list of actions to perform.

    Each action is a (COMMAND, payload) tuple; unrecognized input prints a
    diagnostic and yields [].
    NOTE(review): `args[1]` is read unguarded when args[0] == SET, so a
    bare 'set' command would raise IndexError — confirm callers validate.
    """
    if len(args) == 0 or args[0] == SHOW:
        return [(SHOW, None)]
    elif args[0] == LOG:
        return [(LOG, None)]
    elif args[0] == ECHO:
        return [(ECHO, None)]
    elif args[0] == SET and args[1] == RATE:
        return tokenizeSetRate(args[2:])
    elif args[0] == SET and args[1] == DAYS:
        return tokenizeSetDays(args[2:])
    elif args[0] == TAKE:
        return tokenizeTake(args[1:])
    elif args[0] == CANCEL:
        return tokenizeCancel(args[1:])
    elif isMonth(args[0]):
        # A bare month name is shorthand for a TAKE command.
        return tokenizeTake(args)
    else:
        print('Unknown commands: {}'.format(' '.join(args)))
        return []
python
{ "resource": "" }
q44118
Action.run_svc_action
train
def run_svc_action(self, name, replace=None, svc=None):
    """
    backwards compatible to reflex service object. This looks for hooks
    on current object as well as in the actions sub-object.

    :param name: action name to run.
    :param replace: optional %{} template-substitution dict passed to run().
    :param svc: reflex service object (dict-like) searched for the action.
    """
    actions = svc.get('actions')
    if actions and actions.get(name):
        return self.run(name, actions=actions, replace=replace)
    # Legacy form: a top-level '<name>-hook' URL directly on the service.
    if svc.get(name + "-hook"):
        return self.run(name, actions={
            name: {
                "type": "hook",
                "url": svc.get(name + "-hook")
            }
        }, replace=replace)
    self.die("Unable to find action {name} on service {svc}",
             name=name, svc=svc.get('name', ''))
python
{ "resource": "" }
q44119
Action.run
train
def run(self, name, replace=None, actions=None):
    """
    Do an action. If `replace` is provided as a dictionary, do a
    search/replace using %{} templates on content of action (unique to
    action type).

    Dispatches to a handler method named '_run__<type>'; all failures are
    routed through self.die().
    """
    self.actions = actions  # incase we use group
    action = actions.get(name)
    if not action:
        self.die("Action not found: {}", name)
    action['name'] = name
    action_type = action.get('type', "none")
    try:
        func = getattr(self, '_run__' + action_type)
    except AttributeError:
        self.die("Unsupported action type " + action_type)
    try:
        return func(action, replace)
    except Exception as err:  # pylint: disable=broad-except
        if self._debug:
            self.debug(traceback.format_exc())
        self.die("Error running action name={} type={} error={}",
                 name, action_type, err)
python
{ "resource": "" }
q44120
Action._run__group
train
def _run__group(self, action, replace):
    """ Run a group of actions in sequence.

    >>> Action().run("several", actions={
    ...        "several": {
    ...            "type": "group",
    ...            "actions": ["hello","call","then"]
    ...        }, "hello": {
    ...            "type": "exec",
    ...            "cmd": "echo version=%{version}"
    ...        }, "call": {
    ...            "type": "hook",
    ...            "url": "http://reflex.cold.org"
    ...        }, "then": {
    ...            "type": "exec",
    ...            "cmd": "echo finished"
    ...        }}, replace={
    ...        "version": "1712.10"
    ... })
    version=1712.10
    """
    # Each member action is looked up in self.actions (stashed by run()).
    for target in action.get('actions', []):
        Action().run(target, actions=self.actions, replace=replace)
python
{ "resource": "" }
q44121
Action._run__exec
train
def _run__exec(self, action, replace):
    """ Run a system command

    >>> Action().run("hello", actions={
    ...        "hello": {
    ...            "type": "exec",
    ...            "cmd": "echo version=%{version}"
    ...        }}, replace={
    ...        "version": "1712.10"
    ... })
    version=1712.10
    """
    cmd = action.get('cmd')
    # A plain string is executed through the shell; a list is exec'd directly.
    shell = False
    if isinstance(cmd, str):
        shell = True
    if replace and action.get("template", True):
        # Expand %{...} macros; list commands are expanded per element.
        if shell:
            cmd = self.rfxcfg.macro_expand(cmd, replace)
        else:
            cmd = [self.rfxcfg.macro_expand(x, replace) for x in cmd]
    self.logf("Action {} exec\n", action['name'])
    self.logf("{}\n", cmd, level=common.log_cmd)
    if self.sys(cmd):
        self.logf("Success\n", level=common.log_good)
        return
    self.die("Failure\n", level=common.log_err)
python
{ "resource": "" }
q44122
_get_arg_names
train
def _get_arg_names(func): ''' this returns the arg names since dictionaries dont guarantee order ''' args, varargs, keywords, defaults = inspect.getargspec(func) return(tuple(args))
python
{ "resource": "" }
q44123
strict_defaults
train
def strict_defaults(fn):
    ''' use this decorator to enforce type checking on functions based on
    the function's defaults

    Every argument of *fn* must have a default value; each call argument
    is then required to be an instance of its default's type.
    '''
    @wraps(fn)
    def wrapper(*args, **kwargs):
        defaults = _get_default_args(fn)
        # dictionary that holds each default's type
        needed_types = {key: type(defaults[key]) for key in defaults}
        # ordered tuple of the function's argument names
        arg_names = _get_arg_names(fn)
        assert not len(arg_names) - len(fn.__defaults__), \
            '{} needs default variables on all arguments'.format(fn.__name__)
        # merge positional args into kwargs for uniform checking
        for i in range(len(args)):
            # BUG FIX: the original tested `args[i] not in kwargs.keys()`,
            # i.e. the argument VALUE against the keyword names; it is the
            # parameter NAME that must be absent before merging.
            if arg_names[i] not in kwargs:
                kwargs[arg_names[i]] = args[i]
        # assert that they're all the correct type; checked one by one so
        # the failing argument can be reported
        for name in needed_types:
            assert isinstance(kwargs[name], needed_types[name]), \
                'got {} and expected a {}'.format(kwargs[name], needed_types[name])
        return fn(**kwargs)
    return wrapper
python
{ "resource": "" }
q44124
update_mailing_lists_in_m2m
train
def update_mailing_lists_in_m2m(
    sender=None,
    userprofile=None,
    pk_set=None,
    subscribe=None,
    unsubscribe=None,
    verbose=None,
    email_enabled=None,
):
    """
    m2m signal handler that creates mailing lists and (un)subscribes the
    profile's user for each enabled Notification added to the profile.

    m2m_model = m2m model class for 'email_notifications' or
    'sms_notifications'.

    :return: the last mailing-list API response, or None if nothing ran.
    """
    response = None
    email_enabled = email_enabled or settings.EMAIL_ENABLED
    if email_enabled and site_notifications.loaded:
        # Only react to changes on the email_notifications through-model.
        if userprofile.email_notifications.through == sender:
            NotificationModel = django_apps.get_model("edc_notification.Notification")
            for notification_obj in NotificationModel.objects.filter(
                pk__in=list(pk_set), enabled=True
            ):
                notification_cls = site_notifications.get(notification_obj.name)
                notification = notification_cls()
                manager = MailingListManager(
                    address=notification.email_to[0],
                    display_name=notification.display_name,
                    name=notification.name,
                )
                # Ensure the list exists before managing membership.
                response = manager.create(verbose=verbose)
                if subscribe:
                    response = manager.subscribe(userprofile.user, verbose=verbose)
                elif unsubscribe:
                    response = manager.unsubscribe(userprofile.user, verbose=verbose)
    return response
python
{ "resource": "" }
q44125
superdict
train
def superdict(arg=()):
    """Recursive defaultdict which can init with other dict.

    Missing keys materialize as further superdicts, so arbitrarily deep
    assignment (d['a']['b']['c'] = ...) works without setup.
    """
    result = defaultdict(superdict)
    result.update(arg)
    return result
python
{ "resource": "" }
q44126
deepcopy
train
def deepcopy(data):
    """Use pickle to do deep_copy.

    Pickle round-tripping is usually faster than copy.deepcopy; on a
    TypeError (unpicklable data) fall back to copy.deepcopy.
    Note: intentionally shadows copy.deepcopy's name within this module.
    """
    try:
        blob = pickle.dumps(data)
        return pickle.loads(blob)
    except TypeError:
        return copy.deepcopy(data)
python
{ "resource": "" }
q44127
deepcp
train
def deepcp(data):
    """Use ujson to do deep_copy

    NOTE(review): a JSON round-trip is only a faithful deep copy for
    JSON-serializable data — tuples become lists and dict keys become
    strings; anything unserializable falls back to copy.deepcopy.
    """
    # Imported lazily so ujson is only required when this helper is used.
    import ujson
    try:
        return ujson.loads(ujson.dumps(data))
    except Exception:
        return copy.deepcopy(data)
python
{ "resource": "" }
q44128
_do_denormalize
train
def _do_denormalize (version_tuple):
    """separate action function to allow for the memoize decorator.
    Lists, the most common thing passed in to the 'denormalize' below
    are not hashable.

    Groups the flat version tuple four elements at a time, renders each
    group via the parallel `_denormalize_fn_list` formatters, and joins
    the non-empty parts with dots.
    NOTE: `itertools.imap` makes this Python 2-only code.
    """
    version_parts_list = []
    # izip-style grouping: iterating the SAME iterator 4 ways yields
    # consecutive 4-tuples (any trailing partial group is dropped).
    for parts_tuple in itertools.imap(None,*([iter(version_tuple)]*4)):
        version_part = ''.join(fn(x) for fn, x in zip(_denormalize_fn_list, parts_tuple))
        if version_part:
            version_parts_list.append(version_part)
    return '.'.join(version_parts_list)
python
{ "resource": "" }
q44129
History.load
train
def load(self, revision_path):
    """
    Load revision file.

    :param revision_path: path to the revision file.
    :type revision_path: str
    :raises RuntimeError: if the file is missing or a section fails to parse.
    """
    if not os.path.exists(revision_path):
        raise RuntimeError("revision file does not exist.")
    with open(revision_path, mode='r') as f:
        text = f.read()
    # Revisions are delimited by '## ' section headers.
    rev_strings = text.split("## ")
    for rev_string in rev_strings:
        # Skip empty fragments and '# '-prefixed comment fragments.
        if len(rev_string) == 0 or rev_string[:2] == "# ":
            continue
        try:
            revision = Revision()
            revision.parse(rev_string)
        except RuntimeError:
            # NOTE(review): re-raised with an empty message, discarding the
            # original parse error — consider chaining the cause.
            raise RuntimeError("")
        # Append in file order.
        self.insert(revision, len(self.revisions))
python
{ "resource": "" }
q44130
from_ast
train
def from_ast(
    pyast_node, node=None, node_cls=None,
    Node=Node, iter_fields=ast.iter_fields, AST=ast.AST):
    '''Convert the ast tree to a tater tree.

    Recursively mirrors a Python `ast` node as a Node whose `type` is the
    ast class name and whose attrs are the node's fields.

    NOTE(review): in the list branches below, `value` is rebound on every
    list item, so only the LAST AST element of a list field is recorded —
    this looks like a latent bug; confirm against callers before changing.
    '''
    node_cls = node_cls or Node
    node = node or node_cls()
    name = pyast_node.__class__.__name__
    attrs = []
    for field, value in iter_fields(pyast_node):
        if name == 'Dict':
            # Dict nodes are special-cased: pair each key with its value.
            for key, value in zip(pyast_node.keys, pyast_node.values):
                if isinstance(value, list):
                    for item in value:
                        if isinstance(item, AST):
                            value = from_ast(item)
                elif isinstance(value, AST):
                    value = from_ast(value)
                attrs.append((key.s, value))
        else:
            if isinstance(value, list):
                for item in value:
                    if isinstance(item, AST):
                        value = from_ast(item)
            elif isinstance(value, AST):
                value = from_ast(value)
            attrs.append((field, value))
    node.update(attrs, type=name)
    return node
python
{ "resource": "" }
q44131
Ladder.load
train
def load(self, ladderName):
    """retrieve the ladder settings from saved disk file"""
    # NOTE(review): self.filename is presumably derived from self.name
    # (defined elsewhere on the class), so name must be set before the
    # open() below — confirm.
    self.name = ladderName  # preset value to load self.filename
    with open(self.filename, "rb") as f:
        data = f.read()
    # Overwrite instance attributes wholesale with the saved JSON dict.
    self.__dict__.update( json.loads(data) )
python
{ "resource": "" }
q44132
import_localities
train
def import_localities(path, delimiter=';'):
    """
    Import localities from a CSV file.

    :param path: Path to the CSV file containing the localities.
    :param delimiter: CSV field delimiter (default ';').
    :return: (creates, updates) — lists of created and updated Locality rows.
    """
    creates = []
    updates = []
    with open(path, mode="r") as infile:
        reader = csv.DictReader(infile, delimiter=str(delimiter))
        # All rows are imported inside a single database transaction.
        with atomic():
            for row in reader:
                # Build a GEOS point from the CSV longitude/latitude columns.
                row['point'] = Point(float(row['longitude']), float(row['latitude']))
                locality, created = Locality.objects.update_or_create(
                    id=row['id'], defaults=row
                )
                if created:
                    creates.append(locality)
                else:
                    updates.append(locality)
    return creates, updates
python
{ "resource": "" }
q44133
get_netid_subscriptions
train
def get_netid_subscriptions(netid, subscription_codes):
    """
    Return a list of uwnetid.subscription objects for the given netid and
    subscription code (or list of codes).
    """
    resource_url = _netid_subscription_url(netid, subscription_codes)
    return _json_to_subscriptions(get_resource(resource_url))
python
{ "resource": "" }
q44134
select_subscription
train
def select_subscription(subs_code, subscriptions):
    """
    Return the first uwnetid.subscription object whose subscription_code
    equals *subs_code*, or None when absent or when either argument is
    falsy.
    """
    if not subs_code or not subscriptions:
        return None
    for entry in subscriptions:
        if entry.subscription_code == subs_code:
            return entry
    return None
python
{ "resource": "" }
q44135
modify_subscription_status
train
def modify_subscription_status(netid, subscription_code, status):
    """
    Post a subscription 'modify' action for the given netid and
    subscription_code; returns the parsed subscription list.
    """
    url = _netid_subscription_url(netid, subscription_code)
    payload = json.dumps({'action': 'modify', 'value': str(status)})
    return _json_to_subscriptions(post_resource(url, payload))
python
{ "resource": "" }
q44136
_netid_subscription_url
train
def _netid_subscription_url(netid, subscription_codes):
    """
    Build the UWNetID subscription resource URL for the provided netid and
    subscription code (or list/tuple of codes, joined with commas).
    """
    if isinstance(subscription_codes, (list, tuple)):
        codes = ','.join(str(code) for code in subscription_codes)
    else:
        codes = subscription_codes
    return "{0}/{1}/subscription/{2}".format(url_base(), netid, codes)
python
{ "resource": "" }
q44137
_json_to_subscriptions
train
def _json_to_subscriptions(response_body):
    """
    Returns a list of Subscription objects parsed from the JSON
    `response_body` — one per 'subscriptionList' entry, each keyed to the
    top-level 'uwNetID'.
    """
    data = json.loads(response_body)
    subscriptions = []
    for subscription_data in data.get("subscriptionList", []):
        subscriptions.append(Subscription().from_json(
            data.get('uwNetID'), subscription_data))
    return subscriptions
python
{ "resource": "" }
q44138
_json_to_subscription_post_response
train
def _json_to_subscription_post_response(response_body):
    """
    Returns a list of SubscriptionPostResponse objects — one per
    'responseList' entry in the JSON body, each keyed to the top-level
    'uwNetID'.
    """
    data = json.loads(response_body)
    netid = data.get('uwNetID')
    return [
        SubscriptionPostResponse().from_json(netid, entry)
        for entry in data.get("responseList", [])
    ]
python
{ "resource": "" }
q44139
SignedPermission.has_permission
train
def has_permission(self, request, view):
    """Check list and create permissions based on sign and filters."""
    # Detail routes are handled by has_object_permission instead.
    if view.suffix == 'Instance':
        return True
    filter_and_actions = self._get_filter_and_actions(
        request.query_params.get('sign'),
        view.action,
        '{}.{}'.format(
            view.queryset.model._meta.app_label,
            view.queryset.model._meta.model_name
        )
    )
    if not filter_and_actions:
        return False
    if request.method == 'POST':
        # Creates must not contradict the signed filters.
        # NOTE: iteritems/unicode make this Python 2-only code.
        for key, value in request.data.iteritems():
            # Do unicode conversion because value will always be a
            # string
            if (key in filter_and_actions['filters'] and
                    not unicode(filter_and_actions['filters'][key]) == unicode(value)):
                return False
    return True
python
{ "resource": "" }
q44140
SignedPermission.has_object_permission
train
def has_object_permission(self, request, view, obj=None):
    """Check object permissions based on the signed filters: the object
    must be reachable through the queryset restricted by those filters."""
    model_label = '{}.{}'.format(obj._meta.app_label, obj._meta.model_name)
    filter_and_actions = self._get_filter_and_actions(
        request.query_params.get('sign'), view.action, model_label)
    if not filter_and_actions:
        return False
    restricted = view.queryset.filter(**filter_and_actions['filters'])
    return restricted.filter(id=obj.id).exists()
python
{ "resource": "" }
q44141
FileAwareParser.add_file_argument
train
def add_file_argument(self, *args, **kwargs):
    """
    Add an argument that represents the location of a file, recording it
    in self.file_args for later path handling.

    :return: the argparse action created by add_argument.
    """
    action = self.add_argument(*args, **kwargs)
    self.file_args.append(action)
    return action
python
{ "resource": "" }
q44142
FileAwareParser.add_argument
train
def add_argument(self, *args, **kwargs):
    """
    Add an argument, incorporating the default value into the help string.

    Defaults are honoured only when self.use_defaults is set; help actions
    and arguments without help text or a default pass through unchanged.
    """
    help_text = kwargs.pop("help", None)
    declared_default = kwargs.pop("default", None)
    default = declared_default if self.use_defaults else None
    show_default = (bool(help_text) and default is not None
                    and kwargs.get('action') != 'help')
    if show_default:
        help_text = help_text + " (default: {})".format(default)
    return super().add_argument(*args, help=help_text, default=default, **kwargs)
python
{ "resource": "" }
q44143
pfdicom.tagsInString_process
train
def tagsInString_process(self, d_DICOM, astr, *args, **kwargs):
    """
    This method substitutes DICOM tags that are '%'-tagged in a string
    template with the actual tag lookup.  For example:

        %PatientAge-%PatientID-output.txt  ->  006Y-4412364-output.txt

    A tag may also carry an underscore-delimited function, applied to the
    tag value before substitution, e.g.

        %PatientAge-%_md5|4_PatientID-output.txt -> 006Y-7f38-output.txt

    Supported functions: md5 (optionally '|N' to keep N chars),
    strmsk ('|mask' where non-'*' mask chars overwrite), and
    nospc (optionally '|char' to join words after stripping non-alphanumerics).

    :return: dict with 'status', 'b_tagsFound' and the substituted 'str_result'.
    """
    b_tagsFound     = False
    str_replace     = ''        # The lookup/processed tag value
    l_tags          = []        # The input string split by '%'
    l_tagsToSub     = []        # Remove any noise etc from each tag
    l_funcTag       = []        # a function/tag list
    l_args          = []        # the 'args' of the function
    func            = ''        # the function to apply
    tag             = ''        # the tag in the funcTag combo
    chars           = ''        # number of resultant chars from func result to use
    if '%' in astr:
        l_tags = astr.split('%')[1:]
        # Find which tags (mangled) in string match actual tags
        l_tagsToSub = [i for i in d_DICOM['l_tagRaw'] if any(i in b for b in l_tags)]
        # Need to arrange l_tagsToSub in same order as l_tags
        l_tagsToSubSort = sorted(
            l_tagsToSub,
            key = lambda x: [i for i, s in enumerate(l_tags) if x in s][0]
        )
        for tag, func in zip(l_tagsToSubSort, l_tags):
            b_tagsFound = True
            str_replace = d_DICOM['d_dicomSimple'][tag]
            if 'md5' in func:
                # Hash the value, optionally truncating to '|N' chars.
                str_replace = hashlib.md5(str_replace.encode('utf-8')).hexdigest()
                l_funcTag = func.split('_')[1:]
                func = l_funcTag[0]
                l_args = func.split('|')
                if len(l_args) > 1:
                    chars = l_args[1]
                    str_replace = str_replace[0:int(chars)]
                astr = astr.replace('_%s_' % func, '')
            if 'strmsk' in func:
                # Overlay the mask: '*' keeps the original character,
                # anything else overwrites it.
                l_funcTag = func.split('_')[1:]
                func = l_funcTag[0]
                str_msk = func.split('|')[1]
                l_n = []
                for i, j in zip(list(str_replace), list(str_msk)):
                    if j == '*':
                        l_n.append(i)
                    else:
                        l_n.append(j)
                str_replace = ''.join(l_n)
                astr = astr.replace('_%s_' % func, '')
            if 'nospc' in func:
                # Strip non-alphanumerics and join words with the optional
                # '|char' separator (default: no separator).
                l_funcTag = func.split('_')[1:]
                func = l_funcTag[0]
                l_args = func.split('|')
                str_char = ''
                if len(l_args) > 1:
                    str_char = l_args[1]
                # strip out all non-alphnumeric chars and
                # replace with space
                str_replace = re.sub(r'\W+', ' ', str_replace)
                # replace all spaces with str_char
                str_replace = str_char.join(str_replace.split())
                astr = astr.replace('_%s_' % func, '')
            astr = astr.replace('%' + tag, str_replace)
    return {
        'status':       True,
        'b_tagsFound':  b_tagsFound,
        'str_result':   astr
    }
python
{ "resource": "" }
q44144
pfdicom.DICOMfile_read
train
def DICOMfile_read(self, *args, **kwargs):
    """
    Read a DICOM file and perform some initial parsing of tags.

    NB! For thread safety, class member variables should not be assigned
    since other threads might override/change these variables in mid-
    flight!

    kwargs: 'file' (path to read) and 'l_tagsToUse' (tag subset);
    args[0] may alternatively be a one-element list holding the path.

    :return: dict with read status, input path/filename, the templated
        output file stem, the parsed d_DICOM structure and the tag list.
    """
    b_status        = False
    l_tags          = []
    l_tagsToUse     = []
    d_tagsInString  = {}
    str_file        = ""
    d_DICOM = {
        'dcm':              None,
        'd_dcm':            {},
        'strRaw':           '',
        'l_tagRaw':         [],
        'd_json':           {},
        'd_dicom':          {},
        'd_dicomSimple':    {}
    }
    for k, v in kwargs.items():
        if k == 'file':         str_file    = v
        if k == 'l_tagsToUse':  l_tags      = v
    # Positional form: args[0] is a list whose first element is the path.
    if len(args):
        l_file      = args[0]
        str_file    = l_file[0]
    str_localFile   = os.path.basename(str_file)
    str_path        = os.path.dirname(str_file)
    try:
        d_DICOM['dcm'] = dicom.read_file(str_file)
        b_status = True
    except:
        self.dp.qprint('In directory: %s' % os.getcwd(), comms = 'error')
        self.dp.qprint('Failed to read %s' % str_file, comms = 'error')
        b_status = False
    # NOTE(review): if the read above failed, d_DICOM['dcm'] is still None
    # and the dict()/str()/.dir() calls below will raise — confirm callers
    # never reach here with an unreadable file.
    d_DICOM['d_dcm']    = dict(d_DICOM['dcm'])
    d_DICOM['strRaw']   = str(d_DICOM['dcm'])
    d_DICOM['l_tagRaw'] = d_DICOM['dcm'].dir()
    if len(l_tags):
        l_tagsToUse = l_tags
    else:
        l_tagsToUse = d_DICOM['l_tagRaw']
    # PixelData is bulk image data, not a metadata tag — never stringify it.
    if 'PixelData' in l_tagsToUse:
        l_tagsToUse.remove('PixelData')
    for key in l_tagsToUse:
        d_DICOM['d_dicom'][key] = d_DICOM['dcm'].data_element(key)
        try:
            d_DICOM['d_dicomSimple'][key] = getattr(d_DICOM['dcm'], key)
        except:
            d_DICOM['d_dicomSimple'][key] = "no attribute"
        d_DICOM['d_json'][key] = str(d_DICOM['d_dicomSimple'][key])
    # Expand any %-tags in the configured output file stem.
    d_tagsInString  = self.tagsInString_process(d_DICOM, self.str_outputFileStem)
    str_outputFile  = d_tagsInString['str_result']
    return {
        'status':           b_status,
        'inputPath':        str_path,
        'inputFilename':    str_localFile,
        'outputFileStem':   str_outputFile,
        'd_DICOM':          d_DICOM,
        'l_tagsToUse':      l_tagsToUse
    }
python
{ "resource": "" }
q44145
pfdicom.filelist_prune
train
def filelist_prune(self, at_data, *args, **kwargs):
    """
    Given a (path, filelist) tuple, possibly prune the list down to files
    whose names contain self.str_extension; an empty result is reported
    as an error.

    :return: dict with 'status' and the sorted 'l_file' list (or None).
    """
    str_path, al_file = at_data[0], at_data[1]
    if len(self.str_extension):
        al_file = [f for f in al_file if self.str_extension in f]
    if len(al_file):
        al_file.sort()
        return {'status': True, 'l_file': al_file}
    self.dp.qprint(
        "No valid files to analyze found in path %s!" % str_path,
        comms='error', level=3)
    return {'status': False, 'l_file': None}
python
{ "resource": "" }
q44146
pfdicom.run
train
def run(self, *args, **kwargs):
    """
    The run method is merely a thin shim down to the embedded pftree run
    method.

    kwargs:
        timerStart (bool): restart the global tic() timer before running.

    :return: dict with overall 'status', environment check results, the
        pftree run results, optional input-pruning analysis and 'runTime'.
    """
    b_status            = True
    d_pftreeRun         = {}
    d_inputAnalysis     = {}
    d_env               = self.env_check()
    b_timerStart        = False
    self.dp.qprint(
        "\tStarting pfdicom run... (please be patient while running)",
        level = 1
    )
    for k, v in kwargs.items():
        if k == 'timerStart':
            b_timerStart = bool(v)
    if b_timerStart:
        other.tic()
    if d_env['status']:
        d_pftreeRun = self.pf_tree.run(timerStart = False)
    else:
        b_status = False
    # Work relative to the input directory for the pruning pass.
    str_startDir = os.getcwd()
    os.chdir(self.str_inputDir)
    if b_status:
        if len(self.str_extension):
            # Prune the input tree down to files matching the extension.
            d_inputAnalysis = self.pf_tree.tree_process(
                inputReadCallback       = None,
                analysisCallback        = self.filelist_prune,
                outputWriteCallback     = None,
                applyResultsTo          = 'inputTree',
                applyKey                = 'l_file',
                persistAnalysisResults  = True
            )
    os.chdir(str_startDir)
    # `and` short-circuits, so d_pftreeRun['status'] is only read on success.
    d_ret = {
        'status':           b_status and d_pftreeRun['status'],
        'd_env':            d_env,
        'd_pftreeRun':      d_pftreeRun,
        'd_inputAnalysis':  d_inputAnalysis,
        'runTime':          other.toc()
    }
    if self.b_json:
        self.ret_dump(d_ret, **kwargs)
    self.dp.qprint('\tReturning from pfdicom run...', level = 1)
    return d_ret
python
{ "resource": "" }
q44147
MajorDomoClient.reconnect_to_broker
train
def reconnect_to_broker(self):
    """(Re)establish the DEALER socket connection to the broker, tearing
    down any previous socket first."""
    if self.client:
        self.poller.unregister(self.client)
        self.client.close()
    sock = self.ctx.socket(zmq.DEALER)
    sock.linger = 0
    sock.connect(self.broker)
    self.client = sock
    self.poller.register(sock, zmq.POLLIN)
    if self.verbose:
        logging.info("I: connecting to broker at %s...", self.broker)
python
{ "resource": "" }
q44148
MajorDomoClient.send
train
def send(self, service, request):
    """Send request to broker

    :param service: service name (printable string).
    :param request: message frame(s); a bare frame is wrapped in a list.
    """
    if not isinstance(request, list):
        request = [request]
    # Prefix request with protocol frames
    # Frame 0: empty (REQ emulation)
    # Frame 1: "MDPCxy" (six bytes, MDP/Client x.y)
    # Frame 2: Service name (printable string)
    request = ['', MDP.C_CLIENT, service] + request
    if self.verbose:
        logging.warn("I: send request to '%s' service: ", service)
        dump(request)
    self.client.send_multipart(request)
python
{ "resource": "" }
q44149
MajorDomoClient.recv
train
def recv(self):
    """Returns the reply message or None if there was no reply."""
    try:
        items = self.poller.poll(self.timeout)
    except KeyboardInterrupt:
        return  # interrupted
    if items:
        # if we got a reply, process it
        msg = self.client.recv_multipart()
        self.close()
        if self.verbose:
            logging.info("I: received reply:")
            dump(msg)
        # Don't try to handle errors, just assert noisily
        assert len(msg) >= 4
        # first pop drops the empty REQ-emulation delimiter frame
        header = msg.pop(0)
        # second frame must be the MDP/Client protocol header
        header = msg.pop(0)
        assert MDP.C_CLIENT == header
        # this one contains servicename
        # TODO: exploit this
        header = msg.pop(0)
        return msg
    else:
        logging.warn("W: permanent error, abandoning request")
python
{ "resource": "" }
q44150
get_identity_document
train
async def get_identity_document(client: Client, current_block: dict, pubkey: str) -> Identity: """ Get the identity document of the pubkey :param client: Client to connect to the api :param current_block: Current block data :param pubkey: UID/Public key :rtype: Identity """ # Here we request for the path wot/lookup/pubkey lookup_data = await client(bma.wot.lookup, pubkey) # init vars uid = None timestamp = BlockUID.empty() signature = None # parse results for result in lookup_data['results']: if result["pubkey"] == pubkey: uids = result['uids'] for uid_data in uids: # capture data timestamp = BlockUID.from_str(uid_data["meta"]["timestamp"]) uid = uid_data["uid"] signature = uid_data["self"] # return self-certification document return Identity( version=10, currency=current_block['currency'], pubkey=pubkey, uid=uid, ts=timestamp, signature=signature )
python
{ "resource": "" }
q44151
get_certification_document
train
def get_certification_document(current_block: dict, self_cert_document: Identity, from_pubkey: str) -> Certification: """ Create and return a Certification document :param current_block: Current block data :param self_cert_document: Identity document :param from_pubkey: Pubkey of the certifier :rtype: Certification """ # construct Certification Document return Certification(version=10, currency=current_block['currency'], pubkey_from=from_pubkey, identity=self_cert_document, timestamp=BlockUID(current_block['number'], current_block['hash']), signature="")
python
{ "resource": "" }
q44152
detect_cycle
train
def detect_cycle(graph):
    """
    search the given directed graph for cycles

    returns None if the given graph is cycle free, otherwise it
    returns a path through the graph that contains a cycle

    :param graph: directed graph as a node -> successors mapping
    :return: a path (list of nodes) containing a cycle, or None
    """
    seen = set()
    for start in list(graph):
        if start in seen:
            continue
        found = _dfs_cycle_detect(graph, start, [start], seen)
        if found:
            return found
    return None
python
{ "resource": "" }
q44153
main
train
def main():
    # Python 2 script entry point: consume sub-commands from argv.
    cmd = sys.argv
    cmd.pop(0)
    """ parse arguments and make go """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-s', '--src',
        help='source folder to watch',
        default='.',
        dest='src',
        metavar='folder'
    )
    parser.add_argument(
        '-d', '--dest',
        # NOTE(review): help text looks copy-pasted from --src;
        # presumably this is the output folder -- confirm.
        help='source folder to watch',
        default=None,
        dest='dest',
        metavar='folder'
    )
    args = parser.parse_args()
    print 'Initializing...'
    config.source_dir = os.path.abspath(args.src)
    if args.dest != None:
        config.dest_dir = os.path.abspath(args.dest)
    init_sources(config.source_dir)
    # Any remaining positional word is dispatched to a same-named
    # module-level function (e.g. a "watch" command).
    if cmd:
        c = cmd[0]
        commands = globals()
        if c in commands:
            commands[c]()
python
{ "resource": "" }
q44154
init_sources
train
def init_sources(path):
    """ initializes array of groups and their associated js files

    Scans ``path`` for files with the configured source extension and
    registers each as a Script in config.sources, then resolves its
    dependencies.
    """
    for f in dir_list(path):
        if(os.path.splitext(f)[1][1:] == config.source_ext):
            print "Source file discovered: %s" % (f)
            script = Script(f)
            # NOTE(review): membership is tested against script.filename
            # but the entry is stored under script.path -- confirm these
            # agree, otherwise duplicate entries may be created.
            if (script.filename not in config.sources.keys()):
                config.sources[script.path] = script
                parse.parse_dependencies(script,script)
python
{ "resource": "" }
q44155
start_scanner
train
def start_scanner(path):
    """ watch for file events in the supplied path

    Blocks until interrupted (Ctrl-C) or an I/O error occurs, then
    tears the observer down.
    """
    try:
        observer = Observer()
        observer.start()
        stream = Stream(file_modified, path, file_events=True)
        observer.schedule(stream)
        print "Watching for changes. Press Ctrl-C to stop."
        # Busy-wait; the observer thread delivers events to file_modified.
        while 1:
            pass
    except (KeyboardInterrupt, OSError, IOError):
        observer.unschedule(stream)
        observer.stop()
python
{ "resource": "" }
q44156
file_modified
train
def file_modified(event):
    """ react to file events

    Re-parses the changed script itself when it is a raw source file,
    otherwise re-parses its parents.
    """
    # React only to names matching the watched pattern or ones already tracked.
    if re.match(config.file_regex,event.name) or (event.name in config.sources.keys()):
        print "Change detected to: %s" % (event.name)
        # Reset the in-progress parse stack for a fresh run.
        config.stack = []
        # NOTE(review): a regex match alone does not guarantee event.name
        # is a key of config.sources -- confirm this cannot KeyError.
        script = config.sources[event.name]
        if script.extension == config.source_ext:
            parse.parse_file(script)
        else:
            parse.parse_parents(script)
python
{ "resource": "" }
q44157
iter_tuple_from_csv
train
def iter_tuple_from_csv(path, iterator=False, chunksize=None, skiprows=None,
                        nrows=None, **kwargs):
    """A high performance, low memory usage csv file row iterator function.

    Rows are produced by zipping all columns together rather than via
    ``iterrows``/``itertuples``, which avoids building Series objects
    and index lookups per row.

    :param path: csv file path.
    :param iterator: when True, read the csv chunk by chunk.
    :param chunksize: rows per chunk when iterating.
    :param skiprows: rows to skip at the start of the file.
    :param nrows: maximum number of rows to read.
    :yield tuple: one tuple per csv row.
    """
    kwargs.update(iterator=iterator, chunksize=chunksize,
                  skiprows=skiprows, nrows=nrows)
    # With iterator=True pandas yields DataFrame chunks; otherwise wrap
    # the single DataFrame so both paths share one loop.
    if iterator is True:
        frames = pd.read_csv(path, **kwargs)
    else:
        frames = (pd.read_csv(path, **kwargs),)
    for frame in frames:
        for row in itertuple(frame):
            yield row
python
{ "resource": "" }
q44158
index_row_dict_from_csv
train
def index_row_dict_from_csv(path,
                            index_col=None,
                            iterator=False,
                            chunksize=None,
                            skiprows=None,
                            nrows=None,
                            use_ordered_dict=True,
                            **kwargs):
    """Read the csv into a dictionary. The key is its index, the value is
    the dictionary form of the row.

    :param path: csv file path.
    :param index_col: None or str, the column that is used as index;
        required (a plain Exception is raised when missing).
    :param iterator: when True, read the csv chunk by chunk.
    :param chunksize: rows per chunk when iterating.
    :param skiprows: rows to skip at the start of the file.
    :param nrows: maximum number of rows to read.
    :param use_ordered_dict: when True the result preserves row order.
    :returns: {index_1: row1, index2: row2, ...}
    """
    # First pass: read a single row just to discover the column names.
    _kwargs = dict(list(kwargs.items()))
    _kwargs["iterator"] = None
    _kwargs["chunksize"] = None
    _kwargs["skiprows"] = 0
    _kwargs["nrows"] = 1
    df = pd.read_csv(path, index_col=index_col, **_kwargs)
    columns = df.columns
    if index_col is None:
        raise Exception("please give index_col!")
    if use_ordered_dict:
        table = OrderedDict()
    else:
        table = dict()
    # Second pass: read the real data, optionally chunked.
    kwargs["iterator"] = iterator
    kwargs["chunksize"] = chunksize
    kwargs["skiprows"] = skiprows
    kwargs["nrows"] = nrows
    if iterator is True:
        for df in pd.read_csv(path, index_col=index_col, **kwargs):
            for ind, tp in zip(df.index, itertuple(df)):
                table[ind] = dict(zip(columns, tp))
    else:
        df = pd.read_csv(path, index_col=index_col, **kwargs)
        for ind, tp in zip(df.index, itertuple(df)):
            table[ind] = dict(zip(columns, tp))
    return table
python
{ "resource": "" }
q44159
native_path
train
def native_path(path):  # pragma: no cover
    """
    Always return a native path, that is unicode on Python 3 and
    bytestring on Python 2.

    Taken `from Django <http://bit.ly/1r3gogZ>`_.
    """
    needs_bytes = PY2 and not isinstance(path, bytes)
    return path.encode(fs_encoding) if needs_bytes else path
python
{ "resource": "" }
q44160
select_field
train
def select_field(col, field_or_fields, filters=None):
    """Select single or multiple fields.

    :params field_or_fields: str or list of str
    :returns headers: header (str) for a single field, list of str otherwise
    :return data: list of values for a single field, list of rows otherwise
    """
    selected = _preprocess_field_or_fields(field_or_fields)
    query = filters if filters is not None else dict()
    projection = dict.fromkeys(selected, True)
    cursor = col.find(query, projection)
    if len(selected) == 1:
        only = selected[0]
        return only, [doc.get(only) for doc in cursor]
    headers = list(selected)
    rows = [[doc.get(h) for h in headers] for doc in cursor]
    return headers, rows
python
{ "resource": "" }
q44161
select_distinct_field
train
def select_distinct_field(col, field_or_fields, filters=None):
    """Select distinct value or combination of values of single or
    multiple fields.

    :params field_or_fields: str or list of str.
    :param filters: optional query filter applied before grouping.
    :return data: list of distinct values (single field) or list of
        value lists (multiple fields).
    """
    fields = _preprocess_field_or_fields(field_or_fields)
    if filters is None:
        filters = dict()
    if len(fields) == 1:
        # Single field: delegate to MongoDB's native distinct.
        key = fields[0]
        data = list(col.find(filters).distinct(key))
        return data
    else:
        # Multiple fields: group on the tuple of field values.
        pipeline = [
            {
                "$match": filters
            },
            {
                "$group": {
                    "_id": {key: "$" + key for key in fields},
                },
            },
        ]
        data = list()
        for doc in col.aggregate(pipeline):
            # doc = {"_id": {"a": 0, "b": 0}}
            data.append([doc["_id"][key] for key in fields])
        return data
python
{ "resource": "" }
q44162
random_sample
train
def random_sample(col, n=5, filters=None):
    """Randomly select n documents from a query result set. If no query
    is specified, sample from the entire collection.

    :param col: pymongo collection
    :param n: sample size
    :param filters: optional $match filter applied before sampling
    :return: list of sampled documents
    """
    stages = []
    if filters is not None:
        stages.append({"$match": filters})
    stages.append({"$sample": {"size": n}})
    return list(col.aggregate(stages))
python
{ "resource": "" }
q44163
_before_flush_handler
train
def _before_flush_handler(session, _flush_context, _instances):
    """Update version ID for all dirty, modified rows.

    SQLAlchemy before_flush hook: only rows that are versioned
    (SavageModelMixin) and actually modified for the session's dialect
    receive a new version id.
    """
    dialect = get_dialect(session)
    for row in session.dirty:
        if isinstance(row, SavageModelMixin) and is_modified(row, dialect):
            # Update row version_id
            row.update_version_id()
python
{ "resource": "" }
q44164
register_text_type
train
def register_text_type(content_type, default_encoding, dumper, loader):
    """
    Register handling for a text-based content type.

    :param str content_type: content type to register the hooks for
    :param str default_encoding: encoding to use if none is present
        in the request
    :param dumper: called to decode a string into a dictionary.
        Calling convention: ``dumper(obj_dict).encode(encoding) -> bytes``
    :param loader: called to encode a dictionary to a string.
        Calling convention: ``loader(obj_bytes.decode(encoding)) -> dict``

    The decoding of a text content body takes into account decoding
    the binary request body into a string before calling the
    underlying dump/load routines.
    """
    content_type = headers.parse_content_type(content_type)
    # Drop parameters (e.g. charset) so the registry key is the bare type.
    content_type.parameters.clear()
    key = str(content_type)
    _content_types[key] = content_type
    handler = _content_handlers.setdefault(key, _ContentHandler(key))
    handler.dict_to_string = dumper
    handler.string_to_dict = loader
    # Keep any previously registered default when none is supplied here.
    handler.default_encoding = default_encoding or handler.default_encoding
python
{ "resource": "" }
q44165
register_binary_type
train
def register_binary_type(content_type, dumper, loader):
    """
    Register handling for a binary content type.

    :param str content_type: content type to register the hooks for
    :param dumper: called to decode bytes into a dictionary.
        Calling convention: ``dumper(obj_dict) -> bytes``.
    :param loader: called to encode a dictionary into a byte string.
        Calling convention: ``loader(obj_bytes) -> dict``
    """
    content_type = headers.parse_content_type(content_type)
    # Drop parameters so the registry key is the bare content type.
    content_type.parameters.clear()
    key = str(content_type)
    _content_types[key] = content_type
    handler = _content_handlers.setdefault(key, _ContentHandler(key))
    handler.dict_to_bytes = dumper
    handler.bytes_to_dict = loader
python
{ "resource": "" }
q44166
_ContentHandler.unpack_bytes
train
def unpack_bytes(self, obj_bytes, encoding=None):
    """Unpack a byte stream into a dictionary.

    :param obj_bytes: raw request body
    :param encoding: charset from the request, falling back to the
        handler's default encoding
    :return: the decoded dictionary
    """
    # At least one decoder must have been registered for this type.
    assert self.bytes_to_dict or self.string_to_dict
    encoding = encoding or self.default_encoding
    LOGGER.debug('%r decoding %d bytes with encoding of %s',
                 self, len(obj_bytes), encoding)
    if self.bytes_to_dict:
        # Binary decoder path; normalise the result to unicode strings.
        return escape.recursive_unicode(self.bytes_to_dict(obj_bytes))
    # Text decoder path: decode bytes first, then parse.
    return self.string_to_dict(obj_bytes.decode(encoding))
python
{ "resource": "" }
q44167
_ContentHandler.pack_bytes
train
def pack_bytes(self, obj_dict, encoding=None):
    """Pack a dictionary into a byte stream.

    :param obj_dict: dictionary to serialise
    :param encoding: requested charset; falls back to the handler's
        default, then utf-8
    :return: (encoding, bytes) tuple; encoding is None for binary types
    :raises tornado.web.HTTPError: 406 when the requested charset
        does not exist
    """
    assert self.dict_to_bytes or self.dict_to_string
    encoding = encoding or self.default_encoding or 'utf-8'
    LOGGER.debug('%r encoding dict with encoding %s', self, encoding)
    if self.dict_to_bytes:
        # Binary encoder: no charset applies.
        return None, self.dict_to_bytes(obj_dict)
    try:
        return encoding, self.dict_to_string(obj_dict).encode(encoding)
    except LookupError as error:
        # Unknown codec name -- the client asked for a charset we don't have.
        raise web.HTTPError(
            406, 'failed to encode result %r', error,
            reason='target charset {0} not found'.format(encoding))
    except UnicodeEncodeError as error:
        # Content not representable in the requested charset; utf-8 can
        # represent everything, so retry with it.
        LOGGER.warning('failed to encode text as %s - %s, trying utf-8',
                       encoding, str(error))
        return 'utf-8', self.dict_to_string(obj_dict).encode('utf-8')
python
{ "resource": "" }
q44168
HandlerMixin.get_request_body
train
def get_request_body(self):
    """
    Decodes the request body and returns it.

    The decoded body is cached on the handler, so repeated calls do
    not re-parse the request.

    :return: the decoded request body as a :class:`dict` instance.
    :raises: :class:`tornado.web.HTTPError` if the body cannot be
        decoded (415) or if decoding fails (400)
    """
    if self._request_body is None:
        content_type_str = self.request.headers.get(
            'Content-Type', 'application/octet-stream')
        LOGGER.debug('decoding request body of type %s', content_type_str)
        content_type = headers.parse_content_type(content_type_str)
        try:
            selected, requested = algorithms.select_content_type(
                [content_type], _content_types.values())
        except errors.NoMatch:
            # No registered handler understands this content type.
            raise web.HTTPError(
                415, 'cannot decoded content type %s', content_type_str,
                reason='Unexpected content type')
        handler = _content_handlers[str(selected)]
        try:
            self._request_body = handler.unpack_bytes(
                self.request.body,
                encoding=content_type.parameters.get('charset'),
            )
        except ValueError as error:
            # The handler understood the type but the payload was malformed.
            raise web.HTTPError(
                400, 'failed to decode content body - %r', error,
                reason='Content body decode failure')
    return self._request_body
python
{ "resource": "" }
q44169
HandlerMixin.send_response
train
def send_response(self, response_dict):
    """
    Encode a response according to the request.

    :param dict response_dict: the response to send
    :raises: :class:`tornado.web.HTTPError` if no acceptable content
        type exists (406)

    This method will encode `response_dict` using the most appropriate
    encoder based on the :mailheader:`Accept` request header and the
    available encoders. The result is written to the client by calling
    ``self.write`` after setting the response content type using
    ``self.set_header``.
    """
    accept = headers.parse_http_accept_header(
        self.request.headers.get('Accept', '*/*'))
    try:
        selected, _ = algorithms.select_content_type(
            accept, _content_types.values())
    except errors.NoMatch:
        raise web.HTTPError(406, 'no acceptable content type for %s in %r',
                            accept, _content_types.values(),
                            reason='Content Type Not Acceptable')
    LOGGER.debug('selected %s as outgoing content type', selected)
    handler = _content_handlers[str(selected)]
    # Honour Accept-Charset; '*' means "let the handler pick".
    accept = self.request.headers.get('Accept-Charset', '*')
    charsets = headers.parse_accept_charset(accept)
    charset = charsets[0] if charsets[0] != '*' else None
    LOGGER.debug('encoding response body using %r with encoding %s',
                 handler, charset)
    encoding, response_bytes = handler.pack_bytes(response_dict,
                                                  encoding=charset)
    if encoding:
        # don't overwrite the value in _content_types
        copied = datastructures.ContentType(selected.content_type,
                                            selected.content_subtype,
                                            selected.parameters)
        copied.parameters['charset'] = encoding
        selected = copied
    self.set_header('Content-Type', str(selected))
    self.write(response_bytes)
python
{ "resource": "" }
q44170
parse_env
train
def parse_env(config_schema, env):
    """Parse the values from a given environment against a given config schema

    Args:
        config_schema: A dict which maps the variable name to a Schema
            object that describes the requested value.
        env: A dict which represents the value of each variable in the
            environment.

    Raises:
        MissingConfigError: when a required variable is unset
            (signalled by the schema raising KeyError).
    """
    try:
        parsed = {}
        for name, schema in config_schema.items():
            parsed[name] = schema.parse(name, env.get(name))
        return parsed
    except KeyError as error:
        raise MissingConfigError(
            "Required config not set: {}".format(error.args[0])
        )
python
{ "resource": "" }
q44171
Schema.parse
train
def parse(self, key, value):
    """Parse the environment value for a given key against the schema.

    Args:
        key: The name of the environment variable (used in error messages).
        value: The value to be parsed, or None when the variable is unset.

    Raises:
        ParsingError: when the parser rejects the value.
        KeyError: when the variable is unset and no default was given;
            callers (parse_env) translate this into MissingConfigError.
    """
    if value is not None:
        try:
            return self._parser(value)
        except Exception:
            raise ParsingError("Error parsing {}".format(key))
    elif self._default is not SENTINAL:
        # Unset but a default was supplied at construction time.
        return self._default
    else:
        raise KeyError(key)
python
{ "resource": "" }
q44172
write_json_to_temp_file
train
def write_json_to_temp_file(data):
    """Writes JSON data to a temporary file and returns the path to it.

    The file is created with delete=False, so the caller owns its
    lifetime and must remove it when done.
    """
    with tempfile.NamedTemporaryFile(delete=False) as handle:
        handle.write(json.dumps(data).encode('utf-8'))
    return handle.name
python
{ "resource": "" }
q44173
mock_lockfile_update
train
def mock_lockfile_update(path):
    """
    This is a mock update. In place of this, you might simply shell out
    to a command like `yarn upgrade`.

    Overwrites the lockfile at ``path`` with the mocked contents and
    returns them.
    """
    new_contents = {
        'package1': '1.2.0'
    }
    with open(path, 'w+') as handle:
        json.dump(new_contents, handle, indent=4)
    return new_contents
python
{ "resource": "" }
q44174
print_settings_example
train
def print_settings_example():
    """
    You can use settings to get additional information from the user via
    their dependencies.io configuration file. Settings will be
    automatically injected as env variables with the "SETTING_" prefix.
    All settings will be passed as strings. More complex types will be
    json encoded. You should always provide defaults, if possible.
    """
    example_list = json.loads(os.getenv('SETTING_EXAMPLE_LIST', '[]'))
    example_string = os.getenv('SETTING_EXAMPLE_STRING', 'default')
    print('List setting values: {}'.format(example_list))
    print('String setting value: {}'.format(example_string))
python
{ "resource": "" }
q44175
otsu
train
def otsu(fpath):
    """ Returns value of otsu threshold for an image

    :param fpath: path to an image file; loaded as greyscale.
    :return: scalar threshold from skimage's Otsu implementation.
    """
    img = imread(fpath, as_grey=True)
    thresh = skimage.filter.threshold_otsu(img)
    return thresh
python
{ "resource": "" }
q44176
move_to
train
def move_to(name):
    """ Path to image folders

    :param name: image set name under ../gzoo_data/images/
    :return: normalized path to that folder (also printed).
    """
    # Start from the parent of this module's directory.
    datapath = path.join(path.dirname(path.realpath(__file__)), path.pardir)
    datapath = path.join(datapath, '../gzoo_data', 'images', name)
    print path.normpath(datapath)
    return path.normpath(datapath)
python
{ "resource": "" }
q44177
labels
train
def labels():
    """ Path to labels file

    :return: normalized path to ../gzoo_data/train_solution.csv,
        resolved relative to this module's location.
    """
    base = path.join(path.dirname(path.realpath(__file__)), path.pardir)
    return path.normpath(path.join(base, '../gzoo_data', 'train_solution.csv'))
python
{ "resource": "" }
q44178
_make_json_result
train
def _make_json_result(code, message="", results=None): """ An utility method to prepare a JSON result string, usable by the SignalReceiver :param code: A HTTP Code :param message: An associated message """ return code, json.dumps({'code': code, 'message': message, 'results': results})
python
{ "resource": "" }
q44179
temp_file_context
train
def temp_file_context(raw_dump_path, logger=None):
    """this contextmanager implements conditionally deleting a pathname
    at the end of a context if the pathname indicates that it is a temp
    file by having the word 'TEMPORARY' embedded in it.

    NOTE(review): written as a generator; presumably decorated with
    @contextmanager at the definition site -- confirm.
    """
    try:
        yield raw_dump_path
    finally:
        # Only paths explicitly marked as temporary are removed.
        if 'TEMPORARY' in raw_dump_path:
            try:
                os.unlink(raw_dump_path)
            except OSError:
                if logger is None:
                    logger = FakeLogger()
                logger.warning(
                    'unable to delete %s. manual deletion is required.',
                    raw_dump_path,
                    exc_info=True
                )
python
{ "resource": "" }
q44180
membership
train
async def membership(client: Client, membership_signed_raw: str) -> ClientResponse:
    """
    POST a Membership document

    :param client: Client to connect to the api
    :param membership_signed_raw: Membership signed raw document
    :return: the raw aiohttp response from the node
    """
    # Submit the signed document to the BMA membership endpoint.
    return await client.post(MODULE + '/membership',
                             {'membership': membership_signed_raw},
                             rtype=RESPONSE_AIOHTTP)
python
{ "resource": "" }
q44181
blocks
train
async def blocks(client: Client, count: int, start: int) -> list:
    """
    GET list of blocks from the blockchain

    :param client: Client to connect to the api
    :param count: Number of blocks
    :param start: First block number
    :return: list of block data validated against BLOCKS_SCHEMA
    """
    # Guard against non-int arguments leaking into the URL template.
    assert type(count) is int
    assert type(start) is int
    return await client.get(MODULE + '/blocks/%d/%d' % (count, start),
                            schema=BLOCKS_SCHEMA)
python
{ "resource": "" }
q44182
hardship
train
async def hardship(client: Client, pubkey: str) -> dict:
    """
    GET hardship level for given member's public key for writing next block

    :param client: Client to connect to the api
    :param pubkey: Public key of the member
    :return: hardship data validated against HARDSHIP_SCHEMA
    """
    return await client.get(MODULE + '/hardship/%s' % pubkey,
                            schema=HARDSHIP_SCHEMA)
python
{ "resource": "" }
q44183
block_uid
train
def block_uid(value: Union[str, BlockUID, None]) -> BlockUID:
    """
    Convert value to BlockUID instance

    :param value: a BlockUID (returned as-is), a string to parse,
        or None (yields the empty BlockUID)
    :return: a BlockUID instance
    :raises TypeError: for any other input type
    """
    if isinstance(value, BlockUID):
        return value
    if isinstance(value, str):
        return BlockUID.from_str(value)
    if value is None:
        return BlockUID.empty()
    raise TypeError("Cannot convert {0} to BlockUID".format(type(value)))
python
{ "resource": "" }
q44184
make_heartbeat
train
def make_heartbeat(port, path, peer_uid, node_uid, app_id): """ Prepares the heart beat UDP packet Format : Little endian * Kind of beat (1 byte) * Herald HTTP server port (2 bytes) * Herald HTTP servlet path length (2 bytes) * Herald HTTP servlet path (variable, UTF-8) * Peer UID length (2 bytes) * Peer UID (variable, UTF-8) * Node UID length (2 bytes) * Node UID (variable, UTF-8) * Application ID length (2 bytes) * Application ID (variable, UTF-8) :param port: The port to access the Herald HTTP server :param path: The path to the Herald HTTP servlet :param peer_uid: The UID of the peer :param node_uid: The UID of the node :param app_id: Application ID :return: The heart beat packet content (byte array) """ # Type and port... packet = struct.pack("<BBH", PACKET_FORMAT_VERSION, PACKET_TYPE_HEARTBEAT, port) for string in (path, peer_uid, node_uid, app_id): # Strings... string_bytes = to_bytes(string) packet += struct.pack("<H", len(string_bytes)) packet += string_bytes return packet
python
{ "resource": "" }
q44185
MulticastReceiver.start
train
def start(self): """ Starts listening to the socket :return: True if the socket has been created """ # Create the multicast socket (update the group) self._socket, self._group = create_multicast_socket(self._group, self._port) # Start the listening thread self._stop_event.clear() self._thread = threading.Thread( target=self.__read, name="MulticastReceiver-{0}".format(self._port)) self._thread.start()
python
{ "resource": "" }
q44186
MulticastReceiver.stop
train
def stop(self): """ Stops listening to the socket """ # Stop the loop self._stop_event.set() # Join the thread self._thread.join() self._thread = None # Close the socket close_multicast_socket(self._socket, self._group)
python
{ "resource": "" }
q44187
MulticastReceiver._handle_heartbeat
train
def _handle_heartbeat(self, sender, data): """ Handles a raw heart beat :param sender: Sender (address, port) tuple :param data: Raw packet data """ # Format of packet parsed, data = self._unpack("<B", data) format = parsed[0] if format == PACKET_FORMAT_VERSION: # Kind of beat parsed, data = self._unpack("<B", data) kind = parsed[0] if kind == PACKET_TYPE_HEARTBEAT: # Extract content parsed, data = self._unpack("<H", data) port = parsed[0] path, data = self._unpack_string(data) uid, data = self._unpack_string(data) node_uid, data = self._unpack_string(data) try: app_id, data = self._unpack_string(data) except struct.error: # Compatibility with previous version app_id = herald.DEFAULT_APPLICATION_ID elif kind == PACKET_TYPE_LASTBEAT: # Peer is going away uid, data = self._unpack_string(data) app_id, data = self._unpack_string(data) port = -1 path = None node_uid = None else: _logger.warning("Unknown kind of packet: %d", kind) return try: self._callback(kind, uid, node_uid, app_id, sender[0], port, path) except Exception as ex: _logger.exception("Error handling heart beat: %s", ex)
python
{ "resource": "" }
q44188
MulticastReceiver._unpack_string
train
def _unpack_string(self, data): """ Unpacks the next string from the given data :param data: A datagram, starting at a string size :return: A (string, unread_data) tuple """ # Get the size of the string result, data = self._unpack("<H", data) size = result[0] # Read it string_bytes = data[:size] # Convert it return to_unicode(string_bytes), data[size:]
python
{ "resource": "" }
q44189
MulticastReceiver.__read
train
def __read(self): """ Reads packets from the socket """ # Set the socket as non-blocking self._socket.setblocking(0) while not self._stop_event.is_set(): # Watch for content ready = select.select([self._socket], [], [], 1) if ready[0]: # Socket is ready data, sender = self._socket.recvfrom(1024) try: self._handle_heartbeat(sender, data) except Exception as ex: _logger.exception("Error handling the heart beat: %s", ex)
python
{ "resource": "" }
q44190
get_log_config
train
def get_log_config(component, handlers, level='DEBUG', path='/var/log/vfine/'):
    """Return a log config for django project.

    :param component: log file name prefix; files land in ``path``.
    :param handlers: handler names to attach to every configured logger.
    :param level: level for the console handler and the root logger.
    :param path: directory in which the rotating log files are written.
    :return: a ``logging.config.dictConfig``-compatible dictionary.
    """
    def _file_handler(handler_level, suffix, formatter, max_bytes):
        # Shared boilerplate for the rotating file handlers.
        return {
            'level': handler_level,
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': path + component + suffix,
            'maxBytes': max_bytes,
            'backupCount': 5,
            'formatter': formatter,
        }

    gib = 1024 * 1024 * 1024  # 1 GiB per debug/color/info file
    config = {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'standard': {
                'format': '%(asctime)s [%(levelname)s][%(threadName)s]'
                          '[%(name)s.%(funcName)s():%(lineno)d] %(message)s'
            },
            'color': {
                '()': 'shaw.log.SplitColoredFormatter',
                'format': "%(asctime)s "
                          "%(log_color)s%(bold)s[%(levelname)s]%(reset)s"
                          "[%(threadName)s][%(name)s.%(funcName)s():%(lineno)d] "
                          "%(blue)s%(message)s"
            }
        },
        'handlers': {
            'debug': _file_handler('DEBUG', '.debug.log', 'standard', gib),
            'color': _file_handler('DEBUG', '.color.log', 'color', gib),
            'info': _file_handler('INFO', '.info.log', 'standard', gib),
            # The error log rotates at 100 MiB, not 1 GiB.
            'error': _file_handler('ERROR', '.error.log', 'standard',
                                   1024 * 1024 * 100),
            'console': {
                'level': level,
                'class': 'logging.StreamHandler',
                'formatter': 'standard'
            },
        },
        'loggers': {
            'django': {
                'handlers': handlers,
                'level': 'INFO',
                'propagate': False
            },
            'django.request': {
                'handlers': handlers,
                'level': 'INFO',
                'propagate': False,
            },
            '': {
                'handlers': handlers,
                'level': level,
                'propagate': False
            },
        }
    }
    return config
python
{ "resource": "" }
q44191
SplitColoredFormatter.format
train
def format(self, record):
    """Format a message from a record object.

    Wraps the record for color attribute access, applies primary and
    secondary log colors, delegates the actual formatting, appends a
    trailing reset, then colors everything after the first '|'
    separator green (the "data" half of a "desc|data" message).
    """
    record = ColoredRecord(record)
    record.log_color = self.color(self.log_colors, record.levelname)
    # Set secondary log colors
    if self.secondary_log_colors:
        for name, log_colors in self.secondary_log_colors.items():
            color = self.color(log_colors, record.levelname)
            setattr(record, name + '_log_color', color)
    # Format the message (Python 2.6 lacks no-arg super semantics here)
    if sys.version_info > (2, 7):
        message = super(ColoredFormatter, self).format(record)
    else:
        message = logging.Formatter.format(self, record)
    # Add a reset code to the end of the message
    # (if it wasn't explicitly added in format str)
    if self.reset and not message.endswith(escape_codes['reset']):
        message += escape_codes['reset']
    # Split "desc|data" messages and highlight the data part in green.
    if '|' in message:
        desc, data = message.split("|", 1)
        desc = desc + escape_codes['reset']
        data = escape_codes['green'] + data
        message = desc + '|' + data
    return message
python
{ "resource": "" }
q44192
get_console_logger
train
def get_console_logger():
    """ just for kkconst demos

    Lazily builds (and caches in a module global) a DEBUG-level
    logger named "kkconst" that writes to stdout.
    """
    global __console_logger
    if __console_logger:
        return __console_logger
    demo_logger = logging.getLogger("kkconst")
    demo_logger.setLevel(logging.DEBUG)
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setLevel(logging.DEBUG)
    stream_handler.setFormatter(logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    demo_logger.addHandler(stream_handler)
    __console_logger = demo_logger
    return demo_logger
python
{ "resource": "" }
q44193
reduce_base
train
def reduce_base(amount: int, base: int) -> tuple:
    """
    Compute the reduced base of the given parameters

    Strips trailing decimal zeros from the amount, bumping the base by
    one for each zero removed. A zero amount reduces to (0, 0).

    :param amount: the amount value
    :param base: current base value
    :return: tuple containing computed (amount, base)
    """
    if amount == 0:
        return 0, 0
    while amount % 10 == 0:
        amount //= 10
        base += 1
    return int(amount), int(base)
python
{ "resource": "" }
q44194
InputSource.from_inline
train
def from_inline(cls: Type[InputSourceType], tx_version: int, inline: str) -> InputSourceType:
    """
    Return Transaction instance from inline string format

    :param tx_version: Version number of the document
    :param inline: Inline string format
    :return:
    """
    if tx_version == 2:
        # v2 inlines carry no amount/base; groups start at 1.
        data = InputSource.re_inline.match(inline)
        if data is None:
            raise MalformedDocumentError("Inline input")
        source_offset = 0
        amount = 0
        base = 0
    else:
        # v3+ inlines prefix AMOUNT:BASE, shifting source groups by 2.
        data = InputSource.re_inline_v3.match(inline)
        if data is None:
            raise MalformedDocumentError("Inline input")
        source_offset = 2
        amount = int(data.group(1))
        base = int(data.group(2))
    # The regex has two alternative source forms; whichever matched
    # determines which group triple holds (source, origin_id, index).
    if data.group(1 + source_offset):
        source = data.group(1 + source_offset)
        origin_id = data.group(2 + source_offset)
        index = int(data.group(3 + source_offset))
    else:
        source = data.group(4 + source_offset)
        origin_id = data.group(5 + source_offset)
        index = int(data.group(6 + source_offset))
    return cls(amount, base, source, origin_id, index)
python
{ "resource": "" }
q44195
OutputSource.from_inline
train
def from_inline(cls: Type[OutputSourceType], inline: str) -> OutputSourceType:
    """
    Return OutputSource instance from inline string format

    :param inline: Inline string format
    :return:
    :raises MalformedDocumentError: when the inline does not match
    """
    data = OutputSource.re_inline.match(inline)
    if data is None:
        raise MalformedDocumentError("Inline output")
    # Inline format groups: amount, base, lock condition text.
    amount = int(data.group(1))
    base = int(data.group(2))
    condition_text = data.group(3)
    return cls(amount, base, condition_text)
python
{ "resource": "" }
q44196
OutputSource.condition_from_text
train
def condition_from_text(text) -> Condition:
    """
    Return a Condition instance with PEG grammar from text

    :param text: PEG parsable string
    :return: the parsed Condition; unparsable text yields a bare
        Condition wrapper (see note below)
    """
    try:
        condition = pypeg2.parse(text, output.Condition)
    except SyntaxError:
        # Invalid conditions are possible, see https://github.com/duniter/duniter/issues/1156
        # In such a case, they are store as empty PEG grammar object
        # and considered unlockable
        condition = Condition(text)
    return condition
python
{ "resource": "" }
q44197
SIGParameter.from_parameter
train
def from_parameter(cls: Type[SIGParameterType], parameter: str) -> Optional[SIGParameterType]:
    """
    Return a SIGParameter instance from an index parameter

    :param parameter: Index parameter string
    :return: the parsed parameter, or None when it does not match
    """
    match = SIGParameter.re_sig.match(parameter)
    return cls(int(match.group(1))) if match else None
python
{ "resource": "" }
q44198
XHXParameter.from_parameter
train
def from_parameter(cls: Type[XHXParameterType], parameter: str) -> Optional[XHXParameterType]:
    """
    Return a XHXParameter instance from an index parameter

    :param parameter: Index parameter string
    :return: the parsed parameter, or None when it does not match
    """
    match = XHXParameter.re_xhx.match(parameter)
    return cls(int(match.group(1))) if match else None
python
{ "resource": "" }
q44199
UnlockParameter.from_parameter
train
def from_parameter(cls: Type[UnlockParameterType], parameter: str) -> Optional[Union[SIGParameter, XHXParameter]]:
    """
    Return UnlockParameter instance from parameter string

    Tries each known parameter grammar in turn (SIG first, then XHX).

    :param parameter: Parameter string
    :return: the first matching parameter instance, or None
    """
    for parse in (SIGParameter.from_parameter, XHXParameter.from_parameter):
        parsed = parse(parameter)
        if parsed:
            return parsed
    return None
python
{ "resource": "" }