Dataset columns (type, observed range):
id_within_dataset: int64 (1 to 55.5k)
snippet: string (lengths 19 to 14.2k)
tokens: list (lengths 6 to 1.63k)
nl: string (lengths 6 to 352)
split_within_dataset: string (1 class)
is_duplicated: bool (2 classes)
9,169
def pick_gradient_value(grad_list, gradient_level): return grad_list[int(round(((gradient_level * (len(grad_list) - 1)) / 100)))]
[ "def", "pick_gradient_value", "(", "grad_list", ",", "gradient_level", ")", ":", "return", "grad_list", "[", "int", "(", "round", "(", "(", "(", "gradient_level", "*", "(", "len", "(", "grad_list", ")", "-", "1", ")", ")", "/", "100", ")", ")", ")", "]" ]
given a list of colors and gradient percent .
train
false
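Example (illustrative values, not part of the dataset record): with a three-color list, a 50% gradient level rounds to the middle entry.
    pick_gradient_value(['#000', '#888', '#fff'], 50)  # round((50 * 2) / 100) == 1 -> '#888'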
9,170
def app_label_to_app_module(app_label):
    app = models.get_app(app_label)
    module_name = '.'.join(app.__name__.split('.')[:(-1)])
    try:
        module = sys.modules[module_name]
    except KeyError:
        __import__(module_name, {}, {}, [''])
        module = sys.modules[module_name]
    return module
[ "def", "app_label_to_app_module", "(", "app_label", ")", ":", "app", "=", "models", ".", "get_app", "(", "app_label", ")", "module_name", "=", "'.'", ".", "join", "(", "app", ".", "__name__", ".", "split", "(", "'.'", ")", "[", ":", "(", "-", "1", ")", "]", ")", "try", ":", "module", "=", "sys", ".", "modules", "[", "module_name", "]", "except", "KeyError", ":", "__import__", "(", "module_name", ",", "{", "}", ",", "{", "}", ",", "[", "''", "]", ")", "module", "=", "sys", ".", "modules", "[", "module_name", "]", "return", "module" ]
given the app label .
train
false
9,172
def _dict_to_bson(doc, check_keys, opts, top_level=True):
    if _raw_document_class(doc):
        return doc.raw
    try:
        elements = []
        if (top_level and ('_id' in doc)):
            elements.append(_name_value_to_bson('_id\x00', doc['_id'], check_keys, opts))
        for (key, value) in iteritems(doc):
            if ((not top_level) or (key != '_id')):
                elements.append(_element_to_bson(key, value, check_keys, opts))
    except AttributeError:
        raise TypeError(('encoder expected a mapping type but got: %r' % (doc,)))
    encoded = ''.join(elements)
    return ((_PACK_INT((len(encoded) + 5)) + encoded) + '\x00')
[ "def", "_dict_to_bson", "(", "doc", ",", "check_keys", ",", "opts", ",", "top_level", "=", "True", ")", ":", "if", "_raw_document_class", "(", "doc", ")", ":", "return", "doc", ".", "raw", "try", ":", "elements", "=", "[", "]", "if", "(", "top_level", "and", "(", "'_id'", "in", "doc", ")", ")", ":", "elements", ".", "append", "(", "_name_value_to_bson", "(", "'_id\\x00'", ",", "doc", "[", "'_id'", "]", ",", "check_keys", ",", "opts", ")", ")", "for", "(", "key", ",", "value", ")", "in", "iteritems", "(", "doc", ")", ":", "if", "(", "(", "not", "top_level", ")", "or", "(", "key", "!=", "'_id'", ")", ")", ":", "elements", ".", "append", "(", "_element_to_bson", "(", "key", ",", "value", ",", "check_keys", ",", "opts", ")", ")", "except", "AttributeError", ":", "raise", "TypeError", "(", "(", "'encoder expected a mapping type but got: %r'", "%", "(", "doc", ",", ")", ")", ")", "encoded", "=", "''", ".", "join", "(", "elements", ")", "return", "(", "(", "_PACK_INT", "(", "(", "len", "(", "encoded", ")", "+", "5", ")", ")", "+", "encoded", ")", "+", "'\\x00'", ")" ]
encode a document to bson .
train
true
9,174
@retry_on_failure
def test_cp12452():
    expected_dir = ['__module__', '__class__', '__del__', '__delattr__', '__doc__',
                    '__getattribute__', '__hash__', '__init__', '__iter__', '__new__',
                    '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__str__',
                    'bufsize', 'close', 'closed', 'default_bufsize', 'fileno', 'flush',
                    'mode', 'name', 'next', 'read', 'readline', 'readlines', 'softspace',
                    'write', 'writelines']
    fileobject_dir = dir(socket._fileobject)
    missing = [x for x in expected_dir if (x not in fileobject_dir)]
    AreEqual([], missing)
[ "@", "retry_on_failure", "def", "test_cp12452", "(", ")", ":", "expected_dir", "=", "[", "'__module__'", ",", "'__class__'", ",", "'__del__'", ",", "'__delattr__'", ",", "'__doc__'", ",", "'__getattribute__'", ",", "'__hash__'", ",", "'__init__'", ",", "'__iter__'", ",", "'__new__'", ",", "'__reduce__'", ",", "'__reduce_ex__'", ",", "'__repr__'", ",", "'__setattr__'", ",", "'__str__'", ",", "'bufsize'", ",", "'close'", ",", "'closed'", ",", "'default_bufsize'", ",", "'fileno'", ",", "'flush'", ",", "'mode'", ",", "'name'", ",", "'next'", ",", "'read'", ",", "'readline'", ",", "'readlines'", ",", "'softspace'", ",", "'write'", ",", "'writelines'", "]", "fileobject_dir", "=", "dir", "(", "socket", ".", "_fileobject", ")", "missing", "=", "[", "x", "for", "x", "in", "expected_dir", "if", "(", "x", "not", "in", "fileobject_dir", ")", "]", "AreEqual", "(", "[", "]", ",", "missing", ")" ]
fully test socket .
train
false
9,175
def inet_ntoa(address):
    if (len(address) != 4):
        raise dns.exception.SyntaxError
    return ('%u.%u.%u.%u' % (ord(address[0]), ord(address[1]), ord(address[2]), ord(address[3])))
[ "def", "inet_ntoa", "(", "address", ")", ":", "if", "(", "len", "(", "address", ")", "!=", "4", ")", ":", "raise", "dns", ".", "exception", ".", "SyntaxError", "return", "(", "'%u.%u.%u.%u'", "%", "(", "ord", "(", "address", "[", "0", "]", ")", ",", "ord", "(", "address", "[", "1", "]", ")", ",", "ord", "(", "address", "[", "2", "]", ")", ",", "ord", "(", "address", "[", "3", "]", ")", ")", ")" ]
inet_ntoa -> ip_address_string convert an ip address from 32-bit packed binary format to string format .
train
false
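Example (illustrative, Python 2, where a packed address is a 4-byte str):
    inet_ntoa('\x7f\x00\x00\x01')  # ord() of each byte -> '127.0.0.1'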
9,178
def create_imap_message(db_session, account, folder, msg):
    new_message = Message.create_from_synced(account=account, mid=msg.uid,
                                             folder_name=folder.name,
                                             received_date=msg.internaldate,
                                             body_string=msg.body)
    existing_copy = reconcile_message(new_message, db_session)
    if (existing_copy is not None):
        new_message = existing_copy
    imapuid = ImapUid(account=account, folder=folder, msg_uid=msg.uid, message=new_message)
    imapuid.update_flags(msg.flags)
    if (msg.g_labels is not None):
        imapuid.update_labels(msg.g_labels)
    with db_session.no_autoflush:
        is_draft = (imapuid.is_draft and ((folder.canonical_name == 'drafts') or (folder.canonical_name == 'all')))
        update_message_metadata(db_session, account, new_message, is_draft)
        update_contacts_from_message(db_session, new_message, account.namespace)
    return imapuid
[ "def", "create_imap_message", "(", "db_session", ",", "account", ",", "folder", ",", "msg", ")", ":", "new_message", "=", "Message", ".", "create_from_synced", "(", "account", "=", "account", ",", "mid", "=", "msg", ".", "uid", ",", "folder_name", "=", "folder", ".", "name", ",", "received_date", "=", "msg", ".", "internaldate", ",", "body_string", "=", "msg", ".", "body", ")", "existing_copy", "=", "reconcile_message", "(", "new_message", ",", "db_session", ")", "if", "(", "existing_copy", "is", "not", "None", ")", ":", "new_message", "=", "existing_copy", "imapuid", "=", "ImapUid", "(", "account", "=", "account", ",", "folder", "=", "folder", ",", "msg_uid", "=", "msg", ".", "uid", ",", "message", "=", "new_message", ")", "imapuid", ".", "update_flags", "(", "msg", ".", "flags", ")", "if", "(", "msg", ".", "g_labels", "is", "not", "None", ")", ":", "imapuid", ".", "update_labels", "(", "msg", ".", "g_labels", ")", "with", "db_session", ".", "no_autoflush", ":", "is_draft", "=", "(", "imapuid", ".", "is_draft", "and", "(", "(", "folder", ".", "canonical_name", "==", "'drafts'", ")", "or", "(", "folder", ".", "canonical_name", "==", "'all'", ")", ")", ")", "update_message_metadata", "(", "db_session", ",", "account", ",", "new_message", ",", "is_draft", ")", "update_contacts_from_message", "(", "db_session", ",", "new_message", ",", "account", ".", "namespace", ")", "return", "imapuid" ]
imap-specific message creation logic .
train
false
9,179
def natural_key(s): return tuple((_nkconv(m) for m in _nkre.findall(s)))
[ "def", "natural_key", "(", "s", ")", ":", "return", "tuple", "(", "(", "_nkconv", "(", "m", ")", "for", "m", "in", "_nkre", ".", "findall", "(", "s", ")", ")", ")" ]
converts string s into a tuple that will sort "naturally" .
train
false
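natural_key depends on module-level helpers _nkre and _nkconv that are not included in the snippet. A minimal self-contained sketch, assuming a digit/non-digit tokenizer (these two definitions are assumptions, not from the source):
    import re
    _nkre = re.compile(r'\d+|\D+')   # assumed: split string into runs of digits and non-digits
    def _nkconv(m):                  # assumed: digit runs compare numerically
        return int(m) if m.isdigit() else m
    sorted(['file10', 'file2'], key=natural_key)  # -> ['file2', 'file10']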
9,180
def usage_percent(used, total, _round=None):
    try:
        ret = ((used / total) * 100)
    except ZeroDivisionError:
        ret = (0.0 if (isinstance(used, float) or isinstance(total, float)) else 0)
    if (_round is not None):
        return round(ret, _round)
    else:
        return ret
[ "def", "usage_percent", "(", "used", ",", "total", ",", "_round", "=", "None", ")", ":", "try", ":", "ret", "=", "(", "(", "used", "/", "total", ")", "*", "100", ")", "except", "ZeroDivisionError", ":", "ret", "=", "(", "0.0", "if", "(", "isinstance", "(", "used", ",", "float", ")", "or", "isinstance", "(", "total", ",", "float", ")", ")", "else", "0", ")", "if", "(", "_round", "is", "not", "None", ")", ":", "return", "round", "(", "ret", ",", "_round", ")", "else", ":", "return", "ret" ]
calculate percentage usage of used against total .
train
false
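Example (illustrative, assuming true division as in Python 3; the Python 2 source module would need from __future__ import division):
    usage_percent(1, 4, _round=1)  # -> 25.0
    usage_percent(1, 0)            # ZeroDivisionError caught, both args int -> 0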
9,181
def _summarize_str(st): return (st[:56][::(-1)].split(',', 1)[(-1)][::(-1)] + ', ...')
[ "def", "_summarize_str", "(", "st", ")", ":", "return", "(", "st", "[", ":", "56", "]", "[", ":", ":", "(", "-", "1", ")", "]", ".", "split", "(", "','", ",", "1", ")", "[", "(", "-", "1", ")", "]", "[", ":", ":", "(", "-", "1", ")", "]", "+", "', ...'", ")" ]
make summary string .
train
false
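Worked example (input assumed): the string is truncated to its first 56 characters, cut back to the last comma within that window, and an ellipsis marker appended.
    _summarize_str('alpha, beta, gamma')  # -> 'alpha, beta, ...'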
9,183
@conf.commands.register
def traceroute(target, dport=80, minttl=1, maxttl=30, sport=RandShort(), l4=None,
               filter=None, timeout=2, verbose=None, **kargs):
    if (verbose is None):
        verbose = conf.verb
    if (filter is None):
        filter = '(icmp and (icmp[0]=3 or icmp[0]=4 or icmp[0]=5 or icmp[0]=11 or icmp[0]=12)) or (tcp and (tcp[13] & 0x16 > 0x10))'
    if (l4 is None):
        (a, b) = sr((IP(dst=target, id=RandShort(), ttl=(minttl, maxttl)) / TCP(seq=RandInt(), sport=sport, dport=dport)),
                    timeout=timeout, filter=filter, verbose=verbose, **kargs)
    else:
        filter = 'ip'
        (a, b) = sr((IP(dst=target, id=RandShort(), ttl=(minttl, maxttl)) / l4),
                    timeout=timeout, filter=filter, verbose=verbose, **kargs)
    a = TracerouteResult(a.res)
    if verbose:
        a.show()
    return (a, b)
[ "@", "conf", ".", "commands", ".", "register", "def", "traceroute", "(", "target", ",", "dport", "=", "80", ",", "minttl", "=", "1", ",", "maxttl", "=", "30", ",", "sport", "=", "RandShort", "(", ")", ",", "l4", "=", "None", ",", "filter", "=", "None", ",", "timeout", "=", "2", ",", "verbose", "=", "None", ",", "**", "kargs", ")", ":", "if", "(", "verbose", "is", "None", ")", ":", "verbose", "=", "conf", ".", "verb", "if", "(", "filter", "is", "None", ")", ":", "filter", "=", "'(icmp and (icmp[0]=3 or icmp[0]=4 or icmp[0]=5 or icmp[0]=11 or icmp[0]=12)) or (tcp and (tcp[13] & 0x16 > 0x10))'", "if", "(", "l4", "is", "None", ")", ":", "(", "a", ",", "b", ")", "=", "sr", "(", "(", "IP", "(", "dst", "=", "target", ",", "id", "=", "RandShort", "(", ")", ",", "ttl", "=", "(", "minttl", ",", "maxttl", ")", ")", "/", "TCP", "(", "seq", "=", "RandInt", "(", ")", ",", "sport", "=", "sport", ",", "dport", "=", "dport", ")", ")", ",", "timeout", "=", "timeout", ",", "filter", "=", "filter", ",", "verbose", "=", "verbose", ",", "**", "kargs", ")", "else", ":", "filter", "=", "'ip'", "(", "a", ",", "b", ")", "=", "sr", "(", "(", "IP", "(", "dst", "=", "target", ",", "id", "=", "RandShort", "(", ")", ",", "ttl", "=", "(", "minttl", ",", "maxttl", ")", ")", "/", "l4", ")", ",", "timeout", "=", "timeout", ",", "filter", "=", "filter", ",", "verbose", "=", "verbose", ",", "**", "kargs", ")", "a", "=", "TracerouteResult", "(", "a", ".", "res", ")", "if", "verbose", ":", "a", ".", "show", "(", ")", "return", "(", "a", ",", "b", ")" ]
performs a traceroute to a 3rd party host cli example: .
train
true
9,185
def _get_timezone_name(timezone):
    try:
        return timezone.zone
    except AttributeError:
        return timezone.tzname(None)
[ "def", "_get_timezone_name", "(", "timezone", ")", ":", "try", ":", "return", "timezone", ".", "zone", "except", "AttributeError", ":", "return", "timezone", ".", "tzname", "(", "None", ")" ]
returns the name of timezone .
train
false
9,186
def submit_puids(recording_puids):
    warn('PUID support was dropped at the server\nnothing will be submitted', Warning, stacklevel=2)
    return {'message': {'text': 'OK'}}
[ "def", "submit_puids", "(", "recording_puids", ")", ":", "warn", "(", "'PUID support was dropped at the server\\nnothing will be submitted'", ",", "Warning", ",", "stacklevel", "=", "2", ")", "return", "{", "'message'", ":", "{", "'text'", ":", "'OK'", "}", "}" ]
submit puids .
train
false
9,187
def get_nexusvm_binding(vlan_id, instance_id):
    LOG.debug(_('get_nexusvm_binding() called'))
    session = db.get_session()
    try:
        binding = session.query(nexus_models_v2.NexusPortBinding).filter_by(instance_id=instance_id).filter_by(vlan_id=vlan_id).first()
        return binding
    except exc.NoResultFound:
        raise c_exc.NexusPortBindingNotFound(vlan_id=vlan_id)
[ "def", "get_nexusvm_binding", "(", "vlan_id", ",", "instance_id", ")", ":", "LOG", ".", "debug", "(", "_", "(", "'get_nexusvm_binding() called'", ")", ")", "session", "=", "db", ".", "get_session", "(", ")", "try", ":", "binding", "=", "session", ".", "query", "(", "nexus_models_v2", ".", "NexusPortBinding", ")", ".", "filter_by", "(", "instance_id", "=", "instance_id", ")", ".", "filter_by", "(", "vlan_id", "=", "vlan_id", ")", ".", "first", "(", ")", "return", "binding", "except", "exc", ".", "NoResultFound", ":", "raise", "c_exc", ".", "NexusPortBindingNotFound", "(", "vlan_id", "=", "vlan_id", ")" ]
lists nexusvm bindings .
train
false
9,190
def describe_policy_version(policyName, policyVersionId, region=None, key=None, keyid=None, profile=None):
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        policy = conn.get_policy_version(policyName=policyName, policyVersionId=policyVersionId)
        if policy:
            keys = ('policyName', 'policyArn', 'policyDocument', 'policyVersionId', 'isDefaultVersion')
            return {'policy': dict([(k, policy.get(k)) for k in keys])}
        else:
            return {'policy': None}
    except ClientError as e:
        err = salt.utils.boto3.get_error(e)
        if (e.response.get('Error', {}).get('Code') == 'ResourceNotFoundException'):
            return {'policy': None}
        return {'error': salt.utils.boto3.get_error(e)}
[ "def", "describe_policy_version", "(", "policyName", ",", "policyVersionId", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "try", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "policy", "=", "conn", ".", "get_policy_version", "(", "policyName", "=", "policyName", ",", "policyVersionId", "=", "policyVersionId", ")", "if", "policy", ":", "keys", "=", "(", "'policyName'", ",", "'policyArn'", ",", "'policyDocument'", ",", "'policyVersionId'", ",", "'isDefaultVersion'", ")", "return", "{", "'policy'", ":", "dict", "(", "[", "(", "k", ",", "policy", ".", "get", "(", "k", ")", ")", "for", "k", "in", "keys", "]", ")", "}", "else", ":", "return", "{", "'policy'", ":", "None", "}", "except", "ClientError", "as", "e", ":", "err", "=", "salt", ".", "utils", ".", "boto3", ".", "get_error", "(", "e", ")", "if", "(", "e", ".", "response", ".", "get", "(", "'Error'", ",", "{", "}", ")", ".", "get", "(", "'Code'", ")", "==", "'ResourceNotFoundException'", ")", ":", "return", "{", "'policy'", ":", "None", "}", "return", "{", "'error'", ":", "salt", ".", "utils", ".", "boto3", ".", "get_error", "(", "e", ")", "}" ]
given a policy name and version describe its properties .
train
false
9,192
def clean_proc_dir(opts):
    for basefilename in os.listdir(salt.minion.get_proc_dir(opts['cachedir'])):
        fn_ = os.path.join(salt.minion.get_proc_dir(opts['cachedir']), basefilename)
        with salt.utils.fopen(fn_, 'rb') as fp_:
            job = None
            try:
                job = salt.payload.Serial(opts).load(fp_)
            except Exception:
                if salt.utils.is_windows():
                    fp_.close()
                try:
                    os.unlink(fn_)
                    continue
                except OSError:
                    continue
            log.debug('schedule.clean_proc_dir: checking job {0} for process existence'.format(job))
            if ((job is not None) and ('pid' in job)):
                if salt.utils.process.os_is_running(job['pid']):
                    log.debug('schedule.clean_proc_dir: Cleaning proc dir, pid {0} still exists.'.format(job['pid']))
                else:
                    if salt.utils.is_windows():
                        fp_.close()
                    try:
                        os.unlink(fn_)
                    except OSError:
                        pass
[ "def", "clean_proc_dir", "(", "opts", ")", ":", "for", "basefilename", "in", "os", ".", "listdir", "(", "salt", ".", "minion", ".", "get_proc_dir", "(", "opts", "[", "'cachedir'", "]", ")", ")", ":", "fn_", "=", "os", ".", "path", ".", "join", "(", "salt", ".", "minion", ".", "get_proc_dir", "(", "opts", "[", "'cachedir'", "]", ")", ",", "basefilename", ")", "with", "salt", ".", "utils", ".", "fopen", "(", "fn_", ",", "'rb'", ")", "as", "fp_", ":", "job", "=", "None", "try", ":", "job", "=", "salt", ".", "payload", ".", "Serial", "(", "opts", ")", ".", "load", "(", "fp_", ")", "except", "Exception", ":", "if", "salt", ".", "utils", ".", "is_windows", "(", ")", ":", "fp_", ".", "close", "(", ")", "try", ":", "os", ".", "unlink", "(", "fn_", ")", "continue", "except", "OSError", ":", "continue", "log", ".", "debug", "(", "'schedule.clean_proc_dir: checking job {0} for process existence'", ".", "format", "(", "job", ")", ")", "if", "(", "(", "job", "is", "not", "None", ")", "and", "(", "'pid'", "in", "job", ")", ")", ":", "if", "salt", ".", "utils", ".", "process", ".", "os_is_running", "(", "job", "[", "'pid'", "]", ")", ":", "log", ".", "debug", "(", "'schedule.clean_proc_dir: Cleaning proc dir, pid {0} still exists.'", ".", "format", "(", "job", "[", "'pid'", "]", ")", ")", "else", ":", "if", "salt", ".", "utils", ".", "is_windows", "(", ")", ":", "fp_", ".", "close", "(", ")", "try", ":", "os", ".", "unlink", "(", "fn_", ")", "except", "OSError", ":", "pass" ]
loop through jid files in the minion proc directory and remove any that refer to processes that no longer exist .
train
true
9,195
def grid_to_graph(n_x, n_y, n_z=1, mask=None, return_as=sparse.coo_matrix, dtype=np.int): return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as, dtype=dtype)
[ "def", "grid_to_graph", "(", "n_x", ",", "n_y", ",", "n_z", "=", "1", ",", "mask", "=", "None", ",", "return_as", "=", "sparse", ".", "coo_matrix", ",", "dtype", "=", "np", ".", "int", ")", ":", "return", "_to_graph", "(", "n_x", ",", "n_y", ",", "n_z", ",", "mask", "=", "mask", ",", "return_as", "=", "return_as", ",", "dtype", "=", "dtype", ")" ]
graph of the pixel-to-pixel connections edges exist if 2 voxels are connected .
train
false
9,197
def set_placeholder_cache(placeholder, lang, site_id, content, request):
    from django.core.cache import cache
    key = _get_placeholder_cache_key(placeholder, lang, site_id, request)
    duration = min(get_cms_setting('CACHE_DURATIONS')['content'],
                   placeholder.get_cache_expiration(request, now()))
    cache.set(key, content, duration)
    (version, vary_on_list) = _get_placeholder_cache_version(placeholder, lang, site_id)
    _set_placeholder_cache_version(placeholder, lang, site_id, version, vary_on_list, duration=duration)
[ "def", "set_placeholder_cache", "(", "placeholder", ",", "lang", ",", "site_id", ",", "content", ",", "request", ")", ":", "from", "django", ".", "core", ".", "cache", "import", "cache", "key", "=", "_get_placeholder_cache_key", "(", "placeholder", ",", "lang", ",", "site_id", ",", "request", ")", "duration", "=", "min", "(", "get_cms_setting", "(", "'CACHE_DURATIONS'", ")", "[", "'content'", "]", ",", "placeholder", ".", "get_cache_expiration", "(", "request", ",", "now", "(", ")", ")", ")", "cache", ".", "set", "(", "key", ",", "content", ",", "duration", ")", "(", "version", ",", "vary_on_list", ")", "=", "_get_placeholder_cache_version", "(", "placeholder", ",", "lang", ",", "site_id", ")", "_set_placeholder_cache_version", "(", "placeholder", ",", "lang", ",", "site_id", ",", "version", ",", "vary_on_list", ",", "duration", "=", "duration", ")" ]
sets the placeholder cache with the rendered placeholder .
train
false
9,198
def media_play_pause(hass): hass.services.call(DOMAIN, SERVICE_MEDIA_PLAY_PAUSE)
[ "def", "media_play_pause", "(", "hass", ")", ":", "hass", ".", "services", ".", "call", "(", "DOMAIN", ",", "SERVICE_MEDIA_PLAY_PAUSE", ")" ]
press the keyboard button for play/pause .
train
false
9,199
def cfg_from_list(cfg_list):
    from ast import literal_eval
    assert ((len(cfg_list) % 2) == 0)
    for (k, v) in zip(cfg_list[0::2], cfg_list[1::2]):
        key_list = k.split('.')
        d = __C
        for subkey in key_list[:(-1)]:
            assert d.has_key(subkey)
            d = d[subkey]
        subkey = key_list[(-1)]
        assert d.has_key(subkey)
        try:
            value = literal_eval(v)
        except:
            value = v
        assert (type(value) == type(d[subkey])), 'type {} does not match original type {}'.format(type(value), type(d[subkey]))
        d[subkey] = value
[ "def", "cfg_from_list", "(", "cfg_list", ")", ":", "from", "ast", "import", "literal_eval", "assert", "(", "(", "len", "(", "cfg_list", ")", "%", "2", ")", "==", "0", ")", "for", "(", "k", ",", "v", ")", "in", "zip", "(", "cfg_list", "[", "0", ":", ":", "2", "]", ",", "cfg_list", "[", "1", ":", ":", "2", "]", ")", ":", "key_list", "=", "k", ".", "split", "(", "'.'", ")", "d", "=", "__C", "for", "subkey", "in", "key_list", "[", ":", "(", "-", "1", ")", "]", ":", "assert", "d", ".", "has_key", "(", "subkey", ")", "d", "=", "d", "[", "subkey", "]", "subkey", "=", "key_list", "[", "(", "-", "1", ")", "]", "assert", "d", ".", "has_key", "(", "subkey", ")", "try", ":", "value", "=", "literal_eval", "(", "v", ")", "except", ":", "value", "=", "v", "assert", "(", "type", "(", "value", ")", "==", "type", "(", "d", "[", "subkey", "]", ")", ")", ",", "'type {} does not match original type {}'", ".", "format", "(", "type", "(", "value", ")", ",", "type", "(", "d", "[", "subkey", "]", ")", ")", "d", "[", "subkey", "]", "=", "value" ]
set config keys via list .
train
false
9,201
def get_function_groups(): return __function_groups__.copy()
[ "def", "get_function_groups", "(", ")", ":", "return", "__function_groups__", ".", "copy", "(", ")" ]
returns a dict with keys of function-group names and values of lists of function names ie {group_names: [function_names]} .
train
false
9,202
def createAlignment(sequences, alphabet): return MultipleSeqAlignment((SeqRecord(Seq(s, alphabet), id=('sequence%i' % (i + 1))) for (i, s) in enumerate(sequences)), alphabet)
[ "def", "createAlignment", "(", "sequences", ",", "alphabet", ")", ":", "return", "MultipleSeqAlignment", "(", "(", "SeqRecord", "(", "Seq", "(", "s", ",", "alphabet", ")", ",", "id", "=", "(", "'sequence%i'", "%", "(", "i", "+", "1", ")", ")", ")", "for", "(", "i", ",", "s", ")", "in", "enumerate", "(", "sequences", ")", ")", ",", "alphabet", ")" ]
create an alignment object from a list of sequences .
train
false
9,203
def CreateInteractiveWindowUserPreference(makeDoc=None, makeFrame=None):
    bCreate = LoadPreference('Show at startup', 1)
    if bCreate:
        CreateInteractiveWindow(makeDoc, makeFrame)
[ "def", "CreateInteractiveWindowUserPreference", "(", "makeDoc", "=", "None", ",", "makeFrame", "=", "None", ")", ":", "bCreate", "=", "LoadPreference", "(", "'Show at startup'", ",", "1", ")", "if", "bCreate", ":", "CreateInteractiveWindow", "(", "makeDoc", ",", "makeFrame", ")" ]
create some sort of interactive window if the users preference say we should .
train
false
9,204
def is_cidr_host(cidr):
    if ('/' not in str(cidr)):
        raise ValueError(_("cidr doesn't contain a '/'"))
    net = netaddr.IPNetwork(cidr)
    if (net.version == 4):
        return (net.prefixlen == n_const.IPv4_BITS)
    return (net.prefixlen == n_const.IPv6_BITS)
[ "def", "is_cidr_host", "(", "cidr", ")", ":", "if", "(", "'/'", "not", "in", "str", "(", "cidr", ")", ")", ":", "raise", "ValueError", "(", "_", "(", "\"cidr doesn't contain a '/'\"", ")", ")", "net", "=", "netaddr", ".", "IPNetwork", "(", "cidr", ")", "if", "(", "net", ".", "version", "==", "4", ")", ":", "return", "(", "net", ".", "prefixlen", "==", "n_const", ".", "IPv4_BITS", ")", "return", "(", "net", ".", "prefixlen", "==", "n_const", ".", "IPv6_BITS", ")" ]
determines if the cidr passed in represents a single host network .
train
false
9,205
def copy_folders(source, destination):
    if os.path.exists(os.path.join('dist', destination)):
        shutil.rmtree(os.path.join('dist', destination))
    shutil.copytree(os.path.join(source), os.path.join('dist', destination))
[ "def", "copy_folders", "(", "source", ",", "destination", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "'dist'", ",", "destination", ")", ")", ":", "shutil", ".", "rmtree", "(", "os", ".", "path", ".", "join", "(", "'dist'", ",", "destination", ")", ")", "shutil", ".", "copytree", "(", "os", ".", "path", ".", "join", "(", "source", ")", ",", "os", ".", "path", ".", "join", "(", "'dist'", ",", "destination", ")", ")" ]
copy files & folders from source to destination .
train
false
9,206
def DisplayAccountTree(account, accounts, links, depth=0):
    prefix = (('-' * depth) * 2)
    print ('%s%s, %s' % (prefix, account['customerId'], account['name']))
    if (account['customerId'] in links):
        for child_link in links[account['customerId']]:
            child_account = accounts[child_link['clientCustomerId']]
            DisplayAccountTree(child_account, accounts, links, (depth + 1))
[ "def", "DisplayAccountTree", "(", "account", ",", "accounts", ",", "links", ",", "depth", "=", "0", ")", ":", "prefix", "=", "(", "(", "'-'", "*", "depth", ")", "*", "2", ")", "print", "(", "'%s%s, %s'", "%", "(", "prefix", ",", "account", "[", "'customerId'", "]", ",", "account", "[", "'name'", "]", ")", ")", "if", "(", "account", "[", "'customerId'", "]", "in", "links", ")", ":", "for", "child_link", "in", "links", "[", "account", "[", "'customerId'", "]", "]", ":", "child_account", "=", "accounts", "[", "child_link", "[", "'clientCustomerId'", "]", "]", "DisplayAccountTree", "(", "child_account", ",", "accounts", ",", "links", ",", "(", "depth", "+", "1", ")", ")" ]
displays an account tree .
train
true
9,207
def create_stubs(stubs_to_create=None):
    if (stubs_to_create is None):
        stubs_to_create = {}
    if (not isinstance(stubs_to_create, dict)):
        raise TypeError(('create_stub must be passed a dict, but a %s was given.' % type(stubs_to_create).__name__))

    def inner_stub_out(fn):

        @wraps(fn)
        def instance_stub_out(self, *args, **kwargs):
            for key in stubs_to_create:
                if (not (isinstance(stubs_to_create[key], tuple) or isinstance(stubs_to_create[key], list))):
                    raise TypeError(('The values of the create_stub dict must be lists or tuples, but is a %s.' % type(stubs_to_create[key]).__name__))
                for value in stubs_to_create[key]:
                    self.mox.StubOutWithMock(key, value)
            return fn(self, *args, **kwargs)
        return instance_stub_out
    return inner_stub_out
[ "def", "create_stubs", "(", "stubs_to_create", "=", "None", ")", ":", "if", "(", "stubs_to_create", "is", "None", ")", ":", "stubs_to_create", "=", "{", "}", "if", "(", "not", "isinstance", "(", "stubs_to_create", ",", "dict", ")", ")", ":", "raise", "TypeError", "(", "(", "'create_stub must be passed a dict, but a %s was given.'", "%", "type", "(", "stubs_to_create", ")", ".", "__name__", ")", ")", "def", "inner_stub_out", "(", "fn", ")", ":", "@", "wraps", "(", "fn", ")", "def", "instance_stub_out", "(", "self", ",", "*", "args", ",", "**", "kwargs", ")", ":", "for", "key", "in", "stubs_to_create", ":", "if", "(", "not", "(", "isinstance", "(", "stubs_to_create", "[", "key", "]", ",", "tuple", ")", "or", "isinstance", "(", "stubs_to_create", "[", "key", "]", ",", "list", ")", ")", ")", ":", "raise", "TypeError", "(", "(", "'The values of the create_stub dict must be lists or tuples, but is a %s.'", "%", "type", "(", "stubs_to_create", "[", "key", "]", ")", ".", "__name__", ")", ")", "for", "value", "in", "stubs_to_create", "[", "key", "]", ":", "self", ".", "mox", ".", "StubOutWithMock", "(", "key", ",", "value", ")", "return", "fn", "(", "self", ",", "*", "args", ",", "**", "kwargs", ")", "return", "instance_stub_out", "return", "inner_stub_out" ]
decorator to simplify setting up multiple stubs at once via mox .
train
false
9,208
@builtin(u'Lower-case text', lower, apply_func_to_match_groups)
def replace_lowercase(match, number, file_name, metadata, dictionaries, data, functions, *args, **kwargs):
    return apply_func_to_match_groups(match, lower)
[ "@", "builtin", "(", "u'Lower-case text'", ",", "lower", ",", "apply_func_to_match_groups", ")", "def", "replace_lowercase", "(", "match", ",", "number", ",", "file_name", ",", "metadata", ",", "dictionaries", ",", "data", ",", "functions", ",", "*", "args", ",", "**", "kwargs", ")", ":", "return", "apply_func_to_match_groups", "(", "match", ",", "lower", ")" ]
make matched text lower case .
train
false
9,210
def parseFormattedText(text):
    state = _FormattingParser()
    for ch in text:
        state.process(ch)
    return state.complete()
[ "def", "parseFormattedText", "(", "text", ")", ":", "state", "=", "_FormattingParser", "(", ")", "for", "ch", "in", "text", ":", "state", ".", "process", "(", "ch", ")", "return", "state", ".", "complete", "(", ")" ]
parse text containing irc formatting codes into structured information .
train
false
9,211
def pack_dunder(name):
    mod = sys.modules[name]
    if (not hasattr(mod, '__utils__')):
        setattr(mod, '__utils__', salt.loader.utils(mod.__opts__))
[ "def", "pack_dunder", "(", "name", ")", ":", "mod", "=", "sys", ".", "modules", "[", "name", "]", "if", "(", "not", "hasattr", "(", "mod", ",", "'__utils__'", ")", ")", ":", "setattr", "(", "mod", ",", "'__utils__'", ",", "salt", ".", "loader", ".", "utils", "(", "mod", ".", "__opts__", ")", ")" ]
compatibility helper function to make __utils__ available on demand .
train
true
9,212
def get_file_size(fileobj):
    currpos = fileobj.tell()
    fileobj.seek(0, 2)
    total_size = fileobj.tell()
    fileobj.seek(currpos)
    return total_size
[ "def", "get_file_size", "(", "fileobj", ")", ":", "currpos", "=", "fileobj", ".", "tell", "(", ")", "fileobj", ".", "seek", "(", "0", ",", "2", ")", "total_size", "=", "fileobj", ".", "tell", "(", ")", "fileobj", ".", "seek", "(", "currpos", ")", "return", "total_size" ]
returns the file size .
train
true
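Example (illustrative input): seek(0, 2) seeks to the end of the file, and the original position is restored afterwards.
    import io
    buf = io.BytesIO(b'hello')
    get_file_size(buf)  # -> 5; buf.tell() is unchanged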
9,214
def nsdecls(*prefixes): return u' '.join([(u'xmlns:%s="%s"' % (pfx, nsmap[pfx])) for pfx in prefixes])
[ "def", "nsdecls", "(", "*", "prefixes", ")", ":", "return", "u' '", ".", "join", "(", "[", "(", "u'xmlns:%s=\"%s\"'", "%", "(", "pfx", ",", "nsmap", "[", "pfx", "]", ")", ")", "for", "pfx", "in", "prefixes", "]", ")" ]
return a string containing a namespace declaration for each of *nspfxs* .
train
false
9,216
@register.inclusion_tag('addons/impala/review_add_box.html')
@jinja2.contextfunction
def impala_review_add_box(context, addon):
    c = dict(context.items())
    c['addon'] = addon
    return c
[ "@", "register", ".", "inclusion_tag", "(", "'addons/impala/review_add_box.html'", ")", "@", "jinja2", ".", "contextfunction", "def", "impala_review_add_box", "(", "context", ",", "addon", ")", ":", "c", "=", "dict", "(", "context", ".", "items", "(", ")", ")", "c", "[", "'addon'", "]", "=", "addon", "return", "c" ]
details page: show a box for the user to post a review .
train
false
9,217
def is_complete_v4_key(v4_key):
    assert (len(v4_key.path_element_list()) >= 1)
    last_element = v4_key.path_element((len(v4_key.path_element_list()) - 1))
    return (last_element.has_id() or last_element.has_name())
[ "def", "is_complete_v4_key", "(", "v4_key", ")", ":", "assert", "(", "len", "(", "v4_key", ".", "path_element_list", "(", ")", ")", ">=", "1", ")", "last_element", "=", "v4_key", ".", "path_element", "(", "(", "len", "(", "v4_key", ".", "path_element_list", "(", ")", ")", "-", "1", ")", ")", "return", "(", "last_element", ".", "has_id", "(", ")", "or", "last_element", ".", "has_name", "(", ")", ")" ]
returns true if a key specifies an id or name .
train
false
9,218
def template_substitute(text, **kwargs):
    for (name, value) in kwargs.items():
        placeholder_pattern = ('{%s}' % name)
        if (placeholder_pattern in text):
            text = text.replace(placeholder_pattern, value)
    return text
[ "def", "template_substitute", "(", "text", ",", "**", "kwargs", ")", ":", "for", "(", "name", ",", "value", ")", "in", "kwargs", ".", "items", "(", ")", ":", "placeholder_pattern", "=", "(", "'{%s}'", "%", "name", ")", "if", "(", "placeholder_pattern", "in", "text", ")", ":", "text", "=", "text", ".", "replace", "(", "placeholder_pattern", ",", "value", ")", "return", "text" ]
replace placeholders in text by using the data mapping .
train
true
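Example (illustrative values):
    template_substitute('Hello {name}, see {url}', name='World')
    # -> 'Hello World, see {url}'  (unmatched placeholders are left as-is)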
9,219
def _formatting_rule_has_first_group_only(national_prefix_formatting_rule):
    if (national_prefix_formatting_rule is None):
        return True
    return bool(fullmatch(_FIRST_GROUP_ONLY_PREFIX_PATTERN, national_prefix_formatting_rule))
[ "def", "_formatting_rule_has_first_group_only", "(", "national_prefix_formatting_rule", ")", ":", "if", "(", "national_prefix_formatting_rule", "is", "None", ")", ":", "return", "True", "return", "bool", "(", "fullmatch", "(", "_FIRST_GROUP_ONLY_PREFIX_PATTERN", ",", "national_prefix_formatting_rule", ")", ")" ]
helper function to check if the national prefix formatting rule has the first group only .
train
false
9,220
def _sqrt_mod_tonelli_shanks(a, p):
    s = trailing((p - 1))
    t = (p >> s)
    while 1:
        d = randint(2, (p - 1))
        r = legendre_symbol(d, p)
        if (r == (-1)):
            break
    A = pow(a, t, p)
    D = pow(d, t, p)
    m = 0
    for i in range(s):
        adm = ((A * pow(D, m, p)) % p)
        adm = pow(adm, (2 ** ((s - 1) - i)), p)
        if ((adm % p) == (p - 1)):
            m += (2 ** i)
    x = ((pow(a, ((t + 1) // 2), p) * pow(D, (m // 2), p)) % p)
    return x
[ "def", "_sqrt_mod_tonelli_shanks", "(", "a", ",", "p", ")", ":", "s", "=", "trailing", "(", "(", "p", "-", "1", ")", ")", "t", "=", "(", "p", ">>", "s", ")", "while", "1", ":", "d", "=", "randint", "(", "2", ",", "(", "p", "-", "1", ")", ")", "r", "=", "legendre_symbol", "(", "d", ",", "p", ")", "if", "(", "r", "==", "(", "-", "1", ")", ")", ":", "break", "A", "=", "pow", "(", "a", ",", "t", ",", "p", ")", "D", "=", "pow", "(", "d", ",", "t", ",", "p", ")", "m", "=", "0", "for", "i", "in", "range", "(", "s", ")", ":", "adm", "=", "(", "(", "A", "*", "pow", "(", "D", ",", "m", ",", "p", ")", ")", "%", "p", ")", "adm", "=", "pow", "(", "adm", ",", "(", "2", "**", "(", "(", "s", "-", "1", ")", "-", "i", ")", ")", ",", "p", ")", "if", "(", "(", "adm", "%", "p", ")", "==", "(", "p", "-", "1", ")", ")", ":", "m", "+=", "(", "2", "**", "i", ")", "x", "=", "(", "(", "pow", "(", "a", ",", "(", "(", "t", "+", "1", ")", "//", "2", ")", ",", "p", ")", "*", "pow", "(", "D", ",", "(", "m", "//", "2", ")", ",", "p", ")", ")", "%", "p", ")", "return", "x" ]
returns the square root in the case of p prime with p == 1 references .
train
false
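Worked example (values assumed): 10 is a quadratic residue mod 13, and the function returns one of its two square roots; which one depends on the randomly chosen non-residue d.
    _sqrt_mod_tonelli_shanks(10, 13)  # -> 6 or 7, since 6*6 % 13 == 7*7 % 13 == 10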
9,221
def bc_dist(expr):
    (factor, mat) = expr.as_coeff_mmul()
    if ((factor != 1) and isinstance(unpack(mat), BlockMatrix)):
        B = unpack(mat).blocks
        return BlockMatrix([[(factor * B[(i, j)]) for j in range(B.cols)] for i in range(B.rows)])
    return expr
[ "def", "bc_dist", "(", "expr", ")", ":", "(", "factor", ",", "mat", ")", "=", "expr", ".", "as_coeff_mmul", "(", ")", "if", "(", "(", "factor", "!=", "1", ")", "and", "isinstance", "(", "unpack", "(", "mat", ")", ",", "BlockMatrix", ")", ")", ":", "B", "=", "unpack", "(", "mat", ")", ".", "blocks", "return", "BlockMatrix", "(", "[", "[", "(", "factor", "*", "B", "[", "(", "i", ",", "j", ")", "]", ")", "for", "j", "in", "range", "(", "B", ".", "cols", ")", "]", "for", "i", "in", "range", "(", "B", ".", "rows", ")", "]", ")", "return", "expr" ]
turn a*[x .
train
false
9,223
def geohash(latitude, longitude, datedow):
    h = hashlib.md5(datedow).hexdigest()
    (p, q) = [('%f' % float.fromhex(('0.' + x))) for x in (h[:16], h[16:32])]
    print ('%d%s %d%s' % (latitude, p[1:], longitude, q[1:]))
[ "def", "geohash", "(", "latitude", ",", "longitude", ",", "datedow", ")", ":", "h", "=", "hashlib", ".", "md5", "(", "datedow", ")", ".", "hexdigest", "(", ")", "(", "p", ",", "q", ")", "=", "[", "(", "'%f'", "%", "float", ".", "fromhex", "(", "(", "'0.'", "+", "x", ")", ")", ")", "for", "x", "in", "(", "h", "[", ":", "16", "]", ",", "h", "[", "16", ":", "32", "]", ")", "]", "print", "(", "'%d%s %d%s'", "%", "(", "latitude", ",", "p", "[", "1", ":", "]", ",", "longitude", ",", "q", "[", "1", ":", "]", ")", ")" ]
compute geohash() using the munroe algorithm .
train
false
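This is the xkcd-426 "geohashing" construction: the MD5 digest of the date/Dow-opening string is split into two 64-bit halves, each read as a hexadecimal fraction supplying the coordinate decimals. Illustrative call (argument value assumed; under Python 3 the input must be bytes):
    geohash(37, -122, b'2005-05-26-10458.68')
    # prints '37.<fraction of first half> -122.<fraction of second half>'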
9,226
def new(rsa_key): return PKCS115_SigScheme(rsa_key)
[ "def", "new", "(", "rsa_key", ")", ":", "return", "PKCS115_SigScheme", "(", "rsa_key", ")" ]
create a new rc2 cipher .
train
false
9,227
def sparse_test_class(getset=True, slicing=True, slicing_assign=True,
                      fancy_indexing=True, fancy_assign=True,
                      fancy_multidim_indexing=True, fancy_multidim_assign=True,
                      minmax=True, nnz_axis=True):
    bases = (_TestCommon,
             _possibly_unimplemented(_TestGetSet, getset),
             _TestSolve,
             _TestInplaceArithmetic,
             _TestArithmetic,
             _possibly_unimplemented(_TestSlicing, slicing),
             _possibly_unimplemented(_TestSlicingAssign, slicing_assign),
             _possibly_unimplemented(_TestFancyIndexing, fancy_indexing),
             _possibly_unimplemented(_TestFancyIndexingAssign, fancy_assign),
             _possibly_unimplemented(_TestFancyMultidim, (fancy_indexing and fancy_multidim_indexing)),
             _possibly_unimplemented(_TestFancyMultidimAssign, (fancy_multidim_assign and fancy_assign)),
             _possibly_unimplemented(_TestMinMax, minmax),
             _possibly_unimplemented(_TestGetNnzAxis, nnz_axis))
    names = {}
    for cls in bases:
        for name in cls.__dict__:
            if (not name.startswith('test_')):
                continue
            old_cls = names.get(name)
            if (old_cls is not None):
                raise ValueError(('Test class %s overloads test %s defined in %s' % (cls.__name__, name, old_cls.__name__)))
            names[name] = cls
    return type('TestBase', bases, {})
[ "def", "sparse_test_class", "(", "getset", "=", "True", ",", "slicing", "=", "True", ",", "slicing_assign", "=", "True", ",", "fancy_indexing", "=", "True", ",", "fancy_assign", "=", "True", ",", "fancy_multidim_indexing", "=", "True", ",", "fancy_multidim_assign", "=", "True", ",", "minmax", "=", "True", ",", "nnz_axis", "=", "True", ")", ":", "bases", "=", "(", "_TestCommon", ",", "_possibly_unimplemented", "(", "_TestGetSet", ",", "getset", ")", ",", "_TestSolve", ",", "_TestInplaceArithmetic", ",", "_TestArithmetic", ",", "_possibly_unimplemented", "(", "_TestSlicing", ",", "slicing", ")", ",", "_possibly_unimplemented", "(", "_TestSlicingAssign", ",", "slicing_assign", ")", ",", "_possibly_unimplemented", "(", "_TestFancyIndexing", ",", "fancy_indexing", ")", ",", "_possibly_unimplemented", "(", "_TestFancyIndexingAssign", ",", "fancy_assign", ")", ",", "_possibly_unimplemented", "(", "_TestFancyMultidim", ",", "(", "fancy_indexing", "and", "fancy_multidim_indexing", ")", ")", ",", "_possibly_unimplemented", "(", "_TestFancyMultidimAssign", ",", "(", "fancy_multidim_assign", "and", "fancy_assign", ")", ")", ",", "_possibly_unimplemented", "(", "_TestMinMax", ",", "minmax", ")", ",", "_possibly_unimplemented", "(", "_TestGetNnzAxis", ",", "nnz_axis", ")", ")", "names", "=", "{", "}", "for", "cls", "in", "bases", ":", "for", "name", "in", "cls", ".", "__dict__", ":", "if", "(", "not", "name", ".", "startswith", "(", "'test_'", ")", ")", ":", "continue", "old_cls", "=", "names", ".", "get", "(", "name", ")", "if", "(", "old_cls", "is", "not", "None", ")", ":", "raise", "ValueError", "(", "(", "'Test class %s overloads test %s defined in %s'", "%", "(", "cls", ".", "__name__", ",", "name", ",", "old_cls", ".", "__name__", ")", ")", ")", "names", "[", "name", "]", "=", "cls", "return", "type", "(", "'TestBase'", ",", "bases", ",", "{", "}", ")" ]
construct a base class .
train
false
9,228
def set_dhcp_all(iface):
    set_dhcp_ip(iface)
    set_dhcp_dns(iface)
    return {'Interface': iface, 'DNS Server': 'DHCP', 'DHCP enabled': 'Yes'}
[ "def", "set_dhcp_all", "(", "iface", ")", ":", "set_dhcp_ip", "(", "iface", ")", "set_dhcp_dns", "(", "iface", ")", "return", "{", "'Interface'", ":", "iface", ",", "'DNS Server'", ":", "'DHCP'", ",", "'DHCP enabled'", ":", "'Yes'", "}" ]
set both ip address and dns to dhcp cli example: .
train
false
9,230
def render_to_kmz(*args, **kwargs): return HttpResponse(compress_kml(loader.render_to_string(*args, **kwargs)), mimetype='application/vnd.google-earth.kmz')
[ "def", "render_to_kmz", "(", "*", "args", ",", "**", "kwargs", ")", ":", "return", "HttpResponse", "(", "compress_kml", "(", "loader", ".", "render_to_string", "(", "*", "args", ",", "**", "kwargs", ")", ")", ",", "mimetype", "=", "'application/vnd.google-earth.kmz'", ")" ]
compresses the kml content and returns as kmz .
train
false
9,231
def get_random_port(socket_type='all', min_port=5000, max_port=60000):
    assert (socket_type in ('all', 'tcp', 'udp')), ('Invalid socket type %s' % type(socket_type))
    assert isinstance(min_port, int), ('Invalid min_port type %s' % type(min_port))
    assert isinstance(max_port, int), ('Invalid max_port type %s' % type(max_port))
    assert (0 < min_port <= max_port <= 65535), ('Invalid min_port and mac_port values %s, %s' % (min_port, max_port))
    working_port = None
    try_port = random.randint(min_port, max_port)
    while (try_port <= 65535):
        if check_random_port(try_port, socket_type):
            working_port = try_port
            break
        try_port += 1
    logger.debug('Got a working random port %s', working_port)
    return working_port
[ "def", "get_random_port", "(", "socket_type", "=", "'all'", ",", "min_port", "=", "5000", ",", "max_port", "=", "60000", ")", ":", "assert", "(", "socket_type", "in", "(", "'all'", ",", "'tcp'", ",", "'udp'", ")", ")", ",", "(", "'Invalid socket type %s'", "%", "type", "(", "socket_type", ")", ")", "assert", "isinstance", "(", "min_port", ",", "int", ")", ",", "(", "'Invalid min_port type %s'", "%", "type", "(", "min_port", ")", ")", "assert", "isinstance", "(", "max_port", ",", "int", ")", ",", "(", "'Invalid max_port type %s'", "%", "type", "(", "max_port", ")", ")", "assert", "(", "0", "<", "min_port", "<=", "max_port", "<=", "65535", ")", ",", "(", "'Invalid min_port and mac_port values %s, %s'", "%", "(", "min_port", ",", "max_port", ")", ")", "working_port", "=", "None", "try_port", "=", "random", ".", "randint", "(", "min_port", ",", "max_port", ")", "while", "(", "try_port", "<=", "65535", ")", ":", "if", "check_random_port", "(", "try_port", ",", "socket_type", ")", ":", "working_port", "=", "try_port", "break", "try_port", "+=", "1", "logger", ".", "debug", "(", "'Got a working random port %s'", ",", "working_port", ")", "return", "working_port" ]
gets a random port number that works .
train
false
9,232
def invert_dict(d):
    if isinstance(d, dict):
        temp = d
    else:
        temp = dict(d)
    result = {}
    for (key, val) in temp.iteritems():
        if (val not in result):
            result[val] = []
        result[val].append(key)
    return result
[ "def", "invert_dict", "(", "d", ")", ":", "if", "isinstance", "(", "d", ",", "dict", ")", ":", "temp", "=", "d", "else", ":", "temp", "=", "dict", "(", "d", ")", "result", "=", "{", "}", "for", "(", "key", ",", "val", ")", "in", "temp", ".", "iteritems", "(", ")", ":", "if", "(", "val", "not", "in", "result", ")", ":", "result", "[", "val", "]", "=", "[", "]", "result", "[", "val", "]", ".", "append", "(", "key", ")", "return", "result" ]
returns inverse of d .
train
false
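Example (illustrative; Python 2, since the function uses iteritems, and list order follows dict iteration order):
    invert_dict({'a': 1, 'b': 1, 'c': 2})  # -> {1: ['a', 'b'], 2: ['c']}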
9,236
def alert_response():
    alert_id = get_vars.get('alert_id')
    if alert_id:
        table = s3db.deploy_response
        f = table.alert_id
        f.readable = f.writable = False
        f.default = alert_id
        atable = s3db.deploy_alert
        alert = db((atable.id == alert_id)).select(atable.mission_id, limitby=(0, 1)).first()
        if alert:
            f = table.mission_id
            f.readable = f.writable = False
            f.default = alert.mission_id
        human_resource_id = auth.s3_logged_in_human_resource()
        if human_resource_id:
            f = table.human_resource_id_id
            f.readable = f.writable = False
            f.default = alert_id
        table.message_id.readable = False
    return s3_rest_controller('deploy', 'response')
[ "def", "alert_response", "(", ")", ":", "alert_id", "=", "get_vars", ".", "get", "(", "'alert_id'", ")", "if", "alert_id", ":", "table", "=", "s3db", ".", "deploy_response", "f", "=", "table", ".", "alert_id", "f", ".", "readable", "=", "f", ".", "writable", "=", "False", "f", ".", "default", "=", "alert_id", "atable", "=", "s3db", ".", "deploy_alert", "alert", "=", "db", "(", "(", "atable", ".", "id", "==", "alert_id", ")", ")", ".", "select", "(", "atable", ".", "mission_id", ",", "limitby", "=", "(", "0", ",", "1", ")", ")", ".", "first", "(", ")", "if", "alert", ":", "f", "=", "table", ".", "mission_id", "f", ".", "readable", "=", "f", ".", "writable", "=", "False", "f", ".", "default", "=", "alert", ".", "mission_id", "human_resource_id", "=", "auth", ".", "s3_logged_in_human_resource", "(", ")", "if", "human_resource_id", ":", "f", "=", "table", ".", "human_resource_id_id", "f", ".", "readable", "=", "f", ".", "writable", "=", "False", "f", ".", "default", "=", "alert_id", "table", ".", "message_id", ".", "readable", "=", "False", "return", "s3_rest_controller", "(", "'deploy'", ",", "'response'", ")" ]
restful crud controller - used to allow rit memebers to apply for positions @todo: block all methods but create => what next_url? .
train
false
9,237
def header_string(headers_dict):
    header_list = []
    if ('Content-Type' in headers_dict):
        header_list.append((headers_dict['Content-Type'] + '\n'))
    if ('Date' in headers_dict):
        header_list.append((headers_dict['Date'] + '\n'))
    if ('Content-MD5' in headers_dict):
        header_list.append((headers_dict['Content-MD5'] + '\n'))
    return ''.join(header_list)
[ "def", "header_string", "(", "headers_dict", ")", ":", "header_list", "=", "[", "]", "if", "(", "'Content-Type'", "in", "headers_dict", ")", ":", "header_list", ".", "append", "(", "(", "headers_dict", "[", "'Content-Type'", "]", "+", "'\\n'", ")", ")", "if", "(", "'Date'", "in", "headers_dict", ")", ":", "header_list", ".", "append", "(", "(", "headers_dict", "[", "'Date'", "]", "+", "'\\n'", ")", ")", "if", "(", "'Content-MD5'", "in", "headers_dict", ")", ":", "header_list", ".", "append", "(", "(", "headers_dict", "[", "'Content-MD5'", "]", "+", "'\\n'", ")", ")", "return", "''", ".", "join", "(", "header_list", ")" ]
given a dictionary of headers .
train
false
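Example (illustrative values): only the three recognized headers are emitted, each newline-terminated, in a fixed order.
    header_string({'Date': 'Tue, 01 Jan 2019 00:00:00 GMT', 'Content-Type': 'text/plain'})
    # -> 'text/plain\nTue, 01 Jan 2019 00:00:00 GMT\n'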
9,238
@bdd.when('the documentation is up to date')
def update_documentation():
    base_path = os.path.dirname(os.path.abspath(qutebrowser.__file__))
    doc_path = os.path.join(base_path, 'html', 'doc')
    script_path = os.path.join(base_path, '..', 'scripts')
    if (not os.path.exists(doc_path)):
        return
    if all((docutils.docs_up_to_date(p) for p in os.listdir(doc_path))):
        return
    try:
        subprocess.call(['asciidoc'], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    except OSError:
        pytest.skip('Docs outdated and asciidoc unavailable!')
    update_script = os.path.join(script_path, 'asciidoc2html.py')
    subprocess.call([sys.executable, update_script])
[ "@", "bdd", ".", "when", "(", "'the documentation is up to date'", ")", "def", "update_documentation", "(", ")", ":", "base_path", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "qutebrowser", ".", "__file__", ")", ")", "doc_path", "=", "os", ".", "path", ".", "join", "(", "base_path", ",", "'html'", ",", "'doc'", ")", "script_path", "=", "os", ".", "path", ".", "join", "(", "base_path", ",", "'..'", ",", "'scripts'", ")", "if", "(", "not", "os", ".", "path", ".", "exists", "(", "doc_path", ")", ")", ":", "return", "if", "all", "(", "(", "docutils", ".", "docs_up_to_date", "(", "p", ")", "for", "p", "in", "os", ".", "listdir", "(", "doc_path", ")", ")", ")", ":", "return", "try", ":", "subprocess", ".", "call", "(", "[", "'asciidoc'", "]", ",", "stdout", "=", "subprocess", ".", "DEVNULL", ",", "stderr", "=", "subprocess", ".", "DEVNULL", ")", "except", "OSError", ":", "pytest", ".", "skip", "(", "'Docs outdated and asciidoc unavailable!'", ")", "update_script", "=", "os", ".", "path", ".", "join", "(", "script_path", ",", "'asciidoc2html.py'", ")", "subprocess", ".", "call", "(", "[", "sys", ".", "executable", ",", "update_script", "]", ")" ]
update the docs before testing :help .
train
false
9,239
def join_ipv4_segments(segments): return '.'.join([str(s) for s in segments])
[ "def", "join_ipv4_segments", "(", "segments", ")", ":", "return", "'.'", ".", "join", "(", "[", "str", "(", "s", ")", "for", "s", "in", "segments", "]", ")" ]
helper method to join ip numeric segment pieces back into a full ip address .
train
false
9,242
def pack4(v):
    assert (0 <= v <= 4294967295)
    return struct.pack('<I', v)
[ "def", "pack4", "(", "v", ")", ":", "assert", "(", "0", "<=", "v", "<=", "4294967295", ")", "return", "struct", ".", "pack", "(", "'<I'", ",", "v", ")" ]
takes a 32 bit integer and returns a 4 byte string representing the number in little endian .
train
false
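Example (illustrative):
    pack4(1)  # -> b'\x01\x00\x00\x00' (little-endian unsigned 32-bit)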
9,243
@ioflo.base.deeding.deedify('SaltZmqRetFork',
                            ioinits={'opts': '.salt.opts',
                                     'proc_mgr': '.salt.usr.proc_mgr',
                                     'mkey': '.salt.var.zmq.master_key',
                                     'aes': '.salt.var.zmq.aes'})
def zmq_ret_fork(self):
    self.proc_mgr.value.add_process(ZmqRet, args=(self.opts.value, self.mkey.value, self.aes.value))
[ "@", "ioflo", ".", "base", ".", "deeding", ".", "deedify", "(", "'SaltZmqRetFork'", ",", "ioinits", "=", "{", "'opts'", ":", "'.salt.opts'", ",", "'proc_mgr'", ":", "'.salt.usr.proc_mgr'", ",", "'mkey'", ":", "'.salt.var.zmq.master_key'", ",", "'aes'", ":", "'.salt.var.zmq.aes'", "}", ")", "def", "zmq_ret_fork", "(", "self", ")", ":", "self", ".", "proc_mgr", ".", "value", ".", "add_process", "(", "ZmqRet", ",", "args", "=", "(", "self", ".", "opts", ".", "value", ",", "self", ".", "mkey", ".", "value", ",", "self", ".", "aes", ".", "value", ")", ")" ]
create the forked process for the zeromq ret port .
train
false
9,244
def run_experiment(): return [(random.random() < 0.5) for _ in range(1000)]
[ "def", "run_experiment", "(", ")", ":", "return", "[", "(", "random", ".", "random", "(", ")", "<", "0.5", ")", "for", "_", "in", "range", "(", "1000", ")", "]" ]
flip a fair coin 1000 times .
train
false
9,245
def from_utc(utcTime, fmt=None):
    if (fmt is None):
        try_formats = ['%Y-%m-%d %H:%M:%S.%f', '%Y-%m-%d %H:%M:%S']
    else:
        try_formats = [fmt]
    for fmt in try_formats:
        try:
            time_struct = datetime.datetime.strptime(utcTime, fmt)
        except ValueError:
            pass
        else:
            date = int(time.mktime(time_struct.timetuple()))
            return date
    else:
        raise ValueError('No UTC format matches {}'.format(utcTime))
[ "def", "from_utc", "(", "utcTime", ",", "fmt", "=", "None", ")", ":", "if", "(", "fmt", "is", "None", ")", ":", "try_formats", "=", "[", "'%Y-%m-%d %H:%M:%S.%f'", ",", "'%Y-%m-%d %H:%M:%S'", "]", "else", ":", "try_formats", "=", "[", "fmt", "]", "for", "fmt", "in", "try_formats", ":", "try", ":", "time_struct", "=", "datetime", ".", "datetime", ".", "strptime", "(", "utcTime", ",", "fmt", ")", "except", "ValueError", ":", "pass", "else", ":", "date", "=", "int", "(", "time", ".", "mktime", "(", "time_struct", ".", "timetuple", "(", ")", ")", ")", "return", "date", "else", ":", "raise", "ValueError", "(", "'No UTC format matches {}'", ".", "format", "(", "utcTime", ")", ")" ]
convert utc time string to time .
train
true
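Note that despite the name, time.mktime interprets the parsed struct as local time, so the returned epoch value depends on the machine's timezone. Example (illustrative; exact result is timezone-dependent):
    from_utc('2015-06-01 12:00:00')  # -> int epoch seconds, e.g. 1433160000 on a UTC machine
    from_utc('not a date')           # -> ValueError('No UTC format matches not a date')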
9,246
def KindPathFromKey(key):
    path = key.to_path()
    kinds = []
    is_kind = True
    for item in path:
        if is_kind:
            kinds.append(item)
        is_kind = (not is_kind)
    kind_path = '/'.join(kinds)
    return kind_path
[ "def", "KindPathFromKey", "(", "key", ")", ":", "path", "=", "key", ".", "to_path", "(", ")", "kinds", "=", "[", "]", "is_kind", "=", "True", "for", "item", "in", "path", ":", "if", "is_kind", ":", "kinds", ".", "append", "(", "item", ")", "is_kind", "=", "(", "not", "is_kind", ")", "kind_path", "=", "'/'", ".", "join", "(", "kinds", ")", "return", "kind_path" ]
return kinds path as /-delimited string for a particular key .
train
false
9,247
def groupinstall(group, options=None):
    manager = MANAGER
    if (options is None):
        options = []
    elif isinstance(options, str):
        options = [options]
    options = ' '.join(options)
    run_as_root(('%(manager)s %(options)s groupinstall "%(group)s"' % locals()), pty=False)
[ "def", "groupinstall", "(", "group", ",", "options", "=", "None", ")", ":", "manager", "=", "MANAGER", "if", "(", "options", "is", "None", ")", ":", "options", "=", "[", "]", "elif", "isinstance", "(", "options", ",", "str", ")", ":", "options", "=", "[", "options", "]", "options", "=", "' '", ".", "join", "(", "options", ")", "run_as_root", "(", "(", "'%(manager)s %(options)s groupinstall \"%(group)s\"'", "%", "locals", "(", ")", ")", ",", "pty", "=", "False", ")" ]
install a group of packages .
train
true
9,248
def check_has_write_access_permission(view_func):
    def decorate(request, *args, **kwargs):
        if (not has_write_access(request.user)):
            raise PopupException(_('You are not allowed to modify the metastore.'),
                                 detail=_('You have must have metastore:write permissions'),
                                 error_code=301)
        return view_func(request, *args, **kwargs)
    return wraps(view_func)(decorate)
[ "def", "check_has_write_access_permission", "(", "view_func", ")", ":", "def", "decorate", "(", "request", ",", "*", "args", ",", "**", "kwargs", ")", ":", "if", "(", "not", "has_write_access", "(", "request", ".", "user", ")", ")", ":", "raise", "PopupException", "(", "_", "(", "'You are not allowed to modify the metastore.'", ")", ",", "detail", "=", "_", "(", "'You have must have metastore:write permissions'", ")", ",", "error_code", "=", "301", ")", "return", "view_func", "(", "request", ",", "*", "args", ",", "**", "kwargs", ")", "return", "wraps", "(", "view_func", ")", "(", "decorate", ")" ]
decorator ensuring that the user is not a read only user .
train
false
9,251
def get_soql_fields(soql):
    soql_fields = re.search('(?<=select)(?s)(.*)(?=from)', soql, re.IGNORECASE)
    soql_fields = re.sub(' ', '', soql_fields.group())
    soql_fields = re.sub(' DCTB ', '', soql_fields)
    fields = re.split(',|\n|\r|', soql_fields)
    fields = [field for field in fields if (field != '')]
    return fields
[ "def", "get_soql_fields", "(", "soql", ")", ":", "soql_fields", "=", "re", ".", "search", "(", "'(?<=select)(?s)(.*)(?=from)'", ",", "soql", ",", "re", ".", "IGNORECASE", ")", "soql_fields", "=", "re", ".", "sub", "(", "' '", ",", "''", ",", "soql_fields", ".", "group", "(", ")", ")", "soql_fields", "=", "re", ".", "sub", "(", "' DCTB '", ",", "''", ",", "soql_fields", ")", "fields", "=", "re", ".", "split", "(", "',|\\n|\\r|'", ",", "soql_fields", ")", "fields", "=", "[", "field", "for", "field", "in", "fields", "if", "(", "field", "!=", "''", ")", "]", "return", "fields" ]
gets queried columns names .
train
true
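Example (illustrative query; ' DCTB ' is this dataset's placeholder for a tab character, and the code targets Python 2 regex semantics, where the pattern's trailing empty alternative is ignored by re.split):
    get_soql_fields('SELECT Id, Name FROM Account')  # -> ['Id', 'Name']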
9,252
def multi_filter_str(flt):
    assert hasattr(flt, 'filters'), 'Conditional filter required'
    (yield name(flt))
[ "def", "multi_filter_str", "(", "flt", ")", ":", "assert", "hasattr", "(", "flt", ",", "'filters'", ")", ",", "'Conditional filter required'", "(", "yield", "name", "(", "flt", ")", ")" ]
yield readable conditional filter .
train
false
9,253
def isFinalResult(result):
    logger.log((u"Checking if we should keep searching after we've found " + result.name), logger.DEBUG)
    show_obj = result.episodes[0].show
    (any_qualities, best_qualities) = Quality.splitQuality(show_obj.quality)
    if (best_qualities and (result.quality < max(best_qualities))):
        return False
    elif (any_qualities and (result.quality == max(any_qualities))):
        return True
    elif (best_qualities and (result.quality == max(best_qualities))):
        if (any_qualities and (result.quality < max(any_qualities))):
            return False
        else:
            return True
    else:
        return False
[ "def", "isFinalResult", "(", "result", ")", ":", "logger", ".", "log", "(", "(", "u\"Checking if we should keep searching after we've found \"", "+", "result", ".", "name", ")", ",", "logger", ".", "DEBUG", ")", "show_obj", "=", "result", ".", "episodes", "[", "0", "]", ".", "show", "(", "any_qualities", ",", "best_qualities", ")", "=", "Quality", ".", "splitQuality", "(", "show_obj", ".", "quality", ")", "if", "(", "best_qualities", "and", "(", "result", ".", "quality", "<", "max", "(", "best_qualities", ")", ")", ")", ":", "return", "False", "elif", "(", "any_qualities", "and", "(", "result", ".", "quality", "==", "max", "(", "any_qualities", ")", ")", ")", ":", "return", "True", "elif", "(", "best_qualities", "and", "(", "result", ".", "quality", "==", "max", "(", "best_qualities", ")", ")", ")", ":", "if", "(", "any_qualities", "and", "(", "result", ".", "quality", "<", "max", "(", "any_qualities", ")", ")", ")", ":", "return", "False", "else", ":", "return", "True", "else", ":", "return", "False" ]
checks if the given result is good enough quality that we can stop searching for other ones .
train
false
9,254
def mark_for_escaping(s):
    if isinstance(s, (SafeData, EscapeData)):
        return s
    if (isinstance(s, bytes) or (isinstance(s, Promise) and s._delegate_bytes)):
        return EscapeBytes(s)
    if isinstance(s, (six.text_type, Promise)):
        return EscapeText(s)
    return EscapeBytes(bytes(s))
[ "def", "mark_for_escaping", "(", "s", ")", ":", "if", "isinstance", "(", "s", ",", "(", "SafeData", ",", "EscapeData", ")", ")", ":", "return", "s", "if", "(", "isinstance", "(", "s", ",", "bytes", ")", "or", "(", "isinstance", "(", "s", ",", "Promise", ")", "and", "s", ".", "_delegate_bytes", ")", ")", ":", "return", "EscapeBytes", "(", "s", ")", "if", "isinstance", "(", "s", ",", "(", "six", ".", "text_type", ",", "Promise", ")", ")", ":", "return", "EscapeText", "(", "s", ")", "return", "EscapeBytes", "(", "bytes", "(", "s", ")", ")" ]
explicitly mark a string as requiring html escaping upon output .
train
false
9,255
def conference_submissions(**kwargs): submissions = [] for conf in Conference.find(): if (hasattr(conf, 'is_meeting') and (conf.is_meeting is False)): continue projects = set() tags = Tag.find(Q('name', 'iexact', conf.endpoint.lower())).values_list('pk', flat=True) nodes = Node.find(((Q('tags', 'in', tags) & Q('is_public', 'eq', True)) & Q('is_deleted', 'ne', True))) projects.update(list(nodes)) for (idx, node) in enumerate(projects): submissions.append(_render_conference_node(node, idx, conf)) num_submissions = len(projects) conf.num_submissions = num_submissions conf.save() if (num_submissions < settings.CONFERENCE_MIN_COUNT): continue submissions.sort(key=(lambda submission: submission['dateCreated']), reverse=True) return {'submissions': submissions}
[ "def", "conference_submissions", "(", "**", "kwargs", ")", ":", "submissions", "=", "[", "]", "for", "conf", "in", "Conference", ".", "find", "(", ")", ":", "if", "(", "hasattr", "(", "conf", ",", "'is_meeting'", ")", "and", "(", "conf", ".", "is_meeting", "is", "False", ")", ")", ":", "continue", "projects", "=", "set", "(", ")", "tags", "=", "Tag", ".", "find", "(", "Q", "(", "'name'", ",", "'iexact'", ",", "conf", ".", "endpoint", ".", "lower", "(", ")", ")", ")", ".", "values_list", "(", "'pk'", ",", "flat", "=", "True", ")", "nodes", "=", "Node", ".", "find", "(", "(", "(", "Q", "(", "'tags'", ",", "'in'", ",", "tags", ")", "&", "Q", "(", "'is_public'", ",", "'eq'", ",", "True", ")", ")", "&", "Q", "(", "'is_deleted'", ",", "'ne'", ",", "True", ")", ")", ")", "projects", ".", "update", "(", "list", "(", "nodes", ")", ")", "for", "(", "idx", ",", "node", ")", "in", "enumerate", "(", "projects", ")", ":", "submissions", ".", "append", "(", "_render_conference_node", "(", "node", ",", "idx", ",", "conf", ")", ")", "num_submissions", "=", "len", "(", "projects", ")", "conf", ".", "num_submissions", "=", "num_submissions", "conf", ".", "save", "(", ")", "if", "(", "num_submissions", "<", "settings", ".", "CONFERENCE_MIN_COUNT", ")", ":", "continue", "submissions", ".", "sort", "(", "key", "=", "(", "lambda", "submission", ":", "submission", "[", "'dateCreated'", "]", ")", ",", "reverse", "=", "True", ")", "return", "{", "'submissions'", ":", "submissions", "}" ]
return data for all osf4m submissions .
train
false
9,256
def scrub_comments(src_text): pattern_text = ((multi_line_comment + u'|') + single_line_comment) pattern = re.compile(pattern_text, (re.M | re.S)) scrubed_src_text = pattern.sub(u'', src_text) return scrubed_src_text
[ "def", "scrub_comments", "(", "src_text", ")", ":", "pattern_text", "=", "(", "(", "multi_line_comment", "+", "u'|'", ")", "+", "single_line_comment", ")", "pattern", "=", "re", ".", "compile", "(", "pattern_text", ",", "(", "re", ".", "M", "|", "re", ".", "S", ")", ")", "scrubed_src_text", "=", "pattern", ".", "sub", "(", "u''", ",", "src_text", ")", "return", "scrubed_src_text" ]
strip all commented portions from a given source text .
train
false
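
A hedged sketch for the record above: multi_line_comment and single_line_comment are module-level names the snippet assumes, so illustrative C-style patterns are defined here.

import re

# Hypothetical stand-ins for the module-level pattern names.
multi_line_comment = r'/\*.*?\*/'
single_line_comment = r'//[^\n]*'

src = 'int x = 1; // counter\n/* block\ncomment */\nint y = 2;'
print(scrub_comments(src))  # both comment styles are stripped
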
9,257
def rm_local_tmp_dir(path): return shutil.rmtree(path)
[ "def", "rm_local_tmp_dir", "(", "path", ")", ":", "return", "shutil", ".", "rmtree", "(", "path", ")" ]
remove a local temp directory .
train
false
9,260
def pretty_print(obj, verbose=False, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH): printer = RepresentationPrinter(sys.stdout, verbose, max_width, newline, max_seq_length=max_seq_length) printer.pretty(obj) printer.flush() sys.stdout.write(newline) sys.stdout.flush()
[ "def", "pretty_print", "(", "obj", ",", "verbose", "=", "False", ",", "max_width", "=", "79", ",", "newline", "=", "'\\n'", ",", "max_seq_length", "=", "MAX_SEQ_LENGTH", ")", ":", "printer", "=", "RepresentationPrinter", "(", "sys", ".", "stdout", ",", "verbose", ",", "max_width", ",", "newline", ",", "max_seq_length", "=", "max_seq_length", ")", "printer", ".", "pretty", "(", "obj", ")", "printer", ".", "flush", "(", ")", "sys", ".", "stdout", ".", "write", "(", "newline", ")", "sys", ".", "stdout", ".", "flush", "(", ")" ]
prints obj in pretty form to stdout .
train
false
9,263
def DumpSchema(): path = ('LDAP://%srootDSE' % server) rootdse = ADsGetObject(path) name = rootdse.Get('schemaNamingContext') path = (('LDAP://' + server) + name) print 'Binding to', path ob = ADsGetObject(path) nclasses = nattr = nsub = nunk = 0 for child in ob: class_name = child.Class if (class_name == 'classSchema'): _DumpClass(child) nclasses = (nclasses + 1) elif (class_name == 'attributeSchema'): _DumpAttribute(child) nattr = (nattr + 1) elif (class_name == 'subSchema'): nsub = (nsub + 1) else: print 'Unknown class:', class_name nunk = (nunk + 1) if verbose_level: print 'Processed', nclasses, 'classes' print 'Processed', nattr, 'attributes' print 'Processed', nsub, "sub-schema's" print 'Processed', nunk, 'unknown types'
[ "def", "DumpSchema", "(", ")", ":", "path", "=", "(", "'LDAP://%srootDSE'", "%", "server", ")", "rootdse", "=", "ADsGetObject", "(", "path", ")", "name", "=", "rootdse", ".", "Get", "(", "'schemaNamingContext'", ")", "path", "=", "(", "(", "'LDAP://'", "+", "server", ")", "+", "name", ")", "print", "'Binding to'", ",", "path", "ob", "=", "ADsGetObject", "(", "path", ")", "nclasses", "=", "nattr", "=", "nsub", "=", "nunk", "=", "0", "for", "child", "in", "ob", ":", "class_name", "=", "child", ".", "Class", "if", "(", "class_name", "==", "'classSchema'", ")", ":", "_DumpClass", "(", "child", ")", "nclasses", "=", "(", "nclasses", "+", "1", ")", "elif", "(", "class_name", "==", "'attributeSchema'", ")", ":", "_DumpAttribute", "(", "child", ")", "nattr", "=", "(", "nattr", "+", "1", ")", "elif", "(", "class_name", "==", "'subSchema'", ")", ":", "nsub", "=", "(", "nsub", "+", "1", ")", "else", ":", "print", "'Unknown class:'", ",", "class_name", "nunk", "=", "(", "nunk", "+", "1", ")", "if", "verbose_level", ":", "print", "'Processed'", ",", "nclasses", ",", "'classes'", "print", "'Processed'", ",", "nattr", ",", "'attributes'", "print", "'Processed'", ",", "nsub", ",", "\"sub-schema's\"", "print", "'Processed'", ",", "nunk", ",", "'unknown types'" ]
dumps the default dse schema .
train
false
9,264
def fixture(scope='function', params=None, autouse=False, ids=None, name=None): if (callable(scope) and (params is None) and (autouse == False)): return FixtureFunctionMarker('function', params, autouse, name=name)(scope) if ((params is not None) and (not isinstance(params, (list, tuple)))): params = list(params) return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name)
[ "def", "fixture", "(", "scope", "=", "'function'", ",", "params", "=", "None", ",", "autouse", "=", "False", ",", "ids", "=", "None", ",", "name", "=", "None", ")", ":", "if", "(", "callable", "(", "scope", ")", "and", "(", "params", "is", "None", ")", "and", "(", "autouse", "==", "False", ")", ")", ":", "return", "FixtureFunctionMarker", "(", "'function'", ",", "params", ",", "autouse", ",", "name", "=", "name", ")", "(", "scope", ")", "if", "(", "(", "params", "is", "not", "None", ")", "and", "(", "not", "isinstance", "(", "params", ",", "(", "list", ",", "tuple", ")", ")", ")", ")", ":", "params", "=", "list", "(", "params", ")", "return", "FixtureFunctionMarker", "(", "scope", ",", "params", ",", "autouse", ",", "ids", "=", "ids", ",", "name", "=", "name", ")" ]
decorator to mark a fixture factory function .
train
false
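
The decorator above is pytest's public fixture API; a standard usage example (fixture and test names are illustrative):

import pytest

@pytest.fixture(params=[2, 3])
def base(request):
    # each param yields a separate test invocation
    return request.param

def test_square(base):
    assert base ** 2 == base * base
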
9,265
def naturalSeq(t): return [natural(x) for x in t]
[ "def", "naturalSeq", "(", "t", ")", ":", "return", "[", "natural", "(", "x", ")", "for", "x", "in", "t", "]" ]
natural sort key function for sequences .
train
false
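
A sketch for the record above; natural() is assumed to be a module-level helper, so a minimal stand-in is defined here.

import re

def natural(s):
    # hypothetical stand-in: split out digit runs so they compare numerically
    return [int(p) if p.isdigit() else p for p in re.split(r'(\d+)', s)]

pairs = [('file10', 'a'), ('file2', 'b')]
print(sorted(pairs, key=naturalSeq))  # [('file2', 'b'), ('file10', 'a')]
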
9,266
def _get_list_table_columns_and_formatters(fields, objs, exclude_fields=(), filters=None): if (not fields): return ([], {}) if (not objs): obj = None elif isinstance(objs, list): obj = objs[0] else: obj = objs columns = [] formatters = {} non_existent_fields = [] exclude_fields = set(exclude_fields) for field in fields.split(','): if (not hasattr(obj, field)): non_existent_fields.append(field) continue if (field in exclude_fields): continue (field_title, formatter) = utils.make_field_formatter(field, filters) columns.append(field_title) formatters[field_title] = formatter exclude_fields.add(field) if non_existent_fields: raise exceptions.CommandError((_('Non-existent fields are specified: %s') % non_existent_fields)) return (columns, formatters)
[ "def", "_get_list_table_columns_and_formatters", "(", "fields", ",", "objs", ",", "exclude_fields", "=", "(", ")", ",", "filters", "=", "None", ")", ":", "if", "(", "not", "fields", ")", ":", "return", "(", "[", "]", ",", "{", "}", ")", "if", "(", "not", "objs", ")", ":", "obj", "=", "None", "elif", "isinstance", "(", "objs", ",", "list", ")", ":", "obj", "=", "objs", "[", "0", "]", "else", ":", "obj", "=", "objs", "columns", "=", "[", "]", "formatters", "=", "{", "}", "non_existent_fields", "=", "[", "]", "exclude_fields", "=", "set", "(", "exclude_fields", ")", "for", "field", "in", "fields", ".", "split", "(", "','", ")", ":", "if", "(", "not", "hasattr", "(", "obj", ",", "field", ")", ")", ":", "non_existent_fields", ".", "append", "(", "field", ")", "continue", "if", "(", "field", "in", "exclude_fields", ")", ":", "continue", "(", "field_title", ",", "formatter", ")", "=", "utils", ".", "make_field_formatter", "(", "field", ",", "filters", ")", "columns", ".", "append", "(", "field_title", ")", "formatters", "[", "field_title", "]", "=", "formatter", "exclude_fields", ".", "add", "(", "field", ")", "if", "non_existent_fields", ":", "raise", "exceptions", ".", "CommandError", "(", "(", "_", "(", "'Non-existent fields are specified: %s'", ")", "%", "non_existent_fields", ")", ")", "return", "(", "columns", ",", "formatters", ")" ]
check and add fields to output columns .
train
false
9,267
def run_fitZIG(input_path, out_path, mapping_category, subcategory_1, subcategory_2): command_args = [('-i %s -o %s -c %s -x %s -y %s' % (input_path, out_path, mapping_category, subcategory_1, subcategory_2))] rsl = RExecutor(TmpDir=get_qiime_temp_dir()) app_result = rsl(command_args=command_args, script_name='fitZIG.r') return app_result
[ "def", "run_fitZIG", "(", "input_path", ",", "out_path", ",", "mapping_category", ",", "subcategory_1", ",", "subcategory_2", ")", ":", "command_args", "=", "[", "(", "'-i %s -o %s -c %s -x %s -y %s'", "%", "(", "input_path", ",", "out_path", ",", "mapping_category", ",", "subcategory_1", ",", "subcategory_2", ")", ")", "]", "rsl", "=", "RExecutor", "(", "TmpDir", "=", "get_qiime_temp_dir", "(", ")", ")", "app_result", "=", "rsl", "(", "command_args", "=", "command_args", ",", "script_name", "=", "'fitZIG.r'", ")", "return", "app_result" ]
run metagenomeSeq's fitZIG algorithm through Rscript .
train
false
9,268
def make_message(command_id, arguments=tuple()): return (((MESSAGE_START + (command_id,)) + arguments) + (midi.SYSEX_END,))
[ "def", "make_message", "(", "command_id", ",", "arguments", "=", "tuple", "(", ")", ")", ":", "return", "(", "(", "(", "MESSAGE_START", "+", "(", "command_id", ",", ")", ")", "+", "arguments", ")", "+", "(", "midi", ".", "SYSEX_END", ",", ")", ")" ]
creates a sysex message from the command id and arguments supplied .
train
false
9,269
def _validate_sleep(minutes): if isinstance(minutes, str): if (minutes.lower() in ['never', 'off']): return 'Never' else: msg = 'Invalid String Value for Minutes.\nString values must be "Never" or "Off".\nPassed: {0}'.format(minutes) raise SaltInvocationError(msg) elif isinstance(minutes, bool): if minutes: msg = 'Invalid Boolean Value for Minutes.\nBoolean value "On" or "True" is not allowed.\nSalt CLI converts "On" to boolean True.\nPassed: {0}'.format(minutes) raise SaltInvocationError(msg) else: return 'Never' elif isinstance(minutes, int): if (minutes in range(1, 181)): return minutes else: msg = 'Invalid Integer Value for Minutes.\nInteger values must be between 1 and 180.\nPassed: {0}'.format(minutes) raise SaltInvocationError(msg) else: msg = 'Unknown Variable Type Passed for Minutes.\nPassed: {0}'.format(minutes) raise SaltInvocationError(msg)
[ "def", "_validate_sleep", "(", "minutes", ")", ":", "if", "isinstance", "(", "minutes", ",", "str", ")", ":", "if", "(", "minutes", ".", "lower", "(", ")", "in", "[", "'never'", ",", "'off'", "]", ")", ":", "return", "'Never'", "else", ":", "msg", "=", "'Invalid String Value for Minutes.\\nString values must be \"Never\" or \"Off\".\\nPassed: {0}'", ".", "format", "(", "minutes", ")", "raise", "SaltInvocationError", "(", "msg", ")", "elif", "isinstance", "(", "minutes", ",", "bool", ")", ":", "if", "minutes", ":", "msg", "=", "'Invalid Boolean Value for Minutes.\\nBoolean value \"On\" or \"True\" is not allowed.\\nSalt CLI converts \"On\" to boolean True.\\nPassed: {0}'", ".", "format", "(", "minutes", ")", "raise", "SaltInvocationError", "(", "msg", ")", "else", ":", "return", "'Never'", "elif", "isinstance", "(", "minutes", ",", "int", ")", ":", "if", "(", "minutes", "in", "range", "(", "1", ",", "181", ")", ")", ":", "return", "minutes", "else", ":", "msg", "=", "'Invalid Integer Value for Minutes.\\nInteger values must be between 1 and 180.\\nPassed: {0}'", ".", "format", "(", "minutes", ")", "raise", "SaltInvocationError", "(", "msg", ")", "else", ":", "msg", "=", "'Unknown Variable Type Passed for Minutes.\\nPassed: {0}'", ".", "format", "(", "minutes", ")", "raise", "SaltInvocationError", "(", "msg", ")" ]
helper function that validates the minutes parameter .
train
false
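
Illustrative calls for the validator above, assuming it and SaltInvocationError are importable from Salt:

print(_validate_sleep('never'))  # 'Never'
print(_validate_sleep(30))       # 30
try:
    _validate_sleep(500)         # outside the allowed 1-180 range
except SaltInvocationError as err:
    print('rejected:', err)
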
9,271
def _inject(): NS = globals() GLNS = _GL.__dict__ used_names = [] used_names.extend([names[0] for names in _pyopengl2._functions_to_import]) used_names.extend([name for name in _pyopengl2._used_functions]) NS['_used_names'] = used_names used_constants = set(_constants.__dict__) injected_constants = 0 injected_functions = 0 for name in dir(_GL): if name.startswith('GL_'): if (name not in used_constants): NS[name] = GLNS[name] injected_constants += 1 elif name.startswith('gl'): if ((name + ',') in _deprecated_functions): pass elif (name in used_names): pass else: NS[name] = GLNS[name] injected_functions += 1
[ "def", "_inject", "(", ")", ":", "NS", "=", "globals", "(", ")", "GLNS", "=", "_GL", ".", "__dict__", "used_names", "=", "[", "]", "used_names", ".", "extend", "(", "[", "names", "[", "0", "]", "for", "names", "in", "_pyopengl2", ".", "_functions_to_import", "]", ")", "used_names", ".", "extend", "(", "[", "name", "for", "name", "in", "_pyopengl2", ".", "_used_functions", "]", ")", "NS", "[", "'_used_names'", "]", "=", "used_names", "used_constants", "=", "set", "(", "_constants", ".", "__dict__", ")", "injected_constants", "=", "0", "injected_functions", "=", "0", "for", "name", "in", "dir", "(", "_GL", ")", ":", "if", "name", ".", "startswith", "(", "'GL_'", ")", ":", "if", "(", "name", "not", "in", "used_constants", ")", ":", "NS", "[", "name", "]", "=", "GLNS", "[", "name", "]", "injected_constants", "+=", "1", "elif", "name", ".", "startswith", "(", "'gl'", ")", ":", "if", "(", "(", "name", "+", "','", ")", "in", "_deprecated_functions", ")", ":", "pass", "elif", "(", "name", "in", "used_names", ")", ":", "pass", "else", ":", "NS", "[", "name", "]", "=", "GLNS", "[", "name", "]", "injected_functions", "+=", "1" ]
copy functions and constants from opengl into the module namespace .
train
true
9,272
@logic.side_effect_free def datastore_search(context, data_dict): schema = context.get('schema', dsschema.datastore_search_schema()) (data_dict, errors) = _validate(data_dict, schema, context) if errors: raise p.toolkit.ValidationError(errors) res_id = data_dict['resource_id'] data_dict['connection_url'] = config['ckan.datastore.write_url'] resources_sql = sqlalchemy.text(u'SELECT alias_of FROM "_table_metadata"\n WHERE name = :id') results = db._get_engine(data_dict).execute(resources_sql, id=res_id) if (not (results.rowcount > 0)): raise p.toolkit.ObjectNotFound(p.toolkit._('Resource "{0}" was not found.'.format(res_id))) if (not (data_dict['resource_id'] in WHITELISTED_RESOURCES)): resource_id = results.fetchone()[0] if resource_id: data_dict['resource_id'] = resource_id p.toolkit.check_access('datastore_search', context, data_dict) result = db.search(context, data_dict) result.pop('id', None) result.pop('connection_url') return result
[ "@", "logic", ".", "side_effect_free", "def", "datastore_search", "(", "context", ",", "data_dict", ")", ":", "schema", "=", "context", ".", "get", "(", "'schema'", ",", "dsschema", ".", "datastore_search_schema", "(", ")", ")", "(", "data_dict", ",", "errors", ")", "=", "_validate", "(", "data_dict", ",", "schema", ",", "context", ")", "if", "errors", ":", "raise", "p", ".", "toolkit", ".", "ValidationError", "(", "errors", ")", "res_id", "=", "data_dict", "[", "'resource_id'", "]", "data_dict", "[", "'connection_url'", "]", "=", "config", "[", "'ckan.datastore.write_url'", "]", "resources_sql", "=", "sqlalchemy", ".", "text", "(", "u'SELECT alias_of FROM \"_table_metadata\"\\n WHERE name = :id'", ")", "results", "=", "db", ".", "_get_engine", "(", "data_dict", ")", ".", "execute", "(", "resources_sql", ",", "id", "=", "res_id", ")", "if", "(", "not", "(", "results", ".", "rowcount", ">", "0", ")", ")", ":", "raise", "p", ".", "toolkit", ".", "ObjectNotFound", "(", "p", ".", "toolkit", ".", "_", "(", "'Resource \"{0}\" was not found.'", ".", "format", "(", "res_id", ")", ")", ")", "if", "(", "not", "(", "data_dict", "[", "'resource_id'", "]", "in", "WHITELISTED_RESOURCES", ")", ")", ":", "resource_id", "=", "results", ".", "fetchone", "(", ")", "[", "0", "]", "if", "resource_id", ":", "data_dict", "[", "'resource_id'", "]", "=", "resource_id", "p", ".", "toolkit", ".", "check_access", "(", "'datastore_search'", ",", "context", ",", "data_dict", ")", "result", "=", "db", ".", "search", "(", "context", ",", "data_dict", ")", "result", ".", "pop", "(", "'id'", ",", "None", ")", "result", ".", "pop", "(", "'connection_url'", ")", "return", "result" ]
search a datastore resource .
train
false
9,273
def _format_lazy(format_string, *args, **kwargs): return format_string.format(*args, **kwargs)
[ "def", "_format_lazy", "(", "format_string", ",", "*", "args", ",", "**", "kwargs", ")", ":", "return", "format_string", ".", "format", "(", "*", "args", ",", "**", "kwargs", ")" ]
apply str.format to the format string with the supplied arguments .
train
false
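
In Django this helper is wrapped with lazy() (django.utils.text does exactly this), which defers the formatting until the result is used; a sketch assuming Django is installed:

from django.utils.functional import lazy

format_lazy = lazy(_format_lazy, str)
msg = format_lazy('{0}-{1}', 'a', 'b')  # nothing is formatted yet
print(str(msg))                         # 'a-b', formatted on access
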
9,275
def _is_optional(substructure): if (type(substructure) == Optional): return True if (type(substructure) is dict): for value in substructure.values(): if (not _is_optional(value)): return False return True return False
[ "def", "_is_optional", "(", "substructure", ")", ":", "if", "(", "type", "(", "substructure", ")", "==", "Optional", ")", ":", "return", "True", "if", "(", "type", "(", "substructure", ")", "is", "dict", ")", ":", "for", "value", "in", "substructure", ".", "values", "(", ")", ":", "if", "(", "not", "_is_optional", "(", "value", ")", ")", ":", "return", "False", "return", "True", "return", "False" ]
determines if a substructure is an optional part of the configuration .
train
false
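
A self-contained sketch for the record above; Optional is a marker type the snippet assumes, so a stub is defined here.

class Optional(object):
    """Stub for the marker type the snippet compares against."""

print(_is_optional(Optional()))                          # True
print(_is_optional({'a': Optional(), 'b': 1}))           # False
print(_is_optional({'a': Optional(), 'b': Optional()}))  # True
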
9,276
def show_term_protect(name=None, instance_id=None, call=None, quiet=False): if (call != 'action'): raise SaltCloudSystemExit('The show_term_protect action must be called with -a or --action.') if (not instance_id): instance_id = _get_node(name)['instanceId'] params = {'Action': 'DescribeInstanceAttribute', 'InstanceId': instance_id, 'Attribute': 'disableApiTermination'} result = aws.query(params, location=get_location(), provider=get_provider(), return_root=True, opts=__opts__, sigver='4') disable_protect = False for item in result: if ('value' in item): disable_protect = item['value'] break log.log((logging.DEBUG if (quiet is True) else logging.INFO), 'Termination Protection is {0} for {1}'.format((((disable_protect == 'true') and 'enabled') or 'disabled'), name)) return disable_protect
[ "def", "show_term_protect", "(", "name", "=", "None", ",", "instance_id", "=", "None", ",", "call", "=", "None", ",", "quiet", "=", "False", ")", ":", "if", "(", "call", "!=", "'action'", ")", ":", "raise", "SaltCloudSystemExit", "(", "'The show_term_protect action must be called with -a or --action.'", ")", "if", "(", "not", "instance_id", ")", ":", "instance_id", "=", "_get_node", "(", "name", ")", "[", "'instanceId'", "]", "params", "=", "{", "'Action'", ":", "'DescribeInstanceAttribute'", ",", "'InstanceId'", ":", "instance_id", ",", "'Attribute'", ":", "'disableApiTermination'", "}", "result", "=", "aws", ".", "query", "(", "params", ",", "location", "=", "get_location", "(", ")", ",", "provider", "=", "get_provider", "(", ")", ",", "return_root", "=", "True", ",", "opts", "=", "__opts__", ",", "sigver", "=", "'4'", ")", "disable_protect", "=", "False", "for", "item", "in", "result", ":", "if", "(", "'value'", "in", "item", ")", ":", "disable_protect", "=", "item", "[", "'value'", "]", "break", "log", ".", "log", "(", "(", "logging", ".", "DEBUG", "if", "(", "quiet", "is", "True", ")", "else", "logging", ".", "INFO", ")", ",", "'Termination Protection is {0} for {1}'", ".", "format", "(", "(", "(", "(", "disable_protect", "==", "'true'", ")", "and", "'enabled'", ")", "or", "'disabled'", ")", ",", "name", ")", ")", "return", "disable_protect" ]
show the termination protection setting from ec2 for an instance .
train
true
9,277
def check_valid_abd_naming(pattern=None): if (pattern is None): pattern = sickbeard.NAMING_PATTERN logger.log(((u'Checking whether the pattern ' + pattern) + u' is valid for an air-by-date episode'), logger.DEBUG) valid = validate_name(pattern, abd=True) return valid
[ "def", "check_valid_abd_naming", "(", "pattern", "=", "None", ")", ":", "if", "(", "pattern", "is", "None", ")", ":", "pattern", "=", "sickbeard", ".", "NAMING_PATTERN", "logger", ".", "log", "(", "(", "(", "u'Checking whether the pattern '", "+", "pattern", ")", "+", "u' is valid for an air-by-date episode'", ")", ",", "logger", ".", "DEBUG", ")", "valid", "=", "validate_name", "(", "pattern", ",", "abd", "=", "True", ")", "return", "valid" ]
checks if the name is can be parsed back to its original form for an air-by-date format .
train
false
9,278
def download_content_pack(fobj, lang, minimal=False): url = CONTENT_PACK_URL_TEMPLATE.format(version=SHORTVERSION, langcode=lang, suffix=('-minimal' if minimal else '')) logging.info('Downloading content pack from {}'.format(url)) httpf = urllib.urlopen(url) shutil.copyfileobj(httpf, fobj) fobj.seek(0) zf = zipfile.ZipFile(fobj) httpf.close() return zf
[ "def", "download_content_pack", "(", "fobj", ",", "lang", ",", "minimal", "=", "False", ")", ":", "url", "=", "CONTENT_PACK_URL_TEMPLATE", ".", "format", "(", "version", "=", "SHORTVERSION", ",", "langcode", "=", "lang", ",", "suffix", "=", "(", "'-minimal'", "if", "minimal", "else", "''", ")", ")", "logging", ".", "info", "(", "'Downloading content pack from {}'", ".", "format", "(", "url", ")", ")", "httpf", "=", "urllib", ".", "urlopen", "(", "url", ")", "shutil", ".", "copyfileobj", "(", "httpf", ",", "fobj", ")", "fobj", ".", "seek", "(", "0", ")", "zf", "=", "zipfile", ".", "ZipFile", "(", "fobj", ")", "httpf", ".", "close", "(", ")", "return", "zf" ]
given a file object where the content pack for lang will be stored , download the content pack and return it as an open zipfile .
train
false
9,280
def get_start_time(): start_time = (datetime.datetime.utcnow() - datetime.timedelta(hours=1, minutes=5)) return format_rfc3339(start_time)
[ "def", "get_start_time", "(", ")", ":", "start_time", "=", "(", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "-", "datetime", ".", "timedelta", "(", "hours", "=", "1", ",", "minutes", "=", "5", ")", ")", "return", "format_rfc3339", "(", "start_time", ")" ]
returns the start time for the 5-minute window to read the custom metric from within .
train
false
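
A runnable sketch for the record above; format_rfc3339 is a module-level helper the snippet assumes, so a minimal stand-in is shown.

import datetime

def format_rfc3339(dt):
    # hypothetical stand-in: naive UTC datetime to RFC 3339 text
    return dt.isoformat('T') + 'Z'

print(get_start_time())  # e.g. '2024-01-01T10:55:00.123456Z'
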
9,281
def py_run(command_options='', return_std=False, stdout=None, stderr=None, script='epylint'): if (os.name == 'nt'): script += '.bat' command_line = ((script + ' ') + command_options) if (stdout is None): if return_std: stdout = PIPE else: stdout = sys.stdout if (stderr is None): if return_std: stderr = PIPE else: stderr = sys.stderr p = Popen(command_line, shell=True, stdout=stdout, stderr=stderr, env=_get_env(), universal_newlines=True) p.wait() if return_std: return (p.stdout, p.stderr)
[ "def", "py_run", "(", "command_options", "=", "''", ",", "return_std", "=", "False", ",", "stdout", "=", "None", ",", "stderr", "=", "None", ",", "script", "=", "'epylint'", ")", ":", "if", "(", "os", ".", "name", "==", "'nt'", ")", ":", "script", "+=", "'.bat'", "command_line", "=", "(", "(", "script", "+", "' '", ")", "+", "command_options", ")", "if", "(", "stdout", "is", "None", ")", ":", "if", "return_std", ":", "stdout", "=", "PIPE", "else", ":", "stdout", "=", "sys", ".", "stdout", "if", "(", "stderr", "is", "None", ")", ":", "if", "return_std", ":", "stderr", "=", "PIPE", "else", ":", "stderr", "=", "sys", ".", "stderr", "p", "=", "Popen", "(", "command_line", ",", "shell", "=", "True", ",", "stdout", "=", "stdout", ",", "stderr", "=", "stderr", ",", "env", "=", "_get_env", "(", ")", ",", "universal_newlines", "=", "True", ")", "p", ".", "wait", "(", ")", "if", "return_std", ":", "return", "(", "p", ".", "stdout", ",", "p", ".", "stderr", ")" ]
run pylint from python . command_options is a string containing pylint command-line options ; return_std indicates whether the created standard output and error streams should be returned ; stdout and stderr are file-like objects to which standard output and error are written .
train
false
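
py_run is pylint's documented epylint entry point; typical usage capturing both streams (the module name is illustrative):

from pylint import epylint as lint

(pylint_stdout, pylint_stderr) = lint.py_run('mymodule.py', return_std=True)
print(pylint_stdout.read())
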
9,283
def event_type(): return s3_rest_controller('event', 'event_type')
[ "def", "event_type", "(", ")", ":", "return", "s3_rest_controller", "(", "'event'", ",", "'event_type'", ")" ]
restful crud controller .
train
false
9,284
def fields(): f = {} if coin_flip(): if coin_flip(): f['copyrighted'] = coin_flip() if coin_flip(): f['size'] = random.randint(100, 10000000) if coin_flip(): f['color'] = random.choice(('blue', 'pink', 'fuchsia', 'rose', 'mauve', 'black')) return f
[ "def", "fields", "(", ")", ":", "f", "=", "{", "}", "if", "coin_flip", "(", ")", ":", "if", "coin_flip", "(", ")", ":", "f", "[", "'copyrighted'", "]", "=", "coin_flip", "(", ")", "if", "coin_flip", "(", ")", ":", "f", "[", "'size'", "]", "=", "random", ".", "randint", "(", "100", ",", "10000000", ")", "if", "coin_flip", "(", ")", ":", "f", "[", "'color'", "]", "=", "random", ".", "choice", "(", "(", "'blue'", ",", "'pink'", ",", "'fuchsia'", ",", "'rose'", ",", "'mauve'", ",", "'black'", ")", ")", "return", "f" ]
generate some fake extra fields .
train
false
9,285
def safe_config_file(config_file): config_file_lower = config_file.lower() if _SENSITIVE_FILENAME_REGEX.search(config_file_lower): return False proc = subprocess.Popen(['file', config_file], stdout=subprocess.PIPE, stderr=subprocess.PIPE) (file_output, _) = proc.communicate() if ('ASCII' in file_output): possible_password_file = empty_or_all_comments = True with open(config_file) as config_fd: for line in config_fd: if (not (line.isspace() or line.lstrip().startswith('#'))): empty_or_all_comments = False if line.startswith('-----BEGIN'): return False elif (':' not in line): possible_password_file = False return (empty_or_all_comments or (not possible_password_file)) return False
[ "def", "safe_config_file", "(", "config_file", ")", ":", "config_file_lower", "=", "config_file", ".", "lower", "(", ")", "if", "_SENSITIVE_FILENAME_REGEX", ".", "search", "(", "config_file_lower", ")", ":", "return", "False", "proc", "=", "subprocess", ".", "Popen", "(", "[", "'file'", ",", "config_file", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "(", "file_output", ",", "_", ")", "=", "proc", ".", "communicate", "(", ")", "if", "(", "'ASCII'", "in", "file_output", ")", ":", "possible_password_file", "=", "empty_or_all_comments", "=", "True", "with", "open", "(", "config_file", ")", "as", "config_fd", ":", "for", "line", "in", "config_fd", ":", "if", "(", "not", "(", "line", ".", "isspace", "(", ")", "or", "line", ".", "lstrip", "(", ")", ".", "startswith", "(", "'#'", ")", ")", ")", ":", "empty_or_all_comments", "=", "False", "if", "line", ".", "startswith", "(", "'-----BEGIN'", ")", ":", "return", "False", "elif", "(", "':'", "not", "in", "line", ")", ":", "possible_password_file", "=", "False", "return", "(", "empty_or_all_comments", "or", "(", "not", "possible_password_file", ")", ")", "return", "False" ]
returns true if config_file can be safely copied .
train
false
9,286
def vonmisesmle(data, axis=None): mu = circmean(data, axis=None) kappa = _A1inv(np.mean(np.cos((data - mu)), axis)) return (mu, kappa)
[ "def", "vonmisesmle", "(", "data", ",", "axis", "=", "None", ")", ":", "mu", "=", "circmean", "(", "data", ",", "axis", "=", "None", ")", "kappa", "=", "_A1inv", "(", "np", ".", "mean", "(", "np", ".", "cos", "(", "(", "data", "-", "mu", ")", ")", ",", "axis", ")", ")", "return", "(", "mu", ",", "kappa", ")" ]
computes the maximum likelihood estimator for the parameters of the von mises distribution .
train
false
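
The estimator in the record solves the standard von Mises likelihood equations; with A_1(kappa) = I_1(kappa)/I_0(kappa) the ratio of modified Bessel functions, in LaTeX:

$$\hat{\mu} = \operatorname{atan2}\Big(\sum_{i=1}^{n}\sin\theta_i,\ \sum_{i=1}^{n}\cos\theta_i\Big), \qquad A_1(\hat{\kappa}) = \frac{1}{n}\sum_{i=1}^{n}\cos(\theta_i - \hat{\mu})$$

so _A1inv numerically inverts A_1 to recover the kappa estimate.
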
9,287
def smart_capwords(s, sep=None): words = s.split(sep) for (i, word) in enumerate(words): if all((x.islower() for x in word)): words[i] = word.capitalize() return (sep or ' ').join(words)
[ "def", "smart_capwords", "(", "s", ",", "sep", "=", "None", ")", ":", "words", "=", "s", ".", "split", "(", "sep", ")", "for", "(", "i", ",", "word", ")", "in", "enumerate", "(", "words", ")", ":", "if", "all", "(", "(", "x", ".", "islower", "(", ")", "for", "x", "in", "word", ")", ")", ":", "words", "[", "i", "]", "=", "word", ".", "capitalize", "(", ")", "return", "(", "sep", "or", "' '", ")", ".", "join", "(", "words", ")" ]
like string.capwords , but only capitalizes words that are entirely lowercase .
train
false
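
The function above is self-contained; example calls showing that words with existing capitals (e.g. acronyms) are left untouched:

print(smart_capwords('the HTML spec'))  # 'The HTML Spec'
print(smart_capwords('a,b', sep=','))   # 'A,B'
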
9,288
def get_named_tmpfile_from_ctx(ctx, filename, dir): filename = basic_util.strip_path(filename) for ctx_file in ctx.files(): ctx_file_name = basic_util.strip_path(ctx_file) if (filename == ctx_file_name): try: fctx = ctx[ctx_file] except LookupError: fctx = None continue if fctx: fh = tempfile.NamedTemporaryFile('wb', prefix='tmp-toolshed-gntfc', dir=dir) tmp_filename = fh.name fh.close() fh = open(tmp_filename, 'wb') fh.write(fctx.data()) fh.close() return tmp_filename return None
[ "def", "get_named_tmpfile_from_ctx", "(", "ctx", ",", "filename", ",", "dir", ")", ":", "filename", "=", "basic_util", ".", "strip_path", "(", "filename", ")", "for", "ctx_file", "in", "ctx", ".", "files", "(", ")", ":", "ctx_file_name", "=", "basic_util", ".", "strip_path", "(", "ctx_file", ")", "if", "(", "filename", "==", "ctx_file_name", ")", ":", "try", ":", "fctx", "=", "ctx", "[", "ctx_file", "]", "except", "LookupError", ":", "fctx", "=", "None", "continue", "if", "fctx", ":", "fh", "=", "tempfile", ".", "NamedTemporaryFile", "(", "'wb'", ",", "prefix", "=", "'tmp-toolshed-gntfc'", ",", "dir", "=", "dir", ")", "tmp_filename", "=", "fh", ".", "name", "fh", ".", "close", "(", ")", "fh", "=", "open", "(", "tmp_filename", ",", "'wb'", ")", "fh", ".", "write", "(", "fctx", ".", "data", "(", ")", ")", "fh", ".", "close", "(", ")", "return", "tmp_filename", "return", "None" ]
return a named temporary file created from a specified file with a given name included in a repository changeset revision .
train
false
9,291
def rebot(*outputs, **options): return Rebot().execute(*outputs, **options)
[ "def", "rebot", "(", "*", "outputs", ",", "**", "options", ")", ":", "return", "Rebot", "(", ")", ".", "execute", "(", "*", "outputs", ",", "**", "options", ")" ]
programmatic entry point for post-processing outputs .
train
false
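
rebot() is Robot Framework's public post-processing API; a typical call merging two result files (the file names are illustrative):

from robot import rebot

rebot('output1.xml', 'output2.xml', name='Combined', log='combined_log.html')
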
9,292
def _CreateDatastoreConfig(): return datastore_rpc.Configuration(force_writes=True)
[ "def", "_CreateDatastoreConfig", "(", ")", ":", "return", "datastore_rpc", ".", "Configuration", "(", "force_writes", "=", "True", ")" ]
create datastore config for use during datastore admin operations .
train
false
9,293
def test_cc_fit(): ratio = 'auto' cc = ClusterCentroids(ratio=ratio, random_state=RND_SEED) cc.fit(X, Y) assert_equal(cc.min_c_, 0) assert_equal(cc.maj_c_, 1) assert_equal(cc.stats_c_[0], 3) assert_equal(cc.stats_c_[1], 7)
[ "def", "test_cc_fit", "(", ")", ":", "ratio", "=", "'auto'", "cc", "=", "ClusterCentroids", "(", "ratio", "=", "ratio", ",", "random_state", "=", "RND_SEED", ")", "cc", ".", "fit", "(", "X", ",", "Y", ")", "assert_equal", "(", "cc", ".", "min_c_", ",", "0", ")", "assert_equal", "(", "cc", ".", "maj_c_", ",", "1", ")", "assert_equal", "(", "cc", ".", "stats_c_", "[", "0", "]", ",", "3", ")", "assert_equal", "(", "cc", ".", "stats_c_", "[", "1", "]", ",", "7", ")" ]
test the fitting method .
train
false
9,294
def get_network_interface(name=None, network_interface_id=None, region=None, key=None, keyid=None, profile=None): conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) r = {} result = _get_network_interface(conn, name, network_interface_id) if ('error' in result): if (result['error']['message'] == 'No ENIs found.'): r['result'] = None return r return result eni = result['result'] r['result'] = _describe_network_interface(eni) return r
[ "def", "get_network_interface", "(", "name", "=", "None", ",", "network_interface_id", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "r", "=", "{", "}", "result", "=", "_get_network_interface", "(", "conn", ",", "name", ",", "network_interface_id", ")", "if", "(", "'error'", "in", "result", ")", ":", "if", "(", "result", "[", "'error'", "]", "[", "'message'", "]", "==", "'No ENIs found.'", ")", ":", "r", "[", "'result'", "]", "=", "None", "return", "r", "return", "result", "eni", "=", "result", "[", "'result'", "]", "r", "[", "'result'", "]", "=", "_describe_network_interface", "(", "eni", ")", "return", "r" ]
get an elastic network interface .
train
true
9,295
def _has_non_ascii_characters(data_string): try: data_string.encode('ascii') except UnicodeEncodeError: return True return False
[ "def", "_has_non_ascii_characters", "(", "data_string", ")", ":", "try", ":", "data_string", ".", "encode", "(", "'ascii'", ")", "except", "UnicodeEncodeError", ":", "return", "True", "return", "False" ]
check if the provided string contains non-ascii characters .
train
false
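
Example calls for the helper above (self-contained):

print(_has_non_ascii_characters('plain ascii'))  # False
print(_has_non_ascii_characters(u'caf\xe9'))     # True
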
9,297
def shelter_unit(): def prep(r): if (r.representation == 'plain'): record_id = r.id table = s3db.cr_shelter_unit row = db((table.id == record_id)).select(table.shelter_id, limitby=(0, 1)).first() shelter_id = row.shelter_id s3db.configure('cr_shelter_unit', popup_url=URL(c='cr', f='shelter', args=[shelter_id, 'shelter_unit', record_id])) return True elif ((r.representation in ('json', 'geojson', 'plain')) or (r.method == 'import')): return True return False s3.prep = prep return s3_rest_controller()
[ "def", "shelter_unit", "(", ")", ":", "def", "prep", "(", "r", ")", ":", "if", "(", "r", ".", "representation", "==", "'plain'", ")", ":", "record_id", "=", "r", ".", "id", "table", "=", "s3db", ".", "cr_shelter_unit", "row", "=", "db", "(", "(", "table", ".", "id", "==", "record_id", ")", ")", ".", "select", "(", "table", ".", "shelter_id", ",", "limitby", "=", "(", "0", ",", "1", ")", ")", ".", "first", "(", ")", "shelter_id", "=", "row", ".", "shelter_id", "s3db", ".", "configure", "(", "'cr_shelter_unit'", ",", "popup_url", "=", "URL", "(", "c", "=", "'cr'", ",", "f", "=", "'shelter'", ",", "args", "=", "[", "shelter_id", ",", "'shelter_unit'", ",", "record_id", "]", ")", ")", "return", "True", "elif", "(", "(", "r", ".", "representation", "in", "(", "'json'", ",", "'geojson'", ",", "'plain'", ")", ")", "or", "(", "r", ".", "method", "==", "'import'", ")", ")", ":", "return", "True", "return", "False", "s3", ".", "prep", "=", "prep", "return", "s3_rest_controller", "(", ")" ]
rest controller to retrieve options for shelter unit selection , show the layer on the map , and handle imports .
train
false
9,300
def read_properties_core(xml_source): properties = DocumentProperties() root = fromstring(xml_source) creator_node = root.find(QName(NAMESPACES['dc'], 'creator').text) if (creator_node is not None): properties.creator = creator_node.text else: properties.creator = '' last_modified_by_node = root.find(QName(NAMESPACES['cp'], 'lastModifiedBy').text) if (last_modified_by_node is not None): properties.last_modified_by = last_modified_by_node.text else: properties.last_modified_by = '' created_node = root.find(QName(NAMESPACES['dcterms'], 'created').text) if (created_node is not None): properties.created = W3CDTF_to_datetime(created_node.text) else: properties.created = datetime.datetime.now() modified_node = root.find(QName(NAMESPACES['dcterms'], 'modified').text) if (modified_node is not None): properties.modified = W3CDTF_to_datetime(modified_node.text) else: properties.modified = properties.created return properties
[ "def", "read_properties_core", "(", "xml_source", ")", ":", "properties", "=", "DocumentProperties", "(", ")", "root", "=", "fromstring", "(", "xml_source", ")", "creator_node", "=", "root", ".", "find", "(", "QName", "(", "NAMESPACES", "[", "'dc'", "]", ",", "'creator'", ")", ".", "text", ")", "if", "(", "creator_node", "is", "not", "None", ")", ":", "properties", ".", "creator", "=", "creator_node", ".", "text", "else", ":", "properties", ".", "creator", "=", "''", "last_modified_by_node", "=", "root", ".", "find", "(", "QName", "(", "NAMESPACES", "[", "'cp'", "]", ",", "'lastModifiedBy'", ")", ".", "text", ")", "if", "(", "last_modified_by_node", "is", "not", "None", ")", ":", "properties", ".", "last_modified_by", "=", "last_modified_by_node", ".", "text", "else", ":", "properties", ".", "last_modified_by", "=", "''", "created_node", "=", "root", ".", "find", "(", "QName", "(", "NAMESPACES", "[", "'dcterms'", "]", ",", "'created'", ")", ".", "text", ")", "if", "(", "created_node", "is", "not", "None", ")", ":", "properties", ".", "created", "=", "W3CDTF_to_datetime", "(", "created_node", ".", "text", ")", "else", ":", "properties", ".", "created", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "modified_node", "=", "root", ".", "find", "(", "QName", "(", "NAMESPACES", "[", "'dcterms'", "]", ",", "'modified'", ")", ".", "text", ")", "if", "(", "modified_node", "is", "not", "None", ")", ":", "properties", ".", "modified", "=", "W3CDTF_to_datetime", "(", "modified_node", ".", "text", ")", "else", ":", "properties", ".", "modified", "=", "properties", ".", "created", "return", "properties" ]
read assorted file properties .
train
false
9,301
def missingDependencies(target_module): dependencies = getattr(target_module, '_dependencies', []) return [i for i in dependencies if (not testImport(i))]
[ "def", "missingDependencies", "(", "target_module", ")", ":", "dependencies", "=", "getattr", "(", "target_module", ",", "'_dependencies'", ",", "[", "]", ")", "return", "[", "i", "for", "i", "in", "dependencies", "if", "(", "not", "testImport", "(", "i", ")", ")", "]" ]
returns a list of dependencies of the module that the current interpreter cannot import .
train
false
9,302
def _find_clusters_1dir_parts(x, x_in, connectivity, max_step, partitions, t_power, ndimage): if (partitions is None): (clusters, sums) = _find_clusters_1dir(x, x_in, connectivity, max_step, t_power, ndimage) else: clusters = list() sums = list() for p in range((np.max(partitions) + 1)): x_i = np.logical_and(x_in, (partitions == p)) out = _find_clusters_1dir(x, x_i, connectivity, max_step, t_power, ndimage) clusters += out[0] sums.append(out[1]) sums = np.concatenate(sums) return (clusters, sums)
[ "def", "_find_clusters_1dir_parts", "(", "x", ",", "x_in", ",", "connectivity", ",", "max_step", ",", "partitions", ",", "t_power", ",", "ndimage", ")", ":", "if", "(", "partitions", "is", "None", ")", ":", "(", "clusters", ",", "sums", ")", "=", "_find_clusters_1dir", "(", "x", ",", "x_in", ",", "connectivity", ",", "max_step", ",", "t_power", ",", "ndimage", ")", "else", ":", "clusters", "=", "list", "(", ")", "sums", "=", "list", "(", ")", "for", "p", "in", "range", "(", "(", "np", ".", "max", "(", "partitions", ")", "+", "1", ")", ")", ":", "x_i", "=", "np", ".", "logical_and", "(", "x_in", ",", "(", "partitions", "==", "p", ")", ")", "out", "=", "_find_clusters_1dir", "(", "x", ",", "x_i", ",", "connectivity", ",", "max_step", ",", "t_power", ",", "ndimage", ")", "clusters", "+=", "out", "[", "0", "]", "sums", ".", "append", "(", "out", "[", "1", "]", ")", "sums", "=", "np", ".", "concatenate", "(", "sums", ")", "return", "(", "clusters", ",", "sums", ")" ]
deal with partitions when searching for clusters in one direction .
train
false
9,303
def datacite_metadata(doi, title, creators, publisher, publication_year, pretty_print=False): creators = [CREATOR(CREATOR_NAME(each)) for each in creators] root = E.resource(E.identifier(doi, identifierType='DOI'), E.creators(*creators), E.titles(E.title(title)), E.publisher(publisher), E.publicationYear(str(publication_year))) root.attrib[('{%s}schemaLocation' % XSI)] = SCHEMA_LOCATION return lxml.etree.tostring(root, pretty_print=pretty_print)
[ "def", "datacite_metadata", "(", "doi", ",", "title", ",", "creators", ",", "publisher", ",", "publication_year", ",", "pretty_print", "=", "False", ")", ":", "creators", "=", "[", "CREATOR", "(", "CREATOR_NAME", "(", "each", ")", ")", "for", "each", "in", "creators", "]", "root", "=", "E", ".", "resource", "(", "E", ".", "identifier", "(", "doi", ",", "identifierType", "=", "'DOI'", ")", ",", "E", ".", "creators", "(", "*", "creators", ")", ",", "E", ".", "titles", "(", "E", ".", "title", "(", "title", ")", ")", ",", "E", ".", "publisher", "(", "publisher", ")", ",", "E", ".", "publicationYear", "(", "str", "(", "publication_year", ")", ")", ")", "root", ".", "attrib", "[", "(", "'{%s}schemaLocation'", "%", "XSI", ")", "]", "=", "SCHEMA_LOCATION", "return", "lxml", ".", "etree", ".", "tostring", "(", "root", ",", "pretty_print", "=", "pretty_print", ")" ]
return the formatted datacite metadata xml as a string .
train
false
9,304
def is_revoked(events, token_data): return any([matches(e, token_data) for e in events])
[ "def", "is_revoked", "(", "events", ",", "token_data", ")", ":", "return", "any", "(", "[", "matches", "(", "e", ",", "token_data", ")", "for", "e", "in", "events", "]", ")" ]
check if a token matches any revocation event .
train
false
9,307
def process_content_pack_dir(pack_dir): assert pack_dir.startswith('/tmp') config_file_path = os.path.join(pack_dir, 'config.yaml') if os.path.isfile(config_file_path): os.remove(config_file_path)
[ "def", "process_content_pack_dir", "(", "pack_dir", ")", ":", "assert", "pack_dir", ".", "startswith", "(", "'/tmp'", ")", "config_file_path", "=", "os", ".", "path", ".", "join", "(", "pack_dir", ",", "'config.yaml'", ")", "if", "os", ".", "path", ".", "isfile", "(", "config_file_path", ")", ":", "os", ".", "remove", "(", "config_file_path", ")" ]
remove the config.yaml file from the content pack directory .
train
false
9,308
def parseAcceptHeader(value): chunks = [chunk.strip() for chunk in value.split(',')] accept = [] for chunk in chunks: parts = [s.strip() for s in chunk.split(';')] mtype = parts.pop(0) if ('/' not in mtype): continue (main, sub) = mtype.split('/', 1) for ext in parts: if ('=' in ext): (k, v) = ext.split('=', 1) if (k == 'q'): try: q = float(v) break except ValueError: pass else: q = 1.0 accept.append((q, main, sub)) accept.sort() accept.reverse() return [(main, sub, q) for (q, main, sub) in accept]
[ "def", "parseAcceptHeader", "(", "value", ")", ":", "chunks", "=", "[", "chunk", ".", "strip", "(", ")", "for", "chunk", "in", "value", ".", "split", "(", "','", ")", "]", "accept", "=", "[", "]", "for", "chunk", "in", "chunks", ":", "parts", "=", "[", "s", ".", "strip", "(", ")", "for", "s", "in", "chunk", ".", "split", "(", "';'", ")", "]", "mtype", "=", "parts", ".", "pop", "(", "0", ")", "if", "(", "'/'", "not", "in", "mtype", ")", ":", "continue", "(", "main", ",", "sub", ")", "=", "mtype", ".", "split", "(", "'/'", ",", "1", ")", "for", "ext", "in", "parts", ":", "if", "(", "'='", "in", "ext", ")", ":", "(", "k", ",", "v", ")", "=", "ext", ".", "split", "(", "'='", ",", "1", ")", "if", "(", "k", "==", "'q'", ")", ":", "try", ":", "q", "=", "float", "(", "v", ")", "break", "except", "ValueError", ":", "pass", "else", ":", "q", "=", "1.0", "accept", ".", "append", "(", "(", "q", ",", "main", ",", "sub", ")", ")", "accept", ".", "sort", "(", ")", "accept", ".", "reverse", "(", ")", "return", "[", "(", "main", ",", "sub", ",", "q", ")", "for", "(", "q", ",", "main", ",", "sub", ")", "in", "accept", "]" ]
parse an accept header .
train
true
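
The parser above is self-contained; a sample header shows the q-sorted output:

accept = parseAcceptHeader('text/html;q=0.9, application/json, */*;q=0.1')
print(accept)
# [('application', 'json', 1.0), ('text', 'html', 0.9), ('*', '*', 0.1)]
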
9,309
def full_text_search_missing(): try: testing.db.execute('CREATE VIRTUAL TABLE t using FTS3;') testing.db.execute('DROP TABLE t;') return False except: return True
[ "def", "full_text_search_missing", "(", ")", ":", "try", ":", "testing", ".", "db", ".", "execute", "(", "'CREATE VIRTUAL TABLE t using FTS3;'", ")", "testing", ".", "db", ".", "execute", "(", "'DROP TABLE t;'", ")", "return", "False", "except", ":", "return", "True" ]
test if full text search is implemented ; return false if it is and true otherwise .
train
false
9,311
def getIsPointInsideALoop(loops, point): for loop in loops: if euclidean.isPointInsideLoop(loop, point): return True return False
[ "def", "getIsPointInsideALoop", "(", "loops", ",", "point", ")", ":", "for", "loop", "in", "loops", ":", "if", "euclidean", ".", "isPointInsideLoop", "(", "loop", ",", "point", ")", ":", "return", "True", "return", "False" ]
determine if a point is inside a loop of a loop list .
train
false