id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
40,668
def listMediaFiles(path):
    """Recursively collect media files under *path*.

    Dot-directories and directories named 'Extras' are skipped; regular
    files are kept when isMediaFile() accepts their name.

    Args:
        path: directory to scan; a falsy value or a non-directory yields [].

    Returns:
        list of paths (path joined with each matching entry).
    """
    # Bug fix: the original tested ``not dir`` -- ``dir`` is the builtin
    # function and is always truthy, so empty/None paths fell through to
    # os.listdir() and crashed. The guard clearly meant ``not path``.
    if (not path) or (not os.path.isdir(path)):
        return []
    files = []
    for curFile in os.listdir(path):
        fullCurFile = os.path.join(path, curFile)
        if (os.path.isdir(fullCurFile)
                and not curFile.startswith(u'.')
                and curFile != u'Extras'):
            # Recurse into ordinary sub-directories.
            files += listMediaFiles(fullCurFile)
        elif isMediaFile(curFile):
            files.append(fullCurFile)
    return files
[ "def", "listMediaFiles", "(", "path", ")", ":", "if", "(", "(", "not", "dir", ")", "or", "(", "not", "os", ".", "path", ".", "isdir", "(", "path", ")", ")", ")", ":", "return", "[", "]", "files", "=", "[", "]", "for", "curFile", "in", "os", ".", "listdir", "(", "path", ")", ":", "fullCurFile", "=", "os", ".", "path", ".", "join", "(", "path", ",", "curFile", ")", "if", "(", "os", ".", "path", ".", "isdir", "(", "fullCurFile", ")", "and", "(", "not", "curFile", ".", "startswith", "(", "u'.'", ")", ")", "and", "(", "not", "(", "curFile", "==", "u'Extras'", ")", ")", ")", ":", "files", "+=", "listMediaFiles", "(", "fullCurFile", ")", "elif", "isMediaFile", "(", "curFile", ")", ":", "files", ".", "append", "(", "fullCurFile", ")", "return", "files" ]
get a list of files possibly containing media in a path .
train
false
40,670
@register.tag(name='get_inline_types')
def do_get_inline_types(parser, token):
    """Template tag handler for ``{% get_inline_types as <var> %}``.

    Parses the tag's arguments and returns an InlineTypes node that will
    store the inline types under the requested context variable.

    Raises:
        template.TemplateSyntaxError: when no arguments are given, or the
            arguments do not match ``as <name>``.
    """
    try:
        # Split the tag name from the rest of the argument string.
        tag_name, arg = token.contents.split(None, 1)
    except ValueError:
        # Fix: use the call-style raise (valid in Python 2 and 3) instead
        # of the Python-2-only ``raise Class, value`` statement form.
        raise template.TemplateSyntaxError(
            '%s tag requires arguments' % token.contents.split()[0])
    m = re.search('as (\\w+)', arg)
    if not m:
        raise template.TemplateSyntaxError(
            '%s tag had invalid arguments' % tag_name)
    var_name = m.groups()[0]
    return InlineTypes(var_name)
[ "@", "register", ".", "tag", "(", "name", "=", "'get_inline_types'", ")", "def", "do_get_inline_types", "(", "parser", ",", "token", ")", ":", "try", ":", "(", "tag_name", ",", "arg", ")", "=", "token", ".", "contents", ".", "split", "(", "None", ",", "1", ")", "except", "ValueError", ":", "raise", "template", ".", "TemplateSyntaxError", ",", "(", "'%s tag requires arguments'", "%", "token", ".", "contents", ".", "split", "(", ")", "[", "0", "]", ")", "m", "=", "re", ".", "search", "(", "'as (\\\\w+)'", ",", "arg", ")", "if", "(", "not", "m", ")", ":", "raise", "template", ".", "TemplateSyntaxError", ",", "(", "'%s tag had invalid arguments'", "%", "tag_name", ")", "var_name", "=", "m", ".", "groups", "(", ")", "[", "0", "]", "return", "InlineTypes", "(", "var_name", ")" ]
gets all inline types .
train
false
40,671
def optional_import(name, feature=None, source=None, comment=None):
    """Import package *name* if it is installed.

    On success the imported module is returned; on ImportError a
    MissingPackage placeholder describing the missing dependency is
    returned instead, so callers can defer the failure.
    """
    try:
        module = __import__(name)
    except ImportError:
        return MissingPackage(name, feature, source, comment)
    return module
[ "def", "optional_import", "(", "name", ",", "feature", "=", "None", ",", "source", "=", "None", ",", "comment", "=", "None", ")", ":", "try", ":", "return", "__import__", "(", "name", ")", "except", "ImportError", ":", "return", "MissingPackage", "(", "name", ",", "feature", ",", "source", ",", "comment", ")" ]
optionally import package name .
train
false
40,672
def ensure_views():
    """Make sure every expected view exists in the ``_design/salt`` document.

    Fetches the design document; if it is missing (the response carries an
    'error' key) or any valid salt view is absent, the views are
    (re)installed via set_salt_view(). Returns True when nothing was missing.
    """
    options = _get_options(ret=None)
    design_url = options['url'] + options['db'] + '/_design/salt'
    _response = _request('GET', design_url)
    # No design document at all -> install everything.
    if 'error' in _response:
        return set_salt_view()
    # Any single missing view also triggers a reinstall.
    if any(view not in _response['views'] for view in get_valid_salt_views()):
        return set_salt_view()
    return True
[ "def", "ensure_views", "(", ")", ":", "options", "=", "_get_options", "(", "ret", "=", "None", ")", "_response", "=", "_request", "(", "'GET'", ",", "(", "(", "options", "[", "'url'", "]", "+", "options", "[", "'db'", "]", ")", "+", "'/_design/salt'", ")", ")", "if", "(", "'error'", "in", "_response", ")", ":", "return", "set_salt_view", "(", ")", "for", "view", "in", "get_valid_salt_views", "(", ")", ":", "if", "(", "view", "not", "in", "_response", "[", "'views'", "]", ")", ":", "return", "set_salt_view", "(", ")", "return", "True" ]
this function makes sure that all the views that should exist in the design document do exist .
train
true
40,673
def pd(*args):
    """Return the caller script's directory joined with the given parts.

    Inspects the call stack to find the file that invoked pd(); when run
    interactively (filename '<stdin>' or empty) the current working
    directory is used instead.
    """
    this_frame = inspect.currentframe()
    caller_file = inspect.getouterframes(this_frame)[1][1]
    if not caller_file or caller_file == '<stdin>':
        # Interactive session: no real script file to anchor on.
        caller_file = os.getcwd()
    base_dir = os.path.dirname(os.path.realpath(caller_file))
    return os.path.join(base_dir, *args)
[ "def", "pd", "(", "*", "args", ")", ":", "f", "=", "inspect", ".", "currentframe", "(", ")", "f", "=", "inspect", ".", "getouterframes", "(", "f", ")", "[", "1", "]", "[", "1", "]", "f", "=", "(", "(", "(", "f", "!=", "'<stdin>'", ")", "and", "f", ")", "or", "os", ".", "getcwd", "(", ")", ")", "return", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "f", ")", ")", ",", "*", "args", ")" ]
returns the path to the parent directory of the script that calls pd() + given relative path .
train
false
40,674
def sys_write_flush(s):
    """Write *s* to stdout and flush immediately, so it appears without delay."""
    out = sys.stdout
    out.write(s)
    out.flush()
[ "def", "sys_write_flush", "(", "s", ")", ":", "sys", ".", "stdout", ".", "write", "(", "s", ")", "sys", ".", "stdout", ".", "flush", "(", ")" ]
writes and flushes without delay a text in the console .
train
false
40,675
def test_rgb_to_hsl_part_10():
    """Check rgb_to_hsl() on three colours that all sit at hue 180."""
    cases = [
        ((102, 153, 153), (180, 20, 50)),
        ((51, 204, 204), (180, 60, 50)),
        ((0, 255, 255), (180, 100, 50)),
    ]
    for rgb, expected in cases:
        assert rgb_to_hsl(*rgb) == expected
[ "def", "test_rgb_to_hsl_part_10", "(", ")", ":", "assert", "(", "rgb_to_hsl", "(", "102", ",", "153", ",", "153", ")", "==", "(", "180", ",", "20", ",", "50", ")", ")", "assert", "(", "rgb_to_hsl", "(", "51", ",", "204", ",", "204", ")", "==", "(", "180", ",", "60", ",", "50", ")", ")", "assert", "(", "rgb_to_hsl", "(", "0", ",", "255", ",", "255", ")", "==", "(", "180", ",", "100", ",", "50", ")", ")" ]
test rgb to hsl color function .
train
false
40,676
def dictfind(dictionary, element):
    """Return the first key whose value *is* (identity test) *element*.

    Falls off the end and returns None implicitly when no value is the
    same object as *element*. Note the comparison uses ``is``, not ``==``,
    so only the exact same object is found.
    """
    # Fix: items() works on both Python 2 and 3; the original used the
    # Python-2-only iteritems().
    for key, value in dictionary.items():
        if element is value:
            return key
[ "def", "dictfind", "(", "dictionary", ",", "element", ")", ":", "for", "(", "key", ",", "value", ")", "in", "dictionary", ".", "iteritems", "(", ")", ":", "if", "(", "element", "is", "value", ")", ":", "return", "key" ]
returns a key whose value in dictionary is element , or none if no value is identical to element .
train
false
40,678
def versioned_base(plugin, version):
    """Build and return a 'VersionedBase' class (created through the Meta
    metaclass) that carries the given plugin name and version."""
    attrs = {
        u'__metaclass__': Meta,
        u'plugin': plugin,
        u'version': version,
    }
    return Meta(u'VersionedBase', (object,), attrs)
[ "def", "versioned_base", "(", "plugin", ",", "version", ")", ":", "return", "Meta", "(", "u'VersionedBase'", ",", "(", "object", ",", ")", ",", "{", "u'__metaclass__'", ":", "Meta", ",", "u'plugin'", ":", "plugin", ",", "u'version'", ":", "version", "}", ")" ]
returns a class which can be used like base .
train
false
40,680
def _http(method, url, headers=None, **kw):
    """Send an HTTP request via urllib2 and return the response text.

    *method* is 'GET', 'POST' or 'UPLOAD' (multipart form data); remaining
    keyword arguments become the encoded request parameters. (Python 2
    code: urllib2, dict.iteritems.)
    """
    params = None
    boundary = None
    if (method == 'UPLOAD'):
        # Multipart body; the boundary is needed for the Content-Type header.
        (params, boundary) = _encode_multipart(**kw)
    else:
        params = _encode_params(**kw)
    # NOTE(review): the method is compared both against the _HTTP_GET
    # constant here and the literal 'GET' below -- presumably equal, confirm.
    http_url = (('%s?%s' % (url, params)) if (method == _HTTP_GET) else url)
    http_body = (None if (method == 'GET') else params)
    # NOTE(review): logging.error for a routine request trace looks like
    # leftover debugging; logging.debug would be more appropriate.
    logging.error(('%s: %s' % (method, http_url)))
    req = urllib2.Request(http_url, data=http_body)
    req.add_header('Accept-Encoding', 'gzip')
    if headers:
        for (k, v) in headers.iteritems():
            req.add_header(k, v)
    if boundary:
        req.add_header('Content-Type', ('multipart/form-data; boundary=%s' % boundary))
    try:
        resp = urllib2.urlopen(req, timeout=5)
        # _read_http_body presumably handles gzip decoding -- defined elsewhere.
        return _read_http_body(resp)
    finally:
        # NOTE(review): empty finally does nothing; the response is never
        # explicitly closed.
        pass
[ "def", "_http", "(", "method", ",", "url", ",", "headers", "=", "None", ",", "**", "kw", ")", ":", "params", "=", "None", "boundary", "=", "None", "if", "(", "method", "==", "'UPLOAD'", ")", ":", "(", "params", ",", "boundary", ")", "=", "_encode_multipart", "(", "**", "kw", ")", "else", ":", "params", "=", "_encode_params", "(", "**", "kw", ")", "http_url", "=", "(", "(", "'%s?%s'", "%", "(", "url", ",", "params", ")", ")", "if", "(", "method", "==", "_HTTP_GET", ")", "else", "url", ")", "http_body", "=", "(", "None", "if", "(", "method", "==", "'GET'", ")", "else", "params", ")", "logging", ".", "error", "(", "(", "'%s: %s'", "%", "(", "method", ",", "http_url", ")", ")", ")", "req", "=", "urllib2", ".", "Request", "(", "http_url", ",", "data", "=", "http_body", ")", "req", ".", "add_header", "(", "'Accept-Encoding'", ",", "'gzip'", ")", "if", "headers", ":", "for", "(", "k", ",", "v", ")", "in", "headers", ".", "iteritems", "(", ")", ":", "req", ".", "add_header", "(", "k", ",", "v", ")", "if", "boundary", ":", "req", ".", "add_header", "(", "'Content-Type'", ",", "(", "'multipart/form-data; boundary=%s'", "%", "boundary", ")", ")", "try", ":", "resp", "=", "urllib2", ".", "urlopen", "(", "req", ",", "timeout", "=", "5", ")", "return", "_read_http_body", "(", "resp", ")", "finally", ":", "pass" ]
send http request and return response text .
train
true
40,682
def ghuser_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """Docutils/Sphinx role: link *text* to the GitHub user of that name.

    Returns the standard role tuple ([nodes], [system_messages]).

    Note: the mutable ``options``/``content`` defaults follow the
    docutils role-function convention and are never mutated here.
    """
    # Removed the unused ``app = inliner.document.settings.env.app``
    # binding from the original; nothing read it.
    ref = 'https://www.github.com/' + text
    node = nodes.reference(rawtext, text, refuri=ref, **options)
    return ([node], [])
[ "def", "ghuser_role", "(", "name", ",", "rawtext", ",", "text", ",", "lineno", ",", "inliner", ",", "options", "=", "{", "}", ",", "content", "=", "[", "]", ")", ":", "app", "=", "inliner", ".", "document", ".", "settings", ".", "env", ".", "app", "ref", "=", "(", "'https://www.github.com/'", "+", "text", ")", "node", "=", "nodes", ".", "reference", "(", "rawtext", ",", "text", ",", "refuri", "=", "ref", ",", "**", "options", ")", "return", "(", "[", "node", "]", ",", "[", "]", ")" ]
link to a github user .
train
true
40,683
def get_cache_hit_ratio(name):
    """Return the cache hit ratio (percent) over the last delta interval.

    Args:
        name: metric name (unused; kept for the collector callback API).

    Returns:
        hit percentage as a float, or 0 when there were no client requests.
    """
    try:
        hits = get_delta(NAME_PREFIX + 'cache_hit')
        reqs = get_delta(NAME_PREFIX + 'client_req')
        # float() guards against Python-2 truncating integer division,
        # which silently rounded the ratio down in the original.
        result = (float(hits) / reqs) * 100
    except ZeroDivisionError:
        result = 0
    return result
[ "def", "get_cache_hit_ratio", "(", "name", ")", ":", "try", ":", "result", "=", "(", "(", "get_delta", "(", "(", "NAME_PREFIX", "+", "'cache_hit'", ")", ")", "/", "get_delta", "(", "(", "NAME_PREFIX", "+", "'client_req'", ")", ")", ")", "*", "100", ")", "except", "ZeroDivisionError", ":", "result", "=", "0", "return", "result" ]
return cache hit ratio .
train
false
40,684
def print_python(expr, **settings):
    """Print *expr* rendered as evaluatable Python source."""
    rendered = python(expr, **settings)
    print(rendered)
[ "def", "print_python", "(", "expr", ",", "**", "settings", ")", ":", "print", "(", "python", "(", "expr", ",", "**", "settings", ")", ")" ]
print expression to be evaluated in python .
train
false
40,685
def _multiple_range_string_to_set(ranges_str):
    """Convert text like 'a-b, c-d, and e-f' into one combined character set."""
    char_set = set()
    for piece in ranges_str.split(', '):
        # The final item may carry an 'and ' prefix -- strip it first.
        if piece.startswith('and '):
            piece = piece[4:]
        char_set.update(_range_string_to_set(piece))
    return char_set
[ "def", "_multiple_range_string_to_set", "(", "ranges_str", ")", ":", "char_set", "=", "set", "(", ")", "for", "range_str", "in", "ranges_str", ".", "split", "(", "', '", ")", ":", "if", "range_str", ".", "startswith", "(", "'and '", ")", ":", "range_str", "=", "range_str", "[", "4", ":", "]", "char_set", ".", "update", "(", "_range_string_to_set", "(", "range_str", ")", ")", "return", "char_set" ]
convert a string of multiple ranges to a set .
train
false
40,687
def explore_folder(c, name):
    """List the messages in IMAP folder *name* and let the user choose one.

    Loops forever, re-listing the folder each pass, until the user types
    something starting with 'q'. A numeric reply that matches a message
    UID is handed to explore_message(). (Python 2 code: print statements,
    raw_input.)
    """
    while True:
        # Re-select on every pass so the listing reflects new arrivals;
        # readonly avoids setting \Seen flags while browsing.
        c.select_folder(name, readonly=True)
        msgdict = c.fetch('1:*', ['BODY.PEEK[HEADER.FIELDS (FROM SUBJECT)]', 'FLAGS', 'INTERNALDATE', 'RFC822.SIZE'])
        print
        for uid in sorted(msgdict):
            items = msgdict[uid]
            # One summary line per message: uid, date, size, flags.
            print ('%6d %20s %6d bytes %s' % (uid, items['INTERNALDATE'], items['RFC822.SIZE'], ' '.join(items['FLAGS'])))
            # Indented From/Subject header lines under the summary.
            for i in items['BODY[HEADER.FIELDS (FROM SUBJECT)]'].splitlines():
                print (' ' * 6), i.strip()
        reply = raw_input(('Folder %s - type a message UID, or "q" to quit: ' % name)).strip()
        if reply.lower().startswith('q'):
            break
        try:
            reply = int(reply)
        except ValueError:
            print 'Please type an integer or "q" to quit'
        else:
            # Silently ignore integers that are not a listed UID.
            if (reply in msgdict):
                explore_message(c, reply)
    c.close_folder()
[ "def", "explore_folder", "(", "c", ",", "name", ")", ":", "while", "True", ":", "c", ".", "select_folder", "(", "name", ",", "readonly", "=", "True", ")", "msgdict", "=", "c", ".", "fetch", "(", "'1:*'", ",", "[", "'BODY.PEEK[HEADER.FIELDS (FROM SUBJECT)]'", ",", "'FLAGS'", ",", "'INTERNALDATE'", ",", "'RFC822.SIZE'", "]", ")", "print", "for", "uid", "in", "sorted", "(", "msgdict", ")", ":", "items", "=", "msgdict", "[", "uid", "]", "print", "(", "'%6d %20s %6d bytes %s'", "%", "(", "uid", ",", "items", "[", "'INTERNALDATE'", "]", ",", "items", "[", "'RFC822.SIZE'", "]", ",", "' '", ".", "join", "(", "items", "[", "'FLAGS'", "]", ")", ")", ")", "for", "i", "in", "items", "[", "'BODY[HEADER.FIELDS (FROM SUBJECT)]'", "]", ".", "splitlines", "(", ")", ":", "print", "(", "' '", "*", "6", ")", ",", "i", ".", "strip", "(", ")", "reply", "=", "raw_input", "(", "(", "'Folder %s - type a message UID, or \"q\" to quit: '", "%", "name", ")", ")", ".", "strip", "(", ")", "if", "reply", ".", "lower", "(", ")", ".", "startswith", "(", "'q'", ")", ":", "break", "try", ":", "reply", "=", "int", "(", "reply", ")", "except", "ValueError", ":", "print", "'Please type an integer or \"q\" to quit'", "else", ":", "if", "(", "reply", "in", "msgdict", ")", ":", "explore_message", "(", "c", ",", "reply", ")", "c", ".", "close_folder", "(", ")" ]
list the messages in folder name and let the user choose one .
train
false
40,688
def deferToThreadPool(reactor, threadpool, f, *args, **kwargs):
    """Run f(*args, **kwargs) on *threadpool* and return a Deferred that
    fires with the result (or failure) on the reactor thread."""
    deferred = defer.Deferred()

    def onResult(success, result):
        # Route the outcome back onto the reactor thread; firing the
        # Deferred directly from a worker thread would not be safe.
        if success:
            reactor.callFromThread(deferred.callback, result)
        else:
            reactor.callFromThread(deferred.errback, result)

    threadpool.callInThreadWithCallback(onResult, f, *args, **kwargs)
    return deferred
[ "def", "deferToThreadPool", "(", "reactor", ",", "threadpool", ",", "f", ",", "*", "args", ",", "**", "kwargs", ")", ":", "d", "=", "defer", ".", "Deferred", "(", ")", "def", "onResult", "(", "success", ",", "result", ")", ":", "if", "success", ":", "reactor", ".", "callFromThread", "(", "d", ".", "callback", ",", "result", ")", "else", ":", "reactor", ".", "callFromThread", "(", "d", ".", "errback", ",", "result", ")", "threadpool", ".", "callInThreadWithCallback", "(", "onResult", ",", "f", ",", "*", "args", ",", "**", "kwargs", ")", "return", "d" ]
call the function c{f} using a thread from the given threadpool and return the result as a deferred .
train
false
40,689
def getChildElementsByLocalName(childNodes, localName):
    """Return the child nodes whose getNodeName() equals the lowercased
    *localName* (the nodes' own names are compared as-is)."""
    wanted = localName.lower()
    return [child for child in childNodes if child.getNodeName() == wanted]
[ "def", "getChildElementsByLocalName", "(", "childNodes", ",", "localName", ")", ":", "childElementsByLocalName", "=", "[", "]", "for", "childNode", "in", "childNodes", ":", "if", "(", "localName", ".", "lower", "(", ")", "==", "childNode", ".", "getNodeName", "(", ")", ")", ":", "childElementsByLocalName", ".", "append", "(", "childNode", ")", "return", "childElementsByLocalName" ]
get the childnodes which have the given local name .
train
false
40,690
def get_device_type_sizes():
    """Return {'gpu_ptr_size', 'cpu_ptr_size', 'int_size'} in bytes.

    Probes the Theano CUDA backend when available; on any probe failure
    the 64-bit defaults below are kept. The result is memoised on the
    function object itself ('rval'), so the probe runs at most once.
    """
    if hasattr(get_device_type_sizes, 'rval'):
        return get_device_type_sizes.rval
    # Conservative 64-bit defaults, used when the CUDA probe fails.
    gpu_ptr_size = 8
    cpu_ptr_size = 8
    int_size = 8
    try:
        cuda_ndarray = theano.sandbox.cuda.cuda_ndarray.cuda_ndarray
        t = cuda_ndarray.ptr_int_size()
        (gpu_ptr_size, cpu_ptr_size, int_size, gpu_int_size) = t
        # Host and device int sizes are expected to agree; a mismatch is
        # an unsupported configuration.
        assert (int_size == gpu_int_size), (int_size, gpu_int_size)
        del gpu_int_size
        del t
    except Exception as e:
        # Probe failure is deliberately non-fatal: warn and fall back to
        # the defaults above.
        _logger.warning(('Optimization Warning: Got the following error, but you can ignore it. This could cause less GpuElemwise fused together.\n%s' % e))
    # Cache and return in one step.
    rval = get_device_type_sizes.rval = dict(gpu_ptr_size=gpu_ptr_size, cpu_ptr_size=cpu_ptr_size, int_size=int_size)
    return rval
[ "def", "get_device_type_sizes", "(", ")", ":", "if", "hasattr", "(", "get_device_type_sizes", ",", "'rval'", ")", ":", "return", "get_device_type_sizes", ".", "rval", "gpu_ptr_size", "=", "8", "cpu_ptr_size", "=", "8", "int_size", "=", "8", "try", ":", "cuda_ndarray", "=", "theano", ".", "sandbox", ".", "cuda", ".", "cuda_ndarray", ".", "cuda_ndarray", "t", "=", "cuda_ndarray", ".", "ptr_int_size", "(", ")", "(", "gpu_ptr_size", ",", "cpu_ptr_size", ",", "int_size", ",", "gpu_int_size", ")", "=", "t", "assert", "(", "int_size", "==", "gpu_int_size", ")", ",", "(", "int_size", ",", "gpu_int_size", ")", "del", "gpu_int_size", "del", "t", "except", "Exception", "as", "e", ":", "_logger", ".", "warning", "(", "(", "'Optimization Warning: Got the following error, but you can ignore it. This could cause less GpuElemwise fused together.\\n%s'", "%", "e", ")", ")", "rval", "=", "get_device_type_sizes", ".", "rval", "=", "dict", "(", "gpu_ptr_size", "=", "gpu_ptr_size", ",", "cpu_ptr_size", "=", "cpu_ptr_size", ",", "int_size", "=", "int_size", ")", "return", "rval" ]
returns a dict with the gpu pointer size , cpu pointer size and int size in bytes .
train
false
40,691
def ci_skip(registry, xml_parent, data):
    """yaml: ci-skip
    Skip making a build for certain pushes (Jenkins ci-skip plugin).

    Emits the ruby-proxy-object XML structure that the ruby-runtime
    plugin expects for a BuildWrapperProxy; element order matters, so
    children are created top-down.
    """
    rpobj = XML.SubElement(xml_parent, 'ruby-proxy-object')
    robj = XML.SubElement(rpobj, 'ruby-object', attrib={'pluginid': 'ci-skip', 'ruby-class': 'Jenkins::Tasks::BuildWrapperProxy'})
    pluginid = XML.SubElement(robj, 'pluginid', {'pluginid': 'ci-skip', 'ruby-class': 'String'})
    pluginid.text = 'ci-skip'
    obj = XML.SubElement(robj, 'object', {'ruby-class': 'CiSkipWrapper', 'pluginid': 'ci-skip'})
    # The wrapper carries no configuration, hence the NilClass child.
    XML.SubElement(obj, 'ci__skip', {'pluginid': 'ci-skip', 'ruby-class': 'NilClass'})
[ "def", "ci_skip", "(", "registry", ",", "xml_parent", ",", "data", ")", ":", "rpobj", "=", "XML", ".", "SubElement", "(", "xml_parent", ",", "'ruby-proxy-object'", ")", "robj", "=", "XML", ".", "SubElement", "(", "rpobj", ",", "'ruby-object'", ",", "attrib", "=", "{", "'pluginid'", ":", "'ci-skip'", ",", "'ruby-class'", ":", "'Jenkins::Tasks::BuildWrapperProxy'", "}", ")", "pluginid", "=", "XML", ".", "SubElement", "(", "robj", ",", "'pluginid'", ",", "{", "'pluginid'", ":", "'ci-skip'", ",", "'ruby-class'", ":", "'String'", "}", ")", "pluginid", ".", "text", "=", "'ci-skip'", "obj", "=", "XML", ".", "SubElement", "(", "robj", ",", "'object'", ",", "{", "'ruby-class'", ":", "'CiSkipWrapper'", ",", "'pluginid'", ":", "'ci-skip'", "}", ")", "XML", ".", "SubElement", "(", "obj", ",", "'ci__skip'", ",", "{", "'pluginid'", ":", "'ci-skip'", ",", "'ruby-class'", ":", "'NilClass'", "}", ")" ]
yaml: ci-skip skip making a build for certain push .
train
false
40,692
def call_rename(*args, **kwargs):
    """Rename a single device.

    Requires exactly one device id (resolved from kwargs) and a 'title'
    keyword; delegates the update to _set() with the new name.

    Raises:
        CommandExecutionError: more than one device matched, or no title.
    """
    dev_id = _get_devices(kwargs)
    if len(dev_id) > 1:
        raise CommandExecutionError('Only one device can be renamed at a time')
    if 'title' not in kwargs:
        raise CommandExecutionError('Title is missing')
    payload = {'name': kwargs['title']}
    return _set(dev_id[0], payload, method='')
[ "def", "call_rename", "(", "*", "args", ",", "**", "kwargs", ")", ":", "dev_id", "=", "_get_devices", "(", "kwargs", ")", "if", "(", "len", "(", "dev_id", ")", ">", "1", ")", ":", "raise", "CommandExecutionError", "(", "'Only one device can be renamed at a time'", ")", "if", "(", "'title'", "not", "in", "kwargs", ")", ":", "raise", "CommandExecutionError", "(", "'Title is missing'", ")", "return", "_set", "(", "dev_id", "[", "0", "]", ",", "{", "'name'", ":", "kwargs", "[", "'title'", "]", "}", ",", "method", "=", "''", ")" ]
rename a device .
train
true
40,693
def facility():
    """RESTful CRUD controller for org_facility; after creation the user
    is redirected to the new record's read view under the vol controller."""
    next_url = URL(c='vol', f='facility', args=['[id]', 'read'])
    s3db.configure('org_facility', create_next=next_url)
    return s3db.org_facility_controller()
[ "def", "facility", "(", ")", ":", "s3db", ".", "configure", "(", "'org_facility'", ",", "create_next", "=", "URL", "(", "c", "=", "'vol'", ",", "f", "=", "'facility'", ",", "args", "=", "[", "'[id]'", ",", "'read'", "]", ")", ")", "return", "s3db", ".", "org_facility_controller", "(", ")" ]
restful crud controller .
train
false
40,694
def labels_from_probs(probs):
    """Argmax along the last dimension of *probs*, returned as int32 labels."""
    # axis=-1 is equivalent to computing ndim-1 explicitly.
    winners = np.argmax(probs, axis=-1)
    return np.asarray(winners, dtype=np.int32)
[ "def", "labels_from_probs", "(", "probs", ")", ":", "last_axis", "=", "(", "len", "(", "np", ".", "shape", "(", "probs", ")", ")", "-", "1", ")", "labels", "=", "np", ".", "argmax", "(", "probs", ",", "axis", "=", "last_axis", ")", "return", "np", ".", "asarray", "(", "labels", ",", "dtype", "=", "np", ".", "int32", ")" ]
helper function: computes argmax along last dimension of array to obtain labels .
train
false
40,695
def _utc_localize_index_level_0(df): idx = df.index df.index = pd.MultiIndex.from_product((idx.levels[0].tz_localize('utc'), idx.levels[1]), names=idx.names) return df
[ "def", "_utc_localize_index_level_0", "(", "df", ")", ":", "idx", "=", "df", ".", "index", "df", ".", "index", "=", "pd", ".", "MultiIndex", ".", "from_product", "(", "(", "idx", ".", "levels", "[", "0", "]", ".", "tz_localize", "(", "'utc'", ")", ",", "idx", ".", "levels", "[", "1", "]", ")", ",", "names", "=", "idx", ".", "names", ")", "return", "df" ]
tz_localize the first level of a multiindexed dataframe to utc .
train
false
40,696
def process_month_hours(month_date, start_hour=0, days=None):
    """Process hourly pixel logs from an entire month. (Python 2 code.)

    Args:
        month_date: 'YYYY-MM' string.
        start_hour: hour to start from on the FIRST day only; later days
            always cover 0-23 (see the reset at the bottom of the loop).
        days: optional iterable of day numbers; defaults to every day of
            the month.
    """
    (year, month) = month_date.split('-')
    (year, month) = (int(year), int(month))
    days = (days or xrange(1, (calendar.monthrange(year, month)[1] + 1)))
    hours = xrange(start_hour, 24)
    for day in days:
        for hour in hours:
            hour_date = ('%04d-%02d-%02d-%02d' % (year, month, day, hour))
            # Logs may be gzip- or bzip2-compressed; try .gz first.
            log_path = os.path.join(RAW_LOG_DIR, ('%s.log.gz' % hour_date))
            if (not s3_key_exists(s3_connection, log_path)):
                log_path = os.path.join(RAW_LOG_DIR, ('%s.log.bz2' % hour_date))
                if (not s3_key_exists(s3_connection, log_path)):
                    # Missing hours are skipped, not fatal.
                    print ('Missing log for %s' % hour_date)
                    continue
            print ('Processing %s' % log_path)
            process_pixel_log(log_path, fast=True)
        # start_hour only applies to the first day processed.
        hours = xrange(24)
[ "def", "process_month_hours", "(", "month_date", ",", "start_hour", "=", "0", ",", "days", "=", "None", ")", ":", "(", "year", ",", "month", ")", "=", "month_date", ".", "split", "(", "'-'", ")", "(", "year", ",", "month", ")", "=", "(", "int", "(", "year", ")", ",", "int", "(", "month", ")", ")", "days", "=", "(", "days", "or", "xrange", "(", "1", ",", "(", "calendar", ".", "monthrange", "(", "year", ",", "month", ")", "[", "1", "]", "+", "1", ")", ")", ")", "hours", "=", "xrange", "(", "start_hour", ",", "24", ")", "for", "day", "in", "days", ":", "for", "hour", "in", "hours", ":", "hour_date", "=", "(", "'%04d-%02d-%02d-%02d'", "%", "(", "year", ",", "month", ",", "day", ",", "hour", ")", ")", "log_path", "=", "os", ".", "path", ".", "join", "(", "RAW_LOG_DIR", ",", "(", "'%s.log.gz'", "%", "hour_date", ")", ")", "if", "(", "not", "s3_key_exists", "(", "s3_connection", ",", "log_path", ")", ")", ":", "log_path", "=", "os", ".", "path", ".", "join", "(", "RAW_LOG_DIR", ",", "(", "'%s.log.bz2'", "%", "hour_date", ")", ")", "if", "(", "not", "s3_key_exists", "(", "s3_connection", ",", "log_path", ")", ")", ":", "print", "(", "'Missing log for %s'", "%", "hour_date", ")", "continue", "print", "(", "'Processing %s'", "%", "log_path", ")", "process_pixel_log", "(", "log_path", ",", "fast", "=", "True", ")", "hours", "=", "xrange", "(", "24", ")" ]
process hourly logs from entire month .
train
false
40,697
def _get_closest_begin(begin_before, end_before): end_iter = reversed(end_before) begin_iter = reversed(begin_before) while True: try: b = next(begin_iter) except: raise NoEnvError('No open environment detected') try: e = next(end_iter) except: break if (not (b.begin() < e.begin())): break return b
[ "def", "_get_closest_begin", "(", "begin_before", ",", "end_before", ")", ":", "end_iter", "=", "reversed", "(", "end_before", ")", "begin_iter", "=", "reversed", "(", "begin_before", ")", "while", "True", ":", "try", ":", "b", "=", "next", "(", "begin_iter", ")", "except", ":", "raise", "NoEnvError", "(", "'No open environment detected'", ")", "try", ":", "e", "=", "next", "(", "end_iter", ")", "except", ":", "break", "if", "(", "not", "(", "b", ".", "begin", "(", ")", "<", "e", ".", "begin", "(", ")", ")", ")", ":", "break", "return", "b" ]
returns the closest begin .
train
false
40,698
def uidb36_to_uidb64(uidb36):
    """Convert a base36-encoded user id to the base64 form used by newer
    password-reset URLs; malformed input maps to the harmless id '1'."""
    try:
        as_int = base36_to_int(uidb36)
        uidb64 = force_text(urlsafe_base64_encode(force_bytes(as_int)))
    except ValueError:
        # An invalid uidb36 simply yields an id that matches no user.
        uidb64 = '1'
    return uidb64
[ "def", "uidb36_to_uidb64", "(", "uidb36", ")", ":", "try", ":", "uidb64", "=", "force_text", "(", "urlsafe_base64_encode", "(", "force_bytes", "(", "base36_to_int", "(", "uidb36", ")", ")", ")", ")", "except", "ValueError", ":", "uidb64", "=", "'1'", "return", "uidb64" ]
needed to support old password reset urls that use base36-encoded user ids URL#diff-c571286052438b2e3190f8db8331a92br231 args: uidb36: base36-encoded user id returns: base64-encoded user id .
train
false
40,700
def save_send(socket, data):
    """Send all of *data* on *socket*, retrying after a short pause on
    socket errors; returns once every byte has been handed off."""
    remaining = data
    while remaining:
        try:
            sent = socket.send(remaining)
            remaining = remaining[sent:]
        except error:
            # Transient failure (e.g. full buffer on a non-blocking
            # socket): back off briefly and retry.
            sleep(0.01)
[ "def", "save_send", "(", "socket", ",", "data", ")", ":", "while", "(", "len", "(", "data", ")", ">", "0", ")", ":", "try", ":", "send_data_size", "=", "socket", ".", "send", "(", "data", ")", "data", "=", "data", "[", "send_data_size", ":", "]", "except", "error", "as", "msg", ":", "sleep", "(", "0.01", ")" ]
send data to a socket .
train
false
40,701
def get_config_md5():
    """Return an MD5 string of the current config options that take part
    in the C cache key, rendered in a stable (fullname-sorted) order."""
    keyed = [c for c in _config_var_list if c.in_c_key]
    keyed.sort(key=lambda cv: cv.fullname)
    lines = [('%s = %s' % (cv.fullname, cv.__get__(True, None))) for cv in keyed]
    return theano.gof.utils.hash_from_code('\n'.join(lines))
[ "def", "get_config_md5", "(", ")", ":", "all_opts", "=", "sorted", "(", "[", "c", "for", "c", "in", "_config_var_list", "if", "c", ".", "in_c_key", "]", ",", "key", "=", "(", "lambda", "cv", ":", "cv", ".", "fullname", ")", ")", "return", "theano", ".", "gof", ".", "utils", ".", "hash_from_code", "(", "'\\n'", ".", "join", "(", "[", "(", "'%s = %s'", "%", "(", "cv", ".", "fullname", ",", "cv", ".", "__get__", "(", "True", ",", "None", ")", ")", ")", "for", "cv", "in", "all_opts", "]", ")", ")" ]
return a string md5 of the current config options .
train
false
40,702
def auto_reconnect_cursor(func):
    """Decorator: retry a cursor operation once after transparently
    reconnecting, when the error looks like a dropped connection
    (per can_reconnect)."""
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        except Exception as exc:
            if not can_reconnect(exc):
                raise
            # Rebuild connection and cursor, then retry exactly once.
            self.db.close(reconnect=True)
            self.cursor = self.db._cursor()
            return func(self, *args, **kwargs)
    return wrapper
[ "def", "auto_reconnect_cursor", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "inner", "(", "self", ",", "*", "args", ",", "**", "kwargs", ")", ":", "try", ":", "return", "func", "(", "self", ",", "*", "args", ",", "**", "kwargs", ")", "except", "Exception", "as", "e", ":", "if", "(", "not", "can_reconnect", "(", "e", ")", ")", ":", "raise", "self", ".", "db", ".", "close", "(", "reconnect", "=", "True", ")", "self", ".", "cursor", "=", "self", ".", "db", ".", "_cursor", "(", ")", "return", "func", "(", "self", ",", "*", "args", ",", "**", "kwargs", ")", "return", "inner" ]
attempt to safely reconnect when an error is hit that resembles the bouncer disconnecting the client due to a timeout/etc during a cursor execution .
train
false
40,703
def submit_bulk_course_email(request, course_key, email_id):
    """Request to have bulk email sent as a background task. (Python 2
    code: dict.iteritems, md5 of a str.)

    Builds a human-readable summary of the email's targets and submits
    the task via submit_task().
    """
    email_obj = CourseEmail.objects.get(id=email_id)
    # Count identical target types so e.g. three cohort targets can be
    # rendered as '3 cohort' instead of three duplicate entries.
    targets = Counter([target.target_type for target in email_obj.targets.all()])
    targets = [(target if (count <= 1) else '{} {}'.format(count, target)) for (target, count) in targets.iteritems()]
    task_type = 'bulk_course_email'
    task_class = send_bulk_course_email
    task_input = {'email_id': email_id, 'to_option': targets}
    # Key derived only from the email id -- presumably so submit_task can
    # detect duplicate submissions of the same email; confirm there.
    # md5 keeps the key a fixed length; not security-sensitive here.
    task_key_stub = str(email_id)
    task_key = hashlib.md5(task_key_stub).hexdigest()
    return submit_task(request, task_type, task_class, course_key, task_input, task_key)
[ "def", "submit_bulk_course_email", "(", "request", ",", "course_key", ",", "email_id", ")", ":", "email_obj", "=", "CourseEmail", ".", "objects", ".", "get", "(", "id", "=", "email_id", ")", "targets", "=", "Counter", "(", "[", "target", ".", "target_type", "for", "target", "in", "email_obj", ".", "targets", ".", "all", "(", ")", "]", ")", "targets", "=", "[", "(", "target", "if", "(", "count", "<=", "1", ")", "else", "'{} {}'", ".", "format", "(", "count", ",", "target", ")", ")", "for", "(", "target", ",", "count", ")", "in", "targets", ".", "iteritems", "(", ")", "]", "task_type", "=", "'bulk_course_email'", "task_class", "=", "send_bulk_course_email", "task_input", "=", "{", "'email_id'", ":", "email_id", ",", "'to_option'", ":", "targets", "}", "task_key_stub", "=", "str", "(", "email_id", ")", "task_key", "=", "hashlib", ".", "md5", "(", "task_key_stub", ")", ".", "hexdigest", "(", ")", "return", "submit_task", "(", "request", ",", "task_type", ",", "task_class", ",", "course_key", ",", "task_input", ",", "task_key", ")" ]
request to have bulk email sent as a background task .
train
false
40,704
def _lowess_tricube(t):
    """Apply the tricube weight kernel to the array *t* in place.

    Steps: t <- |t|, cube, negate, add 1, cube again -- i.e.
    t <- (1 - |t|**3)**3 (assuming _lowess_mycube cubes in place, as its
    name suggests; it is defined elsewhere).
    """
    np.absolute(t, out=t)
    _lowess_mycube(t)
    np.negative(t, out=t)
    t += 1
    _lowess_mycube(t)
[ "def", "_lowess_tricube", "(", "t", ")", ":", "t", "[", ":", "]", "=", "np", ".", "absolute", "(", "t", ")", "_lowess_mycube", "(", "t", ")", "t", "[", ":", "]", "=", "np", ".", "negative", "(", "t", ")", "t", "+=", "1", "_lowess_mycube", "(", "t", ")" ]
the _tricube function applied to a numpy array .
train
false
40,707
def incrementCounter(technique):
    """Increment the query counter stored in kb.counters for *technique*.

    getCounter() supplies the current value (presumably defaulting to 0
    for unseen techniques -- it is defined elsewhere).
    """
    kb.counters[technique] = (getCounter(technique) + 1)
[ "def", "incrementCounter", "(", "technique", ")", ":", "kb", ".", "counters", "[", "technique", "]", "=", "(", "getCounter", "(", "technique", ")", "+", "1", ")" ]
increments query counter for a given technique .
train
false
40,708
@cache_permission
def can_add_translation(user, project):
    """Check whether *user* may add a translation to *project*.

    The result is cached by the cache_permission decorator (by its name;
    defined elsewhere).
    """
    return check_permission(user, project, 'trans.add_translation')
[ "@", "cache_permission", "def", "can_add_translation", "(", "user", ",", "project", ")", ":", "return", "check_permission", "(", "user", ",", "project", ",", "'trans.add_translation'", ")" ]
checks whether user can add a translation to given project .
train
false
40,709
def get_image_info(url):
    """Fetch just enough of the image at *url* to report its metadata.

    Streams the response into PIL's incremental parser and stops as soon
    as format and dimensions are known.

    Returns:
        dict with 'content_type', 'size' (kB), 'width', 'height'.
        Oversized images (> 10000 kB reported size) are not parsed at
        all and come back with zero width/height.
    """
    r = requests.get(url, stream=True)
    # Fix: content-length may be absent (e.g. chunked responses); the
    # original crashed on float(None). Treat a missing header as size 0.
    content_length = r.headers.get('content-length')
    image_size = (float(content_length) / 1000) if content_length else 0.0
    image_max_size = 10000
    image_data = {'content_type': '', 'size': image_size, 'width': 0, 'height': 0}
    if image_size > image_max_size:
        return image_data
    parser = ImageFile.Parser()
    while True:
        data = r.raw.read(1024)
        if not data:
            break
        parser.feed(data)
        if parser.image:
            # Enough bytes parsed to know the essentials -- stop reading.
            image_data['content_type'] = parser.image.format
            image_data['width'] = parser.image.size[0]
            image_data['height'] = parser.image.size[1]
            break
    return image_data
[ "def", "get_image_info", "(", "url", ")", ":", "r", "=", "requests", ".", "get", "(", "url", ",", "stream", "=", "True", ")", "image_size", "=", "r", ".", "headers", ".", "get", "(", "'content-length'", ")", "image_size", "=", "(", "float", "(", "image_size", ")", "/", "1000", ")", "image_max_size", "=", "10000", "image_data", "=", "{", "'content_type'", ":", "''", ",", "'size'", ":", "image_size", ",", "'width'", ":", "0", ",", "'height'", ":", "0", "}", "if", "(", "image_size", ">", "image_max_size", ")", ":", "return", "image_data", "data", "=", "None", "parser", "=", "ImageFile", ".", "Parser", "(", ")", "while", "True", ":", "data", "=", "r", ".", "raw", ".", "read", "(", "1024", ")", "if", "(", "not", "data", ")", ":", "break", "parser", ".", "feed", "(", "data", ")", "if", "parser", ".", "image", ":", "image_data", "[", "'content_type'", "]", "=", "parser", ".", "image", ".", "format", "image_data", "[", "'width'", "]", "=", "parser", ".", "image", ".", "size", "[", "0", "]", "image_data", "[", "'height'", "]", "=", "parser", ".", "image", ".", "size", "[", "1", "]", "break", "return", "image_data" ]
returns the content-type .
train
false
40,710
def kid_rsa_private_key(a, b, A, B):
    """Compute the KidRSA private key (n, d) from the four secret values.

    Uses M = a*b - 1, e = A*M + a, d = B*M + b, n = (e*d - 1) // M.
    """
    m = (a * b) - 1
    public_e = (A * m) + a
    private_d = (B * m) + b
    modulus_n = ((public_e * private_d) - 1) // m
    return (modulus_n, private_d)
[ "def", "kid_rsa_private_key", "(", "a", ",", "b", ",", "A", ",", "B", ")", ":", "M", "=", "(", "(", "a", "*", "b", ")", "-", "1", ")", "e", "=", "(", "(", "A", "*", "M", ")", "+", "a", ")", "d", "=", "(", "(", "B", "*", "M", ")", "+", "b", ")", "n", "=", "(", "(", "(", "e", "*", "d", ")", "-", "1", ")", "//", "M", ")", "return", "(", "n", ",", "d", ")" ]
compute m = a b - 1 .
train
false
40,715
def Put(entities, **kwargs): return PutAsync(entities, **kwargs).get_result()
[ "def", "Put", "(", "entities", ",", "**", "kwargs", ")", ":", "return", "PutAsync", "(", "entities", ",", "**", "kwargs", ")", ".", "get_result", "(", ")" ]
store one or more entities in the datastore .
train
false
40,716
def decode_all(data, codec_options=DEFAULT_CODEC_OPTIONS): if (not isinstance(codec_options, CodecOptions)): raise _CODEC_OPTIONS_TYPE_ERROR docs = [] position = 0 end = (len(data) - 1) use_raw = _raw_document_class(codec_options.document_class) try: while (position < end): obj_size = _UNPACK_INT(data[position:(position + 4)])[0] if ((len(data) - position) < obj_size): raise InvalidBSON('invalid object size') obj_end = ((position + obj_size) - 1) if (data[obj_end:(position + obj_size)] != '\x00'): raise InvalidBSON('bad eoo') if use_raw: docs.append(codec_options.document_class(data[position:(obj_end + 1)], codec_options)) else: docs.append(_elements_to_dict(data, (position + 4), obj_end, codec_options)) position += obj_size return docs except InvalidBSON: raise except Exception: (_, exc_value, exc_tb) = sys.exc_info() reraise(InvalidBSON, exc_value, exc_tb)
[ "def", "decode_all", "(", "data", ",", "codec_options", "=", "DEFAULT_CODEC_OPTIONS", ")", ":", "if", "(", "not", "isinstance", "(", "codec_options", ",", "CodecOptions", ")", ")", ":", "raise", "_CODEC_OPTIONS_TYPE_ERROR", "docs", "=", "[", "]", "position", "=", "0", "end", "=", "(", "len", "(", "data", ")", "-", "1", ")", "use_raw", "=", "_raw_document_class", "(", "codec_options", ".", "document_class", ")", "try", ":", "while", "(", "position", "<", "end", ")", ":", "obj_size", "=", "_UNPACK_INT", "(", "data", "[", "position", ":", "(", "position", "+", "4", ")", "]", ")", "[", "0", "]", "if", "(", "(", "len", "(", "data", ")", "-", "position", ")", "<", "obj_size", ")", ":", "raise", "InvalidBSON", "(", "'invalid object size'", ")", "obj_end", "=", "(", "(", "position", "+", "obj_size", ")", "-", "1", ")", "if", "(", "data", "[", "obj_end", ":", "(", "position", "+", "obj_size", ")", "]", "!=", "'\\x00'", ")", ":", "raise", "InvalidBSON", "(", "'bad eoo'", ")", "if", "use_raw", ":", "docs", ".", "append", "(", "codec_options", ".", "document_class", "(", "data", "[", "position", ":", "(", "obj_end", "+", "1", ")", "]", ",", "codec_options", ")", ")", "else", ":", "docs", ".", "append", "(", "_elements_to_dict", "(", "data", ",", "(", "position", "+", "4", ")", ",", "obj_end", ",", "codec_options", ")", ")", "position", "+=", "obj_size", "return", "docs", "except", "InvalidBSON", ":", "raise", "except", "Exception", ":", "(", "_", ",", "exc_value", ",", "exc_tb", ")", "=", "sys", ".", "exc_info", "(", ")", "reraise", "(", "InvalidBSON", ",", "exc_value", ",", "exc_tb", ")" ]
decode bson data to multiple documents .
train
true
40,718
def libvlc_video_set_scale(p_mi, f_factor): f = (_Cfunctions.get('libvlc_video_set_scale', None) or _Cfunction('libvlc_video_set_scale', ((1,), (1,)), None, None, MediaPlayer, ctypes.c_float)) return f(p_mi, f_factor)
[ "def", "libvlc_video_set_scale", "(", "p_mi", ",", "f_factor", ")", ":", "f", "=", "(", "_Cfunctions", ".", "get", "(", "'libvlc_video_set_scale'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_video_set_scale'", ",", "(", "(", "1", ",", ")", ",", "(", "1", ",", ")", ")", ",", "None", ",", "None", ",", "MediaPlayer", ",", "ctypes", ".", "c_float", ")", ")", "return", "f", "(", "p_mi", ",", "f_factor", ")" ]
set the video scaling factor .
train
true
40,719
def _AddMessageMethods(message_descriptor, cls): _AddListFieldsMethod(message_descriptor, cls) _AddHasFieldMethod(message_descriptor, cls) _AddClearFieldMethod(message_descriptor, cls) if message_descriptor.is_extendable: _AddClearExtensionMethod(cls) _AddHasExtensionMethod(cls) _AddClearMethod(message_descriptor, cls) _AddEqualsMethod(message_descriptor, cls) _AddStrMethod(message_descriptor, cls) _AddUnicodeMethod(message_descriptor, cls) _AddSetListenerMethod(cls) _AddByteSizeMethod(message_descriptor, cls) _AddSerializeToStringMethod(message_descriptor, cls) _AddSerializePartialToStringMethod(message_descriptor, cls) _AddMergeFromStringMethod(message_descriptor, cls) _AddIsInitializedMethod(message_descriptor, cls) _AddMergeFromMethod(cls)
[ "def", "_AddMessageMethods", "(", "message_descriptor", ",", "cls", ")", ":", "_AddListFieldsMethod", "(", "message_descriptor", ",", "cls", ")", "_AddHasFieldMethod", "(", "message_descriptor", ",", "cls", ")", "_AddClearFieldMethod", "(", "message_descriptor", ",", "cls", ")", "if", "message_descriptor", ".", "is_extendable", ":", "_AddClearExtensionMethod", "(", "cls", ")", "_AddHasExtensionMethod", "(", "cls", ")", "_AddClearMethod", "(", "message_descriptor", ",", "cls", ")", "_AddEqualsMethod", "(", "message_descriptor", ",", "cls", ")", "_AddStrMethod", "(", "message_descriptor", ",", "cls", ")", "_AddUnicodeMethod", "(", "message_descriptor", ",", "cls", ")", "_AddSetListenerMethod", "(", "cls", ")", "_AddByteSizeMethod", "(", "message_descriptor", ",", "cls", ")", "_AddSerializeToStringMethod", "(", "message_descriptor", ",", "cls", ")", "_AddSerializePartialToStringMethod", "(", "message_descriptor", ",", "cls", ")", "_AddMergeFromStringMethod", "(", "message_descriptor", ",", "cls", ")", "_AddIsInitializedMethod", "(", "message_descriptor", ",", "cls", ")", "_AddMergeFromMethod", "(", "cls", ")" ]
adds implementations of all message methods to cls .
train
false
40,720
def process_memory(): return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
[ "def", "process_memory", "(", ")", ":", "return", "resource", ".", "getrusage", "(", "resource", ".", "RUSAGE_SELF", ")", ".", "ru_maxrss" ]
in kb according to URL .
train
false
40,724
def linux_distribution(distname='', version='', id='', supported_dists=_supported_dists, full_distribution_name=1): try: etc = os.listdir(_UNIXCONFDIR) except OSError: return (distname, version, id) etc.sort() for file in etc: m = _release_filename.match(file) if (m is not None): (_distname, dummy) = m.groups() if (_distname in supported_dists): distname = _distname break else: return _dist_try_harder(distname, version, id) with open(os.path.join(_UNIXCONFDIR, file), 'r', encoding='utf-8', errors='surrogateescape') as f: firstline = f.readline() (_distname, _version, _id) = _parse_release_file(firstline) if (_distname and full_distribution_name): distname = _distname if _version: version = _version if _id: id = _id return (distname, version, id)
[ "def", "linux_distribution", "(", "distname", "=", "''", ",", "version", "=", "''", ",", "id", "=", "''", ",", "supported_dists", "=", "_supported_dists", ",", "full_distribution_name", "=", "1", ")", ":", "try", ":", "etc", "=", "os", ".", "listdir", "(", "_UNIXCONFDIR", ")", "except", "OSError", ":", "return", "(", "distname", ",", "version", ",", "id", ")", "etc", ".", "sort", "(", ")", "for", "file", "in", "etc", ":", "m", "=", "_release_filename", ".", "match", "(", "file", ")", "if", "(", "m", "is", "not", "None", ")", ":", "(", "_distname", ",", "dummy", ")", "=", "m", ".", "groups", "(", ")", "if", "(", "_distname", "in", "supported_dists", ")", ":", "distname", "=", "_distname", "break", "else", ":", "return", "_dist_try_harder", "(", "distname", ",", "version", ",", "id", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "_UNIXCONFDIR", ",", "file", ")", ",", "'r'", ",", "encoding", "=", "'utf-8'", ",", "errors", "=", "'surrogateescape'", ")", "as", "f", ":", "firstline", "=", "f", ".", "readline", "(", ")", "(", "_distname", ",", "_version", ",", "_id", ")", "=", "_parse_release_file", "(", "firstline", ")", "if", "(", "_distname", "and", "full_distribution_name", ")", ":", "distname", "=", "_distname", "if", "_version", ":", "version", "=", "_version", "if", "_id", ":", "id", "=", "_id", "return", "(", "distname", ",", "version", ",", "id", ")" ]
return information about the current linux distribution as a tuple with items as follows: * id_name: if *full_distribution_name* is false .
train
false
40,725
def generate_pingback_content(soup, target, max_length, trunc_char='...'): link = soup.find('a', href=target) content = strip_tags(six.text_type(link.findParent())) index = content.index(link.string) if (len(content) > max_length): middle = (max_length // 2) start = (index - middle) end = (index + middle) if (start <= 0): end -= start extract = content[0:end] else: extract = ('%s%s' % (trunc_char, content[start:end])) if (end < len(content)): extract += trunc_char return extract return content
[ "def", "generate_pingback_content", "(", "soup", ",", "target", ",", "max_length", ",", "trunc_char", "=", "'...'", ")", ":", "link", "=", "soup", ".", "find", "(", "'a'", ",", "href", "=", "target", ")", "content", "=", "strip_tags", "(", "six", ".", "text_type", "(", "link", ".", "findParent", "(", ")", ")", ")", "index", "=", "content", ".", "index", "(", "link", ".", "string", ")", "if", "(", "len", "(", "content", ")", ">", "max_length", ")", ":", "middle", "=", "(", "max_length", "//", "2", ")", "start", "=", "(", "index", "-", "middle", ")", "end", "=", "(", "index", "+", "middle", ")", "if", "(", "start", "<=", "0", ")", ":", "end", "-=", "start", "extract", "=", "content", "[", "0", ":", "end", "]", "else", ":", "extract", "=", "(", "'%s%s'", "%", "(", "trunc_char", ",", "content", "[", "start", ":", "end", "]", ")", ")", "if", "(", "end", "<", "len", "(", "content", ")", ")", ":", "extract", "+=", "trunc_char", "return", "extract", "return", "content" ]
generate a description text for the pingback .
train
true
40,726
def hessian_times_vector(gradient, parameter, vector, r_op=False): if r_op: return tensor.Rop(gradient, parameter, vector) return tensor.grad(tensor.sum((gradient * vector)), parameter)
[ "def", "hessian_times_vector", "(", "gradient", ",", "parameter", ",", "vector", ",", "r_op", "=", "False", ")", ":", "if", "r_op", ":", "return", "tensor", ".", "Rop", "(", "gradient", ",", "parameter", ",", "vector", ")", "return", "tensor", ".", "grad", "(", "tensor", ".", "sum", "(", "(", "gradient", "*", "vector", ")", ")", ",", "parameter", ")" ]
return an expression for the hessian times a vector .
train
false
40,727
def convert_dict_colors_to_same_type(colors_dict, colortype='rgb'): for key in colors_dict: if ('#' in colors_dict[key]): colors_dict[key] = color_parser(colors_dict[key], hex_to_rgb) colors_dict[key] = color_parser(colors_dict[key], label_rgb) elif isinstance(colors_dict[key], tuple): colors_dict[key] = color_parser(colors_dict[key], convert_to_RGB_255) colors_dict[key] = color_parser(colors_dict[key], label_rgb) if (colortype == 'rgb'): return colors_dict elif (colortype == 'tuple'): for key in colors_dict: colors_dict[key] = color_parser(colors_dict[key], unlabel_rgb) colors_dict[key] = color_parser(colors_dict[key], unconvert_from_RGB_255) return colors_dict else: raise exceptions.PlotlyError('You must select either rgb or tuple for your colortype variable.')
[ "def", "convert_dict_colors_to_same_type", "(", "colors_dict", ",", "colortype", "=", "'rgb'", ")", ":", "for", "key", "in", "colors_dict", ":", "if", "(", "'#'", "in", "colors_dict", "[", "key", "]", ")", ":", "colors_dict", "[", "key", "]", "=", "color_parser", "(", "colors_dict", "[", "key", "]", ",", "hex_to_rgb", ")", "colors_dict", "[", "key", "]", "=", "color_parser", "(", "colors_dict", "[", "key", "]", ",", "label_rgb", ")", "elif", "isinstance", "(", "colors_dict", "[", "key", "]", ",", "tuple", ")", ":", "colors_dict", "[", "key", "]", "=", "color_parser", "(", "colors_dict", "[", "key", "]", ",", "convert_to_RGB_255", ")", "colors_dict", "[", "key", "]", "=", "color_parser", "(", "colors_dict", "[", "key", "]", ",", "label_rgb", ")", "if", "(", "colortype", "==", "'rgb'", ")", ":", "return", "colors_dict", "elif", "(", "colortype", "==", "'tuple'", ")", ":", "for", "key", "in", "colors_dict", ":", "colors_dict", "[", "key", "]", "=", "color_parser", "(", "colors_dict", "[", "key", "]", ",", "unlabel_rgb", ")", "colors_dict", "[", "key", "]", "=", "color_parser", "(", "colors_dict", "[", "key", "]", ",", "unconvert_from_RGB_255", ")", "return", "colors_dict", "else", ":", "raise", "exceptions", ".", "PlotlyError", "(", "'You must select either rgb or tuple for your colortype variable.'", ")" ]
converts a colors in a dictioanry of colors to the specified color type .
train
false
40,728
def get_host_keys(): try: result = [] with open(ssh_file(KNOWN_HOSTS), u'r') as handle: for line in handle: line = line.strip() if is_key_line(line): result.append(parse_hosts_line(line)) except IOError: return [] return result
[ "def", "get_host_keys", "(", ")", ":", "try", ":", "result", "=", "[", "]", "with", "open", "(", "ssh_file", "(", "KNOWN_HOSTS", ")", ",", "u'r'", ")", "as", "handle", ":", "for", "line", "in", "handle", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "is_key_line", "(", "line", ")", ":", "result", ".", "append", "(", "parse_hosts_line", "(", "line", ")", ")", "except", "IOError", ":", "return", "[", "]", "return", "result" ]
returns list of host keys .
train
false
40,730
def list_queues(backend='sqlite'): queue_funcs = salt.loader.queues(__opts__) cmd = '{0}.list_queues'.format(backend) if (cmd not in queue_funcs): raise SaltInvocationError('Function "{0}" is not available'.format(cmd)) ret = queue_funcs[cmd]() return ret
[ "def", "list_queues", "(", "backend", "=", "'sqlite'", ")", ":", "queue_funcs", "=", "salt", ".", "loader", ".", "queues", "(", "__opts__", ")", "cmd", "=", "'{0}.list_queues'", ".", "format", "(", "backend", ")", "if", "(", "cmd", "not", "in", "queue_funcs", ")", ":", "raise", "SaltInvocationError", "(", "'Function \"{0}\" is not available'", ".", "format", "(", "cmd", ")", ")", "ret", "=", "queue_funcs", "[", "cmd", "]", "(", ")", "return", "ret" ]
return a list of salt queues on the salt master .
train
true
40,731
@then(u'an undefined-step snippets section exists') def step_undefined_step_snippets_section_exists(context): context.execute_steps(u'\n Then the command output should contain:\n """\n You can implement step definitions for undefined steps with these snippets:\n """\n ')
[ "@", "then", "(", "u'an undefined-step snippets section exists'", ")", "def", "step_undefined_step_snippets_section_exists", "(", "context", ")", ":", "context", ".", "execute_steps", "(", "u'\\n Then the command output should contain:\\n \"\"\"\\n You can implement step definitions for undefined steps with these snippets:\\n \"\"\"\\n '", ")" ]
checks if an undefined-step snippet section is in behave command output .
train
false
40,733
def fourier_sin_seq(func, limits, n): from sympy.integrals import integrate (x, L) = (limits[0], (limits[2] - limits[1])) sin_term = sin(((((2 * n) * pi) * x) / L)) return SeqFormula((((2 * sin_term) * integrate((func * sin_term), limits)) / L), (n, 1, oo))
[ "def", "fourier_sin_seq", "(", "func", ",", "limits", ",", "n", ")", ":", "from", "sympy", ".", "integrals", "import", "integrate", "(", "x", ",", "L", ")", "=", "(", "limits", "[", "0", "]", ",", "(", "limits", "[", "2", "]", "-", "limits", "[", "1", "]", ")", ")", "sin_term", "=", "sin", "(", "(", "(", "(", "(", "2", "*", "n", ")", "*", "pi", ")", "*", "x", ")", "/", "L", ")", ")", "return", "SeqFormula", "(", "(", "(", "(", "2", "*", "sin_term", ")", "*", "integrate", "(", "(", "func", "*", "sin_term", ")", ",", "limits", ")", ")", "/", "L", ")", ",", "(", "n", ",", "1", ",", "oo", ")", ")" ]
returns the sin sequence in a fourier series .
train
false
40,734
def _match(key, value, attrs): if (key not in attrs): return False if (value == '*'): return True if (key != 'objectclass'): return (value in attrs[key]) values = _subs(value) for v in values: if (v in attrs[key]): return True return False
[ "def", "_match", "(", "key", ",", "value", ",", "attrs", ")", ":", "if", "(", "key", "not", "in", "attrs", ")", ":", "return", "False", "if", "(", "value", "==", "'*'", ")", ":", "return", "True", "if", "(", "key", "!=", "'objectclass'", ")", ":", "return", "(", "value", "in", "attrs", "[", "key", "]", ")", "values", "=", "_subs", "(", "value", ")", "for", "v", "in", "values", ":", "if", "(", "v", "in", "attrs", "[", "key", "]", ")", ":", "return", "True", "return", "False" ]
match a given key and value against an attribute list .
train
false
40,736
def unique_contributors(nodes, node): for added_node in nodes: if (set(added_node['contributors']).intersection(node['contributors']) != set()): return False return True
[ "def", "unique_contributors", "(", "nodes", ",", "node", ")", ":", "for", "added_node", "in", "nodes", ":", "if", "(", "set", "(", "added_node", "[", "'contributors'", "]", ")", ".", "intersection", "(", "node", "[", "'contributors'", "]", ")", "!=", "set", "(", ")", ")", ":", "return", "False", "return", "True" ]
projects in new and noteworthy should not have common contributors .
train
false
40,737
def ftrace(func): def w(*args, **kargs): global __ftraceDepth pfx = (' ' * __ftraceDepth) print(((pfx + func.__name__) + ' start')) __ftraceDepth += 1 try: rv = func(*args, **kargs) finally: __ftraceDepth -= 1 print(((pfx + func.__name__) + ' done')) return rv return w
[ "def", "ftrace", "(", "func", ")", ":", "def", "w", "(", "*", "args", ",", "**", "kargs", ")", ":", "global", "__ftraceDepth", "pfx", "=", "(", "' '", "*", "__ftraceDepth", ")", "print", "(", "(", "(", "pfx", "+", "func", ".", "__name__", ")", "+", "' start'", ")", ")", "__ftraceDepth", "+=", "1", "try", ":", "rv", "=", "func", "(", "*", "args", ",", "**", "kargs", ")", "finally", ":", "__ftraceDepth", "-=", "1", "print", "(", "(", "(", "pfx", "+", "func", ".", "__name__", ")", "+", "' done'", ")", ")", "return", "rv", "return", "w" ]
decorator used for marking the beginning and end of function calls .
train
false
40,738
def _bem_find_surface(bem, id_): if isinstance(id_, string_types): name = id_ id_ = _surf_dict[id_] else: name = _bem_explain_surface(id_) idx = np.where((np.array([s['id'] for s in bem['surfs']]) == id_))[0] if (len(idx) != 1): raise RuntimeError(('BEM model does not have the %s triangulation' % name.replace('_', ' '))) return bem['surfs'][idx[0]]
[ "def", "_bem_find_surface", "(", "bem", ",", "id_", ")", ":", "if", "isinstance", "(", "id_", ",", "string_types", ")", ":", "name", "=", "id_", "id_", "=", "_surf_dict", "[", "id_", "]", "else", ":", "name", "=", "_bem_explain_surface", "(", "id_", ")", "idx", "=", "np", ".", "where", "(", "(", "np", ".", "array", "(", "[", "s", "[", "'id'", "]", "for", "s", "in", "bem", "[", "'surfs'", "]", "]", ")", "==", "id_", ")", ")", "[", "0", "]", "if", "(", "len", "(", "idx", ")", "!=", "1", ")", ":", "raise", "RuntimeError", "(", "(", "'BEM model does not have the %s triangulation'", "%", "name", ".", "replace", "(", "'_'", ",", "' '", ")", ")", ")", "return", "bem", "[", "'surfs'", "]", "[", "idx", "[", "0", "]", "]" ]
find surface from already-loaded bem .
train
false
40,739
def index_page(request): return HttpResponse('<html><body>Dummy page</body></html>')
[ "def", "index_page", "(", "request", ")", ":", "return", "HttpResponse", "(", "'<html><body>Dummy page</body></html>'", ")" ]
dummy index page .
train
false
40,741
def _game_is_active(gameinfo, inactive_interval): gametime = _game_datetime(gameinfo) now = _now() if (gametime >= now): return ((gametime - now).total_seconds() <= inactive_interval) return (gameinfo['eid'] not in _completed)
[ "def", "_game_is_active", "(", "gameinfo", ",", "inactive_interval", ")", ":", "gametime", "=", "_game_datetime", "(", "gameinfo", ")", "now", "=", "_now", "(", ")", "if", "(", "gametime", ">=", "now", ")", ":", "return", "(", "(", "gametime", "-", "now", ")", ".", "total_seconds", "(", ")", "<=", "inactive_interval", ")", "return", "(", "gameinfo", "[", "'eid'", "]", "not", "in", "_completed", ")" ]
returns true if the game is active .
train
false
40,742
def _equalize(lists, fillval=None): lists = map(list, lists) upper = max((len(x) for x in lists)) for lst in lists: diff = (upper - len(lst)) if diff: lst.extend(([fillval] * diff)) return lists
[ "def", "_equalize", "(", "lists", ",", "fillval", "=", "None", ")", ":", "lists", "=", "map", "(", "list", ",", "lists", ")", "upper", "=", "max", "(", "(", "len", "(", "x", ")", "for", "x", "in", "lists", ")", ")", "for", "lst", "in", "lists", ":", "diff", "=", "(", "upper", "-", "len", "(", "lst", ")", ")", "if", "diff", ":", "lst", ".", "extend", "(", "(", "[", "fillval", "]", "*", "diff", ")", ")", "return", "lists" ]
pad all given list items in lists to be the same length .
train
false
40,744
def leftjoin(left_stream, right_stream, key=(lambda x: x), unused=None): left_stream = iter(left_stream) right_stream = iter(right_stream) try: right = next(right_stream) for left in left_stream: while (right and (key(left) > key(right))): if (unused is not None): unused(right) right = next(right_stream) if (key(left) == key(right)): (yield (left, right)) del left right = next(right_stream) else: (yield (left, None)) except StopIteration: try: (yield (left, None)) except NameError: pass for left in left_stream: (yield (left, None)) else: if (unused is not None): try: unused(right) except NameError: pass for right in right_stream: unused(right)
[ "def", "leftjoin", "(", "left_stream", ",", "right_stream", ",", "key", "=", "(", "lambda", "x", ":", "x", ")", ",", "unused", "=", "None", ")", ":", "left_stream", "=", "iter", "(", "left_stream", ")", "right_stream", "=", "iter", "(", "right_stream", ")", "try", ":", "right", "=", "next", "(", "right_stream", ")", "for", "left", "in", "left_stream", ":", "while", "(", "right", "and", "(", "key", "(", "left", ")", ">", "key", "(", "right", ")", ")", ")", ":", "if", "(", "unused", "is", "not", "None", ")", ":", "unused", "(", "right", ")", "right", "=", "next", "(", "right_stream", ")", "if", "(", "key", "(", "left", ")", "==", "key", "(", "right", ")", ")", ":", "(", "yield", "(", "left", ",", "right", ")", ")", "del", "left", "right", "=", "next", "(", "right_stream", ")", "else", ":", "(", "yield", "(", "left", ",", "None", ")", ")", "except", "StopIteration", ":", "try", ":", "(", "yield", "(", "left", ",", "None", ")", ")", "except", "NameError", ":", "pass", "for", "left", "in", "left_stream", ":", "(", "yield", "(", "left", ",", "None", ")", ")", "else", ":", "if", "(", "unused", "is", "not", "None", ")", ":", "try", ":", "unused", "(", "right", ")", "except", "NameError", ":", "pass", "for", "right", "in", "right_stream", ":", "unused", "(", "right", ")" ]
a "left join" operation on sorted iterators yields pairs .
train
false
40,746
def astronaut(): return load('astronaut.png')
[ "def", "astronaut", "(", ")", ":", "return", "load", "(", "'astronaut.png'", ")" ]
colour image of the astronaut eileen collins .
train
false
40,747
def make_string_uc(seq): seq = seq[8:] return make_string(seq)
[ "def", "make_string_uc", "(", "seq", ")", ":", "seq", "=", "seq", "[", "8", ":", "]", "return", "make_string", "(", "seq", ")" ]
special version to deal with the code in the first 8 bytes of a user comment .
train
false
40,748
def unit_price(offer, line): return line.unit_effective_price
[ "def", "unit_price", "(", "offer", ",", "line", ")", ":", "return", "line", ".", "unit_effective_price" ]
return the relevant price for a given basket line .
train
false
40,749
def valid_identifier(s): if isinstance(s, _strtypes): if ((not s) or s[0].isdigit()): return return s.replace(' ', '_').replace('.', '_').replace('-', '_') return s
[ "def", "valid_identifier", "(", "s", ")", ":", "if", "isinstance", "(", "s", ",", "_strtypes", ")", ":", "if", "(", "(", "not", "s", ")", "or", "s", "[", "0", "]", ".", "isdigit", "(", ")", ")", ":", "return", "return", "s", ".", "replace", "(", "' '", ",", "'_'", ")", ".", "replace", "(", "'.'", ",", "'_'", ")", ".", "replace", "(", "'-'", ",", "'_'", ")", "return", "s" ]
rewrite a string to be a valid identifier if it contains .
train
false
40,750
def natstobits(X): return (logbasechange(np.e, 2) * X)
[ "def", "natstobits", "(", "X", ")", ":", "return", "(", "logbasechange", "(", "np", ".", "e", ",", "2", ")", "*", "X", ")" ]
converts from nats to bits .
train
false
40,751
def munge_lists(listA, listB): if (listA is None): return listB if (listB is None): return listA if (not isinstance(listA, list)): listA = [listA] if (not isinstance(listB, list)): listB = [listB] return (listA + listB)
[ "def", "munge_lists", "(", "listA", ",", "listB", ")", ":", "if", "(", "listA", "is", "None", ")", ":", "return", "listB", "if", "(", "listB", "is", "None", ")", ":", "return", "listA", "if", "(", "not", "isinstance", "(", "listA", ",", "list", ")", ")", ":", "listA", "=", "[", "listA", "]", "if", "(", "not", "isinstance", "(", "listB", ",", "list", ")", ")", ":", "listB", "=", "[", "listB", "]", "return", "(", "listA", "+", "listB", ")" ]
combine two lists into a single list .
train
false
40,752
def edges(G, nbunch=None): return G.edges(nbunch)
[ "def", "edges", "(", "G", ",", "nbunch", "=", "None", ")", ":", "return", "G", ".", "edges", "(", "nbunch", ")" ]
returns an iterator of edge objects for the given list of nodes .
train
false
40,754
def get_promo_traffic(thing, start, end): if isinstance(thing, Link): imp_fn = traffic.AdImpressionsByCodename.promotion_history click_fn = traffic.ClickthroughsByCodename.promotion_history elif isinstance(thing, PromoCampaign): imp_fn = traffic.TargetedImpressionsByCodename.promotion_history click_fn = traffic.TargetedClickthroughsByCodename.promotion_history imps = imp_fn(thing._fullname, start.replace(tzinfo=None), end.replace(tzinfo=None)) clicks = click_fn(thing._fullname, start.replace(tzinfo=None), end.replace(tzinfo=None)) if (imps and (not clicks)): clicks = [(imps[0][0], (0,))] elif (clicks and (not imps)): imps = [(clicks[0][0], (0,))] history = traffic.zip_timeseries(imps, clicks, order='ascending') return history
[ "def", "get_promo_traffic", "(", "thing", ",", "start", ",", "end", ")", ":", "if", "isinstance", "(", "thing", ",", "Link", ")", ":", "imp_fn", "=", "traffic", ".", "AdImpressionsByCodename", ".", "promotion_history", "click_fn", "=", "traffic", ".", "ClickthroughsByCodename", ".", "promotion_history", "elif", "isinstance", "(", "thing", ",", "PromoCampaign", ")", ":", "imp_fn", "=", "traffic", ".", "TargetedImpressionsByCodename", ".", "promotion_history", "click_fn", "=", "traffic", ".", "TargetedClickthroughsByCodename", ".", "promotion_history", "imps", "=", "imp_fn", "(", "thing", ".", "_fullname", ",", "start", ".", "replace", "(", "tzinfo", "=", "None", ")", ",", "end", ".", "replace", "(", "tzinfo", "=", "None", ")", ")", "clicks", "=", "click_fn", "(", "thing", ".", "_fullname", ",", "start", ".", "replace", "(", "tzinfo", "=", "None", ")", ",", "end", ".", "replace", "(", "tzinfo", "=", "None", ")", ")", "if", "(", "imps", "and", "(", "not", "clicks", ")", ")", ":", "clicks", "=", "[", "(", "imps", "[", "0", "]", "[", "0", "]", ",", "(", "0", ",", ")", ")", "]", "elif", "(", "clicks", "and", "(", "not", "imps", ")", ")", ":", "imps", "=", "[", "(", "clicks", "[", "0", "]", "[", "0", "]", ",", "(", "0", ",", ")", ")", "]", "history", "=", "traffic", ".", "zip_timeseries", "(", "imps", ",", "clicks", ",", "order", "=", "'ascending'", ")", "return", "history" ]
get traffic for a promoted link or promocampaign .
train
false
40,755
def _getUserSid(user): ret = {} sid_pattern = '^S-1(-\\d+){1,}$' if (user and re.match(sid_pattern, user, re.I)): try: sid = win32security.GetBinarySid(user) except Exception as e: ret['result'] = False ret['comment'] = 'Unable to obtain the binary security identifier for {0}. The exception was {1}.'.format(user, e) else: try: win32security.LookupAccountSid('', sid) ret['result'] = True ret['sid'] = sid except Exception as e: ret['result'] = False ret['comment'] = 'Unable to lookup the account for the security identifier {0}. The exception was {1}.'.format(user, e) else: try: sid = (win32security.LookupAccountName('', user)[0] if user else None) ret['result'] = True ret['sid'] = sid except Exception as e: ret['result'] = False ret['comment'] = 'Unable to obtain the security identifier for {0}. The exception was {1}.'.format(user, e) return ret
[ "def", "_getUserSid", "(", "user", ")", ":", "ret", "=", "{", "}", "sid_pattern", "=", "'^S-1(-\\\\d+){1,}$'", "if", "(", "user", "and", "re", ".", "match", "(", "sid_pattern", ",", "user", ",", "re", ".", "I", ")", ")", ":", "try", ":", "sid", "=", "win32security", ".", "GetBinarySid", "(", "user", ")", "except", "Exception", "as", "e", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Unable to obtain the binary security identifier for {0}. The exception was {1}.'", ".", "format", "(", "user", ",", "e", ")", "else", ":", "try", ":", "win32security", ".", "LookupAccountSid", "(", "''", ",", "sid", ")", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'sid'", "]", "=", "sid", "except", "Exception", "as", "e", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Unable to lookup the account for the security identifier {0}. The exception was {1}.'", ".", "format", "(", "user", ",", "e", ")", "else", ":", "try", ":", "sid", "=", "(", "win32security", ".", "LookupAccountName", "(", "''", ",", "user", ")", "[", "0", "]", "if", "user", "else", "None", ")", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'sid'", "]", "=", "sid", "except", "Exception", "as", "e", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Unable to obtain the security identifier for {0}. The exception was {1}.'", ".", "format", "(", "user", ",", "e", ")", "return", "ret" ]
return a state error dictionary .
train
true
40,756
def strip_html(text): return _striptags_re.sub(u'', text)
[ "def", "strip_html", "(", "text", ")", ":", "return", "_striptags_re", ".", "sub", "(", "u''", ",", "text", ")" ]
strips html .
train
false
40,757
def test_feature_max_length_on_scenario(): feature = Feature.from_string(FEATURE1) assert_equals(feature.max_length, 76)
[ "def", "test_feature_max_length_on_scenario", "(", ")", ":", "feature", "=", "Feature", ".", "from_string", "(", "FEATURE1", ")", "assert_equals", "(", "feature", ".", "max_length", ",", "76", ")" ]
the max length of a feature considering when the scenario is longer than the remaining things .
train
false
40,758
def destroy(name, call=None): log.info('Attempting to delete instance %s', name) if (not vb_machine_exists(name)): return "{0} doesn't exist and can't be deleted".format(name) cloud.fire_event('event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport']) vb_destroy_machine(name) cloud.fire_event('event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'])
[ "def", "destroy", "(", "name", ",", "call", "=", "None", ")", ":", "log", ".", "info", "(", "'Attempting to delete instance %s'", ",", "name", ")", "if", "(", "not", "vb_machine_exists", "(", "name", ")", ")", ":", "return", "\"{0} doesn't exist and can't be deleted\"", ".", "format", "(", "name", ")", "cloud", ".", "fire_event", "(", "'event'", ",", "'destroying instance'", ",", "'salt/cloud/{0}/destroying'", ".", "format", "(", "name", ")", ",", "args", "=", "{", "'name'", ":", "name", "}", ",", "sock_dir", "=", "__opts__", "[", "'sock_dir'", "]", ",", "transport", "=", "__opts__", "[", "'transport'", "]", ")", "vb_destroy_machine", "(", "name", ")", "cloud", ".", "fire_event", "(", "'event'", ",", "'destroyed instance'", ",", "'salt/cloud/{0}/destroyed'", ".", "format", "(", "name", ")", ",", "args", "=", "{", "'name'", ":", "name", "}", ",", "sock_dir", "=", "__opts__", "[", "'sock_dir'", "]", ",", "transport", "=", "__opts__", "[", "'transport'", "]", ")" ]
destroy a machine by name .
train
true
40,759
def create_string_table(workbook): strings = set() for sheet in workbook.worksheets: for cell in sheet.get_cell_collection(): if ((cell.data_type == cell.TYPE_STRING) and (cell._value is not None)): strings.add(cell.value) return dict(((key, i) for (i, key) in enumerate(strings)))
[ "def", "create_string_table", "(", "workbook", ")", ":", "strings", "=", "set", "(", ")", "for", "sheet", "in", "workbook", ".", "worksheets", ":", "for", "cell", "in", "sheet", ".", "get_cell_collection", "(", ")", ":", "if", "(", "(", "cell", ".", "data_type", "==", "cell", ".", "TYPE_STRING", ")", "and", "(", "cell", ".", "_value", "is", "not", "None", ")", ")", ":", "strings", ".", "add", "(", "cell", ".", "value", ")", "return", "dict", "(", "(", "(", "key", ",", "i", ")", "for", "(", "i", ",", "key", ")", "in", "enumerate", "(", "strings", ")", ")", ")" ]
compile the string table for a workbook .
train
false
40,763
def verify_index_list(test): if (not (str(type(test)) == "<class 'curator.indexlist.IndexList'>")): raise TypeError('Not an IndexList object. Type: {0}.'.format(type(test)))
[ "def", "verify_index_list", "(", "test", ")", ":", "if", "(", "not", "(", "str", "(", "type", "(", "test", ")", ")", "==", "\"<class 'curator.indexlist.IndexList'>\"", ")", ")", ":", "raise", "TypeError", "(", "'Not an IndexList object. Type: {0}.'", ".", "format", "(", "type", "(", "test", ")", ")", ")" ]
test if test is a proper :class:curator .
train
false
40,765
def test_goto_definition_at_zero(): assert (Script('a', 1, 1).goto_definitions() == []) s = Script('str', 1, 1).goto_definitions() assert (len(s) == 1) assert (list(s)[0].description == 'class str') assert (Script('', 1, 0).goto_definitions() == [])
[ "def", "test_goto_definition_at_zero", "(", ")", ":", "assert", "(", "Script", "(", "'a'", ",", "1", ",", "1", ")", ".", "goto_definitions", "(", ")", "==", "[", "]", ")", "s", "=", "Script", "(", "'str'", ",", "1", ",", "1", ")", ".", "goto_definitions", "(", ")", "assert", "(", "len", "(", "s", ")", "==", "1", ")", "assert", "(", "list", "(", "s", ")", "[", "0", "]", ".", "description", "==", "'class str'", ")", "assert", "(", "Script", "(", "''", ",", "1", ",", "0", ")", ".", "goto_definitions", "(", ")", "==", "[", "]", ")" ]
at position zero , goto definitions sometimes raises unicode issues .
train
false
40,766
def create_model(session, forward_only): dtype = (tf.float16 if FLAGS.use_fp16 else tf.float32) model = seq2seq_model.Seq2SeqModel(FLAGS.from_vocab_size, FLAGS.to_vocab_size, _buckets, FLAGS.size, FLAGS.num_layers, FLAGS.max_gradient_norm, FLAGS.batch_size, FLAGS.learning_rate, FLAGS.learning_rate_decay_factor, forward_only=forward_only, dtype=dtype) ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir) if (ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path)): print(('Reading model parameters from %s' % ckpt.model_checkpoint_path)) model.saver.restore(session, ckpt.model_checkpoint_path) else: print('Created model with fresh parameters.') session.run(tf.global_variables_initializer()) return model
[ "def", "create_model", "(", "session", ",", "forward_only", ")", ":", "dtype", "=", "(", "tf", ".", "float16", "if", "FLAGS", ".", "use_fp16", "else", "tf", ".", "float32", ")", "model", "=", "seq2seq_model", ".", "Seq2SeqModel", "(", "FLAGS", ".", "from_vocab_size", ",", "FLAGS", ".", "to_vocab_size", ",", "_buckets", ",", "FLAGS", ".", "size", ",", "FLAGS", ".", "num_layers", ",", "FLAGS", ".", "max_gradient_norm", ",", "FLAGS", ".", "batch_size", ",", "FLAGS", ".", "learning_rate", ",", "FLAGS", ".", "learning_rate_decay_factor", ",", "forward_only", "=", "forward_only", ",", "dtype", "=", "dtype", ")", "ckpt", "=", "tf", ".", "train", ".", "get_checkpoint_state", "(", "FLAGS", ".", "train_dir", ")", "if", "(", "ckpt", "and", "tf", ".", "train", ".", "checkpoint_exists", "(", "ckpt", ".", "model_checkpoint_path", ")", ")", ":", "print", "(", "(", "'Reading model parameters from %s'", "%", "ckpt", ".", "model_checkpoint_path", ")", ")", "model", ".", "saver", ".", "restore", "(", "session", ",", "ckpt", ".", "model_checkpoint_path", ")", "else", ":", "print", "(", "'Created model with fresh parameters.'", ")", "session", ".", "run", "(", "tf", ".", "global_variables_initializer", "(", ")", ")", "return", "model" ]
creates a model .
train
false
40,768
@receiver(models.signals.post_save, sender=UserProfile) def invalidate_user_profile_country_cache(sender, instance, **kwargs): changed_fields = getattr(instance, '_changed_fields', {}) if ('country' in changed_fields): cache_key = UserProfile.country_cache_key_name(instance.user_id) cache.delete(cache_key) log.info('Country changed in UserProfile for %s, cache deleted', instance.user_id)
[ "@", "receiver", "(", "models", ".", "signals", ".", "post_save", ",", "sender", "=", "UserProfile", ")", "def", "invalidate_user_profile_country_cache", "(", "sender", ",", "instance", ",", "**", "kwargs", ")", ":", "changed_fields", "=", "getattr", "(", "instance", ",", "'_changed_fields'", ",", "{", "}", ")", "if", "(", "'country'", "in", "changed_fields", ")", ":", "cache_key", "=", "UserProfile", ".", "country_cache_key_name", "(", "instance", ".", "user_id", ")", "cache", ".", "delete", "(", "cache_key", ")", "log", ".", "info", "(", "'Country changed in UserProfile for %s, cache deleted'", ",", "instance", ".", "user_id", ")" ]
invalidate the cache of country in userprofile model .
train
false
40,770
def hasattr(attr): def has_attr(value): return _hasattr(value, attr) return has_attr
[ "def", "hasattr", "(", "attr", ")", ":", "def", "has_attr", "(", "value", ")", ":", "return", "_hasattr", "(", "value", ",", "attr", ")", "return", "has_attr" ]
verifies that the object has an attribute with the given name .
train
false
40,772
def remove_stopwords(tokens, language): from nltk.corpus import stopwords stop_words = stopwords.words(language) tokens = (set(tokens) - set(stop_words)) return tokens
[ "def", "remove_stopwords", "(", "tokens", ",", "language", ")", ":", "from", "nltk", ".", "corpus", "import", "stopwords", "stop_words", "=", "stopwords", ".", "words", "(", "language", ")", "tokens", "=", "(", "set", "(", "tokens", ")", "-", "set", "(", "stop_words", ")", ")", "return", "tokens" ]
removes stopwords from the given tokens for the specified language .
train
false
40,773
def floating_ip_allocate_address(context, project_id, pool): return IMPL.floating_ip_allocate_address(context, project_id, pool)
[ "def", "floating_ip_allocate_address", "(", "context", ",", "project_id", ",", "pool", ")", ":", "return", "IMPL", ".", "floating_ip_allocate_address", "(", "context", ",", "project_id", ",", "pool", ")" ]
allocate free floating ip from specified pool and return the address .
train
false
40,774
@login_required def group_join(request, slug, template_name='groups/group_join_confirm.html'): group = get_object_or_404(Group, slug=slug, is_active=True) if (request.method == 'POST'): membership = GroupMember(group=group, user=request.user) membership.save() return redirect(request, group) return render(request, template_name, {'group': group})
[ "@", "login_required", "def", "group_join", "(", "request", ",", "slug", ",", "template_name", "=", "'groups/group_join_confirm.html'", ")", ":", "group", "=", "get_object_or_404", "(", "Group", ",", "slug", "=", "slug", ",", "is_active", "=", "True", ")", "if", "(", "request", ".", "method", "==", "'POST'", ")", ":", "membership", "=", "GroupMember", "(", "group", "=", "group", ",", "user", "=", "request", ".", "user", ")", "membership", ".", "save", "(", ")", "return", "redirect", "(", "request", ",", "group", ")", "return", "render", "(", "request", ",", "template_name", ",", "{", "'group'", ":", "group", "}", ")" ]
returns a group join confirmation page .
train
false
40,775
def parse_ftp_list_line(ftp_list_line): return FTPListDataParser().parse_line(ftp_list_line)
[ "def", "parse_ftp_list_line", "(", "ftp_list_line", ")", ":", "return", "FTPListDataParser", "(", ")", ".", "parse_line", "(", "ftp_list_line", ")" ]
convenience function that instantiates an ftplistdataparser object and passes ftp_list_line to the objects parse_line() method .
train
false
40,776
def reconstruct_skel_matrix(A, k, idx): if _is_real(A): return backend.idd_copycols(A, k, (idx + 1)) else: return backend.idz_copycols(A, k, (idx + 1))
[ "def", "reconstruct_skel_matrix", "(", "A", ",", "k", ",", "idx", ")", ":", "if", "_is_real", "(", "A", ")", ":", "return", "backend", ".", "idd_copycols", "(", "A", ",", "k", ",", "(", "idx", "+", "1", ")", ")", "else", ":", "return", "backend", ".", "idz_copycols", "(", "A", ",", "k", ",", "(", "idx", "+", "1", ")", ")" ]
reconstruct skeleton matrix from id .
train
false
40,777
def test_initialize_app(settings): settings.SENTRY_OPTIONS = {'system.secret-key': 'secret-key'} bootstrap_options(settings) apply_legacy_settings(settings)
[ "def", "test_initialize_app", "(", "settings", ")", ":", "settings", ".", "SENTRY_OPTIONS", "=", "{", "'system.secret-key'", ":", "'secret-key'", "}", "bootstrap_options", "(", "settings", ")", "apply_legacy_settings", "(", "settings", ")" ]
just a sanity check of the full initialization process .
train
false
40,778
def add_timeframed_query_manager(sender, **kwargs): if (not issubclass(sender, TimeFramedModel)): return try: sender._meta.get_field(u'timeframed') raise ImproperlyConfigured((u"Model '%s' has a field named 'timeframed' which conflicts with the TimeFramedModel manager." % sender.__name__)) except FieldDoesNotExist: pass sender.add_to_class(u'timeframed', QueryManager(((models.Q(start__lte=now) | models.Q(start__isnull=True)) & (models.Q(end__gte=now) | models.Q(end__isnull=True)))))
[ "def", "add_timeframed_query_manager", "(", "sender", ",", "**", "kwargs", ")", ":", "if", "(", "not", "issubclass", "(", "sender", ",", "TimeFramedModel", ")", ")", ":", "return", "try", ":", "sender", ".", "_meta", ".", "get_field", "(", "u'timeframed'", ")", "raise", "ImproperlyConfigured", "(", "(", "u\"Model '%s' has a field named 'timeframed' which conflicts with the TimeFramedModel manager.\"", "%", "sender", ".", "__name__", ")", ")", "except", "FieldDoesNotExist", ":", "pass", "sender", ".", "add_to_class", "(", "u'timeframed'", ",", "QueryManager", "(", "(", "(", "models", ".", "Q", "(", "start__lte", "=", "now", ")", "|", "models", ".", "Q", "(", "start__isnull", "=", "True", ")", ")", "&", "(", "models", ".", "Q", "(", "end__gte", "=", "now", ")", "|", "models", ".", "Q", "(", "end__isnull", "=", "True", ")", ")", ")", ")", ")" ]
add a querymanager for a specific timeframe .
train
false
40,779
def uniqify(seq): seen = set() seen_add = seen.add return [x for x in seq if ((x not in seen) and (not seen_add(x)))]
[ "def", "uniqify", "(", "seq", ")", ":", "seen", "=", "set", "(", ")", "seen_add", "=", "seen", ".", "add", "return", "[", "x", "for", "x", "in", "seq", "if", "(", "(", "x", "not", "in", "seen", ")", "and", "(", "not", "seen_add", "(", "x", ")", ")", ")", "]" ]
remove duplicates from list preserving order originally by dave kirby .
train
true
40,780
@contextlib.contextmanager def savefile_open(filename, binary=False, encoding='utf-8'): f = QSaveFile(filename) cancelled = False try: open_ok = f.open(QIODevice.WriteOnly) if (not open_ok): raise QtOSError(f) if binary: new_f = PyQIODevice(f) else: new_f = io.TextIOWrapper(PyQIODevice(f), encoding=encoding) (yield new_f) new_f.flush() except: f.cancelWriting() cancelled = True raise finally: commit_ok = f.commit() if ((not commit_ok) and (not cancelled)): raise QtOSError(f, msg='Commit failed!')
[ "@", "contextlib", ".", "contextmanager", "def", "savefile_open", "(", "filename", ",", "binary", "=", "False", ",", "encoding", "=", "'utf-8'", ")", ":", "f", "=", "QSaveFile", "(", "filename", ")", "cancelled", "=", "False", "try", ":", "open_ok", "=", "f", ".", "open", "(", "QIODevice", ".", "WriteOnly", ")", "if", "(", "not", "open_ok", ")", ":", "raise", "QtOSError", "(", "f", ")", "if", "binary", ":", "new_f", "=", "PyQIODevice", "(", "f", ")", "else", ":", "new_f", "=", "io", ".", "TextIOWrapper", "(", "PyQIODevice", "(", "f", ")", ",", "encoding", "=", "encoding", ")", "(", "yield", "new_f", ")", "new_f", ".", "flush", "(", ")", "except", ":", "f", ".", "cancelWriting", "(", ")", "cancelled", "=", "True", "raise", "finally", ":", "commit_ok", "=", "f", ".", "commit", "(", ")", "if", "(", "(", "not", "commit_ok", ")", "and", "(", "not", "cancelled", ")", ")", ":", "raise", "QtOSError", "(", "f", ",", "msg", "=", "'Commit failed!'", ")" ]
context manager to easily use a qsavefile .
train
false
40,781
def _generate_method(name, func): source = _method_code_template.format(method=name) glbls = {} exec_(source, glbls) method = njit(glbls['method']) @wraps(func) def wrapper(*args, **kwargs): return method(*args, **kwargs) return wrapper
[ "def", "_generate_method", "(", "name", ",", "func", ")", ":", "source", "=", "_method_code_template", ".", "format", "(", "method", "=", "name", ")", "glbls", "=", "{", "}", "exec_", "(", "source", ",", "glbls", ")", "method", "=", "njit", "(", "glbls", "[", "'method'", "]", ")", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "**", "kwargs", ")", ":", "return", "method", "(", "*", "args", ",", "**", "kwargs", ")", "return", "wrapper" ]
generate a wrapper for calling a method .
train
false
40,782
def _req_fancy_property(cls, header, even_if_nonexistent=False): def getter(self): try: if ((header in self.headers) or even_if_nonexistent): return cls(self.headers.get(header)) except ValueError: return None def setter(self, value): self.headers[header] = value return property(getter, setter, doc=('Retrieve and set the %s property in the WSGI environ, as a %s object' % (header, cls.__name__)))
[ "def", "_req_fancy_property", "(", "cls", ",", "header", ",", "even_if_nonexistent", "=", "False", ")", ":", "def", "getter", "(", "self", ")", ":", "try", ":", "if", "(", "(", "header", "in", "self", ".", "headers", ")", "or", "even_if_nonexistent", ")", ":", "return", "cls", "(", "self", ".", "headers", ".", "get", "(", "header", ")", ")", "except", "ValueError", ":", "return", "None", "def", "setter", "(", "self", ",", "value", ")", ":", "self", ".", "headers", "[", "header", "]", "=", "value", "return", "property", "(", "getter", ",", "setter", ",", "doc", "=", "(", "'Retrieve and set the %s property in the WSGI environ, as a %s object'", "%", "(", "header", ",", "cls", ".", "__name__", ")", ")", ")" ]
set and retrieve "fancy" properties .
train
false
40,783
def getatime(filename): return os.stat(filename).st_atime
[ "def", "getatime", "(", "filename", ")", ":", "return", "os", ".", "stat", "(", "filename", ")", ".", "st_atime" ]
return the last access time of a file .
train
false
40,784
def write_compressed(path, content): with gzip.open(path, 'wb') as f: f.write(content)
[ "def", "write_compressed", "(", "path", ",", "content", ")", ":", "with", "gzip", ".", "open", "(", "path", ",", "'wb'", ")", "as", "f", ":", "f", ".", "write", "(", "content", ")" ]
write a compressed file to path .
train
false
40,785
def getPriority(element): try: return getPriorityStrict(element) except ValueError: return Max
[ "def", "getPriority", "(", "element", ")", ":", "try", ":", "return", "getPriorityStrict", "(", "element", ")", "except", "ValueError", ":", "return", "Max" ]
get the priority of this element ; returns max if no priority is specified or the priority value is invalid .
train
false
40,786
def ip_in_subnet(ip_addr, cidr): return salt.utils.network.in_subnet(cidr, ip_addr)
[ "def", "ip_in_subnet", "(", "ip_addr", ",", "cidr", ")", ":", "return", "salt", ".", "utils", ".", "network", ".", "in_subnet", "(", "cidr", ",", "ip_addr", ")" ]
returns true if given ip is within specified subnet .
train
false
40,787
def _write_dig_points(fname, dig_points): (_, ext) = op.splitext(fname) dig_points = np.asarray(dig_points) if ((dig_points.ndim != 2) or (dig_points.shape[1] != 3)): err = ('Points must be of shape (n_points, 3), not %s' % (dig_points.shape,)) raise ValueError(err) if (ext == '.txt'): with open(fname, 'wb') as fid: version = __version__ now = dt.now().strftime('%I:%M%p on %B %d, %Y') fid.write(b('% Ascii 3D points file created by mne-python version {version} at {now}\n'.format(version=version, now=now))) fid.write(b('% {N} 3D points, x y z per line\n'.format(N=len(dig_points)))) np.savetxt(fid, dig_points, delimiter=' DCTB ', newline='\n') else: msg = ("Unrecognized extension: %r. Need '.txt'." % ext) raise ValueError(msg)
[ "def", "_write_dig_points", "(", "fname", ",", "dig_points", ")", ":", "(", "_", ",", "ext", ")", "=", "op", ".", "splitext", "(", "fname", ")", "dig_points", "=", "np", ".", "asarray", "(", "dig_points", ")", "if", "(", "(", "dig_points", ".", "ndim", "!=", "2", ")", "or", "(", "dig_points", ".", "shape", "[", "1", "]", "!=", "3", ")", ")", ":", "err", "=", "(", "'Points must be of shape (n_points, 3), not %s'", "%", "(", "dig_points", ".", "shape", ",", ")", ")", "raise", "ValueError", "(", "err", ")", "if", "(", "ext", "==", "'.txt'", ")", ":", "with", "open", "(", "fname", ",", "'wb'", ")", "as", "fid", ":", "version", "=", "__version__", "now", "=", "dt", ".", "now", "(", ")", ".", "strftime", "(", "'%I:%M%p on %B %d, %Y'", ")", "fid", ".", "write", "(", "b", "(", "'% Ascii 3D points file created by mne-python version {version} at {now}\\n'", ".", "format", "(", "version", "=", "version", ",", "now", "=", "now", ")", ")", ")", "fid", ".", "write", "(", "b", "(", "'% {N} 3D points, x y z per line\\n'", ".", "format", "(", "N", "=", "len", "(", "dig_points", ")", ")", ")", ")", "np", ".", "savetxt", "(", "fid", ",", "dig_points", ",", "delimiter", "=", "' DCTB '", ",", "newline", "=", "'\\n'", ")", "else", ":", "msg", "=", "(", "\"Unrecognized extension: %r. Need '.txt'.\"", "%", "ext", ")", "raise", "ValueError", "(", "msg", ")" ]
write points to text file .
train
false
40,788
def _edit_dist(s1, s2): dist = 0 for i in range(len(s1)): if (s1[i] != s2[i]): dist += 1 return dist
[ "def", "_edit_dist", "(", "s1", ",", "s2", ")", ":", "dist", "=", "0", "for", "i", "in", "range", "(", "len", "(", "s1", ")", ")", ":", "if", "(", "s1", "[", "i", "]", "!=", "s2", "[", "i", "]", ")", ":", "dist", "+=", "1", "return", "dist" ]
computes edit distance between two strings of equal length ; designed for strings of nucleotides .
train
false
40,789
def preserve_builtin_query_params(url, request=None): if (request is None): return url overrides = [api_settings.URL_FORMAT_OVERRIDE] for param in overrides: if (param and (param in request.GET)): value = request.GET[param] url = replace_query_param(url, param, value) return url
[ "def", "preserve_builtin_query_params", "(", "url", ",", "request", "=", "None", ")", ":", "if", "(", "request", "is", "None", ")", ":", "return", "url", "overrides", "=", "[", "api_settings", ".", "URL_FORMAT_OVERRIDE", "]", "for", "param", "in", "overrides", ":", "if", "(", "param", "and", "(", "param", "in", "request", ".", "GET", ")", ")", ":", "value", "=", "request", ".", "GET", "[", "param", "]", "url", "=", "replace_query_param", "(", "url", ",", "param", ",", "value", ")", "return", "url" ]
given an incoming request .
train
true
40,790
def test_close_error(): d = gs_deleter.Deleter() d.close() with pytest.raises(exception.UserCritical): d.delete('no value should work')
[ "def", "test_close_error", "(", ")", ":", "d", "=", "gs_deleter", ".", "Deleter", "(", ")", "d", ".", "close", "(", ")", "with", "pytest", ".", "raises", "(", "exception", ".", "UserCritical", ")", ":", "d", ".", "delete", "(", "'no value should work'", ")" ]
ensure that attempts to use a closed deleter results in an error .
train
false
40,791
def get_hibernate_timeout(scheme=None): return _get_powercfg_minute_values(scheme, 'SUB_SLEEP', 'HIBERNATEIDLE', 'Hibernate after')
[ "def", "get_hibernate_timeout", "(", "scheme", "=", "None", ")", ":", "return", "_get_powercfg_minute_values", "(", "scheme", ",", "'SUB_SLEEP'", ",", "'HIBERNATEIDLE'", ",", "'Hibernate after'", ")" ]
get the current hibernate timeout of the given scheme cli example: .
train
false
40,792
def _parse_family_pb(family_pb): result = {} for column in family_pb.columns: result[column.qualifier] = cells = [] for cell in column.cells: val_pair = (cell.value, _datetime_from_microseconds(cell.timestamp_micros)) cells.append(val_pair) return (family_pb.name, result)
[ "def", "_parse_family_pb", "(", "family_pb", ")", ":", "result", "=", "{", "}", "for", "column", "in", "family_pb", ".", "columns", ":", "result", "[", "column", ".", "qualifier", "]", "=", "cells", "=", "[", "]", "for", "cell", "in", "column", ".", "cells", ":", "val_pair", "=", "(", "cell", ".", "value", ",", "_datetime_from_microseconds", "(", "cell", ".", "timestamp_micros", ")", ")", "cells", ".", "append", "(", "val_pair", ")", "return", "(", "family_pb", ".", "name", ",", "result", ")" ]
parses a family protobuf into a dictionary .
train
true
40,793
def writeContentsLine(hypertextFile, output): summarizedFileName = hypertextFile[:hypertextFile.rfind('.')] numberOfDots = summarizedFileName.count('.') prefixSpaces = ('&nbsp;&nbsp;' * numberOfDots) if (numberOfDots > 0): summarizedFileName = summarizedFileName[(summarizedFileName.rfind('.') + 1):] capitalizedSummarizedFileName = settings.getEachWordCapitalized(summarizedFileName) output.write(('%s<a href="%s">%s</a><br>\n' % (prefixSpaces, hypertextFile, capitalizedSummarizedFileName)))
[ "def", "writeContentsLine", "(", "hypertextFile", ",", "output", ")", ":", "summarizedFileName", "=", "hypertextFile", "[", ":", "hypertextFile", ".", "rfind", "(", "'.'", ")", "]", "numberOfDots", "=", "summarizedFileName", ".", "count", "(", "'.'", ")", "prefixSpaces", "=", "(", "'&nbsp;&nbsp;'", "*", "numberOfDots", ")", "if", "(", "numberOfDots", ">", "0", ")", ":", "summarizedFileName", "=", "summarizedFileName", "[", "(", "summarizedFileName", ".", "rfind", "(", "'.'", ")", "+", "1", ")", ":", "]", "capitalizedSummarizedFileName", "=", "settings", ".", "getEachWordCapitalized", "(", "summarizedFileName", ")", "output", ".", "write", "(", "(", "'%s<a href=\"%s\">%s</a><br>\\n'", "%", "(", "prefixSpaces", ",", "hypertextFile", ",", "capitalizedSummarizedFileName", ")", ")", ")" ]
write a line of the contents file .
train
false
40,794
def colorspace(im, bw=False, replace_alpha=False, **kwargs): if (im.mode == 'I'): im = im.point(list(_points_table()), 'L') is_transparent = utils.is_transparent(im) is_grayscale = (im.mode in ('L', 'LA')) new_mode = im.mode if (is_grayscale or bw): new_mode = 'L' else: new_mode = 'RGB' if is_transparent: if replace_alpha: if (im.mode != 'RGBA'): im = im.convert('RGBA') base = Image.new('RGBA', im.size, replace_alpha) base.paste(im, mask=im) im = base else: new_mode = (new_mode + 'A') if (im.mode != new_mode): im = im.convert(new_mode) return im
[ "def", "colorspace", "(", "im", ",", "bw", "=", "False", ",", "replace_alpha", "=", "False", ",", "**", "kwargs", ")", ":", "if", "(", "im", ".", "mode", "==", "'I'", ")", ":", "im", "=", "im", ".", "point", "(", "list", "(", "_points_table", "(", ")", ")", ",", "'L'", ")", "is_transparent", "=", "utils", ".", "is_transparent", "(", "im", ")", "is_grayscale", "=", "(", "im", ".", "mode", "in", "(", "'L'", ",", "'LA'", ")", ")", "new_mode", "=", "im", ".", "mode", "if", "(", "is_grayscale", "or", "bw", ")", ":", "new_mode", "=", "'L'", "else", ":", "new_mode", "=", "'RGB'", "if", "is_transparent", ":", "if", "replace_alpha", ":", "if", "(", "im", ".", "mode", "!=", "'RGBA'", ")", ":", "im", "=", "im", ".", "convert", "(", "'RGBA'", ")", "base", "=", "Image", ".", "new", "(", "'RGBA'", ",", "im", ".", "size", ",", "replace_alpha", ")", "base", ".", "paste", "(", "im", ",", "mask", "=", "im", ")", "im", "=", "base", "else", ":", "new_mode", "=", "(", "new_mode", "+", "'A'", ")", "if", "(", "im", ".", "mode", "!=", "new_mode", ")", ":", "im", "=", "im", ".", "convert", "(", "new_mode", ")", "return", "im" ]
convert images to the correct color space .
train
true
40,795
def _lasso_stability_path(X, y, mask, weights, eps): X = (X * weights[np.newaxis, :]) X = X[safe_mask(X, mask), :] y = y[mask] alpha_max = (np.max(np.abs(np.dot(X.T, y))) / X.shape[0]) alpha_min = (eps * alpha_max) with warnings.catch_warnings(): warnings.simplefilter('ignore', ConvergenceWarning) (alphas, _, coefs) = lars_path(X, y, method='lasso', verbose=False, alpha_min=alpha_min) alphas /= alphas[0] alphas = alphas[::(-1)] coefs = coefs[:, ::(-1)] mask = (alphas >= eps) mask[0] = True alphas = alphas[mask] coefs = coefs[:, mask] return (alphas, coefs)
[ "def", "_lasso_stability_path", "(", "X", ",", "y", ",", "mask", ",", "weights", ",", "eps", ")", ":", "X", "=", "(", "X", "*", "weights", "[", "np", ".", "newaxis", ",", ":", "]", ")", "X", "=", "X", "[", "safe_mask", "(", "X", ",", "mask", ")", ",", ":", "]", "y", "=", "y", "[", "mask", "]", "alpha_max", "=", "(", "np", ".", "max", "(", "np", ".", "abs", "(", "np", ".", "dot", "(", "X", ".", "T", ",", "y", ")", ")", ")", "/", "X", ".", "shape", "[", "0", "]", ")", "alpha_min", "=", "(", "eps", "*", "alpha_max", ")", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "'ignore'", ",", "ConvergenceWarning", ")", "(", "alphas", ",", "_", ",", "coefs", ")", "=", "lars_path", "(", "X", ",", "y", ",", "method", "=", "'lasso'", ",", "verbose", "=", "False", ",", "alpha_min", "=", "alpha_min", ")", "alphas", "/=", "alphas", "[", "0", "]", "alphas", "=", "alphas", "[", ":", ":", "(", "-", "1", ")", "]", "coefs", "=", "coefs", "[", ":", ",", ":", ":", "(", "-", "1", ")", "]", "mask", "=", "(", "alphas", ">=", "eps", ")", "mask", "[", "0", "]", "=", "True", "alphas", "=", "alphas", "[", "mask", "]", "coefs", "=", "coefs", "[", ":", ",", "mask", "]", "return", "(", "alphas", ",", "coefs", ")" ]
inner loop of lasso_stability_path .
train
false
40,796
def migrate_non_shared_inc(vm_, target, ssh=False): cmd = (((_get_migrate_command() + ' --copy-storage-inc ') + vm_) + _get_target(target, ssh)) stdout = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).communicate()[0] return salt.utils.to_str(stdout)
[ "def", "migrate_non_shared_inc", "(", "vm_", ",", "target", ",", "ssh", "=", "False", ")", ":", "cmd", "=", "(", "(", "(", "_get_migrate_command", "(", ")", "+", "' --copy-storage-inc '", ")", "+", "vm_", ")", "+", "_get_target", "(", "target", ",", "ssh", ")", ")", "stdout", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "shell", "=", "True", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", ".", "communicate", "(", ")", "[", "0", "]", "return", "salt", ".", "utils", ".", "to_str", "(", "stdout", ")" ]
attempt to execute non-shared storage "all" migration cli example: .
train
true
40,797
def checkfuncname(b, frame): if (not b.funcname): if (b.line != frame.f_lineno): return False return True if (frame.f_code.co_name != b.funcname): return False if (not b.func_first_executable_line): b.func_first_executable_line = frame.f_lineno if (b.func_first_executable_line != frame.f_lineno): return False return True
[ "def", "checkfuncname", "(", "b", ",", "frame", ")", ":", "if", "(", "not", "b", ".", "funcname", ")", ":", "if", "(", "b", ".", "line", "!=", "frame", ".", "f_lineno", ")", ":", "return", "False", "return", "True", "if", "(", "frame", ".", "f_code", ".", "co_name", "!=", "b", ".", "funcname", ")", ":", "return", "False", "if", "(", "not", "b", ".", "func_first_executable_line", ")", ":", "b", ".", "func_first_executable_line", "=", "frame", ".", "f_lineno", "if", "(", "b", ".", "func_first_executable_line", "!=", "frame", ".", "f_lineno", ")", ":", "return", "False", "return", "True" ]
check whether we should break here because of b .
train
true