id_within_dataset: int64 (1 to 55.5k)
snippet: string (lengths 19 to 14.2k)
tokens: list (lengths 6 to 1.63k)
nl: string (lengths 6 to 352)
split_within_dataset: string (1 class)
is_duplicated: bool (2 classes)
8,494
def local_path_as_url(filename): return ('file://' + urllib.pathname2url(os.path.abspath(filename)))
[ "def", "local_path_as_url", "(", "filename", ")", ":", "return", "(", "'file://'", "+", "urllib", ".", "pathname2url", "(", "os", ".", "path", ".", "abspath", "(", "filename", ")", ")", ")" ]
takes a local filename and returns it as a file:// url .
train
false
8,497
def parse_set(source, info):
    version = ((info.flags & _ALL_VERSIONS) or DEFAULT_VERSION)
    saved_ignore = source.ignore_space
    source.ignore_space = False
    negate = source.match('^')
    try:
        if (version == VERSION0):
            item = parse_set_imp_union(source, info)
        else:
            item = parse_set_union(source, info)
        if (not source.match(']')):
            raise error('missing ]', source.string, source.pos)
    finally:
        source.ignore_space = saved_ignore
    if negate:
        item = item.with_flags(positive=(not item.positive))
    item = item.with_flags(case_flags=make_case_flags(info))
    return item
[ "def", "parse_set", "(", "source", ",", "info", ")", ":", "version", "=", "(", "(", "info", ".", "flags", "&", "_ALL_VERSIONS", ")", "or", "DEFAULT_VERSION", ")", "saved_ignore", "=", "source", ".", "ignore_space", "source", ".", "ignore_space", "=", "False", "negate", "=", "source", ".", "match", "(", "'^'", ")", "try", ":", "if", "(", "version", "==", "VERSION0", ")", ":", "item", "=", "parse_set_imp_union", "(", "source", ",", "info", ")", "else", ":", "item", "=", "parse_set_union", "(", "source", ",", "info", ")", "if", "(", "not", "source", ".", "match", "(", "']'", ")", ")", ":", "raise", "error", "(", "'missing ]'", ",", "source", ".", "string", ",", "source", ".", "pos", ")", "finally", ":", "source", ".", "ignore_space", "=", "saved_ignore", "if", "negate", ":", "item", "=", "item", ".", "with_flags", "(", "positive", "=", "(", "not", "item", ".", "positive", ")", ")", "item", "=", "item", ".", "with_flags", "(", "case_flags", "=", "make_case_flags", "(", "info", ")", ")", "return", "item" ]
parses a character set .
train
false
8,498
def _adjust_lines(lines):
    formatted_lines = []
    for l in lines:
        l = l.replace('\r\n', '\n').replace('\r', '\n').strip()
        if l.lower().startswith('matrix'):
            formatted_lines.append(l)
        else:
            l = l.replace('\n', ' ')
            if l:
                formatted_lines.append(l)
    return formatted_lines
[ "def", "_adjust_lines", "(", "lines", ")", ":", "formatted_lines", "=", "[", "]", "for", "l", "in", "lines", ":", "l", "=", "l", ".", "replace", "(", "'\\r\\n'", ",", "'\\n'", ")", ".", "replace", "(", "'\\r'", ",", "'\\n'", ")", ".", "strip", "(", ")", "if", "l", ".", "lower", "(", ")", ".", "startswith", "(", "'matrix'", ")", ":", "formatted_lines", ".", "append", "(", "l", ")", "else", ":", "l", "=", "l", ".", "replace", "(", "'\\n'", ",", "' '", ")", "if", "l", ":", "formatted_lines", ".", "append", "(", "l", ")", "return", "formatted_lines" ]
adjust linebreaks to match ; .
train
false
8,500
def test_aggregate_build_agg_args__reuse_of_intermediates():
    from dask.dataframe.groupby import _build_agg_args
    no_mean_spec = [('foo', 'sum', 'input'), ('bar', 'count', 'input')]
    with_mean_spec = [('foo', 'sum', 'input'), ('bar', 'count', 'input'), ('baz', 'mean', 'input')]
    (no_mean_chunks, no_mean_aggs, no_mean_finalizers) = _build_agg_args(no_mean_spec)
    (with_mean_chunks, with_mean_aggs, with_mean_finalizers) = _build_agg_args(with_mean_spec)
    assert (len(no_mean_chunks) == len(with_mean_chunks))
    assert (len(no_mean_aggs) == len(with_mean_aggs))
    assert (len(no_mean_finalizers) == len(no_mean_spec))
    assert (len(with_mean_finalizers) == len(with_mean_spec))
[ "def", "test_aggregate_build_agg_args__reuse_of_intermediates", "(", ")", ":", "from", "dask", ".", "dataframe", ".", "groupby", "import", "_build_agg_args", "no_mean_spec", "=", "[", "(", "'foo'", ",", "'sum'", ",", "'input'", ")", ",", "(", "'bar'", ",", "'count'", ",", "'input'", ")", "]", "with_mean_spec", "=", "[", "(", "'foo'", ",", "'sum'", ",", "'input'", ")", ",", "(", "'bar'", ",", "'count'", ",", "'input'", ")", ",", "(", "'baz'", ",", "'mean'", ",", "'input'", ")", "]", "(", "no_mean_chunks", ",", "no_mean_aggs", ",", "no_mean_finalizers", ")", "=", "_build_agg_args", "(", "no_mean_spec", ")", "(", "with_mean_chunks", ",", "with_mean_aggs", ",", "with_mean_finalizers", ")", "=", "_build_agg_args", "(", "with_mean_spec", ")", "assert", "(", "len", "(", "no_mean_chunks", ")", "==", "len", "(", "with_mean_chunks", ")", ")", "assert", "(", "len", "(", "no_mean_aggs", ")", "==", "len", "(", "with_mean_aggs", ")", ")", "assert", "(", "len", "(", "no_mean_finalizers", ")", "==", "len", "(", "no_mean_spec", ")", ")", "assert", "(", "len", "(", "with_mean_finalizers", ")", "==", "len", "(", "with_mean_spec", ")", ")" ]
aggregate reuses intermediates .
train
false
8,501
def test_doctest_mode():
    _ip.magic('doctest_mode')
    _ip.magic('doctest_mode')
[ "def", "test_doctest_mode", "(", ")", ":", "_ip", ".", "magic", "(", "'doctest_mode'", ")", "_ip", ".", "magic", "(", "'doctest_mode'", ")" ]
toggle doctest_mode twice .
train
false
8,503
def update_network(network, name, profile=None):
    conn = _auth(profile)
    return conn.update_network(network, name)
[ "def", "update_network", "(", "network", ",", "name", ",", "profile", "=", "None", ")", ":", "conn", "=", "_auth", "(", "profile", ")", "return", "conn", ".", "update_network", "(", "network", ",", "name", ")" ]
updates a network .
train
true
8,504
def hscan(key, cursor=0, match=None, count=None, host=None, port=None, db=None, password=None):
    server = _connect(host, port, db, password)
    return server.hscan(key, cursor=cursor, match=match, count=count)
[ "def", "hscan", "(", "key", ",", "cursor", "=", "0", ",", "match", "=", "None", ",", "count", "=", "None", ",", "host", "=", "None", ",", "port", "=", "None", ",", "db", "=", "None", ",", "password", "=", "None", ")", ":", "server", "=", "_connect", "(", "host", ",", "port", ",", "db", ",", "password", ")", "return", "server", ".", "hscan", "(", "key", ",", "cursor", "=", "cursor", ",", "match", "=", "match", ",", "count", "=", "count", ")" ]
incrementally iterate hash fields and associated values .
train
true
8,505
def _RegistryQuery(key, value=None):
    text = None
    try:
        text = _RegistryQueryBase('Sysnative', key, value)
    except OSError as e:
        if (e.errno == errno.ENOENT):
            text = _RegistryQueryBase('System32', key, value)
        else:
            raise
    return text
[ "def", "_RegistryQuery", "(", "key", ",", "value", "=", "None", ")", ":", "text", "=", "None", "try", ":", "text", "=", "_RegistryQueryBase", "(", "'Sysnative'", ",", "key", ",", "value", ")", "except", "OSError", "as", "e", ":", "if", "(", "e", ".", "errno", "==", "errno", ".", "ENOENT", ")", ":", "text", "=", "_RegistryQueryBase", "(", "'System32'", ",", "key", ",", "value", ")", "else", ":", "raise", "return", "text" ]
use reg.exe to read a particular registry key .
train
false
8,506
def test_quickmark_completion(qtmodeltester, quickmarks):
    model = miscmodels.QuickmarkCompletionModel()
    qtmodeltester.data_display_may_return_none = True
    qtmodeltester.check(model)
    _check_completions(model, {'Quickmarks': [('aw', 'https://wiki.archlinux.org', ''), ('ddg', 'https://duckduckgo.com', ''), ('wiki', 'https://wikipedia.org', '')]})
[ "def", "test_quickmark_completion", "(", "qtmodeltester", ",", "quickmarks", ")", ":", "model", "=", "miscmodels", ".", "QuickmarkCompletionModel", "(", ")", "qtmodeltester", ".", "data_display_may_return_none", "=", "True", "qtmodeltester", ".", "check", "(", "model", ")", "_check_completions", "(", "model", ",", "{", "'Quickmarks'", ":", "[", "(", "'aw'", ",", "'https://wiki.archlinux.org'", ",", "''", ")", ",", "(", "'ddg'", ",", "'https://duckduckgo.com'", ",", "''", ")", ",", "(", "'wiki'", ",", "'https://wikipedia.org'", ",", "''", ")", "]", "}", ")" ]
test the results of quickmark completion .
train
false
8,508
def test_cut_off():
    assert (hug.types.cut_off(10)('text') == 'text')
    assert (hug.types.cut_off(10)(10) == '10')
    assert (hug.types.cut_off(10)('some really long text') == 'some reall')
    assert ('10' in hug.types.cut_off(10).__doc__)
[ "def", "test_cut_off", "(", ")", ":", "assert", "(", "hug", ".", "types", ".", "cut_off", "(", "10", ")", "(", "'text'", ")", "==", "'text'", ")", "assert", "(", "hug", ".", "types", ".", "cut_off", "(", "10", ")", "(", "10", ")", "==", "'10'", ")", "assert", "(", "hug", ".", "types", ".", "cut_off", "(", "10", ")", "(", "'some really long text'", ")", "==", "'some reall'", ")", "assert", "(", "'10'", "in", "hug", ".", "types", ".", "cut_off", "(", "10", ")", ".", "__doc__", ")" ]
test to ensure that hug's cut_off type works as expected .
train
false
8,509
def detach_volume(name=None, kwargs=None, instance_id=None, call=None):
    if (call != 'action'):
        raise SaltCloudSystemExit('The detach_volume action must be called with -a or --action.')
    if (not kwargs):
        kwargs = {}
    if ('volume_id' not in kwargs):
        log.error('A volume_id is required.')
        return False
    params = {'Action': 'DetachVolume', 'VolumeId': kwargs['volume_id']}
    data = aws.query(params, return_url=True, location=get_location(), provider=get_provider(), opts=__opts__, sigver='4')
    return data
[ "def", "detach_volume", "(", "name", "=", "None", ",", "kwargs", "=", "None", ",", "instance_id", "=", "None", ",", "call", "=", "None", ")", ":", "if", "(", "call", "!=", "'action'", ")", ":", "raise", "SaltCloudSystemExit", "(", "'The detach_volume action must be called with -a or --action.'", ")", "if", "(", "not", "kwargs", ")", ":", "kwargs", "=", "{", "}", "if", "(", "'volume_id'", "not", "in", "kwargs", ")", ":", "log", ".", "error", "(", "'A volume_id is required.'", ")", "return", "False", "params", "=", "{", "'Action'", ":", "'DetachVolume'", ",", "'VolumeId'", ":", "kwargs", "[", "'volume_id'", "]", "}", "data", "=", "aws", ".", "query", "(", "params", ",", "return_url", "=", "True", ",", "location", "=", "get_location", "(", ")", ",", "provider", "=", "get_provider", "(", ")", ",", "opts", "=", "__opts__", ",", "sigver", "=", "'4'", ")", "return", "data" ]
detaches a volume .
train
false
8,510
def signal_alarm(n):
    if hasattr(signal, 'alarm'):
        signal.alarm(n)
[ "def", "signal_alarm", "(", "n", ")", ":", "if", "hasattr", "(", "signal", ",", "'alarm'", ")", ":", "signal", ".", "alarm", "(", "n", ")" ]
call signal.alarm when it exists .
train
false
8,511
def analyze_syntax(text):
    credentials = GoogleCredentials.get_application_default()
    scoped_credentials = credentials.create_scoped(['https://www.googleapis.com/auth/cloud-platform'])
    http = httplib2.Http()
    scoped_credentials.authorize(http)
    service = discovery.build('language', 'v1beta1', http=http)
    body = {'document': {'type': 'PLAIN_TEXT', 'content': text}, 'features': {'extract_syntax': True}, 'encodingType': get_native_encoding_type()}
    request = service.documents().annotateText(body=body)
    return request.execute()
[ "def", "analyze_syntax", "(", "text", ")", ":", "credentials", "=", "GoogleCredentials", ".", "get_application_default", "(", ")", "scoped_credentials", "=", "credentials", ".", "create_scoped", "(", "[", "'https://www.googleapis.com/auth/cloud-platform'", "]", ")", "http", "=", "httplib2", ".", "Http", "(", ")", "scoped_credentials", ".", "authorize", "(", "http", ")", "service", "=", "discovery", ".", "build", "(", "'language'", ",", "'v1beta1'", ",", "http", "=", "http", ")", "body", "=", "{", "'document'", ":", "{", "'type'", ":", "'PLAIN_TEXT'", ",", "'content'", ":", "text", "}", ",", "'features'", ":", "{", "'extract_syntax'", ":", "True", "}", ",", "'encodingType'", ":", "get_native_encoding_type", "(", ")", "}", "request", "=", "service", ".", "documents", "(", ")", ".", "annotateText", "(", "body", "=", "body", ")", "return", "request", ".", "execute", "(", ")" ]
use the nl api to analyze the given text string .
train
false
8,513
def create_linkextractor_from_specs(specs):
    specs = specs.copy()
    (ltype, value) = (specs.pop('type'), specs.pop('value'))
    if (ltype == 'module'):
        cls = load_object(value)
        return cls(**specs)
    for (key, cls, ignore) in _TYPE_MAP:
        if (key == ltype):
            if ignore:
                return cls(**specs)
            return cls(value, **specs)
    raise ValueError('Invalid link extractor type specification')
[ "def", "create_linkextractor_from_specs", "(", "specs", ")", ":", "specs", "=", "specs", ".", "copy", "(", ")", "(", "ltype", ",", "value", ")", "=", "(", "specs", ".", "pop", "(", "'type'", ")", ",", "specs", ".", "pop", "(", "'value'", ")", ")", "if", "(", "ltype", "==", "'module'", ")", ":", "cls", "=", "load_object", "(", "value", ")", "return", "cls", "(", "**", "specs", ")", "for", "(", "key", ",", "cls", ",", "ignore", ")", "in", "_TYPE_MAP", ":", "if", "(", "key", "==", "ltype", ")", ":", "if", "ignore", ":", "return", "cls", "(", "**", "specs", ")", "return", "cls", "(", "value", ",", "**", "specs", ")", "raise", "ValueError", "(", "'Invalid link extractor type specification'", ")" ]
return a link extractor instance from specs .
train
false
8,514
def _get_date_range(frequency, period_start, period_end):
    assert (frequency == 'months')
    if (frequency == 'months'):
        if (not (period_start or period_end)):
            cur_date = datetime.datetime.now()
            first_this_month = datetime.datetime(year=cur_date.year, month=cur_date.month, day=1, hour=0, minute=0, second=0)
            period_end = (first_this_month - datetime.timedelta(seconds=1))
            period_start = datetime.datetime(year=period_end.year, month=period_end.month, day=1)
        else:
            period_end = (period_end or (period_start + datetime.timedelta(days=30)))
            period_start = (period_start or (period_end - datetime.timedelta(days=30)))
    return (period_start, period_end)
[ "def", "_get_date_range", "(", "frequency", ",", "period_start", ",", "period_end", ")", ":", "assert", "(", "frequency", "==", "'months'", ")", "if", "(", "frequency", "==", "'months'", ")", ":", "if", "(", "not", "(", "period_start", "or", "period_end", ")", ")", ":", "cur_date", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "first_this_month", "=", "datetime", ".", "datetime", "(", "year", "=", "cur_date", ".", "year", ",", "month", "=", "cur_date", ".", "month", ",", "day", "=", "1", ",", "hour", "=", "0", ",", "minute", "=", "0", ",", "second", "=", "0", ")", "period_end", "=", "(", "first_this_month", "-", "datetime", ".", "timedelta", "(", "seconds", "=", "1", ")", ")", "period_start", "=", "datetime", ".", "datetime", "(", "year", "=", "period_end", ".", "year", ",", "month", "=", "period_end", ".", "month", ",", "day", "=", "1", ")", "else", ":", "period_end", "=", "(", "period_end", "or", "(", "period_start", "+", "datetime", ".", "timedelta", "(", "days", "=", "30", ")", ")", ")", "period_start", "=", "(", "period_start", "or", "(", "period_end", "-", "datetime", ".", "timedelta", "(", "days", "=", "30", ")", ")", ")", "return", "(", "period_start", ",", "period_end", ")" ]
hack function .
train
false
8,515
def resource_data_get_by_key(context, resource_id, key):
    result = context.session.query(models.ResourceData).filter_by(resource_id=resource_id).filter_by(key=key).first()
    if (not result):
        raise exception.NotFound(_('No resource data found'))
    return result
[ "def", "resource_data_get_by_key", "(", "context", ",", "resource_id", ",", "key", ")", ":", "result", "=", "context", ".", "session", ".", "query", "(", "models", ".", "ResourceData", ")", ".", "filter_by", "(", "resource_id", "=", "resource_id", ")", ".", "filter_by", "(", "key", "=", "key", ")", ".", "first", "(", ")", "if", "(", "not", "result", ")", ":", "raise", "exception", ".", "NotFound", "(", "_", "(", "'No resource data found'", ")", ")", "return", "result" ]
looks up resource_data by resource_id and key .
train
false
8,517
def test_extract_array_1d_even():
    assert np.all((extract_array(np.arange(4), (2,), (0,), fill_value=(-99)) == np.array([(-99), 0])))
    for i in [1, 2, 3]:
        assert np.all((extract_array(np.arange(4), (2,), (i,)) == np.array([(i - 1), i])))
    assert np.all((extract_array(np.arange(4.0), (2,), (4,), fill_value=np.inf) == np.array([3, np.inf])))
[ "def", "test_extract_array_1d_even", "(", ")", ":", "assert", "np", ".", "all", "(", "(", "extract_array", "(", "np", ".", "arange", "(", "4", ")", ",", "(", "2", ",", ")", ",", "(", "0", ",", ")", ",", "fill_value", "=", "(", "-", "99", ")", ")", "==", "np", ".", "array", "(", "[", "(", "-", "99", ")", ",", "0", "]", ")", ")", ")", "for", "i", "in", "[", "1", ",", "2", ",", "3", "]", ":", "assert", "np", ".", "all", "(", "(", "extract_array", "(", "np", ".", "arange", "(", "4", ")", ",", "(", "2", ",", ")", ",", "(", "i", ",", ")", ")", "==", "np", ".", "array", "(", "[", "(", "i", "-", "1", ")", ",", "i", "]", ")", ")", ")", "assert", "np", ".", "all", "(", "(", "extract_array", "(", "np", ".", "arange", "(", "4.0", ")", ",", "(", "2", ",", ")", ",", "(", "4", ",", ")", ",", "fill_value", "=", "np", ".", "inf", ")", "==", "np", ".", "array", "(", "[", "3", ",", "np", ".", "inf", "]", ")", ")", ")" ]
extract 1-d arrays .
train
false
8,518
@content_type('multipart/form-data')
def multipart(body, **header_params):
    if (header_params and ('boundary' in header_params)):
        if (type(header_params['boundary']) is str):
            header_params['boundary'] = header_params['boundary'].encode()
    form = parse_multipart((body.stream if hasattr(body, 'stream') else body), header_params)
    for (key, value) in form.items():
        if ((type(value) is list) and (len(value) is 1)):
            form[key] = value[0]
    return form
[ "@", "content_type", "(", "'multipart/form-data'", ")", "def", "multipart", "(", "body", ",", "**", "header_params", ")", ":", "if", "(", "header_params", "and", "(", "'boundary'", "in", "header_params", ")", ")", ":", "if", "(", "type", "(", "header_params", "[", "'boundary'", "]", ")", "is", "str", ")", ":", "header_params", "[", "'boundary'", "]", "=", "header_params", "[", "'boundary'", "]", ".", "encode", "(", ")", "form", "=", "parse_multipart", "(", "(", "body", ".", "stream", "if", "hasattr", "(", "body", ",", "'stream'", ")", "else", "body", ")", ",", "header_params", ")", "for", "(", "key", ",", "value", ")", "in", "form", ".", "items", "(", ")", ":", "if", "(", "(", "type", "(", "value", ")", "is", "list", ")", "and", "(", "len", "(", "value", ")", "is", "1", ")", ")", ":", "form", "[", "key", "]", "=", "value", "[", "0", "]", "return", "form" ]
converts multipart form data into native python objects .
train
true
8,519
def get_directory_handle(path): return CreateFileW(path, FILE_LIST_DIRECTORY, WATCHDOG_FILE_SHARE_FLAGS, None, OPEN_EXISTING, WATCHDOG_FILE_FLAGS, None)
[ "def", "get_directory_handle", "(", "path", ")", ":", "return", "CreateFileW", "(", "path", ",", "FILE_LIST_DIRECTORY", ",", "WATCHDOG_FILE_SHARE_FLAGS", ",", "None", ",", "OPEN_EXISTING", ",", "WATCHDOG_FILE_FLAGS", ",", "None", ")" ]
returns a windows handle to the specified directory path .
train
false
8,521
def win_encode(s):
    if isinstance(s, unicode):
        return s.encode(locale.getpreferredencoding(False))
    elif isinstance(s, bytes):
        return s
    elif (s is not None):
        raise TypeError(('Expected bytes or text, but got %r' % (s,)))
[ "def", "win_encode", "(", "s", ")", ":", "if", "isinstance", "(", "s", ",", "unicode", ")", ":", "return", "s", ".", "encode", "(", "locale", ".", "getpreferredencoding", "(", "False", ")", ")", "elif", "isinstance", "(", "s", ",", "bytes", ")", ":", "return", "s", "elif", "(", "s", "is", "not", "None", ")", ":", "raise", "TypeError", "(", "(", "'Expected bytes or text, but got %r'", "%", "(", "s", ",", ")", ")", ")" ]
encode unicodes for process arguments on windows .
train
true
8,522
def detect_config_change():
    net_devices = NetworkDevice.objects.all()
    config_changed = False
    for a_device in net_devices:
        if ('cisco' in a_device.device_class):
            if (not a_device.cfg_file):
                print 'Initial device backup: {}'.format(a_device.device_name)
                backup_config(a_device)
                config_changed = True
                continue
            last_changed = int(snmp_wrapper(a_device, oid=global_params.OID_RUNNING_LAST_CHANGED))
            if (last_changed > a_device.cfg_last_changed):
                print '>>>Running configuration changed: {}'.format(a_device.device_name)
                config_diffs = backup_config(a_device)
                config_changed = True
                if config_diffs:
                    print 'Sending email notification regarding changes\n'
                    subject = 'Network Device Changed: {}'.format(a_device.device_name)
                    send_mail(global_params.EMAIL_RECIPIENT, subject, config_diffs, global_params.EMAIL_SENDER)
            else:
                a_device.cfg_last_changed = last_changed
                a_device.save()
    if config_changed:
        print 'Checking configuration changes into git'
        git_handling()
[ "def", "detect_config_change", "(", ")", ":", "net_devices", "=", "NetworkDevice", ".", "objects", ".", "all", "(", ")", "config_changed", "=", "False", "for", "a_device", "in", "net_devices", ":", "if", "(", "'cisco'", "in", "a_device", ".", "device_class", ")", ":", "if", "(", "not", "a_device", ".", "cfg_file", ")", ":", "print", "'Initial device backup: {}'", ".", "format", "(", "a_device", ".", "device_name", ")", "backup_config", "(", "a_device", ")", "config_changed", "=", "True", "continue", "last_changed", "=", "int", "(", "snmp_wrapper", "(", "a_device", ",", "oid", "=", "global_params", ".", "OID_RUNNING_LAST_CHANGED", ")", ")", "if", "(", "last_changed", ">", "a_device", ".", "cfg_last_changed", ")", ":", "print", "'>>>Running configuration changed: {}'", ".", "format", "(", "a_device", ".", "device_name", ")", "config_diffs", "=", "backup_config", "(", "a_device", ")", "config_changed", "=", "True", "if", "config_diffs", ":", "print", "'Sending email notification regarding changes\\n'", "subject", "=", "'Network Device Changed: {}'", ".", "format", "(", "a_device", ".", "device_name", ")", "send_mail", "(", "global_params", ".", "EMAIL_RECIPIENT", ",", "subject", ",", "config_diffs", ",", "global_params", ".", "EMAIL_SENDER", ")", "else", ":", "a_device", ".", "cfg_last_changed", "=", "last_changed", "a_device", ".", "save", "(", ")", "if", "config_changed", ":", "print", "'Checking configuration changes into git'", "git_handling", "(", ")" ]
use snmp to detect configuration changes .
train
false
8,524
def broadcast_shapes(*args):
    x = (list(np.atleast_1d(args[0])) if args else ())
    for arg in args[1:]:
        y = list(np.atleast_1d(arg))
        if (len(x) < len(y)):
            (x, y) = (y, x)
        x[(- len(y)):] = [(j if (i == 1) else (i if (j == 1) else (i if (i == j) else 0))) for (i, j) in zip(x[(- len(y)):], y)]
    if (not all(x)):
        return None
    return tuple(x)
[ "def", "broadcast_shapes", "(", "*", "args", ")", ":", "x", "=", "(", "list", "(", "np", ".", "atleast_1d", "(", "args", "[", "0", "]", ")", ")", "if", "args", "else", "(", ")", ")", "for", "arg", "in", "args", "[", "1", ":", "]", ":", "y", "=", "list", "(", "np", ".", "atleast_1d", "(", "arg", ")", ")", "if", "(", "len", "(", "x", ")", "<", "len", "(", "y", ")", ")", ":", "(", "x", ",", "y", ")", "=", "(", "y", ",", "x", ")", "x", "[", "(", "-", "len", "(", "y", ")", ")", ":", "]", "=", "[", "(", "j", "if", "(", "i", "==", "1", ")", "else", "(", "i", "if", "(", "j", "==", "1", ")", "else", "(", "i", "if", "(", "i", "==", "j", ")", "else", "0", ")", ")", ")", "for", "(", "i", ",", "j", ")", "in", "zip", "(", "x", "[", "(", "-", "len", "(", "y", ")", ")", ":", "]", ",", "y", ")", "]", "if", "(", "not", "all", "(", "x", ")", ")", ":", "return", "None", "return", "tuple", "(", "x", ")" ]
determines output shape from broadcasting arrays .
train
false
8,525
def test_jd1_is_mult_of_half_or_one():
    t1 = Time('2000:001:00:00:00.00000001', scale='tai')
    assert (np.round((t1.jd1 * 2)) == (t1.jd1 * 2))
    t1 = Time(1.23456789, 12345678.90123456, format='jd', scale='tai')
    assert (np.round(t1.jd1) == t1.jd1)
[ "def", "test_jd1_is_mult_of_half_or_one", "(", ")", ":", "t1", "=", "Time", "(", "'2000:001:00:00:00.00000001'", ",", "scale", "=", "'tai'", ")", "assert", "(", "np", ".", "round", "(", "(", "t1", ".", "jd1", "*", "2", ")", ")", "==", "(", "t1", ".", "jd1", "*", "2", ")", ")", "t1", "=", "Time", "(", "1.23456789", ",", "12345678.90123456", ",", "format", "=", "'jd'", ",", "scale", "=", "'tai'", ")", "assert", "(", "np", ".", "round", "(", "t1", ".", "jd1", ")", "==", "t1", ".", "jd1", ")" ]
check that jd1 is a multiple of 0.5 or 1 .
train
false
8,526
def _check_channel_names(inst, ref_names):
    if isinstance(ref_names, str):
        ref_names = [ref_names]
    ref_idx = pick_channels(inst.info['ch_names'], ref_names)
    assert_true(len(ref_idx), len(ref_names))
    inst.info._check_consistency()
[ "def", "_check_channel_names", "(", "inst", ",", "ref_names", ")", ":", "if", "isinstance", "(", "ref_names", ",", "str", ")", ":", "ref_names", "=", "[", "ref_names", "]", "ref_idx", "=", "pick_channels", "(", "inst", ".", "info", "[", "'ch_names'", "]", ",", "ref_names", ")", "assert_true", "(", "len", "(", "ref_idx", ")", ",", "len", "(", "ref_names", ")", ")", "inst", ".", "info", ".", "_check_consistency", "(", ")" ]
check channel names .
train
false
8,527
def _get_repo(**kwargs):
    for key in ('fromrepo', 'repo'):
        try:
            return kwargs[key]
        except KeyError:
            pass
    return ''
[ "def", "_get_repo", "(", "**", "kwargs", ")", ":", "for", "key", "in", "(", "'fromrepo'", ",", "'repo'", ")", ":", "try", ":", "return", "kwargs", "[", "key", "]", "except", "KeyError", ":", "pass", "return", "''" ]
check the kwargs for either fromrepo or repo and return the value .
train
false
8,529
def parse_keystring(keystr):
    if is_special_key(keystr):
        return [_parse_single_key(keystr)]
    else:
        return [_parse_single_key(char) for char in keystr]
[ "def", "parse_keystring", "(", "keystr", ")", ":", "if", "is_special_key", "(", "keystr", ")", ":", "return", "[", "_parse_single_key", "(", "keystr", ")", "]", "else", ":", "return", "[", "_parse_single_key", "(", "char", ")", "for", "char", "in", "keystr", "]" ]
parse a keystring like <ctrl-x> or xyz and return a keyinfo list .
train
false
8,532
def make_xheader(lms_callback_url, lms_key, queue_name): return json.dumps({'lms_callback_url': lms_callback_url, 'lms_key': lms_key, 'queue_name': queue_name})
[ "def", "make_xheader", "(", "lms_callback_url", ",", "lms_key", ",", "queue_name", ")", ":", "return", "json", ".", "dumps", "(", "{", "'lms_callback_url'", ":", "lms_callback_url", ",", "'lms_key'", ":", "lms_key", ",", "'queue_name'", ":", "queue_name", "}", ")" ]
generate header for delivery and reply of queue request .
train
false
8,533
def login_as_learner(context, learner_name='mrpibb', learner_pass='abc123'):
    if (not FacilityUser.objects.filter(username=learner_name)):
        class ContextWithMixin(FacilityMixins, ):
            def __init__(self):
                self.browser = context.browser
        context_wm = ContextWithMixin()
        context_wm.create_student(username=learner_name, password=learner_pass)
    facility = FacilityUser.objects.get(username=learner_name).facility.id
    _login_user(context, learner_name, learner_pass, facility=facility)
[ "def", "login_as_learner", "(", "context", ",", "learner_name", "=", "'mrpibb'", ",", "learner_pass", "=", "'abc123'", ")", ":", "if", "(", "not", "FacilityUser", ".", "objects", ".", "filter", "(", "username", "=", "learner_name", ")", ")", ":", "class", "ContextWithMixin", "(", "FacilityMixins", ",", ")", ":", "def", "__init__", "(", "self", ")", ":", "self", ".", "browser", "=", "context", ".", "browser", "context_wm", "=", "ContextWithMixin", "(", ")", "context_wm", ".", "create_student", "(", "username", "=", "learner_name", ",", "password", "=", "learner_pass", ")", "facility", "=", "FacilityUser", ".", "objects", ".", "get", "(", "username", "=", "learner_name", ")", ".", "facility", ".", "id", "_login_user", "(", "context", ",", "learner_name", ",", "learner_pass", ",", "facility", "=", "facility", ")" ]
log in as a learner specified by the optional arguments .
train
false
8,534
def test_hermite_catmull_rom(Chart, datas):
    chart = Chart(interpolate='hermite', interpolation_parameters={'type': 'catmull_rom'})
    chart = make_data(chart, datas)
    assert chart.render()
[ "def", "test_hermite_catmull_rom", "(", "Chart", ",", "datas", ")", ":", "chart", "=", "Chart", "(", "interpolate", "=", "'hermite'", ",", "interpolation_parameters", "=", "{", "'type'", ":", "'catmull_rom'", "}", ")", "chart", "=", "make_data", "(", "chart", ",", "datas", ")", "assert", "chart", ".", "render", "(", ")" ]
test hermite catmull rom interpolation .
train
false
8,535
def audit_log(name, **kwargs):
    payload = u', '.join(['{k}="{v}"'.format(k=k, v=v) for (k, v) in sorted(kwargs.items())])
    message = u'{name}: {payload}'.format(name=name, payload=payload)
    log.info(message)
[ "def", "audit_log", "(", "name", ",", "**", "kwargs", ")", ":", "payload", "=", "u', '", ".", "join", "(", "[", "'{k}=\"{v}\"'", ".", "format", "(", "k", "=", "k", ",", "v", "=", "v", ")", "for", "(", "k", ",", "v", ")", "in", "sorted", "(", "kwargs", ".", "items", "(", ")", ")", "]", ")", "message", "=", "u'{name}: {payload}'", ".", "format", "(", "name", "=", "name", ",", "payload", "=", "payload", ")", "log", ".", "info", "(", "message", ")" ]
dry helper used to emit an info-level log message .
train
false
8,536
def reload_httpd(request):
    notifier().reload('http')
    return HttpResponse('OK')
[ "def", "reload_httpd", "(", "request", ")", ":", "notifier", "(", ")", ".", "reload", "(", "'http'", ")", "return", "HttpResponse", "(", "'OK'", ")" ]
restart httpd .
train
false
8,537
def using_systemd(): return run('which systemctl', quiet=True).succeeded
[ "def", "using_systemd", "(", ")", ":", "return", "run", "(", "'which systemctl'", ",", "quiet", "=", "True", ")", ".", "succeeded" ]
return true if using systemd .
train
false
8,538
def extract_node(code, module_name=''):
    def _extract(node):
        if isinstance(node, nodes.Discard):
            return node.value
        else:
            return node
    requested_lines = []
    for (idx, line) in enumerate(code.splitlines()):
        if line.strip().endswith(_STATEMENT_SELECTOR):
            requested_lines.append((idx + 1))
    tree = build_module(code, module_name=module_name)
    extracted = []
    if requested_lines:
        for line in requested_lines:
            extracted.append(_find_statement_by_line(tree, line))
    extracted.extend(_extract_expressions(tree))
    if (not extracted):
        extracted.append(tree.body[(-1)])
    extracted = [_extract(node) for node in extracted]
    if (len(extracted) == 1):
        return extracted[0]
    else:
        return extracted
[ "def", "extract_node", "(", "code", ",", "module_name", "=", "''", ")", ":", "def", "_extract", "(", "node", ")", ":", "if", "isinstance", "(", "node", ",", "nodes", ".", "Discard", ")", ":", "return", "node", ".", "value", "else", ":", "return", "node", "requested_lines", "=", "[", "]", "for", "(", "idx", ",", "line", ")", "in", "enumerate", "(", "code", ".", "splitlines", "(", ")", ")", ":", "if", "line", ".", "strip", "(", ")", ".", "endswith", "(", "_STATEMENT_SELECTOR", ")", ":", "requested_lines", ".", "append", "(", "(", "idx", "+", "1", ")", ")", "tree", "=", "build_module", "(", "code", ",", "module_name", "=", "module_name", ")", "extracted", "=", "[", "]", "if", "requested_lines", ":", "for", "line", "in", "requested_lines", ":", "extracted", ".", "append", "(", "_find_statement_by_line", "(", "tree", ",", "line", ")", ")", "extracted", ".", "extend", "(", "_extract_expressions", "(", "tree", ")", ")", "if", "(", "not", "extracted", ")", ":", "extracted", ".", "append", "(", "tree", ".", "body", "[", "(", "-", "1", ")", "]", ")", "extracted", "=", "[", "_extract", "(", "node", ")", "for", "node", "in", "extracted", "]", "if", "(", "len", "(", "extracted", ")", "==", "1", ")", ":", "return", "extracted", "[", "0", "]", "else", ":", "return", "extracted" ]
parses some python code as a module and extracts a designated ast node .
train
true
8,539
def matrix_multiply_mapper(m, element):
    (matrix, i, j, value) = element
    if (matrix == 'A'):
        for column in range(m):
            (yield ((i, column), (j, value)))
    else:
        for row in range(m):
            (yield ((row, j), (i, value)))
[ "def", "matrix_multiply_mapper", "(", "m", ",", "element", ")", ":", "(", "matrix", ",", "i", ",", "j", ",", "value", ")", "=", "element", "if", "(", "matrix", "==", "'A'", ")", ":", "for", "column", "in", "range", "(", "m", ")", ":", "(", "yield", "(", "(", "i", ",", "column", ")", ",", "(", "j", ",", "value", ")", ")", ")", "else", ":", "for", "row", "in", "range", "(", "m", ")", ":", "(", "yield", "(", "(", "row", ",", "j", ")", ",", "(", "i", ",", "value", ")", ")", ")" ]
m is the common dimension ; element is a tuple .
train
false
8,541
def _add_role_and_annotate(var, role, annotations=()):
    add_role(var, role)
    for annotation in annotations:
        add_annotation(var, annotation)
[ "def", "_add_role_and_annotate", "(", "var", ",", "role", ",", "annotations", "=", "(", ")", ")", ":", "add_role", "(", "var", ",", "role", ")", "for", "annotation", "in", "annotations", ":", "add_annotation", "(", "var", ",", "annotation", ")" ]
add a role and zero or more annotations to a variable .
train
false
8,542
def sign_seq(poly_seq, x): return [sign(LC(poly_seq[i], x)) for i in range(len(poly_seq))]
[ "def", "sign_seq", "(", "poly_seq", ",", "x", ")", ":", "return", "[", "sign", "(", "LC", "(", "poly_seq", "[", "i", "]", ",", "x", ")", ")", "for", "i", "in", "range", "(", "len", "(", "poly_seq", ")", ")", "]" ]
given a sequence of polynomials poly_seq , return the signs of their leading coefficients in x .
train
false
8,546
@require_POST
@login_required
def request_course_creator(request):
    user_requested_access(request.user)
    return JsonResponse({'Status': 'OK'})
[ "@", "require_POST", "@", "login_required", "def", "request_course_creator", "(", "request", ")", ":", "user_requested_access", "(", "request", ".", "user", ")", "return", "JsonResponse", "(", "{", "'Status'", ":", "'OK'", "}", ")" ]
user has requested course creation access .
train
false
8,548
def test_totodile(session, media_root):
    totodile = session.query(tables.PokemonSpecies).filter_by(identifier=u'totodile').one()
    accessor = media.PokemonSpeciesMedia(media_root, totodile)
    assert (accessor.sprite() == accessor.sprite(female=True))
[ "def", "test_totodile", "(", "session", ",", "media_root", ")", ":", "totodile", "=", "session", ".", "query", "(", "tables", ".", "PokemonSpecies", ")", ".", "filter_by", "(", "identifier", "=", "u'totodile'", ")", ".", "one", "(", ")", "accessor", "=", "media", ".", "PokemonSpeciesMedia", "(", "media_root", ",", "totodile", ")", "assert", "(", "accessor", ".", "sprite", "(", ")", "==", "accessor", ".", "sprite", "(", "female", "=", "True", ")", ")" ]
totodile's female sprite -- same as male .
train
false
8,549
def test_get_absolute_url_not_defined():
    class Table(tables.Table, ):
        first_name = tables.Column()
        last_name = tables.LinkColumn()
    table = Table([dict(first_name=u'Jan Pieter', last_name=u'Waagmeester')])
    with pytest.raises(TypeError):
        table.as_html(build_request())
[ "def", "test_get_absolute_url_not_defined", "(", ")", ":", "class", "Table", "(", "tables", ".", "Table", ",", ")", ":", "first_name", "=", "tables", ".", "Column", "(", ")", "last_name", "=", "tables", ".", "LinkColumn", "(", ")", "table", "=", "Table", "(", "[", "dict", "(", "first_name", "=", "u'Jan Pieter'", ",", "last_name", "=", "u'Waagmeester'", ")", "]", ")", "with", "pytest", ".", "raises", "(", "TypeError", ")", ":", "table", ".", "as_html", "(", "build_request", "(", ")", ")" ]
the dict doesn't have a get_absolute_url() .
train
false
8,550
def group_update(groupname, user=None, host=None, port=None, maintenance_db=None, password=None, createdb=None, createroles=None, createuser=None, encrypted=None, inherit=None, login=None, superuser=None, replication=None, rolepassword=None, groups=None, runas=None): return _role_update(groupname, user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, createdb=createdb, typ_='group', createroles=createroles, createuser=createuser, encrypted=encrypted, login=login, inherit=inherit, superuser=superuser, replication=replication, rolepassword=rolepassword, groups=groups, runas=runas)
[ "def", "group_update", "(", "groupname", ",", "user", "=", "None", ",", "host", "=", "None", ",", "port", "=", "None", ",", "maintenance_db", "=", "None", ",", "password", "=", "None", ",", "createdb", "=", "None", ",", "createroles", "=", "None", ",", "createuser", "=", "None", ",", "encrypted", "=", "None", ",", "inherit", "=", "None", ",", "login", "=", "None", ",", "superuser", "=", "None", ",", "replication", "=", "None", ",", "rolepassword", "=", "None", ",", "groups", "=", "None", ",", "runas", "=", "None", ")", ":", "return", "_role_update", "(", "groupname", ",", "user", "=", "user", ",", "host", "=", "host", ",", "port", "=", "port", ",", "maintenance_db", "=", "maintenance_db", ",", "password", "=", "password", ",", "createdb", "=", "createdb", ",", "typ_", "=", "'group'", ",", "createroles", "=", "createroles", ",", "createuser", "=", "createuser", ",", "encrypted", "=", "encrypted", ",", "login", "=", "login", ",", "inherit", "=", "inherit", ",", "superuser", "=", "superuser", ",", "replication", "=", "replication", ",", "rolepassword", "=", "rolepassword", ",", "groups", "=", "groups", ",", "runas", "=", "runas", ")" ]
set the given properties on a group and update it .
train
false
8,551
def _state_session(state):
    if state.session_id:
        try:
            return _sessions[state.session_id]
        except KeyError:
            pass
    return None
[ "def", "_state_session", "(", "state", ")", ":", "if", "state", ".", "session_id", ":", "try", ":", "return", "_sessions", "[", "state", ".", "session_id", "]", "except", "KeyError", ":", "pass", "return", "None" ]
given an :class: .
train
false
8,552
def test_pkg_finder():
    mod1 = u'astropy.utils.introspection'
    mod2 = u'astropy.utils.tests.test_introspection'
    mod3 = u'astropy.utils.tests.test_introspection'
    assert (find_current_module(0).__name__ == mod1)
    assert (find_current_module(1).__name__ == mod2)
    assert (find_current_module(0, True).__name__ == mod3)
[ "def", "test_pkg_finder", "(", ")", ":", "mod1", "=", "u'astropy.utils.introspection'", "mod2", "=", "u'astropy.utils.tests.test_introspection'", "mod3", "=", "u'astropy.utils.tests.test_introspection'", "assert", "(", "find_current_module", "(", "0", ")", ".", "__name__", "==", "mod1", ")", "assert", "(", "find_current_module", "(", "1", ")", ".", "__name__", "==", "mod2", ")", "assert", "(", "find_current_module", "(", "0", ",", "True", ")", ".", "__name__", "==", "mod3", ")" ]
tests that the find_current_module function works .
train
false
8,554
def xmlescape(data, quote=True):
    if (hasattr(data, 'xml') and callable(data.xml)):
        return to_bytes(data.xml())
    if (not isinstance(data, (text_type, bytes))):
        data = str(data)
    data = to_bytes(data, 'utf8', 'xmlcharrefreplace')
    data = local_html_escape(data, quote)
    return data
[ "def", "xmlescape", "(", "data", ",", "quote", "=", "True", ")", ":", "if", "(", "hasattr", "(", "data", ",", "'xml'", ")", "and", "callable", "(", "data", ".", "xml", ")", ")", ":", "return", "to_bytes", "(", "data", ".", "xml", "(", ")", ")", "if", "(", "not", "isinstance", "(", "data", ",", "(", "text_type", ",", "bytes", ")", ")", ")", ":", "data", "=", "str", "(", "data", ")", "data", "=", "to_bytes", "(", "data", ",", "'utf8'", ",", "'xmlcharrefreplace'", ")", "data", "=", "local_html_escape", "(", "data", ",", "quote", ")", "return", "data" ]
returns an escaped string of the provided data . args: data: the data to be escaped ; quote: optional .
train
false
8,556
def p_statement(t): names[t[1]] = t[3]
[ "def", "p_statement", "(", "t", ")", ":", "names", "[", "t", "[", "1", "]", "]", "=", "t", "[", "3", "]" ]
statement : expression .
train
false
8,557
@blueprint.route('/resources/<resource>/meters/<meter>/volume/sum')
def compute_resource_volume_sum(resource, meter):
    return _get_statistics('sum', meter=meter, resource=resource, project=acl.get_limited_to_project(flask.request.headers))
[ "@", "blueprint", ".", "route", "(", "'/resources/<resource>/meters/<meter>/volume/sum'", ")", "def", "compute_resource_volume_sum", "(", "resource", ",", "meter", ")", ":", "return", "_get_statistics", "(", "'sum'", ",", "meter", "=", "meter", ",", "resource", "=", "resource", ",", "project", "=", "acl", ".", "get_limited_to_project", "(", "flask", ".", "request", ".", "headers", ")", ")" ]
return the sum of samples for a meter .
train
false
8,558
def warnIfDispersyThread(func):
    def invoke_func(*args, **kwargs):
        from twisted.python.threadable import isInIOThread
        from traceback import print_stack
        if isInIOThread():
            import inspect
            caller = inspect.stack()[1]
            callerstr = ('%s %s:%s' % (caller[3], caller[1], caller[2]))
            from time import time
            logger.error('%d CANNOT BE ON DISPERSYTHREAD %s %s:%s called by %s', long(time()), func.__name__, func.func_code.co_filename, func.func_code.co_firstlineno, callerstr)
            print_stack()
        return func(*args, **kwargs)
    invoke_func.__name__ = func.__name__
    return invoke_func
[ "def", "warnIfDispersyThread", "(", "func", ")", ":", "def", "invoke_func", "(", "*", "args", ",", "**", "kwargs", ")", ":", "from", "twisted", ".", "python", ".", "threadable", "import", "isInIOThread", "from", "traceback", "import", "print_stack", "if", "isInIOThread", "(", ")", ":", "import", "inspect", "caller", "=", "inspect", ".", "stack", "(", ")", "[", "1", "]", "callerstr", "=", "(", "'%s %s:%s'", "%", "(", "caller", "[", "3", "]", ",", "caller", "[", "1", "]", ",", "caller", "[", "2", "]", ")", ")", "from", "time", "import", "time", "logger", ".", "error", "(", "'%d CANNOT BE ON DISPERSYTHREAD %s %s:%s called by %s'", ",", "long", "(", "time", "(", ")", ")", ",", "func", ".", "__name__", ",", "func", ".", "func_code", ".", "co_filename", ",", "func", ".", "func_code", ".", "co_firstlineno", ",", "callerstr", ")", "print_stack", "(", ")", "return", "func", "(", "*", "args", ",", "**", "kwargs", ")", "invoke_func", ".", "__name__", "=", "func", ".", "__name__", "return", "invoke_func" ]
we'd rather not be on the dispersy thread .
train
false
8,559
def flat(x, y, z, eps=0.001):
    np = import_module('numpy')
    vector_a = (x - y).astype(np.float)
    vector_b = (z - y).astype(np.float)
    dot_product = np.dot(vector_a, vector_b)
    vector_a_norm = np.linalg.norm(vector_a)
    vector_b_norm = np.linalg.norm(vector_b)
    cos_theta = (dot_product / (vector_a_norm * vector_b_norm))
    return (abs((cos_theta + 1)) < eps)
[ "def", "flat", "(", "x", ",", "y", ",", "z", ",", "eps", "=", "0.001", ")", ":", "np", "=", "import_module", "(", "'numpy'", ")", "vector_a", "=", "(", "x", "-", "y", ")", ".", "astype", "(", "np", ".", "float", ")", "vector_b", "=", "(", "z", "-", "y", ")", ".", "astype", "(", "np", ".", "float", ")", "dot_product", "=", "np", ".", "dot", "(", "vector_a", ",", "vector_b", ")", "vector_a_norm", "=", "np", ".", "linalg", ".", "norm", "(", "vector_a", ")", "vector_b_norm", "=", "np", ".", "linalg", ".", "norm", "(", "vector_b", ")", "cos_theta", "=", "(", "dot_product", "/", "(", "vector_a_norm", "*", "vector_b_norm", ")", ")", "return", "(", "abs", "(", "(", "cos_theta", "+", "1", ")", ")", "<", "eps", ")" ]
checks whether the points x , y , z form a flat angle within eps .
train
false
8,564
def add_standard_options_to(parser):
    parser.add_option('-n', action='store', type='int', default=100, dest='num_runs', help='Number of times to run the test.')
    parser.add_option('--profile', action='store_true', help='Run the benchmark through cProfile.')
    parser.add_option('--profile_sort', action='store', type='str', default='time', help='Column to sort cProfile output by.')
    parser.add_option('--take_geo_mean', action='store_true', help='Return the geo mean, rather than individual data.')
[ "def", "add_standard_options_to", "(", "parser", ")", ":", "parser", ".", "add_option", "(", "'-n'", ",", "action", "=", "'store'", ",", "type", "=", "'int'", ",", "default", "=", "100", ",", "dest", "=", "'num_runs'", ",", "help", "=", "'Number of times to run the test.'", ")", "parser", ".", "add_option", "(", "'--profile'", ",", "action", "=", "'store_true'", ",", "help", "=", "'Run the benchmark through cProfile.'", ")", "parser", ".", "add_option", "(", "'--profile_sort'", ",", "action", "=", "'store'", ",", "type", "=", "'str'", ",", "default", "=", "'time'", ",", "help", "=", "'Column to sort cProfile output by.'", ")", "parser", ".", "add_option", "(", "'--take_geo_mean'", ",", "action", "=", "'store_true'", ",", "help", "=", "'Return the geo mean, rather than individual data.'", ")" ]
add a bunch of common command-line flags to an existing optionparser .
train
false
8,565
def add_loadgraph_args(parser): parser.add_argument(u'-l', u'--loadgraph', metavar=u'filename', default=None, help=u'load a precomputed k-mer graph from disk')
[ "def", "add_loadgraph_args", "(", "parser", ")", ":", "parser", ".", "add_argument", "(", "u'-l'", ",", "u'--loadgraph'", ",", "metavar", "=", "u'filename'", ",", "default", "=", "None", ",", "help", "=", "u'load a precomputed k-mer graph from disk'", ")" ]
common loadgraph argument .
train
false
8,566
def test_plot_montage():
    m = read_montage('easycap-M1')
    m.plot()
    m.plot(show_names=True)
    d = read_dig_montage(hsp, hpi, elp, point_names)
    d.plot()
    d.plot(show_names=True)
[ "def", "test_plot_montage", "(", ")", ":", "m", "=", "read_montage", "(", "'easycap-M1'", ")", "m", ".", "plot", "(", ")", "m", ".", "plot", "(", "show_names", "=", "True", ")", "d", "=", "read_dig_montage", "(", "hsp", ",", "hpi", ",", "elp", ",", "point_names", ")", "d", ".", "plot", "(", ")", "d", ".", "plot", "(", "show_names", "=", "True", ")" ]
test plotting montages .
train
false
8,567
def gametime(format=False):
    gametime = ((runtime() - GAME_TIME_OFFSET) * TIMEFACTOR)
    if format:
        return _format(gametime, YEAR, MONTH, WEEK, DAY, HOUR, MIN)
    return gametime
[ "def", "gametime", "(", "format", "=", "False", ")", ":", "gametime", "=", "(", "(", "runtime", "(", ")", "-", "GAME_TIME_OFFSET", ")", "*", "TIMEFACTOR", ")", "if", "format", ":", "return", "_format", "(", "gametime", ",", "YEAR", ",", "MONTH", ",", "WEEK", ",", "DAY", ",", "HOUR", ",", "MIN", ")", "return", "gametime" ]
get the total gametime of the server since first start . args: format : format into time representation .
train
false
8,568
def average_neighbor_degree(G, source='out', target='out', nodes=None, weight=None):
    source_degree = G.degree
    target_degree = G.degree
    if G.is_directed():
        direction = {'out': G.out_degree, 'in': G.in_degree}
        source_degree = direction[source]
        target_degree = direction[target]
    return _average_nbr_deg(G, source_degree, target_degree, nodes=nodes, weight=weight)
[ "def", "average_neighbor_degree", "(", "G", ",", "source", "=", "'out'", ",", "target", "=", "'out'", ",", "nodes", "=", "None", ",", "weight", "=", "None", ")", ":", "source_degree", "=", "G", ".", "degree", "target_degree", "=", "G", ".", "degree", "if", "G", ".", "is_directed", "(", ")", ":", "direction", "=", "{", "'out'", ":", "G", ".", "out_degree", ",", "'in'", ":", "G", ".", "in_degree", "}", "source_degree", "=", "direction", "[", "source", "]", "target_degree", "=", "direction", "[", "target", "]", "return", "_average_nbr_deg", "(", "G", ",", "source_degree", ",", "target_degree", ",", "nodes", "=", "nodes", ",", "weight", "=", "weight", ")" ]
returns the average degree of the neighborhood of each node .
train
false
8,569
def _PrepareListOfSources(spec, generator_flags, gyp_file):
    sources = OrderedSet()
    _AddNormalizedSources(sources, spec.get('sources', []))
    excluded_sources = OrderedSet()
    if (not generator_flags.get('standalone')):
        sources.add(gyp_file)
    for a in spec.get('actions', []):
        inputs = a['inputs']
        inputs = [_NormalizedSource(i) for i in inputs]
        inputs = OrderedSet(inputs)
        sources.update(inputs)
        if (not spec.get('msvs_external_builder')):
            excluded_sources.update(inputs)
        if int(a.get('process_outputs_as_sources', False)):
            _AddNormalizedSources(sources, a.get('outputs', []))
    for cpy in spec.get('copies', []):
        _AddNormalizedSources(sources, cpy.get('files', []))
    return (sources, excluded_sources)
[ "def", "_PrepareListOfSources", "(", "spec", ",", "generator_flags", ",", "gyp_file", ")", ":", "sources", "=", "OrderedSet", "(", ")", "_AddNormalizedSources", "(", "sources", ",", "spec", ".", "get", "(", "'sources'", ",", "[", "]", ")", ")", "excluded_sources", "=", "OrderedSet", "(", ")", "if", "(", "not", "generator_flags", ".", "get", "(", "'standalone'", ")", ")", ":", "sources", ".", "add", "(", "gyp_file", ")", "for", "a", "in", "spec", ".", "get", "(", "'actions'", ",", "[", "]", ")", ":", "inputs", "=", "a", "[", "'inputs'", "]", "inputs", "=", "[", "_NormalizedSource", "(", "i", ")", "for", "i", "in", "inputs", "]", "inputs", "=", "OrderedSet", "(", "inputs", ")", "sources", ".", "update", "(", "inputs", ")", "if", "(", "not", "spec", ".", "get", "(", "'msvs_external_builder'", ")", ")", ":", "excluded_sources", ".", "update", "(", "inputs", ")", "if", "int", "(", "a", ".", "get", "(", "'process_outputs_as_sources'", ",", "False", ")", ")", ":", "_AddNormalizedSources", "(", "sources", ",", "a", ".", "get", "(", "'outputs'", ",", "[", "]", ")", ")", "for", "cpy", "in", "spec", ".", "get", "(", "'copies'", ",", "[", "]", ")", ":", "_AddNormalizedSources", "(", "sources", ",", "cpy", ".", "get", "(", "'files'", ",", "[", "]", ")", ")", "return", "(", "sources", ",", "excluded_sources", ")" ]
prepare list of sources and excluded sources .
train
false
8,573
def sm_volume_create(context, values): return IMPL.sm_volume_create(context, values)
[ "def", "sm_volume_create", "(", "context", ",", "values", ")", ":", "return", "IMPL", ".", "sm_volume_create", "(", "context", ",", "values", ")" ]
create a new child zone entry .
train
false
8,576
def _validate_nodes_json(json_nodes, errors, user, workflow):
    assert isinstance(errors, dict), 'errors must be a dict.'
    result = True
    for node in json_nodes:
        _errors = {}
        node_dict = format_dict_field_values(node)
        if (node['node_type'] in ACTION_TYPES):
            node_result = _validate_node_json(node['node_type'], node_dict, _errors, user, workflow)
        else:
            node_result = True
        link_result = _validate_node_links_json(node['node_type'], node_dict['child_links'], _errors)
        result = (result and node_result and link_result)
        if ((not node.has_key('name')) and ((not node.has_key('node_type')) or (not node.has_key('id')))):
            raise StructuredException(code='INVALID_REQUEST_ERROR', message=_('Error saving workflow'), data={'errors': 'Node is missing a name.'}, error_code=400)
        errors[node.get('name', ('%s-%s' % (node.get('node_type'), node.get('id'))))] = _errors
    return result
[ "def", "_validate_nodes_json", "(", "json_nodes", ",", "errors", ",", "user", ",", "workflow", ")", ":", "assert", "isinstance", "(", "errors", ",", "dict", ")", ",", "'errors must be a dict.'", "result", "=", "True", "for", "node", "in", "json_nodes", ":", "_errors", "=", "{", "}", "node_dict", "=", "format_dict_field_values", "(", "node", ")", "if", "(", "node", "[", "'node_type'", "]", "in", "ACTION_TYPES", ")", ":", "node_result", "=", "_validate_node_json", "(", "node", "[", "'node_type'", "]", ",", "node_dict", ",", "_errors", ",", "user", ",", "workflow", ")", "else", ":", "node_result", "=", "True", "link_result", "=", "_validate_node_links_json", "(", "node", "[", "'node_type'", "]", ",", "node_dict", "[", "'child_links'", "]", ",", "_errors", ")", "result", "=", "(", "result", "and", "node_result", "and", "link_result", ")", "if", "(", "(", "not", "node", ".", "has_key", "(", "'name'", ")", ")", "and", "(", "(", "not", "node", ".", "has_key", "(", "'node_type'", ")", ")", "or", "(", "not", "node", ".", "has_key", "(", "'id'", ")", ")", ")", ")", ":", "raise", "StructuredException", "(", "code", "=", "'INVALID_REQUEST_ERROR'", ",", "message", "=", "_", "(", "'Error saving workflow'", ")", ",", "data", "=", "{", "'errors'", ":", "'Node is missing a name.'", "}", ",", "error_code", "=", "400", ")", "errors", "[", "node", ".", "get", "(", "'name'", ",", "(", "'%s-%s'", "%", "(", "node", ".", "get", "(", "'node_type'", ")", ",", "node", ".", "get", "(", "'id'", ")", ")", ")", ")", "]", "=", "_errors", "return", "result" ]
validates every node and link in the workflow .
train
false
8,577
def _footer_copyright(): return _(u'\xa9 {org_name}. All rights reserved except where noted. EdX, Open edX and the edX and Open EdX logos are registered trademarks or trademarks of edX Inc.').format(org_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME))
[ "def", "_footer_copyright", "(", ")", ":", "return", "_", "(", "u'\\xa9 {org_name}. All rights reserved except where noted. EdX, Open edX and the edX and Open EdX logos are registered trademarks or trademarks of edX Inc.'", ")", ".", "format", "(", "org_name", "=", "configuration_helpers", ".", "get_value", "(", "'PLATFORM_NAME'", ",", "settings", ".", "PLATFORM_NAME", ")", ")" ]
return the copyright to display in the footer .
train
false
8,578
def change_LOG_DIR(log_dir, web_log):
    log_dir_changed = False
    abs_log_dir = ek(os.path.normpath, ek(os.path.join, sickbeard.DATA_DIR, log_dir))
    web_log_value = checkbox_to_value(web_log)
    if (ek(os.path.normpath, sickbeard.LOG_DIR) != abs_log_dir):
        if helpers.makeDir(abs_log_dir):
            sickbeard.ACTUAL_LOG_DIR = ek(os.path.normpath, log_dir)
            sickbeard.LOG_DIR = abs_log_dir
            logger.init_logging()
            logger.log((u'Initialized new log file in ' + sickbeard.LOG_DIR))
            log_dir_changed = True
        else:
            return False
    if ((sickbeard.WEB_LOG != web_log_value) or (log_dir_changed is True)):
        sickbeard.WEB_LOG = web_log_value
    return True
[ "def", "change_LOG_DIR", "(", "log_dir", ",", "web_log", ")", ":", "log_dir_changed", "=", "False", "abs_log_dir", "=", "ek", "(", "os", ".", "path", ".", "normpath", ",", "ek", "(", "os", ".", "path", ".", "join", ",", "sickbeard", ".", "DATA_DIR", ",", "log_dir", ")", ")", "web_log_value", "=", "checkbox_to_value", "(", "web_log", ")", "if", "(", "ek", "(", "os", ".", "path", ".", "normpath", ",", "sickbeard", ".", "LOG_DIR", ")", "!=", "abs_log_dir", ")", ":", "if", "helpers", ".", "makeDir", "(", "abs_log_dir", ")", ":", "sickbeard", ".", "ACTUAL_LOG_DIR", "=", "ek", "(", "os", ".", "path", ".", "normpath", ",", "log_dir", ")", "sickbeard", ".", "LOG_DIR", "=", "abs_log_dir", "logger", ".", "init_logging", "(", ")", "logger", ".", "log", "(", "(", "u'Initialized new log file in '", "+", "sickbeard", ".", "LOG_DIR", ")", ")", "log_dir_changed", "=", "True", "else", ":", "return", "False", "if", "(", "(", "sickbeard", ".", "WEB_LOG", "!=", "web_log_value", ")", "or", "(", "log_dir_changed", "is", "True", ")", ")", ":", "sickbeard", ".", "WEB_LOG", "=", "web_log_value", "return", "True" ]
change logging directory for application and webserver .
train
false
8,579
def v1_key_to_string(v1_key):
    path_element_strings = []
    for path_element in v1_key.path:
        field = path_element.WhichOneof('id_type')
        if (field == 'id'):
            id_or_name = str(path_element.id)
        elif (field == 'name'):
            id_or_name = path_element.name
        else:
            id_or_name = ''
        path_element_strings.append(('%s: %s' % (path_element.kind, id_or_name)))
    return ('[%s]' % ', '.join(path_element_strings))
[ "def", "v1_key_to_string", "(", "v1_key", ")", ":", "path_element_strings", "=", "[", "]", "for", "path_element", "in", "v1_key", ".", "path", ":", "field", "=", "path_element", ".", "WhichOneof", "(", "'id_type'", ")", "if", "(", "field", "==", "'id'", ")", ":", "id_or_name", "=", "str", "(", "path_element", ".", "id", ")", "elif", "(", "field", "==", "'name'", ")", ":", "id_or_name", "=", "path_element", ".", "name", "else", ":", "id_or_name", "=", "''", "path_element_strings", ".", "append", "(", "(", "'%s: %s'", "%", "(", "path_element", ".", "kind", ",", "id_or_name", ")", ")", ")", "return", "(", "'[%s]'", "%", "', '", ".", "join", "(", "path_element_strings", ")", ")" ]
generates a string representing a key's path .
train
false
8,580
@login_required
@expect_json
def _create_item(request):
    parent_locator = request.json['parent_locator']
    usage_key = usage_key_with_run(parent_locator)
    if (not has_studio_write_access(request.user, usage_key.course_key)):
        raise PermissionDenied()
    category = request.json['category']
    if isinstance(usage_key, LibraryUsageLocator):
        if (category not in ['html', 'problem', 'video']):
            return HttpResponseBadRequest(("Category '%s' not supported for Libraries" % category), content_type='text/plain')
    created_block = create_xblock(parent_locator=parent_locator, user=request.user, category=category, display_name=request.json.get('display_name'), boilerplate=request.json.get('boilerplate'))
    return JsonResponse({'locator': unicode(created_block.location), 'courseKey': unicode(created_block.location.course_key)})
[ "@", "login_required", "@", "expect_json", "def", "_create_item", "(", "request", ")", ":", "parent_locator", "=", "request", ".", "json", "[", "'parent_locator'", "]", "usage_key", "=", "usage_key_with_run", "(", "parent_locator", ")", "if", "(", "not", "has_studio_write_access", "(", "request", ".", "user", ",", "usage_key", ".", "course_key", ")", ")", ":", "raise", "PermissionDenied", "(", ")", "category", "=", "request", ".", "json", "[", "'category'", "]", "if", "isinstance", "(", "usage_key", ",", "LibraryUsageLocator", ")", ":", "if", "(", "category", "not", "in", "[", "'html'", ",", "'problem'", ",", "'video'", "]", ")", ":", "return", "HttpResponseBadRequest", "(", "(", "\"Category '%s' not supported for Libraries\"", "%", "category", ")", ",", "content_type", "=", "'text/plain'", ")", "created_block", "=", "create_xblock", "(", "parent_locator", "=", "parent_locator", ",", "user", "=", "request", ".", "user", ",", "category", "=", "category", ",", "display_name", "=", "request", ".", "json", ".", "get", "(", "'display_name'", ")", ",", "boilerplate", "=", "request", ".", "json", ".", "get", "(", "'boilerplate'", ")", ")", "return", "JsonResponse", "(", "{", "'locator'", ":", "unicode", "(", "created_block", ".", "location", ")", ",", "'courseKey'", ":", "unicode", "(", "created_block", ".", "location", ".", "course_key", ")", "}", ")" ]
view for create items .
train
false
8,581
def controller_id(vendor_id, product_ids, model_name): assert (type(vendor_id) is int) assert (type(product_ids) is list) for product_id in product_ids: assert (type(product_id) is int) assert ((type(model_name) is str) or (type(model_name) is list)) if (type(model_name) is str): model_names = [model_name] else: model_names = model_name return {VENDORID: vendor_id, PRODUCTIDS: product_ids, MODEL_NAMES: model_names}
[ "def", "controller_id", "(", "vendor_id", ",", "product_ids", ",", "model_name", ")", ":", "if", "(", "not", "(", "type", "(", "vendor_id", ")", "is", "int", ")", ")", ":", "raise", "AssertionError", "raise", "(", "(", "type", "(", "product_ids", ")", "is", "list", ")", "or", "AssertionError", ")", "for", "product_id", "in", "product_ids", ":", "raise", "(", "(", "type", "(", "product_id", ")", "is", "int", ")", "or", "AssertionError", ")", "raise", "(", "(", "type", "(", "model_name", ")", "is", "str", ")", "or", "(", "type", "(", "model_name", ")", "is", "list", ")", "or", "AssertionError", ")", "model_names", "=", "(", "(", "type", "(", "model_name", ")", "is", "str", ")", "and", "[", "model_name", "]", ")", "else", ":", "model_names", "=", "model_name", "return", "{", "VENDORID", ":", "vendor_id", ",", "PRODUCTIDS", ":", "product_ids", ",", "MODEL_NAMES", ":", "model_names", "}" ]
generate a hardwareid dict .
train
false
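a usage sketch for controller_id above, assuming VENDORID, PRODUCTIDS and MODEL_NAMES are module-level key constants; the PS3 ids are illustrative:
hw_id = controller_id(0x054C, [0x0268], 'PLAYSTATION(R)3 Controller')
# -> {VENDORID: 1356, PRODUCTIDS: [616], MODEL_NAMES: ['PLAYSTATION(R)3 Controller']}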
8,582
def print_hits(results): print_search_stats(results) for hit in results['hits']['hits']: created_at = parse_date(hit['_source'].get('created_at', hit['_source']['authored_date'])) print(('/%s/%s/%s (%s): %s' % (hit['_index'], hit['_type'], hit['_id'], created_at.strftime('%Y-%m-%d'), hit['_source']['description'].replace('\n', ' ')))) print(('=' * 80)) print()
[ "def", "print_hits", "(", "results", ")", ":", "print_search_stats", "(", "results", ")", "for", "hit", "in", "results", "[", "'hits'", "]", "[", "'hits'", "]", ":", "created_at", "=", "parse_date", "(", "hit", "[", "'_source'", "]", ".", "get", "(", "'created_at'", ",", "hit", "[", "'_source'", "]", "[", "'authored_date'", "]", ")", ")", "print", "(", "(", "'/%s/%s/%s (%s): %s'", "%", "(", "hit", "[", "'_index'", "]", ",", "hit", "[", "'_type'", "]", ",", "hit", "[", "'_id'", "]", ",", "created_at", ".", "strftime", "(", "'%Y-%m-%d'", ")", ",", "hit", "[", "'_source'", "]", "[", "'description'", "]", ".", "replace", "(", "'\\n'", ",", "' '", ")", ")", ")", ")", "print", "(", "(", "'='", "*", "80", ")", ")", "print", "(", ")" ]
simple utility function to print results of a search query .
train
true
8,583
def argsplit(args, sep=','): parsed_len = 0 last = 0 splits = [] for e in bracket_split(args, brackets=['()', '[]', '{}']): if (e[0] not in {'(', '[', '{'}): for (i, char) in enumerate(e): if (char == sep): splits.append(args[last:(parsed_len + i)]) last = ((parsed_len + i) + 1) parsed_len += len(e) splits.append(args[last:]) return splits
[ "def", "argsplit", "(", "args", ",", "sep", "=", "','", ")", ":", "parsed_len", "=", "0", "last", "=", "0", "splits", "=", "[", "]", "for", "e", "in", "bracket_split", "(", "args", ",", "brackets", "=", "[", "'()'", ",", "'[]'", ",", "'{}'", "]", ")", ":", "if", "(", "e", "[", "0", "]", "not", "in", "{", "'('", ",", "'['", ",", "'{'", "}", ")", ":", "for", "(", "i", ",", "char", ")", "in", "enumerate", "(", "e", ")", ":", "if", "(", "char", "==", "sep", ")", ":", "splits", ".", "append", "(", "args", "[", "last", ":", "(", "parsed_len", "+", "i", ")", "]", ")", "last", "=", "(", "(", "parsed_len", "+", "i", ")", "+", "1", ")", "parsed_len", "+=", "len", "(", "e", ")", "splits", ".", "append", "(", "args", "[", "last", ":", "]", ")", "return", "splits" ]
used to split js args on a separator, ignoring separators nested inside brackets .
train
true
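a usage sketch for argsplit above, assuming bracket_split yields alternating plain and bracketed chunks of its input:
argsplit('a, f(x, y), [1, 2]')
# -> ['a', ' f(x, y)', ' [1, 2]']: the commas inside () and [] are not split on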
8,584
def new_func_strip_path(func_name): (filename, line, name) = func_name if filename.endswith('__init__.py'): return ((os.path.basename(filename[:(-12)]) + filename[(-12):]), line, name) return (os.path.basename(filename), line, name)
[ "def", "new_func_strip_path", "(", "func_name", ")", ":", "(", "filename", ",", "line", ",", "name", ")", "=", "func_name", "if", "filename", ".", "endswith", "(", "'__init__.py'", ")", ":", "return", "(", "(", "os", ".", "path", ".", "basename", "(", "filename", "[", ":", "(", "-", "12", ")", "]", ")", "+", "filename", "[", "(", "-", "12", ")", ":", "]", ")", ",", "line", ",", "name", ")", "return", "(", "os", ".", "path", ".", "basename", "(", "filename", ")", ",", "line", ",", "name", ")" ]
make profiler output more readable by prefixing __init__ modules with their parent directory name .
train
false
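a usage sketch for new_func_strip_path above; func_name tuples are the (filename, line, name) keys used by the profiler:
new_func_strip_path(('/usr/lib/python/pkg/__init__.py', 12, 'setup'))
# -> ('pkg/__init__.py', 12, 'setup'): the parent package name is kept
new_func_strip_path(('/usr/lib/python/pkg/mod.py', 3, 'f'))
# -> ('mod.py', 3, 'f')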
8,586
def test_ast_invalid_for(): cant_compile(u'(for* [a 1] (else 1 2))')
[ "def", "test_ast_invalid_for", "(", ")", ":", "cant_compile", "(", "u'(for* [a 1] (else 1 2))'", ")" ]
make sure ast can't compile an invalid for .
train
false
8,587
def get_time_display(dtime, format_string=None, coerce_tz=None): if ((dtime is not None) and (dtime.tzinfo is not None) and coerce_tz): try: to_tz = timezone(coerce_tz) except UnknownTimeZoneError: to_tz = utc dtime = to_tz.normalize(dtime.astimezone(to_tz)) if ((dtime is None) or (format_string is None)): return get_default_time_display(dtime) try: return unicode(strftime_localized(dtime, format_string)) except ValueError: return get_default_time_display(dtime)
[ "def", "get_time_display", "(", "dtime", ",", "format_string", "=", "None", ",", "coerce_tz", "=", "None", ")", ":", "if", "(", "(", "dtime", "is", "not", "None", ")", "and", "(", "dtime", ".", "tzinfo", "is", "not", "None", ")", "and", "coerce_tz", ")", ":", "try", ":", "to_tz", "=", "timezone", "(", "coerce_tz", ")", "except", "UnknownTimeZoneError", ":", "to_tz", "=", "utc", "dtime", "=", "to_tz", ".", "normalize", "(", "dtime", ".", "astimezone", "(", "to_tz", ")", ")", "if", "(", "(", "dtime", "is", "None", ")", "or", "(", "format_string", "is", "None", ")", ")", ":", "return", "get_default_time_display", "(", "dtime", ")", "try", ":", "return", "unicode", "(", "strftime_localized", "(", "dtime", ",", "format_string", ")", ")", "except", "ValueError", ":", "return", "get_default_time_display", "(", "dtime", ")" ]
converts a datetime to a string representation .
train
false
8,588
@register.function @jinja2.contextfunction def queue_tabnav(context): request = context['request'] counts = context['queue_counts'] apps_reviewing = AppsReviewing(request).get_apps() if acl.action_allowed(request, 'Apps', 'Review'): rv = [(reverse('reviewers.apps.queue_pending'), 'pending', pgettext(counts['pending'], 'Apps ({0})').format(counts['pending'])), (reverse('reviewers.apps.queue_rereview'), 'rereview', pgettext(counts['rereview'], 'Re-reviews ({0})').format(counts['rereview'])), (reverse('reviewers.apps.queue_updates'), 'updates', pgettext(counts['updates'], 'Updates ({0})').format(counts['updates']))] if acl.action_allowed(request, 'Apps', 'ReviewEscalated'): rv.append((reverse('reviewers.apps.queue_escalated'), 'escalated', pgettext(counts['escalated'], 'Escalations ({0})').format(counts['escalated']))) rv.append((reverse('reviewers.apps.apps_reviewing'), 'reviewing', _('Reviewing ({0})').format(len(apps_reviewing)))) rv.append((reverse('reviewers.apps.queue_homescreen'), 'homescreen', pgettext(counts['homescreen'], 'Homescreens ({0})').format(counts['homescreen']))) else: rv = [] if acl.action_allowed(request, 'Apps', 'ModerateReview'): rv.append((reverse('reviewers.apps.queue_moderated'), 'moderated', pgettext(counts['moderated'], 'Moderated Reviews ({0})').format(counts['moderated']))) if acl.action_allowed(request, 'Apps', 'ReadAbuse'): rv.append((reverse('reviewers.apps.queue_abuse'), 'abuse', pgettext(counts['abuse'], 'Abuse Reports ({0})').format(counts['abuse']))) if acl.action_allowed(request, 'Websites', 'ReadAbuse'): rv.append((reverse('reviewers.websites.queue_abuse'), 'abusewebsites', pgettext(counts['abusewebsites'], 'Website Abuse Reports ({0})').format(counts['abusewebsites']))) return rv
[ "@", "register", ".", "function", "@", "jinja2", ".", "contextfunction", "def", "queue_tabnav", "(", "context", ")", ":", "request", "=", "context", "[", "'request'", "]", "counts", "=", "context", "[", "'queue_counts'", "]", "apps_reviewing", "=", "AppsReviewing", "(", "request", ")", ".", "get_apps", "(", ")", "if", "acl", ".", "action_allowed", "(", "request", ",", "'Apps'", ",", "'Review'", ")", ":", "rv", "=", "[", "(", "reverse", "(", "'reviewers.apps.queue_pending'", ")", ",", "'pending'", ",", "pgettext", "(", "counts", "[", "'pending'", "]", ",", "'Apps ({0})'", ")", ".", "format", "(", "counts", "[", "'pending'", "]", ")", ")", ",", "(", "reverse", "(", "'reviewers.apps.queue_rereview'", ")", ",", "'rereview'", ",", "pgettext", "(", "counts", "[", "'rereview'", "]", ",", "'Re-reviews ({0})'", ")", ".", "format", "(", "counts", "[", "'rereview'", "]", ")", ")", ",", "(", "reverse", "(", "'reviewers.apps.queue_updates'", ")", ",", "'updates'", ",", "pgettext", "(", "counts", "[", "'updates'", "]", ",", "'Updates ({0})'", ")", ".", "format", "(", "counts", "[", "'updates'", "]", ")", ")", "]", "if", "acl", ".", "action_allowed", "(", "request", ",", "'Apps'", ",", "'ReviewEscalated'", ")", ":", "rv", ".", "append", "(", "(", "reverse", "(", "'reviewers.apps.queue_escalated'", ")", ",", "'escalated'", ",", "pgettext", "(", "counts", "[", "'escalated'", "]", ",", "'Escalations ({0})'", ")", ".", "format", "(", "counts", "[", "'escalated'", "]", ")", ")", ")", "rv", ".", "append", "(", "(", "reverse", "(", "'reviewers.apps.apps_reviewing'", ")", ",", "'reviewing'", ",", "_", "(", "'Reviewing ({0})'", ")", ".", "format", "(", "len", "(", "apps_reviewing", ")", ")", ")", ")", "rv", ".", "append", "(", "(", "reverse", "(", "'reviewers.apps.queue_homescreen'", ")", ",", "'homescreen'", ",", "pgettext", "(", "counts", "[", "'homescreen'", "]", ",", "'Homescreens ({0})'", ")", ".", "format", "(", "counts", "[", "'homescreen'", "]", ")", ")", ")", "else", ":", "rv", "=", "[", "]", "if", "acl", ".", "action_allowed", "(", "request", ",", "'Apps'", ",", "'ModerateReview'", ")", ":", "rv", ".", "append", "(", "(", "reverse", "(", "'reviewers.apps.queue_moderated'", ")", ",", "'moderated'", ",", "pgettext", "(", "counts", "[", "'moderated'", "]", ",", "'Moderated Reviews ({0})'", ")", ".", "format", "(", "counts", "[", "'moderated'", "]", ")", ")", ")", "if", "acl", ".", "action_allowed", "(", "request", ",", "'Apps'", ",", "'ReadAbuse'", ")", ":", "rv", ".", "append", "(", "(", "reverse", "(", "'reviewers.apps.queue_abuse'", ")", ",", "'abuse'", ",", "pgettext", "(", "counts", "[", "'abuse'", "]", ",", "'Abuse Reports ({0})'", ")", ".", "format", "(", "counts", "[", "'abuse'", "]", ")", ")", ")", "if", "acl", ".", "action_allowed", "(", "request", ",", "'Websites'", ",", "'ReadAbuse'", ")", ":", "rv", ".", "append", "(", "(", "reverse", "(", "'reviewers.websites.queue_abuse'", ")", ",", "'abusewebsites'", ",", "pgettext", "(", "counts", "[", "'abusewebsites'", "]", ",", "'Website Abuse Reports ({0})'", ")", ".", "format", "(", "counts", "[", "'abusewebsites'", "]", ")", ")", ")", "return", "rv" ]
returns tuple of tab navigation for the queue pages .
train
false
8,591
def getStyleValue(defaultValue, key, xmlElement): if ('style' in xmlElement.attributeDictionary): line = xmlElement.attributeDictionary['style'] strokeIndex = line.find(key) if (strokeIndex > (-1)): words = line[strokeIndex:].replace(':', ' ').replace(';', ' ').split() if (len(words) > 1): return words[1] if (key in xmlElement.attributeDictionary): return xmlElement.attributeDictionary[key] if (xmlElement.parent == None): return defaultValue return getStyleValue(defaultValue, key, xmlElement.parent)
[ "def", "getStyleValue", "(", "defaultValue", ",", "key", ",", "xmlElement", ")", ":", "if", "(", "'style'", "in", "xmlElement", ".", "attributeDictionary", ")", ":", "line", "=", "xmlElement", ".", "attributeDictionary", "[", "'style'", "]", "strokeIndex", "=", "line", ".", "find", "(", "key", ")", "if", "(", "strokeIndex", ">", "(", "-", "1", ")", ")", ":", "words", "=", "line", "[", "strokeIndex", ":", "]", ".", "replace", "(", "':'", ",", "' '", ")", ".", "replace", "(", "';'", ",", "' '", ")", ".", "split", "(", ")", "if", "(", "len", "(", "words", ")", ">", "1", ")", ":", "return", "words", "[", "1", "]", "if", "(", "key", "in", "xmlElement", ".", "attributeDictionary", ")", ":", "return", "xmlElement", ".", "attributeDictionary", "[", "key", "]", "if", "(", "xmlElement", ".", "parent", "==", "None", ")", ":", "return", "defaultValue", "return", "getStyleValue", "(", "defaultValue", ",", "key", ",", "xmlElement", ".", "parent", ")" ]
get the style value string for a key, checking the style attribute, then direct attributes, then parent elements .
train
false
8,592
@pytest.mark.django_db def test_create_project_good(english): proj = Project(code='hello', fullname='world', source_language=english) proj.save() proj.delete() code_with_padding = ' hello ' fullname_with_padding = ' world ' proj = Project(code=code_with_padding, fullname='world', source_language=english) proj.save() assert (proj.code == code_with_padding.strip()) proj.delete() proj = Project(code='hello', fullname=fullname_with_padding, source_language=english) proj.save() assert (proj.fullname == fullname_with_padding.strip()) proj.delete() proj = Project(code=code_with_padding, fullname=fullname_with_padding, source_language=english) proj.save() assert (proj.code == code_with_padding.strip()) assert (proj.fullname == fullname_with_padding.strip()) proj.delete()
[ "@", "pytest", ".", "mark", ".", "django_db", "def", "test_create_project_good", "(", "english", ")", ":", "proj", "=", "Project", "(", "code", "=", "'hello'", ",", "fullname", "=", "'world'", ",", "source_language", "=", "english", ")", "proj", ".", "save", "(", ")", "proj", ".", "delete", "(", ")", "code_with_padding", "=", "' hello '", "fullname_with_padding", "=", "' world '", "proj", "=", "Project", "(", "code", "=", "code_with_padding", ",", "fullname", "=", "'world'", ",", "source_language", "=", "english", ")", "proj", ".", "save", "(", ")", "assert", "(", "proj", ".", "code", "==", "code_with_padding", ".", "strip", "(", ")", ")", "proj", ".", "delete", "(", ")", "proj", "=", "Project", "(", "code", "=", "'hello'", ",", "fullname", "=", "fullname_with_padding", ",", "source_language", "=", "english", ")", "proj", ".", "save", "(", ")", "assert", "(", "proj", ".", "fullname", "==", "fullname_with_padding", ".", "strip", "(", ")", ")", "proj", ".", "delete", "(", ")", "proj", "=", "Project", "(", "code", "=", "code_with_padding", ",", "fullname", "=", "fullname_with_padding", ",", "source_language", "=", "english", ")", "proj", ".", "save", "(", ")", "assert", "(", "proj", ".", "code", "==", "code_with_padding", ".", "strip", "(", ")", ")", "assert", "(", "proj", ".", "fullname", "==", "fullname_with_padding", ".", "strip", "(", ")", ")", "proj", ".", "delete", "(", ")" ]
tests projects are created with valid arguments only .
train
false
8,595
def FindVssProjectInfo(fullfname): (path, fnameonly) = os.path.split(fullfname) origPath = path project = '' retPaths = [fnameonly] while (not project): iniName = os.path.join(path, g_iniName) database = win32api.GetProfileVal('Python', 'Database', '', iniName) project = win32api.GetProfileVal('Python', 'Project', '', iniName) if project: break (path, addpath) = os.path.split(path) if (not addpath): break retPaths.insert(0, addpath) if (not project): win32ui.MessageBox(('%s\r\n\r\nThis directory is not configured for Python/VSS' % origPath)) return return (project, '/'.join(retPaths), database)
[ "def", "FindVssProjectInfo", "(", "fullfname", ")", ":", "(", "path", ",", "fnameonly", ")", "=", "os", ".", "path", ".", "split", "(", "fullfname", ")", "origPath", "=", "path", "project", "=", "''", "retPaths", "=", "[", "fnameonly", "]", "while", "(", "not", "project", ")", ":", "iniName", "=", "os", ".", "path", ".", "join", "(", "path", ",", "g_iniName", ")", "database", "=", "win32api", ".", "GetProfileVal", "(", "'Python'", ",", "'Database'", ",", "''", ",", "iniName", ")", "project", "=", "win32api", ".", "GetProfileVal", "(", "'Python'", ",", "'Project'", ",", "''", ",", "iniName", ")", "if", "project", ":", "break", "(", "path", ",", "addpath", ")", "=", "os", ".", "path", ".", "split", "(", "path", ")", "if", "(", "not", "addpath", ")", ":", "break", "retPaths", ".", "insert", "(", "0", ",", "addpath", ")", "if", "(", "not", "project", ")", ":", "win32ui", ".", "MessageBox", "(", "(", "'%s\\r\\n\\r\\nThis directory is not configured for Python/VSS'", "%", "origPath", ")", ")", "return", "return", "(", "project", ",", "'/'", ".", "join", "(", "retPaths", ")", ",", "database", ")" ]
walks up the file system looking for an ini file describing the project .
train
false
8,596
def next_event_indexer(all_dates, all_sids, event_dates, event_timestamps, event_sids): validate_event_metadata(event_dates, event_timestamps, event_sids) out = np.full((len(all_dates), len(all_sids)), (-1), dtype=np.int64) sid_ixs = all_sids.searchsorted(event_sids) dt_ixs = all_dates.searchsorted(event_dates, side='right') ts_ixs = all_dates.searchsorted(event_timestamps) for i in range((len(event_sids) - 1), (-1), (-1)): start_ix = ts_ixs[i] end_ix = dt_ixs[i] out[start_ix:end_ix, sid_ixs[i]] = i return out
[ "def", "next_event_indexer", "(", "all_dates", ",", "all_sids", ",", "event_dates", ",", "event_timestamps", ",", "event_sids", ")", ":", "validate_event_metadata", "(", "event_dates", ",", "event_timestamps", ",", "event_sids", ")", "out", "=", "np", ".", "full", "(", "(", "len", "(", "all_dates", ")", ",", "len", "(", "all_sids", ")", ")", ",", "(", "-", "1", ")", ",", "dtype", "=", "np", ".", "int64", ")", "sid_ixs", "=", "all_sids", ".", "searchsorted", "(", "event_sids", ")", "dt_ixs", "=", "all_dates", ".", "searchsorted", "(", "event_dates", ",", "side", "=", "'right'", ")", "ts_ixs", "=", "all_dates", ".", "searchsorted", "(", "event_timestamps", ")", "for", "i", "in", "range", "(", "(", "len", "(", "event_sids", ")", "-", "1", ")", ",", "(", "-", "1", ")", ",", "(", "-", "1", ")", ")", ":", "start_ix", "=", "ts_ixs", "[", "i", "]", "end_ix", "=", "dt_ixs", "[", "i", "]", "out", "[", "start_ix", ":", "end_ix", ",", "sid_ixs", "[", "i", "]", "]", "=", "i", "return", "out" ]
construct an index array that, for each (date, sid) pair, holds the index of the next event for that sid known on that date, or -1 if there is none .
train
true
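a minimal sketch for next_event_indexer above, with integers standing in for dates and sids (the real inputs are sorted DatetimeIndex/sid arrays); assumes validate_event_metadata accepts these shapes:
import numpy as np
all_dates = np.array([0, 1, 2, 3])
all_sids = np.array([10, 20])
out = next_event_indexer(all_dates, all_sids,
                         event_dates=np.array([2]),       # event happens at date 2
                         event_timestamps=np.array([1]),  # but is known from date 1
                         event_sids=np.array([20]))
# out[:, 1] == [-1, 0, 0, -1]: event 0 is the next event for sid 20 on dates 1 and 2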
8,597
def fix_ampersands(value): return unencoded_ampersands_re.sub('&amp;', force_unicode(value))
[ "def", "fix_ampersands", "(", "value", ")", ":", "return", "unencoded_ampersands_re", ".", "sub", "(", "'&amp;'", ",", "force_unicode", "(", "value", ")", ")" ]
replaces unencoded ampersands with &amp; entities .
train
false
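a usage sketch for fix_ampersands above, assuming unencoded_ampersands_re matches only bare ampersands (the usual Django definition):
fix_ampersands('Tom & Jerry &amp; friends')
# -> u'Tom &amp; Jerry &amp; friends': the already-encoded entity is left alone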
8,598
def _send_notification(operation, resource_type, resource_id, actor_dict=None, public=True): payload = {'resource_info': resource_id} if actor_dict: payload['actor_id'] = actor_dict['id'] payload['actor_type'] = actor_dict['type'] payload['actor_operation'] = actor_dict['actor_operation'] notify_event_callbacks(SERVICE, resource_type, operation, payload) if (public and (CONF.notification_format == 'basic')): notifier = _get_notifier() if notifier: context = {} event_type = ('%(service)s.%(resource_type)s.%(operation)s' % {'service': SERVICE, 'resource_type': resource_type, 'operation': operation}) if _check_notification_opt_out(event_type, outcome=None): return try: notifier.info(context, event_type, payload) except Exception: LOG.exception(_LE('Failed to send %(res_id)s %(event_type)s notification'), {'res_id': resource_id, 'event_type': event_type})
[ "def", "_send_notification", "(", "operation", ",", "resource_type", ",", "resource_id", ",", "actor_dict", "=", "None", ",", "public", "=", "True", ")", ":", "payload", "=", "{", "'resource_info'", ":", "resource_id", "}", "if", "actor_dict", ":", "payload", "[", "'actor_id'", "]", "=", "actor_dict", "[", "'id'", "]", "payload", "[", "'actor_type'", "]", "=", "actor_dict", "[", "'type'", "]", "payload", "[", "'actor_operation'", "]", "=", "actor_dict", "[", "'actor_operation'", "]", "notify_event_callbacks", "(", "SERVICE", ",", "resource_type", ",", "operation", ",", "payload", ")", "if", "(", "public", "and", "(", "CONF", ".", "notification_format", "==", "'basic'", ")", ")", ":", "notifier", "=", "_get_notifier", "(", ")", "if", "notifier", ":", "context", "=", "{", "}", "event_type", "=", "(", "'%(service)s.%(resource_type)s.%(operation)s'", "%", "{", "'service'", ":", "SERVICE", ",", "'resource_type'", ":", "resource_type", ",", "'operation'", ":", "operation", "}", ")", "if", "_check_notification_opt_out", "(", "event_type", ",", "outcome", "=", "None", ")", ":", "return", "try", ":", "notifier", ".", "info", "(", "context", ",", "event_type", ",", "payload", ")", "except", "Exception", ":", "LOG", ".", "exception", "(", "_LE", "(", "'Failed to send %(res_id)s %(event_type)s notification'", ")", ",", "{", "'res_id'", ":", "resource_id", ",", "'event_type'", ":", "event_type", "}", ")" ]
send notification to inform observers about the affected resource .
train
false
8,600
@pytest.mark.skipif(u'not HAS_SCIPY') def test_efunc_vs_invefunc_flrw(): z0 = 0.5 z = np.array([0.5, 1.0, 2.0, 5.0]) cosmo = test_cos_sub() assert allclose(cosmo.efunc(z0), (1.0 / cosmo.inv_efunc(z0))) assert allclose(cosmo.efunc(z), (1.0 / cosmo.inv_efunc(z))) cosmo = test_cos_subnu() assert allclose(cosmo.efunc(z0), (1.0 / cosmo.inv_efunc(z0))) assert allclose(cosmo.efunc(z), (1.0 / cosmo.inv_efunc(z)))
[ "@", "pytest", ".", "mark", ".", "skipif", "(", "u'not HAS_SCIPY'", ")", "def", "test_efunc_vs_invefunc_flrw", "(", ")", ":", "z0", "=", "0.5", "z", "=", "np", ".", "array", "(", "[", "0.5", ",", "1.0", ",", "2.0", ",", "5.0", "]", ")", "cosmo", "=", "test_cos_sub", "(", ")", "assert", "allclose", "(", "cosmo", ".", "efunc", "(", "z0", ")", ",", "(", "1.0", "/", "cosmo", ".", "inv_efunc", "(", "z0", ")", ")", ")", "assert", "allclose", "(", "cosmo", ".", "efunc", "(", "z", ")", ",", "(", "1.0", "/", "cosmo", ".", "inv_efunc", "(", "z", ")", ")", ")", "cosmo", "=", "test_cos_subnu", "(", ")", "assert", "allclose", "(", "cosmo", ".", "efunc", "(", "z0", ")", ",", "(", "1.0", "/", "cosmo", ".", "inv_efunc", "(", "z0", ")", ")", ")", "assert", "allclose", "(", "cosmo", ".", "efunc", "(", "z", ")", ",", "(", "1.0", "/", "cosmo", ".", "inv_efunc", "(", "z", ")", ")", ")" ]
test that efunc and inv_efunc give inverse values .
train
false
8,601
def join_bg_jobs(bg_jobs, timeout=None): (ret, timeout_error) = (0, False) for bg_job in bg_jobs: bg_job.output_prepare(StringIO.StringIO(), StringIO.StringIO()) try: start_time = time.time() timeout_error = _wait_for_commands(bg_jobs, start_time, timeout) for bg_job in bg_jobs: bg_job.process_output(stdout=True, final_read=True) bg_job.process_output(stdout=False, final_read=True) finally: for bg_job in bg_jobs: bg_job.cleanup() if timeout_error: raise error.CmdError(bg_jobs[0].command, bg_jobs[0].result, ('Command(s) did not complete within %d seconds' % timeout)) return bg_jobs
[ "def", "join_bg_jobs", "(", "bg_jobs", ",", "timeout", "=", "None", ")", ":", "(", "ret", ",", "timeout_error", ")", "=", "(", "0", ",", "False", ")", "for", "bg_job", "in", "bg_jobs", ":", "bg_job", ".", "output_prepare", "(", "StringIO", ".", "StringIO", "(", ")", ",", "StringIO", ".", "StringIO", "(", ")", ")", "try", ":", "start_time", "=", "time", ".", "time", "(", ")", "timeout_error", "=", "_wait_for_commands", "(", "bg_jobs", ",", "start_time", ",", "timeout", ")", "for", "bg_job", "in", "bg_jobs", ":", "bg_job", ".", "process_output", "(", "stdout", "=", "True", ",", "final_read", "=", "True", ")", "bg_job", ".", "process_output", "(", "stdout", "=", "False", ",", "final_read", "=", "True", ")", "finally", ":", "for", "bg_job", "in", "bg_jobs", ":", "bg_job", ".", "cleanup", "(", ")", "if", "timeout_error", ":", "raise", "error", ".", "CmdError", "(", "bg_jobs", "[", "0", "]", ".", "command", ",", "bg_jobs", "[", "0", "]", ".", "result", ",", "(", "'Command(s) did not complete within %d seconds'", "%", "timeout", ")", ")", "return", "bg_jobs" ]
joins the bg_jobs with the current thread .
train
false
8,602
def show_item_groups_in_website(): products = frappe.get_doc(u'Item Group', u'Products') products.show_in_website = 1 products.route = u'products' products.save()
[ "def", "show_item_groups_in_website", "(", ")", ":", "products", "=", "frappe", ".", "get_doc", "(", "u'Item Group'", ",", "u'Products'", ")", "products", ".", "show_in_website", "=", "1", "products", ".", "route", "=", "u'products'", "products", ".", "save", "(", ")" ]
set show_in_website=1 for the products item group .
train
false
8,603
def dmp_euclidean_prs(f, g, u, K): if (not u): return dup_euclidean_prs(f, g, K) else: raise MultivariatePolynomialError(f, g)
[ "def", "dmp_euclidean_prs", "(", "f", ",", "g", ",", "u", ",", "K", ")", ":", "if", "(", "not", "u", ")", ":", "return", "dup_euclidean_prs", "(", "f", ",", "g", ",", "K", ")", "else", ":", "raise", "MultivariatePolynomialError", "(", "f", ",", "g", ")" ]
euclidean polynomial remainder sequence in k[x] .
train
false
8,604
def _count_newlines_from_end(in_str): try: i = len(in_str) j = (i - 1) while (in_str[j] == '\n'): j -= 1 return ((i - 1) - j) except IndexError: return i
[ "def", "_count_newlines_from_end", "(", "in_str", ")", ":", "try", ":", "i", "=", "len", "(", "in_str", ")", "j", "=", "(", "i", "-", "1", ")", "while", "(", "in_str", "[", "j", "]", "==", "'\\n'", ")", ":", "j", "-=", "1", "return", "(", "(", "i", "-", "1", ")", "-", "j", ")", "except", "IndexError", ":", "return", "i" ]
counts the number of newlines at the end of a string .
train
false
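a usage sketch for _count_newlines_from_end above; note the IndexError branch makes the empty string return 0:
_count_newlines_from_end('abc\n\n')  # -> 2
_count_newlines_from_end('abc')      # -> 0
_count_newlines_from_end('')         # -> 0 (via the except branch)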
8,607
def single_tab(pl, segment_info, mode): return (len(list_tabpages()) == 1)
[ "def", "single_tab", "(", "pl", ",", "segment_info", ",", "mode", ")", ":", "return", "(", "len", "(", "list_tabpages", "(", ")", ")", "==", "1", ")" ]
returns true if vim has only one tab opened .
train
false
8,609
def EncodeData(chart, series, y_min, y_max, encoder): assert ((y_min is None) == (y_max is None)) if (y_min is not None): def _ScaleAndEncode(series): series = ScaleData(series, y_min, y_max, encoder.min, encoder.max) return encoder.Encode(series) encoded_series = [_ScaleAndEncode(s) for s in series] else: encoded_series = [encoder.Encode(s) for s in series] result = JoinLists(**{'data': encoded_series}) result['data'] = (encoder.prefix + result['data']) return result
[ "def", "EncodeData", "(", "chart", ",", "series", ",", "y_min", ",", "y_max", ",", "encoder", ")", ":", "assert", "(", "(", "y_min", "is", "None", ")", "==", "(", "y_max", "is", "None", ")", ")", "if", "(", "y_min", "is", "not", "None", ")", ":", "def", "_ScaleAndEncode", "(", "series", ")", ":", "series", "=", "ScaleData", "(", "series", ",", "y_min", ",", "y_max", ",", "encoder", ".", "min", ",", "encoder", ".", "max", ")", "return", "encoder", ".", "Encode", "(", "series", ")", "encoded_series", "=", "[", "_ScaleAndEncode", "(", "s", ")", "for", "s", "in", "series", "]", "else", ":", "encoded_series", "=", "[", "encoder", ".", "Encode", "(", "s", ")", "for", "s", "in", "series", "]", "result", "=", "JoinLists", "(", "**", "{", "'data'", ":", "encoded_series", "}", ")", "result", "[", "'data'", "]", "=", "(", "encoder", ".", "prefix", "+", "result", "[", "'data'", "]", ")", "return", "result" ]
format the given data series in plain or extended format .
train
false
8,610
def stringvalue(string): return string.replace((u'\\' + string[0]), string[0])[1:(-1)]
[ "def", "stringvalue", "(", "string", ")", ":", "return", "string", ".", "replace", "(", "(", "u'\\\\'", "+", "string", "[", "0", "]", ")", ",", "string", "[", "0", "]", ")", "[", "1", ":", "(", "-", "1", ")", "]" ]
retrieve actual value of string without quotes .
train
false
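a usage sketch for stringvalue above; the input is a quoted token whose embedded quotes are backslash-escaped:
stringvalue('"a \\"quoted\\" word"')  # -> 'a "quoted" word'
stringvalue("'plain'")                # -> 'plain'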
8,611
def threading_data(data=None, fn=None, **kwargs): def apply_fn(results, i, data, kwargs): results[i] = fn(data, **kwargs) results = ([None] * len(data)) threads = [] for i in range(len(data)): t = threading.Thread(name='threading_and_return', target=apply_fn, args=(results, i, data[i], kwargs)) t.start() threads.append(t) for t in threads: t.join() return np.asarray(results)
[ "def", "threading_data", "(", "data", "=", "None", ",", "fn", "=", "None", ",", "**", "kwargs", ")", ":", "def", "apply_fn", "(", "results", ",", "i", ",", "data", ",", "kwargs", ")", ":", "results", "[", "i", "]", "=", "fn", "(", "data", ",", "**", "kwargs", ")", "results", "=", "(", "[", "None", "]", "*", "len", "(", "data", ")", ")", "threads", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "data", ")", ")", ":", "t", "=", "threading", ".", "Thread", "(", "name", "=", "'threading_and_return'", ",", "target", "=", "apply_fn", ",", "args", "=", "(", "results", ",", "i", ",", "data", "[", "i", "]", ",", "kwargs", ")", ")", "t", ".", "start", "(", ")", "threads", ".", "append", "(", "t", ")", "for", "t", "in", "threads", ":", "t", ".", "join", "(", ")", "return", "np", ".", "asarray", "(", "results", ")" ]
return a batch of results by applying fn to each item of data in a separate thread .
train
false
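a usage sketch for threading_data above; each element of data is processed by fn in its own thread and the results come back as one numpy array:
results = threading_data(data=[1, 2, 3, 4], fn=lambda x, power: x ** power, power=2)
# -> array([ 1,  4,  9, 16])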
8,612
def _tolerance(X, tol): if sp.issparse(X): variances = mean_variance_axis(X, axis=0)[1] else: variances = np.var(X, axis=0) return (np.mean(variances) * tol)
[ "def", "_tolerance", "(", "X", ",", "tol", ")", ":", "if", "sp", ".", "issparse", "(", "X", ")", ":", "variances", "=", "mean_variance_axis", "(", "X", ",", "axis", "=", "0", ")", "[", "1", "]", "else", ":", "variances", "=", "np", ".", "var", "(", "X", ",", "axis", "=", "0", ")", "return", "(", "np", ".", "mean", "(", "variances", ")", "*", "tol", ")" ]
return a tolerance which is independent of the dataset .
train
false
8,614
def split_addr(addr, encoding): if ('@' in addr): (localpart, domain) = addr.split('@', 1) try: localpart.encode('ascii') except UnicodeEncodeError: localpart = Header(localpart, encoding).encode() domain = domain.encode('idna').decode('ascii') else: localpart = Header(addr, encoding).encode() domain = '' return (localpart, domain)
[ "def", "split_addr", "(", "addr", ",", "encoding", ")", ":", "if", "(", "'@'", "in", "addr", ")", ":", "(", "localpart", ",", "domain", ")", "=", "addr", ".", "split", "(", "'@'", ",", "1", ")", "try", ":", "localpart", ".", "encode", "(", "'ascii'", ")", "except", "UnicodeEncodeError", ":", "localpart", "=", "Header", "(", "localpart", ",", "encoding", ")", ".", "encode", "(", ")", "domain", "=", "domain", ".", "encode", "(", "'idna'", ")", ".", "decode", "(", "'ascii'", ")", "else", ":", "localpart", "=", "Header", "(", "addr", ",", "encoding", ")", ".", "encode", "(", ")", "domain", "=", "''", "return", "(", "localpart", ",", "domain", ")" ]
split the address into local part and domain .
train
false
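a usage sketch for split_addr above; non-ascii domains are IDNA-encoded, and non-ascii local parts get RFC 2047 encoding (the exact encoded form depends on the charset):
split_addr('fred@bücher.example', 'utf-8')
# -> ('fred', 'xn--bcher-kva.example')
split_addr('séb@example.com', 'utf-8')
# -> ('=?utf-8?b?c8OpYg==?=', 'example.com') (roughly; Header picks the encoding)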
8,615
def make_pad_velocity_curve_message(index, velocities): assert (len(velocities) == PAD_VELOCITY_CURVE_CHUNK_SIZE) return make_message(32, ((index,) + tuple(velocities)))
[ "def", "make_pad_velocity_curve_message", "(", "index", ",", "velocities", ")", ":", "raise", "(", "(", "len", "(", "velocities", ")", "==", "PAD_VELOCITY_CURVE_CHUNK_SIZE", ")", "or", "AssertionError", ")", "return", "make_message", "(", "32", ",", "(", "(", "index", ",", ")", "+", "tuple", "(", "velocities", ")", ")", ")" ]
updates a chunk of velocities in the voltage to velocity table .
train
false
8,616
def test_conversion_to_and_from_physical_quantities(): mst = ([10.0, 12.0, 14.0] * u.STmag) flux_lambda = mst.physical mst_roundtrip = flux_lambda.to(u.STmag) assert isinstance(mst_roundtrip, u.Magnitude) assert (mst_roundtrip.unit == mst.unit) assert_allclose(mst_roundtrip.value, mst.value) wave = ([4956.8, 4959.55, 4962.3] * u.AA) flux_nu = mst.to(u.Jy, equivalencies=u.spectral_density(wave)) mst_roundtrip2 = flux_nu.to(u.STmag, u.spectral_density(wave)) assert isinstance(mst_roundtrip2, u.Magnitude) assert (mst_roundtrip2.unit == mst.unit) assert_allclose(mst_roundtrip2.value, mst.value)
[ "def", "test_conversion_to_and_from_physical_quantities", "(", ")", ":", "mst", "=", "(", "[", "10.0", ",", "12.0", ",", "14.0", "]", "*", "u", ".", "STmag", ")", "flux_lambda", "=", "mst", ".", "physical", "mst_roundtrip", "=", "flux_lambda", ".", "to", "(", "u", ".", "STmag", ")", "assert", "isinstance", "(", "mst_roundtrip", ",", "u", ".", "Magnitude", ")", "assert", "(", "mst_roundtrip", ".", "unit", "==", "mst", ".", "unit", ")", "assert_allclose", "(", "mst_roundtrip", ".", "value", ",", "mst", ".", "value", ")", "wave", "=", "(", "[", "4956.8", ",", "4959.55", ",", "4962.3", "]", "*", "u", ".", "AA", ")", "flux_nu", "=", "mst", ".", "to", "(", "u", ".", "Jy", ",", "equivalencies", "=", "u", ".", "spectral_density", "(", "wave", ")", ")", "mst_roundtrip2", "=", "flux_nu", ".", "to", "(", "u", ".", "STmag", ",", "u", ".", "spectral_density", "(", "wave", ")", ")", "assert", "isinstance", "(", "mst_roundtrip2", ",", "u", ".", "Magnitude", ")", "assert", "(", "mst_roundtrip2", ".", "unit", "==", "mst", ".", "unit", ")", "assert_allclose", "(", "mst_roundtrip2", ".", "value", ",", "mst", ".", "value", ")" ]
ensures we can convert magnitudes to and from regular physical quantities .
train
false
8,617
def fill_with_sample_data(rc_object): for kind in ALL_FOUR: with open(getattr(rc_object, kind), 'w') as f: f.write(kind)
[ "def", "fill_with_sample_data", "(", "rc_object", ")", ":", "for", "kind", "in", "ALL_FOUR", ":", "with", "open", "(", "getattr", "(", "rc_object", ",", "kind", ")", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "kind", ")" ]
put dummy data into all four files of this renewablecert .
train
false
8,618
def test_on_focus_changed_issue1484(monkeypatch, qapp, caplog): monkeypatch.setattr(app, 'qApp', qapp) buf = QBuffer() app.on_focus_changed(buf, buf) assert (len(caplog.records) == 1) record = caplog.records[0] expected = 'on_focus_changed called with non-QWidget {!r}'.format(buf) assert (record.message == expected)
[ "def", "test_on_focus_changed_issue1484", "(", "monkeypatch", ",", "qapp", ",", "caplog", ")", ":", "monkeypatch", ".", "setattr", "(", "app", ",", "'qApp'", ",", "qapp", ")", "buf", "=", "QBuffer", "(", ")", "app", ".", "on_focus_changed", "(", "buf", ",", "buf", ")", "assert", "(", "len", "(", "caplog", ".", "records", ")", "==", "1", ")", "record", "=", "caplog", ".", "records", "[", "0", "]", "expected", "=", "'on_focus_changed called with non-QWidget {!r}'", ".", "format", "(", "buf", ")", "assert", "(", "record", ".", "message", "==", "expected", ")" ]
check what happens when on_focus_changed is called with wrong args .
train
false
8,619
def bootstrap_app(): from salt.netapi.rest_cherrypy import app import salt.config __opts__ = salt.config.client_config(os.environ.get('SALT_MASTER_CONFIG', '/etc/salt/master')) return app.get_app(__opts__)
[ "def", "bootstrap_app", "(", ")", ":", "from", "salt", ".", "netapi", ".", "rest_cherrypy", "import", "app", "import", "salt", ".", "config", "__opts__", "=", "salt", ".", "config", ".", "client_config", "(", "os", ".", "environ", ".", "get", "(", "'SALT_MASTER_CONFIG'", ",", "'/etc/salt/master'", ")", ")", "return", "app", ".", "get_app", "(", "__opts__", ")" ]
grab the opts dict of the master config and return the rest_cherrypy app built from it .
train
true
8,620
def duplication_matrix(n): tmp = np.eye(((n * (n + 1)) // 2)) return np.array([unvech(x).ravel() for x in tmp]).T
[ "def", "duplication_matrix", "(", "n", ")", ":", "tmp", "=", "np", ".", "eye", "(", "(", "(", "n", "*", "(", "n", "+", "1", ")", ")", "//", "2", ")", ")", "return", "np", ".", "array", "(", "[", "unvech", "(", "x", ")", ".", "ravel", "(", ")", "for", "x", "in", "tmp", "]", ")", ".", "T" ]
create duplication matrix d_n which satisfies vec(s) = d_n vech(s) for symmetric matrix s ; returns d_n : ndarray .
train
false
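a sketch checking the defining identity for duplication_matrix above, assuming unvech stacks the lower triangle column-wise:
import numpy as np
D2 = duplication_matrix(2)                  # shape (4, 3)
S = np.array([[1., 2.], [2., 5.]])
vech_S = np.array([1., 2., 5.])             # lower-triangle stack of S
np.allclose(np.dot(D2, vech_S), S.ravel())  # True: vec(S) = D_n vech(S)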
8,621
def pick_channels(ch_names, include, exclude=[]): if (len(np.unique(ch_names)) != len(ch_names)): raise RuntimeError('ch_names is not a unique list, picking is unsafe') _check_excludes_includes(include) _check_excludes_includes(exclude) if (not isinstance(include, set)): include = set(include) if (not isinstance(exclude, set)): exclude = set(exclude) sel = [] for (k, name) in enumerate(ch_names): if (((len(include) == 0) or (name in include)) and (name not in exclude)): sel.append(k) return np.array(sel, int)
[ "def", "pick_channels", "(", "ch_names", ",", "include", ",", "exclude", "=", "[", "]", ")", ":", "if", "(", "len", "(", "np", ".", "unique", "(", "ch_names", ")", ")", "!=", "len", "(", "ch_names", ")", ")", ":", "raise", "RuntimeError", "(", "'ch_names is not a unique list, picking is unsafe'", ")", "_check_excludes_includes", "(", "include", ")", "_check_excludes_includes", "(", "exclude", ")", "if", "(", "not", "isinstance", "(", "include", ",", "set", ")", ")", ":", "include", "=", "set", "(", "include", ")", "if", "(", "not", "isinstance", "(", "exclude", ",", "set", ")", ")", ":", "exclude", "=", "set", "(", "exclude", ")", "sel", "=", "[", "]", "for", "(", "k", ",", "name", ")", "in", "enumerate", "(", "ch_names", ")", ":", "if", "(", "(", "(", "len", "(", "include", ")", "==", "0", ")", "or", "(", "name", "in", "include", ")", ")", "and", "(", "name", "not", "in", "exclude", ")", ")", ":", "sel", ".", "append", "(", "k", ")", "return", "np", ".", "array", "(", "sel", ",", "int", ")" ]
pick channels by names .
train
false
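a usage sketch for pick_channels above; an empty include list means "keep everything not excluded":
ch_names = ['MEG 001', 'MEG 002', 'EEG 001']
pick_channels(ch_names, include=['MEG 001', 'EEG 001'], exclude=['EEG 001'])
# -> array([0])
pick_channels(ch_names, include=[], exclude=['MEG 002'])
# -> array([0, 2])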
8,623
def get_protocol(): protocol = 'http' if getattr(settings, 'USERENA_USE_HTTPS', userena_settings.DEFAULT_USERENA_USE_HTTPS): protocol = 'https' return protocol
[ "def", "get_protocol", "(", ")", ":", "protocol", "=", "'http'", "if", "getattr", "(", "settings", ",", "'USERENA_USE_HTTPS'", ",", "userena_settings", ".", "DEFAULT_USERENA_USE_HTTPS", ")", ":", "protocol", "=", "'https'", "return", "protocol" ]
returns a string with the current protocol .
train
false
8,624
def collect_info(path_list): files_list = [] dir_list = [] for path in path_list: temp_files_list = [] temp_dir_list = [] for (root, dirs, files) in os.walk(path): temp_files_list += files temp_dir_list += dirs files_list.append(temp_files_list) dir_list.append(temp_dir_list) return (files_list, dir_list)
[ "def", "collect_info", "(", "path_list", ")", ":", "files_list", "=", "[", "]", "dir_list", "=", "[", "]", "for", "path", "in", "path_list", ":", "temp_files_list", "=", "[", "]", "temp_dir_list", "=", "[", "]", "for", "(", "root", ",", "dirs", ",", "files", ")", "in", "os", ".", "walk", "(", "path", ")", ":", "temp_files_list", "+=", "files", "temp_dir_list", "+=", "dirs", "files_list", ".", "append", "(", "temp_files_list", ")", "dir_list", ".", "append", "(", "temp_dir_list", ")", "return", "(", "files_list", ",", "dir_list", ")" ]
recursively collect dirs and files under each directory in path_list .
train
false
8,625
@public def hermite_poly(n, x=None, **args): if (n < 0): raise ValueError(("can't generate Hermite polynomial of degree %s" % n)) poly = DMP(dup_hermite(int(n), ZZ), ZZ) if (x is not None): poly = Poly.new(poly, x) else: poly = PurePoly.new(poly, Dummy('x')) if (not args.get('polys', False)): return poly.as_expr() else: return poly
[ "@", "public", "def", "hermite_poly", "(", "n", ",", "x", "=", "None", ",", "**", "args", ")", ":", "if", "(", "n", "<", "0", ")", ":", "raise", "ValueError", "(", "(", "\"can't generate Hermite polynomial of degree %s\"", "%", "n", ")", ")", "poly", "=", "DMP", "(", "dup_hermite", "(", "int", "(", "n", ")", ",", "ZZ", ")", ",", "ZZ", ")", "if", "(", "x", "is", "not", "None", ")", ":", "poly", "=", "Poly", ".", "new", "(", "poly", ",", "x", ")", "else", ":", "poly", "=", "PurePoly", ".", "new", "(", "poly", ",", "Dummy", "(", "'x'", ")", ")", "if", "(", "not", "args", ".", "get", "(", "'polys'", ",", "False", ")", ")", ":", "return", "poly", ".", "as_expr", "(", ")", "else", ":", "return", "poly" ]
generates hermite polynomial of degree n in x .
train
false
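a usage sketch for hermite_poly above (physicists' Hermite polynomials):
from sympy.abc import x
hermite_poly(3, x)              # -> 8*x**3 - 12*x
hermite_poly(3, x, polys=True)  # -> Poly(8*x**3 - 12*x, x, domain='ZZ')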
8,626
def clear_feedback_message_references(user_id, exploration_id, thread_id): model = feedback_models.UnsentFeedbackEmailModel.get(user_id, strict=False) if (model is None): return updated_references = [] for reference in model.feedback_message_references: if ((reference['exploration_id'] != exploration_id) or (reference['thread_id'] != thread_id)): updated_references.append(reference) if (not updated_references): model.delete() else: model.feedback_message_references = updated_references model.put()
[ "def", "clear_feedback_message_references", "(", "user_id", ",", "exploration_id", ",", "thread_id", ")", ":", "model", "=", "feedback_models", ".", "UnsentFeedbackEmailModel", ".", "get", "(", "user_id", ",", "strict", "=", "False", ")", "if", "(", "model", "is", "None", ")", ":", "return", "updated_references", "=", "[", "]", "for", "reference", "in", "model", ".", "feedback_message_references", ":", "if", "(", "(", "reference", "[", "'exploration_id'", "]", "!=", "exploration_id", ")", "or", "(", "reference", "[", "'thread_id'", "]", "!=", "thread_id", ")", ")", ":", "updated_references", ".", "append", "(", "reference", ")", "if", "(", "not", "updated_references", ")", ":", "model", ".", "delete", "(", ")", "else", ":", "model", ".", "feedback_message_references", "=", "updated_references", "model", ".", "put", "(", ")" ]
removes feedback message references associated with a feedback thread .
train
false
8,627
def js_alert(ident, level, message): return ('try {add_message("%(ident)s", "%(level)s", "%(message)s");}\ncatch(err) {alert("%(levelup)s: %(message)s");}\n' % {'ident': ident, 'level': level, 'levelup': level.upper(), 'message': message.replace('"', '\\"')})
[ "def", "js_alert", "(", "ident", ",", "level", ",", "message", ")", ":", "return", "(", "'try {add_message(\"%(ident)s\", \"%(level)s\", \"%(message)s\");}\\ncatch(err) {alert(\"%(levelup)s: %(message)s\");}\\n'", "%", "{", "'ident'", ":", "ident", ",", "'level'", ":", "level", ",", "'levelup'", ":", "level", ".", "upper", "(", ")", ",", "'message'", ":", "message", ".", "replace", "(", "'\"'", ",", "'\\\\\"'", ")", "}", ")" ]
this function returns a string containing js code to generate an alert message .
train
false
8,628
def exists_in_default_path(sheet): default_path_sheet = os.path.join(sheets.default_path(), sheet) return ((sheet in sheets.get()) and os.access(default_path_sheet, os.R_OK))
[ "def", "exists_in_default_path", "(", "sheet", ")", ":", "default_path_sheet", "=", "os", ".", "path", ".", "join", "(", "sheets", ".", "default_path", "(", ")", ",", "sheet", ")", "return", "(", "(", "sheet", "in", "sheets", ".", "get", "(", ")", ")", "and", "os", ".", "access", "(", "default_path_sheet", ",", "os", ".", "R_OK", ")", ")" ]
predicate that returns true if the sheet exists in default_path .
train
false
8,629
def _dot2int(v): t = [int(i) for i in v.split('.')] if (len(t) == 3): t.append(0) elif (len(t) != 4): raise ValueError(('"i.i.i[.i]": %r' % (v,))) if ((min(t) < 0) or (max(t) > 255)): raise ValueError(('[0..255]: %r' % (v,))) i = t.pop(0) while t: i = ((i << 8) + t.pop(0)) return i
[ "def", "_dot2int", "(", "v", ")", ":", "t", "=", "[", "int", "(", "i", ")", "for", "i", "in", "v", ".", "split", "(", "'.'", ")", "]", "if", "(", "len", "(", "t", ")", "==", "3", ")", ":", "t", ".", "append", "(", "0", ")", "elif", "(", "len", "(", "t", ")", "!=", "4", ")", ":", "raise", "ValueError", "(", "(", "'\"i.i.i[.i]\": %r'", "%", "(", "v", ",", ")", ")", ")", "if", "(", "(", "min", "(", "t", ")", "<", "0", ")", "or", "(", "max", "(", "t", ")", ">", "255", ")", ")", ":", "raise", "ValueError", "(", "(", "'[0..255]: %r'", "%", "(", "v", ",", ")", ")", ")", "i", "=", "t", ".", "pop", "(", "0", ")", "while", "t", ":", "i", "=", "(", "(", "i", "<<", "8", ")", "+", "t", ".", "pop", "(", "0", ")", ")", "return", "i" ]
convert an 'i.i.i[.i]' dotted version string to an integer .
train
true
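a usage sketch for _dot2int above; a missing fourth component defaults to 0:
_dot2int('1.2.3')    # -> 16909056 (== 0x01020300)
_dot2int('1.2.3.4')  # -> 16909060 (== 0x01020304)
_dot2int('1.2')      # raises ValueError: '"i.i.i[.i]": ...'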
8,630
def percentOverlap(x1, x2, size): nonZeroX1 = np.count_nonzero(x1) nonZeroX2 = np.count_nonzero(x2) minX1X2 = min(nonZeroX1, nonZeroX2) percentOverlap = 0 if (minX1X2 > 0): percentOverlap = (float(np.dot(x1, x2)) / float(minX1X2)) return percentOverlap
[ "def", "percentOverlap", "(", "x1", ",", "x2", ",", "size", ")", ":", "nonZeroX1", "=", "np", ".", "count_nonzero", "(", "x1", ")", "nonZeroX2", "=", "np", ".", "count_nonzero", "(", "x2", ")", "minX1X2", "=", "min", "(", "nonZeroX1", ",", "nonZeroX2", ")", "percentOverlap", "=", "0", "if", "(", "minX1X2", ">", "0", ")", ":", "percentOverlap", "=", "(", "float", "(", "np", ".", "dot", "(", "x1", ",", "x2", ")", ")", "/", "float", "(", "minX1X2", ")", ")", "return", "percentOverlap" ]
computes the percentage of overlap between vectors x1 and x2 .
train
true
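a usage sketch for percentOverlap above with binary SDR-style vectors; despite the name the return value is a fraction in [0, 1], and the size argument is unused:
import numpy as np
x1 = np.array([1, 0, 1, 1, 0])
x2 = np.array([1, 1, 1, 0, 0])
percentOverlap(x1, x2, 5)  # -> 0.666...: dot product 2 over min(3, 3) active bits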
8,632
def detach_role_policy(policy_name, role_name, region=None, key=None, keyid=None, profile=None): conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile) try: conn.detach_role_policy(policy_arn, role_name) log.info('Detached {0} policy from role {1}.'.format(policy_name, role_name)) except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to detach {0} policy from role {1}.' log.error(msg.format(policy_name, role_name)) return False return True
[ "def", "detach_role_policy", "(", "policy_name", ",", "role_name", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "policy_arn", "=", "_get_policy_arn", "(", "policy_name", ",", "region", ",", "key", ",", "keyid", ",", "profile", ")", "try", ":", "conn", ".", "detach_role_policy", "(", "policy_arn", ",", "role_name", ")", "log", ".", "info", "(", "'Detached {0} policy to role {1}.'", ".", "format", "(", "policy_name", ",", "role_name", ")", ")", "except", "boto", ".", "exception", ".", "BotoServerError", "as", "e", ":", "log", ".", "debug", "(", "e", ")", "msg", "=", "'Failed to detach {0} policy to role {1}.'", "log", ".", "error", "(", "msg", ".", "format", "(", "policy_name", ",", "role_name", ")", ")", "return", "False", "return", "True" ]
detach a managed policy from a role .
train
true
8,634
def get_last_day(dt): return (get_first_day(dt, 0, 1) + datetime.timedelta((-1)))
[ "def", "get_last_day", "(", "dt", ")", ":", "return", "(", "get_first_day", "(", "dt", ",", "0", ",", "1", ")", "+", "datetime", ".", "timedelta", "(", "(", "-", "1", ")", ")", ")" ]
returns the last day of the month: the first day of the next month minus one day, via get_first_day and datetime .
train
false
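a usage sketch for get_last_day above, assuming get_first_day(dt, 0, 1) returns the first day of the following month:
import datetime
get_last_day(datetime.date(2024, 2, 10))  # -> datetime.date(2024, 2, 29)
get_last_day(datetime.date(2023, 12, 5))  # -> datetime.date(2023, 12, 31)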