Dataset columns: content — string (lengths 22 to 815k); id — int64 (values 0 to 4.91M).
def merge_vocab(pair: Tuple[str, str], input_vocab: Dict[str, int]) -> Tuple[Dict[str, int], List]:
    """
    >>> pair = ('w', 'o')
    >>> input_vocab = {'b i r d @': 3, 'w o r d @': 7, 'w o g @': 13}
    >>> new_vocab, new_pairs = merge_vocab(pair, input_vocab)
    >>> new_vocab
    {'b i r d @': 3, 'wo r d @': 7, 'wo g @': 13}
    >>> new_pairs
    [(('wo', 'r'), 7), (('o', 'r'), -7), (('wo', 'g'), 13), (('o', 'g'), -13)]
    """
    output_vocab = {}

    concat_pair_with_space = ' '.join(pair)
    concat_pair_with_space_escaped = regex.escape(concat_pair_with_space)
    concat_pair = ''.join(pair)
    reg = regex.compile('(^|[^ ]+ )(' + concat_pair_with_space_escaped + ')( [^ ]+|$)')

    added_pairs = []
    for word in input_vocab:
        word_occurences = input_vocab[word]
        match = reg.search(word)
        while match:
            # word changed
            if match.group(1) != '':
                subtoken_before = match.group(1)[:-1]
                added_pairs.append(((subtoken_before, concat_pair), word_occurences))
                if pair != (subtoken_before, pair[0]):
                    added_pairs.append(((subtoken_before, pair[0]), -word_occurences))
            if match.group(3) != '':
                subtoken_after = match.group(3)[1:]
                added_pairs.append(((concat_pair, subtoken_after), word_occurences))
                if pair != (pair[1], subtoken_after):
                    added_pairs.append(((pair[1], subtoken_after), -word_occurences))
            start, end = match.span(2)
            replacement = concat_pair
            word = word[:start] + replacement + word[end:]
            match = reg.search(word)
        output_vocab[word] = word_occurences
    return output_vocab, added_pairs
13,000
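A minimal driver sketch for the merge_vocab row above, assuming merge_vocab (and its regex import) is in scope. The greedy best-pair loop is a standard BPE training pattern; the train_bpe name and the incremental-count bookkeeping shown here are illustrative, not part of the original row.

from collections import Counter

def train_bpe(vocab, num_merges):
    # Count adjacent symbol pairs once, then keep them updated from the
    # deltas that merge_vocab returns.
    pair_counts = Counter()
    for word, freq in vocab.items():
        symbols = word.split()
        for a, b in zip(symbols, symbols[1:]):
            pair_counts[(a, b)] += freq
    for _ in range(num_merges):
        if not pair_counts:
            break
        best = max(pair_counts, key=pair_counts.get)
        vocab, delta = merge_vocab(best, vocab)
        del pair_counts[best]
        for pair, change in delta:
            pair_counts[pair] += change
    return vocab

vocab = {'w o r d @': 7, 'w o g @': 13, 'b i r d @': 3}
print(train_bpe(vocab, 2))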
def _handle_ping(dispatch, action):
    """Reply with PONG"""
    handle = "hdmimatrix@dummy"
    if action["payload"] == handle or action["payload"] == "*":
        dispatch(pong())
13,001
def visualize_bbox_act(img, bboxes, labels, act_preds,
                       classes=None, thickness=1,
                       font_scale=0.4, show=False,
                       wait_time=0, out_file=None):
    """Show the tracks with opencv."""
    assert bboxes.ndim == 2
    assert labels.ndim == 1
    assert bboxes.shape[0] == labels.shape[0]
    assert bboxes.shape[1] == 5
    if isinstance(img, str):
        img = mmcv.imread(img)

    img_shape = img.shape
    bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])
    bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])

    text_width, text_height = 8, 15
    for i, (bbox, label) in enumerate(zip(bboxes, labels), 0):
        x1, y1, x2, y2 = bbox[:4].astype(np.int32)
        score = float(bbox[-1])

        # bbox
        bbox_color = random_color(label)
        bbox_color = [int(255 * _c) for _c in bbox_color][::-1]
        cv2.rectangle(img, (x1, y1), (x2, y2), bbox_color, thickness=thickness)

        # score
        text = '{:.02f}'.format(score)
        width = len(text) * text_width
        img[y1 - text_height:y1, x1:x1 + width, :] = bbox_color
        cv2.putText(
            img,
            text, (x1, y1 - 2),
            cv2.FONT_HERSHEY_COMPLEX,
            font_scale,
            color=(0, 0, 0))

        classes_color = random_color(label + 1)
        text = classes[label]
        width = len(text) * text_width
        img[y1:y1 + text_height, x1:x1 + width, :] = bbox_color
        cv2.putText(img, text, (x1, y1 + text_height - 2),
                    cv2.FONT_HERSHEY_COMPLEX, font_scale, color=classes_color)

        # background_color = random_color(label + 5)
        background_color = [255, 204, 153]
        if (act_preds is not None) and (len(bboxes) == len(labels) == len(act_preds)):
            for j, act_pred in enumerate(act_preds[i]):
                text = '{}: {:.02f}'.format(act_pred[0], act_pred[1])
                width = len(text) * text_width
                img[y1 + text_height * (j + 2):y1 + text_height * (j + 3),
                    x1:x1 + width, :] = background_color
                cv2.putText(img, text, (x1, y1 + text_height * (j + 3) - 2),
                            cv2.FONT_HERSHEY_COMPLEX, font_scale,
                            color=classes_color)

    if show:
        mmcv.imshow(img, wait_time=wait_time)
    if out_file is not None:
        mmcv.imwrite(img, out_file)
    return img
13,002
def calculate_similarity(subgraph_degrees):
    """
    Given a list of subgraph degrees, this function calls the guidance
    function and calculates the similarity of a particular node with all
    of its non-connected nodes.

    :param subgraph_degrees: A list of lists containing the non-connected
        node and degrees of common neighbours from the subgraph.
    :return: A list of (non-connected node, similarity) tuples
    """
    similarity_dict = []
    for nc_node in subgraph_degrees:
        similarity = 0
        for common_node in nc_node[1]:
            # Getting the degree of the common neighbour node from the
            # original graph
            original_degree = graph.degrees.filter("id = '{}'".format(
                common_node.id)).select("degree").collect()
            # Getting the degree of the common neighbour node from the subgraph
            sub_degree = common_node.degree
            # Calling the function to calculate guidance for the common
            # neighbour node
            guidance = get_guidance(sub_degree, original_degree[0].degree)
            # Adding the guidance to the similarity of the non-connected node
            similarity += guidance
        similarity_dict.append((nc_node[0], similarity))
    return similarity_dict
13,003
def balance_thetas(theta_sets_types, theta_sets_values):
    """Repeats theta values such that all thetas lists have the same length."""
    n_sets = max([len(thetas) for thetas in theta_sets_types])
    for i, (types, values) in enumerate(zip(theta_sets_types, theta_sets_values)):
        assert len(types) == len(values)
        n_sets_before = len(types)
        if n_sets_before != n_sets:
            theta_sets_types[i] = [types[j % n_sets_before] for j in range(n_sets)]
            theta_sets_values[i] = [values[j % n_sets_before] for j in range(n_sets)]
    return theta_sets_types, theta_sets_values
13,004
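A small usage sketch for balance_thetas above, assuming it is in scope; the input lists are made up for illustration. The shorter list is padded by cycling its own entries until both lists have the maximum length.

types = [['sm', 'bsm'], ['sm']]
values = [[0.0, 1.0], [0.5]]
balanced_types, balanced_values = balance_thetas(types, values)
print(balanced_types)   # [['sm', 'bsm'], ['sm', 'sm']]
print(balanced_values)  # [[0.0, 1.0], [0.5, 0.5]]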
def _format_weights(df, col, targets, regs):
    """
    Reformat the edge table (regulator -> target) that's output by amusr
    into a pivoted table that the rest of the inferelator workflow can handle

    :param df: pd.DataFrame
        An edge table (regulator -> target) with columns containing model values
    :param col:
        Which column to pivot into values
    :param targets: list
        A list of target genes (the index of the output data)
    :param regs: list
        A list of regulators (the columns of the output data)
    :return out: pd.DataFrame [G x K]
        A [targets x regulators] dataframe pivoted from the edge dataframe
    """
    # Make sure that the value column is all numeric
    df[col] = pd.to_numeric(df[col])

    # Pivot an edge table into a matrix of values
    out = pd.pivot_table(df, index='target', columns='regulator', values=col, fill_value=0.)

    # Reindex to a [targets x regulators] dataframe and fill anything missing with 0s
    out = out.reindex(targets).reindex(regs, axis=1)
    out = out.fillna(value=0.)
    return out
13,005
def put_thread(req_thread: ReqThreadPut):
    """Put thread for video to DynamoDB"""
    try:
        item = thread_input.update_item(req_thread)  # renamed from `input` to avoid shadowing the builtin
        res = table.update_item(**item)
        return res
    except ClientError as err:
        err_message = err.response["Error"]["Message"]
        raise HTTPException(status_code=404, detail=err_message)
    except BaseException as err:
        raise HTTPException(status_code=404, detail=str(err))
13,006
def test_envelope_connection_id():
    """Test the property Envelope.connection_id."""
    envelope_context = EnvelopeContext(
        uri=URI("connection/author/connection_name/0.1.0")
    )
    envelope = Envelope(
        to="to",
        sender="sender",
        protocol_id=PublicId("author", "name", "0.1.0"),
        message=b"message",
        context=envelope_context,
    )
    assert envelope.connection_id == PublicId("author", "connection_name", "0.1.0")
13,007
def _get_or_create_campaign_team(name, owner, tasks, redundancy):
    """
    Creates CampaignTeam instance, if it does not exist yet.

    Returns reference to CampaignTeam instance.
    """
    # pylint: disable-msg=no-member
    _cteam = CampaignTeam.objects.get_or_create(
        teamName=name,
        owner=owner,
        requiredAnnotations=100,  # (tasks * redundancy),  # TODO: fix
        requiredHours=50,  # (tasks * redundancy) / 2,
        createdBy=owner,
    )
    _cteam[0].members.add(owner)
    _cteam[0].save()
    return _cteam[0]
13,008
def pytest_configure(config):
    """
    Prerequisites which need to be done before the suite runs
    """
    os.environ["env"] = config.getoption("--env")
    os.environ["api_retry_limit"] = config.getoption("--api_retry_limit")
    os.environ["log_level"] = config.getoption("--log_level")
    os.environ["api_wait_time"] = config.getoption("--api_wait_time")
13,009
def scaling_sorted(args):
    """Scale each affinity line by the per-line factor from the scales file."""
    scale_idx = args.scalecol - 1  # Python: 0-based indexing
    with open(args.affinity, 'r') as affn:
        header = affn.readline()
        sys.stdout.write(header)
        with open(args.signalScales, 'r') as scales:
            for aln, sln in zip(affn, scales):
                factor = float(sln.split()[scale_idx])
                annreg = aln.strip().split()
                scaled = [str(float(x) * factor) for x in annreg[1:]]
                sys.stdout.write(annreg[0] + '\t' + '\t'.join(scaled) + '\n')
            # assert that both files have the same number of lines
            assert not affn.readline(), 'Iteration through affinity file not exhaustive'
            assert not scales.readline(), 'Iteration through scaling file not exhaustive'
    return
13,010
def check_method(adata):
    """Check that method output fits expected API."""
    assert "labels_pred" in adata.obs
    return True
13,011
async def test_get_subscriber_subscriptions(web_client, container):
    """Check subscriber subscriptions getting handler."""
    newsfeed_id = '123'

    subscription_storage = container.subscription_storage()
    await subscription_storage.add(
        {
            'id': str(uuid.uuid4()),
            'newsfeed_id': '124',
            'to_newsfeed_id': newsfeed_id,
            'subscribed_at': datetime.datetime.utcnow().timestamp(),
        },
    )
    await subscription_storage.add(
        {
            'id': str(uuid.uuid4()),
            'newsfeed_id': '125',
            'to_newsfeed_id': newsfeed_id,
            'subscribed_at': datetime.datetime.utcnow().timestamp(),
        },
    )
    await subscription_storage.add(
        {
            'id': str(uuid.uuid4()),
            'newsfeed_id': '125',
            'to_newsfeed_id': '126',
            'subscribed_at': datetime.datetime.utcnow().timestamp(),
        },
    )

    response = await web_client.get(f'/newsfeed/{newsfeed_id}/subscribers/subscriptions/')

    assert response.status == 200
    data = await response.json()
    subscription_1, subscription_2 = data['results']
    assert uuid.UUID(subscription_1['id'])
    assert subscription_1['newsfeed_id'] == '125'
    assert subscription_1['to_newsfeed_id'] == newsfeed_id
    assert int(subscription_1['subscribed_at'])
    assert uuid.UUID(subscription_2['id'])
    assert subscription_2['newsfeed_id'] == '124'
    assert subscription_2['to_newsfeed_id'] == newsfeed_id
    assert int(subscription_2['subscribed_at'])
13,012
def build_index_block(in_channels,
                      out_channels,
                      kernel_size,
                      stride=2,
                      padding=0,
                      groups=1,
                      norm_cfg=dict(type='BN'),
                      use_nonlinear=False,
                      expansion=1):
    """Build a conv block for IndexBlock.

    Args:
        in_channels (int): The input channels of the block.
        out_channels (int): The output channels of the block.
        kernel_size (int): The kernel size of the block.
        stride (int, optional): The stride of the block. Defaults to 2.
        padding (int, optional): The padding of the block. Defaults to 0.
        groups (int, optional): The groups of the block. Defaults to 1.
        norm_cfg (dict, optional): The norm config of the block.
            Defaults to dict(type='BN').
        use_nonlinear (bool, optional): Whether to use a nonlinearity in the
            block. If true, a ConvModule with kernel size 1 will be appended
            and a ``ReLU6`` nonlinearity will be added to the original
            ConvModule. Defaults to False.
        expansion (int, optional): Expansion ratio of the middle channels.
            Effective when ``use_nonlinear`` is true. Defaults to 1.

    Returns:
        nn.Module: The built conv block.
    """
    if use_nonlinear:
        return nn.Sequential(
            ConvModule(
                in_channels,
                in_channels * expansion,
                kernel_size,
                stride=stride,
                padding=padding,
                groups=groups,
                norm_cfg=norm_cfg,
                act_cfg=dict(type='ReLU6')),
            ConvModule(
                in_channels * expansion,
                out_channels,
                1,
                stride=1,
                padding=0,
                groups=groups,
                bias=False,
                norm_cfg=None,
                act_cfg=None))
    else:
        return ConvModule(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=False,
            norm_cfg=None,
            act_cfg=None)
13,013
def nodes(*paths, type=None):
    """Call node() on each given path and return the list of results.

    nodes('foo', 'bar', ...) is equivalent to [node('foo'), node('bar'), ...]
    """
    return list(map(lambda p: node(p, type=type), paths))
13,014
def GetCodeBucket(app, project):
    """Gets a bucket reference for a Cloud Build.

    Args:
        app: App resource for this project
        project: str, The name of the current project.

    Returns:
        storage_util.BucketReference, The bucket to use.
    """
    # Attempt to retrieve the default appspot bucket, if one can be created.
    log.debug('No bucket specified, retrieving default bucket.')
    if not app.codeBucket:
        raise exceptions.DefaultBucketAccessError(project)
    return storage_util.BucketReference.FromBucketUrl(app.codeBucket)
13,015
def parse_rule(parameter_string):
    """Parse a parameter string into its constituent name, type, and pattern

    For example::

        parse_rule('<param_one:[A-z]>') -> ('param_one', str, '[A-z]')

    :param parameter_string: String to parse
    :return: tuple containing
        (parameter_name, parameter_type, parameter_pattern)
    """
    # We could receive NAME or NAME:PATTERN
    if str(parameter_string).startswith('/'):
        parameter_string = parameter_string[1:]
    parameter_string = str(parameter_string).strip('<>')
    name = parameter_string
    pattern = 'string'
    if ':' in parameter_string:
        name, pattern = parameter_string.split(':', 1)

    default = (str, pattern)
    # Pull from pre-configured types
    _type, pattern = REGEX_TYPES.get(pattern, default)
    return name, _type, pattern
13,016
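A quick usage sketch for parse_rule above. REGEX_TYPES is assumed to be the framework's mapping from type names to (python_type, regex) pairs, so the concrete outputs shown in the comments are illustrative:

print(parse_rule('<user_id:int>'))  # e.g. ('user_id', int, r'-?\d+') if 'int' is in REGEX_TYPES
print(parse_rule('<name>'))         # falls back to the pre-configured 'string' pattern
print(parse_rule('<code:[A-z]+>'))  # unknown pattern: ('code', str, '[A-z]+')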
def plan():
    """
    Improvement plan page
    :return:
    """
    return render_template('plan.htm')
13,017
def get_repo_meta(instance, **kwargs):
    """
    Get and create git repository of user with token in commandline.
    Post Save is called twice in Abstract User class. Async produces
    unpredictable behaviour (hence it may populate duplicate instances
    in Repo).
    """
    # async(tasks.get_repo, instance)
    if not instance.last_login:
        print('contains token')
        import re
        from datetime import datetime
        print('Populating repo ...')
        if instance.git_token:
            from github import Github
            g = Github(instance.git_token)
            GIT_USERNAME = g.get_user().login
            repo_list = [str(r.full_name) for r in g.get_user().get_repos(GIT_USERNAME)]
            print(repo_list)
            r = re.compile(GIT_USERNAME + '/*')
            newlist = filter(r.match, repo_list)
            another_list = [repo.replace(GIT_USERNAME + '/', '') for repo in list(newlist)]
            print(another_list)
            for repo in another_list:
                repo_name = g.get_user().get_repo(repo)
                print(repo_name)
                Repo.objects.get_or_create(
                    name=repo_name.name,
                    repo_full_name=repo_name.full_name,
                    owner_name=repo_name.owner.name,
                    language=repo_name.language,
                    owner_username=repo_name.owner.login,
                    size=repo_name.size,
                    clone_url=repo_name.clone_url,
                    issues_url=repo_name.issues_url,
                    merges_url=repo_name.merges_url,
                    milestones_url=repo_name.milestones_url,
                    git_last_modified=datetime.strptime(repo_name.last_modified,
                                                        '%a, %d %b %Y %H:%M:%S %Z'))
13,018
def extract_traceback(notebook):
    """
    Extracts information about an error from the notebook.

    Parameters
    ----------
    notebook: :class:`nbformat.notebooknode.NotebookNode`
        Executed notebook to find an error traceback.

    Returns
    -------
    bool
        Whether the executed notebook has an error traceback.
    int or None
        Number of a cell with a traceback.
        If None, then the notebook doesn't contain an error traceback.
    str
        Error traceback if exists.
    """
    for cell in notebook['cells']:
        # Find a cell output with a traceback and extract the traceback
        outputs = cell.get('outputs', [])
        for output in outputs:
            traceback = output.get('traceback', [])
            if traceback:
                traceback = '\n'.join(traceback)
                return True, cell['execution_count'], traceback
    return False, None, ""
13,019
def start(bot, update):
    """Send a message when the command /start is issued."""
    update.message.reply_text('Здравствуйте! Чем можем помочь?')  # "Hello! How can we help?"
13,020
def test_insert_items_with_priority_add_all_at_that_priority(empty_priorityq):
    """Test inserting items into the queue adds them at given priority."""
    q = empty_priorityq
    q.insert(5, 6)
    q.insert(3, 6)
    q.insert(1, 6)
    assert q._size == 1
    assert q._all_values[1].priority == 6
    assert len(q._all_values[1]) == 3
13,021
def get_log() -> str:
    """get_log() -> str

    (internal)
    """
    return str()
13,022
def start_server(self, parameters):  # pragma: no cover
    """adds the server start to celery's queue

    Args:
        parameters(dict): The POST JSON parameters
    """
    self.update_state(state=CeleryStates.started)
    session = ServerSession(parameters)
    return session()
13,023
def generate_config(context):
    """ Entry point for the deployment resources. """
    properties = context.properties
    name = properties.get('name', context.env['name'])
    project_id = properties.get('project', context.env['project'])
    bgp = properties.get('bgp', {'asn': properties.get('asn')})

    router = {
        'name': context.env['name'],
        # https://cloud.google.com/compute/docs/reference/rest/v1/routers
        'type': 'gcp-types/compute-v1:routers',
        'properties': {
            'name': name,
            'project': project_id,
            'region': properties['region'],
            'bgp': bgp,
            'network': properties.get('networkURL', generate_network_uri(
                project_id, properties.get('network', ''))),
        }
    }

    optional_properties = [
        'description',
        'bgpPeers',
        'interfaces',
        'nats',
    ]
    for prop in optional_properties:
        append_optional_property(router, properties, prop)

    return {
        'resources': [router],
        'outputs': [
            {
                'name': 'name',
                'value': name
            },
            {
                'name': 'selfLink',
                'value': '$(ref.' + context.env['name'] + '.selfLink)'
            },
            {
                'name': 'creationTimestamp',
                'value': '$(ref.' + context.env['name'] + '.creationTimestamp)'
            }
        ]
    }
13,024
def MakeRootForHref(rootkml, region, lod, root_href):
    """Make a NetworkLink KML file to the root

    Put the proper network URL here and publish this file.
    All NetworkLinks below are relative to this.

    Args:
        rootkml - name of file to create
        region - region of root of hierarchy
        lod - minLodPixels
        root_href - href to first file in hierarchy
    """
    link = kml.genxml.Link()
    link.href = root_href
    link.viewRefreshMode = 'onRegion'
    (n, s, e, w) = region.NSEWstring()
    regionxml = kml.genkml.Region(n, s, e, w, minpx=lod, maxpx=-1)
    networklink = kml.genxml.NetworkLink()
    networklink.Link = link.xml()
    networklink.Region = regionxml
    stylexml = kml.genkml.CheckHideChildren()
    networklink.Add_Style(stylexml)
    document = kml.genxml.Document()
    document.Add_Feature(networklink.xml())
    k = kml.genxml.Kml()
    k.Feature = document.xml()
    kmlstr = k.xml()
    f = open(rootkml, 'w')
    f.write(kmlstr)
    f.close()
13,025
def cachedmethod(timeout):
    """ Function decorator to enable caching for instance methods. """
    def _cached(func):
        if not(hasattr(func, 'expires')):
            func.expires = {}
            func.cache = {}

        def __cached(self, *args, **kwargs):
            if(timeout and func.expires.get(repr(self), 0) < time.time()):
                if(repr(self) in func.cache):
                    del func.cache[repr(self)]
            if(repr(self) in func.cache):
                return func.cache[repr(self)]
            result = func(self, *args, **kwargs)
            if(result):
                func.cache[repr(self)] = result
                func.expires[repr(self)] = time.time() + timeout
            return result
        return __cached

    try:
        # see if it's an int
        int(timeout)
    except TypeError:
        # bare @cachedmethod usage: `timeout` is actually the function
        func = timeout
        timeout = 0
        return _cached(func)
    return _cached
13,026
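A usage sketch for cachedmethod above, assuming it and the time module are in scope; the Sensor class is made up for illustration. It shows both the parameterized and the bare decorator forms that the trailing try/except enables:

class Sensor:
    @cachedmethod(timeout=5)
    def read(self):
        # Expensive read, cached per instance for 5 seconds.
        return 42

    @cachedmethod
    def name(self):
        # Bare form: timeout is treated as 0, so the value is cached without expiry.
        return "sensor-1"

s = Sensor()
print(s.read(), s.read())  # second call is served from the per-instance cache
print(s.name())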
def test_bad_construction():
    """Test bad construction parameters."""
    with pytest.raises(ValueError):
        OnExecutionComplete(
            target_action='not-an-action',
            on_completion=lambda *args: None
        )
    with pytest.raises(ValueError):
        OnExecutionComplete(
            target_action=LogInfo(msg='some message'),
            on_completion='not-a-callable-nor-an-action-iterable'
        )
13,027
def __build_pyramid(models, features):
    """Applies all submodels to each FPN level.

    Args:
        models (list): List of submodels to run on each pyramid level
            (by default only regression, classification).
        features (list): The FPN features.

    Returns:
        list: A list of tensors, one for each submodel.
    """
    return [__build_model_pyramid(n, m, features) for n, m in models]
13,028
def get_ap_list():
    """ Method to return list of aps present in the network """
    return jsonify_params(
        CELLULAR_NETWORK.ap_list
    )
13,029
def test_sample_dataframe_schema() -> None:
    """Test the sample argument of schema.validate."""
    df = pd.DataFrame({"col1": range(1, 1001)})

    # assert all values -1
    schema = DataFrameSchema(
        columns={"col1": Column(Int, Check(lambda s: s == -1))}
    )

    for seed in [11, 123456, 9000, 654]:
        sample_index = df.sample(100, random_state=seed).index
        df.loc[sample_index] = -1
        assert schema.validate(df, sample=100, random_state=seed).equals(df)
13,030
def set_title_z_offset(obj, offset):
    """
    Set z-axis title offset

    :param obj: drawable object
    :type obj: TH1X, TGraph
    :param offset: axis title offset value
    :type offset: float
    :return: nothing
    :rtype: None
    """
    set_axis_title_offset(obj, offset, 'z')
13,031
def _merge_url_rule(rule_before, rule_after):
    """
    Merges two url rule parts.

    Parameters
    ----------
    rule_before : `None` or `tuple` of `tuple` (`int`, `str`)
        First url part if any to join `rule_after` to.
    rule_after : `None` or `tuple` of `tuple` (`int`, `str`)
        Second url part whose start is extended by `rule_before`.

    Returns
    -------
    merged_rule : `None` or `tuple` of `tuple` (`int`, `str`)
        The merged rule.
    """
    if rule_before is None:
        return rule_after
    if rule_after is None:
        return rule_before
    if rule_after[0] == DUMMY_RULE_PART:
        rule_after = rule_after[1:]
    return (*rule_before, *rule_after)
13,032
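A tiny sketch of how _merge_url_rule above composes rule tuples. DUMMY_RULE_PART's actual value comes from the host framework, so it is stubbed here purely for illustration:

DUMMY_RULE_PART = (0, '')  # stub; the real constant lives in the framework

before = ((1, 'api'), (1, 'v1'))
after = (DUMMY_RULE_PART, (1, 'users'))
print(_merge_url_rule(before, after))  # ((1, 'api'), (1, 'v1'), (1, 'users'))
print(_merge_url_rule(None, after))    # after is returned unchanged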
def parse_options(args):
    """
    Parse commandline arguments into options for Monitor

    :param args:
    :return:
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tcp",
        required=True,
        action="append",
        help="TCP/IP address to monitor, e.g. google.com:80. For best results"
             " use multiple addresses."
    )
    parser.add_argument("--logfile", default="connection.log",
                        help="Where to store the connection quality data")
    parser.add_argument("--interval", default=30.0, type=float,
                        help="How many seconds between checks")
    parser.add_argument("--timeout", default=3.0, type=float,
                        help="How many seconds to wait for connection")
    parser.add_argument("--quiet", default=False, action="store_true",
                        help="Do not output log data to screen")
    return parser.parse_args(args)
13,033
def check_file_executable(exe):
    """
    Check the file can be executed
    """
    try:
        output = subprocess.check_output([exe, "--version"], stderr=subprocess.STDOUT)
    except EnvironmentError as error:
        if error.errno == errno.EACCES:
            sys.exit("ERROR: Unable to execute software: " + exe)
    except subprocess.CalledProcessError:
        pass
13,034
def create_graphic_model(nodes, edges, gtype):
    """
    Create a graphic model given nodes and edges

    Parameters
    ----------
    nodes : dict
        for each node {key, text, math}
    edges : dict
        for each edge {key, text, math}
    gtype : str [default="text"]
        "text" for a verbose version, "math" for a compact version
    """
    mod = Digraph()
    if gtype == "math":
        tindx = 1
    else:
        tindx = 0
    for ckey in nodes.keys():
        if ckey == "Like":
            cstyle = "filled"
        else:
            cstyle = None
        mod.node(ckey, nodes[ckey][tindx], style=cstyle)
    for ckey in edges.keys():
        for cval in np.atleast_1d(edges[ckey]):
            mod.edge(ckey, cval)
    return mod
13,035
def zero_cross_bounds(arr, dim, num_cross):
    """Find the values bounding an array's zero crossing."""
    sign_switch = np.sign(arr).diff(dim)
    switch_val = arr[dim].where(sign_switch, drop=True)[num_cross]
    lower_bound = max(0.999 * switch_val, np.min(arr[dim]))
    upper_bound = min(1.001 * switch_val, np.max(arr[dim]))
    return arr.sel(**{dim: [lower_bound, upper_bound], "method": "backfill"})
13,036
def remove_tseqs(t: ST_Type) -> ST_Type:
    """
    Get just the sseqs and the non-nested types, removing the tseqs
    """
    if type(t) == ST_SSeq or type(t) == ST_SSeq_Tuple:
        inner_tseqs_removed = remove_tseqs(t.t)
        return replace(t, t=inner_tseqs_removed)
    elif is_nested(t):
        return remove_tseqs(t.t)
    else:
        return t
13,037
def prod_cart(in_list_1: list, in_list_2: list) -> list:
    """
    Compute the cartesian product of two lists
    :param in_list_1: the first list to be evaluated
    :param in_list_2: the second list to be evaluated
    :return: the cartesian product result as [[x,y],..]
    """
    _list = []
    for element_1 in in_list_1:
        for element_2 in in_list_2:
            _list.append([element_1, element_2])
    return _list
13,038
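A usage sketch for prod_cart above (assuming it is in scope). The standard library's itertools.product yields tuples rather than the two-element lists this helper returns, so the near-equivalent needs a conversion:

from itertools import product

print(prod_cart([1, 2], ['a', 'b']))
# [[1, 'a'], [1, 'b'], [2, 'a'], [2, 'b']]

# Near-equivalent with the standard library (tuples converted to lists):
print([list(p) for p in product([1, 2], ['a', 'b'])])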
def list_host(args):
    """List hosts"""
    clientstable = PrettyTable(["Client", "Type", "Enabled", "Current"])
    clientstable.align["Client"] = "l"
    baseconfig = Kconfig(client=args.client, debug=args.debug).baseconfig
    clients = baseconfig.list_hosts(empty()).clients
    for entry in clients:
        client = entry.client
        enabled = entry.enabled
        _type = entry.type
        if entry.current:
            clientstable.add_row([client, _type, enabled, 'X'])
        else:
            clientstable.add_row([client, _type, enabled, ''])
    print(clientstable)
    return
13,039
def _write_building(buildings, lines):
    """
    Args:
        buildings (idf_MSequence): IDF object from idf.idfobjects()
        lines (list): Text to create the T3D file (IDF file to import in
            TRNBuild). To be appended (insert) here
    """
    # Get line number where to write
    log("Writing building info from idf file to t3d file...")
    buildingNum = checkStr(lines, "ALL OBJECTS IN CLASS: BUILDING")
    # Writing BUILDING infos to lines
    for building in buildings:
        lines.insert(buildingNum, building)
13,040
def classFactory(iface):  # pylint: disable=invalid-name
    """Load esrimap class from file esrimap.

    :param iface: A QGIS interface instance.
    :type iface: QgsInterface
    """
    # The import was commented out, leaving `esrimap` undefined at call time.
    from .esri_basemap import esrimap
    return esrimap(iface)
13,041
def gpib_open(name):
    """
    Start a device session.

    Returns a unique integer for the instrument at the specified GPIB
    address. For example::

        >>> gpib_open(lan[158.154.1.110]:19)
        4

    @param name : LAN/GPIB address of the device
    @type  name : str

    @return: int
    """
    (devtype, devID) = name.split()
    address = eval(devtype)[devID]['addr']
    return _open(address)
13,042
def myjobs_view(request):
    """
    Renderbox view
    :param request:
    :return:
    """
    return render(request, 'renderbox/myjobs.html')
13,043
def set_task_payload(func):
    """Set TASK_PAYLOAD while the task runs and unset it afterwards."""
    @functools.wraps(func)
    def wrapper(task):
        """Wrapper."""
        environment.set_value('TASK_PAYLOAD', task.payload())
        try:
            return func(task)
        except:  # Truly catch *all* exceptions.
            e = sys.exc_info()[1]
            e.extras = {'task_payload': environment.get_value('TASK_PAYLOAD')}
            raise
        finally:
            environment.remove_key('TASK_PAYLOAD')
    return wrapper
13,044
def _set_status(file_id, status):
    """Update status for file with id ``file_id``."""
    assert file_id, 'Eh? No file_id?'
    LOG.debug(f'Updating status file_id {file_id} with "{status}"')
    with connect() as conn:
        with conn.cursor() as cur:
            cur.execute('UPDATE local_ega.files SET status = %(status)s WHERE id = %(file_id)s;',
                        {'status': status, 'file_id': file_id})
13,045
def geometry_extractor_osm(locator, config):
    """this is where the action happens if it is more than a few lines in ``main``.
    NOTE: ADD YOUR SCRIPT'S DOCUMENTATION HERE (how)
    NOTE: RENAME THIS FUNCTION (SHOULD PROBABLY BE THE SAME NAME AS THE MODULE)
    """

    # local variables:
    buffer_m = config.surroundings_helper.buffer
    buildings_height = config.surroundings_helper.height_ag
    buildings_floors = config.surroundings_helper.floors_ag
    shapefile_out_path = locator.get_surroundings_geometry()
    zone = gdf.from_file(locator.get_zone_geometry())

    # transform zone file to geographic coordinates
    zone = zone.to_crs(get_geographic_coordinate_system())
    lon = zone.geometry[0].centroid.coords.xy[0][0]
    lat = zone.geometry[0].centroid.coords.xy[1][0]
    zone = zone.to_crs(get_projected_coordinate_system(float(lat), float(lon)))

    # get a polygon of the surrounding area, and one polygon representative of the zone area
    print("Calculating surrounding area")
    area_with_buffer = calc_surrounding_area(zone, buffer_m)

    # get footprints of all the surroundings
    print("Getting building footprints")
    area_with_buffer_polygon = area_with_buffer.to_crs(get_geographic_coordinate_system()).geometry.values[0]
    all_surroundings = osmnx.footprints.footprints_from_polygon(polygon=area_with_buffer_polygon)
    all_surroundings = all_surroundings.to_crs(get_projected_coordinate_system(float(lat), float(lon)))

    # erase overlapping area
    print("Removing unwanted buildings")
    surroundings = erase_no_surrounding_areas(all_surroundings, zone, area_with_buffer)

    assert surroundings.shape[0] > 0, 'No buildings were found within range based on buffer parameter.'

    # clean attributes of height, name and number of floors
    result = clean_attributes(surroundings, buildings_height, buildings_floors, key="CEA")
    result = result.to_crs(get_projected_coordinate_system(float(lat), float(lon)))

    # save to shapefile
    result.to_file(shapefile_out_path)
13,046
def _match_contact(filter_criteria):
    """
    This default matching strategy function will attempt to get a single
    result for the specified criteria.

    It will fail with an `unmatched` result if there are no matching contacts.

    It will fail with a `multiple_matches` result if there are multiple
    matches for this criteria.
    """
    contact = None
    try:
        contact = get_queryset_object(Contact.objects.all(), **filter_criteria)
        contact_matching_status = ContactMatchingStatus.matched
    except Contact.DoesNotExist:
        contact_matching_status = ContactMatchingStatus.unmatched
    except Contact.MultipleObjectsReturned:
        contact_matching_status = ContactMatchingStatus.multiple_matches
    return contact, contact_matching_status
13,047
def get_objects_dictionary():
    """
    creates a dictionary with the types and the circuit objects
    :return: Dictionary instance
    """
    object_types = {'bus': Bus(),
                    'load': Load(),
                    'static_generator': StaticGenerator(),
                    'battery': Battery(),
                    'generator': Generator(),
                    'shunt': Shunt(),
                    'wires': Wire(),
                    'overhead_line_types': Tower(),
                    'underground_cable_types': UndergroundLineType(),
                    'sequence_line_types': SequenceLineType(),
                    'transformer_types': TransformerType(),
                    'branch': Branch(),
                    'transformer2w': Transformer2W(),
                    'line': Line(),
                    'dc_line': DcLine(None, None),
                    'hvdc': HvdcLine(),
                    'vsc': VSC(Bus(), Bus(is_dc=True)),
                    }
    return object_types
13,048
def create_stripe_onboarding_link(request, stripe_id=None):
    """Creates stripe connect onboarding link by calling Stripe API."""
    account_links = stripe.AccountLink.create(
        account=stripe_id,
        return_url=request.build_absolute_uri(
            reverse("users:stripe_callback")
        ),
        refresh_url=request.build_absolute_uri(
            reverse("users:stripe_authorize")
        ),
        type="account_onboarding",
    )
    return account_links
13,049
def add_scheme_if_missing(url):
    """
    >>> add_scheme_if_missing("example.org")
    'http://example.org'
    >>> add_scheme_if_missing("https://example.org")
    'https://example.org'
    """
    if "//" not in url:
        url = "http://%s" % url
    return url
13,050
def monkeypatch_pkg_resources(monkeypatch, monkeypatch_entrypoint):
    """Monkeypatching pkg_resources.iter_entry_points with our list of entry points."""
    monkeypatch.setattr(pkg_resources, 'iter_entry_points', get_entry_points)
13,051
def _extract_assembly_information(job_context: Dict) -> Dict:
    """Determine the Ensembl assembly version and name used for this index.

    Ensembl will periodically release updated versions of the assemblies,
    which are where the input files for this processor come from. All
    divisions other than the main one have identical release versions, but we
    don't know which division these files came from so we can't just hit
    their API again. Therefore, look at the URL we used to get the files
    because it contains the assembly version and name.

    I'll admit this isn't the most elegant solution, but since the
    transcriptome index's only database model is the OriginalFiles until
    processing is complete, there's no other way to pass this information
    through to this processor without modifying the OriginalFile model.

    The URL path we're attempting follows this pattern (defined in the surveyor)
    ftp://ftp.{url_root}/gtf/{species_sub_dir}/{filename_species}.{assembly_name}.{assembly_version}.gtf.gz
    and we are attempting to extract {assembly_version} and {assembly_name}.
    """
    original_files = job_context["original_files"]
    for og_file in original_files:
        if ".gtf.gz" in og_file.source_filename:
            extensionless_url = og_file.source_url[:-7]
            version_start_index = extensionless_url.rfind(".") + 1
            job_context["assembly_version"] = extensionless_url[version_start_index:]

            # Decrement the index to skip the period.
            versionless_url = extensionless_url[:version_start_index - 1]
            assembly_name_start_index = versionless_url.rfind(".") + 1
            job_context["assembly_name"] = versionless_url[assembly_name_start_index:]
    return job_context
13,052
def fixture_circle_2() -> Circle:
    """Return an example circle."""
    return Circle(Point(0.0, 0.0), 1.0)
13,053
def parse_fortran(source, filename="<floopy code>", free_form=None, strict=None,
                  seq_dependencies=None, auto_dependencies=None, target=None):
    """
    :returns: a :class:`loopy.TranslationUnit`
    """
    parse_plog = ProcessLogger(logger, "parsing fortran file '%s'" % filename)

    if seq_dependencies is not None and auto_dependencies is not None:
        raise TypeError(
            "may not specify both seq_dependencies and auto_dependencies")

    if auto_dependencies is not None:
        from warnings import warn
        warn("auto_dependencies is deprecated, use seq_dependencies instead",
             DeprecationWarning, stacklevel=2)
        seq_dependencies = auto_dependencies

    if seq_dependencies is None:
        seq_dependencies = True
    if free_form is None:
        free_form = True
    if strict is None:
        strict = True

    import logging
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    formatter = logging.Formatter("%(name)-12s: %(levelname)-8s %(message)s")
    console.setFormatter(formatter)
    logging.getLogger("fparser").addHandler(console)

    from fparser import api
    tree = api.parse(source, isfree=free_form, isstrict=strict,
                     analyze=False, ignore_comments=False)

    if tree is None:
        raise LoopyError("Fortran parser was unhappy with source code "
                         "and returned invalid data (Sorry!)")

    from loopy.frontend.fortran.translator import F2LoopyTranslator
    f2loopy = F2LoopyTranslator(filename, target=target)
    f2loopy(tree)

    kernels = f2loopy.make_kernels(seq_dependencies=seq_dependencies)

    from loopy.transform.callable import merge
    prog = merge(kernels)
    all_kernels = [clbl.subkernel for clbl in prog.callables_table.values()]

    for knl in all_kernels:
        prog.with_kernel(_add_assignees_to_calls(knl, all_kernels))

    if len(all_kernels) == 1:
        # guessing in the case of only one function
        prog = prog.with_entrypoints(all_kernels[0].name)

    from loopy.frontend.fortran.translator import specialize_fortran_division
    prog = specialize_fortran_division(prog)

    parse_plog.done()
    return prog
13,054
def plot_curve(lr_list, args, iters_per_epoch, by_epoch=True):
    """Plot learning rate vs iter graph."""
    try:
        import seaborn as sns
        sns.set_style(args.style)
    except ImportError:
        print("Attention: The plot style won't be applied because 'seaborn' "
              'package is not installed, please install it if you want better '
              'show style.')

    wind_w, wind_h = args.window_size.split('*')
    wind_w, wind_h = int(wind_w), int(wind_h)
    plt.figure(figsize=(wind_w, wind_h))

    # if legend is None, use {filename}_{key} as legend
    ax: plt.Axes = plt.subplot()
    ax.plot(lr_list, linewidth=1)

    if by_epoch:
        ax.xaxis.tick_top()
        ax.set_xlabel('Iters')
        ax.xaxis.set_label_position('top')
        sec_ax = ax.secondary_xaxis(
            'bottom',
            functions=(lambda x: x / iters_per_epoch,
                       lambda y: y * iters_per_epoch))
        sec_ax.set_xlabel('Epochs')
        # ticks = range(0, len(lr_list), iters_per_epoch)
        # plt.xticks(ticks=ticks, labels=range(len(ticks)))
    else:
        plt.xlabel('Iters')
    plt.ylabel('Learning Rate')

    if args.title is None:
        plt.title(f'{osp.basename(args.config)} Learning Rate curve')
    else:
        plt.title(args.title)

    if args.save_path:
        plt.savefig(args.save_path)
        print(f'The learning rate graph is saved at {args.save_path}')
    plt.show()
13,055
def compute_sources(radius, evolved_vars):
    """
    Computes source terms for the symmetry.
    """
    mass_density = evolved_vars[0]
    momentum_density = evolved_vars[1]
    energy_density = evolved_vars[2]
    factor = -_symmetry_alpha / radius
    pressure = compute_pressure(mass_density, momentum_density, energy_density)
    return (factor * momentum_density,
            factor * momentum_density**2 / mass_density,
            factor * (energy_density + pressure) * momentum_density / mass_density)
13,056
def average_pq(ps, qs):
    """
    average the multiple position and quaternion array

    Args:
        ps (np.array): multiple position array of shape Nx3
        qs (np.array): multiple quaternion array of shape Nx4

    Returns:
        p_mean (np.array): averaged position array
        q_mean (np.array): averaged quaternion array
    """
    p_average = np.mean(np.asarray(ps), axis=0)
    q_average = average_q(np.asarray(qs))
    return p_average, q_average
13,057
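A quick sketch of calling average_pq above; average_q is assumed to be the project's quaternion-averaging helper, so identity quaternions are used to keep the expected output obvious:

import numpy as np

ps = np.array([[0.0, 0.0, 0.0],
               [2.0, 2.0, 2.0]])
qs = np.array([[0.0, 0.0, 0.0, 1.0],
               [0.0, 0.0, 0.0, 1.0]])
p_mean, q_mean = average_pq(ps, qs)
print(p_mean)  # [1. 1. 1.]
print(q_mean)  # identity quaternion (up to sign/normalization)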
def logobase(**kwargs):
    """Create a PyGraphviz graph for a logo."""
    ag = pygraphviz.AGraph(bgcolor='#D0D0D0', strict=False, directed=True,
                           ranksep=0.3, **kwargs)
    ag.edge_attr['penwidth'] = 1.4
    ag.edge_attr['arrowsize'] = 0.8
    return ag
13,058
def integral_raycasting(
    pixels: Tensor,
    mu: Tensor,
    rho: Tensor,
    lambd: Tensor,
    appearance: Tensor,
    background_appearance: Tensor,
    K: Tensor,
    dist_coef: Tensor = None,
    alpha: float = 2.5e-2,
    beta: float = 2e0,
    eps: float = 1e-8,
) -> Tensor:
    """
    :param pixels: [H, W, 3, 1]
    :param mu: [*, N, 3, 1]
    :param rho: [*, N, 3, 3]
    :param lambd: [*, N, 3, 1]
    :param appearance: [*, N, 3]
    :param background_appearance: [*, 1, 3]
    :param K: [*, 3, 3]
    :param dist_coef: [*, D]
    :param alpha:
    :param beta:
    :param eps:
    :return:
    """
    rays = pixel_grid_to_ray_grid(
        xyz=pixels,
        K=K,
        dist_coef=dist_coef,
    )
    lambd, alpha = invert_lambd(
        lambd=lambd,
        alpha=alpha,
        eps=eps,
    )
    rays_sigma_rays, mu_sigma_mu, rays_sigma_mu = compute_quantities(
        rays=rays,
        mu=mu,
        rho=rho,
        lambd=lambd,
    )
    z = optimal_z(rays_sigma_mu=rays_sigma_mu, rays_sigma_rays=rays_sigma_rays, eps=eps)
    z_background = beta * max_z(z=z)

    weights = density(x=z) * integral(
        rays_sigma_rays=rays_sigma_rays,
        mu_sigma_mu=mu_sigma_mu,
        rays_sigma_mu=rays_sigma_mu,
        alpha=alpha,
        eps=eps,
    )
    weight_background = density(x=z_background) * background_integral(
        z=z_background,
        alpha=alpha,
    )
    shape = weights.shape[:-1] + weight_background.shape[-1:]
    weight_background = weight_background.expand(shape)
    weights = torch.cat([weights, weight_background], dim=-1)
    weights = normalize_weights(weights=weights, eps=eps)

    appearance = torch.cat([appearance, background_appearance], dim=-2)
    image = splat_image(weights=weights, appearance=appearance)
    return image
13,059
async def cors_handler(request, handler):
    """Middleware to add CORS response headers"""
    response = await handler(request)
    response.headers['Access-Control-Allow-Origin'] = '*'
    return response
13,060
def validate_image(task: ExternalTask):
    """
    To simulate BPMN/Failure/Success, this handler uses the image name
    variable (to be passed when launching the process)
    """
    log_context = {"WORKER_ID": task.get_worker_id(),
                   "TASK_ID": task.get_task_id(),
                   "TOPIC": task.get_topic_name()}
    log_with_context("executing validate_image", log_context)
    img_name = task.get_variable('imgName')

    if "poor" in img_name:
        return task.bpmn_error("POOR_QUALITY_IMAGE", "Image quality is bad",
                               {"img_rejection_code": "POOR_QUALITY_CODE_XX",
                                "img_rejection_reason": "Image quality must be at least GOOD"})
    elif "jpg" in img_name:
        return task.complete({"img_approved": True})
    elif "corrupt" in img_name:
        return task.failure("Cannot validate image", "image is corrupted", 0,
                            default_config.get("retryTimeout"))
    else:
        return task.bpmn_error("INVALID_IMAGE", "Image extension must be jpg",
                               {"img_rejection_code": "INVALID_IMG_NAME",
                                "img_rejection_reason": f"Image name {img_name} is invalid"})
13,061
def simulate_patch(app, path, **kwargs):
    """Simulates a PATCH request to a WSGI application.

    Equivalent to::

         simulate_request(app, 'PATCH', path, **kwargs)

    Args:
        app (callable): The WSGI application to call
        path (str): The URL path to request

    Keyword Args:
        params (dict): A dictionary of query string parameters,
            where each key is a parameter name, and each value is
            either a ``str`` or something that can be converted
            into a ``str``, or a list of such values. If a ``list``,
            the value will be converted to a comma-delimited string
            of values (e.g., 'thing=1,2,3').
        params_csv (bool): Set to ``False`` to encode list values
            in query string params by specifying multiple instances
            of the parameter (e.g., 'thing=1&thing=2&thing=3').
            Otherwise, parameters will be encoded as comma-separated
            values (e.g., 'thing=1,2,3'). Defaults to ``True``.
        headers (dict): Additional headers to include in the request
            (default: ``None``)
        body (str): A string to send as the body of the request.
            Accepts both byte strings and Unicode strings
            (default: ``None``). If a Unicode string is provided,
            it will be encoded as UTF-8 in the request.
        json(JSON serializable): A JSON document to serialize as the
            body of the request (default: ``None``). If specified,
            overrides `body` and the Content-Type header in
            `headers`.
        protocol: The protocol to use for the URL scheme
            (default: 'http')
        host(str): A string to use for the hostname part of the fully
            qualified request URL (default: 'falconframework.org')
        remote_addr (str): A string to use as the remote IP address for the
            request (default: '127.0.0.1')
        extras (dict): Additional CGI variables to add to the WSGI
            ``environ`` dictionary for the request (default: ``None``)
    """
    return simulate_request(app, 'PATCH', path, **kwargs)
13,062
def capture_video_frames(source_path: str, dest_path: str, duration: float, fps: int = 24):
    """
    Used for preparing a data set. Loads a video, which is expected to be in
    the .mp4 format, and stores the single frames of this video.
    -----------------------------------------------------------------------------------
    Parameters:
    -----------------------------------------------------------------------------------
    source_path: str
        Path to the video which has to be captured, e.g. .../.../myTestvid.mp4
    dest_path: str
        Destination where the captured frames have to be stored.
    duration: float
        Defines how many minutes of the video are to be captured. The number
        of frames to be captured is then calculated by minutes * 60 * fps.
    fps: int
        Frames per second. Default = 24.
    """
    vid_name = source_path.split('/')[3].split('.')[0]
    frame_counter = duration * 60 * fps
    vid_capture = cv2.VideoCapture(source_path)
    success, image = vid_capture.read()
    count = 0
    while success and count < frame_counter:
        # save frame as PNG file
        cv2.imwrite(dest_path + '/' + vid_name + '_frame_%d.png' % count, image)
        success, image = vid_capture.read()
        print(count, ' Read a new frame: ', success)
        count += 1
13,063
def test_render_template_broken():
    """A broken template should raise an error"""
    with pytest.raises(TemplateSyntaxError) as ex:
        render_template(BROKEN_TEMPLATE, context=VARS)
    assert "Invalid block tag" in ex.value.args[0]
13,064
def purge_files():
    """
    Remove all generated files from previous runs.
    """
    for fname in glob.glob("*.png"):
        print("Deleting {!r} ...".format(fname))
        os.unlink(fname)
    for fname in glob.glob("*.png.old"):
        print("Deleting {!r} ...".format(fname))
        os.unlink(fname)
13,065
def _merge_nbval_coverage_data(cov):
    """Merge nbval coverage data into pytest-cov data."""
    if not cov:
        return

    # Set up aliases (following internal coverage.py code here).
    # Hoisted above the version check: the original referenced `aliases`
    # in the >5.0 branch before it was ever assigned.
    aliases = None
    if cov.config.paths:
        aliases = coverage.files.PathAliases()
        for paths in cov.config.paths.values():
            result = paths[0]
            for pattern in paths[1:]:
                aliases.add(pattern, result)

    if coverage.version_info > (5, 0):
        data = cov.get_data()
        nbval_data = coverage.CoverageData(data.data_filename(), suffix='.nbval', debug=cov.debug)
        nbval_data.read()
        cov.get_data().update(nbval_data, aliases=aliases)
    else:
        # Get the filename of the nbval coverage:
        filename = cov.data_files.filename + '.nbval'

        # Read coverage generated by nbval in this run:
        nbval_data = coverage.CoverageData(debug=cov.debug)
        try:
            nbval_data.read_file(os.path.abspath(filename))
        except coverage.CoverageException:
            return

        # Merge nbval data into pytest-cov data:
        cov.data.update(nbval_data, aliases=aliases)

        # Delete our nbval coverage data
        coverage.misc.file_be_gone(filename)
13,066
def minimum_image_box(sizes):
    """Creates a distance wrapper using the minimum image convention

    Arguments:
        sizes (array-like of float): box sizes
    """
    def _box(sizes, distance_vectors):
        """A minimum image wrapper for distances"""
        shift = sizes[None, None, :] * np.round(distance_vectors / sizes[None, None, :])
        distance_vectors -= shift
        return distance_vectors
    return partial(_box, np.array(sizes))
13,067
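A usage sketch for minimum_image_box above, assuming the numpy and functools.partial imports from the original module; the box size and displacement values are illustrative:

import numpy as np

box = minimum_image_box([10.0, 10.0, 10.0])
# A pair displacement of 9 along x wraps to -1 under the minimum image convention.
vecs = np.array([[[9.0, 0.0, 0.0]]])
print(box(vecs))  # [[[-1.  0.  0.]]]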
def mlrPredict(W, data):
    """
    mlrObjFunction predicts the label of data given the data and parameter W
    of Logistic Regression

    Input:
        W: the matrix of weight of size (D + 1) x 10. Each column is the
           weight vector of a Logistic Regression classifier.
        X: the data matrix of size N x D

    Output:
        label: vector of size N x 1 representing the predicted label of
               corresponding feature vector given in data matrix
    """
    label = np.zeros((data.shape[0], 1))

    ##################
    # YOUR CODE HERE #
    ##################
    # HINT: Do not forget to add the bias term to your input data

    x = np.hstack((np.ones((data.shape[0], 1)), data))
    label = (np.argmax((np.exp(np.dot(x, W)) / np.sum(np.exp(np.dot(x, W)))),
                       axis=1)).reshape((data.shape[0], 1))
    return label
13,068
def plot_binary_logistic_boundary(logreg, X, y, xlim, ylim):
    """Plots the boundary given by the trained logistic regressor

    :param logreg: Logistic Regressor model
    :type logreg: logistic_regression.LogisticRegressionModel
    :param X: The features and samples used to train
    :type X: np.ndarray
    :param y: The labels for the classification task
    :type y: np.ndarray
    :param xlim: min and max :math:`x_1` values for the plot
    :type xlim: typing.Tuple[int, int]
    :param ylim: min and max :math:`x_2` values for the plot
    :type ylim: typing.Tuple[int, int]
    """
    xx, yy = np.mgrid[xlim[0]:xlim[1]:.01, ylim[0]:ylim[1]:.01]
    grid = np.c_[xx.ravel(), yy.ravel()]
    probs = logreg(grid)[:, 1].reshape(xx.shape)

    f, ax = plt.subplots(figsize=(8, 6))
    contour = ax.contourf(xx, yy, probs, 25, cmap="RdBu", vmin=0, vmax=1)
    ax_c = f.colorbar(contour)
    ax_c.set_label("$P(y = 1)$")
    ax_c.set_ticks([0, .25, .5, .75, 1])
    ax.scatter(X[:, 0], X[:, 1], c=y, s=50,
               cmap="RdBu", vmin=-.2, vmax=1.2,
               edgecolor="white", linewidth=1)
    ax.set(aspect="equal", xlim=xlim, ylim=ylim,
           xlabel="$X_1$", ylabel="$X_2$")
    plt.show()

    f, ax = plt.subplots(figsize=(8, 6))
    ax.contour(xx, yy, probs, levels=[.5], cmap="Greys", vmin=0, vmax=.6)
    ax.scatter(X[:, 0], X[:, 1], c=y, s=50,
               cmap="RdBu", vmin=-.2, vmax=1.2,
               edgecolor="white", linewidth=1)
    ax.set(aspect="equal", xlim=xlim, ylim=ylim,
           xlabel="$X_1$", ylabel="$X_2$")
    plt.show()
13,069
def _wrap(wrapper, func):
    """
    save wrapped function if multiple decorators are used

    :param func:
    :return:
    """
    setattr(wrapper, '__wrapped__', func)
13,070
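A small sketch of how _wrap above might be used inside a decorator, assuming it is in scope; the logging decorator itself is made up for illustration:

def logged(func):
    def wrapper(*args, **kwargs):
        print(f"calling {func.__name__}")
        return func(*args, **kwargs)
    _wrap(wrapper, func)  # keeps the original reachable via __wrapped__
    return wrapper

@logged
def add(a, b):
    return a + b

print(add(1, 2))              # prints "calling add", then returns 3
print(add.__wrapped__(1, 2))  # bypasses the logging wrapper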
def calc_mean_score(movies):
    """Helper method to calculate the mean of a list of Movie namedtuples,
    rounded to 1 decimal place"""
    return round(sum([movie.score for movie in movies]) / len(movies), 1)
13,071
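A usage sketch for calc_mean_score above; the Movie namedtuple is assumed to have at least a score field:

from collections import namedtuple

Movie = namedtuple('Movie', 'title score')
movies = [Movie('Alien', 8.4), Movie('Blade Runner', 8.1), Movie('Dune', 8.0)]
print(calc_mean_score(movies))  # 8.2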
def get_proxy_signature(query_dict, secret):
    """
    Calculate the signature of the given query dict as per Shopify's
    documentation for proxy requests.

    See: http://docs.shopify.com/api/tutorials/application-proxies#security
    """
    # Sort and combine query parameters into a single string.
    sorted_params = ''
    for key in sorted(query_dict.keys()):
        sorted_params += "{0}={1}".format(key, ",".join(query_dict.getlist(key)))

    signature = hmac.new(secret.encode('utf-8'), sorted_params.encode('utf-8'), hashlib.sha256)
    return signature.hexdigest()
13,072
def candlestick_echarts(data_frame: pd.DataFrame, time_field: str = 'time', open_field: str = "open", high_field: str = 'high', low_field: str = 'low', close_field: str = 'close', volume_field: str = 'volume', mas: list = [5, 10, 30], log_y: bool = True, title: str = "", width: str = "100%", height: str = "600px", left_padding: str = '5%', right_padding: str = '3%') -> Echarts: """ 绘制K线 :param data_frame: :param time_field: 时间列名, 如果指定的列不存在,使用index作为time :param open_field: open列名 :param high_field: high列名 :param low_field: low列名 :param close_field: close列名 :param volume_field: volume列名 :param mas: 均线组 :param log_y: y轴 log分布 底为1.1 一个格子对应10% :param title: 可选标题 :param width: 输出div的宽度 支持像素和百分比 比如800px/100% :param height: 输出div的高度 支持像素和百分比 比如800px/100% :param left_padding: 左侧padding宽度 :param right_padding: 右侧padding宽度 :return: """ df = data_frame.copy() if time_field not in data_frame.columns: # 使用index作为时间 df[time_field] = df.index df[close_field] = df[close_field].fillna(method="ffill") df[open_field] = df[open_field].fillna(df[close_field]) df[high_field] = df[high_field].fillna(df[close_field]) df[low_field] = df[low_field].fillna(df[close_field]) df[volume_field] = df[volume_field].fillna(0) volumes = (df[volume_field]).round(2).tolist() vol_filter = (df[volume_field]).quantile([0.05, 0.95]).values bar_items = [({"value": vol} if vol >= vol_filter[0] and vol <= vol_filter[1] else ( {"value": vol, "itemStyle": {"color": "red"}} if vol > vol_filter[1] else {"value": vol, "itemStyle": {"color": "green"}})) for vol in volumes] options = { 'animation': False, 'title': {'text': title}, 'legend': {'top': 10, 'left': 'center', 'data': [title]}, 'tooltip': { 'trigger': 'axis', 'axisPointer': {'type': 'cross'}, 'borderWidth': 1, 'borderColor': '#ccc', 'padding': 10, 'formatter': Js(""" function(params){ var dt = params[0]['axisValue']; var labels = []; labels.push('<b><span>时间:&nbsp;</span></b>' + dt + '<br/>'); params.sort(function(a, b) { if (a.seriesName < b.seriesName ) {return -1;} else if (a.seriesName > b.seriesName ) {return 1;} else{ return 0;} }); for (let i = 0; i < params.length; i++) { const param = params[i]; var label=["<b><span>"+param['seriesName']+"("+param['seriesType']+"):&nbsp;</span></b>"]; var dimensionNames=param["dimensionNames"]; if (typeof(param['value'])=='object' && dimensionNames.length==param['data'].length){ label.push("<br/>"); for (let j = 1; j <dimensionNames.length; j++) { var value= param['data'][j]; if (typeof(value)=='number'){ if (value%1==0 || value>100000){ label.push("<span>"+dimensionNames[j]+':&nbsp;'+value.toFixed(0)+"</span><br/>"); }else{ label.push("<span>"+dimensionNames[j]+':&nbsp;'+value.toFixed(2)+"</span><br/>"); } }else{ label.push("<div style='max-width:15em;word-break:break-all;white-space: normal;'>"+dimensionNames[j]+':&nbsp;'+value+"</div>"); } } }else if(param['seriesType']=="candlestick"){ label.push("<br/>"); label.push("<span>open:&nbsp;"+param['data'][1].toFixed(2)+"</span><br/>"); label.push("<span>close:&nbsp;"+param['data'][2].toFixed(2)+"</span><br/>"); label.push("<span>high:&nbsp;"+param['data'][4].toFixed(2)+"</span><br/>"); label.push("<span>low:&nbsp;"+param['data'][3].toFixed(2)+"</span><br/>"); }else if(typeof(param['value'])=='number'){ if (param['value']%1==0){ label.push("<span>"+param['value'].toFixed(0)+"</span><br/>"); }else{ label.push("<span>"+param['value'].toFixed(2)+"</span><br/>"); } }else if(param['value']){ label.push("<div style='max-width:15em;word-break:break-all;white-space: normal;'>"+value+"</div>"); 
}else{ label.push("<br/>"); } var cardStr= label.join(''); labels.push(cardStr); } return labels.join(''); }"""), 'textStyle': {'color': '#000'}, 'position': Js(""" function (pos, params, el, elRect, size){ var obj = {top: 10}; obj[['left', 'right'][+(pos[0] < size.viewSize[0] / 2)]] = 30; return obj; } """) }, 'axisPointer': { 'link': {'xAxisIndex': 'all'}, 'label': {'backgroundColor': '#777'} }, 'grid': [ {'left': left_padding, 'right': right_padding, 'height': '70%'}, {'left': left_padding, 'right': right_padding, 'top': '71%', 'height': '16%'} ], 'xAxis': [ { 'type': 'category', 'data': df[time_field].tolist(), 'scale': True, 'boundaryGap': False, 'axisLine': {'show': False}, 'axisLabel': {'show': False}, 'axisTick': {'show': False}, 'splitLine': {'show': True}, 'splitNumber': 20, 'min': 'dataMin', 'max': 'dataMax', 'axisPointer': { 'z': 100 } }, { 'type': 'category', 'gridIndex': 1, 'data': df[time_field].tolist(), 'scale': True, 'boundaryGap': False, 'axisLine': {'onZero': False, 'show': True}, 'axisLine': {'show': True}, 'axisLabel': {'show': True}, 'axisTick': {'show': True}, 'splitLine': {'show': True}, 'axisLabel': {'show': True}, 'splitNumber': 20, 'min': 'dataMin', 'max': 'dataMax' } ], 'yAxis': [ { 'scale': True, 'type': 'log' if log_y else 'value', 'logBase': 1.1, 'splitNumber': 10, 'axisLabel': {'show': True, 'formatter': Js(""" function(value,index){ return value.toFixed(2); } """)}, 'axisLine': {'show': False}, 'axisTick': {'show': True}, 'splitLine': {'show': True} }, { 'scale': True, 'gridIndex': 1, 'splitNumber': 2, 'axisLabel': {'show': True, 'formatter': Js(""" function(value,index){ var si = [ { value: 1, symbol: "" }, { value: 1E3, symbol: "K" }, { value: 1E6, symbol: "M" }, { value: 1E9, symbol: "G" }, { value: 1E12, symbol: "T" }, { value: 1E15, symbol: "P" }, { value: 1E18, symbol: "E" } ]; var rx = /\.0+$|(\.[0-9]*[1-9])0+$/; var i; for (i = si.length - 1; i > 0; i--) { if (value >= si[i].value) { break; } } return (value / si[i].value).toFixed(2).replace(rx, "$1") + si[i].symbol; } """) }, 'axisLine': {'show': False}, 'axisTick': {'show': False}, 'splitLine': {'show': False} } ], 'dataZoom': [ { 'type': 'inside', 'xAxisIndex': [0, 1], 'start': 0, 'end': 100 } ], 'series': [ { 'name': title, 'type': 'candlestick', 'data': df[[open_field, close_field, low_field, high_field]].values.tolist(), 'emphasis': { 'itemStyle': { 'borderColor': "#333", 'borderWidth': 1, 'shadowColor': 'rgba(0, 0, 0, 0.5)', 'shadowBlur': 15 } } }, { 'name': 'Volume', 'type': 'bar', 'xAxisIndex': 1, 'yAxisIndex': 1, 'data': bar_items, 'emphasis': { 'itemStyle': { 'borderColor': "#333", 'borderWidth': 1, 'shadowColor': 'rgba(0, 0, 0, 0.5)', 'shadowBlur': 15 } } } ] } for ma_len in mas: name = "MA" + str(ma_len) df[name] = df[close_field].rolling(ma_len).mean().round(2) series_ma = { 'name': name, 'type': 'line', 'data': df[name].tolist(), 'smooth': True, 'showSymbol': False, 'lineStyle': {'opacity': 0.5} } options['series'].append(series_ma) options['legend']['data'].append(name) return Echarts(options=options, width=width, height=height)
13,073
def select_sounder_hac(path_sounder, sounder):
    """
    Return the indices for a sounder in a hac file (path_sounder), i.e. the
    matching sounder and transducer indexes.

    inputs:
        path_sounder: path of the hac file to analyse
        sounder: name of the transducer
    outputs:
        index of the sounder and of the transducer
    """
    list_sounder = util.hac_sounder_descr(FileName=path_sounder)
    list_st = [
        [
            list_sounder.GetSounder(isdr).GetTransducer(itsd).m_transName
            for itsd in range(list_sounder.GetSounder(isdr).m_numberOfTransducer)
        ]
        for isdr in range(list_sounder.GetNbSounder())
    ]
    for i in range(len(list_st)):
        for j in range(len(list_st[i])):
            if list_st[i][j] == sounder:
                return i, j
    return None
13,074
def check_configured_topic(host, topic_configuration, topic_name,
                           kafka_servers, deleted_options=None):
    """
    Test if topic configuration is what was defined
    """
    # Forcing api_version to 0.11.0 in order to be sure that a
    # Metadata_v1 is sent (so that we get the controller info)
    kafka_client = KafkaManager(
        bootstrap_servers=kafka_servers,
        api_version=(0, 11, 0)
    )

    if deleted_options is None:
        deleted_options = {}

    try:
        if topic_configuration['state'] == 'present':
            assert topic_name in kafka_client.get_topics()

            partitions = \
                kafka_client.get_total_partitions_for_topic(topic_name)
            assert partitions == topic_configuration['partitions']

            ite = kafka_client.get_partitions_metadata_for_topic(topic_name)
            for _, metadata in six.iteritems(ite):
                tot_replica = len(metadata.replicas)
                assert tot_replica == topic_configuration['replica_factor']

            for key, value in six.iteritems(topic_configuration['options']):
                config = kafka_client.get_config_for_topic(topic_name, [key])
                assert str(config) == str(value)

            for key, value in six.iteritems(deleted_options):
                config = kafka_client.get_config_for_topic(topic_name, key)
                assert str(config) != str(value)
        else:
            assert topic_name not in kafka_client.get_topics()
    finally:
        kafka_client.close()
13,075
def upvote_checklist(request, checklist_id):
    # for "messages", refer https://stackoverflow.com/a/61603003/6543250
    """if the user should not be able to retract an upvote, then this code
    can be uncommented:

    if Upvote.objects.filter(user=User.objects.filter(username=username).first(),
                             checklist=Checklist.objects.get(id=checklist_id)):
        msg = 'You have already upvoted the checklist once!'
        messages.info(request, msg)
    """
    """
    Note: notifications are recorded only when a user upvotes the checklist,
    not on downvote, in order to promote healthy behaviour and not let the
    author be inundated with downvote notifs in case some user decides to
    harass the author.
    """
    if Checklist.objects.get(id=checklist_id).author == request.user:
        msg = "Action Denied! You cannot upvote your own checklist!"
        messages.error(request, msg)
    else:
        # remove user's upvote if he has already upvoted
        obj = Upvote.objects.filter(
            user=request.user, checklist=Checklist.objects.get(id=checklist_id)
        )
        msg = ""
        if obj:
            obj.delete()
            msg = "Upvote retracted!"
        else:
            upvote_obj = Upvote(
                user=request.user,
                checklist=Checklist.objects.get(id=checklist_id),
            )
            upvote_obj.save()
            msg = "Checklist upvoted!"

            # also update notifications table so relevant notif can be shown to author
            fromUser = request.user
            toUser = Checklist.objects.get(id=checklist_id).author
            Notification(
                fromUser=fromUser,
                toUser=toUser,
                notif_type=1,
                checklist=Checklist.objects.get(id=checklist_id),
            ).save()

        messages.success(request, msg)

    if request.META.get("HTTP_REFERER"):
        if "login" in request.META.get("HTTP_REFERER") and "next" in request.META.get(
            "HTTP_REFERER"
        ):
            return redirect("checklist-home")

    # redirect to home url; simply reload the page
    return redirect(request.META.get("HTTP_REFERER", "checklist-home"))
13,076
def index(request):
    """Search page."""
    ctx = {}
    Advert_1 = Advert.objects.get(advert_num=1)  # advert no. 1
    Advert_2 = Advert.objects.get(advert_num=2)  # advert no. 2
    ctx['Adverturl1'] = Advert_1.advert_url
    ctx['Adverturl2'] = Advert_2.advert_url
    ctx['Advertimg1'] = '/advert/' + str(Advert_1.img)
    ctx['Advertimg2'] = '/advert/' + str(Advert_2.img)
    return render(request, 'srsys/index.html', ctx)
13,077
def load_amazon():
    """
    Load the Amazon reviews dataset from data/amazon.txt, TF-IDF vectorize
    the text, and return train/validation/test splits.
    """
    df = pd.read_csv('data/amazon.txt', header=None, delimiter='\t')
    X_data = df[0].tolist()
    y_data = df[1].tolist()

    print('Preprocessing...')

    vectorizer = TfidfVectorizer(strip_accents='unicode', lowercase=True,
                                 stop_words='english', ngram_range=(1, 2),
                                 max_df=0.5, min_df=5, max_features=20000,
                                 norm='l2', use_idf=True, smooth_idf=True,
                                 sublinear_tf=False)
    vectorizer.fit(X_data)
    X_data = vectorizer.transform(X_data)

    X_train, X_test, y_train, y_test = train_test_split(X_data, y_data,
                                                        test_size=0.1,
                                                        random_state=0)
    X_train, X_val, y_train, y_val = train_test_split(X_train, y_train,
                                                      test_size=0.2,
                                                      random_state=0)

    return X_train, y_train, X_val, y_val, X_test, y_test
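# Usage sketch, assuming data/amazon.txt exists in the tab-separated
# text/label format the loader expects.
X_train, y_train, X_val, y_val, X_test, y_test = load_amazon()
print(X_train.shape)  # (n_train_samples, 20000) sparse TF-IDF matrix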
13,078
def get_switch_filters( switch_id, exception_when_missing=True, user=None, session=None, **kwargs ): """get filters of a switch.""" return _get_switch( switch_id, session=session, exception_when_missing=exception_when_missing )
13,079
def generalized_zielonka_with_psolC(g): """ Zielonka's algorithm with psolC partial solver. :param g: the game to solve. :return: the solution in the following format : (W_0, W_1). """ return generalized_parity_solver_with_partial(g, psolC_gen.psolC_generalized)
13,080
def plot_matrix(mat, figsize=(7, 4), draw_cbar=True, vmin=0, vmax=1, cmap=None):
    """
    Wrapper for plotting a matrix of probabilities. Values in [-1, 1] are
    rescaled to [0, 1] before plotting.
    """
    if np.any(mat < 0):
        print('rescaling matrix to probabilities')
        mat = .5 * (mat + 1)
    try:
        import seaborn as sns
        if cmap is None:
            cmap = sns.cubehelix_palette(
                4, start=2, dark=0, light=1, reverse=False, as_cmap=True)
        sns.set_style("whitegrid", {'axes.grid': False})
    except ImportError:
        cmap = 'gray_r'
    import matplotlib.pyplot as plt
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(111)
    cax = ax.imshow(mat, aspect='auto', cmap=cmap, vmin=vmin, vmax=vmax,
                    origin='upper')
    if draw_cbar:
        fig.colorbar(cax, orientation='vertical')
    return fig, ax
    # ax.set_yticks([])
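# A short usage example: plot a random matrix of probabilities.
import numpy as np

mat = np.random.rand(10, 20)  # values already in [0, 1]
fig, ax = plot_matrix(mat, figsize=(6, 3))
fig.savefig('probabilities.png')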
13,081
def dump_txt(records: Iterable[Record], file: TextIO) -> None:
    """Saves AnimData records to a tabbed text file.

    :param records: Iterable of `Record`s to write to the `file`.
    :param file: A text file object, or any object with a `write()` method.
        If `file` is a file object, it should be opened with `newline=''`.
    """
    writer = csv.writer(file, dialect="excel-tab")
    writer.writerow(
        [
            "CofName",
            "FramesPerDirection",
            "AnimationSpeed",
            *(f"FrameData{frame:03}" for frame in range(FRAME_MAX)),
        ]
    )
    for record in records:
        writer.writerow(
            [
                record.cof_name,
                record.frames_per_direction,
                record.animation_speed,
                *record.triggers.to_codes(),
            ]
        )
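# A minimal usage sketch, assuming `records` is an iterable of Record
# objects as documented above; note the newline='' the docstring requires
# for csv writers.
with open('animdata.txt', 'w', newline='') as f:
    dump_txt(records, f)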
13,082
def read_properties_core(xml_source): """Read assorted file properties.""" properties = DocumentProperties() root = fromstring(xml_source) creator_node = root.find(QName(NAMESPACES['dc'], 'creator').text) if creator_node is not None: properties.creator = creator_node.text else: properties.creator = '' last_modified_by_node = root.find( QName(NAMESPACES['cp'], 'lastModifiedBy').text) if last_modified_by_node is not None: properties.last_modified_by = last_modified_by_node.text else: properties.last_modified_by = '' created_node = root.find(QName(NAMESPACES['dcterms'], 'created').text) if created_node is not None: properties.created = W3CDTF_to_datetime(created_node.text) else: properties.created = datetime.datetime.now() modified_node = root.find(QName(NAMESPACES['dcterms'], 'modified').text) if modified_node is not None: properties.modified = W3CDTF_to_datetime(modified_node.text) else: properties.modified = properties.created return properties
13,083
def batch_decode(loc, priors, variances): """Decode locations from predictions using priors to undo the encoding we did for offset regression at train time. Args: loc (tensor): location predictions for loc layers, Shape: [num_priors,4] priors (tensor): Prior boxes in center-offset form. Shape: [num_priors,4]. variances: (list[float]) Variances of priorboxes Return: decoded bounding box predictions """ boxes = torch.cat(( priors[:, :, :2] + loc[:, :, :2] * variances[0] * priors[:, :, 2:], priors[:, :, 2:] * torch.exp(loc[:, :, 2:] * variances[1])), 2) boxes[:, :, :2] -= boxes[:, :, 2:] / 2 boxes[:, :, 2:] += boxes[:, :, :2] return boxes
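# A small worked example with dummy tensors: a batch of 2 images with 3
# priors each; the variances follow the common SSD convention of [0.1, 0.2].
# With zero offsets, the decoded boxes are just the priors converted from
# center-size form to corner form.
import torch

loc = torch.zeros(2, 3, 4)
priors = torch.tensor([[[0.5, 0.5, 0.2, 0.2]] * 3] * 2)
boxes = batch_decode(loc, priors, variances=[0.1, 0.2])
print(boxes[0, 0])  # tensor([0.4000, 0.4000, 0.6000, 0.6000])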
13,084
def __save_image_file__(img, file_name, output_path, wmode):
    """
    Saves the PIL image to a file
    :param img: PIL image
    :param file_name: File name
    :param output_path: Output path
    :param wmode: Work mode
    """
    # create the output directory if it doesn't exist
    out_dir = os.path.dirname(output_path)
    if out_dir != '':
        os.makedirs(out_dir, exist_ok=True)
    if wmode == "file":
        file_name_out = os.path.basename(output_path)
        if file_name_out == '':
            # Change file extension to png
            file_name = os.path.splitext(file_name)[0] + '.png'
            # Save image
            img.save(os.path.join(output_path, file_name))
        else:
            try:
                # Save image
                img.save(output_path)
            except OSError as e:
                if str(e) == "cannot write mode RGBA as JPEG":
                    raise OSError("Error! "
                                  "Please indicate the correct extension of the final file, "
                                  "for example: .png")
                else:
                    raise e
    else:
        # Change file extension to png
        file_name = os.path.splitext(file_name)[0] + '.png'
        # Save image
        img.save(os.path.join(output_path, file_name))
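# Usage sketch, assuming Pillow is available; any wmode other than "file"
# writes a .png named after file_name into output_path.
from PIL import Image

img = Image.new('RGBA', (64, 64))
__save_image_file__(img, 'photo.jpg', 'out/', 'folder')  # saves out/photo.png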
13,085
def generate_app():
    """Builds the app from the Appear config in the current working directory."""
    click.echo(click.style(f'Building app in {os.getcwd()}', bg='green'))

    config = load_appear_config()
    if config is False:
        exit(1)
13,086
async def test_service_unload(hass): """Verify service unload works.""" hass.data[deconz.services.DECONZ_SERVICES] = True with patch( "homeassistant.core.ServiceRegistry.async_remove", return_value=Mock(True) ) as async_remove: await deconz.services.async_unload_services(hass) assert hass.data[deconz.services.DECONZ_SERVICES] is False assert async_remove.call_count == 2
13,087
def _step2_macs_seq(configs):
    """Step 2: run MACS when the raw data type is seq, using the output
    from step 1.
    """
    # check the input
    t_rep_files = configs["samtools.treat_output_replicates"]
    t_comb_file = configs["samtools.treat_output"]
    c_comb_file = configs["samtools.control_output"]

    macs_genome_option = " -g " + configs["sample.species"] + " "

    # run MACS, first for each replicate
    for i in range(1, configs["data.number_replicates"] + 1):
        if configs["data.has_control"]:
            # run MACS w/ control
            command_line = configs["macs.macs_main"] + macs_genome_option + " -w -S -t " + t_rep_files[i - 1] + " -c " + c_comb_file + " -n " + configs["sample.sample_id"] + "_rep" + str(i)
            run_cmd(command_line)
        else:
            # run MACS w/o control
            command_line = configs["macs.macs_main"] + macs_genome_option + " -w -S -t " + t_rep_files[i - 1] + " -n " + configs["sample.sample_id"] + "_rep" + str(i)
            run_cmd(command_line)
        # copy out and rename the wiggle file
        command_line = "zcat " + configs["sample.sample_id"] + "_rep" + str(i) + "_MACS_wiggle/treat/" + configs["sample.sample_id"] + "_rep" + str(i) + "_treat_afterfiting_all.wig.gz > " + configs["macs.output_treat_wig_replicates"][i - 1]
        run_cmd(command_line)

    # run MACS for the combined treatment
    if configs["data.number_replicates"] == 1:
        # no need to run MACS again, simply copy the previous results
        command_line = "cp " + configs["sample.sample_id"] + "_rep1_peaks.xls" + " " + configs["macs.output_xls"]
        run_cmd(command_line)
        command_line = "cp " + configs["sample.sample_id"] + "_rep1_peaks.bed" + " " + configs["macs.output_bed"]
        run_cmd(command_line)
        command_line = "cp " + configs["sample.sample_id"] + "_rep1_summits.bed" + " " + configs["macs.output_summits"]
        run_cmd(command_line)
        command_line = "cp " + configs["macs.output_treat_wig_replicates"][0] + " " + configs["macs.output_treat_wig"]
        run_cmd(command_line)
        if configs["data.has_control"]:
            command_line = "zcat " + configs["sample.sample_id"] + "_rep1_MACS_wiggle/control/" + configs["sample.sample_id"] + "_rep1_control_afterfiting_all.wig.gz > " + configs["macs.output_control_wig"]
            run_cmd(command_line)
    else:
        # run MACS on the combined alignment files
        if configs["data.has_control"]:
            command_line = configs["macs.macs_main"] + macs_genome_option + " -w -S -t " + t_comb_file + " -c " + c_comb_file + " -n " + configs["sample.sample_id"]
            run_cmd(command_line)
            # copy out and rename the wiggle files
            command_line = "zcat " + configs["sample.sample_id"] + "_MACS_wiggle/treat/" + configs["sample.sample_id"] + "_treat_afterfiting_all.wig.gz > " + configs["macs.output_treat_wig"]
            run_cmd(command_line)
            command_line = "zcat " + configs["sample.sample_id"] + "_MACS_wiggle/control/" + configs["sample.sample_id"] + "_control_afterfiting_all.wig.gz > " + configs["macs.output_control_wig"]
            run_cmd(command_line)
        else:
            command_line = configs["macs.macs_main"] + macs_genome_option + " -w -S -t " + t_comb_file + " -n " + configs["sample.sample_id"]
            run_cmd(command_line)
            # copy out and rename the wiggle file
            command_line = "zcat " + configs["sample.sample_id"] + "_MACS_wiggle/treat/" + configs["sample.sample_id"] + "_treat_afterfiting_all.wig.gz > " + configs["macs.output_treat_wig"]
            run_cmd(command_line)

    return True
13,088
def menu_entry_to_db(entry): """ Converts a MenuEntry into Meal, Menu, and MenuItem objects which are stored in the database. """ menu, _ = Menu.objects.get_or_create(date=entry.date) meal = Meal.objects.create(meal_type=entry.meal_type, vendor=entry.vendor) for item_name in entry.items: item, _ = MenuItem.objects.get_or_create(name=item_name) meal.items.add(item) if entry.meal_type == 'L': if menu.lunch: menu.lunch.delete() menu.lunch = meal if entry.meal_type == 'D': if menu.dinner: menu.dinner.delete() menu.dinner = meal menu.save() return menu
13,089
def get_device_of(tensor: torch.Tensor) -> int: """ Returns the device of the tensor. """ if not tensor.is_cuda: return -1 else: return tensor.get_device()
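# Example: CPU tensors report device -1; CUDA tensors report their index.
t = torch.zeros(3)
assert get_device_of(t) == -1
# On a machine with a GPU, get_device_of(t.cuda()) would return 0.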
13,090
def test_script_task(scheduler: Scheduler) -> None: """ Tasks should be definable as shell scripts. """ @task(script=True) def task1(message): return """echo Hello, {message}!""".format(message=message) assert scheduler.run(task1("World")) == b"Hello, World!\n"
13,091
def initialize(indexCallback=None): """ @param indexCallback: A function which is called when eSpeak reaches an index. It is called with one argument: the number of the index or C{None} when speech stops. """ global espeakDLL, bgThread, bgQueue, player, onIndexReached espeakDLL = cdll.LoadLibrary(os.path.join(globalVars.appDir, "synthDrivers", "espeak.dll")) espeakDLL.espeak_Info.restype=c_char_p espeakDLL.espeak_Synth.errcheck=espeak_errcheck espeakDLL.espeak_SetVoiceByName.errcheck=espeak_errcheck espeakDLL.espeak_SetVoiceByProperties.errcheck=espeak_errcheck espeakDLL.espeak_SetParameter.errcheck=espeak_errcheck espeakDLL.espeak_Terminate.errcheck=espeak_errcheck espeakDLL.espeak_ListVoices.restype=POINTER(POINTER(espeak_VOICE)) espeakDLL.espeak_GetCurrentVoice.restype=POINTER(espeak_VOICE) espeakDLL.espeak_SetVoiceByName.argtypes=(c_char_p,) eSpeakPath = os.path.join(globalVars.appDir, "synthDrivers") sampleRate = espeakDLL.espeak_Initialize( AUDIO_OUTPUT_SYNCHRONOUS, 300, os.fsencode(eSpeakPath), # #10607: ensure espeak does not exit NVDA's process on errors such as the espeak path being invalid. espeakINITIALIZE_DONT_EXIT ) if sampleRate <= 0: raise OSError(f"espeak_Initialize failed with code {sampleRate}. Given Espeak data path of {eSpeakPath}") player = nvwave.WavePlayer( channels=1, samplesPerSec=sampleRate, bitsPerSample=16, outputDevice=config.conf["speech"]["outputDevice"], buffered=True ) onIndexReached = indexCallback espeakDLL.espeak_SetSynthCallback(callback) bgQueue = queue.Queue() bgThread=BgThread() bgThread.start()
13,092
def __yaml_tag_test(*args, **kwargs): """YAML tag constructor for testing only""" import copy return copy.deepcopy(args), copy.deepcopy(kwargs)
13,093
def _create_fake_bids_dataset(base_dir='', n_sub=10, n_ses=2, tasks=['localizer', 'main'], n_runs=[1, 3], with_derivatives=True, with_confounds=True, no_session=False): """Creates a fake bids dataset directory with dummy files. Returns fake dataset directory name. Parameters ---------- base_dir: string (Absolute path), optional Absolute directory path in which to create the fake BIDS dataset dir. Default: Current directory. n_sub: int, optional Number of subject to be simulated in the dataset. Default: 10 n_ses: int, optional Number of sessions to be simulated in the dataset. Ignored if no_session=True. Default: 2 n_runs: List[int], optional Default: [1, 3] with_derivatives: bool, optional In the case derivatives are included, they come with two spaces and descriptions. Spaces are 'MNI' and 'T1w'. Descriptions are 'preproc' and 'fmriprep'. Only space 'T1w' include both descriptions. Default: True with_confounds: bool, optional Default: True no_session: bool, optional Specifying no_sessions will only produce runs and files without the optional session field. In this case n_ses will be ignored. Default: False Returns ------- dataset directory name: string 'bids_dataset' Creates ------- Directory with dummy files """ bids_path = os.path.join(base_dir, 'bids_dataset') os.makedirs(bids_path) # Create surface bids dataset open(os.path.join(bids_path, 'README.txt'), 'w') vox = 4 created_sessions = ['ses-%02d' % label for label in range(1, n_ses + 1)] if no_session: created_sessions = [''] for subject in ['sub-%02d' % label for label in range(1, n_sub + 1)]: for session in created_sessions: subses_dir = os.path.join(bids_path, subject, session) if session == 'ses-01' or session == '': anat_path = os.path.join(subses_dir, 'anat') os.makedirs(anat_path) anat_file = os.path.join(anat_path, subject + '_T1w.nii.gz') open(anat_file, 'w') func_path = os.path.join(subses_dir, 'func') os.makedirs(func_path) for task, n_run in zip(tasks, n_runs): run_labels = [ 'run-%02d' % label for label in range(1, n_run + 1)] for run in run_labels: fields = [subject, session, 'task-' + task] if '' in fields: fields.remove('') file_id = '_'.join(fields) if n_run > 1: file_id += '_' + run bold_path = os.path.join(func_path, file_id + '_bold.nii.gz') _write_fake_bold_img(bold_path, [vox, vox, vox, 100]) events_path = os.path.join(func_path, file_id + '_events.tsv') _basic_paradigm().to_csv(events_path, sep='\t', index=None) param_path = os.path.join(func_path, file_id + '_bold.json') with open(param_path, 'w') as param_file: json.dump({'RepetitionTime': 1.5}, param_file) # Create derivatives files if with_derivatives: bids_path = os.path.join(base_dir, 'bids_dataset', 'derivatives') os.makedirs(bids_path) for subject in ['sub-%02d' % label for label in range(1, 11)]: for session in created_sessions: subses_dir = os.path.join(bids_path, subject, session) func_path = os.path.join(subses_dir, 'func') os.makedirs(func_path) for task, n_run in zip(tasks, n_runs): for run in ['run-%02d' % label for label in range(1, n_run + 1) ]: fields = [subject, session, 'task-' + task] if '' in fields: fields.remove('') file_id = '_'.join(fields) if n_run > 1: file_id += '_' + run preproc = (file_id + '_space-MNI_desc-preproc_bold.nii.gz' ) preproc_path = os.path.join(func_path, preproc) _write_fake_bold_img(preproc_path, [vox, vox, vox, 100] ) preproc = (file_id + '_space-T1w_desc-preproc_bold.nii.gz' ) preproc_path = os.path.join(func_path, preproc) _write_fake_bold_img(preproc_path, [vox, vox, vox, 100] ) preproc = (file_id + 
                            '_space-T1w_desc-fmriprep_bold.nii.gz')
                        preproc_path = os.path.join(func_path, preproc)
                        _write_fake_bold_img(preproc_path,
                                             [vox, vox, vox, 100])
                        if with_confounds:
                            confounds_path = os.path.join(
                                func_path,
                                file_id + '_desc-confounds_regressors.tsv',
                            )
                            _basic_confounds(100).to_csv(confounds_path,
                                                         sep='\t',
                                                         index=None)

    return 'bids_dataset'
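# A minimal usage sketch: build the fake dataset inside a temporary
# directory so the dummy files are cleaned up automatically.
import tempfile

with tempfile.TemporaryDirectory() as tmp_dir:
    dataset_name = _create_fake_bids_dataset(base_dir=tmp_dir, n_sub=2,
                                             n_ses=1, n_runs=[1, 1])
    dataset_dir = os.path.join(tmp_dir, dataset_name)  # .../bids_dataset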
13,094
def format_data_for_training(data): """ Create numpy array with planet features ready to feed to the neural net. :param data: parsed features :return: numpy array of shape (number of frames, PLANET_MAX_NUM, PER_PLANET_FEATURES) """ training_input = [] training_output = [] for d in data: features, expected_output = d if len(expected_output.values()) == 0: continue features_matrix = [] for planet_id in range(PLANET_MAX_NUM): if str(planet_id) in features: features_matrix.append(features[str(planet_id)]) else: features_matrix.append([0] * PER_PLANET_FEATURES) fm = np.array(features_matrix) output = [0] * PLANET_MAX_NUM for planet_id, p in expected_output.items(): output[int(planet_id)] = p result = np.array(output) training_input.append(fm) training_output.append(result) return np.array(training_input), np.array(training_output)
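# A small worked example, assuming PLANET_MAX_NUM and PER_PLANET_FEATURES
# are the module-level constants used above: one frame with features and an
# expected allocation for planet 0 only.
fake_frame = (
    {'0': [0.0] * PER_PLANET_FEATURES},  # features keyed by planet id
    {'0': 1.0},                          # expected output keyed by planet id
)
train_in, train_out = format_data_for_training([fake_frame])
print(train_in.shape)   # (1, PLANET_MAX_NUM, PER_PLANET_FEATURES)
print(train_out.shape)  # (1, PLANET_MAX_NUM)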
13,095
def idcardcert(appcode, card_no):
    """
    ID card real-name authentication: consistency check of the two
    ID-card elements.
    """
    host = 'http://idquery.market.alicloudapi.com'
    path = '/idcard/query'
    # method = 'GET'
    querys = 'number=%s' % card_no
    # bodys = {}
    url = host + path + '?' + querys

    try:
        request = urllib.request.Request(url)
        request.add_header('Authorization', 'APPCODE ' + appcode)
        response = urllib.request.urlopen(request)
        content = response.read()
        if content:
            return json.loads(content.decode("unicode-escape"))
        return content
    except BaseException:
        return None
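# A hedged usage sketch; the AppCode and ID number below are placeholders,
# not real credentials or personal data, and the call requires access to
# the Alibaba Cloud market API.
result = idcardcert('your-appcode-here', '110101199003070000')
if result is not None:
    print(result)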
13,096
def custom_error_exception(error=None, exception=None): """Define custom exceptions for MySQL server errors This function defines custom exceptions for MySQL server errors and returns the current set customizations. If error is a MySQL Server error number, then you have to pass also the exception class. The error argument can also be a dictionary in which case the key is the server error number, and value the exception to be raised. If none of the arguments are given, then custom_error_exception() will simply return the current set customizations. To reset the customizations, simply supply an empty dictionary. Examples: import mysql.connector from mysql.connector import errorcode # Server error 1028 should raise a DatabaseError mysql.connector.custom_error_exception( 1028, mysql.connector.DatabaseError) # Or using a dictionary: mysql.connector.custom_error_exception({ 1028: mysql.connector.DatabaseError, 1029: mysql.connector.OperationalError, }) # Reset mysql.connector.custom_error_exception({}) Returns a dictionary. """ global _CUSTOM_ERROR_EXCEPTIONS if isinstance(error, dict) and not len(error): _CUSTOM_ERROR_EXCEPTIONS = {} return _CUSTOM_ERROR_EXCEPTIONS if not error and not exception: return _CUSTOM_ERROR_EXCEPTIONS if not isinstance(error, (int, dict)): raise ValueError( "The error argument should be either an integer or dictionary") if isinstance(error, int): error = { error: exception } for errno, exception in error.items(): if not isinstance(errno, int): raise ValueError("error number should be an integer") try: if not issubclass(exception, Exception): raise TypeError except TypeError: raise ValueError("exception should be subclass of Exception") _CUSTOM_ERROR_EXCEPTIONS[errno] = exception return _CUSTOM_ERROR_EXCEPTIONS
13,097
def cal_md5(content):
    """
    Compute the MD5 hex digest of the content string.
    :param content: string to hash
    :return: MD5 hex digest string
    """
    # encode the string to bytes before hashing
    result = hashlib.md5(content.encode())
    # hex digest of the hash
    md5 = result.hexdigest()
    return md5
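# Quick sanity check: the MD5 of the ASCII string "hello" is well known.
assert cal_md5('hello') == '5d41402abc4b2a76b9719d911017c592'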
13,098
def get_arkouda_server_info_file(): """ Returns the name of a file to store connection information for the server. Defaults to ARKOUDA_HOME + ak-server-info, but can be overridden with ARKOUDA_SERVER_CONNECTION_INFO :return: server connection info file name as a string :rtype: str """ dflt = os.path.join(get_arkouda_home(), 'ak-server-info') return os.getenv('ARKOUDA_SERVER_CONNECTION_INFO', dflt)
13,099