content: string (lengths 35 to 416k)
sha1: string (length 40)
id: int64 (0 to 710k)
def nameFormat(name):
    """Edits the name of the column so that it is properly formatted with
    a space between the words, and each word capitalized."""
    space = name.find("l") + 1
    firsthalf = name[:space]
    secondhalf = name[space:]
    name = firsthalf.capitalize() + " " + secondhalf.capitalize()
    return name
d93b54d6a18347aeb32657c1f8880965d01db7f2
12,585
def invert_cd_path(graph, path, c, d):
    """
    Switch the colors of the edges on the cd-path: c to d and d to c.

    :param graph: nx.Graph(); each edge should have an attribute "color"
    :param path: nx.Graph() representing cd-path
    :param c: integer smaller than the degree of "graph" or None; represents a color
    :param d: integer smaller than the degree of "graph" or None; represents a color
    :return: graph with switched colors
    """
    for edge in path.edges:
        current_color = graph.get_edge_data(*edge)["color"]
        if current_color == c:
            graph.add_edge(*edge, color=d)
        if current_color == d:
            graph.add_edge(*edge, color=c)
    return graph
53455f3e442a10ee403d499fe9c5b6f2e86a6e7f
12,586
def observe_state(obsAction, oppID, oppMood, stateMode):
    """Keeping this as a separate method in case we want to manipulate the
    observation somehow, like with noise (x chance we make an observational
    mistake, etc)."""
    # print('oppmood', oppMood)
    state = []
    if stateMode == 'stateless':
        state.append(obsAction)
    elif stateMode == 'agentstate':
        state.append(obsAction)
        state.append(oppID)
    elif stateMode == 'moodstate':
        state.append(obsAction)
        state.append(oppID)
        state.append(oppMood)
    # Returns a list, but this should be utilised as a tuple when used to key a Q value
    # print('state:', state)
    return state
ad3417a724982679a8abf653b456a00c4129ef48
12,587
def pascal_triangle(n):
    """Returns a list of lists of integers representing Pascal's triangle of n."""
    if n <= 0:
        return []
    res = []
    l = []
    for x in range(n):
        row = []
        for y in range(x + 1):
            if x == 0 or y == 0 or x == y:
                row.append(1)
            else:
                row.append(l[y] + l[y - 1])
        l = row
        res.append(row)
    return res
49f0474083cd347177bbe963af811fbe6a8a15d8
12,588
def _initialise_notable_eids(db):
    """Returns set of eids corresponding to "notable" entities."""
    rows = db.query("""
        SELECT eid FROM entity_flags
        WHERE political_entity=TRUE;
    """)
    notable_eids = set(row["eid"] for row in rows)
    print('[OK] Received %d notable eIDs.' % len(notable_eids))
    return notable_eids
c11306395937fc58dadbd3e9d129f7f6d7b4b576
12,589
def get_vector22():
    """Return the vector with ID 22."""
    return [
        0.50000000,
        0.25000000,
        0.25000000,
    ]
20fcd0ccb42dad295c5b09e85c9071bbe6413512
12,590
def ldm(self, operand: int) -> int:
    """
    Name:           Load Accumulator Immediate.
    Function:       The 4 bits of immediate data are loaded into the accumulator.
    Syntax:         LDM <value>
    Assembled:      1101 <DDDD>
    Symbolic:       DDDD --> ACC
    Execution:      1 word, 8-bit code and an execution time of 10.8 usec.
    Side-effects:   The carry bit is not affected.
    """
    self.ACCUMULATOR = operand
    self.increment_pc(1)
    return self.ACCUMULATOR
ee76ddd7e14dbc6cc03d586a82421bbccc38d83c
12,591
def _bytes_chr_py3(i):
    """
    Returns a byte string of length 1 whose ordinal value is i in Python 3.

    Do not call directly, use bytes_chr instead.
    """
    return bytes([i])
b6cd1e5e7214d02c29a594e639fbddac901a1362
12,593
def linux_provisioners(**kwargs):
    """Linux specific provisioners."""
    # Setup vars from kwargs
    vagrant_box = kwargs['data']['vagrant_box']
    build_scripts_dir = kwargs['data']['build_scripts_dir']
    template = kwargs['data']['template']

    scripts = [
        f'{build_scripts_dir}/base.sh',
        f'{build_scripts_dir}/virtualbox.sh',
        f'{build_scripts_dir}/vmware.sh',
        f'{build_scripts_dir}/cleanup.sh',
        f'{build_scripts_dir}/zerodisk.sh'
    ]
    if vagrant_box:
        scripts.insert(3, f'{build_scripts_dir}/vagrant.sh')

    provisioner_spec = {
        'type': 'shell',
        'scripts': scripts
    }
    template['provisioners'].append(provisioner_spec)
    return template
7627205c94ca8c1d445a8d8af3fd7fb7e062226c
12,594
def guess_type(v, consumer='python'):
    """Guess the type of a value (None, int, float or string) for different
    types of consumers (Python, SQLite etc.).

    For Python, use isinstance() to check for example if a number is an integer.

    >>> guess_type('1')
    1
    >>> guess_type('1', 'sqlite')
    'integer'
    >>> guess_type('1.0')
    1.0
    >>> guess_type('1.0', 'sqlite')
    'real'
    >>> guess_type('abc')
    'abc'
    >>> guess_type('abc', 'sqlite')
    'text'
    >>>
    >>> value_type = lib.base3.guess_type(value)
    >>> if isinstance(value_type, int) or isinstance(value_type, float):
    >>>     ...
    """
    if consumer == 'python':
        if v is None:
            return None
        try:
            return int(v)
        except ValueError:
            try:
                return float(v)
            except ValueError:
                return str(v)

    if consumer == 'sqlite':
        if v is None:
            return 'string'
        try:
            int(v)
            return 'integer'
        except ValueError:
            try:
                float(v)
                return 'real'
            except ValueError:
                return 'text'
a9361d5ce7f070f09dca267afb4ffcae866040eb
12,595
import torch


def array_from_skew_matrix(x, device=torch.device("cpu"), dtype=torch.float32):
    """
    Receives a skew matrix and returns its associated 3-element vector (array).

    :param x: Skew matrix (3x3)
    :param device: Device to allocate new tensors. Default: torch.device("cpu").
    :param dtype: Data type for new tensors. Default: torch.float32.
    :return: Associated array (3-element).
    """
    # We are only slicing the last index in order to keep last dimension.
    return torch.hstack((x[:, 2, 1:2], x[:, 0, 2:], x[:, 1, 0:1]))
54dcb699db154e8c995dc324888b633451cfb7fc
12,596
def binary(img, th=128):
    """Generate binary image.

    Args:
        img : image array
        th  : threshold
    """
    img[img < th] = 0
    img[img >= th] = 255
    return img
627c3fc928886facbae8ca4cf692f543f4cbca40
12,597
def get_trace_len(df):
    """Get the length of the longest trace."""
    cid = list(df['CaseID'])
    dic = {}
    for i in cid:
        if i in dic:
            dic[i] += 1
        else:
            dic[i] = 1
    trace_len = int(max(dic.values()))
    return trace_len
a199b320df2c61a04cde676814350878b0bf89a0
12,598
import json


def hello(event, context):  # pylint: disable=unused-argument
    """Hello lambda function.

    Args:
        event (dict): Contains information from the invoking service
            (service defines the event structure)
        context (obj): Contains methods and properties that provide
            information about the invocation, function, and runtime
            environment (function name, version, memory limits, request id,
            etc.)

    Returns:
        response (dict): Contains request response from the handler
            (200 status code and event data)
    """
    body = {
        "message": "Go Serverless v2.0! Your function executed successfully!",
        "input": event,
    }

    response = {"statusCode": 200, "body": json.dumps(body)}

    return response
fabe319ab98cfcdd9208b021a6b4e42d6aa46a05
12,599
import pkg_resources


def get_duplicate_entry_points(group, name=None):
    """Create list of entry points with duplicates."""
    return [pkg_resources.EntryPoint.parse('dev = tests.test_get_entry_points:dev'),
            pkg_resources.EntryPoint.parse('dev = tests.test_get_entry_points:dev')]
3bed4e7eecf8c74361f472928c4a9865956e051a
12,600
import re


def find_dlls(data: bytes) -> list:
    """Lists the dlls mentioned in the strings.

    Args:
        data: strings data from "Strings.txt" file

    Returns:
        List of hardcoded .dll names in the strings output
    """
    pattern = rb".+\.dll|.+\.DLL"
    return list(map(lambda x: x.decode(), set(re.findall(pattern, data))))
51a44b0563dcd61990f35562f3a624aaed6085c7
12,601
from typing import List
from typing import Dict


def single__intent_topk_precision_score(
    intent_prediction: List[Dict[str, str]],
    y_true: List[str],
    k: int = 1,
) -> float:
    """Compute the Precision of a single utterance with multi-intents

    Precision of a single utterance is defined as the proportion of
    correctly predicted labels to the total number of the true label. It
    can be formulated as

    .. math::
      \\text{Precision of single utterance}=\\frac{|\\text{pred}_i \\cap \\text{true}_i|}{|\\text{true}_i|}

    Args:
        intent_prediction (a list of dictionaries):
            A sorted intent prediction (by score) of a single utterance.
        y_true (a list of strings):
            The corresponding true intent of that utterance.
            Note that it can be more than one intents.
        k (an integer):
            The top k prediction of intents we take for computing precision.

    Returns:
        precision score (a float):
            precision of a single utterance given top k prediction.

    Examples:
        >>> intent_prediction, _ = model.predict("I like apple.")
        >>> print(intent_prediction)
        [
            {"intent": "blabla", "score": 0.7},
            {"intent": "ohoh", "score": 0.2},
            {"intent": "preference", "score": 0.1},
        ]
        >>> precision = single__intent_topk_precision_score(
            intent_prediction=intent_prediction,
            y_true=["preference", "ohoh", "YY"],
            k=2,
        )
        >>> print(precision)
        0.333333
    """  # noqa
    top_k_pred = [pred["intent"] for pred in intent_prediction[:k]]
    precision_score = (
        len(set(y_true) & set(top_k_pred)) /
        len(y_true)
    )
    return precision_score
3b67849670f80a3fa148249a6fe41ce4627546e5
12,602
import pandas as pd
import os


def already_done(output_file, combi):
    """Check if this model has already been run, based on the output file.
    In case this script crashes (e.g. by running out of memory), this
    allows us to continue where we left off."""
    if os.path.exists(output_file):
        df = pd.read_csv(output_file)[['dataset', 'algorithm', 'descriptor', 'augmentation']]
        done = list(df.to_records(index=False))
        done = [''.join([f"{i}" for i in j]) for j in done]
        combi_vals = (combi[0], combi[2].value, combi[1].value, combi[3])
        combi_vals = ''.join([f"{i}" for i in combi_vals])
        return combi_vals in done
    else:
        return False
ab080d3130cb4b350ffde58b6ab485c8d352427b
12,603
import sys


def create_matrix(dim):
    """Create the matrix from user input.

    Args:
        dim: Dimension of the matrix.

    Returns:
        User-defined matrix as a nested list.

    Raises:
        Error if a non-square matrix is created.
    """
    matr = []
    for i in range(dim):
        # Get one line as input, separated by spaces.
        matr.append(input("Type line {}: ".format(str(i + 1))).split())
        if len(matr[i]) != dim:
            sys.exit("ERROR: Only square matrices allowed.")
    return matr
125044d463a0ddbdc5029b556c1029427ec4d8c2
12,605
import math


def _fix_lambda_path(lambda_path):
    """Replace the first value in lambda_path (+inf) with something more
    reasonable. The method below matches what is done in the R/glmnet
    wrapper."""
    if lambda_path.shape[0] > 2:
        lambda_0 = math.exp(2 * math.log(lambda_path[1]) - math.log(lambda_path[2]))
        lambda_path[0] = lambda_0
    return lambda_path
b1587354156bdfeb63f5442e5ac3e171aecd3f99
12,606
def _validate_belongs(*options):
    """
    Return a validator ensuring the item belongs in the list.
    """
    def _validate_belongs(value):  # noqa: E306
        for opt in options:
            if isinstance(value, str) and isinstance(opt, str):
                if value.lower() == opt.lower():  # noqa: E501
                    return opt
            elif value is True or value is False or value is None:
                if value is opt:
                    return opt
            elif value == opt:
                return opt
        raise ValueError(
            f'Invalid value {value!r}. Options are: '
            + ', '.join(map(repr, options))
            + '.'
        )
    return _validate_belongs
9284b6ff4f55138a3606cc530891f8d1c206ece3
12,608
def sorted_ext_names(config, ext_names):
    """Sort extensions if `EXT_ORDER` is specified.

    Extensions not listed in `EXT_ORDER` will be appended to the end of the
    list in an arbitrary order.
    """
    ext_order = [e.strip() for e in config['EXT_ORDER'].split(',')]

    def sort_key(ext_name):
        try:
            return ext_order.index(ext_name)
        except ValueError:
            # If `ext_name` is not in the sort list, it has a sort value of
            # positive infinity (last).
            return float('inf')

    return sorted(ext_names, key=sort_key)
6265b6c6ea21915b148648d9adeb800f6d9a8a95
12,609
def _deal_with_axis(obj, axis):
    """Handle the `axis` parameter

    Parameters
    ----------
    obj: DimArray object
    axis: `int` or `str` or `tuple` or None

    Returns
    -------
    newobj: reshaped obj if axis is tuple otherwise obj
    idx   : axis index
    name  : axis name
    """
    # before applying the function on the collapsed array
    if type(axis) in (tuple, list):
        idx = 0
        newobj = obj.flatten(axis, insert=idx)
        # idx = obj.dims.index(axis[0])  # position where the new axis has been inserted
        # ax = newobj.axes[idx]
        ax = newobj.axes[0]
        name = ax.name
    else:
        newobj = obj
        idx, name = obj._get_axis_info(axis)

    return newobj, idx, name
ae0eec4bdf7f172f9617e9f9d1330d75b08f8e97
12,610
def do_train(dataset_dict):
    """provide dict in train mode"""
    # USER: Modify this if you want to keep them for some reason.
    dataset_dict.pop("annotations", None)
    dataset_dict.pop("sem_seg_file_name", None)
    return dataset_dict
d6a2bc73baa41218c24282695f7b2a2c57d086d4
12,611
def clear_noise(image, size):
    """Clear noise using a custom rectangular window: if the border of the
    window is entirely white, fill the whole window with white.

    :param image: binary image
    :param size: size of the rectangular window
        type: tuple
    :return: image
    """
    image = image.convert('L')
    for y in range(image.size[1] - size[1]):
        for x in range(image.size[0] - size[0]):
            white = True
            for xx in range(x, x + size[0]):
                if xx == x or xx == x + size[0] - 1:
                    # first/last column: check every pixel in the column
                    for yy in range(y, y + size[1]):
                        if image.getpixel((xx, yy)) != 255:
                            white = False
                            break
                else:
                    # interior columns: check only the top and bottom rows
                    if (image.getpixel((xx, y)) != 255 or
                            image.getpixel((xx, y + size[1] - 1)) != 255):
                        white = False
                        break
            if white:
                for i in range(x, x + size[0]):
                    for j in range(y, y + size[1]):
                        image.putpixel((i, j), 255)
    return image
e9b5a00e724eb6c2d464d02e1041c59c128e490d
12,612
def simple_bet(pressure, n_monolayer, c_const):
    """A simple BET equation returning loading at a pressure."""
    return (n_monolayer * c_const * pressure /
            (1 - pressure) /
            (1 - pressure + c_const * pressure))
601701b608a48fe634d028023068fcadc636499b
12,613
def _select_identifier(identifiers, dict_to_identify):
    """Select the right field identifier.

    Args:
        identifiers (list): List of identifiers.
        dict_to_identify (dict): Dictionary to identify.

    Returns:
        str: Identifier.
    """
    for identifier in identifiers:
        if identifier in dict_to_identify:
            return identifier
    return ''
647be77f4f87b6d6cba1d979882ac78741134484
12,614
import os


def get_user_ids(path):
    """
    Get all user ids based on name of folders under "public_dataset/"

    :return: a list of user ids
    """
    file_name = os.listdir(path)
    user_id = [i for i in file_name if '.pdf' not in i and '.DS_Store' not in i]
    return user_id
bc018eb494acc5a586383beb1eef2f9ccf7eff8d
12,615
def get_group_summary(conn, group_name):
    """Return the summary of the group results"""
    cur = conn.execute('''
        select t.path, t.filename,
               cast(substr(reverse(group_concat(cast(result as char))), 1, 1) as integer),
               duration, t.lines
        from results
        inner join tests t on results.test_id = t.id
        inner join group_entries ge on t.id = ge.test_id
        inner join groups g on ge.group_id = g.id
        where g.name = ? and t.active
        group by t.id
        order by run_id
    ''', [group_name])
    return cur.fetchall()
806b6a34f379aa2b7c74edc43d523fa9ee169602
12,616
from typing import Container
from typing import Iterable


def match_tags(search: Container[str], tags: Iterable[str]):
    """Check if the search constraints satisfy tags.

    The search tags should be uppercased.

    All !tags or -tags cannot be present, all +tags must be present, and
    at least one normal tag must be present (if they are) to pass.
    """
    if not tags:
        return True

    has_all = '<ALL>' in search
    # None = no normal tags, True = matched one, False = not matched one.
    matched = None
    for tag in tags:
        tag = tag.upper()
        start = tag[0:1]
        if start == '!' or start == '-':
            if tag[1:] in search:
                return False
        elif start == '+':
            if tag[1:] not in search:
                return False
        else:
            if matched is None:
                matched = False
            if has_all or tag in search:
                matched = True

    return matched is not False
0a6ee5f233900eb50ad72613aa73f227a836b4dd
12,617
import os


def get_fa_from_scratch(scratch_dir):
    """
    Careful... May not work in the Future

    Inputs:
        scratch_dir: (str) Path to work dir/ tmp etc..
    Outputs:
        FNA fp: (str) Automatic download through GenbankToGenome
    """
    fna_fp = None
    scratch_files = os.listdir(scratch_dir)
    all_fna_fps = []
    for f in scratch_files:
        # some fasta files may end with 'fasta'
        if f.endswith('.fa'):
            all_fna_fps.append(f)
        if f.endswith('.fasta'):
            all_fna_fps.append(f)
    if len(all_fna_fps) > 1:
        raise Exception("Multiple .fa files in scratch directory. Expecting only one: " +
                        ", ".join(all_fna_fps))
    elif len(all_fna_fps) == 0:
        raise Exception("No .fa files in scratch directory. Program needs genome fna file to run.")
    else:
        fna_fp = all_fna_fps[0]

    return os.path.join(scratch_dir, fna_fp)
cd673c4abb2ecfdf5d6def9a41185b6d60f46f2e
12,618
def text_node_for(context, text):
    """Outputs the text node with the forloop.counter incremented"""
    return ('<text id="%d">%s</text>'
            % (context["forloop"]["counter"] + 1, text))
4fbf40884ed19e326ce7e1fdc6a9773ed14a40fc
12,620
from typing import Dict
from typing import Any
from typing import Iterator
from typing import Tuple


def flatten(dictionary: Dict[str, Any]) -> Dict[str, Any]:
    """
    >>> flatten({'foo':{'bar':{'baz': 0}, 'deadbeef': 1}, '42': 3})
    {'foo.bar.baz': 0, 'foo.deadbeef': 1, '42': 3}
    """
    def iterate(data: Dict[str, Any], prefix: str) -> Iterator[Tuple[str, Any]]:
        for key, value in data.items():
            prefixed_key = prefix + key
            if isinstance(value, dict):
                for prefixed_subkey, val in iterate(value, prefixed_key + '.'):
                    yield prefixed_subkey, val
            else:
                yield prefixed_key, value

    return dict(iterate(dictionary, ""))
401f2f8894690a0c1171d26378ac4a5178dd705a
12,621
def offline_validator_address():
    """Ethereum address of the offline validator.

    The offline validator is the third positioned validator at the
    e2e_test_chain. He is meant to be not started at all and therefore
    becomes reported as offline.
    """
    return "0xe7a664509027ff348d379bb5d3a8340aaecb56eb"
e89238a3a9d0bc938e60bdf8f52e0416a4a81987
12,622
def xyz_there(s):
    """
    Return True if the given string contains an appearance of "xyz" where
    the xyz is not directly preceded by a period (.). So "xxyz" counts but
    "x.xyz" does not.

    xyz_there('abcxyz') → True
    xyz_there('abc.xyz') → False
    xyz_there('xyz.abc') → True
    """
    for i in range(len(s)):
        # Count an "xyz" only when it is not immediately preceded by '.'
        if s[i:i + 3] == 'xyz' and (i == 0 or s[i - 1] != '.'):
            return True
    return False
16a882ac081e7b5cc7f75d66ce7d78c1efcc92b7
12,623
def fromobject(thrift, tcls, obj):
    """Create thrift object with type `tcls` from a Python object."""
    if isinstance(obj, tcls):
        return obj
    return globals().get(tcls.__name__, tcls)(thrift, obj)
fcb868dc460db835a4ec77fc08e8ed944fb8296b
12,624
def subdivide_stats(data):
    """
    If a key contains a ., create a sub-dict with the first part as parent key
    """
    ret = {}
    for key, value in data.items():
        if '.' in key:
            # split on the first dot only, so keys with several dots
            # still unpack into exactly two parts
            parent, subkey = key.split('.', 1)
            if parent not in ret:
                ret[parent] = {}
            ret[parent][subkey] = value
        else:
            ret[key] = value
    return ret
65e47db5c75118c1939a8ecdd3ad581a053da893
12,625
def start_client(daemon, host, port, **kwargs):
    """Return a socket to an existing "remote" pydevd-handling daemon.

    The daemon supports the pydevd client wire protocol, sending
    requests and handling responses (and events).

    This is a replacement for _pydevd_bundle.pydevd_comm.start_client.
    """
    sock, start_session = daemon.start_client((host, port))
    start_session(**kwargs)
    return sock
26b4e5e79a4b8aaf602cb5b713514358b4951fdc
12,626
import re


def parentdir(file_path):
    """
    Get the parent directory of a file or directory.

    Goes by the path alone, so it doesn't follow symlinks.
    On Windows, paths must be converted to use / before passing.

    This function is not the same as os.path.dirname(); for example,
    dirname will not give us the correct answer for any of:
        .  ./  ..  ../

    Note: still doesn't always correctly handle paths starting with /
    and containing . or .., e.g., parentdir('/foo/..')

    Dependencies:
        modules: re
    """
    # remove trailing /'s
    parentdir = re.sub('/*$', '', file_path)

    # are there no /'s left?
    if '/' not in parentdir:
        if parentdir == '':
            return '/'  # it was /, and / is its own parent
        if parentdir == '.':
            return '..'
        if parentdir == '..':
            return '../..'
        return '.'

    # remove the last component of the path
    parentdir = re.sub('/*[^/]*$', '', parentdir)
    if parentdir == '':
        return '/'

    return parentdir
baec28e9b6f0017566b91cd2b442a9eb783a144f
12,627
def _Int(s):
    """Try to convert s to an int. If we can't, just return s."""
    try:
        return int(s)
    except ValueError:
        assert '.' not in s  # dots aren't allowed in individual element names
        return s
8f7fcf70717fe30ba991c8cff63cecefe5c01daf
12,628
def strlist_with_or(alist):
    """Return comma separated string, and last entry appended with ' or '."""
    if len(alist) > 1:
        return "%s or %s" % (", ".join(alist[:-1]), alist[-1])
    return ", ".join(alist)
1202e76d34f84618bb6b310bbc43c835f3c94104
12,629
def compare_authors_or_editors(db_entry, dqm_entry, datatype):  # noqa: C901
    """
    Authors and Editors are in a hash with order of 'order' from db and
    'authorRank' from dqm. They have to be sorted by the order, and for
    updates the db 'author_id' is used to patch. Currently we don't want
    to remove any authors, so we patch None to the values. We're only
    getting from dqms the 'name', 'first_name', 'last_name', and 'order',
    although the ingest schema allows other data.

    Sample db data, unchanged dqm entry, changed dqm entry:

    db_entry = {
        "authors": [
            {
                "date_created": "2021-10-08T17:03:27.036468",
                "date_updated": "2021-10-10T11:04:39.548530",
                "author_id": 4582613,
                "order": 1,
                "name": "Abdul Kader N",
                "first_name": None,
                "middle_names": None,
                "last_name": None,
                "first_author": False,
                "orcid": None,
                "affiliation": None,
                "corresponding_author": None
            }, {
                "date_created": "2021-10-08T17:03:27.036468",
                "date_updated": "2021-10-10T11:04:39.548947",
                "author_id": 4582614,
                "order": 2,
                "name": "Brun J",
                "first_name": None,
                "middle_names": None,
                "last_name": None,
                "first_author": False,
                "orcid": None,
                "affiliation": None,
                "corresponding_author": None
            }
        ]
    }
    dqm_entry_unchanged = {
        "authors": [
            {
                "name": "Abdul Kader N",
                "referenceId": "WB:WBPaper00000003",
                "authorRank": 1
            }, {
                "referenceId": "WB:WBPaper00000003",
                "name": "Brun J",
                "authorRank": 2
            }
        ]
    }
    dqm_entry_changed = {
        "authors": [
            {
                "name": "Abdul Kader AAN",
                "referenceId": "WB:WBPaper00000003",
                "authorRank": 1
            }, {
                "referenceId": "WB:WBPaper00000003",
                "name": "Brunner J",
                "firstname": "bob",
                "authorRank": 2
            }, {
                "referenceId": "WB:WBPaper00000003",
                "name": "NEW",
                "firstname": "bob",
                "authorRank": 3
            }, {
                "referenceId": "WB:WBPaper00000003",
                "name": "AGAIN",
                "firstname": "bob",
                "authorRank": 4
            }
        ]
    }

    :param db_entry:
    :param dqm_entry:
    :param datatype:
    :return:
    """
    # db_entry_text = json.dumps(db_entry, indent=4)
    # print('db entry ')
    # print(db_entry_text)
    # dqm_entry_text = json.dumps(dqm_entry, indent=4)
    # print('dqm entry ')
    # print(dqm_entry_text)

    db_authors = []
    dqm_authors = []
    if datatype in db_entry:
        if db_entry[datatype] is not None:
            db_authors = db_entry[datatype]
    dqm_key = datatype
    if datatype == 'editors':
        dqm_key = 'editorsOrAuthors'
    if dqm_key in dqm_entry:
        if dqm_entry[dqm_key] is not None:
            dqm_authors = dqm_entry[dqm_key]

    db_has_change = False
    db_ordered = dict()
    for author_dict in db_authors:
        if datatype == 'authors':
            if author_dict['corresponding_author']:
                db_has_change = True
            if author_dict['first_author']:
                db_has_change = True
        if 'order' not in author_dict:
            # print('no order ')
            db_has_change = True
        else:
            order = int(author_dict['order'])
            db_ordered[order] = author_dict
    if db_has_change:
        return False, None, None

    dqm_ordered = dict()
    for author_dict in dqm_authors:
        if 'authorRank' in author_dict:
            order = int(author_dict['authorRank'])
            sanitized_dict = dict()
            sanitized_dict['order'] = order
            if 'name' in author_dict:
                sanitized_dict['name'] = author_dict['name']
            if 'lastName' in author_dict:
                sanitized_dict['last_name'] = author_dict['lastName']
            elif 'lastname' in author_dict:
                sanitized_dict['last_name'] = author_dict['lastname']
            if 'firstName' in author_dict:
                sanitized_dict['first_name'] = author_dict['firstName']
            elif 'firstname' in author_dict:
                sanitized_dict['first_name'] = author_dict['firstname']
            dqm_ordered[order] = sanitized_dict

    to_patch = []
    to_create = []
    author_subfields = ['name', 'first_name', 'last_name']
    for order in sorted(db_ordered.keys()):
        author_changed = False
        patch_dict = {'order': order}
        dqm_dict = dict()
        if order in dqm_ordered:
            dqm_dict = dqm_ordered[order]
        # print("dqm %s %s" % (order, dqm_dict['name']))
        db_dict = db_ordered[order]
        # print("db %s %s" % (order, db_dict['name']))
        for field in author_subfields:
            dqm_value = None
            if field in dqm_dict:
                dqm_value = dqm_dict[field]
            db_value = None
            if field in db_dict:
                db_value = db_dict[field]
            if db_value != dqm_value:
                # must assign None to fields if dqm did not set author at that order number
                patch_dict[field] = dqm_value
                # print("field changed %s %s %s" % (field, db_value, dqm_value))
                author_changed = True
        if author_changed:
            if datatype == 'authors':
                to_patch.append({'author_id': db_dict['author_id'], 'patch_dict': patch_dict})
            elif datatype == 'editors':
                to_patch.append({'editor_id': db_dict['editor_id'], 'patch_dict': patch_dict})

    for order in sorted(dqm_ordered.keys()):
        if order not in db_ordered:
            to_create.append(dqm_ordered[order])

    return True, to_patch, to_create
01f8256da5728fdc6ab8595d59ab10ebe7e3827d
12,630
def reformatPermissionErrors(function):
    """This function decorator will trap any permission error exceptions and
    raise a different exception with a nicer message."""
    def new_func(*args, **kwargs):
        # Minimal wrapper body (the original was an empty stub), assuming
        # the built-in PermissionError is the exception to trap.
        try:
            return function(*args, **kwargs)
        except PermissionError as exc:
            raise PermissionError(
                "Permission denied while calling %s: %s" % (function.__name__, exc)
            ) from exc
    return new_func
e3482a6e93ea1d759ddb19ac8b8de90be1fd6651
12,634
import re


def split_assignment_options(args):
    """
    replace -a=b with -a b and --arg=b with --arg b
    :param args:
    :return: returns the updated arg list
    """
    i = 0
    # raw string avoids the invalid "\-" escape warning
    p = re.compile(r'((-[a-zA-Z]=)|(--[a-zA-Z\-]+=))')
    while i < len(args):
        m = p.match(args[i])
        if m:
            e = m.end()
            option = args[i][:e - 1]
            value = args[i][e:]
            args[i] = option
            i += 1
            args.insert(i, value)
        i += 1
    return args
07d829c20383a0b6e6f27944b9cd7cc21ff420c1
12,635
def values_dict(items):
    """Given a list of (key, list) values returns a dictionary where
    single-element lists have been replaced by their sole value.
    """
    return {k: v[0] if len(v) == 1 else v for k, v in items}
7abcf62ab334cecc6e6996bad73ff10e5eecdf89
12,636
import os


def sameopenfile(fp1, fp2):
    """Return ``True`` if the file descriptors *fp1* and *fp2* refer to the
    same file."""
    # Delegate to the standard library instead of always returning True.
    return os.path.sameopenfile(fp1, fp2)
c6943dc157d19208e52a201adff563404ae221f7
12,639
def DetermineServiceFromUrl(url):
    """Takes a DFA service's URL and returns the service name.

    Args:
        url: string The DFA service's URL.

    Returns:
        string The name of the service this URL points to.
    """
    return url.split('/')[-1]
213e1a4dbb5eb3ed643e0ace300b4ac5b6c6c745
12,640
def scrub_response(response):
    """
    Drop irrelevant headers.
    """
    headers = response["headers"]
    for header in [
        "CF-Cache-Status",
        "CF-RAY",
        "Cache-Control",
        "Connection",
        "Date",
        "Expect-CT",
        "NEL",
        "Report-To",
        "Server",
        "Transfer-Encoding",
        "cf-request-id",
        "Set-Cookie",
    ]:
        headers.pop(header, None)
    return response
8f1a9f9499df1fbaf7147ccdda8c7854c4c638ec
12,646
def check_necessary_conds(val_inf, muls):
    """
    The necessary conditions for a rational solution to exist are as follows -

    i) Every pole of a(x) must be either a simple pole or a multiple pole
    of even order.

    ii) The valuation of a(x) at infinity must be even or be greater than
    or equal to 2.

    Here, a simple pole is a pole with multiplicity 1 and a multiple pole
    is a pole with multiplicity greater than 1.
    """
    return (val_inf >= 2 or (val_inf <= 0 and val_inf % 2 == 0)) and \
        all(mul == 1 or (mul % 2 == 0 and mul >= 2) for mul in muls)
58463ac2855d07eb36e1850cec3208d69d5a545c
12,647
def arrange_dlc_data(df):
    """Restructure DLC dataframe"""
    scorer = df.keys()[0][0]
    bodyparts = df[scorer].columns.levels[0]
    data = {}
    for bp in bodyparts:
        data[bp] = df[scorer][bp]
    return data
26cfbe04ef2dc84d522e1e8c7f9db2a1ee3fe175
12,648
def get_rect_footprint(l_m=4.8, w_m=1.83):
    """
    Get rectangular footprint of length (x, longitudinal direction) and
    width (y, lateral direction)

    l_m : length (default 4.8)
    w_m : width (default 1.83)

    Return Values
    =============
    footprint_x : tuple of x coordinates of the footprint
    footprint_y : tuple of y coordinates of the footprint
    """
    l_half_m = l_m * 0.5
    w_half_m = w_m * 0.5

    footprint_x, footprint_y = zip(
        (l_half_m, w_half_m),
        (-l_half_m, w_half_m),
        (-l_half_m, -w_half_m),
        (l_half_m, -w_half_m),
        (l_half_m, w_half_m)
    )

    return footprint_x, footprint_y
dd40304c404226543023c1f81bca5aac81e70eec
12,649
import bz2
import struct


def ReadPrefix(input_filename):
    """
    Read the prefix of a graph data structure from disk

    @param input_filename: the filename where the graph data is stored
    """
    assert input_filename.endswith('.graph.bz2')

    data = bz2.decompress(open(input_filename, 'rb').read())
    byte_index = 0

    # read the basic attributes for the graph
    nvertices, nedges, directed, vertex_colored, edge_colored = struct.unpack(
        'qq???', data[byte_index:byte_index + 19])
    byte_index += 19

    # read the prefix
    prefix, = struct.unpack('128s', data[byte_index:byte_index + 128])
    byte_index += 128
    prefix = prefix.decode().strip('\0')

    return prefix
1c585963bb2387ad9de2b4bf7e53f7b642bafcb8
12,650
def _parse_source_file_list_blob_key(blob_key):
    """Parse the BLOB key for source file list.

    Args:
      blob_key: The BLOB key to parse. By contract, it should have the format:
        `${SOURCE_FILE_LIST_BLOB_TAG}.${run_id}`

    Returns:
      - run ID
    """
    return blob_key[blob_key.index(".") + 1:]
a7f5c7ccee1404e17b90cb6cd58922ec162d33e3
12,652
def prop_FC(csp, newVar=None):
    """
    Do forward checking, that is, check constraints with only one
    uninstantiated variable. Keep track of all pruned (variable, value)
    pairs and return them.
    """
    constraints = csp.get_cons_with_var(newVar) if newVar else csp.get_all_cons()
    pruned = []

    for constraint in constraints:
        if constraint.get_n_unasgn() != 1:
            continue  # skip to the next constraint
        # If we get here, get the single unassigned variable
        var = constraint.get_unasgn_vars()[0]
        for val in var.cur_domain():
            # Check if var = val satisfies the constraint
            if not constraint.has_support(var, val):
                if (var, val) not in pruned:
                    # Then prune this value from var's domain
                    var.prune_value(val)
                    pruned.append((var, val))
        # After looking at all values in the var's domain, check if it is now empty
        if var.cur_domain_size() == 0:
            return False, pruned

    return True, pruned
3956cf0ae03a03c0dbc3504ab2ed2a20b8343d90
12,654
def quick_sort(alist):
    """
    Quick sort: unstable, O(log n) extra space.
    Time: best O(n log n), average O(n log n), worst O(n^2).
    """
    length = len(alist)
    if length <= 1:
        # print(alist)
        return alist

    low = 0
    high = length - 1
    mid_value = alist[0]
    while low < high:
        while low < high and alist[high] >= mid_value:
            # move high left until it meets low or points at a value below mid
            high -= 1
        alist[low] = alist[high]
        while low < high and alist[low] <= mid_value:
            low += 1
        alist[high] = alist[low]

    alist[low] = mid_value
    alist[low + 1:] = quick_sort(alist[low + 1:])
    alist[:low] = quick_sort(alist[:low])
    # print(alist, low)
    return alist
b284759dc021bc252fc2e3c77345eaf2b8415317
12,655
def _construct_unicode(self, node):
    """Override the default string handling function to always return
    unicode objects. Required for Python 2.7.

    Thanks to:
    http://stackoverflow.com/questions/2890146/how-to-force-pyyaml-to-load-strings-as-unicode-objects
    """
    return self.construct_scalar(node)
eed380a154fe5cfd313c6f61a0648862f7d07a0f
12,658
def update_real_pred_150d(real_pred_150d, buffer):
    """
    @params:
        real_pred_150d: List[dict]
        buffer: dict: {"date": , "value0": , "value1": }
    Note: buffer has to be complete
    """
    return real_pred_150d[1:] + [buffer]
1ef0c648c5f921a9b66e23f41cfd7cf53cf16c0b
12,659
from datetime import datetime


def now():
    """Return a string representation of the current date/time.

    Returns:
        (str): current UTC time as a string
    """
    return str(datetime.utcnow())[:19] + "Z"
93906571ce2b74846e73f1ec5f773589ce70e4bb
12,660
def calc_lnoverlaps(group_pars, star_pars, nstars):
    """Find the lnoverlaps given the parameters of a group

    Parameters
    ----------
    group_pars : [npars] array
        Group parameters (internal encoding, 1/dX... no nstars)
    star_pars : dict
        stars: (nstars) high astropy table including columns as
            documented in the Traceback class.
        times : [ntimes] numpy array
            times that have been traced back, in Myr
        xyzuvw : [nstars, ntimes, 6] array
            XYZ in pc and UVW in km/s
        xyzuvw_cov : [nstars, ntimes, 6, 6] array
            covariance of xyzuvw
    nstars : int
        number of stars in traceback

    Returns
    -------
    lnols : [nstars] array
        The log of the overlap of each star with the provided group
    """
    lnols = None
    return lnols
6aaeecfb1712134a4f5761c9a047763d572517b7
12,662
def pad(val: str) -> str:
    """Pad base64 values if need be: the JWT spec calls for omitting
    trailing padding."""
    padlen = 4 - len(val) % 4
    return val if padlen > 2 else (val + "=" * padlen)
3ab1c91fde1522f15a766730f73e44c97dbeda1a
12,664
def non_zero_row(arr):
    """
    0. Empty row returns False.

    >>> arr = array([])
    >>> non_zero_row(arr)
    False

    1. Row with a zero returns False.

    >>> arr = array([1, 4, 3, 0, 5, -1, -2])
    >>> non_zero_row(arr)
    False

    2. Row with no zeros returns True.

    >>> arr = array([-1, -0.1, 0.001, 2])
    >>> non_zero_row(arr)
    True

    :param arr: array
    :type arr: numpy array
    :return empty: If row is completely free of zeros
    :rtype empty: bool
    """
    if len(arr) == 0:
        return False

    for item in arr:
        if item == 0:
            return False

    return True
7052dac96bbf359800c43e0b750e999fba5ef2e0
12,665
def word_remove(text, wordlist):
    """
    This function takes a list of text strings and a list of words. It
    returns the list of text strings with the words appearing in the
    wordlist removed.
    """
    # Create new empty list for the cleaned text
    clean_text = []
    # Separate ALL words from each other and write them with lower case
    wordlist = ' '.join(wordlist).lower().split()
    # Loop over every chunk and remove the words from the wordlist
    for i in range(len(text)):
        chunk = text[i]  # take chunk
        chunkwords = chunk.split()  # split into single words
        # remove words from wordlist
        resultwords = [word for word in chunkwords if word.lower() not in wordlist]
        chunkclean = ' '.join(resultwords)  # gather all words into a string again
        clean_text.append(chunkclean)  # append the chunk to the list outside the loop
    return clean_text
8dac46d54345efedf7bb117af3e4eb67704c0f85
12,666
def fs_write(obj, file_path):
    """
    Convenience function to write an Object to a FilePath

    Args:
        obj (varies): The Object to write out
        file_path (str): The Full path including filename to write to

    Returns: The object that was written
    """
    try:
        with open(str(file_path), 'w') as f:
            f.write(obj)
        return obj
    except TypeError as e:
        raise e
c94db2399283a26529bf4416f5b05a93fafb4e07
12,667
import json


def get_json(inp_dict):
    """Converts a given dictionary to prettified JSON string.

    Parameters
    ----------
    inp_dict: map
        Input dictionary to be converted to JSON.

    Returns
    -------
    Prettified JSON string
    """
    return json.dumps(inp_dict, indent=4)
9404064536a12595fe9b601363eee07b09a97bc8
12,668
import MySQLdb


def getCurrentDimm():
    """Return current DIMM measurement"""
    cnx = MySQLdb.connect(host='eeva', user='select_user', passwd='select_pass', db='SiteData')
    cursor = cnx.cursor()
    cursor.execute("""SELECT AVG((fwhmhorl+fwhmvert+fwhmverl)/3) FROM RoboDimmING
                      WHERE DateTimeUT BETWEEN DATE_SUB(NOW(), INTERVAL 900 SECOND) AND NOW()""")
    cnx.commit()
    a = cursor.fetchone()
    cnx.close()
    try:
        return '%.2f' % float(a[0])
    except TypeError:
        return 'n/a'
b72562dafc490f2165390251da784eb57f839e92
12,670
def getConfig(config, key):
    """
    Get the configuration value for the given key [NC]
    :param key:
    :return:
    """
    return config[key]
cf53e746a5004705b8265024c1ac884fc9c6adce
12,671
import calendar


def month_delta(date, months):
    """Add or subtract months from date."""
    day = date.day
    # subtract one because months are not zero-based
    month = date.month + months - 1
    year = date.year + month // 12
    # now add it back
    month = month % 12 + 1
    days_in_month = calendar.monthrange(year, month)[1]
    if day >= days_in_month:
        day = days_in_month
    try:
        return date.replace(year, month, day)
    except ValueError:
        raise OverflowError('date value out of range')
afa064af13c67be776a7f01ce48fc6ed2f33f581
12,672
def get_attribute(obj, key):
    """Get an attribute from an object, regardless of whether it is a
    dict or an object"""
    if not isinstance(obj, dict):
        return getattr(obj, key)
    return obj[key]
bb20ac5809cf89b8043ff1fc60b9e4775ca95d18
12,673
import requests
import time


def getData(base, path, parms=None):
    """this is a general-purpose query helper"""
    u = base + path
    try:
        r = requests.get(u, timeout=3, params=parms)
        # print(r.url)
    except requests.Timeout:
        print(f"{time.asctime()}: Timeout fetching {u} {parms}")
        return {}
    except Exception as e:
        print(f"{time.asctime()}: Error {e} fetching {u} {parms}")
        return {}
    if r.status_code == requests.codes.ok:
        return r.json()
    print(f"{time.asctime()}: Error fetching {r.url}: ({r.status_code}) {r} {r.text}")
    return {}
42d6271422c112b6ebf2469d340ae46ebb5a95e4
12,674
import os


def getParts(path):
    """Return the slash-separated parts of a given path as a list"""
    if path == os.sep:
        return [os.sep]
    else:
        return path.split(os.sep)
ea39dedb08a825f4eada9d8b3d7697746093b8db
12,675
import math


def check_anomaly(day_data_tuple):
    """
    Find mean, std, and maximum values for the day.
    Using a single pass (online) mean/std algorithm allows us to only
    read through the day's data once.

    Note: M2 = 2nd moment, variance

    day_data is an iterable, returned from groupby, and we request
    values via for loop
    """
    (day, day_data) = day_data_tuple
    n = 0
    mean = 0
    M2 = 0
    max_value = 0

    for timestamp, value in day_data:
        n += 1
        delta = value - mean
        mean = mean + (delta / n)
        M2 += delta * (value - mean)
        max_value = max(max_value, value)

    variance = M2 / (n - 1)
    standard_deviation = math.sqrt(variance)

    # Check if day's data is anomalous, if True return day
    if max_value > mean + 6 * standard_deviation:
        return day

    return False
50cf3820a8839584aafefaba1e3219dbdf6a24d0
12,677
import re


def remplace_accents(text: str) -> str:
    """Replace Spanish accented vowels with their plain lower-case
    counterparts (matching is case-insensitive)."""
    text = re.sub(r"á", "a", text, flags=re.I)
    text = re.sub(r"é", "e", text, flags=re.I)
    text = re.sub(r"í", "i", text, flags=re.I)
    text = re.sub(r"ó", "o", text, flags=re.I)
    text = re.sub(r"ú", "u", text, flags=re.I)
    return text
d9967d401c637ae5674f88ce72cc86bf7143ba96
12,679
def b(n):
    """
    Simple function to approximate b(n) when evaluating a Sersic profile,
    following Capaccioli (1989). Valid for 0.5 < n < 10

    Parameters
    ----------
    n: float or array
        Sersic index

    Returns
    -------
    b(n): float or array
        Approximation to Gamma(2n) = 2 gamma(2n,b(n))
    """
    return 1.9992 * n - 0.3271
c0a236c1394cdac31559c2d376b0d62d023e5709
12,680
from typing import List


def compute_sigma_for_given_alpha(bundles: List[float], alpha: float) -> float:
    """
    This is a helper function to compute_alpha5_using_binary_search.
    The function computes one side of the inequality.

    :param bundles: valuations of the bags from B1 to Bk, where k is the number of agents
    :param alpha: the potential alpha5; sigma is computed with it
    :return sigma: one side of the inequality

    >>> bundles = [0.74, 0.75, 0.50, 1.02]
    >>> alpha = 0.92
    >>> round(compute_sigma_for_given_alpha(bundles=bundles, alpha=alpha), 6)
    0.331522
    >>> bundles = [0.74, 0.75, 0.72]
    >>> alpha = 0.9
    >>> compute_sigma_for_given_alpha(bundles=bundles, alpha=alpha)
    0.0
    >>> bundles = [0.74, 0.73]
    >>> alpha = 0.99
    >>> round(compute_sigma_for_given_alpha(bundles=bundles, alpha=alpha), 6)
    0.265152
    """
    total = 0
    count = 0
    for bundle in bundles:
        if (bundle / alpha) < 0.75:
            count += 1
            total += 0.75 - bundle / alpha
    return total + (1 / 8) * count
08ae1c84f13de03b5404158de146b05b9efbfdce
12,681
def ind2(e, L):
    """returns the index of an element in a list"""
    def indHelp(e, L, c):
        if c == len(L):
            return c
        if e == True:
            return c
        if e == L[c]:
            return c
        return indHelp(e, L, c + 1)
    return indHelp(e, L, 0)
bab0ba443e3b5d9223b6af82763c92e03c91dcb9
12,682
def opponent(team):
    """Given a team, return the opponent team.

    >>> opponent(TEAM_WHITES)
    -1  #=TEAM_BLACKS
    >>> opponent(TEAM_BLACKS)
    1  #=TEAM_WHITES
    >>> opponent(0)
    0
    """
    return -team
4363665bfd8b56c80292c8435b000b4a40fc1a46
12,683
def _makeScriptOrder(gpos):
    """
    Run through GPOS and make an alphabetically ordered list of scripts.
    If DFLT is in the list, move it to the front.
    """
    scripts = []
    for scriptRecord in gpos.ScriptList.ScriptRecord:
        scripts.append(scriptRecord.ScriptTag)
    # sort first, then pull DFLT to the front, so the final order
    # cannot push DFLT back into alphabetical position
    scripts = sorted(scripts)
    if "DFLT" in scripts:
        scripts.remove("DFLT")
        scripts.insert(0, "DFLT")
    return scripts
6c67698c3d084c8e8f038e05a8d8e53811220a15
12,684
import torch
from typing import Tuple
from typing import List


def concat_enc_outs(
    input: torch.LongTensor,
    enc_out: torch.Tensor,
    mask: torch.BoolTensor,
    embedding_size: int,
    padding_idx: int,
) -> Tuple[torch.Tensor, torch.BoolTensor]:
    """
    Concatenate Encoder Outputs.

    Does the whole "FiD" thing; each query/document pair is independently
    encoded in the Encoder, so we need to concatenate all the outputs prior
    to sending to the decoder.

    :param input:
        [bsz, seqlen] original input to the encoder
    :param enc_out:
        [bsz * n_docs, seqlen] output representations from the encoder
    :param mask:
        encoder mask
    :param embedding_size:
        emb/hidden size of the enc representations
    :param padding_idx:
        pad token index; used for mask purposes.

    :return (new_out, new_mask):
        return the encoder output and encoder mask, appropriately concatenated.
    """
    bsz, n_docs = input.size(0), enc_out.size(0) // input.size(0)
    split_enc_out = enc_out.split([n_docs] * bsz, dim=0)
    split_mask = mask.split([n_docs] * bsz, dim=0)

    concat_outs: List[torch.Tensor] = []
    concat_lengths = []
    for i in range(bsz):
        mask_i = split_mask[i].view(-1)
        out_i = split_enc_out[i].reshape(-1, embedding_size)[mask_i]
        concat_outs.append(out_i)
        concat_lengths.append(out_i.size(0))

    new_out = enc_out.new(bsz, max(concat_lengths), embedding_size)
    new_mask: torch.BoolTensor = mask.new(bsz, max(concat_lengths))  # type: ignore
    new_out.fill_(padding_idx)
    new_mask.fill_(False)

    for i, (out_i, length_i) in enumerate(zip(concat_outs, concat_lengths)):
        new_out[i, :length_i] = out_i
        new_mask[i, :length_i] = True

    return new_out, new_mask
6b1794966229a8f7658afcb70a5e91a652e6b1c5
12,685
import re


def _reg_name_regex():
    """Compile regular expression for RFC3986_ reg-name production

    _RFC3986: https://www.ietf.org/rfc/rfc3986
    """
    # unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
    unreserved = r"[A-Za-z0-9-._~]"
    # pct-encoded = "%" HEXDIG HEXDIG
    pct_encoded = r"%[0-9A-Fa-f][0-9A-Fa-f]"
    # sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
    #            / "*" / "+" / "," / ";" / "="
    sub_delims = r"[!$&'()*+,;=]"
    # reg-name = *( unreserved / pct-encoded / sub-delims )
    # note the alternation between pct-encoded and sub-delims, which the
    # grammar above requires
    reg_name = r"^(" + unreserved + r"|" + pct_encoded + r"|" + sub_delims + r")*$"
    return re.compile(reg_name)
9af962094a9d52c49942f472506c37770da2156e
12,686
import pickle


def deliver(conn, localobj):
    """delivers (recreates) a local object on the other party. the object is
    moved by *value*, so changes made to it will not reflect on the local
    object. returns a proxy to the remote object"""
    return conn.modules.cPickle.loads(pickle.dumps(localobj))
5587d08d9ed984a919bdd1fc51e0f3b1e69d5896
12,687
import math


def poids_attirance(p, dist):
    """
    Compute the attraction weight of a neuron towards a city.
    """
    d = p[0] * p[0] + p[1] * p[1]
    d = math.sqrt(d)
    d = dist / (d + dist)
    return d
4a997566d19dc7e436a3a1704be7b5ac74424266
12,688
def dedup(records):
    """Remove any identical records from the list.

    Args:
        records (list(dict)): the list of dicts to be filtered.

    Returns:
        list(dict): the list of records with any duplicates removed.
        The list returned contains records in the same order as the
        original list.
    """
    seen = set()
    filtered = []
    for record in records:
        key = tuple(sorted(record.items()))
        if key not in seen:
            seen.add(key)
            filtered.append(record)
    return filtered
f3aadddf1458a08d36331a74722e12057d8ab8f9
12,689
import inspect


def is_functional_member(member):
    """
    Check whether a class member from the __dict__ attribute is a method.
    This can be true in two ways:

        - It is literally a Python function
        - It is a method descriptor (wrapping a function)

    Args:
        member (object): An object in the class __dict__.

    Returns:
        bool: `True` if the member is a function (or acts like one).
    """
    return (
        inspect.isfunction(member)
        or (
            inspect.ismethoddescriptor(member)
            and isinstance(member, (classmethod, staticmethod))
        )
    )
268068600689a7935c9a8b26aa14ca09f9679228
12,690
def convert_x1y1x2y2_to_XcYcWH(box):
    """
    Convert box from dictionary of {"x1":, "y1":, "x2":, "y2":} to
    {"x_centre":, "y_centre":, "width":, "height":}

    Assumption 1: point 1 is the top left and point 2 is the bottom
    right hand corner
    """
    assert box["x1"] <= box["x2"]
    assert box["y1"] <= box["y2"]

    width = box["x2"] - box["x1"]
    height = box["y2"] - box["y1"]

    x_centre = round(box["x1"] + width / 2)
    y_centre = round(box["y1"] + height / 2)

    return {"x_centre": x_centre, "y_centre": y_centre, "width": width, "height": height}
e7da7353b64b969b4c51dc7ece06729b05bad31e
12,691
import argparse


def update_namespace(args, dictionary, overwrite=True, rest=False):
    """update Namespace with given dictionary

    Args:
        args (Namespace): Namespace to be updated
        dictionary (dict): dictionary
        overwrite (bool, optional): If True, all Namespace values will be
            overwritten by dictionary values. Otherwise, only Namespace
            entries that are None will be overwritten. Defaults to True.
        rest: Effective only if overwrite=False. If True, add keys that
            are in dictionary but not in args into args. Otherwise raise
            an error.

    Returns:
        Namespace
    """
    dict_args = vars(args)
    if overwrite:
        dict_args.update(dictionary)
    else:
        for k, v in dict_args.items():
            if v is not None:
                pass
            elif k in dictionary:
                dict_args[k] = dictionary[k]
        for k, v in dictionary.items():
            if k not in dict_args:
                if rest:
                    dict_args[k] = v
                else:
                    raise KeyError(f"no key {k}")
    args = argparse.Namespace(**dict_args)
    return args
e75e933b2e174353c2db8ce595fa07232f2e5c1d
12,692
def get_dataset_url(hit):
    """Select dataset url."""
    ret_url = None
    for url in hit["_source"].get("urls", []):
        if url.startswith("http"):
            ret_url = url
        if url.startswith("http") and "amazonaws.com" in url:
            ret_url = url
        if url.startswith("http") and "googleapis.com" in url:
            ret_url = url
    return ret_url
1d78757dda61d762008a78e4b9463c7248978f56
12,696
def edit_distance(s1: str, s2: str) -> int:
    """The minimum number of edits required to make s1 equal to s2.

    This is also known as the Levenshtein distance. An edit is the
    addition, deletion, or replacement of a character.
    """
    if not s1:
        return len(s2)
    if not s2:
        return len(s1)

    # M[i1][i2] is the edit distance between s1[:i1] and s2[:i2].
    M = [[0 for _ in range(len(s2) + 1)] for _ in range(len(s1) + 1)]

    for i1 in range(len(s1) + 1):
        M[i1][0] = i1
    for i2 in range(len(s2) + 1):
        M[0][i2] = i2

    for i1 in range(1, len(s1) + 1):
        for i2 in range(1, len(s2) + 1):
            cost = 0 if s1[i1 - 1] == s2[i2 - 1] else 1
            M[i1][i2] = min([
                1 + M[i1 - 1][i2],
                1 + M[i1][i2 - 1],
                cost + M[i1 - 1][i2 - 1],
            ])

    return M[len(s1)][len(s2)]
bd433f8b52dd9826032ced7ece0fb7df8b5ad8cc
12,697
def removeTags(text):
    """Strip '#' markers from the text and collect the tagged words."""
    # {} wrapper generates a set instead of list to deal with repeated tags
    tags = {tag.strip('#') for tag in text.split() if tag.startswith("#")}
    text = text.replace('#', '')
    return (text, tags)
30e267e22da210d48b3271a2e57562b6bff92820
12,698
def _remap_cortex_out(cortex_out, region, out_file):
    """Remap coordinates in local cortex variant calls to the original
    global region.
    """
    def _remap_vcf_line(line, contig, start):
        parts = line.split("\t")
        if parts[0] == "" or parts[1] == "":
            return None
        parts[0] = contig
        try:
            parts[1] = str(int(parts[1]) + start)
        except ValueError:
            raise ValueError("Problem in {0} with \n{1}".format(
                cortex_out, parts))
        return "\t".join(parts)

    def _not_filtered(line):
        parts = line.split("\t")
        return parts[6] == "PASS"

    contig, start, _ = region
    start = int(start)
    with open(cortex_out) as in_handle:
        with open(out_file, "w") as out_handle:
            for line in in_handle:
                if line.startswith("##fileDate"):
                    pass
                elif line.startswith("#"):
                    out_handle.write(line)
                elif _not_filtered(line):
                    update_line = _remap_vcf_line(line, contig, start)
                    if update_line:
                        out_handle.write(update_line)
03357f276c6733508f17197a3300f1d55271e486
12,699
def power_out_time(value):
    """0 - 32767 seconds"""
    return "Seconds:{}".format(value)
1f1efb91a9163ad0f6fb920985edb84809c38c5a
12,700
def _album_is_reviewed(album, user_key):
    """Check if an album has been reviewed.

    Args:
        album: An Album entity
        user_key: A stringified user key, or None.

    Returns:
        If user_key is None, returns True if and only if the album has
        any reviews at all. If user_key is not None, returns True if and
        only if the album has been reviewed by the specified user.
    """
    if user_key is None or user_key == "":
        return len(album.reviews) > 0
    for review in album.reviews:
        if str(review.author.key()) == user_key:
            return True
    return False
fd3b495ccdbc7a61398bc63a28fd5ab8378152f0
12,701
import sqlite3


def load_history_db(history_db):
    """
    Load simulation history from provided db.
    In case no db is given, returns an empty history.

    :param history_db:
    :return:
    """
    history = {}
    if history_db is not None:
        conn = sqlite3.connect(history_db)
        cursor = conn.cursor()
        for robot_string, fitness in cursor.execute('SELECT * FROM history'):
            history[robot_string] = (float(fitness),)
        cursor.close()
        conn.close()
    return history
06055c6c48f8757513808e44d85f802ab0acd456
12,702
def upload_location(instance, filename, **kwargs):
    """Upload location for profile image"""
    return f"accounts/{instance.username}/{filename}"
c662bbb095f8180aa330a2566c9d9aaf372712b0
12,704
import base64


def base64url_to_hex(data):
    """
    Converts a base64url encoded string to a hex encoded string

    Args:
        data (str): The base64url encoded string
    """
    data = data.replace("-", "+").replace("_", "/")
    missing_padding = len(data) % 4
    if missing_padding:
        data += "=" * (4 - missing_padding)
    return base64.b64decode(data).hex()
db56f4c9e1335b9a4a8f0e0a794c7468122666dc
12,705
import os


def clean_dir(dir_to_clean, remove_ext):
    """
    dir_to_clean (list, str): directories to clean
    remove_ext (list, str): extensions to delete
    ----------------
    return: None
    """
    file_size_del = 0
    files_count = 0
    for direc in dir_to_clean:
        for file in os.listdir(direc):
            if any(ext in os.path.splitext(file)[1] for ext in remove_ext):
                files_count += 1
                file_size_del += os.path.getsize(os.path.join(direc, file))
                os.remove(os.path.join(direc, file))
    print(f"Removed {file_size_del/(1000**3)} Gb over {files_count} files "
          f"and {len(dir_to_clean)} directories.")
    return None
df53c204ec066224c9c11c9ff749bfa39fbcec43
12,706
def safe_filename(infilename):
    """
    Take a filename and remove special characters like the asterisk and
    slash which mess things up.

    Warning: if you pass a directory path to this function, it will remove
    the slashes. Do not do that.

    :param infilename: filename to be processed.
    :type infilename: string
    :return: string with asterisk and slash replaced by underscores
    :rtype: string
    """
    safename = infilename.replace('/', '_')
    safename = safename.replace('*', '_')
    return safename
0e46972696f8d7b10bc6bc61efcdd2dfbca9e909
12,707
import collections
from typing import Mapping


def namedtuple(typename, field_names, default_values=()):
    """
    Overwriting namedtuple class to use default arguments for variables
    not passed in at creation of object.
    Can manually set default value for a variable; otherwise None will
    become default value.
    """
    T = collections.namedtuple(typename, field_names)
    T.__new__.__defaults__ = (None,) * len(T._fields)
    if isinstance(default_values, Mapping):
        prototype = T(**default_values)
    else:
        prototype = T(*default_values)
    T.__new__.__defaults__ = tuple(prototype)
    return T
b0fba5ba73037e7bdb5db05d10a8b96f953fc829
12,708
def length_along_path(pp, index):
    """Get the length measured along the path up to the given index."""
    index = min(index, len(pp) - 1)  # beware of the end
    diff_squared = (pp[:index] - pp[1:index + 1]) ** 2
    distances = diff_squared.sum(1)
    return (distances ** 0.5).sum()
    # == sum(pp[j].distance(pp[j+1]) for j in range(index))
a7eea4b411860c51fb5f64a690a9d4832c8b98a9
12,709
def RFile():
    """Return a Raman data file for testing."""
    return 'examples/testdata/1/0001.txt'
cb9bf96aa3dab11f27322b4587f3e36f89a59f31
12,710
def find_node(_node_number, geometry):
    """Check which of the given node numbers exist in the f2u model."""
    _node = []
    for _number in _node_number:
        try:
            # raises KeyError if the node is missing
            geometry._mesh.nodes[_number]
            _node.append(_number)
        except KeyError:
            continue
    return _node
17e5a3c21529298e19ee424539a83ccea1b8acf6
12,711