content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
from typing import List


def split_chinese_sentence(text: str) -> List[str]:
    """Performs sentence tokenization for Chinese.

    Args:
        text (str): text to split into sentences.

    Returns:
        List[str]: a list of sentences.
    """
    sentences = []
    quote_mark_count = 0  # parity tracker for straight double quotes (")
    sentence = ""
    for i, c in enumerate(text):
        sentence += c
        if c in {"”", "」"}:
            # A closing CJK quote always ends the current sentence.
            sentences.append(sentence)
            sentence = ""
        elif c in {"。", "!", "?", "!", "?"}:
            # Sentence-final punctuation ends the sentence unless it is
            # immediately followed by a closing quote (then the quote ends it).
            if i < len(text) - 1 and text[i + 1] not in {"”", '"', "」"}:
                sentences.append(sentence)
                sentence = ""
        elif c == '"':
            quote_mark_count += 1
            # A closing straight quote (even count) ends the sentence only
            # when the character right before it was sentence-final punctuation.
            if (
                quote_mark_count % 2 == 0
                and len(sentence) > 2
                and sentence[-2] in {"?", "!", "。", "?", "!"}
            ):
                sentences.append(sentence)
                sentence = ""
    if sentence:
        # Flush trailing text that lacks closing punctuation.
        sentences.append(sentence)
    return sentences
15e410f675f163a7a4d130209821f2038dabf9e2
49,264
def discovery_filter(func):
    """Bypass discovery if not configured to do so, or if unlikely to get an RP."""

    def wrapper(self, *args, **kwargs) -> None:
        # Evaluate all three conditions eagerly, as the original did.
        flags = (
            self._gwy.config.disable_sending,
            self._gwy.config.disable_discovery,
            getattr(self, "has_battery", None),
        )
        if any(flags):
            return
        return func(self, *args, **kwargs)

    return wrapper
5a25b381cc79300ea585ddfb20ce425efbcf4901
49,265
def notebook():
    """Return the contents of a minimal Jupyter notebook as a dict."""
    code_cell = {
        'metadata': {'trusted': True},
        'cell_type': 'code',
        'source': '█',
        'execution_count': None,
        'outputs': [],
    }
    return {
        'cells': [code_cell],
        'metadata': {},
        'nbformat': 4,
        'nbformat_minor': 2,
    }
4a5f1c4120463cc8d597886fdd71cf9d8defbe5e
49,267
def intersect(a, b):
    """Identifies the elements in A that are in B."""
    common = set(a).intersection(b)
    return list(common)
3f704d653ed5f2b552a55f3eef95db7569718037
49,268
import json


def dump_objs(objs_iter, dump_fn=json.dumps):
    """Dump an iterable of same-typed objects to a JSON array.

    Takes a function used to dump the individual objects.  Note that JSON
    has only one way of representing collections of elements: the array,
    equivalent to a Python ``list``.  Hence this function returns a JSON
    array which, like a Python list, is delimited by '[' and ']'.
    """
    parts = (dump_fn(obj) for obj in objs_iter)
    return '[%s]' % ','.join(parts)
e56d031c379f8cc2d23e686fea2e185c6b74d785
49,269
import os


def get_subdir(filepath):
    """
    :param filepath: Path to the input file

    >>> get_subdir("/tmp/a/b/c/d.yml")
    'c'
    >>> get_subdir("/tmp/x.json")
    'tmp'
    >>> get_subdir("/")
    ''
    >>> get_subdir("a.yml")
    ''
    """
    parent = os.path.dirname(filepath)
    _, tail = os.path.split(parent)
    return tail
c8cae9822650d894c26fc67bca80c05a1e20a228
49,270
import os def _prefix_path(prefix, path): """Prefix path(s). Args: prefix: The prefix to apply. data: A path or a dict of paths. Returns: The updated path or dict of paths. """ if isinstance(path, dict): for key, value in path.items(): path[key] = os.path.join(prefix, value) else: path = os.path.join(prefix, path) return path
767cae3c1876d0400caa43a58505227422e97bd0
49,271
def getText(targetFile):
    """Return a string containing the contents of the target file"""
    with open(targetFile, "rt", encoding="utf-8") as handle:
        contents = handle.read()
    return contents
90e70c5a7fc1796b0fd28fc4f52d602cf1c89b27
49,274
def get_centre(bounds):
    """
    Get the centre of the object from the bounding box.

    ``bounds`` is a flat (min, max) * 3 sequence; anything else yields
    a list of six ``None`` placeholders.
    """
    if len(bounds) != 6:
        return [None] * 6
    centre = []
    for hi in range(1, len(bounds), 2):
        # Midpoint of each (min, max) pair.
        centre.append(bounds[hi] - (bounds[hi] - bounds[hi - 1]) / 2.0)
    return centre
ffa873400eb35957c08da523abcc1a0fea3b664d
49,275
def delay_from_foffsets(df, dfd, dfdd, times):
    """
    Return the delays in phase caused by offsets in frequency (df),
    and two frequency derivatives (dfd, dfdd) at the given times in seconds.
    """
    # Taylor terms: f*t + fd*t^2/2 + fdd*t^3/6, summed in that order.
    total = df * times
    total = total + dfd * times**2 / 2.0
    total = total + dfdd * times**3 / 6.0
    return total
1977b71f5bb5cafce55da7435141c6ff0db29e67
49,277
def get_pulsar_producer_stage(pipeline_builder, topic):
    """Create and return a Pulsar Producer origin stage depending on execution mode for the pipeline."""
    stage = pipeline_builder.add_stage('Pulsar Producer', type='destination')
    stage.set_attributes(data_format='TEXT', text_field_path='/text', topic=topic)
    return stage
7e2aa226445dfa979cfdab043b67fbf32aedadd1
49,278
def cli(ctx, gff, analysis_id, organism_id, landmark_type="", re_protein="",
        re_protein_capture="^(.*?)$", fasta="", no_seq_compute=False,
        quiet=False, add_only=False, protein_id_attr=""):
    """Load features from a gff file

    Thin CLI wrapper: forwards every option unchanged to
    ``ctx.gi.feature.load_gff``.

Output:

    None
    """
    return ctx.gi.feature.load_gff(gff, analysis_id, organism_id,
                                   landmark_type=landmark_type,
                                   re_protein=re_protein,
                                   re_protein_capture=re_protein_capture,
                                   fasta=fasta,
                                   no_seq_compute=no_seq_compute,
                                   quiet=quiet,
                                   add_only=add_only,
                                   protein_id_attr=protein_id_attr)
dae1465e31340e093be6ef9ac7b0533ec8c3f35f
49,279
import os


def get_dir_size(start_path='.'):
    """Calculate size of given directory.

    :param start_path: Directory path, defaults to '.'
    :type start_path: str, optional
    :return: Size in bytes of given directory, including all subdirectories.
    :rtype: int
    """
    total = 0
    for dirpath, _dirnames, filenames in os.walk(start_path):
        total += sum(
            os.path.getsize(os.path.join(dirpath, name)) for name in filenames
        )
    return total
44af71e108dad9b1c90980308cf30fbc4a39714b
49,280
def all_preds(connection, table, dim_cols, cmp_pred):
    """ Extracts all possible predicates from database.

    Args:
        connection: connection to database
        table: extract predicates for this table
        dim_cols: list of dimension columns
        cmp_pred: filter rows by this predicate

    Returns:
        list of (column, value) pairs representing predicates
    """
    # SECURITY NOTE(review): table/column names and cmp_pred are interpolated
    # directly into SQL text. Identifiers cannot be bound as parameters, so
    # callers must ensure these come from trusted, validated sources.
    preds = [("'any'", 'any')]  # sentinel predicate meaning "no restriction"
    for dim in dim_cols:
        print(f'Generating predicates for dimension {dim} ...')
        with connection.cursor() as cursor:
            query = f'select distinct {dim} from {table} ' \
                    f'where {cmp_pred} and {dim} is not null'
            cursor.execute(query)
            result = cursor.fetchall()
            # One (column, value) predicate per distinct non-null value.
            preds += [(dim, r[0]) for r in result]
    return preds
7fe68c7e0862506b259ada9e0b647ef669931d97
49,281
def incrementAtIndex(valueList, index, max):
    """Returns True if the value incremented."""
    before = valueList[index]
    if before < max:
        valueList[index] = before + 1
    return valueList[index] != before
b835fe8e7078e5ea245f7115c639084412e928d5
49,282
def hack_ncbi_fasta_name(pipe_name):
    """Turn 'gi|445210138|gb|CP003959.1|' into 'CP003959.1' etc.

    For use with NCBI provided FASTA and GenBank files to ensure
    contig names match up.

    Or Prokka's *.fna and *.gbk files, turning 'gnl|Prokka|contig000001'
    into 'contig000001'
    """
    fields = pipe_name.split("|")
    if pipe_name.startswith("gi|") and pipe_name.endswith("|"):
        return fields[3]
    if pipe_name.startswith("gnl|") and pipe_name.count("|") == 2:
        return fields[2]
    return pipe_name
3b384b101a63fc2babd2b7b26b603565d71a8496
49,283
def train_one_mini_batch(
        args,
        model,        # network to train
        images,       # input images
        labels,       # expected label for the input images
        cross_error,  # error function
        optimizer,    # optimizer
):
    """Run one optimisation step on a single mini-batch of images.

    Returns the loss tensor computed by ``cross_error`` (after backward
    and an optimizer step have been applied).
    """
    # Forward pass; inputs are cast to float for the model.
    predicts = model(images.float())
    # CrossEntropyLoss(calculated, expected); labels cast to long as required.
    error = cross_error(predicts, labels.long())
    optimizer.zero_grad()
    error.backward()  # loss
    optimizer.step()
    return error
1fac43aef945f6471915a09f4fe6d0f44f3c15c2
49,284
def parse_slots(str, dia_act):
    """Tag the words of ``str`` with BIO slot labels taken from ``dia_act``.

    NOTE(review): the parameter ``str`` shadows the builtin; kept as-is for
    interface compatibility.  ``dia_act['slot_vals']`` presumably maps slot
    names to lists of candidate values — confirm against the dialog-act
    producer.

    Returns:
        (w_arr, bio_arr): lowercased tokens (wrapped in BOS/EOS) and a
        parallel list of BIO tags ('O', 'B-<slot>', 'I-<slot>').
    """
    new_str = 'BOS ' + str + ' EOS'
    new_str = new_str.lower()
    w_arr = new_str.split(' ')
    bio_arr = ['O'] * len(w_arr)
    left_index = 0
    for slot in dia_act['slot_vals'].keys():
        if len(dia_act['slot_vals'][slot]) == 0:
            continue
        # Only the first candidate value for each slot is considered.
        slot_val = dia_act['slot_vals'][slot][0].lower()
        if slot_val == 'unk' or slot_val == 'finish':  # placeholder values
            continue
        # Locate the full slot value; fall back to its first word.
        str_left_index = new_str.find(slot_val, 0)
        if str_left_index == -1:
            str_left_index = new_str.find(slot_val.split(' ')[0], 0)
            if str_left_index == -1:
                continue
        # Convert the character offset into a token index.  NOTE(review):
        # this slices the ORIGINAL string with an offset into new_str; the
        # 4-char 'BOS ' prefix happens to compensate for the BOS token —
        # fragile, confirm intended.
        left_index = len(str[0:str_left_index].split(' '))
        print((str_left_index, left_index, len(w_arr), len(slot_val.split(' '))))  # debug trace
        # Clamp so we never write past the end of the tag array.
        range_len = min(len(slot_val.split(' ')), len(w_arr) - left_index)
        for index in range(range_len):
            bio_arr[left_index + index] = ("B-" + slot) if index == 0 else ("I-" + slot)
    return w_arr, bio_arr
a591ed3059721881d3e4a1e7402945a573062423
49,286
def findTarget(self, root, k):  # ! 104ms, breadth-first search
    """
    :type root: TreeNode
    :type k: int
    :rtype: bool
    """
    if not root:
        return False
    seen = []
    queue = [root]
    while queue:
        current = queue.pop(0)
        if k - current.val in seen:
            return True
        seen.append(current.val)
        queue.extend(child for child in (current.left, current.right) if child)
    return False
a4714e4ffd44256ce0d69e0710625d0e562c19f5
49,287
def photo_pull(req, id_num, img_name):
    """
    Creates path to image based on name and redMapper id number.

    Args:
        req: the http request (unused here)
        id_num: the redmapperID of the image galaxy
        img_name: the name of the desired image

    Returns:
        Path to desired image
    """
    return "static/data/{0}/{0}-{1}".format(id_num, img_name)
0c35ea26385b408dbcf40445ec161c16f8cf9f69
49,288
def widget_type(field):
    """
    (stolen from django-widget-tweaks)
    Returns field widget class name (in lower case).
    """
    widget = getattr(getattr(field, 'field', None), 'widget', None)
    if not widget:
        return ''
    name = widget.__class__.__name__.lower()
    if name == "groupedchoicewidget":
        name = widget.widget_name
    return name
84e88834c22780038967f57a3fb7dee9be78ccd6
49,289
def read_data(fname, ignore_docstart=False):
    """Read data from any files with fixed format.

    Each line of file should be a space-separated token information,
    in which information starts from the token itself. Each sentence
    is separated by a empty line.

    e.g. 'Apple NP (NP I-ORG' could be one line

    Args:
        fname (str): file path for reading data.
        ignore_docstart (bool): drop single-token sentences (e.g. -DOCSTART-
            markers) when flushing at blank lines.

    Returns:
        sentences (list): Sentences is a list of sentences.
        Sentence is a list of token information.
        Token information is in format: [token, feature_1, ..., feature_n, tag_label]
    """
    sentences = []
    current = []
    with open(fname) as handle:
        for raw in handle:
            line = raw.strip()
            if line:
                current.append(list(line.split()))
                continue
            # Blank line: flush the accumulated sentence (optionally
            # skipping one-token DOCSTART-style sentences).
            if current and (not ignore_docstart or len(current) > 1):
                sentences.append(current)
            current = []
    if current:
        sentences.append(current)
    return sentences
71be54b8b9e3a762f13ce9ba10e1f7a885032e2e
49,290
def seqs_dic_count_chars(seqs_dic):
    """
    Given a dictionary with sequences, count how many times each character
    appears across all sequences.

    >>> seqs_dic = {'s1': 'ABCC', 's2': 'ABCD'}
    >>> seqs_dic_count_chars(seqs_dic)
    {'A': 2, 'B': 2, 'C': 3, 'D': 1}
    """
    from collections import Counter

    assert seqs_dic, "given seqs_dic empty"
    # Counter replaces the hand-rolled per-character if/else bookkeeping.
    counts = Counter()
    for seq in seqs_dic.values():
        counts.update(seq)
    assert counts, "cc_dic empty"
    # Return a plain dict to keep the original return type.
    return dict(counts)
aafd059c4c100d755b3e3e10e99b4449eb9e360d
49,291
def resolve_concept_reference(attribute_blocks, concepts):
    """
    This function resolves the relative references that attribute blocks
    could have. This occurs when a concept has two blocks of attributes
    attached to it, i.e. one with domain and the other without domain.
    The last block has as associated concept the attribute block on top
    of it instead of the concept itself.

    :arg attribute_blocks: list of attribute blocks.
    :arg concepts: list of concepts.
    :return list of attributes with the correct associated concepts.
    """
    # NOTE(review): loop variable ``id`` shadows the builtin; kept as-is.
    for id, attribute_block in attribute_blocks.items():
        if "concept_associated" not in attribute_block:
            continue
        source_id = attribute_block["concept_associated"]
        # Check if the object associated to this set of attributes
        # (attribute block) is really a concept.
        if source_id not in concepts and source_id in attribute_blocks:
            # If the id was not from a concept, look up the attribute block it
            # points at and take that block's concept as the real target.
            real_id = attribute_blocks[source_id]["concept_associated"]
            attribute_blocks[id]["concept_associated"] = real_id
            for attribute in attribute_block["attributes"]:
                # ``!= False`` (rather than a truthiness test) is kept on
                # purpose: only an explicit False marks "no domain".
                if attribute["domain"] != False:
                    attribute["domain"] = real_id
    return attribute_blocks
53aa4d7291842e00694fb776a8ea62218723300b
49,292
import argparse


def parse_args(argv=None):
    """Parse arguments for the ``diffdirs`` tool.

    :param argv: optional list of argument strings; defaults to
        ``sys.argv[1:]`` (backward-compatible — existing ``parse_args()``
        callers are unaffected, and tests can pass explicit argv).
    :returns: the parsed argument namespace
    """
    parser = argparse.ArgumentParser(prog="diffdirs")
    parser.add_argument(
        "original_data_path",
        metavar="ORIGINAL_DATA_PATH",
        help="The path to the 'original' data. This will be used to compare against",
    )
    parser.add_argument(
        "new_data_paths",
        metavar="NEW_DATA_PATH",
        nargs="+",
        help="The path(s) to the 'new' data. These will be compared against original_data_path",
    )
    parser.add_argument(
        "-g",
        "--globs",
        metavar="GLOB",
        default=["**/*.fits"],
        nargs="+",
        help=(
            # BUG FIX: the two implicitly-concatenated fragments previously
            # ran together ("...regressionNOTE:"); a separator was missing.
            "An optional glob pattern for specifying which files to check for regression. "
            "NOTE: This currently will only work for FITS files"
        ),
    )
    return parser.parse_args(argv)
56336e10e785fcea0373ddeb6c2d18edafce4be6
49,293
def default_model():
    """default model used whenever a model is needed to run a test.

    Should work regardless of where the test is run, i.e. both on CI
    platform and locally.  Currently ``teenytweetynet``.
    """
    model_name = "teenytweetynet"
    return model_name
48790e18789da080bae7aace3744a26c62a75ea7
49,295
import pathlib


def unique_directories(files):
    """Returns a list of directories (pathlib.Path objects) for the files
    passed without repetitions."""
    seen = set()
    for name in files:
        seen.add(pathlib.Path(name).parent)
    return list(seen)
326ed8b251b21fb36f03c5ddc7edd0b18918e868
49,297
def policy_adapter(policy):
    """
    A policy adapter for deterministic policies.

    Adapts a deterministic policy given as array or map
    `policy[state] -> action` for the trajectory-generation functions.

    Args:
        policy: The policy as map/array
            `policy[state: Integer] -> action: Integer`
            representing the policy function p(state).

    Returns:
        A function `(state: Integer) -> action: Integer` acting out the
        given policy.
    """
    def act(state):
        return policy[state]

    return act
d5a0a23aca2bba135c7e5222b240f5fa67418f85
49,298
def _compact4nexus(orig_list):
    """Transform [1 2 3 5 6 7 8 12 15 18 20] (baseindex 0, used in the Nexus class)
    into '2-4 6-9 13-19\\3 21' (baseindex 1, used in programs like Paup or MrBayes.).

    Consecutive runs (optionally with a constant step > 1) are collapsed
    into 'start-end' or 'start-end\\step' tokens, output 1-based.
    """
    if not orig_list:
        return ''
    orig_list = list(set(orig_list))  # deduplicate
    orig_list.sort()
    shortlist = []
    clist = orig_list[:]
    clist.append(clist[-1] + .5)  # dummy value makes it easier
    while len(clist) > 1:
        step = 1
        for i, x in enumerate(clist):
            if x == clist[0] + i * step:
                # are we still in the right step?
                continue
            elif i == 1 and len(clist) > 3 and clist[i + 1] - x == x - clist[0]:
                # second element, and possibly at least 3 elements to link,
                # and the next one is in the right step
                step = x - clist[0]
            else:
                # pattern broke, add all values before current position to new list
                sub = clist[:i]
                if len(sub) == 1:
                    shortlist.append(str(sub[0] + 1))
                else:
                    if step == 1:
                        shortlist.append('%d-%d' % (sub[0] + 1, sub[-1] + 1))
                    else:
                        shortlist.append('%d-%d\\%d' % (sub[0] + 1, sub[-1] + 1, step))
                clist = clist[i:]
                break
    return ' '.join(shortlist)
dba41e029526ede67c6915f2545e08ebf05fe771
49,299
def get_comp(graph, node):
    """Return the set of nodes in ``node``'s connected component.

    Args:
        graph: object whose ``edges`` mapping gives, for each node, the
            set of neighbouring nodes.
        node: the node whose component to collect.

    Returns:
        set: all nodes reachable from ``node`` (including itself).
    """
    # BUG FIX: ``set(node)`` iterated over the node itself (splitting a
    # string node into characters, raising TypeError for an int node);
    # the frontier must start as the singleton {node}.
    unexplored = {node}
    explored = set()
    while unexplored:
        current = unexplored.pop()
        explored.add(current)
        # Only enqueue neighbours we have not visited yet.
        unexplored.update(graph.edges[current] - explored)
    return explored
f53837ca60f9c07a8c34911e736987a3e34fcc34
49,300
from typing import Any
from typing import Callable


def noop_decorator(*args: Any, **kwargs: Any) -> Any:
    """Return function decorated with no-op; invokable with or without args.

    >>> @noop_decorator
    ... def func1(x): return x * 10

    >>> @noop_decorator()
    ... def func2(x): return x * 10

    >>> @noop_decorator(2, 3)
    ... def func3(x): return x * 10

    >>> @noop_decorator(keyword=True)
    ... def func4(x): return x * 10

    >>> check_eq(func1(1) + func2(1) + func3(1) + func4(1), 40)
    """
    if len(args) != 1 or not callable(args[0]) or kwargs:
        # Called as a decorator *factory* (with arguments): hand back this
        # same function so it acts as the actual decorator on the next call.
        return noop_decorator  # Decorator is invoked with arguments; ignore them.
    # Called directly as a decorator: return the function unchanged.
    func: Callable[[Any], Any] = args[0]
    return func
16333bd7c21885ff894efc2a8fa6008bbbbc1b43
49,301
def toOneOverX(x, y=None):
    """Return the reciprocal 1/x.

    :param x: value to invert; must be non-zero.
    :param y: unused; kept for interface compatibility.
    :raises ValueError: if ``x`` is zero.
    """
    if x == 0:
        raise ValueError("cannot divide by zero")
    return 1 / x
4b48fadce1b687f2b7b7aaf06232dffe924aab19
49,302
from typing import Mapping
from typing import List
from typing import Any


def transpose_dict_of_lists(dict_of_lists: Mapping[str, list], keys: List[str]) \
        -> List[Mapping[str, Any]]:
    """Takes a dict of lists, and turns it into a list of dicts."""
    length = len(dict_of_lists[keys[0]])
    rows = []
    for i in range(length):
        rows.append({key: dict_of_lists[key][i] for key in keys})
    return rows
b51323fcc31aa41ee8a5c333ad36e5d2fdae2b85
49,303
import math


def gauss_legendre_1(max_step):
    """Float number implementation of the Gauss-Legendre algorithm,
    for max_step steps."""
    a = 1.
    b = 1. / math.sqrt(2)
    t = 0.25
    p = 1.
    for _ in range(max_step):
        # Simultaneous update: all right-hand sides use the old values.
        a, b, t, p = (
            (a + b) / 2.0,
            math.sqrt(a * b),
            t - p * (a - (a + b) / 2.0) ** 2,
            2.0 * p,
        )
    return ((a + b) ** 2) / (4.0 * t)
c48fb524c7c5b3aeb14fe247deaa4c329bc2b405
49,304
import sys


def pathlify(hash_prefix):
    """Converts a binary object hash prefix into a posix path, one folder per byte.

    >>> pathlify(b'\\xde\\xad')
    'de/ad'
    """
    if sys.version_info.major == 3:
        # Python 3: iterating bytes yields ints directly.
        parts = ['%02x' % b for b in hash_prefix]
    else:
        # Python 2: iterating a str yields 1-char strings.
        parts = ['%02x' % ord(b) for b in hash_prefix]
    return '/'.join(parts)
3c376917c5710c664a550150debed2de17b1f49f
49,306
import os
import re


def convert_source_file_to_arxiv_id(filename):
    """Converts a source file name into an ArXiV ID."""
    # Strip folder and extension.
    base = os.path.basename(filename)
    arxiv_id = os.path.splitext(base)[0]
    # Old-style IDs (e.g. "hep-th9901001") need a slash between archive and number.
    match = re.match(r"([a-z-]+)([0-9]+)", arxiv_id)
    if match is None:
        return arxiv_id
    return "{}/{}".format(match.group(1), match.group(2))
9d015fd451a9bad45ec94d5933d24388aaa005c1
49,307
def is_6_digits(number: int) -> bool:
    """Return True if ``number``'s decimal representation is six characters long.

    NOTE(review): for a negative number the '-' sign counts toward the
    length (so -12345 -> True) — confirm callers only pass non-negatives.
    """
    # Idiom fix: return the comparison directly instead of if/True/False.
    return len(str(number)) == 6
60ab2a868be04ae48b39c47ec3b65145d597052a
49,308
def GetSumByIteration(array, target):
    """Brute-force search for all index-ordered triples summing to ``target``.

    Returns a list of (a, b, c) value tuples, one per matching triple.
    """
    triples = []
    n = len(array)
    for i in range(n - 2):
        for j in range(i + 1, n - 1):
            partial = array[i] + array[j]
            for k in range(j + 1, n):
                if partial + array[k] == target:
                    triples.append((array[i], array[j], array[k]))
    return triples
b7c1b7c7b1a675d7132e377fe5e722867e0c8e20
49,309
def get_most_frequent_color(image_patch):
    """
    Find the most frequent color in image_patch.

    :param image_patch: 2-D indexable with a ``shape`` attribute
        (e.g. a numpy array); pixels are read as ``image_patch[x, y]``.
    :return: the pixel value that occurs most often.
    """
    color_counts = {}
    for x in range(image_patch.shape[0]):
        for y in range(image_patch.shape[1]):
            pixel = image_patch[x, y]
            # Stringify since array-valued pixels (e.g. RGB) are unhashable.
            key = str(pixel)
            if key in color_counts:
                color_counts[key] = (color_counts[key][0] + 1, pixel)
            else:
                # BUG FIX: the first occurrence used to be stored with
                # count 0, understating every frequency by one.
                color_counts[key] = (1, pixel)
    best_key = max(color_counts, key=lambda k: color_counts[k][0])
    return color_counts[best_key][1]
fbb809b515e0d0acf18049834bb4bef8792a6596
49,310
import sys


def min_fill_in_heuristic(graph):
    """Implements the Minimum Fill-In heuristic.

    Returns the node from the graph, where the number of edges added when
    turning the neighbourhood of the chosen node into a clique is as small
    as possible.  The running time of the algorithm is :math:`O(V^3)` and
    it uses additional constant memory.

    ``graph`` maps each node to the set of its neighbours.  Returns None
    for an empty graph, or when some node is already adjacent to every
    other node.
    """
    if len(graph) == 0:
        return None

    min_fill_in_node = None
    min_fill_in = sys.maxsize

    # create sorted list of (degree, node)
    degree_list = [(len(graph[node]), node) for node in graph]
    degree_list.sort()

    # abort condition: some node is connected to all others
    min_degree = degree_list[0][0]
    if min_degree == len(graph) - 1:
        return None

    for (_, node) in degree_list:
        num_fill_in = 0
        nbrs = graph[node]
        for nbr in nbrs:
            # count how many nodes in nbrs current nbr is not connected to
            # subtract 1 for the node itself
            num_fill_in += len(nbrs - graph[nbr]) - 1
            if num_fill_in >= 2 * min_fill_in:
                # cannot beat the current minimum; each missing edge is
                # counted twice, hence the factor 2
                break

        num_fill_in /= 2  # divide by 2 because of double counting

        if num_fill_in < min_fill_in:  # update min-fill-in node
            if num_fill_in == 0:
                # a simplicial node is optimal; return immediately
                return node
            min_fill_in = num_fill_in
            min_fill_in_node = node

    return min_fill_in_node
412d0ec1534592119a2be7ebdbd510b96c98a7e1
49,311
def parse_clan_file(clan_list):
    """
    Parses a list of Rfam clan accessions.

    clan_list: A plain .txt file containing a list of Rfam Clan Accessions

    return: A list of clan accessions
    """
    # Context manager ensures the handle is closed even if reading fails
    # (the original used explicit open/close with no try/finally).
    with open(clan_list, 'r') as fp:
        return [line.strip() for line in fp]
c5869235750902876f10408e73bbf675316d130c
49,312
def c2f(celsius):
    """Convert Celsius to Fahrenheit."""
    ratio = 9.0 / 5.0
    return ratio * celsius + 32
bbe693bf2fa529a3b50793796c2b7b89edb683a6
49,314
from pathlib import Path


def get_plot_folder(folder_path: str):
    """Create (if needed) and return a folder for plots, creating also
    parent directories if necessary.

    :param folder_path: path of the folder to ensure exists.
    :return: the folder as a ``pathlib.Path``.
    """
    folder = Path(folder_path)
    # exist_ok=True removes the check-then-create race (TOCTOU) the
    # original `if not folder.exists(): folder.mkdir(...)` had when two
    # processes plot concurrently.
    folder.mkdir(parents=True, exist_ok=True)
    return folder
dd95e7089a377f94593e0d4ead360f14a9268a7a
49,315
def source_location_to_tuple(locpb):
    """Converts a SourceLocation proto into a tuple of primitive types.

    Returns None for a missing or entirely-empty location.
    """
    if locpb is None:
        return None
    file_name = locpb.file()
    line = locpb.line()
    function = locpb.function_name()
    # De Morgan of the original "not a and not b and not c" check.
    if not (file_name or line or function):
        return None
    return file_name, line, function
cac9f13bcdccab65eeaed94955e3fc5f48193f4b
49,316
def remove_by_index(config_pool, index):
    """Remove the first configuration whose ``index`` matches; return the pool."""
    matched = None
    for config in config_pool:
        if config.index == index:
            matched = config
            break
    if matched is not None:
        config_pool.remove(matched)
    return config_pool
dc09d08bcf38c5c6d506c126844774e509a85df4
49,320
import os


def cpu_threads():
    """
    Suggest how many CPU threads should be used for multiprocessing.

    :return: 1, or 1 fewer than the number of threads in the affinity
        mask of the current process, whichever is greater.
    """
    # NOTE: os.sched_getaffinity is only available on Linux.
    available = len(os.sched_getaffinity(0))
    return max(1, available - 1)
b08169713fdc12b5e55ba0f6bfead073d91af002
49,321
def _data_types_from_dsp_mask(words): """ Return a list of the data types from the words in the data_type mask. """ data_types = [] for i, word in enumerate(words): data_types += [j + (i * 32) for j in range(32) if word >> j & 1] return data_types
a0c10a96ce8d6ca0af3156ee147de8571f605447
49,322
def get_expected_number_of_faces(off_file):
    """
    Finds the expected number of faces in an OFF file. Used to check this
    matches the number of items in a pixel mapping list.
    :param off_file: The OFF file contents as a string.
    :return: The number of faces, or None if no counts line is found.
    """
    # Skip the "OFF" header line, then take the first non-blank,
    # non-comment line; its second field is the face count.
    for line in off_file.split("\n")[1:]:
        stripped = line.strip()
        # BUG FIX: a blank line used to raise IndexError on line[0].
        if stripped and not stripped.startswith("#"):
            return int(stripped.split()[1])
    return None
8486fa165f43d924c6dd17b2670d75f8091256d1
49,323
import os


def configure(filename=None):
    """
    Creates dictionary to pass to a sqlalchemy.engine_from_config() call.

    :param filename: optional path to a ``key = value`` config file;
        blank lines and lines starting with '#' are ignored.  When
        omitted, connection parameters are read from the environment
        (DB_HOST, DB_NAME, DB_PORT, DB_USER, DB_PASSWORD) and the
        password is prompted for if unset.
    :return: configuration dict (e.g. containing "sqlalchemy.url").
    """
    configuration = {}
    if filename:
        # Fill the configuration dictionary using parameters from file.
        # (The previous ``try: ... except: raise`` wrapper was a no-op
        # and has been removed.)
        with open(filename) as fp:
            for line in fp:
                stripped = line.strip()
                # BUG FIX: blank lines used to crash the "k, v =" unpack.
                if not stripped or stripped.startswith("#"):
                    continue
                # Split on the first '=' only, so values may contain '='.
                k, v = stripped.split("=", 1)
                configuration[k.strip()] = v.strip()
    else:
        DB_HOST = os.getenv("DB_HOST", "localhost")
        DB_NAME = os.getenv("DB_NAME", "aqms_ir")
        DB_PORT = os.getenv("DB_PORT", "5432")
        DB_USER = os.getenv("DB_USER", "trinetdb")
        DB_PASSWORD = os.getenv("DB_PASSWORD")
        print("\nConnecting to database {} on {}".format(DB_NAME, DB_HOST))
        if not DB_PASSWORD:
            DB_PASSWORD = input("Password for user {} on {}: ".format(DB_USER, DB_NAME))
        configuration["sqlalchemy.url"] = "postgresql://{}:{}@{}:{}/{}".format(
            DB_USER, DB_PASSWORD, DB_HOST, DB_PORT, DB_NAME
        )
    return configuration
f9ccacd6df81d911f137afea17612f3f19ba34e7
49,325
import numpy


def in_search_space(x, lower, upper, constraints):
    """Determine feasibility of a sample.

    :param x: candidate sample (array-like).
    :param lower: elementwise lower bounds.
    :param upper: elementwise upper bounds.
    :param constraints: constraint-handling mode; bounds are only enforced
        when this equals "Reject".
    :return: True if the sample is feasible, else False.
    """
    if constraints == "Reject":
        # BUG FIX: the bounds check was computed but never returned, so
        # the "Reject" mode always fell through to an implicit None.
        return bool(
            numpy.logical_and(numpy.all(x >= lower), numpy.all(x <= upper))
        )
    return True
b1c386096891214720cb5cb3a8c3ab1325960752
49,327
import sys
import os


def terminal_size():
    """
    Return the number of terminal columns and rows, defaulting to 80x24
    if the standard output is not a TTY.

    NOTE THAT THIS ONLY WORKS ON UNIX (it shells out to ``stty size``).

    Returns:
        (rows, columns) as ints.
    """
    if sys.stdout.isatty():
        # PORT: This only works on Unix.
        rows, columns = [int(x) for x in os.popen('stty size', 'r').read().split()]
    else:
        # Not a TTY (e.g. piped output): fall back to the classic 80x24.
        rows = 24
        columns = 80
    return rows, columns
f59c2cd84a50af1f34279238ea6050cfd7f1270c
49,329
def cf_ni_ratio(cf_df, financials_df):
    """Checks if the latest reported Operating CF (Cashflow) is larger than
    the latest reported NI (Net Income).

    cf_df = Cashflow Statement of the specified company
    financials_df = Financial Statement of the specified company
    """
    cf = cf_df.iloc[cf_df.index.get_loc("Total Cash From Operating Activities"), 0]
    net_income = financials_df.iloc[financials_df.index.get_loc("Net Income"), 0]
    # Idiom fix: return the comparison directly; bool() normalises the
    # numpy.bool_ that pandas scalars can produce to a plain Python bool.
    return bool(cf > net_income)
f0997b38950e4fadcd2a9bb8d1da7e648c970bc9
49,330
def floyd(top):
    """Tortoise-and-hare cycle detection over list values.

    >>> floyd([1,2,3,4])
    False
    >>> floyd([1,2,1,2,1])
    True
    >>> floyd([1,2,3,1,2,3,1])
    True
    >>> floyd([1,2,3,1,2,3,1,2,3,1])
    True
    >>> floyd(["A","B","A","B","A"])
    True
    """
    tortoise = top
    hare = top
    while True:
        # Advance the hare two steps, bailing out at the end of the list.
        for _ in range(2):
            if not hare[1:]:
                return False  # NO LOOP
            hare = hare[1:]
        tortoise = tortoise[1:]
        # Did hare meet tortoise?
        if hare[0] == tortoise[0]:
            return True
700096d98dd100bca6098fa75543deba0166ef68
49,333
def retrieve_files(s3, short_name, date_str):
    """Retrieve a list of files from S3 bucket.

    Parameters
    ----------
    s3: S3FileSystem
        reference to S3 bucket to retrieve data from
    short_name: string
        short name of collection
    date_str: str
        string date and time to search for

    Returns
    -------
    list: list of strings
    """
    pattern = f"podaac-ops-cumulus-protected/{short_name}/{date_str}*.nc"
    return s3.glob(pattern)
e362b1be22f56dae4c17280038eb2f5ff1315ae4
49,334
import numpy


def my_coefficient_of_correlation(y_true: numpy.ndarray, y_pred: numpy.ndarray):
    """Pearson correlation coefficient between two 1-D arrays.

    Computed as cov(a, b) / sqrt(var(a) * var(b)) with sample (ddof=1)
    statistics, matching numpy.cov's default normalisation.

    :param y_true: observed values.
    :param y_pred: predicted values.
    :return: the correlation coefficient (scalar).
    """
    stacked = numpy.stack((y_true, y_pred), axis=0)
    # Covariance matrix layout:
    #   [[cov(a,a), cov(a,b)],
    #    [cov(b,a), cov(b,b)]]
    cov_matrix = numpy.cov(stacked)
    sample_var_true = numpy.var(y_true, ddof=1)
    sample_var_pred = numpy.var(y_pred, ddof=1)
    normalised = cov_matrix / ((sample_var_true * sample_var_pred) ** 0.5)
    return normalised[0][1]
72210e2c1888abcc0530d891949b09606d47e956
49,335
def mismos_digitos(a: int, b: int) -> bool:
    """Same Digits.

    Parameters:
        a (int): the first number; a positive integer.
        b (int): the second number; a positive integer.
    Returns:
        bool: True if the digits appearing in both numbers are the same,
        False otherwise.
    """
    # BUG FIX: the original only verified that every digit of the shorter
    # number appears in the longer one (a one-way subset test), so e.g.
    # mismos_digitos(12, 123) wrongly returned True.  The contract is set
    # equality of the digits, checked in both directions.
    return set(str(a)) == set(str(b))
30dc56066495add1d75bbed0aa93ed7f2c26817e
49,336
from typing import List
from typing import Dict


def get_readme_download_url(files: List[Dict[str, str]]) -> str:
    """
    Takes in a response from the github api that lists the files in a
    repo and returns the url that can be used to download the repo's
    README file.  Returns '' when no README-like file is present.
    """
    for entry in files:
        if entry["name"].lower().startswith("readme"):
            return entry["download_url"]
    return ""
5c394d247b0ab06099b3d3a540fa2f5101b385e6
49,338
import os


def get_unidist_root():
    """
    Get the project root directory.

    Side effect: prepends the root to the ``PYTHONPATH`` environment
    variable of the current process (and its future children).

    Returns
    -------
    str
        Absolute path to the project root directory.
    """
    # The root is three directory levels above this file.
    unidist_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../"))

    os.environ["PYTHONPATH"] = (
        unidist_root + os.pathsep + os.environ.get("PYTHONPATH", "")
    )
    return unidist_root
ca54a2db71761a351e76c4b5011cd83e7dd9788a
49,339
import threading def _start_thread(target, *args, **kwargs) -> threading.Thread: """Helper to simplify starting a thread.""" thread = threading.Thread(target=target, args=args, kwargs=kwargs) thread.start() return thread
a5d9e1feb1b02ee9b1ec3aaa46758cd3d27e1880
49,340
def reassemble(tripleList):
    """
    Reassembles the triples that were found to be in the right order.

    Each token is expected to carry extension attributes ``token._.part``
    ("subj" | "pred" | "obj") and ``token._.posi`` (a sortable position
    key) plus ``token.text`` — NOTE(review): this matches spaCy custom
    extensions; confirm against the pipeline that registers them.

    Returns:
        (subject, predicate, object) strings, or None if any part is empty.

    Raises:
        Exception: if a token's part is not subj/pred/obj.
    """
    subj = ""
    subjL = {}
    pred = ""
    predL = {}
    obj = ""
    objL = {}
    # Bucket each token's text by part, keyed by its position.
    for token in tripleList:
        if token._.part == "subj":
            subjL.update({token._.posi: token.text})
        elif token._.part == "pred":
            predL.update({token._.posi: token.text})
        elif token._.part == "obj":
            objL.update({token._.posi: token.text})
        else:
            raise Exception('Triple does not belong to any part (subj,pred,obj).')
    # Join each part's words in positional order.
    subj = " ".join(str(subjL[elem]) for elem in sorted(subjL))
    pred = " ".join(str(predL[elem]) for elem in sorted(predL))
    obj = " ".join(str(objL[elem]) for elem in sorted(objL))
    if subj != "" and pred != "" and obj != "":
        return (subj, pred, obj)
    else:
        return None
267e0d9116bc4eac60ef3573a91ce08d28491cbc
49,341
def count_trees(data, route):
    """Count True cells hit while sliding (dx, dy) per step over a grid
    whose rows wrap horizontally.

    >>> count_trees([
    ...     [False, True],
    ...     [False, True]
    ... ], (1, 1))
    1
    """
    dx, dy = route
    height, width = len(data), len(data[0])
    x = y = 0
    trees = 0
    while y < height:
        trees += 1 if data[y][x] else 0
        # Columns wrap around; rows do not.
        x = (x + dx) % width
        y += dy
    return trees
1a81a8ccb3b7dd87f04b7713d08370297cb7b5fa
49,342
def combination_of_two_lists(lst1, lst2):
    """
    ["f", "m"] and ["wb", "go", "re"] =>
    ["f_wb", "f_go", "f_re", "m_wb", "m_go", "m_re"]
    """
    combos = []
    for first in lst1:
        for second in lst2:
            combos.append(first + "_" + second)
    return combos
3f966ff5bb535d95c232162f76705b2b9f6311c6
49,344
import argparse


def parse_args(args):
    """Take care of all the argparse stuff.

    :returns: the args
    """
    parser = argparse.ArgumentParser(description='Produce spectrum of results.')
    parser.add_argument('star', help='Star Name', type=str)
    parser.add_argument('obsnum', help="Observation label")
    # Table-driven declaration of the numeric positionals.
    for name, kind, text in [
        ('teff_1', int, 'Host Temperature'),
        ('logg_1', float, 'Host logg'),
        ('feh_1', float, 'Host Fe/H'),
        ('teff_2', int, 'Companion Temperature'),
        ('logg_2', float, 'Companion logg'),
        ('feh_2', float, 'companion Fe/H'),
        ('gamma', float, 'Host rv'),
        ('rv', float, 'Companion rv'),
    ]:
        parser.add_argument(name, type=kind, help=text)
    parser.add_argument("-p", "--plot_name", type=str, help="Name of save figure.")
    return parser.parse_args(args)
a3dd3d7e741be2508ce64f5dc2cd32f7f81f4fa6
49,345
def robot_paths(x: int, y: int) -> int:
    """compute the number of paths from top-left to bottom-right cell in a x-y-grid

    :param x: width of grid
    :param y: length of grid
    :return: number of possible paths, when only walking right or down a cell
    """
    # Row/column 0 acts as a zero border; cell (1,1) is the start.
    grid = [[0] * (y + 1) for _ in range(x + 1)]
    for i in range(1, x + 1):
        for j in range(1, y + 1):
            grid[i][j] = 1 if i == j == 1 else grid[i - 1][j] + grid[i][j - 1]
    return grid[x][y]
60d25bc2e5d2582281d385318fb3a411f3f163e6
49,346
import json


def ReadJson(Dir, FileName):
    """Read a JSON file and return the parsed content.

    **FileName: string of the file name to be used in the creation —
    the file name relative to ``Dir`` without extension, e.g. '/test'.

    :param Dir: directory containing the file.
    :param FileName: file name without the '.json' extension.
    :return: the deserialized JSON content.
    """
    path = str(Dir) + str(FileName) + '.json'
    # Context manager closes the handle; the original json.load(open(...))
    # leaked it until garbage collection.
    with open(path, 'r') as handle:
        return json.load(handle)
1ab73e09e15958fa1d4900dbdecbb3d88817190e
49,348
def _recursive_01_knapsack_aux(capacity: int, w: list, v: list, value: int) -> int: """Either takes the last element or it doesn't. This algorithm takes exponential time.""" if capacity == 0: return 0 if len(w) > 0 and len(v) > 0: if w[-1] > capacity: # We cannot include the nth item. value = _recursive_01_knapsack_aux(capacity, w[:-1], v[:-1], value) else: value = max( v[-1] + _recursive_01_knapsack_aux(capacity - w[-1], w[:-1], v[:-1], value), _recursive_01_knapsack_aux(capacity, w[:-1], v[:-1], value)) return value
aed2071f7faab1d1f2042bc16474a8c686d59244
49,349
def linsrgb_to_srgb (linsrgb):
    """Convert physically linear RGB values into sRGB ones. The transform is
    uniform in the components, so *linsrgb* can be of any shape.

    *linsrgb* values should range between 0 and 1, inclusively.

    """
    # Standard sRGB piecewise encoding: gamma curve above the linear-toe
    # threshold, a simple scale below it (from Wikipedia).
    if linsrgb > 0.0031308:
        return 1.055 * linsrgb**(1./2.4) - 0.055
    return linsrgb * 12.92
8eb64d26d13ae7f2d90f0508c1267c1617ac5b9a
49,350
import re


def format_results(results):
    """
    Outputs the breach data via markdown, ready to paste into a report.
    """
    # Invert the mapping: account -> breach blob becomes breach -> accounts.
    known_breaches = {}
    # Breaches are returned in JSON-like format.
    name_pattern = '"Name":"(.*?)"'
    for address in results:
        for breach_name in re.findall(name_pattern, results[address], re.IGNORECASE):
            known_breaches.setdefault(breach_name, []).append(address)
    return known_breaches
9869d0560eea16dfd8af372705434a84c610d3ae
49,351
def letter_for(label):
    """Return the letter for a given label."""
    # Labels 0..9 map onto consecutive uppercase letters.
    letters = "ABCDEFGHIJ"
    return letters[label]
6c0692f7451db6fd45141339f702665ba3ac2bb4
49,352
def read_coverages(input_file):
    """Return coverage values.

    Reads one float per line from *input_file*.
    """
    with open(input_file, 'r') as handle:
        return [float(entry.rstrip()) for entry in handle]
6820da83d0ecb24cdad1169b0521505d59d3dcbf
49,353
def buildNetwork(batch_payment_file):
    """
    Open batch_payment_file and extract two user IDs from each payment.
    Use a hashmap to store neighborhood information for each user ID.

    Returns a dict mapping each user ID to the set of its neighbors' IDs.
    """
    network = {}
    with open(batch_payment_file, "r") as handle:
        handle.readline()  # skip the header
        for record in handle:
            fields = record.strip().split(',')
            # Columns 1 and 2 hold the payer / payee user IDs.
            first = fields[1].strip()
            second = fields[2].strip()
            network.setdefault(first, set()).add(second)
            network.setdefault(second, set()).add(first)
    return network
87693f865600a5da6de683f0944574647702f444
49,354
def remove_holidays(df):
    """This function is to remove all the weekends from the histogram"""
    out = df.copy()
    # check weekends: shifting to a sunday start, as in energyplus
    day_index = (
        out.index.dayofweek.values
    ) % 7
    # Days 0..4 are kept as weekdays; 5 and 6 are dropped.
    out["weekday"] = day_index <= 4
    out = out[out["weekday"]]
    return out
e8372636f5c44c564dc7727107f3027ee8aea644
49,355
def bgr01(r, g, b):
    """Rescales BGR to a 0-1 scale."""
    # Note the channel order flips from RGB args to a BGR tuple.
    scale = 255.0
    return (b / scale, g / scale, r / scale)
c7d9d5cecfec3311ad8f5d3b2baf619b5bbe83cf
49,357
def get_search_words():
    """() -> list

    Returns a static list with pre-defined search words.
    Used for searching active & completed products.

    The list covers high-value Magic: The Gathering cards (Power Nine,
    dual lands, Time Vault) across the Alpha / Beta / Unlimited / Revised
    and Collectors' Edition printings.
    """
    words = [
        # Alpha Power Nine
        'Alpha Black Lotus', 'Alpha Mox Sapphire', 'Alpha Mox Jet',
        'Alpha Mox Pearl', 'Alpha Mox Ruby', 'Alpha Mox Emerald',
        'Alpha Timetwister', 'Alpha Ancestral Recall', 'Alpha Time Walk',
        # Beta Power Nine
        'Beta Black Lotus MTG', 'Beta Mox Sapphire', 'Beta Mox Jet',
        'Beta Mox Pearl', 'Beta Mox Ruby', 'Beta Mox Emerald',
        'Beta Timetwister', 'Beta Ancestral Recall', 'Beta Time Walk',
        # Unlimited Power Nine
        'Unlimited Black Lotus MTG', 'Unlimited Mox Sapphire', 'Unlimited Mox Jet',
        'Unlimited Mox Pearl', 'Unlimited Mox Ruby', 'Unlimited Mox Emerald',
        'Unlimited Timetwister', 'Unlimited Ancestral Recall', 'Unlimited Time Walk',
        # Alpha dual lands
        'Alpha Tundra MTG', 'Alpha Underground Sea MTG', 'Alpha Badlands MTG',
        'Alpha Taiga MTG', 'Alpha Savannah MTG', 'Alpha Scrubland MTG',
        'Alpha Volcanic Island MTG', 'Alpha Bayou MTG', 'Alpha Plateau MTG',
        'Alpha Tropical Island MTG',
        # Beta dual lands
        'Beta Tundra MTG', 'Beta Underground Sea MTG', 'Beta Badlands MTG',
        'Beta Taiga MTG', 'Beta Savannah MTG', 'Beta Scrubland MTG',
        'Beta Volcanic Island MTG', 'Beta Bayou MTG', 'Beta Plateau MTG',
        'Beta Tropical Island MTG',
        # Unlimited dual lands
        'Unlimited Tundra MTG', 'Unlimited Underground Sea MTG', 'Unlimited Badlands MTG',
        'Unlimited Taiga MTG', 'Unlimited Savannah MTG', 'Unlimited Scrubland MTG',
        'Unlimited Volcanic Island MTG', 'Unlimited Bayou MTG', 'Unlimited Plateau MTG',
        'Unlimited Tropical Island MTG',
        # Revised dual lands
        'Revised Tundra MTG', 'Revised Underground Sea MTG', 'Revised Badlands MTG',
        'Revised Taiga MTG', 'Revised Savannah MTG', 'Revised Scrubland MTG',
        'Revised Volcanic Island MTG', 'Revised Bayou MTG', 'Revised Plateau MTG',
        'Revised Tropical Island MTG',
        # Time Vault printings
        'Alpha Time Vault MTG', 'Beta Time Vault MTG', 'Unlimited Time Vault MTG',
        "Collectors Time Vault MTG", "International Collectors Time Vault MTG",
        # Collectors' Edition Power Nine
        "Collectors Black Lotus MTG", "Collectors Mox Sapphire", "Collectors Mox Jet",
        "Collectors Mox Pearl", "Collectors Mox Ruby", "Collectors Mox Emerald",
        "Collectors Timetwister", "Collectors Ancestral Recall", "Collectors Time Walk",
        # International Collectors' Edition Power Nine
        "International Collectors Black Lotus MTG", "International Collectors Mox Sapphire",
        "International Collectors Mox Jet", "International Collectors Mox Pearl",
        "International Collectors Mox Ruby", "International Collectors Mox Emerald",
        "International Collectors Timetwister", "International Collectors Ancestral Recall",
        "International Collectors Time Walk",
        # Collectors' Edition dual lands
        "Collectors Tundra MTG", "Collectors Underground Sea MTG", "Collectors Badlands MTG",
        "Collectors Taiga MTG", "Collectors Savannah MTG", "Collectors Scrubland MTG",
        "Collectors Volcanic Island MTG", "Collectors Bayou MTG", "Collectors Plateau MTG",
        "Collectors Tropical Island MTG",
        # International Collectors' Edition dual lands
        "International Collectors Tundra MTG", "International Collectors Underground Sea MTG",
        "International Collectors Badlands MTG", "International Collectors Taiga MTG",
        "International Collectors Savannah MTG", "International Collectors Scrubland MTG",
        "International Collectors Volcanic Island MTG", "International Collectors Bayou MTG",
        "International Collectors Plateau MTG", "International Collectors Tropical Island MTG"]
    return words
7fa1b34f077a386c7448a2cd9189c5f42b9cd91e
49,358
def parse(source):
    """Return words array for the given page source.

    Walks the children of the element with id 'lemmas' and collects the
    bold text of each child's 'dt' node, stripping trailing/leading commas.
    """
    lemma_nodes = source.find(id='lemmas').children
    words = []
    for child in lemma_nodes:
        dt = child.find('dt')
        # NOTE(review): the -1 sentinel suggests a parser whose find()
        # returns -1 on miss — preserved as-is; confirm against the
        # actual page-source library.
        if dt != -1:
            words.append(dt.find('b').text.strip(','))
    return words
2e79a1714fda79adca25d1dfc22d4cf773152ebc
49,359
def to_csv(data, headers):
    """ convert a set of data to a csv, based on header column names

    Missing keys render as empty cells. Returns UTF-8 encoded bytes.
    """
    lines = [",".join(headers)]
    for record in data:
        cells = [str(record.get(column, "")) for column in headers]
        lines.append(",".join(cells))
    return "\n".join(lines).encode()
be836ba257b584f9d105d54503c2febccaa5e88c
49,360
from datetime import datetime


def get_date():
    """Get current date in YEAR-MONTH-DAY format."""
    return datetime.now().strftime("%Y-%m-%d")
493a6947951de11bcd01d74679419c14cf0052ee
49,363
def support(v, eps):
    """
    Given a vector `v` and a threshold eps, return all indices
    where `|v|` is larger than eps.

    EXAMPLES::

        sage: sage.modular.modform.numerical.support( numerical_eigenforms(61)._easy_vector(), 1.0 )
        []

        sage: sage.modular.modform.numerical.support( numerical_eigenforms(61)._easy_vector(), 0.5 )
        [0, 1]
    """
    indices = []
    for idx in range(v.degree()):
        if abs(v[idx]) > eps:
            indices.append(idx)
    return indices
f2bc705a40504aae3ba77f4feccf854e2ad98fd1
49,364
import argparse


def buildArgParser():
    """
    Creates an argparser object out of any command line options

    Parses sys.argv directly (no argument list parameter) and returns the
    resulting namespace.
    """
    parser = argparse.ArgumentParser(description='Creates a GBrowse database instance ' +
                                                 'from the provided GFF3 file')
    # Input data files.
    parser.add_argument('-i', '--input_gff3_file',
                        help='Input GFF3 file that will be loaded into GBrowse')
    parser.add_argument('-s', '--input_sam_file', required=False,
                        help='Optional input SAM file that should be mapped to ' +
                             'the genome loaded into GBrowse')
    # GBrowse configuration.
    parser.add_argument('-t', '--gbrowse_conf_template',
                        help='A GBrowse template to generate the data source ' +
                             'configuration from')
    parser.add_argument('-o', '--organism',
                        help='The organism whose data is being loaded into ' +
                             'the GBrowse database')
    parser.add_argument('-g', '--gbrowse_master_conf', default='/opt/gbrowse2/GBrowse.conf',
                        help='Path to GBrowse master configuration file')
    parser.add_argument('-c', '--gbrowse_conf_dir', default='/opt/gbrowse2/',
                        help='Path to GBrowse configuration directory')
    # MySQL connection parameters.
    parser.add_argument('-d', '--database',
                        help='Desired name for MySQL database housing annotation data')
    parser.add_argument('-l', '--hostname', help='MySQL server hostname')
    parser.add_argument('-u', '--username', help='Database username')
    parser.add_argument('-p', '--password', help='Database password', default="", required=False)
    args = parser.parse_args()
    return args
9f786a4367bc3c66f089f860ad9b6bcf188dc5a5
49,366
import re


def validate_subnet_ids(value):
    """Raise exception is subnet IDs fail to match constraints.

    Returns an error-fragment string when any ID is malformed, or ""
    when all IDs are valid.
    """
    subnet_id_pattern = r"^(subnet-[0-9a-f]{8}|subnet-[0-9a-f]{17})$"
    matcher = re.compile(subnet_id_pattern)
    if all(matcher.match(subnet) for subnet in value):
        return ""
    return fr"satisfy regular expression pattern: {subnet_id_pattern}"
6446f7bd081335fb00869a1015656665d827bf75
49,367
def to_world_canvas(world_point, canvas_extents, world_extents):
    """Transforms a point from world coord system to world canvas coord system."""
    wx, wy = world_point[0], world_point[1]
    # Scale x into canvas units; truncate to an integer pixel coordinate.
    cx = int(wx / world_extents[0] * canvas_extents[0])
    # Canvas y axis is flipped (origin at the top), hence the inversion.
    cy = int(canvas_extents[1] - 1 - wy / world_extents[1] * canvas_extents[1])
    return (cx, cy)
5dec7f87fae35542b5798f88b0353c9b593e88fb
49,369
import re
import os


def parse_ctesttestfile_line(link_target, binary_type, line, verbose=False):
    """! Parse lines of CTestTestFile.cmake file and searches for 'add_test'
    @param link_target Build directory containing the 'test' subdirectory.
    @param binary_type Binary file extension appended to the test name.
    @param line One line of CTestTestFile.cmake.
    @param verbose If True, print each matched test binary name.
    @return (test_case, test_case_path) tuple or None if the line is not an
            'add_test' entry.
    @details Example format of CTestTestFile.cmake:
             add_test(mbed-test-stdio "mbed-test-stdio")
             add_test(mbed-test-hello "mbed-test-hello")
    """
    # Fix: pattern is now a raw string — the original plain string contained
    # invalid escape sequences (\( and \"), which Python only tolerates with
    # a DeprecationWarning.
    add_test_pattern = r'[adtesADTES_]{8}\([\w\d_-]+ \"([\w\d_-]+)\"'
    re_ptrn = re.compile(add_test_pattern)
    # Cheap guard before running the regex: only 'add_test' lines qualify.
    if line.lower().startswith('add_test'):
        m = re_ptrn.search(line)
        if m and len(m.groups()) > 0:
            if verbose:
                print(m.group(1) + binary_type)
            test_case = m.group(1)
            test_case_path = os.path.join(link_target, 'test', test_case + binary_type)
            return test_case, test_case_path
    return None
30fa6984b500b94d07bea37286b0fd5c250216e4
49,370
import struct


def py_int2byte(val):
    """ Converts Python int value to byte.

    Valid range is 0..255 (unsigned char); struct raises otherwise.
    """
    packer = struct.Struct('B')
    return packer.pack(val)
d6ab7467c615a23ccb416db2d1150a3909d9f1ff
49,372
from os import path


def get_full_path(file_path: str) -> str:
    """ get full path

    Strips a single trailing '/', expands '~', and returns the absolute path.
    """
    trimmed = file_path[:-1] if file_path.endswith('/') else file_path
    return path.abspath(path.expanduser(trimmed))
ef51e29a41f07bc01ea31f111c83564b748a2eeb
49,374
def flatten_dict(dd, separator='.', prefix=''):
    """Recursive subroutine to flatten nested dictionaries down into a
    single-layer dictionary.

    Parameters
    ----------
    dd : dict
        dictionary to flatten
    separator : str, optional
        separator character used in constructing flattened dictionary key
        names from multiple recursive elements. Default value is '.'
    prefix : str, optional
        flattened dictionary key prefix. Default value is an empty string ('').

    Returns
    -------
    a version of input dictionary *dd* that has been flattened by one layer
    """
    # Non-dict leaf: map the accumulated prefix directly to the value.
    if not isinstance(dd, dict):
        return {prefix: dd}
    flat = {}
    for key, val in dd.items():
        for inner_key, inner_val in flatten_dict(val, separator, key).items():
            combined = prefix + separator + inner_key if prefix else inner_key
            flat[combined] = inner_val
    return flat
f0b356aa0c516b88e83d8c8820fc53f4db69623d
49,376
def known_mismatch(hashes1, hashes2):
    """Returns a string if this is a known mismatch.

    Checks the two frame-hash sequences against known benign mismatch
    patterns and returns the matching pattern's name ('frame_0_dup' or
    'frame_0_missing'), or None when no known pattern applies.
    """
    def frame_0_dup_(h1, h2):
        # asymmetric version: h2 duplicated frame 0 into slot 1.
        return ((h1[0] == h2[0]) and
                (h1[2:] == h2[2:]) and
                (h1[1] != h2[1] and h2[1] == h1[0]))

    def frame_0_dup(h1, h2):
        return frame_0_dup_(h1, h2) or frame_0_dup_(h2, h1)

    def frame_0_missing(h1, h2):
        # One side is the other shifted by a single dropped leading frame.
        # Fix: the second clause used h2[:1] (first element only) instead of
        # h2[1:], making the check asymmetric and never matching the
        # h2-missing-frame-0 direction.
        return (h1[1:] == h2[:-1]) or (h2[1:] == h1[:-1])

    for func in [frame_0_dup, frame_0_missing]:
        if func(hashes1, hashes2):
            return func.__name__
    return None
dfd3985a26e53147147b2485203964863db8600c
49,377
def S_load_calc(va,vb,vc,Zload):
    """Power absorbed by load at PCC LV side.

    Complex power S = (1/2) * sum(V * conj(I)) per phase, with the load
    current taken as I = -(V / Zload).
    """
    total = 0
    for phase_voltage in (va, vb, vc):
        phase_current = -(phase_voltage / Zload)
        total += phase_voltage * phase_current.conjugate()
    return total / 2
8dabfbd167b944e6387499e7da45af23667c2b48
49,378
import argparse


def create_parser():
    """Create a parser.

    Returns the configured ArgumentParser (caller invokes parse_args).
    """
    parser = argparse.ArgumentParser(description='Average profile(s) from LAMMPS')
    # Required input file; default applies only when the flag value is empty.
    parser.add_argument('-f', '--file', required=True, default='x-temp.txt',
                        help='File to average')
    # Boolean toggles.
    parser.add_argument('-s', '--split', action='store_true', required=False,
                        help='Write a new file for each variable')
    parser.add_argument('-p', '--plot', action='store_true', required=False,
                        help='Write a new file for each variable')
    return parser
efc9361ccfd56c048a45f1c2c33481a56b90b0b0
49,379
def admin_access_allowed(user):
    """Does the user have the right permissions to be using this app?

    Returns True when the user belongs to the "equipment_tracker" group,
    False when they do not, and None (falsy) for a falsy/anonymous user.
    """
    # Falsy user (None / anonymous) falls through and implicitly returns None.
    if user:
        # NOTE(review): assumes a Django-style `user.groups` related manager —
        # confirm against the auth model in use.
        return bool(user.groups.filter(name="equipment_tracker").count())
5b90b8e6b2c229bfadb7c21f3d79369196fd92f7
49,380
def _PruneMessage(obj): """Remove any common structure in the message object before printing.""" if isinstance(obj, list) and len(obj) == 1: return _PruneMessage(obj[0]) elif isinstance(obj, dict) and len(obj) == 1: for v in obj.values(): return _PruneMessage(v) else: return obj
7fa8437e8d0eb974d1d541ddeecf80955098a84d
49,382
def to_unicode(s):
    """Return the parameter as type which supports unicode, possibly decoding it.

    In Python2, this is the unicode type. In Python3 it's the str type.
    """
    # Only bytes need decoding; everything else passes through untouched.
    if not isinstance(s, bytes):
        return s
    return s.decode('utf-8')
34353f67dedad93220cf1b567c739dccba610ecf
49,384
def time_check(message):
    """Check if the duration for the quick search preference is a valid input.

    Returns True only when *message* converts to an int in the inclusive
    range 1..8.
    """
    # Fix: catch only conversion failures rather than a blanket
    # `except Exception`, which also hid programming errors; the unused
    # `time = 0` initializer is gone too.
    try:
        duration = int(message)
    except (TypeError, ValueError):
        return False
    return 1 <= duration <= 8
745bcc45986d2b149b062d4864ae9cf8eb060901
49,387
def _same_side(pos, p, q, a, b): """Indicates whether the points a and b are at the same side of segment p-q""" dx = pos[p][0] - pos[q][0] dy = pos[p][1] - pos[q][1] dxa = pos[a][0] - pos[p][0] dya = pos[a][1] - pos[p][1] dxb = pos[b][0] - pos[p][0] dyb = pos[b][1] - pos[p][1] return (dy*dxa - dx*dya > 0) == (dy*dxb - dx*dyb > 0)
e8635b29322a53d6928677fb9379a04440ef6e0e
49,389
def apply_opcode4(code_list, opcode_loc, parameter_mode_dict):
    """Handle opcode 4: emit the value addressed by the opcode's single parameter.

    Parameters
    ----------
    code_list : list
        The whole programme.
    opcode_loc : int
        Index of the opcode in code_list.
    parameter_mode_dict : dict
        Mode for parameter 1: '0' = position mode (dereference), '1' =
        immediate mode (use as-is).

    Returns
    -------
    code_list : list
        The (unmodified) programme.
    output : int
        The emitted value.
    """
    opcode, param = code_list[opcode_loc:opcode_loc + 2]
    # Position mode looks the value up at the given address.
    if parameter_mode_dict[1] == '0':
        param = code_list[param]
    return code_list, param
52b9e57185f7eb6fc01266b91d1cfea86f048a43
49,390
def sz_to_ind(sz, charge, nsingle):
    """
    Converts :math:`S_{z}` to a list index.

    Parameters
    ----------
    sz : int
        Value :math:`S_{z}` of a spin projection in the z direction.
    charge : int
        Value of the charge.
    nsingle : int
        Number of single particle states.

    Returns
    -------
    int
        Value of the list index corresponding to sz.
    """
    # Largest attainable |Sz| for this charge sector.
    sz_ceiling = min(charge, nsingle - charge)
    return int((sz_ceiling + sz) / 2)
c6016a5f21f4a4e9904046260b94b1f4d6d397ba
49,391
def map_activity_model(raw_activity):
    """
    Maps request data to the fields defined for the Activity model
    """
    # (model field, source key) pairs; only 'key', 'activity' and 'type'
    # are renamed, the rest map one-to-one.
    field_map = (
        ('activity_id', 'key'),
        ('description', 'activity'),
        ('activity_type', 'type'),
        ('participants', 'participants'),
        ('accessibility', 'accessibility'),
        ('price', 'price'),
        ('link', 'link'),
    )
    return {model_key: raw_activity[source_key] for model_key, source_key in field_map}
080370bc0ddbc3c7487e0c6789612c7bc5298258
49,392
def hierholzer(graph):
    """Return an Eulerian path via Hierholzer algo

    NOTE(review): the inner dfs `fn` is defined but never invoked with a
    start node, so as written `ans` stays empty and this function always
    returns []. The original presumably seeded the walk with something
    like `fn(start_node)` before reversing — confirm against callers.
    """
    ans = []

    def fn(x):
        """Return Eulerian path via dfs."""
        # Consume each outgoing edge of x exactly once, then record x
        # post-order (standard Hierholzer edge-consumption dfs).
        while graph[x]:
            fn(graph[x].pop())
        ans.append(x)

    ans.reverse()
    return ans
b4abb0b66a723996cf0c43c00a9d65b91f87f10b
49,393
import math


def wrap_to_pi(angle):
    """
    Wrap the given angle to pi: -pi <= angle <= pi.

    :param angle: float, Angle to wrap in rads
    :return: float, Wrapped angle
    """
    two_pi = 2 * math.pi
    wrapped = angle
    # Step toward the target band one full turn at a time.
    while wrapped > math.pi:
        wrapped -= two_pi
    while wrapped < -math.pi:
        wrapped += two_pi
    return wrapped
613c81b3404ece1326e19639bc8684cafdd4a990
49,394
def after_commit(self, item, status_code):
    """
    This will be used in an after_commit function. In this case, the
    process is diverted to a message, new status code and the method
    exits with this message.

    Returns (False, payload, 419): the False halts normal processing and
    the 419 status overrides *status_code*.
    """
    # processing takes place here
    result_payload = {
        "message": "This is no longer a REST resource. We can do anything.",
        "data": item.to_dict(),
    }
    return False, result_payload, 419
79e3f64a5b458e87397b86ec25910959d8efb590
49,395
def is_terminate(runid, iterations, val):
    """Dummy is_terminate call back.

    Placeholder termination predicate: ignores all arguments and never
    requests early termination.
    """
    return False
1340fb47b471b78383c9691c197edad3d342f667
49,396