content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def process_jsonld(results):
    """Decode a bytes-like object into parsed JSON.

    Single quotes are swapped for double quotes before parsing so that
    Python-repr-style payloads become valid JSON.

    :param results: bytes-like JSON-LD payload
    :return: data parsed from the JSON
    """
    text = results.decode('utf8')
    return json.loads(text.replace("'", '"'))
36ebe65ca3a4320de6fa5ac1115c6a934bdea1ba
44,345
def get_split(text, maxlen=200, overlap=50):
    """Segment text into word windows of ``maxlen`` words that overlap by
    ``overlap`` words."""
    words = text.split()
    step = maxlen - overlap
    chunks = max(len(words) // step, 1)
    return [" ".join(words[k * step : k * step + maxlen]) for k in range(chunks)]
2a087e682c969cb78a16c31193bffe7061b7267e
44,346
def has_internet_connection():
    """Return True if a single ping to 8.8.8.8 succeeds, else False.

    ``check_call`` raises ``CalledProcessError`` on a non-zero exit
    status, so reaching the ``return`` already implies success (the old
    ``res == 0`` comparison could never be False).  A missing ``ping``
    binary (FileNotFoundError) is treated the same as no connectivity
    instead of propagating.
    """
    try:
        subprocess.check_call(["ping", "-c", "1", "8.8.8.8"])
        return True
    except (subprocess.CalledProcessError, FileNotFoundError):
        return False
61e69922b32a6ee885d6da33535b917af94359da
44,347
def overlapsize(first, second, groundplane):
    """Count the frames in ``first`` that temporally overlap with ``second``."""
    frames_a = {box.frame
                for box in first.getboxes(interpolate=True, groundplane=groundplane)}
    frames_b = {box.frame
                for box in second.getboxes(interpolate=True, groundplane=groundplane)}
    return len(frames_a & frames_b)
70e514f40f57d10c82b7ab52f724da652a8d87e6
44,348
def read_glue_cache(folder: str, docname: str) -> dict[str, Any]:
    """Read a glue cache from the build folder, for a particular document."""
    parts = docname.split("/")
    cache_file = Path(folder, *parts[:-1], f"{parts[-1]}.glue.json")
    if not cache_file.exists():
        return {}
    with cache_file.open("r") as handle:
        return json.load(handle)
b723451401a3eea836407271ac514fddf08de524
44,350
def cxcy_to_xy(cxcy):
    """Convert center-size boxes (c_x, c_y, w, h) to boundary boxes
    (x_min, y_min, x_max, y_max).

    Args:
        cxcy: bounding boxes in center-size coords, tensor of size (n_boxes, 4)

    Returns:
        bounding boxes in boundary coords, tensor of size (n_boxes, 4)
    """
    centers = cxcy[:, :2]
    half_sizes = cxcy[:, 2:] / 2
    return torch.cat([centers - half_sizes, centers + half_sizes], 1)
94b610ea00bdf665df19680396a8880e9f766e3e
44,352
def _rate_limit_exceeded(forbidden): """Predicate: pass only exceptions with 'rateLimitExceeded' as reason.""" return any(error['reason'] == 'rateLimitExceeded' for error in forbidden._errors)
364d2349fd9619bfb884804602eb737336b95b56
44,353
def get_item_from_queue(Q, timeout=0.01):
    """Attempt to retrieve an item from queue ``Q``.

    Blocks for up to ``timeout`` seconds while the queue is empty and
    returns None if nothing arrived, so don't use this method for speedy
    retrieval of multiple items (use get_all_from_queue for that).

    :param Q: a ``queue.Queue`` instance
    :param timeout: seconds to block when the queue is empty
    :return: the next item, or None on timeout
    """
    try:
        # Bug fix: the timeout argument was previously ignored in favour
        # of a hard-coded 0.01 second wait.
        return Q.get(True, timeout)
    except queue.Empty:
        return None
c2c8d213e72c5fea4715094ab2084677ff18c3be
44,354
def search_in(transitions, visited, path, pos, goal):
    """Recursive helper for depth-first search.

    Takes a transition map, the set of visited positions, the path so
    far, the next step to consider, and a goal.

    :return: a complete path (list of positions) to ``goal``, or None if
        no path exists from this start state.
    """
    # If we're about to go someplace we've been, it's a loop: no path to goal
    if pos in visited:
        return None
    # Add pos to visited locations and path thus far
    visited.add(pos)
    path.append(pos)
    # If pos is the goal, we're done
    if pos == goal:
        return path
    # For each adjacent position, try moving there
    # (renamed from `next`, which shadowed the builtin)
    for neighbor in transitions[pos]:
        result = search_in(transitions, visited, path, neighbor, goal)
        if result:
            return result
    # None of these led to the goal, so pos is a bad path: retrace by
    # removing pos from path & visited, then return failure
    path.pop()
    visited.remove(pos)
    return None
af84fa0748dfd9988d304197caf2f1fc7b34edfc
44,356
def ticks_to_bars(ticks, freq):
    """Resample ticks (timestamp, bid, ask) to OHLC bars of the selected
    frequency.

    Based on https://stackoverflow.com/a/17001474/3512107

    :param ticks: DataFrame indexed by timestamp with 'bid' and 'ask'
        columns.  NOTE(review): mutated in place -- a 'mid' column is
        added and 'bid'/'ask' are dropped.
    :param freq: a pandas offset alias, see
        https://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases
        (e.g. 'D' calendar day, 'H' hourly, 'T'/'min' minutely, 'S' secondly)
    :return: OHLC bars with empty periods dropped and the column
        MultiIndex flattened.
    """
    ticks['mid'] = ticks.mean(axis=1)
    ticks.drop(['bid', 'ask'], axis=1, inplace=True)
    bars = ticks.resample(rule=freq, level=0).ohlc()
    # Drop N/A. When there are no ticks, do not create a bar
    bars.dropna(inplace=True)
    # Drop multi-index, Influx write has problems with that
    bars.columns = bars.columns.droplevel(0)
    return bars
a4836dfcb00330c0c7262c5edf9b331a0334af55
44,359
def create_request_url(title):
    """Build the RapidAPI Google-search query URL for ``title``.

    Spaces are replaced with '+' to form a suitable query string for the API.
    """
    q_string = title.replace(' ', '+')
    # Bug fix: the 'num' parameter was previously fused onto the query
    # value ("...q=foonum=2") because the '&' separator was missing.
    return f"https://google-search3.p.rapidapi.com/api/v1/search/q={q_string}&num=2"
b014829caa807137e7a03468e688c9aab1b4bb0f
44,360
def start_profile():
    """Create and enable a cProfile profiler, returning it."""
    profiler = cProfile.Profile()
    profiler.enable()
    return profiler
cc3057740818ad7095e1ac49f98793ccb9e92e65
44,361
def josephus_bitwiseVer3(n):
    """Josephus survivor position via the bitwise closed form.

    n = 2^m + l
    f(1) = α
    f((bm bm-1 ... b1 b0)_2) = (α βbm-1 ... βb1 βb0)_2 with α=1, β=-1, γ=1

    note: josephus_bitwiseVer3 runs faster than josephus_bitwiseVer3_1
    with the Python 3.6 interpreter on Mac OS X 10.12.3
    """
    if n == 0:
        return 0
    result = 0
    shift = n.bit_length() - 1  # start from the top bit m
    for bit in bin(n)[2:]:  # [βbm/α, βbm-1, ..., βb1, βb0]
        result += (1 << shift) if bit == "1" else -(1 << shift)
        shift -= 1
    return result
3554ecf45501f6e155d14eb3696eafce39abcd9c
44,362
def toChunk(data):
    """Convert a string to an HTTP chunked-encoding chunk.

    @returns: a tuple of strings (hex length line, data, trailing CRLF)
        representing the chunked encoding of data
    """
    size_line = "%x\r\n" % len(data)
    return (size_line, data, "\r\n")
d11b043e631b30755ac5bc796edc9edf9edf24f5
44,363
def remove_control_characters(html):
    """Strip control characters that break HTML parsing.

    See https://github.com/html5lib/html5lib-python/issues/96
    (tab, newline and carriage return are deliberately kept).
    """
    return re.sub("[\x00-\x08\x0b\x0e-\x1f\x7f]", "", html)
cfc8e25454f025881c851223bedf498a57110100
44,364
def map_unit_vector_to_angle(unit_vector):
    """Return the angle (radians, in (-pi, pi]) of a 2-D unit vector.

    The previous implementation scaled both components by pi before
    calling atan2; scaling both arguments by the same positive constant
    never changes the result, so the factor is dropped.
    """
    return math.atan2(unit_vector[1], unit_vector[0])
c80bb109ca9ee964279cda6d4ffc12016df0e535
44,365
def SplitTextBlock(text_block):
    """Split a received text block into a list of lines.

    Splitting e.g. 'abc\\n' on '\\n' yields ['abc', '']; the trailing
    empty string is dropped.
    """
    lines = text_block.split('\n')
    if lines[-1] == '':
        return lines[:-1]
    return lines
cc6e16f8b6c4b098989c6909e5e5b1c757c4e737
44,366
def nt_compatible_path(path):
    """Provide compatible NT file paths for ``os.path`` functions.

    ``os.path`` on Windows chokes on paths such as ``/C:/Directory/zzz.svg``
    (a leading slash before the drive letter), so that slash is removed.
    On other platforms, or when the pattern does not match, the path is
    returned unchanged.
    """
    if os.name == 'nt' and re.match('^/[a-z]:/', path, re.IGNORECASE):
        # Bug fix: re.sub's 4th positional argument is *count*, not
        # *flags* -- the original passed a flags bitmask there.  Since
        # the match above guarantees a leading '/', slicing is enough.
        return path[1:]
    return path
2898f4bfd1b922d7b4e9d023c4f2114e3ad74bc9
44,367
def getNodeSummary(scModel, node_id):
    """Return path analysis for a node, if present.

    Only applicable after running processProjectProperties().

    :param scModel: the project model
    :param node_id: node identifier
    :return: the 'pathanalysis' value, or None if the node is missing or
        has no analysis
    @type scModel: ImageProjectModel
    @type node_id: str
    @rtype: dict
    """
    node = scModel.getGraph().get_node(node_id)
    if node is not None and 'pathanalysis' in node:
        return node['pathanalysis']
    return None
2469d49a32d84b69d7653e17a64f88fe75428f8a
44,368
def cli(ctx, name, owner, changeset_revision):
    """Return a list of dictionaries of metadata about a certain changeset
    revision for a single tool.

    Output: Returns a list of the following dictionaries:

    #. a dictionary defining the repository (e.g. name, owner,
       description, times_downloaded, type, url, user_id)
    #. a dictionary defining the repository revision (RepositoryMetadata:
       changeset_revision, downloadable, includes_tools, malicious, ...)
    #. a dictionary including the additional information required to
       install the repository (per-tool: description, clone URL, revision,
       owner, and package dependency details)
    """
    return ctx.gi.repositories.get_repository_revision_install_info(name, owner, changeset_revision)
bd470cad00a642216b41aa61c3e5d57f41ce4c32
44,369
def get_mount_force(density, drag_coef, chord, length, vel):
    """Drag force on an airfoil section: 0.5 * rho * Cd * chord * length * v**2.

    :param density: fluid density
    :param drag_coef: drag coefficient of the section
    :param chord: section chord
    :param length: section length
    :param vel: flow velocity
    """
    return 0.5 * density * drag_coef * chord * length * vel ** 2
de6d29cf0bbddd585995bcec4b3f13a69a405f6c
44,370
def try_to_cpu(t: Any) -> Any:
    """Move ``t`` to the CPU when it supports ``.cpu()``.

    Args:
        t: Input.

    Returns:
        ``t`` moved to a cpu device if supported, otherwise ``t`` itself.
    """
    try:
        return t.cpu()
    except AttributeError:
        return t
3cf3607e79d4057ff5d5a196bc259291bbe969dd
44,371
def decode_object_data(lines):
    """Decode the base64 payload found within directory document objects.

    :param list(str) lines: lines as found in a directory document object,
        not including newlines or the begin/end lines
    :returns: the decoded data
    :rtype: bytes
    """
    joined = "".join(lines)
    return base64.b64decode(joined)
279ed2913880862da6b3a6c245c7e4e2c25f0ecf
44,372
def mean_pooling(x, x_padding_mask):
    """Mean-pool a padded sequence representation over time.

    Args:
        x: feature tensor of shape (T, N, E) -- sequence length, batch,
            feature dimension.
        x_padding_mask: bool tensor of shape (N, T); True marks padding.

    Returns:
        Tensor of shape (N, E): per-sequence mean over non-padded steps.
    """
    valid = ~x_padding_mask
    lengths = valid.long().sum(-1, keepdim=True)  # [bsz, 1]
    weights = valid.transpose(0, 1).unsqueeze(-1).float()  # [T, N, 1]
    return (x * weights).sum(dim=0) / lengths
9b637ac4b292f4a9bde81723cc1367edba53d47b
44,373
def cat_EVM_inputs(evm_dir):  # , inputs):
    """Concatenate the files in the EVM input directory to prepare the EVM
    command.

    Augustus, Genemark and Transdecoder go into gene_predictions.gff3 and
    pasa assemblies and transcript alignments go into transcripts.gff3.

    :param evm_dir: EVM input directory.  NOTE(review): used below as a
        plain string prefix (``evm_dir + 'gene_predictions.gff3'``), so it
        presumably ends with a path separator -- confirm at call sites.
    :return: tuple (list_soft, pred_filename, transcript_file, protein_file)
    """
    # GENE PREDICTIONS
    sys.stdout.write('###CONCATENATING FILES###\n')
    # GENE PREDICTION
    file_list = []
    ab_initio_list = ['cat']  # becomes the `cat <file...>` command line below
    protein_list = []
    transcript_list = []
    list_soft = []
    transcript_file = ''
    protein_file = ''
    for root, dirs, files in os.walk(evm_dir):
        for name in files:
            # Classify each file by naming convention and record which
            # software produced it.
            if 'augustus' in name:
                ab_initio_list.append(os.path.join(root, name))
                list_soft.append('augustus')
            elif 'genemark' in name:
                ab_initio_list.append(os.path.join(root, name))
                list_soft.append('genemark')
            elif 'PASA' in name or 'pasa' in name:
                transcript_file = os.path.join(root, name)
                transcript_list.append(os.path.join(root, name))
                list_soft.append('pasa')
            elif 'protein' in name:
                protein_file = os.path.join(root, name)
                protein_list.append(os.path.join(root, name))
                list_soft.append('exonerate')
            elif 'trinity' in name:
                ab_initio_list.append(os.path.join(root, name))
                list_soft.append('gmap')
            elif 'external' in name:
                ab_initio_list.append(os.path.join(root, name))
                list_soft.append('external')
    pred_filename = evm_dir + 'gene_predictions.gff3'
    if os.path.isfile(pred_filename):
        sys.stdout.write(('Gene predictions GFF3 file existed already: ' + pred_filename + ' --- skipping\n'))
    else:
        pred_file = open(pred_filename, 'w')
        try:
            # `cat` every ab-initio prediction file into one GFF3
            subprocess.check_call(ab_initio_list, stdout=pred_file, cwd=evm_dir)
            # sys.stdout.write '> Gene prediction concatenation completed'
        except:
            # sys.stdout.write 'Gene prediction concatenation failed'
            raise NameError('')
        pred_file.close()
    return list_soft, pred_filename, transcript_file, protein_file
50bd5a0bdcd43a2d94198fb0d9db07be021366fd
44,374
def get_create_constraint_query(label: str, property_name: str, constraint_name: str = ''):
    """Build a Cypher query to create a uniqueness constraint.

    :param label: node label
    :param property_name: node property for the constraint
    :param constraint_name: the constraint name (optional)
    :return: cypher query
    """
    return (
        'CREATE CONSTRAINT '
        + (constraint_name if constraint_name else '')
        + f' IF NOT EXISTS ON (n:{label}) ASSERT n.{property_name} IS UNIQUE'
    )
4e8baccf1729a5fc2e3a87f44e0b5de46d4cefb0
44,376
def delete_subdir(config):
    """Remove the 'subdir' key from ``config`` (if present) and return it.

    Falsy configs are returned untouched.
    """
    if config:
        config.pop('subdir', None)
    return config
3b032d681ac213032e42d78ac56db1d441c9dba6
44,378
def compute_node_degrees(ugraph):
    """Map each node of the undirected graph to its degree.

    The degree is simply the size of the node's adjacency list.
    """
    return {node: len(neighbors) for node, neighbors in ugraph.items()}
a6d2f2df91b8536eca7814d54376f8b7855c2e7b
44,379
def _get_model_device(model): """Return the device of a random model property.""" return list(model.values())[0].device
6e73a1ed57926ddfde023f24c010d47459bf0492
44,380
def untgz(archive):
    """Strip the '.tar.gz' suffix from an archive name."""
    suffix = '.tar.gz'
    return archive.replace(suffix, '')
6f2506b3559d19e46d3428fc59ec65fdf480b988
44,381
def generate_event(file_name, datetime=datetime):
    """Generate a JSON string with timestamp and filename headers.

    The ``datetime`` parameter allows injecting a fake clock in tests.
    """
    payload = {
        'timestamp': datetime.now().isoformat(),
        'filename': file_name,
    }
    return json.dumps(payload)
d546f28902ebbc98d81031740c8c2368e5aa7baa
44,382
def is_valid_rank_dir_name(name):
    """Check whether ``name`` follows the rank directory format 'rank_<N>'."""
    return re.search(r'^rank_\d+$', name) is not None
d40d3cbd3b4e8d749fd85400eb6d68e3d3ae1394
44,383
def filter_movies_with_no_images(movie_list):
    """Return movies which contain a poster.

    :param movie_list: list of movies.
    :return: list of filtered movies.
    """
    kept = []
    for movie in movie_list:
        if movie.get("Poster") != "N/A":
            kept.append(movie)
    return kept
fb8c17c84d897f590a794141b6a931f43fc0f526
44,385
def TVMazeSearch(showname):
    """Perform an embedded-episodes single-show search on TVMaze.com.

    Args:
        showname: Query of the Show name

    Returns:
        The ``requests`` Response object for the search.
    """
    baseurl = 'http://api.tvmaze.com'
    searchurl = '/singlesearch/shows'
    # Let requests URL-encode the query parameters; the previous string
    # concatenation produced invalid URLs for show names containing
    # spaces, '&' or other reserved characters.
    return requests.get(baseurl + searchurl,
                        params={'q': showname, 'embed': 'episodes'})
a7ed61b700c6e1ae37a2aa0194392bc3c5510dfc
44,387
def full_stack():
    """Return the current stack trace as a string, including any active
    exception.

    Cludgy workaround from StackOverflow user Tobias Kienzler.
    Source: stackoverflow.com/questions/6086976/how-to-get-a-complete-exception-stack-trace-in-python/16589622#16589622
    """
    exc = sys.exc_info()[0]
    stack = traceback.extract_stack()[:-1]  # last one would be full_stack()
    if exc is not None:
        # remove call of full_stack; the printed exception will contain
        # the caught exception's caller instead
        del stack[-1]
    trc = 'Traceback (most recent call last):\n'
    stackstr = trc + ''.join(traceback.format_list(stack))
    if exc is not None:
        formatted = traceback.format_exc()
        # Bug fix: str.lstrip(trc) strips any *characters* found in trc,
        # not the prefix string, and could eat the start of the first
        # stack line.  Strip the exact prefix instead.
        if formatted.startswith(trc):
            formatted = formatted[len(trc):]
        stackstr += ' ' + formatted
    return stackstr
5f9411091e9864ac92e06b22add105192eb2b4d3
44,388
def mlab_tempfile(dir=None):
    """Returns a temporary file-like object with valid matlab name.

    The file name is accessible as the .name attribute of the returned
    object.  The caller is responsible for closing the returned object,
    at which time the underlying file gets deleted from the filesystem.

    Parameters
    ----------
    dir : str
        A path to use as the starting directory.  Note that this
        directory must already exist, it is NOT created if it doesn't
        (in that case, OSError is raised instead).

    Returns
    -------
    f : file-like object

    Examples
    --------
    >>> f = mlab_tempfile()
    >>> pth, fname = os.path.split(f.name)
    >>> '-' not in fname
    True
    >>> f.close()
    """
    valid_name = re.compile(r'^\w+$')

    # Make temp files until we get one whose name is a valid matlab identifier,
    # since matlab imposes that constraint.  Since the temp file routines may
    # return names that aren't valid matlab names, but we can't control that
    # directly, we just keep trying until we get a valid name.  To avoid an
    # infinite loop for some strange reason, we only try 100 times.
    for n in range(100):
        f = tempfile.NamedTemporaryFile(suffix='.m', prefix='tmp_matlab_',
                                        dir=dir)
        # Check the file name for matlab compliance
        fname = os.path.splitext(os.path.basename(f.name))[0]
        if valid_name.match(fname):
            break
        # Close the temp file we just made if its name is not valid; the
        # tempfile module then takes care of deleting the actual file on disk.
        f.close()
    else:
        # for/else: only reached when the loop never hit `break`
        raise ValueError("Could not make temp file after 100 tries")
    return f
59d85627cce6d22cfbf9abc9cd2bad4cc0b1aee0
44,390
def detector_tab_is_not_empty(list):
    """Return True if any item in the list is non-empty, False otherwise.

    @param list: a list of results (parameter name kept for caller
        compatibility, although it shadows the builtin)
    @rtype: bool
    """
    # The original's `not(not(item))` was truthiness testing in disguise;
    # any() expresses the same check directly.
    return any(list)
48cbf34e779b201adfd8d2e442a1678e08de921a
44,391
def choosePivots(order, mode="random"):
    """Pick two pivot indices/values for the sorting algorithm.

    Params:
        order: list of elements to draw the two pivots from
        mode: how to pick the pivots; "random" (uniformly at random) or
            "middle" (cut into three balanced parts)

    Returns:
        The two pivots, ordered by their position in ``order``.
    """
    if mode == "middle":
        third = len(order) // 3
        return (order[third], order[2 * third])
    elif mode == "random":
        first, second = random.sample(order, 2)
        if order.index(first) < order.index(second):
            return first, second
        return second, first
5d5b54369a5e6848988100011155b641617ef952
44,392
def rm_mid_word_punct(dfcol):
    """Aggressively remove punctuation mid-word from a pandas string column.

    Deletes runs of "illegal" punctuation sitting between word characters
    (or between a word character and other punctuation), leaving '-', "'"
    and '.' untouched as legitimate in-text punctuation.

    :param dfcol: pandas Series of strings (uses the .str accessor)
    :return: cleaned Series
    """
    punct = re.escape(string.punctuation)
    mid_text_illegal_punct = re.escape('!"#$%&\()*+,/:;<=>?@[\\]^_`{|}~')  #text punctuation - , ' or .
    # punctuation run flanked by word chars or other punctuation
    rstr = f'(?<=[a-zA-Z0-9]|[{punct}])[{mid_text_illegal_punct}]+(?=[a-zA-Z0-9]|[{punct}])'
    # second pass: punctuation runs adjacent to a word-char/punct boundary
    rstr2 = f'((?<=[a-zA-Z0-9][{punct}])[{punct}]+(?=[a-zA-Z0-9]))|((?<=[a-zA-Z0-9])[{punct}]+(?=[{punct}][a-zA-Z0-9]))'
    col = dfcol.str.replace(rstr, '', regex=True)
    col = col.str.replace(rstr2, '', regex=True)
    return col
b8cc051cd70c63b55de4fe6c95693818b535cc89
44,394
def policy_evaluation(env, policy, U, discount=0.9, maxiters=300):
    """Evaluate the current policy, updating the value function U in place.

    Performs ``maxiters`` sweeps of the Bellman expectation update
    U[s] = R(s) + gamma * sum_s' T(s, pi(s), s') * U[s'].  Updates are
    in place (Gauss-Seidel style): later states in a sweep already see
    values updated earlier in the same sweep.

    :param env: environment exposing observation_space.n, transition
        tensor T[s, a, s'] and reward vector RS[s]
    :param policy: maps state index -> action
    :param U: value-function array, mutated in place
    :param discount: discount factor gamma
    :param maxiters: number of full sweeps over the state space
    :return: the (mutated) value function U
    """
    for i in range(maxiters):
        for s in range(env.observation_space.n):
            summ = 0
            # expected next-state value under the policy's action
            for s_1 in range(env.observation_space.n):
                summ += env.T[s, policy[s], s_1] * U[s_1]
            U[s] = env.RS[s] + discount * summ
    return U
6feb85ef9c1556e31694964daa83de0ee70e04c8
44,396
def readGraphJson(file_path):
    """Read a json file and return its contents as a dict.

    Contents of the json file must be generated from nodeUtils.recurse.
    Used for recreating a graph.

    :param file_path: Full file path of the json file.
    :type file_path: str
    :return: dict parsed from ``file_path``, or None if the file is
        missing or unreadable.

    - Example::

        readGraphJson(r"C:/temp/graph.json")
    """
    if not os.path.exists(file_path):
        print("No json file found: {}".format(file_path))
        return None
    try:
        with open(file_path) as json_buffer:
            return json.load(json_buffer)
    except Exception as err:
        print(str(err))
        print("Failed to read: {}".format(file_path))
        return None
285448e0e83d0dd83ce1e9ddae5d07471440fcad
44,400
def linecut(x, y, ycut):
    """Return x positions where the piecewise-linear curve (x, y) crosses
    ``ycut``; empty list if it never does.

    NB: Often useful when e.g. x is an iteration number and y a residual:
    the result tells at what iterations a convergence criterion was met.
    """
    if x.size < 2:
        return []
    crossings = []
    for i in range(x.size - 1):
        seg_x = [x[i], x[i + 1]]
        seg_y = [y[i], y[i + 1]]
        # orient the segment so seg_y is increasing
        if seg_y[1] < seg_y[0]:
            seg_x.reverse()
            seg_y.reverse()
        # half-open interval avoids double-counting shared endpoints
        if seg_y[0] <= ycut < seg_y[1]:
            frac = (ycut - seg_y[0]) / (seg_y[1] - seg_y[0])
            crossings.append(seg_x[0] + frac * (seg_x[1] - seg_x[0]))
    return crossings
b0bd5d9c03e24a28a67d0be4b5ca6877b334f603
44,401
def find_imgs(dirpath, img_type=(".jpg", ".jpeg")):
    """Walk ``dirpath`` and collect all files ending in ``img_type``.

    Directories whose path contains "_selected" are skipped.

    Parameters
    ----------
    dirpath : str
        Path to an image-containing directory.
    img_type : tuple, optional
        By default, finds JPG image types, but can be changed if camera
        exports a different filetype.

    Returns
    -------
    list of dictionaries
        Contains filenames and filepaths.
    """
    matches = []
    for current_dir, _, filenames in os.walk(dirpath):
        if "_selected" in current_dir:
            continue
        for fname in filenames:
            if fname.lower().endswith(img_type):
                matches.append({
                    "filename": fname,
                    "filepath": os.path.join(current_dir, fname),
                })
    return matches
b03e94d54587346f1f2aab1408fc13bf5ef8437b
44,402
def update_pet_with_form(petId, name=None, status=None):  # noqa: E501
    """Updates a pet in the store with form data  # noqa: E501

    :param petId: ID of pet that needs to be updated
    :type petId: int
    :param name: Updated name of the pet
    :type name: str
    :param status: Updated status of the pet
    :type status: str
    :rtype: None
    """
    # Generated server stub -- the real implementation lives elsewhere.
    return 'do some magic!'
f4e9a8f3d96c3d76b4ea5dc182125e4a1933d9fd
44,406
def highlight_uncovered_lines(text):
    """Highlight lines beginning with '>>>>>>'."""
    marker = '&gt;' * 6

    def wrap_uncovered(line):
        # The line must start with the missing-line indicator, or some
        # HTML was put in front of it.
        if line.startswith(marker) or '>' + marker in line:
            return '<div class="notcovered">%s</div>' % line.rstrip('\n')
        return line

    return ''.join(wrap_uncovered(line) for line in text.splitlines(True))
5a812eb4dcc19ee9e9f6105215c89d7cffd1fc5f
44,408
def resolve_boolean_value(val: str):
    """Resolve a string which represents a boolean value.

    Args:
        val: The value

    Returns:
        True for "true", False for "false" (case-insensitive), else None.
    """
    normalized = val.upper()
    if normalized == "TRUE":
        return True
    if normalized == "FALSE":
        return False
    return None
6ec89280a23c0ea9819fe7c0c4294d0d5d16b3ad
44,409
def getallqualities():
    """Return a dictionary of TF2 item qualities.

    Keys are quality numbers, values are their descriptions.
    """
    return {
        6: 'Unique',
        3: 'Vintage',
        11: 'Strange',
        1: 'Genuine',
        14: "Collector's",
        13: 'Haunted',
        5: 'Unusual',
    }
f2c59fbb7612792239d2a7b6bf05c5466335d99d
44,410
def compute_RayleighsQuotient(Y, A):
    """Rayleigh's quotient (Y^T A Y) / (Y^T Y) for vector Y and matrix A."""
    numerator = (Y.T) @ (A.dot(Y))
    denominator = Y.T @ Y
    return numerator / denominator
29eb22bcbe86ee18ea60a0cf5b20ecb9c7e274c6
44,411
def customer_name(toml_data):
    """Return the customer name from the toml data file."""
    customer = toml_data["customer"]
    return customer["customer_name"]
91d75f04c832c75eca26bdc23ac16bc3d0d80dc8
44,412
def count(text):
    """Count word frequencies in ``text``.

    Characters outside ASCII [0-9A-Za-z] are treated as separators, the
    text is lower-cased, and words are counted.

    :return: a collections.Counter mapping word -> occurrences
    """
    if isinstance(text, str):
        # Single pass instead of the old per-character str.replace loop
        # (which was O(n^2) and printed debug output to stdout).
        text = ''.join(
            ch if ch.isascii() and ch.isalnum() else ' ' for ch in text
        )
    words = text.lower().split()
    return collections.Counter(words)
1cf054adae8309e716beb21550465bf3d26c1b13
44,413
def noop(data):
    """No-op "encoder": return the object exactly as it is."""
    return data
2929d8ce17197946e1af8f705ea17fdd4dfc6e41
44,414
def split_instruction(ins):
    """Split an assembly instruction into separate parts.

    :param ins: The assembly line.
    :return: A list with the parts of the instruction.
    """
    return ins.replace(',', ' ').split()
f3799deb9dc41c3c5184effec7bd1b1c07c61ffc
44,415
def parse_lists_from_api(list_string, separator=';'):
    """Parse a delimiter-separated list returned by the OpenCitations API.

    :param list_string: items separated by ``separator``, or None
    :type list_string: str
    :param separator: (Default ';') separator between elements
    :type separator: str
    :return: stripped items as a list, or None when ``list_string`` is None
    :rtype: list
    """
    if list_string is None:
        return None
    return [piece.strip() for piece in list_string.split(separator)]
fa56eaed2bdfb9d10be1917ba6c02cc08cd9974b
44,417
def read(xb, gps_reads=10, timeout=10):
    """Read all GPS input, parse, average, and return.

    xb: an already initialized xbee module for UART reads.
    gps_reads: number of inputs to average.
    timeout: seconds before giving up.
    """
    # Stub: returns an empty result set.
    return {}
26780b0294aea4c10fae22797ce59a4e9ca1036b
44,418
def a_source_password(plugin_ctx, fsm_ctx):
    """Send source password.

    FSM action: writes the source password to the controller and always
    returns True.
    """
    src_password = plugin_ctx.src_password
    # password=True presumably suppresses echoing/logging of the secret
    # -- confirm against the ctrl.send_command API.
    fsm_ctx.ctrl.send_command(src_password, password=True)
    return True
19d9bcba5ad41395b767422ef585b45ceb4d2d42
44,419
def function(name: str, type_sig: str, arg_names: List[str] = [], body: str = '') -> str:
    """Render a function definition (Elm-style syntax).

    :param name: function name
    :param type_sig: type signature text
    :param arg_names: argument names, joined with spaces
    :param body: function body text
    :return: the rendered definition
    """
    # Bug fix: interpolating the list directly produced "name ['a', 'b'] ="
    # instead of "name a b =".  (The mutable [] default is safe here since
    # it is never mutated.)
    args = f"{' '.join(arg_names)} " if arg_names else ''
    return f'{name} : {type_sig}\n{name} {args}=\n{body}'
804a6d01dd64183b738e5b8dbb7713194921eb1c
44,420
def compatible_versions(actual_version: str, required_version: str) -> bool:
    """Determine whether two versions are equal.

    Only the dot separated elements in common are taken into account, so
    actual "3.7.4" compared with "3.7" will return True.

    Args:
        actual_version: A dot separated version.
        required_version: A dot separated version.

    Returns:
        True if the actual_version is compatible with the
        required_version, otherwise False.
    """
    actual_parts = actual_version.split(".")
    required_parts = required_version.split(".")
    for have, want in zip(actual_parts, required_parts):
        if have != want:
            return False
    return True
1876c7ce1ca1b992640ba7bb4f96cc9420de7965
44,422
def logistic(A, K, B, v, Q, M) -> Callable[[float], float]:
    """Create a generalized logistic function.

    https://en.wikipedia.org/wiki/Generalised_logistic_function

    :param A: the lower asymptote
    :param K: the upper asymptote
    :param B: the growth rate
    :param v: near which asymptote the growth occurs
    :param Q: Y(0)
    :param M: starting point x_0
    :return: a function of t
    """
    span = K - A

    def curve(t):
        denom = (1 + Q * math.exp(-B * (t - M))) ** (1 / v)
        return A + span / denom

    return curve
8102cd6cd324d0a48671156424302a34fe73c1fa
44,423
def wrap_message(message: str, chars_in_line: int = 80) -> str:
    """Wrap a block of text to a certain amount of characters.

    Empty paragraphs (blank lines) are preserved.

    :param message:
    :param chars_in_line: The width to wrap against
    :returns: the wrapped message
    """
    wrapped = []
    for paragraph in message.split('\n'):
        pieces = textwrap.wrap(paragraph, chars_in_line)
        wrapped.extend(pieces if pieces else [''])
    return '\n'.join(wrapped)
21662addb2d93e9a858d61b8ba12e6148587a9cb
44,424
def get_density(df):
    """Return population density from a pandas dataframe, rounded to 1 dp.

    :param df: dataframe with 'population' and 'area' columns
    """
    density = 1.0E6 * df['population'] / df['area']
    return density.round(1)
c7edb5ece5b5d6727d8ac29ff5eb91cffcf25bf3
44,425
def days_index(df):
    """Find indices of daily trips.

    Parameters
    ----------
    df : pandas DataFrame
        Dataframe containing bikeshare trip data with columns that have
        been renamed to the common key (needs a 'start_dt' datetime column).

    Returns
    -------
    d_i : dict
        Contains the indices of the first trip per day (day-of-month ->
        index).  NOTE(review): ``(days == i).idxmax()`` returns the very
        first index when a day has no trips at all -- confirm every day
        in 1..max is represented in the data.
    """
    days = df['start_dt'].dt.day
    d_i = [(days == i).idxmax() for i in range(1, max(days)+1)]
    return dict(zip(range(1, max(days)+1), d_i))
31be6f32a4ef5f248500a7a3162d8f07bbde6c09
44,428
def expand_delimited_multiclass_series(series, delim):
    """Expand a delimited multi-class column into one column per class.

    Mostly used for multiclass image classification.

    :param series: pandas Series of delimited label strings
    :param delim: delimiter between the classes
    :return: DataFrame with columns '<series.name>00', '<series.name>01', ...
    """
    # Bug fix: the split previously hard-coded "_" and ignored ``delim``.
    expanded_columns = series.str.split(delim, expand=True)
    expanded_columns.columns = [
        series.name + "{:02d}".format(idx)
        for idx in expanded_columns.columns.values
    ]
    return expanded_columns
f92f4bfd9200e51970014f045ea9daf9660e5712
44,430
def _clean_json_dict_keys(x): """Cast any dictionary keys in the given JSON object to str. We can assume that x isn't a recursive data structure, and that this is only called in Python 2.""" if isinstance(x, dict): return {str(k): _clean_json_dict_keys(v) for k, v in x.items()} elif isinstance(x, list): return [_clean_json_dict_keys(item) for item in x] else: return x
ecd51290bb48497dcd2927705c71f3cb7d36dbf5
44,431
def fib(x):
    """Return the x-th term of the Fibonacci sequence (1-indexed: 1, 1, 2, 3, ...)."""
    a, b = 1, 1
    for _ in range(1, x):
        a, b = b, a + b
        # Bug fix: the old body also did `x += 1` here; mutating x after
        # range() is built has no effect, so it was dead code.
    return a
76cb08e89f11152c1aa2240b14c216042850b0ab
44,432
def somme_chiffres(n: int) -> int:
    """Return the sum of the base-10 digits of ``n``.

    Parameters:
        n: {int} -- the number to process.

    Returns:
        {int} -- the sum of the number's digits.

    Example:
        >>> somme_chiffres(1010101)
        4
    """
    total = 0
    while n > 0:
        n, digit = divmod(n, 10)
        total += digit
    return total
0da5c2e2001c53ea6d3f8e215a9b45d6bc404d86
44,435
def dict_flat_get_min():
    """Get flat dict for :func:`get` with minimum data of Datafile.

    Returns
    -------
    dict
        Minimum Datafile dictionary returned by :func:`get`.
    """
    return {
        "pid": "doi:10.11587/RRKEA9",
        "filename": "10109_qu_de_v1_0.pdf",
    }
981b8ef2b31a1a009b212d38b4e7d0377fceb609
44,436
def norm_stress(p, a):
    """Return the normal stress given the force ``p`` and the area ``a``."""
    force, area = p, a
    return force / area
a14ca7cd5e3add11aab8b8c02edb73d8ee0e905d
44,437
def project_path():
    """Return the absolute path of the project root (two levels above this file).

    Examples
    --------
    >>> utils.project_path()
    """
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.abspath(os.path.join(here, '..', '..'))
0048fe56d3457aa030ce7070cb97d3491782102c
44,438
def _listify(obj): """ makes sure `obj` is a `list` """ if isinstance(obj, str): return [obj] try: # will fail if obj is not iterable return list(obj) except TypeError: return [obj]
f42fb6c7f40c286bb96d5980f69f0f08fb040328
44,439
def swap(l, i, j):
    """Swap the index i with the index j in list l, in place.

    Args:
        l (list): list to perform the operation on.
        i (int): left side index to swap.
        j (int): Right side index to swap.

    Returns:
        list: the same (mutated) list.
    """
    tmp = l[i]
    l[i] = l[j]
    l[j] = tmp
    return l
8283e8c1dfe07edf25a07c9912b8d240e538e790
44,441
def consume_until(line_iter, end_re):
    """Collect lines from ``line_iter`` until one matches ``end_re``.

    The line matching ``end_re`` is consumed but not returned.
    ``line_iter`` yields regex match objects whose group(0) is the line text.
    """
    collected = []
    for match in line_iter:
        text = match.group(0)
        if end_re.search(text):
            return collected
        collected.append(text)
    return collected
2a729d53a9658f53617c5f5ab62b3e35431ab796
44,443
def invert(horseshoe) -> list:
    """Mirror the horseshoe horizontally by reversing each row."""
    mirrored = []
    for row in horseshoe:
        mirrored.append(row[::-1])
    return mirrored
9821e9da83daebbc0f2c794424322f627bb078bb
44,445
def _should_unpack_args(args): """Returns `True` if `args` should be `*args` when passed to a callable.""" return type(args) is tuple # pylint: disable=unidiomatic-typecheck
ba7b91a64911841ab0d21f81c5b5a4d045ae78e3
44,446
def profitsharing_return_query(self, out_order_no, out_return_no, sub_mchid=None):
    """Query the result of a profit-sharing return.

    :param out_order_no: merchant profit-sharing order number; digits,
        letters and _-|*@ only, e.g. 'P20150806125346'
    :param out_return_no: merchant return number, generated by the
        merchant's own backend and unique there, e.g. 'R20190516001'
    :param sub_mchid: (service-provider mode) sub-merchant id issued by
        WeChat Pay, e.g. '1900000109'
    """
    if not (out_order_no and out_return_no):
        raise Exception('out_order_no or out_return_no is not assigned')
    path = '/v3/profitsharing/return-orders/%s?&out_order_no=%s' % (out_return_no, out_order_no)
    if self._partner_mode:
        if not sub_mchid:
            raise Exception('sub_mchid is not assigned.')
        path = '%s&sub_mchid=%s' % (path, sub_mchid)
    return self._core.request(path)
e35d89a9fb3706a7292648567fd6e98cc0b32484
44,449
def is_in_garden(i_center, j_center, horizontal, vertical) -> bool:
    """Evaluate whether the mole is inside the garden.

    Counts fence characters on each side of the mole along its row
    (``horizontal``) and column (``vertical``); the mole is inside when
    the count is odd on all four sides.  Note that the right/bottom
    slices deliberately skip the final element of each sequence.
    """
    fence_right = sum(1 for ch in horizontal[j_center + 1:-1] if ch in ("|", "+"))
    fence_left = sum(1 for ch in horizontal[0:j_center] if ch in ("|", "+"))
    fence_bot = sum(1 for ch in vertical[i_center + 1:-1] if ch in ("-", "+"))
    fence_top = sum(1 for ch in vertical[0:i_center] if ch in ("-", "+"))

    hori_ok = fence_right % 2 == 1 and fence_left % 2 == 1
    vert_ok = fence_top % 2 == 1 and fence_bot % 2 == 1
    # Debug output preserved from the original implementation.
    print(i_center, j_center, "hori", hori_ok, "vert", vert_ok)
    print(fence_left, fence_right)
    return hori_ok and vert_ok
747e15329e502bb9c3cfbc4cff96aeb82f6c4215
44,450
import requests
from datetime import datetime
import pytz


def bit_bucket_user_details(user_name):
    """Fetch Bitbucket account details for a given user name.

    Performs several calls to the Bitbucket 2.0 REST API (user profile,
    repositories, followers, following) and aggregates the results.

    :param user_name: string - Bitbucket user name
    :return: dict - dictionary of Bitbucket user details
    """
    url = 'https://api.bitbucket.org/2.0/users/%s' % user_name
    data = requests.get(url).json()

    # 'created_on' looks like '2011-12-20T16:34:07.132459+00:00'; drop the
    # UTC offset and fractional seconds, then parse as a UTC-aware datetime.
    timestamp = data['created_on'].split('+')[0].split('.')[0]
    account_created_at = datetime.strptime(timestamp, "%Y-%m-%dT%X").replace(tzinfo=pytz.utc)

    repo_url = list(data['links']['repositories'].values())[0]
    followers_url = list(data['links']['followers'].values())[0]
    following_url = list(data['links']['following'].values())[0]
    snippets_url = list(data['links']['snippets'].values())[0]

    return {
        'user_name': user_name,
        'display_name': data['display_name'],
        'account_created_at': account_created_at,
        'repo_url': repo_url,
        'total_no_of_repos': requests.get(repo_url).json()['size'],
        'followers': requests.get(followers_url).json()['size'],
        'following': requests.get(following_url).json()['size'],
        'blog_url': data['website'],
        'profile_url': list(data['links']['html'].values())[0],
        'snippets_url': snippets_url,
        'location': data['location'],
    }
405c97e5cb6bad0b8a68536eb6afc047e2a92216
44,452
def poly(x, *args, export=False):
    """Evaluate (or export) a polynomial with coefficients ``args``.

    Coefficients are given highest degree first:
    ``args = [a_N, a_N-1, ..., a_1, a_0]`` describes a polynomial of
    degree ``N == len(args) - 1``.  Evaluation uses Horner's scheme.

    :param x: point at which to evaluate
    :type x: int, float
    :param *args: list of coefficients [a_N, a_N-1, ..., a_1, a_0]
    :type *args: list
    :param export: if 'Mathematica', return the polynomial as text
        instead of evaluating it, defaults to False
    :type export: bool or string, optional
    :return: the polynomial value, or its textual form
    :rtype: str, int, float

    >>> poly(3.4543, 5,4,3,2,1, export='Mathematica')
    '5*3.4543^4 + 4*3.4543^3 + 3*3.4543^2 + 2*3.4543^1 + 1*3.4543^0'
    >>> poly(3.4543, 5,4,3,2,1)
    920.4602110784704
    """
    a = list(args)
    if export == 'Mathematica':
        # Degree of term i is len(a)-1-i.  The previous version used
        # len(a)-i, which rendered the constant term as x^1 and so
        # disagreed with the numeric evaluation below.
        return ' + '.join(f'{a[i]}*{x}^{len(a) - 1 - i}' for i in range(len(a)))
    return poly(x, *a[0:-1]) * x + a[-1] if len(a) > 1 else a[0]
d780f123de597565adeedf2cfc717c703ed5051c
44,453
def to_bytes(string: str) -> str:
    """Convert a string to space-separated hex octets (big endian),
    two octets per group."""
    encoded = string.encode()
    groups = []
    for i in range(0, len(encoded), 2):
        groups.append(hex(int.from_bytes(encoded[i:i + 2], 'big')))
    return ' '.join(groups)
ae226cebfbbe2f1e116c834086e6323f522818e0
44,454
def gen_oscs(osc):
    """Build a key-to-oscillator mapping spanning 19 semitones up to A440.

    :param osc: zero-argument factory producing oscillator objects with a
        writable ``freq`` attribute; each instance is primed with ``iter``.
    :return: dict mapping keyboard keys to initialised oscillators
    """
    keys = ['q', 'a', 'w', 's', 'e', 'd', 'r', 'f', 't', 'g',
            'y', 'h', 'u', 'j', 'i', 'k', 'o', 'l', 'p']
    oscs = {}
    for idx, key in enumerate(keys):
        note = idx - 17  # notes run from -17 through +1 relative to A440
        instance = osc()
        instance.freq = 440 * (2 ** (1 / 12)) ** note
        iter(instance)
        oscs[key] = instance
    return oscs
21ec208453496feca0065d9535f75a2d70555f96
44,456
def message(blockers):
    """Create a sequence of key messages based on what is blocking."""
    if not blockers:
        return ['You have 0 projects blocking you from using Python 3!']

    flattened_blockers = set()
    for blocker_reasons in blockers:
        flattened_blockers.update(blocker_reasons)

    plural = 's' if len(flattened_blockers) != 1 else ''
    formatted_need = 'You need {0} project{1} to transition to Python 3.'.format(
        len(flattened_blockers), plural)

    many_blockers = len(blockers) != 1
    formatted_can_port = ('Of {0} {1} project{2}, {3} {4} no direct dependencies '
                          'blocking {5} transition:').format(
        'those' if len(flattened_blockers) != 1 else 'that',
        len(flattened_blockers),
        plural,
        len(blockers),
        'have' if many_blockers else 'has',
        'their' if many_blockers else 'its')
    return formatted_need, formatted_can_port
a14c31dd0222ebf1e841d78ebea253cb3ca22a88
44,459
def check_true(option):
    """Return True when ``option`` looks affirmative.

    Accounts for user input such as "yes", "Yes", "True", "Y", ... — any
    string containing a 'y' or a 't' (case-insensitive) counts as true.
    """
    lowered = option.lower()
    return 'y' in lowered or 't' in lowered
98310fae9a22fdc87c2faf98a942eebb85710d42
44,461
import re


def commas(line):
    """Return the indices of every comma in the input string or line."""
    positions = []
    for match in re.finditer(',', line):
        positions.append(match.start())
    return positions
c5327e3c34336db3c64acfa391da179018276b3b
44,462
def is_subsequence(needle, haystack):
    """Return True if all elements of ``needle`` appear in ``haystack`` in
    the same relative order; other elements may be interspersed.

    A shared iterator is threaded through ``in`` checks so each successive
    element must be found after the previous one.
    """
    it = iter(haystack)
    return all(element in it for element in needle)
1b3c1d66258bc3ae66e52c9eacfe1dbfed345c87
44,464
def filter_candidates(candidates: list, usr_input: str):
    """Return entries in candidates that contain usr_input as substring,
    ordered by the index where the substring is found.

    Duplicates are removed (first occurrence kept).  For matches where the
    index of the substring is the same, original (temporal) order is kept,
    since ``sorted`` is stable.
    """
    matches = sorted((c for c in candidates if usr_input in c),
                     key=lambda c: c.find(usr_input))
    # dict preserves insertion order (3.7+), giving an order-preserving dedup
    # without the original's side-effecting `seen.add` inside a comprehension.
    return list(dict.fromkeys(matches))
97663f1deffba024c052aad2015d9059953a20ca
44,465
import inspect


def get_args(method):
    """Split a callable's parameters into positional and keyword groups.

    Args:
        method [function]:

    returns:
        args, kwargs — parameters without and with default values
    """
    parameters = inspect.signature(method).parameters
    positional = [p for p in parameters.values() if p.default is p.empty]
    defaulted = [p for p in parameters.values() if p.default is not p.empty]
    return positional, defaulted
6b9b4c2fd326574924f9941cda67b60b47f13fae
44,466
from functools import reduce def _bit_list_to_bytes(bits): """Convert a sequence of truthy values into a byte string, MSB first.""" return bytes( reduce(lambda a, b: (a << 1) | b, (int(bool(x)) for x in byte_bits)) for byte_bits in zip(*[iter(bits)]*8) )
005f3b800204e4b2053d0ba7d534d8e62e1449c7
44,467
def uniformlyNonDecreasing(buffer, item, attempts):
    """Halting condition: stop once the buffer's newest cached value is
    larger than the one being searched for.

    The default halting condition for the Buffer class; ``attempts`` is
    accepted for interface compatibility but unused.
    """
    return buffer._cache[-1] > item
15cb8b219b92f909712d54b72b5fdd78a3424909
44,468
def normalize(data, olow, ohigh, nlow, nhigh):
    """Linearly remap ``data`` from the range [olow, ohigh] to [nlow, nhigh].

    olow/ohigh: old range bounds; nlow/nhigh: new range bounds.
    """
    fraction = (data - olow) / (ohigh - olow)
    return nlow + fraction * (nhigh - nlow)
74c015bab4d6c2683f98cbf28af8df49f1bfbada
44,469
import struct


def gen_alis_table(forward, reverse):
    """Generate the alias table.

    Layout: a uint32 count of forward entries, followed by the packed
    forward entries and then the reverse entries.  Each entry packs the
    code point with its kind in the top byte, plus the string id.
    """
    def pack_entry(cp, kind, sid):
        return struct.pack(b'=II', cp | (kind << 24), sid)

    fwd = [pack_entry(cp, kind, sid) for cp, kind, _n, sid in forward]
    rev = [pack_entry(cp, kind, sid) for _n, cp, kind, sid in reverse]
    header = struct.pack(b'=I', len(forward))
    return header + b''.join(fwd) + b''.join(rev)
803cea23f1375430dd6f40c8bf6074be0ae4fa2b
44,471
import inspect


def check_params(func, return_invalids=False, **kwargs):
    """Filter named parameters down to those valid for ``func``.

    Parameters
    ----------
    func : callable
    return_invalids : bool (default: False)
        If True, also return the rejected keyword arguments.
    **kwargs :
        Arbitrary keyword arguments.

    Returns
    -------
    new_kwargs :
        Only keyword arguments valid for ``func``.
    kwargs : (optional)
        Only invalid keyword arguments (only if return_invalids is True).
    """
    accepted = set(inspect.signature(func).parameters)
    valid = {k: v for k, v in kwargs.items() if k in accepted}
    invalid = {k: v for k, v in kwargs.items() if k not in accepted}
    return (valid, invalid) if return_invalids else valid
91e3e63a338aa416b55e8894ac409f14f8e0299d
44,474
def isNan(value):
    """Return True when ``value`` is NaN (or any object unequal to itself).

    NaN is the only common value for which ``value == value`` is False, so
    comparing the value against itself detects NaN without extra imports.
    """
    return not value == value
b1f5cb19b70e8ebaf2f2eab8cb698204f38bb6a2
44,475
def season(x):
    """Return the season index (1-4) for a date-like object's month.

    Months 4-6 -> 1, 7-9 -> 2, 10-11 -> 3, everything else -> 4.
    """
    month = x.month
    if 3 < month <= 6:
        return 1
    if 6 < month <= 9:
        return 2
    if 9 < month <= 11:
        return 3
    return 4
f7304e65933c681731050fb67c85724c6ac492e9
44,476
import json


def read_file(database):
    """Load and return the JSON content of the file at ``database``."""
    with open(database) as handle:
        return json.load(handle)
42c2d2cf148bee58f4ec9cb54ac8d6644139478c
44,479
def cpu_used(cpuset):
    """Parse a cgroup cpuset string (e.g. "0-2,5") into a list of cpu ids."""
    used = set()
    for group in cpuset.split(","):
        # skip empty fragments, e.g. the one produced by "".split(",")
        if not group:
            continue
        parts = group.split("-")
        if len(parts) == 1:
            used.add(int(parts[0]))
        else:
            lo, hi = int(parts[0]), int(parts[1])
            used.update(range(lo, hi + 1))
    return list(used)
803522b3bdae71ab2fa1b6fe302db094c9b1dc13
44,480
def strip_comment(line: str) -> str:
    """Return ``line`` without leading/trailing '#' and whitespace.

    Removes the comment marker but preserves a '#' that is part of a tag:

    >>> strip_comment('# #hello ')
    '#hello'
    """
    without_hashes = line.strip('#')
    return without_hashes.strip()
37500b2811490ccbe0340c2998a1dbb0316d5439
44,481
def get_birthday_month(person) -> str:
    """Return the abbreviated birth month (e.g. 'Jan') to show on home."""
    return person.birthday.strftime("%b")
ee00131b45657a1f11f059155d5c5238f29b642d
44,483
import yaml


def get_dev_connection():
    """Build the database connection settings for local development.

    Reads Postgres credentials from the 'env_variables' section of
    ``env.yaml`` in the working directory.
    """
    with open('env.yaml') as file_name:
        env = yaml.safe_load(file_name)['env_variables']
    return {
        'default': {
            'ENGINE': 'django.db.backends.postgresql',
            'NAME': env['database'],
            'USER': env['user'],
            'PASSWORD': env['password'],
            'HOST': env['host'],
            'PORT': '5432',
        }
    }
de268268afebc85f12d1db78292da40cd300596d
44,485
import torch


def gen_causal_mask(input_size, dim_k, full_attention=False):
    """Generate a boolean causal (lower-triangular) mask.

    Returns shape (input_size, input_size) when ``full_attention`` is True,
    otherwise (input_size, dim_k) for Linformer-style projected attention.
    """
    cols = input_size if full_attention else dim_k
    upper = torch.triu(torch.ones(cols, input_size)) == 1
    return upper.transpose(0, 1)
48e6d47ff0171f8f123c88f6c2b329394e2d6acb
44,488
def get_field_setup_query(query, model, column_name):
    """Resolve ``column_name`` on ``model`` for SQLA filters, handling
    dot notation.

    If ``column_name`` is dotted (e.g. ``Contact.created_by`` where
    ``created_by`` is a relationship), the related model from the first
    segment is joined into the query and its attribute is returned;
    otherwise the model's own attribute is returned with the query
    unchanged.
    """
    if hasattr(model, column_name):
        return query, getattr(model, column_name)
    # inner object attribute: join the related model from the first segment
    parts = column_name.split('.')
    rel_model = getattr(model, parts[0]).mapper.class_
    return query.join(rel_model), getattr(rel_model, parts[1])
0e192cf2b52c04819de8c6a909295c02588ff70c
44,489
def encDec0(i):
    """Floor ``i`` to the start of its decade (a '0'-ending year).

    Note: this floors rather than rounding to the nearest decade; negative
    values floor toward the more negative decade boundary.
    """
    decade_start = (i // 10) * 10
    return decade_start
e4019cfc5dbef38516e1eec0782026fd201ba68a
44,492