content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def find_peak_element(nums):
    """Locate the index of a peak element via binary search.

    :param nums: array to search
    :type nums: list[int]
    :return: index of a peak element, or -1 for an empty array
    :rtype: int
    """
    if not nums:
        return -1
    lo, hi = 0, len(nums) - 1
    while lo + 1 < hi:
        mid = (lo + hi) // 2
        # Walk toward the larger neighbour: a peak must lie on that side.
        if nums[mid - 1] > nums[mid]:
            hi = mid - 1
        elif nums[mid + 1] > nums[mid]:
            lo = mid + 1
        else:
            return mid
    # Two (or one) candidates remain; return the larger one.
    if lo < len(nums) and nums[lo] > nums[hi]:
        return lo
    return hi
6487e75e91a8e899747345a16ec1d627ec040c2d
127,796
def currency_to_int(val):
    """Convert a currency string such as "$1,234" to an int.

    The leading currency symbol (first character) and thousands separators
    are stripped before conversion.
    """
    digits = val[1:].replace(",", "")
    return int(digits)
89844f8f0b93e34d7b0aa98d22f579fd17cb85ae
127,799
def parse_cstring_from_stream(stream, stream_pos=None):
    """Read a NUL-terminated C string from *stream*.

    The string is returned without the terminating NUL.  If no terminator is
    found before the stream is exhausted, None is returned.  If *stream_pos*
    is provided, the stream is seeked there first; otherwise reading starts
    at the current position.

    NOTE(review): the terminator is compared as the text value '\\x00', so
    this expects a text-mode stream -- confirm with callers.
    """
    if stream_pos is not None:
        stream.seek(stream_pos)
    chunk_size = 64
    pieces = []
    while True:
        chunk = stream.read(chunk_size)
        terminator = chunk.find('\x00')
        if terminator >= 0:
            pieces.append(chunk[:terminator])
            return ''.join(pieces)
        pieces.append(chunk)
        # A short read means the stream is exhausted without a terminator.
        if len(chunk) < chunk_size:
            return None
d637487b244b9260a2e599cb81b05942a5886564
127,802
def normalize_headers(headers):
    """Create a dictionary of headers from:

    * A list of curl-style "Key: value" strings
    * None
    * a dictionary (returns a *copy*).

    :param headers: List or dict of headers (may also be None).
    :type headers: Iterable[string] | dict[string, string] | None
    :return: A dictionary of headers suitable for requests.
    :rtype: dict[string,string]
    :raises Exception: if a curl-style header line contains no ':'.
    """
    if headers is None:
        return {}
    if isinstance(headers, dict):
        return headers.copy()
    # Assume curl-style sequence of strings
    result = {}
    for line in headers:
        key, sep, value = line.partition(':')
        # BUG FIX: str.partition returns '' (never None) when the separator
        # is missing, so the previous `sep is None` check could never fire
        # and malformed lines were silently accepted with an empty value.
        if not sep:
            raise Exception("Internal error - invalid header line (%s)" % line)
        result[key.strip().lower()] = value.strip()
    return result
a34725e0f64510beb8a25491758873134ab86d02
127,805
def _GetTracingUri(point): """Gets the URI string for tracing in cloud storage, if available. Args: point: A Row entity. Returns: An URI string, or None if there is no trace available. """ if not hasattr(point, 'a_tracing_uri'): return None return point.a_tracing_uri
8ba031e6ff38b0d582411c2bd4a4c066cdb5009d
127,806
def cannot_divide_into_batches(data, batch_size): """ Checkes whether data can be divided into at least two batches. Parameters ---------- data : array-like Dataset. batch_size : int or None Size of the batch. Returns ------- bool """ if isinstance(data, (list, tuple)): # In case if network has more than one input data = data[0] n_samples = len(data) return batch_size is None or n_samples <= batch_size
c02bffbca49642bc6e98c90f9f0ae582dcb806d7
127,811
def add_prefix(dictionary, prefix):
    """Add prefix to every key in a dictionary

    :param dictionary: Original dictionary
    :type dictionary: dict
    :param prefix: Prefix to add to every key
    :type prefix: str
    """
    prefixed = {}
    for key, value in dictionary.items():
        prefixed['{}{}'.format(prefix, key)] = value
    return prefixed
e44d1e51faa94407750783df7826f887eec5db84
127,812
import functools
import logging


def njit(nogil=True):
    """Fake njit decorator to use when numba is not installed."""
    _ = nogil

    def decorate(method):
        """Return the wrapped method."""
        @functools.wraps(method)
        def wrapper(*args, **kwargs):
            # Warn on every call: without numba this code path is slow.
            logging.warning('numba is not installed. This causes a severe performance degradation for method %s', method.__name__)
            return method(*args, **kwargs)
        return wrapper
    return decorate
2d1308e873e4a2e4f375933d29bfa2075057bd25
127,814
def hash_str(x, p, m):
    """
    Compute the hash of a string with a polynomial rolling hash function.

    Parameters
    ----------
    x : str
        element
    p : int
        Prime number. Recommended p is 53.
    m : int
        The size of the hashing space. Usually, the size of the bloom filter.

    Returns
    -------
    hash : int
        Hash value of x, guaranteed to lie in [0, m).
    """
    # BUG FIX: the old code summed per-character terms without a final
    # modulo, so the result could be >= m -- an out-of-range index for a
    # bloom filter of size m.  Reduce modulo m at every step instead.
    # (The old try/except around int() was dead code: the value is always
    # an int already.)
    hash_value = 0
    p_i = p
    for c in x:
        hash_value = (hash_value + ord(c) * p_i) % m
        p_i = (p_i * p) % m
    return hash_value
ad7111f6da3d33930446fc96d1885581df8a73ec
127,815
def fill_cohort_config_missing(config):
    """
    If no cohort_config section is provided, include all the entities by
    default.

    Args:
        config (dict): a triage experiment configuration

    Returns:
        (dict) a triage cohort config
    """
    from_query = "(select entity_id, {knowledge_date} as knowledge_date from (select * from {from_obj}) as t)"
    # Build one sub-query per feature aggregation and union them.
    from_queries = [
        from_query.format(
            knowledge_date=agg['knowledge_date_column'],
            from_obj=agg['from_obj'],
        )
        for agg in config['feature_aggregations']
    ]
    unions = "\n union \n".join(from_queries)
    # {as_of_date} is intentionally left as a placeholder for later formatting.
    query = f"select distinct entity_id from ({unions}) as e" + " where knowledge_date < '{as_of_date}'"
    default_config = {'query': query, 'name': 'all_entities'}
    # Any user-provided cohort_config entries override the defaults.
    default_config.update(config.get('cohort_config', {}))
    return default_config
50447c6b0cca846f1a5ed9057af0a41b04008b96
127,818
def file2c(fn, array_name):
    """
    Read a file and return a C-compatible array with its binary data.

    :param fn: path of the file to read
    :param array_name: identifier for the generated C array
    :return: C source string declaring a const char array
    """
    with open(fn, 'rb') as f:
        rawdata = f.read()
    # hex() of each byte, e.g. 0x1, 0x20.
    hexdata = [hex(x) for x in rawdata]
    # BUG FIX: the old code appended "x, " per byte and stripped the last
    # two characters, which mangled the declaration for an empty file
    # (it chopped into the "= {" header).  str.join handles the empty
    # case cleanly.
    return ('const char ' + array_name + '[' + str(len(hexdata)) + '] = {'
            + ', '.join(hexdata) + '};')
51cad670fba8777e5f06a318b30c1f30a56337cb
127,821
import math


def crop(pois, lat, long, max_dist):
    """
    Crop a list to points that are within a maximum distance of a center
    point.

    :param pois: list of (lat, long) pairs
    :param lat: center lat
    :param long: center long
    :param max_dist: max distance in meters
    :return: a filtered list

    NOTE(review): math.cos is fed *lat* directly; if lat is in degrees this
    is likely wrong (cos expects radians) -- confirm with callers.
    """
    # Convert from meters to an angular distance (degrees of latitude).
    rad_dist = max_dist * math.cos(lat) / 111320
    return [point for point in pois
            if math.hypot(lat - point[0], long - point[1]) <= rad_dist]
687dd3b073fc5e20fb7e3e4b0b511374a590bf14
127,823
import aiohttp
import json


async def async_get_auth_token(controller_ip, username, password, DNAC_PORT):
    """
    Authenticate with the controller and return a token to be used in
    subsequent API invocations.

    Returns a dict with the token, HTTP status, request URL and method.
    """
    login_url = f"https://{controller_ip}:{DNAC_PORT}/dna/system/api/v1/auth/token"
    credentials = aiohttp.BasicAuth(username, password)
    async with aiohttp.ClientSession() as session:
        # ssl=False: the controller typically runs a self-signed certificate.
        async with session.post(login_url, ssl=False, auth=credentials) as resp:
            payload = json.loads(await resp.read())
            return {
                "token": payload["Token"],
                "status": resp.status,
                "url": resp.url,
                "method": "post",
            }
7d8b7a45341a74c41360db02e0fff51536c14f7b
127,824
def fix_saes_params(solver, t, sds, default_sd=None):
    """
    Repair the strategy parameters (specifically for SAES).

    :param solver: Solver used to start the optimisation; supplies the
        default mutation strength when *default_sd* is None
    :param t: Trace containing solution
    :param sds: Strategy parameters (standard deviations)
    :param default_sd: Default mutation strength
    :return: strategy parameters with the same length as the trace
    """
    if default_sd is None:
        default_sd = solver.alg_params.mutation_sd
    if len(t) < len(sds):
        # Trace shrank: drop the surplus deviations.
        sds = sds[:len(t)]
    elif len(t) > len(sds):
        # Trace grew: pad with the default mutation strength.
        sds.extend(default_sd for _ in t[len(sds):])
    assert len(t) == len(sds)
    return sds
1339f002a01662306b4ed47b0df85de057ffe2f1
127,831
def cubic_to_axial(x, y, z):
    """Convert a cubic coordinate (x, y, z) to its axial equivalent.

    The y component is redundant in the axial system and is dropped.
    """
    return x, z
e3d44f631f53397eca018a0d4c25ad1cd35e2f17
127,832
def summ_bin(x):
    """
    Summarize the binning outcome generated from a binning function,
    e.g. qtl_bin() or iso_bin().

    Parameters:
        x: An object containing the binning outcome.

    Returns:
        A dictionary with statistics derived from the binning outcome.

    Example:
        summ_bin(iso_bin(ltv, bad))
        # {'sample size': 5837, 'bad rate': 0.2049, 'iv': 0.185,
        #  'ks': 16.88, 'missing': 0.0002}
    """
    tbl = x['tbl']
    total = sum(row['freq'] for row in tbl)
    bads = sum(row['bads'] for row in tbl)
    missing = sum(row['miss'] for row in tbl)
    return {
        "sample size": total,
        "bad rate": round(bads / total, 4),
        "iv": round(sum(row['iv'] for row in tbl), 4),
        "ks": round(max(row["ks"] for row in tbl), 2),
        "missing": round(missing / total, 4),
    }
526a02ffbecf981786e7013c8d4ea282f690eeda
127,833
import random


def makeTarget(genomeSize, targetSize, numFeatures=1):
    """
    Make a target genome with total length of targetSize being a subset of
    genomeSize.

    :param genomeSize: total size of the genome
    :param targetSize: combined size of all features
    :param numFeatures: number of equally-sized, non-overlapping features
    :return: dict mapping feature start position -> end position
    """
    target = {}
    featureSize = int(targetSize / numFeatures)
    while len(target) < numFeatures:
        start = random.randint(0, genomeSize - featureSize)
        end = start + featureSize
        # BUG FIX: the old test (start < f and end > target[f]) only
        # rejected candidates that fully CONTAINED an existing feature,
        # so partially overlapping features could be accepted.  Two
        # half-open intervals [start, end) and [f, target[f]) overlap
        # iff start < target[f] and end > f.
        overlap = any(start < target[f] and end > f for f in target)
        if not overlap:
            target[start] = end
    return target
bbd1da1eae36a36722a1930d0525ce78e1bad39b
127,834
import logging


def select_python_venv(py_venvs, tox_dir):
    """Prompt the user to pick one of the available Python venv(s)."""
    logging.info('There are more than one amiable Python venv(s) in the .tox dir.')
    # Keep asking until a valid index is entered.
    while True:
        print(f"\n\nPick one of the available python venv(s) in '{tox_dir}'.")
        for i, penv in enumerate(py_venvs):
            print(i, penv)
        index = input('Enter the index of the python venv you want: ')
        try:
            return py_venvs[int(index)]
        except Exception as exc:
            print('\nException:\n', str(exc))
            print(f'error: {index} is not a valid index. Please try again...')
432acc3c94fc36a80cc618dfcb4335c19215fba9
127,843
import torch


def torch_equals_ignore_index(tensor, tensor_other, ignore_index=None):
    """
    Compute ``torch.equal`` with the optional mask parameter.

    Args:
        tensor: target tensor.
        tensor_other: prediction tensor of the same size as ``tensor``.
        ignore_index (int, optional): Specifies a ``tensor`` index that is
            ignored.

    Returns:
        (bool) Returns ``True`` if target and prediction are equal.
    """
    if ignore_index is None:
        return torch.equal(tensor, tensor_other)
    assert tensor.size() == tensor_other.size()
    # The mask is derived from *tensor* only, then applied to both sides.
    keep = tensor.ne(ignore_index)
    return torch.equal(tensor.masked_select(keep),
                       tensor_other.masked_select(keep))
1da45ac80a373c55b453fcc6327af0b5287640b2
127,844
import pickle


def load_variable(filename):
    """
    Load pickled variables from a file.

    Parameters
    ----------
    filename : str
        The name of the file to read.

    Returns
    -------
    r : object
        The loaded variable(s), typically a list.
    """
    # Context manager: the handle is closed even if unpickling raises,
    # unlike the previous manual open/close pair.
    with open(filename, 'rb') as f:
        return pickle.load(f)
bbaf1805c9322622ac18b5af8d8aea546b185e73
127,845
def _create_customizer_attribute(
    client, customer_id, customizer_attribute_name
):
    """Creates a customizer attribute with the given customizer attribute name.

    Args:
        client: an initialized GoogleAdsClient instance.
        customer_id: a client customer ID.
        customizer_attribute_name: the name for the customizer attribute.

    Returns:
        A resource name for a customizer attribute.
    """
    # Build the create operation for the new customizer attribute.
    operation = client.get_type("CustomizerAttributeOperation")
    attribute = operation.create
    attribute.name = customizer_attribute_name
    # Type PRICE lets the part of the ad's description that is a
    # product/service price be customized dynamically.
    attribute.type_ = client.enums.CustomizerAttributeTypeEnum.PRICE

    # Issue the mutate request and report the created resource.
    service = client.get_service("CustomizerAttributeService")
    response = service.mutate_customizer_attributes(
        customer_id=customer_id, operations=[operation]
    )
    resource_name = response.results[0].resource_name
    print(f"Added a customizer attribute with resource name: '{resource_name}'")
    return resource_name
    # [END add_responsive_search_ad_with_ad_customizer_1]
5713f9e3988a4590757bfda3a7bd70c2d7c0374d
127,851
def transform_angular_variance_lens_1d(k, var_r, phi_c, var_q, f):
    """Calculate the angular variance of a beam that has passed through a lens.

    The lens phase is -k*r**2/(2*f).  The formula used is (15) of Siegman,
    IEEE J. Quantum Electronics, vol. 27 (1991), with k = 2*pi/lambda and
    the opposite sign convention for f.  It is valid for any refractive
    index.

    Args:
        k (scalar): wavenumber.
        var_r (scalar): real space variance.
        phi_c (scalar): real-space curvature.
        var_q (scalar): angular variance of the beam before the lens.
        f (scalar): focal length of the lens - positive = converging lens.

    Returns:
        var_qp (scalar): angular variance of the beam after the lens.
    """
    curvature_term = 4 * k * phi_c / f
    lens_term = var_r * k**2 / f**2
    return var_q - curvature_term + lens_term
6edb3a53cda095e157c6714ba44366aa77854e17
127,858
import torch


def my_multi_task_loss(y_pred, y, weights):
    """
    Multitask loss function.

    Computes the individual loss for each of the three outputs (age, gender,
    race) and combines them in a weighted sum.

    :param y_pred: (age, gender, race) predicted outputs
    :param y: (age, gender, race) true labels
    :param weights: list of scalars representing the weight of each loss
    :return: the weighted sum of the three losses
    """
    l1_loss = torch.nn.L1Loss()
    xe_loss = torch.nn.CrossEntropyLoss()
    pred_age, pred_gender, pred_race = y_pred
    true_age, true_gender, true_race = y
    losses = (
        l1_loss(pred_age, true_age),        # age: regression, L1
        xe_loss(pred_gender, true_gender),  # gender: classification
        xe_loss(pred_race, true_race),      # race: classification
    )
    return sum(w * loss for w, loss in zip(weights, losses))
0c349ad6a3f46d757c9314adac1bb5d81d902709
127,859
def distance(plane):
    """Distance the plane is from the origin along its normal.

    A negative value indicates the plane is facing the origin.
    """
    # The fourth component of the plane representation holds the distance.
    return plane[3]
0c795889608f52faea30932642fbe86ff65fe985
127,860
def task_9_count_customers_by_country_with_than_10_customers(cur):
    """
    List the number of customers in each country.
    Only include countries with more than 10 customers.

    Args:
        cur: psycopg cursor
    Returns: 3 records
    """
    query = """SELECT COUNT(*), country FROM customers GROUP BY country HAVING count(*)>10"""
    cur.execute(query)
    rows = cur.fetchall()
    return rows
8a794cab5487b56bd7fa019ce6857a9df48c20ec
127,861
def _fixed_str_size(data): """ Useful for a Pandas column of data that is at its base a string datatype. The max length of all records in this column is identified and returns a NumPy datatype string, which details a fixed length string datatype. Py3 NumPy fixed string defaults to wide characters (|U), which is unsupported by HDF5. """ str_sz = data.str.len().max() return "|S{}".format(str_sz)
ab2bb82befc875005f43751f81c89065f189a55e
127,862
def is_generalizable(scene1, scene2, relation_dataset):
    """Return True if scene1 can be generalized to scene2."""
    # Never use the same scene as reference and initial scene: the loss
    # would always be 0.
    if scene1.name == scene2.name:
        return False
    # Inside-relations (labels 1, 5, 6) additionally require that scene2
    # offers an object that can contain the other one.
    inside_labels = (1, 5, 6)
    if relation_dataset.scene_to_label[scene1.name] in inside_labels:
        containers = ('bowl, boxbase', 'muesli', 'pot', 'cornellbox')
        if not any(name in scene2.name for name in containers):
            return False
    return True
4801d755c2154f02dd602db2dacef3ad36c63d37
127,864
def menor_de_tres_numeros(n1, n2, n3):
    """Find the smallest of three numbers."""
    smaller = n2
    if n1 < n2:
        smaller = n1
    if n3 < smaller:
        return n3
    return smaller
7f7556a4b8be54c54facc2985296c4a32f84e4bb
127,867
def expected(df, ts):
    """Return *df* with 'model' and 'scenario' columns taken from *ts*."""
    labels = {'model': ts.model, 'scenario': ts.scenario}
    return df.assign(**labels)
742255d9b69d3ae27a0726402a46f8bdea525826
127,868
def to_text(value, encoding='utf-8'):
    """
    Makes sure the value is decoded as a text string.

    :param value: The Python byte string value to decode.
    :param encoding: The encoding to use.
    :return: The text/unicode string that was decoded.
    """
    # six.text_type is simply `str` on Python 3 -- the py2/3 shim
    # dependency is unnecessary and has been dropped.
    if isinstance(value, str):
        return value
    return value.decode(encoding)
ee176ab900cc34d4b4f533dacf6d2d7055998021
127,872
def cross_entropy_cost_derivative(a, y):
    """The derivative of the cross-entropy cost function.

    Used in backpropagation to calculate the error between the output of
    the network ('a') and the label ('y').
    """
    return a - y
b4f33539e4bd96fa7238d455e6b1e4744216a85f
127,885
import struct


def read_i32(f):
    """
    Read a little-endian signed 32-bit integer from a file-like object.

    :param f: file handler or serial file
    :return: (int32_t)
    """
    raw = bytearray(f.read(4))
    (value,) = struct.unpack('<l', raw)
    return value
6dee728d131ac961cb379b46209393fec6863cec
127,888
def not_empty(curve):
    """Tell whether the curve's underlying dataframe holds any rows."""
    frame = curve.df
    return not frame.empty
1ea0742daa1ed4ecec349d8ffd2fe81edf4e7395
127,890
from typing import Optional


def parse_url_params(params: Optional[dict]) -> Optional[dict]:
    """Generate parameter dict and filter Nones."""
    if not params:
        return None
    filtered = {key: val for key, val in params.items() if val is not None}
    # Collapse an all-None dict to None as well.
    return filtered or None
deacafdec7b2efd8640dc0abf8dfdc7a3e32c4ff
127,892
def find_best_plan(pool):
    """Return the plan with the highest fitness in *pool* (first on ties)."""
    best = pool[0]
    for candidate in pool[1:]:
        if candidate.get_fitness() > best.get_fitness():
            best = candidate
    return best
ea1caddbd141296720e9ccb0c5cb1eb77fc30574
127,893
def parse_hostname_from_filename(file):
    """Parse the hostname from a file path.

    Strips both POSIX and Windows directory components, then cuts the name
    at the first dot to drop all extensions.
    """
    basename = file.split("/")[-1].split("\\")[-1]
    return basename.split(".")[0]
6ba82e4be6245b225e91d68904f727b89db59725
127,895
def get_user_input(message):
    """Prompt repeatedly until the user enters a non-empty string."""
    while True:
        response = input(message)
        if response:
            return response
faab505ba96b9478081ee13e5f1985b8ed50271a
127,896
from pathlib import Path


def read_plain_text(fname: Path) -> str:
    """
    Read a plain-text file.

    Args:
        fname: name of the text file

    Returns:
        text string from the file
    """
    # Pin the encoding: the platform default is not UTF-8 everywhere
    # (e.g. cp1252 on Windows), which silently corrupts non-ASCII text.
    with open(fname, encoding="utf-8") as f_in:
        return f_in.read()
8717f0636772925581e074387475ee885b7d32a9
127,897
def merge(a, b):
    """Merge two dicts and return the result; *b* wins on key conflicts.

    BUG FIX: the old version called a.update(b) and returned *a*, silently
    mutating the caller's first dict while the docstring only promised a
    returned result.  The merge now builds a new dict and leaves both
    inputs untouched.
    """
    merged = dict(a)
    merged.update(b)
    return merged
ac2689c94d228c834fda10f3e3d609824135d8cc
127,899
def llist(self, nl1="", nl2="", ninc="", lab="", **kwargs):
    """Lists the defined lines.

    APDL Command: LLIST

    Parameters
    ----------
    nl1, nl2, ninc
        List lines from NL1 to NL2 (defaults to NL1) in steps of NINC
        (defaults to 1).  If NL1 = ALL (default), NL2 and NINC are ignored
        and all selected lines [LSEL] are listed.  If NL1 = P, graphical
        picking is enabled and all remaining command fields are ignored
        (valid only in the GUI).  A component name may also be substituted
        for NL1 (NL2 and NINC are ignored).

    lab
        Determines what type of listing is used (one of the following):

        (blank) - Prints information about all lines in the specified
        range.

        RADIUS - Prints the radius of certain circular arcs, along with the
        keypoint numbers of each line.  Straight lines, non-circular
        curves, and circular arcs not internally identified as arcs (which
        depends upon how each arc is created) will print a radius value of
        zero.

        LAYER - Prints layer-mesh control specifications.

        HPT - Prints information about only those lines that contain hard
        points.  HPT is not supported in the GUI.

        ORIENT - Prints a list of lines, and identifies any orientation
        keypoints and any cross section IDs that are associated with the
        lines.  Used for beam meshing with defined orientation nodes and
        cross sections.

    Notes
    -----
    There are 2 listings for the number of element divisions and the
    spacing ratio.  The first listing shows assignments from LESIZE only,
    followed by the "hard" key (KYNDIV).  See LESIZE for more information.
    The second listing shows NDIV and SPACE for the existing mesh, if one
    exists.  Whether this existing mesh and the mesh generated by LESIZE
    match at any given point depends upon meshing options and the sequence
    of meshing operations.  A "-1" in the "nodes" column indicates that the
    line has been meshed but that there are no interior nodes.

    An attribute (TYPE, MAT, REAL, or ESYS) listed as a zero is unassigned;
    one listed as a positive value indicates that the attribute was
    assigned with the LATT command (and will not be reset to zero if the
    mesh is cleared); one listed as a negative value indicates that the
    attribute was assigned using the attribute pointer [TYPE, MAT, REAL, or
    ESYS] that was active during meshing (and will be reset to zero if the
    mesh is cleared).

    This command is valid in any processor.
    """
    command = "LLIST,{},{},{},{}".format(nl1, nl2, ninc, lab)
    return self.run(command, **kwargs)
8a6b258be0c34d930d01dd27791b3c2ec2708ec5
127,909
def build_type_dict(converters):
    """
    Build the type dictionary for user-defined type converters, used by the
    :mod:`parse` module.  Each converter must be callable and carry a
    "name" attribute.

    :param converters: List of type converters (parse_types)
    :return: Type converter dictionary
    """
    registry = {}
    for convert_fn in converters:
        assert callable(convert_fn)
        registry[convert_fn.name] = convert_fn
    return registry
b19af359448d06898948a9f7afa121c1c3095d24
127,911
from typing import List
from typing import Optional


def mult_list(list_: List[int], modulus: Optional[int] = None) -> int:
    """
    Multiply a list of numbers, optionally inside a modular group.

    :param list_: list of elements
    :param modulus: modulus to be applied
    :return: product of the elements in the list modulo the modulus
    """
    product = 1
    for element in list_:
        product *= element
        # Reduce after every multiplication to keep intermediates small.
        if modulus is not None:
            product %= modulus
    return product
544fe0c48564691032e08ee2fe142e12c0ac8bcd
127,920
def filter_from_data(data, df):
    """
    Restrict *data* to the rows whose index labels appear in *df*.
    NaN index labels in *df* are ignored.
    """
    wanted = df.index.dropna()
    return data.loc[wanted]
509377ca3408168b62d507a7721d117126becad8
127,922
def merge(left, right):
    """Merge two sorted integer sublists into a single sorted list."""
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        # Take the smaller head; on ties the right sublist wins, matching
        # the original strict-< comparison.
        if left[i] < right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # One side is exhausted; the remainder of the other is already sorted.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
7696e3d56592da603230236c9303d0758a95b7b7
127,929
def reorganize_columns(df):
    """
    Rearrange the columns in the data frame into the standard benchmarking
    order; standard columns the frame lacks are skipped and non-standard
    columns are dropped.

    Parameters
    ----------
    df : pandas data frame
        The building benchmarking data for a city

    Returns
    -------
    df
        The modified data frame
    """
    fixed_column_order = ['BuildingID', 'Year', 'City', 'State', 'ZIPCode',
                          'BuildingType', 'PrimaryPropertyUse', 'YearBuilt',
                          'PropertyGFA(sf)', 'SiteEUI(kBtu/sf)',
                          'SourceEUI(kBtu/sf)', 'SiteEUIWN(kBtu/sf)',
                          'SourceEUIWN(kBtu/sf)']
    present = [col for col in fixed_column_order if col in df.columns]
    return df[present]
b8f943ba886b1805b5be3f03233315167c17242a
127,933
def emits(name: str):
    """Decorator that marks a function for what it will desugar.

    Also turns the function into a classmethod.
    """
    def mark(fn):
        fn.emitter_for = name
        return classmethod(fn)
    return mark
905a35b686180f552b0b91b04fe146eab8832cc3
127,941
def _mjd2000_to_decimal_year_simple(mjd2k): """ Convert Modified Julian Date 2000 to decimal year. """ return 2000.0 + mjd2k / 365.25
f0c43a7bbc275ecaf7efccdfe324434bf467d03e
127,944
import torch


def to_float(value):
    """Convert *value* to a float32 torch tensor."""
    tensor = torch.tensor(value)
    return tensor.type(torch.float)
a7e55a4d6917b93f71a2ffe268e471bc7ad65d16
127,945
def get_attribute(target, name):
    """
    Get an attribute from a Dataset or Group.

    Parameters
    ----------
    target : Dataset or Group
        Dataset or Group to get the attribute of.
    name : str
        Name of the attribute to get.

    Returns
    -------
    value
        The value of the attribute if it is present, otherwise ``None``.
    """
    if name in target.attrs:
        return target.attrs[name]
    return None
7c80d02c4f079055ec534cdf15f17cb0f7d5cde5
127,946
import sympy


def next_prime(num):
    """Return the next prime after *num*, skipping 2.

    Inputs below 3 map directly to 3 so the result is never 2.
    """
    if num < 3:
        return 3
    return sympy.nextprime(num)
43557578d1ffe135d7293011b2487cd0f6c994eb
127,947
def convert_lx_to_qx(lx_list: list):
    """Convert a list of lx values to a qx list.

    q_i = 1 - l_{i+1} / l_i for every consecutive pair of lx values.
    """
    return [1 - nxt / cur for cur, nxt in zip(lx_list, lx_list[1:])]
07df65b7a86ba09b1242945e51daeae5bfb4fb97
127,951
def update_params(params, categoricals, constants):
    """
    Update params with categoricals and constants for the fitting process.

    params: params proposed by the tuner (updated in place and returned)
    categoricals / constants: iterables of (key, value) pairs.

    Examples of the format for SVM sigmoid hyperpartition:
        categoricals = (('kernel', 'poly'), ('probability', True), ('_scale', True))
        constants = [('cache_size', 15000)]

    BUG FIX: the old code evaluated `categoricals + constants`, which
    raises TypeError when one argument is a tuple and the other a list --
    exactly the mix shown in the example above.  The two sequences are
    now iterated separately.
    """
    for pairs in (categoricals, constants):
        for key, value in pairs:
            params[key] = value
    return params
7dc826d7824b81eb289aab4b989c877d77bd27ae
127,952
import re


def get_field_data(raw_data):
    """
    Return the key/value pairs from raw data that are meant to refer to
    actual model fields (keys starting with an ASCII letter).

    Args:
        raw_data (dict):

    Returns:
        dict:
    """
    result = {}
    for key, value in raw_data.items():
        if re.match(r"^[a-zA-Z].*", key) is not None:
            result[key] = value
    return result
55293c6d599dcb3541febf670be6d74932181633
127,953
import requests
import copy


def get_dimensions_citations_web(headers, altmetric_id):
    """
    Use the dimensions web URL and requests API to obtain the citations for
    a given altmetric id associated with a scholarly article

    Parameters
    ----------
    arg1 | headers: str
        The login headers from dimensions to query
    arg2 | altmetric_id: int
        The altmetric id of a scholarly article

    Returns
    -------
    Dictionary
        dict -- the first matching publication's metadata with a
        'first_name' field holding the first author's full name, or an
        empty dict when nothing was found or the request failed
    """
    # create the DSL query; str([altmetric_id]) renders e.g. "[123]",
    # which serves as the `in` list literal of the DSL
    query = """search publications where altmetric_id in""" + \
        str([altmetric_id]) + \
        """return publications[id+doi+altmetric_id+title+times_cited+authors]"""
    # Execute DSL query.
    resp = requests.post(
        'https://app.dimensions.ai/api/dsl.json',
        data=query.encode(),
        headers=headers)
    # check for 200 status and a non-empty publications list
    if resp.status_code == 200 and 'publications' in resp.json() \
        and len(resp.json()['publications']) > 0:
        # deep-copy the first publication so the cached response JSON
        # is not mutated below
        response = copy.deepcopy(resp.json()['publications'][0])
        if 'authors' in response.keys():
            # collapse the author list to the first author's display name
            # NOTE(review): assumes authors[0] has both 'first_name' and
            # 'last_name' keys -- confirm against the Dimensions schema
            response['first_name'] = response['authors'][0]['first_name'] + \
                ' ' + response['authors'][0]['last_name']
            # remove the authors key
            del response['authors']
        # return the final dict
        return response
    else:
        # nothing found (or non-200): return an empty dict
        return dict()
b31a8ef784f9f5bc014085d735ce36cc5a351976
127,961
def at_multimatch_cmd(caller, matches):
    """
    Format multiple command matches to a useful error.

    Args:
        caller (Object): Calling object.
        matches (list): A list of 5-tuples whose first three entries are
            `(cmdname, arg, cmd)`; the last two entries are unused here.

    Returns:
        formatted (str): A nicely formatted string, including eventual errors.
    """
    lines = ["There were multiple matches:"]
    for num, match in enumerate(matches):
        cmdname, arg, cmd, _, _ = match
        get_extra_info = cmd.get_extra_info(caller)
        lines.append("\n %s-%s%s" % (num + 1, cmdname, get_extra_info))
    return "".join(lines)
f94736aabb5250aa5aa4a618c0d074ea5d58cfe9
127,964
import re


def vam_api_url_to_collection_url(api_url) -> str:
    """
    Return a human-readable collection URL, given a machine-readable API
    URL.  Returns the original URL if `api_url` doesn't look like a V&A
    API path.

    This has only been tested for searches and API calls with ?id_person
    and ?id_organisation query strings.
    """
    if "api.vam.ac.uk" not in api_url:
        return api_url
    return re.sub(
        r"https://api.vam.ac.uk/v2/objects/",
        r"https://collections.vam.ac.uk/",
        api_url,
    )
90492d41486b5b8dcef272b9e216cabef46b70e5
127,971
def unsplit_svn_url(tup):
    """Re-join a subversion URL that was split by the split_svn_url function."""
    repo, prefix, project, branch, suffix, peg = tuple(tup)
    # Falsy repo/prefix fall back to the '^' (repo-root) and '/' defaults.
    parts = [repo or '^', prefix or '/']
    if project:
        parts += [project, '/']
    if branch:
        parts += [branch, '/']
    if suffix:
        parts.append(suffix)
    if peg:
        parts += ['@', str(peg)]
    return ''.join(parts)
143c5a729de420ccaac8284b60765eb612970774
127,974
def get_num_iter(x, y, z=(0 + 0j), num_iter=0):
    """Return the iteration count at which z_{i+1} = z_i**2 + c diverges.

    c = x + y*1j and "diverges" means abs(z) > 10.  If abs(z) has not
    exceeded 10 after 50 iterations, 0 is returned.

    Parameters:
        x - The x-coordinate (or real part) of point c
        y - The y-coordinate (or imaginary part) of point c
        z - Current iterate (starts at 0)
        num_iter - current number of iterations of z (starts at 0)
    """
    c = x + y * 1j
    # Iterative form of the original tail recursion: identical trace.
    while True:
        z = z ** 2 + c
        if num_iter >= 50:
            return 0
        if abs(z) > 10:
            return num_iter
        num_iter += 1
00d0c5986f948f131eb01e2ef797bc17e74e7865
127,977
import ast


def compile_function_ast(fn_ast):
    """Compile a function AST (a Module wrapping a single FunctionDef)
    into a code object suitable for use in eval/exec."""
    assert isinstance(fn_ast, ast.Module)
    fndef_ast = fn_ast.body[0]
    assert isinstance(fndef_ast, ast.FunctionDef)
    # Use the function's own name as the pseudo-filename for tracebacks.
    filename = "<%s>" % fndef_ast.name
    return compile(fn_ast, filename, mode='exec')
1f2143c67f2381af2015c12ced4336097aa74811
127,978
def has_streaming_response(function_data):
    """Whether the function has a stream_response."""
    # Missing key defaults to False; a stored value is returned as-is.
    default = False
    return function_data.get("stream_response", default)
730f258c37945872c00245c8a8174bf4672e1874
127,981
def ascending_digits(password):
    """Check whether each digit in the number is >= the one before it.

    Parameters
    ----------
    password : int
        password number

    Returns
    -------
    bool
        True if all digits are equal to or larger than the one before,
        False if not.
    """
    digits = [int(d) for d in str(password)]
    # Compare each adjacent pair; all() short-circuits on the first drop.
    return all(a <= b for a, b in zip(digits, digits[1:]))
de1cffc7c43162c2eb7b5a673c49861fa30e138e
127,989
import pathlib


def ensure(path: pathlib.Path):
    """Create the parent directories of *path* if they do not exist.

    Parameters
    ----------
    path : pathlib.Path
        The given path

    Returns
    -------
    pathlib.Path
        The given path, with parent directories created if they were
        missing.
    """
    parent = path.parent
    parent.mkdir(parents=True, exist_ok=True)
    return path
3fc0b71fdc9ea8e971a05f04754c065d9c344bd1
127,996
import re


def parse_disk(line):
    """Parse a whitespace-separated disk line into a dict.

    The first field carries the position digits (extracted with a regex);
    the remaining four fields are size, used, available and use-percent,
    each with a trailing unit character that is stripped.
    """
    position_field, *fields = line.split()
    position = tuple(int(num) for num in re.findall(r'(\d+)', position_field))
    size, used, available, use_percent = (int(value[:-1]) for value in fields)
    return {
        'position': position,
        'size': size,
        'used': used,
        'available': available,
        'use_percent': use_percent,
    }
06db0e0b2831f0150f355de174f0aa5aed45a8f5
127,997
def Factorial(integer):
    """Iterative factorial function for non-negative integers."""
    assert(integer >= 0)
    # Small values come straight from a lookup table.
    if integer <= 4:
        return [1, 1, 2, 6, 24][integer]
    # Continue the product from 4! = 24 upward.
    result = 24
    for mult in range(5, integer + 1):
        result *= mult
    return result
764fb35870112b34758c9ba3a36d82e3fc03bb03
127,998
import socket


def check_connection(address, port):
    """Test whether a TCP connection can be made to (address, port)."""
    assert type(address) == str, "address -> must be a string"
    assert type(port) == int, "port -> must be an integer"
    sock = socket.socket()
    try:
        sock.connect((address, port))
    except Exception:
        return False
    else:
        return True
    finally:
        # Always release the socket, whichever way the attempt went.
        sock.close()
da9314d9ab5e396ccc215ed1f2fa60633dcd5fa5
128,000
def _get_sg_name_field(entity_type): """ Returns the standard Shotgun name field given an entity type. :param entity_type: shotgun entity type :returns: name field as string """ name_field = "code" if entity_type == "Project": name_field = "name" elif entity_type == "Task": name_field = "content" elif entity_type == "HumanUser": name_field = "login" return name_field
255ccf02ca65aa74929489d1dda1eb6f6f09c70c
128,001
def hour_angle_deg(true_solar_time):
    """Returns Hour Angle in Degrees, with True Solar Time, true_solar_time."""
    # One degree per four minutes of solar time, shifted so solar noon is 0.
    quarter = true_solar_time / 4
    return quarter + 180 if quarter < 0 else quarter - 180
822ea01bd5d52014afd24b72cb82d10413566d78
128,002
import json


def load_json(path):
    """Read the UTF-8 file at *path* and return the parsed JSON object."""
    with open(path, encoding="utf-8") as handle:
        return json.load(handle)
50ab4b6ad320073612006d44e0356ade0f2f4aac
128,010
def get_image_name(name: str, tag: str, image_prefix: str = "") -> str:
    """Get a valid versioned image name.

    Args:
        name (str): Name of the docker image.
        tag (str): Version to use for the tag.
        image_prefix (str, optional): The prefix added to the name to indicate
            an organization on DockerHub or a completely different repository.

    Returns:
        str: a valid docker image name based on: prefix/name:tag
    """
    image = f"{name.strip()}:{tag.strip()}"
    if image_prefix:
        # Normalise the prefix so exactly one "/" separates it from the name.
        image = f"{image_prefix.strip().rstrip('/')}/{image}"
    return image
e8826186e4ecc1e8a912772b027adaf21a10c1ab
128,011
def rearrange_args(*args, **kwargs):
    """
    Given positional arguments and keyword arguments, return a list of
    tuples containing the type of argument and number of the argument as
    well as its value.

    Parameters:
        *args: Random positional arguments.
        **kwargs: Random keyword arguments.

    Returns:
        List of tuples where the first value of the tuple is the type of
        argument with its number and the second value is the value of the
        argument. Keyword arguments' first value also contains the key of
        the keyword argument.

    >>> rearrange_args(10, False, player1=[25, 30], player2=[5, 50])
    [('positional_0', 10), ('positional_1', False), \
('keyword_0_player1', [25, 30]), ('keyword_1_player2', [5, 50])]
    >>> rearrange_args(no_positional=True)
    [('keyword_0_no_positional', True)]
    """
    labeled = [(f"positional_{index}", value)
               for index, value in enumerate(args)]
    labeled += [(f"keyword_{index}_{key}", value)
                for index, (key, value) in enumerate(kwargs.items())]
    return labeled
0c502215136f94e6356182663426dfa477ae81d3
128,014
def get_location_list(state_alert_list):
    """Extract ``[lat, lon]`` pairs from a list of alert dicts.

    Each element of *state_alert_list* must provide ``"lat"`` and ``"lon"``
    keys. A comprehension replaces the original append loop (same result,
    idiomatic construction).
    """
    return [[item["lat"], item["lon"]] for item in state_alert_list]
dfcf647d1fc356a855d38d74cef8a20c942d7e6a
128,015
import json


def load_json(path: str, encoding='utf-8'):
    """Parse the JSON document stored at *path* and return the result."""
    with open(path, 'r', encoding=encoding) as stream:
        document = json.load(stream)
    return document
a7ee6b7c88c8bf5492c4d664d117a615905e4982
128,017
import random


def get_test_data(test_data):
    """Pick one random record from a csv dataframe and return it as a dict."""
    records = test_data.to_dict(orient='records')
    # sample(..., 1) keeps the original RNG consumption; unpack the
    # single-element list it returns.
    (record,) = random.sample(records, 1)
    return record
c44387297eb8109749194219bd9ddfb058378758
128,019
def as_instruction_packet(dxl_id, instruction, *params):
    """Constructs instruction packet for sending a command.

    Args:
        dxl_id: An integer representing the DXL ID number
        instruction: Hex code representing instruction types for DXL command
            packet (e.g., Read, Write, Ping, etc.)
        *params: Depending on the instruction, start address of data to be
            read, length of data, data to write, etc.

    Returns:
        A bytearray representing the instruction packet
    """
    packet = bytearray((0xff, 0xff, dxl_id, len(params) + 2, instruction))
    packet.extend(params)
    # Checksum is ~(ID + LENGTH + INSTRUCTION + PARAMS) & 0xFF, i.e. the
    # inverted sum of everything after the two 0xFF header bytes.
    packet.append(~sum(packet[2:]) & 0xFF)
    return packet
2cfcaa2ee97168a26edafa05e179028a1a24e1ac
128,020
import socket


def ip_to_integer(ip_address):
    """
    Converts an IP address expressed as a string to its representation
    as an integer value and returns a tuple (ip_integer, version), with
    version being the IP version (either 4 or 6).

    Both IPv4 addresses (e.g. "192.168.1.1") and IPv6 addresses
    (e.g. "2a02:a448:ddb0::") are accepted.

    Raises:
        ValueError: if *ip_address* is neither valid IPv4 nor valid IPv6.
    """
    for family, version in ((socket.AF_INET, 4), (socket.AF_INET6, 6)):
        try:
            packed = socket.inet_pton(family, ip_address)
        except OSError:
            # inet_pton raises OSError for an address invalid in this
            # family; try the next one. (The original bare "except: pass"
            # silently swallowed every exception type.)
            continue
        # int.from_bytes replaces the binascii.hexlify + int(..., 16) detour.
        return (int.from_bytes(packed, "big"), version)
    raise ValueError("invalid IP address")
ebe704befab52ed82767dc4a6a09ead89a6947d9
128,023
def get_wait_time(retries, args):
    """
    Calculates how long we should wait to execute a failed task, based on
    how many times it's failed in the past.

    Args:
        retries: An int that indicates how many times this task has failed.
        args: A dict that contains information about when the user wants to
            retry the failed task ('min_backoff_sec', 'max_doublings',
            'max_backoff_sec').

    Returns:
        The amount of time, in seconds, that we should wait before
        executing this task again.
    """
    min_backoff = float(args['min_backoff_sec'])
    max_backoff = float(args['max_backoff_sec'])
    # Doubling stops once we hit max_doublings, or after every retry so far.
    doublings = min(int(args['max_doublings']), retries)
    return min(2 ** (doublings - 1) * min_backoff, max_backoff)
6c4b88e0b0150ab963314333b798eb078f3e343f
128,024
def filter_dict(d, keys):
    """Returns a dictionary which contains only the keys in the list `keys`."""
    filtered = {}
    for key, value in d.items():
        if key in keys:
            filtered[key] = value
    return filtered
8ac8772efb161669f6aff0977b76b07a1c2a2f14
128,027
def advance_version(v, bumprule):
    """
    Advance the version based on a version bump rule.

    *v* is a dotted version string, optionally containing a 'v' prefix
    (e.g. "v1.2.3"); *bumprule* is "major", "minor" or "patch". Any other
    rule returns the version unchanged (minus the 'v').
    """
    major, minor, patch = (int(part) for part in v.replace('v', '').split('.'))
    if bumprule == "major":
        major, minor, patch = major + 1, 0, 0
    elif bumprule == "minor":
        minor, patch = minor + 1, 0
    elif bumprule == "patch":
        patch += 1
    return "{}.{}.{}".format(major, minor, patch)
64ac8fb9f006314636d74d5a6fea9c811b26e412
128,029
def invalid_usb_device_protocol(request):
    """
    Fixture that yields an invalid USB device protocol.
    """
    # NOTE(review): this reads ``request.param``, so it is presumably a
    # pytest parametrized fixture — the parameter list would live on a
    # ``@pytest.fixture(params=...)`` decorator or an indirect
    # parametrization elsewhere in the suite (not visible here); confirm.
    return request.param
d6bbf3f3c321de2800b82bb32a03fe9b1f892de1
128,035
def get_interfaces(yaml):
    """Return a list of all interface and sub-interface names"""
    names = []
    for ifname, iface in yaml.get("interfaces", {}).items():
        names.append(ifname)
        # Sub-interface names are "<parent>.<numeric subid>".
        for subid in iface.get("sub-interfaces", {}):
            names.append(f"{ifname}.{int(subid)}")
    return names
a529ec0f4dca31f1d29a790079cb0b3895891da5
128,037
def dosegrid_extents_positions(extents, dd):
    """Determine dose grid extents in patient coordinate indices.

    Parameters
    ----------
    extents : list
        Dose grid extents in pixel coordintes: [xmin, ymin, xmax, ymax].
    dd : dict
        Dose data from dicomparser.GetDoseData.

    Returns
    -------
    list
        Dose grid extents in patient coordintes: [xmin, ymin, xmax, ymax].
    """
    # lut[0] maps x pixel indices, lut[1] maps y pixel indices.
    lut_x, lut_y = dd['lut'][0], dd['lut'][1]
    xmin, ymin, xmax, ymax = extents
    return [lut_x[xmin], lut_y[ymin], lut_x[xmax], lut_y[ymax]]
71598afe3e6489f89c34035a1e1a9ed117d7e82f
128,039
def _get_jira_label(gh_label): """ Reformat a github API label item as something suitable for JIRA """ return gh_label["name"].replace(" ", "-")
0e734c64198b6ccab6515317d60b6fa799ae8870
128,051
import itertools


def perm_parity(input_list):
    """
    Determines the parity of the permutation required to sort the list.
    Outputs 0 (even) or 1 (odd).
    """
    # Count inversions over every index pair; parity is that count mod 2.
    inversions = sum(
        1
        for i, j in itertools.combinations(range(len(input_list)), 2)
        if input_list[i] > input_list[j]
    )
    return inversions % 2
ae6a1feb170b26e0c5f1b580584889d2ecdea867
128,053
def parse_claspre_value(raw_value):
    """Convert values from claspre to floats.

    "No"/"Yes"/"NA" map to -1.0/1.0/0.0; anything else is parsed as a float.
    """
    special = {
        "No": -1.0,
        "Yes": 1.0,
        "NA": 0.0,
    }
    if raw_value in special:
        return special[raw_value]
    return float(raw_value)
b26c706e08ef1b4b777b21b7fe9c3bdef3b261c2
128,062
def package_name(package):  # type: (str) -> str
    """
    Returns the package name of the given module name
    """
    if not package:
        return ""
    # Everything before the last dot; a dotless name is its own package.
    prefix, dot, _ = package.rpartition(".")
    return prefix if dot else package
b7b9b1052f5c4bee0354867969f17b3df3fcd988
128,065
def check_shape_by_index(index, input_shape, min_size) -> bool:
    """
    Check the Shape of one object of the tuple.

    :param index: Index of Tuple to Test
    :param input_shape: Input Tuple to test
    :param min_size: Minimum size of of tuple object
    :return: 'bool' result of test
    """
    dimension = input_shape[index]
    if dimension is None:
        # Unknown (dynamic) dimension never counts as too small.
        return False
    return dimension < min_size
8b69464eef74e4a7d9e23bee9f4d82cf31172cc6
128,068
def get_axis_indexes(kernel_axis_length, center_index):
    """Calculate the kernel indexes on one axis depending on the kernel center.

    Args:
        kernel_axis_length (int): The length of the single axis of the
            convolutional kernel.
        center_index (int): The index of the kernel center on one axis.

    Returns:
        list[int]: offsets from ``-center_index`` through
        ``kernel_axis_length - center_index - 1``.
    """
    # The original append loop re-implemented list(range(...)).
    return list(range(-center_index, kernel_axis_length - center_index))
f056d91185200ba533bf03f164201c3eb7cef22b
128,071
def SET_INTERSECTION(*expressions):
    """
    Takes two or more arrays and returns an array that contains the
    elements that appear in every input array.
    https://docs.mongodb.com/manual/reference/operator/aggregation/setIntersection/
    for more details
    :param expressions: The arrays (expressions)
    :return: Aggregation operator
    """
    return {'$setIntersection': [*expressions]}
9656417f0cf4ed3672bc1b4ff30ee79fbf4b0cb1
128,075
def select_covar_types(nd, ns):
    """
    Heuristics to choose the types of covariance matrix to explore based on
    the data dimension and the (effective) number of samples per mixture
    component.

    :param nd: data dimension.
    :param ns: number of samples.
    :return: list of covariance types.
    """
    # "Plenty" of samples means at least 10 per dimension.
    plenty = ns >= (10 * nd)
    if nd <= 20:
        # low dimensional
        return ['full', 'tied'] if plenty else ['tied', 'diag']
    if nd <= 250:
        # medium dimensional
        if not plenty:
            return ['diag', 'spherical']
        covar_types = ['tied', 'diag']
        if nd <= 50:
            covar_types.append('full')
        return covar_types
    # high dimensional
    return ['diag', 'spherical']
90b715b5b549200ceeea1c7b8c6062d597151285
128,077
def _check_existence(directory):
    """Check that a directory exists."""
    # NOTE(review): relies on an ``.isdir()`` method, so *directory* is
    # presumably a ``py.path.local``-style object (pathlib spells this
    # ``is_dir``) — confirm against the callers.
    return directory.isdir()
2dddcca09edd596bb9b614edbc5185c4ce9ad82d
128,085
def count_words(line):
    """
    Count the number of words in a line. We only count words > 1 char.
    """
    # sum() over a generator avoids materializing the intermediate tuple
    # the original built just to take its len().
    return sum(1 for word in line.split() if len(word) > 1)
2355bc1657e488c254bf8d57884d91b8e499e0db
128,088
def positive_id(oid):
    """Return a positive identifier (for an edge)"""
    if oid > 0:
        return oid
    # Bitwise complement maps 0, -1, -2, ... onto -1's complement: a
    # distinct non-negative value for every non-positive input.
    return ~oid
9742ad65a4d4696e9602eb4c760633c4c1161e58
128,090
def _ensure_str(s): """convert bytestrings and numpy strings to python strings""" return s.decode() if isinstance(s, bytes) else str(s)
05f549166cc459371b380f62393bbc835aa7ff48
128,092
def solution(A, B, K):
    """
    Returns the number of integers within the range [A..B] that are
    divisible by K.

    Uses pure integer arithmetic: the original ``math.ceil(A / K)`` goes
    through float division, which loses precision once A exceeds 2**53;
    ``-(-A // K)`` computes the same ceiling exactly. Assumes K > 0 and
    A <= B, as before.
    """
    first = -(-A // K) * K   # smallest multiple of K that is >= A
    if first > B:
        return 0
    last = (B // K) * K      # largest multiple of K that is <= B
    return ((last - first) // K) + 1
8151c3d6f3ed1d6dacdfe0e2a0aaba5365a428e1
128,093
import torch


def spatial_broadcast(slots, resolution):
    """Broadcast per-slot feature vectors over a spatial grid.

    :param slots: tensor of shape [batch_size, num_slots, features]
    :param resolution: tuple of integers (height, width)
    :return: tensor of shape [batch_size * num_slots, height, width,
        features]; the batch and slot dimensions are flattened together and
        each feature vector is repeated at every spatial position
        (``expand`` returns a broadcast view, so no data is copied)
    """
    height, width = resolution
    flat = slots.view(-1, slots.size(2))   # [batch*num_slots, features]
    flat = flat[:, None, None, :]          # [batch*num_slots, 1, 1, features]
    return flat.expand(-1, height, width, -1)
777ec1970256ac4ae11dd9d882f81ab503e400fa
128,102
def yes_or_no(question):
    """Prompts user to answer a "Yes or No" question through keyboard input.

    Args:
        question (str) Question to ask.

    Returns:
        answer (bool) True for "Yes" and False for "No".
    """
    affirmative = set(['yes', 'y'])
    negative = set(['no', 'n'])
    # Keep asking until the reply is recognisably yes or no.
    while True:
        reply = input(question + " (y/n): ").lower().strip()
        if reply in affirmative:
            return True
        if reply in negative:
            return False
        print("Please respond with 'yes' or 'no'")
e7d4534746fe41d09a273c3de778d94b7970ce1f
128,103
import re


def is_end_of_period(date):
    """Returns True if the given date 'mm/dd' is the end of a pay period, False otherwise"""
    parts = re.split('\\W', date)
    month, day = int(parts[0]), int(parts[1])
    # Pay periods close on the 15th and on the last day of each month
    # (index 0 of the table is a placeholder so months index naturally).
    days_per_month = [-1, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    return day == 15 or day == days_per_month[month]
59f7810e7e1437981fb5f1cae38391c8ce619b74
128,107
def private_subnet(template, name):
    """Extract and return the specified subnet resource from the given template."""
    # Direct lookup in the template's resource mapping — presumably a dict
    # (or dict-like), so an unknown *name* would raise a KeyError-style
    # error; confirm against the template type used by callers.
    return template.resources[name]
42377eda5a5750647561520c3f28950b07759c06
128,108
def _parse_squad_name(team_id): """ Parse and clean the team's name. To try and match requested team names with the master squad ID list, passed names should be parsed to remove the common 'FC' and 'CF' tags, as well as force all strings to be lowercase and excess whitespace removed. Parameters ---------- team_id : string The requested team's name to be parsed. Returns ------- string Returns a ``string`` of the parsed team's name. """ irrelevant = [' FC', ' CF', 'FC ', 'CF '] for val in irrelevant: team_id = team_id.replace(val, '') name = team_id.lower().strip() return name
ea9e91392146da8d116571520b7374c326ba5d4a
128,109
import torch


def channel_split(x, split):
    """split a tensor into two pieces along channel dimension

    Args:
        x: input tensor whose channel dimension (dim 1) has size 2 * split
        split: (int) channel size for each pieces
    """
    assert x.size(1) == split * 2
    halves = torch.split(x, split, dim=1)
    return halves
169e8e429bf465eb57bf58b1d00ec6db357492a5
128,110