Columns: content (string, 39 – 14.9k chars) | sha1 (string, 40 chars) | id (int64, 0 – 710k)
def get_sample_mean_var(df, col):
    """Calculate sample mean and sample variance of a 1-d array (or Series).

    Parameters
    ----------
    df: Pandas DataFrame
    col: str
        Column from df with numeric data to be plotted

    Returns
    -------
    tuple: (sample mean (float), sample variance (float))
    """
    # by default np.var returns population variance.
    # ddof=1 to get sample var (ddof: delta degrees of freedom)
    data = df[col]
    return data.mean(), data.var(ddof=1)
374dd10010866d5723b347849476db5ab3c7a776
74,122
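A brief usage sketch for the get_sample_mean_var entry above, assuming the function is in scope; the DataFrame and column name are illustrative:

import pandas as pd

df = pd.DataFrame({"height": [1.2, 1.5, 1.8, 2.1]})
# sample variance uses ddof=1, so the divisor is n - 1
mean, var = get_sample_mean_var(df, "height")
print(mean, var)  # approximately 1.65 and 0.15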
import inspect


def _get_X_y_and_sample_weight(fit_func, fit_args, fit_kwargs):
    """
    Get a tuple of (X, y, sample_weight) in the following steps.

    1. Extract X and y from fit_args and fit_kwargs.
    2. If the sample_weight argument exists in fit_func, extract it from fit_args or
       fit_kwargs and return (X, y, sample_weight), otherwise return (X, y)

    :param fit_func: A fit function object.
    :param fit_args: Positional arguments given to fit_func.
    :param fit_kwargs: Keyword arguments given to fit_func.

    :returns: A tuple of either (X, y, sample_weight), where `y` and `sample_weight` may be
              `None` if the specified `fit_args` and `fit_kwargs` do not specify labels or
              a sample weighting.
    """
    _SAMPLE_WEIGHT = "sample_weight"

    def _get_Xy(args, kwargs, X_var_name, y_var_name):
        # corresponds to: model.fit(X, y)
        if len(args) >= 2:
            return args[:2]
        # corresponds to: model.fit(X, <y_var_name>=y)
        if len(args) == 1:
            return args[0], kwargs.get(y_var_name)
        # corresponds to: model.fit(<X_var_name>=X, <y_var_name>=y)
        return kwargs[X_var_name], kwargs.get(y_var_name)

    def _get_sample_weight(arg_names, args, kwargs):
        sample_weight_index = arg_names.index(_SAMPLE_WEIGHT)
        # corresponds to: model.fit(X, y, ..., sample_weight)
        if len(args) > sample_weight_index:
            return args[sample_weight_index]
        # corresponds to: model.fit(X, y, ..., sample_weight=sample_weight)
        if _SAMPLE_WEIGHT in kwargs:
            return kwargs[_SAMPLE_WEIGHT]
        return None

    fit_arg_names = list(inspect.signature(fit_func).parameters.keys())
    # In most cases, X_var_name and y_var_name become "X" and "y", respectively.
    # However, certain sklearn models use different variable names for X and y.
    # E.g., see: https://scikit-learn.org/stable/modules/generated/sklearn.multioutput.MultiOutputClassifier.html#sklearn.multioutput.MultiOutputClassifier.fit
    X_var_name, y_var_name = fit_arg_names[:2]
    Xy = _get_Xy(fit_args, fit_kwargs, X_var_name, y_var_name)
    sample_weight = (
        _get_sample_weight(fit_arg_names, fit_args, fit_kwargs)
        if (_SAMPLE_WEIGHT in fit_arg_names)
        else None
    )
    return (*Xy, sample_weight)
97cb3ae27cf66a8b7e648dee329c9257610e1252
74,124
def _path_resolve(obj):
    """
    Resolve file system path (PEP-519) objects to strings.

    :param obj: A file system path object or something else.
    :return: A string representation of a file system path object or, for
             anything that was not a file system path object, the original object.
    """
    if obj and hasattr(obj, "__fspath__"):
        return obj.__fspath__()
    return obj
55ea5e80bf932f4b1954fce7619d584e58ecc977
74,125
def format_station(station):
    """Format the station for a playlist description."""
    return '' if not station else f'on {station}'
ab84a4e5a43cd4027fd287a4dc65ee24f4049c78
74,128
def dansRectangle(x, y, cx, cy, L, H):
    """
    Test whether a point lies inside a rectangle.

    Parameters: (x, y) --> point to test,
                (cx, cy) --> top-left corner of the rectangle,
                L --> width of the rectangle,
                H --> height of the rectangle.
    Returns ``True`` if the point is inside the rectangle, ``False`` otherwise.
    """
    return cx <= x < cx + L and cy <= y < cy + H
9227ec67800e5f1552c01824b63434f8a0740d39
74,129
def standardize_df(df):
    """
    Standardize a dataframe (mean centered at 0 and unitary standard deviation)

    Args:
        df: pd.DataFrame with only numeric values
    Return:
        df: pd.DataFrame standardized
    """
    return (df - df.mean()) / df.std()
87ca7395f2d99bcdc64a621615cc64296e8cc2a6
74,130
from itertools import product


def get_subs(n):
    """Get list of all possible n-length sequences of genes"""
    return [''.join(sub) for sub in product('CATGN', repeat=n)]
7e733cc494218c463c417d39075db9a9a052cc44
74,134
def vap2ppmv(e, p):
    """
    Convert water vapor pressure into PPMV

    Args:
        e (float): water vapor pressure
        p (float): air pressure

    Returns:
        e (float): PPMV
    """
    return 1e6 * (e / (p - e))
896c99e2156be0e22f82caf661384974c30dc99d
74,147
from pathlib import Path


def load_common_config(file: Path) -> str:
    """
    Load common part of OpenVPN config.

    Parameters
    ----------
    file
        Path to config file.

    Returns
    -------
    String containing common part of VPN config.
    """
    with open(file, 'r') as inf:
        cfg = inf.read()
    return cfg
50db0605d0964f33ed619706fe72cc002f08e66d
74,148
import locale


def guess_decode(text):
    """Decode *text* with guessed encoding.

    First try UTF-8; this should fail for non-UTF-8 encodings.
    Then try the preferred locale encoding.
    Fall back to latin-1, which always works.
    """
    try:
        text = text.decode('utf-8')
        return text, 'utf-8'
    except UnicodeDecodeError:
        try:
            prefencoding = locale.getpreferredencoding()
            # decode with the guessed locale encoding (not the utf-8 default,
            # which already failed above)
            text = text.decode(prefencoding)
            return text, prefencoding
        except (UnicodeDecodeError, LookupError):
            text = text.decode('latin1')
            return text, 'latin1'
0c9f6011411b9191d34fb7939b760e474c797f56
74,154
def discrimine(pred, sequence):
    """Split a collection in two collections using a predicate.

    >>> discrimine(lambda x: x < 5, [3, 4, 5, 6, 7, 8])
    ([3, 4], [5, 6, 7, 8])
    """
    positive, negative = [], []
    for item in sequence:
        if pred(item):
            positive.append(item)
        else:
            negative.append(item)
    return positive, negative
23beece3fe4771fbe155d3960978d50e929f82b9
74,155
def is_container_stopped(driver):
    """ Check if the container is stopped """
    return driver.find_element_by_css_selector(".flex>.Stopped").is_displayed()
f0464ce10df9dc44b20418c8d3e824913a910e1a
74,157
def get_fields(d):
    """ Recursive function to get all possible fields for the input dict """
    key_list = []
    for key, value in d.items():
        temp_key_list = [key]
        if type(value) == dict:
            temp_key_list.append(get_fields(value))
        key_list.append(temp_key_list)
    return key_list
423de4fd1cda682c05e2e84ac4f96fc37c724203
74,160
def update_unexplored(scene, pacs):
    """ Updates the floor by eliminating the floor positions visited by any pac. """
    # Current positions of all pacs.
    all_pac_positions = set([x['position'] for x in pacs['mine'] + pacs['their']])
    mine_pac_positions = set([x['position'] for x in pacs['mine']])
    # Remove the current pac positions from the floor.
    scene['un_floor'] = scene['un_floor'].difference(all_pac_positions)
    # Update the pois and dead ends with my positions only.
    scene['un_floor_4'] = scene['un_floor_4'].difference(mine_pac_positions)
    scene['un_floor_3'] = scene['un_floor_3'].difference(mine_pac_positions)
    scene['un_floor_2_corner'] = scene['un_floor_2_corner'].difference(mine_pac_positions)
    scene['un_floor_2_aisle'] = scene['un_floor_2_aisle'].difference(mine_pac_positions)
    scene['un_floor_1'] = scene['un_floor_1'].difference(mine_pac_positions)
    return scene
a0b31cab07ca0bdfa94c7579e5947e990d1b65cf
74,164
import re


def remove_underlines(word):
    """Removes both underlines and the occasional grammar mark from words"""
    return re.sub("/.*$", "", word).replace("_", " ")
6d3f56df2d97953e27957587f4537aafc1846998
74,165
def major_formatter(x, pos):
    """Return formatted value with 2 decimal places."""
    return "[%.2f]" % x
ad5e26d562208975c757ab1c4cbf906433abb916
74,167
from pathlib import Path
from typing import List


def get_snippets(source, indent=4):
    """Get all code snippets from a given documentation source file."""
    if not source.endswith(".rst"):  # pragma: no cover
        source += ".rst"
    source_path = Path(__file__).parents[1] / "docs" / source
    lines = open(source_path).readlines()
    snippets: List[str] = []
    snippet: List[str] = []
    snippet_start = " " * indent
    for line in lines:
        if not line.rstrip() and snippet:
            snippet.append(line)
        elif line.startswith(snippet_start):
            snippet.append(line[indent:])
        else:
            if snippet:
                snippets.append("".join(snippet).rstrip() + "\n")
            snippet = []
    if snippet:
        snippets.append("".join(snippet).rstrip() + "\n")
    return snippets
45b3b52dcef65d9043613ee05726cb2a5ba343ae
74,169
def compute_start_end(dataset_str, thread):
    """ calculate start and end points for multi_threading """
    individuals_number = dataset_str.shape[0]
    number_each_thread = int(individuals_number / thread)
    start = [number_each_thread * i for i in range(thread)]
    end = start[1:]
    end.append(individuals_number)
    return start, end
cc1fcebc71b2e82876b98db33a80954306f4526b
74,172
def read_files(file_name):
    """Reads each file line by line."""
    with open(file_name, mode='r') as f:
        file_contents = f.read().splitlines()
    return file_contents
e4509c43a86d3d0af44b67e201dde9bd6af5292b
74,173
def condition_to_flag(condition):
    """ Cast boolean to unsigned int. """
    return 1 if condition else 0
3c5c2d1dc80d49112432cf6d1f5f7b0fe4e24a4a
74,174
def ts(strr):
    """Changes a string of the form b'\xc3' into the form "\xc3" """
    return str(strr)[1:].replace('\'', '\"')
ea617771088076d4b4718ad7c9f392f6329accb0
74,177
def checkEnergyDiff(row, col, lattice, temp, J=1, B=0, mu=1):
    """Calculates the energy difference of a specified site in a lattice
    assuming periodic boundary conditions. The difference is between being
    flipped and not being flipped.

    Inputs:
        row      row number of the chosen site
        col      column number of the chosen site
        lattice  the current lattice
        temp     the value for kT in our simulation
        J        coupling constant between neighboring sites
        B        external magnetic field
        mu       magnetic moment of the lattice site
    Output:
        energy   the energy difference at the given site
    """
    # Get number of rows and columns from lattice
    rows, cols = (lattice.shape[0], lattice.shape[1])
    # Calculate the energy
    energy = 2 * J * lattice[row][col] * (lattice[(row + rows - 1) % rows][col]
                                          + lattice[row][(col + 1) % cols]
                                          + lattice[(row + 1) % rows][col]
                                          + lattice[row][(col + cols - 1) % cols]) + (
        2 * B * mu * lattice[row][col])
    return energy
b34abd78de817f7fb88d1fae19761eeb449ed049
74,182
def generic_cmp(value1, value2):
    """
    Generic comparator of values which uses the builtin '<' and '>' operators.
    Assumes the values can be compared that way.

    Args:
        value1: The first value
        value2: The second value

    Returns:
        -1, 0, or 1 depending on whether value1 is less, equal, or greater than value2
    """
    return -1 if value1 < value2 else 1 if value1 > value2 else 0
c4329569560f75ffc54cff347a8f1900a7d7cf2d
74,188
def IsLowerCase(string):
    """ Return True if all characters in 'string' are lower case """
    ans = True
    for s in string:
        if s.isupper():
            ans = False
            break
    return ans
900c7056baeb12b0f7c9cec955dfdaa247518929
74,194
import json


def load_config_file(filename):
    """Return dictionary of parameters from config file."""
    try:
        fp = open(filename)
    except IOError:
        print("Error opening file " + filename)
        raise
    try:
        params = json.load(fp)
        return params
    except ValueError:
        print("Config file malformed.")
        raise
    finally:
        fp.close()
22b0d6e4fb376122a661331179f0c9093c8e53eb
74,195
def execute_code_if(condition, code, glob=None, loc=None):
    """
    Execute code if condition is true

    Args:
        condition (bool): if true the code is executed
        code (str): the code to be evaluated
        glob (dict): the global variables dictionary
        loc (dict): the local variables dictionary

    Returns:
        the object returned from the code executed, None otherwise
    """
    if not condition:
        return None
    if glob is None:
        glob = globals()
    if loc is None:
        loc = locals()
    return eval(code, glob, loc)
990ef76d24f29704d95d970b8b9f938455951b2a
74,196
import sqlite3


def query_db(dbfile, language="Python"):
    """
    Query kaggle database for scripts associated with a particular language

    :param dbfile: location of kaggle db
    :param language: language to query, default is Python.
                     IPython Notebook retrieves notebooks instead.
    :returns: database cursor
    """
    conn = sqlite3.connect(dbfile)
    c = conn.cursor()
    py_scripts_query = f"""
        select Scripts.AuthorUserId as user_id,
               Scripts.ScriptProjectId AS project_id,
               ScriptVersions.id as script_id,
               Scripts.ForkParentScriptVersionId as parent_id,
               ScriptVersions.ScriptContent as script
        from ScriptVersions, Scripts
        where Scripts.CurrentScriptVersionId = ScriptVersions.Id
          and Scripts.Id IS NOT NULL
          and ScriptVersions.ScriptLanguageId =
              (select Id from ScriptLanguages where Name = "{language}")
        group by ScriptContent"""
    print('Querying db...')
    c.execute(py_scripts_query)
    return c
559ae79bf34bbcc46f44c231658781639c7375bb
74,198
import re
from datetime import datetime


def match_to_datestamp(match: re.Match) -> datetime:
    """Helper function to convert match to a datetime object

    | match object must have 7 groups where
    | group 0 is year
    | group 1 is month
    | group 2 is day
    | group 3 is hour
    | group 4 is minute
    | group 5 is second
    | group 6 is millisecond"""
    groups = [int(elm) for elm in match.groups()]
    try:
        assert len(groups) == 7
    except AssertionError:
        print("Match object may be in wrong format; check the regular expression used.")
        raise AssertionError
    date = groups[:3]
    time = groups[3:-1]
    microseconds = groups[-1] * 1000  # convert millisecond to microsecond for datetime
    return datetime(date[0], date[1], date[2],
                    hour=time[0], minute=time[1], second=time[2],
                    microsecond=microseconds)
ab8e97c9254b82c7345514b164c5ac4abd1d798f
74,199
def split_octet(hexstr):
    """
    Split a hexadecimal string (without the prefix 0x) into a list of bytes
    described with a 2-length hexadecimal string
    """
    return [hexstr[i:i + 2] for i in range(0, len(hexstr), 2)]
73d107eeef10fa58a067820a04ca73bb1d222037
74,202
def object_type(value):
    """To make the openAPI object type show up in the docs."""
    return value
edc35614c7630e5a6ed610bbeb473e4a16e3767d
74,204
def coerce_bool(v, default):
    """Convert boolean-like values into a boolean."""
    if v in [True, False]:
        return v
    s = str(v).lower().strip()
    if s in ['true', 't', 'y', 'yes', '1']:
        return True
    if s in ['false', 'f', 'n', 'no', '0']:
        return False
    return default
4e3a1257526504d1581f91e7aa1a1db577ffec0f
74,213
def alphanumeric_table() -> dict[str, int]:
    """Dictionary with symbols for alphanumeric encoding

    Returns:
        dict[str, int]: A dictionary that contains data in the form (character: number)
    """
    table = {
        '0': 0, '1': 1, '2': 2, '3': 3, '4': 4,
        '5': 5, '6': 6, '7': 7, '8': 8, '9': 9,
        'A': 10, 'B': 11, 'C': 12, 'D': 13, 'E': 14,
        'F': 15, 'G': 16, 'H': 17, 'I': 18, 'J': 19,
        'K': 20, 'L': 21, 'M': 22, 'N': 23, 'O': 24,
        'P': 25, 'Q': 26, 'R': 27, 'S': 28, 'T': 29,
        'U': 30, 'V': 31, 'W': 32, 'X': 33, 'Y': 34,
        'Z': 35, ' ': 36, '$': 37, '%': 38, '*': 39,
        '+': 40, '-': 41, '.': 42, '/': 43, ':': 44
    }
    return table
72bdad0371b9d72690a30f9dc51cb1efebb6b789
74,218
def get_ca_pos_from_residues(df, res):
    """Look up alpha carbon positions of provided residues."""
    ca = df[df['atom_name'] == 'CA'].reset_index().set_index(
        ['pdb_name', 'model', 'chain', 'residue'])
    nb = ca.reindex(res)
    nb = nb.reset_index().set_index('index')
    return nb
1acd372c0de0e642279f68b7bec8a63b60d36ba1
74,223
def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """
    Pure Python implementation of P-Series algorithm

    :return: The P-Series starting from 1 to last (nth) term

    Examples:
    >>> p_series(5, 2)
    ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']
    >>> p_series(-5, 2)
    []
    >>> p_series(5, -2)
    ['1', '1 / 0.25', '1 / 0.1111111111111111', '1 / 0.0625', '1 / 0.04']
    >>> p_series("", 1000)
    ['']
    >>> p_series(0, 0)
    []
    >>> p_series(1, 1)
    ['1']
    """
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series
922d163ce123beb4f69e3dd3e8a82cff5809990c
74,225
def _check_connected(up_segment, core_segment, down_segment):
    """
    Check if the supplied segments are connected in sequence. If the `core`
    segment is not specified, it is ignored.
    """
    if not up_segment or not down_segment:
        return True
    up_first_ia = up_segment.first_ia()
    down_first_ia = down_segment.first_ia()
    if core_segment:
        if (core_segment.last_ia() != up_first_ia
                or core_segment.first_ia() != down_first_ia):
            return False
    elif up_first_ia != down_first_ia:
        return False
    return True
c459105d144b5e3d05d2b86a80287e0ccd23e671
74,227
def convert_to_dict(jvm_opt_file):
    """
    JVM parameter file converter to dictionary.

    :param jvm_opt_file: JVM parameters file
    :return: Dictionary with the parameters specified on the file.
    """
    opts = {}
    with open(jvm_opt_file) as fp:
        for line in fp:
            line = line.strip()
            if line:
                if line.startswith("-XX:"):
                    # These parameters have no value
                    key = line.split(":")[1].replace('\n', '')
                    opts[key] = True
                elif line.startswith("-D"):
                    key = line.split("=")[0]
                    value = line.split("=")[1].replace('\n', '')
                    value = value.strip()
                    if not value:
                        value = None
                    opts[key] = value
                else:
                    key = line.replace('\n', '')
                    opts[key] = True
    return opts
70306d1dedd067de014bcf5cc163f26ba07ef40a
74,228
def _GetTryJobDataPoints(analysis):
    """Gets which data points should be used to determine the next revision.

    Args:
        analysis (MasterFlakeAnalysis): The analysis entity to determine what
            data points to run on.

    Returns:
        A list of data points used to analyze and determine what try job to
        trigger next.
    """
    all_data_points = analysis.data_points
    # Include the suspected build itself first, which already has a result.
    data_points = [analysis.GetDataPointOfSuspectedBuild()]
    for i in range(0, len(all_data_points)):
        if all_data_points[i].try_job_url:
            data_points.append(all_data_points[i])
    return sorted(data_points, key=lambda k: k.commit_position, reverse=True)
eb2ad6a2dc9c396fd8cfa9c10ebd3dc0e21d32b0
74,231
import ast


def get_vars_in_expression(source):
    """Get list of variable names in a python expression."""
    # https://stackoverflow.com/questions/37993137/how-do-i-detect-variables-in-a-python-eval-expression
    variables = {}
    syntax_tree = ast.parse(source)
    for node in ast.walk(syntax_tree):
        if type(node) is ast.Name:
            variables[node.id] = 0  # Keep first one, but not duplicates
    result = list(variables.keys())  # Only return keys i.e. the variable names
    result.sort()  # Sort for uniqueness
    return result
795bd97bdafc28758b3500efb5fa89e6c2bf0198
74,236
def build_all_names_int(all_names_path="../data/persons"):
    """
    Read the values from the file and build a (person name -> integer)
    dictionary mapping.
    """
    cnt = 0
    N_map = {}
    with open(all_names_path, "r") as f:
        for line in f:
            name = line.strip().split("\t")[0]
            N_map[name] = cnt
            cnt += 1
    return N_map
d7f86aac822ca38db233f83897682b28026dc0ac
74,241
import json


def load_json(path_model):
    """Loads json object to dict

    Args:
        path_model: (string) path of input
    """
    with open(path_model) as f:
        data = json.load(f)
    return data
9dcab4abfd82c5e2a5fc362780d9be8cdcb513cb
74,244
def join_number(string, num, width=None):
    """ Join a number to the end of a string in the standard way

    If width is provided will backfill

    >>> join_number('fred', 10)
    'fred-10'

    >>> join_number('fred', 10, 3)
    'fred-010'
    """
    num = str(num)
    if width:
        num = num.rjust(width, '0')
    return string + '-' + str(num)
39ce886e53619db4c0674b0ca954f4944cd240af
74,245
def split(stats):
    """ Splits a list of sequences in halves.

    Sequences generated from MCMC runs should be split in half in order to be
    able to properly diagnose mixing.

    Args:
        stats: A list of sequences
    """
    n = stats[0].size
    return [s[i * (n // 2): (i + 1) * (n // 2)] for i in range(2) for s in stats]
912ae19d534b83244e888b7e59f2b3a79557a0be
74,247
def first_non_consecutive(arr):
    """
    Finds the first element within an array that is not consecutive.

    :param arr: An array of ints.
    :return: the first element not consecutive, otherwise None.
    """
    for i, j in enumerate(arr):
        if j != arr[0] + i:
            return j
    return None
020078d18df6f6b8c4ec7ae550f0dc98bb93bbb7
74,249
def is_ionic(comp):
    """Determines whether a compound is an ionic compound.

    Looks at the oxidation states of each site and checks if both anions
    and cations exist

    Args:
        comp (Composition): Composition to check

    Returns:
        (bool) Whether the composition describes an ionic compound
    """
    has_cations = False
    has_anions = False
    for el in comp.elements:
        if el.oxi_state < 0:
            has_anions = True
        if el.oxi_state > 0:
            has_cations = True
        if has_anions and has_cations:
            return True
    return False
116735f72b8b9a092d47195aa5f8a88d7be30ef1
74,253
from pathlib import Path


def file_path(relative_path):
    """Returns absolute path from relative path"""
    start_dir = Path(__file__).parent
    return Path(start_dir, relative_path)
58252bfcc383d86ba4333e38f948fdfabe978eb4
74,259
def get_optimal_value(capacity, items):
    """
    get_optimal_value function implements an algorithm for the fractional knapsack problem

    :param capacity: Number represents the total capacity of the bag
    :param items: list of list that contains values and weights of each item,
                  where items[𝑖] = [value(𝑖), weight(𝑖)]
    :return: decimal number represents the maximal value of fractions of items
             that fit into the bag of weight capacity.
    """
    out = 0
    # sort all items by their price_per_unit
    items.sort(key=lambda x: x[0] / x[1], reverse=True)
    for v, w in items:
        can_fit = capacity - w
        # if the element can fit into the bag, take the whole item.
        if can_fit >= 0:
            out += v
            capacity = can_fit
        # otherwise, take as much of the item's weight as possible (price_per_unit * capacity).
        else:
            out += (v / w) * capacity
            return out
    return out
6501672bec4c0860a0b47c36bd074dffb3a2826a
74,263
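A short usage sketch for the get_optimal_value entry above, assuming the function is in scope; the capacity and item list are illustrative, not from the dataset:

# items given as [value, weight]; ratios are 3.0, 2.0 and 4.0,
# so the greedy order is [120, 30], [60, 20], then a fraction of [100, 50]
items = [[60, 20], [100, 50], [120, 30]]
print(get_optimal_value(50, items))  # 180.0: both high-ratio items fit, capacity is then exhausted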
def tif_filter(time: float, value: float, *function_name) -> str:
    """ Creates time-initial fluent if time>0, or plain initialization otherwise """
    assignment = "(= ({}) {})".format(' '.join(function_name), value)
    return "(at {} {})".format(time, assignment) if time > 0 \
        else assignment
6da851bb428409b7867c8574b4f66ac3061292d8
74,264
import hashlib


def hash_file(filepath):
    """Calculates the hash of a file without reading it all in memory at once."""
    digest = hashlib.sha1()
    with open(filepath, 'rb') as f:
        while True:
            chunk = f.read(1024 * 1024)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
55a139144e9efbac727c182b5ae5d75749402595
74,268
def break_tie(inline, equation):
    """If one of the delimiters is a substring of the other (e.g., $ and $$) it is
    possible that the two will begin at the same location. In this case we need
    some criteria to break the tie and decide which operation takes precedence.
    I've gone with the longer of the two delimiters takes priority (for example,
    $$ over $). This function should return a 2 for the equation block taking
    precedence, a 1 for the inline block. The magic looking return statement is
    to map 0->2 and 1->1."""
    tmp = (inline.end() - inline.start() > equation.end() - equation.start())
    return (tmp * 3 + 2) % 4
c987d71ca5beb953cfd9a70ee6490682b7223628
74,270
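A small check of the 0->2, 1->1 mapping described in the break_tie docstring above, assuming the function is in scope; the regexes and sample text are illustrative:

import re

text = "$$x + y$$"
inline = re.match(r"\$", text)      # matched span has length 1
equation = re.match(r"\$\$", text)  # matched span has length 2
# the inline span is not longer, so tmp is False and (0*3 + 2) % 4 == 2: equation wins
print(break_tie(inline, equation))  # 2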
from typing import Optional
from typing import List


def create_bdg_classes(color: Optional[str], outline: bool) -> List[str]:
    """Create the badge classes."""
    classes = [
        "sd-sphinx-override",
        "sd-badge",
    ]
    if color is None:
        return classes
    if outline:
        classes.extend([f"sd-outline-{color}", f"sd-text-{color}"])
    else:
        classes.extend([f"sd-bg-{color}", f"sd-bg-text-{color}"])
    return classes
56546db7ede3d411d57daa575b9b6d4c028716cd
74,273
def make_cav_dir_path(cav_code):
    """ Make path to schema directory """
    if len(cav_code) == 2:
        return "schemas/{start}".format(start=cav_code[:2])
    return "schemas/{start}/{list}".format(start=cav_code[:2],
                                           list='/'.join(list(cav_code[2:])))
9adccc3154ce4c15e9a2e11573dbcaa956d75e9d
74,274
import re


def match(pattern, text):
    """
    A quick way to do pattern matching.

    NOTE: name tokens using (?P<name>pattern)
    """
    m = re.match(pattern, text)
    if m is None:
        return {}
    else:
        return m.groupdict()
2add932bd2be6cddae90d35ea5e581fcbdec524a
74,275
def count_paragraphs(s):
    """Counts the number of paragraphs in the given string."""
    last_line = ""
    count = 0
    for line in s.split("\n"):
        if len(line) > 0 and (len(last_line) == 0 or last_line == "\n"):
            count += 1
        last_line = line
    return count
28ef6eca07578dd25567574400dd77284831165f
74,276
def scale(x, range1=(0, 0), range2=(0, 0)):
    """ Linear scaling for a value x """
    return (range2[0] * (1 - (x - range1[0]) / (range1[1] - range1[0]))
            + range2[1] * ((x - range1[0]) / (range1[1] - range1[0])))
775d55530ab2bafc68af3ad4dbe6a68d70552b59
74,280
import socket


def init_server(addr):
    """
    Init a simple server from address.

    :param addr: (host, port)
    :return: socket
    """
    # server
    # Symbolic name meaning all available interfaces
    # Arbitrary non-privileged port
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(addr)  # (host, port), e.g. host = '', port = 50007
    s.listen(1)
    return s
2f7af58a048292025af21a87da87b7111a93cb34
74,282
from typing import List


def format_index_response(user_info: dict, user_posts: List[dict]) -> dict:
    """
    This function formats the response according to the defined standard.

    :param user_info: dict -> general user info
    :param user_posts: List[dict] -> posts related to user
    :return: dict -> formatted dict response
    """
    response_dict = dict()
    response_dict['user'] = user_info
    # remove all userId's from the posts
    [post.pop('userId', None) for post in user_posts]
    response_dict['posts'] = user_posts
    return response_dict
37581c21847bb04eddfa584842c579ccb0b3a0c2
74,283
import torch


def tensors_to_device(tensors, device):
    """Transfer tensor, dict or list of tensors to device.

    Args:
        tensors (:class:`torch.Tensor`): May be a single, a list or a
            dictionary of tensors.
        device (:class:`torch.device`): the device where to place the tensors.

    Returns:
        Union[:class:`torch.Tensor`, list, tuple, dict]:
            Same as input but transferred to device.
            Goes through lists and dicts and transfers the torch.Tensor to
            device. Leaves the rest untouched.
    """
    if isinstance(tensors, torch.Tensor):
        return tensors.to(device)
    elif isinstance(tensors, (list, tuple)):
        return [tensors_to_device(tens, device) for tens in tensors]
    elif isinstance(tensors, dict):
        for key in tensors.keys():
            tensors[key] = tensors_to_device(tensors[key], device)
        return tensors
    else:
        return tensors
c74a2313774f12bbf2e095c422b968d6543ede5b
74,289
import torch


def delete_edge_mask(row, col, node1, node2):
    """return a boolean mask which can be used to construct a new adjacency
    matrix that deletes edges between node1 and node2."""
    mask1 = torch.logical_or(row != node1, col != node2)
    mask2 = torch.logical_or(row != node2, col != node1)
    mask = torch.logical_and(mask1, mask2)
    return mask
88b4a4c1020568674f58ef5d3d4e150c4a1423d3
74,290
def check_min_sample_periods_dict(count_dict, min_sample_periods):
    """
    Check if all periods listed in the dictionary contain at least
    min_sample_periods examples.
    """
    for key in count_dict.keys():
        if count_dict[key] < min_sample_periods:
            return False
    return True
ee3abb130e286a41c7733da0c636c837886db9f9
74,293
def filter_dict(source, d):
    """Filter `source` dict to only contain same keys as `d` dict.

    :param source: dictionary to filter.
    :param d: dictionary whose keys determine the filtering.
    """
    return {k: source[k] for k in d.keys()}
612096842e0b73f524ddc57ad1f9bd8e0d8f49b9
74,294
def _make_runium_param(iterations, times):
    """
    Creates and returns a dict with runium specific stats and functions that
    can be accessed from within the task.
    """
    context = {
        'iterations': iterations,
        'iterations_remaining': times - iterations
    }
    return context
45471b412f04af347f2a4c74b3abfdb41944a249
74,295
def split_list(n, ls):
    """Split input list into n chunks, remainder of division by n goes into
    last chunk. If input list is empty, returns list of n empty lists.

    Parameters
    ----------
    n : int
        Number of chunks.
    ls : list
        List to split.

    Returns
    -------
    list
        List of n chunks of input list.
    """
    if len(ls) == 0:
        return [[] for i in range(n)]
    m = int(len(ls) / n)
    chunks = []
    for i in range(0, len(ls), m):
        if i == (n - 1) * m:
            chunks.append(ls[i:])
            break
        chunks.append(ls[i:(i + m)])
    return chunks
a3189631a29d4bbebc24f15cb8a970db4e69e7be
74,300
def GitRemoteUrlFilter(url, patch):
    """Used with FilterFn to isolate a patch based on the url of its remote."""
    return patch.git_remote_url == url
eb6e509c1966e5b9b9e6abf00a164b3a3f9cb1f2
74,302
import hashlib


def checksum(record_payload):
    """
    Returns a md5 checksum calculated from the provided DOI record payload.

    Parameters
    ----------
    record_payload : str
        Text contents of a DOI record to generate the checksum for.

    Returns
    -------
    hex_digest : str
        The hex digest of the md5 checksum.
    """
    md5 = hashlib.md5()
    md5.update(record_payload.encode())
    return md5.hexdigest()
1fd0c4bf3ba49ce61775aff0caaf0e2b5ead852e
74,303
def primitives_usages(request):
    """Fixture to return possible cases of primitives use cases."""
    return request.param
a36f59d124a20f73d19ea63df85f919b9b5681f3
74,307
def remove_place(net, place):
    """
    Remove a place from a Petri net

    Parameters
    -------------
    net
        Petri net
    place
        Place to remove

    Returns
    -------------
    net
        Petri net
    """
    if place in net.places:
        in_arcs = place.in_arcs
        for arc in in_arcs:
            trans = arc.source
            trans.out_arcs.remove(arc)
            net.arcs.remove(arc)
        out_arcs = place.out_arcs
        for arc in out_arcs:
            trans = arc.target
            trans.in_arcs.remove(arc)
            net.arcs.remove(arc)
        net.places.remove(place)
    return net
b4bc3edabff29e0837c895f82ef9599eb59c811c
74,308
def calc_supply_age(
    prev_cs, cs, transferred_value, prev_age_ms, ms_since_prev_block
) -> int:
    """
    Calculate mean supply age in ms from given args.

    Definitions:
        - h: height
        - s(h): circulating supply at h
        - a(h): mean supply age at h
        - e(h): coinbase emission for block h
        - x(h): transferred ERG in block h, excluding r(h)
        - t(h): time between current and previous block

    At h = 1
    --------
    s(1) = e(1)
    a(1) = 0
    x(1) = 0

    At h = 2
    --------
    x(2) <= s(1)
    s(2) = s(1) + e(2)
    a(2) = [ (s(1) - x(2)) * (a(1) + t(h)) ] / s(2)

    At h = n
    --------
    x(n) <= s(n-1)
    s(n) = s(n-1) + e(n)
    a(n) = [ (s(n-1) - x(n)) * (a(n-1) + t(n)) ] / s(n)
    """
    return ((prev_cs - transferred_value) * (prev_age_ms + ms_since_prev_block)) / cs
f9ecf5beb1823a37d2effa4ebddb5ed03ef2ff0d
74,309
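A worked instance of the a(n) recurrence in the calc_supply_age entry above, assuming the function is in scope; all numbers are illustrative:

# previous supply 1000, new supply 1005 (emission 5), 100 ERG moved,
# previous mean age 60_000 ms, 120_000 ms elapsed since the previous block
age = calc_supply_age(
    prev_cs=1000, cs=1005, transferred_value=100,
    prev_age_ms=60_000, ms_since_prev_block=120_000,
)
print(age)  # (900 * 180000) / 1005, roughly 161194 ms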
def trapezoidal_command(CurrTime, Distance, Vmax, Accel, StartTime=0.):
    """ Function to generate a trapezoidal velocity command

    Arguments:
      CurrTime : The current timestep or an array of times
      Distance : The distance to travel over
      Vmax : The maximum velocity to reach
      Accel : The acceleration, assumed to be symmetric
      StartTime : The time that the command should start

    Returns : The command at the current timestep or an array representing the
              command over the times given (if CurrTime was an array)
    """
    t1 = StartTime + Vmax / Accel
    t2 = Distance / Vmax + StartTime
    t3 = t2 + Vmax / Accel

    # We'll create the command by just superimposing 4 ramps starting at
    # StartTime, t1, t2, and t3
    trapezoidal = (Accel * (CurrTime - StartTime) * (CurrTime - StartTime >= 0)
                   + -(Accel * (CurrTime - t1) * (CurrTime - t1 >= 0))
                   + -(Accel * (CurrTime - t2) * (CurrTime - t2 >= 0))
                   + (Accel * (CurrTime - t3) * (CurrTime - t3 >= 0)))

    return trapezoidal
cc4065103cf2b0cdf06f5db48c48fbd118dda06d
74,310
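A quick usage sketch for the trapezoidal_command entry above, evaluating the command over an array of times; the numpy dependency and the specific numbers are illustrative, assuming the function is in scope:

import numpy as np

t = np.linspace(0, 5, 501)
# travel 4 distance units with Vmax = 2 and Accel = 4, starting at t = 0:
# ramp up until t = 0.5, cruise until t = 2, ramp down until t = 2.5
v = trapezoidal_command(t, Distance=4, Vmax=2, Accel=4)
print(v.max())   # peaks at Vmax = 2.0
print(v[-1])     # back to 0.0 once the move is complete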
def make_error_format(file_path, line_no, error):
    """
    Generate an 'errorformat' string recognized by the Vim compiler set by this plugin.

    :param file_path: The file path from where the error occurred.
    :param line_no: The line number pointing to the erroneous code.
    :param error: The error description.
    :returns: An error format compatible string.
    """
    return '{file_path}:{line_no} <{error}>'.format(
        file_path=file_path,
        line_no=line_no,
        error=error,
    )
35195a24d3c8d0d2526769b16e257fe988af8c51
74,314
def take_last_forecast(df):
    """Get the last forecast if multiple forecasts have the same timestamp"""
    index_cols = ['Uniform Date Format', 'Question', 'Ordered.Bin.Number']
    agg_dict = {col: 'last' for col in df.columns if col not in index_cols}
    return df.groupby(index_cols, as_index=False).agg(agg_dict)
9302f33219c68ac9aca6097f711a130c4a02a31d
74,318
def keep_keys(new_keys, old_dict):
    """Return dictionary with items indicated by the keys.

    Args:
        new_keys (iterable): Keys to keep from the old dictionary.
        old_dict (dict): A dictionary from which to extract a subset of items.

    Returns:
        dict: A dict derived from old_dict only keeping keys from new_keys.

    Example:
        To use `keep_keys` directly on a dictionary:

        >>> keep_keys(["a", "b"], {"a": 1, "b": 2, "c": 3})
        {'a': 1, 'b': 2}

        If the requested keys are not present, they are ignored.

        >>> keep_keys(["a", "b"], {"b": 2, "c": 3})
        {'b': 2}

        To use `keep_keys` on a stream of dictionaries:

        >>> dict_gen = iter([{"a": 1, "b": 2, "c": 3},
        ...                  {"b": 5, "c": 6}])
        >>> from functools import partial
        >>> subdict_gen = map(partial(keep_keys, ["a", "b"]), dict_gen)
        >>> list(subdict_gen)
        [{'a': 1, 'b': 2}, {'b': 5}]
    """
    new_dict = {k: old_dict[k] for k in new_keys if k in old_dict}
    return new_dict
6365015024e5c923b6e515d7ac8f4fe6eafbe7e3
74,320
def default_result_verbose(d):
    """Converts the terse default result `dict` returned by
    :func:`prepare_default_result_dict` into a more verbose version.
    """
    v = {
        "key": d["k"],
        "done": d["d"],
        "nodes": [{
            "id": n["i"].decode("utf8"),
            "address": n["a"].decode("utf8"),
            "expired": n["x"]
        } for n in d["n"]]
    }
    if "r" in d:
        v["result"] = d["r"]
    return v
cffe269b7fd0da7d593109906abeb371a3f066c4
74,321
def _format_search_string(server: str, query: str) -> str:
    """Generate a search string for an erddap server with user defined query."""
    return f'{server}search/index.csv?page=1&itemsPerPage=100000&searchFor="{query}"'
f870296b153cdc83b5e7f1e08c369a1cc0647c90
74,322
import requests


def test_steam_api_key(api_key=None):
    """
    Test if api_key is valid for steam api

    Parameters
    ----------
    api_key (int): api steam key

    Returns
    -------
    (bool): True if valid
    """
    # According to https://developer.valvesoftware.com/wiki
    # /Steam_Web_API#GetGlobalAchievementPercentagesForApp_.28v0001.29
    url_test = (f'http://api.steampowered.com/ISteamUser'
                f'/GetPlayerSummaries/v0002/?key={api_key}&steamids=76561197960435530')
    return requests.get(url_test).status_code == 200
1e0efec20eb7180fd7c8157f154a0076f419869e
74,323
import typing


def line_to_row(line: str) -> typing.Sequence[str]:
    """
    Line to row.

    Given a string, convert it to a list representing a row.

    :param line: string containing a row
    :returns: row
    """
    return line.rstrip("\n").split("\t")
49f3ecdd30c95e45073526af9613ae2437f8cb5a
74,324
def collect_driver_info(driver):
    """Build the dictionary that describes this driver."""
    info = {'name': driver.class_name,
            'version': driver.version,
            'fqn': driver.class_fqn,
            'description': driver.desc,
            'ci_wiki_name': driver.ci_wiki_name}
    return info
41e94ac324d9bfc2248d90e670378cf5e39e3e1d
74,325
def __parse(string):
    """Parses a given string into a float if it contains a dot, into an integer otherwise.

    :param string: Given string to parse.
    :return: Integer or float representation of the given string.
    """
    if "." in string:
        return float(string)
    return int(string)
1ef2ff72c8f614684e894f8ef85a586e3f42a9c4
74,327
def generate_json(sourced_id, title, description, assign_date, due_date,
                  class_sourced_id, category, result_value_max, result_value_min):
    """
    Generate a JSON formatted value ready to be sent to OpenLRW

    :param sourced_id:
    :param title:
    :param description:
    :param assign_date:
    :param due_date:
    :param class_sourced_id:
    :param category:
    :param result_value_max:
    :param result_value_min:
    :return:
    """
    if result_value_max is None:
        result_value_max = 0.0
    if result_value_min is None:
        result_value_min = 0.0

    return {
        "sourcedId": sourced_id,
        "title": title,
        "description": description,
        "assignDate": assign_date,
        "dueDate": due_date,
        "resultValueMax": result_value_max,
        "resultValueMin": result_value_min,
        "class": {
            "sourcedId": class_sourced_id
        },
        "metadata": {
            "type": category
        }
    }
1cc0a8ed71ecd08304d2e39215b57a2df3180bbd
74,330
def format_nav_item(url: str, title: str) -> str:
    """Format a single entry for the navigation bar (navbar)."""
    return '<li class="nav-item"><a class="nav-link" href="{}">{}</a></li>'.format(url, title)
d9c3e6a424bee4846a7f62822476dbeb6a909d99
74,335
def manh(x, y):
    """Compute Manhattan distance."""
    return sum(abs(i - j) for i, j in zip(x, y))
1e3cd690e33519eb93728c70ac98c655a4081e23
74,343
def read_list_elem(root, tag, cls, fd):
    """
    Read all elements with the tag ``tag`` under XML element ``root`` and
    return a list of objects of class ``cls``, created by calling
    ``cls.read_elem(elem, fd)``.

    This is a low-level function used internally by this library; you don't
    typically need to use it.
    """
    objects = []
    for elem in root.findall(tag):
        objects.append(cls.read_elem(elem, fd))
    return objects
d5fa65b0949be5ed1655baf5ea33a8cb28aeefa0
74,344
def translate_to_wsl(path):
    """Translate a windows path to unix path for WSL.

    WSL stands for Windows Subsystem for Linux and allows to run native linux
    programs on Windows. In order to access Windows' filesystem the local
    drives are mounted to /mnt/<Drive> within the WSL shell. This little
    helper function translates a windows path to such a WSL compatible path.

    Note: This function works for local files only at the moment.

    Arguments:
        path (string): the windows path to translate to unix path

    Returns:
        string: the unix path to be used by calls to programs running on WSL

    Raises:
        FileNotFoundError: if path is an UNC path.
    """
    if path.startswith('\\\\'):
        raise FileNotFoundError('UNC paths are not supported by WSL!')
    wsl_path = path.replace('\\', '/')
    if wsl_path[1:3] == ':/':
        return ''.join(('/mnt/', wsl_path[0].lower(), wsl_path[2:]))
    return wsl_path
36a1691400e1dfce0902c237ee48eca7ffeff8b6
74,346
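A brief check of the drive-letter translation described in the translate_to_wsl entry above, assuming the function is in scope; the example paths are illustrative:

print(translate_to_wsl('C:\\Users\\me\\data.txt'))    # /mnt/c/Users/me/data.txt
print(translate_to_wsl('relative\\path\\file.txt'))   # relative/path/file.txt (no drive letter, only slashes change)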
def file_to_list(file_name):
    """
    makes a list out of the lines of a file

    Args:
        file_name: string - the name of the file

    Returns:
        a list - the lines of the file
    """
    lines = []
    with open(file_name) as f:
        lines = f.read().splitlines()
    return lines
b8e81fb297b2aa0fd71fef937bbe36bdd4415581
74,347
import typing


def is_iterable(field: typing.Any) -> bool:
    """
    Returns boolean describing if the provided `field` is iterable.
    (Excludes strings, bytes)
    """
    return isinstance(field, typing.Iterable) and not isinstance(field, (str, bytes))
bcfaf637774f4f8eda21d9c724a2961a351ec02e
74,350
import torch


def normalize_tensor(tensor):
    """ Tensor normalization operation. Tensor/mean - 1."""
    tensor = tensor / torch.mean(tensor) - 1
    return tensor
fcb0ea3e41bbc4d2fca83e352475b33f0ada22c1
74,352
import math


def rotate_around_origin(xy, radians):
    """Rotate a point around the origin.

    Taken from https://ls3.io/post/rotate_a_2d_coordinate_around_a_point_in_python/"""
    x, y = xy
    xx = x * math.cos(radians) + y * math.sin(radians)
    yy = -x * math.sin(radians) + y * math.cos(radians)
    return xx, yy
857b058e6ff96e0e15da628a22e42ce6c828f3fa
74,353
def _is_public(name: str) -> bool:
    """Returns True if a method is not protected, nor private; otherwise False."""
    return not (name.startswith('__') or name.startswith('_'))
56aef4c3625483600b584cef897c5a87ebac554a
74,354
def _D2O_Tension(T):
    """Equation for the surface tension of heavy water

    Parameters
    ----------
    T : float
        Temperature [K]

    Returns
    -------
    sigma : float
        Surface tension [N/m]

    Raises
    ------
    NotImplementedError : If input isn't in limit
        * 269.65 ≤ T ≤ 643.847

    Examples
    --------
    >>> _D2O_Tension(298.15)
    0.07186
    >>> _D2O_Tension(573.15)
    0.01399

    References
    ----------
    IAPWS, Release on Surface Tension of Heavy Water Substance,
    http://www.iapws.org/relguide/surfd2o.pdf
    """
    Tr = T / 643.847
    if 269.65 <= T < 643.847:
        return 1e-3 * (238 * (1 - Tr)**1.25 * (1 - 0.639 * (1 - Tr)))
    else:
        raise NotImplementedError("Incoming out of bound")
316a91a0a2b79abcb2180a7ce105691a3a4abc29
74,357
def int_label_to_char(label):
    """
    Converts an integer label to the corresponding character

    :param label: the integer label
    :return: The corresponding character for the label.
    """
    # Is it a label for a number?
    if label > 25:
        # Cast number to string
        return str(label - 26)
    else:
        # Else look up the ascii character
        return chr(label + 97)
4e9cadfb55a801b7a24473fcbb286c40e73fe5fb
74,360
def module_tracker(fwd_hook_func):
    """
    Wrapper for tracking the layers throughout the forward pass.

    Args:
        fwd_hook_func: Forward hook function to be wrapped.

    Returns:
        Wrapped method.
    """
    def hook_wrapper(relevance_propagator_instance, layer, *args):
        relevance_propagator_instance.module_list.append(layer)
        return fwd_hook_func(relevance_propagator_instance, layer, *args)

    return hook_wrapper
e2990481272388aff48eac55f409d1c953581868
74,364
def extract_pos(positions, cash):
    """Extract position values from backtest object as returned by
    get_backtest() on the Quantopian research platform.

    Parameters
    ----------
    positions : pd.DataFrame
        timeseries containing one row per symbol (and potentially duplicate
        datetime indices) and columns for amount and last_sale_price.
    cash : pd.Series
        timeseries containing cash in the portfolio.

    Returns
    -------
    pd.DataFrame
        Daily net position values.
        - See full explanation in tears.create_full_tear_sheet.
    """
    positions = positions.copy()
    positions['values'] = positions.amount * positions.last_sale_price
    cash.name = 'cash'
    values = positions.reset_index().pivot_table(index='index',
                                                 columns='sid',
                                                 values='values')
    values = values.join(cash).fillna(0)
    return values
248ecd5054df0eed7126137f2b9145c11eeff2c4
74,366
def is_northern(lat):
    """
    Determine if it is northern hemisphere.

    Arguments:
        lat: float
            Latitude, in degrees. Northern: positive, Southern: negative.

    Returns:
        1: northern, 0: southern.
    """
    if lat < 0.0:
        return 0
    else:
        return 1
f74228ad463a76556a61767699b785dff7bd8849
74,369
def create_response(code: int, body: str) -> dict:
    """
    Creates a JSON response for HTTP.

    Args:
        code (int): The HTTP status code
        body (str): The HTTP body as a string

    Returns:
        (dict): JSON HTTP response
    """
    return {
        'headers': {
            'Content-Type': 'text/html'
        },
        'statusCode': code,
        'body': body
    }
f684fc5088023b3873b7a00ca626ba4a6a520767
74,370
def get_provenance_record(gatt, vatt, ancestor_files):
    """Create a provenance record describing the diagnostic data and plot."""
    caption = ("Ensemble Clustering Diagnostics of extreme {extreme} of "
               .format(**gatt) +
               "variable {long_name} between "
               "{start_year} and {end_year} ".format(**vatt))
    print(gatt)
    record = {
        'caption': caption,
        'authors': ['hard_jo', 'arno_en', 'mavi_ir'],
        'projects': ['c3s-magic'],
        'references': ['straus07jcli'],
        'plot_types': ['other'],
        'realms': ['atmos'],
        'domains': ['reg'],
        'ancestors': ancestor_files,
    }
    return record
19ce99be20b1290322dbf57e1c79675f6ef1906e
74,373
def _normalize_counts(counts, val=1):
    """Normalizes a dictionary of counts, such as those returned by
    _get_frequencies().

    Args:
        counts: a dictionary mapping value -> count.
        val: the number the counts should add up to.

    Returns:
        dictionary of the same form as counts, except where the counts have
        been normalized to sum to val.
    """
    n = sum(counts.values())
    frequencies = {}
    for r in list(counts.keys()):
        frequencies[r] = val * float(counts[r]) / float(n)
    return frequencies
ed48e402c1c9fa22e264b3fc720acaa6dbff193d
74,374
from typing import Dict
from typing import Any


def _make_pod_envconfig(
    config: Dict[str, Any], relation_state: Dict[str, Any]
) -> Dict[str, Any]:
    """Generate pod environment configuration.

    Args:
        config (Dict[str, Any]): configuration information.
        relation_state (Dict[str, Any]): relation state information.

    Returns:
        Dict[str, Any]: pod environment configuration.
    """
    return {
        # General configuration
        "ALLOW_ANONYMOUS_LOGIN": "yes",
        "GIN_MODE": config["gin_mode"],
        "NRF_HOST": relation_state["nrf_host"],
    }
3f41f89f9bdf25ab6a75c5206943df5437092d1c
74,376
def read(texname):
    """
    A function to read the .tex file

    @ In, texname, str, LaTeX file name
    @ Out, filler_text, str, tex contents
    """
    with open(texname, 'r') as fd:
        filler_text = fd.read()
    return filler_text
35a687836fba6dc37689b4c5d2fb85f0841eb4e9
74,377
def crop_image(image, start_x, start_y, width, height):
    """
    Crops an image

    Parameters
    ----------
    image: image
        the image that has to be cropped
    start_x: integer
        x co-ordinate of the starting point for cropping
    start_y: integer
        y co-ordinate of the starting point for cropping
    width: integer
        expected width of the cropped image
    height: integer
        expected height of the cropped image

    Returns
    -------
    the cropped image
    """
    return image[start_y: start_y + height, start_x: start_x + width]
08bac096fd30d8575e59a26ea441da8146fb9c03
74,382
import binascii
import colorsys


def generate_bgcolor(str):
    """Generate an arbitrary background color based on a hash of 'str'."""
    crc = binascii.crc32(str.encode('utf-8')) % (1 << 32)
    # pick out some HSV
    h = (crc & 0xFF) / 255.0
    s = ((crc & 0xFF00) >> 8) / 255.0
    v = ((crc & 0xFF0000) >> 16) / 255.0
    # make sure value is high, saturation is low
    s = (s / 4) + 0.25
    v = (v / 4) + 0.75
    return ('#%02x%02x%02x' % tuple([int(x * 255) for x in colorsys.hsv_to_rgb(h, s, v)]))
ca8de46b5ffc558e191ee9b1f3cf092fc076aec2
74,383
import re


def search_pattern(regex):
    """
    Return a value check function which raises a ValueError if the supplied
    regular expression does not match anywhere in the value, see also
    `re.search`.
    """
    prog = re.compile(regex)

    def checker(v):
        result = prog.search(v)
        if result is None:
            raise ValueError(v)

    return checker
87305c782f6b99d389fe8497ff85fd25f33f73f2
74,388