content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def __transform_center(gap_width, x, y):
    """Translate the coin's center into the frame of the nearest corner.

    Depending on which region of the square the coin lands in, shift each
    coordinate past the midpoint of the gap back by a full gap width so the
    closest corner becomes the origin of a Cartesian plane.

    Returns a (x, y) tuple of the transformed center coordinates.
    """
    midpoint = gap_width / 2
    shifted_x = x - gap_width if x > midpoint else x
    shifted_y = y - gap_width if y > midpoint else y
    return (shifted_x, shifted_y)
1d698b02704e8bb63c3ae50364a64141b10b8d23
118,685
import gzip


def open_file_for_reading(filename):
    """Open *filename* for binary reading, gunzipping it transparently
    when the name ends with ``.gz`` (case-insensitive)."""
    opener = gzip.open if filename.lower().endswith(".gz") else open
    return opener(filename, "rb")
55036422c55de278bf94fea13074bbe336f96489
118,686
def update_variables_Adam(alpha, beta1, beta2, epsilon, var, grad, v, s, t):
    """Update a variable in place using the Adam optimization algorithm.

    Args:
        alpha: the learning rate
        beta1: the weight used for the first moment
        beta2: the weight used for the second moment
        epsilon: a small number to avoid division by zero
        var: a numpy.ndarray containing the variable to be updated
        grad: a numpy.ndarray containing the gradient of var
        v: the previous first moment of var
        s: the previous second moment of var
        t: the time step used for bias correction

    Returns:
        tuple of (updated variable, new first moment, new second moment)
    """
    # Previously each parameter was aliased to a single Greek letter; the
    # aliases added nothing and obscured the code, so the formulas now use
    # the parameter names directly.
    v_new = beta1 * v + (1 - beta1) * grad
    s_new = beta2 * s + (1 - beta2) * grad * grad
    # Bias-corrected moment estimates.
    v_hat = v_new / (1 - beta1 ** t)
    s_hat = s_new / (1 - beta2 ** t)
    var_new = var - alpha * (v_hat / (s_hat ** 0.5 + epsilon))
    return (var_new, v_new, s_new)
d5f1e9157c99a607593753da90005461b485f6f7
118,690
def is_numeric(obj) -> bool:
    """Test whether *obj* behaves like a number.

    Uses EAFP: tries adding 0 and reports whether that raised TypeError.

    :param obj: test object
    :return: True if obj supports numeric addition
    """
    try:
        obj + 0
    except TypeError:
        return False
    return True
3aa046ef85c1cccb7fb1dc4708d9b2fabb2dd09d
118,692
def rmax(a, e):
    """Return the maximum radius (apoapsis) of an elliptical orbit with
    semi-major axis *a* and eccentricity *e*."""
    return a * (1 + e)
536b8e0449976439bdc735e1395b1774fd180bca
118,693
def generate_question_with_choices(choices: list[str], message: str) -> list[dict]:
    """Build a py-inquirer style single-select list question.

    :param choices: choices of the question
    :param message: message of the question
    :return: the question wrapped in a one-element list (py-inquirer format)
    """
    question = {
        'type': 'list',
        'name': 'options',
        'message': message,
        'choices': choices,
    }
    return [question]
1379d4c1ca6dca2e9bae786a9b2937de6cf95ec3
118,696
import torch


def eucl_dist(x, y):
    """Compute the pairwise squared Euclidean distance matrix.

    Input:
        x: embedding of size M x D
        y: embedding of size N x D
    Output:
        dist: pairwise distance of size M x N

    Uses the expansion ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b.
    """
    sq_x = (x ** 2).sum(dim=1, keepdim=True).expand(-1, y.size(0))
    sq_y = (y ** 2).sum(dim=1, keepdim=True).t().expand(x.size(0), -1)
    cross = x @ y.t()
    return sq_x + sq_y - 2 * cross
9e1e8bc9f7c34d86347eca460b7d3e99fb0d378d
118,699
def isPotentialMatch(candidate: str, hints: str, guessedLetter: str) -> bool:
    """Decide whether *candidate* is consistent with the hint string.

    *hints* is a string like ``"011010"``: "1" means the guessed letter must
    appear at that position, "0" means it must not.
    """
    for hint, letter in zip(hints, candidate):
        is_guess = letter == guessedLetter
        if hint == "1" and not is_guess:
            return False
        if hint == "0" and is_guess:
            return False
    return True
91b3a476583b4e01f6c2d301c53f31057ce774cd
118,700
def user_is_curator(group, userprofile):
    """Return True if *userprofile* is among the curators of *group*."""
    matching_curators = group.curators.filter(id=userprofile.id)
    return matching_curators.exists()
ea84438151ce084cdf77a03cef1fe4093562f1da
118,701
def format_macrofunction(function):
    """Format the fully qualified name of a macro function, for error messages."""
    # Broken bindings due to erroneous imports in user code (e.g. accidentally
    # bound to a module object instead of a function object) lack these
    # attributes entirely, so fall back to repr.
    if not (hasattr(function, "__module__") and hasattr(function, "__qualname__")):
        return repr(function)
    if function.__module__:
        return f"{function.__module__}.{function.__qualname__}"
    # Macros defined in the REPL have `__module__=None`.
    return function.__qualname__
dbd1820700df44940e3cd5cfcb133e6c417603bc
118,702
def _generate_binary_deferer(op_func):
    """Given a binary operator, generate a method that applies that operator
    element-wise to a self and an other.

    See ReplicatedThinMatrices._defer_binary_elementwise for more.
    """
    def deferer(self, other, *args, **kwargs):
        cls = type(self)
        return cls._defer_binary_elementwise(self, other, op_func, *args, **kwargs)
    return deferer
1b20b8616d70ecf491640b27aec3e002aaad3d88
118,709
def choose_representative_strain(cluster, tree):
    """Given a cluster, find which tip in that cluster tends to be closest to
    all others (aka the best representative) and return that tip.

    :param cluster: Cluster found (iterable of tip names).
    :param tree: Tree read in by Bio.Phylo.
    :return: representative strain, or 'NA' if the cluster is empty.
    """
    representative = 'NA'
    # Fix: the original seeded this with the magic number 100000000.0, which a
    # sufficiently large/long tree could legitimately exceed, silently
    # returning 'NA'. Infinity is always beaten by any real total.
    best_distance = float('inf')
    for strain1 in cluster:
        total_length = 0.0
        for strain2 in cluster:
            if strain1 != strain2:
                total_length += tree.distance(strain1, strain2)
        if total_length < best_distance:
            best_distance = total_length
            representative = strain1
    return representative
4e3fba5b0247320f64bf23b2697f3b3518e63b91
118,712
def cant_adyacentes(life, f, c):
    """Count the live cells adjacent to the cell at row *f*, column *c*.

    The board is treated as toroidal ("infinite"): cells on the left edge
    are connected to the right edge and vice versa, and cells on the top
    edge are connected to the bottom edge and vice versa.
    """
    rows = len(life)
    cols = len(life[0])
    count = 0
    for i in range(f - 1, f + 2):
        for j in range(c - 1, c + 2):
            wrapped = (i % rows, j % cols)
            # Skip the cell itself (after wrapping), count live neighbors.
            if wrapped != (f, c) and life[wrapped[0]][wrapped[1]]:
                count += 1
    return count
2ee543333088ffc55c3227ce5f1568adefff9623
118,713
import torch


def evaluate_dataloader(model, dataloader, metrics):
    """Return the average metric values for the given dataloader and model.

    Parameters
    ----------
    model : pytorch model
        The pytorch model to predict with.
    dataloader : pytorch dataloader
        Dataloader returning batches of (inputs, targets).
    metrics : dict
        Keys are metric names (str); values are callables taking
        (predictions, targets).

    Returns
    -------
    values : dict
        Keys are metric names (str); values are metric averages over the
        whole dataset (per-batch values weighted by batch size).
    """
    totals = dict.fromkeys(metrics, 0)
    device = next(model.parameters()).device
    model.eval()
    with torch.no_grad():
        for batch in dataloader:
            inputs = batch[0].to(device)
            targets = batch[1].to(device)
            preds = model(inputs)
            batch_size = inputs.shape[0]
            for name, metric_fn in metrics.items():
                totals[name] += metric_fn(preds, targets).item() * batch_size
    dataset_size = len(dataloader.dataset)
    return {name: total / dataset_size for name, total in totals.items()}
690868fb8c8881ba2838c234d2af4e790bac08b3
118,716
import pickle


def unpickle_entity(filename):
    """Load and return the object pickled in *filename* (a .pkl file)."""
    with open(filename, "rb") as handle:
        return pickle.load(handle)
bee73c9469079f3e35c9c2d2f3b18e2af02c444d
118,720
def create_new_question_entry(question, answers):
    """Create a new question entry for the .json file and return it.

    Each answer starts with a vote count of zero.
    """
    answer_entries = [{"answer": answer, "votes": 0} for answer in answers]
    return {'question': question, 'answers': answer_entries}
2d51bd7568eed099811b204ec33b3790d2d0242d
118,721
def slugify_url(url):
    """Turn '/study/connect_me/' into 'study-connect-me'."""
    slug = url.lower().strip("/")
    for separator in (":", "/", "_"):
        slug = slug.replace(separator, "-")
    return slug
e07a1fc1999123e5102023730e9b07ce5f66606a
118,723
from operator import concat


def CreateStorageKey(s1, s2):
    """Concatenate arguments for use as storage key.

    Args:
        s1 (str): first string to be used in concatenation.
        s2 (str): second string to be used in concatenation.

    Return:
        (str): args concatenated together with a '.' between each value.
    """
    # Fix: the operator.concat indirection added nothing over plain string
    # concatenation; this is the direct equivalent (still raises TypeError
    # for non-string arguments, as before).
    return s1 + '.' + s2
03b99c8ed0b14a082a24afd576d8e62ea2a1ab0c
118,725
def nt_to_tuple(nt):
    """Convert a namedtuple instance (or subclass instance) to a tuple,
    even if the instance's __iter__ method has been changed.

    Useful for writing derived instances of typeclasses.

    Args:
        nt: an instance of namedtuple
    Returns:
        A tuple containing each of the items in nt
    """
    field_names = nt.__class__._fields
    return tuple(getattr(nt, name) for name in field_names)
5d616706c76c9a3662458c13250906f1061e1e6c
118,726
import json


def parse_config_file(config_path):
    """Load a JSON config file like this:

    {'bit_str': '0110100000100000', 'freq': 315000000, 'baud': 410,
     'sample_rate': 2000000, 'amp_map': {'LOW': 0.28, 'HIGH': 1.4},
     'header_len': 3.85, 'space_len': 3.78}
    """
    with open(config_path, 'r') as f:
        return json.load(f)
262408e401a351f03cd3e838866fc93f203da22e
118,731
import re


def is_a_ean13_barcode(string: str) -> bool:
    """Define whether the barcode-scanner input is a valid EAN13 barcode,
    i.e. exactly 13 digits.

    NOTE(review): this only checks the shape; it does not validate the
    EAN-13 check digit.
    """
    # Fix: the pattern was a non-raw string ("\d"), which is an invalid
    # escape sequence — a DeprecationWarning today and a SyntaxError in
    # future Python versions. Raw string is the correct form.
    return bool(re.fullmatch(r"\d{13}", string))
8929d9a20b80aba68d1b7e411c222412caf2d23f
118,734
def put_img(board, img, center):
    """Paste *img* into the larger *board* image, anchored at *center*.

    *center* is an (x, y) pair; the image is written so its center lands
    there. The board is modified in place and also returned.
    """
    center_x, center_y = center
    img_h, img_w, _ = img.shape
    left = int(center_x - img_w // 2)
    top = int(center_y - img_h // 2)
    board[top:top + img_h, left:left + img_w, :] = img
    return board
6f4e3e30f38172f6ac4d14d479c03b17b37e8385
118,741
def get_gap_symbol(seq):
    """Return gap symbol.

    seq: Sequence object or plain string.

    Should be able to handle cogent.core Sequence and ModelSequence
    objects. If the input sequence doesn't have a MolType, '-' will be
    returned as default.
    """
    default_gap = '-'
    try:
        return seq.MolType.Alphabet.Gap
    except AttributeError:
        return default_gap
22df0980c9ab826be6327286f0abb0106000caa0
118,746
def clean(var):
    """Removes tabs, newlines and trailing whitespace.

    Returns '' when *var* is None.
    """
    if var is None:
        return ''
    # Delete tabs and newlines in one pass, then strip surrounding space.
    no_controls = var.translate({ord("\t"): None, ord("\n"): None})
    return no_controls.strip()
7fe7d3006cc632a71b8839106a5ea6da0a00ad2d
118,751
def _read_legacy_params(fname):
    """Read param values from a .param file (legacy).

    Parameters
    ----------
    fname : str
        Full path to the file (.param)

    Returns
    -------
    params_input : dict
        Dictionary of parameters. Values are parsed as int when possible,
        float when they contain '.' or 'e', otherwise kept as strings.
    """
    params_input = dict()
    with open(fname, 'r') as fp:
        for line in fp:
            # Fix: skip blank/colon-free lines instead of crashing on the
            # key/value unpack.
            if ':' not in line:
                continue
            # Fix: maxsplit=1 so values containing ':' (e.g. Windows paths,
            # URLs, times) are preserved instead of raising ValueError.
            key, value = [field.strip() for field in line.lstrip().split(':', 1)]
            try:
                if '.' in value or 'e' in value:
                    params_input[key] = float(value)
                else:
                    params_input[key] = int(value)
            except ValueError:
                # Non-numeric value: keep it verbatim as a string.
                params_input[key] = str(value)
    return params_input
b4bea3a5a1f7a8c2c7b31237add1075ae9d58e4d
118,754
def boolify(string):
    """Convert a string to a boolean.

    :param string: string ('True'/'true' or 'False'/'false')
    :return: bool
    :raises ValueError: for any other value
    """
    if string in ('True', 'true'):
        return True
    if string in ('False', 'false'):
        return False
    raise ValueError("wrong type")
6e09848e124dadfd9f392f2c0ab6371889ee157b
118,759
def convert_hour(hour):
    """Convert time (hour) to the icon name of the appropriate clock icon."""
    icon_prefix = "wi-time-"
    return icon_prefix + hour
271f722bdf61200f05522563a6cd1082bb956755
118,764
def ifnone(obj, alt):
    """Convenience function to return an alternative if an object is None.

    Why isn't this in the standard library!

    :param obj: Object
    :param alt: Alternative
    :return: obj if not None, otherwise alt
    """
    return alt if obj is None else obj
b6d6eeb63df51ae2face1e81e6b3e619375781fd
118,766
def parse_benchmark_name(name: str):
    """Parse a template benchmark name with a size.

    >>> parse_benchmark_name('BM_Insert_Random<int64_t, int64_t, std::unordered_map>/1000')
    ('BM_Insert_Random', ['int64_t', 'int64_t', 'std::unordered_map'], 1000)
    """
    open_idx = name.find('<')
    close_idx = name.find('>')
    slash_idx = name.find('/')
    base_name = name[:open_idx]
    t_params = [param.strip() for param in name[open_idx + 1:close_idx].split(',')]
    size = int(name[slash_idx + 1:])
    return base_name, t_params, size
1628d67272a33078dd9b2c5b105f530552bd1776
118,768
def clamp(val, minVal=0.0, maxVal=1.0):
    """Clamp *val* into the range [minVal, maxVal].

    :param val: value to clamp
    :type val: float
    :param minVal: minimum value
    :type minVal: float
    :param maxVal: maximum value
    :type maxVal: float
    :return: the clamped value
    :rtype: float
    """
    upper_bounded = min(val, maxVal)
    return max(minVal, upper_bounded)
55ba10e02f005b95178cf71184cbc7f3317a967e
118,769
def Btu_h2MBtu_h(x):
    """Btu/h -> MBtu/h"""
    return x * 1.e-6
4f9fd40e84ac2a1902af8d48b21b38a098def305
118,770
def fileinfo_remove(log_table, remove_table):
    """Remove files from log table that are in remove table.

    Params:
        log_table {fileinfo} - dictionary to remove from
        remove_table {fileinfo} - dictionary containing keys to remove
    Returns:
        removed (int) - number of entries removed
    Notes:
        does not actually remove entries, simply marks for deletion
        (see fileinfo_clean)
    """
    REMOVAL_FLAG = 0x2
    removed = 0
    for key in remove_table:
        if key not in log_table:
            continue
        log_table[key].flags |= REMOVAL_FLAG
        removed += 1
    return removed
e7de73edf1ea97e980a3ff4328d6d02b771da4a6
118,772
import re


def _remove_ExtraSpaces(line: str) -> str:
    """Collapse runs of whitespace into single spaces and drop leading
    whitespace. (Trailing whitespace is collapsed to one space, not
    removed — this matches the original behavior.)"""
    collapsed = re.sub(r'\s+', ' ', line)
    return re.sub(r'^\s+', '', collapsed)
8be0dcd1298d680a2ad8c9b2d7e036b7c91d4292
118,776
def cap_feature_values(df, feature, cap_n=10):
    """Cap the values of a given feature, in order to reduce the effect of
    outliers. For example, set NUMBER_OF_HABITABLE_ROOMS values that are
    greater/equal to 10 to 10.

    Parameters
    ----------
    df : pandas.DataFrame
        Given dataframe (modified in place).
    feature : str
        Feature for which values are capped.
    cap_n : int, default=10
        At which value to cap.

    Return
    ------
    df : pandas.DataFrame
        Dataframe with capped values.
    """
    over_cap = df[feature] >= cap_n
    df.loc[over_cap, feature] = cap_n  # str(cap_n) + "+"
    return df
273e755ec1b677b662732c8ae598581c01a25a53
118,786
def mdm_vendor(toml_data):
    """Returns the MDM vendor from the TOML config."""
    return toml_data["mdm_info"]["vendor"]
6d11e5472e92fb989bcc8ea730de3622f44030ba
118,787
import platform


def is_m1() -> bool:
    """Check whether we are running on an Apple Silicon (M1) machine."""
    try:
        # Fix: call platform.uname() once instead of twice; the two calls
        # were redundant work and could in principle be inconsistent.
        uname = platform.uname()
        return uname.machine == "arm64" and uname.system == "Darwin"
    except Exception:  # Catch-all: platform probing must never crash the caller
        return False
648454f7b79edb61c1f1bc434cacc25b84b5a736
118,790
def normalize(records):
    """Normalize results by removing the zone_time_first and zone_time_last
    keys and adding a source [sensor or zone] key pair.

    :param records: List (of dictionaries)
    :return: List (of dictionaries)
    """
    renamed = {"zone_time_first": "time_first", "zone_time_last": "time_last"}
    normalized = []
    for record in records:
        out = {"source": "sensor"}
        for key, value in record.items():
            if key in renamed:
                out[renamed[key]] = value
                # The presence of either zone_* key marks a zone record.
                out["source"] = "zone"
            else:
                out[key] = value
        normalized.append(out)
    return normalized
473fdf9bc910498da5a1b57a76eeb507da6605e9
118,792
from typing import Type
import functools


def _make_meta_call_wrapper(cls: Type[object]):
    """Creates a `type(cls).__call__` wrapper that creates pickleable instances.

    This function works in tandem with `partialclass` below. It wraps
    `type(cls).__call__`, which is in general responsible for creating a new
    instance of `cls` or one of its subclasses. In cases where the
    to-be-created class is Fiddle's dynamically-subclassed version of `cls`,
    the wrapper here instead returns an instance of `cls`, which isn't a
    dynamic subclass and more generally doesn't have any Fiddle-related magic
    applied. This means the instance is compatible with pickling, and is
    totally transparent to any inspections by user code (since it really is an
    instance of the original type). Note that the resulting partial class type
    itself is not pickle-compatible, only instances created from it (and then
    only if the partial class was not itself dynamically subclassed).

    Args:
        cls: The class whose metaclass's call method should be wrapped.

    Returns:
        A wrapped version of the `type(cls).__call__`.
    """
    cls_meta = type(cls)

    # functools.wraps preserves the metadata of the metaclass's __call__ so
    # the wrapper is indistinguishable from it to introspection.
    @functools.wraps(cls_meta.__call__)
    def meta_call_wrapper(new_cls: Type[object], *args, **kwargs):
        # If `new_cls` (the to-be-created class) is a direct subclass of `cls`, we
        # can be sure that it's Fiddle's dynamically created subclass. In this case,
        # we directly create an instance of `cls` instead. Otherwise, some further
        # dynamic subclassing by user code has likely occurred, and we just create
        # an instance of `new_cls` to avoid issues. This instance is likely not
        # compatible with pickle, but that's generally true of dynamically created
        # subclasses and would require some user workaround with or without Fiddle.
        if new_cls.__bases__ == (cls,):
            new_cls = cls
        return cls_meta.__call__(new_cls, *args, **kwargs)

    return meta_call_wrapper
a2ff190ec4654d48816113859286033867ff96ca
118,794
def get_unlisted_setting(request):
    """Given a request object, return the resulting `unlisted` setting.

    - True if the `X-Unlisted` header is set to "true"
    - False if set to anything else or not set at all
    """
    # Fix: the original performed a redundant `in` membership test followed
    # by a `.get()` lookup; a single `.get()` comparison is equivalent
    # (a missing header yields None, which is != 'true').
    return request.headers.get('X-Unlisted') == 'true'
f98d095e8e897584dd1c59bbc7721e0f8c4705cb
118,795
def canonical_shape(shape):
    """Return shape as tuple of int or Symbol.

    This utility function ensures the shape tuple uses a single integer
    type (to its best effort).

    Args:
        shape: tuple(int|long|np.int*|Symbol|SymbolExpr...)
    """
    def _as_int(value):
        # In python2.7, long and int are different types. Casting a long
        # whose value is out of int range still yields long, avoiding
        # overflow. Symbolic values (sm.Symbol or sm.Expr) raise TypeError
        # and pass through unchanged.
        try:
            return int(value)
        except TypeError:
            return value

    return tuple(_as_int(value) for value in shape)
85fb283d798f9960db9aa2f02ec6becdf89664c3
118,798
def _get_named_code(error) -> int:
    """Gets the error code as an integer (e.g. 404) from the error string."""
    leading_digits = str(error)[:3]
    return int(leading_digits)
28aec57eccf2793c1efc084f0d86bb22eed973d9
118,803
def get_subword_frequency(vocab: dict) -> list:
    """Get the subwords frequency list.

    :param vocab: The input vocabulary dictionary
    :return: Subwords frequency list (values coerced to int)
    """
    return [int(freq) for freq in vocab.values()]
0496829e1e5bc392eac52bc0b201c7461aaa33f4
118,805
from typing import Tuple


def egcd(a: int, b: int) -> Tuple[int, int, int]:
    """Calculate the Greatest Common Divisor (gcd) of the integers `a` and `b`.

    Additionally calculates the Bézout coefficients `x`, `y` associated with
    a and b, all using the extended Euclidean algorithm, so that
    a*x + b*y == gcd.
    """
    swapped = a < b
    if swapped:
        a, b = b, a

    def _egcd_inner(a: int, b: int):
        """Small stub so the unnecessary compare/swap does not occur for
        recursion."""
        if a == 0:
            # No remainder left.
            return (b, 0, 1)
        gcd, x0, y0 = _egcd_inner(b % a, a)
        # Recursively "fill up" the Bézout coefficients.
        return (gcd, y0 - (b // a) * x0, x0)

    gcd, x, y = _egcd_inner(a, b)
    # If a swap took place, swap the associated coefficients back as well.
    return (gcd, y, x) if swapped else (gcd, x, y)
528b1e4f3678c3043250f80dd946cd37c8dfa900
118,811
def process_marks(file_contents):
    """(str) -> dict of str:float

    Parse the contents of a test results file (as a string), returning a
    map from criteria title to mark. Criteria titles need to be properly
    formatted, as they appear in the assignment's rubric (punctuation
    included). Marks need to be valid numerics, or 'nil'.
    """
    # Stub implementation: returns a fixed sample mapping regardless of input.
    return {'My Criterion 1.': 1.0, 'My Criterion 2.': 'nil'}
d1de9788c7334662b7487787ac863bd331ba11dc
118,812
def list_driver_connections(graph, driver=True):
    """Return all edges that are driver connections, i.e.,
    (params, objectives, constraints).

    If *driver* contains the name of a specific driver, then only return
    connections for that driver.
    """
    result = []
    for u, v, data in graph.edges_iter(data=True):
        conn = data.get('drv_conn')
        keep = bool(conn) if driver is True else conn == driver
        if keep:
            result.append((u, v))
    return result
838010ba662168f6f4da31901ae5b0a9cc6062ec
118,819
def _make_row(old, parent_key=None):
    """Flatten a nested dictionary into a single-level dictionary, joining
    nested keys with '.', and render list values as comma-joined strings.

    For example:
        {"metadata": {"durations": {"call": 38.6}}, "result": "failed"}
    becomes:
        {"metadata.durations.call": 38.6, "result": "failed"}

    :param old: The old dictionary to flatten
    :type old: dict
    :param parent_key: The key of the parent object
    :type parent_key: str | None
    :rtype: dict
    """
    flat = {}
    for key, value in old.items():
        full_key = "{}.{}".format(parent_key, str(key)) if parent_key else str(key)
        if isinstance(value, dict):
            flat.update(_make_row(value, full_key))
        elif isinstance(value, list):
            flat[full_key] = ",".join(str(item) for item in value)
        else:
            flat[full_key] = value
    return flat
894649a98d12d88f7543e5ede9383ec3703148b3
118,820
from typing import Union


def milimeter_mercury_to_atmospheres(mm_hg: float, unit: str) -> Union[float, str]:
    """This function converts mmHg to atm.

    Wikipedia reference: https://en.wikipedia.org/wiki/Standard_atmosphere_(unit)
    Wikipedia reference: https://en.wikipedia.org/wiki/Millimetre_of_mercury

    >>> milimeter_mercury_to_atmospheres(23506.92, "mmHg")
    30.93015789473684
    >>> milimeter_mercury_to_atmospheres("304000", "mmHg")
    400.0
    >>> milimeter_mercury_to_atmospheres(0, "mmHg")
    0.0
    >>> milimeter_mercury_to_atmospheres(35, "bar")
    'Invalid unit'
    >>> milimeter_mercury_to_atmospheres("merc", "mmHg")
    Traceback (most recent call last):
        ...
    ValueError: could not convert string to float: 'merc'
    """
    if unit != "mmHg":
        return "Invalid unit"
    # 1 atm = 760 mmHg by definition.
    return float(mm_hg) / 760
f7a3c06fc6baa2c2a245c60a9b147778ab8ff652
118,826
def join_seconds(splitted_seconds: dict) -> int:
    """Inverse of split_seconds: transform a dictionary with year, month,
    day, hour, minute and second keys into a total number of seconds.

    :param splitted_seconds: dictionary with the year, month, day, hour,
        minute or second keys.

    Example:
        >>> from phanterpwa.tools import join_seconds
        >>> join_seconds({'year': 1, 'hour': 1, 'second': 1})
        31539601
        >>> join_seconds({'year': 1, 'second': 3601})
        31539601
        >>> join_seconds({'year': 1, 'minute': 60, 'second': 1})
        31539601
    """
    if not isinstance(splitted_seconds, dict):
        raise ValueError("The splitted_seconds must be dict type. Given: {0}".format(type(splitted_seconds)))
    seconds_per_unit = {
        "year": 60 * 60 * 24 * 365,
        "month": 60 * 60 * 24 * 30,
        "day": 60 * 60 * 24,
        "hour": 60 * 60,
        "minute": 60,
        "second": 1,
    }
    return sum(splitted_seconds[unit] * factor
               for unit, factor in seconds_per_unit.items()
               if unit in splitted_seconds)
e8a9e2fb568b8bf9eb9b64db97015fdbca9f4920
118,828
def _line_length(line_elements):
    """Returns the character length of the serialized elements of the given
    line."""
    total = 0
    for element in line_elements:
        total += len(element.serialized())
    return total
43c5db5685758c6a6e42a408e1eadbb9e84ab2c4
118,836
def removeN(svgfile):
    """Removes the final character from every line; this is always '\\n',
    aka the newline character.

    Mutates the list in place and also returns it.
    """
    for index, line in enumerate(svgfile):
        svgfile[index] = line[:-1]
    return svgfile
21863018b4c6dc32f8228a535342869d4e290d6a
118,839
def get_flow_att(row, ts):
    """Collect Flow-specific attributes from a parameter-table row.

    Parameters
    ----------
    row : pd.Series
        Series with all attributes given by the parameter table (equal 1 row).
    ts : pd.DataFrame
        DataFrame with all input time series for the oemof-solph model.

    Returns
    -------
    dict : All Flow specific attributes. A value of 'series' is resolved
        against *ts* via the '<label>.<attr>' column; anything else is cast
        to float.
    """
    row = row.copy()
    row.dropna(inplace=True)
    flow_keys = [name.split('.')[1] for name in list(row.index)
                 if name.split('.')[0] == 'flow']
    flow_attr = {}
    for key in flow_keys:
        raw_value = row['flow.' + key]
        if raw_value == 'series':
            flow_attr[key] = ts[row['label'] + '.' + key].values
        else:
            flow_attr[key] = float(raw_value)
    return flow_attr
5562e038a58790e2e8b932b938075407ab3c13e4
118,843
def makeductcomponent(idf, dname):
    """Make a duct component; generate inlet and outlet node names from
    *dname*."""
    duct = idf.newidfobject("DUCT", Name=dname)
    duct.Inlet_Node_Name = f"{dname}_inlet"
    duct.Outlet_Node_Name = f"{dname}_outlet"
    return duct
d9bb3dcfb015c419c30f8a5fecdedd993bb1633c
118,849
def vol_format(x):
    """Format stock volume number to millions of shares.

    Params:
        x (numeric, like int or float): the number to be formatted

    Example:
        vol_format(10000000)  -> '10.0M'
        vol_format(3390000)   -> '3.4M'
    """
    millions = x / 1000000
    return "{:.1f}M".format(millions)
8da20a24c6c6373f271e6f0e94cf8205cb576cfc
118,852
import hmac
import hashlib


def keyed_hash(digest, passphrase):
    """Calculate a HMAC/keyed hash (HMAC-SHA256).

    :param digest: Message used to create the hash.
    :type digest: str
    :param passphrase: Passphrase (key) used to generate the hash.
    :type passphrase: str
    :returns: Raw HMAC/keyed hash.
    :rtype: bytes
    """
    mac = hmac.new(passphrase.encode(), digest.encode(), hashlib.sha256)
    return mac.digest()
41a233bdd981872eb3cf415440464e313ade9a7b
118,853
from datetime import datetime


def date_delta(y1, m1, d1, y2, m2, d2):
    """Return the (non-negative) timedelta between two dates, e.g.:

        d = date_delta(2015, 3, 2, 2016, 3, 2)
    """
    first = datetime(int(y1), int(m1), int(d1), 0, 0, 0)
    second = datetime(int(y2), int(m2), int(d2), 0, 0, 0)
    return second - first if second > first else first - second
f54f55cbfc8558fc5d1e7077e0b29b5d023fafbb
118,864
import typing


def is_won(correctness: typing.Tuple[int, int], board_width: int) -> bool:
    """Compares the left of correctness against the width of the board.

    :param correctness: A tuple whose first index is the number of colours
        in the correct position.
    :param board_width: Width of the board.
    :return: True if the first index equals the board width.
    """
    exact_matches = correctness[0]
    return exact_matches == board_width
75cf56aac01c7d18d30b2172d8f72731ab5f60bc
118,868
def args_to_list(items):
    """Convert an argument into a list of arguments (by splitting each
    element on comma). Empty elements and empty split pieces are dropped.
    """
    if items is None:
        return []
    return [
        piece
        for item in items if item
        for piece in (part.strip() for part in item.split(','))
        if piece
    ]
1c73ec092160f311c0fa663d4dc4b6a4b717d5c0
118,872
import random


def cxOnePoint(ind1, ind2):
    """Execute a one point crossover on the input individuals. The two
    individuals are modified in place. The resulting individuals will
    respectively have the length of the other.

    :param ind1: The first individual participating in the crossover.
    :param ind2: The second individual participating in the crossover.
    :returns: A tuple of two individuals.

    This function uses the :func:`~random.randint` function from the python
    base :mod:`random` module.
    """
    size = min(len(ind1), len(ind2))
    # Fix: when size < 2 there is no interior crossover point and
    # random.randint(1, size - 1) would raise ValueError (empty range);
    # return the individuals unchanged instead.
    if size < 2:
        return ind1, ind2
    cxpoint = random.randint(1, size - 1)
    ind1[cxpoint:], ind2[cxpoint:] = ind2[cxpoint:], ind1[cxpoint:]
    return ind1, ind2
5787afb9a7a8326f21771b67deefd75f061007a1
118,873
def vectorize_matrix(arr, order='C'):
    """Convert an NxN matrix into an N^2 one-dimensional array, row by row.

    'C' - C-style, row-by-row (default)
    'F' - Fortran-style, column-by-column

    :raises ValueError: if *arr* is not a square 2-D matrix or *order* is
        not 'C' or 'F'.
    """
    # Fix: input validation was done with `assert`, which is silently
    # stripped when Python runs with -O; raise explicit errors instead.
    if len(arr.shape) != 2 or arr.shape[0] != arr.shape[1]:
        raise ValueError("arr must be a square 2-D matrix")
    if order not in ('C', 'F'):
        raise ValueError("order must be 'C' or 'F'")
    return arr.flatten(order=order)
e8a599ed397fd53b8f158338a37855197d8238ba
118,874
import collections


def merge(queries, qp, qc, qrels):
    """Merge queries, qrels, <query, passage> pairs and <query, chunk>
    pairs into a single dict keyed by query id."""
    data = collections.OrderedDict()
    for qid in qc:
        passage_ids = list()
        labels = list()
        for passage_id in qp[qid]:
            doc_id = passage_id.split("_")[0]
            # Unjudged documents are left as non-relevant (label 0).
            labels.append(1 if doc_id in qrels[qid] else 0)
            passage_ids.append(passage_id)
        assert len(passage_ids) == len(labels)
        chunk_id_list = list(qc[qid].keys())
        data[qid] = (queries[qid], chunk_id_list, passage_ids, labels)
    return data
24665c962d588c24ab94d48593c52d129a7bab80
118,881
import pathlib
from typing import Dict
import yaml


def read_yaml(filename: pathlib.Path) -> Dict:
    """Reads a YAML file from disk.

    Args:
        filename: The filename to be read from disk.

    Returns:
        The contents of "filename" as a dictionary.
    """
    with open(str(filename), "r") as yaml_file:
        contents = yaml.safe_load(yaml_file)
    return contents
ee56e9279618fc73cbfae004af27ead903cb7402
118,884
def convert_value_to_list(dictionary, key):
    """Given a dictionary and a key, make sure the key value is a list type.
    If the key does not exist in the dictionary, just return None.

    :param dictionary: A dictionary instance.
    :param key: A dictionary key.
    :return: Return a list type (or None for a missing key).
    :raises ValueError: if *dictionary* does not support dict-style lookup.
    """
    try:
        value = dictionary.get(key)
    # Fix: the original bare `except:` swallowed every exception (including
    # KeyboardInterrupt/SystemExit); only the lookup failures dict.get can
    # actually produce are translated into ValueError.
    except (AttributeError, TypeError):
        raise ValueError("The value associated with '{}' is expected to be a list type - {}.".format(key, dictionary))
    if value is None:
        return None
    # isinstance (rather than `type(value) is list`) also accepts list
    # subclasses without needlessly wrapping them.
    return value if isinstance(value, list) else [value]
81a660eae693738f6d1f2205e688a0318db357c9
118,888
def is_neg(tok):
    """Is this token a negation (its dependency label starts with 'neg')."""
    dep_label = tok.dep_
    return dep_label.startswith('neg')
0f1aaf2af77cd7a59a6598e20b2936aa932f1a30
118,890
def determine_output_hash(crate_root, label):
    """Generates a hash of the crate root file's path.

    Args:
        crate_root (File): The crate's root file (typically `lib.rs`).
        label (Label): The label of the target.

    Returns:
        str: A string representation of the hash.
    """
    combined = hash(crate_root.path) + hash(repr(label))
    # hash() can be negative; the output hash must be non-negative.
    return repr(abs(combined))
87c1d5017672e8f0789c88bffcf928f1fd9f5030
118,891
import json


def load_json_db(db_file):
    """Load the data from a JSON file.

    :param db_file: the JSON database file
    :returns: a dictionary that contains the data
    """
    with open(db_file, 'r') as f:
        return json.load(f)
77e246a15b735a53f5105928e168362558f13129
118,894
def lights_off(connection, await_response=False):
    """Send a serial command over the given connection to turn off all
    lights.

    Returns True immediately unless *await_response* is set, in which case
    the device's acknowledgement line is compared to the expected reply.
    """
    connection.write(bytes("X0100", 'UTF-8'))
    if not await_response:
        return True
    reply = connection.readline().decode("UTF-8").strip()
    return reply == "Y0100"
b13b9714c76d96f1189db16271a279040fbded54
118,895
def rename_nodes(parse, old2new):
    """Replace NODE labels in a parse-tree list/dict structure.

    *old2new* is a dict mapping old labels to new ones. Dicts are mutated
    in place (and returned); lists are rebuilt.
    """
    if type(parse) is list:
        return [rename_nodes(child, old2new) for child in parse]
    if "NODE" in parse and parse["NODE"] in old2new:
        parse["NODE"] = old2new[parse["NODE"]]
    if parse.get("children"):
        parse["children"] = rename_nodes(parse["children"], old2new)
    return parse
278734890a27a442cb4874237375e58712f116eb
118,901
def _is_support_vcvars_ver(vc_full_version):
    """-vcvars_ver option is supported from version 14.11.25503
    (VS 2017 version 15.3)."""
    parsed = [int(part) for part in vc_full_version.split(".")]
    minimum = [14, 11, 25503]
    return parsed >= minimum
80691e6af506ea835cf7e8a972def1df1beb9a70
118,904
def gen_preprocess_options(macros, include_dirs):
    """Generate C pre-processor options (-D, -U, -I) as used by at least
    two types of compilers: the typical Unix compiler and Visual C++.

    'macros' is a list of 1- or 2-tuples, where (name,) means undefine
    (-U) macro 'name', and (name, value) means define (-D) macro 'name'
    to 'value' (value None means define with no explicit value).
    'include_dirs' is a list of directory names to be added to the header
    file search path (-I).  Returns a list of command-line options
    suitable for either Unix compilers or Visual C++.
    """
    # NOTE: redundant -D/-U pairs are not weeded out; compilers honour the
    # last mention of a macro on the command line, so duplicates are
    # harmless (if a little ugly).
    opts = []
    for macro in macros:
        if not (isinstance(macro, tuple) and 1 <= len(macro) <= 2):
            raise TypeError(
                "bad macro definition '%s': "
                "each element of 'macros' list must be a 1- or 2-tuple"
                % macro)
        if len(macro) == 1:
            # 1-tuple: undefine this macro.
            opts.append("-U%s" % macro[0])
        else:
            name, value = macro
            if value is None:
                # Define with no explicit value.
                opts.append("-D%s" % name)
            else:
                # No quoting needed: the command is spawned without a shell.
                opts.append("-D%s=%s" % (name, value))
    opts.extend("-I%s" % directory for directory in include_dirs)
    return opts
b7a4f023344705b2d21f44b13971c46c34a401a7
118,910
def readline_strip(expr):
    """
    Remove `readline hints`_ (the \\x01/\\x02 marker bytes) from a string.

    :param expr: The text to strip (a string).
    :returns: The stripped text.
    """
    for marker in ('\001', '\002'):
        expr = expr.replace(marker, '')
    return expr
aab2456ef6b9ace53dc253b72eb42df00d12e097
118,913
import pathlib


def includes_from_srcs(srcs) -> list:
    """Generates a list of unique include directories (parent dirs) for the
    given iterable of source path strings.

    FIX: the original used ``list(set(...))``, which made the result order
    depend on hash randomization.  ``dict.fromkeys`` deduplicates while
    preserving first-seen order, so the output is deterministic.
    """
    return list(dict.fromkeys(pathlib.Path(path).parent for path in srcs))
e6a1ac71eb17e6d3fa5a12a2eb8d2c63016a650f
118,920
from typing import Any
import json


def from_json(path: str) -> Any:
    """
    Reads input from a json file

    Args:
        path (str): Path of json file

    Returns:
        Any: Contents of json file
    """
    with open(path, "r") as handle:
        contents = json.load(handle)
    return contents
ae3c8b76001418c6e9bdcc93b0353e2541ccdc5c
118,921
import struct


def to_big_endian(value):
    """Pack *value* as a 4-byte big-endian unsigned integer byte string."""
    packer = struct.Struct(">L")
    return packer.pack(value)
5d8c533952542523e7be1957b639952fdac3212f
118,926
def validate_match_data(match_data):
    """
    validate_match_data performs basic match data validation by examining
    the following:

    1. Number of picks/bans present in data
    2. Presence of duplicate picks/bans
    3. Duplicate roles on a single side

    Args:
        match_data (dict): dictionary of formatted match data

    Returns:
        bool: True if match_data passes validation checks, False otherwise.
    """
    expected_bans = 10
    expected_picks = 10
    valid = True

    bans = match_data["bans"]["blue"] + match_data["bans"]["red"]
    picks = match_data["picks"]["blue"] + match_data["picks"]["red"]
    if len(bans) != expected_bans or len(picks) != expected_picks:
        print("Incorrect number of picks and/or bans found! {} picks, {} bans".format(len(picks), len(bans)))
        valid = False

    # Teams may fail to submit one or more bans (rare, but possible); those
    # are recorded as "none" and excluded from the duplicate check.
    champs = [b for b in bans if b != "none"] + [champ for (champ, _) in picks]
    if len(champs) != len(set(champs)):
        print("Duplicate submission(s) encountered.")
        tallies = {}
        for champ in champs:
            tallies[champ] = tallies.get(champ, 0) + 1
        print(sorted([(count, champ) for (champ, count) in tallies.items() if count > 1]))
        valid = False

    for side in ["blue", "red"]:
        side_picks = match_data["picks"][side]
        positions = set([pos for (_, pos) in side_picks])
        if len(positions) != len(side_picks):
            print("Duplicate position on side {} found.".format(side))
            valid = False

    return valid
ec86d5fa78816dda500089b110aecb9c23a6cc78
118,931
from typing import Union
from typing import Tuple


def map_value_to_range(
        input_value: Union[int, float],
        input_range: Tuple[Union[int, float], Union[int, float]],
        output_range: Tuple[Union[int, float], Union[int, float]]):
    """Scales the input value proportionately to match the output range."""
    in_lo, in_hi = input_range
    out_lo, out_hi = output_range
    # Position of the value within the input range, as a fraction of its span.
    fraction = float(input_value - in_lo) / float(in_hi - in_lo)
    return out_lo + fraction * (out_hi - out_lo)
366e468b9efa777e3d355b7335a0f253c22d23f8
118,932
from typing import Any
from typing import Optional
from typing import Sequence


def is_allowed_types(value: Any,
                     allowed_types: Optional[Sequence[Any]],
                     required: Optional[bool] = None) -> bool:
    """
    Check whether the type of value is in the allowed types.

    :param value: the value to check the type of
    :param allowed_types: a list of allowed types, to be checked with
        :func:`isinstance`. If *None*, no type check is made.
    :param required: when given, a None value is accepted exactly if the
        value is not required.
    :return: whether the type is allowed
    """
    if value is None and required is not None:
        return not required
    if allowed_types is None:
        return True
    for allowed in allowed_types:
        if allowed is None:
            # A literal None in the allowed list matches a None value.
            if value is None:
                return True
        elif isinstance(value, allowed):
            return True
    return False
2fd9dee9d83cd5b59b3486ab918ecd290552b6e6
118,934
def map_nested(dd, fn):
    """Map a function to a nested data structure (containing lists or
    dictionaries).

    Args:
        dd: nested data structure
        fn: function to apply to each leaf
    """
    if isinstance(dd, dict):
        return {key: map_nested(value, fn) for key, value in dd.items()}
    if isinstance(dd, list):
        return [map_nested(item, fn) for item in dd]
    # Leaf node: apply the function.
    return fn(dd)
42259d4aa88be59e56a774f6946c635c95bfff7f
118,937
import json


def load_pos_config(path: str) -> dict:
    """Load the saved positions configuration file.

    Parameters
    ----------
    path : str
        Path to the file to load.

    Returns
    -------
    dict
        Dictionary with position labels as the keys and a dictionary of
        the corresponding positions as values.
    """
    # FIX: removed the redundant explicit close() that was called inside
    # the `with` block -- the context manager already closes the file.
    with open(path, "r") as jsonfile:
        return json.load(jsonfile)
abc29c0f1d0632fabfc892e43bca1c639bf077b6
118,938
def _countries_to_dict(ls: list) -> dict: """ convert countries db in the form of a list to a dict :param ls: list :return: dict """ res = dict() for i in ls: res[i[0]] = i[1] return res
9bef8410c037f0adc3dcc7741fb661739e454c8e
118,945
def confirm(prompt: str, end: str = " [Y/n] "):
    """
    Confirm a task, prompting a string and returning True only for a
    'y'/'Y' answer.
    """
    answer = input(str(prompt) + str(end))
    return answer.lower() == "y"
50a7f9c16a6a98c4b81e3fdd426822b04d7f949e
118,946
def is_in_class(f):
    """Check if a function is part of a class.

    Args:
        f (function): Function to check.

    Returns:
        bool: `True` if `f` is part of a class, else `False`.
    """
    qualname_parts = f.__qualname__.split(".")
    if len(qualname_parts) < 2:
        # Top-level function: qualname is just the function name.
        return False
    # "<locals>" immediately before the name means it is nested in a
    # function, not defined on a class.
    return qualname_parts[-2] != "<locals>"
859f49c5f0127be8d6d1675ee33dc69d87f7db10
118,950
def add_arr_and_dict_to_list(files_list, array, dictionary):
    """
    Populates an empty list with an array and a dictionary (in that order).

    Args:
        files_list: empty list
        array: contains file paths
        dictionary: contains file names & how many times they appear

    Returns:
        The same list, now containing array and dictionary.
    """
    files_list.extend([array, dictionary])
    return files_list
2fc2d4b4d484ec90618c00d00beadab47b75353e
118,958
def makeBundlesDictFromList(bundleList):
    """Utility to convert a list of MetricBundles into a dictionary, keyed
    by the fileRoot names.

    Raises an exception if the fileroot duplicates another metricBundle.
    (Note this should alert to potential cases of filename duplication.)

    Parameters
    ----------
    bundleList : list of MetricBundles
    """
    bDict = {}
    for bundle in bundleList:
        root = bundle.fileRoot
        if root in bDict:
            raise NameError('More than one metricBundle is using the same fileroot, %s' % (root))
        bDict[root] = bundle
    return bDict
ae121beaa69f0f69e8d544dea7b8e208e7b609ca
118,961
def combine_images(im1, im2, alpha):
    """Blend two images as (1 - alpha) * im1 + alpha * im2."""
    im1_weight = 1 - alpha
    return im1_weight * im1 + alpha * im2
fbdde4519573f6e522e3fb0d96874d211e416433
118,965
def unpack(mapping, *keys):
    """Return the values stored in the given mapping under the given keys.

    Can be used for unpacking multiple values from a dictionary and
    assigning them to multiple variables at once: with more than one key
    a tuple of values (in key order) is returned; with a single key the
    bare value is returned.
    """
    # BUG FIX: the original iterated over `mapping` instead of `keys`,
    # returning every value in the mapping and ignoring the requested keys.
    if len(keys) > 1:
        return tuple(mapping[key] for key in keys)
    return mapping[keys[0]]
a2071192c19cb37d0788971ec214efa3ab43bb03
118,966
def unique(source):
    """
    Returns unique values from the list preserving order of initial list.

    :param source: An iterable.
    :type source: list

    :returns: List with unique values.
    :rtype: list
    """
    seen = set()
    result = []
    for item in source:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
ae30ede6c2520f690bdf4cd411593ea9826c5c07
118,967
import re def _sanitize(name): """Removes any characters in string name that aren't alnum or in '_. Any spaces will be replaced with underscores. If a name starts with an underscore or a period it's removed. Only alphanumeric characters, or underscores and periods, are allowed. Args: name : (str) The name to be sanitized. Raises: N/A Returns: (str) Sanitized name. """ if not name: # If not name, it's probably an empty string, but let's throw back # exactly what we got. return name # Replace any spaces with underscores name = name.replace(' ', '_') # If we start our string with an underscore or period, remove it if name[0] in '_.': name = name[1:] # a-z is all lowercase # A-Z is all uppercase # 0-9 is all digits # \. is an escaped period # _ is an underscore # Put them together, negate them by leading with an ^ # and our compiler will mark every non alnum, non ., _ character pattern = re.compile(r'[^a-zA-Z0-9\._-]+') # Then we sub them with nothing fixed = pattern.sub('', name) return fixed
68c63c3e08f4d3e95190d85f691464de9656da14
118,968
def case_transfer_matching(cased_text: str, uncased_text: str) -> str:
    """Transfers the casing from one text to another - assuming that they
    are 'matching' texts, alias they have the same length.

    Args:
        cased_text: Text with varied casing.
        uncased_text: Text that is in lowercase only.

    Returns:
        Text with the content of `uncased_text` and the casing of
        `cased_text`.

    Raises:
        ValueError: If the input texts have different lengths.
    """
    if len(cased_text) != len(uncased_text):
        raise ValueError(
            "'cased_text' and 'uncased_text' don't have the same length, use "
            "case_transfer_similar() instead"
        )
    transferred = []
    for template_char, content_char in zip(cased_text, uncased_text):
        if template_char.isupper():
            transferred.append(content_char.upper())
        else:
            transferred.append(content_char.lower())
    return "".join(transferred)
a98eb89556d7131cb199751f7005e6a5224c6f52
118,973
from typing import Type


def inheritable(x):
    """
    inheritable(x) -> bool

    Returns whether a class type can be inherited.

    Args:
        x (type): the input class name.

    Example::

        >>> inheritable(int)
        True

        >>> inheritable(bool)
        False
    """
    # BUG FIX: the old guard `isinstance(x, Type) or callable(x)` rejected
    # every real class (classes are instances of `type` and are callable),
    # so the function always returned False -- contradicting its own
    # doctest.  The correct precondition is simply "x must be a class".
    if not isinstance(x, type):
        return False
    try:
        class tmp(x):
            pass
        return True
    except TypeError:
        # Non-subclassable types (e.g. bool) raise TypeError here.
        return False
582edf51f6369ad89c23de97d6d759cea0b5786b
118,974
def scale_pc(data, scale):
    """
    Scale the point cloud by a scalar factor.

    Input:
        data: (B,num_points,3) tensor
        scale: scalar tensor
    Return:
        (B,num_points,3) tensor, scaled point cloud
    """
    scaled = data * scale
    return scaled
a7792c7f43797b02a6c28ae0bd0629590ae7cf0c
118,987
def get_output_text(cell):
    """Returns the output text of a cell.

    Args:
        cell (nbformat.NotebookNode): A cell from a NotebookNode
            (ex. `notebook_node.cells[0]`)

    Returns:
        str: The string output of the cell, or an empty string if the
        cell does not have any outputs.
    """
    if "outputs" not in cell:
        return ""
    pieces = []
    for output in cell.outputs:
        if "text" in output:
            pieces.append(output.text)
        if "data" in output and "text/plain" in output.data:
            pieces.append(output.data["text/plain"])
    return "".join(pieces)
b0ac25a42ac9c3f52d7edb6cacb945328fbb1fc2
118,991
import math


def euler_angles_from_rotation_matrix(R):
    """From the paper by Gregory G. Slabaugh, Computing Euler angles from
    a rotation matrix.

    psi, theta, phi = roll pitch yaw (x, y, z)

    Args:
        R: rotation matrix, a 3x3 numpy array
    Returns:
        a tuple with the 3 values psi, theta, phi in radians
    """

    def near(a, b, rtol=1.0e-5, atol=1.0e-8):
        # Same tolerance formula as the original helper: atol + rtol*|b|.
        return abs(a - b) <= atol + rtol * abs(b)

    if near(R[2, 0], -1.0):
        # Gimbal lock at theta = +pi/2; phi is fixed at 0.
        theta = math.pi / 2.0
        psi = math.atan2(R[0, 1], R[0, 2])
        phi = 0.0
    elif near(R[2, 0], 1.0):
        # Gimbal lock at theta = -pi/2; phi is fixed at 0.
        theta = -math.pi / 2.0
        psi = math.atan2(-R[0, 1], -R[0, 2])
        phi = 0.0
    else:
        theta = -math.asin(R[2, 0])
        ct = math.cos(theta)
        psi = math.atan2(R[2, 1] / ct, R[2, 2] / ct)
        phi = math.atan2(R[1, 0] / ct, R[0, 0] / ct)
    return psi, theta, phi
8fa13c10cf6459180607a588eebc0c585efeb46a
118,994
def get_future_cone(future_states, location, neighbors_fn):
    """
    Get the future light cone given future states at the provided location.
    The neighborhood radius grows by one per future step.

    neighbors_fn should take a state, location and radius and return the
    list of neighbors.
    """
    cone = []
    for step, state in enumerate(future_states, start=1):
        cone.append(neighbors_fn(state, location, step))
    return tuple(cone)
e663579f5ebf4f4790d7d4d6784973787a864361
118,995
def parse_header(data: str) -> tuple:
    """Parse a tab-separated header line; return the column names."""
    without_newline = data.rstrip('\n')
    return tuple(without_newline.split('\t'))
7487d8507a9835c89837d03fbe2a451ebc2014d0
118,998
def append_train_args(base_args, data_name, path, uncertainty):
    """
    Appends dataset and UQ specific args to the base train args for training.

    :dict base_args: basic training args
    :str data_name: name of dataset currently being used
    :str path: path of dataset currently being used
    :str uncertainty: UQ method being used
    """
    stem = data_name[:-4]  # drop the 4-char file extension (e.g. ".csv")
    merged = base_args.copy()
    merged.update({
        '--data_path': path,
        '--save_dir': 'ckpnts/' + uncertainty + '_' + stem,
        '--uncertainty': uncertainty,
        '--features_path': 'features/' + stem + '_features.npz',
    })
    return merged
093f7a36de38a6a5ae2be8e4713c35ba720cb653
119,004
def reverse_lookup(d, v):
    """Return a list of the keys in dictionary *d* that map to value *v*.

    Args:
        d (dict): dictionary in which to find all keys for value *v*.
        v (any): value whose corresponding keys should be collected.

    Returns:
        list: all keys in *d* whose value equals *v*.
    """
    return [key for key, value in d.items() if value == v]
b19df6a7c20b0c4f9ee8d2ec24aa206704b06357
119,005
def c_qbounds(model, m):
    """
    Constraint rule for an upper bound on the absolute difference:
    |QEST[m] - Q[m]| <= exp_precision * Q[m].

    Parameters
    ----------
    model : InvPBaseModel
    m : int
        photocounting number.
    """
    deviation = abs(model.QEST[m] - model.Q[m])
    upper = model.exp_precision * model.Q[m]
    return (0, deviation, upper)
7c12a1bf66d649de17feb43c1f1f596c170640e6
119,007
import math


def calc_base(vol, height):
    """
    Calculates the length of one side of the (square) base of a
    rectangular pyramid, given the volume and height (V = s^2 * h / 3).
    """
    base_area = (3 * vol) / height
    return math.sqrt(base_area)
7221897ac190a59e322326b894a4c798a8744828
119,013
import string
import re
import logging


def filename(text):
    """Normalizes text to be used as a filename.

    Args:
        text (str): Text to be converted into a filename acceptable string.

    Example:
        >>> filename('Intel Core(TM) i7-7700 CPU')
        'intel_core_i7-7700_cpu'
        >>> filename('Intel(R) Xeon(R) Platinum 8180 CPU @ 2.50GHz')
        'intel_xeon_platinum_8180_cpu_2.50ghz'

    Returns:
        String: The filename ready text.
    """
    # FIX: removed the dead `try/except IOError` -- none of these string or
    # regex operations can raise IOError, and on any exception the function
    # would have implicitly returned None after logging.
    invalid_chars = "[^-_. {}{}]+".format(string.ascii_letters, string.digits)
    # Convert to lowercase and remove parenthesised annotations like (TM)/(R).
    text = re.sub(r"\([^)]*\)", "", text.lower())
    # Remove invalid characters (anything outside letters/digits/'-_. ').
    text = re.sub(invalid_chars, "", text)
    # Collapse runs of whitespace to a single space.
    text = re.sub(r"\s+", " ", text)
    # Spaces to underscores (after trimming edges).
    return re.sub(r" ", "_", text.strip())
001ff4c9fa1951ed69cdf5837f470f0a98f8cfd8
119,015
def all_equal(sequence):
    """Returns true if all elements of *sequence* are equal
    (vacuously true for an empty sequence)."""
    if not sequence:
        return True
    first = sequence[0]
    return all(item == first for item in sequence[1:])
e49b9d81b22e95e944a3daa566c4d56e3168cf46
119,023