def preprocess_data_for_clustering(df):
    """Prepare data in order to apply a clustering algorithm

    Parameters
    ----------
    df : pandas.DataFrame
        Input data, *i.e.* city-related timeseries, supposed to have
        `station_id`, `ts` and `nb_bikes` columns

    Returns
    -------
    pandas.DataFrame
        Simplified version of `df`, ready to be used for clustering
    """
    # Filter out inactive stations
    max_bikes = df.groupby("station_id")["nb_bikes"].max()
    unactive_stations = max_bikes[max_bikes == 0].index.tolist()
    df = df[~df['station_id'].isin(unactive_stations)]
    # Set timestamps as the DataFrame index
    # and resample it with 5-minute periods
    df = (df.set_index("ts")
          .groupby("station_id")["nb_bikes"]
          .resample("5T")
          .mean()
          .bfill())
    df = df.unstack(0)
    # Drop weekend records
    df = df[df.index.weekday < 5]
    # Gather data by hour of the day
    df['hour'] = df.index.hour
    df = df.groupby("hour").mean()
    return df / df.max()
deabc1feb34f1e8bf1fd4b4575964dc666552cfa
72,899
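A minimal usage sketch for preprocess_data_for_clustering above; the station id and bike counts are illustrative, not from any real feed:

import pandas as pd

ts = pd.date_range("2021-03-01", periods=288, freq="5T")  # a Monday, 24h of 5-min steps
df = pd.DataFrame({"station_id": 1, "ts": ts, "nb_bikes": range(288)})
hourly = preprocess_data_for_clustering(df)
print(hourly.shape)  # (24, 1): 24 hourly rows, one column per active station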
def gen_barcode_digit(engine):
    """Pick a random number between 0 and 9"""
    return int(engine.triangular(0, 9))
28c9d5ecdd6779d609a5f5d1c97101e3cc493961
72,901
import re


def ppdInfo(filename):
    """
    Get information from the printer's installed PPD file.
    All keys will be set to blank strings if they could not be found in the PPD.
    """
    textfile = open(filename, 'r')
    filetext = textfile.read()
    textfile.close()

    info = {}

    # raw strings avoid invalid-escape warnings on '\*'
    matches = re.findall(r'\*Manufacturer:[ \t]*\"(.*)\"', filetext)
    if len(matches) > 0:
        info['Manufacturer'] = matches[0]
    else:
        info['Manufacturer'] = ''

    matches = re.findall(r'\*ModelName:[ \t]*\"(.*)\"', filetext)
    if len(matches) > 0:
        info['ModelName'] = matches[0]
    else:
        info['ModelName'] = ''

    matches = re.findall(r'\*NickName:[ \t]*\"(.*)\"', filetext)
    if len(matches) > 0:
        info['NickName'] = matches[0]
    else:
        info['NickName'] = ''

    #
    # Update the ModelName and NickName to include the Manufacturer
    # if they do not already.
    #
    if len(info['ModelName']) > 0:
        if info['ModelName'].find(info['Manufacturer']) != 0:
            info['ModelName'] = info['Manufacturer'] + " " + info['ModelName']
        if info['NickName'].find(info['Manufacturer']) != 0:
            info['NickName'] = info['Manufacturer'] + " " + info['NickName']

    return info
553c89581b9be529872ead9f97e60358a27a405c
72,903
def big_sorting(unsorted):
    """Hackerrank Problem: https://www.hackerrank.com/challenges/big-sorting/problem

    Consider an array of numeric strings where each string is a positive number with
    anywhere from 1 to 10^6 digits. Sort the array's elements in non-decreasing, or
    ascending order of their integer values and print each element of the sorted
    array on a new line.

    Solve:
    We store a dictionary where the key is the length of the integer and the value is
    a list of integers that fit that length / key. Then, we just extend a list with
    each sorted sub list starting from the smallest key

    Args:
        unsorted (list): Array of unsorted integers

    Returns:
        list: the sorted list of integers
    """
    sorted_dict = {}
    for i in unsorted:
        if len(str(i)) in sorted_dict.keys():
            sorted_dict[len(str(i))].append(i)
        else:
            sorted_dict[len(str(i))] = [i]
    sorted_list = []
    for k in sorted(sorted_dict.keys()):
        sorted_list.extend(sorted(sorted_dict[k]))
    return sorted_list
909ca4844b943a34e65b730f8efe43e355593724
72,905
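A quick usage sketch for big_sorting above, with numeric strings as in the original problem:

result = big_sorting(["31415926535897932384626433832795", "1", "3", "10", "3", "5"])
print(result)  # ['1', '3', '3', '5', '10', '31415926535897932384626433832795']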
from pathlib import Path


def get_test_file_path(filename):
    """Given a filename prepend it with the correct test data location"""
    return str(Path(__file__).resolve().parent / "assets" / filename)
45bae871be3802cb50a3ac74a6282f3932b9e8af
72,906
import base64


def b64encode(data: str, urlsafe: bool = False) -> str:
    """Accepts a string and returns the Base64 encoded representation of this string.

    `urlsafe=True` encodes string as urlsafe base64
    """
    if urlsafe:
        b64 = base64.urlsafe_b64encode(data.encode("ascii"))
        return b64.decode("ascii")

    b64 = base64.b64encode(data.encode("ascii"))
    return b64.decode("ascii")
7805052990646dd946ddfb2c1113dbf1bfebd866
72,911
def consolidate(arr):
    """Merges intersecting sets in a list of sets.

    Taken from:
    http://rosettacode.org/wiki/Set_consolidation#Python:_Iterative

    Recursive version will hit max recursion depth.
    """
    sets = [s for s in arr if s]
    for i, s1 in enumerate(sets):
        if s1:
            for s2 in sets[i+1:]:
                if s1.intersection(s2):
                    s2.update(s1)
                    s1.clear()
                    s1 = s2
    return [s for s in sets if s]
6067e723cff58475f7be43224178373b1099f4f5
72,916
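A usage sketch for consolidate above; the sets are illustrative:

groups = [{1, 2}, {3, 4}, {2, 3}, {9}]
print(consolidate(groups))  # [{1, 2, 3, 4}, {9}]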
def translate_severity(sev):
    """
    Translate alert severity to demisto
    """
    if sev in ['Medium', 'High']:
        return 3
    if sev == 'Low':
        return 2
    return 0
a8de42c2ca3445a014633b1b5af6c4baf0d2bea7
72,917
def exclude_if_local(target_function):
    """A member function of Engine wrapped with this function will not
    execute if the Engine is in "local" mode."""
    def _wrapper(self, *args, **kwargs):
        if self.is_local:
            return None
        return target_function(self, *args, **kwargs)
    return _wrapper
2fa99ab972a47e99d254357c891c1b36bfd3672b
72,918
def build_cz_text(cz_data, prefix):
    """
    Create a summary of Conflict Zone activity
    """
    if cz_data == {}:
        return ""

    text = ""
    if 'l' in cz_data and cz_data['l'] != '0' and cz_data['l'] != '':
        text += f"{cz_data['l']}xL "
    if 'm' in cz_data and cz_data['m'] != '0' and cz_data['m'] != '':
        text += f"{cz_data['m']}xM "
    if 'h' in cz_data and cz_data['h'] != '0' and cz_data['h'] != '':
        text += f"{cz_data['h']}xH "

    if text != '':
        text = f".{prefix} {text}"

    return text
d8fdb8df8296df87745b96ea799394afd9e97c57
72,924
def to_bulk(a, size=100):
    """Transform a list into a list of lists. Each element of the new list
    is a list with size=100 (except the last one).
    """
    r = []
    qt, rm = divmod(len(a), size)
    i = -1
    for i in range(qt):
        r.append(a[i * size:(i + 1) * size])
    if rm != 0:
        r.append(a[(i + 1) * size:])
    return r
86eb380efd019fa99d8945dc22d25a0dc8d1462e
72,925
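A usage sketch for to_bulk above; the input values are illustrative:

chunks = to_bulk(list(range(10)), size=4)
print(chunks)  # [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]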
def format_bucket_prefix(base_prefix: str, dirname: str) -> str:
    """Format an S3 bucket key prefix by joining a base prefix with a
    directory name.
    """
    base_prefix = base_prefix.rstrip("/").lstrip("/")
    dirname = dirname.lstrip("/")
    prefix = "/".join((base_prefix, dirname))
    if not prefix.endswith("/"):
        prefix = prefix + "/"
    return prefix
567d007e2afee48832fe90ad871a698951580c69
72,932
def remove_punctuation(input_string):
    """Return a str with punctuation chars stripped out"""
    tran_table = str.maketrans('', '', '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~')
    output_string = input_string.translate(tran_table)
    return output_string
f8b01b58563bdd0d2583558bdf7063126a47f229
72,934
def test_pkgrepo_with_architectures(pkgrepo, grains, sources_list_file, subtests):
    """
    Test managing a repo with architectures specified
    """
    name = "deb {{arch}}http://foo.com/bar/latest {oscodename} main".format(
        oscodename=grains["oscodename"]
    )

    def _get_arch(arch):
        return "[arch={}] ".format(arch) if arch else ""

    def _run(arch=None, test=False):
        return pkgrepo.managed(
            name=name.format(arch=_get_arch(arch)),
            file=sources_list_file,
            refresh=False,
            test=test,
        )

    with subtests.test("test=True"):
        # Run with test=True
        ret = _run(test=True)
        assert ret.changes == {"repo": name.format(arch="")}
        assert "would be" in ret.comment
        assert ret.result is None

    with subtests.test("test=False"):
        # Run for real
        ret = _run()
        assert ret.changes == {"repo": name.format(arch="")}
        assert ret.comment.startswith("Configured")
        assert ret.result is True

    with subtests.test("test=True repeat"):
        # Run again with test=True, should exit with no changes and a True
        # result.
        ret = _run(test=True)
        assert not ret.changes
        assert "already" in ret.comment
        assert ret.result is True

    with subtests.test("test=False repeat"):
        # Run for real again, results should be the same as above (i.e. we
        # should never get to the point where we exit with a None result).
        ret = _run()
        assert not ret.changes
        assert "already" in ret.comment
        assert ret.result is True

    expected_changes = {
        "line": {
            "new": name.format(arch=_get_arch("amd64")),
            "old": name.format(arch=""),
        },
        "architectures": {"new": ["amd64"], "old": []},
    }

    with subtests.test("test=True arch=amd64"):
        # Run with test=True and the architecture set. We should get a None
        # result with some expected changes.
        ret = _run(arch="amd64", test=True)
        assert ret.changes == expected_changes
        assert "would be" in ret.comment
        assert ret.result is None

    with subtests.test("test=False arch=amd64"):
        # Run for real, with the architecture set. We should get a True
        # result with the same changes.
        ret = _run(arch="amd64")
        assert ret.changes == expected_changes
        assert ret.comment.startswith("Configured")
        assert ret.result is True

    with subtests.test("test=True arch=amd64 repeat"):
        # Run again with test=True, should exit with no changes and a True
        # result.
        ret = _run(arch="amd64", test=True)
        assert not ret.changes
        assert "already" in ret.comment
        assert ret.result is True

    with subtests.test("test=False arch=amd64 repeat"):
        # Run for real again, results should be the same as above (i.e. we
        # should never get to the point where we exit with a None result).
        ret = _run(arch="amd64")
        assert not ret.changes
        assert "already" in ret.comment
        assert ret.result is True

    expected_changes = {
        "line": {
            "new": name.format(arch=""),
            "old": name.format(arch=_get_arch("amd64")),
        },
        "architectures": {"new": [], "old": ["amd64"]},
    }

    with subtests.test("test=True arch=None"):
        # Run with test=True and the architecture set back to the original
        # value. We should get a None result with some expected changes.
        ret = _run(test=True)
        assert ret.changes == expected_changes
        assert "would be" in ret.comment
        assert ret.result is None

    with subtests.test("test=False arch=None"):
        # Run for real, with the architecture set. We should get a True
        # result with the same changes.
        ret = _run()
        assert ret.changes == expected_changes
        assert ret.comment.startswith("Configured")
        assert ret.result is True

    with subtests.test("test=True arch=None repeat"):
        # Run again with test=True, should exit with no changes and a True
        # result.
        ret = _run(test=True)
        assert not ret.changes
        assert "already" in ret.comment
        assert ret.result is True

    with subtests.test("test=False arch=None repeat"):
        # Run for real again, results should be the same as above (i.e. we
        # should never get to the point where we exit with a None result).
        ret = _run()
        assert not ret.changes
        assert "already" in ret.comment
        assert ret.result is True
9e6ae74c792e2f5df05ac936624c62d096e483a4
72,942
def block_num_from_hash(block_hash: str) -> int:
    """
    return the first 4 bytes (8 hex digits) of the block ID (the block_num)

    Args:
        block_hash (str):

    Returns:
        int:
    """
    return int(str(block_hash)[:8], base=16)
8133afdbc13c37e28093619efbc69100001e0506
72,947
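A quick usage sketch for block_num_from_hash above, with an assumed block ID:

block_id = "004ca4f40dd4d2fae63705c052a222ea776a348d"
print(block_num_from_hash(block_id))  # 5022964 (0x004ca4f4)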
from typing import List


def is_valid_rule_two(character_indices_policy: List[int], character: str, password: str) -> bool:
    """
    Return True if a password abides by its associated policy else return False

    The policy is represented as follows:
        '[1-3] a' --> the character 'a' should occur either as the first or
        the third character, but not both

    DOCTEST
    >>> is_valid_rule_two(character_indices_policy=[6, 10], character='p', password='ctpppjmdpppppp')
    True
    >>> is_valid_rule_two(character_indices_policy=[6, 10], character='p', password='ctppppmdpppppppp')
    False
    >>> is_valid_rule_two(character_indices_policy=[6, 10], character='p', password='ctpppmdpplppppp')
    False
    """
    character_indices = [i + 1 for i, c in enumerate(password) if c == character]
    if len(set(character_indices_policy).intersection(set(character_indices))) == 1:
        return True
    return False
0f7771b4d12272f9d377e156aed2dbd50ea6f9f7
72,949
import base64


def b64encode(t):
    """Encoder using Base64.

    >>> b64encode("Hola mundo")
    'SG9sYSBtdW5kbw=='
    """
    # encode to bytes for Python 3, then decode the result back to str,
    # otherwise base64.b64encode raises TypeError on a str input
    return base64.b64encode(t.encode()).decode()
e527986f875517adb971aebdeb820d01176887ca
72,950
import colorsys


def rgb2hsv(rgb_tuple):
    """Converts RGB floats to HSV."""
    r = rgb_tuple[0]
    g = rgb_tuple[1]
    b = rgb_tuple[2]
    hsv_tuple = colorsys.rgb_to_hsv(r, g, b)
    return hsv_tuple
7f02d6efa7abc41698f561b12b3f758b5f492450
72,951
def format_time_to_subrip_timecode(timestamp):
    """Returns a SubRip timecode string from a datetime.time object"""
    return timestamp.strftime("%H:%M:%S") + ",%s" % timestamp.strftime("%f")[:3]
53ad14e31a2b46133b6caf68116e268e84de9754
72,961
def get_rpath_attribute(root, element_path, attribute, default=None):
    """Returns the value of an attribute at an rpath.

    If more than one element exists with the same name, only the first is
    checked.

    Args:
        root: The XML element to search from.
        element_path: The path to the element.
        attribute: The name of the attribute to fetch.

    Returns:
        The attribute's value as a string if found, the value of `default`
        if the element lacks the attribute, or '' if the element itself is
        not found.
    """
    ns_url = 'http://schemas.android.com/apk/res/android'
    ns = {
        'android': ns_url,
    }

    elem = root.find(element_path, ns)
    if elem is None:
        return ''

    # ElementTree elements don't have the same helpful namespace parameter
    # that the find family does :(
    attrib_name = attribute.replace('android:', '{' + ns_url + '}')
    return elem.get(attrib_name, default)
54e84ccc4e72fcded3c9338aabf2962e517f7c24
72,970
def getTextFile(filename):
    """
    Returns a list where each element contains text from each line of the
    given text file.
    """
    return [line.rstrip() for line in open(filename, 'r').readlines()]
943965510b6a7bc00a2ddef1b089578937414749
72,972
def bbox_resize(bbox, scale_factor):
    """
    Resize the bbox according to the `scale_factor` saved for resizing the image.

    Args:
        bbox (ndarray): All gt boxes in an image, and the shape of `bbox` is `K x 4`
        scale_factor (float or int): The `scale_factor` used in resizing the image.

    Returns:
        resized_bbox (ndarray): The resized bbox by `scale_factor`
    """
    assert isinstance(scale_factor, (int, float))
    resized_bbox = bbox * scale_factor
    return resized_bbox
8596a84b3c7ee2179ac33282a709a3cedbd99672
72,974
from datetime import datetime


def _construct_message(msg):
    """Formats a message from a dictionary

    Parameters
    ----------
    msg: dict
        The message in dict form

    Returns
    -------
    str
        A stringified version of the message
    """
    date = datetime.fromisoformat(msg['timestamp'].split('.')[0])
    msg_string = f"Nytt trafikmeddelande för {msg['title']}, "
    msg_string += f"{date.month}/{date.day} {date.hour}:{date.minute}. "
    msg_string += f"{msg['priority']}, {msg['category']}: {msg['description']} "
    if msg['location'] is not None:
        msg_string += f"Exakt plats: {msg['location']}."
    return msg_string
be8ffa51bc99adc16826fdc85a5f34818fa5e0da
72,977
def _get_aggregates_by_provider(ctx, rp):
    """Returns a list of UUIDs of any aggregates for the supplied resource
    provider.
    """
    query = """
            MATCH (rp:RESOURCE_PROVIDER {uuid: '%s'})-[:ASSOCIATED]->
                (agg)
            RETURN agg.uuid AS agg_uuid
    """ % rp.uuid
    result = ctx.tx.run(query).data()
    return [rec["agg_uuid"] for rec in result]
c68ecbca0dd18726d04c0b00293a4e536b5c844c
72,985
def surface_tension(t_k=453.7):
    """Surface tension as a function of temperature

    Parameters
    ----------
    t_k : float, default=453.7
        K, temperature. Default is the melting point.

    Returns
    -------
    σ : float
        N/m, surface tension

    Davison, H.W. "Compilation of Thermophysical Properties of Liquid
    Lithium." NASA Technical Note TN D-4650. Cleveland, Ohio: Lewis
    Research Center, July 1968.

    "Using equation [below], the data are correlated with a standard
    deviation of +- 1.9 percent. The maximum difference between the data
    and the correlation is +5.2 percent."
    """
    σ = 0.447 - 1.07e-4 * t_k - 1.351e-8 * t_k**2
    return σ
f7f3e54d71fda918dd1aded6aef7b8d25ca38295
72,990
def _policy_profile_generator_xml(total_profiles):
    """
    Generate policy profile response in XML format.

    :param total_profiles: integer representing total number of profiles to
                           return
    """
    xml = ["""<?xml version="1.0" encoding="utf-8"?>
           <set name="virtual_port_profile_set">"""]
    template = (
        '<instance name="%(num)d"'
        ' url="/api/n1k/virtual-port-profile/%(num)s">'
        '<properties>'
        '<id>00000000-0000-0000-0000-00000000000%(num)s</id>'
        '<name>pp-%(num)s</name>'
        '</properties>'
        '</instance>'
    )
    xml.extend(template % {'num': n} for n in range(1, total_profiles + 1))
    xml.append("</set>")
    return ''.join(xml)
b2ee903466976aa8348f8e9a838f442b14d48976
72,996
def extended_euclidian(a, b):
    """
    returns (x, y, d) such that:
        x*a + y*b = d,  d = gcd(a, b)

    r_1 = a - b * q_1
    r_2 = b - r_1 * q_2
    ...
    r_n = r_n-2 - r_n-1 * q_n = a * x_n + b * y_n
    x_n = x_n-2 - x_n-1 * q_n
    y_n = y_n-2 - y_n-1 * q_n
    """
    # Naive version:
    # x2, y2 = 1, 0
    # x1, y1 = 0, 1
    # while b > 0:
    #     q, a, b = a // b, b, a % b
    #     x = x2 - x1 * q
    #     y = y2 - y1 * q
    #     x2, x1 = x1, x
    #     y2, y1 = y1, y
    # return x2, y2, a

    # Recursive version O(log^2(a))
    # suppose we know x1, y1 for (b, a%b) and a%b = a - b*q
    # then b*x1 + (a%b)*y1 = a*y1 + b*(x1 - y1*q)
    if a == b == 0:
        return 0, 0, 0
    if b == 0:
        return 1, 0, a
    x, y, d = extended_euclidian(b, a % b)
    return y, x - y * (a // b), d
8fc587f142150b42337c417ecd43c6260d2f048e
73,000
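A quick check of extended_euclidian above; the inputs are illustrative:

x, y, d = extended_euclidian(240, 46)
assert d == 2 and 240 * x + 46 * y == d  # yields x = -9, y = 47 here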
def straight_line(x, m, c):
    """
    A straight line model: y = m*x + c

    Args:
        x (list): a set of abscissa points at which the model is defined
        m (float): the gradient of the line
        c (float): the y-intercept of the line
    """
    return m * x + c
f74a0bb1663e4d340e7f47d5317f92a3840ed171
73,003
def strToDna(seqStr):
    """Returns str having been reduced to capital ACTG."""
    return "".join([c for c in seqStr if c in 'ACGTacgt']).upper()
055064e5f4ce6af91fb54cb5eaec0d246845c343
73,011
def convert_to_bool(value):
    """Function to convert string value to boolean

    Args:
        value (str): String value of the boolean value

    Returns:
        bool: True/False
    """
    if value.lower() == 'true':
        return True
    return False
44b8fc8d41669d6bd3f6e3eac96166505d1bc0fe
73,012
def nodes(G, t=None):
    """Return a list of the nodes in the graph at a given snapshot.

    Parameters
    ----------
    G : Graph object
        DyNetx graph object

    t : snapshot id (default=None)
        If None the method returns all the nodes of the flattened graph.

    Returns
    -------
    nlist : list
        A list of nodes. If data=True a list of two-tuples containing
        (node, node data dictionary).

    Examples
    --------
    >>> G = dn.DynGraph()  # or DiGraph, MultiGraph, MultiDiGraph, etc
    >>> G.add_path([0, 1, 2], 0)
    >>> dn.nodes(G, t=0)
    [0, 1, 2]
    >>> G.add_edge(1, 4, t=1)
    >>> dn.nodes(G, t=0)
    [0, 1, 2]
    """
    return G.nodes(t)
0f34f05ecce11ff0ae55e75608fb342db43368f4
73,015
def get_output_tensor_by_name(inference_response, name):
    """Find an output Tensor in the inference_response that has the given name

    Parameters
    ----------
    inference_response : InferenceResponse
        InferenceResponse object
    name : str
        name of the output Tensor object

    Returns
    -------
    Tensor
        The output Tensor with the specified name, or None if no output
        Tensor with this name exists
    """
    output_tensors = inference_response.output_tensors()
    for output_tensor in output_tensors:
        if output_tensor.name() == name:
            return output_tensor

    return None
4c730fbb2027fc0c333d88016826016743215016
73,017
def _ListToDictionary(lst, separator):
    """Splits each element of the passed-in |lst| using |separator| and
    creates a dictionary treating the first element of the split as the key
    and the second as the value."""
    return dict(item.split(separator, 1) for item in lst)
cd79b890377724bbe60346b306d25ebd5f564448
73,020
def is_error_string(return_value):
    """
    Check if return value is an error message

    >>> is_error_string(42)
    False
    >>> is_error_string("Some message")
    False
    >>> is_error_string("ERROR: some error message")
    True
    """
    return isinstance(return_value, str) and return_value.find("ERROR") != -1
7184f3491fccb2f9ef784f4f4bd2e8570b1872a6
73,021
from pathlib import Path


def find_level(filepath: Path, root: Path) -> int:
    """Find the directory depth level

    Arguments:
        filepath {Path} -- Path to the file or directory
        root {Path} -- Root directory path

    Returns:
        int -- Depth level

    Examples:
        >>> find_level(Path("./Documents/Configuration"), Path("."))
        2
    """
    filepath = filepath.relative_to(root)
    level = len(filepath.parts)
    return level
36a994f09005385e36ce9df752d76aa09d62b530
73,031
def zero_diag(Q):
    """
    Copy of Q altered such that diagonal entries are all 0.
    """
    Q_nodiag = Q.copy()
    for ii in range(Q.shape[0]):
        Q_nodiag[ii, ii] = 0
    return Q_nodiag
8c40fc58ee8e7af7a905de1d17b39c7d074cda17
73,034
def progress_bar(*, progress: int, maximum: int, per: int = 1) -> str:
    """Constructs a progress bar.

    .. versionadded:: 2.0

    .. versionchanged:: 3.0

        * Re-ordered the arguments to ``progress``, ``maximum``, and ``per``.
        * Raise :exc:`ValueError` if any of the following apply:

            * ``maximum`` is negative or 0.
            * ``per`` is negative, 0, or greater than ``maximum``.
            * ``progress`` is greater than ``maximum``.

    Parameters
    ----------
    progress: :class:`float`
        The value the progress bar is currently at.

        .. versionchanged:: 3.0
            This is now a keyword-only argument.
    maximum: :class:`int`
        The maximum value of the progress bar.

        .. versionchanged:: 3.0
            This is now a keyword-only argument.
    per: :class:`int`
        The value of each portion of the progress bar.
        Defaults to ``1``.

        .. versionchanged:: 3.0
            This is now a keyword-only argument.

    Returns
    -------
    :class:`str`
        The constructed progress bar.

    Raises
    ------
    ValueError
        An invalid ``per``, ``maximum``, or ``progress`` value was given.
    """
    if maximum <= 0:
        raise ValueError(f"invalid maximum {maximum} (must be > 0)")

    if per > maximum or per <= 0:
        raise ValueError(f"invalid per {per} (must be > 0 and < `maximum` value)")

    if progress > maximum:
        raise ValueError(f"invalid progress {progress} (must be < `maximum` value)")

    total = maximum // per
    filled = int(progress // per)

    # Have to subtract in these cases to account for
    # the edge pieces, since we're doing the simple-but-
    # not-so-simple approach of calculating the body.
    FR = "<:pb_r_f:786093987336421376>"
    EL = "<:pb_l_e:786093986745942037>"
    FB = "<:pb_b_f:786093986703605830>"
    EB = "<:pb_b_e:786093986233188363>"

    if filled == total:
        FL = "<:pb_l_f:786093987076374548>"
        return FR + FB * (total - 2) + FL

    if filled == 0:
        ER = "<:pb_r_e:786093986838347836>"
        return ER + EB * (total - 2) + EL

    return FR + FB * (filled - 1) + EB * (total - filled - 1) + EL
1d3e3bbeee5293e096839f0e56c486711ff8cbd5
73,039
def simple_split(X, train_len=None, test_len=None, valid_len=None):
    """
    Split the data in train-test-validation using the given dimensions for
    each set.

    :param X: numpy.array or pandas.DataFrame
        Univariate data of shape (n_samples, n_features)
    :param train_len: int
        Length in number of data points (measurements) for training.
        If None, it is inferred as whatever remains after the test set.
    :param test_len: int
        Length in number of data points (measurements) for testing
    :param valid_len: int
        Length in number of data points (measurements) for validation
    :return: list
        train: numpy.array, shape=(train_len, n_features)
        validation: numpy.array, shape=(valid_len, n_features)
        test: numpy.array, shape=(test_len, n_features)
    """
    if test_len is None:
        raise ValueError('test_len cannot be None.')
    if train_len is None:
        train_len = X.shape[0] - test_len
        valid_len = 0
    if valid_len is None:
        valid_len = X.shape[0] - train_len - test_len

    return X[:train_len], \
        X[train_len:train_len + valid_len], \
        X[train_len + valid_len:]
c6ab6a6687f8ca079d4dc679303282a6b93346e8
73,042
def whitespace_smart_split(command):
    """
    Split a command by whitespace, taking care to not split on whitespace
    within quotes.

    >>> whitespace_smart_split("test this \\"in here\\" again")
    ['test', 'this', '"in here"', 'again']
    """
    return_array = []
    s = ""
    in_double_quotes = False
    escape = False
    for c in command:
        if c == '"':
            if in_double_quotes:
                if escape:
                    s += c
                    escape = False
                else:
                    s += c
                    in_double_quotes = False
            else:
                in_double_quotes = True
                s += c
        else:
            if in_double_quotes:
                if c == '\\':
                    escape = True
                    s += c
                else:
                    escape = False
                    s += c
            else:
                if c == ' ':
                    return_array.append(s)
                    s = ""
                else:
                    s += c
    if s != "":
        return_array.append(s)
    return return_array
16f55605a3fdfd7eaf94a540c680bac6b9860fb7
73,046
def default_cost_function(segments, nodes, turn_cost=3000, intersection_cost=500,
                          min_travel_lanes_for_deadend_uturn=1, deadend_uturn_cost=0):
    """
    Return the default cost function for building the segment->segment
    network. Doesn't allow vehicles to go across a street from one
    blockface to another.

    Parameters
    ----------
    turn_cost : int
        The cost penalty for making a turn (Default 3000)
    intersection_cost : int
        The cost penalty for crossing an intersection (Default 500)
    min_travel_lanes_for_deadend_uturn : int
        The number of lanes required for the vehicle to make a u-turn on a
        dead end. For trucks it should be >= 2. (Default 1)
    deadend_uturn_cost : int
        The additional cost penalty for making a uturn. Used to
        disincentivize making a u-turn on a dead end, but not disallow it.
        (Default 0)

    Returns
    -------
    function(geocoder, segment_id_1, segment_id_2, node_id)
    """
    def cost_function(segment_id_1, segment_id_2, node_id):
        """
        A function that returns the cost of going from segment_id_1 to
        segment_id_2. If the transition isn't possible, return None.

        Parameters
        ----------
        geocoder : gomi.geocoder.Geocoder
            A reference to a Geocoder is passed so that the function can
            look up attributes of the segments and node.
        segment_id_1, segment_id_2 : str
            The segments
        node_id : str
            The node connecting the two segments
        """
        segment_id_1 = segment_id_1[:-1]
        segment_id_2 = segment_id_2[:-1]
        travel_lanes = segments[segment_id_1]['number_travel_lanes']
        if (
            # No transitions between the same segment
            (segment_id_1 != segment_id_2)
            or
            # Unless it's a dead end with enough travel lanes
            (
                (nodes[node_id]['dead_end'])
                and (travel_lanes >= min_travel_lanes_for_deadend_uturn)
            )
        ):
            cost = 0
            # If crossing physical IDs, it's an intersection
            if (segments[segment_id_1]['physical_id']
                    != segments[segment_id_2]['physical_id']):
                cost = intersection_cost
            # If the street codes don't overlap it's a turn
            if len(segments[segment_id_1]['street_code'].intersection(
                    segments[segment_id_2]['street_code']
            )) == 0:
                cost = turn_cost
            # Include half the length of each segment
            cost += segments[segment_id_1]['len'] / 2
            cost += segments[segment_id_2]['len'] / 2
            # If it's a dead end, add u-turn penalty
            if segment_id_1 == segment_id_2:
                cost += deadend_uturn_cost
            return cost

    return cost_function
3945b743514665542e88386cc50d3b7a52298205
73,047
def binary(num, length=8):
    """
    Format an integer to binary without the leading '0b'
    """
    return format(num, '0{}b'.format(length))
6c02431c7ae3c00415f23c812b3cd21eae7aaf4c
73,052
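A quick usage sketch for binary above:

print(binary(5))            # '00000101'
print(binary(5, length=4))  # '0101'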
def addFontColor(value, arg):
    """Return the given string between <font>...</font> markup.
    string is colorized by the color given in arg."""
    for color, status in (('green', 'UP'),
                          ('green', 'OK'),
                          ('red', 'CRITICAL'),
                          ('red', 'DOWN'),
                          ('gold', 'WARNING'),
                          ('violet', 'UNKNOWN')):
        if arg == status:
            return '<font color=' + color + '>' + value + '</font>'
    # Fall back to the uncolored value for unrecognized statuses (the
    # original implicitly returned None here).
    return value
73a0f89f8b65c0e8a76599d0df2fb0582a391b65
73,056
def to_page_range(entry):
    """
    Constructs a range object from a single entry for the pages it accessed.
    Range has increments of 8 because page accesses in the traces are in
    multiples of 8.
    """
    parts = entry.payload.split(' ')
    sector = int(parts[0])
    try:
        blocks = int(parts[2])
    except IndexError:
        blocks = 1
    return range(sector, sector + blocks, 8)
dddc960601f937953a150c3132048bb8823e1434
73,057
import torch


def invSquare(input, axis=1):
    """
    Apply inverse square normalization on input at a certain axis.

    Parameters:
    ----------
    input: Tensor (N*L or rank>2)
    axis: the axis to apply the normalization to

    Returns:
        Tensor with inverse square normalization applied on that dimension.
    """
    input_size = input.size()
    trans_input = input.transpose(axis, len(input_size) - 1)
    trans_size = trans_input.size()
    input_2d = trans_input.contiguous().view(-1, trans_size[-1])
    square_2d = input_2d ** (-2)
    sum_square_2d = torch.sum(square_2d, 1, keepdim=True)
    square_norm_2d = square_2d / sum_square_2d
    square_norm_nd = square_norm_2d.view(*trans_size)
    return square_norm_nd.transpose(axis, len(input_size) - 1)
ca7c7702ab1d36cf2ae474dbf10981bf1dab7f4b
73,062
def get_float_from_config(config, section, key, default=None):
    """
    Get a value from the config if it exists, otherwise return the default value

    :param config: The configuration to get the value from
    :param section: The section to get the value from
    :param key: The key that may or may not exist
    :param default: The default value to return, which is None by default
    :return: The value if it exists, else default
    """
    if config.has_option(section, key):
        return config.getfloat(section, key)
    else:
        return default
9831fa525f363fbe2bd75dc2eeedcc124ae4f55b
73,063
from typing import Tuple
import random


def estimate_probability(n: int, num_goat: int = 2) -> Tuple[float, float]:
    """Monty hall paradox

    Wiki: https://en.wikipedia.org/wiki/Monty_Hall_problem

    Parameters
    ----------
    :param n: number of repetitions of the experiment, n >= 1
    :type n: int
    :param num_goat: number of doors with goats, num_goat >= 1
    :type num_goat: int

    Returns
    -------
    :return: Tuple[float, float]
        The first element is the probability without changing the choice.
        The second element is the probability with choice change.
    """
    CAR, GOAT = 'car', 'goat'
    DOORS = [CAR] + [GOAT] * num_goat

    p_with_change = 0
    p_without_change = 0
    for _ in range(n):
        doors = DOORS[:]
        random.shuffle(doors)

        player_choice = random.choice(doors)
        p_without_change += (player_choice == CAR)

        doors.remove(player_choice)
        while len(doors) > 1:
            doors.remove(GOAT)
        p_with_change += (doors[0] == CAR)

    return p_without_change / n, p_with_change / n
42a5dfba0b688800905439a7c4ba25aea3e1bdd4
73,068
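A usage sketch for estimate_probability above; with the classic three doors the estimates approach 1/3 (stay) and 2/3 (switch):

random.seed(0)
p_stay, p_switch = estimate_probability(10_000)
print(round(p_stay, 2), round(p_switch, 2))  # roughly 0.33 0.67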
def _filter_by_country(tools, country):
    """Filters sliver tools based on the tool's country."""
    return filter(lambda t: t.country == country, tools)
a3e991e5cf2d9a307169064d793a8a57c522fe25
73,071
import re


def get_coincidences(text, coincidences, unique_values=False):
    """
    Looks for the coincidences list strings in the text variable and
    returns a list with the coincidences.

    :param text: a string
    :param coincidences: a list of strings. The strings to look for in the text.
    :param unique_values: a boolean. True if we want to drop duplicate values found.
    :return: a list of strings. The coincidences list strings found in text
    """
    try:
        expected_coincidences = '|'.join(coincidences)
        coincidences_list = re.findall(expected_coincidences, text)
        if unique_values:
            uniques = []
            for e in coincidences_list:
                if e not in uniques:
                    uniques.append(e)
            coincidences_list = uniques
        return coincidences_list
    except Exception:
        return []
804c0aa62f8e8e7fc9e9f1f1788d8c4185ee0f72
73,073
import ipaddress


def ip_itoa(ip: int, v6: bool) -> str:
    """Converts an IP integer 'ip' to a string."""
    if v6:
        return str(ipaddress.IPv6Address(ip))
    else:
        return str(ipaddress.IPv4Address(ip))
953b4d83e6e1906d68a89a0c757a67abe5c7b21d
73,074
from typing import List
from typing import Set


def get_unique_values(rows: List[List], column_index: int) -> Set:
    """Find the unique values for a column in a dataset.

    Args:
        rows: The rows of the dataset.
        column_index: Column index to get the unique values of.

    Returns:
        A set of the unique values for the given column index.
    """
    return set([row[column_index] for row in rows])
d3cb1b503af483f214b859d2cd245c3bd1cf4bcb
73,076
def perm_or_factory(perm, *args, **kwargs):
    """Check if perm is a factory (callable) and if so, apply arguments.
    If not, just return the perm."""
    if callable(perm):
        return perm(*args, **kwargs)
    return perm
01d6c219009926e335bca8d4e988172cb4ffe3b1
73,077
import pathlib
import zoneinfo


def get_zoneinfo_path() -> pathlib.Path:
    """Get the first zoneinfo directory on TZPATH containing the "UTC" zone."""
    key = "UTC"
    for path in map(pathlib.Path, zoneinfo.TZPATH):
        if (path / key).exists():
            return path
    else:
        raise OSError("Cannot find time zone data.")
1df867dd946a097692bc7a450d5d7d7d0fdd67cb
73,078
import torch


def numpy_to_torch32(numpy_array):
    """Convert numpy array into torch float32 tensor"""
    return (torch.from_numpy(numpy_array)).to(torch.float32)
61b76ca89a68df17d103754a22c6444618fd913e
73,081
def check_blank_line(message):
    """Check if there is a blank line between subject and a paragraph."""
    splitted = message.splitlines()
    if len(splitted) > 1:
        # check should only be needed for multiline commit messages
        check = not splitted[1]
    else:
        check = True
    return check
956d6dc70249d74ef8190a795b9a6260ce9cc477
73,086
def _starts_with_space(line, return_on_blank=True):
    """
    returns true if line starts with space

    :line: the line to be examined
    :return_on_blank: what to return if line == ""
    :returns: True if starts with space, False else
    """
    try:
        return line[0] == ' '
    except IndexError:
        return return_on_blank
fe992032343f79e321705e1d4f7cca25acc46d9a
73,087
def make_shortcode(content: str, shortcode: str, *arguments: str, **kwargs: str) -> str:
    """Returns a shortcode built from the arguments, with the form
    {{< shortcode *args **kwargs >}}content{{< / shortcode >}}"""
    # Fill in each key=value pair (the original produced literal "{}={}"
    # strings because the format call was missing).
    key_value_pairs: str = " ".join(
        ["{}={}".format(key, value) for key, value in kwargs.items()]
    )
    return "{{{{< {0} {1} {2} >}}}}{3}{{{{< / {0} >}}}}".format(
        shortcode, " ".join(arguments), key_value_pairs, content
    )
0893c5b9cab2ccb00bed9b27b09c8757fb85b9f0
73,089
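A usage sketch for make_shortcode above (with the key=value fix applied); the shortcode name and arguments are illustrative:

print(make_shortcode("Careful!", "warning", "simple", color="red"))
# {{< warning simple color=red >}}Careful!{{< / warning >}}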
def graph_resources(processor, graph):
    """Create graph with the overall hierarchy of resources."""
    if len(processor.organizations) > 0:
        for org in processor.organizations:
            graph.graph_organization(org)
    elif len(processor.folders) > 0:
        for folder in processor.folders:
            graph.graph_folder(folder)
    else:
        for project in processor.projects:
            graph.graph_project(project)
    return graph
1bfeb439b512f260a45f4a0fa92c6b7062f19997
73,093
def gamma_PML(x, gamma, PML_start, PML_thk):
    """
    Polynomial stretching profile for a perfectly matched layer.

    Parameters:
        x : physical coordinate
        gamma : average value of the profile
        PML_start : where the PML starts
        PML_thk : thickness of the PML

    Returns:
        the value of the profile function at x
    """
    return 1 + 3*(gamma - 1)*((abs(x - PML_start))/PML_thk)**2
3d5bcc5aeb997e429a000b51957c61dd00661a45
73,094
def normalize_version_number(version_number):
    """Clean up the version number extracted from the header

    Args:
        version_number (str): Version number to normalize.

    Returns:
        The normalized version number.
    """
    return version_number.replace('.', '_')
7cd50b850ed132a7b5e07df3596f07fa3d396a0e
73,096
import json


def read_json_file(filename):
    """
    Reads Json file

    Parameters:
        filename (string): filename of json file to be read

    Returns:
        list of people and their stress level
    """
    try:
        with open(filename, "r", encoding='UTF-8') as read_file:
            data = json.load(read_file)
            return data
    except IOError:
        with open(filename, "w+", encoding='UTF-8') as read_file:
            read_file.write("{}")
            # rewind before loading, otherwise json.load sees an empty stream
            read_file.seek(0)
            data = json.load(read_file)
            return data
647548135898ba06856ca647edb74332d324aafd
73,097
def SplitListByGrouping(input_list, x):
    """
    Receive a list of items. If the length is longer than x, then split the
    list into several lists, each with a maximum of x elements. This is then
    returned as a 2D array.

    Args:
        input_list: list of items in any format or mixed. e.g. ['ABC', 23590.34, 23]
            If a single string is inputted, then the string is split
            e.g. 'a235235' -> ['a23', '523', '5']
        x: maximum length to be allowed for any single list. e.g. 100

    Returns:
        outputList: list of items, where each item is a list of maximum length x.
    """
    # note: the parameter was renamed from `list` to avoid shadowing the builtin
    outputList = []
    index = 0
    while index < len(input_list):
        item = input_list[index:(index + x)]
        outputList.append(item)
        index += x
    return outputList
87866fe3a1e62ec0421080e20b6880335d84f18c
73,098
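A usage sketch for SplitListByGrouping above; the inputs are illustrative:

print(SplitListByGrouping(['ABC', 23590.34, 23, 'x', 'y'], 2))
# [['ABC', 23590.34], [23, 'x'], ['y']]
print(SplitListByGrouping('a235235', 3))  # ['a23', '523', '5']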
from typing import List


def producing_node_identifier(nodes: List[str]) -> dict:
    """Function to assign a unique identifier to the nodes

    :param nodes: list of nodes in the PPI file
    :return: dict containing identifier, node pairs
    """
    # assigning node identifier
    node_list = {(index + 1): node for index, node in enumerate(nodes)}
    return node_list
d3d9dbc79563f563d3ba9570b7e824f09ca25b96
73,101
import re


def sanitize_input_name(name: str) -> str:
    """Sanitizes input name."""
    return re.sub('[^_0-9a-z]+', '_', name.lower()).lstrip('_').rstrip('_')
84401f8da2f214e0a14afac8f84b80f28262e95c
73,103
def trim_slice(lines, slice_tuple):
    """
    Trim a slice tuple (begin, end) so it starts at the first non-empty line
    (obtained via indented_tree_line_generator / get_line_info) and ends at
    the last non-empty line within the slice. Returns the new slice.
    """
    def _empty(line):
        return not line or line.strip() == '>'

    if not slice_tuple:
        return None
    slice_start, slice_end = slice_tuple
    if slice_start is None:
        slice_start = 0
    if slice_end is None:
        slice_end = len(lines)

    # Trim from beginning
    while slice_start < slice_end and _empty(lines[slice_start]):
        slice_start += 1
    # Trim from end
    while slice_end > slice_start and _empty(lines[slice_end - 1]):
        slice_end -= 1
    return (slice_start, slice_end)
dd9bdb26eb7fddd2c856bb5a4b7a5acbf83ba59d
73,105
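A usage sketch for trim_slice above; the lines are illustrative:

lines = ['', '>', 'alpha', 'beta', '', '']
print(trim_slice(lines, (0, 6)))  # (2, 4)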
def create_core_file(duthost, docker_name=None):
    """
    Create .core file in SONiC or inside a docker container

    :param duthost: duthost object
    :param docker_name: docker name - in case when you need to create .core
        file inside a docker container
    """
    cmd = 'bash /etc/sonic/core_file_generator.sh'
    if docker_name:
        cmd = 'docker exec {} {}'.format(docker_name, cmd)

    available_core_files = duthost.shell('ls /var/core/')['stdout_lines']
    duthost.shell(cmd)
    new_available_core_files = duthost.shell('ls /var/core/')['stdout_lines']
    new_core_files = list(set(new_available_core_files) - set(available_core_files))
    num_of_new_core_files = len(new_core_files)
    expected_num_of_new_core_files = 1
    if not new_core_files:
        raise AssertionError('Core file was not generated')
    assert num_of_new_core_files == expected_num_of_new_core_files, \
        'More than expected number of core files generated'
    new_core_file_name = new_core_files[0]
    return new_core_file_name
ed8da0d355ae567fe71e76d4f32f21140c820371
73,106
from datetime import datetime


def exceeds_threshold(last_run_created, threshold):
    """
    Check whether a Mill run has exceeded the run threshold. This explicitly
    ignores timezones since the datetime should be UTC. Granularity smaller
    than a second is ignored.

    :param last_run_created: A datetime object corresponding to the last run.
    :param threshold: A threshold to check against in hours.
    :return: True if last_run_created is older than the threshold, and False
        otherwise. Returns False if the offset is exactly equal to the
        threshold.
    """
    last_run_age = datetime.now() - last_run_created
    last_run_hours = (last_run_age.days * 24) + (last_run_age.seconds / 3600)
    return last_run_hours > int(threshold)
a9fee3e59a35d43c7f0a1e133cf22fea9cdf74ca
73,107
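A usage sketch for exceeds_threshold above; the timestamps are illustrative:

from datetime import timedelta

two_days_ago = datetime.now() - timedelta(days=2)
print(exceeds_threshold(two_days_ago, 24))  # True (48h > 24h)
print(exceeds_threshold(two_days_ago, 72))  # False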
def calc_share_weird_states(data_individual_mixed, data_group_mixed):
    """
    In some states the strategy of the algorithm does not perfectly align
    with the win-stay lose-shift strategy described in the paper.
    I calculate the share of those states here (there are very very few).

    Args:
        data_individual_mixed (DataFrame): Individual level data from the experiment
        data_group_mixed (DataFrame): Group level data from the experiments

    Returns:
        float: Share of weird states from all rounds in the experiments
    """
    # Rounds in which the algorithm did NOT play the price of 1 or 4.
    # This happens rarely because the strategy in three-firm markets is not
    # perfect WSLS.
    data_algos_group = data_group_mixed.loc[
        ~data_group_mixed['treatment'].isin(['2H0A', '3H0A'])
    ].copy()
    n_price_not_1_or_4 = len(
        data_algos_group.loc[~data_algos_group['price_algorithm'].isin([1, 4])]
    )

    # Similarly if the algo plays a price of 4 after an upwards deviation
    data_algos_individual = data_individual_mixed.loc[
        ~data_individual_mixed['treatment'].isin(['2H0A', '3H0A'])
    ].copy()

    # Note that those are in 1H1A and thus individual = Group from an
    # otree-data perspective
    n_price_4_but_different_state = len(data_algos_individual.loc[
        (data_algos_individual['price_algorithm'] == 4) &
        (~data_algos_individual['price_lag_1'].isin([1, 4])) &
        (data_algos_individual['round'] > 1)
    ])
    share_weird_states = (
        n_price_not_1_or_4 + n_price_4_but_different_state
    ) / len(data_algos_group)
    return share_weird_states
93ffef5f09468a825d7c5931fb055533f47ef864
73,110
def convert_alt_list_to_string(alt):
    """
    The ALT field in the VCF file is represented as a list; convert this to
    a comma-separated string.
    """
    _vars = list()
    for _var in alt:
        _vars.append(str(_var))
    return ','.join(_vars)
11aef24df506094d711eb6e8e644a1272d461c78
73,123
def reconcile_sensors(sensor_key_list):
    """
    Make a list of the unique sensors from a set of fits
    """
    sensor_list = []
    for this_dict in sensor_key_list:
        for key in this_dict.keys():
            if key not in sensor_list:
                sensor_list += [key]
    return sensor_list
20870cce904e45de2ca63065b54b478e7911eb10
73,131
import typing
import collections


def route(url: str, methods: typing.Iterable[str] = ("GET", "HEAD"), **kwargs):
    """
    A companion function to the RouteGroup class. This follows
    :meth:`.Blueprint.route` in terms of arguments, and marks a function as
    a route inside the class.

    This will return the original function, with some attributes attached:

        - ``in_group``: Marks the function as in the route group.
        - ``rg_delegate``: Internal. The type of function inside the group this is.
        - ``route_kwargs``: Keyword arguments to provide to ``wrap_route``.
        - ``route_url``: The routing URL to provide to ``add_route``.
        - ``route_methods``: The methods for the route.
        - ``route_hooks``: A defaultdict of route-specific hooks.

    Additionally, the following methods are added.

        - ``hook``: A decorator that adds a hook of type ``type_``.
        - ``before_request``: A decorator that adds a ``pre`` hook.
        - ``after_request``: A decorator that adds a ``post`` hook.

    .. versionadded:: 2.1.1

    .. versionchanged:: 2.1.3

        Added the ability to add route-specific hooks.

    .. versionchanged:: 2.2.0

        Now accepts an already edited function as the function to decorate - this will add a \
        new routing url and method pair to the :attr:`.Route.routes`.

    .. versionchanged:: 2.2.2

        Default methods changed to GET and HEAD.

    :param url: The routing URL of the route.
    :param methods: An iterable of methods for the route.
    """

    def inner(func):
        # add the required attrs which are used on a scan later
        func.in_group = True
        func.rg_delegate = "route"
        func.route_kwargs = kwargs

        # try and append to the routes
        # failing that, create a new list
        try:
            func.routes.append((url, methods))
        except AttributeError:
            func.routes = [(url, methods)]

        if not hasattr(func, "route_hooks"):
            func.route_hooks = collections.defaultdict(lambda: [])

        # helper for route-specific hooks.
        def hook(type_: str):
            def _inner2(hookfunc):
                func.route_hooks[type_].append(hookfunc)
                return hookfunc

            return _inner2

        func.hook = hook
        func.before_request = hook("pre")
        func.after_request = hook("post")

        return func

    return inner
b0b0af22507e47d37f787abc712018fe38397f2a
73,133
import socket


def get_ip_addr(return_string=True):
    """
    Return IP Address of current host
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.connect(("8.8.8.8", 80))
    ip_addr = s.getsockname()[0]
    s.close()
    return str(ip_addr) if return_string else ip_addr
174cb30ad425a053387c10d071d8970b376f9d31
73,136
def _extract_class(name: str) -> str:
    """Extract a predicted class name from DAI column name.

    Examples:
        >>> _extract_class('target_column.class1')
        'class1'
    """
    return name.split('.')[-1]
525d0dd47566ce32835d41979468e4a3b04c3b01
73,143
def write_output(init_dict, df):
    """The function converts the simulated variables to a pandas DataFrame
    and saves the data in a txt and a pickle file.
    """
    # Distribute information
    source = init_dict["SIMULATION"]["source"]

    df.to_pickle(source + ".grmpy.pkl")

    with open(source + ".grmpy.txt", "w") as file_:
        df.to_string(file_, index=False, na_rep=".", col_space=15, justify="left")

    return df
a317bf570d95ef22b853f55591b5e2414071f8ac
73,144
def _ReorderMapByTypTags(result_map):
    """Rearranges |result_map| to use typ tags as the top level keys.

    Args:
        result_map: Aggregated query results from results.AggregateResults

    Returns:
        A dict containing the same contents as |result_map|, but in the
        following format:
        {
            typ_tags (tuple of str): {
                suite (str): {
                    test (str): build_url_list (list of str),
                },
            },
        }
    """
    reordered_map = {}
    for suite, test_map in result_map.items():
        for test, tag_map in test_map.items():
            for typ_tags, build_url_list in tag_map.items():
                reordered_map.setdefault(typ_tags, {}).setdefault(
                    suite, {})[test] = build_url_list
    return reordered_map
749a9abd18c0270d4d93f0e3c1092d53fcd237df
73,146
def create_demand_table(connector, demand_list, years):
    """
    Writes a ``demand`` table to an SQLite database.

    Parameters
    ----------
    connector : sqlite3 connection object
        Used to connect to and write to an sqlite database.
    demand_list : list
        A list of DemandCommodity objects.
    years : list or array
        A list of the years in the model simulation.

    Returns
    -------
    table_command : string
        The command for generating the "demand" table.
    """
    table_command = """CREATE TABLE "Demand" (
        "regions"       text,
        "periods"       integer,
        "demand_comm"   text,
        "demand"        real,
        "demand_units"  text,
        "demand_notes"  text,
        PRIMARY KEY("regions","periods","demand_comm"),
        FOREIGN KEY("periods") REFERENCES "time_periods"("t_periods"),
        FOREIGN KEY("demand_comm") REFERENCES "commodities"("comm_name")
    );"""
    insert_command = """
            INSERT INTO "Demand" VALUES (?,?,?,?,?,?)
            """
    cursor = connector.cursor()
    cursor.execute(table_command)

    # loops over each commodity (electricity, steam, h2, etc.)
    for demand_comm in demand_list:
        demand_dict = demand_comm.demand
        # loops over each region where the commodity is defined
        for region in demand_dict:
            data = demand_dict[region]
            db_entry = [(region,
                         int(y),
                         demand_comm.comm_name,
                         d,
                         demand_comm.units,
                         '')
                        for d, y in zip(data, years)]
            cursor.executemany(insert_command, db_entry)
    connector.commit()
    return table_command
f3c1410f934a25702f7474a582f5e7115baed541
73,147
def link_artist(artist_id):
    """Generates a link to an artist

    Args:
        artist_id: ID of the artist

    Returns:
        The link to that artist on Spotify
    """
    return "https://open.spotify.com/artist/" + artist_id
2ba752b848f6bebc599107c715ff9287df1264a9
73,148
def box_overlap(row, window):
    """
    Calculate the fraction of a bounding box that lies inside a window.

    Note: despite resembling Intersection over Union (IoU), the returned
    ratio divides the intersection area by the box area only.

    Parameters
    ----------
    window : dict
        Keys: {'x1', 'x2', 'y1', 'y2'}
        The (x1, y1) position is at the top left corner,
        the (x2, y2) position is at the bottom right corner
    box : dict
        Keys: {'x1', 'x2', 'y1', 'y2'}
        The (x1, y1) position is at the top left corner,
        the (x2, y2) position is at the bottom right corner

    Returns
    -------
    float in [0, 1]
    """
    # construct box
    box = {}
    # top left
    box["x1"] = row["origin_xmin"]
    box["y1"] = row["origin_ymin"]
    # bottom right
    box["x2"] = row["origin_xmax"]
    box["y2"] = row["origin_ymax"]

    assert window['x1'] < window['x2']
    assert window['y1'] < window['y2']
    assert box['x1'] < box['x2'], "Box {} is invalid".format(row)
    assert box['y1'] < box['y2']

    # determine the coordinates of the intersection rectangle
    x_left = max(window['x1'], box['x1'])
    y_top = max(window['y1'], box['y1'])
    x_right = min(window['x2'], box['x2'])
    y_bottom = min(window['y2'], box['y2'])

    if x_right < x_left or y_bottom < y_top:
        return 0.0

    # The intersection of two axis-aligned bounding boxes is always an
    # axis-aligned bounding box
    intersection_area = (x_right - x_left) * (y_bottom - y_top)

    # compute the area of both AABBs
    window_area = (window['x2'] - window['x1']) * (window['y2'] - window['y1'])
    box_area = (box['x2'] - box['x1']) * (box['y2'] - box['y1'])

    overlap = intersection_area / float(box_area)
    return overlap
5997c1d4085521562722257b2b78377ab8c7bdcc
73,151
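A usage sketch for box_overlap above; `row` can be any mapping with the origin_* keys, and the coordinates are illustrative:

row = {"origin_xmin": 0, "origin_ymin": 0, "origin_xmax": 10, "origin_ymax": 10}
window = {"x1": 5, "y1": 0, "x2": 20, "y2": 20}
print(box_overlap(row, window))  # 0.5 (half the box lies inside the window)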
from typing import TextIO
import json
import io


def combine_clks_blocks(clk_f: TextIO, block_f: TextIO):
    """Combine CLKs and blocks to produce a json stream of clknblocks.
    That's a list of lists, containing a CLK and its corresponding block IDs.

    Example output:
        {'clknblocks': [['UG9vcA==', '001', '211'],
                        [...]]}
    """
    try:
        blocks = json.load(block_f)['blocks']
        clks = json.load(clk_f)['clks']
    except ValueError as e:
        msg = 'Invalid CLKs or Blocks'
        raise ValueError(msg) from e
    clknblocks = [[clk] for clk in clks]
    for rec_id, block_ids in blocks.items():
        rec_id = int(rec_id)
        for block_key in block_ids:
            clknblocks[rec_id].append(block_key)
    out_stream = io.StringIO()
    json.dump({'clknblocks': clknblocks}, out_stream)
    out_stream.seek(0)
    return out_stream
9d2304b3fdcdcf79d791247165b3fa4c8dc8f339
73,152
def snp_count(vcf, chromosome, start, end):
    """
    Count the number of snps in the window.

    :param vcf: vcf, a vcf file with SNPs and their genomic positions
    :param chromosome: str, Chromosome name
    :param start: int, Start position of the sequence
    :param end: int, End position of the sequence
    :return: int, number of snps in the vcf window
    """
    snp_count = vcf.sample_variant(str(chromosome), int(start), int(end))
    snp_count = sum(1 for item in snp_count)
    return snp_count
c18f52662f96d3013452c4884f506901e9da146b
73,153
def get_guidelines(lang):
    """Returns a localized url for the download of the user guidelines.
    """
    elri_guidelines = 'metashare/ELRI_user_guidelines_' + lang + '.pdf'
    return elri_guidelines
1a72ab831c7752d156679f69a1972da0904ab556
73,154
def amz_group_grant(uri, permission):
    """
    Returns XML Grant for group with URI.

    :param uri: group URI
    :param permission: permission value
    """
    grant = (
        '<Grant>'
        '<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" '
        'xsi:type="Group">'
        '<URI>%s</URI>'
        '</Grantee>'
        '<Permission>%s</Permission>'
        '</Grant>' % (uri, permission))
    return grant
6db69daa14e2c24544e4a2639a7b9229a15135d4
73,156
def escapeXMLChars(text):
    """Escapes characters that need to be escaped (to obtain a well-formed
    XML document)

    Args:
        text: The text that will be escaped (string)

    Returns:
        text: new text containing XML entities instead of characters (string)
    """
    text = text.replace("&", "&amp;")
    #text = text.replace("\"", "&quot;")
    #text = text.replace("'", "&apos;")
    text = text.replace("<", "&lt;")
    #text = text.replace(">", "&gt;")
    return text
284bfefb400ebe5381a2b9454a2a8b15f199f36d
73,157
def alpha_B_HII(temperature):
    """
    Calculate the HII recombination rate

    This is the rate at which ionised hydrogen recombines
    into neutral hydrogen

    Total recombinations per second per unit volume = alpha * ne * nH
    ne = electron number density
    nH = hydrogen number density

    Parameters
    ----------
    temperature: float
        Temperature in K

    Returns
    -------
    alpha_B_HII : float
        The recombination rate
    """
    # HII recombination rate
    # input  : T in K
    # output : HII recombination rate (in cm3 / s)
    l = 315614. / temperature
    a = 2.753e-14 * l**1.5 / (1. + (l / 2.74)**0.407)**2.242
    return a
b3cb5b00c7a06cf42847725de6b0c9efad320806
73,162
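A quick numeric check of alpha_B_HII above; at T = 1e4 K the case-B recombination coefficient should come out near the commonly quoted ~2.6e-13 cm^3/s:

print(f"{alpha_B_HII(1e4):.3e}")  # approximately 2.6e-13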
def _PyEval_SliceIndex(space, w_obj, pi):
    """Extract a slice index from a PyInt or PyLong or an object with the
    nb_index slot defined, and store in *pi.  Silently reduce values larger
    than PY_SSIZE_T_MAX to PY_SSIZE_T_MAX, and silently boost values less
    than -PY_SSIZE_T_MAX-1 to -PY_SSIZE_T_MAX-1.

    Return 0 on error, 1 on success.

    Note: If v is NULL, return success without storing into *pi.  This is
    because _PyEval_SliceIndex() is called by apply_slice(), which can be
    called by the SLICE opcode with v and/or w equal to NULL.
    """
    if w_obj is not None:
        pi[0] = space.getindex_w(w_obj, None)
    return 1
73c065681793dd93045557c42289cae3c64f16d3
73,165
import torch


def make_initialization_inputs(inputs, device=None):
    """Take either a tensor, a shape tuple, or a list of them, and always
    return a tensor or a list of tensors."""
    if isinstance(inputs, torch.Tensor):
        pass
    elif isinstance(inputs, tuple):
        inputs = torch.rand(*inputs, device=device)
    elif isinstance(inputs, list):
        inputs = [make_initialization_inputs(item, device=device) for item in inputs]
    return inputs
e255fbce01f412088d635e31776c6b348a0a3926
73,166
def html_table_to_dict_list(soup_table):
    """
    Input:
        - 'soup_table': a Beautiful Soup object that was selected by the
          tag 'table'.

    Returns a list of dicts containing the table's data. It assumes the
    header is marked by 'th' tags.
    """
    trs = soup_table.find_all('tr')
    html_header = trs[0].find_all('th')
    table_header = [t.text for t in html_header]
    return [dict(zip(table_header, [t.text for t in tr.find_all('td')]))
            for tr in trs[1:]]
36e1b5eaa1557d5eb62f41993299341c1e03131e
73,178
def to_bytes(value):
    """
    Converts value to bytes.

    Args:
        value : Value to convert to bytes

    Returns:
        :obj:`bytes` : Return the value as bytes:
            if type(value) is :obj:`bytes`, return value;
            if type(value) is :obj:`str`, return the string encoded with
            UTF-8; otherwise, returns bytes(value).
    """
    if value is None:
        return value
    elif type(value) is bytes:
        return value
    elif type(value) is str:
        return value.encode('utf-8')
    else:
        return bytes(value)
e38b7348c84d570caa2891b1d3be8fa789eda615
73,180
def str_to_bytes(value):
    """Return bytes object from UTF-8 formatted string."""
    return bytes(str(value), 'UTF-8')
4bc843b070d50046b9099a8163be1888a3ba541d
73,182
def get_lattice_points(tup1, tup2, check_diagonals=True):
    """Returns a list of points which lie on a line defined by two endpoints"""
    x1, y1 = tup1[0], tup1[1]
    x2, y2 = tup2[0], tup2[1]

    # for horizontal lines, y values are the same
    if y1 == y2:
        if x1 <= x2:
            points = [(i, y1) for i in range(x1, x2 + 1)]
            return points
        else:
            points = [(i, y1) for i in range(x2, x1 + 1)]
            return points
    # for vertical lines, x values are the same
    elif x1 == x2:
        if y1 <= y2:
            points = [(x1, i) for i in range(y1, y2 + 1)]
            return points
        else:
            points = [(x1, i) for i in range(y2, y1 + 1)]
            return points
    elif check_diagonals:
        # diagonal lines
        slope = (y2 - y1) / (x2 - x1)
        y_int = y1 - slope * x1
        # getting the points
        if x1 <= x2:
            points = [(i, int(slope * i + y_int)) for i in range(x1, x2 + 1)
                      if (slope * i + y_int).is_integer()]
            return points
        else:
            points = [(i, int(slope * i + y_int)) for i in range(x2, x1 + 1)
                      if (slope * i + y_int).is_integer()]
            return points
8d6c5780ce227ae7b70480e929ff7e44c070d14e
73,183
def calc_total_biomass(available_forage):
    """Calculate the total biomass across forage types, in kg per ha."""
    sum_biomass = 0.
    for feed_type in available_forage:
        sum_biomass += feed_type.biomass
    return sum_biomass
b94f025381526ebf58c2e01b2a60e9b1083be6b7
73,184
def load_text(filename):
    """Load text file

    Parameters
    ----------
    filename: str
        Path to file

    Returns
    -------
    text: list of str
        Loaded text, one string per line.
    """
    with open(filename, 'r') as f:
        return f.readlines()
eda3f0f05b3bc924e9d0e80dfa78b29dbbbfc7ed
73,187
def addDot(num):
    """Formats the number into a string and adds a '.' for every thousand
    (eg. 3000 -> 3.000)

    Parameters
    ----------
    num : int
        integer number to format

    Returns
    -------
    number : str
        a string representing that number with added dots for every thousand
    """
    return '{0:,}'.format(int(num)).replace(',', '.')
9ac4b98a43539d123a7515328585bfb7a216d3e1
73,188
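A quick usage sketch for addDot above:

print(addDot(1234567))  # '1.234.567'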
def cookie_repr(c):
    """
    Return a pretty string-representation of a cookie.
    """
    return f"[key]host=[/key][cyan3]{c['host_key']}[/cyan3] " + \
           f"[key]name=[/key][cyan3]{c['name']}[/cyan3] " + \
           f"[key]path=[/key][cyan3]{c['path']}[/cyan3]"
26720d914481ec52c230be12602052ea61b736c0
73,195
def intcode_four(parameter_list, code_list):
    """
    Prints item in parameter_list[0] place in code_list. Returns True.
    """
    print(parameter_list[0])
    return True
2ec2ec368b9c37fbb9a2bcecd01cb939e3decda2
73,200
import torch


def train_loop(dataloader, model, loss_fn, optimizer, device):
    """
    training loop

    Arguments:
        dataloader: pytorch dataloader for training
        model: pytorch model
        loss_fn: the loss function to train the model
        optimizer: the optimizer to train the model
        device: pytorch device (cuda or cpu)

    Returns:
        mean_loss (float): the mean loss value
        correct (float): the mean accuracy
    """
    model.train()
    size = len(dataloader.dataset)
    total_loss = 0
    correct = 0
    n_iter = len(dataloader)
    is_cuda = device.type.find('cuda') != -1
    for batch, (X, y, _) in enumerate(dataloader):
        if is_cuda:
            X = X.to(device)
            y = y.to(device)

        # Compute prediction and loss
        pred = model(X)
        loss = loss_fn(pred, y)

        # get the accuracy
        with torch.no_grad():
            correct += (pred.softmax(1).argmax(1) == y).type(torch.float).sum().item()
            total_loss += loss.item()

        # Backpropagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch % 2 == 0:
            loss, current = loss.item(), batch * dataloader.batch_size
            print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")

    mean_loss = total_loss / n_iter
    correct /= size
    print(f"Train Stats: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {mean_loss:>8f}")
    return mean_loss, correct
fcc8915209db69b45899ace83dad61a5f027a4a2
73,208
from typing import Any


def str_to_python(value: str) -> Any:
    """
    Convert a node value (string) into python native equivalent

    :param value: incoming variable string
    :returns: value as a native Python data type
    :rtype: Any
    """
    try:
        if "." in value:
            # assume to be float
            ret = float(value)
        elif len(value) > 1 and value.startswith("0"):
            # Don't convert to int if it has a leading zero
            # We want to keep the zero, converting to int would remove it
            ret = value
        else:
            # try to convert to int
            ret = int(value)
    except ValueError:
        # conversion did not succeed, leave it as a string.
        ret = value
    return ret
3d441d1a7eaa12a2eb6f03be302b1d06b00b84bb
73,213
from typing import Union


def get_list_value(lst: Union[list, tuple], inds):
    """Get values from a list by index.

    Args:
        lst (Union[list, tuple]): target list.
        inds (Any): indices of the values to fetch.

    Returns:
        list: result
    """
    return [lst[i] for i in inds]
9b57377011b792714aaa21b90016e04ef85f68d1
73,215
def get_up_hill(hosp_data):
    """Given the temporal data of a hospital, get the portion of the
    timeline belonging to an up-hill peak, up to the point where the first
    critical state is found.

    Parameters
    ----------
    hosp_data: pd.DataFrame
        Historical capacidad hospitalaria of a single hospital.
        Rows correspond to a daily record of its capacity.

    Returns
    -------
    up_hill_data: pd.DataFrame
        Portion of the data that belongs to an up-hill segment of the
        historical data up to the first next critical status.
    hosp_data: pd.DataFrame
        Remaining portion of the data, with the up_hill_data removed.
    """
    is_peak = hosp_data['estatus_capacidad_uci'] == 'Crítica'
    if is_peak.sum() > 0:
        idx_peak = is_peak.argmax()
        up_hill_data = hosp_data.iloc[:idx_peak + 1]
        hosp_data = hosp_data.iloc[idx_peak + 1:]
    else:
        up_hill_data = None
        hosp_data = None
    return up_hill_data, hosp_data
5abc81776260f9e36f0b7b97684888172cccb0c8
73,216
import random


def should_drop(drop_percentage):
    """
    Based on the given percentage, provide an answer whether or not to drop
    the image.

    Args:
        drop_percentage: the likelihood of a drop in the form of a float
            from [0,1]

    Returns:
        a boolean whether to drop or not drop the image
    """
    return random.random() < drop_percentage
f624577d0941c93a83db54f987dda57b7d03a5ea
73,222
def parse_instruction(raw_instruction):
    """
    ABCDE (0)1002

    DE - two-digit opcode,      02 == opcode 2
     C - mode of 1st parameter,  0 == position mode
     B - mode of 2nd parameter,  1 == immediate mode
     A - mode of 3rd parameter,  0 == position mode,
                                      omitted due to being a leading zero

    :param raw_instruction: Assuming int value
    :return: op_code, param_1_mode, param_2_mode, param_3_mode
    """
    instruction = list(f"{raw_instruction:05}")
    op_code = int(''.join(instruction[3:5]))
    mode_1 = int(instruction[2])
    mode_2 = int(instruction[1])
    mode_3 = int(instruction[0])
    return op_code, mode_1, mode_2, mode_3
bcf4995be4e03f30dfe7a502f958ae2746cb0ec7
73,229
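A quick usage sketch for parse_instruction above, using the example from its docstring:

print(parse_instruction(1002))  # (2, 0, 1, 0)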