content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def set_reference_period(df, period):
    """Re-reference an index series so the mean over *period* becomes 100."""
    # Mean over the base period defines the 100 level.
    baseline = df[period].mean()
    rebased = (df / baseline) * 100
    # NaNs produced by the division are normalised to zero.
    return rebased.fillna(0)
bc93621952a192457cf6c456cc272276aa702627
49,809
def split_bytes(code):
    """Split 0xABCD into 0xAB, 0xCD"""
    # divmod by 256 is equivalent to (code >> 8, code & 0xff) for Python ints.
    high, low = divmod(code, 0x100)
    return high, low
47a98d31dcee4f9fa73d236a0b78d55586827a68
49,810
def NN_moffat(x, mu, alpha, beta, logamp):
    """One-dimensional non-negative Moffat profile.

    See: https://en.wikipedia.org/wiki/Moffat_distribution
    """
    # Amplitude is parameterised in log10 space to keep it positive.
    amplitude = 10 ** logamp
    core = 1. + ((x - mu) ** 2 / alpha ** 2)
    return amplitude * core ** (-beta)
50576d4a99ecbf6aab1f67fd99e0421d0cd644a6
49,811
def first_negative(l):
    """Return the first negative element of *l*, or None if there is none."""
    return next((value for value in l if value < 0), None)
1e62ce772f7d38e3835e5d6635c33bf365f134e7
49,812
def convert_to_int(word, base=ord('A')):
    """Pack a word into an integer, 5 bits per letter.

    `base` should be ord('A') for uppercase words, ord('a') for lowercase.
    """
    packed = 0
    for position, letter in enumerate(word):
        # Each letter occupies its own 5-bit slot, least-significant first.
        packed |= (ord(letter) - base) << (5 * position)
    return packed
9169a8f3b02d839534c43189156f4d0e03458874
49,813
from typing import Dict


def deserialize(config: Dict):
    """Instantiate the object described by a serialized config dict.

    Args:
        config: (dict) serialized config carrying '@module' and '@class'
            keys; all non-'@' keys are forwarded as keyword arguments.

    Returns:
        The reconstructed object, or None when config is None.

    Raises:
        ValueError: if the required '@module'/'@class' keys are missing.
    """
    if config is None:
        return None
    if '@module' not in config or '@class' not in config:
        raise ValueError("The config dict cannot be loaded")
    module_name = config['@module']
    class_name = config['@class']
    module = __import__(module_name, globals(), locals(), [class_name])
    target_cls = getattr(module, class_name)
    kwargs = {key: value for key, value in config.items()
              if not key.startswith('@')}
    return target_cls(**kwargs)
5fc22b0f2e7f1a24624fa0c26802ea8ed4ba6017
49,814
def compute_water_ingress_given_damage(damage_index, wind_speed,
                                       water_ingress):
    """Compute the proportion of water ingress for a damage index and wind speed.

    Args:
        damage_index: float in [0, 1]
        wind_speed: float
        water_ingress: pd.DataFrame indexed by damage-index upper thresholds,
            with a 'wi' column of callables taking a wind speed.

    Returns:
        prop. of water ingress ranging between 0 and 1
    """
    assert 0.0 <= damage_index <= 1.0
    # Index values are upper DI thresholds: counting how many lie strictly
    # below the damage index locates the applicable band.
    band = (water_ingress.index < damage_index).sum()
    threshold = water_ingress.index[band]
    return water_ingress.at[threshold, 'wi'](wind_speed)
9c8a81c080ac4b98e217a2b583e660ed8572dc5d
49,815
def CheckUrlPatternIndexFormatVersion(input_api, output_api):
    """Check that kUrlPatternIndexFormatVersion is modified when necessary.

    Whenever any of the following files is changed:
    - components/url_pattern_index/flat/url_pattern_index.fbs
    - components/url_pattern_index/url_pattern_index.cc
    and kUrlPatternIndexFormatVersion stays intact, this check returns a
    presubmit warning to make sure the value is updated if necessary.
    """
    index_files_changed = False
    version_changed = False
    for affected_file in input_api.AffectedFiles():
        basename = input_api.basename(affected_file.LocalPath())
        if basename in ('url_pattern_index.fbs', 'url_pattern_index.cc'):
            index_files_changed = True
        if basename == 'url_pattern_index.h':
            # Any touched line declaring the constant counts as a bump.
            for _, line in affected_file.ChangedContents():
                if 'constexpr int kUrlPatternIndexFormatVersion' in line:
                    version_changed = True
                    break
    results = []
    if index_files_changed and not version_changed:
        results.append(output_api.PresubmitPromptWarning(
            'Please make sure that url_pattern_index::kUrlPatternIndexFormatVersion'
            ' is modified if necessary.'))
    return results
e1a153615ccc6284170d33ba2dfd2ef94985f467
49,816
def is_element_checked(driver, element):
    """Read the DOM ``checked`` property of an input element via JS.

    Args: driver (base.CustomDriver), element (web element)
    Return: True if element is checked, False if element is not checked.
    """
    script = "return arguments[0].checked"
    return driver.execute_script(script, element)
2db0a7003a7565312f68522a8d3d8a90962dc191
49,817
import json


def response(message, status_code):
    """Build an API-Gateway-style JSON response envelope with CORS headers."""
    headers = {
        'Content-Type': 'application/json',
        'Access-Control-Allow-Origin': '*'
    }
    return {
        'statusCode': status_code,
        'body': json.dumps(message),
        'headers': headers,
    }
e97ca33ad17f7c40ba4771fab587be2fa4e9467f
49,818
def is_jquery_not_defined_error(msg):
    """Return True when the JS error message means jQuery is unavailable."""
    # Firefox: '$ is not defined'
    # Chrome: 'unknown error: $ is not defined'
    # PhantomJS: JSON with 'Can't find variable: $'
    known_fragments = ("$ is not defined", "Can't find variable: $")
    for fragment in known_fragments:
        if fragment in msg:
            return True
    return False
2dfa831aecb80269f0f86cf9c4b561ccbff307c3
49,822
def merge_sort(array):
    """Sort *array* with merge sort: O(n log n), stable, merges out of place.

    Divide: split the array into left and right halves.
    Conquer: sort each half recursively.
    Combine: merge the two sorted halves.
    """

    def merge(left, right):
        """Merge two sorted lists into one combined sorted list."""
        combined = []
        i = j = 0
        # Repeatedly take the smaller head; ties go left, keeping stability.
        while i < len(left) and j < len(right):
            if left[i] <= right[j]:
                combined.append(left[i])
                i += 1
            else:
                combined.append(right[j])
                j += 1
        # One side is exhausted -- append whatever remains of the other.
        combined.extend(left[i:])
        combined.extend(right[j:])
        return combined

    # Base case: arrays of length 0 or 1 are already sorted.
    if len(array) > 1:
        midpoint = len(array) // 2
        array = merge(merge_sort(array[:midpoint]),
                      merge_sort(array[midpoint:]))
    return array
8bf5aafefa6a1eec4750f626f853d0bbc9e410cf
49,823
import numpy


def calculate_fft(timestep, data):
    """Perform an FFT on a real time-signal.

    Args:
        timestep: sample spacing (time between consecutive samples).
        data: sequence of real samples.

    Returns:
        (frequency, fft_magnitude): non-negative frequency bins and the
        magnitude of the real-input FFT, as equal-length arrays.
    """
    fourier = numpy.fft.rfft(data)
    fft_magnitude = numpy.absolute(fourier)
    # rfft already returns only the non-negative-frequency half, so the
    # matching bin centres come from rfftfreq over the *input* length.
    # The old fftfreq(fft_magnitude.size) produced misaligned (and negative)
    # frequencies -- hence the commented-out filtering attempt it carried.
    frequency = numpy.fft.rfftfreq(len(data), d=timestep)
    return frequency, fft_magnitude
d396e09fb346793c9b0243d49db39005138134c8
49,824
def determine_fewest(value, denominations):
    """Greedily find the fewest denominations needed to represent *value*.

    Args:
        value: amount to break down (coerced to int).
        denominations: iterable of objects exposing a ``value`` attribute.

    Returns:
        List of (count, denomination) tuples, largest denomination first.

    Raises:
        ValueError: when the remainder cannot be represented (the previous
            implementation looped forever in that case).
    """
    fewest = []
    remaining = int(value)
    # Sort largest-first so the greedy choice is well defined even when the
    # caller passes an unsorted collection (the old code silently depended
    # on descending order).
    for denomination in sorted(denominations,
                               key=lambda d: d.value, reverse=True):
        if remaining <= 0:
            break
        if denomination.value <= 0 or denomination.value > remaining:
            continue
        count, remaining = divmod(remaining, denomination.value)
        fewest.append((count, denomination))
    if remaining > 0:
        raise ValueError(
            'value cannot be represented with the given denominations')
    return fewest
96c3a0fb78c24bc77fcab7141a7560dbb4176f45
49,825
def sign(num):
    """Return +1 when positive, -1 when negative and 0 at 0."""
    # Boolean subtraction yields exactly 1, -1 or 0.
    return (num > 0) - (num < 0)
c3ddaf3d7f25e8899df9e5ee087e6820060ca35a
49,826
def raw_face_coordinates(face, x, y, z):
    """Find u,v coordinates (image coordinates) for a given 3D vector.

    :param face: face where the vector points to
        (left/right/bottom/top/back/front)
    :param x, y, z: vector cartesian coordinates
    :return: (u, v) image coordinates, each normalised to [0, 1]
    :raises Exception: if *face* is not a recognised face name
    """
    # (u, v, major-axis magnitude) per cube face; lambdas defer evaluation
    # until the face is known.
    projections = {
        'left':   lambda: (z, -y, abs(x)),
        'right':  lambda: (-z, -y, abs(x)),
        'bottom': lambda: (-x, -z, abs(y)),
        'top':    lambda: (-x, z, abs(y)),
        'back':   lambda: (x, y, abs(z)),
        'front':  lambda: (-x, -y, abs(z)),
    }
    if face not in projections:
        # Fixed: the original message lacked the space before 'does'.
        raise Exception('Tile ' + face + ' does not exist')
    u, v, ma = projections[face]()
    return (u / ma + 1) / 2, (v / ma + 1) / 2
1c2fd8d08327894a7cd68795d98641a9c6ac7f12
49,827
import sys
import os
import subprocess


def helm(folder: str, version: str, owner: str, repo: str, commit_sha: str, token: str) -> bool:
    """Publish a Helm chart from *folder* to a GitHub release.

    Arguments:
        folder: The folder containing the chart to be published
        version: The version that will be published
        owner: The GitHub repository owner
        repo: The GitHub repository name
        commit_sha: The sha of the current commit
        token: The GitHub token

    Returns:
        True on success, False when any sub-command fails.
    """
    print(f"::group::Publishing Helm chart from '{folder}' to GitHub release")
    sys.stdout.flush()
    sys.stderr.flush()

    try:
        # Imported here: ruamel.yaml was previously referenced without any
        # import, which made every call fail with NameError.
        import ruamel.yaml  # type: ignore

        yaml_ = ruamel.yaml.YAML()  # type: ignore
        chart_path = os.path.join(folder, "Chart.yaml")
        with open(chart_path, encoding="utf-8") as open_file:
            chart = yaml_.load(open_file)
        chart["version"] = version
        with open(chart_path, "w", encoding="utf-8") as open_file:
            yaml_.dump(chart, open_file)

        # Register every HTTPS chart dependency so packaging can resolve it.
        for index, dependency in enumerate(chart.get("dependencies", [])):
            if dependency["repository"].startswith("https://"):
                subprocess.run(
                    ["helm", "repo", "add", str(index), dependency["repository"]],
                    check=True,
                )

        subprocess.run(["cr", "package", folder], check=True)
        subprocess.run(
            [
                "cr",
                "upload",
                f"--owner={owner}",
                f"--git-repo={repo}",
                f"--commit={commit_sha}",
                "--release-name-template={{ .Version }}",
                f"--token={token}",
            ],
            check=True,
        )
        if not os.path.exists(".cr-index"):
            os.mkdir(".cr-index")
        subprocess.run(
            [
                "cr",
                "index",
                f"--owner={owner}",
                f"--git-repo={repo}",
                f"--charts-repo=https://{owner}.github.io/{repo}",
                "--push",
                "--release-name-template={{ .Version }}",
                f"--token={token}",
            ],
            check=True,
        )
        print("::endgroup::")
    except subprocess.CalledProcessError as exception:
        print(f"Error: {exception}")
        print("::endgroup::")
        print("With error")
        return False
    return True
38e2e54e6944040500658fd86edbae204796702c
49,830
def get_all_child_nodes(parent_node):
    """Return the list of all child nodes of a minidom *parent_node*."""
    return parent_node.childNodes
aaacd0673e4f08f015ddf119d423191dfd34cdb6
49,831
import math


def compact_bit_pack(value, signed):
    """Pack an integer into its most compact big-endian byte representation.

    Args:
        value: integer to pack.
        signed: whether two's-complement (signed) packing is required.

    Returns:
        bytes: the shortest big-endian encoding of *value* (at least 1 byte).
    """
    if signed:
        # Two's complement: -128..127 fit in one byte, etc. The +(value<0)
        # adjustment makes exact powers like -128 use the minimal length.
        byte_length = (8 + (value + (value < 0)).bit_length()) // 8
    else:
        # Unsigned values need no sign bit; the old ceil((bits+1)/8) always
        # reserved one, wasting a whole byte for values such as 255.
        byte_length = max(1, (value.bit_length() + 7) // 8)
    return value.to_bytes(byte_length, 'big', signed=signed)
a585a30e4f19c85c98b813fcf9146bca11a4f67c
49,832
def is_easz_conflict(info_list):
    """Return True/False whether the infos conflict on the EASZ resolution
    function (EOSZ NT sequence)."""
    reference = info_list[0].easz_nt_seq
    return any(info.easz_nt_seq != reference for info in info_list[1:])
9fc2b0b8a4df7c958967c6c525eee7d10c0e416c
49,833
def getPositionAtTime(t):
    """Walk a square patrol route over simulation time t in [0, 100).

    First quarter: walking right from 50, 50 to 250, 50
    Second quarter: walking down from 250, 50 to 250, 250
    Third quarter: walking left from 250, 250 to 50, 250
    Fourth quarter: walking up from 50, 250 to 50, 50
    Returns (x, y, facing); None outside the simulated range.
    """
    if 0 <= t < 25:
        return 50 + 8 * t, 50, 'right'
    if 25 <= t < 50:
        return 250, 50 + 8 * (t - 25), 'front'
    if 50 <= t < 75:
        return 250 - 8 * (t - 50), 250, 'left'
    if 75 <= t < 100:
        return 50, 250 - 8 * (t - 75), 'back'
a9436cc34ae929cb074ae5686ad8a23d2c349c15
49,834
def apply_func_to_list(func):
    """Return a function that maps *func* (plus extra args) over a list."""
    def mapper(items, *args):
        return [func(item, *args) for item in items]
    return mapper
c8017e54551647b972e94f62b4422957db382f8f
49,835
import torch


def expand_dims_for_broadcast(low_tensor, high_tensor):
    """Expand a lower-rank tensor with trailing singleton dims so broadcast
    operations with a higher-rank tensor are possible.

    Args:
        low_tensor (Tensor): shape [s_0, ..., s_p]
        high_tensor (Tensor): shape [s_0, ..., s_p, ..., s_n]; the shape of
            low_tensor must be a prefix of the shape of high_tensor.

    Returns:
        Tensor: low_tensor reshaped to [s_0, ..., s_p, 1, ..., 1].
    """
    low_rank = low_tensor.dim()
    high_rank = high_tensor.dim()
    # Verify that low_tensor's shape is a prefix of high_tensor's shape.
    assert low_tensor.size() == high_tensor.size()[:low_rank]
    expanded = low_tensor
    for _ in range(high_rank - low_rank):
        # unsqueeze(-1) appends a trailing singleton dimension.
        expanded = expanded.unsqueeze(-1)
    return expanded
b90483cacd7379831514d84f341d994763d90547
49,836
import json


def get_body_text_from_path(path):
    """Read a JSON document and join its body_text block texts.

    Input: path [string]: full path of target file
    Output: String of body text (blocks separated by blank lines)
    """
    with open(path, 'r') as handle:
        document = json.load(handle)
    paragraphs = [block['text'] for block in document['body_text']]
    return '\n\n'.join(paragraphs)
81502ed5d23ba56836d93912b52af2d06c6c516f
49,837
def ft_generate_center(topics_amount):
    """Generate [x, y] centers for each layout, laid out in rows of 5.

    Topics are spaced 5 units apart; each new row drops 5 units down.
    """
    centers = [[0, 0]]
    x = y = 0
    for count in range(1, topics_amount):
        if count % 5 == 0:
            # Start a new row below the current one.
            x, y = 0, y - 5
        else:
            x += 5
        centers.append([x, y])
    return centers
65bdba39dd2937122f336d4f8c48774b201446f4
49,838
def pack_color(rgb_vec):
    """Concatenate the channels of *rgb_vec* (floats in [0, 1]) into one int.

    Python implementation of packColor in src/neuroglancer/util/color.ts
    """
    packed = 0
    for channel in rgb_vec:
        # Scale to a byte, clamping to [0, 255].
        byte = min(255, max(0, round(channel * 255)))
        packed = ((packed << 8) & 0xffffffff) + byte
    return packed
82f8e6e922ea9bc4c5c5176463942a0fa098d751
49,839
import sys
import inspect


def _check_dens_func_arguments(dens_func):
    """
    Check that the dens_func has been properly defined
    (i.e. that it is a function of x,y,z, or of z,r)

    Return the list of arguments
    """
    # Pick the inspection API that exists on this interpreter: getargspec
    # on Python 2, getfullargspec on Python 3.
    if sys.version_info[0] < 3:
        arg_list = inspect.getargspec(dens_func).args
    else:
        arg_list = inspect.getfullargspec(dens_func).args
    # A class with a __call__ method passes 'self' first; drop it.
    if arg_list[0] == 'self':
        arg_list.pop(0)
    # Only (x, y, z) and (z, r) signatures are supported.
    if not (arg_list == ['x', 'y', 'z'] or arg_list == ['z', 'r']):
        raise ValueError(
            "The argument `dens_func` needs to be a function of z, r\n"
            "or a function of x, y, z.")
    return arg_list
74efb1756f966b0241568c80b022e1987edb802e
49,840
def cancel_check(self):
    """Cancel (annul) the current receipt."""
    # Wait for any in-progress print job before issuing the cancel command.
    self.wait_printing()
    cancel_opcode = 0x88
    return self.protocol.command(cancel_opcode, self.password)
8ab5cf8073370f92cb8d2e1c97a19da30f36aec0
49,841
import json


def read_jsonl(filename):
    """Read a JSON-lines file and return the parsed records as a list."""
    with open(filename, 'r') as handle:
        return [json.loads(line) for line in handle]
6bc3e2b6a19410e84809d1a17aca3c052f613d27
49,843
def get_K_sc_h_0():
    """Ksch0: coefficient of the subcooling-degree formula (-) (6b)

    Returns:
        float: Ksch0 coefficient (-) (6b)
    """
    # Fixed empirical coefficient from equation (6b).
    return -4.02655782981397
d0dfde4b13e615c9eb6c4c4bdd8a9779e9af6f47
49,844
import os


def get_n_processors(casedir="./", dictpath="system/decomposeParDict"):
    """Read number of processors from decomposeParDict.

    Returns the integer after the ``numberOfSubdomains`` keyword, or None
    if the keyword is not found.
    """
    path = os.path.join(casedir, dictpath)
    with open(path) as handle:
        for raw_line in handle:
            # Strip trailing semicolons so the value tokenises cleanly.
            tokens = raw_line.strip().replace(";", " ").split()
            if tokens and tokens[0] == "numberOfSubdomains":
                return int(tokens[1])
f506113c76c5fa643b2c4e87f70245c78b6c9291
49,845
def strbytes(value, annotation):
    """Coerce *value* between str and bytes to match *annotation*.

    Conversion only happens when the annotation contains exactly one of
    str/bytes and the value is the other; anything else passes through.
    """
    if isinstance(value, str) and bytes in annotation and str not in annotation:
        return value.encode('utf-8')
    if isinstance(value, bytes) and str in annotation and bytes not in annotation:
        return value.decode('utf-8')
    return value
a92933d27ea2916c33ec2ee4efee02b4bc8298fa
49,846
from typing import List
from typing import Optional


def find_sum_components(
    values: List[int],
    count: int,
    target: int,
    components: Optional[List[int]] = None
) -> List[int]:
    """Return *count* values drawn (with repetition) from *values* that sum
    to *target*; empty list when no combination exists."""
    if components is None:
        components = []
    partial_sum = sum(components)
    # Prune branches that already overshoot the target or the length budget.
    if partial_sum > target or len(components) > count:
        return []
    if partial_sum == target and len(components) == count:
        return components
    for candidate in values:
        solution = find_sum_components(
            values, count, target, components + [candidate]
        )
        if solution:
            return solution
    return []
aa0de9819732e7d2e835f483b48d4922c9fd4f86
49,847
def blocks_slice_to_chunk_slice(
    blocks_slice: slice, chunk_shape: int, chunk_coord: int
) -> slice:
    """
    Converts the supplied blocks slice into chunk slice
    :param blocks_slice: The slice of the blocks
    :param chunk_shape: The shape of the chunk in this direction
    :param chunk_coord: The coordinate of the chunk in this direction
    :return: The resulting chunk slice
    """
    offset = chunk_coord * chunk_shape

    def clamp(bound):
        # Shift into chunk-local coordinates and clamp to [0, chunk_shape].
        return min(max(0, bound - offset), chunk_shape)

    return slice(clamp(blocks_slice.start), clamp(blocks_slice.stop))
c23b09cc5d0b65dbc8add0e7a237669a03d0da88
49,848
def block(*args):
    """Evaluate to the last of the given arguments (like a Lisp `progn`)."""
    last = args[-1]
    return last
e0e4a46b3c78216582760a6b06c723dfe63c061b
49,849
def get_attr(model: object, name: str):
    """Return the attribute *name* of *model*, or None when absent."""
    # getattr's default argument covers the hasattr check in one lookup.
    return getattr(model, name, None)
c1eb619066d72f64889234ae322cdb2767d29654
49,850
def format_txt_from_dict(resp_dict, apl_trans: str, id_plus_dir: str) -> str:
    """Format one prediction entry as newline-separated text."""
    confidences = " ".join(
        str(round(conf, 4)) for conf in resp_dict['confidence']
    )
    word_pairs = ' '.join(
        f"({word}, {round(conf, 4)})"
        for word, conf in resp_dict['words_confidence']
    )
    entry = [id_plus_dir, apl_trans, resp_dict['transcript'],
             confidences, word_pairs]
    return '\n'.join(entry)
9a0601665bef9c8f596b11e09058285fea9aa525
49,851
import os


def exists(path):
    """Return True when a file or directory is present at *path*."""
    return os.path.exists(path)
165a3ae40deed87079d2bff036614100fc2bddcc
49,853
import re


def homogeneize_phone_number(numbers):
    """
    Homogeneize the phone numbers, by stripping any space, dash or dot as
    well as the international prefix. Assumes it is dealing with French phone
    numbers (starting with a zero and having 10 characters).

    :param numbers: The phone number string to homogeneize (can contain
        multiple phone numbers).
    :return: The cleaned phone number. ``None`` if the number is not valid.
    """
    if not numbers:
        return None
    cleaned = []
    for raw in numbers.split(','):
        candidate = raw.strip()
        # Drop common separators and parentheses.
        for junk in ('.', ' ', '-', '(', ')'):
            candidate = candidate.replace(junk, '')
        # Strip a two-digit international prefix such as +33.
        candidate = re.sub(r'^\+\d\d', '', candidate)
        if not candidate.startswith('0'):
            candidate = '0' + candidate
        if len(candidate) == 10:
            cleaned.append(candidate)
    return ', '.join(cleaned) if cleaned else None
28d0b67daeb0a06eff80f7d00ac41be4b16590e3
49,856
def retrieve_all_parameters(parameter_info_dict):
    """Return the sorted union of all parameters across the dict's values."""
    all_parameters = set()
    for parameters in parameter_info_dict.values():
        all_parameters.update(parameters)
    return sorted(all_parameters)
4024021bf039168d5f1926e6f602490b3a08ec4d
49,857
import requests
import json


def get_coordinates_info(lon, lat):
    """
    Request from geocode.arcgis API an info about (lat, lon) coordinates
    :param lon: a number for Longitude
    :param lat: a number for Latitude
    :return: dict if found data else None
    """
    url = (
        "https://geocode.arcgis.com/arcgis/rest/services/World/GeocodeServer/"
        "reverseGeocode?location={}%2C{}"
        "&langCode=en&outSR=&forStorage=false&f=pjson"
    ).format(lon, lat)
    reply = requests.get(url)
    if reply.ok:
        return json.loads(reply.text)
e421909cab65587523197e4e6cbed1afbb37576c
49,858
def extract_keywords(raw_text):
    """Split *raw_text* on single spaces and return the unique tokens.

    :raw_text: (str) raw text
    :return: (list of str) unique keywords (order not guaranteed)
    """
    return list(set(raw_text.split(" ")))
9f66c85ea36f1bfa34f53847a46e9223cd7778d3
49,860
def consecutiveSlopes(ys, xs):
    """Get slopes of consecutive data points.

    Args:
        ys: sequence of y values.
        xs: sequence of matching x values (len(xs) >= len(ys)).

    Returns:
        list of (ys[i+1]-ys[i]) / (xs[i+1]-xs[i]) for each consecutive pair.

    Note:
        The previous implementation reused the first sample spacing
        xs[1]-xs[0] for every interval, which is wrong whenever xs is not
        uniformly spaced.
    """
    slopes = []
    for i in range(len(ys) - 1):
        dx = xs[i + 1] - xs[i]
        slopes.append((ys[i + 1] - ys[i]) / dx)
    return slopes
f0c5b107f08d436560c079fbdc15b9cacaca55c1
49,861
import hashlib
import requests


def check_password_frequency(passwords_to_test: dict) -> dict:
    """Check breach frequency of each password via the HIBP k-anonymity API.

    Only the first five hex digits of the SHA-1 digest are sent; matching
    suffixes are compared locally.
    """
    passwords_frequency = dict()
    for password in passwords_to_test:
        digest = hashlib.sha1(password.encode("utf8")).hexdigest().upper()
        prefix, suffix = digest[:5], digest[5:]
        response = requests.get(
            "https://api.pwnedpasswords.com/range/" + prefix)
        for record in response.text.upper().split("\r\n"):
            hashed, count = record.split(":")
            if hashed == suffix:
                print("Password: {} -- x{} times".format(password, count))
                passwords_frequency[password] = count
    return passwords_frequency
d345652a6444ca516a56e1f3c1abc1257744c37b
49,862
def _user_has_course_access_role(user):
    """
    Returns a boolean indicating whether or not the user is known to have
    at least one course access role.
    """
    try:
        # Reverse-FK accessor; .exists() asks the DB without fetching rows.
        return user.courseaccessrole_set.exists()
    except Exception:  # pylint: disable=broad-except
        # Deliberate best-effort: anonymous users or any lookup failure are
        # treated as having no roles rather than propagating the error.
        return False
231f7a5551a1861ece44c5c85ec96e59d4c7e793
49,863
import functools
import time


def timerfunc(func):
    """
    A timer decorator: prints the wall-clock runtime of each call and
    returns the wrapped function's value unchanged.
    """
    @functools.wraps(func)  # preserve __name__, __doc__, etc. of func
    def function_timer(*args, **kwargs):
        """
        A nested function for timing other functions
        """
        start = time.time()
        value = func(*args, **kwargs)
        runtime = time.time() - start
        msg = "The runtime for {func} took {time} seconds to complete"
        print(msg.format(func=func.__name__, time=runtime))
        return value
    return function_timer
ee7582c993229c18b01713fdddd8ef1797949a42
49,864
import importlib.util


def get_city(source_file):
    """Given a source file, get the city it refers to.

    Loads *source_file* as a Python module and returns its ``city``
    attribute.
    """
    # `imp.load_source` was deprecated since Python 3.4 and removed in
    # 3.12; importlib provides the supported equivalent.
    spec = importlib.util.spec_from_file_location('local_data', source_file)
    data_module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(data_module)
    return data_module.city
9eef94be0a8cceecde91cd7f1f23e4d1aea6e2dc
49,865
def isnum(n):
    """Return True if n behaves like a number (supports + 1), else False."""
    try:
        n + 1
    except TypeError:
        # Non-numeric types fail integer addition; the old bare `except`
        # also swallowed KeyboardInterrupt/SystemExit.
        return False
    return True
46c9b8e15a1f29e6e715339f51c6d56975b6ffb3
49,866
def service_cluster_full(s, base_url):
    """GET qrs/servicecluster/full; return (status code, first JSON item)."""
    endpoint = base_url + "/qrs/servicecluster/full?xrfkey=abcdefg123456789"
    resp = s.get(endpoint)
    return resp.status_code, resp.json()[0]
77b5dc34cd6cefe648ffff5c2e976718b17a653e
49,868
import inspect
import asyncio


def convert_gen_to_async(gen, delay):
    """Wrap a regular generator as an async-generator *function* that sleeps
    *delay* seconds after yielding each item."""
    assert inspect.isgenerator(gen)

    async def delayed_items():
        for item in gen:
            yield item
            await asyncio.sleep(delay)

    return delayed_items
94fbe2e84c14bc359f8a8da28a466ff4a6ed4f5a
49,869
def contains_key_and_has_value(check_dict, key):
    """Return whether *key* exists in *check_dict* with a truthy value.

    Returns:
        True: key present and value truthy
        False: key missing, or value falsy (e.g. None)
    """
    # .get() returns None for missing keys, which is falsy like an absent
    # or empty value.
    return bool(check_dict.get(key))
cd4c6ef42813c766689fd0889ed228309edcce6d
49,870
def dedup_and_title_case_names(names):
    """Return a list of title-cased names, each name appearing only once.

    Deduplication happens on the raw names (before title-casing, matching
    the original behaviour) and preserves first-occurrence order.
    dict.fromkeys replaces the old O(n**2) list-membership scan that was
    hidden inside a side-effecting list comprehension.
    """
    return [name.title() for name in dict.fromkeys(names)]
3ad50bbbb1f6fcd5386de13b985ec503c8b88b2d
49,871
import os


def _prevent_overwrite(write_path, suffix='_annotated'):
    """Prevent overwrite of existing output files by appending a suffix
    (before the file extension) until the path is unused.

    :param write_path: potential write path
    :type write_path: string
    :return: a path that does not currently exist
    :rtype: string
    """
    while os.path.exists(write_path):
        stem, dot, extension = write_path.rpartition('.')
        if dot:
            # Insert the suffix just before the final extension.
            write_path = stem + suffix + '.' + extension
        else:
            # No extension at all: append the suffix to the whole name.
            write_path = write_path + suffix
    return write_path
c889cf92008543a4f694c66175122a6206540196
49,872
def recover_data(Z, U, k):
    """Recover an approximation of the original data from its projection.

    :param Z: Reduced data representation (projection)
    :type Z: numpy.array
    :param U: eigenvectors of covariance matrix
    :type U: numpy.array
    :param k: Number of features in reduced data representation
    :returns: Approximated features' dataset
    :rtype: numpy.array
    """
    # Project back through the first k eigenvectors.
    return Z @ U[:, :k].T
0a539d942989823c9088d5be7db39dd5f7da049b
49,873
def get_span_length(spans: list):
    """
    For each spacy.span object in a list, get the length of the object
    :param spans: list of spacy.span objects
    :return: list of lengths of the spacy.span objects
    """
    lengths = []
    for span in spans:
        lengths.append(len(span))
    return lengths
8387f08849f089d896105dd685bf2536684e36d6
49,874
import os


def _command(command, *args):
    """Build a space-joined command string from an executable and its args."""
    parts = [os.path.join(command), *args]
    return ' '.join(parts)
e2cba69e84e5d870a1ac8d2e6b104ba0cb7ae880
49,875
def get_same_pinyin_vocabulary(same_pinyin_file):
    """Load a same-pinyin vocabulary table.

    File format (pycorrector's same_pinyin.txt,
    https://github.com/shibing624/pycorrector/blob/master/pycorrector/data/same_pinyin.txt):
    one header line, then tab-separated rows of
    ``word<TAB>same-tone words<TAB>different-tone words``.

    :param same_pinyin_file: path to the table
    :return: {"word1": [chars with the same pinyin], "word2": [...], ...}
    """
    same_pinyin = {}
    with open(same_pinyin_file, 'r', encoding='utf-8') as handle:
        rows = handle.readlines()
    # Skip the header line.
    for row in rows[1:]:
        fields = row.strip("\n").split('\t')
        key = fields[0]
        # Pool same-tone and different-tone homophones into one char list.
        pooled = ''.join(fields[1:])
        same_pinyin[key] = list(pooled)
    return same_pinyin
0039e3bdcec404a0790a7353c72e4c101d91e0ab
49,876
def replace_double_hyphen(value):
    """Replace ``--`` (double) with ``-`` (single).

    Args:
        value (str): string which may contain ``--``.

    Returns:
        str with each ``--`` collapsed to ``-``.

    Examples:
        >>> replace_double_hyphen("123--456")
        '123-456'
    """
    return str(value).replace("--", "-")
1a9eb18276db8894f0e5152a620e2fea73b5d49d
49,878
import binascii
import os


def new_credentials():
    """Generate a new identifier and seed for authentication.

    Use the returned values in the following way:
    * The identifier shall be passed as username to SRPAuthHandler.step1
    * Seed shall be passed to SRPAuthHandler constructor
    """
    # bytes.hex() is equivalent to b2a_hex + decode.
    identifier = os.urandom(8).hex().upper()
    seed = binascii.b2a_hex(os.urandom(32))  # corresponds to private key
    return identifier, seed
1460b1a6218a9b214668aef4cbd044a46300fdc1
49,879
def menu():
    """Build the general (top-level) menu; currently empty."""
    return []
4c4dfceed2611382d0687401746b7dba9afc4750
49,880
import torch


def make_separable(ker, channels):
    """Transform a single-channel kernel into a multi-channel separable kernel.

    Args:
        ker (torch.tensor): Single-channel kernel (1, 1, D, H, W).
        channels (int): Number of input/output channels.

    Returns:
        ker (torch.tensor): Multi-channel group kernel.
    """
    rank = ker.dim()
    # Repeat along the first (channel) dimension only.
    repeats = [channels] + [1] * (rank - 1)
    return ker.repeat(*repeats)
80c8ab22f8b39fb5fb91223ed8ecb0c6c3d75a05
49,882
from typing import Dict
from typing import Any
import pickle


def read_language_file(pickle_filepath: str) -> Dict[Any, Any]:
    """
    Read language file.

    Parameters
    ----------
    pickle_filepath : str

    Returns
    -------
    language_data : Dict[Any, Any]

    Examples
    --------
    >> from lidtk.utils import make_path_absolute
    >> path = make_path_absolute('~/.lidtk/lang/de.pickle')
    >> data = read_language_file(path)
    >> sorted(list(data.keys()))
    ['paragraphs', 'used_pages']
    """
    with open(pickle_filepath, "rb") as handle:
        return pickle.load(handle)
7bb47a38190d77d350ae0042ca3867447b02f29e
49,883
def redistribute_occ(occ_list):
    """
    Redistribute occupants in occ_list, so that each apartment is having
    at least 1 person and maximal 5 persons.

    Parameters
    ----------
    occ_list : list
        Number of occupants per apartment.

    Returns
    -------
    occ_list_new : list
        List holding number of occupants per apartment (a copy; the input
        list is never modified).

    Raises
    ------
    AssertionError
        If the average exceeds 5 occupants per apartment, or if occupants
        remain undistributed after all apartments are capped at 5.
    """
    # Work on a copy so the caller's list stays untouched.
    occ_list_new = occ_list[:]
    if sum(occ_list_new) / len(occ_list_new) > 5:  # pragma: no cover
        msg = 'Average number of occupants per apartment is higher than 5.' \
              ' This is not valid for usage of Richardson profile generator.'
        raise AssertionError(msg)
    # Number of occupants to be redistributed
    nb_occ_redist = 0
    # Cap every apartment at 5 and collect the overflow
    # ###############################################################
    for i in range(len(occ_list_new)):
        if occ_list_new[i] > 5:
            # Add remaining occupants to nb_occ_redist
            nb_occ_redist += occ_list_new[i] - 5
            # Set occ_list_new entry to 5 persons
            occ_list_new[i] = 5
    if nb_occ_redist == 0:
        # Nothing to redistribute -- return the (copied) list
        return occ_list_new
    # Identify empty apartments and add single occupant
    # ###############################################################
    for i in range(len(occ_list_new)):
        if occ_list_new[i] == 0:
            # Add single occupant
            occ_list_new[i] = 1
            # Remove occupant from nb_occ_redist
            nb_occ_redist -= 1
            if nb_occ_redist == 0:
                # All overflow absorbed -- return the redistributed list
                return occ_list_new
    # Redistribute remaining occupants
    # ###############################################################
    for i in range(len(occ_list_new)):
        if occ_list_new[i] < 5:
            # Fill occupants up with remaining occupants
            for j in range(5 - occ_list_new[i]):
                # Add single occupant
                occ_list_new[i] += 1
                # Remove single occupant from remaining sum
                nb_occ_redist -= 1
                if nb_occ_redist == 0:
                    # All overflow absorbed -- return the redistributed list
                    return occ_list_new
    if nb_occ_redist:  # pragma: no cover
        raise AssertionError('Not all occupants could be distributed.'
                             'Check inputs and/or redistribute_occ() call.')
9a9883268fe51edeb1b6c68f60fc763a236c9c41
49,884
import random


def pick_atom_idx(mol, prepick=None):
    """Pick an atom index from *mol*, keeping *prepick* when it is a valid
    index; otherwise choose randomly among atoms with free valence."""
    mol.UpdatePropertyCache()
    prepick_is_valid = (
        prepick is not None and 0 <= prepick < mol.GetNumAtoms()
    )
    if not prepick_is_valid:
        # Only atoms that can still bond (implicit valence > 0) qualify.
        candidates = [atom.GetIdx() for atom in mol.GetAtoms()
                      if atom.GetImplicitValence() > 0]
        prepick = random.choice(candidates) if candidates else None
    return prepick
9fa6b1ce48c8edf05be83e5426ab9e19c1ad2893
49,885
import struct


def calculate_sum32(data):
    """Two's-complement negation of the 32-bit sum of a buffer of DWORDs.

    Adding the result to the little-endian DWORD sum of *data* yields 0
    modulo 2**32, making it usable as a simple checksum.

    Raises:
        ValueError: if len(data) is not a multiple of 4.
    """
    if len(data) % 4 != 0:
        raise ValueError("Length of data is not multiple of DWORDs")
    fmt = "<{}I".format(len(data) // 4)
    total = sum(struct.unpack(fmt, data)) & 0xffffffff
    # The old `0xFFFFFFFF - total + 1` returned 0x100000000 (a 33-bit
    # value) when total == 0; masking keeps the result within 32 bits.
    return (0x100000000 - total) & 0xffffffff
05d04fbbed0819318fa481aa0c98246534496274
49,886
def rem_num(num, lis):
    """Return *lis* with every occurrence of *num* removed."""
    kept = []
    for element in lis:
        if element != num:
            kept.append(element)
    return kept
c2fd18b49a70a01bf9d44da1b2aacf8d4e93cbe9
49,887
def string_to_int_list(number_string):
    """Convert a string of digits to a list of integers.

    Arguments:
    number_string -- string containing numbers to convert
    """
    return [int(char) for char in number_string]
f53d286dc5a0ac4ad0312d5d071577b9ff93554e
49,889
def order_models_based_on_method_feastureName_and_performance( model_evaluation_results_df, sort_by = "real_test_mae", aggreagate_by = "mean" ): """ .......................................................................................... This function takes results of multi-model evalusdation and returns the list of ordered models done with different approaches, on different number of features. Function is used by Plot_SalePrice_with_all_predictions() .......................................................................................... simple_models_df : dataframe, unordered models, created with different number of features, sort_by : str, {real_test_mae, real_train_mae, test_mae, train_mae} error results, used to order the models, aggreagate_by : str, {"mean", "mean"} I run models, several time, using different combinations, of rows in test/train data, that function aggregates values from these models, """ # ... to start with model_evaluation_results_df = model_evaluation_results_df.copy() # ... constants, cols_to_present = ["method",'feature nr'] cols_to_take = [ sort_by, "best_alpha", "best_l1"] #(simple_models_df.loc[:,"ID"]==0).sum() # ... find results for each combination of feature/method agg_for_method_feature_comb = model_evaluation_results_df.groupby(["method", 'feature nr'])[cols_to_take].agg([aggreagate_by]) agg_for_method_feature_comb = agg_for_method_feature_comb.reset_index(drop=False) agg_for_method_feature_comb.columns = ["method", "Feature_nr", sort_by, "best_alpha", "best_l1"] sorted_agg_for_method_feature_comb = agg_for_method_feature_comb.sort_values(sort_by) sorted_agg_for_method_feature_comb = sorted_agg_for_method_feature_comb.reset_index(drop=True) # ... display, return sorted_agg_for_method_feature_comb
894e256ed4d5dbeee09eed13810818071f17a217
49,890
def __name__():
    """Return the human-readable name of this object."""
    interpolator_name = "Demand and Supply Interpolator"
    return interpolator_name
f7c869c8d03afb4a7e1bfc5a5c99c7069645b49b
49,893
def bold_follows(parts, i):
    """Check whether a bold marker (''') occurs in parts after parts[i].

    Intervening italics ('') and plain parts are allowed and skipped."""
    remaining = parts[i + 1:]
    # Any later part opening with ''' means bold follows; everything
    # else (plain text or '' italics) is simply passed over.
    return any(part.startswith("'''") for part in remaining)
a51ac103bb00845f7ea547f24da37e239135f519
49,895
def pci_deleted(new_list, old_list):
    """Returns list of elements in old_list which are not present in new list."""
    removed = []
    for old in old_list:
        # An old entry survives only if some configured new entry matches
        # its bus address AND some configured new entry matches its device.
        bus_found = any(
            item['bus_address'] == old['bus_address'] and item.get('config_script', None)
            for item in new_list
        )
        device_found = any(
            item['device'] == old['device'] and item.get('config_script', None)
            for item in new_list
        )
        if not (bus_found and device_found):
            removed.append(old)
    return removed
509ebba83c0c9a5f78804993c66bb3fa05c22a4b
49,896
def fill_list_var(to_check, length=0, var=1):
    """Return a list filled with the specified variable and the desired length.

    ``to_check`` is extended in place with ``var`` until it reaches
    ``length``; inputs already long enough (or non-lists) are returned
    unchanged.
    """
    missing = length - len(to_check)
    if missing > 0 and isinstance(to_check, list):
        to_check += [var] * missing
    return to_check
d174b8410f4732e3f2f0c1110a0d766d37d887ae
49,897
import pandas as pd


def isleapyear(yr):
    """Determine whether a year is a leap year.

    :param yr: an integer year value (i.e: 2019)
    :return: 1 if a leap year, 0 if not (int, matching the old behavior)
    """
    import calendar

    # calendar.isleap implements the Gregorian leap-year rule directly,
    # avoiding the old round-trip through a one-row pandas DataFrame.
    return int(calendar.isleap(yr))
8a9e743d9c0632f1749f51f85b482469f7a58bae
49,898
def get_symbols(formula):
    """Return the set of propositional symbols in a CNF formula.

    ``formula`` is an iterable of clauses, each clause an iterable of
    signed integer literals; the symbol of a literal is its absolute value.
    """
    # Set comprehension instead of set([...]) — no intermediate list.
    return {abs(lit) for clause in formula for lit in clause}
6674fb048509d9999d66436db579f9cc95136495
49,900
def ensure_bytes(value):
    """Converts value to bytes.

    Converts bytearray and str to bytes. Scanners may create child files
    that are one of these types; this method is used on every file object
    to ensure the file data is always bytes.

    Args:
        value: Value that needs conversion to bytes.

    Returns:
        A byte representation of value.
    """
    if isinstance(value, str):
        return value.encode('utf-8')
    if isinstance(value, bytearray):
        return bytes(value)
    # Anything else (including bytes) passes through untouched.
    return value
823aed980535b3a940849d7b265e78a65d3cca33
49,901
def scale_param(X, X_min, X_max):
    """Min-max scale X from [X_min, X_max] into the range [-1, 1].

    Formula source:
    https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html

    X == X_min maps to -1 and X == X_max maps to 1 (the old docstring's
    claim of a [0, 1] range did not match the code).
    """
    sigma = (X - X_min) / (X_max - X_min)
    # Equivalent to the old "sigma*(1 - -1)+-1", written readably.
    return 2.0 * sigma - 1.0
9a7c091993b86d9a154c55c1fa5cbe6c1536ed69
49,902
import os


def get_environment():
    """Return the runtime environment derived from the SERVER_NAME variable.

    Returns ``production``, ``staging``, ``testing``, ``test`` or
    ``development`` (the fallback). See :term:`production version`,
    :term:`staging version`, :term:`testing version`, and
    :term:`production version` for meaning.
    """
    # Look the variable up once; the original queried it per branch.
    server_name = os.environ.get('SERVER_NAME', '')
    if server_name.startswith('production'):
        return 'production'
    if server_name.startswith('staging'):
        return 'staging'
    if server_name.startswith('v') and server_name.endswith('appspot.com'):
        return 'testing'
    if server_name.startswith('test'):
        return 'test'
    return 'development'
9797b65dba5e027aa9c373a4a3bbcce8dfdd5f8a
49,903
def log(message: str) -> bool:
    """Logs a message.

    :param message: text to record (currently discarded).
    :return: always ``False`` — logging is a stub.
    """
    return False
4a3c5b645871ed3f9983abc4c7fece5981bdbf2c
49,904
import json


def read_json_file(file_to_read: str) -> dict:
    """Read and parse a JSON file.

    :param file_to_read: path to the JSON file.
    :return: the decoded JSON document.
    :raises OSError: if the file cannot be opened.
    :raises json.JSONDecodeError: if the contents are not valid JSON.
    """
    # JSON is defined as UTF-8; be explicit instead of relying on the
    # platform default encoding. The old pre-initialized empty dict was
    # dead code and has been removed.
    with open(file_to_read, 'r', encoding='utf-8') as json_file:
        return json.load(json_file)
c36e5fdacbf3521c5fa57ceefc1923c5e8ef85e9
49,905
def generate_src_masks(source_padded, pad):
    """ Generate sentence masks for encoder
        This is the padding mask
    @param source_padded (Tensor): encodings of shape (b, src_len), where
                                   b = batch size, src_len = max source length
    @param pad: id of the padding token
    @returns boolean mask of shape (b, 1, src_len), True at real tokens
    """
    is_token = source_padded != pad
    # Insert a singleton dim so the mask broadcasts over attention scores.
    return is_token.unsqueeze(-2)
820220ad458913e1774df2ea06013a326a3ba11e
49,906
import lzma


def C(x: bytes):
    """ gives the compressed length of a byte string """
    compressed = lzma.compress(x)
    return len(compressed)
4180d3f4ae29e245d6f3d0b6bb47a5c067500842
49,907
import argparse


def parse_args(default_minppmm=1000):
    """Retrieve command line parameters.

    Returns:
        tuple: (plumedir, regex, windir, outfile, force, nprocs, flmode)
    """
    # Default file-matching pattern, parameterised by the ppmm threshold.
    default_regex = "ang.*_detections/ime_minppmm{}/ang.*_ime_minppmm{}.*".format(
        default_minppmm, default_minppmm)

    arg_parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
    arg_parser.add_argument("-p", "--plumedir", required=True,
                            help="path to input plume file directory")
    arg_parser.add_argument("-r", "--regex", required=False, default=default_regex,
                            help="Regular expression to match for plume files")
    arg_parser.add_argument("-w", "--windir", required=True,
                            help="path to input wind file directory")
    arg_parser.add_argument("-o", "--outfile", required=True,
                            help="path to output plume list")
    arg_parser.add_argument("-f", "--force",
                            help="Force reprocessing of all files (not just the new ones)",
                            action='store_true')
    arg_parser.add_argument("-n", "--nprocs", type=int, default=1,
                            help="number of parallel processes to use; default=1 (sequential)")
    arg_parser.add_argument("--flmode",
                            help="Executes script in flightline mode, running on a single flightline",
                            action="store_true")

    parsed = arg_parser.parse_args()
    return (parsed.plumedir, parsed.regex, parsed.windir, parsed.outfile,
            parsed.force, parsed.nprocs, parsed.flmode)
2c1d4d1d691828965f3a85b48e21eab8051e4713
49,908
def format_mac(str_hex):
    """Format a string of hexadecimal digits as a colon-separated MAC address.

    Arguments:
        str_hex: a string of hexadecimal digits; only the first 12
                 characters (6 octets) are used.

    Returns:
        A string like "aa:bb:cc:dd:ee:ff".
    """
    # The old unused counter variable has been removed.
    return ':'.join(str_hex[i:i + 2] for i in range(0, 12, 2))
9b009984bd09d7ac80db51ae01176bcdcb0014b2
49,909
def find_min_image_height(images):
    """Return the smallest height among a list of images.

    Args:
        images: list of image objects exposing a ``height`` attribute

    Returns:
        the minimum height
    """
    heights = (image.height for image in images)
    return min(heights)
8b0e35fb701ec5973dc2251e719cef3232719176
49,910
import torch


def min_max_norm(x):
    """Rescale *x* linearly into [0, 1] (min-max normalization) with PyTorch."""
    lo = torch.min(x)
    hi = torch.max(x)
    return (x - lo) / (hi - lo)
027489c8325a250bcd33481bb61a35338a776324
49,911
def convert_from_tuple(core_tuple):
    """
    Converts arrus core tuple to python tuple.
    """
    size = core_tuple.size()
    return tuple(core_tuple.get(i) for i in range(size))
bc4bed173ab28d7209fc101ced8562a14aefb80c
49,912
import os


def get_target_dirs(read_from='list', list=[], path='.'):
    """Get target directories from a manual list, an input file, or the
    filesystem.

    Parameters
    ----------
    read_from : str, 'list' or 'file' or 'ls' (default='list')
        Source of the directory names:
        - 'list', returns the names given in the `list` attribute.
        - 'file', reads one name per line from the file at `path`.
        - 'ls', lists non-hidden subdirectories of `path` (default='.').
        Any other value returns None.
    list : list
        Names returned verbatim in 'list' mode.  NOTE(review): the
        parameter shadows the builtin and has a mutable default; the name
        is kept for backward compatibility with keyword callers.
    path : str
        File path ('file' mode) or directory path ('ls' mode).
    """
    if read_from == 'list':
        return list
    if read_from == 'file':
        with open(path, 'r') as f:
            return [line.strip() for line in f]
    if read_from == 'ls':
        # BUG FIX: test each entry relative to `path`, not the current
        # working directory, so listings of other directories work.
        return [entry for entry in os.listdir(path)
                if os.path.isdir(os.path.join(path, entry))
                and not entry.startswith('.')]
c31da92ac8699c992bd2d5f94682e6b6a4c10b1a
49,913
def convert_perm(m):
    """
    Convert tuple m of non-negative integers to a permutation in one-line form.

    INPUT:

    - ``m`` - tuple of non-negative integers with no repetitions

    OUTPUT: ``list`` - conversion of ``m`` to a permutation of the set
    1,2,...,len(m)

    If ``m=(3,7,4)``, then one can view ``m`` as representing the
    permutation of the set `(3,4,7)` sending 3 to 3, 4 to 7, and 7 to 4.
    This function converts ``m`` to the list ``[1,3,2]``, which represents
    essentially the same permutation, but of the set `(1,2,3)`. This list
    can then be passed to :func:`Permutation
    <sage.combinat.permutation.Permutation>`, and its signature can be
    computed.

    EXAMPLES::

        sage: sage.algebras.steenrod.steenrod_algebra_misc.convert_perm((3,7,4))
        [1, 3, 2]
        sage: sage.algebras.steenrod.steenrod_algebra_misc.convert_perm((5,0,6,3))
        [3, 1, 4, 2]
    """
    # Rank every entry once via a dict instead of a list.index() call per
    # element, replacing the old O(n^2) lookup with O(n log n) overall.
    rank = {x: i + 1 for i, x in enumerate(sorted(m))}
    return [rank[x] for x in m]
aaf2755985a8e66efdcd06c0e42fdda53481b287
49,914
def _insert_jinja_configuration(c):
    """Insert the configuration for the sphinx-jinja extension.

    The "default" Jinja context exposes every variable in the conf.py
    configuration namespace to jinja directives.
    """
    contexts = {"default": c}
    c["jinja_contexts"] = contexts
    return c
b5a8ff7220682a420ed677b505907a031e66f314
49,915
def infectious_from_cases(cases, R0) -> float:
    """
    Initializes the "infectious" component of a SIR model from the current
    number of cases.

    This formula assumes a perfect exponential growth.

    Raises:
        ValueError: if ``R0`` is not greater than one.
    """
    if R0 <= 1:
        raise ValueError(f"R0 must be greater than one (got {R0})")
    seed = 1
    numerator = cases * (R0 - 1) + seed
    denominator = 2 * R0 - 1
    return numerator / denominator
c1be11670b074ef714528db8c32f8f22be8ff716
49,917
def find_offset(fn: str):
    """\
    Find the byte offset of the XSPD block header within the file.

    The file is scanned in 4 MiB chunks.  A 3-byte overlap is carried
    between consecutive chunks so a header straddling a chunk boundary is
    still found (the original chunk-by-chunk search missed that case).
    In future this should use the offsets of the other blocks to seek to
    the correct location directly.

    :param fn: path of the file to scan.
    :return: absolute offset of b"XSPD", or None if it is not present.
    """
    MAGIC = b"XSPD"
    BS = 4194304
    with open(fn, "rb") as wad:
        offset = 0   # absolute file offset of the next byte to read
        tail = b""   # last len(MAGIC)-1 bytes of the previous window
        while True:
            chunk = wad.read(BS)
            if not chunk:
                return None
            window = tail + chunk
            hit = window.find(MAGIC)
            if hit != -1:
                # `window` starts at offset - len(tail) in the file.
                return offset - len(tail) + hit
            tail = window[-(len(MAGIC) - 1):]
            offset += len(chunk)
8c007b5741e5323f6a1830f2812d8c61aae504b2
49,918
def is_paired(aln):
    """
    Input: pysam.AlignedSegment
    "properly" paired (correct reference and orientation)

    NB: This is used instead of the properly_paired flag as rare
    superamplicons fall outside the expected insert size distribution and
    are not marked as such.
    NB: It does not check if the orientation is FR as RF orientations are
    discarded due to no biological sequence.

    Returns: Boolean
    """
    # Mates must point in opposite directions for the pair to count.
    opposite_strands = aln.is_reverse != aln.mate_is_reverse
    return aln.is_paired and opposite_strands
8ff169a1bc4a2d30fe7ea0ba92135e773349f952
49,919
def normalize_bbox(bbox):
    """Return [upper-left lon, upper-left lat, bottom-right lon,
    bottom-right lat] from a free-style lat/lon bounding box.

    Coordinates must fall inside the Australian WGS84 bounds
    (112.85 -43.7 153.69 -9.86), otherwise an Exception is raised.
    """
    # Split the four numbers into longitudes and latitudes by value range.
    lons = sorted(d for d in bbox if 100 < d < 160)
    lats = sorted((d for d in bbox if -46 < d < -7), reverse=True)
    if len(lons) != 2 or len(lats) != 2:
        raise Exception("The bounding box({}) is not belonging to australia".format(bbox))
    return [lons[0], lats[0], lons[1], lats[1]]
f9c7a89648e762851af15f898fd586ddbb38673c
49,920
import string


def remove_punctuation(text: str) -> str:
    """Remove punctuation characters from a string.

    Args:
        text (str): String containing punctuation to be removed.

    Returns:
        str: String with all ASCII punctuation removed.
    """
    # One C-level pass deletes every character in string.punctuation.
    strip_table = str.maketrans("", "", string.punctuation)
    return text.translate(strip_table)
a2968f8da45d992ce1ccd5957a1b0141c35429bc
49,921
def get_guess_doc_provenance(sources, icsd=None):
    """ Returns a guess at the provenance of a structure from its source
    list, by pattern-matching the source file names.

    Return possibilities are 'ICSD', 'SWAPS', 'OQMD', 'AIRSS' (the
    default), 'GA', 'MP', 'PF', 'SM', 'DOI' or 'ENUM'.

    Parameters:
        sources: a list of file-name strings, a single string, or a dict
            with a 'source' key holding such a list.
        icsd: if not None, forces 'ICSD' provenance for otherwise
            unclassified structure files.
    """
    # Default provenance when nothing more specific matches.
    prov = 'AIRSS'
    if isinstance(sources, dict):
        sources = sources['source']
    elif isinstance(sources, str):
        sources = [sources]
    for fname in sources:
        # Keep the full path: the 'swap' check below looks at folders too.
        fname_with_folder = fname
        fname = fname.split('/')[-1].lower()
        # Only classify names that look like structure files (known
        # extensions, or extension-less OQMD/MP/PF identifiers).
        if (fname.endswith('.castep') or fname.endswith('.res') or fname.endswith('.history') or
                ('oqmd' in fname and fname.count('.') == 0) or
                (any(s in fname for s in ['mp-', 'mp_']) and fname.count('.') == 0) or
                (any(s in fname for s in ['pf-', 'pf_']) and fname.count('.') == 0)):
            # ICSD collection-code names, possibly swap-derived.
            if any(substr in fname for substr in ['collcode', 'colcode', 'collo']):
                if fname.count('-') == 2 + fname.count('oqmd') or 'swap' in fname:
                    prov = 'SWAPS'
                else:
                    prov = 'ICSD'
            elif 'swap' in fname_with_folder:
                prov = 'SWAPS'
            elif '-ga-' in fname:
                prov = 'GA'
            elif icsd is not None:
                prov = 'ICSD'
            elif 'oqmd' in fname:
                prov = 'OQMD'
            elif '-icsd' in fname:
                prov = 'ICSD'
            # NOTE(review): `prov` is initialised to 'AIRSS' and never set
            # to None, so this branch can never fire and 'PF' appears
            # unreachable; likewise `prov != 'PF'` below is always true.
            # Left unchanged pending confirmation of the intended logic.
            elif 'pf-' in fname and prov is None:
                prov = 'PF'
            elif any(s in fname for s in ['mp-', 'mp_']) and prov != 'PF':
                prov = 'MP'
            elif '-sm-' in fname:
                prov = 'SM'
            elif '-doi-' in fname:
                prov = 'DOI'
            elif '-config_enum' in fname:
                prov = 'ENUM'
    return prov
75fecc6292a7026816d420d511fc47a05580599a
49,922
def _dictionary2string(dict_x):
    """Render a dictionary as a single-line string of "key: value" pairs.

    :param dict_x: a dictionary
    :return: string of the form "k1: v1, k2: v2, " — note the trailing
        separator, preserved for backward compatibility with the old
        concatenation loop.
    """
    # str.join avoids the quadratic += string concatenation of the
    # original loop; f-strings apply str() to keys and values implicitly.
    return "".join(f"{key}: {value}, " for key, value in dict_x.items())
9de74046bddcab466706be191e335b28f52ba41b
49,924
def battery_status(battery_analog_in) -> float:
    """Return the voltage of the battery.

    The raw 16-bit ADC reading is scaled to the 3.3 V reference and
    doubled to undo the board's 2:1 voltage divider.
    """
    fraction = battery_analog_in.value / 65535.0
    return fraction * 3.3 * 2
df3356ca5247767c13c750827917928ef7c3825c
49,925
import os


def alistdir(sendrote):
    """Recursively walk *sendrote* and return the full paths of all files
    found beneath it.

    :param sendrote: root directory to traverse.
    :return: list of file paths (each directory joined with its file name).
    """
    # Progress message kept byte-identical for compatibility
    # ("listing all folders and files under this directory").
    print("列出该系统目录下所有文件夹和文件:")
    # The old version initialised the result list twice; a single
    # comprehension over os.walk does the same job.
    return [os.path.join(root, file)
            for root, dirs, files in os.walk(sendrote)
            for file in files]
001dad102bc2421ea4674a814b6ded49605f40b5
49,926
def _serializable_load(cls, args, kwargs, state):
    """
    Create the new instance using args and kwargs, then apply the
    additional state.  This is used by the __reduce__ implementation.

    :param cls: class to create an instance of
    :param args: positional arguments
    :param kwargs: keyword arguments
    :param state: additional stored state
    :return: the reconstructed instance
    """
    instance = cls(*args, **kwargs)
    instance._set_state(state)
    return instance
40ec75f31f8262ce511f1c64f82944803d747817
49,927
def flatten(data):
    """Recursively flatten nested lists and dicts into a flat list.

    Lists are flattened element-wise, dicts by their values; any other
    value (string, number, ...) is treated as a leaf and appended as-is.

    Returns:
        list of leaf values (the old docstring's ``list[str]`` claim was
        inaccurate — leaves keep their original types).
    """
    flat = []
    # Explicit loops replace the old comprehensions-for-side-effect.
    if isinstance(data, list):
        for item in data:
            flat.extend(flatten(item))
    elif isinstance(data, dict):
        for item in data.values():
            flat.extend(flatten(item))
    else:
        flat.append(data)
    return flat
f0c37b7e4fedf797c630f38b92a6b799b05354dd
49,931