content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import sys
import functools
import signal


def timeout(max_running_time):
    """Test method decorator that fails the test if it executes longer than
    |max_running_time| seconds. It exists to terminate tests in case of
    deadlocks. There's a high chance that process is broken after such
    timeout (due to hanging deadlocked threads that can own some shared
    resources). But failing early (maybe not in a cleanest way) due to
    timeout is generally better than hanging indefinitely.

    |max_running_time| should be an order of magnitude (or even two orders)
    larger than the expected run time of the test to compensate for slow
    machine, high CPU utilization by some other processes, etc.

    Can not be nested.

    Noop on windows (since win32 doesn't support signal.setitimer).
    """
    if sys.platform == 'win32':
        # No setitimer on Windows: return the method unchanged.
        return lambda method: method

    def decorator(method):
        @functools.wraps(method)
        def wrapper(self, *args, **kwargs):
            # Fail the test from inside the SIGALRM handler if the timer fires.
            signal.signal(signal.SIGALRM, lambda *_args: self.fail('Timeout'))
            signal.setitimer(signal.ITIMER_REAL, max_running_time)
            try:
                return method(self, *args, **kwargs)
            finally:
                # Always restore default signal handling and cancel the timer,
                # even when the test fails or raises.
                signal.signal(signal.SIGALRM, signal.SIG_DFL)
                signal.setitimer(signal.ITIMER_REAL, 0)
        return wrapper
    return decorator
f3f8c55aaea56d0dcf99f666e6f441373b9eff23
29,144
def z_minus_its_reciprocal(z: complex) -> complex:
    """Return the value of the map ``z - 1/z``.

    Defined explicitly because it plays an important role in the model and
    is also plotted and tested on its own. The case of a zero (or infinite)
    argument is deliberately not handled here.

    Args:
        z (complex): a nonzero complex number

    Returns:
        complex: The value of z - 1/z.
    """
    reciprocal = 1 / z
    return z - reciprocal
46de97ee9c628faf5fe626108af801827e2dfc8f
29,145
import subprocess


def run_test(cfg):
    """Run ``npm test`` from the configured working directory.

    :param cfg: config object providing ``get('Directories', 'test_cwd')``.
    :return: the subprocess return code of ``npm test``.
    """
    test_dir = cfg.get('Directories', 'test_cwd')
    return subprocess.call(['npm', 'test'], cwd=test_dir)
11a118c875b8354eb0b887e1149268a8438f2e0b
29,146
from typing import List


def split_str(data_str: str) -> List[str]:
    """Split a string into a list of strings.

    The string is split at ',' or '\\n', with '\\n' taking precedence; a
    string containing neither is returned as a one-element list.

    :param data_str: A string to be split.
    :return: List[str]
    """
    if '\n' in data_str:
        return data_str.splitlines()
    if ',' in data_str:
        return data_str.split(',')
    return [data_str]
450b301f2b696e2240f9bb8fe49def815c59478d
29,147
def findAttrInAttributes(klass, classAttr):
    """Find classAttr in WorkflowAttributes of klass.

    Returns the first attribute whose ``classAttr`` matches, or None when
    there is no match.
    """
    matches = (attr for attr in klass.attributes if attr.classAttr == classAttr)
    return next(matches, None)
0ecf387b24af116e5d4a038d2788f4dab8a4aeeb
29,148
def remove_zp_cluster(clusters):
    """Remove zero-pronoun (zero-width) mentions from coreference clusters.

    A mention ``(sloc, eloc)`` is kept only when ``eloc > sloc``; clusters
    left with fewer than two mentions are dropped entirely.
    """
    filtered_clusters = []
    for cluster in clusters:
        kept = [[sloc, eloc] for sloc, eloc in cluster if eloc - sloc > 0]
        if len(kept) > 1:
            filtered_clusters.append(kept)
    return filtered_clusters
13ee735f911e52302c34c46a2df66ef320a3f507
29,149
def cumulative_discounted_rewards(trajectories):
    """Calculate the cumulative discounted rewards for the given trajectories.

    1. input: a list of trajectories; each trajectory is a list of tuples,
       one tuple being comprised of the following values, IN ORDER:
         1. current state (s)
         2. action agent chooses (a)
         3. reward (r)
         4. state agent enters after choosing a (s2)
         5. next action agent chooses from new state (a2)
    2. output:
         1. list of cumulative discounted rewards, one list per trajectory

    Fix: the original unpacked ``trajectory_list[0]`` instead of
    ``trajectory[0]`` into ``state``; none of those unpacked values were
    actually used, so the dead (and buggy) unpacking is removed.
    """
    discount_factor = 0.9
    all_rewards = []
    for trajectory_list in trajectories:
        curr_rewards = []
        for j in range(len(trajectory_list)):
            # Discounted return from step j to the end of the trajectory;
            # index 2 of each tuple is the reward.
            discounted_return = 0
            for count, k in enumerate(range(j, len(trajectory_list))):
                discounted_return += (discount_factor ** count) * trajectory_list[k][2]
            curr_rewards.append(discounted_return)
        all_rewards.append(curr_rewards)
    return all_rewards
897b08144093fd1e6bd1c6fd05db0f49be487a17
29,150
def file_util_read_byte(path):
    """Read a binary file and return its whole contents as ``bytes``.

    :param path: path of the file to read.
    :return: the file contents.

    Fix: the original iterated the file line by line and concatenated with
    ``bytes +=`` — quadratic for large files; a single ``read()`` returns
    the identical result in one pass.
    """
    with open(path, 'rb') as f:
        return f.read()
eda93cffa54a7a7bdbf617954866bd5cbabdcc64
29,151
import os


def get_problems(fname="problem_names.txt"):
    """Get the list of problems in ``fname``.

    Returns one stripped line per problem, or an empty list when the file
    does not exist.

    :param fname: The name of the file that has problems.
    """
    if not os.path.exists(fname):
        return []
    with open(fname, "r") as problem_file:
        return [line.strip() for line in problem_file.readlines()]
3b70822bf5ae4aae924b4fce6570c832f576d882
29,153
def tokenization(tweet):
    """Tokenize a tweet into words.

    INPUT:
        tweet: a tweet as a python string
    OUTPUT:
        list of tweet's tokens (words)
    """
    # str.split() already returns a list; the original's extra list() wrapper
    # was redundant.
    return tweet.split()
60c59fdbe775ea8178b3f2c4de6657e3fedd776a
29,155
def tree_named(tree):
    """Single node tree fixture where the node has ``service_name`` filled out.

    Mutates the first node in ``tree`` in place and returns the same tree.
    """
    nodes = list(tree.values())
    nodes[0].service_name = 'dummy'
    return tree
fe5d1c10004035343498c2eb413f010baeb458e5
29,157
import time


def tic():
    """Start the tictoc timer: ``start = tic()``.

    Stores the current wall-clock time (seconds since the epoch) in the
    module-global ``_tic`` and also returns it.
    """
    global _tic
    start = time.time()
    _tic = start
    return start
85f4e6ccbae8ecccf761e8474d42cb12fcc7466c
29,158
import numpy


def radians_angles_average(x: numpy.ndarray) -> float:
    r"""
    Calculate the circular average of angles given in radians.

    Fix: the original docstring said "degree angles" although the function
    (and its name) work in radians; the docstring is also now a raw string
    so the ``\pi`` escapes are literal.

    :param x: numpy array of radian angles in range (0, :math:`2\pi`)
        or (:math:`-\pi`, :math:`\pi`).
    :return: the average angle in radians.
    """
    n = x.size
    # arctan2 of the mean sine and mean cosine yields the circular mean.
    return numpy.arctan2((1 / n) * numpy.sum(numpy.sin(x)),
                         (1 / n) * numpy.sum(numpy.cos(x)))
87ffa9c968c72f7246bd93b9766d4702a48276d6
29,159
import sys
import argparse
import os


def get_args(argv):
    """
    get args using argparse.ArgumentParser ArgumentParser

    e.g: argparse https://docs.python.org/3/library/argparse.html

    jumeg wrapper for <mne.io.read_raw_bti>
    https://martinos.org/mne/stable/generated/mne.io.read_raw_bti.html#mne.io.read_raw_bti

    Results:
    --------
    parser.parse_args(), parser
    """
    info_global = """ JuMEG import 4D/BTi data to FIF call <mne.io.read_raw_bti> used python version: {} """.format(sys.version.replace("\n", " "))
    # --- help strings for the individual command-line options -------------
    h_fif_stage = """ fif stage: start path for fif files from list -> start path to fif file directory structure e.g. /data/megstore1/exp/INTEXT/mne/ """
    # h_fif_path_prefix="path prefix append to <fif stage>"
    h_pdf_stage = """ pdf stage: start path for fif files from list -> start path to fif file directory structure e.g. /data/megstore1/exp/INTEXT/mne/"""
    h_pdf_fname = "Path to the processed data file (PDF)"
    h_config_fname = "Path to system config file"
    h_head_shape_fname = "Path to the head shape file"
    h_rotation_x = "Degrees to tilt x-axis for sensor frame misalignment. Ignored if convert is True."
    h_translation = "array-like, shape (3,)\nThe translation to place the origin of coordinate system to the center of the head.\nIgnored if convert is True."
    h_convert = "Convert to Neuromag coordinates or not."
    h_rename_channels = "Whether to keep original 4D channel labels or not. Defaults to True."
    h_sort_by_ch_name = "Reorder channels according to channel label.\n4D channels don’t have monotonically increasing numbers in their labels.\nDefaults to True."
    h_ecg_ch = "The 4D name of the ECG channel.\nIf None, the channel will be treated as regular EEG channel."
    h_eog_ch = "The 4D names of the EOG channels.\nIf None, the channels will be treated as regular EEG channels."
    h_preload = """Preload data into memory for data manipulation and faster indexing. If True, the data will be preloaded into memory (fast, requires large amount of memory).
If preload is a string, preload is the file name of a memory-mapped file which is used to store the data on the hard drive (slower, requires less memory)."""
    h_verbose = "bool, str, int, or None"
    h_overwrite = "overwrite existing fif files"

    # --- parser
    parser = argparse.ArgumentParser(info_global)

    # --- bti input files
    parser.add_argument("-spdf", "--pdf_stage", help=h_pdf_stage, metavar="PDF_STAGE", default=os.getenv("JUMEG_PATH_BTI_EXPORT", "/data/MEG/meg_store2/megdaw_data21"))
    # parser.add_argument("-spdf","--pdf_stage",help=h_pdf_stage,metavar="PDF_STAGE", default="${HOME}/MEGBoers/data/megdaw_data21")

    # --- fif output
    # parser.add_argument("-sfif", "--fif_stage", help=h_fif_stage,metavar="FIF_STAGE",default=os.getenv("JUMEG_PATH_MNE_IMPORT","/data/MEG/meg_strore1/exp"))
    parser.add_argument("-sfif", "--fif_stage", help=h_fif_stage, metavar="FIF_STAGE", default="${HOME}/MEGBoers/data/exp/INTEXT/mne")
    # parser.add_argument("-fif_path_prefix","--fif_path_prefix",help=h_fif_path_prefix,default="mne")
    parser.add_argument("-fif_ext", "--fif_extention", help="fif file extention", default="-raw.fif")

    # --- parameter
    parser.add_argument("-pdf_fname", "--pdf_fname", help=h_pdf_fname, default="c,rfDC")
    parser.add_argument("-config_fname", "--config_fname", help=h_config_fname, default="config")
    parser.add_argument("-head_shape_fname", "--head_shape_fname", help=h_head_shape_fname, default="hs_file")
    parser.add_argument("-rot", "--rotation_x", help=h_rotation_x, default=None)
    parser.add_argument("-translation", "--translation", help=h_translation, default=None)
    parser.add_argument("-ecg_ch", "--ecg_ch", help=h_ecg_ch)
    parser.add_argument("-eog_ch", "--eog_ch", help=h_eog_ch)

    # --- flags
    parser.add_argument("-prel", "--preload", action="store_true", default=True, help=h_preload)
    parser.add_argument("-sort", "--sort_by_ch_name", action="store_true", default=True, help=h_sort_by_ch_name)
    parser.add_argument("-rename", "--rename_channels", action="store_true", default=True, help=h_rename_channels)
    parser.add_argument("-conv", "--convert", action="store_true", default=True, help=h_convert)
    parser.add_argument("-overwrite", "--overwrite", action="store_true", help=h_overwrite)
    parser.add_argument("-save", "--save", action="store_true", default=True, help="save as fif file")
    parser.add_argument("-v", "--verbose", action="store_true", help=h_verbose)
    parser.add_argument("-r", "--run", action="store_true", help="!!! EXECUTE & RUN this program !!!")
    parser.add_argument("-log", "--logfile", action="store_true", help="generate logfile")

    # --- init flags
    # ck if flag is set in argv as True
    # problem can not switch on/off flag via cmd call
    # For every store_true action whose default made it parse as True, reset
    # it to False and re-enable it only when its flag literally appears in
    # argv — i.e. True defaults act as documentation, not as actual defaults.
    # NOTE(review): this walks argparse's private _action_groups /
    # _group_actions attributes; verify against the argparse version in use.
    opt = parser.parse_args()
    for g in parser._action_groups:
        for obj in g._group_actions:
            if str(type(obj)).endswith('_StoreTrueAction\'>'):
                if vars(opt).get(obj.dest):
                    opt.__dict__[obj.dest] = False
                    for flg in argv:
                        if flg in obj.option_strings:
                            opt.__dict__[obj.dest] = True
                            break
    return opt, parser
50830fd4107a333945fb99558444cd0becbad791
29,160
def ps_probs(feedback_table, confidence, action):
    """Return the probability that ``action`` is optimal given previous
    feedback counts and the confidence in that feedback.
    """
    # Arbitrary clamp to prevent float overflow in the powers below; it also
    # artificially limits how much feedback affects the agent.
    if feedback_table[action] > 30:
        return 1
    if feedback_table[action] < -30:
        return 0
    fdbk_probs = []
    for idx, fdbk in enumerate(feedback_table):
        # Binomial-style probability: confidence^own_feedback times
        # (1 - confidence)^(sum of everyone else's feedback).
        left_side = confidence ** fdbk
        right_side_power = sum(
            other for j, other in enumerate(feedback_table) if j != idx
        )
        right_side = (1 - confidence) ** right_side_power
        fdbk_probs.append(left_side * right_side)
    if fdbk_probs[action] > 1:
        fdbk_probs[action] = 1
    return fdbk_probs[action]
46e3556d57b07ea1a8c43e02d58f5de08522cea8
29,162
import re def _clean_multirc_inputs(dataset_name, text): """Removes HTML markup from Multi-RC task input text.""" if dataset_name == "super_glue/multirc": # Remove HTML markup. text = re.sub(r"<br>", " ", text.decode("utf-8")) text = re.sub(r"<(/)?b>", " ", text) return text
71d0999ddb9a3942e6d53473c534d55924a0aaa1
29,165
def is_complex_parsing_required(value):
    """Determine if the string being parsed requires complex parsing.

    Currently this is solely determined by the presence of a colon (:).

    Args:
        value (str): A string that will be parsed.

    Returns:
        bool: whether the string requires complex parsing.
    """
    return value.find(":") != -1
b5b13eb6f8a28d69a2a069fa8228ace0e842873b
29,166
def is_xxx_exist(data):
    """Check whether the whois response obtained by ``com_manage`` contains
    the xxx markers; when it does, the request needs to be re-sent.

    Both the ``"xxx"`` and ``"=xxx"`` quoted markers must be present.
    """
    return '"xxx"' in data and '"=xxx"' in data
e23d995e18d2c604e3d587ba1784bc0717f135e4
29,168
def merge_nodeproviders(*nodeproviders):
    """Create a term-mixed NodeProvider from multiple instances.

    Returns None for no arguments and the instance itself for a single
    argument; raises TypeError when the providers are not all of the same
    class.
    """
    # General checks.
    if not nodeproviders:
        return None
    if len(nodeproviders) == 1:
        return nodeproviders[0]
    distinct_types = {type(provider) for provider in nodeproviders}
    if len(distinct_types) != 1:
        raise TypeError('inconsistent provider types')
    merged = type(nodeproviders[0])(romanize=True)
    for provider in nodeproviders:
        merged._merge(provider)
    return merged
b67e44d293ce0d6da1bcc600b6f3c81d50f4b8f0
29,170
import inspect


def ismethod(func):
    """Is func a method?

    Note that this has to work as the method is defined but before the class
    is defined — at that stage methods look like plain functions, so the
    presence of a ``self`` parameter is used as the heuristic.
    """
    params = inspect.signature(func).parameters
    return 'self' in params
bbc971ae9ccde0c44e12e89027cd7180bfeac178
29,171
def getPrefix(netmask):
    """Get the CIDR prefix representing the netmask.

    :param netmask: Netmask to convert to CIDR (dotted quad string)
    :returns: CIDR prefix representing the netmask
    :rtype: int
    """
    prefix = 0
    for octet in netmask.split('.'):
        # Count the set bits contributed by each octet.
        prefix += bin(int(octet)).count('1')
    return prefix
ed721e171d76236d13a67c7f3ea164c996171a4b
29,172
def rolling_mean(ts, window):
    """Calculate the rolling mean of a time series DataFrame.

    Uses pandas.DataFrame.rolling() with the given window size; every data
    column is smoothed, while the ``time`` column is copied through
    unchanged. Entries before the first full window are NaN.

    :param ts: Time series data as a pandas DataFrame (with a "time" column).
    :param window: Window size over which to calculate the mean (int).
    :return: DataFrame with the same columns, data replaced by rolling means.
    """
    smoothed = ts.rolling(window).mean()
    # Averaging timestamps would be meaningless — restore the raw column.
    smoothed["time"] = ts["time"]
    return smoothed
8cd2933b1a9c285666a62a5edacae8477dec1d3d
29,173
def X1X2_to_Xs(X1, X2):
    """Convert dimensionless spins X1, X2 to the symmetric spin Xs."""
    spin_sum = X1 + X2
    return spin_sum / 2.
9938f188d766d7895986b8796bb8eeeafe3b5b7d
29,174
def confirm_new_game():
    """Specialized input to ask confirmation about starting a new game.

    :return: The user confirmation
    :rtype: bool
    """
    print("")  # Empty line for aesthetical purposes
    acceptable_answers = {"Y", "YES", "N", "NO"}
    choice = ""
    while choice not in acceptable_answers:
        choice = input("Do you want to play again (y/n)? ").upper()
    return choice.startswith("Y")
6285e6d9250bfb814db71ab237e33884823b7096
29,175
def repeat(l, n):
    """Repeat all items in list n times.

    repeat([1,2,3], 2) => [1,1,2,2,3,3]
    http://stackoverflow.com/questions/24225072/repeating-elements-of-a-list-n-times
    """
    repeated = []
    for item in l:
        repeated.extend([item] * n)
    return repeated
0a27596da9ef804a8a5badc0a0111b56c937aa35
29,178
import torch


def vec2mat0(vec):
    """Promote a vector to a column matrix; vector dim comes first, unlike v2m."""
    return vec.unsqueeze(1)
e2d444b2772c092bb0375b6612b56d6b82a160f0
29,179
def roman_to_int(s):
    """Convert a Roman numeral to an integer.

    :type s: str
    :rtype: int

    Fix: the original used a convoluted state machine whose ``prev_item``
    sentinel mixed ``-1`` (int) with characters (str) and duplicated the
    last-index handling; this is the standard subtractive scan with
    identical results for valid Roman numerals.
    """
    roman_dict = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
    total = 0
    for idx, ch in enumerate(s):
        value = roman_dict[ch]
        # A smaller value before a larger one is subtractive (e.g. IV == 4).
        if idx + 1 < len(s) and value < roman_dict[s[idx + 1]]:
            total -= value
        else:
            total += value
    return total
b3cbe5ee251d9b6cbb909f45562677deb0839bb2
29,181
from typing import IO
import requests


def download_file(url: str) -> IO:
    """Download a remote file and return its raw response stream.

    Parameters
    ----------
    url: string
        Request URL.

    Returns
    -------
    io.BytesIO
    """
    response = requests.get(url, stream=True)
    response.raise_for_status()
    return response.raw
85c1e885573bee2619a473a2d9954781d46d3f9d
29,182
import torch


def logit(p, a=torch.tensor(0.0), b=torch.tensor(1.0)):
    """Logit transform for scalar parameters with bounded support (no gaps).

    Maps ``p`` in the interval (a, b) onto the real line.
    """
    distance_from_lower = p - a
    distance_to_upper = b - p
    return torch.log(distance_from_lower) - torch.log(distance_to_upper)
2efd8cb775f987c9dcd09c3c964bf85a76ad72a8
29,183
from typing import List
from typing import Union


def append_case_sink_edges(edge_list: List[tuple], exposed_ids: List[int], matching_ratio: Union[int, str], matching_ratio_dic: Union[dict, None]) -> List[tuple]:
    """Append edges connecting the exposed nodes to the sink.

    Capacities give either constant 1:k matching (``matching_ratio`` is an
    int) or per-node "entire number" matching (``matching_ratio ==
    'entire_number'`` with a capacity dictionary).
    """
    if isinstance(matching_ratio, int):
        capacity_of = {exp_id: matching_ratio for exp_id in exposed_ids}
    elif matching_ratio == 'entire_number':
        assert isinstance(matching_ratio_dic, dict), 'entire_number_dic is not a dictionary'
        capacity_of = {exp_id: matching_ratio_dic[exp_id] for exp_id in exposed_ids}
    else:
        assert False, f'{matching_ratio} not implemented yet'
    sink_edges = [
        (exp_id, 'sink', {'capacity': capacity_of[exp_id], 'weight': 0})
        for exp_id in exposed_ids
    ]
    return edge_list + sink_edges
dd64ca1bdfcecb1ef02bf2e6abe66db8ac5d04c9
29,184
def bytes2bin(bites, sz=8):
    """Convert bytes to a flat list of bits in big endian byte order.

    :param bites: the bytes to convert (any iterable of byte values).
    :param sz: max bits kept per byte (default 8); can be used to mask out
        higher bits — only the ``sz`` low bits of each byte are emitted.
    :return: list of 0/1 ints, ``sz`` entries per input byte.
    :raises ValueError: if ``sz`` is outside 1..8.

    Fix: removed a large commented-out alternative implementation and
    replaced the hand-rolled shift/pad/reverse loop with ``format``, which
    produces the identical bit list directly.
    """
    if sz < 1 or sz > 8:
        raise ValueError("Invalid sz value: %d" % sz)
    bits = []
    for b in bites:
        # The low `sz` bits of the byte, most significant bit first.
        bits.extend(int(bit) for bit in format(b, "08b")[-sz:])
    return bits
249bef3ef240f63273853ede0b820a9b59b935de
29,185
def find_degree_info(soup):
    """Find degree info in parsed html text.

    Scans a parsed search-result page for keywords indicating the person's
    highest degree, checking the (presumed LinkedIn) headline first and then
    every result snippet.

    :param soup: BeautifulSoup-parsed HTML of a search-result page.
    :return: one of "PhD", "MBA", "MS/MA", "BA/BS" or the string "None".
    """
    degree = None
    # Keyword lists, checked from highest degree downwards.
    doc = ["PhD", "Assistant Professor"]
    bs = ["B.A.", "BA", "B.S.", "BS", "Student", "Undergraduate"]
    ms = ["M.S.", "Graduate Student", "MS", "Adjunct Instructor", "Associate Instructor"]
    mba = ["MBA", "M.B.A"]
    try:
        # linkedin headline if any
        # NOTE(review): the attrs dict repeats the 'class' key, so only
        # 'slp' is actually passed — confirm the intended selector.
        desc2 = soup.find('div', attrs={'class': 'f', 'class': 'slp'}).get_text()
        # print desc2
        if any(d in desc2 for d in doc):
            degree = "PhD"
        elif any(a in desc2 for a in mba):
            degree = "MBA"
        elif any(m in desc2 for m in ms):
            degree = "MS/MA"
        elif any(b in desc2 for b in bs):
            degree = "BA/BS"
        else:
            degree = "None"
        if degree == "None":
            # Headline had no keyword: fall back to all search descriptions.
            desc = soup.findAll('span', attrs={'class': 'st'})
            for des in desc:
                des = des.get_text()
                # print des
                if any(d in des for d in doc):
                    degree = "PhD"
                    break
                elif any(a in des for a in mba):
                    degree = "MBA"
                    break
                elif any(m in des for m in ms):
                    degree = "MS/MA"
                    break
                elif any(b in des for b in bs):
                    degree = "BA/BS"
                    break
    except AttributeError:
        # No headline div found (find() returned None): scan all search
        # descriptions instead.
        desc = soup.findAll('span', attrs={'class': 'st'})
        for des in desc:
            des = des.get_text()
            # print des
            if any(d in des for d in doc):
                degree = "PhD"
                break
            elif any(a in des for a in mba):
                degree = "MBA"
                break
            elif any(m in des for m in ms):
                degree = "MS/MA"
                break
            elif any(b in des for b in bs):
                degree = "BA/BS"
                break
            else:
                degree = "None"
    return degree
66df8f331df4f883bba4194d9ed584748ccd516a
29,187
def validate(state):
    """ dict |-> bool

    This predicate function takes a state dict and returns a bool which is
    True if the state is valid and False if it is not valid.

    This does not check options, only if the state itself is consistent.
    """
    # NOTE(review): stub — currently rejects every state unconditionally;
    # the actual consistency checks are not implemented yet.
    return False
df9924e12a9797c2e89ee770286da894a5abf993
29,189
def multi_install(
    pyenv, sequence, *, index_url="", pre=False, user=False, upgrade=False
):
    """Install every package named in ``sequence`` in one call.

    Note: if any single package in ``sequence`` cannot be installed (no
    matching distribution, etc.), then none of the packages in ``sequence``
    get installed — so avoid this function unless batch installation is
    really necessary.
    """
    # Forward everything to the environment's own installer in a single call.
    return pyenv.install(
        *sequence, pre=pre, user=user, index_url=index_url, upgrade=upgrade
    )
fdd71b443f5054224226b1943b94b5066e3c6bec
29,190
import os
import shutil


def setup_report_directory(directory):
    """Create an empty report directory, wiping any previous contents.

    :param directory: path of the directory to (re)create.
    :return: the same ``directory`` path.
    """
    already_there = os.path.exists(directory)
    if already_there:
        # Remove the old report tree so the directory starts out empty.
        shutil.rmtree(directory)
    os.makedirs(directory)
    return directory
0080208da6d48ed382e770f5732f61b49a6330a1
29,191
import os


def files():
    """Return a list of ``~/Library/LaunchAgents/*.plist`` files.

    Walks the LaunchAgents directory recursively; returns an empty list
    when the directory does not exist.
    """
    agents_dir = os.path.expanduser("~/Library/LaunchAgents")
    if not os.path.exists(agents_dir):
        return []
    found = []
    for root, _dirs, names in os.walk(agents_dir):
        for name in names:
            if os.path.splitext(name)[1] == ".plist":
                found.append(os.path.join(root, name))
    return found
d4718db9e622c24100884a0b41c0bdf405337dca
29,192
def get_cols_and_rows(payoff_entries):
    """Determine how many columns and rows the output plot should have,
    based on how many parameters are being swept (have more than one value).
    """
    # Parameters with more than one value, paired with their index.
    swept = [[i, entry] for i, entry in enumerate(payoff_entries) if len(entry) > 1]
    if not swept:
        # Nothing swept: a single subplot.
        return 1, 1
    if len(swept) == 1:
        # One swept parameter: a single row, increasing left to right.
        return len(swept[0][1]), 1
    if len(swept) == 2:
        # Two swept parameters: rectangular grid, increasing down and right.
        return len(swept[0][1]), len(swept[1][1])
    # Three or more: smallest square grid (at least 3x3) that fits every
    # combination; trailing cells are simply left blank.
    total = 1
    for _idx, entry in swept:
        total *= len(entry)
    side = 3
    while side ** 2 < total:
        side += 1
    return side, side
2ae73cf52cb68e7688816254e21d71b1747caa22
29,193
def get_filename(name):
    """Return the astrocats JSON filename for an event, with '/' made
    filesystem-safe by replacing it with '_'."""
    safe_name = name.replace('/', '_')
    return safe_name + '.json'
e12598b6ca16299fd939bd3cfb619882bb50145c
29,194
def read_hex_digit(char: str) -> int:
    """Read a hexadecimal character and return its positive integer value (0-15).

    '0' becomes 0, '9' becomes 9
    'A' becomes 10, 'F' becomes 15
    'a' becomes 10, 'f' becomes 15

    Returns -1 if the provided character was not a valid hexadecimal digit.
    """
    if "0" <= char <= "9":
        return ord(char) - ord("0")
    if "A" <= char <= "F":
        return ord(char) - ord("A") + 10
    if "a" <= char <= "f":
        return ord(char) - ord("a") + 10
    return -1
071445b3c0ec7a5392a7b5b0b354b4d3c59a3687
29,195
import base64


def rest_md5_to_proto(md5):
    """Convert the REST (base64-encoded) representation of an MD5 hash to
    the raw-bytes proto representation."""
    return base64.standard_b64decode(md5)
64889241492ea5265c50b4ef95d55e864544d904
29,197
from pathlib import Path


def get_timestep(path):
    """Get the timestep from a filename.

    The timestep is the final dot-separated token of the file's stem,
    e.g. ``run.0042.vtk`` -> 42.
    """
    stem = Path(path).stem
    return int(stem.rsplit('.', 1)[-1])
f5eb746e06a6411008e609333cef8feb3250077c
29,199
def find_bit_ranges(bit_str: str):
    """Return the runs of equal bits in ``bit_str`` as [(1/0, msb, lsb)].

    Bit positions are counted from the right (lsb == position 0); ranges
    are listed most-significant first.
    """
    if not bit_str:
        return []
    if len(bit_str) == 1:
        return [(int(bit_str), 0, 0)]
    reversed_bits = bit_str[::-1]
    ranges = []
    run_start = 0
    current = reversed_bits[0]
    for pos in range(1, len(reversed_bits)):
        if reversed_bits[pos] != current:
            # Close the finished run [run_start, pos - 1].
            ranges.append((int(current), pos - 1, run_start))
            current = reversed_bits[pos]
            run_start = pos
    ranges.append((int(current), len(reversed_bits) - 1, run_start))
    ranges.reverse()
    return ranges
2502283bfcdcbd6b0662a782aab0f802db8cb480
29,200
import math


def log_base(i, base):
    """Integer -> integer log_base (number of base-``base`` digits of ``i``)."""
    if i == 0:
        return 0
    # A tiny epsilon compensates for log()'s slight float error before int()
    # truncates, so exact powers of ``base`` don't round down.
    adjusted = math.log(i, base) + 0.00000000000001
    return int(adjusted) + 1
6eda6e30a6d8aaa63e04130655e89cca55fa1d26
29,201
import itertools


def largest_group(iterable, key):
    """Find the group of largest elements (according to ``key``).

    >>> s = [-4, 3, 5, 7, 4, -7]
    >>> largest_group(s, abs)
    [7, -7]
    """
    # tee lets us scan the iterable twice even if it is a one-shot iterator.
    first_pass, second_pass = itertools.tee(iterable)
    best = max(key(el) for el in first_pass)
    return [el for el in second_pass if key(el) == best]
3b3ead0361f3c1cc94bad8e8fdf3bddd73c36bb7
29,204
import jinja2


def render_template(env: jinja2.Environment, template: str, **context):
    """Render ``template`` from ``env`` with ``context`` made available
    inside it."""
    tmpl = env.get_template(template)
    return tmpl.render(**context)
99c71c306ee088e70a5168b4237709c4c71e8e80
29,205
def abg(hm, hp):
    """Calculate alpha, beta and gamma coefficients for first and second
    derivative approximations on a non-uniform stencil with spacings
    ``hm`` (backward) and ``hp`` (forward).

    Returns (a1, a2, b1, b2, g1, g2).
    """
    span = hm + hp
    # First-derivative weights.
    a1 = -hp / (hm * span)
    g1 = hm / (hp * span)
    b1 = -a1 - g1
    # Second-derivative weights.
    a2 = 2 / (hm * span)
    g2 = 2 / (hp * span)
    b2 = -a2 - g2
    return a1, a2, b1, b2, g1, g2
54b7b198d6b5bd9948e7abd7870589cbc4b8dcbf
29,208
from typing import Tuple
import subprocess
import re


def get_active_window_info_wayland() -> Tuple[str, str]:
    """Retrieve active window class and active window title on Wayland.

    Inspired by https://gist.github.com/rbreaves/257c3edfa301786e66e964d7ac036269

    Returns:
        Tuple(str, str): window class, window title
    """

    def _get_cmd_result(cmd: str) -> str:
        # Run the shell command and pull the single-quoted payload out of
        # gdbus' "(ok, '...')" style output, stripping surrounding double
        # quotes from the payload itself; empty string when nothing matches.
        stdout_bytes: bytes = subprocess.check_output(cmd, shell=True)
        stdout = stdout_bytes.decode()
        if match := re.search(r"'(.+)'", stdout):
            return match.groups()[0].strip('"')
        return ""

    # Ask GNOME Shell (via its Eval D-Bus method) for the index of the
    # window actor that currently has focus...
    cmd_eval = (
        "gdbus call -e -d org.gnome.Shell -o /org/gnome/Shell -m org.gnome.Shell.Eval"
    )
    cmd_win_idx = (
        ' "global.get_window_actors().findIndex(a=>a.meta_window.has_focus()===true)"'
    )
    window_idx = _get_cmd_result(cmd_eval + cmd_win_idx)
    # ...then query that window's WM class and title with two more Eval calls.
    cmd_wm_class = (
        f' "global.get_window_actors()[{window_idx}].get_meta_window().get_wm_class()"'
    )
    wm_class = _get_cmd_result(cmd_eval + cmd_wm_class)
    cmd_wm_title = (
        f' "global.get_window_actors()[{window_idx}].get_meta_window().get_title()"'
    )
    title = _get_cmd_result(cmd_eval + cmd_wm_title)
    return wm_class, title
f06ecce611bed08d8105b982238f2624fde7fb68
29,210
def remove_non_ascii(text: str) -> str:
    """Remove non-ASCII characters.

    :param text: Text to be cleaned
    :return: Clean text containing only code points below 128
    """
    ascii_only = filter(lambda ch: ord(ch) < 128, text)
    return ''.join(ascii_only)
94a003856809eb740b5c85af094095acb2e07dad
29,211
def get_embedding_matrix(model):
    """Return the three embedding matrices of a BERT model.

    @param model: (BertForSequenceClassification) model to extract from
    @return word_emb: (torch.tensor) word embedding weights
    @return pos_emb: (torch.tensor) position embedding weights
    @return sent_emb: (torch.tensor) token-type (sentence) embedding weights
    """
    embeddings = model.bert.embeddings
    word_emb = embeddings.word_embeddings.weight
    pos_emb = embeddings.position_embeddings.weight
    sent_emb = embeddings.token_type_embeddings.weight
    return word_emb, pos_emb, sent_emb
6cbb464ce93efce71f6d385d4b2e1ffacc976f32
29,212
def group_by_keys(dict_list, keys):
    """Group dicts by the tuple of their values at ``keys``.

    >>> data = [
    ...     {'a': 1, 'b': 2},
    ...     {'a': 1, 'b': 3}
    ... ]
    >>> group_by_keys(data, ['a', 'b'])
    {(1, 2): [{'a': 1, 'b': 2}], (1, 3): [{'a': 1, 'b': 3}]}
    """
    groups = {}
    for record in dict_list:
        group_key = tuple(record[k] for k in keys)
        groups.setdefault(group_key, []).append(record)
    return groups
425d223eff828e24ebdab4900c0c461868228eb4
29,213
def avg_bound(bounds):
    """A simple average of those bounds that are set.

    Each of the four bound values is included only when its corresponding
    ``has_*`` flag is set; returns 0 when no bound is set.
    """
    selected = []
    if bounds.has_upper_bound_success:
        selected.append(bounds.upper_bound_success)
    if bounds.has_upper_bound_failure:
        selected.append(bounds.upper_bound_failure)
    if bounds.has_lower_bound_success:
        selected.append(bounds.lower_bound_success)
    if bounds.has_lower_bound_failure:
        selected.append(bounds.lower_bound_failure)
    if selected:
        return sum(selected) / len(selected)
    return 0
85dcfe845bd717eebfbebed5668a499c96cfdf35
29,214
import re


def split_args(args):
    """Parse and split an argument string.

    Tokens are whitespace-separated, with double-quoted groups kept intact;
    leading/trailing '"' and '-' characters are stripped from each token.
    """
    tokens = re.findall(r'[^\s\"]+|".+"', args)
    return [token.strip('"-') for token in tokens]
765b10b6a2603843cda4a57264ac0f02359050e3
29,216
import time


def find_info(j, find_min=False, num_allowed=-1, num_sqr_end=-1, find_input=False):
    """
    This function gets:
    j - index of an input/output file

    It returns the riddle's solution:
    matchsticks - a Boolean array indexed 0 - 23 which represents the
    current matchsticks configuration: True - the indexed matchstick
    exists, False - the indexed matchstick does not exist.
    """
    # Which parameter the caller left unset decides what extra value is
    # returned alongside the matchsticks when find_min is True.
    par = ''
    if num_allowed == -1:
        par = 'num_allowed'
    elif num_sqr_end == -1:
        par = 'num_sqr_end'
    # Poll the solver's output file once per second until it reports its
    # execution time, i.e. until the run has finished.
    # NOTE(review): file handles are never closed; consider `with open(...)`.
    f = open('output_sq' + str(j) + '.txt', 'r')
    text = f.read()
    while "Execution time: " not in text:
        time.sleep(1)
        f = open('output_sq' + str(j) + '.txt', 'r')
        text = f.read()
    rows = text.split('\n')
    # Parse "name[idx] = VALUE" assignment lines for the matchsticks and
    # bool1 arrays; a repeated name overwrites by its parsed index.
    list_match_ind = []
    arr_match = []
    list_bool1_ind = []
    arr_bool1 = []
    for row in rows:
        if 'matchsticks' in row and 'specification' not in row:
            st, val = row.split(' = ')
            if st not in list_match_ind:
                list_match_ind.append(st)
                arr_match.append(val)
            else:
                # Re-assignment: extract the index from "name[idx]".
                st = st.replace(']', '[')
                var, ind, var2 = st.split('[')
                arr_match[int(ind)] = val
        if 'bool1' in row and 'specification' not in row:
            st, val = row.split(' = ')
            # NOTE(review): this membership test uses list_match_ind, not
            # list_bool1_ind — looks like a copy-paste slip; confirm intent.
            if st not in list_match_ind:
                list_bool1_ind.append(st)
                arr_bool1.append(val)
            else:
                st = st.replace(']', '[')
                var, ind, var2 = st.split('[')
                arr_bool1[int(ind)] = val
        elif num_allowed == -1 and 'num_allowed' in row and '>' not in row and '!' not in row:
            st, num_all = row.split(' = ')
            num_allowed = int(num_all)
        elif num_sqr_end == -1 and 'num_squares_end' in row and '>' not in row and '!' not in row:
            st, num_sq = row.split(' = ')
            num_sqr_end = int(num_sq)
    # Convert the solver's TRUE/FALSE strings to Python booleans.
    matchsticks = [True if item == 'TRUE' else False for item in arr_match]
    bool1 = [True if item == 'TRUE' else False for item in arr_bool1]
    if find_input:
        return matchsticks, bool1, num_sqr_end, num_allowed
    if not find_min:
        return matchsticks
    elif par == 'num_sqr_end':
        return matchsticks, num_sqr_end
    elif par == 'num_allowed':
        return matchsticks, num_allowed
de0a6bb269ad93ee0178d3a6f11278f5c3457b95
29,217
def _ParseSigsetT(sigset): """Parse a rendered sigset_t value. This is the opposite of the Linux kernel's fs/proc/array.c:render_sigset_t function. @type sigset: string @param sigset: Rendered signal set from /proc/$pid/status @rtype: set @return: Set of all enabled signal numbers """ result = set() signum = 0 for ch in reversed(sigset): chv = int(ch, 16) # The following could be done in a loop, but it's easier to read and # understand in the unrolled form if chv & 1: result.add(signum + 1) if chv & 2: result.add(signum + 2) if chv & 4: result.add(signum + 3) if chv & 8: result.add(signum + 4) signum += 4 return result
0168c50cbe138814449dee454170b89fc37cc6d5
29,218
import json def _format(dictionary): """Pretty format a dictionary Format a dictionary object into a well formatted JSON string :param dictionary Dictionary object to be pretty formatted :return String with a pretty format of a dictionary or JSON document """ return json.dumps(dictionary, ensure_ascii=False, sort_keys=True, indent=2)
7686d9bbf75f6e33a60ae28b302a2fa2c3d02f64
29,219
import functools
import ctypes


def hash_code(text: str) -> int:
    """Implements Java's String.hashCode in Python.

    Ref: https://stackoverflow.com/a/8831937
    """
    result = 0
    for ch in text:
        # Java arithmetic wraps at 32 bits: mask, then reinterpret the top
        # bit as the sign, exactly as c_int32 would.
        result = (31 * result + ord(ch)) & 0xFFFFFFFF
        if result >= 0x80000000:
            result -= 0x100000000
    return result
eadda940e3b2d63b8ff83f74816024160334288f
29,220
def ask_yes_no(prompt="[y/n] :", default="y", valid=["y", "n"]):
    """Display a yes/no question and loop until a valid answer is entered.

    Keyword Arguments:
        prompt {str} -- the question message (default: {'[y/n] :'})
        default {str} -- the default answer when input is empty (default: {"y"})
        valid {list} -- the list of appropriate answers (default: {["y", "n"]})

    Returns:
        str -- the answer
    """
    # Iterative re-prompt instead of recursion; I/O behaviour is identical.
    while True:
        answer = input(prompt)
        if answer in valid:
            return answer
        if answer == "":
            return default
f46dcd6ed7fefcb38c4bc5307b49cf59e0c813b4
29,221
def _process_line(request: dict, cost_price_delta: int): """The function that builds out the report line by line""" asset = request["asset"] created = request["created"] try: qty = int(asset["items"][0]["quantity"]) # [0] to filter out irrelevant skus except IndexError: # to handle some older requests without items qty = 0 return (asset["id"], asset["tiers"]["customer"]["id"], qty, cost_price_delta * qty, created)
f650dd5cc7592f700e9b3aa9824b8d79679e4bbb
29,222
import socket


def iptoint(ip_v4: str) -> int:
    """Convert a dotted-quad IPv4 address to its integer value.

    Adopted from http://goo.gl/AnSFV

    :param ip_v4: IPv4 address string, e.g. "127.0.0.1"
    :returns: the address as an unsigned 32-bit integer
    """
    packed = socket.inet_aton(ip_v4)  # 4 bytes in network (big-endian) order
    return int.from_bytes(packed, "big")
bcdad9f575ee4bb33bb4716f95eb8a4124824fe7
29,223
def pack_x_y_sample_weight(x, y=None, sample_weight=None):
    """Packs user-provided data into a tuple.

    Only the provided pieces are included: (x,), (x, y) or
    (x, y, sample_weight); sample_weight is dropped when y is None.
    """
    if y is None:
        return (x,)
    if sample_weight is None:
        return (x, y)
    return (x, y, sample_weight)
d2a7aafff2073a17632ac858fe40093389494af2
29,224
import struct


def long_long_int(value):
    """Decode a big-endian signed 64-bit integer.

    :param bytes value: buffer whose first 8 bytes hold the value
    :return tuple: (bytes used, int)
    :raises ValueError: if the data cannot be unpacked (wrong type, or
        fewer than 8 bytes available)
    """
    try:
        return 8, struct.unpack('>q', value[0:8])[0]
    except (TypeError, struct.error) as err:
        # struct.error covers short buffers, which the original silently
        # let escape despite the documented ValueError contract.
        raise ValueError('Could not unpack data') from err
76e36535f382eb6e143b0d1f2f17102a8a9b886f
29,225
import re


def regex(pattern):
    """Compile *pattern* into a case-insensitive regular expression object."""
    return re.compile(pattern, flags=re.IGNORECASE)
b30c594fc9d4134bf464d010d085bf8eb59157fc
29,226
import ast


def make_attr_call(attr1, attr2, args=None):
    """Build the AST for the call ``flor.<attr1>.<attr2>(*args)``.

    :param attr1: first attribute name on the ``flor`` module
    :param attr2: second (called) attribute name
    :param args: optional list of AST expression nodes to pass as
        positional arguments; None means a zero-argument call
    :return: an ``ast.Call`` node
    """
    # The original duplicated the entire Call construction in two
    # branches that differed only in the args list; build it once.
    return ast.Call(
        func=ast.Attribute(
            value=ast.Attribute(
                value=ast.Name('flor', ast.Load()),
                attr=attr1,
                ctx=ast.Load()
            ),
            attr=attr2,
            ctx=ast.Load()
        ),
        args=[] if args is None else args,
        keywords=[]
    )
1b2b839ab8e76730d33405782283b2fbc6e326d3
29,229
import os
import logging
import pickle


def load_pickled_model(filename):
    """Load a pickled model from persistent storage into memory.

    Args:
        filename (str): Path to a pickled model file (.pkl)

    Returns:
        The unpickled model object.

    Raises:
        Exception: if the file does not exist or cannot be unpickled.
    """
    if not os.path.isfile(filename):
        # Include the actual path in the message (the original logged a
        # broken placeholder instead of the filename).
        error_msg = f"Model Filename {filename} does not exist, exiting"
        logging.error(error_msg)
        raise Exception(error_msg)

    # NOTE(security): pickle.load can execute arbitrary code; only load
    # model files from trusted sources.
    try:
        with open(filename, 'rb') as model_file:
            model = pickle.load(model_file)
    except Exception as err:
        logging.exception(f"Exception Loading Pickle from File into Memory: {err}")
        raise err

    return model
15d366d4b6f610ea6f46a94f2140c8df5db5448b
29,230
def assign_assumed_width_to_national_roads_from_file(x, flat_width_range_list, mountain_width_range_list):
    """Assign widths to national roads assets in Vietnam.

    The widths are assigned based on our understanding of:
    1. The class of the road which is not reliable
    2. The number of lanes
    3. The terrain of the road

    Parameters
        - x - Pandas DataFrame row with values
            - road_class - Integer value of road class
            - lanenum__s - Integer value of number of lanes on road
            - terrain - 'flat' or 'mountain'
        - flat_width_range_list - List of named tuples with fields
            (road_class, lane_width, median_strip, shoulder_width, road_width)
        - mountain_width_range_list - Same fields, for mountainous terrain

    Returns
        assumed_width - Float assigned width of the road asset based on
        design specifications (3.5 when the road class is unknown)
    """
    road_class = x.road_class
    road_lanes = 0 if x.lanenum__s is None else int(x.lanenum__s)

    # The original duplicated the identical lookup for each terrain;
    # only the specification table differs.
    if x.terrain == 'flat':
        width_range_list = flat_width_range_list
    else:
        width_range_list = mountain_width_range_list

    assumed_width = 3.5  # design default when the road class is not found
    for vals in width_range_list:
        if road_class == vals.road_class:
            if 0 < road_lanes <= 8:
                assumed_width = road_lanes * vals.lane_width + \
                    vals.median_strip + 2.0 * vals.shoulder_width
            else:
                assumed_width = vals.road_width
            break
    return assumed_width
83d408813057ff9b8a03ab9cb745ed7299be602c
29,231
def formation_temperature(surface_temperature, gradient, depth):
    """
    Calculate formation temperature from a linear geothermal gradient.

    Parameters
    ----------
    surface_temperature : float
        Surface temperature (deg F or deg C)
    gradient : float
        Temperature gradient (degF/ft or degC/m)
    depth : float
        Depth at which temperature is required (ft or m)

    Returns
    -------
    float
        Formation temperature at the entered depth
    """
    temperature_increase = gradient * depth
    return surface_temperature + temperature_increase
13b55f67810775cbdd531036bb40780a4138af0a
29,232
def cassini_instrument():
    """Return the CIRS instrument record as produced by the API."""
    instrument_id = -82898  # NAIF ID for CASSINI_CIRS_RAD
    return {'id': instrument_id, 'name': 'CASSINI_CIRS_RAD'}
a871a3560bcbf56b9b524bafa95a834093d04ee0
29,233
def fn_url_p(fn):
    """Check if *fn* is a URL.

    Returns True when fn is a string starting with a known scheme
    (http, https, ftp, ftps); anything else - including non-string
    inputs - yields False, matching the original's silent fallback.
    """
    url_sw = ('http://', 'https://', 'ftp://', 'ftps://')
    try:
        # startswith accepts a tuple of prefixes, replacing the manual
        # loop; the narrowed except (instead of the original bare
        # `except:`) still maps non-string inputs to False.
        return fn.startswith(url_sw)
    except (AttributeError, TypeError):
        return False
0d56366d055b985bb819e0516c63acd117f371fd
29,234
import pickle
import base64


def ObjectFromBase64EncodedString(EncodedObject):
    """Reconstruct a Python object from a base64 encoded pickle string.

    Arguments:
        str: Base64 encoded and pickled object string.

    Returns:
        object : the unpickled Python object, or None for None input.
    """
    if EncodedObject is None:
        return None
    # NOTE(review): pickle.loads runs arbitrary code; only decode trusted data.
    raw = base64.b64decode(EncodedObject)
    return pickle.loads(raw)
ff82b5e3a130e563a11ed7ccfa4ce80b257b4ada
29,235
import os


def is_plain_file(path):
    """**is_plain_file(path)** -> True when ``path`` names a regular file.

    * path: (string) path to check

    Directories, missing paths and special files all return False.

    <code>
    Example: is_plain_file('/etc/passwd')
    Returns: True
    </code>
    """
    return os.path.isfile(path)
1f4cb257ee30b5291915b12ea9b23617271fe869
29,236
def insertion_sort(A):
    """Sort list *A* in place into nondecreasing order and return it.

    Classic insertion sort: grow a sorted prefix, shifting larger
    elements one slot right to open a hole for each new value.
    O(n^2) worst case.
    """
    for idx in range(1, len(A)):
        current = A[idx]
        pos = idx
        # Shift sorted-prefix elements greater than `current` rightwards.
        while pos > 0 and A[pos - 1] > current:
            A[pos] = A[pos - 1]
            pos -= 1
        A[pos] = current
    return A
467f22572c775472d018f234be51d6ffb8effde2
29,237
def find_one_possible_value(sorted_values, all_matching_indices):
    """Find the first entry with exactly one candidate and commit it.

    Scans *all_matching_indices* in iteration order; when a key maps to
    a single-element list, records that element in *sorted_values*
    (mutated in place) and returns it. Returns None when no such entry
    exists.
    """
    for key, candidates in all_matching_indices.items():
        if len(candidates) != 1:
            continue
        only_value = candidates[0]
        sorted_values[key] = only_value
        return only_value
6ab3c7fee44fec1ca091cfbfa84f36c124632c9c
29,239
def v3_matrix_from_string(matrix_string):
    """Convert string-based rows of numbers to a list of float lists.

    Each line of *matrix_string* becomes one row; tokens within a line
    are split on whitespace and converted with float().
    """
    return [list(map(float, line.split()))
            for line in matrix_string.splitlines()]
34bfc01415338f68d6f736e375ee51795a804ef5
29,240
def all_awards_are_reviewed(request):
    """Check whether every tender award has been reviewed.

    Returns True when no award of the tender is still in the "pending"
    state.
    """
    awards = request.validated["tender"].awards
    # Generator expression avoids materialising the intermediate list
    # the original built inside all([...]).
    return all(award.status != "pending" for award in awards)
325fb138db00b8696fa424b2a4c95e1378ddd667
29,241
def _knapsack(weights, capacity): """ Binary knapsack solver with identical profits of weight 1. Args: weights (list) : list of integers capacity (int) : maximum capacity Returns: (int) : maximum number of objects """ n = len(weights) # sol : [items, remaining capacity] sol = [[0] * (capacity + 1) for i in range(n)] added = [[False] * (capacity + 1) for i in range(n)] for i in range(n): for j in range(capacity + 1): if weights[i] > j: sol[i][j] = sol[i - 1][j] else: sol_add = 1 + sol[i - 1][j - weights[i]] if sol_add > sol[i - 1][j]: sol[i][j] = sol_add added[i][j] = True else: sol[i][j] = sol[i - 1][j] return sol[n - 1][capacity]
158e96376bc3e7a60bbb24111b4b26636aaa86d5
29,242
import random


def reduce_branch_length(size):
    """Shrink *size* by a random factor of 60-95%.

    The factor is drawn uniformly from [0.6, 0.95] and rounded to two
    decimal places before scaling.
    """
    shrink_factor = round(random.uniform(0.6, 0.95), 2)
    return size * shrink_factor
52b14fe1007616badbaa95ffaebca0f4314cb21d
29,243
def explode():
    """Deliberately raise ZeroDivisionError.

    This route exists to exercise the 500 error page in tests.
    """
    numerator, denominator = 1, 0
    return numerator / denominator
70470176ec69c862e2d59a41fd90d3a32ed17102
29,244
def get_layer_save_path(layer):
    """Get custom HoudiniLayerInfo->HoudiniSavePath from SdfLayer.

    Args:
        layer (pxr.Sdf.Layer): The Layer to retrieve the save path data from.

    Returns:
        str or None: Path to save to when data exists.
    """
    info_prim = layer.rootPrims.get("HoudiniLayerInfo")
    if not info_prim:
        return
    save_path = info_prim.customData.get("HoudiniSavePath", None)
    if not save_path:
        return
    # Unfortunately this doesn't actually resolve the full absolute path
    return layer.ComputeAbsolutePath(save_path)
16e20d0bcedf9717bb60af400548308ff05b0570
29,245
def prepare_mdtau(nrot, jobs):
    """
    Decide the mdtau setting from the hindered-rotor count and ensure
    'MdTau' follows '1dTau' in the job list when rotors are present.

    Returns (mdtau, jobs): mdtau is '1', '2' or '3' (capped at 3) when
    nrot > 0 and '1dTau' is scheduled, otherwise None; jobs is the same
    list, mutated in place if 'MdTau' had to be inserted.
    """
    mdtau = None
    if nrot > 0 and '1dTau' in jobs:
        # One dimension per hindered rotor, capped at three.
        mdtau = str(min(nrot, 3))
        if 'MdTau' not in jobs:
            jobs.insert(jobs.index('1dTau') + 1, 'MdTau')
    return mdtau, jobs
cd05dc6aa6569efc7087d1769270910d1cf0bf2c
29,247
from typing import List
from typing import Any
from typing import Iterator
from typing import Tuple
import itertools


def pairwise(s: List[Any]) -> Iterator[Tuple[Any, Any]]:
    """
    Yield overlapping neighbour pairs of a list:
    s -> (s0,s1), (s1,s2), (s2, s3), ...

    From https://stackoverflow.com/a/5434936
    """
    current, ahead = itertools.tee(s)
    next(ahead, None)  # advance one copy so zip pairs each item with its successor
    return zip(current, ahead)
58bbd84005b8caef2535cf3477f8745d511b5a0b
29,249
import os


def get_sensor_path(sensor_path):
    """Build the expected location of the sensor JSON file.

    Args:
        sensor_path (str): the directory to look in

    Returns:
        str: full path to ``calibrated_sensor.json`` inside that directory
    """
    sensor_filename = "calibrated_sensor.json"
    return os.path.join(sensor_path, sensor_filename)
738423a4c9a3f8bb11d431754d4a326b5d22cb23
29,252
def int_to_hexcolor(num: int, *mode: str):
    """Convert an int colour value to a hex colour string.

    arg:
        num: int (base=10), colour in 0xRRGGBB layout
        mode: pass 'lib' to emit the library's GRB channel order
    return:
        hexcolor string like 'ffba78'
    """
    num, blue = divmod(num, 16 ** 2)
    num, green = divmod(num, 16 ** 2)
    red = num % 16 ** 2
    channels = {'r': red, 'g': green, 'b': blue}
    if 'lib' in mode:
        # The LED library's colour code puts green first (GRB).
        template = '{g:02x}{r:02x}{b:02x}'
    else:
        template = '{r:02x}{g:02x}{b:02x}'
    return template.format(**channels)
06c709e3840a07c3c7e641ff9b1b8d011da54a43
29,253
def _znode_to_class_and_name(znode): """ Local helper function that takes a full znode path that returns it in the service_class/service_name format expected in this module """ znode = znode.split("/") znode.pop(0) return (znode[0], znode[1])
7039b35227d967978c073a41fa044a75c4a1670d
29,258
def orbresurrect(orb):
    """Restore the orb's previous position variables.

    Delegates to *orb*'s ``resurrect`` method and returns its result.
    """
    return orb.resurrect()
50167fbafabcc864c79e0997c4687f05baab84c5
29,259
def byteToInt(byte):
    """
    byte -> int

    Return the integer value of a single byte: ints pass through
    unchanged, one-character strings go through ord(), and bytes-like
    objects yield their first element.
    """
    if hasattr(byte, 'bit_length'):
        # Already an int.
        return byte
    if hasattr(byte, 'encode'):
        # A (unicode) string: take its character code.
        return ord(byte)
    # bytes / bytearray: indexing yields an int directly.
    return byte[0]
a4fe2eab760b5b5792d633a78f4bdd5378719bb7
29,260
import argparse


def parse_args() -> argparse.Namespace:
    """Parse command-line options for dialogue generation.

    Builds an argparse parser covering model selection, decoding
    strategy (sampling / beam search) and device choice, then parses
    ``sys.argv``.

    Returns:
        argparse.Namespace: the parsed arguments.
    """
    parser = argparse.ArgumentParser(__doc__)
    # Model / reproducibility options.
    parser.add_argument('--model_name_or_path', type=str, default='plato-mini',
        help='The path or shortcut name of the pre-trained model.')
    parser.add_argument('--seed', type=int, default=None,
        help='Random seed for initialization.')
    # Generation length bounds and sample count.
    parser.add_argument('--min_dec_len', type=int, default=1,
        help='The minimum sequence length of generation.')
    parser.add_argument('--max_dec_len', type=int, default=64,
        help='The maximum sequence length of generation.')
    parser.add_argument('--num_return_sequences', type=int, default=20,
        help='The numbers of returned sequences for one input in generation.')
    # Decoding strategy and its tuning knobs.
    parser.add_argument('--decode_strategy', type=str, default='sampling',
        help='The decode strategy in generation.')
    parser.add_argument('--top_k', type=int, default=5,
        help='The number of highest probability vocabulary tokens to keep for top-k sampling.')
    parser.add_argument('--temperature', type=float, default=1.0,
        help='The value used to module the next token probabilities.')
    parser.add_argument('--top_p', type=float, default=1.0,
        help='The cumulative probability for top-p sampling.')
    parser.add_argument('--num_beams', type=int, default=0,
        help='The number of beams for beam search.')
    parser.add_argument('--length_penalty', type=float, default=1.0,
        help='The exponential penalty to the sequence length for beam search.')
    # NOTE(review): type=eval executes the raw CLI string; kept as-is to
    # preserve behaviour, but a stricter bool parser would be safer.
    parser.add_argument('--early_stopping', type=eval, default=False,
        help='Whether to stop the beam search when at least `num_beams` '
        'sentences are finished per batch or not.')
    parser.add_argument('--device', type=str, default='cpu',
        help='The device to select for training the model.')
    args_ = parser.parse_args()
    return args_
57f9b236362bd7bd3fb1b4cd271fd51bd61e6858
29,262
def _get_cat_formulae(mpt): """ Retrieve categories and respective formulae Parameters ---------- mpt : MPT model """ values = [] for _, value in mpt.formulae().items(): values.append(" + ".join(value)) return values
b40c60d10be4724eec32ed2d164b09fc722a7bed
29,264
def meta_REstring(REstr):  # generic
    """ get meta information of the RapidEye file name

    Parameters
    ----------
    REstr : string
        filename of the rapideye data

    Returns
    -------
    REtime : string
        date "+YYYY-MM-DD"
    REtile : string
        tile code "TXXXXX"
    REsat : string
        which RapedEye satellite

    Example
    -------
    >>> REstr = '568117_2012-09-10_RE2_3A_Analytic.tif'
    >>> REtime, REtile, REsat = meta_REstring(REstr)
    >>> REtime
    '+2012-09-10'
    >>> REtile
    '568117'
    >>> REsat
    'RE2'
    """
    # isinstance (instead of the original type(...)==str) also accepts
    # str subclasses while keeping the AssertionError contract.
    assert isinstance(REstr, str), ("please provide a string")
    REsplit = REstr.split('_')
    date = REsplit[1]  # "YYYY-MM-DD" field of the filename
    REtime = '+' + date[0:4] + '-' + date[5:7] + '-' + date[8:10]
    REsat = REsplit[2]
    REtile = REsplit[0]
    return REtime, REtile, REsat
d325034dd8dc7e7033387e22796b1449ba14d4dc
29,265
def rest(s):
    """Return all elements in a sequence after the first.

    Works on any sliceable sequence (list, tuple, str); the result is a
    new object of the same type, and an empty sequence yields an empty
    sequence rather than raising.
    """
    return s[1:]
268d56ff3a24b3a5c9b1e4b1d1ded557afcdce8c
29,266
def representationB(model):
    """
    An alternative representation of the model.

    Each partition becomes a '*'-joined product of haplotype strings
    (read indices rendered as letters A, B, C, ...; empty haplotypes
    skipped), prefixed with 'W*' when its weight W differs from 1; the
    terms are joined with '+'.
    """
    terms = []
    for partition, weight in model.items():
        haplotypes = [
            ''.join(chr(65 + read_ind) for read_ind in hap)
            for hap in partition
            if len(hap)
        ]
        prefix = f'{weight:d}*' if weight != 1 else ''
        terms.append(prefix + '*'.join(haplotypes))
    return '+'.join(terms)
9c2d09a690a19d9c336831461f7d41479734797e
29,269
def sort_string(s):
    """
    :param s: string, ex: 'apple'
    :return: string, sorted by a,b,c,d,e... ex: 'aelpp'
    """
    # join+sorted replaces the original quadratic character-by-character
    # string concatenation; sorted() accepts the string directly.
    return ''.join(sorted(s))
8b4760977ea59275c92b4307666a6ec8ab36e030
29,271
def get_title_from_vuln(vuln):
    """
    Return the title (CVE ID) of a vulnerability document, or the
    string "No-Title" when the document has no title field.
    """
    default_title = "No-Title"
    return vuln.get('title', default_title)
3c81cd0a873015d8e3d5a96819a7608dbfd8330f
29,272
import json
from datetime import datetime


def get_user_insert(user, event):
    """
    Gets all insertion data for a single user

    Parameters
    ----------
    user: dict
        Dictionary object of a Twitter user. Assumed to follow the
        Twitter v2 API user payload ('entities', 'public_metrics',
        etc.) -- TODO confirm against the caller's query.
    event: str
        Event name of query the user was retrieved from

    Returns
    -------
    user_insert: dict
        Dictionary of values extracted and formatted for insertion into
        a PostgreSQL database
    """
    # Description hashtags ('\x00' is stripped because PostgreSQL text
    # columns reject NUL bytes)
    try:
        hashtags = [hashtag_info['tag'].replace('\x00', '') for hashtag_info in user['entities']['description']['hashtags']]
    except KeyError:
        hashtags = None

    # Description mentions
    try:
        mentions = [mention_info['tag'] for mention_info in user['entities']['description']['mentions']]
    except KeyError:
        mentions = None

    # Description URLs (stored as one JSON string per URL entity)
    try:
        urls = user['entities']['description']['urls']
        urls = [json.dumps(url) for url in urls]
    except KeyError:
        urls = None

    # Profile URL (IndexError: the 'urls' list may be empty)
    try:
        url = user['entities']['url']['urls'][0]['expanded_url']
    except (KeyError, IndexError):
        url = None

    # Same local timestamp for both bookkeeping columns.
    now = datetime.now()
    user_insert = {
        'id': user['id'],
        'event': event,
        'inserted_at': now,
        'last_updated_at': now,
        'created_at': user['created_at'],
        'followers_count': user['public_metrics']['followers_count'],
        'following_count': user['public_metrics']['following_count'],
        'tweet_count': user['public_metrics']['tweet_count'],
        'url': url,
        'profile_image_url': user['profile_image_url'],
        'description_urls': urls,
        'description_hashtags': hashtags,
        'description_mentions': mentions,
        'verified': user['verified']
    }
    # Optional free-text fields: strip NUL bytes when present, else None.
    for f in ['description', 'location', 'pinned_tweet_id', 'name', 'username']:
        try:
            user_insert[f] = user[f].replace('\x00', '')
        except KeyError:
            user_insert[f] = None
    return user_insert
8c9a269fb7fd349d26899f63af5f8415b7d86210
29,273
def parse_number_input(user_input):
    """Convert a string of whitespace-separated integers to a list of ints.

    Generalized from single-space splitting: str.split() with no
    argument also tolerates leading/trailing and repeated whitespace,
    which previously produced empty tokens and raised ValueError.
    """
    return [int(token) for token in user_input.split()]
5e03156c79814a7916d78203749cfd82ab6b98d5
29,275
import argparse
import pathlib


def parse_args(parser: argparse.ArgumentParser) -> argparse.Namespace:
    """
    Parse CLI arguments to control the prediction.

    :param parser: Argument parser Object.
    :return: CLI Arguments object (argparse.Namespace).
    """
    # Required positional arguments: input folder, extension, model, output.
    parser.add_argument(
        "input",
        type=pathlib.Path,
        help="Path to the folder with the RGB images to be processed.",
    )
    parser.add_argument(
        "extension",
        type=str,
        help="Name of the file extension. For example: <-e jpg>.",
    )
    parser.add_argument(
        "model",
        type=pathlib.Path,
        help="Path to the architecture/model file.",
    )
    parser.add_argument(
        "output",
        type=pathlib.Path,
        help="Path to folder in which the segmented images are to be stored.",
    )
    # Optional visualisation / thresholding switches.
    parser.add_argument(
        "-v",
        "--vistype",
        type=str,
        help="Visualisation type. Default is grayscale.",
        choices=["grayscale", "heatmap", "binary"],
        default="grayscale",
        required=False,
    )
    parser.add_argument(
        "-t",
        "--threshold",
        type=float,
        help="Threshold for binary classification. Default is 0.5.",
        default=0.5,
        required=False,
    )
    parser.add_argument(
        "-mt",
        "--multiple-thresholds",
        action="store_true",
        help="Store all thresholds from 0-10 in 1, 10-100 in 10, 90-100 in 1 steps.",
        default=False,
        required=False,
    )
    parser.add_argument(
        "-p",
        "--progress",
        action="store_true",
        help="Show progress bar on stdout.",
        default=False,
        required=False,
    )
    # Output geometry and device selection.
    parser.add_argument(
        "--height",
        type=int,
        help="Height of the output image.",
        default=160,
        required=False,
    )
    parser.add_argument(
        "-w",
        "--width",
        type=int,
        help="Width of the output image.",
        default=320,
        required=False,
    )
    parser.add_argument(
        "-g",
        "--gpu",
        type=int,
        help="Select the GPU id to predict on.",
        default=0,
        required=False,
    )
    return parser.parse_args()
583766506d45335e9ab2cfa6d954a67838e79087
29,276
def _is_finite(constraints): """ Return ``True`` if the dictionary ``constraints`` corresponds to a finite collection of ordered multiset partitions into sets. If either ``weight`` or ``size`` is among the constraints, then the constraints represent a finite collection of ordered multiset partitions into sets. If both are absent, one needs ``alphabet`` to be present (plus a bound on length or order) in order to have a finite collection of ordered multiset partitions into sets. EXAMPLES:: sage: from sage.combinat.multiset_partition_into_sets_ordered import _is_finite sage: W = {"weight": {1:3, 2:3, 4:1}, "length": 5} sage: S = {"size": 44, "min_length": 5} sage: AO = {"alphabet": range(44), "max_order": 5} sage: all(_is_finite(constr) for constr in (W, S, AO)) True sage: AL = {"alphabet": range(44), "min_order": 5} sage: _is_finite(AL) False """ if "weight" in constraints or "size" in constraints: return True elif "alphabet" in constraints: # Assume the alphabet is finite Bounds = set(["length", "max_length", "order", "max_order"]) return Bounds.intersection(set(constraints)) != set()
5802604f8a338b8e0c7b5a99e63661637350371f
29,277
import math


def atanh(x):
    """Return the inverse hyperbolic tangent of *x*.

    Thin wrapper over math.atanh; raises ValueError for |x| >= 1.
    """
    return math.atanh(x)
b721fd642ac99dd7e790db4baa73792333d7af7c
29,278
def get_strata(creel_run):
    """Given a creel_run, return a list of tuples that represent the rows
    in the Strata table - each row contains the strata label, and foreign
    keys to the corresponding creel_run, season, space, daytype, period
    and fishing mode.

    Arguments:
    - `creel_run`: An FN011 creel_run object.
    """
    strata_rows = []
    modes = creel_run.creel.modes.all()
    spots = creel_run.creel.spatial_strata.all()

    # Nesting order (season > spot > daytype > period > mode) fixes the
    # ordering of the returned rows.
    for season in creel_run.creel.seasons.all():
        daytypes = season.daytypes.all()
        for spot in spots:
            for daytype in daytypes:
                for period in daytype.periods.all():
                    for mode in modes:
                        label = (f"{season.ssn}_{daytype.dtp}{period.prd}"
                                 f"_{spot.space}_{mode.mode}")
                        strata_rows.append((
                            creel_run.id,
                            label,
                            season.id,
                            spot.id,
                            daytype.id,
                            period.id,
                            mode.id,
                        ))
    return strata_rows
845295eb27b1951e8cc90971419583fc41231b0f
29,279
from datetime import datetime, timezone


def timestamp_to_datetime(timestamp):
    """Convert a POSIX timestamp to a local-time datetime string.

    The timestamp is interpreted as UTC and rendered in the system's
    local timezone as "YYYY-MM-DD HH:MM:SS.ffffff+HHMM".

    Replaces the deprecated datetime.utcfromtimestamp and the pytz /
    dateutil third-party dependencies with the stdlib timezone support;
    output is identical.
    """
    local_time = datetime.fromtimestamp(timestamp, tz=timezone.utc).astimezone()
    return local_time.strftime("%Y-%m-%d %H:%M:%S.%f%z")
59f410f72beced48792fb4c40796da7393285c28
29,280
def create_service(ctx, name, args=None):
    """Instantiate a UNO service from the component context.

    When *args* is a non-empty sequence, the service is created with
    those constructor arguments; otherwise it is created plainly.
    """
    factory = ctx.getServiceManager()
    if not args:
        return factory.createInstanceWithContext(name, ctx)
    return factory.createInstanceWithArgumentsAndContext(name, args, ctx)
6d1b1908c3c1a7d64c34d9fc83a7843905da7a87
29,281