content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
from typing import Optional def _create_noncompat_join_key_types_errmsg( left_key_name: str, left_key_type: type, right_key_name: str, right_key_type: type, reason: Optional[str] = None ) -> str: """Create an error message for two column types which are not compatible as join-keys.""" # If the caller didn't supply a reason, provide a generic explanation # that's (maybe) still helpful for new users. _reason = reason if reason is not None else "You may need to convert one of them into a different type before joining." return f'The columns \'{left_key_name}\' ({left_key_type}) and \'{right_key_name}\' ({right_key_type}) are not compatible for use as join-keys. {_reason}'
0dc97119e3fc3c7a0b8140fba995d0aa8cece004
69,539
def _sequence_to_index(seq, dim_list): """ Inverse of _index_to_sequence. Parameters ---------- seq : list of ints List of coordinates for each particle. dim_list : list of int List of dimensions of consecutive particles. Returns ------- i : list Index in a matrix. """ i = 0 for s, d in zip(seq, dim_list): i *= d i += s return i
5a4e2ddc14ff79ce9210faed4c0ede681f82b6e5
69,543
import math def _pdeg2rad(angle): """ Convert dd.mmss to radian """ d = math.floor(angle) angle = round((angle - d) * 100, 10) m = math.floor(angle) s = round((angle - m) * 100, 10) return math.radians(d + m / 60.0 + s / 3600.0)
0447c26f49cba42e442633ed5e192b344bdeaaf4
69,544
def get_sub_grid(grid, row_index, col_index, split):
    """Return the (row_index, col_index) sub-grid of size split x split."""
    row_start, row_stop = row_index * split, (row_index + 1) * split
    col_start, col_stop = col_index * split, (col_index + 1) * split
    return [row[col_start:col_stop] for row in grid[row_start:row_stop]]
02d84dabc9c8352b86b56f2ff73135ab1e215801
69,546
def parse_dtypes(df, value):
    """Resolve short string dtype names (e.g. 'string') to data types.

    Looks each name up in ``df.constants.SHORT_DTYPES`` and then
    ``df.constants.DTYPES_DICT``; unknown names are passed through
    unchanged.

    :param df: dataframe-like object exposing a ``constants`` namespace.
    :param value: a dtype name or list of dtype names.
    :return: a single resolved type if one name was given, otherwise a list.
    """
    names = value if isinstance(value, list) else [value]
    try:
        resolved = [
            df.constants.DTYPES_DICT[df.constants.SHORT_DTYPES[name]]
            for name in names
        ]
    except KeyError:
        # Unknown short name: fall back to the (list-wrapped) input as-is.
        resolved = names
    if isinstance(resolved, list) and len(resolved) == 1:
        return resolved[0]
    return resolved
35a779e5f948f3f68700f1ad86b77985131cc1b9
69,564
def stable_unique(items):
    """Return a copy of ``items`` without duplicates, order preserved.

    >>> stable_unique([1,4,6,4,6,5,7])
    [1, 4, 6, 5, 7]
    """
    seen = set()
    unique = []
    for element in items:
        if element not in seen:
            seen.add(element)
            unique.append(element)
    return unique
2600a22a36566b25bdfce95dc7f4416e5e5979a7
69,570
def min_max_dates_dict(min_max_dates):
    """Build the dictionary of dates the pipeline needs.

    Args:
        min_max_dates: tuple of (min_date, max_date) from the input logs.

    Returns:
        Dict with min/max dates, total day span, half span, and the cohort
        window length (at least 30 days, otherwise ~5% of the span rounded
        to a multiple of 30).
    """
    min_date, max_date = min_max_dates
    total_days = (max_date - min_date).days
    five_percent = round(total_days * 0.05)
    if five_percent < 30:
        cohort_days = 30
    else:
        # Round the 5% window to the nearest multiple of 30 days.
        cohort_days = round(five_percent / 30) * 30
    return {
        'min_date': min_date,
        'max_date': max_date,
        'total_days': total_days,
        # +0.1 nudges exact .5 halves upward before rounding.
        'half_days': round((total_days / 2) + 0.1),
        'cohort_days': cohort_days,
    }
14b99c9dd6096131d0d18f77da73a5c1c8d5f885
69,572
def unpad(x, x_mask):
    """
    Unpad a batch of sequences by selecting elements not masked by x_mask.
    Returns a list of unpadded sequences, their (empty-selected) masks, and
    their lengths.

    x      -- iterable of per-example tensors.
    x_mask -- iterable of per-example mask tensors; assumes a 0/1 (uint8)
              mask where 1 marks padding -- TODO confirm convention.
    """
    x_unp = []
    x_unp_mask = []
    x_unp_len = []
    for seq, seq_mask in zip(x, x_mask):
        # `1 - seq_mask` flips the 0/1 mask, selecting the non-padded
        # positions.  NOTE(review): indexing with a uint8 mask is deprecated
        # in modern PyTorch; `seq[~seq_mask.bool()]` is the current idiom --
        # confirm the torch version this targets.
        seq_unp = seq[(1 - seq_mask)]
        x_unp.append(seq_unp)
        # Selecting the mask with its own inverse keeps only positions where
        # the mask is 0, i.e. the surviving (non-pad) entries of the mask.
        x_unp_mask.append(seq_mask[1 - seq_mask])
        x_unp_len.append(seq_unp.shape[0])
    return x_unp, x_unp_mask, x_unp_len
4a447505e11505fe5fe6139729c488957a30d9df
69,574
def chaal(current_board, player_character, player_character_position):
    """
    Put the player's character on the chosen cell of the board.
    ("chaal" is Bangla for a move.)

    :param current_board: the current board (list of lists).
    :param player_character: the character assigned to the player.
    :param player_character_position: the cell number to replace with the
        player's character.
    :return: the board, mutated in place, after the insertion.
    """
    for board_row in current_board:
        for col, cell in enumerate(board_row):
            if cell == player_character_position:
                board_row[col] = player_character
    return current_board
979b5e2b507a1964e9575e0cc419cb2b1a68821b
69,577
import typing
import re


def orbital_pattern(compile: bool = True) -> typing.Union[typing.Pattern[str], str]:
    """Create the regex pattern that recognizes orbital strings.

    Parameters
    ----------
    compile : bool
        If true, return a compiled pattern; otherwise return the raw string.

    Returns
    -------
    A compiled pattern or the pattern string, per ``compile``.
    """
    regex_string = r"([A-Q]\d( \d+)+)"
    return re.compile(regex_string) if compile else regex_string
eb4ed7104e4463e0f65b273010620c456b0c7f14
69,580
import yaml


def load_config(yamlfile):
    """Load a YAML file and return its contents as a dict."""
    with open(yamlfile, 'r') as stream:
        return yaml.safe_load(stream)
344fd620fad860d7bd24c757aafaa428c1d5a70b
69,583
def snake(string):
    """Convert to snake case.

    Word word -> word_word
    """
    lowered = (word.lower() for word in string.split())
    return "_".join(lowered)
50c2e11d2d650ce061cdefc56effff147eddb057
69,584
def f_value(ER, EF, dfR, dfF):
    """
    Return an F-statistic for a restricted vs. unrestricted model.

    Parameters
    ----------
    ER : float
        Sum of squared residuals for the restricted model (null hypothesis).
    EF : float
        Sum of squared residuals for the unrestricted model (alternative).
    dfR : int
        Degrees of freedom of the restricted model.
    dfF : int
        Degrees of freedom of the unrestricted model.

    Returns
    -------
    float
        The F-statistic.
    """
    numerator = (ER - EF) / float(dfR - dfF)
    denominator = EF / float(dfF)
    return numerator / denominator
ee59bacef0e0e624cb5dee6fd541670eec1c7dfd
69,585
def _merge_dicts(*dict_args): """Destructively merges dictionaries, returns None instead of an empty dictionary. Elements of dict_args can be None. Keys in latter dicts override those in earlier ones. """ result = {} for dictionary in dict_args: if dictionary: result.update(dictionary) return result if result else None
e32a021b2836b08d231adee8f3dc070a2ed42898
69,591
import random


def random_odd(n_bits: int) -> int:
    """
    Generate a random odd number in the range [2 ** (n_bits - 1), 2 ** n_bits - 1].

    The top bit of an n_bits-wide number is forced so the result has exactly
    ``n_bits`` bits, and the low bit is forced so the result is odd (a useful
    prime candidate).
    """
    assert n_bits > 0
    value = random.getrandbits(n_bits)
    # Set bit (n_bits - 1), the highest bit of an n_bits-wide number.
    # The original used `1 << n_bits`, which produced (n_bits + 1)-bit
    # values outside the range documented above.
    value |= (1 << (n_bits - 1)) | 1
    return value
e6a852f941141f7d009142e6821b4184108b5d1e
69,592
def reverse_taskname(name: str) -> str:
    """
    Reverse the dot-separated components of a task name.

    The reversed convention groups log/scratch files of related tasks
    together when used as filenames:

        0.somejob.somerun -> somerun.somejob.0
        0.somejob         -> somejob.0
        somename          -> somename

    Args:
        name: name of the task.
    """
    components = name.split('.')
    assert len(components) <= 3
    components.reverse()
    return '.'.join(components)
a49c97f3e694120b7aecba9ea325dbe78420150e
69,595
def adjust_timescales_from_daily(ds, tstep):
    r"""Rescale daily continuous growth-rate parameters to the model timestep.

    Variables/coords whose name starts with one of the rate prefixes
    ($\lambda$ growth rate, $\beta$ transmission rate, $\gamma$ inverse
    infectious period, $\sigma$ inverse latency period) are multiplied by
    ``tstep``.

    Parameters
    ----------
    ds : :class:`xarray.Dataset`
        Dataset containing the rate parameters.
    tstep : float
        Timestep of the model, in days.

    Returns
    -------
    :class:`xarray.Dataset`
        Copy of ``ds`` with rate parameters adjusted.
    """
    rate_prefixes = ("lambda", "beta", "gamma", "sigma")
    out = ds.copy()
    for name in ds.variables:
        # Match on the prefix before the first underscore, staying in
        # continuous-rate space (a simple linear rescale).
        if name.split("_")[0] in rate_prefixes:
            out[name] = out[name] * tstep
    return out
802b472138be70934ff3e37af367d224970b5517
69,597
from typing import List


def read_docs(fnames: List[str]) -> List[str]:
    """
    Read in the documents.

    :param fnames: List of paths to .txt files to read.
    :return: List of input documents, one string per file.
    """
    all_docs = []
    for docfile in fnames:
        # Use a context manager so each file handle is closed promptly;
        # the original `open(...).read()` leaked handles.
        with open(docfile, 'r') as handle:
            all_docs.append(handle.read())
    return all_docs
26c8bd7c31ecfd84144ee5d180ae5d41353945f3
69,599
def repr_long_list(seq):
    """Abbreviated repr for long lists.

    >>> repr_long_list(list(range(100)))
    '[0, 1, 2, ..., 98, 99]'
    """
    if len(seq) < 8:
        return repr(seq)
    # Strip the closing bracket from the head and the opening one from the
    # tail so the ellipsis can be spliced between them.
    head = repr(seq[:3])[:-1]
    tail = repr(seq[-2:])[1:]
    return head + ', ..., ' + tail
9978a1439d66cbd1c2015cb799cace7aeaac411c
69,612
def get_components(items):
    """Return a list of (ID, number) pairs parsed from items.

    get_components(list(str)) -> list((str, int))

    Each item is of the form "id : number"; surrounding whitespace on both
    parts is ignored.
    """
    pairs = []
    for raw in items:
        ident, _, number = raw.strip().partition(':')
        pairs.append((ident.strip(), int(number.strip())))
    return pairs
8b483d351c5c3473b7baa9d4ca6729a7846ea3ca
69,613
def describe_attribute_change(attribute_key, new_attribute_value, **kwargs):
    """Return a human-readable description of an attribute change.

    Args:
        attribute_key (str): Name of the attribute.
        new_attribute_value: New value of the attribute.

    Keyword Args:
        feature_id_key (str): Name of the feature ID attribute.
        feature_id_value: Value of the feature ID (defaults to None).
        old_attribute_value: Old value of the attribute.

    Returns:
        str: Change description, e.g. "Changed name='a' --> 'b' for id=5.".
    """
    parts = ["Changed {}=".format(attribute_key)]
    if 'old_attribute_value' in kwargs:
        parts.append("{old_attribute_value!r} --> ".format(**kwargs))
    parts.append("{!r}".format(new_attribute_value))
    if 'feature_id_key' in kwargs:
        # Ensure the value key exists so the format call below cannot fail.
        kwargs.setdefault('feature_id_value')
        parts.append(" for {feature_id_key}={feature_id_value!r}".format(**kwargs))
    parts.append(".")
    return "".join(parts)
49d3834fa7cf06185c8654647947f2e1c2cbfc77
69,614
def _get_str_columns(sf): """ Returns a list of names of columns that are string type. """ return [name for name in sf.column_names() if sf[name].dtype == str]
a531714733ff1f4b4badfe855ae009c30dcd9e33
69,615
def isprimer(n: int) -> bool:
    """Is n prime?

    >>> isprimer(2)
    True
    >>> tuple( isprimer(x) for x in range(3,11) )
    (True, False, True, False, True, False, False, False)
    """
    if n < 2:
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        return False
    # Trial division by odd candidates up to sqrt(n), iteratively
    # (equivalent to the classic tail-recursive formulation).
    divisor = 3
    while n >= divisor * divisor:
        if n % divisor == 0:
            return False
        divisor += 2
    return True
33fccf076604601bd40298f555180ee5388b00af
69,618
def normalize_area_name(area_name: str) -> str:
    """Normalize an area name: case fold it and drop all spaces."""
    folded = area_name.casefold()
    return folded.replace(" ", "")
4f26155ea1299b2387aee783684f9d94e3360132
69,626
import torch


def axisangle2quat(axis, angle):
    """
    Convert an axis-angle rotation to a quaternion.

    axis: B x 3 rotation axes (normalized here before use)
    angle: B rotation angles
    returns quaternion: B x 4, ordered (w, x, y, z)
    """
    unit_axis = torch.nn.functional.normalize(axis, dim=-1)
    half_angle = angle.unsqueeze(-1) / 2
    scalar_part = half_angle.cos()
    vector_part = half_angle.sin() * unit_axis
    return torch.cat([scalar_part, vector_part], dim=-1)
7ffa57eff85c7fdc46e5e569fc1ed44d0be80461
69,628
import math


def get_request_urls(query=None, img_num=0) -> list:
    """
    Build the Bing image-search request URLs for a query and image count.

    :param query: keyword of image
    :param img_num: numbers of image
    :return: requesting urls (35 results per page)
    """
    pages = math.ceil(img_num / 35)
    # Request one extra page because not every image URL request succeeds,
    # so we deliberately over-fetch to avoid coming up short.
    return [
        f"https://www.bing.com/images/async?q={query}&first={1 + 35 * i}&count=35&cw=1177&ch=577&tsc=ImageBasicHover&datsrc=I&mmasync=1&SFX={i + 1}"
        for i in range(pages + 1)
    ]
4167f05ba7d68f17760a1f6524f14468737b68d9
69,631
from pathlib import Path


def make_dir(new_dir, path):
    """Create a directory under ``path`` if it doesn't already exist.

    Args:
        new_dir (str): Name of the new directory to be created.
        path (str): Path where the new directory will be created.

    Returns:
        Path: Path of the (possibly pre-existing) directory.
    """
    target = path / Path(new_dir)
    # exist_ok makes repeated calls idempotent.
    target.mkdir(exist_ok=True)
    return target
fc44a70477047958f3fa0f6d3adfde2c1b26c131
69,633
def mul_vector_by_scalar(vector, scalar):
    """
    Multiply a vector by a scalar, component-wise.

    :param vector: vector (any iterable of numbers)
    :param scalar: scalar
    :return: tuple equal to vector * scalar
    """
    return tuple(component * scalar for component in vector)
5e1215a7cd03265272eff78966d0273b180ad3eb
69,636
import math


def calculateFreq(pmf, begin, end):
    """
    Calculate the probability mass of ``pmf`` falling in [begin, end).

    Args:
        pmf: the probability mass function (exposes Normalize/GetDict)
        begin: the first (inclusive) element of the interval
        end: the last (exclusive) element of the interval
    Return:
        the probability of falling between [begin, end)
    Side effect:
        Normalizes the pmf.
    """
    pmf.Normalize()
    items = pmf.GetDict()
    # dict.iteritems() was removed in Python 3; use items().
    filtered = [prob for value, prob in items.items() if begin <= value < end]
    # fsum for an accurate float sum.
    return math.fsum(filtered)
30b6a625e37ae1cd10065d4f66d536d64cae010d
69,638
def default_ssl_connection_selector(connection, app_blame, client_hello):
    """Select a nogotofail.mitm.connection.handlers class for SSL setup.

    Returning None keeps the connection on its current handler; this
    default implementation always does so.
    """
    return None
5c68bd30a62b0ffe5d17e867a124ecb288b76757
69,643
def cache(request):
    """
    Return a cache object that persists state between testing sessions.

    Usage: ``cache.get(key, default)`` / ``cache.set(key, value)``.
    Keys are ``/``-separated; prefix them with your plugin or application
    name to avoid clashes with other cache users.  Values must be
    JSON-serializable (handled by the json stdlib module).
    """
    return request.config.cache
6b98abc67a32298cee07504944a5d7bd13999410
69,651
import requests


def get_access_token(api_key):
    """Retrieve an access token from the IAM token service.

    Exchanges an IBM Cloud API key for a bearer token via the IAM
    ``/identity/token`` endpoint.

    :param api_key: IBM Cloud API key.
    :return: the access token string on success, or None on any non-200
        response (the error body is deliberately not surfaced).
    """
    token_response = requests.post(
        "https://iam.cloud.ibm.com/identity/token",
        data={
            # Grant type for exchanging an API key for an IAM token.
            "grant_type": "urn:ibm:params:oauth:grant-type:apikey",
            "response_type": "cloud_iam",
            "apikey": api_key
        },
        headers={
            "Accept": "application/json"
        }
    )
    if token_response.status_code == 200:
        print ("Got access token from IAM")
        return token_response.json()['access_token']
    else:
        # Swallow the failure and signal it with None; uncomment to debug:
        # print( token_response.status_code, token_response.json())
        return None
b51c59ec76ec784f8c4d9c88c0f07926e4466565
69,652
def next_page(dom):
    """Get the link for the next page of results from apihub/scihub.

    :param dom: object coming back from a Copernicus scihub search request
    :type dom: xml.dom object
    :return: Link to the next page, or None if we reached the end.
    :rtype: str
    """
    links = dom.getElementsByTagName('link')
    next_page_, this_page, last = None, None, None
    # Collect the 'next', 'self' and 'last' pagination links.
    for link in links:
        if link.getAttribute('rel') == 'next':
            next_page_ = link.getAttribute('href')
        elif link.getAttribute('rel') == 'self':
            this_page = link.getAttribute('href')
        elif link.getAttribute('rel') == 'last':
            last = link.getAttribute('href')
    if last == this_page:
        # The current page IS the last page, so there is no next page.
        # (The original comment here claimed the opposite.)
        next_page_ = None
    return next_page_
ad7a5c5bb31e583ae2886cb9c1acfa79feb45689
69,655
from pathlib import Path import socket def get_uri(dir_name: str | Path) -> str: """ Return the URI path for a directory. This allows files hosted on different file servers to have distinct locations. Parameters ---------- dir_name : str or Path A directory name. Returns ------- str Full URI path, e.g., "fileserver.host.com:/full/path/of/dir_name". """ fullpath = Path(dir_name).absolute() hostname = socket.gethostname() try: hostname = socket.gethostbyaddr(hostname)[0] except (socket.gaierror, socket.herror): pass return f"{hostname}:{fullpath}"
e22cd056d3e09584ceb427f85471287e1e4a2aac
69,659
def rebalance_A_to_B(A, B, target_relative_weight, transfer_fee):
    """
    Amount to transfer from A to B so that A's relative weight becomes
    ``target_relative_weight``, accounting for a proportional transfer fee.

    If ``t`` is the amount moved from A to B, we solve

        (A - t) / (A - t + B + t * (1 - transfer_fee)) = target_relative_weight

    which rearranges to

        t = (A - target_relative_weight * (A + B))
            / (1 - target_relative_weight * transfer_fee)

    :param A: float, value of A
    :param B: float, value of B
    :param target_relative_weight: desired A / (A + B) after rebalancing
    :param transfer_fee: proportional fee rate on the transferred amount
    :return: amount to transfer from A to B (negative means B gives to A)

    >>> rebalance_A_to_B(10, 10, 0.5, 0)
    0.0
    >>> rebalance_A_to_B(10, 10, 0.25, 0)
    5.0
    >>> rebalance_A_to_B(10, 10, 0.75, 0)
    -5.0
    >>> rebalance_A_to_B(10, 10, 0.25, 0.01)
    5.012531328320802
    """
    numerator = A - target_relative_weight * (A + B)
    denominator = 1 - target_relative_weight * transfer_fee
    return numerator / denominator
ea3fd4619819ef8dea380a5f5c768c63e24c15ad
69,661
def W2kW(x):
    """Convert watts to kilowatts."""
    return x / 1000
4e6cf1bb300050fc03df2287b3c4bdb493c47022
69,662
def yes_or_no(question):
    """
    Ask the user a yes/no question until a valid answer is given.

    Returns True for yes, False for no.
    """
    suffix = " (y/n)? > "
    question = question.rstrip()
    # Loop (instead of recursing) until the user answers validly.
    while True:
        answer = input(question + suffix).lower().strip()
        if answer in ("y", "ye", "yes"):
            return True
        if answer in ("n", "no"):
            return False
        print("Please answer with 'y' or 'n'\n")
d4ed228228fcb7cfa61e76ccc13d5ee5b9047de9
69,663
import io


def read(path, default=None, encoding='utf8'):
    """Read encoded contents from specified path or return default.

    A falsy path returns ``default`` immediately.  On IOError the default
    is returned when one was given, otherwise the error propagates.
    """
    if not path:
        return default
    try:
        with io.open(path, mode='r', encoding=encoding) as handle:
            return handle.read()
    except IOError:
        if default is None:
            raise
        return default
cd99cece52d5efa2f15359b584c393de2ac62f48
69,664
def isvalidIp(ip_addr):
    """Check if valid IPv4 address"""
    try:
        octets = [int(part) for part in ip_addr.split('.')]
    except ValueError:
        # Non-numeric component.
        return False
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)
9f60d7fb13d4e827f533ae68e28d04369cbe53f0
69,669
def log_mapping(conf: str):
    """
    Split a log-format string into a dict.

    Example: "date:application_date,message:application_message" is split
    into {'date': 'application_date', 'message': 'application_message'}.
    """
    mapping = dict()
    for entry in conf.split(','):
        # Split on the first ':' only, so values that themselves contain a
        # colon survive intact (the original `split(':')[1]` truncated them
        # and raised IndexError on entries with no colon at all).
        key, _, value = entry.partition(':')
        mapping[key] = value
    return mapping
21ccc8fb2fd2226d90659fb7cf87f62a1d29b9c2
69,670
def asst70_variation_descriptors(moa_vid70):
    """Create the assertion70 variation_descriptors test fixture
    (a single-element list wrapping the given descriptor)."""
    return [moa_vid70]
1e8d1fc13a55b173674c60c339f6d6a13945ca6f
69,673
def form_to_dict(urlencoded: str):
    """Decode a form of the structure foo=bar&baz=qux&zap=zazzle into a dict.

    Each pair is split on the FIRST '=' only, so values that themselves
    contain '=' (legal in form values) are preserved intact; the original
    ``split('=')[1]`` truncated such values.
    """
    result = {}
    for pair in urlencoded.split('&'):
        key, _, value = pair.partition('=')
        result[key] = value
    return result
b2d533e8c10df06ac1cd22d2dcbe15ecdf4413c3
69,675
def lowest_weight(edges):
    """Return the edges sorted by ascending 'weight' (lowest first).

    Each edge is expected to be a (u, v, data) triple where
    ``data["weight"]`` holds the weight.
    """
    # sorted() already returns a new list; the original's extra list() wrap
    # was redundant.
    return sorted(edges, key=lambda edge: edge[2]["weight"])
116d47559e0e6495a098285266bff0a19ad7b40e
69,676
def getFiducials(d, offset=None):
    """Return fiducials from the given dict as a list of (x, y) pairs, or
    None on error.

    By default points are translated relative to CROP_RECT_TOP_LEFT_{XY};
    pass an explicit [x, y] offset to override.  Order of points returned:

        left eye out, left eye in, right eye in, right eye out,
        mouth left, mouth right
    """
    KEYS = ('LEFT_EYE_OUT', 'LEFT_EYE_IN', 'RIGHT_EYE_IN',
            'RIGHT_EYE_OUT', 'MOUTH_LEFT', 'MOUTH_RIGHT')
    if offset is None:
        offset = [int(d['CROP_RECT_TOP_LEFT_X']), int(d['CROP_RECT_TOP_LEFT_Y'])]
    dx, dy = offset[0], offset[1]
    try:
        return [(int(d[key + '_X']) + dx, int(d[key + '_Y']) + dy) for key in KEYS]
    except KeyError:
        # A fiducial key was missing.
        return None
47deba7c9ac12b028d404bbb7a16f15697ffa7ad
69,679
def reverse(dictionary):
    """Return a dict with the keys and values of ``dictionary`` swapped."""
    return {value: key for key, value in dictionary.items()}
fe096809c47f8c19f60024246c46ebce4a9f7ee2
69,687
import glob
from pathlib import Path


def merge_all_dot_sol(outputs_dir, dot_sol_output, num_rows):
    """
    Merge all .SOL into one

    :param outputs_dir: directory containing the dynamic .SOL
    :param dot_sol_output: file to write the content to. This is the static .SOL
    :param num_rows: number of points. This is to make sure that all dynamic
        SOL are written.  NOTE(review): currently unused — only the
        commented-out completeness check below referenced it.
    :return: pathname to the final .SOL file
    """
    # get all the dynamic .SOL
    match = str(outputs_dir) + '/*.SOLD'
    all_dot_sols = glob.glob(match)
    # Sort so the merged output has a deterministic (lexicographic) order.
    all_dot_sols.sort()
    # for i in all_dot_sols:
    #     print(i)
    # if len(all_dot_sols) != num_cells:  # something wrong
    #     print("Stopping: The static database seems not complete")
    #     return
    with open(dot_sol_output, "wb") as outfile:
        for f in all_dot_sols:
            with open(f, "rb") as infile:
                outfile.write(infile.read())
            # Separate concatenated chunks with a newline byte.
            outfile.write('\n'.encode())
    # NOTE(review): the file is written at `dot_sol_output` relative to the
    # current working directory, but the returned path is prefixed with
    # `outputs_dir` — these only coincide when CWD == outputs_dir.  Confirm
    # against callers.
    dot_sol_output = str(outputs_dir) + '/' + dot_sol_output
    return Path(dot_sol_output)
2acd298b705a6d832d4e8c9765022f00e08a2d13
69,691
def get_lomb_y_offset(lomb_model):
    """Get the y-intercept of a fitted Lomb-Scargle model."""
    first_fit = lomb_model['freq_fits'][0]
    return first_fit['y_offset']
6b891fed9d9ff422bea20cc6b5d1acf73126aa27
69,694
import torch


def q(x: torch.Tensor, min_value: int, max_value: int, scale: float, zero_point: int = 0) -> torch.Tensor:
    """
    Fake-quantize ``x`` using a PyTorch fake-quantization node.

    Args:
        x: input tensor to quantize.
        min_value: lower bound of the quantized domain.
        max_value: upper bound of the quantized domain.
        scale: quantization scale.
        zero_point: quantization zero point.

    Returns:
        The fake-quantized input tensor.
    """
    return torch.fake_quantize_per_tensor_affine(
        x,
        scale=scale,
        zero_point=zero_point,
        quant_min=min_value,
        quant_max=max_value,
    )
f2d8db63dff809c364a14735fc4e6078a2db2f64
69,695
import re


def version_hash(version):
    """Return the hash from the version's dev component as a string.

    Return None if hash information is not available.
    """
    if not version or not isinstance(version[-1], str):
        return None
    # Expect a YYYYMMDD date followed by '-' and a 10-character hash.
    match = re.match(r'.*([12]\d\d\d[01]\d[0123]\d)[-](\w{10,10}\b)', version[-1])
    if match is None:
        return None
    return match.groups()[1]
d423d2c6c684bb122dd50e126c24a6bed4a8ef3d
69,696
def empty_line(line):
    """Check whether the line is empty (contains only whitespace)."""
    return line.strip() == ""
abbe626f60940a6cb76be3c53730c499857ccbae
69,700
def get_alt_svms(svms, classes, target_class):
    """
    Find alternative SVMs (e.g., for target class 0, the SVMs classifying
    0 against 1 and 0 against 2).

    Parameters
    ----------
    svms : list
        List of eTree.Element objects describing the one-to-one support
        vector machines in the PMML.
    classes : numpy.array
        The classes to be predicted by the model.
    target_class : str
        The target class.

    Returns
    -------
    alt_svms : list
        SVMs comparing the target class against each alternate class, in
        the order the alternate classes appear in ``classes``.
    """
    # Noop for regression
    if classes[0] is None:
        return svms

    def _involves(svm, cls):
        # True when the SVM's pair of categories includes `cls`.
        label = str(cls)
        return (svm.get('targetCategory') == label
                or svm.get('alternateTargetCategory') == label)

    candidates = [svm for svm in svms if _involves(svm, target_class)]

    # Iterate `classes` directly to keep the documented class order.  The
    # original iterated set(classes).difference(...), whose order is
    # arbitrary, making the output order nondeterministic across runs.
    other_classes = [c for c in dict.fromkeys(classes) if c != target_class]
    return [
        next(svm for svm in candidates if _involves(svm, c))
        for c in other_classes
    ]
62d0d8f093f20891b080605cbea1f9c2d08c10c8
69,702
def reverse_dict(dict_):
    """Reverse the key/value status of a dict"""
    swapped = {}
    for key, value in dict_.items():
        swapped[value] = key
    return swapped
e60c3c02c2b358ac862691c5e6f289ea671bc0b6
69,703
def get_new_size(original_size):
    """
    Return each dimension of the original size grown by 2 px.

    :param original_size: Original image's size
    :return: Width / height after calculation
    :rtype: tuple
    """
    return tuple(dimension + 2 for dimension in original_size)
3ce5808c597240f67ab6f0081645282f1d115416
69,705
def extract_urls_from_soup(soup):
    """
    Extract the href of every <a> tag in a BeautifulSoup object.

    :param soup: a BeautifulSoup object
    :return: a list of urls (strings); entries are None for <a> tags that
        have no href attribute.
    """
    return [anchor.get('href') for anchor in soup.find_all('a')]
984613b3344b35a14ac0933f8303bf79b04a085e
69,710
def is_main(config):
    """Main is true only for the first worker (gw0), or when there are no
    xdist workers at all."""
    worker_input = getattr(config, 'workerinput', {})
    return worker_input.get('workerid', 'gw0') == 'gw0'
9e59bcbc07c0a1828cfb7b6f1c4cbef1d82eecec
69,714
def scale(input_interval, output_interval, value):
    """
    Linearly map ``value`` from [input_min, input_max] to
    [output_min, output_max].
    """
    in_lo, in_hi = input_interval
    out_lo, out_hi = output_interval
    fraction = (value - in_lo) / (in_hi - in_lo)
    return out_lo + (out_hi - out_lo) * fraction
2d70fbf0cb19a24f3467a8ff3d6786fca9db9204
69,718
def get_token(request):
    """
    Return the CSRF token required for a POST form (an alphanumeric value).

    Side effect: marks the CSRF cookie as used, which makes the
    csrf_protect decorator and CsrfViewMiddleware add a CSRF cookie and a
    'Vary: Cookie' header to the outgoing response.  Call lazily (as the
    csrf context processor does) if that matters.
    """
    meta = request.META
    meta["CSRF_COOKIE_USED"] = True
    return meta.get("CSRF_COOKIE", None)
839b4a8327623d1e595c86cdb497faf30b5b2a39
69,720
import random def _randomly_negate(v): """With 50% prob, negate the value""" return -v if random.random() > 0.5 else v
dd08e186b97afac1b6eb617e90be383e83b749bd
69,725
from datetime import datetime


def str_to_datetime(time: str):
    """
    Convert a string to a datetime.  The format of the string is
    "YYYY-MM-DD", e.g. "1980-12-30".
    """
    return datetime.strptime(time, "%Y-%m-%d")
03d0efeb7e528ed65d3bc08c891131633eb6a16d
69,726
def unpack_batch_diagnostics(batch_diagnostics):
    """
    Unpack a batch diagnostics DataFrame into a plain Python dict
    (targets the old data storage schematic).

    Input:
    - batch_diagnostics: pandas DataFrame

    Output:
    - dict mapping simulation name -> {'runtime_diagnostics': <per-sim data>}
    """
    unpacked = {}
    for sim_name, sim_diagnostics in batch_diagnostics.items():
        # The entire per-simulation object constitutes the runtime
        # diagnostics under the old schema.
        unpacked[sim_name] = {"runtime_diagnostics": sim_diagnostics}
    return unpacked
9ef64c5f08f8d5861404ffc4789c04bbd9b36b7a
69,729
def elementwise_list_addition(increment, list_to_increment):
    """
    Element-wise increment a list by the values of another list of the same
    length (as needed in output generation).

    Args:
        increment: values to add to the original list
        list_to_increment: the original list to be incremented
    Returns:
        List whose elements are the pairwise sums of the two inputs.
    """
    assert len(increment) == len(list_to_increment), 'Attempted to increment lists with two lists of different lengths'
    return [a + b for a, b in zip(list_to_increment, increment)]
c1a497cc8e4489360375dee7efacf903e64ccb17
69,731
import torch


def batch_to_tensors(batch, n_tokens, max_length):
    """
    Make input, input mask, and target tensors for a batch of sequences.

    batch      -- (input_sequences, target_sequences) pair of lists of
                  token-id sequences.
    n_tokens   -- NOTE(review): unused here; presumably kept for API
                  symmetry with the caller — confirm.
    max_length -- padded length of the output tensors.

    Returns (x, y, x_mask); all are moved to CUDA when available.
    Assumes token id 0 is reserved for padding (the mask treats 0 as pad)
    and that each target sequence has the same length as its input
    sequence -- TODO confirm both against callers.
    """
    input_sequences, target_sequences = batch
    sequence_lengths = [len(s) for s in input_sequences]
    batch_size = len(input_sequences)
    x = torch.zeros(batch_size, max_length, dtype=torch.long) #padding element
    y = torch.zeros(batch_size, max_length, dtype=torch.long)
    for i, sequence in enumerate(input_sequences):
        seq_length = sequence_lengths[i]
        #copy over input sequence data with zero-padding
        #cast to long to be embedded into model's hidden dimension
        x[i, :seq_length] = torch.Tensor(sequence).unsqueeze(0)
    # Mask is 1 wherever a real (non-zero) token sits, 0 over padding.
    x_mask = (x != 0)
    x_mask = x_mask.type(torch.uint8)
    for i, sequence in enumerate(target_sequences):
        # Uses the INPUT sequence length to slice the target row.
        seq_length = sequence_lengths[i]
        y[i, :seq_length] = torch.Tensor(sequence).unsqueeze(0)
    if torch.cuda.is_available():
        return x.cuda(), y.cuda(), x_mask.cuda()
    else:
        return x, y, x_mask
079529318e910177ad52b00f13b5537d2b03a586
69,733
def save_frontmatter(data: dict) -> str:
    """
    Serialize the given dictionary as markdown frontmatter.

    Args:
        data (dict): All frontmatter key-value pairs.

    Returns:
        str: The frontmatter in its plaintext format, one "key: value"
        line per entry; list values become ['a','b',...].
    """
    lines = []
    for key, value in data.items():
        if isinstance(value, list):
            joined = ",".join(f"'{item}'" for item in value)
            lines.append(str(key) + ": " + "[" + joined + "]")
        else:
            lines.append(str(key) + ": " + str(value))
    return "\n".join(lines)
96d29a12e00f120fe7da5343beb41930a2cdca6b
69,734
import re


def replace_links(url, message):
    """Replace every visible href target in ``message`` with ``url``."""
    href_pattern = re.compile(r'href=[\'"]?([^\'" >]+)')
    return href_pattern.sub('href="{}'.format(url), message)
ea424acc683747bd69f80121bd067d7c69468dbf
69,735
def merge_dict(data, *override):
    """
    Merge any number of dictionaries into a single new dictionary; keys in
    later dicts override those in earlier ones.

    Usage::

        >>> merge_dict({"foo": "bar"}, {1: 2}, {"Doc": "SDK"}) == {1: 2, 'foo': 'bar', 'Doc': 'SDK'}
        True
    """
    merged = {}
    for mapping in (data,) + override:
        merged.update(mapping)
    return merged
aabe034524e8ff354982362372d5838eac25fd8b
69,737
def _output_file_ext(base, dynamic, profiling_enabled): """Return extension that output of compilation should have depending on the following inputs: Args: base: usually "o" for object files and "hi" for interface files. Preceding dot "." will be preserved in the output. dynamic: bool, whether we're compiling dynamic object files. profiling_enabled: bool, whether profiling is enabled. Returns: String, extension of Haskell object file. """ with_dot = False ext = "" if base[0] == '.': with_dot = True ext = base[1:] else: ext = base if dynamic: ext = "dyn_" + ext if profiling_enabled: ext = "p_" + ext return ("." if with_dot else "") + ext
6bfa3303fe236b128b22ea178ae3cbebc1c35bc5
69,745
def sjoin(collection) -> str:
    """Sort a collection of strings and join them into newline-separated lines."""
    ordered = sorted(collection)
    return '\n'.join(ordered)
e2acbad60c298a71892e20968f933d9b9f8288d7
69,750
def build_informationmodels_by_publisher_query() -> str:
    """Build query to count informationmodels grouped by publisher.

    Returns a SPARQL query that counts distinct
    ``modelldcatno:InformationModel`` instances per publisher organization
    number in the informationmodels graph.  The doubled braces ``{{ }}``
    appear literal here; presumably the string is later run through
    ``str.format`` which collapses them -- TODO confirm against callers.
    """
    return """
        PREFIX dct: <http://purl.org/dc/terms/>
        PREFIX modelldcatno: <https://data.norge.no/vocabulary/modelldcatno#>

        SELECT ?organizationNumber (COUNT(DISTINCT ?informationmodel) AS ?count)
        FROM <https://informationmodels.fellesdatakatalog.digdir.no>
        WHERE {{
            ?informationmodel a modelldcatno:InformationModel .
            ?informationmodel dct:publisher ?publisher .
            ?publisher dct:identifier ?organizationNumber .
        }}
        GROUP BY ?organizationNumber
    """
0c8ca0a8b9be1bb8e09a92a434a1c94b44880ef0
69,751
def cvfloat(input):
    """Convert to float, passing None through unchanged."""
    return None if input is None else float(input)
8e71ece1e08b975289c8495f8f2584f5e806fa99
69,753
import re


def _get_unique_value(model, proposal, field_name="slug", instance_pk=None, separator="-"):
    """Return a unique string for ``model`` based on the proposed one.

    Optionally takes:

    * field name which can be 'slug', 'username', 'invoice_number', etc.
    * the primary key of the instance to which the string will be assigned
      (that instance is excluded from the uniqueness check).
    * separator which can be '-', '_', ' ', '', etc.

    By default, for proposal 'example' returns strings from the sequence:
    'example', 'example-2', 'example-3', 'example-4', ...
    """
    queryset = model.objects.filter(**{field_name + "__startswith": proposal})
    if instance_pk:
        queryset = queryset.exclude(pk=instance_pk)
    similar_ones = [row[field_name] for row in queryset.values(field_name)]

    if proposal not in similar_ones:
        return proposal

    # re.escape guards against proposals/separators containing regex
    # metacharacters (e.g. '.', '+'), which previously corrupted the match.
    pattern = re.compile(
        r'^%s%s(\d+)$' % (re.escape(proposal), re.escape(separator)))
    numbers = []
    for value in similar_ones:
        match = pattern.match(value)
        if match:
            numbers.append(int(match.group(1)))

    if not numbers:
        return "%s%s2" % (proposal, separator)
    return "%s%s%d" % (proposal, separator, max(numbers) + 1)
34c3743b670ee40f4d674c9964a99e91efd9a274
69,755
def line_nr(ana, entry):
    """Return the 1-based line number at which a command starts.

    ATTENTION: this is not the row of the command; use rowcol for that.

    ana -- The analysis, which contains the entry
    entry -- The command entry to get the line number for
    """
    to_rowcol = ana.rowcol(entry.file_name)
    row = to_rowcol(entry.region.begin())[0]
    return row + 1
b3887d7060fde7db89ccdff08d3c5ef9da6af52b
69,756
def colourBarMinorAxisSize(fontSize):
    """Calculates a good size for the minor axis of a colour bar.

    The minor axis is the axis perpendicular to the colour bar axis.

    :arg fontSize: Font size of colour bar labels, in points.
    """
    # Convert the font size from points to pixels (points are 1/72 of an
    # inch, and we're using inside knowledge that the colourbarbitmap
    # module renders at 96 dpi with a padding of 6 pixels).
    # The previous no-op `fontSize = fontSize` line has been removed.
    pixelSize = 6 + 96 * fontSize / 72.

    # Fix the minor axis of the colour bar according to the font size,
    # plus a constant size for the colour bar itself.
    return round(2 * pixelSize + 40)
7a1e8268480809a32501c37e8aed1c3e598d8eff
69,758
def clean(string: str) -> str:
    """Strip surrounding whitespace from a field name.

    Some field headers carry stray spaces appended to the end of the
    cells; normalise them away.
    """
    trimmed = string.strip()
    return trimmed
2f1581d81318e08e87a70397d5fa1733bbd457ee
69,760
def print_month(month: int) -> str:
    """Render ``month`` as a level-3 markdown heading (Korean label)."""
    heading = f"### {month} 월"
    return heading
23d0d82d2532efc700965cce42e96ea749cb3465
69,761
def _join_and_preserve_sequential_indent(lines): """Joins lines with newlines while preserving indentation of first line""" if not lines: return '' first_line = lines[0] leading_spaces = len(first_line) - len(first_line.lstrip(' ')) return '\n'.join([first_line] + [' ' * leading_spaces + line for line in lines[1:]])
e686165913eb71b33d6bb0d89650258d0bd82155
69,763
def invert_dict(d):
    """Return a new dict mapping each value of ``d`` back to its key."""
    inverted = {}
    for key, value in d.items():
        inverted[value] = key
    return inverted
5642f85bfd97f4303af43e56bbfb3730909963bf
69,765
def hex_to_rgb(value):
    """Calculate rgb values from a hex color code.

    Handles both 3-digit (one hex digit per channel) and 6-digit codes,
    with or without a leading '#'.

    :param (string) value: Hex color string
    :rtype (tuple) (r_value, g_value, b_value): tuple of rgb values
    """
    digits = value.lstrip('#')
    span = len(digits) // 3
    channels = []
    for start in range(0, len(digits), span):
        channels.append(int(digits[start:start + span], 16))
    return tuple(channels)
8d1216fbea366e94df6c0754af6a0946559de445
69,767
def NEST_CONFIG_STEP(image):
    """Generate the config step name used for nesting the given image."""
    step_name = 'execute config {}'.format(image)
    return step_name
e2784bbbdcf49f8ce9640a8ec0f5843d497de1aa
69,771
def note_distance(note_pair):
    """Get the distance in semitones between two named notes.

    E.g.
        (Bb1, B1) => 1
        (C4, B4) => 11
        (C5, Bb5) => 10

    Parameters
    ----------
    note_pair : tuple of str
        Two note names like "C4" or "Bb1" (letter, optional '#'/'b'
        modifier, single-digit octave).

    Returns
    -------
    note_distance : int
    """
    semitone = {'C': 0, 'D': 2, 'E': 4, 'F': 5, 'G': 7, 'A': 9, 'B': 11}

    def pitch_and_octave(note):
        # Letter is the first char, octave the last; a 3-char note has a
        # modifier in the middle.
        letter, octave = note[0], note[-1]
        modifier = note[1] if len(note) == 3 else None
        value = semitone[letter]
        if modifier == '#':
            value += 1
        elif modifier == 'b':
            value -= 1
        return value, int(octave)

    first_pitch, first_octave = pitch_and_octave(note_pair[0])
    second_pitch, second_octave = pitch_and_octave(note_pair[1])
    return (second_pitch - first_pitch) + 12 * (second_octave - first_octave)
a8a3060d0db981959684ebe8f8dde9e335f8b2e7
69,773
def get_aton(mmsi: str) -> bool:
    """
    Gets the AIS Aids to Navigation (AtoN) status of a given MMSI.

    AIS Aids to Navigation (AtoN):
        AIS used as an aid to navigation uses the format 9192M3I4D5X6X7X8X9
        where the digits 3, 4 and 5 represent the MID and X is any figure
        from 0 to 9. In the United States, these MMSIs are reserved for the
        federal government. Src: https://www.navcen.uscg.gov/?pageName=mtmmsi

    :param mmsi: str MMSI as decoded from AIS data.
    :return: bool True if MMSI belongs to an AtoN, otherwise False.
    """
    return str(mmsi).startswith("99")
e1e6e51af5cfadd3dc93a47ea09e7ddb2915317f
69,778
def dequantize(norm, sign_xi_array):
    """
    Dequantize the quantization code.

    :param norm: Norm of code
    :param sign_xi_array: Rounded vector of code
    :return: Dequantized weights (norm scaled back onto the rounded vector)
    """
    restored_weights = norm * sign_xi_array
    return restored_weights
a7335feef29beee432af1c643bddcb6b26d7f471
69,779
def tweak_thimble_input(stitch_dict, cmd_args):
    """
    Normalise a stitchr dictionary for thimble processing.

    :param stitch_dict: Dictionary produced by stitchr
    :param cmd_args: command line arguments passed to thimble
    :return: Fixed stitchr dict (species capitalised, TCR names blanked,
             gene/CDR3 fields upper-cased)
    """
    stitch_dict['species'] = cmd_args['species'].upper()
    stitch_dict['name'] = ''
    for key in ('v', 'j', 'cdr3', 'c'):
        stitch_dict[key] = stitch_dict[key].upper()
    return stitch_dict
d9010317a76ed459e5fae04d7aa217cfb43449cf
69,780
def num_neighbours(lag=1):
    """
    Calculate number of neighbour pixels for a given lag.

    Parameters
    ----------
    lag : int
        Lag distance, defaults to 1.

    Returns
    -------
    int
        Number of neighbours (the ring between the lag window and the
        next-smaller window).
    """
    outer_window = 2 * lag + 1
    inner_window = 2 * (lag - 1) + 1
    return outer_window ** 2 - inner_window ** 2
608c43b1b195d21b3a748a4b90f36d2b75ac175b
69,783
def multiply_point(point: tuple[float, ...], coef: float) -> tuple[float, ...]:
    """
    Scale every coordinate of a point by a coefficient.

    :param point: the point to multiply.
    :type point: tuple[float, ...]
    :param coef: the coefficient to multiply the points by.
    :type coef: float
    :return: the tuple, where each element is multiplied by the coefficient.
    :rtype: tuple[float]
    """
    return tuple(coordinate * coef for coordinate in point)
99076a9dcb29213f81d56a8ae4d877101b34afa2
69,784
def get_aa_percentage(sequence, aa_list=('A', 'I', 'L', 'M', 'F', 'W', 'Y', 'V')):
    """Return the percentage of ``sequence`` made up of amino acids in
    ``aa_list``, rounded to two decimal places.

    The default list is the hydrophobic amino acids (the previous docstring
    incorrectly called A/I/L/M/F/W/Y/V "hydrophilic"). Comparison is
    case-insensitive. The default is now a tuple to avoid the
    mutable-default-argument pitfall.

    :param sequence: amino-acid sequence string.
    :param aa_list: iterable of single-letter amino-acid codes to count.
    :return: percentage (float); 0.0 for an empty sequence (previously a
        ZeroDivisionError).
    """
    sequence = sequence.upper()
    if not sequence:
        return 0.0
    # Keep a list (not a set) so duplicate entries in aa_list are counted
    # multiple times, matching the original behaviour.
    normalised = [aa.upper() for aa in aa_list]
    aa_count = sum(sequence.count(aa) for aa in normalised)
    return round(100 * aa_count / len(sequence), 2)
73112af1f9499312c80953405ffc5c0cd829f2c2
69,785
def count_lines(file_name: str) -> int:
    """
    Counts the number of lines in a plain text file.

    :param file_name: File name.
    :return: The number of lines in the file; 0 for an empty file (the
        previous implementation incorrectly returned 1 for empty files).
    """
    with open(file_name) as f:
        count = 0
        # Stream line-by-line so the whole file is never held in memory;
        # enumerate starting at 1 leaves `count` at the final line number.
        for count, _line in enumerate(f, 1):
            pass
    return count
d93e3bedbcf07080da9d85e7f80dd6ad8cb72160
69,787
from datetime import datetime def _budget_date(date): """Get a budget month date from the actual date.""" if isinstance(date, datetime): if date.day > 7: return datetime(date.year, date.month, 1) if date.month > 1: # decrease by 1 month return datetime(date.year, date.month - 1, 1) return datetime(date.year - 1, 12, 1) # decrease year & month raise TypeError("Invalid date {}".format(date))
fa4e7492b10e825d5009f16a1cf1b4d89b8e332a
69,789
def g_(x, a, c):
    """
    Residuals of the distance equations, evaluated at ``x``:

        G[i] = Sum_j (x[j] - a[i,j])^2 - C[i]**2

    where x is the point we're trying to fit (in M dimensions), a is the
    N already-embedded points (in M dimensions), and C is the expected
    distance to each of them.
    """
    displacement = x - a
    return (displacement ** 2).sum(1) - c ** 2
82ade5d74aa9d8366c1e4f4ebc955f6eb4c40f35
69,792
def rule_of_succession(kA, kB):
    """Laplace-Bayes estimator (rule of succession).

    Posterior probability that a term belongs to A given kA observations
    on A and kB observations on B.
    """
    successes = kA + 1
    trials = kA + kB + 2
    return successes / trials
cb01c3ffe0dbfbeb92e96648820a741ff5f5e0e0
69,799
def _reverse(array, dim): """Reverse an `xarray.DataArray` along a given dimension.""" slicers = [slice(0, None)] * array.ndim slicers[dim] = slice(-1, None, -1) return array[tuple(slicers)]
100dfa17a2fdef5b378e70f0d8fbdce8edaa7cb4
69,808
def lst2tup(data):
    """Recursively convert every list in ``data`` to a tuple.

    Used to restore tuples after a round-trip through ``json.loads``,
    which only produces lists.
    """
    if isinstance(data, list):
        return tuple(map(lst2tup, data))
    if isinstance(data, dict):
        return {key: lst2tup(value) for key, value in data.items()}
    return data
14d5b0bfab1269128a76e018283334d686368022
69,818
import six


def is_python(cell):
    """Checks if the cell consists of Python code.

    A notebook cell counts as Python when it is a code cell, has a
    non-empty source, and its first source line is not a ``%%`` cell
    magic (e.g. ``%%bash``).

    Note: because this is a chained ``and`` expression, a falsy
    ``cell["source"]`` (e.g. ``[]``) is returned as-is rather than as
    ``False`` — callers should treat the result as truthy/falsy only.
    """
    # six.ensure_str normalises bytes/str sources before the prefix check.
    return (cell["cell_type"] == "code"  # code cells only
            and cell["source"]  # non-empty cells
            and not six.ensure_str(cell["source"][0]).startswith("%%")
            )  # multiline eg: %%bash
bf72bd0e02097ba6cff0e9b15bbe308b583e1bd3
69,819
import math


def wavy(ind, k=10.):
    """Wavy benchmark function:

    $$ f(x) = 1 - \\frac{1}{n} \\sum_{i=1}^{n} \\cos(kx_i)e^{-\\frac{x_i^2}{2}}$$

    with a search domain of $-\\pi < x_i < \\pi, 1 \\leq i \\leq n$.
    Returns a 1-tuple (fitness value,).
    """
    n = len(ind)
    total = sum(
        math.cos(k * value) * math.exp(-(value * value) / 2.)
        for value in ind
    )
    return 1. - total / float(n),
a25c99dc8cf3f9b066dd2bbc21cc8f2b41a249f2
69,820
def cantons_from_geonames_answer(answer):
    """Extract canton abbreviations from a geonames query answer.

    returns: the list of 'adminCode1' values, one per result.
    """
    return [result['adminCode1'] for result in answer['geonames']]
cecddff9d4472b2d7c9e561b466c08656861cfde
69,822
def bounding_box(points):
    """Return [(min_x, min_y), (max_x, max_y)] — the corners of the
    axis-aligned box containing all POINTS."""
    xs, ys = zip(*points)
    return [(min(xs), min(ys)), (max(xs), max(ys))]
dcb2e4b7db93bbafc70ef1eb97d29b5917ce0467
69,825
def delta(self):
    """
    Return the omega invariant of this form's adjoint (which equals the
    omega of the reciprocal form).

    EXAMPLES::

        sage: Q = DiagonalQuadraticForm(ZZ, [1,1,37])
        sage: Q.delta()
        148
    """
    adjoint_form = self.adjoint()
    return adjoint_form.omega()
a835655d7ff54ef7a66338bc96f0da5062a4e286
69,831
def _sortedDictVals(unsorted_dict): """ Sort a dictionary into alphabetical order by keys. """ items = unsorted_dict.items() items.sort() return items
3f3c4440374bdd50351b0654b43cc7359615297f
69,846
def create_options(items):
    """
    creates a tuple of dicts with each key and value set to an item of
    given list.

    :param list items: items to create a tuple of dicts from them.

    :rtype: tuple[dict]
    """
    return tuple({'name': item, 'value': item} for item in items)
d86d8c135c9e3933d9841ad01013099741300f39
69,848
def get_emmitors(G, n):
    """
    Find all clusters emitting to the selected cluster ``n`` and return a
    dictionary mapping each emittor to its edge weight.
    """
    return {
        source: attrs["weight"]
        for source, target, attrs in G.edges(data=True)
        if target == n
    }
f7fba6b1cb4b2fa2317e63ef6eaf4aedcee3d6a7
69,851
from typing import List


def split_multipart_field(field: str) -> List[str]:
    """
    Split a multi-part field on '|' into at most three parts.

    :param field: The multi-part field's value.
    :return: The parts; any '|' characters beyond the second remain in
        the final part.
    """
    parts = field.split("|", 2)
    return parts
00f85a2ef87b88f3223803ddfe3f5d5112909b0f
69,855