content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def ott_str_as_int(o):
    """Return the OTT Id `o` as an integer.

    `o` may be an int, or a string matching ^(OTT)?[0-9]+$ (the "ott"
    prefix is case-insensitive).

    :raises ValueError: if the string does not match the pattern above.
    """
    if isinstance(o, int):
        return o
    # Strip a case-insensitive "ott" prefix once, then parse; the
    # original retried the prefix check inside a bare `except`.
    if o.lower().startswith('ott'):
        return int(o[3:])
    return int(o)
50b7b4f4f84d49c394fdbdb83b1b72405c88f888
88,239
def parse_residue_spec(resspec): """ Parse a residue specification: [<chain>-][<resname>][[#]<resid>] where resid is /[0-9]+/. If resname ends in a number and a resid is also specified, the # separator is required. Returns a dictionary with keys 'chain', 'resname', and 'resid' for the fields that are specified. Resid will be an int. Parameters ---------- resspec: str Returns ------- dict """ # A-LYS2 or PO4#2 # <chain>-<resname><resid> *chain, res = resspec.split('-', 1) res, *resid = res.rsplit('#', 1) if resid: # [] if False resname = res resid = resid[0] else: idx = 0 for idx, char in reversed(list(enumerate(res))): if not char.isdigit(): idx += 1 break resname = res[:idx] resid = res[idx:] out = {} if resid: resid = int(resid) out['resid'] = resid if resname: out['resname'] = resname if chain: out['chain'] = chain[0] return out
f8b3aa8ef287567d25ab74679acbd067e6b98ce0
88,240
from typing import Dict
from typing import Any
import yaml


def load_config(filename: str) -> Dict[str, Any]:
    """Read a YAML file and return its contents as a dictionary.

    :param filename: The path of the yaml file.
    :type filename: str
    :return: A dictionary of the configuration.
    :rtype: Dict[str, Any]
    """
    with open(filename, 'rt') as stream:
        config = yaml.load(stream, Loader=yaml.FullLoader)
    return config
ad5560de9c0e4a085e1c32548289c3ace6023eed
88,241
def get_index(seq, value):
    """
    Find the first location in *seq* which contains a case-insensitive,
    whitespace-insensitive match for *value*.

    :param seq: a sequence of strings, or a single string (split on
        whitespace first).
    :param value: the string to look for.
    :return: index of the first match, or None if no match is found.
    """
    if isinstance(seq, str):
        seq = seq.split()
    value = value.lower().strip()
    for i, item in enumerate(seq):
        # Strip the candidate too, so list elements carrying stray
        # whitespace still match (the original only lowered them,
        # contradicting the docstring's "whitespace-insensitive").
        if item.lower().strip() == value:
            return i
    return None
84fcd6a72911f0f77d550372ac7e6562a4a12895
88,244
import yaml


def read_yaml_config(config_file: str) -> dict:
    """Parse a yaml config file.

    Args:
        config_file (str): path to config file to parse

    Returns:
        dict: the configuration dictionary
    """
    with open(config_file) as stream:
        parsed = yaml.load(stream, Loader=yaml.FullLoader)
    return parsed
8da8a115a67a3febeb16181f7605b4823d608d1b
88,245
from typing import Union


def get_non_unique_numbers(numbers: list[int]) -> Union[int, list[int]]:
    """
    Return non-unique numbers in a list of numbers.

    If only one non-unique number is found, return this number.
    If all numbers are unique, raise ValueError. Each non-unique number
    is reported once, in order of its first repetition.

    Usage:

    >>> get_non_unique_numbers([1, 3, 1])
    1
    >>> get_non_unique_numbers([10, 3, 10, 30, 0, 30])
    [10, 30]
    >>> get_non_unique_numbers([1, 2, 3])
    Traceback (most recent call last):
    ...
    ValueError: no unique number in numbers
    """
    seen = set()
    duplicates = []
    for number in numbers:
        if number in seen:
            # Report each duplicate only once; the original appended a
            # value again for every extra repetition (e.g. [1, 1, 1]
            # yielded [1, 1]).
            if number not in duplicates:
                duplicates.append(number)
        else:
            seen.add(number)
    if not duplicates:
        raise ValueError('no unique number in numbers')
    if len(duplicates) == 1:
        return duplicates[0]
    return duplicates
ba378d1ea4d7a100ebdda0860326487919ec1eef
88,249
import re


def remove_yaml_from_markdown(markdown_text: str) -> str:
    """Remove a leading YAML front-matter block from markdown text.

    Args:
        markdown_text: text of a markdown file.

    Returns:
        Text of the markdown file without the leading ``---``-delimited
        YAML block.
    """
    # re.DOTALL lets '.' span newlines; the original '(.|\n)*?'
    # alternation is equivalent but far slower on large inputs.
    return re.sub(r"^---.*?---\n", "", markdown_text.lstrip(),
                  flags=re.DOTALL).lstrip()
d8007c96786742457b471a1ebc5c92e54464ecab
88,251
def cal_origin_responsetime(dfs):
    """
    Sum of "turnaroundtimemsec" over rows where cachestatus == 0 and
    cacherefreshsrc == 'origin', as an int.

    sample output,
    ```
    "origin_response_time": 0,
    ```
    """
    origin_rows = (dfs["cachestatus"] == 0) & (dfs["cacherefreshsrc"] == "origin")
    total = dfs.loc[origin_rows, "turnaroundtimemsec"].sum()
    return int(total)
1150635d20916f97a731c4bb6e513c34b33e59f2
88,255
def abs_sqd(x):
    """Element-wise squared magnitude: real**2 + imag**2."""
    re_part = x.real
    im_part = x.imag
    return re_part * re_part + im_part * im_part
dee260169027ec69eafa9abee1b46a858d522a31
88,256
import copy


def isolate_and_merge_station(inv, network_id, station_id):
    """
    Takes an inventory object, isolates the given station and merges
    them.

    Merging is sometimes necessary as many files have the same station
    multiple times.

    Returns the processed inventory object. The original one will not
    be changed.

    :param inv: The inventory.
    :type inv: :class:`~obspy.core.inventory.inventory.Inventory`
    :param network_id: The network id.
    :type network_id: str
    :param station_id: The station id.
    :type station_id: str
    """
    inv = copy.deepcopy(inv.select(network=network_id,
                                   station=station_id,
                                   keep_empty=True))

    # Merge networks if necessary.
    if len(inv.networks) != 1:
        network = inv.networks[0]
        for other_network in inv.networks[1:]:
            # Merge the stations.
            network.stations.extend(other_network.stations)
            # Update the times if necessary.
            if other_network.start_date is not None:
                if network.start_date is None or \
                        network.start_date > other_network.start_date:
                    network.start_date = other_network.start_date
            # None is the "biggest" end_date.
            if network.end_date is not None and \
                    other_network.end_date is not None:
                if other_network.end_date > network.end_date:
                    network.end_date = other_network.end_date
            elif other_network.end_date is None:
                network.end_date = None
            # Update comments.
            network.comments = list(
                set(network.comments).union(set(other_network.comments)))
            # Update the number of stations. BUG FIX: the original
            # tested `if network.total_number_of_stations or ...`,
            # which always overwrote an already-set count and raised
            # TypeError (None < int) for an unset one; keep the larger
            # count instead.
            if other_network.total_number_of_stations:
                if not network.total_number_of_stations or \
                        network.total_number_of_stations < \
                        other_network.total_number_of_stations:
                    network.total_number_of_stations = \
                        other_network.total_number_of_stations
            # Update the other elements: first non-empty value wins.
            network.alternate_code = (network.alternate_code or
                                      other_network.alternate_code) or None
            network.description = (network.description or
                                   other_network.description) or None
            network.historical_code = (network.historical_code or
                                       other_network.historical_code) or None
            network.restricted_status = network.restricted_status or \
                other_network.restricted_status
        inv.networks = [network]

    # Merge stations if necessary.
    if len(inv.networks[0].stations) != 1:
        station = inv.networks[0].stations[0]
        for other_station in inv.networks[0].stations[1:]:
            # Merge the channels.
            station.channels.extend(other_station.channels)
            # Update the times if necessary.
            if other_station.start_date is not None:
                if station.start_date is None or \
                        station.start_date > other_station.start_date:
                    station.start_date = other_station.start_date
            # None is the "biggest" end_date.
            if station.end_date is not None and \
                    other_station.end_date is not None:
                if other_station.end_date > station.end_date:
                    station.end_date = other_station.end_date
            elif other_station.end_date is None:
                station.end_date = None
            # Update comments.
            station.comments = list(
                set(station.comments).union(set(other_station.comments)))
            # Update the number of channels (same fix as for the
            # network station count above).
            if other_station.total_number_of_channels:
                if not station.total_number_of_channels or \
                        station.total_number_of_channels < \
                        other_station.total_number_of_channels:
                    station.total_number_of_channels = \
                        other_station.total_number_of_channels
            # Update the other elements: first non-empty value wins.
            station.alternate_code = (station.alternate_code or
                                      other_station.alternate_code) or None
            station.description = (station.description or
                                   other_station.description) or None
            station.historical_code = (station.historical_code or
                                       other_station.historical_code) or None
            station.restricted_status = station.restricted_status or \
                other_station.restricted_status
        inv.networks[0].stations = [station]

    # Last but not least, remove duplicate channels. This is done on the
    # location and channel id, and the times, nothing else.
    unique_channels = []
    available_channel_hashes = []
    for channel in inv[0][0]:
        c_hash = hash((str(channel.start_date), str(channel.end_date),
                       channel.code, channel.location_code))
        if c_hash in available_channel_hashes:
            continue
        unique_channels.append(channel)
        available_channel_hashes.append(c_hash)
    inv[0][0].channels = unique_channels

    # Update the selected number of stations and channels.
    inv[0].selected_number_of_stations = 1
    inv[0][0].selected_number_of_channels = len(inv[0][0].channels)
    return inv
ce7756535f0fe95d411639e8e824975b384cbe9c
88,258
def get_gitbranch_from_url(url):
    """Extract the branch name from an url string (after the optional
    '#'), returning None if no branch name is specified.

    :param url: an url string, with optional '#' branch name appended
    :returns: the extracted branch name, or None
    """
    if '#' not in url:
        return None
    return url.split('#')[1]
d47e7df8bd9eb26ea85d26d430482de3a750790c
88,259
import re


def get_ioc_value_from_ioc_name(ioc_obj):
    """
    Extract SHA-256 from string:
    ([file:name = 'blabla' OR file:name = 'blabla'] AND [file:hashes.'SHA-256' = '1111'])" -> 1111
    """
    name = ioc_obj.get('name')
    match = re.search("(?<='SHA-256' = ').*?(?=')", name)  # guardrails-disable-line
    if match is None:
        return None
    return match.group(0)
9746333db056d07138b282afbfdde49d7efe8cbd
88,264
import torch def rank_segments(e, feature_segments): """ rank segments of e :param e: explanation e :param feature_segments: unranked feature segments (list of (sensor, t_start, t_end)) :returns: sorted list of the feature segments with the mean score as well """ scores = [torch.mean(e[d, t_start:t_end]).detach().cpu().numpy() for d, t_start, t_end in feature_segments] ranked_feature_segments = sorted(zip(scores, feature_segments), reverse=True) # sort by scores in descending order return ranked_feature_segments
0a2447e92ee570932effe57ad998bdbb16d40321
88,266
def neighbour(t, i, j):
    """
    Get the triangle edge neighbour of a triangle.

    :param t: List of triangle indices in the mesh. Size (n, 3) where n
        is the number of triangles.
    :param i: Triangle index.
    :param j: Edge index.
    :returns: Index of the triangle neighbouring triangle i along edge
        j, or None if no neighbour exists.
    """
    # Edge j is opposite vertex j: it joins the other two vertices.
    a = t[i][(j + 1) % 3]
    b = t[i][(j + 2) % 3]
    for idx, triangle in enumerate(t):
        if idx == i:
            continue
        if a in triangle and b in triangle:
            return idx
    return None
2a6e65738c655e57eb06b92976c52c3873972d89
88,268
import re


def license_number(value, space=True):
    """
    Validate a Zimbabwean driver's license number: six digits followed
    by two letters.

    :param value: the string to validate
    :param space: (Optional) whether a whitespace character is required
        between the six digits and the two letters
    :return: True if the string is valid, else False
    """
    if space:
        pattern = re.compile(r"^[0-9]{6}\s[a-zA-Z]{2}$")
    else:
        pattern = re.compile(r"^[0-9]{6}[a-zA-Z]{2}$")
    # fullmatch on a valid compiled pattern cannot raise re.error, so
    # the original try/except was dead code.
    return pattern.fullmatch(value) is not None
f35891613dd42ed5c9b02ed46a7b465921d44875
88,269
from datetime import datetime, timezone


def get_section1() -> dict:
    """Returns the section1 part of the message to be encoded.

    The date/time fields are filled from the current UTC time.
    """
    # datetime.utcnow() is deprecated since Python 3.12; an aware UTC
    # datetime yields the same field values used below.
    now = datetime.now(timezone.utc)
    section1 = {
        'originating_centre': 177,
        'sub_centre': 0,
        'data_category': 31,  # oceanographic data
        'sub_category': 4,  # subsurface float (profile)
        'local_category': 0,  # Ideally something specifies this as a
                              # marine mammal animal tag
        'master_table_version': 36,  # Future version
        'local_table_version': 255,  # Unknown
        'year': now.year,
        'month': now.month,
        'day': now.day,
        'hour': now.hour,
        'minute': now.minute,
        'second': now.second,
        'seq_no': 0,  # Original message
    }
    return section1
b5233d102f0abc95deb6de3f443f56fef76fb42d
88,271
def render_player_card(context, player):
    """Render a player in card format."""
    request = context["request"]
    return {"player": player, "request": request}
635f367563582a4270d16a922813bb3f6de28c7a
88,274
def derive_s3_path(image_path):
    """
    Given a path or s3 key, derives the expected s3 prefix based on the
    base filename: the basename is split on '-' and each part becomes
    one path component.

    Arguments
    ---------
    image_path : string
        the s3 key or file path to an nii.gz file

    Return
    ------
    path, basename : tuple (string, string)
        the path of the image as it would appear on s3 and the basename
        of the image without the extension

    Example
    -------
    >>> path, basename = derive_s3_path("data/ADNI-002_S_0413-20060224-T1w-000.nii.gz")
    >>> print(path)
    "ADNI/002_S_0413/20060224/T1w/000/"
    >>> print(basename)
    "ADNI-002_S_0413-20060224-T1w-000",
    """
    # NOTE(review): the original example showed a trailing "brain_ext/"
    # component that this code never appends; corrected above.
    basename = image_path.split('/')[-1].replace('.nii.gz', '')
    loc = basename.split('-')
    path = '/'.join(loc) + '/'
    return path, basename
02c9a549afdade820f44390b4348f17fb77353d0
88,275
def determine_metric(metrics, data):
    """
    Check which of the provided eval metrics is available in the
    current data dictionary.

    :return: (found, metric) where metric is the first entry of
        `metrics` present as a key of `data`, or (False, None).
    """
    for candidate in metrics:
        if candidate in data:
            return True, candidate
    return False, None
957906b2522be317787907d045f67377130f5260
88,277
def gen_range_str(min, max):
    """
    Generate a string showing a minimum and maximum value together with
    their range, e.g. ``<min> |-- <range> --| <max>``.

    Parameters
    ----------
    min : float
        Minimum value.
    max : float
        Maximum value.

    Returns
    -------
    :obj:`str`
    """
    span = max - min
    return '{:<.3} |-- {:^8.3} --| {:<9.3}'.format(min, span, max)
0d282c0c20aae5829ad6c04a68ca0f74a40021b0
88,278
def remove_quotes(fname_string):
    """Remove every single- and double-quote character from the string."""
    cleaned = fname_string
    for quote_char in ('"', "'"):
        cleaned = cleaned.replace(quote_char, '')
    return cleaned
fa62b33605dfe090862cf899cee53a1dcd6e339c
88,287
import ipaddress


def int32_to_ip(int32):
    """
    Take an unsigned 32 bit number and return a string representation
    of its IPv4 address.

    :param int32: 32 bit integer value.
    :return: string representation of int32 value.
    """
    address = ipaddress.IPv4Address(int32)
    return format(address)
69c53b2c90b86398d71bfb81bb8a7523523d5781
88,293
def query_str(work):
    """Return the scholar query string for a work: "<title> <author>"."""
    return "{} {}".format(work.title, work.author)
7902a42eb3aa6680f77599579f9126b4b621c524
88,306
import pprint


def display_with_pformat(name: str, bases: tuple, cls_dict: dict):
    """
    Metaclass which overrides the :meth:`__str__` of a class to use the
    `pprint` module to pretty-format the class's default
    :meth:`__repr__`.
    """
    def __str__(self):
        # pprint.pformat falls back to repr() for plain objects, so
        # this pretty-prints the instance's repr.
        return pprint.pformat(self)
    # Note: cls_dict is mutated in place before the class is created.
    cls_dict['__str__'] = __str__
    return type(name, bases, cls_dict)
a6122989c26b599493be7d616f197e539ad58302
88,307
import re


def escape_filename(value):
    """Replace characters that are unsafe in a file name with spaces,
    drop non-ASCII characters, and trim surrounding whitespace."""
    cleaned = re.sub(r'[<>:\/\\|?*\'"’“”„«»…–—¡¿]', ' ', str(value))
    # Non-ASCII characters become '?' during encoding, then spaces.
    cleaned = cleaned.encode('ascii', errors='replace').decode()
    cleaned = cleaned.replace('?', ' ')
    return cleaned.strip()
90486ec8aeecc188764dc717ba28a6a162d47dff
88,308
from collections import OrderedDict


def flatten_json(dictionary):
    """Recursively flattens a nested json.

    Nested dict keys are joined with '.', list items contribute their
    index as a key component. The result is sorted by key.

    :param dictionary: dict object to flatten
    :return: OrderedDict containing the flat json, sorted by key.
    """
    out = {}

    def flatten(element, name=''):
        # isinstance instead of `type(...) is`, so dict/list subclasses
        # are flattened too.
        if isinstance(element, dict):
            for key in element:
                flatten(element[key], name + key + '.')
        elif isinstance(element, list):
            for i, item in enumerate(element):
                flatten(item, name + str(i) + '.')
        else:
            out[name[:-1]] = element

    flatten(dictionary)
    # OrderedDict must come from `collections` -- the original imported
    # the deprecated `typing.OrderedDict` alias and called it.
    return OrderedDict(sorted(out.items()))
5552e6b10dd7a547d2082336f2045dfd9b3e58e4
88,309
import zlib def _crc32_checksum(filepath): """Calculate the checksum of a file using CRC32.""" with open(filepath, "rb") as f: checksum = zlib.crc32(f.read()) return checksum
21ab0fe07580c7176dec1f0d0ea280d9f72404ae
88,313
def modulo_complejos(num: list) -> list:
    """
    Compute the modulus of a complex number.

    :param num: two-element list [real, imaginary] representing the
        complex number
    :return: single-element list with the modulus, rounded to 2 decimals
    """
    real, imag = num[0], num[1]
    modulus = round((real ** 2 + imag ** 2) ** 0.5, 2)
    return [modulus]
65916c378a2eacded7f6783f07329f503e6280e1
88,317
def get_doc_data(gold_summ_docs):
    """
    Map each gold summary Document object to a list of
    (sent_index, sent_noun_set) tuples, one per sentence in the doc.

    :param gold_summ_docs: list of gold summary Document objects
    :return: {doc_obj: [(sent_index, sent_noun_set), ...]}
    """
    doc_dict = dict()
    for summary_doc in gold_summ_docs:
        doc_dict[summary_doc] = [
            (sent_index, sent_obj.nouns)
            for sent_index, sent_obj in enumerate(summary_doc.sentence_list)
        ]
    return doc_dict
7310d73539f8bbacf52b896fece61ba30e1f32d7
88,326
from typing import List
from typing import Tuple


def list_to_tuple(list_: List) -> Tuple:
    """
    Recursively transforms a list of lists into tuples of tuples.

    Parameters
    ----------
    list_: (nested) list

    Returns
    -------
    (nested) tuple
    """
    converted = []
    for item in list_:
        if isinstance(item, list):
            converted.append(list_to_tuple(item))
        else:
            converted.append(item)
    return tuple(converted)
2dc28e5e1520b8b2a3f01c7b9438777b7c09e1a6
88,327
def getInt(inputtext=''):
    """Prompt the user until they enter a valid integer.

    :param inputtext: extra text shown before the standard prompt
    :return: the integer entered by the user
    """
    while True:
        try:
            return int(input(inputtext + '\nPlease Enter an Integer:\n'))
        except ValueError:
            # Only non-integer input re-prompts; the original bare
            # `except` also swallowed KeyboardInterrupt and EOFError,
            # making the loop impossible to escape.
            print('INVALID ENTRY')
6c4506fc6f5c07e4617251c27dd0cc5ef76bbf1c
88,328
def bl_calculate_deposit(amount: int, percentage: int, years: int) -> float:
    """
    Calculate the final amount of a deposit with yearly compounding.

    :param amount: Start amount
    :param percentage: Bank percentage (yearly interest rate)
    :param years: Time of deposit in years
    :return: Amount of deposit
    """
    # amount * (1 + rate) ** years; the original "/ 1" (capitalization
    # periods per year hard-coded to 1) was a no-op and is dropped.
    return amount * (1 + percentage / 100) ** years
62e44772d6cc9b9aa0d661c7fc63c76513495e3a
88,329
from typing import Tuple


def pad_frame(frame_data: bytes, size: Tuple[int, int]) -> bytes:
    """Pad frame data with zero bytes to a full RGB frame.

    Args:
        frame_data (bytes): Any byte string
        size (Tuple[int, int]): Resolution of the image

    Returns:
        bytes: Padded frame data
    """
    width, height = size
    target_length = width * height * 3  # 3 bytes per RGB pixel
    return frame_data.ljust(target_length, b"\x00")
4a5ce35ae80eaf0bace3281a2258e3acd88cf648
88,332
def num_to_gast(num):
    """Wrap a number in a gast node: {"type": "num", "value": num}."""
    return {"type": "num", "value": num}
5c8aee881a4b908b34a901ce94e3b28f08187056
88,333
def is_jobtype_match_queue(jobtype, pq_dict):
    """
    Check if an analy/prod job can be run in a PanDA queue.

    :param jobtype: str: analy or prod
    :param pq_dict: dict: PQ info
    :return: bool
    """
    pq_jt_matrix = {
        'unified': ('prod', 'analy'),
        'production': ('prod',),
        'analysis': ('analy',),
        'special': ('prod',),
    }
    # Queues without an explicit type are treated as unified.
    pq_type = pq_dict.get('type', 'unified')
    return jobtype in pq_jt_matrix.get(pq_type, ())
31a5055c0cc3fce2fc91a87c4b61e5b108dc6fcc
88,335
from typing import Dict
from typing import Any


def wavefunction2d() -> Dict[str, Any]:
    """Plot defaults for plotting.wavefunction2d"""
    defaults: Dict[str, Any] = {"figsize": (8, 3)}
    return defaults
9797f27773ad22164f0ff883a2bfb3de57862c29
88,340
def args2string(args: dict) -> str:
    """Convert an args dictionary to a command-line string.

    Args:
        args (dict): A dictionary that contains parsed args, mapping
            argument names to lists of values (or a bare bool for
            flags).

    Return:
        A converted string.

    Example:
        >>> args = {'arg1': ['v1', 'v2'], 'arg2': ['v3']}
        >>> args2string(args)
        '--arg1 v1 v2 --arg2 v3'
    """
    text = []
    for key, value in args.items():
        text.append(f'--{key}')
        # The original tested `args[k] is not bool`, comparing against
        # the *type* bool -- always true -- so a boolean flag value
        # would have been iterated and crashed. Skip values for flags.
        if not isinstance(value, bool):
            text.extend(str(x) for x in value)
    return ' '.join(text)
4174b8d58b1915b68d61397a06c6874e42c62d67
88,342
def format_syntax_error(e: Exception):
    """
    Return a formatted string of a SyntaxError.
    Stolen from https://github.com/Rapptz/RoboDanny/blob/master/cogs/repl.py#L24-L25
    """
    formatted = "{0.text}\n{1:>{0.offset}}\n{2}: {0}".format(
        e, "^", type(e).__name__)
    # Collapse the blank line left when the error has no source text.
    return formatted.replace("\n\n", "\n")
8afc281d23c59e83707482c750674069c15f185e
88,343
def seconds(dhms):
    """Convert a time string "[[[DD:]HH:]MM:]SS" to seconds."""
    parts = [int(piece) for piece in dhms.split(':')]
    missing = 4 - len(parts)
    if missing < 0:
        raise ValueError('Too many components to match [[[DD:]HH:]MM:]SS')
    # Left-pad with zeros so days/hours/minutes default to 0.
    parts = [0] * missing + parts
    weights = (86400, 3600, 60, 1)
    return sum(weight * part for weight, part in zip(weights, parts))
a79d448ba6e6ae41b9a55c2c2312e4870f35e9b7
88,348
def get_sorted_vocab(d):
    """
    Sort the entire vocabulary (keys and keys of their value
    dictionaries).

    Input
    d -- dictionary mapping word-pairs to counts, created by
         cooccurrence_matrix(). We need only the keys for this step.

    Output
    vocab -- sorted list of strings
    """
    vocab = set(d)
    for val_dict in d.values():
        vocab.update(val_dict)
    return sorted(vocab)
364ca452665707fec6057d142f5066c1ad76e0fc
88,367
def getSwaggerPaginationDef(resultsPerPage):
    """Build the swagger spec section for the pagination parameter."""
    description = (
        "The page number for this paginated query "
        "({} results per page)".format(resultsPerPage)
    )
    return {
        "name": "page",
        "type": "int",
        "in": "query",
        "description": description,
    }
3a9804f491d9ec2714b257191d4c6a4edd7057b2
88,369
def get_input(prompt):
    """Get input (python 3 required)."""
    # Thin wrapper over builtins.input; presumably kept as a seam for
    # tests / py2-py3 compatibility -- confirm with call sites.
    return input(prompt)
f2d46e71437b860a88c9000f606ea16ed7832203
88,375
import torch


def ones(shape):
    """Return a trainable float32 Parameter of the given shape, all ones."""
    values = torch.ones(shape, dtype=torch.float32)
    return torch.nn.Parameter(values)
19950b81d04d0625de6b7a90782903bdd43e10b2
88,380
def LPStoGPM(Vlps):
    """
    Convert a volumetric flow rate from liters per second to gallons
    per minute.

    Conversion: 3.7854118 l = 1 gallon

    :param Vlps: volumetric flow rate [l/sec]
    :return Vgpm: volumetric flow rate [gpm]
    """
    # NOTE(review): the original (French) docstring stated the
    # conversion direction backwards ("gpm vers l/sec"); the code
    # converts l/s -> gal/s (divide by l per gallon) -> gal/min (* 60).
    Vgpm = Vlps / 3.7854118 * 60
    return Vgpm
c9ace44e2ecccfc47b5639905b65731114b704e2
88,381
import torch


def subsequent_mask(lens):
    """Build a lower-triangular boolean mask that hides future words.

    Args:
        lens: (bs,) sequence lengths
    Return:
        mask: (bs, max_len, max_len) boolean tensor, True where
        column index <= row index
    """
    batch_size = len(lens)
    max_len = max(lens)
    tril = torch.ones([batch_size, max_len, max_len]).tril_(0)
    return tril > 0
e5114e9670e6b9df5a0a3bf7c518d0f21b3f2336
88,385
import torch
import itertools


def pit_loss(
        estimate: torch.Tensor,
        target: torch.Tensor,
        axis: int,
        loss_fn=torch.nn.functional.mse_loss,
        return_permutation: bool = False
):
    """
    Permutation invariant loss function. Calls `loss_fn` on every
    possible permutation between `estimate`s and `target`s and returns
    the minimum loss among them. The tensors are permuted along `axis`.

    Does not support batch dimension. Does not support PackedSequence.

    Args:
        estimate: Padded sequence. The speaker axis is specified with
            `axis`, so the default shape is (T, K, F)
        target: Padded sequence with the same shape as `estimate`
            (defaults to (T, K, F))
        loss_fn: Loss function to apply on each permutation. It must
            accept two arguments (estimate and target) of the same
            shape that this function receives the arguments.
        axis: Speaker axis K. The permutation is applied along this
            axis. axis=-2 and an input shape of (T, K, F) corresponds
            to the old default behaviour.
        return_permutation: If `True`, this function returns the
            permutation that minimizes the loss along with the minimal
            loss otherwise it only returns the loss.

    Examples:
        >>> T, K, F = 4, 2, 5
        >>> estimate, target = torch.ones(T, K, F), torch.zeros(T, K, F)
        >>> pit_loss(estimate, target, 1)
        tensor(1.)

        >>> T, K, F = 4, 2, 5
        >>> estimate, target = torch.ones(T, K, F), torch.zeros(T, F, dtype=torch.int64)
        >>> pit_loss(estimate, target, 1, loss_fn=torch.nn.functional.cross_entropy)
        tensor(0.6931)

        >>> T, K, F = 4, 2, 5
        >>> estimate, target = torch.ones(K, F, T), torch.zeros(K, F, T)
        >>> pit_loss(estimate, target, 0)
        tensor(1.)

        >>> T, K, F = 4, 2, 5
        >>> estimate = torch.stack([torch.ones(F, T), torch.zeros(F, T)])
        >>> target = estimate[(1, 0), :, :]
        >>> pit_loss(estimate, target, axis=0, return_permutation=True)
        (tensor(0.), (1, 0))

        >>> K = 5
        >>> estimate, target = torch.ones(K), torch.zeros(K)
        >>> pit_loss(estimate, target, axis=0)
        tensor(1.)

        >>> A, B, K, C, F = 4, 5, 3, 100, 128
        >>> estimate, target = torch.ones(A, B, K, C, F), torch.zeros(A, B, K, C, F)
        >>> pit_loss(estimate, target, axis=-3)
        tensor(1.)
    """
    axis = axis % estimate.ndimension()
    num_sources = estimate.size()[axis]
    # Guard against a factorial blow-up of permutations.
    assert num_sources < 30, f'Are you sure? sources={num_sources}'
    if loss_fn in [torch.nn.functional.cross_entropy]:
        # cross_entropy consumes the class axis (dim 1) of `estimate`,
        # so `target` lacks exactly that axis.
        estimate_shape = list(estimate.shape)
        del estimate_shape[1]
        assert estimate_shape == list(target.shape), (
            f'{estimate.shape} (N, K, ...) does not match '
            f'{target.shape} (N, ...)'
        )
    else:
        assert estimate.size() == target.size(), (
            f'{estimate.size()} != {target.size()}'
        )

    # Evaluate the loss for every permutation of the speaker axis.
    prefix = (slice(None),) * axis
    permutations = list(itertools.permutations(range(num_sources)))
    losses = [
        loss_fn(estimate[prefix + (permutation, )], target)
        for permutation in permutations
    ]
    min_loss, min_index = torch.min(torch.stack(losses), dim=0)

    if return_permutation:
        return min_loss, permutations[int(min_index)]
    return min_loss
feabf5b625e915a1bee86df13394a49bfdf6f6f9
88,387
import struct


def pack128(int_val):
    """
    Pack an unsigned integer of up to 128 bits in big-endian format.

    Values fitting in 32 bits are packed as 4 bytes (preserving the
    original behaviour); anything larger is packed as 16 bytes.

    :raises ValueError: if int_val needs more than 128 bits (the
        original silently masked such values down to 128 bits).
    """
    max_int = 2 ** 128 - 1
    max_word = 2 ** 32 - 1
    if int_val > max_int:
        raise ValueError('int_val does not fit in 128 bits')
    if int_val <= max_word:
        return struct.pack('>L', int_val)
    # Split into four 32-bit words, least-significant first, then
    # reverse for big-endian ordering.
    words = []
    for _ in range(4):
        words.append(int(int_val & max_word))
        int_val >>= 32
    words.reverse()
    return struct.pack('>4I', *words)
3a81e58cd74e71b673bc841b6567e21cf0014991
88,389
def partTimeStamp(timeStamp):
    """
    Part the TimeStamp into date and time for writing in
    Measurementparameterlist.

    :param timeStamp: string with '_'-separated date and time
    :return: date, time
    """
    parts = timeStamp.split('_')
    return parts[0], parts[1]
b909c75243b54a70e9016ffae49fe14dfd8983bb
88,393
def type_chain(iterable, type_iterable):
    """
    Compares the type chain of an iterable, checking only the first
    element at each nesting level.

    Args:
        iterable (list): a (nested) iterable
        type_iterable (list): type chain

    Returns:
        bool: If the type chain is true for the iterable.
    """
    for expected_type in type_iterable:
        # Exact type match on purpose: subclasses do not qualify.
        if type(iterable) != expected_type:
            return False
        try:
            iterable = iterable[0]
        except (TypeError, IndexError, KeyError):
            # Not subscriptable or empty: keep the current object (the
            # original used a bare `except`, which also swallowed
            # KeyboardInterrupt).
            pass
    return True
aeb59bd949f96fa0b118f5e9de71b95bdecd8709
88,394
def versiontuple(v, version_index=None):
    """
    Convert a version string to a tuple of integers.

    Argument <v> is the version string; <version_index> refers to how
    many '.'-splitted components shall be returned (all by default).

    example:
    versiontuple("1.7.0")    -> tuple([1, 7, 0])
    versiontuple("1.7.0", 2) -> tuple([1, 7])
    versiontuple("1.7.0", 1) -> tuple([1])
    """
    # The old default of -1 silently dropped the last component,
    # contradicting the documented examples; None keeps them all.
    components = tuple(int(part) for part in v.split("."))
    if version_index is None:
        return components
    return components[:version_index]
e85d18e9005b0ffa9adb3e7dbb5c01c4fdb4a333
88,416
def quantize_verts(verts, n_bits=8):
    """Convert vertices in [-0.5, 0.5] to discrete values in
    [0, 2**n_bits - 1].

    NOTE(review): the original docstring claimed an input range of
    [-1., 1.] and an output bound of n_bits**2 - 1; the code below maps
    min_range=-0.5 .. max_range=0.5 onto 0 .. 2**n_bits - 1.
    """
    min_range = -0.5
    max_range = 0.5
    range_quantize = 2 ** n_bits - 1
    # Affine map from [min_range, max_range] to [0, range_quantize],
    # then truncate to integers.
    verts_quantize = (verts - min_range) * range_quantize / (
        max_range - min_range)
    return verts_quantize.astype('int32')
96c24f2f4cf1f1dbd75c1e731dabed51f7924583
88,419
def pad_and_join_list_of_strings(string_list: list, str_size: int):
    """Pad each entry to `str_size` characters (left-justified with
    spaces) and join the padded entries into one string.

    Unlike the original, the caller's list is no longer mutated in
    place.

    :param string_list: entries to pad
    :param str_size: minimum width of each padded entry
    :return: concatenation of the padded entries
    """
    return "".join(entry.ljust(str_size) for entry in string_list)
ffdc8357bcf9b9633af1dadb5959138e0234ab21
88,424
def format_word(raw, trs=None, sense=None):
    """Format a word from a template argument, with possibly a
    transcript and a sense attached to it.
    """
    parts = ["<i>" + raw.value + "</i>"]
    if trs is not None:
        parts.append(""", <span class="transcript">%s</span>""" % trs.value)
    if sense is not None:
        parts.append(" (&laquo; %s &raquo;)" % sense.value)
    return "".join(parts)
f354fc544e5c417129162aec30039274ec006653
88,425
def std_ref_form(ref_string):
    """
    Delete unnecessary chars from the string and separate combined
    references; return the references in a list.
    """
    # Drop the ' corr.' marker if present (replace is a no-op otherwise).
    ref_string = ref_string.replace(' corr.', '')
    # str.replace substitutes every occurrence at once; the original
    # `while ',' in ref_string` loop replaced only ', ' and therefore
    # spun forever on a comma not followed by a space.
    ref_string = ref_string.replace(', ', ']#[')
    return ref_string.split('#')
418bf952094e87a881fd609ffa7b7da979fc9287
88,427
def sort_positions(positions):
    """
    Utility routine to take two corner values and return the x and y
    range.

    Parameters
    ----------
    positions: A four element list or numpy array of float values;
        values are assumed to be [x1, y1, x2, y2] for two corner
        positions (x1, y1) and (x2, y2).

    Returns
    -------
    xmin, xmax, ymin, ymax: floats defining a rectangular area, or
        four Nones if the input does not hold exactly four non-None
        values.
    """
    if len(positions) != 4 or any(value is None for value in positions):
        return None, None, None, None
    x1, y1, x2, y2 = positions
    return min(x1, x2), max(x1, x2), min(y1, y2), max(y1, y2)
f8f22e6a7224a04ac285610131042def21930799
88,433
def _v_sum(t_arr, i): """ Generates the coefficient V for barycentric coordinates Parameters ---------- t_arr : iterable of floats values of t i : int index of current point. Returns ------- v_i : float coefficient V. """ n = len(t_arr) prod_coef = [ii for ii in range(n)] prod_coef.pop(i) v_i = 1.0 for jj in prod_coef: v_i *= t_arr[i] - t_arr[jj] return 1.0 / v_i
496019a16d21973544262afe0ce965ff8aec1402
88,434
def _agg_sum(iur, item, sims, use): """ Sum aggregate Args: iur(matrix._CSR): the item-user ratings matrix item(int): the item index in ``iur`` sims(numpy.ndarray): the similarities for the users who have rated ``item`` use(numpy.ndarray): positions in sims and the rating row to actually use """ x = 0.0 for j in use: x += sims[j] return x
0faa1080a8864d15b7f768f768a4e85b1b74cd0a
88,436
def swap_column(content, col_index, col_info=[]):
    """
    Swap column position in the table.

    Arguments:
    - the table content, a list of list of strings:
        - First dimension: the columns
        - Second dimension: the column's content
    - cursor index for col
    - optional information to append from command line (unused)

    Returns:
    - the table content, with the column moved
    """
    # When the cursor sits on the very last column, the source column
    # is the one two places back instead.
    if col_index == len(content):
        source = col_index - 2
    else:
        source = col_index
    moved = content.pop(source)
    content.insert(col_index - 1, moved)
    return content
f18ab28a1e634821209d1d01c4c4ec1171495406
88,442
def replace_string(string: str, from_idx: int, to_idx: int, repl: str) -> str:
    """
    Replace ``string[from_idx:to_idx]`` with `repl`.

    Example:
        >>> input_string = 'abcde123fghij'
        >>> replace_string(input_string, 5, 8, '000000000')
        'abcde000000000fghij'

    :param string: Original string which needs replacing.
    :param from_idx: Start index to replace from (inclusive).
    :param to_idx: End index to replace to (not inclusive).
    :param repl: String to put instead of ``string[from_idx:to_idx]``.
    :return: Replaced string.
    """
    head = string[:from_idx]
    tail = string[to_idx:]
    return head + repl + tail
a53c4ebaf9d6845af2e6627ad07f71a47a7b5e1d
88,444
import copy


def obfuscate_password(config: dict) -> dict:
    """Obfuscate the password value in the auth_basic config.

    :param config: config from ExternalTaskWorker or ExternalTaskClient
    :returns: a deep copy of config with the password replaced by
        '***'; the input dict is left untouched
    """
    sanitized = copy.deepcopy(config)
    auth = sanitized.get('auth_basic')
    if auth is not None and 'password' in auth:
        auth['password'] = '***'
    return sanitized
a5d8a8c8fabd0bafe4cc31e0655378b11c28d61f
88,451
from typing import List
from typing import Tuple


def find_insertion_index_for_version(content: List[str], version: str) -> Tuple[int, bool]:
    """
    Finds insertion index for the specified version from the .rst changelog content.

    :param content: changelog split into separate lines
    :param version: version to look for
    :return: Tuple : insertion_index, append (whether to append or insert the changelog)
    """
    # True once a line matching the version heading has been seen.
    changelog_found = False
    # Set right after the version heading so that the heading's own dotted
    # underline (the very next line) is not mistaken for the underline of
    # the NEXT section.
    skip_next_line = False
    index = 0
    for index, line in enumerate(content):
        if not changelog_found and line.strip() == version:
            changelog_found = True
            skip_next_line = True
        elif not skip_next_line and line and all(char == '.' for char in line):
            # A non-empty line consisting solely of dots is an .rst section
            # underline; its section title is one line above, so the
            # insertion point is two lines above the underline.
            # NOTE(review): assumes content always has a line at index - 2
            # here (i.e. an underline never appears in the first two lines)
            # — confirm against real changelog inputs.
            return index - 2, changelog_found
        else:
            skip_next_line = False
    # No later section underline found: caller should append at the last
    # scanned index (or 0 for empty content).
    return index, changelog_found
84f37e1bd763f694dfc2b231b93f38c05b0fa200
88,452
from typing import Dict
from typing import List


def calc_freq(input_list: List[int]) -> Dict[int, int]:
    """
    Count how many times each integer occurs in the input list.

    Parameters
    ----------
    input_list : List[int]
        List of integers.

    Returns
    -------
    Dict[int, int]
        Mapping from each distinct integer to its occurrence count.

    Raises
    ------
    TypeError
        When the input is not a list.
    """
    if not isinstance(input_list, list):
        raise TypeError("input_list must be List of Integers (type: List[int]).")
    counts: Dict[int, int] = {}
    for value in input_list:
        if value in counts:
            counts[value] += 1
        else:
            counts[value] = 1
    return counts
aae9b89008f2fe2a2536913d49c0b175efed262b
88,456
def not_(arg):
    """:yaql:operator not

    Returns true if arg evaluates to false. Otherwise returns false.

    :signature: not arg
    :arg arg: value to be converted
    :argType arg: any
    :returnType: boolean

    .. code::

        yaql> not true
        false
        yaql> not {}
        true
        yaql> not [1]
        false
    """
    # Boolean negation based on the argument's truthiness.
    return False if arg else True
0e1d13f4ebb206604d7aaf78c70b8d55ebded253
88,460
from typing import Mapping
from typing import Sequence


def parse_cell_methods(cell_methods: Sequence[Mapping[str, str]]) -> str:
    """Parse cell methods as YAML reads them into a CF-style string.

    Each mapping contributes one or more ``"<dim>: <method>"`` pairs; pairs
    and mappings are joined with single spaces, matching the CF conventions
    ``cell_methods`` attribute format (e.g. ``"lat: mean lon: mean"``).

    Fix: pairs coming from a single multi-key mapping were previously
    concatenated with no separator (producing ``"lat: meanlon: mean"``);
    they are now space-separated. Single-key mappings are unaffected.

    :param cell_methods: sequence of {dimension: method} mappings
    :return: space-joined cell-methods string
    """
    methods = []
    for cell_method in cell_methods:
        methods.append(" ".join(f"{dim}: {meth}" for dim, meth in cell_method.items()))
    return " ".join(methods)
9bcd26857f6df37590a6caadf238424ad52b0141
88,466
def get_flag_variables(ds):
    """
    Return the names of variables defined as flag variables.

    A variable counts as a flag variable when its ``standard_name`` string
    contains ``status_flag`` or when it carries a ``flag_meanings`` attribute.

    :param netCDF4.Dataset ds: An open netCDF4 Dataset
    """
    flagged = []
    for name, variable in ds.variables.items():
        std_name = getattr(variable, "standard_name", None)
        by_standard_name = isinstance(std_name, str) and "status_flag" in std_name
        if by_standard_name or hasattr(variable, "flag_meanings"):
            flagged.append(name)
    return flagged
89a2813e5410102ca2e7e5c727e108040bef7471
88,471
import re


def filter_abstract(s):
    """
    Some abstracts have metadata tags that should not be displayed in public
    views. If present, only the text between {AbstractBegin} and {AbstractEnd}
    should be displayed; otherwise the input is returned unchanged.

    Fix: the pattern is now a raw string (the original ``'\\{'`` escapes are
    invalid string escapes and emit SyntaxWarning on Python 3.12+), and the
    redundant ``[\\w\\s\\W\\S]`` character class is simplified to the
    equivalent ``[\\s\\S]`` (any character, including newlines).

    :param s: abstract text, possibly wrapped in metadata tags
    :return: the unwrapped abstract, stripped of surrounding whitespace
    """
    match = re.search(r'\{AbstractBegin\}([\s\S]*)\{AbstractEnd\}', s)
    if match:
        return match.group(1).strip()
    return s
9bfa1871be6f36c4e783b926a42ca704cf3a2b30
88,473
def extract_dict_to_ordered_key_lists(dictionary, data_name, key_name='times'):
    """
    Create a dictionary of two parallel lists: the sorted keys of the input
    dictionary (assumed to represent times) and the values in that same order.

    Fix: the sorted key list is computed once and reused (the original sorted
    the keys twice).

    Args:
        dictionary: The dictionary containing the data to be extracted
            (N.B. assumption is that the keys refer to times)
        data_name: The key to use for the extracted values in the result
        key_name: Key to assign to the sorted list of input keys

    Returns:
        Dictionary with keys ``key_name`` and ``data_name``
    """
    ordered_keys = sorted(dictionary.keys())
    return {key_name: ordered_keys,
            data_name: [dictionary[time] for time in ordered_keys]}
391f2c46840f0f380f1f2405d55ec68ed143aab6
88,474
def get_file(name):
    """Open the named test-data file in binary mode and return the handle.

    NOTE(review): the path is relative to the current working directory
    (``../testdata/``) and the caller is responsible for closing the handle.
    """
    path = f"../testdata/{name}"
    return open(path, "rb")
79c9edccbddc54665be803838facef18b8113ebd
88,479
import csv def read_from_csv(fn): """Read in the CSV file and return a list of (email, password) tuples""" users = [] csvfile = csv.reader(open(fn, "r")) for row in csvfile: if row[0] != "": users.append(row) return users
f495330ce4cc8d7d325ff7b16795faa0bafec063
88,481
def dtIsArticle(dt):
    """
    Check whether a determiner is an English article (definite or indefinite).

    Argument:
        dt: the determiner name

    Return:
        True if the determiner is 'a', 'an' or 'the' (case-insensitive)
    """
    return dt.lower() in ('a', 'an', 'the')
ab9553aa275eeb5d838d1ffe0b5332051f64e59f
88,483
def _correct_folder(folder: str) -> str: """Ensures the folder follows a standard. Pathlib.parent in the root folder results in '.', whereas in other places we should use '' for the root folder. This function makes sure the root folder is always empty string. Args: folder: the folder to be corrected. Returns: The corrected folder. """ if folder == '.': return '' return folder
4a4126192445acfac113e3199e618c86253ebdeb
88,490
def merge(incoming=None, output=None, overwrite=False):
    """
    Recursively merges two dictionaries by overlaying the values of
    `incoming` onto a copy of `output`. By default, values already present
    in `output` are kept; pass `overwrite=True` to let `incoming` win.

    Useful for, eg, applying some user-inputted configuration (incoming)
    over top of some already-existing (default) values (output).

    Fixes vs. the original:
    - mutable `{}` default arguments replaced with `None` sentinels;
    - `overwrite` is now propagated into recursive calls (it was silently
      reset to False for nested dicts);
    - recursion only happens when BOTH sides hold a dict; previously a dict
      in `incoming` over a non-dict in `output` crashed — it now follows
      the normal overwrite rule;
    - `overwrite == False` replaced with the idiomatic `not overwrite`.
    """
    incoming = {} if incoming is None else incoming
    output = {} if output is None else output
    _output = output.copy()
    for _key, _value in incoming.items():
        existing = _output.get(_key)
        if _key in _output and isinstance(_value, dict) and isinstance(existing, dict):
            # Both sides are dicts: merge them recursively.
            _output[_key] = merge(_value, existing, overwrite)
        else:
            if _key in _output and not overwrite:
                continue  # keep the existing value
            _output[_key] = _value
    return _output
ab52aa480d420ae854852c366eb64962e213aa46
88,498
def get_synsets_lemmas(synsets):
    """
    Take a list of synsets and return the lemma names of all synsets,
    flattened into a single list (order preserved).
    """
    return [lemma.name() for synset in synsets for lemma in synset.lemmas()]
b51f9b5858ded4b3d599104e7ff8d6e601477e9a
88,499
def create_hello_world_cpp() -> str:
    """Fixture returning a minimal C++ hello-world program as source text."""
    source = '#include<iostream>\n using namespace std; int main() {cout << "Hello World"; return 0;}'
    return source
c86c0643dddefc9d653e73ab8e24dfbfe80af172
88,500
def format_field(center, field):
    """
    Given a registration center and a field name, return the field's value
    formatted for a CSV export.

    ``center_type`` is rendered via the model's display helper; any other
    field is read with ``getattr``. Missing values become the empty string —
    we never want to print "None", but "0" is okay.
    """
    if field == 'center_type':
        return str(center.get_center_type_display())
    value = getattr(center, field, None)
    return '' if value is None else str(value)
840ef7e9a49b1e918680644948a005c61a834a81
88,505
def draw_point(r, g, b, m, n, grid):
    """Draw a point on the ppm grid, in place.

    Arguments
    ---------
    r, g, b -- RGB values for the point
    m, n -- row, column
    grid -- ppm grid being edited (returned for convenience)
    """
    pixel = grid[m][n]
    pixel[0] = r
    pixel[1] = g
    pixel[2] = b
    return grid
f4dcbc3365a04f45efda29d2515a90155f25c516
88,511
def _delta(i, j): """The Kronecker delta. Returns 1 if i == j, 0 otherwise.""" return int(i == j)
0604992804236694b4698679deed5f190eee137c
88,520
def set_limits(output, plot_limits=None, legend_limits=None):
    """Set plot and legend limits according to inputs.

    If limits are given and valid, they are returned; missing bounds are
    filled from the data. Out-of-order bounds are reversed.

    Fixes vs. the original:
    - the mutable ``[None, None]`` defaults were shared across calls AND
      mutated in place (``reverse()``, item assignment), so limits computed
      on one call leaked into later calls;
    - caller-supplied limit lists are copied, never mutated;
    - out-of-order detection no longer misfires when a bound equals 0
      (``all()`` treated 0 as "not set").

    Args:
        output (np array, nx1): output dataset being plotted
        plot_limits (float, 2x1 list, optional): [min, max] limits of values
            that will be displayed in plot
        legend_limits (float, 2x1 list, optional): [min, max] limits of
            colorbar scale

    Returns:
        plot_limits (float, 2x1 list): [min, max] limits of plotted values
        legend_limits (float, 2x1 list): [min, max] limits of colorbar scale
    """
    plot_limits = [None, None] if plot_limits is None else list(plot_limits)
    legend_limits = [None, None] if legend_limits is None else list(legend_limits)

    def _normalize(limits):
        # Reverse only when both bounds are supplied and out of order.
        if limits[0] is not None and limits[1] is not None and limits[0] > limits[1]:
            limits.reverse()
        # Fill unsupplied bounds from the data range.
        if limits[0] is None:
            limits[0] = min(output)
        if limits[1] is None:
            limits[1] = max(output)
        return limits

    return _normalize(plot_limits), _normalize(legend_limits)
03d44f3cb4564a9e2e22e6e069e0cb9d04ab97d7
88,522
def expertises_to_comma_separated_string(queryset):
    """
    Convert an Expertise queryset to a comma-separated string.

    :param queryset: Queryset (or any iterable) of Expertise objects
    :return: comma-separated string of the objects' ``name`` attributes
    """
    names = [str(item.name) for item in queryset]
    return ", ".join(names)
f59aef1c8942bf8da66a35b570af1701f24da7d7
88,525
def filterMatches(matches, prevKeypoints, currKeypoints, threshold=30):
    """
    Discard matches between consecutive frames based on the distance between
    keypoints, which has to be below ``threshold``, and on whether their
    scale increases (keypoints that don't grow in scale are not useful for
    TTC estimation).

    In the paper, the threshold for the L2 (Euclidean) distance was 0.25.
    For ORB, most inliers are included (and most outliers excluded) with a
    threshold of 64.

    queryIdx refers to prevKeypoints, trainIdx refers to currKeypoints.
    """
    return [
        match for match in matches
        if match.distance < threshold
        and currKeypoints[match.trainIdx].size > prevKeypoints[match.queryIdx].size
    ]
8a65497ceeb0e79d79b5f32ca653dcb1e278a27a
88,526
def pathappend(base, suffix, sep='/'):
    """Append a path component to a base file path or URL.

    Like os.path.join, but always treats the suffix as a relative path.
    The separator can be any single character, by default `/`.

    >>> pathappend('http://example.com/api', 'test/')
    'http://example.com/api/test/'
    >>> pathappend('/etc', '/test')
    '/etc/test'
    >>> pathappend('12:34:56:', ':78:90', sep=':')
    '12:34:56:78:90'
    """
    left = base.rstrip(sep)
    right = suffix.lstrip(sep)
    return f"{left}{sep}{right}"
869caf1149f50c03fdf483f9f2e992d3422cf0ea
88,528
def unique(sequence):
    """Return the distinct elements of *sequence* (order is NOT preserved)."""
    distinct = set(sequence)
    return list(distinct)
b82b472e652c4841f6e829ad2f1d277976370880
88,531
import csv


def load_jobs_list(filename):
    """Load the content of a CSV file into a dictionary.

    Args:
        filename (str): Name of the CSV file to convert to a dictionary.

    Returns:
        dict: mapping of the first column to the second, header row skipped.
    """
    with open(filename) as handle:
        reader = csv.reader(handle)
        next(reader, None)  # skip the header row
        return {row[0]: row[1] for row in reader}
8ada6922ee2730030c086ae5b4968edd00578d41
88,540
import json


def cfg_to_json_str(cfg):
    """Serialize a config object to a 4-space-indented JSON string,
    preserving key order."""
    rendered = json.dumps(cfg, sort_keys=False, indent=4)
    return rendered
c261d06f775218871a6aa16a974337075ccb2103
88,541
def render_external_question(url, frame_height=0):
    """
    Renders a URL within an ExternalQuestion XML object for use as a task.

    :param url: The URL of the task to display to Workers
    :param frame_height: Frame height to use for the Worker viewport, zero by default to use the whole window
    :return: The rendered ExternalQuestion XML string
    """
    # NOTE(review): url and frame_height are interpolated without XML
    # escaping — assumes callers pass trusted, pre-validated values;
    # confirm upstream before exposing to arbitrary input.
    return '''
    <ExternalQuestion xmlns="http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2006-07-14/ExternalQuestion.xsd">
        <ExternalURL>{}</ExternalURL><FrameHeight>{}</FrameHeight>
    </ExternalQuestion>'''.format(url, frame_height)
42fa927e02d4b4a7a7c7597e95366a98e7ae431d
88,544
def keywords_polarity_week(df, cw_weeknum):
    """
    Concatenate all tweets from ``df`` for week ``cw_weeknum`` into one
    string per polarity.

    Parameters
    ----------
    df : pandas.core.frame.DataFrame
        Dataframe containing tweets to extract from (columns: 'week',
        'polarity', 'tweet').
    cw_weeknum : int
        Week number.

    Returns
    -------
    positive_words : str
        Joined string from all positive tweets.
    negative_words : str
        Joined string from all negative tweets.
    """
    def _words_for(polarity):
        # Slice the rows for the requested week and polarity.
        subset = df.loc[(df['week'] == cw_weeknum) & (df['polarity'] == polarity)].copy()
        # Strip list-literal punctuation from each stored tweet and tokenize.
        token_lists = subset['tweet'].apply(
            lambda text: "".join(text).replace("'", "").replace("[", "").replace("]", "").replace(",", "").split()
        )
        # Flatten all token lists into one space-joined string.
        return ' '.join(word for tokens in token_lists for word in tokens)

    return _words_for('positive'), _words_for('negative')
0d3b7394e3e905f7ff13b1a71bff35b711f6f62c
88,545
def initialize_test_obj(fao=None, nffa=None, cfda_num="00.000", sub_tier_code="1234", sub_fund_agency_code=None,
                        ppop_code="NY00000", ppop_zip4a=None, ppop_cd=None, le_zip5=None, le_zip4=None, record_type=2,
                        award_mod_amend=None, fain=None, uri=None, cldi=None, awarding_office='033103',
                        funding_office='033103', legal_city="WASHINGTON", legal_state="DC",
                        primary_place_country='USA', legal_country='USA', detached_award_financial_assistance_id=None,
                        job_id=None):
    """Build the dict of FABS record fields fed into the fabs_derivations
    function; every keyword maps onto one record column."""
    return {
        'federal_action_obligation': fao,
        'non_federal_funding_amount': nffa,
        'cfda_number': cfda_num,
        'awarding_sub_tier_agency_c': sub_tier_code,
        'funding_sub_tier_agency_co': sub_fund_agency_code,
        'place_of_performance_code': ppop_code,
        'place_of_performance_zip4a': ppop_zip4a,
        'place_of_performance_congr': ppop_cd,
        'legal_entity_zip5': le_zip5,
        'legal_entity_zip_last4': le_zip4,
        'record_type': record_type,
        'award_modification_amendme': award_mod_amend,
        'fain': fain,
        'uri': uri,
        'correction_late_delete_ind': cldi,
        'awarding_office_code': awarding_office,
        'funding_office_code': funding_office,
        'legal_entity_city_name': legal_city,
        'legal_entity_state_code': legal_state,
        'place_of_perform_country_c': primary_place_country,
        'legal_entity_country_code': legal_country,
        'detached_award_financial_assistance_id': detached_award_financial_assistance_id,
        'job_id': job_id
    }
cd396159b4ed576b493c7e90fdbd79dd55137561
88,548
def is_hex(s):
    """
    Test whether a string is a hexadecimal number in string representation.

    :param s: The string to test.
    :return: True if hexadecimal, False if not.
    """
    try:
        int(s, 16)
    except ValueError:
        return False
    return True
c2d595aa43f8f8b24f8cf24db706fbb663225fb9
88,549
def convert_to_fortran_string(string):
    """
    Convert a parameter string to the format expected by inpgen: wrap it in
    double quotes unless it is already quoted (ignoring surrounding
    whitespace for the check).

    :param string: some string
    :returns: string in the right format (extra "" added when missing)
    """
    trimmed = string.strip()
    already_quoted = trimmed.startswith("\"") and trimmed.endswith("\"")
    if already_quoted:
        return string
    return f'"{string}"'
09f4b2ab33fb3c60adcd52d1cd6eb5727970e6de
88,551
def splitem(query):
    """
    Split a query into choices.

    >>> splitem('dog, cat')
    ['dog', 'cat']

    Disregards trailing punctuation.

    >>> splitem('dogs, cats???')
    ['dogs', 'cats']
    >>> splitem('cats!!!')
    ['cats']

    Allow or.

    >>> splitem('dogs, cats or prarie dogs?')
    ['dogs', 'cats', 'prarie dogs']

    Honors serial commas.

    >>> splitem('dogs, cats, or prarie dogs?')
    ['dogs', 'cats', 'prarie dogs']

    Allow choices to be prefixed by some ignored prompt.

    >>> splitem('stuff: a, b, c')
    ['a', 'b', 'c']
    """
    # Drop trailing punctuation, then everything up to the last ':' prompt.
    _, _, remainder = query.rstrip('?.!').rpartition(':')
    parts = remainder.split(',')
    # The final comma-separated part may itself be an "x or y" pair.
    parts[-1:] = parts[-1].split(' or ')
    return [part.strip() for part in parts if part.strip()]
cef457ed560db569ff9c8714d3b2f767c8935c85
88,555
def get_fw_setting(fw_conn, items_xpath):
    """Get firewall setting info via the device's XML API.

    Args:
        fw_conn (PanDevice): A panos object for the device
        items_xpath (str): The items xpath for the setting

    Returns:
        xml_data (Element): XML data returned by the firewall
    """
    full_xpath = "/config/devices/entry[@name='localhost.localdomain']" + items_xpath
    return fw_conn.xapi.get(xpath=full_xpath)
78b5c063c66d53436e7d20f46afa272862404026
88,558
def client_2(network):
    """Client fixture: returns the network's second client node."""
    second_node = network.clients[1]
    return second_node
2fb4578f97ea19f6ba5f8a40460a6a6897c300db
88,560
def strip_suffix(text, suffix):
    """
    Cut a set of the last characters from a provided string.

    Improvement: the manual ``endswith`` + slice arithmetic is replaced with
    ``str.removesuffix`` (Python 3.9+, which this codebase already targets);
    it handles the "suffix not present" and empty-suffix cases identically.

    :param text: Base string to cut (may be None, returned unchanged)
    :param suffix: String to remove if found at the end of text
    :return: text without the provided suffix
    """
    if text is None:
        return None
    return text.removesuffix(suffix)
883ccee3bd3c48b80839d8ad4838a77720c28faf
88,568
import configparser


def config(section, file='database.ini'):
    """Parse an INI file and return the settings of one section as a dict.

    Args:
        section (str) - name of the section in the configuration INI file
        file (str) - file name of the INI file

    Returns:
        dict - configuration settings read from the INI file

    Raises:
        Exception - when the section is not present in the file
    """
    parser = configparser.ConfigParser()
    parser.read(file)
    if not parser.has_section(section):
        raise Exception('{0} not found in the {1} file'.format(section, file))
    return {name: value for name, value in parser.items(section)}
b2d1369d952c5802ac93c88e6b2acd406290c6d0
88,576
import random


def tire_entiers(entiers_disponibles: list[int], n: int) -> list[int]:
    """Randomly draw ``n`` integers for the game from the available list.

    The returned values are sorted from smallest to largest.

    NOTE(review): ``random.choices`` samples WITH replacement, so the same
    integer can be drawn several times — confirm this is intended.
    """
    tirage = random.choices(entiers_disponibles, k=n)
    return sorted(tirage)
f38725cc5c4601df9704de5ab1d295bd69f500aa
88,577
def _get_contacts(all_atoms, ions, cutoff): """Identify the atoms closest to the ions and return them.""" contacts = {} for ion in ions: contacts[ion.resid] = [] for atom in all_atoms: dist = ( (ion.x - atom.x) ** 2 + (ion.y - atom.y) ** 2 + (ion.z - atom.z) ** 2 ) ** 0.5 if dist <= cutoff and ion.resname != atom.resname: contacts[ion.resid].append((atom, dist)) return contacts
930a73cd973b72ce570989f70ad12dbc8f0f4266
88,578
def extract_list_item(source_data, item_pos):
    """Extract the item at ``item_pos`` from each inner list.

    Args:
        source_data (list): A list of lists.
        item_pos (int): Index to pick from every inner list.

    Returns:
        list: The extracted items, one per inner list.
    """
    return [row[item_pos] for row in source_data]
6539364f712fc021fb9cef929132b453210f340b
88,582
def feature_value_match_dict_from_column_names(column_names, prefix=None, suffix=None, col_name_to_feature_vals_delimiter='_match_for_____', feature_vals_delimiter='_'): """Given a list of column names of the form COLUMN_NAME_match_for: FEATURE_VALUE1 FEATURE_VALUE2, FEATURE_VALUE3 return a dictionary map of the form {COLUMN_NAME: [FEATURE_VALUE1, FEATURE_VALUE2, FEATURE_VALUE3]}. Optional arguments for column prefix, suffix, col_to_feature_delimiter and feature_value_delimeter Parameters ---------- column_names: list[str] A list of string column names for which to extract to a dictionary of the form {column_name:[list of feature values]} prefix: str A string prefix to remove from the created columns suffix: str A string suffix to from the created columns col_name_to_feature_vals_delimiter : str, Default = '_match_for: ' The string delimiter that seperates the column features values from the column name feature_vals_delimiter: str, Default = ', ' The string delimiter that seperates the features values from each other Example --------- feature_value_match_dict_from_column_names([ 'Sparse_feature_aggregation_Claim Classification_1mo_match_for: Catastrophic', 'Sparse_feature_aggregation_Injury Type_1mo_match_for: Permanent Partial-Unscheduled', 'Sparse_feature_aggregation_riss_match_for: 14.0, 17.0, 12.0, 13.0'], prefix='Sparse_feature_aggregation_') >>> {'Claim Classification_1mo': ['Catastrophic'], 'Injury Type_1mo': ['Permanent Partial-Unscheduled'], 'riss': ['14.0', '17.0', '12.0', '13.0']} """ # If single string column name passed, # turn it into a list if isinstance(column_names, str): column_names = [column_names] # Remove prefix/suffix if specified if prefix: column_names = [col.replace(prefix, "") for col in column_names] if suffix: column_names = [col.replace(suffix, "") for col in column_names] # Create empty map match_value_map = {} # Iterate through column list for column in column_names: # Split the into column name and feature value column_name, 
match_values = column.split(col_name_to_feature_vals_delimiter) # Extract just the feature values match_values_list = match_values.split(feature_vals_delimiter) # Add to feature value map match_value_map[column_name] = match_values_list return match_value_map
72de4e16f2854a90f12b06b1f7a29d844ad7b265
88,584
import typing


def is_from_typing_module(cls):
    """Tell whether *cls* was defined in the :mod:`typing` module.

    >>> is_from_typing_module(typing.Any)
    True
    >>> is_from_typing_module(typing.Callable[[],typing.IO[bytes]])
    True
    >>> is_from_typing_module(str)
    False
    """
    defining_module = cls.__module__
    return defining_module == typing.__name__
1e3d17177daadf307ad3d9556d66f896c0bd7258
88,587
def sensors_data(conn, SENSORS_DATA):
    """
    Insert one sensors reading into the SENSORS_DATA table.

    :param conn: open database connection (DB-API 2.0, '?' paramstyle)
    :param SENSORS_DATA: 11-item sequence of column values, in the order
        DEVICE_NAME, DATE, AMBIENT_TEMP, GROUND_TEMP, AIR_QUALITY,
        AIR_PRESSURE, HUMIDITY, WIND_DIRECTION, WIND_SPEED,
        WIND_GUST_SPEED, RAINFALL
    :return: rowid of the inserted record
    """
    sql = ''' INSERT INTO SENSORS_DATA(DEVICE_NAME,DATE,AMBIENT_TEMP,GROUND_TEMP,AIR_QUALITY,AIR_PRESSURE,HUMIDITY,WIND_DIRECTION,WIND_SPEED,WIND_GUST_SPEED,RAINFALL)
              VALUES(?,?,?,?,?,?,?,?,?,?,?) '''
    cursor = conn.cursor()
    cursor.execute(sql, SENSORS_DATA)
    conn.commit()
    return cursor.lastrowid
e2974ac0bb0b1353f5072466ab11dbc9065cffb3
88,591