content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def using_reverseproxy(config):
    """Report whether a reverse proxy is in use.

    A reverse proxy is assumed whenever the HTTP and HTTPS endpoints in
    ``config`` differ.
    """
    return config.http_endpoint != config.https_endpoint
ff901de4aa797dce61187612cb52ffebcdfc2d8b
90,760
def pink_power(f, alpha=1, scale=1):
    """Power spectral density of pink noise.

    Args:
        f: frequency.
        alpha: exponent characteristic to pink noise.
        scale: scaling factor of proportionality.

    Returns:
        The power spectral density ``scale / f**alpha``.
    """
    attenuation = f ** alpha
    return scale / attenuation
71a59ec778a7d1a80d88740cf128f054aa19c374
90,761
def _set_data_contains_dq(set_data: dict) -> bool: """ Returns True if set_data contains at least one DQ'd player. """ for entrant in set_data['slots']: if entrant['standing']['stats']['score']['value'] is None: return True if entrant['standing']['stats']['score']['value'] < 0: # DQ = -1: return True return False
80b6f884643cd33e98c5c01c52636bb8a4410e22
90,764
def get_feat_ind(semantic_features, feat):
    """Return the index of ``feat`` in ``semantic_features['features']``.

    Returns None when the feature is not present.
    """
    for position, name in enumerate(semantic_features['features']):
        if name == feat:
            return position
    return None
c9106a0d588f7eefaafd40251692906e6fd9faa5
90,767
import operator


def check_operator(dropdown):
    """Convert the string value of a DropDown element into an operator object.

    Args:
        dropdown: DropDown object from the algorithm class (its ``value``
            attribute holds the comparison name).

    Returns:
        The matching ``operator`` callable, or None for unknown values.
    """
    dispatch = {
        "strictly smaller": operator.lt,
        "smaller or equal": operator.le,
        "equal": operator.eq,
        "greater or equal": operator.ge,
        "strictly greater": operator.gt,
    }
    return dispatch.get(dropdown.value)
3c0d7fd3d4cf0d12d250316079d022488e69695c
90,769
def ways_to_climb(num_steps, max_step_size):
    """Count the ways to climb ``num_steps`` with steps of size 1..max_step_size.

    Bottom-up dynamic programming: ``ways[s]`` holds the number of distinct
    step sequences reaching step ``s``.

    :param num_steps: total number of steps to climb
    :param max_step_size: largest single step allowed
    :return: number of distinct step sequences
    """
    # Base cases: one way to stand still, one way to take a single step.
    ways = {0: 1, 1: 1}
    for step in range(2, num_steps + 1):
        total = 0
        # Sum over every step size that could have been the last move.
        for size in range(1, min(step, max_step_size) + 1):
            total += ways[step - size]
        ways[step] = total
    return ways[num_steps]
c6ffc1979cb532362ab5f6f608ee84d829ec1643
90,770
def get_filter_par(filter_group, filter_ids, key):
    """
    Given a pandas groupby object 'filter_group' that group the filters by
    filter_number (giving a filter set) and a key, get the first value for
    that key in each group.

    NOTE(review): assumes ``filter_group[i][0]`` yields the first row of
    group ``i`` as a mapping supporting ``[key]`` — confirm against the
    caller's groupby construction.
    """
    return [filter_group[i][0][key] for i in filter_ids]
bd5f9a08c20152956443d8f39a0ebe4ed36f14f6
90,773
def compute_spike_rate(spikes):
    """Estimate spike rate from a vector of spike times, in seconds.

    Parameters
    ----------
    spikes : 1d array
        Spike times, in seconds.

    Returns
    -------
    float
        Average firing rate: spike count divided by the span between the
        first and last spike.

    Examples
    --------
    Compute spike rate of 6 spikes

    >>> spikes = [0.5, 1, 1.5, 2, 2.5, 3]
    >>> compute_spike_rate(spikes)
    2.4
    """
    span = spikes[-1] - spikes[0]
    return len(spikes) / span
3c9ddb4b32f658dc6a375763a6df9f25b67bc51d
90,782
import torch


def get_rois_target_levels(levels, base_scale, rois):
    """
    Assign proposals to different level feature map to roi pooling

    Arguments:
        rois (FloatTensor): [R, 5] (batch_ix, x1, y1, x2, y2)
        levels (list of int): [L], levels. e.g.[2, 3, 4, 5, 6]
        base_scale: scale of the minimum level

    Returns:
        int64 tensor of shape [R] with one pyramid level per proposal,
        clamped into [min(levels), max(levels)].
    """
    # Box extents; the +1 treats the coordinates as inclusive pixel indices.
    w = rois[:, 3] - rois[:, 1] + 1
    h = rois[:, 4] - rois[:, 2] + 1
    # Geometric mean of width and height as the proposal's scale.
    scale = (w * h)**0.5
    # eps nudges values sitting exactly on a power-of-two boundary
    # (presumably to avoid rounding just below a level — confirm).
    eps = 1e-6
    target_levels = (scale / base_scale + eps).log2().floor()
    target_levels = target_levels.to(dtype=torch.int64)
    min_level, max_level = min(levels), max(levels)
    return torch.clamp(target_levels, min=min_level, max=max_level)
64bfc4faddcd85815cc68056b774a1cfdf270d70
90,783
def split_rows(array: list, l: int = 9) -> list:
    """Split a flat list into consecutive chunks of ``l`` elements each.

    Raises ValueError when ``len(array)`` is not a multiple of ``l``.
    """
    if len(array) % l:
        raise ValueError("split_rows(): Rows in list have different lengths")
    chunks = []
    for start in range(0, len(array), l):
        chunks.append(array[start:start + l])
    return chunks
3f9934f92f6891f95c868e207831634f62e2eaed
90,785
def occurence_data(pat_df, column):
    """Count how often each value occurs in one dataframe column.

    :param pat_df: Dataframe
    :param column: Column included in Dataframe
    :return: Series of value occurences
    """
    selected = pat_df[column]
    return selected.value_counts()
7bec7cec90c0f575aa5e2dfd4855106caf03a31a
90,794
def layer_attributes(lyr):
    """Return the attribute (field) names of the given vector layer."""
    fields = lyr.dataProvider().fields()
    return [field.name() for field in fields]
21b32a51f94f047a4d6ec4a3ca48376fbb897f15
90,795
def calculate_fcc(listA, listB):
    """
    Calculates the fraction of common elements between two lists
    taking into account chain IDs
    """
    common = listA.intersection(listB)
    common_reverse = listB.intersection(listA)
    return (len(common), len(common_reverse))
0a51cd85d817eafb838b825f944b1953c2cd20f2
90,797
from typing import List
from typing import Dict


def idx(words: List[str]) -> Dict:
    """Create a mapping from words to indexes.

    Args:
        words: List of words

    Returns:
        w2i: Dict mapping each distinct word to the index of its first
            occurrence in ``words``.
    """
    w2i: Dict[str, int] = {}
    for position, word in enumerate(words):
        w2i.setdefault(word, position)
    return w2i
173ff21b8c56dd67d38af216ac4481bc455bf397
90,801
def queens_solved(organisms):
    """Determine if we have solved the problem.

    Scan the population for an organism whose fitness equals its genome
    length; return 1 when one exists (a solution), otherwise 0.
    """
    for candidate in organisms:
        if candidate.fitness == len(candidate.genome):
            return 1
    return 0
aff13abbfc317b70cf591f948168cecebd031b97
90,806
def generate_room(width: int, height: int) -> list:
    """Generate a rectangular room with given width and height.

    Returns a list of ``height`` strings of length ``width``: '-' along the
    top and bottom walls, '|' along the side walls, '.' for floor tiles.
    """
    room = []
    for i in range(height):
        string = ""
        for j in range(width):
            # Bug fix: the top/bottom rows are bounded by height, not width —
            # the original used (0, width - 1) and broke non-square rooms.
            if i in (0, height - 1):
                string += "-"
            else:
                if j in (0, width - 1):
                    string += "|"
                else:
                    string += "."
        room.append(string)
    return room
c1fd717913b77f0f3cb87b98b3593525c938a1fa
90,807
def _dict_merge(dominant, recessive): """ Combines the two dicts. In case of duplicate keys, the values of 'dominant' are used. """ for key, value in recessive.items(): dominant[key] = dominant.setdefault(key, value) return dominant
7703a2886dfd8af6afca433733e5918fc97d67ce
90,811
def verify(joined):
    """
    Checks that the passed otu and sequences constitute valid Virtool records
    and can be included in a otu index. Error fields are:

    * empty_otu - otu has no isolates associated with it.
    * empty_isolate - isolates that have no sequences associated with them.
    * empty_sequence - sequences that have a zero length sequence field.
    * isolate_inconsistency - otu has isolates containing different numbers
      of sequences.

    :param joined: a joined otu
    :type joined: dict
    :return: the errors dict if any error is present, otherwise None.
    :rtype: Union[dict, None]
    """
    errors = {
        "empty_otu": len(joined["isolates"]) == 0,
        "empty_isolate": list(),
        "empty_sequence": list(),
        "isolate_inconsistency": False
    }

    isolate_sequence_counts = list()

    # Append the isolate_ids of any isolates without sequences to empty_isolate.
    # Append any sequences that have an empty sequence field to empty_sequence.
    for isolate in joined["isolates"]:
        isolate_sequences = isolate["sequences"]
        isolate_sequence_count = len(isolate_sequences)

        # If there are no sequences attached to the isolate it gets an
        # empty_isolate error.
        if isolate_sequence_count == 0:
            errors["empty_isolate"].append(isolate["id"])

        isolate_sequence_counts.append(isolate_sequence_count)

        errors["empty_sequence"] += filter(
            lambda sequence: len(sequence["sequence"]) == 0, isolate_sequences)

    # Give an isolate_inconsistency error if the number of sequences is not the
    # same for every isolate. Only give the error if the otu is not also empty
    # (empty_otu error) and has no empty isolates.
    errors["isolate_inconsistency"] = (
        len(set(isolate_sequence_counts)) != 1 and not
        (errors["empty_otu"] or errors["empty_isolate"])
    )

    # If there is an error in the otu, return the errors object.
    # Otherwise return None. Falsy error values (e.g. empty lists) are
    # normalized to False so the returned dict is uniform.
    has_errors = False

    for key, value in errors.items():
        if value:
            has_errors = True
        else:
            errors[key] = False

    if has_errors:
        return errors

    return None
a9a0833082bed8ce8feaa5cf8f226c921ddbc462
90,814
def toy2(choice):
    """Return one of three fake line lists to play with."""
    catalog = {
        1: {"lee": 115.27, "doug": 115.0, "marc": 112.0},
        2: {"leslie": 111.00, "mark": 110.0, "peter": 113.0},
    }
    fallback = {"lisa": 114.0, "robert": 112.1, "kevin": 112.2}
    return catalog.get(choice, fallback)
e28de9963d395ed7f21c60433922f1217080872d
90,815
from pathlib import Path


def ds_kwargs(pudl_settings_fixture, request):
    """Return a dictionary of keyword args for creating a PUDL datastore.

    Args:
        pudl_settings_fixture: mapping with "pudl_in" (a path-like) and
            "sandbox" entries.
        request: pytest request object; its config supplies the
            --gcs-cache-path command-line option.

    Returns:
        dict with gcs_cache_path, local_cache_path and sandbox entries.
    """
    return dict(
        gcs_cache_path=request.config.getoption("--gcs-cache-path"),
        # Local download cache lives under <pudl_in>/data.
        local_cache_path=Path(pudl_settings_fixture["pudl_in"]) / "data",
        sandbox=pudl_settings_fixture["sandbox"],
    )
42e602a3c3621eafc6f3d2edb582a6c4decaec19
90,818
import math


def is_power(n: int, p: int) -> bool:
    """Determines if the natural number n is a perfect power with exponent p.

    Specifically, returns True iff n = m^p for some natural number m.
    Both the floor and ceiling of the floating-point root are tried to
    absorb rounding error.
    """
    approx_root = n ** (1 / p)
    floor_candidate = int(approx_root) ** p
    ceil_candidate = int(math.ceil(approx_root)) ** p
    return n in (floor_candidate, ceil_candidate)
a3fd67e29b56938d3080cb3954407b810789bb7f
90,821
def capitalize(words):
    """Capitalize the first character of every string in ``words``.

    Unlike ``str.capitalize``, characters after the first keep their
    original case. Empty strings are passed through unchanged — the
    original ``w[0]`` indexing raised IndexError on them.
    """
    return [w[0].capitalize() + w[1:] if w else w for w in words]
a534df904b0c45d4001d829b2450b9d702694d67
90,822
def get_marginal_rna_obs(r: int, p_max: int):
    """Get the observable for a marginalized RNA abundance.

    The formula sums the species ``x_{r}_{p}`` over all p in [0, p_max).
    """
    leading_terms = [f'x_{r}_{p} + ' for p in range(p_max - 1)]
    formula = ''.join(leading_terms) + f'x_{r}_{p_max - 1}'
    return {'name': f'x_r{r}', 'formula': formula}
8d005c68fd2c56f2d161bea47862b4590b4505d9
90,823
import token


def canonical_category_name(tok):
    """
    Returns the 'canonical' name of the token's category.

    >>> canonical_category_name((1, 'for', (1, 1), (1, 3), 1))
    'NAME'
    >>> canonical_category_name(make_whitespace(' ', (1, 1)))
    'WHITESPACE'
    """
    category = tok[0]
    # Unknown categories (e.g. synthetic whitespace tokens) fall through
    # unchanged.
    return token.tok_name.get(category, category)
d59a45db4b740b80a3c0068371f6dc47fac87c2a
90,825
def is_leap_day(date):
    """Check if the given date is a leap day (February 29th)."""
    return (date.month, date.day) == (2, 29)
1428acde79572ec35998b7e246eda391802ae655
90,830
def get_eval_dir_basename(task: str, split: str) -> str:
    """Returns the basename for eval directory.

    Args:
        task: a seqio eval task name.
        split: split name.
    """
    return '_'.join(['eval', task, split])
352be2bb6a4b89807351fe83efdf1f9878655e22
90,834
import getpass


def prompt_for_new_password() -> str:
    """
    Prompt the user to enter a new password, with confirmation.

    Loops until the two hidden entries match, then returns the password.
    """
    while True:
        passw: str = getpass.getpass()
        passw2: str = getpass.getpass()
        if passw == passw2:
            return passw
        else:
            print('Passwords do not match')
dbfcd5845693a5c6af28bc8103280b556e7ac8b6
90,837
def size_to_str(b: int) -> str:
    """Format a number of bytes as a string in B/KB/MB/GB.

    Picks the largest unit in which the value is still >= 1. Values below
    1 byte (including 0) fall back to plain bytes — the original raised
    ValueError there (``min()`` over an empty list).
    """
    postfixes = ['B', 'KB', 'MB', 'GB']
    vals = [b / (1000 ** p) for p in range(4)]
    candidates = [x for x in vals if x >= 1]
    if not candidates:
        return f"{float(b):0.2f} B"
    v = min(candidates)
    return f"{v:0.2f} {postfixes[vals.index(v)]}"
2622aecbacc320da847bc0f2e1fbda6f9d3af3c5
90,848
def _wait_for_confirmation(client, transaction_id, timeout):
    """
    Wait until the transaction is confirmed or rejected, or until 'timeout'
    number of rounds have passed.

    Args:
        client: algod client used to poll the network
        transaction_id (str): the transaction to wait for
        timeout (int): maximum number of rounds to wait

    Returns:
        dict: pending transaction information, or throws an error if the
            transaction is not confirmed or rejected in the next timeout
            rounds
    """
    start_round = client.status()["last-round"] + 1
    current_round = start_round

    while current_round < start_round + timeout:
        try:
            pending_txn = client.pending_transaction_info(transaction_id)
        except Exception:
            # NOTE(review): silently returns None on any lookup error —
            # callers cannot distinguish this from a missing transaction;
            # confirm this is intended.
            return
        if pending_txn.get("confirmed-round", 0) > 0:
            return pending_txn
        elif pending_txn["pool-error"]:
            raise Exception("pool error: {}".format(pending_txn["pool-error"]))
        # Block until the next round is committed before polling again.
        client.status_after_block(current_round)
        current_round += 1
    raise Exception(
        "pending tx not found in timeout rounds, timeout value = : {}".format(timeout)
    )
bc692aa635e10b47a2297dc093832d4fccd66f95
90,849
def _get_antpos(uvd, ENU=False): """Retrieve {ant: pos} dictionary from a UVData object.""" if ENU: pos, ant = uvd.get_ENU_antpos() else: ant = uvd.antenna_numbers pos = uvd.antenna_positions return dict(zip(ant, pos))
d4d467944ccbb01ac7b926c1b223472333d57897
90,851
def is_a_b(variant, variant_dict):
    """
    Is the value of the variant either 'A' or 'B'? Filters out junk data

    :param variant: candidate value to validate
    :param variant_dict: mapping whose values are the accepted variants
    :return: True or False
    """
    for accepted in variant_dict.values():
        if variant == accepted:
            return True
    return False
bf88607d07adb97b297d138101994c268a7bb22b
90,855
def get_indexes(seq, item):
    """Return all indexes of an item in a sequence.

    :param seq: sequence to check
    :param item: item to find indexes of
    :return: list of indexes
    """
    found = []
    search_from = 0
    while True:
        try:
            where = seq.index(item, search_from)
        except ValueError:
            return found
        found.append(where)
        search_from = where + 1
a1134c2464f17b12c211ad5072d9f629123237db
90,856
def prob_win(a, b):
    """
    Calculates the chance player A will beat player B, given their
    respective ELOs (standard logistic Elo expectation).
    """
    exponent = (b - a) / 400
    return 1 / (1 + 10 ** exponent)
2136ad6c5ef5056e9679f30b2e390c168af88cdb
90,857
def aggregate_publish_stats(stats_set):
    """
    For a set of per process _publish_ stats, make some basic aggregated stats

    timings are a simple mean of the input timings. ie the aggregate "minimum"
    is the average of the minimum of each process, not the absolute minimum of
    any process. Likewise, aggregate "stddev" is a simple mean of the stddev
    from each process, not an entire population stddev.
    """
    def mean_of(field):
        values = [entry[field] for entry in stats_set]
        return sum(values) / len(values)

    n_procs = len(stats_set)
    count_ok = sum(entry["count_ok"] for entry in stats_set)
    count_total = sum(entry["count_total"] for entry in stats_set)
    return {
        "clientid": "Aggregate stats (simple avg) for %d processes" % n_procs,
        "count_ok": count_ok,
        "count_total": count_total,
        "rate_ok": count_ok / count_total,
        "time_min": mean_of("time_min"),
        "time_max": mean_of("time_max"),
        "time_mean": mean_of("time_mean"),
        "time_stddev": mean_of("time_stddev"),
        # Aggregate throughput scales the mean per-process rate back up.
        "msgs_per_sec": mean_of("msgs_per_sec") * n_procs,
    }
d9c7faa1607a8abef07cf725d665e51e6f42756e
90,860
from pathlib import Path


def extract_run_name(samplesheet: Path) -> str:
    """
    Retrieves the 'Experiment Name' from SampleSheet.csv

    :param samplesheet: Path to SampleSheet.csv
    :return: value of 'Experiment Name'
    :raises Exception: when neither row is present in the file
    """
    with open(str(samplesheet)) as f:
        for line in f:
            # Row format: "Experiment Name,<value>,..."
            if 'Experiment Name' in line:
                experiment_name = line.split(',')[1].strip()
                return experiment_name
            # NOTE(review): 'Description' acts as a fallback, but whichever
            # of the two rows appears FIRST in the file wins — confirm that
            # ordering assumption holds for all sample sheets.
            elif 'Description' in line:
                experiment_name = line.split(',')[1].strip()
                return experiment_name
        else:
            # for/else: reached only when no matching row was found.
            raise Exception(f"Could not find 'Experiment Name' in {samplesheet}")
c45678a3bbe4c0ad22b56d45702e18a983a5053b
90,866
import re


def parse_int(string):
    """
    Finds the first integer in a string without casting it.

    :param string: text to scan
    :return: the first run of digits as a str, or None when there is none
    """
    match = re.search(r'\d+', string)
    return match.group(0) if match else None
3e0e00dc65cdd774e320f282d55800bcb2bdca91
90,868
from typing import Collection
import requests


def efetch_sequences_request(headers: Collection) -> requests.Response:
    """Sends POST request for sequence retrieval from NCBI Entrez.

    :param headers: sequence identifiers; joined with commas into the 'id' field
    :return: the successful HTTP response
    :raises requests.HTTPError: when the response status is not 200
    """
    response = requests.post(
        "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?",
        params={"db": "nuccore", "rettype": "fasta"},
        # IDs go in a multipart 'files' field — presumably to avoid URL
        # length limits for large ID lists; confirm.
        files={"id": ",".join(headers)},
    )
    if response.status_code != 200:
        raise requests.HTTPError(
            f"Error fetching sequences from NCBI [code {response.status_code}]."
            " Bad query IDs?"
        )
    return response
948eca0497ca2569ca86b369c8361c1650c67e17
90,874
from typing import Union


def to_cpu_str(n_cpus: Union[int, float]) -> str:
    """Convert number of cpus to cpu string (e.g. 0.5 -> '500m').

    Uses the coarsest unit (milli-, then micro-cores) in which the value
    is integral, falling back to nanocores.
    """
    if n_cpus == 0:
        return "0"
    for factor, suffix in ((1000, "m"), (1000000, "u")):
        scaled = n_cpus * factor
        if scaled % 1 == 0:
            return f"{int(scaled)}{suffix}"
    return f"{int(n_cpus * 1000000000)}n"
32e257ee71ec07383c9cde1ec3a8334af7a5864a
90,875
import math


def rotate(vector, angle):
    """Rotate a vector (x, y) by an angle in radians (counter-clockwise)."""
    x, y = vector
    sin = math.sin(angle)
    cos = math.cos(angle)
    rotated_x = cos * x - sin * y
    rotated_y = sin * x + cos * y
    return (rotated_x, rotated_y)
146dae0b4570eaaa38d4971fcdbcf790ca30d3c9
90,877
def force_to_energy_grad(dataset):
    """
    Converts forces to energy gradients in a dataset. This conforms to
    the notation that a key with `_grad` is the gradient of the property
    preceding it. Modifies the database in-place.

    Args:
        dataset (nff.data.Dataset): dataset whose props may contain 'forces'

    Returns:
        success (bool): True when forces were removed and 'energy_grad'
            became the new key, False when no forces were present.
    """
    if 'forces' not in dataset.props.keys():
        return False
    forces = dataset.props.pop('forces')
    # Energy gradient is the negated force.
    dataset.props['energy_grad'] = [-f for f in forces]
    return True
584095bea241bd2a08b8b99824d5ef9aa2b1499e
90,883
from pathlib import Path def _make_path(base: Path, segment: str) -> Path: """ Returns the segment relative to the given base, if it's a relative path Absolute paths are returned as is. """ original_path = Path(segment) if original_path.is_absolute(): return original_path return base.joinpath(original_path)
fd009c2224b3f61759af57a7705579904dae48ba
90,886
def fib(n):
    """Return the list of Fibonacci numbers that are strictly less than ``n``."""
    series = []
    current, following = 0, 1
    while current < n:
        series.append(current)
        current, following = following, current + following
    return series
718ab358e7aa889fe63e0d1a549db7950322108f
90,890
def get_institution_url(base_url):
    """
    Clean up a given base URL: trim surrounding whitespace and drop any
    trailing slashes.

    :param base_url: The base URL of the API.
    :type base_url: str
    :rtype: str
    """
    return base_url.strip().rstrip("/")
0a691c041b73d5f8ac74c1ad264c5e0d8ff2af92
90,891
from pathlib import Path


def genome_fasta_dir(data_dir: Path) -> Path:
    """Return the genome fasta directory under ``data_dir``."""
    return data_dir.joinpath("genome_fasta")
5ae94b3123e728d8e19ac132896fd1aa256c0d5e
90,892
def index_columns(df, none_name=None):
    """Return list of column names that form the (multi-)index or None,
    if index is a single unnamed column."""
    try:
        # MultiIndex exposes .levels; collect each level's name.
        return [l.name for l in df.index.levels]
    except AttributeError:
        # Plain (single-level) index.
        name = df.index.name
        if name is not None:
            return [name]
        elif none_name is not None:
            return [none_name]
        # Falls through to an implicit None when the index is unnamed and
        # no none_name fallback was supplied.
5543eeb534a79f56814b81159460e9459ae9bb40
90,895
def preprocess_image(img):
    """Rescale image values linearly onto the unit interval [0, 1]."""
    lo = img.min()
    hi = img.max()
    return (img - lo) / (hi - lo)
6ef23d00d99bdd0b48837a24c5b6b557f39848f5
90,896
def time_to_str(seconds):
    """
    time_to_str(seconds: int) -> str

    Return a string describing the number of seconds in a human readable
    manner using days, hours, minutes and seconds.

    The original was an empty stub (restored from __doc__) that always
    returned "" — this implements the documented contract.
    """
    remaining = int(seconds)
    parts = []
    for unit_seconds, label in ((86400, "day"), (3600, "hour"),
                                (60, "minute"), (1, "second")):
        amount, remaining = divmod(remaining, unit_seconds)
        if amount:
            parts.append(f"{amount} {label}{'s' if amount != 1 else ''}")
    return ", ".join(parts) if parts else "0 seconds"
aca82a728cad15f844eb21338b0426c50a7c19f4
90,902
def get_completion_word(details):
    """Find the completed text from some Vim completion data.

    Args:
        details (dict[str, str]): Data that comes from Vim after completion
            finishes. For more information, check out Vim's help
            documentation. `:help v:completed_item`.

    Returns:
        str: The completed function, or '' when the user made no selection.
    """
    # An empty / missing 'user_data' means nothing was actually selected.
    if details.get('user_data', '') == '':
        return ''
    return details['word']
ad76932e7b59257c2ca96a16fe27ed7e57dd3ef7
90,903
import itertools


def ndgrid_inds(sz):
    """
    Return a sequnce of tuples of indices as if generated by nested
    comprehensions.

    Example:
        ndgrid_inds((ni, nj)) yields the same sequence as
        [(i, j) for i in range(ni) for j in range(nj)]

    The iterates are always tuples.
    """
    axes = [range(extent) for extent in sz]
    return itertools.product(*axes)
c62cd2450ebb52bb4ed9ce359335dc367542bb33
90,904
import requests


def login(url, auth, **kwargs):
    """
    Requires a url and (username, password) tuple, and returns a session
    token to be passed with future requests.

    :param url: base URL of the API
    :param auth: (username, password) tuple for HTTP basic auth
    :param kwargs: supports 'verify' to control TLS certificate checking
        (default True)
    :return: value of the 'X-Zerto-Session' response header (may be None)
    :raises requests.HTTPError: on a non-2xx response
    """
    url = '{0}/v1/session/add'.format(url)
    response = requests.post(url, auth=auth, verify=kwargs.get('verify', True))
    response.raise_for_status()
    return response.headers.get('X-Zerto-Session')
0b8e2fdec9786b2bce3f932348ee855da10abdce
90,909
def build_content_disposition(filename):
    """Builds a content-disposition header.

    Args:
        filename: The filename for the content disposition attachment.

    Returns:
        A content-disposition header, or empty string if no filename provided.
    """
    if filename:
        return 'attachment; filename=%s' % filename
    return ''
fbf6bad9180511b68f9d5c1cad8f9d0516562759
90,910
def get_period(start, end, peak, tsend):
    """Return the onset/decline period for a mhw.

    For onset if event starts on 1st day of timeseries, then:
    if peak on 1st day of event, onset period -> 1 day
    else -> period=peak.
    In any other case period = peak + 0.5
    For decline if event ends on last day of timeseries, then:
    if peak on last day of event, onset period -> 1 day
    else -> period=(end - start - peak).
    In any other case period = (end - start - peak) + 0.5

    Parameters
    ----------
    start: pandas Series
        Index of start of event along time axis
    end: pandas Series
        Index of end of event along time axis
    peak: pandas Series
        Index of peak of event respect the event itself -> index_peak - index_start
    tsend: int
        Index of last element of series

    Returns
    -------
    onset_period: pandas Series
        Period of onset of MHWs
    decline_period: pandas Series
        Period of decline of MHWs
    """
    # Days from peak to event end.
    esp = end - start - peak
    # Onset: a zero peak offset is clamped to 1 day; events not starting
    # on day 0 get the extra half-day.
    x = peak.where(peak != 0, 1)
    onset_period = x.where(start == 0, x + 0.5)
    # NOTE(review): `peak != tsend` compares the *relative* peak index with
    # the absolute end-of-series index; the docstring describes "peak on
    # last day of event" — confirm this condition matches that intent.
    y = esp.where(peak != tsend, 1)
    decline_period = y.where(end == tsend, y + 0.5)
    return onset_period, decline_period
30a4f14440684ecebb5a605f8ae46e0a06de9f18
90,922
import re


def is_valid_mac_address(mac):
    """Validates the format of a MAC address.

    Args:
        mac (str): the MAC address to validate

    Returns:
        bool: True if valid, False otherwise

    Examples:
        >>> is_valid_mac_address('invalid_mac')
        False
        >>> is_valid_mac_address('28:cf:e9:18:ca:01')
        True
    """
    # Six hex octets joined by a consistent ':' or '-' separator (the
    # backreference \1 rejects mixed separators).
    regex = r"[0-9a-f]{2}([-:])[0-9a-f]{2}(\1[0-9a-f]{2}){4}"
    # Bug fix: the original returned the Match object / None despite the
    # documented bool return; fullmatch also stops '$' from accepting a
    # trailing newline.
    return bool(re.fullmatch(regex, mac.lower()))
3191042ff43a71a389fd78c1309edb654c7a53f5
90,925
def listToString(data):
    """
    Return a string built by joining the list's items with ", ".

    Example:
        listToString(['apple', 'pear', 'cherry'])  # 'apple, pear, cherry'

    :type data: list
    :rtype: str
    """
    # Convert all items to string and strip list/quote decorations.
    items = [str(item) for item in data]
    text = str(items)
    for decoration in ("[", "]", "'", '"'):
        text = text.replace(decoration, "")
    return text
51f3a18fe7085c0b1d9b933eb0412c58015c3457
90,932
import _ast


def CompareOpMap(operator):
    """Maps operator strings for boolean operations to their _ast node.

    Raises KeyError for unknown operator strings.
    """
    node_class = {
        '==': _ast.Eq,
        '!=': _ast.NotEq,
        '<': _ast.Lt,
        '<=': _ast.LtE,
        '>': _ast.Gt,
        '>=': _ast.GtE,
        'is': _ast.Is,
        'is not': _ast.IsNot,
        'in': _ast.In,
        'not in': _ast.NotIn,
    }[operator]
    return node_class()
c998c13c49a6ad9569c6c8be742bd8bff658c61a
90,934
import re
import keyword


def sanitize(name):
    """Lower-case name and replace all non-word chars by `_`.

    A leading digit is prefixed with `_`; if the result is a Python
    keyword (like `return`) a trailing `_` is appended.
    """
    cleaned = re.sub(r'\W|^(?=\d)', '_', name.lower())
    if keyword.iskeyword(cleaned):
        return cleaned + '_'
    return cleaned
f44d41086a95dffa450f6687bf910d4829d153f9
90,938
def get_predecessor_list(data):
    """Get the predecessor's list for a Data object as a set.

    Returns an empty set when ``data.predecessor_list`` is empty or None.
    """
    if not data.predecessor_list:
        return set()
    return set(data.predecessor_list)
4b399267bd82d93a88e5c5ea5d9e42be55162cfe
90,943
def _s2t_idx(s_idx, sV, tV, oracle=None): """ Translates an feature-index from the source domain to the feature-index of the target domain. If the source and target share the language, the vocabularies are the only data structures involved. If otherwise, the oracle is used to translate the word before retrieving the index. """ t_idx = None word = sV.idx2word(s_idx) if oracle: word = oracle.source2target(word) if word: t_idx = tV.word2idx(word) return t_idx
2a8f3bc2f4c042b091f713f8929d74c8bd8a2fcb
90,945
def normalizeInterpolationFactor(value):
    """
    Normalizes interpolation factor.

    * **value** must be an :ref:`type-int-float`, ``tuple`` or ``list``.
    * If **value** is a ``tuple`` or ``list``, it must have exactly two
      items. These items must be instances of :ref:`type-int-float`.
    * Returned value is a ``tuple`` of two ``float``.
    """
    if not isinstance(value, (int, float, list, tuple)):
        raise TypeError("Interpolation factor must be an int, float, or tuple "
                        "instances, not %s." % type(value).__name__)
    if isinstance(value, (int, float)):
        # A scalar factor applies uniformly to both axes.
        return (float(value), float(value))
    if len(value) != 2:
        raise ValueError("Interpolation factor tuple must contain two "
                         "values, not %d." % len(value))
    for v in value:
        if not isinstance(v, (int, float)):
            # Bug fix: report the offending item's type, not the tuple's
            # type (the original used type(value).__name__ here).
            raise TypeError("Interpolation factor tuple values must be an "
                            ":ref:`type-int-float`, not %s."
                            % type(v).__name__)
    return tuple(float(v) for v in value)
a2f1c62bf722b8b0df48203d41d8c99cf7875470
90,948
def imgPixelToWorldCoords(pixel_x, pixel_y, img_x, img_y, img_w, img_h, img_pixels_w, img_pixels_h):
    """Converts a pixel coordinate to world coordinates.

    Args:
        pixel_x: the x-coordinate in pixels in the image
        pixel_y: the y-coordinate in pixels in the image (larger is lower
            on screen)
        img_x: the x-coordinate of the top left hand corner of the image
        img_y: the y-coordinate of the top left hand corner of the image
        img_w: the width in world coordinates of the image
        img_h: the height in world coordinates of the image
        img_pixels_w: the number of pixels along the width of the image
        img_pixels_h: the number of pixels along the height of the image

    Returns:
        (x, y) in world coordinates for the bottom left hand corner of
        the pixel
    """
    # X grows with pixel_x; Y shrinks because pixel rows count downward.
    world_x = pixel_x * img_w / img_pixels_w + img_x
    world_y = img_y - pixel_y * img_h / img_pixels_h
    return (world_x, world_y)
f4482ef6d181ab86467b066a53d5a7ccb059438e
90,956
def calc_x_for_contact_depth(m, c, contact):
    """Calculate x for contact depth by inverting the line y = m*x + c.

    Args:
        m: slope from `last_base_slope`
        c: intercept from `c0`
        contact: the contact depth (the y value to invert)
    """
    rise = contact - c
    return 1 / m * rise
025c35aa300b9a210efaa9ce217f9c2d916cdfc8
90,959
import random


def tirage_expo(x):
    """
    Return a random float drawn from an exponential distribution with
    rate parameter ``x``.

    Arguments:
        x (float): a strictly positive real number.
    """
    return random.expovariate(x)
94b897c768af48900a0bdc2b1cdcefeec4073b7a
90,962
import asyncio


async def subprocess_output(cmd, **kwargs):
    """
    Run cmd until completion & return stdout, stderr

    Convenience method to start and run a process.

    :param cmd: sequence of program arguments (exec form — no shell)
    :param kwargs: forwarded to asyncio.create_subprocess_exec
    :return: (stdout, stderr) decoded with the default codec
    """
    proc = await asyncio.create_subprocess_exec(
        *cmd,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        **kwargs)
    # communicate() waits for process exit and gathers both streams.
    stdout, stderr = await proc.communicate()
    return stdout.decode(), stderr.decode()
b75097dd39c82d43f8d14c575d678341ec289e51
90,968
def get_time(str_time: str, fix=True):
    """
    Convert an "H:M" clock string to minutes since midnight.

    :param fix: when True, round the minutes down to a 30-minute boundary
    :param str_time: format %H:%M, expected range 7:30 - 22:30
    :return: int: total minutes
    """
    hour_part, minute_part = str_time.split(":")
    hour = int(hour_part)
    minute = int(minute_part)
    if fix:
        minute = (minute // 30) * 30
    return hour * 60 + minute
ff202b815302b47b599e349988ec1fe1134ea324
90,970
def contains_any(seq, aset):
    """Check whether sequence seq contains ANY of the items in aset."""
    return any(item in aset for item in seq)
0ee3014de50b97221d9964325059f352693b04be
90,973
def parse_tag(tag: str):
    """Parse 'standard' image tag and return Tuple[int, int, int, int, int]
    with frame and box coordinates x1, y1, x2, y2 in one 5-tuple
    """
    # Real example of a tag: 121614:3616:235_183_293_262
    # Leading id is discarded; the remainder is frame and box coordinates.
    _, frame_part, box_part = tag.split(":", 2)
    coords = tuple(int(piece) for piece in box_part.split("_"))
    return (int(frame_part), *coords)
6bd29fd3e6fd2d0ace8ac70ee3c594115fc1c987
90,976
from typing import Tuple
from typing import List


def parse_einsum_formula(formula: str) -> Tuple[List[str], str]:
    """
    Parse an einsum formula into input tensor subscripts and output tensor
    subscript. Note that we don't support ellipsis.

    :param formula: The einsum formula to parse.
    :return: (list of input subscripts, output subscript)
    """
    lhs, rhs = formula.split('->')
    inputs = [term.strip() for term in lhs.split(',')]
    return inputs, rhs
477a9cdea3b9319206c88b804f2c96e01bcaec89
90,977
from typing import TextIO
from typing import Dict
import csv


def read_freq_file(input_fh: TextIO, max_records=-1) -> Dict[str, int]:
    """Reads lines of the form "word, count" into a word -> count dict.

    Counts for duplicate words are summed. At most ``max_records`` rows
    are read when it is positive; otherwise the whole file is consumed.
    """
    counts: Dict[str, int] = {}
    for records_read, row in enumerate(csv.reader(input_fh), start=1):
        word = row[0]
        counts[word] = counts.get(word, 0) + int(row[1])
        if 0 < max_records <= records_read:
            break
    return counts
cf973f9c60ddbc3c0251a684b98955da8eb1b7e1
90,978
def span_overlap(a, b):
    """
    Compute overlap of span where left is inclusive, right is exclusive.

    span_overlap([0, 3], [3, 5]) -> 0
    span_overlap([0, 3], [2, 5]) -> 1
    span_overlap([0, 5], [0, 2]) -> 2
    """
    left = max(a[0], b[0])
    right = min(a[1], b[1])
    return max(0, right - left)
168679cbf1e2c6358056ee184fcd7f76ab5b3879
90,982
from typing import Set


def get_embedded_areas_sums(parent_square_size: int, embedded_squares_count: int) -> Set[int]:
    """
    Get the set of all possible areas which can found by dividing a square
    of the size parent_square_size into embedded_squares_count subsquares.
    The function works in the recursive manner.

    :param parent_square_size: size of the parent square
    :param embedded_squares_count: count of parts to divide
    :return: set of possible areas
    """
    # Base cases: zero parts contribute area 0; a single part takes the
    # whole remaining square.
    if embedded_squares_count == 0:
        return {0}
    if embedded_squares_count == 1:
        return {parent_square_size ** 2}
    # Not enough room to give every remaining part a side of at least 1.
    if parent_square_size / embedded_squares_count < 1:
        return set()
    sums = set()
    # Try side length i+1 for the next subsquare — only up to (rounded)
    # half the parent side — and recurse on the reduced parent.
    for i in range(int(round(parent_square_size / 2))):
        area = (i + 1) ** 2
        for ps in get_embedded_areas_sums(parent_square_size - i - 1,
                                          embedded_squares_count - 1):
            sums.add(area + ps)
    return sums
33effc44e5aee7b0e014dd98dfd4edb86ca8715d
90,988
def _generate_cache_key(ctx, spec_name, platform, configuration): """ Generate a composite key for (optional) spec_name, platform, and configuration :param ctx: The context :param spec_name: name of the spec (or None, use the current spec) :param platform: the platform (or None, use the current platform) :param configuration: the configuration (or None, use the current configuration) :return: The composite key """ if hasattr(ctx, 'env'): if not platform: platform = ctx.env['PLATFORM'] if not configuration: configuration = ctx.env['CONFIGURATION'] else: if not platform: platform = 'none' if not configuration: configuration = 'none' composite_key = spec_name + '_' + platform + '_' + configuration return composite_key
41cc6d6e66a4eba8cd74c81d13e8a1b0cd4623c3
90,989
def filter_styles(style, group, other_groups, blacklist=[]):
    """
    Filters styles which are specific to a particular artist, e.g. for a
    GraphPlot this will filter options specific to the nodes and edges.

    Arguments
    ---------
    style: dict
        Dictionary of styles and values
    group: str
        Group within the styles to filter for
    other_groups: list
        Other groups to filter out
    blacklist: list (optional)
        List of options to filter out

    Returns
    -------
    filtered: dict
        Filtered dictionary of styles
    """
    prefix = group + '_'
    filtered = {}
    # Pass 1: keep generic options that belong to no group and are not blacklisted.
    for key, val in style.items():
        blocked = (key in blacklist
                   or key.startswith(prefix)
                   or any(key.startswith(other) for other in other_groups))
        if not blocked:
            filtered[key] = val
    # Pass 2: strip the group prefix from this group's options and merge them in.
    for key, val in style.items():
        if key.startswith(prefix) and key not in blacklist:
            filtered[key[len(prefix):]] = val
    return filtered
5a4d92175156ec3b32eff85dfa54e341120e5ca3
90,991
def html_encode(unicode_data, encoding='utf-8'):
    """Encode *unicode_data* to bytes using the 'html_replace' error handler.

    NOTE(review): 'html_replace' is not a built-in codec error handler; it
    must be registered elsewhere (codecs.register_error) before this is
    called -- presumably to substitute HTML character references for
    unencodable characters. Confirm against the registration site.

    :param unicode_data: text to encode
    :param encoding: target character encoding (default 'utf-8')
    :return: encoded bytes
    """
    return unicode_data.encode(encoding, 'html_replace')
4f210ce780e1d0e3123bfacf129a2cbd5c25f65b
91,002
def fitness_f(x):
    """Calculate total value of an individual's genes.

    Each gene is a (weight, value) pair. If the accumulated weight exceeds
    15, the individual is penalised with 0 fitness.
    """
    total_weight = sum(gene[0] for gene in x.genes)
    total_value = sum(gene[1] for gene in x.genes)
    return total_value if total_weight <= 15 else 0
b001a751ff76c66561e0e6f3cf41963d76d34fca
91,004
def _get_offline_details(fg_name, sagemaker_session, s3_uri=None):
    """Get offline feature group details

    Looks up the Glue Data Catalog configuration behind the feature group's
    offline store and derives an S3 scratch prefix for query results.

    Args:
        fg_name (str): Feature group name
        sagemaker_session (sagemaker.Session()): SageMaker session
        s3_uri (str, optional): Where to store offline query results.
            Defaults to None, in which case the session's default bucket is
            used under an 'offline-store' prefix.

    Returns:
        tuple: Offline feature group table, database, and temporary uri for query results
    """
    # DescribeFeatureGroup exposes the Glue table/database that back the
    # offline store; both are needed to query it (e.g. via Athena).
    _data_catalog_config = sagemaker_session.sagemaker_client.describe_feature_group(
        FeatureGroupName=fg_name
    )["OfflineStoreConfig"]["DataCatalogConfig"]
    _table = _data_catalog_config["TableName"]
    _database = _data_catalog_config["Database"]

    if s3_uri is None:
        s3_uri = f"s3://{sagemaker_session.default_bucket()}/offline-store"
    _tmp_uri = f"{s3_uri}/query_results/"
    return _table, _database, _tmp_uri
d6689fccabe187a08be3f2580087814c1178a9b0
91,016
def get_csv_row(table_row, file_link):
    """
    Get a file's name, download link, and size from one listing-table row.

    :param table_row: A 'tr' tag object (BeautifulSoup-style: supports
        find_all and attribute access on child tags)
    :param file_link: A file's download link
    :return: A dictionary whose pairs are 'filename':<filename>,
        'download_link':<download link>, and 'filesize':<size>
    """
    keys = ['filename', 'download_link', 'filesize']
    elements = table_row.find_all('td')
    # NOTE(review): 'filename' is filled from the cell's anchor href and
    # 'download_link' from the caller-supplied link -- this looks inverted
    # relative to the key names; confirm against the scraped page layout.
    csv_row = elements[1].a['href'], file_link, elements[3].text
    return dict(zip(keys, csv_row))
c87280071df319c0fc5234f0f969b3beb3d3f272
91,020
import itertools def _margins(vars, margins=True): """ Figure out margining variables. Given the variables that form the rows and columns, and a set of desired margins, works out which ones are possible. Variables that can't be margined over are dropped silently. Parameters ---------- vars : list variable names for rows and columns margins : bool | list If true, margins over all vars, otherwise only those listed Return ------ out : list All the margins to create. """ if margins is False: return [] def fn(_vars): "The margin variables for a given row or column" # The first item is and empty list for no margins dim_margins = [[]] # for each wanted variable, couple it with # all variables to the right for i, u in enumerate(_vars): if margins is True or u in margins: lst = [u] + [v for v in _vars[i+1:]] dim_margins.append(lst) return dim_margins # Margin variables for rows and columns row_margins = fn(vars[0]) col_margins = fn(vars[1]) # Cross the two lst = list(itertools.product(col_margins, row_margins)) # Clean up -- merge the row and column variables pretty = [] for c, r in lst: pretty.append(r + c) return pretty
71f078d40d02fb0e3b92cd70c56b97590c3a9e79
91,021
import random


def make_data(n):
    """
    Data generator for the one machine scheduling problem.

    Returns (J, p, r, d, w): jobs 1..n, processing times, release dates,
    due dates, and weights, all drawn uniformly at random.
    """
    jobs = range(1, n + 1)
    proc, release, due, weight = {}, {}, {}, {}
    # First pass draws processing times and weights; the draw order is kept
    # fixed so a seeded random stream produces reproducible instances.
    for job in jobs:
        proc[job] = random.randint(1, 4)
        weight[job] = random.randint(1, 3)
    # Second pass: release dates, then due dates relative to release.
    for job in jobs:
        release[job] = random.randint(0, 5)
        due[job] = release[job] + random.randint(0, 5)
    return jobs, proc, release, due, weight
e04bd948e9a3ac90add07e009d8fa6d2499e1892
91,023
import socket


def Bind(port, socket_type, socket_proto):
    """Try to bind to a socket of the specified type, protocol, and port.

    This is primarily a helper function for PickUnusedPort, used to see if
    a particular port number is available.

    Args:
      port: The port number to bind to, or 0 to have the OS pick a free port.
      socket_type: The type of the socket (ex: socket.SOCK_STREAM).
      socket_proto: The protocol of the socket (ex: socket.IPPROTO_TCP).

    Returns:
      The port number on success or None on failure.
    """
    sock = socket.socket(socket.AF_INET, socket_type, socket_proto)
    try:
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind(('', port))
        # Report the actual port (meaningful when port == 0).
        return sock.getsockname()[1]
    except socket.error:
        return None
    finally:
        # Always release the socket; the caller only wants the port number.
        sock.close()
2c5682b331e6bf547d1d3a8dafc52b76369aee0c
91,031
def alias(attrname):
    """
    Returns a function which calls 'attrname' - for function aliasing.
    We can't just use foo = bar, as this breaks subclassing.
    """
    def forwarder(self, *args, **kwds):
        # Resolve the target attribute at call time so that subclass
        # overrides of 'attrname' are honoured.
        target = getattr(self, attrname)
        return target(*args, **kwds)
    return forwarder
7220503c15d4c723597e06b050681732569dedae
91,032
def parse_labels(cfg):
    """
    Parse labels to dict where label is key and list of patterns is
    the corresponding value.

    cfg: ConfigParser with loaded configuration of labels
    """
    result = {}
    for label in cfg['labels']:
        lines = cfg['labels'][label].splitlines()
        # Drop the blank lines produced by multiline config values.
        result[label] = [line for line in lines if line]
    return result
95e607a3ecf4b386fb787097cfcdbba30b2183ae
91,035
def _strip_str(value): """Strip whitespace if value is a string.""" if isinstance(value, str): value = value.strip() return value
a5e07e3349f2832fa28994c7dfa0373839138319
91,038
def get_hand(indices, game_lines):
    """Return a subset of the game between the supplied indices (inclusive)."""
    start, end = indices
    return game_lines[start:end + 1]
ef9d9239da6737c79e334722643a25123c63a8c8
91,042
def truncate_lines(text, count):
    """Truncate string based on line count.

    Blank lines are dropped first; at most *count* non-blank lines are
    kept. If lines were removed, '...' is appended to signal truncation.

    :param text: Single or multi-line string.
    :type text: str
    :param count: Number of lines to return.
    :type count: int
    :return: Truncated text string.
    :rtype: str
    """
    kept = [line for line in text.splitlines() if line.strip()]
    truncated = '\n'.join(kept[:count])
    if len(kept) > count:
        # Inform the reader that content was cut off.
        return truncated + '...'
    return truncated
c0409b8b01806b780818bc8fea8a4a9ab360fbca
91,045
import re


def is_active_string(context, flatpage):
    """
    Returns the string ' active' if the requested url starts with the
    absolute url of the given flatpage. Else returns an empty string.

    :param context: template context mapping; 'request' (with a .path) may be absent
    :param flatpage: object with a .slug attribute
    """
    request = context.get('request')
    # Bug fix: the slug was interpolated into the regex unescaped, so slugs
    # containing metacharacters (e.g. '.') matched too permissively.
    if request and re.match(re.escape(flatpage.slug) + '/', request.path[1:]):
        result = ' active'
    else:
        result = ''
    return result
14858172b66a4f4acbaacaf1d9cd04b27fa0d8f4
91,049
import math def _kt_estim_update(new_bit, counts): """Computes log(P(Next_bit=new_bit|counts)) for the the Krichevski-Trofimov estimator. """ return math.log((counts[new_bit] + 0.5) / float((sum(counts) + 1)))
1d60c70a32ab4195a9acb787aa9f75cc7be72585
91,051
def get_item_properties(item, fields):
    """Return a tuple containing the item properties.

    Missing fields are filled with the empty string.

    :param item: a single item resource (e.g. Server, Project, etc)
    :param fields: tuple of strings with the desired field names
    """
    return tuple(item.get(name, '') for name in fields)
23b24f51c5bcc0d5d26d497c967fd4c02c6aa7c1
91,054
def flat_test_name(_id):
    """Return short form test name from TestCase ID.

    Drops the leading module component and joins the rest with dashes.
    """
    parts = _id.split('.')
    return '-'.join(parts[1:])
75bba12d5dca97814ba4d937a329ad5ba0c77df4
91,066
def _analyse_gdal_output(output): """analyse the output from gpt to find if it executes successfully.""" # return false if "Error" is found. if b'error' in output.lower(): return False # return true if "100%" is found. elif b'100 - done' in output.lower(): return True # otherwise return false. else: return False
09b99408d2e64206f95e9605d1322aaf7ce6b99f
91,069
def host(ctx):
    """Returns true if player has host role"""
    # Scan the message author's roles for one literally named "Host".
    return any(role.name == "Host" for role in ctx.message.author.roles)
e36c3a7e3977142bdded4ff48cca68c0831fc37a
91,072
def _formatted_table_to_dict(formatted_table): """Convert a single-row table with header to a dictionary""" headers = [ header.strip() for header in formatted_table[0].split(" ") if len(header) > 0 ] fields = [ field.strip() for field in formatted_table[1].split(" ") if len(field) > 0 ] return dict(zip(headers, fields))
0fafc769d3e9b74a605cd1af0516b8dea279b5f5
91,073
def regressor(model, data):
    """Return the regression model's prediction for *data*."""
    return model.predict(data)
e7c5f4f03c8eb8b51b9c6a33f09e54fd892512a4
91,077
def generate_color_map(graph_object, individual_df):
    """
    Generates a color map corresponding with whether a node represents an
    individual or membership.

    Note: this mutates the graph in place, tagging every node with a
    'color' attribute ('c' for individuals, 'm' for memberships).

    :param graph_object: NetworkX graph object
    :param individual_df: DataFrame of the individual table query; must have
        an 'INDIVIDUAL_ID' column whose values match node ids
    :return colors: A list of colors to be passed to the networkx.draw() function
    """
    # Perf fix: membership tests against a list were O(len(df)) per node;
    # a set makes the whole pass O(nodes + rows). Node ids are hashable by
    # networkx's contract, so this is safe.
    individual_nodes = set(individual_df['INDIVIDUAL_ID'])
    for n in graph_object.nodes:
        graph_object.nodes[n]['color'] = 'c' if n in individual_nodes else 'm'
    # Collect colors in node-iteration order, as expected by networkx.draw().
    colors = [node[1]['color'] for node in graph_object.nodes(data=True)]
    return colors
bdf059b94efeca9a63ddae588b073afc5372ca0a
91,084
def _remove_trailing_chars(text: str) -> str: """ Removes trailing characters from the beginning or end of a string. """ chars = ['.', '@', '/', '&', '-', "'"] for char in chars: text = text.strip(char) return text
b19e1d369fc858a2a269fdfc14a128907ecf0bc3
91,089
from typing import List


def rot(matrix: List[List]) -> List[List]:
    """Rotate a python matrix (list of list) clockwise by 90 degrees
    without numpy.

    https://stackoverflow.com/questions/8421337/rotating-a-two-dimensional-matrix-in-python
    """
    # Reverse the row order, then transpose: together these realise a
    # clockwise quarter turn.
    reversed_rows = matrix[::-1]
    return [list(column) for column in zip(*reversed_rows)]
6c04267ab41daddbfa65a83973faadec65f472cd
91,091
def implement_outfile_parser(args):
    """
    Implement -o and -O arguments added by generate_outfile_parser.

    Parameters
    ----------
    args : argparse args object
        Results of parse_args() when called on an Argument Parser object.

    Returns
    -------
    outfile : str, None
        Path to output text file. None if neither args.oname nor args.o was
        set (do not output to file).
    """
    if args.oname is not None:
        # Explicit name takes precedence; normalise to a .txt extension.
        name = args.oname.strip()
        return name if name.endswith(".txt") else name + ".txt"
    if args.o:
        # Bare flag: fall back to a default file name.
        return "out.txt"
    return None
005b7f5f43bf8c77f80254a28a8109303e18ac86
91,092
def get_requirements(local_filepath):
    """
    Return list of this package requirements via local filepath.
    """
    with open(local_filepath) as req_file:
        # One requirement per line; splitlines drops the trailing newline.
        return req_file.read().splitlines()
c1560fb5f206688821ea47b722f4c9311718c1fa
91,099
def isBottomLayer(layer):
    """Decide if layer is a bottom layer (its name starts with 'Layer.B_')."""
    name = str(layer)
    return name.startswith("Layer.B_")
1b4ef7eddfd1c8fc3cb59c2658cb17b70f3a8e0a
91,108
def setup_datafiles(shell, params_info):
    """
    Write the datafiles to disk and build the parameters text.

    :param shell: object exposing write_file(path, text)
    :param params_info: mapping of name -> {'path', 'text', 'type'}
    :return: newline-joined "type:path" entries describing the written files
    """
    entries = []
    for info in params_info.values():
        shell.write_file(info['path'], info['text'])
        entries.append("%s:%s" % (info['type'], info['path']))
    # generate the parameters file to feed into the url
    return '\n'.join(entries)
e1791bd3da7761b0fe897a8864fa8b2a9f350b0b
91,109
def merge_dicts_with_function(function, dict_1, dict_2):
    """
    Apply function to values keyed by the same key in dict_1 and dict_2.

    :param function: function; combiner applied when a key exists in both dicts
    :param dict_1: dict;
    :param dict_2: dict;
    :return: dict; merged dict
    """
    def pick(key):
        # Combine when the key is shared; otherwise take whichever side has it.
        if key in dict_1 and key in dict_2:
            return function(dict_1[key], dict_2[key])
        return dict_1[key] if key in dict_1 else dict_2[key]

    return {key: pick(key) for key in set(dict_1) | set(dict_2)}
5cc08e529bc77a228461a5eae6225e9d50e218ba
91,110