Columns: content (string, 39–14.9k chars) · sha1 (string, 40 chars) · id (int64, 0–710k)
def L_bot(P_mass, R, M_bot, M_dist, F_mass, M_feed, phi):
    """
    Calculates the liquid flow rate at the bottom of the column.

    Parameters
    ----------
    P_mass : float
        The mass flow rate of distillate, [kg/s]
    F_mass : float
        The mass flow rate of feed, [kg/s]
    M_dist : float
        The molar mass of distillate, [kg/kmol]
    M_feed : float
        The molar mass of feed, [kg/kmol]
    phi : float
        The fraction of vapor at the feed point
    R : float
        The actual reflux number, [dimensionless]
    M_bot : float
        The molar mass at the bottom of the column, [kg/kmol]

    Returns
    -------
    L_bot : float
        The liquid flow rate at the bottom of the column, [kg/s]

    References
    ----------
    Дытнерский, p. 229, formula 6.5
    """
    return (P_mass * R * M_bot / M_dist) + (F_mass * M_bot * (1 - phi) / M_feed)
811a8677f739c30a233fbbfb65767125f85c963c
51,905
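A worked numeric check of L_bot above; the stream values are made up for illustration, not from the source:

L = L_bot(P_mass=1.0, R=2.0, M_bot=80.0, M_dist=60.0,
          F_mass=3.0, M_feed=70.0, phi=0.4)
print(L)  # (1*2*80/60) + (3*80*0.6/70) = 2.6667 + 2.0571 ≈ 4.7238 kg/s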
def set_ignore_certificate_errors(ignore: bool) -> dict:
    """Enable/disable whether all certificate errors should be ignored.

    Parameters
    ----------
    ignore: bool
        If true, all certificate errors will be ignored.

    **Experimental**
    """
    return {
        "method": "Security.setIgnoreCertificateErrors",
        "params": {"ignore": ignore},
    }
49f0597ee4b7bdcdd620e636704e732f52871c22
51,906
def top_sentences(query, sentences, idfs, n):
    """
    Given a `query` (a set of words), `sentences` (a dictionary mapping
    sentences to a list of their words), and `idfs` (a dictionary mapping
    words to their IDF values), return a list of the `n` top sentences that
    match the query, ranked according to idf. If there are ties, preference
    should be given to sentences that have a higher query term density.
    """
    scores = {}
    for sentence, sentwords in sentences.items():
        score = 0
        for word in query:
            if word in sentwords:
                score += idfs[word]
        if score != 0:
            density = sum([sentwords.count(x) for x in query]) / len(sentwords)
            scores[sentence] = (score, density)

    sorted_by_score = [k for k, v in sorted(
        scores.items(), key=lambda x: (x[1][0], x[1][1]), reverse=True)]

    return sorted_by_score[:n]
8152d4bd7e60f3a8f4989b2c6a99986dbbaaf646
51,907
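A minimal usage sketch for top_sentences above, with made-up query, sentence, and IDF inputs:

query = {"neural", "network"}
sentences = {
    "a neural network learns": ["a", "neural", "network", "learns"],
    "networks of roads": ["networks", "of", "roads"],
}
idfs = {"neural": 1.6, "network": 1.2}
print(top_sentences(query, sentences, idfs, n=1))
# ['a neural network learns']  -- the only sentence containing query words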
def ckpt_recency(ckpt):
    """Recency as Checkpoint importance metric.

    This function can also act as an example of how to make checkpoint
    importance keyfuncs. This is a named function, but as you can see
    it could be easily implemented as a lambda in a pinch.
    """
    return ckpt.meta["unixtime"]
3969e618389e20ced56e1026d2d4a011373c1bc9
51,909
import math


def stakePoolTicketFee(stakeDiff, relayFee, height, poolFee, subsidyCache, netParams):
    """
    stakePoolTicketFee determines the stake pool ticket fee for a given
    ticket from the passed percentage. Pool fee as a percentage is truncated
    from 0.01% to 100.00%. This all must be done with integers.

    Args:
        stakeDiff (int): The ticket price.
        relayFee (int): Transaction fees.
        height (int): Current block height.
        poolFee (int): The pool's fee, as percent.
        subsidyCache (calc.SubsidyCache): A subsidy cache.
        netParams (module): The network parameters.

    Returns:
        int: The stake pool ticket fee.
    """
    # Shift the decimal two places, e.g. 1.00% to 100. This assumes that the
    # proportion is already multiplied by 100 to give a percentage, thus
    # making the entirety be a multiplication by 10000.
    poolFeeAbs = math.floor(poolFee * 100.0)
    poolFeeInt = int(poolFeeAbs)

    # Subsidy is fetched from the blockchain package, then pushed forward a
    # number of adjustment periods for compensation in gradual subsidy decay.
    # Recall that the average time to claiming 50% of the tickets as votes is
    # approximately the same as the ticket pool size
    # (netParams.TicketPoolSize), so take the ceiling of the ticket pool size
    # divided by the reduction interval.
    adjs = int(math.ceil(netParams.TicketPoolSize / netParams.SubsidyReductionInterval))
    subsidy = subsidyCache.calcStakeVoteSubsidy(height)
    for i in range(adjs):
        subsidy *= 100
        subsidy = subsidy // 101

    # The numerator is (p*10000*s*(v+z)) << 64.
    shift = 64
    s = subsidy
    v = int(stakeDiff)
    z = int(relayFee)
    num = poolFeeInt
    num *= s
    vPlusZ = v + z
    num *= vPlusZ
    num = num << shift

    # The denominator is 10000*(s+v). The extra 10000 above cancels out.
    den = s
    den += v
    den *= 10000

    # Divide and shift back.
    num = num // den
    num = num >> shift
    return num
69d17a5cef65721901af8a6515ce434f086b2e1d
51,910
import itertools


def _solve_bruteforce(D, all_solutions, valid, spin, value):
    """_solve_bruteforce.

    Helper function for solve_pubo_bruteforce, solve_puso_bruteforce,
    solve_qubo_bruteforce, and solve_quso_bruteforce.

    Iterate through all the possible solutions to a BO formulated problem
    and find the best one (the one that gives the minimum objective value).
    Do not use for large problem sizes! This is meant only for testing very
    small problems.

    Parameters
    ----------
    D : dict.
    all_solutions : boolean (optional, defaults to False).
        If all_solutions is set to True, all the best solutions to the
        problem will be returned rather than just one of the best. If the
        problem is very big, then it is best if ``all_solutions`` is False,
        otherwise this function will use a lot of memory.
    valid : function.
        ``valid`` takes in a bitstring or spinstring and outputs a boolean
        indicating whether that bitstring or spinstring is a valid solution.
    spin : bool.
        Whether we're bruteforce solving a spin model or boolean model.
    value : function.
        One of ``qubo_value``, ``quso_value``, ``pubo_value``, or
        ``puso_value``.

    Returns
    -------
    res : tuple (objective, solution).

        if all_solutions is False:
            objective : float.
                The best value of the problem.
            solution : dict.
                Maps the binary variable label to its solution value,
                {0, 1} if not spin else {-1, 1}.

        if all_solutions is True:
            objective : float.
                The best value of the problem.
            solution : list of dicts.
                Each dictionary maps the label to the value of each binary
                variable. I.e. each ``s`` in ``solution`` is a solution that
                gives the best objective function value.
    """
    if not D:
        return 0, ({} if not all_solutions else [{}])
    elif () in D:
        offset = D.pop(())
        if not D:
            D[()] = offset
            return offset, ({} if not all_solutions else [{}])
        D[()] = offset

    # if D is a Matrix object or QUBO, PUBO, etc, then these are defined
    try:
        N = D.num_binary_variables
        # could do D.reverse_mapping, but that creates a copy. We just need
        # to not mutate it here, then we don't have to waste time copying.
        mapping = D._reverse_mapping
    except AttributeError:
        var = set()
        for x in D:
            var.update(set(x))
        N = len(var)
        # map qubit name to 0 through N-1
        mapping = dict(enumerate(var))

    best = None, {}
    all_sols = {None: [{}]}

    for test_sol in itertools.product((1, -1) if spin else (0, 1), repeat=N):
        x = {mapping[i]: v for i, v in enumerate(test_sol)}
        if not valid(x):
            continue
        v = value(x, D)
        if all_solutions and (best[0] is None or v <= best[0]):
            best = v, x
            all_sols.setdefault(v, []).append(x)
        elif best[0] is None or v < best[0]:
            best = v, x

    if all_solutions:
        best = best[0], all_sols[best[0]]

    return best
a0fe41785eb41e12bbca74e49b09679ab5e646ca
51,914
import pickle
import bz2


def zloads(pickled_data):
    """
    loads pickleable object from bz2 compressed string

    :param pickled_data: BZ2 compressed byte sequence
    :type pickled_data: bytes
    :returns: An unpickled version of the compressed byte sequence.
    """
    return pickle.loads(bz2.decompress(pickled_data))
    # return pickle.loads(pickled_data)
03a18e47a780b586b4ca606f253744ff3003febc
51,919
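A round-trip sketch for zloads above; the inverse helper zdumps is hypothetical, not part of the source:

import bz2
import pickle

def zdumps(obj):
    # hypothetical inverse: pickle, then bz2-compress
    return bz2.compress(pickle.dumps(obj))

blob = zdumps({"a": 1})
assert zloads(blob) == {"a": 1}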
def atom_count(mol2_file):
    """
    Get atom count from Tripos MOL2 header

    :param mol2_file: mol2 file
    :type mol2_file:  :py:str

    :return:          atom count
    :rtype:           :py:int
    """
    atomcount = 0
    with open(mol2_file, 'r') as infile:
        for line in infile.readlines():
            if line.startswith('@<TRIPOS>ATOM'):
                break
            line = line.strip().split()
            if len(line) and line[0].isdigit():
                atomcount = int(line[0])
                break

    return atomcount
f305219f6d9ddd8ebb7fc96d34855d6c50e113b3
51,922
def parse_project_and_task_from_dag_id(dag_id):
    """Parse project and task from dag id.

    Args:
        dag_id (str): The id of DAG.

    Returns:
        (tuple of str): The first item is project. The second item is task.
            If dag_id is invalid, will return empty string.
    """
    if not dag_id:
        return '', ''
    ids = dag_id.split('__')
    if len(ids) >= 3:
        return ids[1], ids[2]
    elif len(ids) == 2:
        return ids[1], ''
    else:
        return '', ''
1a6740ce65bd82189604121a0cbfb4f9c11e3c8f
51,923
def convertValueOrNone(value, convert=int):
    """Convert a value to a type unless NoneType.

    Parameters
    ----------
    value : anything
        The value to possibly convert.
    convert : type, optional
        The type for conversion.

    Returns
    -------
    convert type
        The converted value.
    """
    return value if value is None else convert(value)
e3c5c091d7c2ad3dfd6dc48349afac5765bd6f40
51,924
def snake_id_from_XY(x, y, converted_data):
    """
    Returns snake_id from x, y integers.

    Args:
        x (int): x value of tile
        y (int): y value of tile
        converted_data (dict): python readable version of json

    Returns:
        snake["id"] (unicode string): snake id
    """
    for snake in converted_data["board"]["snakes"]:
        if (x == snake["body"][0]["x"]) and (y == snake["body"][0]["y"]):
            return snake["id"]
a1f1a4973169bfdb91aed4713ac8be36b13f09ad
51,926
def LTrim(text):
    """Strip spaces from the left of the text"""
    return str(text).lstrip()
fa9fb09d90795a3f6e8b6e1025039eda0baca407
51,928
def sum_diagnonals_spiral(N):
    """
    Problem 28, method 2, series: 1:1, 3:25, 5:101

    A 5x5 spiral has 1 + 4*2 + 4*4 = 25 elements, so the maximum element is
    25; in general an NxN spiral has N*N elements. The upper-right corner of
    each lxl ring must be l*l, and the other 3 corners follow
    counter-clockwise at steps of -(l-1).
    """
    s = 1  # spiral 1
    for l in range(3, N + 2, 2):
        s = s + l*l
        s = s + l*l - (l-1)
        s = s + l*l - 2*(l-1)
        s = s + l*l - 3*(l-1)
    print("sum of all the diagonal numbers of spiral", N, "is: ", s)
    return s
eadae06001a8cadfc5bed28f760a3e6a3ef5f4b6
51,932
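A quick sanity check of sum_diagnonals_spiral above against the series in its docstring:

assert sum_diagnonals_spiral(1) == 1    # empty loop; just the centre
assert sum_diagnonals_spiral(5) == 101  # Project Euler problem 28, 5x5 grid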
def count_values(x, labels):
    """Count how many rows have a label contained in labels."""
    return x['label'].loc[x['label'].isin(labels)].count()
4efeb945204c711a411445a7cff7b478a62862a3
51,934
def convert_to_bagel_geom(geom_str):
    """
    Converts an xyz geometry string to a bagel formatted geometry (json format)

    :param geom_str: xyz geometry to convert
    """
    bagel_geom = ''
    for line in geom_str.strip().splitlines():
        atom, *xyz = line.split()
        bagel_geom += f'{{"atom" : "{atom:s}", '
        if len(atom) == 1:
            bagel_geom += ' '
        x, y, z = map(float, xyz)
        bagel_geom += f'"xyz" : [{x:>15.10f},{y:>15.10f},{z:>15.10f}] }},\n'

    return bagel_geom[:-2]
6beb0a75f19fdfebec66bc22a30cb7a6f9bbbc39
51,939
def affine_to_str(transform):
    """Return a string representation of an affine.Affine transform"""
    return ','.join(map(str, transform[:6]))
844d52e77f00eb86668563c92969eca175acb1e6
51,942
def lexiconmatch(word, lexicon):
    """Return 0 if word not in lexicon; lexicon valence otherwise."""
    return lexicon[word] if word in lexicon else 0
af2f451e67de5aebc60c1c9812ae9937a7514f5c
51,943
def is_array(obj):
    """
    Return True if object is list or tuple type.
    """
    return isinstance(obj, (list, tuple))
f3ed267dcb6f1a4bb5ce0648b1a876cfb860ecf4
51,945
from datetime import datetime


def times(fields):
    """Return a starting and ending datetime, given `fields` like:

    [u'CLOCK:', u'[2013-03-08', u'Fri', u'14:24]--[2013-03-08', u'Fri',
     u'15:41]', u'=>', u'1:17']
    """
    return (datetime.strptime(fields[1] + fields[3][:5], u'[%Y-%m-%d%H:%M'),
            datetime.strptime(fields[3][9:] + fields[5], u'%Y-%m-%d%H:%M]'))
8a1fb78e893e71c96dc2a04c1897bfc4b7a1d367
51,949
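Running times above on the example fields from its docstring:

fields = ['CLOCK:', '[2013-03-08', 'Fri', '14:24]--[2013-03-08', 'Fri',
          '15:41]', '=>', '1:17']
start, end = times(fields)
print(start, end)  # 2013-03-08 14:24:00 2013-03-08 15:41:00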
def identity(x, session):
    """Return the value."""
    return x
b18cd37ccfdf7a06f4645c38fa5debd020bc432f
51,950
def calculate(first, second, operation: str):
    """
    Computes the arithmetic operation ``operation`` on two numbers.

    Supported operations:

    * ``+`` - addition,
    * ``-`` - subtraction,
    * ``*`` - multiplication,
    * ``/`` - division.

    :param first: The first number.
    :param second: The second number.
    :param operation: The arithmetic operation.
    :return: The result of the arithmetic operation.
    """
    if operation == "+":
        return first + second
    elif operation == "-":
        return first - second
    elif operation == "*":
        return first * second
    elif operation == "/":
        if second != 0:
            return first / second
        else:
            raise ZeroDivisionError("Division by zero is not allowed")
    else:
        raise ValueError("Unknown operation")
120ca531234c443517927dffc4754b7bb9e6977f
51,951
import torch


def reverse_padded_sequence(inputs, lengths, batch_first=False):
    """Reverses sequences according to their lengths.

    Inputs should have size ``T x B x *`` if ``batch_first`` is False, or
    ``B x T x *`` if True. T is the length of the longest sequence (or
    larger), B is the batch size, and * is any number of dimensions
    (including 0).

    Arguments:
        inputs (Variable): padded batch of variable length sequences.
        lengths (list[int]): list of sequence lengths
        batch_first (bool, optional): if True, inputs should be B x T x *.

    Returns:
        A Variable with the same size as inputs, but with each sequence
        reversed according to its length.
    """
    if batch_first:
        inputs = inputs.transpose(0, 1)
    max_length, batch_size = inputs.size(0), inputs.size(1)
    if len(lengths) != batch_size:
        raise ValueError('inputs is incompatible with lengths.')
    ind = [list(reversed(range(0, length))) + list(range(length, max_length))
           for length in lengths]
    ind = torch.LongTensor(ind).transpose(0, 1)
    for dim in range(2, inputs.dim()):
        ind = ind.unsqueeze(dim)
    ind = ind.expand_as(inputs)
    if inputs.is_cuda:
        ind = ind.cuda(inputs.get_device())
    reversed_inputs = torch.gather(inputs, 0, ind)
    if batch_first:
        reversed_inputs = reversed_inputs.transpose(0, 1)
    return reversed_inputs
2794e1effb4227e509d97aa942a87f329a61b7b8
51,953
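A small sanity check of reverse_padded_sequence above on a two-sequence batch (assuming a recent PyTorch, where plain tensors replace Variable):

import torch

x = torch.tensor([[1, 2, 3], [4, 5, 0]])  # B x T, second row padded
out = reverse_padded_sequence(x.unsqueeze(-1).float(), [3, 2], batch_first=True)
print(out.squeeze(-1))
# tensor([[3., 2., 1.],
#         [5., 4., 0.]])  -- each sequence reversed only up to its length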
def parser_combinator(top, lookup, combinator, tokens):
    """Parse the given tokens, combining in the given fashion.

    Args:
        top: The top-level parser. Separates the tokens into sections which
            can be consumed by the parsers in the lookup function.
        lookup: For a given section from the top-level parser, returns a
            list of possible parsers.
        combinator: Combines the resultant nodes from parsing each section
            from the top-level parser.
        tokens: The tokens to be parsed.

    Returns:
        The top-level node from the combinator.
    """
    sections = top(tokens)
    parsed_sections = list()
    for i, section in enumerate(sections):
        parsed = None
        for parse in lookup(section, i):
            parsed = parse(section)
            if parsed:
                break
        if not parsed:
            return None
        parsed_sections.append(parsed)
    return combinator(*parsed_sections)
155def4208d6baa9f4092ba29abd1918ae4438ea
51,956
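A toy run of parser_combinator above, with made-up parsers that split on ';' and parse integer tokens:

top = lambda tokens: tokens.split(';')             # 'a;b' -> ['a', 'b']
lookup = lambda section, i: [lambda s: int(s) if s.isdigit() else None]
combinator = lambda *nodes: sum(nodes)             # combine parsed sections

print(parser_combinator(top, lookup, combinator, "1;2;3"))  # 6
print(parser_combinator(top, lookup, combinator, "1;x"))    # None (no parser matched 'x')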
import requests


def get_image_from_url(imgurl):
    """
    Loads and returns the bytes of the image from the specified url

    :param imgurl: the url
    """
    resp = requests.get(imgurl)
    imgbytes = resp.content
    return imgbytes
530d57ae44edd25668b3ad1bdb6aa6721ea66537
51,957
def _filter_out_duplicate_spotify_artists(spotify_artists):
    """
    We should not try to add the same artist multiple times to the graph,
    so filter out any duplicates.
    """
    spotify_artist_dictionary = {
        spotify_artist.id: spotify_artist
        for spotify_artist in spotify_artists
    }
    return list(spotify_artist_dictionary.values())
1dce7b9ddfcadb2baf3006d7061adabc27375097
51,967
def bernoulli_prob(var, bias=0.5):
    """
    Returns Sympy-expression of a Bernoulli PMF for var at specified bias.

    :param var: boolean symbol to express pmf
    :param bias: bias probability (float ranging from 0 to 1).
    :return bias*var + (1-bias)*(1-var)
    """
    # bv + (1-b)(1-v) = bv + 1 - v - b + bv = 2bv - v - b + 1 = v(2b-1) + (1-b)
    return var * (2*bias - 1) + (1 - bias)
b7e0ae4c578ad0cbd4c8dc7127f1d7f24a083d9f
51,968
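A quick check of the algebraic simplification in bernoulli_prob above, with plain floats standing in for Sympy symbols:

b = 0.3
for v in (0, 1):
    assert abs(bernoulli_prob(v, b) - (b*v + (1 - b)*(1 - v))) < 1e-12
# v=1 yields the bias b; v=0 yields 1-b, as a Bernoulli PMF should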
def append_line(buf, linearr):
    """Append lines

    Args:
        buf (obj): Nvim buffer
        linearr (Array[string]): Line contents

    Returns:
        suc (bool): True if success
    """
    for l in linearr:
        buf.append(l)
    return True
9ae3c08b119c01276a95c1adf9e8372730f08154
51,971
import torch


def convert_ax_angle_to_quat(ax, ang):
    """
    Convert axis-angle representation to quaternion.
    """
    qw = torch.cos(ang/2)
    qx = ax[0] * torch.sin(ang/2)
    qy = ax[1] * torch.sin(ang/2)
    qz = ax[2] * torch.sin(ang/2)
    quat = torch.stack([qw, qx, qy, qz], dim=1)
    return quat
04ffcd09fd88dcef0c26c7230abdd3d3704ec2f4
51,975
def next_value(value: int) -> int:
    """
    Given the current code value, return what the next value will be.
    """
    return (value * 252_533) % 33_554_393
6b77837313abd891e95e56d191af3410f45eeb11
51,978
def read_messages(file):
    """(file open for reading) -> list of str

    Read and return the contents of the file as a list of messages, in the
    order in which they appear in the file.
    """
    messages = []
    for line in file.readlines():
        messages.append(line.strip())
    return messages
f596b2fdc7661d5f641f30a676f586494a7b41d9
51,979
def prob_page_pattern_match(q, q1, t, m, s):
    """
    Probability that a page pattern of q + t pages contains another page
    pattern of q1 pages, assuming that all pages are equiprobable.
    Parameter m represents the total number of pages.
    """
    # prob_same_page = (harmonic_approx(m, 2*s)
    #                   / (harmonic_approx(m, s) ** 2))
    prob_same_page = float(1) / m
    prob_single_page_no_match = (1 - prob_same_page) ** (q + t)
    return (1 - prob_single_page_no_match) ** q
269fe744ca6ca46e410dccc994914afca16d24f4
51,981
def tokenize_sentence(sentence):
    """
    Tokenize sentence into list of words.

    :param sentence: to tokenize
    :return: the list of words in the sentence
    """
    tokens = []
    token_start = 0
    for i in range(len(sentence)):
        if sentence[i] == ' ':
            tokens.append(sentence[token_start:i])
            token_start = i + 1
        if i == len(sentence) - 1:
            tokens.append(sentence[token_start:i+1])
    return tokens
c0cdd0ef770f40ee3e9723ba0663348b494cd49c
51,989
def relative_error(estimated, truth):
    """Calculate relative error.

    The relative error is defined as (estimated - truth) / truth.

    Args:
        estimated: the estimated value.
        truth: the true value.

    Returns:
        The relative error.
    """
    return (estimated - truth) / truth
92be45fc09df753195abe28c8d740bddeb499a0d
51,993
def y_true(y_true, y_pred):
    """
    Returns the label (y_true)

    :param y_true:
    :param y_pred:
    :return:
    """
    return y_true
66c68f0303f5431daa62a09ad528e51f4d30126c
51,998
def data_blobxfer_extra_options(conf):
    # type: (dict) -> str
    """Retrieve input data blobxfer extra options

    :param dict conf: configuration object
    :rtype: str
    :return: blobxfer extra options
    """
    try:
        eo = conf['blobxfer_extra_options']
        if eo is None:
            eo = ''
    except KeyError:
        eo = ''
    return eo
6e6627e42ddc054571e08bce4d1a6959d0977ceb
51,999
def get_assets(db):
    """Get ALL supported assets from crypto_list.db"""
    rows = db.execute("SELECT * FROM currencies")
    return rows
2aa14922413650d0638ee62c0b5c01a4d20a4adc
52,000
import ast


def get_cue(series):
    """Extract cues using the cue indices from the sentence."""
    # The data is a string that should be a list of integers
    cues = ast.literal_eval(series["cues_idx"])

    # Extract all cue words.
    cue = [s for c, s in zip(cues, series["sent"].split(" ")) if c]

    if len(cue) == 1:
        return cue[0]
    elif len(cue) == 0:
        return None
    else:
        return cue
291381427ae1f53aa980f681a5a23f7d11910598
52,001
async def subscriber_recv(loop, subscriber):
    """Receive a message from a subscriber on the EII Message Bus.

    :param loop: asyncio loop
    :param subscriber: EII Message Bus Subscriber
    """
    recv = loop.run_in_executor(None, subscriber.recv)
    msg = await recv
    return msg
a7d6d7ae9777d517af5567ca72c0b4de4666fd92
52,008
def linenr_column_line(text, offset):
    """Return line number, column and the whole line in which text[offset]
    lies. Line number and column are in one-based indexing. Each tab is
    counted as one column.
    """
    offset = min(max(0, offset), len(text))
    textbegin = text[:offset]
    if not textbegin:
        return 1, 1, None
    lines = textbegin.splitlines(True)
    linenr, column = max(1, len(lines)), len(lines[-1]) + 1
    line = lines[-1]
    nlpos = text.find('\n', offset)
    if nlpos >= 0:
        line += text[offset:nlpos+1]
    else:
        line += text[offset:]
    return linenr, column, line
1ab598ffcb9fb94d61761b9ede75f1759687350b
52,009
def parseCommandLine(parser):
    """Parse command line"""
    parser.add_argument('--remove', '-r',
                        action='store_true',
                        dest='removeFlag',
                        default=False,
                        help='remove all diskimgr configuration files')

    # Parse arguments
    args = parser.parse_args()

    return args
eed67549a330837bcfa952d81302a25a1b455757
52,012
import binascii
import hmac
import hashlib


def daily_signature(key, message):
    """
    This function takes a key and a message as input. The key is the date,
    which changes daily, and the message is the data to be hashed. It
    returns a hashed object for that message.
    """
    byte_key = binascii.unhexlify(key)
    message = message.encode()
    return hmac.new(byte_key, message, hashlib.sha256).hexdigest().upper()
5d103f5c4eaf2701c30b4d26a7c55bad762ae477
52,014
def add_recommend(df):
    """adds general 'would recommend' variable (probably + definitely)"""
    df['would_recommend'] = df['RECOMMEND_MBV'] + df['RECOMMEND_TBV']
    return df
c994ab81acbb2d858147e96d32e82beccc4d36ee
52,019
def _cache_key(G):
    """
    Return the key used to cache the result for the graph G

    This is used by the decorator :func:`_cached`.

    EXAMPLES::

        sage: from sage.graphs.tutte_polynomial import _cache_key
        sage: G = graphs.DiamondGraph()
        sage: print(_cache_key(G))
        ((0, 2), (0, 3), (1, 2), (1, 3), (2, 3))
    """
    return tuple(G.canonical_label().edges(labels=False, sort=True))
b9b0bdb5e48ea89c5db5c3bc9d4f93b54ae22a12
52,021
def _GetEventData(data, key, value_type=None):
    """Get value for key from data.

    Args:
        data: a dict contains data.
        key: the key to get value.
        value_type: value's type.

    Returns:
        value
    """
    value = data.get(key)
    if value and value_type:
        value = value_type(value)
    return value
1428f08e4275864b871fa923dd9bf8e380561d80
52,026
def validate_model_is_trained(model, model_type):
    """Check whether the model has been already trained.

    :param model: The input model.
    :param model_type: [str] The model type (e.g., RandomForestClassifier).
    :return: The (n_estimators, estimators_) attribute names; raises an
        AttributeError if the model has not been trained yet.
    """
    if model_type in ['RandomForestClassifier', 'RandomForestRegressor',
                      'ExtraTreesClassifier', 'ExtraTreesRegressor']:
        n_estimators_attr = 'n_estimators'
        estimators_attr = 'estimators_'
    else:
        raise NotImplementedError('Don\'t know what to do with \'%s\'' % model_type)

    if hasattr(model, n_estimators_attr) and not hasattr(model, estimators_attr):
        raise AttributeError('The model has not been trained yet, and thus cannot be explained.')
    else:
        return n_estimators_attr, estimators_attr
c1c3cc1c164c15a9831f5776fc587a46ec04a3b8
52,033
def cropimage(img, bounds):
    """Crop a pillow image at bounds (left, top, right, bottom)"""
    return img.crop(bounds)
00d6b6a1de744f4d43209bb8078834dfb8a74bf7
52,038
def filter_PIBICs(lattesframe, npibics=1):
    """Returns the rows of the researchers in arg lattesframe that have been
    part of the PIBIC scholarship at least once.

    Args:
        lattesframe: the pandas dataframe to be filtered.
        npibics: the minimum quantity of PIBICs to be filtered.
    """
    if npibics <= 0:
        print('Invalid arg npibics. Reverting to default npibics == 1.')
        npibics = 1
    return lattesframe.loc[lattesframe['quantasVezesPIBIC'] >= npibics]
c167294cf94e37e20170c6d41b07ddc3c2161b99
52,039
def get_pos_diff(old_pos, new_pos):
    """Returns the difference in position between two rospy positions"""
    pos_diff = new_pos - old_pos
    return pos_diff
64b333c18b8c6c192541bf7980689fae2ce9cb6e
52,042
import logging


def _get_task_counts(rows):
    """Calculate number of true/false tasks and maximum achievable score."""
    count_true = count_false = 0
    max_score = None
    for row in rows:
        if not row.id.property:
            logging.info("Missing property for task %s.", row.id)
            continue
        expected_result = row.id.expected_result
        if not expected_result:
            continue
        if expected_result.result is True:
            count_true += 1
        elif expected_result.result is False:
            count_false += 1
        row_max_score = row.id.property.max_score(expected_result)
        if row_max_score is not None:
            max_score = row_max_score + (max_score or 0)
    return max_score, count_true, count_false
b9b6dd4f8abcbe2eee9275ae465047448e1a5ac7
52,043
from typing import List


def kadane_algorithm(array: List[int]) -> int:
    """
    Use Kadane's algorithm to solve the max sum subarray problem.

    :param array: the array to process
    :return: the maximum value amongst all consecutive subarrays.

    >>> kadane_algorithm([34, -50, 42, 14, -5, 86])
    137
    >>> kadane_algorithm([-5, -1, -8, -9])
    0
    """
    current_max_ending = 0
    max_ending = 0
    for v in array:
        current_max_ending += v
        if max_ending < current_max_ending:
            max_ending = current_max_ending
        if current_max_ending < 0:
            current_max_ending = 0
    return max_ending
de03b9fe5f181ae9356ccaee971366d8b5aa7159
52,045
def slurp(filename):
    """
    Load a file
    """
    fp = open(filename)
    try:
        return fp.read()
    finally:
        fp.close()
acc2d8746b0ffb5cc971609b6576bce6fc2da1c7
52,047
import re


def filter_remove_device_sw_log_prefix(line):
    """
    Remove the file/line information in log messages produced by device
    software LOG() macros.
    """
    # See base_log_internal_core() in lib/base/log.c for the format description.
    pattern = r'^[IWEF?]\d{5} [a-zA-Z0-9._-]+:\d+\] '
    if isinstance(line, bytes):
        return re.sub(bytes(pattern, encoding='utf-8'), b'', line)
    else:
        return re.sub(pattern, '', line)
c69ce2ecc96370dd1c55ce47cb38ce59aa759a11
52,060
def build_request_body(type, id, attributes=None, relationships=None):
    """Build a request body object.

    A body JSON object is used for any of the ``update`` or ``create``
    methods on :class:`Resource` subclasses. In normal library use you
    should not have to use this function directly.

    Args:
        type(string): The resource type for the attribute
        id(uuid): The id of the object to update. This may be ``None``

    Keyword Args:
        attributes(dict): A JSON dictionary of the attributes to set
        relationships(dict): A JSON dictionary of relationships to set

    Returns:
        A valid attribute dictionary. Often used in the ``update`` or
        ``create`` :class:`Resource` methods.
    """
    result = {
        "data": {
            "type": type
        }
    }
    data = result['data']
    if attributes is not None:
        data['attributes'] = attributes
    if relationships is not None:
        data['relationships'] = relationships
    if id is not None:
        data['id'] = id
    return result
0d6a9029446fe0cb250cb88c6fc4bbf57d24afb9
52,061
def varchar(length):
    """
    Factory for a character length validator of the specified length.
    """
    length = int(length)

    def char_length_validator(string):
        """
        Validate a string ensuring that it doesn't exceed a maximum length.
        """
        if string is None:
            return
        string = str(string)
        if len(string) > length:
            raise ValueError("Value '%s' exceeds character limit "
                             "of %i." % (string, length))
        return string

    return char_length_validator
a0d498c26528f1f0e7156a7b2fda632db1d65682
52,064
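A short usage sketch of the varchar factory above:

validate = varchar(5)
print(validate("abc"))   # 'abc'
# validate("too long!")  # would raise ValueError: exceeds character limit of 5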
import hashlib


def hash(v, encoding="utf-8"):
    """return hash value for string v"""
    return hashlib.md5(v.encode(encoding)).hexdigest()[:6]
da7464f61fa59b5f92629a9de32d5e7f1c22240e
52,068
def output_dir(tmpdir):
    """Pytest fixture returning the created `output` directory path as a string."""
    return str(tmpdir.mkdir('output'))
d1c74b08792d423b10bdb1e332fae65ab6ee4752
52,074
def iou(box1, box2):
    """Compute the Intersection-Over-Union of two given boxes.

    Args:
        box1: array of 4 elements [cx, cy, width, height].
        box2: same as above

    Returns:
        iou: a float number in range [0, 1]. iou of the two boxes.
    """
    lr = min(box1[0] + 0.5 * box1[2], box2[0] + 0.5 * box2[2]) - \
        max(box1[0] - 0.5 * box1[2], box2[0] - 0.5 * box2[2])
    if lr > 0:
        tb = min(box1[1] + 0.5 * box1[3], box2[1] + 0.5 * box2[3]) - \
            max(box1[1] - 0.5 * box1[3], box2[1] - 0.5 * box2[3])
        if tb > 0:
            intersection = tb * lr
            union = box1[2] * box1[3] + box2[2] * box2[3] - intersection
            return intersection / union
    return 0
c515d2aa81ff12841e247a3ea925bb312a6c931d
52,078
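A worked example for iou above, using two unit squares offset by half a width:

a = [0.0, 0.0, 1.0, 1.0]  # centred at origin, 1x1
b = [0.5, 0.0, 1.0, 1.0]  # shifted right by 0.5
print(iou(a, b))          # 0.5 overlap / 1.5 union = 0.3333...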
def _MakeCounterName(key_id, tag):
    """Helper to create a sharded Counter name.

    Args:
        key_id: Int unique id (usually a model entity id).
        tag: String tag which hints at the counter purpose.

    Returns:
        String to be used as a sharded Counter name.
    """
    return '%s_%s' % (key_id, tag)
5b1ebf20f4a59fe5b2e0ed076a83355a61ce97a1
52,093
import re


def just_one_dot(text):
    """Some Zabbix metrics can end up with multiple . characters.
    Replace with a single one"""
    rx = re.compile(r"\.+")
    return rx.sub(".", text)
d32f2a0373a9c3c1a38599feeb247145b79bea80
52,095
import torch


def classify(preds, labels, cacu_rate=False, topk=None):
    """
    Accuracy metrics for classification.

    :param preds: [batch, logits]
    :param labels: [labels,]
    :param cacu_rate: compute accuracy rates instead of counts
    :param topk: list(int), indicating which top-k values to compute
    :return: if cacu_rate: [topk_rate, ...]
             else: total, [topk_count, ...]
    """
    if topk is None:
        topk = (1, 5)
    k = topk
    _, maxk = torch.topk(preds, max(*k), dim=-1)
    total = labels.size(0)
    test_labels = labels.view(-1, 1)  # reshape labels from [n] to [n,1] to compare [n,k]

    if cacu_rate:
        return [(test_labels == maxk[:, 0:i]).sum().item() / total for i in k]
    else:
        return total, [(test_labels == maxk[:, 0:i]).sum().item() for i in k]
21cb44d41cc81bdcf81e8ca60cc55a13d89fe21e
52,101
def count_all_pixels_per_block(x, y):
    """
    Calculate total number of pixels in a rectangular block

    Parameters
    ----------
    x : int
        block width in pixels
    y : int
        block height in pixels

    Returns
    -------
    number_of_pixels : int
        total number of pixels in a block
    """
    return x * y
7389386d2259422d5fca36f70d9318d57e5fab69
52,103
def analyse_structure(structure, mode="total", diamond=False):
    """
    Use either common neighbor analysis or the diamond structure detector

    Args:
        structure (pyiron_atomistics.structure.atoms.Atoms): The structure
            to analyze.
        mode ("total"/"numeric"/"str"): Controls the style and level of
            detail of the output.
            - total : return number of atoms belonging to each structure
            - numeric : return a per atom list of numbers - 0 for unknown,
              1 fcc, 2 hcp, 3 bcc and 4 icosa
            - str : return a per atom string of structures
        diamond (bool): Flag to either use the diamond structure detector
            or the common neighbor analysis.

    Returns:
        (depends on `mode`)
    """
    if not diamond:
        return structure.analyse.pyscal_cna_adaptive(
            mode=mode, ovito_compatibility=True
        )
    else:
        return structure.analyse.pyscal_diamond_structure(
            mode=mode, ovito_compatibility=True
        )
51b06765cd058c59cc9fca877bcc3db977486e18
52,115
import six


def ensure_bytes(obj):
    """
    If the input object is a string, make sure it is returned as a Byte
    string, as follows:

    * If the input object already is a Byte string, it is returned unchanged.
    * If the input object is a Unicode string, it is converted to a Byte
      string using the UTF-8 encoding.
    * Otherwise, the input object was not a string and is returned unchanged.
    """
    if isinstance(obj, six.text_type):
        return obj.encode("utf-8")
    return obj
e7f93927577fa1354743ec85a0b65060f25c61b2
52,119
def bounds(sizes):
    """Convert sequence of numbers into pairs of low-high pairs

    >>> bounds((1, 10, 50))
    [(0, 1), (1, 11), (11, 61)]
    """
    low = 0
    rv = []
    for size in sizes:
        rv.append((low, low + size))
        low += size
    return rv
8196f4cba37a23e89d475859ea6958b7081f5736
52,121
def parsecommon(parser):
    """
    Parse common arguments
    """
    parser.add_argument("--bufsz", help="size of buffer", type=int, default=20)
    parser.add_argument("--nch", help="Number of cells", type=int, default=1000)
    parser.add_argument("--fs", help="spike sampling rate", type=int, default=1000)
    parser.add_argument("--port", help="port to connect to", type=int, default=29170)
    return parser
eb1ad9e0769a7d999225ecd2d8dce1304621c53e
52,122
def DiffValueLists(new_list, old_list):
    """Given an old list and a new list, return the added and removed items."""
    if not old_list:
        return new_list, []
    if not new_list:
        return [], old_list

    added = []
    removed = old_list[:]  # Assume everything was removed, then narrow that down
    for val in new_list:
        if val in removed:
            removed.remove(val)
        else:
            added.append(val)

    return added, removed
4c3dc471bd0e9e9aea3b5f0b8c906eb1ca27d196
52,124
def every_item_but_one(l: list, idx: int) -> list:
    """
    Returns every item in the list except for the one at index idx.

    :param l: a list to process.
    :param idx: the index to be excluded from the list.
    :return: the list l minus the item at index idx.
    """
    return [item for i, item in enumerate(l) if i != idx]
52b8ebe4af24226745a052b5577c5c470c18f56d
52,125
import hashlib


def mktempname(salt, instance):
    """Create /tmp file name for compile output."""
    m = hashlib.md5()
    m.update(salt)
    hd = m.hexdigest()
    return "/tmp/%s.%d.err.txt" % (hd, instance)
a17490c0dfc9fba83c960da3c748b6da9985e725
52,128
def modify_readme(content: str) -> str:
    """Modify README.md."""
    content = content.replace("myst-parser", "myst-docutils")
    content = content.replace(
        "# MyST-Parser",
        "# MyST-Parser\n\nNote: myst-docutils is identical to myst-parser, "
        "but without installation requirements on sphinx",
    )
    content = content.replace("myst-docutils.readthedocs", "myst-parser.readthedocs")
    content = content.replace(
        "readthedocs.org/projects/myst-docutils", "readthedocs.org/projects/myst-parser"
    )
    return content
9325dae3f54ec6d743249c12a5d398e18cbf9734
52,130
def create_metadata(
    input_name_list,
    input_type_list,
    input_shape_list,
    output_name_list,
    output_type_list,
    output_shape_list,
    model_input_list=None,
    model_output_list=None,
    custom_meta_dict=None,
):
    """
    Facilitates creation of a metadata dictionary for model packaging (MAR).
    The method renders user-supplied information compliant with the standard
    expected format for the metadata.

    It has to be noted that the format of metadata is completely up to the
    user. The only requirement is that metadata should always be supplied as
    a json-serializable dictionary. This method makes metadata more standard
    by capturing information about model inputs and outputs in fields that
    are conventionally used and accepted across the Eisen ecosystem. That is,
    this method implements a convention about the format of metadata.

    :param input_name_list: A list of strings representing model input names Eg. ['input'] for single-input model
    :type input_name_list: list
    :param input_type_list: A list of strings for input types Eg. ['ndarray'] matching exp. type for 'input'
    :type input_type_list: list
    :param input_shape_list: A list of shapes (list) representing expected input shape Eg. [[-1, 3, 244, 244]]
    :type input_shape_list: list
    :param output_name_list: List of strings representing model output names Eg. ['logits', 'prediction']
    :type output_name_list: list
    :param output_type_list: List of strings representing model output types Eg. ['ndarray', 'str']
    :type output_type_list: list
    :param output_shape_list: List of shapes (list) for output shape Eg. [[-1, 10], [-1]]
    :type output_shape_list: list
    :param model_input_list: List of input names that should be used as model inputs (default all input_name_list)
    :type model_input_list: list
    :param model_output_list: List of output names that should be obtained from the model (default all output_name_list)
    :type model_output_list: list
    :param custom_meta_dict: A json-serializable dictionary containing custom information (Eg. options or notes)
    :type custom_meta_dict: dict

    :return: Dictionary containing metadata in standardized format
    """
    if model_input_list is None:
        model_input_list = input_name_list

    if model_output_list is None:
        model_output_list = output_name_list

    metadata = {
        'inputs': [],
        'outputs': [],
        'model_input_list': model_input_list,
        'model_output_list': model_output_list,
        'custom': {}
    }

    if custom_meta_dict is None:
        custom_meta_dict = {}

    assert len(input_name_list) == len(input_type_list) == len(input_shape_list)
    assert len(output_name_list) == len(output_type_list) == len(output_shape_list)

    for name, typ, shape in zip(input_name_list, input_type_list, input_shape_list):
        metadata['inputs'].append({'name': name, 'type': typ, 'shape': shape})

    for name, typ, shape in zip(output_name_list, output_type_list, output_shape_list):
        metadata['outputs'].append({'name': name, 'type': typ, 'shape': shape})

    metadata['custom'] = custom_meta_dict

    return metadata
410af3785b009d9f7b3a6c43d14613779bad66c4
52,133
def homogeneous_to_euclidean(xh):
    """
    Transforms xh to be in euclidean coordinates

    Parameters
    ----------
    xh: numpy array (3,N), (4,N)
        each column of the array is a point in homogeneous coordinates

    Returns
    -------
    x: numpy array
        xh in euclidean coordinates
    """
    return xh[0:-1, :]/xh[-1, :]
53c2c47d1bd02fc46713d178323648655f93aee2
52,139
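A quick numpy check of homogeneous_to_euclidean above:

import numpy as np

xh = np.array([[2.0, 4.0],
               [6.0, 8.0],
               [2.0, 4.0]])           # homogeneous (3, N); last row is the scale
print(homogeneous_to_euclidean(xh))  # [[1. 1.]
                                      #  [3. 2.]]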
def get_j_indices(plaintext):
    """Description: for a given string input, returns a list containing the
    indices in which the letter 'j' occurs.

    Arguments:
        plaintext (string): string used to either encode or decode.

    Returns:
        j_indices (list): list contains all indices in which 'j' occurs.
    """
    j_indices = []
    count = 0
    space_removed_plaintext = [i for i in plaintext if i != ' ']
    for i in space_removed_plaintext:
        if i == 'j':
            j_indices.append(count)
        count += 1
    return j_indices
48ee4e872df0bd8da4780369644ae29f42bc5dfa
52,140
import re


def swallow(text: str, pattern_matcher: re.Pattern) -> str:
    """
    Utility function internal to this module

    :param text: text to clean
    :param pattern_matcher: pattern to match
    :return: the text without the matched pattern; spaces are not substituted
    """
    idx_to_omit = []
    for item in pattern_matcher.finditer(text):
        idx_to_omit.insert(0, item.span())
    for start, end in idx_to_omit:
        text = text[:start] + text[end:]
    return text.strip()
65d24b96f78aa9e0f318d0b4ec5da9251843125e
52,144
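A usage sketch of swallow above with an assumed tag-stripping pattern (not from the source):

import re

tags = re.compile(r"<[^>]+>")
print(swallow("<b>hello</b> world", tags))  # 'hello world'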
def count_items(alist):
    """
    Count number of items in alist.
    """
    count = 0
    for item in alist:
        count = count + 1
    return count
69675913ffbcc1e6021c2c1002d4fa039ecc63b4
52,148
from typing import Tuple


def euklids_algorithm(a: int, b: int) -> Tuple[int, int, int]:
    """
    Euklids algorithm returning GCD = X*a + Y*b.

    >>> a, b = 3, 13
    >>> gcd, ax, by = euklids_algorithm(a, b)
    >>> gcd == ax*a + by*b
    True
    """
    if a == 0:
        return b, 0, 1
    gcd, x1, y1 = euklids_algorithm(b % a, a)
    x = y1 - (b // a) * x1
    y = x1
    return gcd, x, y
540d0db350c20ca8adb98d598ccaaa7bfddda3bb
52,149
def format_expected_adviser(adviser):
    """
    Formats Adviser object into format expected to be returned by
    `NestedAdviserWithEmailAndTeamField`.
    """
    if not adviser:
        return None

    return {
        'contact_email': adviser.contact_email,
        'dit_team': {
            'id': str(adviser.dit_team.pk),
            'name': adviser.dit_team.name,
        },
        'id': str(adviser.pk),
        'name': adviser.name,
    }
539607b064a66beb7d688c33b4dd914199d4b04a
52,163
def split_columns(string):
    """
    Splits the return-columns argument or reads it from .txt

    --return-columns 'temperature c, "heat$"' -> ['temperature c', '"heat$"']
    --return-columns my_vars.txt -> ['temperature c', '"heat$"']
    """
    if string.endswith('.txt'):
        with open(string, 'r') as file:
            return [col.rstrip('\n').strip() for col in file]
    return [s.strip() for s in string.split(',')]
0977f978a2ae5d22eb7b873901a6f4bfa6c92cc4
52,164
import math


def calculate_padding_dimensions(t):
    """
    Calculate list of PyTorch padding values to extend the spatial dimension
    of input tensor to multiples of 32.

    Args:
        t: The ``torch.Tensor`` to pad.

    Return:
        A tuple ``(p_l_n, p_r_n, p_l_m, p_r_m)`` containing the left and
        right padding for the second to last dimension (``p_l_m, p_r_m``)
        and for the last dimension (``p_l_n, p_r_n``).
    """
    shape = t.shape

    n = shape[-1]
    d_n = math.ceil(n / 32) * 32 - n
    p_l_n = d_n // 2
    p_r_n = d_n - p_l_n

    m = shape[-2]
    d_m = math.ceil(m / 32) * 32 - m
    p_l_m = d_m // 2
    p_r_m = d_m - p_l_m

    return (p_l_n, p_r_n, p_l_m, p_r_m)
65a1cfb0d8ccba9fdf7992aa8d7cda173b202398
52,166
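A quick check of calculate_padding_dimensions above; the returned tuple is ordered as torch.nn.functional.pad expects:

import torch
import torch.nn.functional as F

t = torch.zeros(1, 1, 30, 45)
pads = calculate_padding_dimensions(t)
print(pads)                  # (9, 10, 1, 1): 45 -> 64, 30 -> 32
print(F.pad(t, pads).shape)  # torch.Size([1, 1, 32, 64])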
def diff_date(truth_date, computed_date):
    """Compare two dates. Returns (match?, reason for mismatch)."""
    if computed_date == truth_date:
        return (True, '')
    if computed_date is None:
        return (False, 'Missing date')
    if truth_date is None:
        return (False, 'Should be missing date')
    return (False, 'complex')
3ccca3899587ab5322004e1024d6dc81a06a2066
52,168
def UrnToFlowId(urn):
    """Converts given URN string to a flow id string."""
    components = urn.split("/")
    return components[-1]
f98b777f9439dcf5c7e72445019e87d9d92f989c
52,169
def verify_object_exists(obj: dict, obj_path: list):
    """Verifies that a given object has a given sub-object defined by obj_path"""
    if not obj_path:
        return True
    if obj_path[0] in obj:
        return verify_object_exists(obj[obj_path[0]], obj_path[1:])
    else:
        return False
b782b1f8762fa6a0e61f4b4102534197a3251c45
52,177
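A usage sketch of verify_object_exists above on a nested dict (the config keys are made up):

cfg = {"server": {"tls": {"cert": "/etc/ssl/cert.pem"}}}
print(verify_object_exists(cfg, ["server", "tls", "cert"]))  # True
print(verify_object_exists(cfg, ["server", "auth"]))         # False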
def clean_attr(attr):
    """Append @ to attributes and resolve text -> text() for XPath."""
    if attr:
        if 'text' in attr:
            return 'text()'
        else:
            attr = attr.lstrip('@')
            if attr:
                return '@' + attr
    return None
a14d32504d09051d9a442048c458e43751686d45
52,178
def bh_mass_from_bulge_mass(bulge_mass):
    """
    Kormendy & Ho (2013) fitting function for the Mbh--Mbulge power law
    relation.

    Parameters
    ----------
    bulge_mass : ndarray
        Numpy array of shape (ngals, ) storing the stellar mass of the bulge
        in units of solar mass assuming h=0.7

    Returns
    -------
    bh_mass : ndarray
        Numpy array of shape (ngals, ) storing black hole mass

    Examples
    --------
    >>> import numpy as np
    >>> ngals = int(1e4)
    >>> bulge_mass = np.logspace(8, 12, ngals)
    >>> bh_mass = bh_mass_from_bulge_mass(bulge_mass)
    """
    prefactor = 0.49*(bulge_mass/100.)
    return prefactor*(bulge_mass/1e11)**0.15
e578e26c9b6a6b71ca217fcc7f8579ace07f3e93
52,181
def predecessor_to_path(pred, source, target):
    """
    generate a path from source to target by pred

    :param pred: a dict mapping each node to its predecessor node on the
        shortest path from the specified starting node:
        e.g. pred == {'b': 'a', 'c': 'b', 'd': 'c'}
    :return: a list of the vertices in order along the shortest path.
        e.g. path = ['a', 'b', 'c', 'd']
    """
    v = target
    path = [v]
    while v != source:
        v = pred[v]
        path.append(v)
    path.reverse()
    return path
16bb0aba1bbf671e88430ddcdf4aa07ab0acc5c7
52,184
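Running predecessor_to_path above on the predecessor map from its docstring:

pred = {'b': 'a', 'c': 'b', 'd': 'c'}
print(predecessor_to_path(pred, 'a', 'd'))  # ['a', 'b', 'c', 'd']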
def dist(x, y, S):
    """
    Mahalanobis distance: d(x,y) = \\sqrt( (x-y)^T*S^-1*(x-y) )
    where S^-1 = diag(1/s_i^2 for s_i in std)
    Input must be torch.Tensor((1,N)) for x, y, S
    """
    d = ((x-y).pow(2)/S).sum(1).sqrt()
    return d
676171a62ca828adbc140177c497c15124084817
52,190
def parse_params(param_str):
    """
    Convert a string of the form name='value', ... into a dictionary.

    Leading and trailing spaces are stripped, as are line-feed,
    carriage-return, tab, single-quote and double-quote characters.
    """
    params = {}
    for param in param_str.split(','):
        dirty = param.split('=')
        key = dirty[0].strip(' \t\n\r\'\"')
        value = dirty[1].strip(' \t\n\r\'\"')
        params.update({key: value})
    return params
898b5450be2ddf65cb9a20e58233b01cc5d8e079
52,192
from typing import Tuple


def split_checkpoint_filename(filename: str) -> Tuple[str, str]:
    """
    Given a filename like "model.ckpt-20.index" return
    ("model", "model.ckpt-20") or ("", "") if the filename is not a valid
    checkpoint file.
    """
    parts = filename.rsplit(".", 2)
    if len(parts) < 3 or not parts[1].startswith("ckpt-"):
        return ("", "")
    cname = parts[0]
    basename = ".".join([cname, parts[1]])
    return (cname, basename)
6512fa9d0c3e47404a553b527c4a8cc1fcb5d320
52,193
def calc_total_curvature_abc(bias, std_error, quadratic_coef):
    """
    Calculate the total curvature of the level surface of the weight vector,
    where the set of weights in the surface are those where the weighted MLE
    equals the original (i.e. the equal-weighted) MLE.

    Parameters
    ----------
    bias : 1D ndarray.
        Contains the approximate bias of the MLE estimates for use in the
        ABC confidence intervals.
    std_error : 1D ndarray.
        Contains the standard error of the MLE estimates for use in the ABC
        confidence intervals.
    quadratic_coef : 1D ndarray.
        Contains a measure of nonlinearity of the MLE estimation function as
        one moves in the 'least favorable direction.'

    Returns
    -------
    total_curvature : 1D ndarray of scalars.
        Denotes the total curvature of the level surface of the weight
        vector.

    References
    ----------
    Efron, Bradley, and Robert J. Tibshirani. An Introduction to the
    Bootstrap. CRC press, 1994. Section 22.6. Equation 22.39.
    """
    total_curvature = (bias / std_error) - quadratic_coef
    return total_curvature
9e3d263b0463d7842ae8a125c76241bae03984a5
52,195
def organize_columns(columns):
    """Locates data column keys and their respective mean data keys.

    Args:
        columns: iterable of dataframe column names

    Returns:
        keywords: iterable of tuples containing (data key, data mean key | '')
    """
    located = []
    keywords = []
    for kw in sorted(columns):
        if kw in located:
            continue
        mean_keys = [mkey for mkey in columns if mkey.startswith(kw+'_MEAN')]
        if not mean_keys:
            keywords.append((kw, ''))
            continue
        for mean_key in mean_keys:
            keywords.append((kw, mean_key))
            located.append(mean_key)
    return keywords
d07d4c5aad71741e24c256e66ec3d1ca2bfa187a
52,196
def find_stem(stratified_string: str):
    """
    Find the stem of the compartment name: the text leading up to the first
    occurrence of the joining string. Should run slightly faster than using
    find_name_components.
    """
    return stratified_string.split("X")[0]
867d01d2b8650ed6943bb507eba3977004dd31a4
52,209
def get_hms_for_seconds(seconds):
    """Returns the number of seconds in hours:minutes:seconds format."""
    m, s = divmod(seconds, 60)
    h, m = divmod(m, 60)
    hms = "%d hours: %02d min: %02d sec" % (h, m, s)
    return hms
41fe7fbe6e95a73aa973b4fb316c094e543afa76
52,213
def zmatrix(rotor_lst):
    """Get the Z-Matrix for the rotors"""
    return rotor_lst[0][0].zma
c0c0cf2a950d2338894121415cbaf069d7c7ff1a
52,215
from pathlib import Path


def env_devel_file(osparc_simcore_root_dir: Path) -> Path:
    """Path to osparc-simcore/.env-devel file"""
    env_devel_fpath = osparc_simcore_root_dir / ".env-devel"
    assert env_devel_fpath.exists()
    return env_devel_fpath
104b0df3f67c44bd0cd5881ea199d543acc95b0c
52,216
def get_target_f0_list(f0_list: list, rapid_f0_change_indices: list, adjusted_widths: list):
    """Compute the reference values (means) used for correction.

    For each pair of central points, the mean of the two endpoint values
    lying width steps before and after them is used as the correction value.
    Return those values as a list.
    """
    # just in case
    assert len(rapid_f0_change_indices) == len(adjusted_widths)
    # f0 values used as the correction reference
    target_f0_list = []
    # For each location with a steep change, compute the means in order from the front.
    for f0_idx, width in zip(rapid_f0_change_indices, adjusted_widths):
        f0_left = f0_list[f0_idx - width]
        f0_right = f0_list[f0_idx + width + 1]
        target_f0 = (f0_left + f0_right) / 2
        target_f0_list.append(target_f0)
    # Return the list of target f0 values.
    return target_f0_list
aa6623aad4230a882fc03d6348b284da72510dec
52,220
def get_experiment(mlflow_client, exp_id_or_name):
    """Gets an experiment either by ID or name."""
    exp = mlflow_client.get_experiment_by_name(exp_id_or_name)
    if exp is None:
        try:
            exp = mlflow_client.get_experiment(exp_id_or_name)
        except Exception:
            raise Exception(
                f"Cannot find experiment ID or name '{exp_id_or_name}'. "
                f"Client: {mlflow_client}")
    return exp
639daa3d23de767dbb0e6ca3f99b01d1b0185d85
52,223
import re


def parse_keyslots_luks2(luks_dump):
    """Lists the used keyslots in a LUKS2 device. These may or may not be
    bound to clevis.

    Return: <used keyslots> <error>
    """
    if not luks_dump:
        return None, {"msg": "Empty dump provided"}

    # This is the pattern we are looking for:
    #   0: luks2
    pattern = r"^\s+(\d+): luks2$"
    match = re.findall(pattern, luks_dump, re.MULTILINE | re.DOTALL)
    if not match:
        errmsg = "parse_keyslots_luks2: no used key slots"
        return None, {"msg": errmsg}
    return match, None
ae62f3b736723aff89aa4793a1f2c04a955611bf
52,229
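A minimal check of parse_keyslots_luks2 above against an illustrative fragment shaped like cryptsetup luksDump output (the dump text is made up):

dump = """Keyslots:
  0: luks2
  1: luks2
"""
print(parse_keyslots_luks2(dump))  # (['0', '1'], None)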
import pickle


def get_challenge_by_name(database, challenge_name, ctf_channel_id):
    """
    Fetch a Challenge object in the database with a given name and ctf
    channel ID. Return the matching Challenge object if found, or None
    otherwise.
    """
    ctfs = pickle.load(open(database, "rb"))
    for ctf in ctfs:
        if ctf.channel_id == ctf_channel_id:
            for challenge in ctf.challenges:
                if challenge.name == challenge_name:
                    return challenge
    return None
12cba200a23f79ea604dc3a6743b1c4dab7b3b2b
52,244
def calc_arc_extent(day: int, hour: int, minutes: int) -> int:
    """
    Returns the value, in degrees, to use to draw the arc representing the
    current minutes. It is negative to run clockwise. Hour and day are
    passed to handle the edge cases.

    :param day: current day
    :param hour: current hour
    :param minutes: current minute
    :return: arc extent in degrees (int)
    """
    extent = -1 * minutes * 6
    if minutes == 0 and hour == 5 and day == 1:
        return 0
    if minutes == 0:
        return -359
    return extent
a05f969bfc87e7597ef3262b2a15666a0a634247
52,250
def round(x: int, divisor: int) -> int:
    """Round x to the multiple of divisor not greater than x"""
    return (x // divisor) * divisor
e598827058d5c7c37e32f8d213690875403fbe8d
52,251
def bin_dec(bin):
    """Conversion binary -> decimal.

    Needed to calculate decimal variable value from binary coded genome.
    """
    dec = 0.0
    bin.reverse()
    for i in range(0, len(bin)):
        dec += (bin[i] * (2**i))
    return dec
f0064c6ec248a61e4c88c56d97b1203fd73abd82
52,252
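A quick check of bin_dec above; note it reverses the input list in place:

genome = [1, 0, 1, 1]   # MSB first
print(bin_dec(genome))  # 11.0
print(genome)           # [1, 1, 0, 1] -- reversed as a side effect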