content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import requests
import json


def get_ipaddresses(auth):
    """
    Retrieve the list of IP addresses from an Aruba OS switch.

    :param auth: AOSSAuth class object returned by pyarubaoss.auth
    :return: list of ip address elements, or an error string on request failure
    :rtype: list
    """
    url = "http://" + auth.ipaddr + "/rest/" + auth.version + "/ipaddresses"
    try:
        response = requests.get(url, headers=auth.cookie)
        return json.loads(response.text)['ip_address_subnet_element']
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " get_ipaddresses: An Error has occured"
1ef732670c973c70eda56c9d3957835219ce1485
20,189
import numpy


def get_hamming(codons):
    """
    Get the hamming distance between codons, in {0, 1, 2, 3}.

    @param codons: sequence of lower case codon strings
    @return: square integer matrix of pairwise hamming distances
    """
    size = len(codons)
    ham = numpy.zeros((size, size), dtype=int)
    for row, left in enumerate(codons):
        for col, right in enumerate(codons):
            # Count mismatching positions; bools sum as 0/1.
            ham[row, col] = sum(a != b for a, b in zip(left, right))
    return ham
c9a2502bb19b827bf445b9bb34dfcb484218db3c
20,190
def generate_fibonacci_series(n):
    """Generate fibonacci series from 0 to nth term.

    Args:
        n: nth term of fibonacci series; must be a positive int.

    Returns:
        A list of ints with the first n Fibonacci numbers.

    Raises:
        ValueError: if n is not an int or is not positive.
    """
    if not isinstance(n, int):
        raise ValueError('nth term must be an integer')
    if n <= 0:
        raise ValueError("nth term must be postive")
    a, b = 0, 1
    fibonacci_series = []
    for _ in range(n):
        # Bug fix: the old loop appended str(n1) while the n == 1 branch
        # returned [0] as an int — the series is now consistently numeric,
        # matching the docstring's "list of numbers".
        fibonacci_series.append(a)
        a, b = b, a + b
    return fibonacci_series
8796ff1bc97a944b644ee7fe869e4b2407b6994e
20,192
def generate_catalog_mags(instrument_mags, color, model):
    """
    Generate catalog magnitudes from instrumental magnitudes.

    The given ``model`` maps ``color`` to a magnitude correction which is
    added to the instrumental magnitudes.
    """
    correction = model(color)
    return instrument_mags + correction
0b39a7dae5eb1f573c62b25b7053acebf28e91d2
20,194
import logging
import sys


def setup_logging(log_name, log_level):
    """Configure basic logging and return a logger.

    params: log_name, log_level
    returns: logger

    Bug fix: the original did ``log = logging.getLogger().setLevel(...)``;
    setLevel() returns None, so the function returned None instead of the
    logger promised by the docstring, and ``log_name`` was never used.
    """
    log_format = "%(asctime)s - %(levelname)s - %(filename)s - %(lineno)d"
    logging.basicConfig(level=log_level.upper(),
                        format=log_format,
                        datefmt="%Y-%m-%d %H:%M:%S")
    logging.StreamHandler(sys.__stdout__)
    logger = logging.getLogger(log_name)
    logger.setLevel(log_level.upper())
    return logger
983a399acb54242f53ebd03b5a6e8d140f360039
20,195
import os


def ReadLsbRelease(sysroot):
    """Reads the /etc/lsb-release file out of the given sysroot.

    Args:
      sysroot: The path to sysroot of an image to read sysroot/etc/lsb-release.

    Returns:
      The lsb-release file content in a dictionary of key/values.
    """
    lsb_release_file = os.path.join(sysroot, 'etc', 'lsb-release')
    lsb_release = {}
    with open(lsb_release_file, 'r') as f:
        for line in f:
            line = line.strip()
            if not line:
                # Tolerate blank lines instead of raising IndexError.
                continue
            # Bug fix: split on the first '=' only, so values that themselves
            # contain '=' are kept intact (the old split('=') truncated them).
            key, _, value = line.partition('=')
            lsb_release[key] = value
    return lsb_release
846a57c50b5588c1b1c682becb5cca6871a3852a
20,196
from typing import List


def generateBasisIndexList(basisStrList: List[str], sysLevel: int) -> List[int]:
    """
    Return a list of integers which indicates the basis indices according to
    the input basis string list.

    For example, ``generateBasisIndexList(['00', '01', '10', '11'], 3)``
    will return: ``[0, 1, 3, 4]``.

    :param basisStrList: basis string list
    :param sysLevel: the energy level of qubits in the system.
    :return: basis indices list
    """
    strLen = [len(item) for item in basisStrList]
    assert max(strLen) == min(strLen), "All input digital strings should have same length."

    def toIndex(basisStr: str) -> int:
        """Interpret basisStr as a base-sysLevel number (Horner's scheme)."""
        value = 0
        for charN in basisStr:
            dig = int(charN)
            assert dig < sysLevel, f"Digit '{dig}' is greater than sysLevel '{sysLevel}'."
            value = value * sysLevel + dig
        return value

    return [toIndex(strNum) for strNum in basisStrList]
aa11cd27a134b5ec432e957578908a64e2c1cc9e
20,197
def get_urn_from_raw_update(raw_string):
    """
    Return the URN of a raw group update

    Example: urn:li:fs_miniProfile:<id>
    Example: urn:li:fs_updateV2:(<urn>,GROUP_FEED,EMPTY,DEFAULT,false)
    """
    # Take everything after the first '(' and before the first ','.
    inside_parens = raw_string.split("(")[1]
    return inside_parens.split(",")[0]
fa96086f79462354f70a19e4475da9e62a3e0046
20,198
def standardize_action(org_action):
    """Normalize a raw action dict to {'flight', 'name', 'status'}.

    If status is not book or change the flight number will be empty.
    name is always required.
    """
    # Human raters sometimes end a name with '.' or ','. Names in intents are
    # already standardized with '-' replaced by space, so only letters and
    # spaces are kept here.
    raw_name = org_action['name'].strip()
    name = ''.join(ch for ch in raw_name if ch.isalpha() or ch == ' ')

    status = org_action['status']

    # Ground truth can have multiple flights; prediction and real data have
    # at most one, and a bare int is normalized into a one-element list.
    flight = org_action['flight']
    if type(flight) == int:
        flight = [flight]

    if status == 'book' or status == 'change':
        # Drop anything that is not a valid flight number, e.g. the empty
        # flights produced by early versions of the UI.
        new_flight = [f for f in flight if int(f) >= 1000]
    else:
        # User-selected flights without a bookable status are ignored.
        new_flight = []

    return {'flight': new_flight, 'name': name, 'status': status}
e51508360331ec26c05a7f4bb71bc5df8ab1e59c
20,199
def convert_into_delta_times(score):
    """
    Transform start_time into delta_time and returns new score.

    Each event keeps its payload but its time becomes the gap to the next
    event's start; the last event is emitted with a fixed time of 1000.
    """
    delta_score = []
    # Shifted pairwise walk: [1, 2, 3] -> (1, 2), (2, 3)
    for current, following in zip(score[:-1], score[1:]):
        delta_score.append([following[0] - current[0], current[1]])
    delta_score.append([1000, score[-1][1]])
    return delta_score
adb17680fe96049577a06139981f914f93b8b7cb
20,200
def close_window(driver, w):
    """Close the window associated with the given window handle.

    :param driver: WebDriver-style object exposing ``switch_to`` — presumably
        Selenium; verify against callers.
    :param w: window handle to switch to before closing.
    :return: whatever ``driver.close()`` returns (None for Selenium drivers).
    """
    driver.switch_to.window(w)
    return driver.close()
f0b8cc5abd6703f5a1a056ffb24925d1d4e2c8e0
20,202
import os def _get_job_dir(output_path: str, job_name: str) -> str: """Returns Beam processing job directory.""" return os.path.join(output_path, job_name)
8fb40f65ecf7d246ffaca899c7dfdc8e16a75904
20,205
def disable(f):
    """Mark a test as disabled.

    Sets ``__test__ = False`` so test collectors skip the callable, and
    returns the callable unchanged (usable as a decorator).
    """
    setattr(f, '__test__', False)
    return f
afd3851496472d65748ea67a1f3e4860f379451c
20,206
import os
import sqlite3


def create_db(dbFile, createSQLdict, LF=None):
    """
    Create an empty SQLite3 database on disk.

    Any existing file at ``dbFile`` is removed first, then every statement in
    ``createSQLdict`` (mapping of name -> CREATE SQL) is executed and committed.

    :param dbFile: path of the database file to (re)create
    :param createSQLdict: mapping whose values are SQL DDL statements
    :param LF: unused; kept for interface compatibility
    :return: True on success, False if any statement failed
    """
    if os.path.exists(dbFile):
        os.remove(dbFile)
    conn = None
    try:
        conn = sqlite3.connect(dbFile)
        cursor = conn.cursor()
        for sql in createSQLdict.values():
            cursor.execute(sql)
        conn.commit()
        cursor.close()
    except Exception:
        # Preserve the original best-effort contract: report failure rather
        # than raise.
        return False
    finally:
        # Bug fix: the original leaked the connection when execute() raised.
        if conn is not None:
            conn.close()
    return True
a4c88eb587d88a0aadb6f0126956e3bd679ed75b
20,207
def get_command(filename, box, speedup=1.0, extension="mp4"):
    """Creates ffmpeg command which crops and speeds up movie (if desired).

    :param filename: input movie path; output name gets a ``.cropped`` tag.
    :param box: dict with ``width``/``height``/``left``/``top`` crop values.
    :param speedup: playback factor; values within ~0.01 of 1.0 fall back to a
        crop-only command.
    :param extension: output container extension.
    :return: the ffmpeg command line as a single string.
    """
    outfile = filename.split(".")
    outfile = ".".join(outfile[:-1] + ["cropped", extension])
    w, h, l, t = (box[key] for key in ["width", "height", "left", "top"])
    return (
        " ".join(
            [
                "ffmpeg",
                "-i",
                filename,
                "-filter_complex",
                f'"[0]setpts={1/speedup:1.2f}*PTS[b];'
                f"[b] crop = {w}:{h}:{l}:{t}[c];"
                f'[0]atempo={speedup:1.2f}[a]"'
                f' -map "[c]" -map "[a]" {outfile}',
            ]
        )
        if abs(speedup - 1.0) > 0.009
        # Bug fix: this branch emitted a literal placeholder instead of
        # interpolating the input filename into the command.
        else f'ffmpeg -i {filename} -filter:v "crop={w}:{h}:{l}:{t}" {outfile}'
    )
a9baa36154f6f70a58bfa90516ee327b4054a2f5
20,208
from typing import List
from typing import Tuple


def tuple_zip(list1: List, list2: List) -> List[Tuple]:
    """Creates tuples of elements having same indices from two lists.

    doctests:
    >>> tuple_zip([1, 2], ['x', 'y'])
    [(1, 'x'), (2, 'y')]
    >>> tuple_zip([1, 2, 3, 4], ['a', 'b', 'c', 'd'])
    [(1, 'a'), (2, 'b'), (3, 'c'), (4, 'd')]
    """
    return [pair for pair in zip(list1, list2)]
dbd3a162dc55ea70122483591c82c187fe4f1411
20,212
def NoneSafeType(_type):
    """
    A hack for a "None-safe" typecaster. Given a type, it casts all values
    to that type as the type would, except that None is always cast to None.
    """
    def caster(value):
        # None passes through untouched; everything else goes through _type.
        return None if value is None else _type(value)
    return caster
d8b763ec10ba16faf151dc20acf68fcb35286197
20,213
import sys


def remap(data, mapping, verbose=False):
    """
    Map from the mapping file to the data file. In this step we figure out
    where on the tree we should place the color strip.

    :param data: dict of metagenome read id -> label
    :param mapping: dict of metagenome read id -> iterable of tree positions
    :param verbose: write progress information to stderr
    :return: dict of tree position -> {label: count}
    """
    if verbose:
        sys.stderr.write("There are {} keys in mapping\n".format(len(mapping.keys())))
        all_positions = set()
        for positions in mapping.values():
            all_positions.update(positions)
        sys.stderr.write("There are {} vals in mapping\n".format(len(all_positions)))
    ndata = {}
    for mgid, positions in mapping.items():
        label = data[mgid]
        for posn_in_tree in positions:
            counts = ndata.setdefault(posn_in_tree, {})
            counts[label] = counts.get(label, 0) + 1
    if verbose:
        sys.stderr.write("There are {} Keys in ndata\n".format(len(ndata.keys())))
    return ndata
f8bbe09b0d503bcc2b0f2a6293a9bbd90dde8316
20,215
def find_scale_to_fit(im, shape):
    """Finds the scale that makes the image fit in the rect.

    :param im: array-like whose ``shape`` is (height, width, ...)
    :param shape: (target_height, target_width); either entry may be None to
        leave that axis unconstrained
    :return: scale factor in (0, 1] shrinking ``im`` to fit ``shape``
    """
    height, width = im.shape[0], im.shape[1]
    target_h, target_w = shape[0], shape[1]
    candidates = [1.0]
    if target_w is not None:
        candidates.append(target_w / float(width))
    if target_h is not None:
        candidates.append(target_h / float(height))
    return min(candidates)
333936a1d9f7b2364dc33e39a3042e16c6c603fc
20,218
import inspect


def get_function_args(inspected_function):
    """
    Finds the arguments that could be passed into the specified function.

    :param inspected_function: any callable supported by ``inspect.signature``
    :return: list of parameter names in declaration order
    """
    sig = inspect.signature(inspected_function)
    return [name for name in sig.parameters]
5a6cb131fecb616b1233576ed9c4cf67895d512a
20,219
def grid_round(x, spacing):
    """Rounds distances into discrete numbers of grid intervals.

    Snaps ``x`` to the nearest multiple of ``spacing`` (banker's rounding,
    via the built-in ``round``).
    """
    per_unit = 1 / spacing
    return round(x * per_unit) / per_unit
820f738bb38b7958e6068d05d34aba1fe8f09bdb
20,222
import os
import json


def load_json(filepath):
    """Parse json file to dict.

    :param filepath: path of the JSON document to read
    :return: decoded JSON content
    :raises ValueError: if no file exists at ``filepath``
    """
    if not os.path.isfile(filepath):
        raise ValueError('File could not find in %s' % filepath)
    with open(filepath, 'r') as handle:
        return json.load(handle)
0b45726e2a73f7e9a4790251a259c24b22889897
20,223
def rk4(lhs, dt, function):
    """
    Advance one step with the classic 4th-order Runge-Kutta method.

    :param lhs: previous step state.
    :param dt: delta t.
    :param function: callable evaluating the time derivative at a state.
    :return: the state after one RK4 step.
    """
    # Four slope samples: start, two midpoints, and the far end of the step.
    k1 = dt * function(lhs)
    k2 = dt * function(lhs + k1 / 2.0)
    k3 = dt * function(lhs + k2 / 2.0)
    k4 = dt * function(lhs + k3)
    # Weighted average (k1 + 2*k2 + 2*k3 + k4) / 6.
    rhs = lhs + 1.0 / 6.0 * (k1 + 2.0 * (k2 + k3) + k4)
    return rhs
a5feb7ab98840bc31003fc759e5345ae9b234dad
20,224
def isTruthy(value):
    """Converts any value to a boolean value; just uses the 'bool' built-in
    function, except that strings like 'FALSE', 'false' and 'False', and
    strings that are numeric values equal to 0, return False.
    """
    if str(value).lower() == 'false':
        return False
    try:
        return bool(float(value))
    except (TypeError, ValueError):
        # Bug fix: was a bare `except:`; only the float() conversion errors
        # should fall back to plain bool().
        return bool(value)
bfc7b3547c77f1d8642c0078c80740d632813b45
20,225
import subprocess
def RunCommand(command):
    """
    Runs commands frequently seen in scripts. These are usually a sequence
    of commands connected by pipes, so we use shell=True

    :param command: shell command line; a trailing '|' is stripped first.
    :return: (pid, stdout, stderr) of the completed process.

    NOTE(review): shell=True hands the string to the shell — only pass
    trusted command strings.
    """
    # logger.info("Running the command\n{0}".format(command))
    if command.endswith('|'):
        command = command.rstrip('|')
    p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # p.wait()
    # communicate() waits for exit and avoids pipe-buffer deadlock.
    [stdout, stderr] = p.communicate()
    return p.pid, stdout, stderr
205ea39ea69a4a9e6deb01ea64d9402ca3983833
20,226
def insert_new_targets(start, end, make_lines, targets):
    """
    Insert new targets in Makefile between mkgen annotations.

    :param start: index of the opening annotation line (kept in the output)
    :param end: index of the closing annotation line (kept in the output)
    :param make_lines: full list of Makefile lines
    :param targets: replacement lines for the annotated region
    :return: a new list with lines between start and end replaced by targets
    """
    head = make_lines[: start + 1]
    tail = make_lines[end:]
    return head + targets + tail
d947c69366f95171d57e050278c323ad89853961
20,229
def TransMatrix(demand, c):
    """Construct a queueing transition matrix.

    If capacity ``c`` < requests then the remainder of requests is left in
    the queue: row ``k`` places the demand distribution shifted by the
    ``k - c`` carried-over requests.

    :param demand: demand distribution (list of probabilities)
    :param c: service capacity per step
    :return: list of 1000 rows (states 0..999)

    NOTE(review): rows are created with ``max`` (999) columns but the
    overflow check below compares against ``max + 1`` — presumably the
    intended fixed width is 1000; confirm row lengths before relying on them.
    """
    D = []
    max = 999  # NOTE(review): shadows the builtin `max`; truncation bound of the state space.
    for k in range(max+1):
        row = [0 for i in range(max)]
        if k <= c:
            # Everything queued can be served: the distribution restarts at state 0.
            row[0:len(demand)-1] = demand
            D.append(row)
        else:
            # k - c requests remain queued; shift the demand distribution accordingly.
            row[k-c:k-c+len(demand)-1] = demand
            if len(row) > max + 1:
                # Collapse probability mass falling past the last state into
                # the final entry.
                difference = len(row) - (max + 1)
                row[len(row)-difference-1:len(row)] = [sum(row[len(row)-difference-1:len(row)])]
            D.append(row)
    return D
ae35c90de4d4827c6da3272e23c8b17d2683f4b1
20,230
def p2a(p, m1, m2):
    """
    It computes the separation (Rsun) given m1 (Msun), m2 (Msun) and p (days).

    Kepler's third law, with the period converted from days to years and the
    AU result converted to solar radii.
    """
    yeardy = 365.24
    AURsun = 214.95
    period_years = p / yeardy
    return AURsun * (period_years * period_years * (m1 + m2)) ** (1. / 3.)
a0c5d8c0d7b961e8017217f22f54aa2a70daf5a0
20,231
import yaml


def yaml_as_python(val):
    """Convert YAML text to Python objects.

    :param val: YAML text, possibly containing multiple documents
    :return: list of parsed documents, or the YAMLError instance on failure
    """
    try:
        # Bug fix: safe_load_all() is lazy, so parse errors were raised at the
        # caller's iteration site and never caught here; materializing the
        # generator forces parsing inside this try block.
        return list(yaml.safe_load_all(val))
    except yaml.YAMLError as exc:
        return exc
0d7105308ccebb336ea54cd3075c819effdbe4f2
20,232
def sanitize_label(text: str) -> str:
    """
    Remove risky characters in a latex label.

    Each of ``%.|_#[]@`` is replaced by ``-``.

    :param text: raw label text
    :return: sanitized label
    """
    table = str.maketrans({ch: "-" for ch in "%.|_#[]@"})
    return text.translate(table)
89c729013587e11b4a7b9ebc0b2b9cf4de4d808a
20,233
def build_complement(dna):
    """
    :param dna: str, the DNA sequence to be converted
    :return: str, the complement strand of the DNA sequence

    Characters other than A/T/C/G are dropped, matching the original
    branch-per-base behavior.
    """
    pairs = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
    return ''.join(pairs.get(base, '') for base in dna)
747797095c8b741e497d2ad1324fe9171dd00735
20,234
def calculate_bootstrap(bootstrap_size, length):
    """
    Calculate the bootstrap size for the data of given length.

    Parameters
    ----------
    bootstrap_size : int, float, default=None
        Bootstrap size for training. Must be one of:
            - int   : Use `bootstrap_size`.
            - float : Use `bootstrap_size * n_samples`.
            - None  : Use `n_samples`.

    length : int
        Length of the data to be bootstrapped.

    Returns
    -------
    bootstrap : int
        Actual bootstrap size.
    """
    # Guard-clause style: first accepted interpretation wins.
    if bootstrap_size is None:
        return length
    if isinstance(bootstrap_size, int) and bootstrap_size > 0:
        return bootstrap_size
    if isinstance(bootstrap_size, float) and 0 < bootstrap_size <= 1:
        return int(bootstrap_size * length)
    raise ValueError("Bootstrap Size must be None, a positive int or float in (0,1]")
f6d0856322ac43638fd75b94838f9b04a6acabd1
20,235
from subprocess import check_output
def call_git(args, verbose=False):
    """
    Helper function for calling a 'git' command.

    @param args: list of arguments to the git shell script; a plain string is
        split on whitespace first.
    @param verbose: echo the command and its raw output to stdout.
    @return string stdout of git, decoded from bytes.
    """
    if isinstance(args,str):
        args = args.split()
    if verbose:
        print ("[call_git] << git ", ' '.join(args))
    # check_output raises CalledProcessError on a non-zero exit status.
    res = check_output(['git'] + args)
    if verbose:
        print ("[call_git] >> {}".format(res))
    return res.decode()
89afcae42d9d6ebfda21a569cb257ddf4af9d0bb
20,237
def _pandas_sort_pre_017(df, by, *args, **kwargs):
    """Use sort() to sort a DataFrame.

    Compatibility shim for pandas < 0.17, where ``DataFrame.sort(columns=...)``
    was the sorting API (later replaced by ``sort_values(by=...)``).

    :param df: the DataFrame to sort.
    :param by: column(s) to sort on, forwarded as ``columns=``.
    :return: the sorted DataFrame.
    """
    return df.sort(*args, columns=by, **kwargs)
3c72ba060de7376529663a1424231b4ed9916ffa
20,238
def remove_diacritics(name):
    """Returns name without diacritics and inserted latin letters.

    Arguments:
    name -- word to have diacritics removed
    """
    # One-pass translation table replacing the chain of .replace() calls.
    table = str.maketrans(u'ąćęłńóśźżĄĆĘŁŃÓŚŹŻ', 'acelnoszzACELNOSZZ')
    return name.translate(table)
67d0af763bced3ac7368289b21bcfe525e1efa7c
20,240
def totM(m1, m2):
    """Return the total mass m1 + m2, as it shows up in Kepler formulae."""
    total = m1 + m2
    return total
1be75e653582e7f9eaafaf07d1f946735eb6f66e
20,241
def get_child_object_id(parent, number):
    """ Returning child object entity_id

    The child number is zero-padded to two digits (for number < 10) and
    appended to the parent id with an underscore.
    """
    suffix = str(number)
    if number < 10:
        suffix = "0" + suffix
    return "{}_{}".format(parent, suffix)
3bad2c728d2112b51028ca5f7bf6acabb24bc229
20,242
from pathlib import Path


def tmp_dir(tmp_path) -> Path:
    """
    Returns `Path` to the temporary directory.
    If it does not exist yet, it is created first.
    """
    target = tmp_path / "tmp_dir"
    if not target.exists():
        target.mkdir()
    return target
39c61278f942cc9cbfd531bf3e7be296ca012d77
20,247
from pathlib import Path


def ORDER_NAME(path: Path) -> str:
    """Sort key for paths: orders by the final path component's name."""
    return path.name
32e2f62caca2c281d4d24d6e17c07bc5b8e3ada7
20,248
def generate_batch_spec(mode, batch_size):
    """ Generates a spec describing how to draw batches

    Args:
        mode: one of ['train', 'test', 'val']
        batch_size: number of samples per batch
    """
    assert mode in ['train', 'test', 'val']
    # On a more complicated dataset this would include useful arguments such
    # as whether to augment the data or what data source to draw from; this
    # example is simple enough that the spec is purely illustrative.
    return {'mode': mode, 'batch_size': batch_size}
232991fc65e037f5c860f8ca9dc2fa832b7062e8
20,250
def get_star_column_number(line):
    """
    For a line in a star file describing a column entry (e.g.,
    '_rlnEstimatedResolution #5'), retrieve the value of that column (e.g. 5).
    """
    token = line.split()[1]
    return int(token.replace("#", ""))
3cd4e981b1486167fdad0e6cbfeb5b36e88c4a1a
20,251
import functools


def data_source_generator(name=None, **properties):
    """Decorator for applying to a simple data source which directly returns
    an iterable/generator with the metrics for each sample.

    The function the decorator is applied to must take no arguments.
    """
    def _decorator(func):
        @functools.wraps(func)
        def _properties(settings):
            def _factory(environ):
                return func
            # Merge the extra properties with the fixed name/factory entries.
            return dict(properties, name=name, factory=_factory)
        return _properties
    return _decorator
0d16b0a56aa6b9b926e9e9d380265fa71b2abb8f
20,252
from typing import Iterable
from typing import List


def flatten_lists(list_of_lists: Iterable) -> List:
    """
    Flatten a list of iterables into a single list.

    This function does not further flatten inner iterables.

    Parameters
    ----------
    list_of_lists : typing.Iterable
        The iterable to flatten

    Returns
    -------
    flattened_list: typing.List
        The flattened list
    """
    flattened = []
    for sublist in list_of_lists:
        flattened.extend(sublist)
    return flattened
5ba8f5add9f8f1fd7fad50fbaea765655b183718
20,253
def format(str):
    """Format string to flow continuously (without the indents that were
    inserted to make the docstring easy to follow)."""
    stripped = (line.strip() for line in str.split('\n'))
    return " ".join(stripped)
72bad09032140ac6642a352675b19bb1cc62944a
20,255
def _convert_float32_to_float64(data):
    """
    Converts DataArray values of float32 to float64

    :param data: Xarray dataset of coverage data

    :returns: Xarray dataset of coverage data
    """
    for var_name in data.variables:
        if data[var_name].dtype != 'float32':
            continue
        # astype() drops the variable's attributes, so save and restore them.
        saved_attrs = data[var_name].attrs
        data[var_name] = data[var_name].astype('float64')
        data[var_name].attrs = saved_attrs
    return data
1a66c6de0de7ff2c79d7e03c017b79b78cb43639
20,256
import struct


def byte_2_int(byte_string, is_little_endian=False):
    """
    Convert a single byte to an int.

    :param byte_string: bytes object of length 1
    :param is_little_endian: byte-order flag (kept for interface symmetry;
        irrelevant for a single byte)
    :return: unsigned integer value of the byte
    """
    fmt = '<B' if is_little_endian else '>B'
    return struct.unpack(fmt, byte_string)[0]
7db464904246cdf28a522a413d7d2d4366e422a5
20,258
def create_reference_dictionaries(state_space):
    """Creates reference dictionaries for future referral.

    :param state_space: sequence of hashable states
    :return: (state_to_index, index_to_state) lookup dicts
    """
    state_to_index = {state: idx for idx, state in enumerate(state_space)}
    index_to_state = {idx: state for idx, state in enumerate(state_space)}
    return state_to_index, index_to_state
6d1548e94f6adb797004391b1ec4927b05d24f70
20,261
def validate_required_fields(json_data, required_fields=None):
    """ Check whether every required field is present in the JSON data. """
    return all(field in json_data for field in (required_fields or []))
8c7fe7e3f8f2ab8bb121455999747e8c95fc2bbe
20,262
def bin_data(signal_list, bins, labels = [], group_sizes = []):
    """Split a numeric signal into labelled groups (bins).

    :param signal_list: numeric samples (ints or floats).
    :param bins: number of groups to produce.
    :param labels: optional one string label per bin; defaults to "0", "1", ...
    :param group_sizes: optional explicit size per bin; when omitted, samples
        are divided evenly with len(signal_list) // bins members per bin.
    :return: list of (label, group) tuples, one per bin.

    NOTE(review): the mutable default arguments ([]) are shared across calls;
    they are only read here, but mutating them would leak state between calls.
    NOTE(review): validation uses `assert`, which is stripped under `python -O`.
    """
    assert all([type(item) == float or type(item) == int for item in signal_list]), "Signal should be provided as a float."
    assert type(bins) == int, "The number of groups (bins) should be an integer."
    assert type(labels) in [list, tuple], "The labels should be in a list or tuple."
    if group_sizes != []:
        assert all([type(item) == int for item in group_sizes]), "The group sizes should be integers."
        assert len(group_sizes) == bins, "The number of given group sizes does not match the number of groups."
        groups = []
        count = 0
        # Slice consecutive runs of the requested sizes.
        for item in group_sizes:
            groups.append(signal_list[count:count+item])
            count += item
    else:
        # Assume an even number of things in each bin
        n_members = len(signal_list)//bins
        groups = [signal_list[n_members*i: n_members*(i+1)] for i in range(bins)]
    if labels != []:
        assert all([type(item) == str for item in labels]), "The labels should be strings"
        assert len(labels) == bins, "Each group should have a corresponding label"
        return [(labels[i], groups[i]) for i in range(bins)]
    else:
        return [(f"{i}", groups[i]) for i in range(bins)]
dac677c0b83715963d9f240356d069b40036ac1a
20,264
def _pisano_period_len(modulo): """ In number theory, the nth Pisano period, written π(n), is the period with which the sequence of Fibonacci numbers taken modulo n repeats. Args: modulo: modulo Returns: length of Pisano period """ init_array = [0, 1] idx = 1 while 1: idx += 1 init_array.append(init_array[idx - 1] % modulo + init_array[idx - 2] % modulo) if init_array[idx] % modulo == 1 and init_array[idx - 1] % modulo == 0: return len(init_array) - 2
776db9e0e8fd2af28159b9616018ff000dd55154
20,265
def potency_tensor(normal, slip):
    """
    Given a fault unit normal and a slip vector, return a symmetric potency
    tensor as volume components (W11, W22, W33), and shear components
    (W23, W31, W12).
    """
    volume = [normal[i] * slip[i] for i in range(3)]
    # Shear components in (2,3), (3,1), (1,2) order (0-based index pairs).
    shear = [
        0.5 * (normal[j] * slip[k] + normal[k] * slip[j])
        for j, k in ((1, 2), (2, 0), (0, 1))
    ]
    return [volume, shear]
87c1a08d74e8e2dfdd51f32a19ee44dd543a4c4c
20,266
def extract_doi_suffix(protocol_doi):
    """
    DOIs come in a format like 'dx.doi.org/10.17504/protocols.io.bazhif36'.
    We just need the 'protocols.io.bazhif36' element to form our query url.
    """
    parts = protocol_doi.split("/")
    return parts[2]
9dbb9d44b9159bd9b3168169e17c6a0d66fe95d8
20,268
def index(list_, i):
    """Fetch the element of ``list_`` at position ``i`` (coerced to int)."""
    return list_[int(i)]
fc437245367dfa86df45acec743d7dd117686e14
20,269
import re


def fenced_bootstrap(block):
    """Set up a fenced block for bootstrap prettify highlighting."""
    pattern = re.compile(r'```(?P<lang>\w+)?(?P<code>.*?)```', re.MULTILINE | re.DOTALL)
    match = pattern.match(block)
    if match is None:
        # Not a fenced block: return the input untouched.
        return block
    # NOTE: the captured language tag is not used by the prettify markup.
    code = match.groupdict().get('code', '')
    return '''<pre class="prettyprint linenums">%s</pre>''' % code
34140706556e0aa4fa80b946eb2f4fbbb3018d02
20,270
def find_token_by_position(tokens, row, column):
    """Given a list of tokens, a specific row (linenumber) and column, a
    two-tuple is returned that includes the token found at that position
    as well as its list index.

    If no such token can be found, ``None, None`` is returned.
    """
    for idx, token in enumerate(tokens):
        within_rows = token.start_row <= row <= token.end_row
        within_cols = token.start_col <= column < token.end_col
        if within_rows and within_cols:
            return token, idx
    return None, None
d5493b596761bc7620aac2f54c0ccd9cc8982d7b
20,271
def read_txt_file(file_name):
    """Read the content of a text file with name `file_name` and return
    content as a list of lines (newlines preserved)."""
    with open(file_name, 'r') as handle:
        return handle.readlines()
0231bd41327ae9c082502d9cf5c712978d88df26
20,272
def diff_snapshots(snap_origin, snap_dest):
    """
    Returns the diff of snapshots

    new: Means the files to be created in destination
    modified: Means the files to be copied again in destination
    deleted: Means the files that has to be deleted in destination
    """
    new = []
    modified = []
    # Bug fix: dict.iteritems() is Python 2 only and raises AttributeError
    # on Python 3; items() is the portable spelling.
    for filename, md5_origin in snap_origin.items():
        if filename not in snap_dest:
            new.append(filename)
        elif md5_origin != snap_dest[filename]:
            modified.append(filename)
    deleted = [filename for filename in snap_dest.keys() if filename not in snap_origin]
    return new, modified, deleted
91ca8c60ff6fbdedb62e8948e79ca0b6dbba8984
20,273
def read_program(file):
    """Deprecated: parse a comma-separated list of ints from the first line of `file`."""
    with open(file, "r") as handle:
        first_line = handle.readline().rstrip()
    return [int(token) for token in first_line.split(",")]
9c3d48dd05f15c62bd047cb33bcb429fdfd5b6a7
20,275
def intersect_line(p1, p2, p3, p4, strict=False):
    """ This function will intersect the two lines given by two points each
    boolean flag strict will determine if 2nd point belongs to line
    (so if line (( 0, 0) - (0,100) ) will intersect with line (-50,100)- (50,100) )
    :param p1: 1st point of first line
    :param p2: 2nd point of first line
    :param p3: 1st point of second line
    :param p4: 2nd point of second line
    :param strict: if true excludes 2nd point of each line
    :return: returns point of intersection or () if no intersection or the two points, if parallel lines overlap
    """
    retval = ()
    # Sentinel parameter values outside the [0, 1] segment range.
    t1 = t2 = 2.0
    # Direction vectors of both segments.
    d1 = (p2[0] - p1[0], p2[1] - p1[1])
    d2 = (p4[0] - p3[0], p4[1] - p3[1])
    # 2D cross product; zero means the directions are parallel.
    det = float(d1[0] * d2[1] - d2[0] * d1[1])
    if det == 0:
        # same direction => parallel lines? or same line?
        d3 = (p3[0] - p1[0], p3[1] - p1[1])  # delta between p1 and p3
        d4 = (p4[0] - p2[0], p4[1] - p2[1])  # delta between p2 and p4
        det2 = float(d1[0] * d3[1] - d3[0] * d1[1])  # determinant to check if delta3 is same as delta1
        det3 = float(d2[0] * d4[1] - d4[0] * d2[1])  # determinant to check if delta3 is same as delta1
        if det2 == 0 and det3 == 0:
            # same line: express p3 and p4 as parameters t1/t2 along segment 1.
            if d1[0] != 0:  # either d1[0] (dx must be >0 or dy >0 or its not a line)
                t1 = (float(p3[0] - p1[0]) / d1[0])  # calc factor on same line
                t2 = (float(p4[0] - p1[0]) / d1[0])
            elif d1[1] != 0:
                t1 = (float(p3[1] - p1[1]) / d1[1])
                t2 = (float(p4[1] - p1[1]) / d1[1])
            elif d2[0] != 0:  # p1 and p2 are same -> swap p1,p2 with p3,p4
                t1 = (float(p1[0] - p3[0]) / d2[0])
                t2 = (float(p2[0] - p3[0]) / d2[0])
            elif d2[1] != 0:
                t1 = (float(p1[1] - p3[1]) / d2[1])
                t2 = (float(p2[1] - p3[1]) / d2[1])
            else:
                # p1 and p2 are same AND p3 and P4 are same: return p1 if they are all same
                if p1 == p3:
                    return p1
        else:
            # parallel lines do not intersect
            return ()
        # either one of them is in limit[0..1] or they are on different sides..
        if min(t1, t2) <= 1.0 and max(t1, t2) >= 0.0:
            # Clamp the overlap to segment 1's [0, 1] range and return the
            # overlapping sub-segment (collapsed to a single point if equal).
            t1n = max(min(t1, t2), 0.0)
            t2n = min(max(t1, t2), 1.0)
            retval = ((p1[0] + t1n * d1[0], p1[1] + t1n * d1[1]), (p1[0] + t2n * d1[0], p1[1] + t2n * d1[1]))
            if retval[0] == retval[1]:
                retval = retval[0]
    else:
        # Proper (non-parallel) intersection: solve for parameters on both
        # segments via Cramer's rule.
        t1 = float(d2[0] * (p1[1] - p3[1]) - d2[1] * (p1[0] - p3[0])) / det
        t2 = float(d1[0] * (p1[1] - p3[1]) - d1[1] * (p1[0] - p3[0])) / det
        if strict:
            if 0.0 <= t1 < 1.0 and 0.0 <= t2 < 1.0:  # point has to be on line segment
                retval = (p3[0] + t2 * d2[0], p3[1] + t2 * d2[1])
        else:
            if 0.0 <= t1 <= 1.0 and 0.0 <= t2 <= 1.0:  # point has to be on line segment
                retval = (p3[0] + t2 * d2[0], p3[1] + t2 * d2[1])
    return retval
2c6c6d6bf98f83abf4ab06929847e551e8774ea2
20,277
def compress(compress_dict: dict[str, str], text: str) -> str:
    """Compression routine.

    Maps every character of ``text`` through ``compress_dict`` and joins the
    encoded pieces (KeyError for characters without a code).
    """
    return ''.join(compress_dict[letter] for letter in text)
6a6fb3b4b379bc90f6d41e48b20033d6187c1e39
20,278
from typing import List
import math


def chunks(arr, m) -> List[list]:
    """Split a list into m sublists whose sizes are as even as possible.

    Args:
        arr: the list to split
        m: number of chunks

    Returns:
        A list whose elements are the consecutive sublists.
    """
    size = int(math.ceil(len(arr) / float(m)))
    return [arr[start : start + size] for start in range(0, len(arr), size)]
040b75647fcdd72cac561bb43019c50c64dec51d
20,279
import torch


def flatten_parameters_wg(model):
    """
    Flattens parameters of a model but retains the gradient.

    :return: 1D torch tensor with size N, with N the model parameters
    """
    flat_views = [parameter.view(-1) for parameter in model.parameters()]
    return torch.cat(flat_views)
1bdf6779099e37dce5179ce1fe5e63472ea72fbf
20,280
def _dominance_constraints_from_feature_configs(feature_configs): """Returns list of dominance constraints in the given feature_configs.""" feature_names = [feature_config.name for feature_config in feature_configs] monotonic_dominances = [] for dominant_idx, dominant_feature_config in enumerate(feature_configs): for dominance_config in dominant_feature_config.dominates or []: if dominance_config.feature_name in feature_names: weak_idx = feature_names.index(dominance_config.feature_name) if dominance_config.dominance_type == 'monotonic': monotonic_dominances.append((dominant_idx, weak_idx)) else: raise ValueError('Unrecognized dominance type: {}'.format( dominance_config.dominance_type)) return monotonic_dominances
186c86cec286a4a9fd228632e675b53f07981927
20,281
import json


def filter_json(data, nrow):
    """
    Return the first nrow rows of a json object, that can be structured as a
    list of rows [{"colname": val1, ..},...] or a dict with keys as column
    headings and vals as lists of column values {"col":[val1,val2,...], ...}

    For list/dict input the truncated result is returned as a JSON string;
    any other input is returned unchanged.
    """
    if isinstance(data, list):
        return json.dumps(data[:nrow])
    if isinstance(data, dict):
        truncated = {key: values[:nrow] for key, values in data.items()}
        return json.dumps(truncated)
    ## unknown format - just return data as-is
    return data
8c855bb3bcae7e95bd763e7b0148ea7e1baa5ad4
20,285
import os


def build_shows_uri(category, page):
    """Constructs the request URI for shows endpoint.

    The base is read from the BASE_URL environment variable.
    """
    base_url = os.getenv("BASE_URL")
    return "{}/category/{}/{}".format(base_url, category, page)
588d61c60915ad5a80884970569ab641763df20b
20,286
def soma(a, b):
    """Add the numbers a and b.

    >>> soma(1, 2)
    3
    >>> soma(8, 2)
    10
    """
    result = a + b
    return result
6415769b5e2b34bf65906bc943c657d0895437ad
20,287
from pathlib import Path from typing import Set from typing import List def _dir_diff(dir1: Path, dir2: Path, glob_pattern: str = '*') -> Set[Path]: """ returns files in dir1 that are not in dir2 (recursively) """ def listdir(directory: Path) -> List[Path]: return [path.relative_to(directory) for path in directory.rglob(glob_pattern)] files1 = listdir(dir1) files2 = listdir(dir2) diff = set(files1) - set(files2) return diff
056d997fe10e7febf982708124cee9117ace7517
20,288
def rpc_plugins_list(handler):
    """
    Return information regarding enabled plugins in the server.

    :return: A dictionary representing enabled plugins and their meta-data.
    :rtype: dict
    """
    plugin_manager = handler.server.plugin_manager
    return {
        plugin.name: {
            'description': plugin.formatted_description,
            'name': plugin.name,
            'title': plugin.title,
            'version': plugin.version
        }
        for _, plugin in plugin_manager
    }
d534e3ba9379947821fc2a49b0439b4a4269d9ff
20,289
def list_instruments(snap):
    """ List instruments from a snapshot

    Concatenates the station's instrument names with its component names.
    """
    station = snap['station']
    names = list(station['instruments'])
    names.extend(station['components'])
    return names
a4abe5bc9884d80b014bbf6eb4688349114ed664
20,291
def insertion_sort(some_list):
    """
    https://en.wikipedia.org/wiki/Insertion_sort

    Split the array into a "sorted" and "unsorted" portion. As we go through
    the unsorted portion we backtrack through the sorted portion to INSERT
    the element-under-inspection into the correct slot. O(N^2)

    Returns (iteration_count, list); the list is sorted in place.
    """
    iters = 0
    # The first element forms the initial sorted portion, so start at index 1.
    for right in range(1, len(some_list)):
        iters += 1
        pending = some_list[right]
        slot = right  # index of the open "hole" in the sorted portion
        # Walk left, shifting larger elements one position right, until the
        # correct insertion slot for `pending` is open.
        while slot > 0 and some_list[slot - 1] > pending:
            iters += 1
            some_list[slot] = some_list[slot - 1]
            slot -= 1
        some_list[slot] = pending
    return iters, some_list
ce5be31c03aa925f567c9880cd81281cf3c5af96
20,292
def Nop(x):
    """No-op predicate for tree traversal: accepts any node."""
    return True
120feb2ba4e1eaa291eb8db4a5586bfd8478f8f2
20,294
import hmac
import hashlib


def signature(secret, parts):
    """Generate an HMAC-SHA1 hex signature over ``parts``.

    All strings are assumed to be utf-8; ``str`` values (both the key and
    the individual parts) are encoded to bytes before being fed to the MAC.

    Fix: the original fell back to an undefined name ``sha`` on the
    Python < 2.5 branch (a guaranteed NameError had it ever run) and kept a
    runtime version check that is always true on any supported Python.
    That dead branch, the check, and the now-unneeded ``six`` shim are
    removed; behavior on Python 3 is unchanged.

    :param secret: HMAC key, str or bytes.
    :param parts: iterable of str/bytes chunks, digested in order.
    :return: hexadecimal digest string.
    """
    if not isinstance(secret, bytes):
        secret = secret.encode('utf-8')
    csum = hmac.new(secret, digestmod=hashlib.sha1)
    for part in parts:
        if not isinstance(part, bytes):
            part = part.encode('utf-8')
        csum.update(part)
    return csum.hexdigest()
1535e607b440c7c60366148688ba47c11cc5a59f
20,295
import torch


def calculate_birds_eye_view_parameters(x_bounds, y_bounds, z_bounds):
    """
    Compute the resolution, start position and grid size of a bird's-eye view.

    Each bounds argument is a (min, max, step) triple.

    Parameters
    ----------
    x_bounds: Forward direction in the ego-car.
    y_bounds: Sides
    z_bounds: Height

    Returns
    -------
    bev_resolution: cell size per axis.
    bev_start_position: centre of the first cell per axis.
    bev_dimension: number of cells per axis (long tensor, truncated).
    """
    bounds = [x_bounds, y_bounds, z_bounds]
    bev_resolution = torch.tensor([row[2] for row in bounds])
    # First cell centre = lower bound plus half a cell.
    bev_start_position = torch.tensor(
        [row[0] + row[2] / 2.0 for row in bounds])
    bev_dimension = torch.tensor(
        [(row[1] - row[0]) / row[2] for row in bounds], dtype=torch.long)
    return bev_resolution, bev_start_position, bev_dimension
ca6733cbd393d749e017b85ec9443891aa36b7c1
20,296
def doNothing(rawSolutions):
    """
    Contrary to its name, wrap the input argument in a single-element list
    and return that list.
    """
    return [rawSolutions]
8a59f34dba4bcd00a0d69eb2875d8b5407fb737f
20,297
from typing import Dict
from typing import Any
from typing import List


def validate_required_keys_for_add_asset(valid_json: Dict[str, Any], required_keys: List[str]) -> bool:
    """
    Check if the required keys for adding an asset are present or not

    :param valid_json: The valid input asset JSON
    :param required_keys: The required keys for creating an asset
    :return: True if the required values are present else false
    """
    # Membership on the dict itself is an O(1) hash lookup per key;
    # the original ``key in valid_json.keys()`` wrapper was redundant.
    return all(key in valid_json for key in required_keys)
e6dd5fe20891af30fa997e19b8c08716340fe7b5
20,298
def get_fancylabel_Nd(fromNd, toNd):
    """Build a LaTeX-style label for N bounded by d values.

    :param fromNd: lower bound; values <= 0 mean "no lower bound".
    :param toNd: upper bound.
    :return: a label such as ``N$_{10<d<100}$`` or ``N$_{d<100}$``.
    """
    if fromNd > 0:
        return 'N$_{%d<d<%d}$' % (fromNd, toNd)
    return 'N$_{d<%d}$' % toNd
9943cebc268c435ed9c59a8e6b1e0b3ab8b5613e
20,299
import imaplib


def getImapMailboxCount(server, user, password, path="INBOX", searchSpec=None):
    """
    Return the number of emails in the selected mailbox ``path``.

    :param server: IMAP server hostname (connected over SSL).
    :param user: login name.
    :param password: login password.
    :param path: mailbox to select (default ``"INBOX"``).
    :param searchSpec: optional IMAP SEARCH criterion; when omitted all
        messages are counted.
    :return: message count, or -1 when the search does not return "OK".
    """
    retVal = -1
    with imaplib.IMAP4_SSL(server) as M:
        M.login(user, password)
        M.select(path)
        # ``is None`` instead of the original ``== None`` identity-vs-equality
        # mix-up; a single search call covers both cases.
        typ, data = M.search(None, 'ALL' if searchSpec is None else searchSpec)
        if typ == "OK":
            # data[0] is a space-separated byte string of message ids.
            retVal = len(data[0].split())
        M.logout()
    return retVal
cb36491f5c1796ada30220444cacbe020b9224b0
20,300
import re


def make_str_valid_varname(s):
    """makes a string a valid python identifier"""
    # Drop everything that is not alphanumeric, underscore or whitespace,
    # then trim the edges.
    cleaned = re.sub(r"[^0-9a-zA-Z_\s]", " ", s).strip()
    # Collapse interior whitespace runs into single underscores for
    # readability instead of deleting them.
    cleaned = re.sub(r"\s+", "_", cleaned)
    # Identifiers may not start with a digit; prefix an underscore if needed.
    if re.match("^[a-zA-Z_]", cleaned):
        return cleaned
    return "_" + cleaned
7dc3c5066e090f52c33c1583775c0b2c75097eba
20,304
def is_feature_overweighted_towards_class(feature_size_by_class_df, threshold=.99, feature_level=True):
    """
    Tell whether a categorical feature points very strongly towards one
    class value — a sign the feature is either highly predictive or leaking.

    Parameters
    ----------
    feature_size_by_class_df : pandas.DataFrame
        Output of ``get_feature_size_by_class``; indexed so that unstacking
        level 0 yields a ``ratio`` table of feature values by class.
        The feature is presumed discrete (finite set of values).
    threshold : float, with value between 0 and 1 (default=.99)
        Maximum allowed proportion for a single class/feature grouping.
    feature_level : bool (default=True)
        If True, test the summed ratios per class (feature level); otherwise
        test each individual feature-value ratio.

    Return
    ------
    `bool`-like flag: True when any class meets or exceeds ``threshold``.
    """
    per_class_ratios = feature_size_by_class_df.unstack(0).ratio
    if feature_level:
        flags = per_class_ratios.sum() >= threshold
    else:
        flags = (per_class_ratios >= threshold).any()
    return flags.any()
bf44409490e5fe29a5c3e4825603bcbddb7b36f5
20,306
import torch


def _standard_wishart_tril(df: torch.Tensor, dim: int, shape: torch.Size):
    """
    Sample the lower-triangular factor of a standard Wishart distribution
    (Bartlett-style construction; see references).

    :param df: degrees-of-freedom tensor; its dtype/device determine the
        sample's dtype/device.
    :param dim: dimensionality of the (dim x dim) triangular factor.
    :param shape: batch shape of the sample.
    :return: lower-triangular tensor ``T`` of shape ``shape + (dim, dim)``.

    References
    ----------
    - Sawyer, S. (2007). Wishart Distributions and Inverse-Wishart Sampling.
      https://www.math.wustl.edu/~sawyer/hmhandouts/Wishart.pdf
    - Anderson, T. W. (2003). An Introduction to Multivariate Statistical Analysis (3rd ed.).
      John Wiley & Sons, Inc.
    - Odell, P. L. & Feiveson, A. H. (1966). A Numerical Procedure to Generate a Sample
      Covariance Matrix. Journal of the American Statistical Association, 61(313):199-203.
    - Ku, Y.-C. & Blomfield, P. (2010). Generating Random Wishart Matrices with Fractional
      Degrees of Freedom in OX.
    """
    dtype, device = df.dtype, df.device
    i = torch.arange(dim, dtype=dtype, device=device)
    # Diagonal entry k gets a Gamma((df - k) / 2) draw; doubling it below
    # yields a chi-square-distributed variate with (df - k) dof.
    concentration = .5 * (df.unsqueeze(-1) - i).expand(shape + (dim,))
    V = 2. * torch._standard_gamma(concentration)
    # Strictly-lower-triangular entries are i.i.d. standard normal;
    # dim*(dim-1)/2 of them per batch element.
    N = torch.randn(*shape, dim * (dim - 1) // 2, dtype=dtype, device=device)
    # Diagonal carries the square roots of the chi-square draws.
    T = torch.diag_embed(V.sqrt())  # T is lower-triangular
    # Scatter the normals into the strictly lower triangle (offset=-1).
    # NOTE: statement order matters — the gamma and normal draws must stay
    # in this sequence to keep the RNG stream reproducible under a seed.
    i, j = torch.tril_indices(dim, dim, offset=-1)
    T[..., i, j] = N
    return T
bf1e39abbbb35f5b1ded137abe7df07cc7d513ca
20,307
def ang_symetry(m1, m2):
    """
    Symetrisize the doses at angle theta and - theta.

    Each output cell is the mean of ``m1[i][j]`` and the horizontally
    mirrored ``m2[i][-1 - j]``.
    """
    averaged = []
    width = len(m2[0])
    for i, mirror_row in enumerate(m2):
        averaged.append([
            (m1[i][j] + mirror_row[-1 - j]) / 2
            for j in range(width)
        ])
    return averaged
c88341a785a57dca8f82f684723f052a21186a88
20,310
def check_duplicate(piece1, piece2):
    """Check if the piece was already allocated before.

    Two pieces collide when they share a symbol and ``piece1``'s square is
    among ``piece2``'s squares.

    :return: True on a collision, False otherwise.
    """
    # The if/else returning literal booleans collapses to the condition.
    return piece1.symbol == piece2.symbol and piece1.square in piece2.squares
be2c73f7221ac15f7811aed5e1a013d1645f3d18
20,311
def get_routes_len(routes_list):
    """Return a list with the length of each route.

    :param routes_list: iterable of route objects exposing ``get_length()``.
    :return: list of lengths, in the same order as ``routes_list``.
    """
    # The original enumerate() index was never used; a comprehension
    # replaces the manual append loop.
    return [route.get_length() for route in routes_list]
d0f468351ed516b1a6f511d13847cb0c23950d3c
20,312
def create_user(txn, email, password):
    """Create (or merge) a ``User`` node with the given credentials.

    :param txn: transaction object used to run the Cypher statement.
    :param email: the user's e-mail address.
    :param password: the user's password.
    :return: whatever ``txn.run`` returns for the statement.
    """
    # MERGE is idempotent: an existing matching node is reused.
    query = """
    MERGE (a:User {email: $email, password: $password})
    """
    return txn.run(query, email=email, password=password)
0ce6800de8ea4be5891ae38017bcc6b1e97e7ddd
20,313
from datetime import datetime


def isday(yyyymmdd):
    """Is the yyyymmdd formatted as 'YYYY-MM-DD' such as '2020-03-18'"""
    # strptime raises ValueError for anything that is not a real calendar
    # date in that exact format.
    try:
        datetime.strptime(yyyymmdd, '%Y-%m-%d')
    except ValueError:
        return False
    return True
9ec31aa0cc4d924d5ae0df2510fb6258df6668a2
20,315
import re


def parse_variable_declaration(srcline):
    """Return (name, decl) for the given declaration line."""
    # XXX possible false negatives...
    head, eq, _tail = srcline.partition('=')
    if not eq:
        # No initializer: only accept a terminated statement.
        if not srcline.endswith(';'):
            return None, None
        head = head.strip(';')
    head = head.strip()
    # The last word (optionally followed by an array suffix) is the name.
    match = re.match(r'.*\b(\w+)\s*(?:\[[^\]]*\])?$', head)
    if match is None:
        return None, None
    return match.group(1), head
ce84f493845fe7a8d72b62bfb69a34ee25114e18
20,318
import platform
import os


def is_clipboard_supported():
    """
    Check whether the clipboard is supported.

    :returns: :data:`True` if the clipboard is supported,
              :data:`False` otherwise.
    """
    # macOS always has a clipboard; elsewhere an X display must be set.
    if platform.system().lower() == "darwin":
        return True
    return bool(os.environ.get("DISPLAY"))
803c4e4d7fd16c56090f5304323e63fcea740448
20,319
def compute_actual_possible(results):
    """
    Populate the "actual" and "possible" totals on a compute-metrics dict.

    ``possible`` counts gold-standard annotations that contribute to the
    final score (correct + incorrect + partial + missed); ``actual`` counts
    annotations produced by the NER system (correct + incorrect + partial +
    spurious). The dict is updated in place and returned, so it works for
    exact, partial and ent_type result dicts alike.
    """
    shared = results['correct'] + results['incorrect'] + results['partial']
    # Gold-standard side vs. system-output side differ only in their tail term.
    results["possible"] = shared + results['missed']
    results["actual"] = shared + results['spurious']
    return results
5138bf51f1d012be4562dba6a92fa5963baaec77
20,320
def get_exception_name(node):
    """
    Find the name of an exception

    Args:
        node: Parso node containing the raises statement

    Returns:
        str: The exception name
    """
    # Descend through first children until the leaf 'name' node is reached.
    current = node.children[1]
    while current.type != 'name':
        current = current.children[0]
    return current.value
0ab7841a4a1044c990fc8d8490d2ff040bd45d05
20,321
def is_static_pad(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> bool:
    """Can `same` padding for given args be done statically?.

    Static padding requires unit stride and an even effective kernel span.
    """
    if stride != 1:
        return False
    effective_span = dilation * (kernel_size - 1)
    return effective_span % 2 == 0
56bf89e307ff12efcf26493d47226008f3662af6
20,323
def calculate_depth_loss(est_depths, gt_depths, loss_type="l1"):
    """Loss between an estimated depth map and the ground-truth depth map.

    Only pixels with a positive ground-truth depth contribute.

    Args:
        est_depths: [B,1,H,W]
        gt_depths: [B,1,H,W]
        loss_type: Choose loss type from ['l1','l2']
    """
    assert est_depths.dim() == gt_depths.dim(), "inconsistent dimensions"
    assert loss_type in ["l1", "l2"], "loss_type should be l1/l2"

    valid_mask = (gt_depths > 0).detach()
    residual = (est_depths - gt_depths)[valid_mask]
    if loss_type == "l1":
        return residual.abs().mean()
    # loss_type == "l2" is guaranteed by the assert above.
    return (residual ** 2).mean()
fbe65b8c9f5e8546e0f633f07b65e22be84df67d
20,324
import os


def get_local_harmony_path(filepath):
    """From the provided path get the equivalent local Harmony path.

    The result lives under ``~/.avalon/harmony/<scene name>`` where the
    scene name is the file's basename without its extension.
    """
    scene_name = os.path.splitext(os.path.basename(filepath))[0]
    harmony_root = os.path.join(os.path.expanduser("~"), ".avalon", "harmony")
    return os.path.join(harmony_root, scene_name)
b3c2d511b6b7c4ea79393aae8a1740d3cd077e16
20,325
import os


def get_filename_and_extension_of(target_file):
    """Gets the base filename and extension of the target file.

    :param target_file: the complete path of the target file
    :returns: (base filename, extension including the leading dot)
    """
    # splitext already yields the (root, ext) pair we need.
    return os.path.splitext(os.path.basename(target_file))
ee0129b9cbef3760e65510b2fbdb51caede88008
20,328
def _intersect_point2_circle(P, C): """ Returns True if point P lies with circle C. @type P : Point2 instance @type C : Circle instance """ return abs(P - C.c) <= C.r
58522af7eed88e90c6377be5393da82d8ff99529
20,329
from typing import List


def multiplicar_polinomios(p: List[float], q: List[float]) -> List[float]:
    """Multiply two polynomials given as coefficient lists.

    :param p: first polynomial's coefficients
    :param q: second polynomial's coefficients
    :return: coefficients of the product polynomial
    :rtype: list
    """
    # A degree-n times a degree-m polynomial has n + m + 1 coefficients.
    product: List[float] = [0] * (len(p) + len(q) - 1)
    for i, coeff_p in enumerate(p):
        for j, coeff_q in enumerate(q):
            product[i + j] += coeff_p * coeff_q
    return product
f40fccef7305ae1fa48cda4db052afa52b40f10b
20,330
import random


def new_random_date():
    """Generate a random date as a dict with year/month/day keys.

    Author: Neda.
    """
    # NOTE: draws must stay in year, month, day order so the RNG stream is
    # unchanged for a fixed seed.
    year = random.randint(1000, 1001)
    month = random.randint(1, 12)
    day = random.randint(1, 30)
    return {"year": year, "month": month, "day": day}
eb08cfc2c3bda8034e818be9abf9a057a03068a7
20,331
def reshape_pivot(df_in):
    """
    Reduce df to crucial subset then pivot on cases and genes.

    Missing (case, gene) pairs become zero counts.
    """
    subset = df_in[['case_barcode', 'Hugo_Symbol', 'mutation_count']].copy()
    pivoted = subset.pivot(index='case_barcode',
                           columns='Hugo_Symbol',
                           values='mutation_count')
    return pivoted.fillna(0).astype(int)
1f95f1bb496f2a3734cb32d10f748c4ccb936cbc
20,332
import functools


def creates(resource):
    """Decorator that adds resources to the appropriate cleanup list.

    The wrapped method must return ``(resp, body)``; when ``body`` carries a
    ``uuid`` it is recorded in ``cls.created_objects[resource]``.
    """
    def _decorate(method):
        @functools.wraps(method)
        def _tracked(cls, *args, **kwargs):
            resp, body = method(cls, *args, **kwargs)
            if 'uuid' in body:
                # Remember the new object's id for later cleanup.
                cls.created_objects[resource].add(body['uuid'])
            return resp, body
        return _tracked
    return _decorate
e5d582fce32c535864910a17a73a745c6c0be556
20,333
from datetime import datetime, timezone


def date_iso(date):
    """Convert a datetime object into ISO 8601 format.

    HTML date format agrees with ISO 8601 (see also, :RFC:`3339`), ie::

        YYYY[-MM[-DD]][Thh[:mm[:ss[.s]]]T]

    For more information:

    * `Date and Time Formats: <https://www.w3.org/TR/NOTE-datetime>`_
    * `Date formats:
      <https://www.w3.org/International/questions/qa-date-format>`_

    :param date: Datetime object
    :type date: datetime.datetime
    :return: Datetime formatted as ISO 8601, or empty string if invalid
    :rtype: str

    .. note:: If the datetime object is timezone naïve, it is localized to
       UTC so that feed parsers and other consumers requiring timezone-aware
       datetimes do not raise.

    Fix: the third-party ``dateutil.tz.tzutc()`` dependency is replaced by
    the equivalent stdlib ``datetime.timezone.utc`` (Python 3.2+); output
    is unchanged (``+00:00`` offset either way).
    """
    if not date:
        return ''
    if date.tzinfo is None:
        date = date.replace(tzinfo=timezone.utc)
    return date.isoformat(timespec='minutes')
0fa1e5ee9087caeaea302e6dc4bfe70262c9507a
20,334
import os
import re


def IsSaveDirectory(saveDirectory: str) -> bool:
    """
    Get whether or not the specified save directory is one that could be
    saved to or loaded from by this module.

    A valid name looks like ``Slot_XXXXXXXX_NO`` where ``XXXXXXXX`` is an
    eight-character hexadecimal id (case insensitive). Only the basename of
    the given path is inspected.

    :param saveDirectory: The name or the path of a potential save directory.
    :type saveDirectory: str
    """
    saveDirectory = os.path.basename(saveDirectory)
    # Fix: the original class ``[0-9|A-F]`` also matched a literal ``|``
    # — inside ``[...]`` the pipe is not alternation.
    pattern = r"^Slot_[0-9A-F]{8}_NO$"
    return re.match(pattern, saveDirectory, re.IGNORECASE) is not None
cc30b9c3c2f030c1f18e8b825878d80ff4933157
20,335