content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
from typing import List


def check_group_match(group: str, group_list: List[str]) -> bool:
    """Check whether *group* is covered by *group_list*.

    A list whose first element is the wildcard ``"_all"`` matches every
    group; otherwise the group must appear in the list.

    :param group: group name to look up.
    :param group_list: allowed group names, or ``["_all"]`` for a wildcard.
    :return: True when the group matches, False otherwise.
    """
    # Guard the empty list: the original first-element wildcard check would
    # raise IndexError here; an empty allow-list matches nothing.
    if not group_list:
        return False
    return group_list[0] == "_all" or group in group_list
363c929571c043cc3cae16b408e4dff2a30fc780
66,478
import torch


def unif(n):
    """Build the uniform histogram (simplex point) of length *n*.

    Parameters
    ----------
    n : int
        number of bins in the histogram

    Returns
    -------
    torch.Tensor
        tensor of shape (n,) with every entry equal to 1/n
    """
    weights = torch.ones(n)
    return weights / n
36f154d1cdd06f79ba5007b480c1e2e970955775
66,479
def is_not_fields_include(data, fields):
    """Report whether any required field is missing or empty in *data*.

    :param data: dict to check.
    :param fields: iterable of field names that must be present.
    :return: True if some field is absent (None) or an empty string,
        False otherwise.  Non-string empty values (e.g. ``[]``) are NOT
        treated as missing, matching the original behavior.
    """
    for field in fields:  # fixed typo: was 'filed'
        value = data.get(field)
        if value is None or (isinstance(value, str) and not value):
            return True
    return False
59e25cd284e8415abf295e9d9177d9c7fcd3bb36
66,481
def find_starts(DNA: str, pat: str) -> list:
    """Find every index where *pat* starts inside *DNA*.

    :param DNA: the longer string to search in
    :param pat: the substring to search for
    :returns: all start indexes of *pat* in *DNA* (overlaps included)
    :raises ValueError: if either argument is an empty string
    """
    if not DNA:
        raise ValueError('Cannot search in empty string')
    if not pat:
        raise ValueError('Cannot search for empty string')
    window = len(pat)
    # range() is empty when the pattern is longer than the text, so the
    # comprehension naturally yields [] in that case.
    return [i for i in range(len(DNA) - window + 1)
            if DNA[i:i + window] == pat]
364786d09da1b2e0632b52e13b540a79656c6256
66,482
def listRange(start, stop, adder):
    """Build a linear range from *start* up to and including *stop*,
    stepping by *adder* via repeated addition (so float accumulation
    behaves exactly like successive `+=`)."""
    values = []
    current = start
    while current <= stop:
        values.append(current)
        current = current + adder
    return values
49015a6c27f1cb8b80ace3f407065dfe5d4bc8f2
66,484
def get_all_cfn_resources_by_type(resource_array: list, resource_type: str) -> list:
    """Filter CloudFormation stack resources down to one resource type.

    Parameters:
        resource_array (list): CloudFormation stack resource dicts.
        resource_type (str): resource type to keep,
            e.g. ``AWS::EC2::Instance``.

    Returns:
        list: the dicts whose ``ResourceType`` equals *resource_type*.
    """
    return [res for res in resource_array
            if res['ResourceType'] == resource_type]
14eaa760fe0f4dd8de90ef145115f19bc659ced3
66,485
def add_default_field(pv_name, default_field):
    """Append ``.<default_field>`` to *pv_name* when it makes sense.

    Useful e.g. for adding a VAL field to PVs for the archive.  The name
    is returned unchanged when either argument is None/empty or the PV
    already carries a field (contains a dot).

    Args:
        pv_name: the pv name
        default_field: default field to add
    Returns:
        pv name with the default field appended where applicable
    """
    no_field = default_field is None or default_field == ""
    unusable_name = pv_name is None or pv_name == "" or "." in pv_name
    if no_field or unusable_name:
        return pv_name
    return "{0}.{1}".format(pv_name, default_field)
2fc15966254dabd1bf6d802bafb4f82a906af4ae
66,486
def compare(a, b):
    """Return the sum of absolute element-wise differences,
    sum(|a[i] - b[i]|), iterating over the length of *a*."""
    return sum(abs(a[i] - b[i]) for i in range(len(a)))
e321ed0087b457df0609eb36bd42ec7e48fcb950
66,489
def _nis (atleast=False, atmost=False, exactly=False):
    """
    Returns a `callable` which returns `True` if ``n`` is ``>=`` `atleast`,
    ``<=`` `atmost`, or ``==`` `exactly`. See :func:`_nargs`, :func:`_nkw`,
    etc., for example use. The signature of the returned `callable` is:

    .. function:: fn (n:number) -> bool

    `atleast` and `atmost` may be combined, but `exactly` must stand alone.
    `False` is the "not given" sentinel for all three parameters.
    """
    # False compares as 0 in Python, so an unset (False) limit passes this
    # negativity check without special-casing.
    if (atleast < 0) or (atmost < 0) or (exactly < 0):
        raise ValueError("arg limits cannot be negative")
    # Identity comparison (not truthiness) so that exactly=0 is still a
    # legitimate value distinct from the False sentinel.
    if not exactly is False:
        if not ((atleast is False) and (atmost is False)):
            raise ValueError(
                "cannot mix 'exactly' and 'atleast' or 'atmost'")
        def _nis_exactly (n):
            # Closes over `exactly`.
            return n == exactly
        return _nis_exactly
    if atleast is False and atmost is False:
        raise ValueError(
            "must specify 'exactly' or one or both of 'atleast' and 'atmost'")
    # Fill in open-ended bounds: no lower bound -> 0, no upper -> +infinity.
    if atleast is False:
        atleast = 0
    if atmost is False:
        atmost = float('inf')
    def _nis_between (n):
        return (atleast <= n <= atmost)
    return _nis_between
20b57aa97a7e2077e98e84739433badccf5db228
66,492
import re


def find_all_occurrences(source, target, boundary=True):
    """Find all occurrences of `target` in `source`.

    :param source: the source string to search within.
    :type source: :class:`str`
    :param target: the target string to search for.
    :type target: :class:`str`
    :param boundary: when True, only match `target` delimited by word
        boundaries on both sides; when False, match it anywhere.
    :return: list of positions at which `target` occurs in `source`.
    :rtype: :class:`list` of :class:`int`
    """
    pattern = re.escape(target)
    if boundary:
        pattern = r"\b" + pattern + r"\b"
    # Bug fix: the non-boundary branch previously still prepended a
    # leading \b, so boundary=False did not actually disable anchoring.
    return [match.start() for match in re.finditer(pattern, source)]
728a3bc8df566605348e3bd32a70baf96660513e
66,493
def missingassignments(rawData):
    """Return [student name, column] pairs for every '-' entry in the
    grade table *rawData* (a pandas DataFrame with a 'name' column)."""
    missing = []
    for idx in range(len(rawData['name'])):
        row = rawData.iloc[idx]
        for col in rawData.columns:
            if row[col] == '-':
                missing.append([row['name'], col])
    return missing
1588c46de82e2937da6f536df7f765900f126ab4
66,494
def combine_files(*args):
    """Combine file dictionaries as returned by the methods of Dataset.

    :param args: file dictionaries (one list of split dicts per argument)
    :return: combined file dictionaries
    :raises ValueError: if no argument is given or split counts differ
    """
    if not args:
        raise ValueError('Pass at least one argument!')
    n_splits = len(args[0])
    # every argument must describe the same number of splits
    if any(len(arg) != n_splits for arg in args):
        raise ValueError('Arguments must contain the same number of splits!')
    combined = [{'feat': [], 'targ': []} for _ in range(n_splits)]
    for file_dict in args:
        for split_idx, merged in enumerate(combined):
            for field in merged:
                merged[field] += file_dict[split_idx][field]
    return combined
5b8bea3a27cde21adc5afb8cbb6c97442c862940
66,495
def contains_lua_calls(item):
    """Check if a node contains any Lua API calls (lua_*, luaL_*, LuaSkin)."""
    tokens = item['tokens']
    return any(marker in tokens for marker in ('lua_', 'luaL_', 'LuaSkin'))
d799fd91c4b50e074e10eee7a97a38a28f2345ba
66,502
from datetime import datetime
import hmac
import codecs
import hashlib


def create_preauth(byval, key, by='name', expires=0, timestamp=None):
    """ Generates a zimbra preauth value

    :param byval: The value of the targeted user (according to the
      by-parameter). For example: The account name, if "by" is "name".
    :param key: The domain preauth key (you can retrieve that using zmprov gd)
    :param by: What type is the byval-parameter? Valid parameters are "name"
      (default), "id" and "foreignPrincipal"
    :param expires: Milliseconds when the auth token expires. Defaults to 0
      for default account expiration
    :param timestamp: Current timestamp (is calculated by default)
    :returns: The preauth value to be used in an AuthRequest
    :rtype: str
    """
    if timestamp is None:
        # datetime.timestamp() is portable; strftime("%s") is a glibc
        # extension and fails on Windows.
        timestamp = int(datetime.now().timestamp()) * 1000
    pak = hmac.new(
        codecs.latin_1_encode(key)[0],
        ('%s|%s|%s|%s' % (
            byval,
            by,
            expires,
            timestamp
        )).encode("utf-8"),
        hashlib.sha1
    ).hexdigest()
    return pak
82beee592028d8da34d513df6588df6f55b4131a
66,503
from collections import Counter


def check_permutation(string1, string2):
    """Determine whether two strings are permutations of each other.

    :param string1: String (or None)
    :param string2: String (or None)
    :return: Boolean — True iff both are non-None and contain exactly the
        same characters with the same multiplicities.
    """
    # Bug fix: the None check must run before len(), which raises
    # TypeError on None in the original ordering.
    if string1 is None or string2 is None:
        return False
    if len(string1) != len(string2):
        return False
    # Counter replaces the hand-rolled character-frequency dicts.
    return Counter(string1) == Counter(string2)
d9708ba0b820f4f3611cf3acd3385736f5fd484c
66,504
import json


def get_profile(name, file):
    """Load the profile named *name* from the JSON file *file*.

    :param name: profile name
    :param file: file name, e.g. profiles.json
    :return: dict with the 'name', 'asana' and 'airtable' keys of the
        matching profile, or None when no profile matches.
    """
    with open(file) as handle:
        entries = json.load(handle)
    for entry in entries:
        if entry['name'] == name:
            return {
                'name': entry['name'],
                'asana': entry['asana'],
                'airtable': entry['airtable'],
            }
    return None
4bac456589117dbf1b17dc415571c69344768be0
66,510
import re


def human_sort(s):
    """Natural-sort key for *s*: digit runs compare numerically, the
    rest compares case-insensitively."""
    def _coerce(chunk):
        return int(chunk) if chunk.isdigit() else chunk.lower()

    return [_coerce(chunk) for chunk in re.split(r"([0-9]+)", s)]
725d48d395f4352f54a286b03cdc41fffc8b2514
66,517
def get_outer_window(bands_node, silent=False):
    """
    Get the ``outer_window`` parameter as a tuple (min, max), if it was given.

    Check if bands_node
    * is a child of a calculation and
    * that calculation has a parameter data input node with linkname parameters and
    * that node has the keys 'dis_win_min' and 'dis_win_max'.

    If that is the case, output outer_window = (min, max).
    With ``silent=True`` missing inputs are tolerated and None is returned
    instead of raising.
    """
    owindow = None
    try:
        # NOTE(review): `.inp.bands` / `.inp.parameters` look like the legacy
        # AiiDA node-traversal API — confirm against the AiiDA version in use.
        calc = bands_node.inp.bands
        wset = calc.inp.parameters.get_dict()
        owindow = (wset['dis_win_min'], wset['dis_win_max'])
    except KeyError as err:
        # parameters node exists but lacks the window keys
        if not silent:
            raise KeyError('Missing window parameters in input to '
                           'parent calculation:\n' + str(err)) from err
    except AttributeError as err:
        # bands_node has no parent calculation / parameters input
        if not silent:
            raise AttributeError('bands_node is not an output of an appropriate calc node.' + str(err)) from err
    return owindow
0d053c0c74b57558221b4e7f8a617663e661c854
66,523
def Le(tC,hC,rho,DAB):
    """Lewis number (dimensionless).

    Le = tC / (hC * rho * DAB)

    Parameters:
        tC, thermal conductivity in W/m/K
        hC, heat capacity in J/mol/K
        rho, molar density of humid vapor in mol/m^3
        DAB, diffusion of component A in B in m^2/s
    Returns:
        Lewis number (dimensionless)
    """
    denominator = hC * rho * DAB
    return tC / denominator
0c33226ca534f3c34479a28d5e93e61c44b20b2a
66,525
def sort_separation(separation):
    """Sort a separation.

    Each of the three parts is sorted; the shorter of the outer parts is
    placed first (the middle part stays in the middle).

    :param separation: Initial separation (three sequences).
    :return: Sorted list of separation.
    """
    first, middle, last = separation[0], separation[1], separation[2]
    if len(first) > len(last):
        first, last = last, first
    return [sorted(first), sorted(middle), sorted(last)]
b29bb725cc3f5aa28989acaf55976a0d837c2400
66,530
def floatify(scalar):
    """Convert a possibly Fortran-notation string to a float.

    Args:
        scalar (str, float): a string such as '1.d-4' (Fortran exponent
            marker) or an ordinary number.

    Returns:
        float: the numeric value; non-strings pass through untouched.

    Example:
        >>> floatify('1.d-4')
        0.0001
    """
    if not isinstance(scalar, str):
        return scalar
    return float(scalar.replace('d', 'e').replace('D', 'E'))
9df5deaf619fe39cd90fc0f100bf0e588ca4d780
66,543
from typing import Counter
import math


def conditional_entropy(x_symbols, y_symbols):
    """Compute the conditional entropy H(x | y).

    :param list x_symbols: A list of all observed `x` symbols.
    :param list y_symbols: A list of all observed `y` symbols.
    :return float: The conditional entropy of `x` given `y`.
    """
    # Joint and marginal counts over the paired observations.
    y_counter = Counter(y_symbols)
    xy_counter = Counter(list(zip(x_symbols, y_symbols)))
    population = sum(y_counter.values())
    # H(x|y) = sum_xy p(x,y) * log( p(y) / p(x,y) )
    return sum(
        (xy_count / population)
        * math.log((y_counter[pair[1]] / population) / (xy_count / population))
        for pair, xy_count in xy_counter.items()
    )
e6433e202b600d7518ee1255bd5f66a43b6781e8
66,545
def create_table_string(content, row_labels, col_labels, format, caption):
    """Render *content* as a table in the requested textual *format*.

    Args:
        content: 2-D indexable of table cell values, content[row][col].
        row_labels: labels for the start of each row.
        col_labels: labels for the header row, or None for no header.
        format: 'csv' or 'tex'.
        caption: caption text (used only by the 'tex' format).

    Returns:
        The formatted table as a single string.

    Raises:
        Exception: for any format other than 'csv' or 'tex'.
    """
    if format == 'csv':
        rows = []
        if col_labels is not None:
            rows.append(','.join([''] + col_labels))
        for i, label in enumerate(row_labels):
            rows.append(','.join([label] + [str(v) for v in content[i]]))
        return ''.join(r + "\n" for r in rows)
    if format == 'tex':
        pieces = ['\\begin{table}\n\\centering\n\\begin{tabular}']
        pieces.append("{" + ("l|" + "c" * len(content[0])) + "}\n")
        if col_labels is not None:
            pieces.append(' & '.join([''] + col_labels) + '\\\\' + "\n")
        pieces.append('\\hline \n')
        for i, label in enumerate(row_labels):
            cells = [label] + [str(v) for v in content[i]]
            pieces.append(' & '.join(cells) + '\\\\' + "\n")
        pieces.append("\\end{tabular}\n")
        pieces.append("\\caption{" + caption + "}\n")
        pieces.append("\\end{table}")
        return ''.join(pieces)
    raise Exception(
        "Format '{}' unknown for table creation. Currently supported are 'csv' and 'tex'."
        .format(format))
b8a5cf50bade400f8dffe4c02f070c6d12bf0b3a
66,548
def display_properties(props):
    """Render the supported properties of a block as a readable list.

    Each prop is indexed as (name, description, units).
    """
    body = "".join(
        f" - <block>.{prop[0]:<14}units={prop[2]:<6}{prop[1]}\n"
        for prop in props
    )
    return "Available properties:\n" + body
8c3c8a3f27489322daef33b86f10da12ef959759
66,555
def relu_der(x, alpha=0):
    """
    Rectified Linear Unit Derivative.

    If alpha is between 0 and 1, the function performs leaky relu.
    alpha values are commonly between 0.1 and 0.3 for leaky relu.

    Note: relu derivative is technically undefined at x=0.  Here every
    entry with x <= 0 — including x == 0 — receives `alpha`, so for plain
    relu (alpha=0) the derivative at 0 is 0, matching tensorflow.nn.relu.

    Parameters
    ----------
    x : numpy array
        Values to be activated.
    alpha : float, optional
        The scale factor for the linear unit. Typical leaky values are
        between 0.1 and 0.3. The default is 0 (plain relu).

    Returns
    -------
    dZ : numpy array
        The derivative of the activated values.
    """
    # Boolean-mask assignment requires a numpy array (x.copy preserves dtype).
    dZ = x.copy()
    dZ[x <= 0] = alpha
    dZ[x > 0] = 1
    return dZ
cd8397235071fe174a3bfc0e5cf191f8dc572e04
66,561
def format_list(extracted_list):
    """Format a list of traceback entry tuples for printing.

    Given a list of tuples as returned by extract_tb() or
    extract_stack(), return a list of strings ready for printing.  Each
    string in the resulting list corresponds to the item with the same
    index in the argument list.  Each string ends in a newline; the
    strings may contain internal newlines as well, for those items whose
    source text line is not None.
    """
    # Renamed the accumulator: the original shadowed the builtin `list`.
    formatted = []
    for filename, lineno, name, line in extracted_list:
        item = ' File "%s", line %d, in %s\n' % (filename, lineno, name)
        if line:
            item = item + ' %s\n' % line.strip()
        formatted.append(item)
    return formatted
205c396a8c420666c7164d0f74a46991a8751cea
66,562
def concat(str1, str2):
    """Return the concatenation of *str1* followed by *str2*."""
    combined = str1 + str2
    return combined
629ae0106e1ef14a5bef3a9fdfa1e002bd7aaa4f
66,566
def even(value):
    """Round *value* down to the nearest even integer (<= value).

    Arguments
    ---------
    value: number to be rounded
    """
    half = int(value // 2)
    return half * 2
0afe2be997c0a623dc269e8a877bc860f72937e7
66,569
import itertools


def sorted_groupby(iterable, key, value=lambda x: x):
    """Group like Python's groupby, but sort by *key* first.

    :param iterable: the thing to iterate through
    :param key: function used both to sort and to group
    :param value: applied to each group before yielding
        (defaults to the raw groupby iterator)
    :return: iterable of (key_result, grouped_values) tuples
    """
    ordered = sorted(iter(iterable), key=key)
    return (
        (group_key, value(group))
        for group_key, group in itertools.groupby(ordered, key=key)
    )
d13bb63806b1c64f60fe052014162c070b2b47b4
66,572
def set_show_viewport_size_on_resize(show: bool) -> dict:
    """Paints viewport size upon main frame resize.

    Parameters
    ----------
    show: bool
            Whether to paint size or not.
    """
    params = {"show": show}
    return {"method": "Overlay.setShowViewportSizeOnResize", "params": params}
d0d1f4ede8995b660f9d1abaabe18a5fdadc8d0b
66,574
def convert_range_to_number_list(range_list):
    """Expand a descriptive range list into a list of numbers.

    E.g. ['12-14', '^13', '17'] is converted to [12, 14, 17]
    ('^n' excludes n, 'a-b' is an inclusive range).

    Returns a string with an error message if the input cannot be parsed.
    """
    included = []
    excluded = []
    try:
        for item in range_list:
            item = item.strip(' ')
            if '^' in item:
                excluded.append(int(item[1:]))
            elif '-' in item:
                parts = item.split("-")
                included.extend(range(int(parts[0]), int(parts[1]) + 1))
            else:
                included.append(int(item))
    except ValueError as exc:
        return "Parse Error: Invalid number in input param 'num_list': %s" % exc
    return [number for number in included if number not in excluded]
31faa5b80baecccea7975237fc9796d7ca784c84
66,575
def _backward_diff(j, yj, hj, lam):
    """
    Computes through the backward differentiation formula different order
    derivatives of y for a given extrapolation step (for every T{1,j}). In
    other words, it calculates VI.5.43 ref II (step 1) for all kappa for a
    fixed j (for a fixed extrapolation step).

    Used by _interpolate_nonsym.

    Parameters
    ----------
    j : int
        which extrapolation inner-stage is used
    yj : 2D array
        array with all the intermediate solution values obtained to
        calculate each T_{j,1}.
    hj : array
        inner step taken in the j-th extrapolation step, H/nj (II.9.1 ref I).
    lam : int
        either 0 or 1, check definition and use in ref II pg 439

    Returns
    -------
    rj : 2D array
        contains, for each kappa=1...j-lam, the kappa-th derivative of y
        estimated using the j-th extrapolation step values
    """
    max_order = j-lam
    nj = len(yj) - 1
    # coefficients of Pascal's triangle, row 1; rebuilt row by row below
    coeff = [1,1]
    # rj[0] stays None: derivative orders are 1-based
    rj = (max_order+1)*[None]
    # first-order backward difference
    rj[1] = (yj[nj] - yj[nj-1])/hj
    for order in range(2,max_order+1):
        # Binomial coefficients: next row of Pascal's triangle
        coeff = [1] + [coeff[jj] + coeff[jj+1] for jj in range(len(coeff)-1)] + [1]
        # indexes run backwards from the last solution value
        index = range(nj, nj-order-1, -1)
        # alternating-sign weighted sum = order-th backward difference
        sum_ = sum([((-1)**i)*coeff[i]*yj[index[i]] for i in range(order+1)])
        rj[order] = sum_ /hj**order
    return rj
7924acb94aa06e1d4c98eaff72ad2bd228c355e2
66,576
def find_abs_min(arr):
    """Find the minimum absolute value in *arr*.

    :param arr: the input array (must be non-empty)
    :return: (minimum absolute value, index of its first occurrence)
    """
    best_val, best_idx = abs(arr[0]), 0
    for idx, item in enumerate(arr):
        candidate = abs(item)
        if candidate < best_val:
            best_val, best_idx = candidate, idx
    return best_val, best_idx
afde58a21fb22340602d82c0880e7fb53da6e192
66,580
def nbsp(value):
    """Replace every ordinary space with a non-breaking space so the
    phrase cannot be wrapped mid-text."""
    return value.translate({ord(" "): "\xa0"})
6ef5586da6c03c84db1acef0597ecb449d527eca
66,584
def _get_topic_base(device_id, module_id): """ return the string that is at the beginning of all topics for this device/module """ if module_id: return "devices/" + device_id + "/modules/" + module_id else: return "devices/" + device_id
ffb0576a76a98eb76cc5f54ad9d9d7629dd82258
66,590
def raycast(obj_bvhtree, ray_from_objspc, ray_to_objspc):
    """Casts a ray to an object.

    Args:
        obj_bvhtree (mathutils.bvhtree.BVHTree): Constructed BVH tree of
            the object.
        ray_from_objspc (mathutils.Vector): Ray origin, in object's
            local coordinates.
        ray_to_objspc (mathutils.Vector): Point the ray passes through
            (used only to derive the direction; the ray does not stop
            there), in object's local coordinates.

    Returns:
        tuple:
            - **hit_loc** (*mathutils.Vector*) -- Hit location in local
              coordinates; ``None`` means no intersection.
            - **hit_normal** (*mathutils.Vector*) -- Normal at the hit,
              in local coordinates.
            - **hit_fi** (*int*) -- Index of the face that was hit.
            - **ray_dist** (*float*) -- Distance traveled before the
              hit; useful for self-occlusion checks when
              ``ray_to_objspc`` lies on the surface.
    """
    direction = (ray_to_objspc - ray_from_objspc).normalized()
    hit_loc, hit_normal, hit_fi, ray_dist = \
        obj_bvhtree.ray_cast(ray_from_objspc, direction)
    if hit_loc is None:
        # A miss must be reported consistently across all four fields.
        assert hit_normal is None and hit_fi is None and ray_dist is None
    return hit_loc, hit_normal, hit_fi, ray_dist
4db2a3eb23a8db3a91fb3083a4b573a43b4fc690
66,591
def format_comma(d):
    """Format *d* as an integer with thousands separators."""
    return f"{int(d):,d}"
d575d0aa8cad9d34267e8a0f3a4f65e76cde2d09
66,593
def link(url, linkText='{url}'):
    """Returns a link HTML string.

    The string is an <a> tag that links to the given url. If linkText is
    not provided, the link text will be the url.
    """
    # Bug fix: the href value must be quoted, otherwise URLs containing
    # spaces or '>' produce a broken attribute.
    template = '<a href="{url}">' + linkText + '</a>'
    return template.format(url=url)
a53c4cd468de23cfc25572093ca8787feb4f12a4
66,595
import copy


def observed_graph(g):
    """Build a deep copy of *g* containing only observed edges
    (edges whose ``description`` attribute is not "U").

    :param g: Input graph
    :return: Subgraph containing only observed edges
    """
    subgraph = copy.deepcopy(g)
    hidden = subgraph.es.select(description="U")
    subgraph.delete_edges(hidden)
    return subgraph
b8b15bbed605e46a7e49ed163f4ea9247d80a726
66,599
def get_is_running(db_cur, schedule_id):
    """Get is_running status for one schedule row.

    :param db_cur: an open DB-API cursor.
    :param schedule_id: schedule identifier (stored as text).
    :return: the is_running column of the matching row.
    """
    # Security fix: the id was previously interpolated into the SQL with an
    # f-string, allowing SQL injection.  Use a bound parameter instead.
    # NOTE: '?' is the qmark paramstyle (sqlite3); adjust to '%s' if the
    # cursor comes from a driver using the format paramstyle.
    sqlquery = """
        SELECT is_running
        FROM schedules
        WHERE schedule_id = ?"""
    db_cur.execute(sqlquery, (str(schedule_id),))
    row = db_cur.fetchone()
    return row[0]
de7c2c3f9cab4b02137cf4975265bdcb12aefcde
66,603
def rename_columns(df, rename_dict):
    """Renames columns based on `rename_dict`.

    Args:
        df (`pandas.DataFrame`): The dataframe to rename columns in
        rename_dict (dict): ``{'old_name': 'new_name'}`` mapping

    Returns:
        `pandas.DataFrame`: a new frame with renamed columns
        (the input frame is left unmodified)
    """
    renamed = df.rename(columns=rename_dict)
    return renamed
30ac3e5bb888897799d0d899fa186a48785ec54b
66,606
import functools


def no_exception(function):
    """Decorator that swallows every exception raised by *function*.

    The exception message is printed and None is returned instead of the
    original result.  Use sparingly — silencing all errors hides bugs.
    """
    @functools.wraps(function)  # preserve the wrapped function's metadata
    def wrapper(*args, **kwargs):
        try:
            return function(*args, **kwargs)
        except Exception as e:
            print(str(e))
            return None  # explicit: the original fell through to None
    return wrapper
5e87331becfa8dfdaaf5c7f2b6b4e925e40be103
66,607
def evaluate_knn(knn, x_train, y_train, x_test, y_test):
    """Score a trained classifier on its training data and on test data.

    Input:
        knn - A trained k-nn classifier
        x/y_train - Training data
        x/y_test - Test data

    Returns:
        (train_acc, test_acc) accuracy tuple.
    """
    return (knn.score(x_train, y_train), knn.score(x_test, y_test))
9ee80d443b207e9884a03c741ac03c465fee881d
66,608
def unlinearize_term(index, n_orbitals): """Function to return integer index of term indices. Args: index(int): The index of the term. n_orbitals(int): The number of orbitals in the simulation. Returns: term(tuple): The term indices of a one- or two-body FermionOperator. """ # Handle identity term. if not index: return (()) elif (0 < index < 1 + n_orbitals ** 2): # Handle one-body terms. shift = 1 new_index = index - shift q = new_index // n_orbitals p = new_index - q * n_orbitals assert index == shift + p + q * n_orbitals return ((p, 1), (q, 0)) else: # Handle two-body terms. shift = 1 + n_orbitals ** 2 new_index = index - shift s = new_index // n_orbitals ** 3 r = (new_index - s * n_orbitals ** 3) // n_orbitals ** 2 q = (new_index - s * n_orbitals ** 3 - r * n_orbitals ** 2) // n_orbitals p = (new_index - q * n_orbitals - r * n_orbitals ** 2 - s * n_orbitals ** 3) assert index == (shift + p + q * n_orbitals + r * n_orbitals ** 2 + s * n_orbitals ** 3) return ((p, 1), (q, 1), (r, 0), (s, 0))
0f1a044c4b95b594a3b3e20b7623c1da4ed67275
66,609
def fixup_cell_names(design):
    """Scan Yosys' JSON data structure and replace dots in cell instance
    names with underscores.

    :param design: parsed Yosys JSON design dict (mutated in place).
    :return: the same design dict, with cells renamed.
    """
    for mod_data in design["modules"].values():
        # Removed leftover debug print of the module name.
        cells = mod_data["cells"]
        # Snapshot the keys: the dict is mutated while renaming.
        for cell_name in list(cells.keys()):
            if "." in cell_name:
                new_name = cell_name.replace(".", "_")
                assert new_name not in cells, new_name
                cells[new_name] = cells.pop(cell_name)
    return design
55978578226538860c3968f84b14560304ea23af
66,610
from typing import Any


def is_real_float(candidate: Any) -> bool:
    """Tell whether *candidate* is a float with a fractional part.

    Integers, integer-valued floats ("1.0"), unconvertible objects and
    strings all yield False; only values that convert to a float with a
    non-zero fractional part yield True.
    """
    try:
        value = float(candidate)
    except (TypeError, ValueError):
        return False
    return not value.is_integer()
fef4f6a462832272099b4010deb9917a7d917096
66,613
def format_desktop_command( command, filename):
    """Format an "Exec=" command template from a .desktop file into a
    shell-invocable string.

    Handled format strings: %U, %u, %F, %f, plus a fallback that appends
    the filename as first parameter of the command.

    See http://standards.freedesktop.org/desktop-entry-spec/1.0/ar01s06.html
    """
    file_uri = 'file://%s' % filename
    substitutions = {
        '%U': file_uri,
        '%u': file_uri,
        '%F': filename,
        '%f': filename,
    }
    # First placeholder present wins, checked in declaration order.
    for placeholder, replacement in substitutions.items():
        if placeholder in command:
            return command.replace(placeholder, replacement)
    return '%s "%s"' % ( command, filename )
8c7de60dd7db2d6da4c803d203104f86145a9060
66,614
def subset_wqt_by_gain(avg_wq_site_df, gain_field_name, gain_setting):
    """Subset daily water quality rows by a gain setting.

    :param avg_wq_site_df: average vertical profiles by site for a date
    :param gain_field_name: field name that stores the gain settings
    :param gain_setting: gain setting (gn0, gn1, gn10, gn100)
    :return: the rows whose gain field equals *gain_setting*
    """
    mask = avg_wq_site_df[gain_field_name] == gain_setting
    return avg_wq_site_df[mask]
545a4ca3be42ea8075525f3c06390f0358d91229
66,615
import math


def dist(a, b, x, y):
    """Euclidean distance between the points (a, b) and (x, y)."""
    return math.hypot(a - x, b - y)
d909b7b5a40b58bce23b880e80c6e62b4e7df310
66,621
def to_img(x):
    """Map a batch of vectors from [-1, 1] (mean 0) to [0, 1]
    (mean 0.5) and reshape each row to a 28x28 image.

    view() is used (rather than reshape) so the result always shares
    storage with the input.
    """
    shifted = 0.5 * (x + 1)
    return shifted.view(shifted.size(0), 28, 28)
13d4f9231e9b9d80fca5d835d52b0af61d525dba
66,624
def rotate(l, n):
    """Rotate *l* by shifting its elements *n* positions to the left.

    Args:
        l (list): list to rotate
        n (int): number of positions to shift left

    Returns (list): the rotated list (a new list)
    """
    head, tail = l[:n], l[n:]
    return tail + head
e847a78058d917fb8c4c0fa9aac38103cadda3dc
66,626
def equate_prefix(name1, name2):
    """Evaluate whether the names match or one is a prefix of the other.
    Empty names never match."""
    if not name1 or not name2:
        return False
    return name1.startswith(name2) or name2.startswith(name1)
01145d460faebc841accc493f76db37bc0e8ad23
66,628
from pathlib import Path


def find_config_file(src: Path) -> Path:
    """Look for a sniptly.toml config file.

    Searches *src* itself (when it is a directory) and every parent of
    its resolved path.

    Args:
        src (Path): File or folder where to look for sniptly snippets.

    Raises:
        FileNotFoundError: when no sniptly.toml is found.

    Returns:
        Path: path to the config file.
    """
    ancestors = src.resolve().parents
    search_space = [src] + list(ancestors) if src.is_dir() else ancestors
    for folder in search_space:
        candidate = folder / "sniptly.toml"
        if candidate.exists():
            return candidate
    raise FileNotFoundError(
        f"Config file was not found. Looked in {str(search_space)}"
    )
82d0bdafce311087eed2b15c260e11d737df2d54
66,637
from torch import optim
from typing import Dict
from typing import Any
import torch


def get_torch_optimizer(
    name: str, hparams: Dict[str, Any], parameters
) -> torch.optim.Optimizer:
    """Construct a PyTorch optimizer specified by :obj:`name` and :obj:`hparams`."""
    # Name -> constructor lookup replaces the long if/elif chain.
    registry = {
        "Adadelta": optim.Adadelta,
        "Adagrad": optim.Adagrad,
        "Adam": optim.Adam,
        "AdamW": optim.AdamW,
        "SparseAdam": optim.SparseAdam,
        "Adamax": optim.Adamax,
        "ASGD": optim.ASGD,
        "LBFGS": optim.LBFGS,
        "RMSprop": optim.RMSprop,
        "Rprop": optim.Rprop,
        "SGD": optim.SGD,
    }
    if name not in registry:
        raise ValueError(f"Invalid optimizer name: {name}")
    try:
        return registry[name](parameters, **hparams)
    except TypeError:
        raise Exception(
            f"Invalid parameter in hparams: {hparams}"
            f" for optimizer {name}.\nSee PyTorch docs."
        )
c480877f03869526cd10f0a255ae5d56e538820f
66,640
def calc_flesch_reading_easy(
    n_syllables: int,
    n_words: int,
    n_sents: int,
    a: float = 1.3,
    b: float = 60.1,
    c: float = 206.835,
) -> float:
    """
    Compute the Flesch reading-ease index (Russian-adapted coefficients).

    Description:
        The higher the score, the easier the text is to read.
        The index ranges from 0 to 100 and can be interpreted as:
        100-90 - 5th grade
        90-80  - 6th grade
        80-70  - 7th grade
        70-60  - 8th and 9th grade
        60-50  - 10th and 11th grade
        50-30  - university student
        30-0   - university graduate

    References:
        https://ru.wikipedia.org/wiki/Индекс_удобочитаемости
        https://en.wikipedia.org/wiki/Flesch–Kincaid_readability_tests#Flesch_reading_ease

    Args:
        n_syllables (int): Number of syllables
        n_words (int): Number of words
        n_sents (int): Number of sentences
        a (float): Coefficient a (words-per-sentence weight)
        b (float): Coefficient b (syllables-per-word weight)
        c (float): Coefficient c (base score)

    Returns:
        float: The index value
    """
    return c - (a * n_words / n_sents) - (b * n_syllables / n_words)
4aaa60c573b0b9230e021085df74e60683064f50
66,644
def FindMicrocode(microcodes, model):
    """Find all the microcode chunks which match the given model.

    This model is something like 306a9 (the value returned in eax from
    cpuid(1) when running on Intel CPUs). But we allow a partial match,
    omitting the last 1 or two characters to allow many families to have
    the same microcode.

    If the model name is ambiguous we return a list of matches.

    Args:
        microcodes: Dict of Microcode objects indexed by name
        model: String containing model name to find
    Returns:
        Tuple:
            List of matching Microcode objects
            List of abbreviations we tried
    """
    # A full-name hit wins outright and tries no abbreviations.
    exact = microcodes.get(model)
    if exact:
        return [exact], []
    tried = []
    found = []
    for trim in range(3):
        abbrev = model[:-trim] if trim else model
        tried.append(abbrev)
        found = [mc for mc in microcodes.values()
                 if mc.model.startswith(abbrev)]
        if found:
            break
    return found, tried
a5b7cc664bd7376f94b578a309078feac10f6983
66,645
def genSchedulerTrigger(trigger_name, repo, refs, path_regexps, builds):
    """Generate the luci scheduler job for a given build config.

    Args:
        trigger_name: Name of the trigger as a string.
        repo: Gitiles URL of the git repository.
        refs: Iterable of git refs to check. May use regular expressions.
        path_regexps: Iterable of path regular expressions of files to
            trigger on, or falsy to trigger on everything.
        builds: Iterable of build config names to trigger.

    Returns:
        Multiline string to include in the luci scheduler configuration.
    """
    template = """
trigger {
  id: "%(trigger_name)s"
  acl_sets: "default"
  schedule: "with 5m interval"
  gitiles: {
    repo: "%(repo)s"
%(refs)s%(path_regexps)s
  }
%(triggers)s
}
"""
    ref_lines = '\n'.join(' refs: "%s"' % r for r in refs)
    trigger_lines = '\n'.join(' triggers: "%s"' % b for b in builds)
    if path_regexps:
        regexp_lines = '\n' + '\n'.join(
            ' path_regexps: "%s"' % r for r in path_regexps)
    else:
        regexp_lines = ''

    return template % {
        'trigger_name': trigger_name,
        'repo': repo,
        'refs': ref_lines,
        'path_regexps': regexp_lines,
        'triggers': trigger_lines,
    }
e2752fbfee8c4e81db33038ad9f5d7e77f6fe341
66,652
def DwordToBits(srcDword):
    """Convert a 32-bit dword into a list of its bits, least significant first.

    Args:
        srcDword: Non-negative integer value of the dword (< 2**32).

    Returns:
        List of 32 ints (each 0 or 1); index 0 is the least significant bit.
    """
    # Extract each bit with a shift/mask instead of the original
    # hex-string -> int -> bin-string round trip, which was slower and
    # much harder to read.
    return [(srcDword >> shift) & 1 for shift in range(32)]
8301be046031f521dfb1b5640b3b4edbf46a0b20
66,656
import random def _random_string(num: int): """Generate random string of a given length. """ return ''.join(random.choices('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ', k=num))
3a3b42dcabcf20aeb43c7cc4db3389172bbd23c0
66,658
def extract_relation(res,resource_type):
    """Take a single FHIR resource and create the entries for relation.csv.

    Each entry is a dict with keys ``source`` (the resource id),
    ``target_url`` and ``relation``.

    Logic:
        Profile / Data type (snapshot and differential elements):
            Bound_Req  = element.binding[strength = required].valueSet
            Bound_Ext  = element.binding[strength = extensible].valueSet
            Bound_Pref = element.binding[strength = preferred].valueSet
            Bound_Exam = element.binding[strength = example].valueSet
            extension  = element.type[code = Extension].profile
            references = element.type[*].targetProfile
        ValueSet:
            valuesFrom = compose.include.system
            includes   = compose.include.valueSet

    Args:
        res: Parsed FHIR resource (dict).
        resource_type: One of "Profile", "Data type", "ValueSet"; any
            other value yields an empty result.

    Returns:
        list[dict]: Relation rows for relation.csv.
    """
    dict_relat=[]
    # Maps a binding strength to the relation label emitted in the CSV.
    relation_type_data={"required":"Bound_Req","extensible":"Bound_Ext","preferred":"Bound_Pref","example":"Bound_Exam"}
    # if res.get("id")=="be-ext-laterality":
    #     print(resource_type,res.get("id"))
    if resource_type in ["Profile","Data type"]:
        # First pass: elements of the snapshot view.
        elements=res.get('snapshot', {}).get('element',[] )
        for element in elements:
            binding=element.get("binding",{}).get("strength")
            value=element.get("binding",{}).get("valueSet")
            if binding:
                # print(value)
                # Drop any version suffix after '|' from the canonical URL.
                stripped = value.split("|", 1)[0] #remove pipes
                # if res.get("id")=="be-allergyintolerance":
                #     print(stripped)
                #print(resource_type,"binding -> ",binding,value)
                dict_relat.append({"source":res.get("id"),"target_url":stripped,"relation":relation_type_data[binding]})
            for l in element.get("type",[]):
                # Extension types point at the extension definition profile.
                if l.get("code",{})=="Extension":
                    #pass
                    if l.get("profile"):
                        dict_relat.append({"source":res.get("id"),"target_url":l.get("profile")[0],"relation":"extension"})
                # Every targetProfile (Reference targets) becomes a
                # "references" relation, regardless of the type code.
                for target_profile in l.get("targetProfile",[]):
                    dict_relat.append({"source":res.get("id"),"target_url":target_profile,"relation":"references"})
        # print()
        # Second pass: same extraction over the differential view.
        # NOTE(review): this can duplicate rows already found in the
        # snapshot pass - presumably deduplicated downstream; confirm.
        elements=res.get('differential', {}).get('element', [])
        for element in elements:
            binding=element.get("binding",{}).get("strength")
            value=element.get("binding",{}).get("valueSet")
            if binding:
                # print(res.get("id"),value)
                # print(value,res.get("id"))
                stripped = value.split("|", 1)[0] #remove pipes
                #print(resource_type,"binding -> ",binding,value)
                dict_relat.append({"source":res.get("id"),"target_url":stripped,"relation":relation_type_data[binding]})
            for l in element.get("type",[]):
                if l.get("code",{})=="Extension":
                    #pass
                    if l.get("profile"):
                        # print(l.get("profile")[0],res.get("id"))
                        dict_relat.append({"source":res.get("id"),"target_url":l.get("profile")[0],"relation":"extension"})
                for target_profile in l.get("targetProfile",[]):
                    dict_relat.append({"source":res.get("id"),"target_url":target_profile,"relation":"references"})
        # print()
    elif resource_type=="ValueSet":
        # ValueSets relate to the code systems and value sets they compose.
        for s in res.get("compose",{}).get("include",[]):
            #print(s)
            if s.get("system"):
                dict_relat.append({"source":res.get("id"),"target_url":s.get("system"),"relation":"valuesFrom"})
            if s.get("valueSet"):
                # print(s.get("valueSet"))
                # Only the first referenced value set is recorded.
                dict_relat.append({"source":res.get("id"),"target_url":s.get("valueSet")[0],"relation":"includes"})
        #print(res.get("expansion",{}).get("contains",[]))
    return dict_relat
1d2e058946c99613c8c811ed8da5008ec9d6cf62
66,667
def check_access(cursor, worker_id, task_namespace, experiment_id):
    """Return True when no workers row exists yet for this worker/experiment
    combination, i.e. access has not been recorded before."""
    query = (
        "SELECT count(*) FROM workers WHERE worker_id = ? "
        "AND task_namespace = ? AND experiment_id = ?"
    )
    cursor.execute(query, (worker_id, task_namespace, experiment_id))
    (row_count,) = cursor.fetchone()
    return row_count == 0
6436c60a6ded4926c3faa9dd400725dfb491b1c5
66,668
def bdev_virtio_detach_controller(client, name):
    """Remove a Virtio device.

    This will delete all bdevs exposed by this device.

    Args:
        client: JSON-RPC client used to issue the call.
        name: virtio device name
    """
    return client.call('bdev_virtio_detach_controller', {'name': name})
89db40f02d693f74be42cf3984dad6a9f7ca1fe8
66,669
def build_genotype(gt_call):
    """Build a genotype call.

    Args:
        gt_call(dict): raw call with keys ``individual_id``,
            ``display_name``, ``ref_depth``, ``alt_depth``,
            ``read_depth``, ``genotype_quality`` and (optionally)
            ``genotype_call``.

    Returns:
        gt_obj(dict): dict with keys sample_id, display_name,
            genotype_call, allele_depths ([ref, alt]), read_depth and
            genotype_quality.
    """
    depths = [gt_call['ref_depth'], gt_call['alt_depth']]
    return {
        'sample_id': gt_call['individual_id'],
        'display_name': gt_call['display_name'],
        'genotype_call': gt_call.get('genotype_call'),
        'allele_depths': depths,
        'read_depth': gt_call['read_depth'],
        'genotype_quality': gt_call['genotype_quality'],
    }
ec94d43e77617753814b75e0063ec9f2ee382c7a
66,672
import math


def mask_invalid_number(num):
    """Return ``num`` unchanged, or zero of the same type when it is NaN
    or infinite."""
    if math.isfinite(num):
        return num
    return type(num)(0)
b87b1f10d9380fd254d6d39fdd5b200934a8d490
66,673
def tpc_h17(con, BRAND="Brand#23", CONTAINER="MED BOX"):
    """Small-Quantity-Order Revenue Query (Q17)

    This query determines how much average yearly revenue would be lost if
    orders were no longer filled for small quantities of certain parts.
    This may reduce overhead expenses by concentrating sales on larger
    shipments.

    Args:
        con: query-expression backend exposing ``.table()`` (ibis-style
            connection - TODO confirm exact type against callers).
        BRAND: part brand to filter on (``p_brand``).
        CONTAINER: part container type to filter on (``p_container``).

    Returns:
        Unexecuted query expression computing ``avg_yearly``.
    """
    lineitem = con.table("lineitem")
    part = con.table("part")

    # Join line items to their part rows.
    q = lineitem.join(part, part.p_partkey == lineitem.l_partkey)

    # Correlated subquery: all line items for the same part, used below to
    # compute that part's mean ordered quantity.
    innerq = lineitem
    innerq = innerq.filter([innerq.l_partkey == q.p_partkey])

    q = q.filter(
        [
            q.p_brand == BRAND,
            q.p_container == CONTAINER,
            # "Small quantity" = below 20% of the part's average quantity.
            q.l_quantity < (0.2 * innerq.l_quantity.mean()),
        ]
    )
    # Divide total revenue by 7 to get an average yearly figure (the TPC-H
    # data set spans 7 years).
    q = q.aggregate(avg_yearly=q.l_extendedprice.sum() / 7.0)

    return q
4f2214192ea084c05a257dd9dac7785fc54ad8ec
66,674
def update_weights(inputs, weights, category_num, learn_rate):
    """Return a weight matrix which has learned the input for the category,
    and whether or not it was changed.

    :param inputs: A vector whose length equals the number of features in
        the network; the signal input into the network.
    :param weights: A num_features-by-num_categories matrix holding the
        current weights of the network.
    :param category_num: The category to encode the current input into,
        in the range [1, num_categories].
    :param learn_rate: The learning rate for new inputs, in [0, 1].
    :return: (updated_weights, changed). updated_weights is the (mutated
        in place) num_features-by-num_categories weight matrix; changed is
        1 if any weight was updated, else 0.
    :raises AssertionError: if the input length or category is invalid.
    """
    num_features, num_categories = weights.shape
    assert len(inputs) == num_features, 'The length of the inputs and the rows of the weights do not match'
    # BUG FIX: the original condition used `or`, which is true for every
    # integer, so invalid categories were never rejected. `and` semantics
    # (via a chained comparison) enforce the documented range.
    assert 1 <= category_num <= num_categories, \
        'The category must be in the range [1, num_categories]'
    col = category_num - 1  # weights are 0-indexed by column
    changed = 0
    for i in range(num_features):
        # Fast-learning ART-style update: only shrink weights toward
        # smaller inputs.
        if inputs[i] < weights[i, col]:
            weights[i, col] = learn_rate * inputs[i] + (1 - learn_rate) * weights[i, col]
            changed = 1
    return weights, changed
29da566f072eb3d6a31f37c8cba876277abded34
66,676
def get_common_form(placeholders):
    """
    Extract the common target form from the scalars in ``placeholders``,
    verifying that all non-None target forms agree.

    Args:
        placeholders: Mapping with a "scalars" entry whose members carry a
            ``target_form`` attribute (possibly None).

    Return:
        str: Common target form, or None if no member specifies one.

    Raises:
        ValueError: If two members carry different target forms.
    """
    common = None
    for scalar in placeholders["scalars"]:
        form = scalar.target_form
        if form is None:
            continue
        if common is not None and common != form:
            raise ValueError("Variant target forms provided for "
                             "single entry.")
        common = form
    return common
63781c4e06c0b1e4a924b2d4cd55b7d70f13d2f9
66,677
import time


def gmtime_for_str(str_date):
    """Convert a 'Www, dd-Mon-YYYY HH:MM:SS GMT' string to ``time.gmtime``.

    Returns None when ``str_date`` is None. Note the round trip goes
    through ``time.mktime``, which interprets the parsed struct in local
    time, matching the original behavior.
    """
    if str_date is None:
        return None
    parsed = time.strptime(str_date, "%a, %d-%b-%Y %H:%M:%S GMT")
    return time.gmtime(time.mktime(parsed))
f9c4680dda8c5f97cae8f8826c34fbaf53f4458c
66,679
def is_wh(tok):
    """Return True when the token's fine-grained tag marks a wh-pronoun
    (Penn Treebank W* tags)."""
    return tok.tag_[:1] == 'W'
1f78bf065c6abfc4f0f386fc5d144d9c776aa696
66,680
def union_values(dictionary):
    """Given a dictionary with values that are Collections, return their
    sorted union.

    Arguments:
        dictionary (dict): dictionary whose values are all collections.

    Return:
        (list): sorted list of the union of all collections in the
            dictionary's values.
    """
    # BUG FIX: the empty-dict case previously returned set() while the
    # non-empty case returned a sorted list; now a list comes back in
    # both cases so callers always see a consistent type.
    sets = [set(p) for p in dictionary.values()]
    return sorted(set.union(*sets)) if sets else []
c4ce2535708f38cdaa7f2649e295b3794da37cb1
66,683
def get_upload_urn(envelope_js):
    """Get the upload urn from the submission envelope.

    Args:
        envelope_js (dict): the submission envelope contents

    Returns:
        String giving the upload urn in the format s3://<bucket>/<uuid>,
        or None if the envelope doesn't contain a urn
    """
    staging = envelope_js.get('stagingDetails')
    if not staging:
        return None
    area = staging.get('stagingAreaLocation')
    if not area:
        return None
    return area.get('value')
3e67fec7da814d103ae7029b5eb2ef13e98cdf5b
66,684
def listify(value):
    """
    Convert value to list

    Parameters
    ----------
    value: object
        The value to convert

    Returns
    -------
    listified: list
        If string, return [string]
        If tuple or other iterable, convert to list
        If not iterable, return [value]
    """
    # Python 3 only needs `str` here; the original dependency on
    # six.string_types was a Python-2 compatibility shim.
    if isinstance(value, str):
        return [value]
    try:
        return list(value)
    except TypeError:
        return [value]
6abe25626e0e7d1f692863c3c611403dbe6dec43
66,687
def clever_split(line: str) -> list:
    """Get separated (tabs or whitespaces) elements from line.

    Tabs take priority; if the line contains no tab, single spaces are
    used as the delimiter instead.

    Parameters
    ----------
    line : str

    Returns
    -------
    elems : list of str
        Non-empty elements with surrounding whitespace stripped.
    """
    pieces = line.split('\t')
    if len(pieces) < 2:
        # No tab present: fall back to space-separated fields.
        pieces = line.split(' ')
    stripped = (piece.strip() for piece in pieces)
    return [item for item in stripped if item]
2cc0c11764ed0c7f216818ad86356bd2d327f138
66,692
def callsign_decode(rawcall):
    """Decode an AX.25 shifted-ASCII callsign field.

    The first six bytes hold the callsign characters shifted left one bit
    (space padded); byte seven carries the SSID in bits 1-4.

    Returns the callsign with '-<ssid>' appended when the SSID is
    non-zero, or '' when the decoded callsign is not alphanumeric.
    """
    callsign = ''
    for i in range(6):
        decoded = chr(ord(rawcall[i]) >> 1)
        if decoded == ' ':
            # Space marks the end of a short callsign.
            break
        callsign += decoded
    ssid = (ord(rawcall[6]) >> 1) & 0x0f
    if not callsign.isalnum():
        return ''
    if ssid > 0:
        callsign += "-%d" % (ssid)
    return callsign
1f21b3ec9132d95c0abf8d50aa1311068d34647d
66,697
def direction(which):
    """Return a compass-name string (e.g. 'north') for a direction number
    (1-4); index 0 maps to the empty string."""
    names = ('', 'north', 'east', 'south', 'west')
    return names[which]
f3416b4e9578b167fb3d6625cff761d22c7636d1
66,698
from pathlib import Path


def home_cwd(tmpdir):
    """
    Create two temporary directories simulating cwd and home: tmpdir
    itself is the cwd and tmpdir/'home' is created inside it.

    Return a dict with 'secondary_path' (home) and 'primary_path' (cwd).
    """
    primary = Path(tmpdir)
    secondary = primary / "home"
    secondary.mkdir()
    return {"secondary_path": secondary, "primary_path": primary}
ff1d05e4c0ca6952ff41f66a01505735d867aa16
66,701
from pathlib import Path
from typing import Optional
import json


def save_json(data: dict, source: Path, overwrite: Optional[bool] = False) -> bool:
    """
    Save a dictionary as a json file.

    Parameters
    ----------
    data: dict
        Dictionary to be saved
    source: Path
        Path to filename to write
    overwrite: bool
        True to overwrite existing file

    Returns
    -------
    bool
        True when the file was written

    Raises
    ------
    FileExistsError
        If `source` already exists and `overwrite` is falsy. (The
        original docstring claimed False was returned in this case, but
        the code has always raised; the docs now match the behavior.)
    """
    if Path(source).exists() and not overwrite:
        e = F"`{source}` already exists. Set `overwrite` to `True`."
        raise FileExistsError(e)
    # Explicit encoding so output does not depend on the platform locale.
    with open(source, "w", encoding="utf-8") as f:
        json.dump(data, f, indent=4, sort_keys=True, default=str)
    return True
7cc4bb08092171176e93222d23bfac1c149f1c94
66,705
def normalize_map(x):
    """
    Normalize map input.

    Rescales raw [0, 255] values to [0, 2]; later zero padding then
    produces the equivalent of an obstacle.

    :param x: map input (H, W, ch)
    :return np.ndarray: normalized map (H, W, ch)
    """
    scale = 2.0 / 255.0
    return x * scale
4fff7f7cb404b7e122db241c768c156defce0653
66,706
def CreateLogResourceName(project, log_id):
    """Creates the full log resource name.

    Args:
        project: The project id, e.g. my-project.
        log_id: The log id, e.g. my-log. Values already in full resource
            form are returned unchanged.

    Returns:
        Log resource, e.g. projects/my-project/logs/my-log.
    """
    already_full = 'projects/' in log_id and 'logs/' in log_id
    if already_full:
        return log_id
    # Slashes in the log id must be percent-encoded in the resource name.
    escaped_id = log_id.replace('/', '%2F')
    return 'projects/%s/logs/%s' % (project, escaped_id)
ffef7b90d8e1e93930bf6cc53e0fdf9884a4bd5c
66,708
def parse_coco_categories(categories):
    """Parses the COCO categories list.

    The returned ``classes`` contains all class IDs from ``[0, max_id]``,
    inclusive; IDs with no category entry (or no ``name``) are labeled
    with the stringified ID.

    Args:
        categories: a list of dicts of the form::

            [
                ...
                {
                    "id": 2,
                    "name": "cat",
                    "supercategory": "animal",
                    "keypoints": ["nose", "head", ...],
                    "skeleton": [[12, 14], [14, 16], ...]
                },
                ...
            ]

    Returns:
        a tuple of

        -   classes: a list of classes
        -   supercategory_map: a dict mapping class labels to category dicts
    """
    cat_map = {c["id"]: c for c in categories}

    classes = []
    supercategory_map = {}
    for cat_id in range(max(cat_map) + 1):
        category = cat_map.get(cat_id, None)

        # Explicit fallbacks instead of the original bare `except`, which
        # silently swallowed every error type.
        if category is None:
            name = str(cat_id)
        else:
            name = category.get("name", str(cat_id))

        classes.append(name)

        if category is not None:
            supercategory_map[name] = category

    return classes, supercategory_map
0136d4a79a7e06dc3031be94edf9aedc324041ad
66,712
def predict_y(x0, m, c):
    """
    Predict yhat on the regression line y = m*x + c.

    Parameters
    ----------
    > `x0`: the value of predictor
    > `m`: slope of the regression line
    > `c`: y-intercept of the regression line

    Returns
    -------
    The predicted value of y for the given parameters.
    """
    return m * x0 + c
3dded1cb5c00a01933785f98f405f13582159f81
66,715
def get_embed_method_and_name(fname):
    """
    Split ``fname`` on the word '_cleaned_' and return (method, name):
    the last whitespace-separated token before the first '_cleaned_' and
    everything after the last '_cleaned_'.
    """
    pieces = fname.split('_cleaned_')
    method = pieces[0].split()[-1]
    return method, pieces[-1]
f110eeda67a8702b7f259b95acb3b37f4125e4cd
66,718
def most_recent_unbanned(booru_results):
    """
    Get the first booru post from an artist that isn't banned.

    Posts from banned artists don't have the id field, so the first
    result containing "id" is returned.

    :param booru_results: List<dict>: A list of booru results.
    :return: dict: The first booru result that is not banned, or None.
    """
    return next((post for post in booru_results if "id" in post), None)
761ca14b941c6dd2a1d0e09d70d488159a63c602
66,719
def radio_params(param_name):
    """
    Return text values for some radio buttons instead of numeric.

    Parameters:
        param_name: name for the radio-button setting.

    Returns:
        dict mapping text labels to numeric values when the selected
        setting has named radio buttons; None when the setting is not a
        known radio-button setting.
    """
    borders = {
        'BORDER_CONSTANT': 0,
        'BORDER_REPLICATE': 1,
        'BORDER_REFLECT': 2,
        'BORDER_WRAP': 3,
        'BORDER_REFLECT_101': 4,
    }
    interpolations = {
        'INTER_NEAREST': 0,
        'INTER_LINEAR': 1,
        'INTER_AREA': 2,
        'INTER_CUBIC': 3,
        'INTER_LANCZOS4': 4,
    }
    compressions = {
        'ImageCompressionType.JPEG': 0,
        'ImageCompressionType.WEBP': 1,
    }
    lookup = {
        'interpolation': interpolations,
        'border_mode': borders,
        'compression_type': compressions,
    }
    return lookup.get(param_name)
480a8790adf112b82c689c64927f9f5a310a4062
66,722
from pathlib import Path


def is_stored(title):
    """
    Check whether the data file for the page exists.

    Args:
        title: (Str) The title of the current file to check

    Returns:
        bool: True when link_data/<title> is an existing regular file.
    """
    target = Path(f"link_data/{title}")
    return target.is_file()
eb32ddc276bb02f9a7640e4194075cd416938b60
66,723
import shutil


def cli_installed(filename: str) -> bool:
    """Check whether a program is available from the command line, testing
    with a call to ``shutil.which`` (the Python analogue of ``which``)."""
    # The original `True if ... else False` ternary was redundant; a
    # direct None comparison yields the same bool.
    return shutil.which(filename) is not None
6de0eefa1710ad8c4ce464e92a6e62b30ad0ff6a
66,724
def GetAllDictPaths(tree_dict):
    """Obtain list of paths to all leaves in dictionary.

    The last item in each list entry is the value at the leaf. For items in
    the dictionary that are a list of dictionaries, each list entry is
    indexed by a string representing its position in the list.
    Implementation inspired by https://stackoverflow.com/a/40555856.

    Args:
        tree_dict: Input dictionary.

    Returns:
        List of lists with all paths to leaf items in dictionary.

    Example:
        >> a = {'a':[1 , 2, 3], 'b':[{'c': 10}, {'d': 20}] }
        >> print GetAllDictPaths(a)
        [['a', [1, 2, 3]], ['b', '0', 'c', 10], ['b', '1', 'd', 20]]
    """
    if isinstance(tree_dict, list):
        # NOTE(review): only the first element is inspected, so a mixed list
        # (dicts and non-dicts) is treated according to its first entry.
        if isinstance(tree_dict[0], dict):
            # List of dicts: recurse into each element, tagging the path with
            # the element's index as a string.
            return [[str(i)] + path for i, value in enumerate(tree_dict)
                    for path in GetAllDictPaths(value)]
        else:
            # Plain list: treat the whole list as a single leaf value.
            return [[tree_dict]]
    elif not isinstance(tree_dict, dict):
        # Scalar leaf value.
        return [[tree_dict]]
    # Dict node: prepend each key to the paths found under its value.
    return [[key] + path for key, value in tree_dict.items()
            for path in GetAllDictPaths(value)]
be291f58c7b2a3fe63fc4e8b8e9b2fd57de45b5e
66,734
from typing import List


def check_for_phrase(content: str, phraselist: List) -> bool:
    """
    See if a substring from the list is in the content.

    This allows us to handle somewhat uncommon (but relied-upon) behavior
    like 'done -- this is an automated action'.
    """
    # Use a generator so `any` can short-circuit on the first hit instead
    # of first materializing the whole list of membership tests.
    return any(option in content for option in phraselist)
bb6e9fa96572baa9a01e0fcfa93c3ff447f7b3da
66,737
def minutes_degrees(coord, kywrd):
    """
    Converts coordinates from decimal minutes to decimal degrees.

    Parameters
    ------------
    coord : str
        A single coordinate, either longitude or latitude, with a single
        char at the end indicating the hemisphere.
    kywrd : str
        'lat' or 'lon'; latitude and longitude are formatted with
        different degree widths.

    Returns
    ------------
    deg : str
        Decimal degree coordinate truncated to six characters, or the
        int -1 (after printing an error) for an invalid keyword.
    """
    if kywrd == 'lat':
        degree_width = 2
    elif kywrd == 'lon':
        degree_width = 3
    else:
        print('Coordinate Transform error: Invalid keyword')
        return -1
    whole = coord[:degree_width]
    mins = coord[degree_width:degree_width + 2]
    value = float(whole) + float(mins) / 60
    # Southern and western hemispheres are negative.
    if coord[-1] in ("S", "W"):
        value = value * -1
    return str(value)[:6]
58b40977f51d9fb838ed3045af7454fc50c01344
66,738
from typing import Optional
from typing import List


def build_aws_cmd(
    profile: Optional[str], repo: str, subcommand: str, aws_args: Optional[List[str]] = None
) -> List[str]:
    """Build up an AWS ECR command from a subcommand and optional parts."""
    cmd = ["aws", "ecr"]
    if profile:
        cmd += [f"--profile={profile}"]
    cmd += [f"--repository-name={repo}", "--output=json", subcommand]
    if aws_args:
        cmd += list(aws_args)
    return cmd
b18c96c01eecd934942ea99d88a4ca05e59fdc5c
66,743
def _stripItalic(s): """Returns the string s, with italics removed.""" return s.replace('\x1d', '')
0cdbefc09ad4cb037c1e175d7668846024ad09fd
66,745
def bbars(taper):
    """
    Calculate coefficients of deflection terms for a tapered cantilever
    beam under tip load. Dependency used only by coefficients_simple.

    Returns (b0bar, brbar, b1bar, blbar, bmbar).
    """
    # All coefficients scale with ((taper+1)/(taper-1))**3; the
    # multiplications are kept in the original order for bit-identical
    # floating-point results.
    ratio = ((taper + 1) / (taper - 1))
    cubed = ratio * ratio * ratio
    b1bar = cubed * (taper - 1.0) * (taper - 2.0)
    bmbar = taper * cubed / (taper + 1)
    blbar = cubed
    b0bar = -cubed * 0.5 * taper
    brbar = -cubed
    return b0bar, brbar, b1bar, blbar, bmbar
ec0ee8731de13b9f5b2ff28e99929860cb1ff3cb
66,747
def isValidCreditCard(cardNum):
    """Luhn (Mod 10) pre-validation of a credit card number string.

    Returns True for numbers passing the Luhn checksum, False otherwise.
    This does not truly validate the number but catches typos; two
    offsetting typographic errors can still yield a false positive, but
    the likelihood is low and pre-validating is fast. The leading digit
    must belong to a financial industry (3-6).
    """
    if cardNum[0] not in ('3', '4', '5', '6'):
        return False
    total = 0
    # Walk digits right-to-left, doubling every second digit and adding
    # the digit sum of each product.
    for pos, digit in enumerate(reversed(cardNum)):
        product = int(digit) * (2 if pos % 2 else 1)
        total += product // 10 + product % 10
    return total % 10 == 0
5b62c9841b62c563e213349585fdd2d206270baf
66,753
import torch


def tensor_is_empty(x: torch.Tensor) -> bool:
    """Return whether the tensor is empty (some dimension has size 0)."""
    return x.numel() == 0
e5faefc4fb6bd677c5d668e93d988381f410deef
66,755
def write_all(filename, text, *,
              _open=open,
              ):
    """Write the given text to the file and return the count written.

    ``_open`` is keyword-only and injectable for testing.
    """
    with _open(filename, 'w') as sink:
        return sink.write(text)
70dbca0f93a251f08e9a244bfdf138b13bf53643
66,756
from datetime import datetime


def get_datetime_with_offset(date, offset=300):
    """
    Return ``date`` shifted forward or backward by ``offset`` seconds.

    The shift goes through a POSIX-timestamp round trip, so it follows
    local wall-clock behavior exactly as the original implementation did.

    :param date: the reference datetime
    :param offset: offset interval in seconds (default: 300s); may be
        positive or negative
    :return datetime: the shifted datetime
    :raises Exception: Invalid parameters
    """
    # isinstance() is the idiomatic type check; the original used
    # `type(x) is ...`, which also rejected subclasses. The original
    # docs also mislabeled both parameters as `offset`.
    if not (isinstance(offset, int) and isinstance(date, datetime)):
        raise Exception("Invalid parameters.")
    shifted_ts = datetime.timestamp(date) + offset
    return datetime.fromtimestamp(shifted_ts)
a8d54fcb478897c6e9a5cbba76b8751f69714202
66,757
from datetime import datetime


def generate_slurm_jobs_sql(start: str, end: str):
    """generate_slurm_jobs_sql

    Generate SQL for querying slurm jobs whose runtime overlaps the
    window [start, end].

    Args:
        start (str): start time, format '%Y-%m-%dT%H:%M:%S.%fZ'
        end (str): end time, same format

    Returns:
        string: sql string
    """
    def to_epoch(stamp: str) -> int:
        # Interpret the timestamp as UTC by subtracting the naive epoch.
        parsed = datetime.strptime(stamp, '%Y-%m-%dT%H:%M:%S.%fZ')
        return int((parsed - datetime(1970, 1, 1)).total_seconds())

    epoch_from = to_epoch(start)
    epoch_to = to_epoch(end)
    sql = f"SELECT * FROM slurm.jobs \
        WHERE start_time < {epoch_to} \
        AND end_time > {epoch_from};"
    return sql
82d684b148fbad3cff5ac59b2a786f779095616a
66,758
import random


def bogo_sort(collection):
    """Pure implementation of the bogosort algorithm in Python

    :param collection: some mutable ordered collection with heterogeneous
    comparable items inside
    :return: the same collection ordered by ascending

    Examples:
    >>> bogo_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]

    >>> bogo_sort([])
    []

    >>> bogo_sort([-2, -5, -45])
    [-45, -5, -2]
    """
    def in_order(seq):
        # True when no adjacent pair is inverted (vacuously true for
        # sequences shorter than two elements).
        return all(seq[k] <= seq[k + 1] for k in range(len(seq) - 1))

    while not in_order(collection):
        random.shuffle(collection)
    return collection
b25adc66e689949cb8d58b45cf2e525a16ffa5f6
66,759
def descendant_pks(category):
    """
    Return the primary keys (as strings) of the descendants of the given
    category, not including the pk of the category itself.

    :param Category category: category
    :return [str]: list of the primary keys
    """
    descendants = category.get_descendants(include_self=False)
    return [str(node.pk) for node in descendants]
e917122d9a6c8f97b1d10fa861b1c09949bb6c58
66,760
def addNewCards(old_dict, comparison_dict, new_dict):
    """Compare old_dict and comparison_dict, adding only those keys not in
    old_dict to new_dict (mutated in place and returned).

    When old_dict is falsy, comparison_dict itself is returned unchanged
    (new_dict is ignored in that case).
    """
    if not old_dict:
        return comparison_dict
    for key, value in comparison_dict.items():
        if key not in old_dict:
            new_dict[key] = value
    return new_dict
1e62991783ad4829fd5194ad7953a769b841a9db
66,764