content: string (lengths 35 to 416k)
sha1: string (length 40)
id: int64 (values 0 to 710k)
def prepend_non_digit(string):
    """
    Prepends non-digit-containing string.

    Useful in combination with built-in slugify in order to create strings
    from titles that can be used as HTML IDs, which cannot begin with digits.
    """
    if string[:1].isdigit():
        string = "go-to-{0}".format(string)
    return string
34594a7839a40477a986d284986b2f1eb1e1d994
18,534
def patch(attrs, updates):
    """Perform a set of updates to an attribute dictionary, return the original values."""
    orig = {}
    for attr, value in updates:
        orig[attr] = attrs[attr]
        attrs[attr] = value
    return orig
ec8b8e4862afdc556512a848882a933214a747b4
18,535
def ge(x, y):
    """Implement `ge`."""
    return x >= y
e1aa97783f3f4cc64c0f833fec053c85c506e1e1
18,536
def find_addition_of_two_numbers_in_list(numbers: list, k: int):
    """
    Checks whether two numbers (at distinct positions) from a given list add
    up to a given value k.

    Args:
        numbers: A list of integer numbers.
        k: The value which is used for the check.

    Returns:
        True if two numbers from the list add up to k.
    """
    for i, item in enumerate(numbers):
        for j, item2 in enumerate(numbers):
            if i != j and k == item + item2:
                return True
    return False
84498fb9e4f264ef52519607a7025645c757ad36
18,539
def _match_expr(v):
    """
    Builds a match expression from a version which is optionally empty.

    :param v:
    :return:
    """
    ALLOW_ANY_VERSION = '>=0.0.0'

    # cover case where v was not specified, indicating the latest version
    if v is None:
        return ALLOW_ANY_VERSION
    if not v.startswith(('>', '=', '<')):
        return '{}{}'.format('==', v)
    return v
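A quick sanity check of the three branches (illustrative asserts, not part of the original record):

assert _match_expr(None) == '>=0.0.0'      # unspecified -> any version
assert _match_expr('1.2.3') == '==1.2.3'   # bare version -> exact pin
assert _match_expr('>=1.0') == '>=1.0'     # explicit operator passes through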
748789f3d152a4564f4119809ffcc89bd8b3372d
18,540
import subprocess


def resolve_rev(rev_id):
    """Given an arbitrary git revision id, returns the commit's hash"""
    try:
        output = subprocess.check_output("git rev-parse {}".format(rev_id), shell=True)
        return output.decode('utf-8').strip()
    except subprocess.CalledProcessError as err:
        print("failed to resolve rev_id {}: {}".format(rev_id, err))
        return None
125f0bbfaec0d125e63970e746fd3f7574be4f72
18,541
def get_a_navi_comicnavnext_navinext(cls, last_soup, next_):
    """Implementation of get_navi_link."""
    return last_soup.find(
        "a",
        class_="navi comic-nav-next navi-next"
        if next_
        else "navi comic-nav-previous navi-prev",
    )
76516a283eac451bcb401f457364a2b2d1bb3555
18,543
def omit_edge_list():
    """Solution to exercise R-14.9.

    Can edge list E be omitted from the adjacency matrix representation while
    still achieving the time bounds given in Table 14.1?  Why or why not?
    ---------------------------------------------------------------------------
    Solution:
    ---------------------------------------------------------------------------
    An adjacency matrix stores references to each edge in the graph in an
    (n x n) array, where each vertex v is numbered with an integer [0, n-1].
    Let's go through each method in Table 14.1 and see if we can still
    maintain the stated time bounds without an unordered list of edges, E:

    1. vertex_count(), O(1): This is simply n, and does not require an edge
       list.

    2. edge_count(), O(1): The edge list would typically store this value as
       an instance variable.  We could still achieve O(1) time if a similar
       instance variable is stored in the adjacency matrix and
       incremented/decremented whenever an edge is added or deleted.

    3. vertices(), O(n): Iteration through vertices, unaffected by lack of
       edge list.

    4. edges(), O(m): Iteration through edges.  Without an edge list to
       iterate through, we would need to iterate through the entire (n x n)
       adjacency array to return every edge in the graph.  This would require
       O(n^2) time.  According to Proposition 14.10, the number of edges in a
       simple graph with n vertices is also O(n^2).  However, as noted in the
       text most real-world graphs are sparse, meaning that the number of
       edges will not be proportional to n^2.  This means that iterating
       through the entire adjacency array would usually be slower than
       iterating through the edge list.  We can state that it is not
       guaranteed that we can achieve the same time bounds for the edges()
       method without an edge list.

    5. get_edge(u,v), O(1): We can still get the edge between vertex u and v
       in constant time without an edge list.  The vertices u and v are
       associated with integers i and j that can be used to access the edge
       stored at A[i,j] in the adjacency matrix.  The vertices can either
       directly store their associated integer, or the integers can be stored
       in a map associated with their respective vertex.  In either case, we
       can achieve O(1) run-time efficiency without an edge list.

    6. degree(v), O(n): This method works by iterating through the row of the
       array associated with v.  It is unaffected by the existence of an edge
       list.

    7. incident_edges(v), O(n): This method also iterates through the cells
       of the row of the array associated with v, and is unaffected by the
       existence of an edge list.

    8. insert_vertex(x), O(n^2): Requires copying the contents of the
       existing array to a larger array.  Unaffected by the existence of an
       edge list.

    9. remove_vertex(v), O(n^2): Requires copying the contents of the
       existing array to a smaller array.  Unaffected by the existence of an
       edge list.

    10. insert_edge(u,v,x), O(1): Given O(1) time to look up the integers
        associated with vertices u and v, the insert_edge() method can insert
        a new edge into the array in O(1) time without an edge list.

    11. remove_edge(e), O(1): The edge object contains references to its
        endpoints u and v, which can then be used to look up their
        corresponding integers in O(1) time.  Then the array can be updated
        so that A[i,j] = None.  This can all be done in O(1) time without an
        edge list.

    In conclusion, it seems that all of the methods of the adjacency matrix
    can maintain their stated time bounds without an edge list E except for
    the edges() method.  Without an edge list to iterate through, we cannot
    guarantee O(m) performance for this method.
    """
    return True
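The O(n^2) cost of edges() without an edge list can be seen directly: the only way to enumerate the edges is a full scan of the matrix. A minimal sketch, assuming A is a hypothetical n x n adjacency array with None for absent edges:

def edges_without_edge_list(A):
    # Full scan of the n x n array: Theta(n^2) work regardless of how many
    # edges (m) actually exist, so the O(m) bound cannot be guaranteed.
    found = []
    n = len(A)
    for i in range(n):
        for j in range(n):
            if A[i][j] is not None:
                found.append(A[i][j])
    return found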
aaaff728cdb9a08768a6ad7b569443c261e1b15e
18,545
def _sanitize_feature_name(feature_name: str) -> str:
    """Returns a sanitized feature name."""
    return feature_name.replace('"', '')
7f232680502819d5054ee6852ca4a824565839cc
18,546
def sanitise_db_creds(creds):
    """Clean up certain values in the credentials to make sure that the DB
    driver doesn't get confused.
    """
    tmp = {}
    for name, value in creds.items():
        if name == 'port':
            tmp[name] = int(value)
        elif name == 'password':
            tmp['passwd'] = value
        else:
            tmp[name] = value
    return tmp
a5f3e8d4aab2f5959a8a03833f7c3be653234126
18,547
from argparse import ArgumentParser


def parse_options(argv):
    """Parses options from the command line """
    parser = ArgumentParser(
        prog='collect_counts',
        description='This script collects expression counts in TSV format '
                    'and aggregates them in a single HDF5 file. Please note '
                    'that there are several ways to provide the list of '
                    'input files (see options below). You must specify the '
                    'input files using one of these options. If you specify '
                    'multiple options, they are parsed in the following '
                    'precedence: -i, -f, -p.\n')
    parser.add_argument('-i', '--input', dest='infiles_fnames', metavar='STR', nargs='+',
                        help='list of expression count files in TSV format', default='-')
    parser.add_argument('-f', '--filelist', dest='infiles_flist', metavar='STR',
                        help='text file listing expression count files', default='-')
    parser.add_argument('-p', '--pattern', dest='infiles_fpattern', metavar='STR',
                        help='search pattern describing list of expression count files', default='-')
    parser.add_argument('-o', '--outfile', dest='outfile', metavar='STR',
                        help='name of output file (will be hdf5)', default='-', required=True)
    parser.add_argument('-v', '--verbose', dest='verbose', action='store_true',
                        help='switch on verbose output [off]', default=False)
    return parser.parse_args(argv[1:])
8a484d0e4088003368a8b35ca57bdddd63f18932
18,548
def post_config_aem_health_check_servlet(bundles_ignored=None, bundles_ignored_type_hint=None):  # noqa: E501
    """post_config_aem_health_check_servlet  # noqa: E501

    :param bundles_ignored:
    :type bundles_ignored: List[str]
    :param bundles_ignored_type_hint:
    :type bundles_ignored_type_hint: str

    :rtype: None
    """
    return 'do some magic!'
4df0ba40ea04d883d1a7fbfca5e3931d523f7da8
18,550
def getFromLongestMatchingValue(
    objectList,
    listOfValues,
    keyToMatch,
    caseInsensitive=True
):
    """
    Function to take a list of objects, a list of values and a key to match
    and return the object with the longest matching value for that key or
    None if no value matches for that key.

    :param objectList: The list of objects.
    :type objectList: list of dicts

    :param listOfValues: A list of values to try to match
    :type listOfValues: list of string values

    :param keyToMatch: key in which to match the value
    :type keyToMatch: str

    :param caseInsensitive: Case insensitive value matching?
    :type caseInsensitive: boolean

    :returns: dict with longest matching value for specified key in object
    """
    objectList = objectList.copy()
    if caseInsensitive:
        listOfValues = [k.lower() for k in listOfValues]
    value = max(
        [str(k) for k in listOfValues],
        key=len
    ) if len(listOfValues) else None
    if value and value in listOfValues:
        listOfValues.remove(value)
    for obj in sorted(
        objectList,
        key=lambda i: len(i.get(keyToMatch, "")),
        reverse=True
    ):
        if (
            obj.get(keyToMatch, '').lower()
            if caseInsensitive else obj.get(keyToMatch, '')
        ) == value:
            return obj
    if len(listOfValues) >= 1:
        return getFromLongestMatchingValue(
            objectList,
            listOfValues,
            keyToMatch,
            caseInsensitive
        )
    for obj in sorted(
        objectList,
        key=lambda i: len(i.get(keyToMatch, "")),
        reverse=False
    ):
        generic = obj.get(keyToMatch, '').lower() if caseInsensitive else obj.get(keyToMatch, '')
        generic = generic.split('-')[0] if '-' in generic else generic
        if generic == value:
            return obj
    return {}
acb79bc37831e5f19ac39acf0917f83bdef24f2e
18,552
def is_point_in_rect_circular_boundary(distance, circular_radius, boundary_range):
    """ judge whether a point is in boundary area for top center rect """
    return distance < circular_radius + boundary_range
9e1b98bd233039f1fabcdc627c682a38b9a3b783
18,555
import re
import os


def find_new_title(dir: str, filename: str) -> str:
    """Return a path that does not exist yet, in ``dir``.

    If ``filename`` exists in ``dir``, adds or changes the end of the file
    title until a name is found that doesn't yet exist.  For instance, if
    file "Image (01).jpg" exists in "somedir", returns
    "somedir/Image (02).jpg".
    """
    rx = re.compile(r"\((\d{1,5})\)$")
    p = os.path.join(dir, filename)
    while os.path.exists(p):
        base = os.path.basename(p)
        (root, ext) = os.path.splitext(base)
        m = rx.search(root)
        if m is None:
            replacement = "(001)"
        else:
            increment = int(m.group(1)) + 1
            replacement = "(%03d)" % increment
            root = root[: m.start(1) - 1]
        f = root + replacement + ext
        p = os.path.join(dir, f)
    return p
a4e15842493b7701f30c91ddaf5bd18a6ad6aea2
18,557
def perp(point1, point2):
    """Returns slope and y-intercept of the perpendicular bisector of the
    segment connecting two cities."""
    x1 = point1[0]
    y1 = point1[1]
    x2 = point2[0]
    y2 = point2[1]
    """
    try:
        m = (x2 - x1)/(y1 - y2)
        b = x1**2 + y1**2 - x1**2 - y2**2
        return ['yes', m, b]
    except ZeroDivisionError:
        mid = (x2 - x1)/2
        return ['no', mid]
    """
    m = (x2 - x1)/(y1 - y2)
    b = (x1**2 + y1**2 - x2**2 - y2**2)/(2*(y1 - y2))
    return m, b
24b03a83b3a3cf760e27141614e4c9ffd939a666
18,558
def check_valid_ip(ip: str) -> bool:
    """
    Return True if every octet of the IP is at most 254, False otherwise.

    >>> check_valid_ip("192.168.0.23")
    True
    >>> check_valid_ip("192.255.15.8")
    False
    >>> check_valid_ip("172.100.0.8")
    True
    >>> check_valid_ip("254.255.0.255")
    False
    """
    ip1 = ip.replace(".", " ")
    list1 = [int(i) for i in ip1.split() if i.isdigit()]
    count = 0
    for i in list1:
        if i > 254:
            count += 1
            break
    if count:
        return False
    return True
a73b8c262b94e4ff7797518f2b2f04dbed1c2364
18,560
import pickle


def save_model(model, filepath="models/"):
    """Save a trained model to filepath (e.g. 'model/filename')

    Args:
        model (var): variable-held trained model (e.g. Linear_Regression)
        filepath (str): path to save model (excluding file extension)

    Returns:
        msg (str): confirmation message
    """
    with open(f"{filepath}.sav", "wb") as f:
        pickle.dump(model, f)
    return f"model saved to: {filepath}.sav"
3de1495e4e207998f251a1977e8e21d0af1b0402
18,561
import numpy


def makeUnpolInstrumentalResponse(j1, j2):
    """
    Form the visibility matrix in instrumental response from two Jones
    matrices assuming unpolarised sources (hence the brightness matrix is
    the identity matrix)

    Input: j1,j2: Jones matrices of dimension[za][az][2][2]

    Returns: [za][az][[xx,xy],[yx,yy]] where "X" and "Y" are defined by the
    receptors of the Dipole object used in the ApertureArray.  Hence to get
    "XX", you want result[za][az][0][0] and for "YY" you want
    result[za][az][1][1]
    """
    result = numpy.empty_like(j1)
    result[:, :, 0, 0] = j1[:, :, 0, 0] * j2[:, :, 0, 0].conjugate() + j1[:, :, 0, 1] * j2[:, :, 0, 1].conjugate()
    result[:, :, 1, 1] = j1[:, :, 1, 0] * j2[:, :, 1, 0].conjugate() + j1[:, :, 1, 1] * j2[:, :, 1, 1].conjugate()
    result[:, :, 0, 1] = j1[:, :, 0, 0] * j2[:, :, 1, 0].conjugate() + j1[:, :, 0, 1] * j2[:, :, 1, 1].conjugate()
    result[:, :, 1, 0] = j1[:, :, 1, 0] * j2[:, :, 0, 0].conjugate() + j1[:, :, 1, 1] * j2[:, :, 0, 1].conjugate()
    return result
b266b467ad18143f5e4ba01d2da0e89f2e426b60
18,562
def django_testdir_initial(django_testdir):
    """A django_testdir fixture which provides initial_data."""
    django_testdir.project_root.join("tpkg/app/migrations").remove()
    django_testdir.makefile(
        ".json",
        initial_data="""
    [{
        "pk": 1,
        "model": "app.item",
        "fields": { "name": "mark_initial_data" }
    }]""",
    )
    return django_testdir
1b99e811945bb10a3d74e3a1b2cfe5d52fb2a27b
18,563
def preprocess_data(x):
    """
    Preprocess data

    :param x: `Tensor.FloatTensor` with size `N x C x H x W`
    """
    n, c, h, w = x.size()
    x = x.permute(3, 0, 2, 1).contiguous().view((w, n, -1))
    return x
aa80a31dc1057ff7df5b25fc1d737ccec3402edb
18,564
def train_test_split(ls, train=0.4, dev=0.1, test=0.5):
    """
    :param ls: list of 2-dimensional tuples. First element is a sentence,
        second is a prediction (0/1)
    :return: 3 lists for train test split. train 40%, dev 10%, test 50% defaults
    """
    assert sum((train, dev, test)) == 1, 'train, dev and test must sum to 1.'
    n = len(ls)
    train_ls = ls[0:int(train * n)]
    dev_ls = ls[int(train * n): int((train + dev) * n)]
    test_ls = ls[int((train + dev) * n):n + 1]
    return train_ls, dev_ls, test_ls
9a313e9bedaf8b773a90664a4397f1e560e50843
18,566
def is_goal(state) -> bool:
    """ Returns False if any amphipods in any room are not of the right type """
    rooms, _ = state
    for room_type, pods in rooms.items():
        if any((amphipod != room_type) for amphipod in pods):
            return False
    return True
eba65bbffbc9496d5f2dba68e27f574321ef6f7b
18,567
def find_brackets(smiles):
    """
    Find indexes of the first matching brackets ( "(" and ")" ).

    It doesn't check if all brackets are valid, i.e. complete.

    Parameters
    ----------
    smiles

    Returns
    -------
    list
        Index of first and second matching bracket.
    """
    indexes = []
    n_brackets = 0
    for i, char in enumerate(smiles):
        if char == "(":
            if n_brackets == 0:
                indexes.append(i)
            n_brackets += 1
        elif char == ")":
            n_brackets -= 1
            if n_brackets == 0:
                indexes.append(i)
                break
    return indexes
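For example, matching the outermost pair in a nested SMILES fragment (illustrative assert, not part of the original record):

assert find_brackets("CC(C(C)O)N") == [2, 8]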
b1b8d40e6f04d7a903b55b85db98a35fb8eab10c
18,568
from datetime import datetime


def get_course_starting_time():
    """
    This function gets the starting time of a course.
    """
    now = datetime.now()
    if 8 < now.hour < 10:
        return datetime(now.year, now.month, now.day, 8, 0, 2)
    if 10 < now.hour < 12:
        return datetime(now.year, now.month, now.day, 10, 0, 2)
    if 14 < now.hour < 16:
        return datetime(now.year, now.month, now.day, 16, 0, 2)
    return datetime(now.year, now.month, now.day, 18, 0, 2)
4b5d8ac76df3fb15bf855f19f3313fbfccbb1632
18,569
def read_sra_data(ifile='sra_ids_experiments.txt', verbose=False):
    """
    Read the SRA experiment ID that we generated with an SQL command
    """
    sra_data = {}
    # library_strategy,library_source,library_selection,library_layout,platform,instrument_model,platform_parameters
    with open(ifile, 'r') as f:
        for l in f:
            p = l.rstrip("\n").split("\t")
            sra_data[p[0]] = {}
            if 'PAIRED' in p[4]:
                p[4] = 'PAIRED'
            if 'SINGLE' in p[4]:
                p[4] = 'SINGLE'
            if 'unspecified' in p[6]:
                p[6] = 'Illumina HiSeq 1000'
            for i, j in enumerate(['library_strategy', 'library_source', 'library_selection',
                                   'library_layout', 'platform', 'instrument_model',
                                   'platform_parameters']):
                sra_data[p[0]][j] = p[i + 1]  # the first element is the id :)
    return sra_data
eacfb59fae920728185019f9bd648facef83ed96
18,570
def get_node_name_parts(obj_name):
    """
    Breaks different Maya node name parts and returns them:
        - objectName: a:a:grpA|a:a:grpB|a:b:pSphere1
        - long_prefix: a:a:grpA|a:a:grpB
        - namespace: a:b
        - basename: pSphere1
    :param obj_name: str, name of Maya node
    :return: tuple(str, str, str), tuple with long_prefix, namespace and basename
    """
    if '|' in obj_name:
        obj_name = str(obj_name)
        long_name_parts = obj_name.split('|')
        long_prefix = '|'.join(long_name_parts[:-1])
        short_name = long_name_parts[-1]
    else:
        short_name = obj_name
        long_prefix = ''
    if ':' in short_name:
        namespace_parts = short_name.split(':')
        base_name = namespace_parts[-1]
        namespace = ':'.join(namespace_parts[:-1])
    else:
        base_name = short_name
        namespace = ''
    return long_prefix, namespace, base_name
c3c0d47ff7ef791616b93bb0456cb503e4c80140
18,572
def tex_coord(x, y, n=4):
    """ Return the bounding vertices of the texture square.

    Parameters
    ----------
    x, y - 2D position coordinates of texture file texture.png
    n = 4 - hard coded size of texture in file

    Returns
    -------
    8 floats, the bounding coordinates of each texture square
    """
    m = 1.0 / n  # this value is essentially hard coded to .25 since n=4 in the function definition
    dx = x * m
    dy = y * m
    # This return sends the proper coordinates for each texture; they must be
    # computed rather than hard coded because of possible differences in
    # height for where the block can be placed.
    return dx, dy, dx + m, dy, dx + m, dy + m, dx, dy + m
27269d73d54ced16f762b3fa51ed293c1d280fc8
18,573
def invert_image(image):
    """
    Inverts a binary image

    Args:
        image: a binary image (black and white only)

    Returns:
        An inverted version of the image passed as argument
    """
    return 255 - image
eb466971c77fae2a57ad86a3b555884865ed404a
18,574
def get_most_common_non_ascii_char(file_path: str) -> str:
    """Gets the most common non ascii character from a text document and returns it.

    Args:
        file_path: the pathname (absolute or relative to the current working
            directory) of the file to be opened.

    Returns:
        the most common non ascii character.
    """
    symbols_counter = {}
    with open(file_path, encoding="unicode-escape") as file:
        for line in file:
            symbols = (symbol for symbol in line if not symbol.isascii())
            for symbol in symbols:
                if symbol not in symbols_counter:
                    symbols_counter[symbol] = 1
                else:
                    symbols_counter[symbol] += 1
    return max(symbols_counter, key=lambda dict_key: symbols_counter[dict_key])
4254defd2e8c9504785325f4ede108db9a3f1b1a
18,575
def with_api(func):
    """Decorate a method to use the client() context manager."""
    def wrapper(*args):
        with args[0].client() as api:
            return func(args[0], api, *args[1:])
    return wrapper
9ecbebc9c0599d83d178820ed88d0f6fa5c34ee1
18,576
def create_dummy_txt_file(tmp_path):
    """Create a dummy text file."""
    d = tmp_path / 'txt_files'
    try:
        d.mkdir()
    except Exception:
        pass
    p = d / 'dummy.txt'
    _fobj = open(p, 'x')
    _fobj.close()
    return p
ad4fcd7b437237ba7eda51063f3338d016c1ec1d
18,578
def compare_df(df1, df2):
    """
    Helpful testing function that prints values if there is a mismatch.
    """
    if not df1.equals(df2):
        print('df1:')
        print(df1.to_string())
        # print(df1.shape)
        # print(df1.values[-1])
        # for i in df1:
        #     print(f"{i}'s type: {type(df1[i].values[0])}")
        print('---')
        print('df2:')
        print(df2.to_string())
        # print(df2.shape)
        # print(df2.values[-1])
        # for i in df2:
        #     print(f"{i}'s type: {type(df2[i].values[0])}")
        # print('---')
        print(df1.where(df1.values != df2.values).notna().to_string())
        # values_not_equal = df1.values != df2.values
        # print(f'values_not_equal:\n{values_not_equal}')
        # print(df1.loc[values_not_equal].notna())
    return df1.equals(df2)
1d19fc172afb54e3c689ebf23f15a98d880b6860
18,580
def currency_to_int(s: str) -> int:
    """ Drop the decimal separator (the third character from the end),
    e.g. "12.34" -> 1234. """
    return int(s[:len(s)-3] + s[len(s)-2:])
d1daab090ff856fb2ec8f4f7d0c1cba7e6de7dd3
18,581
import torch


def gelu_quick(x):
    """
    Approximation of gelu.

    Examples:
        >>> import torch.nn.functional as F
        >>> inputs = torch.rand(3, 2)
        >>> assert torch.allclose(gelu_quick(inputs), F.gelu(inputs), atol=1e-2)

    References:
        https://arxiv.org/pdf/1606.08415.pdf
    """
    return x * torch.sigmoid(1.702 * x)
1fc27f052ae9958cab53f499d906c612ef24f3a8
18,584
def get_line(char: str, length: int) -> str:
    """ Return `char` repeated `length` times. """
    return char * length
a1833f479c98269edf4ccc030d61690342a2de01
18,587
from pathlib import Path


def input_custom_variables(string: str, dmvio_folder: str):
    """Replace the following environment variables in the given string.

    If ${EVALPATH} is inside string it is replaced with the path to the
    evaltools (the folder where this file is located).
    ${DMVIO_PATH} is replaced with the path to DM-VIO.
    """
    return string.replace('${EVALPATH}', str(Path(__file__).parent.parent.resolve())).replace(
        '${DMVIO_PATH}', dmvio_folder)
0766874154192a885e49f50f14b5ab9038788ced
18,588
from functools import reduce


def sumatoria_reduce(n: int) -> int:
    """OPTIONAL CHALLENGE: rewritten using reduce.

    Reference:
    https://docs.python.org/3/library/functools.html#functools.reduce
    """
    return reduce(lambda x, y: x + y, range(n + 1), 0)
c3f96f8f4d6c60599d54e8d8ccba3260783af74b
18,589
def _GetLines(line_strings):
    """Parses the start and end lines from a line string like 'start-end'.

    Arguments:
        line_strings: (array of string) A list of strings representing a line
            range like 'start-end'.

    Returns:
        A list of tuples of the start and end line numbers.

    Raises:
        ValueError: If the line string failed to parse or was an invalid line
            range.
    """
    lines = []
    for line_string in line_strings:
        # The 'list' here is needed by Python 3.
        line = list(map(int, line_string.split('-', 1)))
        if line[0] < 1:
            raise ValueError('invalid start of line range: %r' % line)
        if line[0] > line[1]:
            raise ValueError('end comes before start in line range: %r' % line)
        lines.append(tuple(line))
    return lines
d59fc282ef5f7dca251de8b3015eaebd18230f9f
18,590
def match_includes_reaction_center(train_mode, match, atoms_core):
    """Determines whether a substructure match includes the full reaction center.

    Parameters
    ----------
    train_mode: Literal["single_reactant", "transition_state"]
        Mode in which diagram was constructed.
    match: tuple
        Indices of substructure match.
    atoms_core: List[int]
        Atom indices belonging to the reaction center.

    Returns
    -------
    includes_rc: bool
        Boolean whether match includes the reaction center.
    """
    includes_rc = True
    if train_mode == "transition_state":
        if False in [core_atom in match for core_atom in atoms_core]:
            includes_rc = False
    return includes_rc
1da9d3c7304280d24918046ecc8e88ece078040f
18,591
def _get_num_slices(op_slice_sizes):
    """Returns the number of slices in a list of OpSlice sizes.

    Args:
        op_slice_sizes: List of list of slice sizes, where the outer list has
            a list per op and the inner list is the slice sizes of the op.

    Returns:
        Integer max number of slices in the list of ops.
    """
    return max([len(slices) for slices in op_slice_sizes])
55d7170d4e1318fdd72e8c4e6ab1da30d42640e9
18,592
def _explode_lines(shape):
    """
    Return a list of LineStrings which make up the shape.
    """
    if shape.geom_type == 'LineString':
        return [shape]
    elif shape.geom_type == 'MultiLineString':
        return shape.geoms
    elif shape.geom_type == 'GeometryCollection':
        lines = []
        for geom in shape.geoms:
            lines.extend(_explode_lines(geom))
        return lines
    return []
689deed3c3674fdc7d0cb12917004bbe9eca2227
18,593
import random


def packet_loss(P):
    """Adds a uniformly distributed packet loss, returns True if packet to be dropped"""
    u = random.uniform(0, 1)
    return u < P
a5a7e3ce2a7b23937a4c23ce498cdd1aa5561841
18,594
def insertion_sort(collection):
    """
    Examples:
    >>> insertion_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]

    >>> insertion_sort([])
    []

    >>> insertion_sort([-2, -5, -45])
    [-45, -5, -2]
    """
    for index in range(1, len(collection)):
        # Walk index back through the already-sorted prefix to find the right
        # position, swapping as we go; inserting there keeps the prefix sorted.
        while index > 0 and collection[index - 1] > collection[index]:
            collection[index], collection[index - 1] = collection[index - 1], collection[index]
            index -= 1
    return collection
77d97634899ba8239c509aafc5d51f4f7bd8f4d8
18,595
def default_narrative(
        end_yr,
        value_by,
        value_ey,
        diffusion_choice='linear',
        sig_midpoint=0,
        sig_steepness=1,
        base_yr=2015,
        regional_specific=True,
        fueltype_replace=0,
        fueltype_new=0,
):
    """Create a default single narrative with a single timestep

    E.g. from value 0.2 in 2015 to value 0.5 in 2050

    Arguments
    ----------
    end_yr : int
        End year of narrative
    value_by : float
        Value of start year of narrative
    value_ey : float
        Value at end year of narrative
    diffusion_choice : str, default='linear'
        Whether linear or sigmoid
    sig_midpoint : float, default=0
        Sigmoid midpoint
    sig_steepness : float, default=1
        Sigmoid steepness
    base_yr : int
        Base year
    regional_specific : bool
        If regional specific or not

    Returns
    -------
    container : list
        List with narrative
    """
    return [{
        'base_yr': base_yr,
        'end_yr': end_yr,
        'value_by': value_by,
        'value_ey': value_ey,
        'diffusion_choice': diffusion_choice,
        'sig_midpoint': sig_midpoint,
        'sig_steepness': sig_steepness,
        'regional_specific': regional_specific,
        'fueltype_replace': fueltype_replace,
        'fueltype_new': fueltype_new
    }]
7076e19af13337d52241c4cd35a6ec3392678d3c
18,596
def reshape_bboxes(bboxes):
    """
    Convert bboxes from [x1, y1, x2, y2] to [y1, x1, y2, x2]

    bboxes : [num_bboxes, 4]
    """
    return [bboxes[:, [1, 0, 3, 2]]]
23e1b59e77d282d0d9f8a67519bd0933e21c1998
18,598
def generate_dep_hint(dep_comparison, kind):
    """Generate a dep hint.

    Parameters
    ----------
    dep_comparison : dict
        The dependency comparison.
    kind : str
        The kind of comparison (e.g., source code, grayskull, etc.)

    Returns
    -------
    hint : str
        The dependency hint string.
    """
    hint = "\n\nDependency Analysis\n--------------------\n\n"
    hint += (
        "Please note that this analysis is **highly experimental**. "
        "The aim here is to make maintenance easier by inspecting the package's dependencies. "  # noqa: E501
        "Importantly this analysis does not support optional dependencies, "
        "please double check those before making changes. "
        "If you do not want hinting of this kind ever please add "
        "`bot: inspection: false` to your `conda-forge.yml`. "
        "If you encounter issues with this feature please ping the bot team `conda-forge/bot`.\n\n"  # noqa: E501
    )

    df_cf = ""
    for sec in ["host", "run"]:
        for k in dep_comparison.get(sec, {}).get("df_minus_cf", set()):
            df_cf += f"- {k}" + "\n"
    cf_df = ""
    for sec in ["host", "run"]:
        for k in dep_comparison.get(sec, {}).get("cf_minus_df", set()):
            cf_df += f"- {k}" + "\n"

    if len(df_cf) > 0 or len(cf_df) > 0:
        hint += (
            f"Analysis by {kind} shows a discrepancy between it and "
            "the package's stated requirements in the meta.yaml."
        )
        if df_cf:
            hint += (
                f"\n\n### Packages found by {kind} but not in the meta.yaml:\n"  # noqa: E501
                f"{df_cf}"
            )
        if cf_df:
            hint += (
                f"\n\n### Packages found in the meta.yaml but not found by {kind}:\n"  # noqa: E501
                f"{cf_df}"
            )
    else:
        hint += f"Analysis by {kind} shows **no discrepancy** with the stated requirements in the meta.yaml."  # noqa: E501
    return hint
676babf8475669dbd58c050ac3211aa0e39d4fa5
18,599
import random


def random_time_between(x, y, random_width):
    """
    Generates a random number in an interval.

    :param x: first number
    :param y: second number
    :param random_width: spread from 0.0 to 1.0 that constrains the
        randomness; with 0 the exact midpoint is returned, with 1 any number
        in the interval may be returned
    :return: a random number between x and y with spread random_width
    """
    middle = (x + y) // 2
    scatter = int((y - x) * random_width / 2)
    return int(middle + random.randint(-scatter, scatter))
6a3f7039b01178f6b6d60ed07e728ecf981d1283
18,600
import time


def time_ms():
    """Return current timestamp in milliseconds."""
    return int(1000 * time.time())
24201688797aa4499c41fd6e8d1b3db8a5de9199
18,601
import six


def flatten(value, prefix=None):
    """Takes an arbitrary JSON(ish) object and 'flattens' it into a dict
    with values consisting of either simple types or lists of simple
    types."""
    def issimple(value):
        # foldr(True, or, value)?
        for item in value:
            if isinstance(item, dict) or isinstance(item, list):
                return False
        return True

    if isinstance(value, six.text_type):
        return value.encode("utf8")

    if isinstance(value, list):
        if issimple(value):
            return value
        offset = 0
        result = {}
        prefix = "%d" if prefix is None else "%s_%%d" % prefix
        for item in value:
            k = prefix % offset
            v = flatten(item, k)
            if not isinstance(v, dict):
                v = {k: v}
            result.update(v)
            offset += 1
        return result

    if isinstance(value, dict):
        result = {}
        prefix = "%s" if prefix is None else "%s_%%s" % prefix
        for k, v in six.iteritems(value):
            k = prefix % str(k)
            v = flatten(v, k)
            if not isinstance(v, dict):
                v = {k: v}
            result.update(v)
        return result

    return value
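Tracing the list and dict branches on a small nested value (illustrative assert; note that text values would come back UTF-8 encoded):

assert flatten({"a": [1, {"b": 2}]}) == {"a_0": 1, "a_1_b": 2}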
8208fd2e1cdd3fa741a5f04eb9e48f71141f65e7
18,602
import struct


def encode_float(value):  # float -> [int, int]
    """Encode a float into MODBus format as in secn 2.11.1 of the techspec"""
    words = struct.unpack('>HH', struct.pack('>f', value))
    return words[1], words[0]
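A matching decoder is just the inverse word swap; a minimal sketch, assuming the register pair arrives in the same order this function emits (not part of the original record):

def decode_float(words):  # [int, int] -> float
    # Reassemble the two swapped 16-bit words and unpack as a big-endian float.
    return struct.unpack('>f', struct.pack('>HH', words[1], words[0]))[0]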
ba27ce380afcc22f926582bcf2c9523975cfa88d
18,604
import argparse


def get_arguments():
    """Handle get CLI arguments"""
    parser = argparse.ArgumentParser(description="Flickr Mirroring")
    parser.add_argument("--username", metavar="USERNAME",
                        help="username of the account of a user on Flickr to mirror their photostream")
    parser.add_argument("--cache-path", metavar="CACHE PATH", type=str, default="./.flickr/",
                        help="specify the absolute path where the photos downloaded from Flickr need to be cached")
    parser.add_argument("--consumer-key", metavar="CONSUMER KEY", type=str,
                        help="a unique string used by the Consumer to identify themselves to the Flickr API")
    parser.add_argument("--consumer-secret", metavar="CONSUMER SECRET", type=str,
                        help="a secret used by the Consumer to establish ownership of the Consumer Key")
    parser.add_argument("--image-only", default=False, action='store_true',
                        help="specify whether the script must only download photos' images")
    parser.add_argument("--info-level", metavar="LEVEL", type=int, default=0, choices=[0, 1, 2],
                        help="specify the level of information of a photo to fetch (value between 0 and 2)")
    parser.add_argument("--info-only", default=False, action='store_true',
                        help="specify whether the script must only download photos' information")
    parser.add_argument("--save-api-keys", default=False, action='store_true',
                        help="specify whether to save the Flickr API keys for further usage")
    parser.add_argument("--lifo", default=False, action='store_true',
                        help="specify the Last-In First-Out method to mirror the user's photostream, "
                             "from the most recently uploaded photo to the oldest")
    parser.add_argument("--fifo", default=False, action='store_true',
                        help="specify the First-In First-Out method to mirror the user's photostream, "
                             "from the oldest uploaded photo to the most recent (default option)")
    return parser.parse_args()
cdd95b3cb831ac545be099072deed4a6d780267d
18,605
import time


def datetime_to_millis(dt):
    """
    Convert a ``datetime`` object to milliseconds since epoch.
    """
    return int(time.mktime(dt.timetuple())) * 1000
91091c6a84a0001d1ee6847b9912b2590f1cc57f
18,607
import os


def get_patients_and_images():
    """
    Gets all patients and images from files

    Returns
    -------
    image and patient names
    """
    base_train_AD = 'D:/TFG/brain_data/train/AD'
    base_train_MCI = 'D:/TFG/brain_data/train/MCI'
    base_train_CN = 'D:/TFG/brain_data/train/CN'
    base_val_AD = 'D:/TFG/brain_data/validation/AD'
    base_val_MCI = 'D:/TFG/brain_data/validation/MCI'
    base_val_CN = 'D:/TFG/brain_data/validation/CN'
    base_test_AD = 'D:/TFG/brain_data/test/AD'
    base_test_MCI = 'D:/TFG/brain_data/test/MCI'
    base_test_CN = 'D:/TFG/brain_data/test/CN'

    images_train_AD = os.listdir(base_train_AD)
    images_train_MCI = os.listdir(base_train_MCI)
    images_train_CN = os.listdir(base_train_CN)
    images_validation_AD = os.listdir(base_val_AD)
    images_validation_MCI = os.listdir(base_val_MCI)
    images_validation_CN = os.listdir(base_val_CN)
    images_test_AD = os.listdir(base_test_AD)
    images_test_MCI = os.listdir(base_test_MCI)
    images_test_CN = os.listdir(base_test_CN)

    images_train = images_train_AD + images_train_MCI + images_train_CN
    images_validation = images_validation_AD + images_validation_MCI + images_validation_CN
    images_test = images_test_AD + images_test_MCI + images_test_CN
    images = images_train + images_validation + images_test

    patients = []
    for img in images:
        patientId = img[5:15]
        patients.append(patientId)
    patients = list(set(patients))
    return images, patients
5ed3c10a68e9c3d8514cc4d9d875d2aaf19949ca
18,608
def split_indexed_tag(tag):
    """ Splits the created tag for the api. """
    tag = tag.lower()
    tag = tag.split("->")
    return tag
26e6c3236edf8362a1c7ab620838f33e9532f2ba
18,610
def ord_prio(prio):
    """Compute the ordinal number of a text priority

    :param prio: string
    :rtype: integer
    """
    return {
        'urgmust': 1,
        'must': 2,
        'high': 3,
        'medium': 4,
        'low': 5,
    }.get(prio, 5)
fb84a9c7d244bd3c2664bb97cb56f5ec23517671
18,612
def convert_string_to_tuple(creds_string):
    """Recreate a MAAS API credentials tuple from a colon-separated string."""
    creds_tuple = tuple(creds_string.split(':'))
    if len(creds_tuple) != 3:
        raise ValueError(
            "Malformed credentials string. Expected 3 colon-separated items, "
            "got %r." % creds_string)
    return creds_tuple
a0f0553553733340d276bbb0f01d44d4ff842008
18,613
import base64


def base642binary(b64_fname: str) -> bytes:
    """Decode a base64-encoded printable string to binary data (bytes)."""
    b64_fname += "==="  # pad so the length is a multiple of 4
    return base64.b64decode(b64_fname)
e4eaba11d75d63c26fb5d66ca0fdbc9a46b203fd
18,616
def _split_host_and_port(servers):
    """Convert python-memcached based server strings to pymemcache's one.

    - python-memcached: ['127.0.0.1:11211', ...] or ['127.0.0.1', ...]
    - pymemcache: [('127.0.0.1', 11211), ...]
    """
    _host_and_port_list = []
    for server in servers:
        connection_info = server.split(':')
        if len(connection_info) == 1:
            _host_and_port_list.append(
                (connection_info[0], 11211))
        elif len(connection_info) == 2:
            _host_and_port_list.append(
                (connection_info[0], int(connection_info[1])))
    return _host_and_port_list
2f4544566bb00684b99cbbb796ca4a0246891f08
18,617
def get_lomb_signif_ratio(lomb_model, i):
    """
    Get the ratio of the significances (in sigmas) of the ith and first
    frequencies from a fitted Lomb-Scargle model.
    """
    return (lomb_model['freq_fits'][i-1]['signif'] /
            lomb_model['freq_fits'][0]['signif'])
e3f4d8db9a08926be49725c2a10696ede4e6d1b0
18,618
def validate(raw):
    """
    Checks the content of the data provided by the user.

    Users provide tickers to the application by writing them into a file
    that is loaded through the console interface with the <load filename>
    command.  We expect the file to be filled with comma-separated tickers
    :class:`string`.

    Parameters:
        - `raw` : :class:`string` content of the user provided file.

    The function strips the raw data from spaces, carriage returns and
    splits the content around commas.  It will also check if there are
    trailing commas or if the user mistakenly put two commas instead of one
    between tickers.

    Returns a :class:`list` of sanitized tickers
    """
    tickers = []
    raw = raw.replace(' ', '')   # remove spaces
    raw = raw.replace('\n', '')  # remove carriage returns
    for item in raw.split(','):  # comma split
        if item != '':
            tickers.append(str(item).upper())
    return tickers
c69b5b4177e11fabc3f70c0388e3b50f56a201b7
18,620
def expand_qgrams_word_list(wlist, qsize, output, sep='~'):
    """Expands a list of words into a list of q-grams.  It uses `sep` to join words"""
    n = len(wlist)
    for start in range(n - qsize + 1):
        t = sep.join(wlist[start:start+qsize])
        output.append(t)
    return output
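For example, bigrams over a three-word list (illustrative assert, not part of the original record):

assert expand_qgrams_word_list(["a", "b", "c"], 2, []) == ["a~b", "b~c"]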
0937f53fa12dded031dec21deb32282c85c904ac
18,621
def find_repair_ends_and_reclassify(problem_rpns, tag_list, word_list,
                                    search_start, partial_disallowed=False):
    """Backwards search to find a possible repair end rpn tag and re-classify
    its type if needs be.  Return the repair ends successfully found.

    problem_rpns :: list of repair end tags (non-deletes) which are to be
        moved back before an edit term.
    tag_list :: the disfluency tags for utterance
    word_list :: the words for the utterance
    search_start :: position in utterance where backwards search starts
    partial_disallowed :: repair end cannot be a partial word
    """
    resolved = []
    unresolved = []
    for i in range(search_start, -1, -1):
        if "<e/>" in tag_list[i]:
            continue  # only allow non-edit term words
        if partial_disallowed and word_list[i][-1] == "-":
            continue  # in partial_disallowed setting, no partial word rpns
        # if we have got here we may have a possible repair end word
        for rpn in problem_rpns:
            if rpn in resolved or rpn in unresolved:
                continue
            rpMid = rpn.replace("rpnsub", "rp").replace("rpnrep", "rp")
            rpStart = rpn.replace("rpnsub", "rps").replace("rpnrep", "rps")
            # a legit rp tag, can be the repair end
            if rpMid in tag_list[i] or rpStart in tag_list[i]:
                # get rid of rp mid tags
                tag_list[i] = tag_list[i].replace(rpMid, "")
                tag_list[i] = tag_list[i] + rpn  # add repair end tag
                # reclassify it as either repeat or substitution by iterating
                # up to this current word
                rmMid = rpn.replace("rpnsub", "rm").replace("rpnrep", "rm")
                rmStart = rpn.replace("rpnsub", "rms").replace("rpnrep", "rms")
                reparandum = []
                repair = []
                for check in range(0, i + 1):
                    if rmStart in tag_list[check] or rmMid in tag_list[check]:
                        reparandum.append(word_list[check])
                    if rpStart in tag_list[check] or rpMid in tag_list[check]:
                        repair.append(word_list[check])
                    if rpn in tag_list[check]:
                        repair.append(word_list[check])
                        # it was marked as a repeat, change if no longer a
                        # repeat
                        if "rep" in rpn:
                            if not reparandum == repair:
                                tag_list[check] = tag_list[check].replace(
                                    rpn, rpn.replace("rpnrep", "rpnsub"))
                        # else if marked as a sub, change if it is now a repeat
                        elif reparandum == repair:
                            tag_list[i] = tag_list[i].replace(
                                rpn, rpn.replace("rpnsub", "rpnrep"))
                        break
                resolved.append(rpn)  # this is a resolved repair end
    return resolved
c0ed41ed0f43729e79d02a4c7d09f9d44d46e138
18,622
def fibonacci(n):
    """Find the n-th fibonacci number - the first two numbers are 1, the
    third one is the sum of the first two, the fourth one is the sum of the
    second and the third, ...  meaning that
    fibonacci(n) = fibonacci(n-1) + fibonacci(n-2).

    This example also shows one of the possible problems with recursion -
    we calculate the same things over and over again!  For instance, if we
    call fibonacci(5), we get a tree like this::

                5
            4       3
          3   2   2   1
        2   1

    As you can see, we called fibonacci(1) 2 times, fibonacci(2) 3 times
    and fibonacci(3) 2 times.  Of course this can grow very fast, so if you
    call something like fibonacci(50), it can take a very long time to
    calculate the result.

    >>> [fibonacci(i) for i in range(1, 11)]
    [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
    """
    if n <= 2:
        return 1
    return fibonacci(n-1) + fibonacci(n-2)
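A common fix for the recomputation the docstring describes is memoization; a minimal sketch using functools.lru_cache (not part of the original record):

from functools import lru_cache

@lru_cache(maxsize=None)
def fibonacci_memo(n):
    # Each value is computed once, so the call tree above collapses
    # to O(n) work instead of exponential.
    if n <= 2:
        return 1
    return fibonacci_memo(n - 1) + fibonacci_memo(n - 2)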
7feb6c151a9ef223ff69dbbe5ba637fb278b5148
18,624
import inspect
import sys


def script_code():
    """
    Return the source code of a module running as '__main__'.

    Acquisition scripts can use this to save their code.  If attempting to
    load the source code raises an exception, return a string representation
    of the exception.

    Returns
    -------
    str
        The code, with lines separated by newline characters.
    """
    try:
        return inspect.getsource(sys.modules['__main__'])
    except Exception as e:
        return str(e)
5a925c4953b6f98f75f1a8d7220dfc464dd66538
18,625
def index():
    """Index (Testing only)"""
    return 'You shouldn\'t be here'
458742bf080706cc118ea79884282f5850c92b57
18,627
def convert_thrift_header(thrift_header):
    """ returns a dictionary representation of a thrift transaction header """
    return {
        "actor": thrift_header.actor,
        "block_id": thrift_header.block_id,
        "business_unit": thrift_header.business_unit,
        "create_ts": thrift_header.create_ts,
        "creator_id": thrift_header.creator_id,
        "entity": thrift_header.entity,
        "family_of_business": thrift_header.family_of_business,
        "line_of_business": thrift_header.line_of_business,
        "owner": thrift_header.owner,
        "status": thrift_header.status,
        "transaction_id": thrift_header.transaction_id,
        "transaction_ts": thrift_header.transaction_ts,
        "transaction_type": thrift_header.transaction_type
    }
f0554da0c10c464633d19a001ca32a0180c42dd0
18,628
def _find_exclude_idx(ch_names, exclude):
    """Find the index of all channels to exclude.

    If there are several channels called "A" and we want to exclude "A",
    then add (the index of) all "A" channels to the exclusion list.
    """
    return [idx for idx, ch in enumerate(ch_names) if ch in exclude]
db754d5e92af59563d6ee2004e5470bfe08a0fc1
18,629
import torch


def compute_jacobian_on_surface(u, v, forward_transform, eps=0.01):
    """
    Computes the differentials
    [dX/du, dY/du, dZ/du, dX/dv, dY/dv, dZ/dv]
    for the given projection function using central differences.  u and v
    are an orthogonal coordinate system on the surface and X, Y, Z are 3D
    Cartesian coordinates.

    Returns (u.shape[0], u.shape[1], 3, 2)
    """
    # Compute dX/du, dY/du, dZ/du
    x0, y0, z0 = forward_transform(u - eps, v)
    x1, y1, z1 = forward_transform(u + eps, v)
    dx_du = (x1 - x0) / (2 * eps)
    dy_du = (y1 - y0) / (2 * eps)
    dz_du = (z1 - z0) / (2 * eps)

    # Compute dX/dv, dY/dv, dZ/dv
    x2, y2, z2 = forward_transform(u, v - eps)
    x3, y3, z3 = forward_transform(u, v + eps)
    dx_dv = (x3 - x2) / (2 * eps)
    dy_dv = (y3 - y2) / (2 * eps)
    dz_dv = (z3 - z2) / (2 * eps)

    return torch.stack((torch.stack((dx_du, dy_du, dz_du), -1),
                        torch.stack((dx_dv, dy_dv, dz_dv), -1)), -1)
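A quick way to exercise it is with a unit-sphere parameterisation (a hypothetical forward_transform, not from the source):

def sphere(u, v):
    # u = azimuth, v = inclination on the unit sphere
    return (torch.sin(v) * torch.cos(u),
            torch.sin(v) * torch.sin(u),
            torch.cos(v))

u = torch.rand(4, 5) * 3.14
v = torch.rand(4, 5) * 3.14
J = compute_jacobian_on_surface(u, v, sphere)
assert J.shape == (4, 5, 3, 2)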
3d0a7a749abeba834fe800365dac1c208e16a87a
18,631
def compute_factorial(n: int) -> int:
    """
    Compute n-factorial.

    :param n: Number to compute factorial for
    :return: n-factorial
    """
    if (not isinstance(n, int)) or (n < 0):
        raise ValueError("compute_factorial() only accepts non-negative integer values.")
    factorial = 1
    for i in range(1, n + 1):
        factorial *= i
    return factorial
75061c245376f09ec01e6bcf018d04e938f419c1
18,632
def linear_annealing(n, total, p_initial, p_final):
    """Linearly interpolates a probability between p_initial and p_final.

    Current probability is based on the current step, n.  Used to linearly
    anneal the exploration probability of the RLTuner.

    Args:
        n: The current step.
        total: The total number of steps that will be taken (usually the
            length of the exploration period).
        p_initial: The initial probability.
        p_final: The final probability.

    Returns:
        The current probability (between p_initial and p_final).
    """
    if n >= total:
        return p_final
    else:
        return p_initial - (n * (p_initial - p_final)) / (total)
2f79b56efd11477a1f649e9b374891ff09632c7f
18,633
def get_charging_status(battery_id):
    """
    Check if the battery is currently charging

    :param battery_id: Battery ID/Number e.g. BAT0
    :return: bool, True if battery is charging
    """
    with open(f'/sys/class/power_supply/{battery_id}/status') as f:
        if 'Charging' in f.read():
            return True
    return False
18fa1cc07a4338ec526954342383f346f9cd057c
18,634
from unicodedata import category
from sys import maxunicode


def get_unicodedata_categories():
    """
    Extracts Unicode categories information from unicodedata library.
    Each category is represented with an ordered list containing code points
    and code point ranges.

    :return: a dictionary with category names as keys and lists as values.
    """
    categories = {k: [] for k in (
        'C', 'Cc', 'Cf', 'Cs', 'Co', 'Cn',
        'L', 'Lu', 'Ll', 'Lt', 'Lm', 'Lo',
        'M', 'Mn', 'Mc', 'Me',
        'N', 'Nd', 'Nl', 'No',
        'P', 'Pc', 'Pd', 'Ps', 'Pe', 'Pi', 'Pf', 'Po',
        'S', 'Sm', 'Sc', 'Sk', 'So',
        'Z', 'Zs', 'Zl', 'Zp'
    )}

    minor_category = 'Cc'
    start_cp, next_cp = 0, 1
    for cp in range(maxunicode + 1):
        if category(chr(cp)) != minor_category:
            if cp > next_cp:
                categories[minor_category].append((start_cp, cp))
                categories[minor_category[0]].append(categories[minor_category][-1])
            else:
                categories[minor_category].append(start_cp)
                categories[minor_category[0]].append(start_cp)
            minor_category = category(chr(cp))
            start_cp, next_cp = cp, cp + 1
    else:
        if next_cp == maxunicode + 1:
            categories[minor_category].append(start_cp)
            categories[minor_category[0]].append(start_cp)
        else:
            categories[minor_category].append((start_cp, maxunicode + 1))
            categories[minor_category[0]].append(categories[minor_category][-1])

    return categories
6e50fa68324f8262d266d3ec483273cc41387c7b
18,635
def get_max_frequency():
    """
    Get the maximum allowed frequency for cpu0, assuming all CPUs use the same.
    """
    with open('/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq') as fp:
        return int(fp.readlines()[0].strip()) * 1000
cc737e9d633239b8253f3def69f0272581fa10aa
18,636
def _convertToElementList(elements_list):
    """
    Take a list of element node indexes delimited by -1 and convert it into
    a list of element node index lists.
    """
    elements = []
    current_element = []
    for node_index in elements_list:
        if node_index == -1:
            elements.append(current_element)
            current_element = []
        else:
            # We also add one to the indexes to suit Zinc node indexing
            current_element.append(node_index + 1)
    return elements
750a7a7780dc901b7e00cd8a36fdfd3638005322
18,637
def get_common_introduced(db_entry, arches):
    """Returns the common introduction API level or None.

    If the symbol was introduced in the same API level for all architectures,
    return that API level.  If the symbol is not present in all architectures
    or was introduced to them at different times, return None.
    """
    introduced = None
    for arch in arches:
        introduced_tag = 'introduced-' + arch
        if introduced_tag not in db_entry:
            return None
        if introduced is None:
            introduced = db_entry[introduced_tag]
        elif db_entry[introduced_tag] != introduced:
            return None
        # Else we have the symbol in this arch and it's the same introduction
        # level. Keep going.
    return introduced
f71d5f91faa5a8553cd85cdbd248dea8052b2fab
18,641
import sqlite3
import json


def get_did_list(workspace_contract, mode):
    """ return list of dict """
    path = mode.db_path
    conn = sqlite3.connect(path + 'nameservice.db')
    c = conn.cursor()
    data = {"workspace_contract": workspace_contract}
    c.execute("SELECT did FROM resolver WHERE identity_workspace_contract = :workspace_contract", data)
    did = c.fetchone()
    conn.close()
    try:
        return json.loads(did[0])
    except:
        return []
899434af8a3f5200dc63161ee30c1cde78d7d419
18,642
def kf_derivative_wrt_density(kf, n):
    """Computes the derivative of kf with respect to density

    It is given by `kf / (3 * n)`

    Parameters
    ----------
    kf : array-like
        The fermi momentum in fm^-1
    n : array-like
        The density in fm^-3

    Returns
    -------
    d(kf)/d(n) : array-like
        In units of fm^2
    """
    return kf / (3 * n)
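The identity follows from kf being proportional to n^(1/3); a quick numerical check, assuming the common convention kf = (3 pi^2 n)^(1/3) (an assumption, not stated in the source):

import numpy as np

n = 0.16  # fm^-3, roughly nuclear saturation density (assumed test value)
kf = (3 * np.pi**2 * n) ** (1.0 / 3.0)
h = 1e-8  # step for a forward finite difference
kf_plus = (3 * np.pi**2 * (n + h)) ** (1.0 / 3.0)
numeric = (kf_plus - kf) / h
assert abs(numeric - kf_derivative_wrt_density(kf, n)) < 1e-4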
76dd7581f0248a0f7c5f01706cf6f23fcf27f079
18,643
def extract_phone_number(num, replacement):
    """Takes in the phone number as string and replace_with arg as
    replacement, returns processed phone num or None"""
    phone_num = "".join(i for i in num if i.isdigit())
    if len(phone_num) != 10:
        phone_num = replacement if replacement == "--blank--" else num
    return phone_num
cf49aa0f2cea5974feb487385d76349430e3b5f7
18,644
def is_valid_entry_name(filename):
    """Return whether a name is valid as an entry name.

    :param filename: The name to check
    :return: bool
    """
    # Fix Me
    return True
d4795a3e468bbc25eac5e653a07ad882b7872931
18,645
def compress_string(string):
    """
    Compresses a string based on a counter of repeated characters.

    :param string: the source string.
    :return: the compressed string if it is shorter than the source string,
        otherwise the source string.
    """
    # Strings shorter than two characters cannot be compressed.
    if len(string) < 2:
        return string

    compressed_string = ''
    repeated_char_counter = 0
    previous_char = None

    for index, char in enumerate(string):
        # The very first character.
        if index == 0:
            # previous_char is still None, so there is nothing to compare yet.
            repeated_char_counter += 1
        # The main part of the string.
        elif 0 < index < len(string) - 1:
            # If the characters match, increment the character counter.
            if previous_char == char:
                repeated_char_counter += 1
            # If the characters differ, a new run of characters has started,
            # so record the previous run and reset the counter to 1 - the
            # current character is already counted.
            else:
                compressed_string += f'{previous_char}{repeated_char_counter}'
                repeated_char_counter = 1
        # The very last character of the string.
        else:
            # If the characters match, the current character continues the
            # run, so simply record it.  But first add 1 to the counter to
            # account for the current character.
            if previous_char == char:
                compressed_string += f'{previous_char}{repeated_char_counter + 1}'
            # If the characters differ, the last character is a new run.
            # Record the previous run, then record the current character as
            # a separate run of one.
            else:
                compressed_string += f'{previous_char}{repeated_char_counter}'
                compressed_string += f'{char}1'
        # Remember the character - on the next iteration the current
        # character is used as the previous one.
        previous_char = char

    # If after compression the string turned out longer than the source,
    # return the source string.
    if len(compressed_string) > len(string):
        return string
    return compressed_string
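Two illustrative cases, one where compression wins and one where it would inflate the string (asserts not part of the original record):

assert compress_string("aaabccc") == "a3b1c3"
assert compress_string("abc") == "abc"  # "a1b1c1" would be longer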
24079216cc5296c91c51dd196b33df578a87dbb3
18,646
def get_region_string(point, size=0.1):
    """
    Construct a string of coordinates that create a box around the specified point.

    Parameters
    ----------
    point : list of float
        Latitude and longitude of the specified point.
    size : float, optional
        Side length of the output square.

    Returns
    ----------
    str
        A string with coordinates for each corner of the box.  Can be passed
        to Earth Engine.
    """
    left = point[0] - size / 2
    right = point[0] + size / 2
    top = point[1] + size / 2
    bottom = point[1] - size / 2
    coords = str([[left, top], [right, top], [right, bottom], [left, bottom]])
    return coords
d39da98ebc14224817d4b093875e3fafce143441
18,648
def _nbits(n, correction={
        '0': 4, '1': 3, '2': 2, '3': 2,
        '4': 1, '5': 1, '6': 1, '7': 1,
        '8': 0, '9': 0, 'a': 0, 'b': 0,
        'c': 0, 'd': 0, 'e': 0, 'f': 0}):
    """Number of bits in binary representation of the positive integer n,
    or 0 if n == 0.
    """
    if n < 0:
        raise ValueError("The argument to _nbits should be nonnegative.")
    hex_n = "%x" % n
    return 4*len(hex_n) - correction[hex_n[0]]
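The correction table just subtracts the bits unused by the leading hex digit; the result agrees with int.bit_length (illustrative assert, not part of the original record):

assert all(_nbits(n) == n.bit_length() for n in range(1, 10000))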
11d6367e41273037108680634c63958de6e72730
18,649
def newman_conway(num):
    """ Returns a list of the Newman Conway numbers for the given value.

        Time Complexity: O(n^2)
        Space Complexity: O(n)
    """
    if num == 0:
        raise ValueError("ValueError")
    if num == 1:
        return '1'
    s = '1 1'
    arr = [0, 1, 1]
    for num in range(3, num + 1):
        # i=3: arr[arr[2]] + arr[3 - arr[2]] = arr[1] + arr[2] = 2
        new_num = arr[arr[num - 1]] + arr[num - arr[num - 1]]
        arr.append(new_num)
        s += " " + str(new_num)
    return s
2b7dee1c5a3d320dce1398d2aa4f16e27d9a9985
18,650
def incrementFilename(filename, increment):
    """ add count to filename """
    fname = filename.split('.')
    finalname = fname[0] + '_' + increment + '.' + fname[1]
    return finalname
972c6434a3746b801aff70190ec82c2fd3de1c20
18,652
def get_keys(dict):
    """ extract the keys from a dictionary """
    return dict.keys()
7c12a9717a4ed57aec53366f60a15f3aa04f672d
18,655
def __list_query_gen_simple(
    self, offset: int, list_type: str, direction: str, query_filter=None
) -> dict:  # pragma: no cover
    """A method to get simple info of photos to support date filter function

    :param self:
    :param offset:
    :param list_type:
    :param direction:
    :param query_filter:
    :return:
    """
    query = {
        u"query": {
            u"filterBy": [
                {
                    u"fieldName": u"startRank",
                    u"fieldValue": {u"type": u"INT64", u"value": offset},
                    u"comparator": u"EQUALS",
                },
                {
                    u"fieldName": u"direction",
                    u"fieldValue": {u"type": u"STRING", u"value": direction},
                    u"comparator": u"EQUALS",
                },
            ],
            u"recordType": list_type,
        },
        u"resultsLimit": self.page_size * 2,
        u"desiredKeys": [
            u"assetDate",
            u"recordName",
            u"recordType",
            u"recordChangeTag",
            u"masterRef",
        ],
        u"zoneID": {u"zoneName": u"PrimarySync"},
    }
    if query_filter:
        query["query"]["filterBy"].extend(query_filter)
    return query
8e2a339aad42c9a146f49e153ec2fc29c0d8f729
18,657
import os
import subprocess


def mathematica_run(command):
    """Call the shell script which in turn calls mathematica"""
    # Fix this path to use actual root path from config
    # from beluga import Beluga
    script = os.path.dirname(__file__) + '/runMath.sh'
    print(script)
    p = subprocess.Popen([script, command], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    return out.decode('utf-8')
b2d6258b5e2a6b309d25a6702c91c91a00cd83eb
18,658
import argparse


def ValidFloatActionFactory(lower, upper):
    """ A factory for actions that will validate float inputs. """
    class ValidateAction(argparse.Action):
        def __call__(self, parser, namespace, values, option_string=None):
            if not (upper >= values >= lower):
                raise ValueError(f"{self.dest} must be between [{lower}, {upper}]")
            setattr(namespace, self.dest, values)
    return ValidateAction
46ddca2c5185407c4425601c7f878ad1b19143c9
18,659
def get_printer(msg):
    """
    Returns a printer function that prints information about a tensor's
    gradient.  Used by register_hook in the backward pass.

    :param msg:
    :return: printer function
    """
    def printer(tensor):
        if tensor.nelement() == 1:
            print("{} {}".format(msg, tensor))
        else:
            print("{} shape: {} max: {} min: {} mean: {}"
                  .format(msg, tensor.shape, tensor.max(),
                          tensor.min(), tensor.mean()))
    return printer
0aefa7449452f791d0c6a15326044a0171c7faf4
18,660
def tidyRes(res):
    """
    >>> tidyRes('')
    ''
    >>> tidyRes(())
    >>> tidyRes((None, None))
    >>> tidyRes(None)
    >>> tidyRes((1, 2))
    (1, 2)
    >>> tidyRes(1)
    1
    >>> tidyRes((1,))
    1
    >>> tidyRes((1, None))
    1
    >>> tidyRes((1, None, 2))
    (1, 2)
    >>> tidyRes((None, 1))
    1
    >>> tidyRes([1, 2])
    [1, 2]
    >>> tidyRes([None, 2])
    2
    >>> tidyRes([None, 1, 2])
    [1, 2]
    >>> tidyRes([None, 1, None, (3, None, 5), 2])
    [1, (3, 5), 2]
    """
    if res is None:
        return None
    if type(res) == tuple or type(res) == list:
        if len(res) == 0:
            return None
        elif res[0] is None:
            res = res[1:]
            res = tidyRes(res)
        elif len(res) == 1:
            return res[0]
        else:
            if type(res) == tuple:
                res = tuple([tidyRes(x) for x in res if x is not None])
            else:
                res = [tidyRes(x) for x in res if x is not None]
            if len(res) == 0:
                return None
            elif len(res) == 1:
                return res[0]
    return res
af6983fbaab575013f6bc4f9dc51aa3d649257b4
18,661
def remove_protocol(addr):
    """
    Removes the first occurrence of the protocol string ('://') from the
    string `addr`

    Parameters
    ----------
    addr : str
        The address from which to remove the address prefix.

    Returns
    -------
    str
    """
    name = addr.split("://", 1)  # maxsplit = 1... removes only the first occurrence
    name = ''.join(name[1:]) if len(name) > 1 else addr
    return name
f59c702be333671da3e9f202581acf6db3082c50
18,662
def find_duplicates(_list):
    """a more efficient way to return duplicated items

    ref: https://www.iditect.com/guide/python/python_howto_find_the_duplicates_in_a_list.html

    :arg list _list: a python list
    """
    first_seen = set()
    first_seen_add = first_seen.add
    # items already seen once end up in the duplicates set
    duplicates = set(i for i in _list if i in first_seen or first_seen_add(i))
    return duplicates
ed1ab9020d41b608dd6b8d3ce9e2448a096b1574
18,663
def first(iterable, default=None, key=None):
    """
    Return the first truthy value of an iterable.

    Shamelessly stolen from https://github.com/hynek/first
    """
    if key is None:
        for el in iterable:
            if el:
                return el
    else:
        for el in iterable:
            if key(el):
                return el
    return default
fd3eeeecf88b0dd29b3f689f61732e6502306016
18,664
def filter_dictionary(dictionary, keys):
    """
    Returns a dictionary with only keys.
    """
    if not keys:
        return dict()
    if not dictionary:
        return dict()
    return dict((key, dictionary[key]) for key in keys if key in dictionary)
3eb66b1bfdeb019fa717ed0b480bb32d1784e62f
18,665
def read_txt_file(file_name: str, encoding: str = "utf-8") -> str:
    """Reads a text file

    :param file_name: path
    :param encoding: encoding to use
    :return: str content
    """
    with open(file_name, "r", encoding=encoding) as txt_file:
        return txt_file.read()
f97bd162596569b521121bd17da3edfdc04889d8
18,666
from typing import Dict


def prepare_note_create_output(record: Dict) -> Dict:
    """
    Prepares context output for the note create command.

    :param record: Dict containing note record.

    :return: prepared context output Dict.
    """
    return {
        'Id': record.get('Id', ''),
        'WorkInfoType': record.get('WorkInfoType', ''),
        'ViewAccess': record.get('ViewAccess', ''),
        'Summary': record.get('Summary', ''),
        'Submitter': record.get('Submitter', ''),
        'srId': record.get('srId', ''),
        'Notes': record.get('Notes', ''),
        'ModifiedDate': record.get('ModifiedDate', ''),
        'CreatedDate': record.get('CreatedDate', '')
    }
a0cc6f71dae7527f2c8088d349b79921f163d2d7
18,667
def query_interval_tree_by_type(interval_tree, time, type):
    """
    Returns only items of specified type from interval tree at given time.
    """
    all_intervals = interval_tree[time]
    selected_intervals = set()
    for interval in all_intervals:
        if isinstance(interval[-1], type):
            selected_intervals.add(interval.data)
    return selected_intervals
68366bfe9413cdecd8b3551b8070d1cc30646358
18,668
def extract_solution(G, model):
    """ Get a dictionary of vertex to clique assignments

    If a vertex is not assigned to a clique, its value in the dictionary is zero.

    :param G: an :py:class:`~graphilp.imports.ilpgraph.ILPGraph`
    :param model: a solved Gurobi model for clique packing

    :returns: a dictionary mapping vertices to cliques
    """
    cliques = {v: 0 for v in G.G.nodes()}
    for k, v in G.cluster_assignment.items():
        if v.X > 0.5:
            cliques[k[1]] = k[0] + 1
    return cliques
1c32cd88765c36000b1678230ded2b69d8a9eca7
18,670
import re


def parse_question_limits(question, for_items=False):
    """
    Converts word and character length validators into JSON Schema-compatible
    maxLength and regex validators.
    """
    limits = {}
    word_length_validator = next(
        iter(filter(None, (
            re.match(r'under_(\d+)_words', validator['name'])
            for validator in question.get('validations', [])
        ))),
        None
    )
    char_length_validator = next(
        iter(filter(None, (
            re.search(r'([\d,]+)', validator['message'])
            for validator in question.get('validations', [])
            if validator['name'] == 'under_character_limit'
        ))),
        None
    )

    char_length = question.get('max_length') or (
        char_length_validator and char_length_validator.group(1).replace(',', '')
    )
    word_length = question.get('max_length_in_words') or (
        word_length_validator and word_length_validator.group(1)
    )

    if char_length:
        limits['maxLength'] = int(char_length)

    if word_length:
        if not for_items and question.get('optional'):
            limits['pattern'] = r"^$|(^(?:\S+\s+){0,%s}\S+$)" % (int(word_length) - 1)
        else:
            limits['pattern'] = r"^(?:\S+\s+){0,%s}\S+$" % (int(word_length) - 1)

    return limits
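The generated word-count pattern allows at most max_length_in_words whitespace-separated tokens; an illustrative check with a hypothetical question dict (not part of the original record):

limits = parse_question_limits({'max_length_in_words': 3})
assert re.match(limits['pattern'], 'one two three')
assert not re.match(limits['pattern'], 'one two three four')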
3b9400f27755e5f93de52665d7e8f9d209ad5e30
18,672