content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def get_subscription_uri(khoros_object):
    """This function returns the subscriptions URI for the v2 API to perform API calls.

    .. versionadded:: 3.5.0

    :param khoros_object: The core Khoros object
    :type khoros_object: class[khoros.Khoros]
    :returns: The full (absolute) URI for the ``subscriptions`` v2 API endpoint
    """
    base_url = khoros_object.core_settings.get('v2_base')
    return '{}/subscriptions'.format(base_url)
574ac192e3a763633b2e1f19b57ab8247c373697
85,629
from typing import Any
from typing import Iterable


def iterify(item: Any) -> Iterable:
    """Return *item* wrapped as an iterator, treating str/bytes as a single item.

    Args:
        item (Any): item to turn into an iterable

    Returns:
        Iterable: of 'item'. A str type will be stored as a single item in
        an Iterable wrapper.
    """
    if item is None:
        return iter(())
    if isinstance(item, (str, bytes)):
        # Strings are iterable, but callers want the whole string as one item.
        return iter([item])
    try:
        return iter(item)
    except TypeError:
        # Non-iterable scalar: wrap it in a one-element tuple.
        return iter((item,))
a1d54b776622dc3fde8b1cfc8b184a61f37d73f5
85,632
def get_column_name(df):
    """Return the first four column names of *df* as strings.

    Parameters
    ----------
    df
        Input DataFrame

    Returns
    -------
    tuple of four strings: (feature name, feature description, industry,
    usecase) column names, taken positionally from the DataFrame.
    """
    columns = df.columns.tolist()
    # Positional contract: the first four columns carry these roles.
    return (str(columns[0]), str(columns[1]), str(columns[2]), str(columns[3]))
3509fcd108ea4c68786ed63e98fe417e6c50579e
85,633
def isInAssociation(element, iterable):
    """A wrapper for 'is in' which returns True if `iterable` is None.

    If `iterable` is None, then we accept all elements.

    :param element: a record exposing a 'Moving group' key
    :param iterable: None, a single group name (str), or a collection of names
    :return: bool
    """
    if iterable is None:
        return True
    # isinstance (not ``type(...) is str``) is the idiomatic check and also
    # accepts str subclasses.
    if isinstance(iterable, str):
        return element['Moving group'] == iterable
    return element['Moving group'] in iterable
769b228c5c3f088180907f2f022ce72e2b077ae9
85,634
def get_safe_settings(settings_module):
    """Strip 'lettuce.django' from INSTALLED_APPS to avoid a multiprocessing
    atexit error under ``setup.py test``.

    setup.py test and the multiprocessing module tend to fight; lettuce.django
    uses multiprocessing, which raises a TypeError in atexit handlers after the
    tests finish. For details see:
    http://comments.gmane.org/gmane.comp.python.distutils.devel/11688

    :param settings_module: Django settings module to sanitize (mutated in place)
    :return: the same settings module, with INSTALLED_APPS rebound to a list
    """
    apps = list(settings_module.INSTALLED_APPS)
    if 'lettuce.django' in apps:
        # list.remove drops the first occurrence, same as pop(index(...)).
        apps.remove('lettuce.django')
    settings_module.INSTALLED_APPS = apps
    return settings_module
bb91aeef9c510bcb6914827784f6b0a1e247849e
85,645
def get_named_parent_widget(source_widget, parent_widget_object_name):
    """Walk up the widget hierarchy to find an ancestor with a given name.

    :param source_widget: <QWidget>
    :param parent_widget_object_name: <string> objectName of the parent widget
        you're looking for
    :return: the matching ancestor widget, or None if no ancestor matches
    """
    # Iterative equivalent of the recursive climb: stop at a match or the root.
    widget = source_widget.parentWidget()
    while widget is not None and widget.objectName() != parent_widget_object_name:
        widget = widget.parentWidget()
    return widget
e3173422d1c4f12ffaa8eb7e730d8f649ee25707
85,646
import re


def enclosed_in_tag(source_text, tag, loc):
    """
    Determine whether within the `source_text`, the element present at `loc`
    is enclosed within the XML `tag`.

    :param source_text: a string that possibly contains some XML markup.
    :type source_text: :class:`str`
    :param tag: a string specifying an XML tag.
    :type tag: :class:`str`
    :param loc: the location to test for enclosure.
    :type loc: :class:`int`
    :return: a boolean indicating whether `loc` is enclosed in the specified
        `tag`.
    :rtype: :class:`bool`

    Example: ::

        source_text = '<foo>bar</foo>'
        tag = 'foo'
        loc = 6
        enclosed_in_tag(source_text, tag, loc)
        True
    """
    # Only the text after `loc` matters: if loc is inside <tag>...</tag>, the
    # very next tag-like token must be the matching close tag.
    trailing_text = source_text[loc:]
    close_tag = '</{}>'.format(tag)
    # First opening tag after loc: '<' not followed by '/'.
    first_open_tag = re.search('<[^\/].*?>', trailing_text)
    # First closing tag after loc: '</...>'.
    first_closed_tag = re.search('<\/.*?>', trailing_text)
    if not first_closed_tag:
        # Nothing ever closes after loc, so loc cannot be inside any tag.
        return False
    else:
        if first_open_tag is not None and first_open_tag.start() < first_closed_tag.start():
            # A new element opens before anything closes -> loc sits between
            # elements, not inside `tag`.
            return False
        elif first_closed_tag.group(0) == close_tag:
            return True
        else:
            # The next closing tag closes some other element.
            return False
e23ec00bc3ad22746584e1db4802d310f24aced9
85,656
import torch


def matrix_to_tril(mat: torch.Tensor):
    """Flatten the lower triangle of a 2-D tensor into a 1-D tensor (row order).

    Example:
        [[1, x, x],
         [2, 3, x],   ->  [1, 2, 3, 4, 5, 6]
         [4, 5, 6]]
    """
    assert mat.ndim == 2
    # tril_indices yields a (2, n) tensor of (row, col) pairs in row order.
    rows, cols = torch.tril_indices(*mat.shape)
    return mat[rows, cols]
16f811fcd6b94e6d8945633215dc84ab3b62f31d
85,658
import shutil


def copy(srcPath, destPath):
    """Copy the file at ``srcPath`` to ``destPath`` and return the destination path."""
    destination = shutil.copy(srcPath, destPath)
    return destination
ce929e899f609981b5ddd3d83d5903022bf82690
85,661
import re


def _is_native_file(filename):
    """Returns true if filename is a native (non-tfrecord) genomics data file.

    Bug fix: the original pattern had no end anchor, so names such as
    'x.tfrecords' or 'x.tfrecord.gz.bak' were wrongly classified as tfrecords.
    The '.tfrecord' / '.tfrecord.gz' suffix must now terminate the name.
    """
    return not re.search(r"\.tfrecord(\.gz)?$", filename)
0167d3d8ce208e8d421c711871c72dfcee8f3237
85,664
import string


def remove_punctuation(word):
    """Strip every ASCII punctuation character from *word*."""
    # maketrans('', '', chars) maps each punctuation char to deletion.
    table = str.maketrans('', '', string.punctuation)
    return word.translate(table)
a8f672ce3b95247578d76874c357f89a2a1f23bc
85,665
def _add_new_user(user_rec_sys_data, product_category, review_score):
    """
    Adds new user rating to the user_rec_sys_data.

    Fix: ``DataFrame.append`` was deprecated in pandas 1.4 and removed in
    pandas 2.0; ``pd.concat`` is the supported replacement and behaves the
    same with ``ignore_index=True``.

    Parameters
    ----------
    user_rec_sys_data:
        Reviews DataFrame (must contain a ``customer_id`` column).
    product_category:
        Product Selected on the website.
    review_score:
        Rating given for the item.

    Returns
    -------
    (updated_data, new_user_id) tuple; the input frame is not modified.
    """
    import pandas as pd  # local import keeps module importable without pandas

    # New id = one past the current maximum customer id.
    new_user_raw_id = user_rec_sys_data["customer_id"].max() + 1
    new_row = pd.DataFrame([{
        "customer_id": new_user_raw_id,
        "product_category": product_category,
        "review_score": review_score,
    }])
    updated_data = pd.concat([user_rec_sys_data, new_row], ignore_index=True)
    return updated_data, new_user_raw_id
f1e87cb69b163c48537d040809b4bfd689192753
85,666
import typing


def _cast_noop(definition: dict, value: typing.Any) -> typing.Any:
    """
    Cast as a no-op operation used for unknown or uncastable value types.

    :param definition: Specification definition for the associated value to cast.
    :param value: A loaded value to be cast into its boto client response value.
    :return: An unmodified value as this is a no-op.
    """
    # Intentionally ignores `definition`: unknown types pass straight through.
    return value
f2adcd5b1c3eeedab162ce6312fa1af482e63b32
85,667
import re


def present(incomingString):
    """
    Return a tuple containing a list of all single-dollar and a list of all
    double-dollar variables present in the given string.

    Duplicates are removed; list order is unspecified (set-derived).
    """
    single_pattern = re.compile(r"((?<!\\)(?<!\$)\${1}(?!\$)[A-Z0-9_]*)")
    double_pattern = re.compile(r"((?<!\\)(?<!\$)\${2}(?!\$)[A-Z0-9_]*)")
    # Strip the leading '$'/'$$' from each match and dedupe via a set.
    singles = list({m.group()[1:] for m in single_pattern.finditer(incomingString)})
    doubles = list({m.group()[2:] for m in double_pattern.finditer(incomingString)})
    return (singles, doubles)
30eef0d530420c5ecfe41ae502f6b53043844673
85,669
def jax_tensor_to_xla_buffer(jax_buf):
    """Convert a JAX Device array back to XLA buffer."""
    buffer = jax_buf.device_buffer
    return buffer
3bc432a1c80f4a3cce0cd397aeff98ef8651a984
85,671
def is_user_pages(full_name):
    """
    Return True if the repository is a user pages repository
    (i.e. '<user>/<user>...github.io' or '...github.com').
    """
    username, repo_name = full_name.split('/', 1)
    if not repo_name.startswith(username):
        return False
    # endswith accepts a tuple of suffixes.
    return repo_name.endswith(('github.io', 'github.com'))
0cc1a14dd91f0a1839c5b49112ff841f180e90d9
85,674
def SignsToSector(signs):
    """Takes a boolean array and returns the integer given by those binary digits.

    Element i contributes bit i (little-endian bit order).
    """
    # Avoid shadowing the builtin `sum`; accumulate with bitwise OR.
    value = 0
    for bit_index in range(signs.shape[0]):
        if signs[bit_index]:
            value |= 1 << bit_index
    return value
1ad339b9c03da24e3729a99ab080f70cb24feb54
85,678
def _create_application_request(app_metadata, template):
    """
    Construct the request body to create application.

    :param app_metadata: Object containing app metadata
    :type app_metadata: ApplicationMetadata
    :param template: A packaged YAML or JSON SAM template
    :type template: str
    :return: SAR CreateApplication request body (falsy values omitted)
    :rtype: dict
    """
    # Author, description and name are mandatory for CreateApplication.
    app_metadata.validate(['author', 'description', 'name'])
    fields = {
        'Author': app_metadata.author,
        'Description': app_metadata.description,
        'HomePageUrl': app_metadata.home_page_url,
        'Labels': app_metadata.labels,
        'LicenseBody': app_metadata.license_body,
        'LicenseUrl': app_metadata.license_url,
        'Name': app_metadata.name,
        'ReadmeBody': app_metadata.readme_body,
        'ReadmeUrl': app_metadata.readme_url,
        'SemanticVersion': app_metadata.semantic_version,
        'SourceCodeUrl': app_metadata.source_code_url,
        'SpdxLicenseId': app_metadata.spdx_license_id,
        'TemplateBody': template,
    }
    # Drop None/empty values so only populated fields are sent.
    return {key: value for key, value in fields.items() if value}
97c630010c0f782da5fda5dd6113494c1826fd43
85,680
def drop_role(role):
    """Helper method to construct SQL: drop role.

    NOTE(review): *role* is interpolated directly into the statement; callers
    must only pass trusted identifiers.
    """
    return "DROP ROLE IF EXISTS {};".format(role)
144f4b7f0d0951dde847b52cdecaab59751dea82
85,681
def read_labels(path):
    """Read one integer label per line from *path* into a list."""
    with open(path) as handle:
        return [int(line.strip()) for line in handle]
f455d265d6c764c704ab83b6ff6b17fb73efe030
85,682
def dummy_container(create_container):
    """Returns a container that is created but not started.

    The container runs a do-nothing sleep loop so it stays alive once started.
    """
    keepalive_cmd = ["sh", "-c", "while true; do sleep 1; done"]
    return create_container("alpine", command=keepalive_cmd)
03357c495e2c424bcc310e5a4aec57ae7bd52bc2
85,689
def get_last_value_from_timeseries(timeseries):
    """Gets the most recent non-zero value for a .last metric or zero for
    empty data.

    Only the first series in *timeseries* is inspected (dict insertion order).
    """
    if not timeseries:
        return 0
    points = next(iter(timeseries.values()))
    # Scan newest-to-oldest for the first positive value.
    for point in reversed(points):
        if point['y'] > 0:
            return point['y']
    return 0
d06a5d0591bd87b44d968e887ffe45820fc772b8
85,692
def valid_field(field):
    """
    Check if a field is valid.

    Returns True for a non-empty sized value; for falsy input the original
    falsy value itself is returned (None, '', [], ...), preserving the
    short-circuit semantics of ``field and len(field) > 0``.
    """
    if field:
        return len(field) > 0
    return field
920a06650b071cf8494300ae0bfb4de27e25ad9b
85,695
def is_valid(glyph_str):
    """
    Validates if glyph_str is alphanumeric and contains unique chars.

    Parameters
    ----------
    glyph_str : string
        glyph alphabet to be used for number encoding

    Returns
    -------
    True when the glyph string is alphanumeric and each char occurs only once.
    """
    chars_are_unique = len(set(glyph_str)) == len(glyph_str)
    return chars_are_unique and glyph_str.isalnum()
2d1790224313d164e8271b46959c08cfe5fe197f
85,698
def Wrap(values, lower_bound, upper_bound):
    """Wrap-around values within lower (inclusive) and upper (exclusive) bounds.

    Example:
      Wrap(np.array([-200, 60, 270]), -180, 180) = np.array([160, 60, -90])

    Args:
      values: The input numpy array to be wrapped.
      lower_bound: The lower bound of the wrap-around range.
      upper_bound: The upper bound of the wrap-around range.

    Returns:
      A new numpy array with values wrapped around.
    """
    period = upper_bound - lower_bound
    return (values - lower_bound) % period + lower_bound
aaffc9fcd06e65ce7965d38490c9f2dddf6b2a45
85,701
def title(sen):
    """
    Turn text into title case: uppercase the first character and every
    character following a space; lowercase the rest.

    Bug fixes:
    - the first character was selected with ``i == 1`` (the *second*
      character), producing e.g. 'hEllo' for 'hello';
    - at i == 0 the old code also checked ``sen[i - 1]`` which wrapped to the
      *last* character, uppercasing the first letter whenever the text ended
      with a space.

    :param sen: Text to convert
    :return: Converted text
    """
    new_text = ""
    for i in range(len(sen)):
        if i == 0 or sen[i - 1] == " ":
            new_text += sen[i].upper()
        else:
            new_text += sen[i].lower()
    return new_text
e59303eebee37561a2a9181b2328fd53b11bef51
85,703
def process_tex(lines):
    """
    Remove unnecessary section titles from the LaTeX file.

    Drops any sectioning command whose title begins with 'numpy_demo.'.
    """
    # str.startswith accepts a tuple, so one test covers all section levels.
    skip_prefixes = (
        r'\section{numpy_demo.',
        r'\subsection{numpy_demo.',
        r'\subsubsection{numpy_demo.',
        r'\paragraph{numpy_demo.',
        r'\subparagraph{numpy_demo.',
    )
    return [line for line in lines if not line.startswith(skip_prefixes)]
6f3b784851a9ac2ad860d9c62e3a2293ac0fc075
85,704
def reverse_edge(G, edge, copy=False):
    """
    Reverse edge in graph.

    :param G: target graph
    :param edge: target edge (x, y) tuple
    :param copy: if True, copy graph before changing it
    :return: graph with reversed edge
    """
    if copy:
        G = G.copy()
    source, target = edge
    G.remove_edge(source, target)
    G.add_edge(target, source)
    return G
ca4e5dd648a560de9666d96f8c015c86ff8b26e7
85,706
def search_list(l, k, v):
    """Search a list of dicts for the first entry whose key *k* equals *v*.

    Returns the matching entry, or None when no entry matches.
    """
    for entry in l:
        if k in entry and entry[k] == v:
            return entry
    return None
d2fc09f5bce1c9c44b2cde72fb3a2534ea7dcecc
85,707
def to_camel_case(uv: str) -> str:
    """
    Convert an underscore variable to a camel case variable.

    Underscores are dropped and the character following each underscore is
    capitalized; all other characters are kept as-is.

    Args:
        uv: The underscore variable

    Returns:
        str: The camel case variable.
    """
    head, *rest = uv.split('_')
    # Capitalize only the first character of each later piece; the remainder
    # is preserved verbatim (matching the original char-by-char behaviour).
    return head + ''.join(piece[:1].capitalize() + piece[1:] for piece in rest)
36f7850f3d7251fa50301e40f0d70975c899e698
85,708
def create_bounds(values, percentage):
    """
    For a given set of floating point values, create an upper and lower bound.
    Bound values are defined as a percentage above/below the given values;
    zeros map to the fixed pair (1, 0).

    :param values: List of floating point input values.
    :param percentage: The percentage value to use, expected as a fraction in
        the range (0, 1).
    :return: Tuple (u_bound, l_bound), each a regular list.
    """
    if percentage <= 0 or percentage >= 1:
        raise ValueError("Argument 'percentage' is expected to be a float from the range (0, 1).")
    pairs = [
        (val * (1 + percentage), val * (1 - percentage)) if val != 0 else (1, 0)
        for val in values
    ]
    upper, lower = zip(*pairs)
    return list(upper), list(lower)
bdab2dca49e349932aa77bd807ecfef84590c108
85,709
def list_differences(list1, list2):
    """Returns two lists containing the unique elements of each input list.

    Order and duplicates within each surviving side are preserved.
    """
    only_in_first = [item for item in list1 if item not in list2]
    only_in_second = [item for item in list2 if item not in list1]
    return only_in_first, only_in_second
8a70d38f614877e504571f889416961fd77e760f
85,712
def _ensure_list(order, value):
    """Ensures that `value` is a list of length `order`.

    If `value` is an int, turns it into a list ``[value]*order``; otherwise
    asserts the given sequence already has the right length.
    """
    if isinstance(value, int):
        value = [value] * order
    else:
        assert len(value) == order
    return value
c139c636f99c29d362e387b4861cd616740f4b38
85,716
def not_include(in_list1: list, in_list2: list) -> list:
    """
    Return a list of all the elements of in_list1 that are not included in
    in_list2.

    Fix: the original recursed once per element, so long inputs hit Python's
    recursion limit (and paid O(n^2) list concatenation). This iterative
    version preserves the historical output order, which is the *reverse*
    of in_list1.

    :param in_list1: The source list
    :param in_list2: The reference list
    :return: A list which holds the constraint
    """
    return [item for item in reversed(in_list1) if item not in in_list2]
5cd4bea3e507a343e6f18247346fc97a180dc9c0
85,720
def DM_Sum(DMvec, qlist):
    """Helper function to matrix dot product the DM matrix with a qvector.

    Assumes that DMvec is the same length as qlist.
    """
    # Explicit indexing (not zip) so a too-short qlist still raises IndexError.
    total = 0
    for index, coefficient in enumerate(DMvec):
        total += coefficient * qlist[index]
    return total
f3726e6c9bacc260f83d688087409a70031bd9e9
85,723
def check_bb_intersection_on_values(min_b1, max_b1, min_b2, max_b2, used_check=lambda a, b: a >= b):
    """
    Checks if there is an intersection of the given bounding box values.
    Here we use two different bounding boxes, namely b1 and b2. Each of them
    has a corresponding set of min and max values; this works for 2 and 3
    dimensional problems.

    :param min_b1: List of minimum bounding box points for b1.
    :param max_b1: List of maximum bounding box points for b1.
    :param min_b2: List of minimum bounding box points for b2.
    :param max_b2: List of maximum bounding box points for b2.
    :param used_check: The operation used inside of the 1D overlap test. With
        that it is possible to change the collision check from volume and
        surface check to pure surface or volume checks.
    :return: True if the two bounding boxes intersect with each other
    """
    # Inspired by:
    # https://stackoverflow.com/questions/20925818/algorithm-to-check-if-two-boxes-overlap
    def _axes_overlap(lo1, hi1, lo2, hi2):
        # Two 1D intervals overlap iff each one's max clears the other's min.
        return used_check(hi1, lo2) and used_check(hi2, lo1)

    # Boxes intersect iff they overlap along every axis; all() short-circuits
    # like the original chained `and`.
    return all(
        _axes_overlap(lo1, hi1, lo2, hi2)
        for lo1, hi1, lo2, hi2 in zip(min_b1, max_b1, min_b2, max_b2)
    )
8d33a08f77f110f055c31ade083ed0ecccf0a657
85,728
import torch


def get_spatial_meshgrid(x: torch.Tensor, scale=False):
    """
    Get grid which contains spatial coordinates at each pixel location.

    Args:
        x: image of shape [batch_size, channels, height, width] for which we
           want to generate the spatial grid
        scale: if True, divide x coordinates by width and y coordinates by
           height (values in [0, 1))

    Returns:
        Tensor of shape [batch_size, 2, height, width]; channel 0 holds the x
        (column) coordinate, channel 1 the y (row) coordinate.
    """
    batch_size, _, height, width = x.size()
    # Build one row/column ramp each, then broadcast over the batch.
    col_coords = torch.arange(0, width).view(1, -1).repeat(height, 1)
    row_coords = torch.arange(0, height).view(-1, 1).repeat(1, width)
    col_coords = col_coords.view(1, 1, height, width).repeat(batch_size, 1, 1, 1)
    row_coords = row_coords.view(1, 1, height, width).repeat(batch_size, 1, 1, 1)
    grid = torch.cat((col_coords, row_coords), 1).float()
    if x.is_cuda:
        grid = grid.cuda()
    if scale:
        grid[:, 0] = grid[:, 0] / width
        grid[:, 1] = grid[:, 1] / height
    return grid
daaf6e0e6c164bab2c9ce3d932ef849e2c4adb07
85,729
def process_legislator_data(raw_data):
    """
    Clean & (partially) flatten the legislator data.

    Args:
        raw_data (list of nested dictionaries): Legislator data

    Returns:
        dict where key is Bioguide ID and values are legislator info
    """
    return {
        leg["id"]["bioguide"]: {
            "first_name": leg["name"]["first"],
            "last_name": leg["name"]["last"],
            "gender": leg["bio"]["gender"],
            "terms": leg["terms"],
        }
        for leg in raw_data
    }
62b01f3ae83d98233f071a11d45e1e5cbedfb94d
85,730
def same_sign(number_1, number_2):
    """Checks if two numbers have the same sign (zero counts as positive)."""
    first_nonnegative = number_1 >= 0
    second_nonnegative = number_2 >= 0
    return first_nonnegative == second_nonnegative
90b779c4a892fa2f9494b6e822b789cb8703f98f
85,731
def verifyWriteMode(files):
    """
    Checks whether files are writable.

    It is up to the calling routine to raise an Exception, if desired.
    Returns True if all files are writable and False if any are not. In
    addition, for all files found to not be writable, it prints the list of
    names of affected files.

    Fixes: use a ``with`` block so the probe handle is always closed, and
    catch only ``OSError`` instead of a bare ``except`` that swallowed
    everything (including KeyboardInterrupt).

    :param files: a filename or a list of filenames
    :return: bool
    """
    # Accept a single filename by normalising to a list.
    if not isinstance(files, list):
        files = [files]

    # Track each file which is not writable.
    not_writable = []
    for fname in files:
        try:
            # 'a' probes write access without truncating existing content.
            with open(fname, 'a'):
                pass
        except OSError:
            not_writable.append(fname)

    if not_writable:
        print('The following file(s) do not have write permission!')
        for fname in not_writable:
            print('    ', fname)
    return not not_writable
846a27d31154b40cc9485d645aca7bca0fc55078
85,733
import re


def text_has_service(s):
    """Check if a line in a file matches a protobuf service definition.

    Example:
    >>> text_has_service('  service foo {')
    True
    >>> text_has_service('// service nope')
    False

    Fixes: raw string for the regex (avoids invalid-escape warnings) and a
    direct bool() of the search result instead of the if/return pair.
    """
    return bool(re.search(r"^\s*service\s+.+$", s))
4c4b30fd8b6ccc67dd6c583f694315fc438406bf
85,736
def guess_platform(product_id):
    """Guess platform of a product according to its identifier.

    :raises ValueError: when no known prefix matches.
    """
    # Landsat ids are exactly 40 chars and start with 'L'.
    if product_id.startswith('L') and len(product_id) == 40:
        return 'Landsat'
    prefix_to_platform = (
        ('ASA', 'Envisat'),
        ('SAR', 'ERS'),
        ('S1', 'Sentinel-1'),
    )
    for prefix, platform in prefix_to_platform:
        if product_id.startswith(prefix):
            return platform
    raise ValueError('Unrecognized product ID.')
f8adc1357bc0b6a8ad5bc36f9a22266849cfdaa4
85,748
def segmentPlanarSectionIntersection(s0 = "const Dim<3>::Vector&",
                                     s1 = "const Dim<3>::Vector&",
                                     pverts = "const std::vector<Dim<3>::Vector>&",
                                     tol = ("const double", "1.0e-8")):
    """Intersection of a line segment with a polygonal section of a plane.

    The line segment is characterized by its endpoints: seg = (s0, s1).

    The polygonal section of the plane is specified by a series of points:
    plane = pverts[0], pverts[1], ...
    Note there must be at least 3 non-collinear points, and they must be
    passed in in order to draw the polygon.

    Return values are a tuple(char, Vector)
        The Vector is the intersection point (if any)
        The char is a code characterizing the intersection:
            "p" -> The segment lies in the plane (plane)
            "d" -> The pverts points do not define a unique plane (degenerate)
            "1" -> The segment intersects the plane properly
            "0" -> The segment does not intersect the plane
    """
    # NOTE(review): this is a binding-generator stub, not runtime logic — the
    # string "defaults" are the C++ argument types consumed by the wrapper
    # generator, and the returned string names the C++ return type
    # (presumably pybind11; confirm against the generator that imports this).
    return "py::tuple"
701f87186f703eceb79ab8d3c98c5686fd37f084
85,751
import re


def remove_numerics(string):
    """Transform names like 'Tobacco products (16)' into 'Tobacco products'.

    Generalized: strips any trailing parenthesized number — '(5)', '(16)',
    '(123)' — not only exactly two digits as before (backward compatible).
    """
    return re.sub(r" \(\d+\)$", "", string)
145662f092f4044c28ddc2cf213def337bf0b1dc
85,756
def set_difference(src_A: list, dst_B: list) -> list:
    """Returns set difference of src_A and dst_B.

    A\\B or A-B = {x: x ∈ A and x ∉ B} a.k.a LEFT OUTER JOIN;
    only values in src_A that are NOT in dst_B are returned (sorted, deduped).
    Non-list inputs yield an empty list.
    """
    if not (isinstance(src_A, list) and isinstance(dst_B, list)):
        return []
    return sorted(set(src_A) - set(dst_B))
6f02de99caccba3ca81e8b7cbfe800ea91480cfa
85,761
def enc(val):
    """Returns the passed value utf-8 encoded if it is a string, or unchanged
    if it is already bytes.
    """
    try:
        encoded = val.encode("utf-8")
    except AttributeError:
        # Not a string — pass through untouched (bytes, ints, ...).
        return val
    return encoded
c0476e02790d2b7203e7464662f6936affbf312d
85,762
def _get_ipaddr(req):
    """
    Return the ip address for the current request (or 127.0.0.1 if none found)
    based on the X-Forwarded-For headers.
    """
    # access_route is the X-Forwarded-For chain; the first hop is the client.
    if not req.access_route:
        return req.remote_addr or '127.0.0.1'
    return req.access_route[0]
5f9d2af65428f752aa998346c6eb10b359ad8032
85,765
def get_docker_prefix(registry):
    """Return the proper registry host prefix, or '' for unknown registries."""
    registry_hosts = {
        "quay": 'quay.io',
        "github": 'ghcr.io',
    }
    return registry_hosts.get(registry, '')
cdbc342d978ed56c0b20e8500efe17f285ec4cf6
85,771
import logging
import re


def get_queue_id_of_received_packet(duthosts, rand_one_dut_hostname, rand_selected_interface):
    """
    Get queue id of the packet received on destination.

    :param duthosts: mapping of hostname -> DUT host object exposing ``shell``
    :param rand_one_dut_hostname: key selecting the DUT under test
    :param rand_selected_interface: sequence whose first element is the
        interface name to query (e.g. 'Ethernet124')
    :return: integer queue id, or None when no matching counter row is found
    """
    duthost = duthosts[rand_one_dut_hostname]
    # Runs on the DUT; only unicast ("UC") queue rows are kept.
    queue_counter = duthost.shell('show queue counters {} | grep "UC"'.format(rand_selected_interface[0]))['stdout']
    logging.info('queue_counter:\n{}'.format(queue_counter))
    """
    regex search will look for following pattern in queue_counter o/p for interface
    ----------------------------------------------------------------------------_---
    Port          TxQ    Counter/pkts     Counter/bytes     Drop/pkts    Drop/bytes
    -----------  -----  --------------  ---------------  -----------  --------------
    Ethernet124    UC1              10             1000            0               0
    """
    # NOTE(review): '10+' matches packet counts of 10, 100, 1000, ... — this
    # presumably relies on the test sending exactly 10 packets; confirm with
    # the calling test case.
    result = re.search(r'\S+\s+UC\d\s+10+\s+\S+\s+\S+\s+\S+', queue_counter)
    if result is not None:
        output = result.group(0)
        output_list = output.split()
        # output_list[1] is e.g. 'UC1'; [2] takes the digit after 'UC'.
        # Assumes single-digit queue ids (UC0..UC9) — TODO confirm.
        queue = int(output_list[1][2])
    else:
        logging.info("Error occured while fetching queue counters from DUT")
        return None
    return queue
b6d20ea26324c1ff09eef4b059f8ae69cda846b9
85,772
def __parse_gfc_entry(line):
    """Return the values for both coefficients in a GFC file line.

    :return: (degree n, order m, C coefficient, S coefficient)
    """
    fields = line.split()
    degree = int(fields[1])
    order = int(fields[2])
    return degree, order, float(fields[3]), float(fields[4])
036da71761cd5403adf744f68ac9af1ff276ceb6
85,776
def check_sum(data):
    """Compute a one's-complement style checksum.

    checksum = 255 - ((id + length + data1 + data2 + ... + dataN) & 255)
    """
    low_byte = sum(data) & 0xFF
    return 255 - low_byte
a8588e5424b88eeb644696d212abc4dfd2b3d968
85,777
def span(boundaries, from_grid_unit, to_grid_unit):
    """Returns the distance between two grid units.

    Grid units are 1-based; boundaries[i] is the end of unit i and
    boundaries[i-1] its start.
    """
    assert 0 < from_grid_unit <= to_grid_unit, "Grid unit number out of range"
    return boundaries[to_grid_unit] - boundaries[from_grid_unit - 1]
fb0e5a5af51f348432dd5a4de920b22b7b7d8c3b
85,780
def code() -> str:
    """
    Example G-code module, a drawing of the number "5".
    Please simulate first, before milling.
    """
    # Absolute positioning (G90) in the XY plane (G17); rapid moves (G00)
    # and clockwise arcs (G02) trace the digit.
    return """
G90
G17
G00 X0 Y0
G00 X0 Y12.5
G02 I7.5 J0 X7.5 Y20
G00 X17.5 Y20
G02 I0 J-7.5 X25.001 Y12.5
G00 X25.001 Y0
G00 X45 Y0
G00 X45 Y20
"""
5c84c5fdca6a3fce959a11f47716fde4db199edb
85,781
def kelvin2celsius(T):
    """Convert a temperature from Kelvin to degrees Celsius."""
    absolute_zero_offset = 273.15
    return T - absolute_zero_offset
2f5428ee9e0c5ba206e2af85a9e18f8f81c09f1c
85,782
import math


def compute_minimum_namespaces(total_size, data, parity):
    """
    Compute the number and size of zerodb namespaces required to fulfill the
    erasure coding policy.

    :param total_size: total size of the s3 storage in GB
    :type total_size: int
    :param data: data shards number
    :type data: int
    :param parity: parity shard number
    :type parity: int
    :return: tuple with (number, size) of zerodb namespaces required
    :rtype: tuple
    """
    MAX_SHARD_SIZE = 1000  # 1TB

    total_shards = data + parity
    # Raw capacity needed to hold data + parity for `total_size` of payload.
    required_size = math.ceil((total_size * total_shards) / data)
    # Minimum shard count plus a 25% safety margin.
    shard_count = math.ceil(total_shards * 1.25)
    shard_size = math.ceil(required_size / total_shards)
    if shard_size > MAX_SHARD_SIZE:
        # Cap the shard size and grow the shard count instead.
        shard_size = MAX_SHARD_SIZE
        shard_count = math.ceil(required_size / shard_size)
    return shard_count, shard_size
d0d8091d2161ec1443378913d99da4ffcab94ecc
85,785
def row_multiply(matrix, row, factor):
    """
    Multiply a row of *matrix* by *factor*, in place.

    :param matrix: List of lists of equal length containing numbers
    :param row: index of a row
    :param factor: multiplying factor
    :return: the same matrix, with the selected row scaled
    """
    matrix[row] = [entry * factor for entry in matrix[row]]
    return matrix
94b6438bcaf43a95f590236f452800d8e41d26c5
85,789
def build_sg_graph_dict(sg):
    """ Builds a dictionary of dependencies for each node, where each node can
    have either component or supplier dependents or both. If a node has no
    dependents of a certain type, the key is missing. A node with no
    dependents has an empty dictionary."""
    def type_tag(node):
        # A node is expected to carry exactly one of the "component"/"supplier"
        # tags; [0] raises IndexError otherwise — presumably an upstream
        # invariant, TODO confirm.
        return list(filter(lambda t: t in ("component", "supplier"), sg.nodes[node].tags))[0]
    # Start every node with an empty bucket dict.
    g = {n: {} for n in sg.nodes.keys()}
    for e in sg.edges:
        # Edges tagged "potential" are excluded from the dependency graph.
        if "potential" not in e.tags:
            # Append e.src to the destination's list keyed by the source type.
            g[e.dst][type_tag(e.src)] = g[e.dst].get(type_tag(e.src), []) + [e.src]
    return g
ecd8c1b0488b04a2cde4a1b030d96de5a78cc948
85,793
import json


def get_config(config_file):
    """
    Read a JSON config file and return its parsed contents.

    :param config_file: name of the config file as string
    :return: object with config information
    """
    with open(config_file, 'r') as handle:
        return json.load(handle)
bd1a61bf76760ee9f41270f3f841e2af0a7c9457
85,797
def _concat_path(prefix, component):
    """Add a component to a path.

    Args:
        prefix: Prefix of the path. If this is empty, it isn't included in the
            returned path.
        component: Component to add to the path.

    Returns:
        Concatenated path string.
    """
    if not prefix:
        return component
    return prefix + '/' + component
a5b98167d0b6c586af7898e3c4ebbc22e3c9c832
85,798
def _read_magic(f, magic):
    """Read and compare the magic value in a data file.

    Arguments:
        f - Input file (opened in binary mode).
        magic - Expected magic value.

    Returns:
        True on success, False otherwise.
    """
    header = f.read(len(magic))
    return header == magic
1c7e5a22c73c38520ed3433ec87d997d4dea683c
85,801
import re


def isimei(value):
    """
    Return whether or not given value is an imei.

    If the value is an imei, this function returns ``True``, otherwise
    ``False``.

    Examples::

        >>> isimei('565464561111118')
        True
        >>> isimei('123456789012341')
        False

    :param value: string to validate imei
    """
    # Strip spaces/dashes, then require exactly 15 digits.
    pattern = re.compile(r'^[0-9]{15}$')
    sanitized = re.sub(r'[ -]', '', value)
    if not pattern.match(sanitized):
        return False
    # Luhn check over the first 14 digits, scanning right-to-left and
    # doubling every other digit starting with the one next to the checksum.
    total_sum = 0
    should_double = True
    for ch in sanitized[-2::-1]:
        digit = int(ch)
        if should_double:
            digit *= 2
            if digit >= 10:
                digit -= 9
        total_sum += digit
        should_double = not should_double
    # [-1] maps a computed value of 10 to the check digit '0'.
    expected_check = str(10 - (total_sum % 10))[-1]
    return expected_check == sanitized[-1]
462f0f0f61c5495e78b5608ec9c9943f64445519
85,803
def parse_num_suffix(num):
    """
    Parse a string containing a number with a possible suffix like 'M' or 'G',
    and multiply accordingly.

    >>> parse_num_suffix('10M')
    10000000
    >>> parse_num_suffix('5G')
    5000000000
    >>> parse_num_suffix('3k')
    3000
    >>> parse_num_suffix('500')
    500

    Parameters
    ----------
    num : str
        The number with possible suffix to parse

    Returns
    -------
    int
        An integer multiplied accordingly to the suffix (None for empty input)

    Raises
    ------
    ValueError
        When the suffix is not one of G/M/K (case-insensitive).
    """
    if not num:
        return None
    suffixes = {
        'G': 1000000000,
        'M': 1000000,
        'K': 1000
    }
    last_char = num[-1]
    if not last_char.isalpha():
        return int(num)
    multiplier = suffixes.get(last_char.upper())
    if multiplier is None:
        raise ValueError(
            "'{}' is not a valid number. Supported suffixes: {}".format(
                num, ", ".join(iter(suffixes.keys()))
            ))
    return int(num[:-1]) * multiplier
651dc397616ef6f8dab881d774e67eef8a52f52c
85,810
def is_number(n):
    """Check if something is a number (int, float or complex).

    Note: bool is an int subclass, so booleans also count.
    """
    return isinstance(n, (int, float, complex))
5a4f9d76f5c5300f6ed8242674e44f6912182221
85,813
def get_impute_str(column_name, imputation):
    """
    Creates string for sql imputation.

    Args:
        - column_name: str name of column to be imputed
        - imputation: str indicating method of imputation

    Returns:
        - impute_sql: str sql imputation code
        - impute_col_flag: boolean; if true an imputation flag column will be
          created
    """
    # An '_noflag' suffix suppresses the imputation-flag column.
    impute_col_flag = not imputation.endswith('_noflag')

    # Map the imputation method prefix to its SQL expression.
    if imputation.startswith('zero'):
        impute_sql = '0'
    elif imputation.startswith('inf'):
        impute_sql = '100000000'
    elif imputation.startswith(('mean', 'avg')):
        impute_sql = f'avg({column_name}) over ()'
    elif imputation.startswith('min'):
        impute_sql = f'min({column_name}) over ()'
    elif imputation.startswith('max'):
        impute_sql = f'max({column_name}) over ()'
    else:
        raise ValueError(f'Unrecognized impute method {imputation}.')
    return (impute_sql, impute_col_flag)
eda9a4b479b89ace23e397446acd0bc4deb5558f
85,817
from typing import MutableMapping
from typing import List


def flatten_dict(
    d: MutableMapping, parent_key: str = "", sep: str = "."
) -> MutableMapping:
    """Flattens nested dictionary structure.

    Args:
        d : dictionary to be flattened
        parent_key : optional parent key identifier
        sep : optional separator between parent and leaf keys

    Returns:
        Flattened dictionary
    """
    flat = {}
    for key, value in d.items():
        compound_key = parent_key + sep + key if parent_key else key
        if isinstance(value, MutableMapping):
            # Recurse into nested mappings, extending the compound key.
            flat.update(flatten_dict(value, compound_key, sep=sep))
        else:
            flat[compound_key] = value
    return flat
af4d2fde2aa77b659ae04d98dad25715a797ea62
85,820
def update_params(params, learning_rate, sigma, noise_array, standardized_reward_w_noise):
    """
    Applies the updates to the given parameters.

    :param params: numpy array of shape (nbr_params,) of the parameters
    :param learning_rate: float - the learning rate
    :param sigma: float - the coefficient to multiply the noise by
    :param noise_array: numpy array of shape (generation_size, nbr_params) of random noise
    :param standardized_reward_w_noise: numpy array of shape (nbr_params,) of the standardized reward with noise
    :return: numpy array of shape (nbr_params,) of the updated parameters
    """
    generation_size = len(noise_array)
    # Scale the reward-weighted update by the population size and sigma.
    step_scale = learning_rate / (generation_size * sigma)
    return params + step_scale * standardized_reward_w_noise
cb688217da8f422bf83343ffc6596f7caedc1d68
85,823
import re


def remove_special_characters(text: str) -> str:
    """Remove all characters except ASCII letters and whitespace.

    Args:
        text: String to filter for special characters.

    Return:
        text: String with special characters removed.
    """
    # Bug fix: the previous pattern used the range 'A-z', which also
    # matched the punctuation characters between 'Z' and 'a' in ASCII
    # ('[', '\\', ']', '^', '_', '`') and therefore failed to strip them.
    pattern = r'[^a-zA-Z\s]'
    text = re.sub(pattern, '', text)
    return text
71d7d84108300e66e80f18d494781a51454ca655
85,824
def str_attach(string, attach):
    """
    Inserts '_' followed by attach in front of the right-most '.' in string
    and returns the resulting string.

    For example: str_attach(string='sv.new.pkl', attach='raw')
        -> 'sv.new_raw.pkl'
    """
    # rpartition splits on the LAST dot; for a dot-free string, stem and
    # dot are both empty, so the suffix is prepended (same as original).
    stem, dot, extension = string.rpartition('.')
    return f"{stem}_{attach}{dot}{extension}"
970333be39a1e7eb5bb4949e4f70075c39d6ca56
85,842
def _get_parser_name(var_name): """ Parser name composer. Args: var_name (str): Name of the variable. Returns: str: Parser function name. """ return "get_%s" % var_name
79c2c0eaacb691da375bb9b41540a454cd2dfa6a
85,845
def change(value, reference):
    """
    Calculate the relative change between a value and a reference point.

    Returns None when the reference is falsy (None or zero), which also
    guards against division by zero. A None value is treated as 0.
    """
    if reference:
        delta = (value or 0) - reference
        return delta / float(reference)
    return None
bad72846f344ff7416f63ed0becff6f198e3591c
85,846
def convert_timestamp_to_mysql(timestamp):
    """
    Convert timestamp to the format used in MySQL

    :param timestamp: String containing ACI timestamp
    :return: String containing MySQL timestamp
    """
    # Split the ISO-8601 style value on 'T'; exactly one 'T' is expected.
    date_part, time_part = timestamp.split('T')
    # Drop the UTC offset ('+...') and any fractional seconds ('.').
    time_part = time_part.split('+')[0].split('.')[0]
    return date_part + ' ' + time_part
b6936265bc85337d7ebca9f93c54747651f73c45
85,848
def get_atom_numbers(structure):
    """get a list of atom numbers composing a biopython structure

    it gives you the atom numbers from the atoms composing the given
    biopython structure

    Parameters
    ------------
    structure : biopython structure or list of atoms

    Returns
    -----------
    list(int)
        the list of all the atom numbers composing the structure

    Notes
    ------------
    useful when defining an area of the structure (like an alchemical
    region) with atom indexes (atom numbers)
    """
    # Accept either a structure object exposing get_atoms() or a plain
    # iterable of atoms.
    if hasattr(structure, 'get_atoms'):
        atoms = structure.get_atoms()
    else:
        atoms = structure
    return [atom.get_serial_number() for atom in atoms]
36e5164eb4acc1eac3e5bc8463b2da61e1289e39
85,852
import re


def timeleft_to_seconds(data):
    """Utility function that'll take a string of format NdNhNmNs and return
    a number of seconds.

    A plain digit string is returned as-is (as an int); missing components
    default to 0.
    """
    if data.isdigit():
        return int(data)
    # Raw string fixes the invalid '\d' escape sequences, which raise
    # SyntaxWarning/DeprecationWarning on modern Python.
    match = re.match(
        r'(?:(?P<days>\d+)d)?(?:(?P<hours>\d+)h)?'
        r'(?:(?P<minutes>\d+)m)?(?:(?P<seconds>\d+)s)?',
        data,
    )
    if match is None:  # defensive: the all-optional pattern always matches
        return 0
    days = int(match.group('days') or 0)
    hours = int(match.group('hours') or 0)
    minutes = int(match.group('minutes') or 0)
    seconds = int(match.group('seconds') or 0)
    return days * 86400 + hours * 3600 + minutes * 60 + seconds
983e8c84f1086b9972e11c9a3766ea8e705e1d68
85,856
def map_type(name, field_name):
    """Maps a Sysmon event type to VAST type."""
    if name == "integer":
        # Port fields get the dedicated 'port' type; all other integers
        # map to the unsigned 'count' type.
        if field_name in ("SourcePort", "DestinationPort"):
            return "port"
        return "count"
    simple_types = {
        "string": "string",
        "date": "timestamp",
        "boolean": "bool",
        "bool": "bool",
        "ip": "addr",
    }
    if name in simple_types:
        return simple_types[name]
    raise NotImplementedError(name)
7944218cc599be89b0d02025114ae75b163f144d
85,858
import re def _shell_tool_safe_env_name(tool_name: str) -> str: """Replace any characters not suitable in an environment variable name with `_`.""" return re.sub(r"\W", "_", tool_name)
8577720bd16591f88a7c04f0095b1dd65a0c6ab8
85,859
def save_data(data, labels, path):
    """
    Save images and labels.

    'data' and 'labels' are sequences of image objects exposing ``save``;
    'path' is the directory prefix containing the 'image/' and 'label/'
    subfolders. Files are named with zero-padded indexes.
    """
    # Zero-pad file names to the number of digits in the dataset size so
    # lexicographic and numeric orderings agree.
    width = len(str(len(data)))
    image_template = path + "image/{b:0" + str(width) + "d}.png"
    label_template = path + "label/{b:0" + str(width) + "d}.png"
    for index, image in enumerate(data):
        image.save(image_template.format(b=index))
        labels[index].save(label_template.format(b=index))
    return None
468c0ecfa085fe63fd9bf4a479b899d7b9c252eb
85,860
import logging


def create_logger(logger_name: str, log_file: str) -> logging.Logger:
    """
    Creates a logger for logging to a file.

    Parameters
    ----------
    logger_name : str
        The name of the logger.
    log_file : str
        The path of the file to which write the logs.

    Returns
    -------
    logging.Logger
        The logger.
    """
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.INFO)

    # Append-mode file handler with a timestamped message format.
    handler = logging.FileHandler(log_file, mode='a')
    handler.setLevel(logging.INFO)
    handler.setFormatter(logging.Formatter(
        fmt='%(asctime)s - %(name)s - %(message)s',
        datefmt='%d-%b-%y %H:%M:%S',
    ))

    logger.addHandler(handler)
    return logger
77fbd8c08c8add9949279e032f2ae4444dc3a457
85,861
def positive_integer(anon, obj, field, val):
    """
    Returns a random positive integer (for a Django PositiveIntegerField)
    """
    # Delegate to the anonymizer's faker; obj and val are ignored, matching
    # the standard (anon, obj, field, val) replacer signature.
    faker = anon.faker
    return faker.positive_integer(field=field)
f6dc73ea33731d4788e72598f266df9c3436d322
85,864
import itertools def _n_enr_states(dimensions, n_excitations): """ Calculate the total number of distinct ENR states for a given set of subspaces. This method is not intended to be fast or efficient, it's intended to be obviously correct for testing purposes. """ count = 0 for excitations in itertools.product(*map(range, dimensions)): count += int(sum(excitations) <= n_excitations) return count
ab974f8e0028928f1671c51537c9b4166c337958
85,874
def quantumespresso_magnetic_code(aiida_local_code_factory):
    """Get a quantumespresso_magnetic code.

    Builds a code object through the supplied ``aiida_local_code_factory``
    callable, using ``diff`` as a stand-in executable and the
    ``quantumespresso_magnetic`` entry point.
    """
    return aiida_local_code_factory(
        executable='diff',
        entry_point='quantumespresso_magnetic',
    )
913c8efc7bb2d1851c743754ea04df44c06ce6d7
85,876
def truncate(predictions, targets, allowed_len_diff=3):
    """Ensure that predictions and targets are the same length.

    Arguments
    ---------
    predictions : torch.Tensor
        First tensor for checking length.
    targets : torch.Tensor
        Second tensor for checking length.
    allowed_len_diff : int
        Length difference that will be tolerated before raising an exception.
    """
    pred_len = predictions.shape[1]
    target_len = targets.shape[1]
    diff = pred_len - target_len
    if diff == 0:
        return predictions, targets
    if abs(diff) > allowed_len_diff:
        raise ValueError(
            "Predictions and targets should be same length, but got %s and "
            "%s respectively." % (pred_len, target_len)
        )
    # Trim the longer tensor down to the shorter one's length.
    if diff < 0:
        return predictions, targets[:, :pred_len]
    return predictions[:, :target_len], targets
183dbe4c3400fbccea8a76850afc29cdf06a84dc
85,877
import math


def circle(radius):
    """Calculate the area of a circle with the given radius."""
    area = math.pi * (radius ** 2)
    return area
80963dcf5d57c6f8b4ec1188f41ee1f7ddab6667
85,879
def get_main_type_mock(path):
    """Detect audio or video type from file extension.

    Used to mock real method which needs actual files to be present.

    Args:
        path (path.Path): Path to a file.

    Returns:
        str: "video" if file extension is "mp4" or "mkv", "audio" if file
        extension is "ogg" or "flac", None otherwise.
    """
    # Table lookup replaces the if-chain; unknown extensions yield None.
    kind_by_ext = {
        ".mp4": "video",
        ".mkv": "video",
        ".ogg": "audio",
        ".flac": "audio",
    }
    return kind_by_ext.get(path.ext)
93675346573b0229b422a9c0e59e29689f99f993
85,881
def check_user(conn, name, email):
    """
    Check if a user is registered in the database

    :param conn: Connection to the SQLite database
    :param name: User name (currently unused in the lookup)
    :param email: User email address
    :returns: boolean
    """
    cur = conn.cursor()
    # Bug fix: sqlite3 uses '?' placeholders; the '%s' style is only valid
    # for drivers like MySQLdb/psycopg2 and raises
    # sqlite3.OperationalError here.
    cur.execute("SELECT * FROM users WHERE user_email=?", (email,))
    data = cur.fetchone()
    return data is not None
3713ea563caf9903e5aab6af3aa2229621845032
85,882
import pathlib


def read_pair_file(filepath):
    """
    Read in a pair file

    :param filepath: Path to the pair file
    :returns: A list of paired files (one whitespace-split list per
        non-empty line)
    """
    pairs = []
    with pathlib.Path(filepath).open('rt') as handle:
        for raw_line in handle:
            stripped = raw_line.strip()
            # Skip blank lines entirely.
            if stripped:
                pairs.append(stripped.split())
    return pairs
a56ba384a5b907e1d37baf2d4ed690aa005c18f9
85,883
def _shift_unc(design, floor=.05, ceil=.95, lower_to=.25, ceil_to=.75): """ This replicates the "uncontested.default" method from JudgeIt, which replaces the uncontested elections (those outside of the (.05, .95) range) to (.25,.75). """ indicator = ((design.vote_share > ceil).astype(int) + (design.vote_share < floor).astype(int) * -1) design['uncontested'] = indicator lowers = design.query('vote_share < @floor').index ceils = design.query('vote_share > @ceil').index design.ix[lowers, 'vote_share'] = lower_to design.ix[ceils, 'vote_share'] = ceil_to return design
cdea1e966ef4da38208ea703f1c82b7e2079f804
85,884
from typing import List
from typing import Any


def matches_one_item(items: List[Any], container_list: List[Any]) -> bool:
    """
    Checks if one of the items inside a list is inside another list.

    Args:
        items (List[Any]): The first list to iterate its items.
        container_list (List[Any]): The list to check if the items inside it.

    Returns:
        bool: Whether one of the items is inside container_list.
    """
    return any(item in container_list for item in items)
ed9631fb2a4e56f7631bd8f16fc4af69f7cd007a
85,886
def get_expr_for_vep_gene_ids_set(vep_transcript_consequences_root="va.vep.sorted_transcript_consequences", only_coding_genes=False):
    """Expression to compute the set of gene ids in VEP annotations for this variant.

    Args:
        vep_transcript_consequences_root (string): path of VEP
            transcript_consequences root in the struct
        only_coding_genes (bool): If set to True, non-coding genes will be
            excluded.
    Return:
        string: expression
    """
    pieces = [vep_transcript_consequences_root]
    if only_coding_genes:
        # Restrict to protein-coding transcripts before collecting genes.
        pieces.append(".filter( x => x.biotype == 'protein_coding')")
    pieces.append(".map( x => x.gene_id ).toSet")
    return "".join(pieces)
916a151072abba19cfe979e98602c9521ae9f66c
85,891
def pick_features(d, subset=None):
    """
    Drop columns from input Pandas data frame that should not be used as
    features, and return the result.

    subset: list of specific features to keep, optional
    """
    if subset is not None:
        # Keep only the requested columns by dropping everything else.
        return d.drop(d.columns.difference(subset), axis=1)

    # Without a subset, drop the columns known to not be features.
    rate_cols = [f"daily_rate_geq_{x}" for x in (1, 2, 3, 5)]
    non_features = ["admission_id", "person_id", "r", "time_at_risk",
                    "daily_rate_not_zero"] + rate_cols
    return d.drop(non_features, axis=1)
b35e7a09a3b7b2e0d72e529e52b4bb37a499ac5e
85,895
def make_hint(segment, part):
    """Create a playlist response for the preload hint."""
    uri = f"./segment/{segment}.{part}.m4s"
    return f'#EXT-X-PRELOAD-HINT:TYPE=PART,URI="{uri}"'
72f84f0462f9eb9284d647ed92ff96b386756878
85,897
def normalize_hu(image, min_window=-1200.0, max_window=600.0):
    """
    Normalize image HU value to [-1, 1] using window of
    [min_window, max_window].
    """
    window_span = max_window - min_window
    # Rescale to [0, 1], then to [-1, 1], and clip any out-of-window values.
    scaled = (image - min_window) / window_span
    return (scaled * 2 - 1.0).clip(-1, 1)
4dd70e06211c6346f516b8b1f2fa8407af15f2a9
85,904
def get_calculator(name):
    """Return the calculator class for *name*.

    Imports the module ``calculator.<name>`` and returns the class inside
    it whose name is ``name.title()`` (e.g. ``name='basic'`` resolves to
    class ``Basic`` in ``calculator.basic``).

    :param name: lowercase module name under the ``calculator`` package
    :returns: the class object (not an instance)
    :raises ImportError: if the module does not exist
    :raises AttributeError: if the class is missing from the module
    """
    classname = name.title()
    # The non-empty fromlist makes __import__ return the submodule itself
    # rather than the top-level 'calculator' package.
    module = __import__('calculator.' + name, {}, None, [classname])
    Calculator = getattr(module, classname)
    return Calculator
db6805eb24301afb3d3677fe5d4d317dd8169145
85,906
def get_common_xfix(series, xfix="suffix"):
    """
    Finds the longest common suffix or prefix of all the values in a series.

    :param series: pandas Series of strings to compare.
    :param xfix: "suffix" to compare string endings; anything else
        (typically "prefix") compares string beginnings.
    :returns: str, the longest string shared at the chosen end of every
        value in the series (may be empty).
    """
    common_xfix = ""
    while True:
        # Grow the candidate by one character: the last len+1 characters
        # for a suffix, the first len+1 for a prefix.
        common_xfixes = (
            series.str[-(len(common_xfix) + 1) :]
            if xfix == "suffix"
            else series.str[: len(common_xfix) + 1]
        )  # first few or last few characters
        if (
            common_xfixes.nunique() != 1
        ):  # we found the character at which we don't have a unique xfix anymore
            break
        elif (
            common_xfix == common_xfixes.values[0]
        ):  # the entire first row is a prefix of every other row
            break
        else:  # the first or last few characters are still common across all rows - let's try to add one more
            common_xfix = common_xfixes.values[0]
    return common_xfix
d35c1657fa26bea7e8ead2e035e5072e8f1aed79
85,912
import warnings


def sample_frames(steps, max_frames):
    """Sample uniformly from given list of frames.

    Args:
        steps: The frames to sample from.
        max_frames: Maximum number of frames to sample.

    Returns:
        The list of sampled frames, in original order (sampling starts
        from the end of the list).
    """
    total = len(steps)
    if max_frames > total:
        warnings.warn(
            f"Less than {max_frames} frames provided, producing {total} frames."
        )
        max_frames = total
    interval = total // max_frames
    picked = []
    # Walk backwards, keeping every interval-th index until enough frames
    # have been collected.
    for index in reversed(range(total)):
        if len(picked) >= max_frames:
            break
        if index % interval == 0:
            picked.append(steps[index])
    picked.reverse()
    return picked
fdbb377b47a82b23d19f05b9b0d587b3d4d32e64
85,920
from typing import List


def find_factors(number: int) -> List[int]:
    """
    Get math factors for this number.

    Number 1 is not returned as factor, because is evident.

    :param number: Number to get factors from.
    :return: A list with found factors, in ascending order.
    """
    return [divisor for divisor in range(2, number + 1)
            if number % divisor == 0]
41a00b9141c0e292b98c678119b836d9bac69899
85,921
from typing import Dict from typing import Any def _process_logging(cluster: Dict) -> bool: """ Parse cluster.logging.clusterLogging to verify if at least one entry has audit logging set to Enabled. """ logging: bool = False cluster_logging: Any = cluster.get('logging', {}).get('clusterLogging') if cluster_logging: logging = any(filter(lambda x: 'audit' in x['types'] and x['enabled'], cluster_logging)) # type: ignore return logging
15fc6c359cb3c8e5620b269b381a9ac5c1056da6
85,922
import yaml


def read_config(filename):
    """Read a .yaml config file and return its contents as a dict.

    Expected structure::

        robot:
          gap: 0
          gap_ignore: 0
          amount: 0
        exchange:
          client_id: ''
          client_secret: ''
          uri: ''
          instrument: ''

    Args:
        filename (string): configuration file name

    Returns:
        dict: dict with parameters, or None when parsing fails (the error
        is printed rather than raised).
    """
    with open(filename) as stream:
        try:
            return yaml.safe_load(stream)
        except yaml.YAMLError as e:
            print("Read config file error: {}".format(e))
e4c2bd20512a9c512bd2ad48699a49aa041613ae
85,923
def _getLogHandlers(logToFile=True, logToStderr=True): """Get the appropriate list of log handlers. :param bool logToFile: If ``True``, add a logfile handler. :param bool logToStderr: If ``True``, add a stream handler to stderr. :rtype: list :returns: A list containing the appropriate log handler names from the :class:`logging.config.dictConfigClass`. """ logHandlers = [] if logToFile: logHandlers.append('rotating') if logToStderr: logHandlers.append('console') return logHandlers
31294b99e1db76a3392a3f4b40d87e73e4c12407
85,929
import inspect


async def asyncfn_executor_wrapper(fn, *args, **kwargs):
    """
    Invoke *fn*, awaiting the result when it is a coroutine function and
    calling it directly otherwise.

    Return:
        Function result
    """
    if inspect.iscoroutinefunction(fn):
        return await fn(*args, **kwargs)
    return fn(*args, **kwargs)
3627a3988e47b8478c9eae7f8658b2f6b09e9372
85,936
from typing import OrderedDict


def pool_list_table_format(result):
    """Format pool list as a table."""
    rows = []
    for pool in result:
        # Pools with a virtualMachineConfiguration are IaaS; otherwise PaaS.
        rows.append(OrderedDict([
            ('Pool Id', pool['id']),
            ('State', pool['state']),
            ('Allocation State', pool['allocationState']),
            ('VM Size', pool['vmSize']),
            ('VM Count', pool['currentDedicated']),
            ('Type', 'IaaS' if pool['virtualMachineConfiguration'] else 'PaaS'),
        ]))
    return rows
b7db3b5688098e2cfec408a219298ef17bdd7e77
85,938