content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
from typing import Callable
from typing import Dict
from typing import Optional


def register_scale(
    convert_to: Optional[Dict[str, Callable]] = None,
    convert_from: Optional[Dict[str, Callable]] = None,
) -> Callable[[Callable], Callable]:
    """Decorator used to register new time scales.

    The scale name is read from the ``.scale`` attribute of the Time class.

    Args:
        convert_to: Functions used to convert to other scales, keyed by
            target scale name.
        convert_from: Functions used to convert from other scales, keyed by
            source scale name.

    Returns:
        Decorator registering the scale.
    """

    def wrapper(cls: Callable) -> Callable:
        name = cls.scale
        # Register the class under its family (cls_name) and scale name.
        _SCALES[cls.cls_name][name] = cls
        conversions = _CONVERSIONS.setdefault(cls.cls_name, dict())
        if convert_to:
            for to_scale, converter in convert_to.items():
                conversions[(name, to_scale)] = converter
        if convert_from:
            for from_scale, converter in convert_from.items():
                conversions[(from_scale, name)] = converter
        return cls

    return wrapper
3adcd0d53a3c1622f81a1c444da19c4f2b332d14
31,100
import torch


def get_top_down_ground_truth_static_ego(env_id, start_idx, img_w, img_h, map_w, map_h):
    """Build the top-down path/endpoint ground-truth label for an environment,
    rotated about the start point and heading (egocentric start frame).

    :param env_id: environment identifier used to load the path
    :param start_idx: index into the path where the segment starts
    :param img_w: output label width in pixels
    :param img_h: output label height in pixels
    :param map_w: map width in pixels
    :param map_h: map height in pixels
    :return: 1 x 2 x H x W float tensor; channel 0 is the path distribution,
        channel 1 the endpoint distribution, each standardized separately
    """
    path = load_path(env_id)
    start_pt, dir_yaw = tdd.get_start_pt_and_yaw(path, start_idx, map_w, map_h, 0)
    affine = tdd.get_affine_matrix(start_pt, dir_yaw, img_w, img_h)

    seg_labels = np.zeros([img_w, img_h, 2]).astype(float)
    path_in_img = cf_to_img(path, np.array([map_w, map_h]))
    gauss_sigma = map_w / 32

    seg_labels[:, :, 0] = tdd.plot_path_on_img(seg_labels[:, :, 0], path_in_img)
    if len(path_in_img) > 1:
        seg_labels[:, :, 1] = tdd.plot_dot_on_img(seg_labels[:, :, 1],
                                                  path_in_img[-1], gauss_sigma)

    seg_labels_rot = tdd.apply_affine(seg_labels, affine, img_w, img_h)
    seg_labels_rot[:, :, 0] = gaussian_filter(seg_labels_rot[:, :, 0], gauss_sigma)
    seg_labels_rot[:, :, 1] = gaussian_filter(seg_labels_rot[:, :, 1], gauss_sigma)

    # BUGFIX: the debug visualization was left enabled (DEBUG = True), which
    # opened OpenCV windows and blocked on cv2.waitKey(0) on every call.
    DEBUG = False
    if DEBUG:
        cv2.imshow("l_traj", seg_labels_rot[:, :, 0])
        cv2.imshow("l_endpt", seg_labels_rot[:, :, 1])
        cv2.waitKey(0)

    # Standardize both channels separately (each has mean zero, unit variance)
    seg_labels_path = standardize_2d_prob_dist(seg_labels_rot[:, :, 0:1])
    seg_labels_endpt = standardize_2d_prob_dist(seg_labels_rot[:, :, 1:2])
    seg_labels_rot = np.concatenate((seg_labels_path, seg_labels_endpt), axis=0)

    seg_labels_t = torch.from_numpy(seg_labels_rot).unsqueeze(0).float()
    return seg_labels_t
3987d91e903eff2bd48e331f33b9c0ea583ff5e0
31,101
def read_project(config_path, timesheet_path=None, replicon_options=None):
    """Build a Project from a YAML config and an optional timesheet.

    The config at ``config_path`` is parsed into a dictionary; when
    ``timesheet_path`` is given, the timesheet is read (forwarding
    ``replicon_options``) and attached under ``timesheet_df``.
    """
    project_kwargs = read_config(config_path)
    if timesheet_path is not None:
        timesheet = read_timesheet(timesheet_path,
                                   replicon_options=replicon_options)
        project_kwargs['timesheet_df'] = timesheet
    return Project(**project_kwargs)
b3b13bd1594f3d1ba0ba3b53af5f8546c59f39ee
31,102
from typing import List
from typing import Tuple
from typing import Union


def _parse_query_location(
    location: Union[Tuple[float, float], List[float], Point, MultiPoint, Polygon]
) -> str:
    """Convert a given location into its WKT representation.

    Args:
        location: A ``(lon, lat)`` tuple/list (WKT axis order), or a
            ``Point``, ``MultiPoint`` or ``Polygon`` geometry.

    Raises:
        ValueError: Raised when unable to parse the location.

    Returns:
        str: WKT representation of the location.
    """
    if isinstance(location, (tuple, list)):
        # Assume this is [lon lat] following wkt format
        location = Point(location[0], location[1])
    if isinstance(location, (Point, MultiPoint, Polygon)):
        return location.wkt
    raise ValueError("Location is not in correct format.")
17e57b9521144c61c6069d52a8ebcad6101882aa
31,103
def getuniqueitems(userchoices):
    """Return the distinct items referenced by *userchoices*, first-seen order."""
    unique = []
    for choice in userchoices:
        item = choice.item
        if item not in unique:
            unique.append(item)
    return unique
a7885556604153cf756fb6a29c2e870c27d47337
31,104
def _as_array(arr, dtype=None):
    """Coerce *arr* to a numerical NumPy array, avoiding a copy when possible.

    ``None`` passes through unchanged; bare ints/floats become 1-element
    arrays.  Raises ValueError when the resulting dtype is not one of the
    accepted numerical dtypes.
    """
    if arr is None:
        return None
    # Fast path: already an ndarray and no dtype requested.
    if dtype is None and isinstance(arr, np.ndarray):
        return arr
    if isinstance(arr, integer_types + (float,)):
        arr = [arr]
    out = np.asarray(arr)
    if dtype is not None and out.dtype != dtype:
        out = out.astype(dtype)
    if out.dtype not in _ACCEPTED_ARRAY_DTYPES:
        raise ValueError("'arr' seems to have an invalid dtype: "
                         "{0:s}".format(str(out.dtype)))
    return out
d0336a8cedcd324d5dd26a7b98f59d70f691cf1d
31,105
def stitch_together_target_regions(cluster, flanking=500, logger=None,
                                   circular=False, revcomp=False):
    """Given a single LociCluster object containing Locus objects (usually)
    of length 3 (16, 5, and 23 rRNAs), return the object with amended
    sequence info (an ``extractedSeqRecord`` covering all loci plus
    ``flanking`` bases on each side, clamped to the sequence bounds).

    revamped 20161004
    """
    assert logger is not None, "Must have logger for this function"
    # TODO : make this safer. coord list is constructed sequentially but this
    # is a backup. Throws sort of a cryptic error. but as I said, its a backup
    if sorted([x.start_coord for x in cluster.loci_list]) != \
       [x.start_coord for x in cluster.loci_list]:
        logger.warning("Coords are not in increasing order; " +
                       "you've been warned")
    start_list = sorted([x.start_coord for x in cluster.loci_list])
    logger.debug("Start_list: {0}".format(start_list))
    logger.debug("stitching together the following coords:")
    for i in cluster.loci_list:
        logger.debug(str(i.__dict__))
    # This works as long as coords are never in reverse order
    cluster.global_start_coord = min(
        [x.start_coord for x in cluster.loci_list]) - flanking
    # if start is negative, just use 1, the beginning of the sequence
    if cluster.global_start_coord < 1:
        logger.warning("Caution! Cannot retrieve full flanking region, as " +
                       "the 5' flanking region extends past start of " +
                       "sequence. If this is a problem, try using a smaller " +
                       "--flanking region, and/or if appropriate, run " +
                       "without with --linear.")
        cluster.global_start_coord = 1
    cluster.global_end_coord = max(
        [x.end_coord for x in cluster.loci_list]) + flanking
    if cluster.global_end_coord > len(cluster.seq_record):
        # BUGFIX: this message was a copy-paste of the 5'/start warning even
        # though this branch fires when the 3' end overruns the sequence.
        logger.warning("Caution! Cannot retrieve full flanking region, as " +
                       "the 3' flanking region extends past the end of the " +
                       "sequence. If this is a problem, try using a smaller " +
                       "--flanking region, and/or if appropriate, run " +
                       "without with --linear.")
        cluster.global_end_coord = len(cluster.seq_record)
    logger.debug("global start and end: %s %s",
                 cluster.global_start_coord, cluster.global_end_coord)
    # the minus one makes things go from 1 based to zero based
    seq_with_ns = str(cluster.seq_record.seq[cluster.global_start_coord - 1:
                                             cluster.global_end_coord])
    seq_len = len(seq_with_ns[:])
    logger.debug(seq_len)
    # again, plus 1 corrects for 0 index.
    # len("AAAA") = 4 vs AAAA[-1] - AAAA[0] = 3
    logger.debug("\nexp length %i \nact length %i",
                 cluster.global_end_coord - cluster.global_start_coord + 1,
                 seq_len)
    ## Change coords in ID When using padding
    if not circular:
        seq_id = str(cluster.sequence_id + "_" +
                     str(cluster.global_start_coord) + ".." +
                     str(cluster.global_end_coord))
    else:
        # correct for padding
        seq_id = str(cluster.sequence_id + "_" +
                     str(cluster.global_start_coord - cluster.padding) +
                     ".." +
                     str(cluster.global_end_coord - cluster.padding))
    # if most are on - strand, return sequence reverse complement
    strands = [x.strand for x in cluster.loci_list]
    if revcomp and \
       (sum([x == -1 for x in strands]) > sum([x == 1 for x in strands])):
        # BUGFIX: "compliment" -> "complement" in the log message
        logger.info("returning the reverse complement of the sequence")
        cluster.extractedSeqRecord = SeqRecord(
            Seq(seq_with_ns,
                IUPAC.IUPACAmbiguousDNA()).reverse_complement(),
            id=str(seq_id + "_RC"))
    else:
        cluster.extractedSeqRecord = SeqRecord(
            Seq(seq_with_ns, IUPAC.IUPACAmbiguousDNA()), id=seq_id)
    ### last minuete check
    cluster.extractedSeqRecord.description = "from riboSnag"
    for prop, value in vars(cluster).items():
        if value is None:
            logger.debug("%s has a value of None!", prop)
    return cluster
4ec89a7d9d88874f3a1b8fb42abafec5d65633d1
31,106
def add_region():
    """Render and process the "add region" page.

    GET renders an empty form; POST validates the submitted region, saves
    it, and redirects to the new region's detail view.
    """
    entry = Region()  # fresh model instance; only carries a name for now
    error_msg = {}
    country_list = Country.query.all()

    if request.method == 'GET':
        return render_template('regions/add.html', entry=entry,
                               country_list=country_list,
                               error_msg=error_msg)

    if request.method == 'POST':
        # validate input
        entry, form_is_valid, error_msg = form_validate_region(entry)
        if not form_is_valid:
            # re-render the form with the validation errors
            return render_template('regions/add.html', entry=entry,
                                   country_list=country_list,
                                   error_msg=error_msg)
        # the data is valid, save it
        db.session.add(entry)
        db.session.commit()
        return redirect(url_for('regions.view_one_region',
                                region_id=entry.region_id))
    # any other HTTP method falls through (returns None)
0798a7bec6474cf83dd870b6529e8f57b69bc882
31,107
import six


def smart_text(s, encoding='utf-8', strings_only=False, errors='strict'):
    """Coerce *s* to text and then encode it with *encoding*.

    This function was excerpted from Django project, modifications have been
    applied. The original license is as follows:

    ```
    Copyright (c) Django Software Foundation and individual contributors.
    All rights reserved.

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are
    met:

    1. Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.

    2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

    3. Neither the name of Django nor the names of its contributors may be
    used to endorse or promote products derived from this software without
    specific prior written permission.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
    IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
    TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
    PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
    OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
    EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
    PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
    PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
    LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
    NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
    SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    ```

    Similar to smart_text, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.

    If strings_only is True, don't convert (some) non-string-like objects.

    NOTE(review): despite the name, both return statements call
    ``s.encode(encoding)``, so this function returns *bytes*, not text —
    confirm this is intended. Also, ``strings_only`` is only forwarded to
    the recursive call and never tested in this body — TODO confirm.
    """
    # Handle the common case first for performance reasons.
    if issubclass(type(s), six.text_type):
        return s.encode(encoding)
    try:
        if not issubclass(type(s), six.string_types):
            # Non-string input: build a text representation of it.
            if six.PY3:
                if isinstance(s, bytes):
                    s = six.text_type(s, encoding, errors)
                else:
                    s = six.text_type(s)
            elif hasattr(s, '__unicode__'):
                s = six.text_type(s)
            else:
                s = six.text_type(bytes(s), encoding, errors)
        else:
            # Note: We use decode() here, instead of six.text_type(s,
            # encoding, errors), so that if s is a SafeBytes, it ends up
            # being a SafeText at the end.
            s = s.decode(encoding, errors)
    except UnicodeDecodeError as e:
        if not isinstance(s, Exception):
            raise e
        else:
            # If we get to here, the caller has passed in an Exception
            # subclass populated with non-ASCII bytestring data without a
            # working unicode method. Try to handle this without raising a
            # further exception by individually forcing the exception args
            # to unicode.
            # NOTE(review): smart_text() returns bytes (see above), so
            # str.join over its results would raise TypeError on Python 3 —
            # verify this branch is ever reached.
            s = ' '.join(smart_text(arg, encoding, strings_only, errors)
                         for arg in s)
    return s.encode(encoding)
e4040ea31669acbdbfee5db9f6ce30acdc40bae1
31,108
def get_host(hostname):
    """Return the backup record for *hostname*, or None when absent."""
    print(f"\nSearching for host '{ hostname }'...\n")
    records = get_backup_data()
    matches = (record for record in records
               if record['hostname'] == hostname)
    return next(matches, None)
5b017be557d9b5340e44905a49ea6ee2b1ea30b3
31,109
def gazebo_get_gravity() -> Vector3:
    """Query Gazebo's physics properties service for the gravity vector.

    :return: the gravity vector.
    :rtype: Vector3
    """
    service_name = "/gazebo/get_physics_properties"
    rospy.wait_for_service(service_name)
    get_physics = rospy.ServiceProxy(service_name, GetPhysicsProperties)
    return get_physics().gravity
8ec560e1fd276be9d2677f94dc5d9b5017e81d23
31,110
def make_mock_device():
    """Return a factory that builds mocked libvirt host devices from XML."""

    def _make_mock_device(xml_def):
        conn = virt.libvirt.openAuth.return_value
        # Install the name->mock mapping once on the mocked connection.
        if not isinstance(conn.nodeDeviceLookupByName, MappedResultMock):
            conn.nodeDeviceLookupByName = MappedResultMock()

        doc = ET.fromstring(xml_def)
        name = doc.find("./name").text
        conn.nodeDeviceLookupByName.add(name)

        device = conn.nodeDeviceLookupByName(name)
        device.name.return_value = name
        device.XMLDesc.return_value = xml_def
        device.listCaps.return_value = [
            cap.get("type") for cap in doc.findall("./capability")
        ]
        return device

    return _make_mock_device
80b9b51f586c2b373478020b2a08276e97a6d5ba
31,111
from typing import Union


def load_file(file_name: str, mode: str = "rb") -> Union[str, bytes]:
    """Read a bundled resource file and return its contents."""
    resource = get_resource_path(file_name)
    with open(resource, mode) as handle:
        return handle.read()
ed535b091f3ae853e60610022ef01bb38c67b41e
31,112
def path_graph(n, create_using=None):
    """Returns the Path graph `P_n` of linearly connected nodes.

    Parameters
    ----------
    n : int or iterable
        If an integer, node labels are 0 to n with center 0.
        If an iterable of nodes, the center is the first.
    create_using : NetworkX graph constructor, optional (default=nx.Graph)
        Graph type to create. If graph instance, then cleared before
        populated.
    """
    _, nodes = n
    graph = empty_graph(nodes, create_using)
    # Connect consecutive nodes to form the path.
    graph.add_edges_from(pairwise(nodes))
    return graph
53eabfd815b3c37f6b7e8e8e54e1a0357c0fcca9
31,113
def numpy_shift(array: np.ndarray, periods: int, axis: int,
                fill_value=np.nan) -> np.ndarray:
    """Reference NumPy implementation of a pandas-style shift along *axis*.

    Elements are shifted by *periods* positions; vacated positions are set
    to *fill_value*.  Always returns a new array.
    """
    assert axis in range(-array.ndim, array.ndim)
    if periods == 0:
        return array.copy()

    src = [slice(None)] * array.ndim
    dst = [slice(None)] * array.ndim
    fill = [slice(None)] * array.ndim
    if periods > 0:
        fill[axis] = slice(None, periods)
        src[axis] = slice(None, -periods)
        dst[axis] = slice(periods, None)
    else:
        fill[axis] = slice(periods, None)
        src[axis] = slice(-periods, None)
        dst[axis] = slice(None, periods)

    shifted = np.empty_like(array)
    shifted[tuple(fill)] = fill_value
    shifted[tuple(dst)] = array[tuple(src)]
    return shifted
3ffc61a61abc5bdc3547822c0715a81efb9d0b4d
31,114
def draw_rect(im, cords, color=None):
    """Draw rectangles on the image.

    Parameters
    ----------
    im : numpy.ndarray
        numpy image
    cords : numpy.ndarray
        Bounding boxes of shape `N X 4`, each box in `x1 y1 x2 y2` format.

    Returns
    -------
    numpy.ndarray
        numpy image with the bounding boxes drawn on it
    """
    canvas = im.copy()
    boxes = cords[:, :4].reshape(-1, 4)
    if not color:
        color = [255, 255, 255]
    # line thickness scales with the larger image dimension
    thickness = int(max(canvas.shape[:2]) / 200)
    for box in boxes:
        top_left = int(box[0]), int(box[1])
        bottom_right = int(box[2]), int(box[3])
        canvas = cv2.rectangle(canvas.copy(), top_left, bottom_right,
                               color, thickness)
    return canvas
02c52166f0f9c25d9f9ad61f7083d7ee733759aa
31,115
from typing import List


def _setup_entities(hass, dev_ids: List[str]):
    """Set up CAME light entities for the given device ids."""
    manager = hass.data[DOMAIN][CONF_MANAGER]  # type: CameManager
    devices = (manager.get_device_by_id(dev_id) for dev_id in dev_ids)
    return [CameLightEntity(device) for device in devices
            if device is not None]
5c9deb213b5df197aaf075f62a68c1472951a3e0
31,116
import binascii
import os


def image_url_salt():
    """Return 8 random hex characters for cache-busting image URLs."""
    raw = os.urandom(4)
    return binascii.b2a_hex(raw).decode('us-ascii')
0b8c803682ff21b56c039a12da184d699ffc47e7
31,117
def check_doa(geometry, doa, online=False):
    """Validate direction-of-arrival value(s) for the given array geometry.

    When *online* is True, *doa* is an iterable of angles; otherwise it is a
    single angle.  Linear arrays accept [0, 180]; circular arrays [0, 360).
    """
    angles = doa if online else [doa]
    for angle in angles:
        if angle < 0:
            return False
        if geometry == "linear" and angle > 180:
            return False
        if geometry == "circular" and angle >= 360:
            return False
    return True
ae2be5c478968358a9edf6e7da63e586af39eed8
31,118
def reconnect(force=False, **kwargs):  # pylint: disable=unused-argument
    """
    Reconnect the NAPALM proxy when the connection is dropped by the
    network device. The connection can be forced to be restarted using
    the ``force`` argument.

    .. note::

        This function can be used only when running proxy minions.

    CLI Example:

    .. code-block:: bash

        salt '*' napalm.reconnect
        salt '*' napalm.reconnect force=True
    """
    default_ret = {"out": None, "result": True, "comment": "Already alive."}
    if not salt.utils.napalm.is_proxy(__opts__):
        # regular minion is always alive
        # otherwise, the user would not be able to execute this command
        return default_ret
    is_alive = alive()
    log.debug("Is alive fetch:")
    log.debug(is_alive)
    # restart when dead, when the liveness probe is malformed, or on demand
    needs_restart = force or not (
        is_alive.get("result", False)
        and is_alive.get("out", False)
        and is_alive.get("out", {}).get("is_alive", False)
    )
    if not needs_restart:
        # nothing to do here
        return default_ret
    proxyid = __opts__.get("proxyid") or __opts__.get("id")
    # close the connection
    log.info("Closing the NAPALM proxy connection with %s", proxyid)
    salt.utils.napalm.call(
        napalm_device, "close", **{}  # pylint: disable=undefined-variable
    )
    # and re-open
    log.info("Re-opening the NAPALM proxy connection with %s", proxyid)
    salt.utils.napalm.call(
        napalm_device, "open", **{}  # pylint: disable=undefined-variable
    )
    default_ret.update({"comment": "Connection restarted!"})
    return default_ret
5150041bdf62c01335b8fb1e25639afc2e7256b2
31,119
import sys


def ask_user_for_half_life(access_key):
    """Prompt until the user picks (or types) a half-life, returned in seconds.

    Preset choices map to fixed durations; "forever" maps to -1; any typed
    integer is accepted as a number of seconds.
    """
    presets = {
        "One hour": 3600,
        "One day": 3600 * 24,
        "One week": 3600 * 24 * 7,
        "forever": -1,
    }
    options = ["One hour", "One day", "One week", "forever"]
    while True:
        choice = ask_for_choice_or_new(
            "What half life do you want for this key? ({0})".format(access_key),
            options)
        if choice in presets:
            return presets[choice]
        if choice.isdigit():
            return int(choice)
        print("Please enter an integer representing the number of seconds in the half life", file=sys.stderr)
b91a70eb11450f772bd12ff39d061204c34f860b
31,120
def _trim_orderbook_list(arr: list, ascending: bool, limit_len: int = 50) -> list:
    """Aggregate orderbook rows into price buckets (~4 digits of precision)."""
    first_price = arr[0][0]
    # pick the bucket size from the magnitude of the best price
    bounds = ((0.1, 1e-5), (1, 1e-4), (10, 1e-3),
              (100, 1e-2), (1000, 1e-1), (10000, 1))
    unit = 10
    for bound, candidate in bounds:
        if first_price < bound:
            unit = candidate
            break

    bucket_price = jh.orderbook_trim_price(first_price, ascending, unit)
    bucket_qty = 0
    trimmed = []
    for row in arr:
        if len(trimmed) == limit_len:
            break
        price, qty = row[0], row[1]
        crossed = price > bucket_price if ascending else price < bucket_price
        if crossed:
            # flush the finished bucket and start a new one at this price
            trimmed.append([bucket_price, bucket_qty])
            bucket_qty = qty
            bucket_price = jh.orderbook_trim_price(price, ascending, unit)
        else:
            bucket_qty += qty
    return trimmed
557dceab9b11836b2f884ebc40d1e66769f5eb4c
31,121
from corpkit.process import make_name_to_query_dict


def process_piece(piece, op='=', quant=False, **kwargs):
    """Make a single search obj and value from a query piece.

    Returns ``(False, False)`` when *op* is absent from the piece.
    """
    if op not in piece:
        return False, False
    translator = make_name_to_query_dict()
    target, criteria = piece.split(op, 1)
    criteria = remake_special(criteria.strip('"'), **kwargs)
    if '-' in target:
        obj, show = target.split('-', 1)
        show = show.lower()
        # normalise show names before building the lookup key
        if show == 'deprel':
            show = 'Function'
        elif show == 'pos':
            show = 'POS'
        form = '{} {}'.format(obj.title(), show.lower())
    else:
        form = target.title()
    if form == 'Deprel':
        form = 'Function'
    elif form == 'Pos':
        form = 'POS'
    return translator.get(form), criteria
2d79455fcc27e0b3b9a81209cdc28089a7b73f5a
31,122
import random


def normal220(startt, endt, money2, first, second, third, forth, fifth,
              sixth, seventh, zz1, zz2, bb1, bb2, bb3, aa1, aa2):
    """Append one synthetic non-fraud "transfer" record to the running columns.

    Source/destination ids are drawn from ``bb1``/``zz2``; the amount is a
    random value in [50000, money2); the timestamp is drawn uniformly
    between ``startt`` and ``endt``.  The mutated column lists are returned.
    """
    # source and destination id generation
    src_id = random.choice(bb1)
    dst_id = random.choice(zz2)
    # type of banking work, label of fraud and type of fraud
    first.append("transfer")
    second.append(src_id)
    third.append(dst_id)
    sixth.append("0")
    seventh.append("none")
    # amount of money generation
    forth.append(random.randrange(50000, money2))
    # date and time generated randomly between the two dates
    fifth.append(randomDate(startt, endt, random.random()))
    return (first, second, third, forth, fifth, sixth, seventh)
d83408e8d5ec18d0a6de2aee62ca12aa5f1561b8
31,123
def sigma_voigt(dgm_sigmaD, dgm_gammaL):
    """Compute sigma of the Voigt profile from DIT grid matrices.

    Args:
        dgm_sigmaD: DIT grid matrix for sigmaD
        dgm_gammaL: DIT grid matrix for gammaL

    Returns:
        sigma
    """
    # FWHM conversion factor for a Gaussian: 2*sqrt(2*ln 2)
    fac = 2. * np.sqrt(2. * np.log(2.0))
    gamma_fwhm = jnp.min(dgm_gammaL, axis=1) * 2.0
    doppler_fwhm = jnp.min(dgm_sigmaD, axis=1) * fac
    # Olivero-Longbothum approximation of the Voigt FWHM
    voigt_fwhm = jnp.min(
        0.5346 * gamma_fwhm
        + jnp.sqrt(0.2166 * gamma_fwhm ** 2 + doppler_fwhm ** 2))
    return voigt_fwhm / fac
27581dc61e04d2f5b1411cb64093e03378bb8297
31,124
def zigzag2(i, curr=.45, upper=.48, lower=.13):
    """Generalized zig-zag: oscillate linearly between *lower* and *upper*.

    For small |i| the value moves linearly from *curr*; beyond that, the
    position wraps around the triangle wave of period 2*(upper - lower).
    """
    headroom = upper - curr
    if abs(i) <= headroom:
        return curr + i
    period = 2 * (upper - lower)
    phase = (i - headroom) % period
    if phase < upper - lower:
        return upper - phase
    return 2 * lower - upper + phase
a51624af520121eb7285b2a8a5b4dc5ffa552147
31,125
def get_len_of_range(start, stop, step):
    """Return ``len(range(start, stop, step))`` without materializing it.

    Handles positive and negative steps (the original returned a negative
    value for a negative step with start < stop).  Raises ValueError for
    ``step == 0``, matching the built-in ``range``.
    """
    if step == 0:
        raise ValueError("step must not be zero")
    if step > 0:
        if start < stop:
            return (stop - start - 1) // step + 1
    elif start > stop:
        # mirror of the positive case with the direction reversed
        return (start - stop - 1) // (-step) + 1
    return 0
4c43f502efe7ad0e20a9bc7624e6d47e208a94a7
31,126
# NOTE(review): the np.array default for `origin` is a single shared object
# across calls — safe only if callees never mutate it; confirm.
def generate_tap(laygen, objectname_pfix, placement_grid,
                 routing_grid_m1m2_thick, devname_tap_boundary,
                 devname_tap_body, m=1, origin=np.array([0, 0]),
                 transform='R0'):
    """Generate a tap primitive.

    Places a left boundary cell, ``m`` columns of tap body, and a right
    boundary cell on the placement grid, then routes power across the tap
    pins on the m1/m2 thick routing grid.

    Returns the three placed instances ``[left boundary, body, right
    boundary]``.
    """
    pg = placement_grid
    rg_m1m2_thick = routing_grid_m1m2_thick
    # placement: boundary cell, m-wide tap body, boundary cell (left to right)
    itapbl0 = laygen.place("I" + objectname_pfix + 'BL0',
                           devname_tap_boundary, pg, xy=origin,
                           transform=transform)
    itap0 = laygen.relplace(name="I" + objectname_pfix + '0',
                            templatename=devname_tap_body,
                            gridname=pg, refinstname=itapbl0.name,
                            shape=np.array([m, 1]), transform=transform)
    itapbr0 = laygen.relplace(name="I" + objectname_pfix + 'BR0',
                              templatename=devname_tap_boundary,
                              gridname=pg, refinstname=itap0.name,
                              transform=transform)
    # power route from TAP0 of the first body column to TAP1 of the last
    laygen.route(None, laygen.layers['metal'][2],
                 xy0=np.array([0, 0]), xy1=np.array([0, 0]),
                 gridname0=rg_m1m2_thick,
                 refinstname0=itap0.name, refpinname0='TAP0',
                 refinstindex0=np.array([0, 0]),
                 refinstname1=itap0.name, refpinname1='TAP1',
                 refinstindex1=np.array([m - 1, 0]))
    # drop a via on TAP0 of every other column; range(1-1, int(m/2)+0)
    # is equivalent to range(0, m // 2) for non-negative m
    for i in range(1 - 1, int(m / 2) + 0):
        laygen.via(None, np.array([0, 0]), refinstname=itap0.name,
                   refpinname='TAP0', refinstindex=np.array([2 * i, 0]),
                   gridname=rg_m1m2_thick)
    return [itapbl0, itap0, itapbr0]
59f3ceeee74e8b99690e86b671cbd1e503fe4121
31,127
from typing import Union
from typing import Optional


def multiply(
    x1: Union[ivy.Array, ivy.NativeArray],
    x2: Union[ivy.Array, ivy.NativeArray],
    out: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
) -> ivy.Array:
    """Compute the element-wise product ``x1 * x2``.

    Follows the Array API standard's special-case rules for floating-point
    operands: ``NaN`` operands propagate; ``0 * infinity`` (in either order)
    is ``NaN``; the sign of a non-``NaN`` result follows the usual sign
    rules; products involving an infinity and a nonzero finite number (or
    another infinity) are signed infinities.  Finite results are rounded to
    the nearest representable value per IEEE 754-2019, overflowing to a
    signed infinity and underflowing to a signed zero.

    Parameters
    ----------
    x1
        first input array. Should have a numeric data type.
    x2
        second input array. Must be compatible with ``x1`` (see
        :ref:`Broadcasting`). Should have a numeric data type.
    out
        optional output array, for writing the result to. It must have a
        shape that the inputs broadcast to.

    Returns
    -------
    ret
        an array containing the element-wise products, with a data type
        determined by :ref:`Type Promotion Rules`.
    """
    backend = _cur_backend(x1, x2)
    return backend.multiply(x1, x2, out)
53ce0b715933b59b40abd7f51d7bb104874e54db
31,128
def discriminateEvents(events, threshold):
    """Discriminate triggers when different kinds of events share a channel.

    A time threshold is used to determine if two events are from the same
    trial.

    Parameters
    ----------
    events : instance of pandas.core.DataFrame
        Dataframe containing the list of events obtained with
        mne.find_events(raw).
    threshold : float
        Time threshold in milliseconds. Keeps an event if the time
        difference with the next one is superior than threshold.

    Returns
    -------
    events : instance of pandas.core.DataFrame
        Events (re-indexed from 0) separated from their predecessor by more
        than ``threshold``.
    """
    # BUGFIX: the original mutated the caller's dataframe (it left a 'diff'
    # column behind) and used chained `events['diff'].iloc[0] = ...`
    # assignment, which is unreliable under pandas copy-on-write.
    # calculate the rolling difference (between n and n+1)
    diffs = events[0].diff()
    # the first event has no predecessor; use its own time as the gap
    diffs.iloc[0] = events.iloc[0, 0]
    # select events with time distance superior to threshold
    events = events[diffs > threshold].reset_index(drop=True)
    return events
0078548ea463c01d88b574185b3dcb5632e5cd13
31,129
def whiten(sig, win):
    """Whiten signal, modified from MSNoise.

    Applies *win* to the phase-normalized positive-frequency half of the
    spectrum, mirrors it for conjugate symmetry, and returns the real part
    of the inverse transform.
    """
    npts = len(sig)
    nfreq = int(npts // 2 + 1)
    assert len(win) == nfreq

    spectrum = fft(sig)
    positive = win * phase(spectrum[:nfreq])
    spectrum[:nfreq] = positive
    # mirror the positive frequencies so the output stays real-valued
    spectrum[-nfreq + 1:] = positive[1:].conjugate()[::-1]
    return np.real(ifft(spectrum))
0c6e4300d7bebc466039ee447c7f6507d211be1c
31,130
def create_slice(request):
    """Create a slice based on a pattern.

    User has to be the "production_request" owner. Steps should contain a
    dictionary of step names which should be copied from the pattern slice
    as well as modified fields, e.g.
    ``{'Simul': {'container_name': 'some.container.name'}}``

    :param production_request: Production request ID. Required
    :param pattern_slice: Pattern slice number. Required
    :param pattern_request: Pattern request ID. Required
    :param steps: Dictionary of steps to be copied from pattern slice.
        Required
    :param slice: Dictionary of parameters to be changed in a slice.
        Optional
    """
    try:
        data = request.data
        production_request = TRequest.objects.get(reqid=data['production_request'])
        # only the request's manager (owner) may extend it
        if request.user.username != production_request.manager:
            return HttpResponseForbidden()
        pattern_slice = int(data['pattern_slice'])
        pattern_request = int(data['pattern_request'])
        steps = data['steps']
        slice_dict = data.get('slice')
        new_slice_number = None
        # hard cap: do not extend requests that already have 1000 slices
        if InputRequestList.objects.filter(request=production_request).count()<1000:
            new_slice_number = clone_pattern_slice(production_request.reqid,
                                                   pattern_request, pattern_slice, steps, slice_dict)
        # auto-approve the request unless it is still in test/approved state
        if new_slice_number and (production_request.cstatus not in ['test', 'approved']):
            set_request_status('cron',production_request.reqid,'approved','Automatic approve by api',
                               'Request was automatically extended')
    except Exception as e:
        # NOTE(review): broad except returns any failure as a 400 with the
        # raw exception — confirm this is the intended API error contract.
        return HttpResponseBadRequest(e)
    return Response({'slice': new_slice_number,'request':production_request.reqid})
c01f15b6b369d9159b02c06ce6e2bc9c63dac50e
31,131
def get_reverse_charge_recoverable_total(filters):
    """Returns the sum of the total of each Purchase invoice made with
    recoverable reverse charge."""
    query_filters = get_filters(filters)
    query_filters.append(['reverse_charge', '=', 'Y'])
    query_filters.append(['recoverable_reverse_charge', '>', '0'])
    query_filters.append(['docstatus', '=', 1])
    try:
        rows = frappe.db.get_all(
            'Purchase Invoice',
            filters=query_filters,
            fields=['sum(total)'],
            as_list=True,
            limit=1,
        )
        return rows[0][0] or 0
    except (IndexError, TypeError):
        # no matching invoices, or a NULL aggregate
        return 0
95b24a90230675ffda80760d073705f5da823bc8
31,132
import hashlib


def load_data(url_bam):
    """Load ``MetaData`` for the BAM file at ``url_bam`` (file:// only)."""
    if url_bam.scheme != "file":
        raise ExcovisException("Can only load file resources at the moment")
    with pysam.AlignmentFile(url_bam.path, "rb") as samfile:
        read_groups = samfile.header.as_dict().get("RG", [])
    if len(read_groups) != 1:
        raise ExcovisException("Must have one read group per BAM file!")
    # fall back to the file name (sans .bam) when the RG has no SM tag
    fallback = fs.path.basename(url_bam.path[: -len(".bam")])
    sample = read_groups[0].get("SM", fallback)
    digest = hashlib.sha256(url_bam.path.encode("utf-8")).hexdigest()
    return MetaData(id=digest, path=url_bam.path, sample=strip_sample(sample))
e5c17b6ad236ebcbd4222bbe5e3cbdb923965325
31,133
import random
import string


def generate_room_number():
    """Generate a 10-character room number.

    ``random.sample`` over the ten digits yields a random permutation, so
    each digit appears exactly once.
    """
    digits = random.sample(string.digits, 10)
    return "".join(digits)
133e7463106df89fb68de6a7dfa7c718bc1bc5ba
31,134
def is_noiseless(model: Model) -> bool:
    """Check if a given (single-task) botorch model is noiseless."""
    if isinstance(model, ModelListGP):
        raise ModelError(
            "Checking for noisless models only applies to sub-models of ModelListGP"
        )
    # exact class membership, not isinstance: subclasses do not qualify
    return type(model) in NOISELESS_MODELS
d1b5cfee5713cc44d40a7d0d98542d0d2147a9a3
31,135
def random_sampling_normalized_variance(sampling_percentages, indepvars, depvars, depvar_names,
                                        n_sample_iterations=1, verbose=True, npts_bandwidth=25, min_bandwidth=None,
                                        max_bandwidth=None, bandwidth_values=None, scale_unit_box=True,
                                        n_threads=None):
    """
    Compute the normalized variance derivatives :math:`\\hat{\\mathcal{D}}(\\sigma)` for random samples of the
    provided data specified using ``sampling_percentages``. These will be averaged over ``n_sample_iterations``
    iterations. Analyzing the shift in peaks of :math:`\\hat{\\mathcal{D}}(\\sigma)` due to sampling can distinguish
    between characteristic features and non-uniqueness due to a transformation/reduction of manifold coordinates.
    True features should not show significant sensitivity to sampling while non-uniqueness/folds in the manifold
    will. More information can be found in :cite:`Armstrong2021`.

    :param sampling_percentages: list or 1D array of fractions (between 0 and 1) of the provided data to sample
        for computing the normalized variance
    :param indepvars: independent variable values (size: n_observations x n_independent variables)
    :param depvars: dependent variable values (size: n_observations x n_dependent variables)
    :param depvar_names: list of strings corresponding to the names of the dependent variables (for saving values
        in a dictionary)
    :param n_sample_iterations: (optional, default 1) how many iterations for each ``sampling_percentages`` to
        average the normalized variance derivative over
    :param verbose: (optional, default True) when True, progress statements are printed
    :param npts_bandwidth: (optional, default 25) number of points to build a logspace of bandwidth values
    :param min_bandwidth: (optional, default to minimum nonzero interpoint distance) minimum bandwidth
    :param max_bandwidth: (optional, default to estimated maximum interpoint distance) maximum bandwidth
    :param bandwidth_values: (optional) array of bandwidth values, i.e. filter widths for a Gaussian filter,
        to loop over
    :param scale_unit_box: (optional, default True) center/scale the independent variables between [0,1] for
        computing a normalized variance so the bandwidth values have the same meaning in each dimension
    :param n_threads: (optional, default None) number of threads to run this computation. If None, default
        behavior of multiprocessing.Pool is used, which is to use all available cores on the current system.

    :return:
        - a dictionary of the normalized variance derivative (:math:`\\hat{\\mathcal{D}}(\\sigma)`) for each
          sampling percentage in ``sampling_percentages`` averaged over ``n_sample_iterations`` iterations
        - the :math:`\\sigma` values used for computing :math:`\\hat{\\mathcal{D}}(\\sigma)`
        - a dictionary of the ``VarianceData`` objects for each sampling percentage and iteration in
          ``sampling_percentages`` and ``n_sample_iterations``
    """
    assert indepvars.ndim == 2, "independent variable array must be 2D: n_observations x n_variables."
    assert depvars.ndim == 2, "dependent variable array must be 2D: n_observations x n_variables."

    if isinstance(sampling_percentages, list):
        for p in sampling_percentages:
            assert p > 0., "sampling percentages must be between 0 and 1"
            assert p <= 1., "sampling percentages must be between 0 and 1"
    elif isinstance(sampling_percentages, np.ndarray):
        assert sampling_percentages.ndim == 1, "sampling_percentages must be given as a list or 1D array"
        for p in sampling_percentages:
            assert p > 0., "sampling percentages must be between 0 and 1"
            assert p <= 1., "sampling percentages must be between 0 and 1"
    else:
        raise ValueError("sampling_percentages must be given as a list or 1D array.")

    normvar_data = {}
    avg_der_data = {}

    for p in sampling_percentages:
        if verbose:
            print('sampling', p * 100., '% of the data')
        nv_data = {}
        avg_der = {}

        for it in range(n_sample_iterations):
            if verbose:
                print(' iteration', it + 1, 'of', n_sample_iterations)
            # Seed per iteration so each iteration draws a reproducible sample.
            rnd.seed(it)
            idxsample = rnd.sample(list(np.arange(0, indepvars.shape[0])), int(p * indepvars.shape[0]))
            nv_data[it] = compute_normalized_variance(indepvars[idxsample, :], depvars[idxsample, :], depvar_names,
                                                      npts_bandwidth=npts_bandwidth, min_bandwidth=min_bandwidth,
                                                      max_bandwidth=max_bandwidth, bandwidth_values=bandwidth_values,
                                                      scale_unit_box=scale_unit_box, n_threads=n_threads)

            der, xder, _ = normalized_variance_derivative(nv_data[it])
            for key in der.keys():
                # Running average over iterations. Fix: use builtin float();
                # the np.float alias was deprecated in NumPy 1.20 and removed
                # in 1.24, so np.float raised AttributeError on modern NumPy.
                if it == 0:
                    avg_der[key] = der[key] / float(n_sample_iterations)
                else:
                    avg_der[key] += der[key] / float(n_sample_iterations)

        avg_der_data[p] = avg_der
        normvar_data[p] = nv_data
    # NOTE: xder comes from the last iteration of the loop above, so at least
    # one sampling percentage and one iteration must be supplied.
    return avg_der_data, xder, normvar_data
a340659a363760c01d38dd21b2a439b346751a23
31,136
def classify_data(X_train, Y_train, X_test):
    """Develop and train your very own variational quantum classifier.

    Use the provided training data to train your classifier. The code you write
    for this challenge should be completely contained within this function
    between the # QHACK # comment markers.

    The number of qubits, choice of variational ansatz, cost function, and
    optimization method are all to be developed by you in this function.

    Args:
        X_train (np.ndarray): An array of floats of size (250, 3) to be used as training data.
        Y_train (np.ndarray): An array of size (250,) which are the categorical labels
            associated to the training data. The categories are labeled by -1, 0, and 1.
        X_test (np.ndarray): An array of floats of (50, 3) to serve as testing data.

    Returns:
        str: The predicted categories of X_test, converted from a list of ints to a
            comma-separated string.
    """
    # Use this array to make a prediction for the labels of the data in X_test
    predictions = []

    # QHACK #

    # Define output labels as quantum state vectors
    def density_matrix(state):
        """Calculates the density matrix representation of a state.

        Args:
            state (array[complex]): array representing a quantum state vector

        Returns:
            dm: (array[complex]): array representing the density matrix
        """
        return state * np.conj(state).T

    # |0> and |1> basis states used as the two class labels of each binary task.
    label_0 = [[1], [0]]
    label_1 = [[0], [1]]
    state_labels = [label_0, label_1]

    # Single-qubit device: the classifier encodes data by repeated rotations.
    dev = qml.device("default.qubit", wires=1)

    @qml.qnode(dev)
    def qcircuit(params, x, y):
        """A variational quantum circuit representing the Universal classifier.

        Args:
            params (array[float]): array of parameters
            x (array[float]): single input vector
            y (array[float]): single output state density matrix

        Returns:
            float: fidelity between output state and input
        """
        # Data re-uploading: the input x is encoded before every trainable
        # rotation layer.
        for p in params:
            qml.Rot(*x, wires=0)
            qml.Rot(*p, wires=0)
        return qml.expval(qml.Hermitian(y, wires=[0]))

    def cost(params, x, y, state_labels=None):
        """Cost function to be minimized.

        Args:
            params (array[float]): array of parameters
            x (array[float]): 2-d array of input vectors
            y (array[float]): 1-d array of targets
            state_labels (array[float]): array of state representations for labels

        Returns:
            float: loss value to be minimized
        """
        # Compute prediction for each input in data batch
        loss = 0.0
        dm_labels = [density_matrix(s) for s in state_labels]
        for i in range(len(x)):
            f = qcircuit(params, x[i], dm_labels[y[i]])
            # Squared infidelity of the output state w.r.t. the target label state.
            loss = loss + (1 - f) ** 2
        return loss / len(x)

    def clf_predict(params, x, state_labels=None):
        """
        Tests on a given set of data.

        Args:
            params (array[float]): array of parameters
            x (array[float]): 2-d array of input vectors
            state_labels (array[float]): 1-d array of state representations for labels

        Returns:
            predicted (array([int]): predicted labels for test data
        """
        dm_labels = [density_matrix(s) for s in state_labels]
        predicted = []
        for i in range(len(x)):
            # Predict the label whose state has the highest fidelity with the output.
            fidel_function = lambda y: qcircuit(params, x[i], y)
            fidelities = [fidel_function(dm) for dm in dm_labels]
            best_fidel = np.argmax(fidelities)
            predicted.append(best_fidel)
        return np.array(predicted)

    def accuracy_score(y_true, y_pred):
        """Accuracy score.

        Args:
            y_true (array[float]): 1-d array of targets
            y_predicted (array[float]): 1-d array of predictions

        Returns:
            score (float): the fraction of correctly classified samples
        """
        score = y_true == y_pred
        return score.sum() / len(y_true)

    def iterate_minibatches(inputs, targets, batch_size):
        """
        A generator for batches of the input data.

        Note: a trailing partial batch (fewer than `batch_size` samples)
        is dropped.

        Args:
            inputs (array[float]): input data
            targets (array[float]): targets

        Returns:
            inputs (array[float]): one batch of input data of length `batch_size`
            targets (array[float]): one batch of targets of length `batch_size`
        """
        for start_idx in range(0, inputs.shape[0] - batch_size + 1, batch_size):
            idxs = slice(start_idx, start_idx + batch_size)
            yield inputs[idxs], targets[idxs]

    def train(X_train, Y_train, learning_rate, X_test):
        # Train using Adam optimizer and evaluate the classifier
        num_layers = 3
        epochs = 2
        batch_size = 32
        accuracy_train_best = 0
        opt = qml.optimize.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999)
        # initialize random weights
        params = np.random.uniform(size=(num_layers, 3))
        for it in range(epochs):
            for Xbatch, ybatch in iterate_minibatches(X_train, Y_train, batch_size=batch_size):
                params = opt.step(lambda v: cost(v, Xbatch, ybatch, state_labels), params)
            predicted_train = clf_predict(params, X_train, state_labels)
            accuracy_train = accuracy_score(Y_train, predicted_train)
            loss = cost(params, X_train, Y_train, state_labels)
            #print(accuracy_train)
            # Keep the best-performing parameters seen so far; stop early on
            # perfect training accuracy.
            if accuracy_train > accuracy_train_best:
                best_params = params
                accuracy_train_best = accuracy_train
            if accuracy_train == 1:
                break
        return clf_predict(best_params, X_test, state_labels)

    # Two one-vs-rest binary problems combined into the three classes -1/0/1.
    # label 1 vs label 0 & -1
    Y_qubit_0 = np.zeros((len(Y_train),), dtype=int)
    Y_qubit_0[Y_train == 1] += 1
    # label -1 vs label 0 & 1
    Y_qubit_1 = np.zeros((len(Y_train),), dtype=int)
    Y_qubit_1[Y_train == -1] += 1

    # qubit 0
    Ypred_1 = train(X_train, Y_qubit_0, 0.6, X_test)
    # qubit 1
    Ypred_min1 = train(X_train, Y_qubit_1, 0.3, X_test)

    # Combine the two binary predictions: +1 where the first fires, -1 where
    # the second fires, 0 otherwise.
    predictions = np.zeros((len(X_test),), dtype=int)
    predictions[Ypred_1 == 1] += 1
    predictions[Ypred_min1 == 1] += -1

    # QHACK #

    return array_to_concatenated_string(predictions)
7af9c56490a782fb32f289042a23871bfb5f0a85
31,137
def preprocess_embedding(z):
    """Pre-process embedding.

    Center the embedding and scale it to lie within [-0.5, 0.5].

    Arguments:
        z: A 2D NumPy array.
            shape=(n_concept, n_dim)

    Returns:
        z_p: A pre-processed embedding with the same shape as `z`.
    """
    # Center embedding.
    z_p = z - np.mean(z, axis=0, keepdims=True)

    # Scale embedding. Guard against a constant embedding, where the maximum
    # absolute deviation is zero: dividing would produce NaNs, so the
    # (all-zero) centered array is returned unscaled in that case.
    max_val = np.max(np.abs(z_p))
    if max_val > 0:
        z_p /= max_val
    z_p /= 2
    return z_p
6927e606ec61b0504e91b5b19e84a819d3e86274
31,138
import os


def get_folders_from_path(path):
    """
    Split a path into its component folders, root-most component first.

    :param path: str
    :return: list<str>
    """
    components = []
    remaining = path
    while True:
        remaining, tail = os.path.split(remaining)
        if tail:
            components.append(tail)
        else:
            # No tail left: keep the root (e.g. "/" or a drive) if any, then stop.
            if remaining:
                components.append(remaining)
            break
    return components[::-1]
4f03c0a1316572c0ab1095ed948c91750a495653
31,139
def T_from_Ts(Ts,P,qt,es_formula=es_default):
    """ Given theta_s solves implicitly for the temperature at some other pressure,
    so that theta_s(T,P,qt) = Ts.

    :param Ts: target theta_s value
    :param P: pressure at which to evaluate the temperature
    :param qt: total water specific humidity
    :param es_formula: saturation vapor-pressure formulation forwarded to
        get_theta_s

    Illustrative example (the original docstring named a different function):
    >>> T_from_Ts(282.75436951,90000,20.e-3)
    290.00
    """
    def zero(T,Ts,P,qt):
        # Residual that vanishes when get_theta_s(T, P, qt) matches Ts.
        return  np.abs(Ts-get_theta_s(T,P,qt,es_formula))
    # Root-find starting from 200 K.
    return optimize.fsolve(zero,   200., args=(Ts,P,qt), xtol=1.e-10)
ae2431127a3d0924da4b1e45d98c72a2c42b7a6c
31,140
import argparse


def _get_cli_parser():
    """
    Create the mideu argparse parser.

    :return: configured ``argparse.ArgumentParser`` with ``extract`` and
        ``convert`` sub-commands wired to their handler functions via
        ``set_defaults(func=...)``.
    """
    parser = argparse.ArgumentParser(
        description="MasterCard IPM file formatter ({version})".format(
            version=_version.get_versions()['version'])
    )
    parser.add_argument("--version", action="version",
                        version="%(prog)s ("+_version.get_versions()['version']+")",
                        help="Get version information")

    subparsers = parser.add_subparsers(help="Sub-command help")

    # Extract command: shares common args and adds extract-specific ones.
    extract_parser = subparsers.add_parser("extract", help="Extract help")
    extract_parser.set_defaults(func=extract_command)
    _add_common_args(extract_parser)
    _add_extract_args(extract_parser)

    # Convert command: only the common args apply.
    convert_parser = subparsers.add_parser("convert", help="Convert help")
    convert_parser.set_defaults(func=convert_command)
    _add_common_args(convert_parser)

    return parser
fe9294257c8fec87e0705fe8920a8585bea00465
31,141
from typing import Tuple


def find_global_peaks_integral(
    cms: tf.Tensor, crop_size: int = 5, threshold: float = 0.2
) -> Tuple[tf.Tensor, tf.Tensor]:
    """Find global peaks and refine them with integral regression.

    The refinement takes a weighted average over the local neighborhood
    around each rough peak.

    Args:
        cms: Confidence maps. Tensor of shape (samples, height, width, channels).
        crop_size: Size of patches to crop around each rough peak as an integer
            scalar.
        threshold: Minimum confidence threshold. Peaks with values below this
            will be set to NaNs.

    Returns:
        A tuple of (peak_points, peak_vals).

        peak_points: float32 tensor of shape (samples, channels, 2), where the
        last axis indicates peak locations in xy order.

        peak_vals: float32 tensor of shape (samples, channels) containing the
        values at the peak points.
    """
    refined_peaks = find_global_peaks(
        cms,
        threshold=threshold,
        refinement="integral",
        integral_patch_size=crop_size,
    )
    return refined_peaks
22529379fdf1e685b33319e0432a95ee51973575
31,142
def test_data(test_data_info, pytestconfig):
    """Fixture provides test data loaded from files automatically.

    When ``test_data_info.subdirs`` is set, returns one inner list of data per
    sub-directory (in subdir order); otherwise a single flat list loaded from
    ``test_data_info.datadir``.
    """
    # Glob-style pattern selecting which data files to load.
    data_pattern = pytestconfig.getoption(constants.OPT_DATA_PATTERN)
    if test_data_info.subdirs:
        return list(
            list(loader.each_data_under_dir(
                test_data_info.datadir / subdir, data_pattern
            ))
            for subdir in test_data_info.subdirs
        )
    return list(
        loader.each_data_under_dir(test_data_info.datadir, data_pattern)
    )
639c9b16b9f6e1a4bbb5422d0ce29850535a267b
31,143
def load_maggic_data():
    """Load MAGGIC data.

    Reads ``data/Maggic.csv``, drops rows containing any NA value, and removes
    the label columns.

    Returns:
        orig_data: Original data in pandas dataframe
    """
    # Read csv file from the fixed project-relative location.
    frame = pd.read_csv('data/Maggic.csv', sep=',')
    # Drop incomplete rows, then strip the outcome/label columns.
    frame = frame.dropna(axis=0, how='any')
    return frame.drop(['death_all', 'days_to_fu'], axis=1)
b4d1c219ef892c9218fd45aba22c859c96954853
31,144
def create_res_hessian_computing_tf_graph(input_shape, layer_kernel, layer_stride):
    """ This function creates the TensorFlow (v1) graph for computing the
    hessian matrix for a res layer.
    Step 1: It first extracts image patches using tf.extract_image_patches.
    Step 2: Then calculates the hessian matrix by outer product.

    Args:
        input_shape: the dimension of input
        layer_kernel: kernel size of the layer
        layer_stride: stride of the layer
    Output:
        input_holder: TensorFlow placeholder for layer input
        get_hessian_op: A TensorFlow operator to calculate hessian matrix
    """
    input_holder = tf.placeholder(dtype=tf.float32, shape=input_shape)
    patches = tf.extract_image_patches(images = input_holder,
                                       ksizes = [1, layer_kernel, layer_kernel, 1],
                                       strides = [1, layer_stride, layer_stride, 1],
                                       rates = [1, 1, 1, 1],
                                       padding = 'SAME')
    # Fix: the original used Python-2-only print statements, which are a
    # SyntaxError under Python 3; print() works on both.
    print('Patches shape: %s' % patches.get_shape())
    # Outer product of every patch with itself via broadcasting.
    a = tf.expand_dims(patches, axis=-1)
    b = tf.expand_dims(patches, axis=3)
    outprod = tf.multiply(a, b)
    # print('outprod shape: %s' % outprod.get_shape())
    # Average over batch and spatial positions to get the hessian estimate.
    get_hessian_op = tf.reduce_mean(outprod, axis=[0, 1, 2])
    print('Hessian shape: %s' % get_hessian_op.get_shape())
    return input_holder, get_hessian_op
aaf5f52b32f1f8f67d54d70c10c706dc26b91a75
31,145
def loadUiType(uiFile):
    """
    Pyside lacks the "loadUiType" command, so we have to convert the ui file
    to py code in-memory first and then execute it in a special frame to
    retrieve the form_class.
    http://tech-artists.org/forum/showthread.php?3035-PySide-in-Maya-2013

    :param uiFile: path to a Qt Designer .ui file
    :return: (form_class, base_class) tuple for the generated UI
    """
    parsed = xml.parse(uiFile)
    widget_class = parsed.find('widget').get('class')
    form_class = parsed.find('class').text

    with open(uiFile, 'r') as f:
        o = StringIO()
        frame = {}

        pysideuic.compileUi(f, o, indent=0)
        pyc = compile(o.getvalue(), '<string>', 'exec')
        # Fix: call form of exec works on Python 2 and 3; the original used
        # the py2-only statement `exec pyc in frame`.
        exec(pyc, frame)

        # Fetch the base_class and form class based on their type in the xml
        # from designer
        form_class = frame['Ui_%s' % form_class]
        # getattr avoids eval() on a string read from the .ui file.
        base_class = getattr(QtGui, widget_class)
    return form_class, base_class
2b5bfcdefb0d1c0fb361a4d080eea0b0cc927599
31,146
def ldns_native2rdf_int8(*args):
    """LDNS buffer.

    Thin wrapper that forwards all positional arguments to the SWIG-generated
    C binding ``_ldns.ldns_native2rdf_int8`` and returns its result unchanged.
    """
    return _ldns.ldns_native2rdf_int8(*args)
21e295dcd9a67018bd673dc5ff605e80707d9736
31,147
import re


def _ListProcesses(args, req_vars):  # pylint: disable=W0613
    """Lists processes and their CPU / mem stats.

    The response is formatted according to the Google Charts DataTable format.
    """
    device = _GetDevice(args)
    if not device:
        return _HTTP_GONE, [], 'Device not found'
    columns = [
        {'label': 'Pid', 'type': 'number'},
        {'label': 'Name', 'type': 'string'},
        {'label': 'Cpu %', 'type': 'number'},
        {'label': 'Mem RSS Kb', 'type': 'number'},
        {'label': '# Threads', 'type': 'number'},
    ]
    rows = []
    for process in device.ListProcesses():
        # Exclude system apps if the request didn't contain the ?all=1 arg.
        if not req_vars.get('all') and not re.match(_APP_PROCESS_RE, process.name):
            continue
        stats = process.GetStats()
        cell_values = [process.pid, process.name, stats.cpu_usage,
                       stats.vm_rss, stats.threads]
        rows.append({'c': [{'v': value, 'f': None} for value in cell_values]})
    return _HTTP_OK, [], {'cols': columns, 'rows': rows}
636f91e3dcf6b236081787096c12eadf956c9024
31,148
def _runForRunnerUser(resourceDB: ResourceDB, user: User) -> TaskRun:
    """Returns the task run accessible to the given user.

    Raises AccessDenied if the user does not represent a Task Runner
    or the Task Runner is not running any task.
    """
    # Only token-authenticated users can represent a Task Runner.
    if not isinstance(user, TokenUser):
        raise AccessDenied('This operation is exclusive to Task Runners')
    # Resolve the Task Runner behind the token.
    try:
        runner = resourceDB.runnerFromToken(user)
    except KeyError as ex:
        raise AccessDenied(*ex.args) from ex

    # The runner must currently be executing a task.
    run = runner.getRun()
    if run is None:
        raise AccessDenied('Idle Task Runner cannot access jobs')
    return run
e3b89ed95bea8c50be885ef56578e23919a7a25c
31,149
def narrow(lon='auto',lat='auto',ax=None,lfactor=1,**kwargs):
    """
    Plot north arrow.

    Parameters:
        lon: Starting longitude (decimal degrees) for arrow
        lat: Starting latitude (decimal degrees) for arrow
        ax: Axes on which to plot arrow
        lfactor: Length factor to increase/decrease arrow length

    Returns:
        ax: Axes with arrow plotted
    """
    if ax is None:
        ax = plt.gca()

    geodesic = cgeo.Geodesic() # Set up geodesic calculations

    # Get map projection from axes
    crs = ax.projection

    if (lon=='auto')&(lat=='auto'):
        # Default anchor: the point 20% in from the lower-left axes corner,
        # converted from axes coordinates to lon/lat.
        trans = ax.transAxes + ax.transData.inverted()
        sx,sy = trans.transform((0.2,0.2))
        lon,lat = ccrs.Geodetic().transform_point(sx,sy,src_crs=crs)

    # Get geodetic projection for lat/lon - do not confuse with geodesic
    gdet = ccrs.Geodetic()

    # Get axes extent and convert to lat/lon
    x1,x2,y1,y2 = ax.get_extent()
    tlx,tly = gdet.transform_point(x1,y2,src_crs=crs)
    blx,bly = gdet.transform_point(x1,y1,src_crs=crs)
    # Latitude span of the plot in decimal degrees (left edge, top vs bottom).
    diff = abs(bly-tly)

    # Get arrow endpoint, due north (azimuth 0), scaled by diff and lfactor
    end = geodesic.direct(
        points=[lon,lat],azimuths=0,distances=lfactor*diff*2*10**4)[0]

    # Transform lat-lon into axes coordinates
    xstart,ystart = crs.transform_point(lon,lat,src_crs=ccrs.Geodetic())

    # Get X-Y coordinates of endpoint
    xend,yend = crs.transform_point(end[0],end[1],src_crs=ccrs.Geodetic())

    # Plot arrow as annotation
    ax.annotate("",xy=(xstart,ystart),xycoords='data',xytext=(xend,yend),
                textcoords='data',arrowprops=dict(arrowstyle="<|-",
                connectionstyle="arc3"))

    # Add N to arrow
    ax.text(xend,yend,'N',fontsize=7,ha='center')

    return(ax)
8d3162ab05366472b607ff953811caf2796c1e3e
31,150
from typing import List
from pathlib import Path


def dump_content_descriptor(artifact_manager: ArtifactsManager) -> ArtifactsReport:
    """
    Dump content/content_descriptor.json into:
        1. content_test
        2. content_new
        3. content_all

    Args:
        artifact_manager: Artifacts manager object.

    Returns:
        ArtifactsReport: ArtifactsReport object.

    Notes:
        1. content_descriptor.json created during build run time.
    """
    report = ArtifactsReport("Content descriptor:")
    if not artifact_manager.only_content_packs and artifact_manager.content.content_descriptor:
        descriptor = artifact_manager.content.content_descriptor
        object_report = ObjectReport(descriptor, content_test=True, content_new=True, content_all=True)
        created_files: List[Path] = []
        destinations = (
            artifact_manager.content_test_path,
            artifact_manager.content_new_path,
            artifact_manager.content_all_path,
        )
        # Link/copy the descriptor into each destination, threading the list
        # of already-created files through each call.
        for destination in destinations:
            created_files = dump_link_files(artifact_manager, descriptor, destination, created_files)
        report.append(object_report)

    return report
e39bc38c72cd3f7b555d410d037ef1df3c747933
31,151
def parse_movie(line, sep='::'):
    """
    Parses a movie line.

    :param line: raw record with fields separated by ``sep``
    :param sep: field separator (default ``'::'``)
    :returns: tuple of (movie_id, title)
    """
    fields = line.strip().split(sep)
    # First field is the numeric id, second the title; any extra fields are
    # ignored.
    return int(fields[0]), fields[1]
9d7a13ca3ddf823ff22582f648434d4b6df00207
31,152
def make_plot(dataset, plot_label, xlabel, ylabel, legend):
    """
    Generates a MatPlotLib plot from the specified dataset and with the
    specified labeling features.

    make_plot(dataset, plot_label, xlabel, ylabel, legend) -> plt

    @type dataset: list of list
    @param dataset: formatted dataset.
    @type plot_label: string
    @param plot_label: plot title.
    @type xlabel: string
    @param xlabel: x-axis plot label.
    @type ylabel: string
    @param ylabel: y-axis plot label
    @type legend: tuple
    @param legend: plot legend.
    @rtype: MatPlotLib plot
    @return: plot
    """
    figure = plt.figure()
    axes = figure.add_axes([0.1, 0.1, 0.6, 0.75])
    plt.title(plot_label)
    axes.set_xlabel(xlabel)
    axes.set_ylabel(ylabel)
    # One curve per (series, legend-entry) pair; series is (x-values, y-values).
    for series, series_label in zip(dataset, legend):
        axes.plot(series[0], series[1], label = series_label)
    # Legend placed outside the axes, to the upper right.
    axes.legend(bbox_to_anchor = (1.05, 1), loc = 2, borderaxespad = 0.)
    return plt
0e5ff583c65dcd3751354a416d9940a12871e37d
31,153
def flatten_list(in_list: typ.List) -> typ.List:
    """Flattens arbitrarily nested lists into a single flat list.

    Non-list elements are kept as-is; order is preserved (depth-first).
    """
    flat: typ.List = []
    # Explicit stack instead of recursion; reversed so pop() yields items
    # in original left-to-right order.
    stack = list(reversed(in_list))
    while stack:
        item = stack.pop()
        if isinstance(item, list):
            stack.extend(reversed(item))
        else:
            flat.append(item)
    return flat
9d847fea7f1eb30ecbd51dd1d001c8661a502a0d
31,154
import logging


def formatMessage(data):
    """Format an incoming Meraki alert into a Discord-ready message string."""
    logging.info("Formatting message payload...")
    # occurredAt is ISO-like: "<date>T<time...>"; keep only HH:MM:SS of the time.
    stamp = (data.occurredAt).split("T")
    lines = [":alarm_clock: __**Meraki Alert**__ :alarm_clock: "]
    lines.append(f"**Device:** {data.deviceName}")
    lines.append(f"**Message info:** {data.alertType}")
    lines.append(f"**Occurred at:** {stamp[0]} - {stamp[1][:8]}")
    if len(data.alertData) > 0:
        lines.append(f"**Additional data:** ```fix\r\n{data.alertData}\r\n```")
    # Every line, including the last, is terminated with CRLF.
    return "\r\n".join(lines) + "\r\n"
21d7a50951aeecb6917479622f4131b7ddcfda00
31,155
def _convert_nearest_neighbors(operator, container, k=None, radius=None):
    """
    Common parts to regressor and classifier. Let's denote
    *N* as the number of observations, *k* the number of neighbours.
    It returns the following intermediate results:

    top_indices: [N, k] (int64), best indices for every observation
    top_distances: [N, k] (dtype), float distances for every observation,
        it can be None if the weights are uniform
    top_labels: [N, k] (label type), labels associated to every top index
    weights: [N, k] (dtype), if top_distances is not None, returns weights
    norm: [N] (dtype), if top_distances is not None, returns normalized weights
    axis: 1 if there is one dimension only, 2 if this is a multi-regression
        or a multi classification
    """
    X = operator.inputs[0]
    op = operator.raw_operator
    opv = container.target_opset
    # Only float32/float64 computation graphs are produced.
    dtype = guess_numpy_type(X.type)
    if dtype != np.float64:
        dtype = np.float32
    proto_type = guess_proto_type(X.type)
    if proto_type != onnx_proto.TensorProto.DOUBLE:
        proto_type = onnx_proto.TensorProto.FLOAT

    # Integer inputs are cast to the float proto type before distance math.
    if isinstance(X.type, Int64TensorType):
        X = OnnxCast(X, to=proto_type, op_version=opv)

    options = container.get_options(op, dict(optim=None))

    # Single-output regression/classification if _y is absent, 1D, or Nx1.
    single_reg = (not hasattr(op, '_y') or len(op._y.shape) == 1 or
                  len(op._y.shape) == 2 and op._y.shape[1] == 1)
    ndim = 1 if single_reg else op._y.shape[1]

    metric = (op.effective_metric_ if hasattr(op, 'effective_metric_')
              else op.metric)
    neighb = op._fit_X.astype(dtype)

    # Defensive check: a fitted estimator exposes either n_neighbors (k-NN)
    # or radius (radius-NN), not both.
    if (hasattr(op, 'n_neighbors') and op.n_neighbors is not None and
            hasattr(op, 'radius') and op.radius is not None):
        raise RuntimeError(
            "The model defines radius and n_neighbors at the "
            "same time ({} and {}). "
            "This case is not supported.".format(
                op.radius, op.n_neighbors))

    if hasattr(op, 'n_neighbors') and op.n_neighbors is not None:
        k = op.n_neighbors if k is None else k
        radius = None
    elif hasattr(op, 'radius') and op.radius is not None:
        k = None
        radius = op.radius if radius is None else radius
    else:
        raise RuntimeError(
            "Cannot convert class '{}'.".format(op.__class__.__name__))

    training_labels = op._y if hasattr(op, '_y') else None
    distance_kwargs = {}
    if metric == 'minkowski':
        # minkowski with p == 2 is plain euclidean; otherwise forward p.
        if op.p != 2:
            distance_kwargs['p'] = op.p
        else:
            metric = "euclidean"

    weights = op.weights if hasattr(op, 'weights') else 'distance'
    binary = None
    if weights == 'uniform' and radius is None:
        # k-NN with uniform weights: only the neighbor indices are needed.
        top_indices = onnx_nearest_neighbors_indices_k(
            X, neighb, k, metric=metric, dtype=dtype,
            op_version=opv, optim=options.get('optim', None),
            **distance_kwargs)
        top_distances = None
    elif radius is not None:
        # radius-NN: also returns a binary membership mask for the ball.
        three = onnx_nearest_neighbors_indices_radius(
            X, neighb, radius, metric=metric, dtype=dtype,
            op_version=opv, keep_distances=True,
            proto_dtype=proto_type,
            optim=options.get('optim', None),
            **distance_kwargs)
        top_indices, top_distances, binary = three
    elif weights == 'distance':
        # k-NN with distance weighting: distances are kept for the weights.
        top_indices, top_distances = onnx_nearest_neighbors_indices_k(
            X, neighb, k, metric=metric, dtype=dtype,
            op_version=opv, keep_distances=True,
            optim=options.get('optim', None),
            **distance_kwargs)
    else:
        raise RuntimeError(
            "Unable to convert KNeighborsRegressor when weights is callable.")

    if training_labels is not None:
        if ndim > 1:
            training_labels = training_labels.T
            axis = 2
        else:
            training_labels = training_labels.ravel()
            axis = 1
        if opv >= 9:
            # Reshape target: [ndim, N, k] for multi-output, else [N, k].
            kor = k if k is not None else training_labels.shape[-1]
            if ndim > 1:
                shape = np.array([ndim, -1, kor], dtype=np.int64)
            else:
                shape = np.array([-1, kor], dtype=np.int64)
        else:
            raise RuntimeError(
                "Conversion of a KNeighborsRegressor for multi regression "
                "requires opset >= 9.")

        if training_labels.dtype == np.int32:
            training_labels = training_labels.astype(np.int64)
        # Gather the labels of the selected neighbors, then restore the
        # [N, k] (or [N, ndim, k]) layout.
        flattened = OnnxFlatten(top_indices, op_version=opv)
        extracted = OnnxArrayFeatureExtractor(
            training_labels, flattened, op_version=opv)
        reshaped = OnnxReshape(extracted, shape, op_version=opv)
        if ndim > 1:
            reshaped = OnnxTranspose(reshaped, op_version=opv, perm=[1, 0, 2])
        reshaped.set_onnx_name_prefix('knny')
    else:
        reshaped = None
        axis = 1

    if binary is not None:
        if op.weights == 'uniform':
            wei = binary
        else:
            # Clamp distances below 1e-6 before inverting to avoid 1/0.
            modified = OnnxMax(top_distances, np.array([1e-6], dtype=dtype),
                               op_version=opv)
            wei = OnnxMul(binary, OnnxReciprocal(modified, op_version=opv),
                          op_version=opv)
        norm = OnnxReduceSum(wei, op_version=opv, axes=[1], keepdims=0)
    elif top_distances is not None:
        # Same clamped inverse-distance weighting for the k-NN case.
        modified = OnnxMax(top_distances, np.array([1e-6], dtype=dtype),
                           op_version=opv)
        wei = OnnxReciprocal(modified, op_version=opv)
        norm = OnnxReduceSum(wei, op_version=opv, axes=[1], keepdims=0)
    else:
        norm = None
        wei = None
    if wei is not None:
        wei.set_onnx_name_prefix('wei')
    if norm is not None:
        norm.set_onnx_name_prefix('norm')
    return top_indices, top_distances, reshaped, wei, norm, axis
c328d2f78467df05ddcfd83186e0704e234eabd8
31,156
import click


def interface_alias_to_name(config_db, interface_alias):
    """Return default interface name if alias name is given as argument.

    :param config_db: ConfigDBConnector instance, or None to derive one from
        the interface's namespace.
    :param interface_alias: alias, optionally carrying a sub-interface suffix
        (``<alias><sep><vlan>``) which is preserved on the returned name.
    :return: the port name, the alias unchanged if it has no PORT entry, or
        None if no namespace can be determined.
    """
    vlan_id = ""
    sub_intf_sep_idx = -1
    if interface_alias is not None:
        # Split off an optional sub-interface suffix before the lookup.
        sub_intf_sep_idx = interface_alias.find(VLAN_SUB_INTERFACE_SEPARATOR)
        if sub_intf_sep_idx != -1:
            vlan_id = interface_alias[sub_intf_sep_idx + 1:]
            # interface_alias holds the parent port name so the subsequent logic still applies
            interface_alias = interface_alias[:sub_intf_sep_idx]

    # If the input parameter config_db is None, derive it from interface.
    # In single ASIC platform, get_port_namespace() returns DEFAULT_NAMESPACE.
    if config_db is None:
        namespace = get_port_namespace(interface_alias)
        if namespace is None:
            return None
        config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
        config_db.connect()

    port_dict = config_db.get_table('PORT')
    if interface_alias is not None:
        if not port_dict:
            click.echo("port_dict is None!")
            raise click.Abort()
        # Linear scan of PORT entries for a matching alias.
        for port_name in port_dict:
            if interface_alias == port_dict[port_name]['alias']:
                return port_name if sub_intf_sep_idx == -1 else port_name + VLAN_SUB_INTERFACE_SEPARATOR + vlan_id

    # Interface alias not in port_dict, just return interface_alias, e.g.,
    # portchannel is passed in as argument, which does not have an alias
    return interface_alias if sub_intf_sep_idx == -1 else interface_alias + VLAN_SUB_INTERFACE_SEPARATOR + vlan_id
6cac69be850a65dda505999616e4d21d0a8c9438
31,157
import time


def twait(phrase, tn, tout=-1, logging='off', rcontent=False, screenprint=False):
    """Wait on a telnetlib-like connection until `phrase` appears.

    `phrase` may be a single string (returns 1 on match) or a list of strings
    (returns the 1-based index of the first matching entry). With `tout` set,
    returns 0 once more than `tout` seconds have elapsed. With `rcontent`,
    every return value becomes a (code, collected_output) tuple.
    """
    collected = ' '
    epoch_start = int(time.time())
    while True:
        if tout != -1:
            # Give up once more than `tout` seconds have passed since entry.
            if (int(time.time()) - epoch_start) > tout:
                if logging == 'on':
                    # The -e-e-> prefix differentiates tool output from device output.
                    if screenprint:
                        print('-e-e->It has been ' + str(tout) + ' seconds. Timeout!')
                if rcontent:
                    return 0, collected
                return 0

        # Non-blocking read of whatever the device has sent so far.
        chunk = tn.read_very_eager().decode().strip()
        if chunk != '':
            collected += chunk
            if screenprint:
                print(chunk, end='')

        if isinstance(phrase, str):
            if phrase in chunk:
                return (1, collected) if rcontent else 1
        if isinstance(phrase, list):
            # Report the 1-based position of the first matching phrase.
            for position, candidate in enumerate(phrase, start=1):
                if candidate in chunk:
                    return (position, collected) if rcontent else position
9c4308e873321fd556d8eec2668981fc2843ae87
31,158
def process_ob(obs : dict) -> dict:
    """
    Processes an individual post.

    :param obs: video observation metadata
    :returns: processed post metadata based on tags in posts_titles
    :raises Exception: when the post's folders/title do not match an accepted
        observation category.
    """
    # Copy the whitelisted tags, then merge metadata derived from the post's
    # children and history.
    metadata = {tag: obs[tag] for tag in obs_tags}
    metadata.update(process_obs(obs["children"], obs["history"]))
    # Composite key pairing a student with a section date for downstream joins.
    metadata.update({"uid+date": metadata["student uid"] + metadata["section date"]})
    # for k,v in metadata.items():
    #     print(k, v)
    folders = metadata["folders"]
    # Accept only peer/video observations that are not logistics or
    # self-reflection make-up posts.
    if "peer_observation" in folders or "video observation" in metadata["title"].lower():
        if "logistics" not in folders and "self_reflection_makeup" not in folders :
            return metadata
    raise Exception(metadata["title"], " is in folders ", metadata["folders"],
                    " and was not included in csv")
e5200443d6ba6e69d49fb56d377f4a9992f157bb
31,159
from beta import calc_beta_eta


def calc_detectable_frac(gen, model, args, gen2=None, swap_h0_and_h1=False, verbose=0):
    """
    :param gen: sample generator
    :param model: NN model
    :param args: parameters object (should contain alpha and beta attributes)
    :param gen2: if gen2 is not None gen output is used for 0-hypothesis and gen2
        for alternative otherwise frac > 0 condition is used
    :param swap_h0_and_h1: if True, swaps the roles of args.alpha and args.beta
        (see NOTE below)
    :param verbose: verbosity level forwarded to calc_beta_eta
    :return: (frac, alpha) minimal fraction of source events in alternative (gen2)
        hypothesis and precise alpha or (1., 1.) if detection is impossible
    """
    if swap_h0_and_h1:
        _alpha = args.beta
        _beta = args.alpha
    else:
        _alpha = args.alpha
        _beta = args.beta
    # NOTE(review): _alpha/_beta are computed above but never used — the call
    # below always passes args.alpha/args.beta regardless of swap_h0_and_h1.
    # Confirm whether the swapped values were meant to be forwarded here.
    fracs, beta, th_eta = calc_beta_eta(gen, model, args.alpha, gen2=gen2,
                                        beta_threshold=args.beta, verbose=verbose)
    return th_eta, args.alpha
6844abc148cee69453eb3136e018a40eccb40334
31,160
import numpy as np
from typing import Tuple


def similarity_std(
        buffer: NumpyNDArray,
        wav_file: WavFile,
        rate: int,
        threshold: float
) -> Tuple[bool, float, float]:
    """Check the similarity of a recorded sound buffer and a given wav_file.
    Use a correlation check using the standard deviation.

    :param buffer: recorded samples; only the first ``wav_file.signal_length``
        entries are used.
    :param wav_file: reference with precomputed ``signal_fft``.
    :param rate: sampling rate forwarded to ``perform_fft``.
    :param threshold: minimum correlation coefficient to count as a match.
    :return: (is_similar, correlation_coefficient, threshold)
    """
    # FFT of the live buffer, truncated to the reference signal length.
    _, _, current_fft = perform_fft(buffer[:wav_file.signal_length], rate, True)
    # Cross-correlation of the two std-normalized spectra, scaled by length.
    corrcoef = np.correlate(
        wav_file.signal_fft / wav_file.signal_fft.std(),
        current_fft / current_fft.std()
    )[0] / len(wav_file.signal_fft)
    return corrcoef >= threshold, corrcoef, threshold
8900495860f59b39be3f5f3808bb321e0275bc81
31,161
def grdtrack(points, grid, newcolname=None, outfile=None, **kwargs):
    """
    Sample grids at specified (x,y) locations.

    Grdtrack reads one or more grid files and a table with (x,y) [or (lon,lat)]
    positions in the first two columns (more columns may be present). It
    interpolates the grid(s) at the positions in the table and writes out the
    table with the interpolated values added as (one or more) new columns. A
    bicubic [Default], bilinear, B-spline or nearest-neighbor interpolation is
    used, requiring boundary conditions at the limits of the region (see
    *interpolation*; Default uses “natural” conditions (second partial
    derivative normal to edge is zero) unless the grid is automatically
    recognized as periodic.)

    Full option list at :gmt-docs:`grdtrack.html`

    {aliases}

    Parameters
    ----------
    points : pandas.DataFrame or str
        Either a table with (x, y) or (lon, lat) values in the first two
        columns, or a filename (e.g. csv, txt format). More columns may be
        present.

    grid : xarray.DataArray or str
        Gridded array from which to sample values from, or a filename (netcdf
        format).

    newcolname : str
        Required if 'points' is a pandas.DataFrame. The name for the new
        column in the track pandas.DataFrame table where the sampled values
        will be placed.

    outfile : str
        Required if 'points' is a file. The file name for the output ASCII
        file.

    {V}
    {n}

    Returns
    -------
    track: pandas.DataFrame or None
        Return type depends on whether the outfile parameter is set:

        - pandas.DataFrame table with (x, y, ..., newcolname) if outfile is
          not set
        - None if outfile is set (track output will be stored in outfile)
    """
    with GMTTempFile(suffix=".csv") as tmpfile:
        with Session() as lib:
            # Store the pandas.DataFrame points table in virtualfile
            if data_kind(points) == "matrix":
                if newcolname is None:
                    raise GMTInvalidInput("Please pass in a str to 'newcolname'")
                table_context = lib.virtualfile_from_matrix(points.values)
            elif data_kind(points) == "file":
                if outfile is None:
                    raise GMTInvalidInput("Please pass in a str to 'outfile'")
                table_context = dummy_context(points)
            else:
                raise GMTInvalidInput(f"Unrecognized data type {type(points)}")

            # Store the xarray.DataArray grid in virtualfile
            if data_kind(grid) == "grid":
                grid_context = lib.virtualfile_from_grid(grid)
            elif data_kind(grid) == "file":
                grid_context = dummy_context(grid)
            else:
                raise GMTInvalidInput(f"Unrecognized data type {type(grid)}")

            # Run grdtrack on the temporary (csv) points table
            # and (netcdf) grid virtualfile
            with table_context as csvfile:
                with grid_context as grdfile:
                    kwargs.update({"G": grdfile})
                    if outfile is None:
                        # Output to tmpfile if outfile is not set
                        outfile = tmpfile.name
                    arg_str = " ".join(
                        [csvfile, build_arg_string(kwargs), "->" + outfile]
                    )
                    lib.call_module(module="grdtrack", args=arg_str)

        # Read temporary csv output to a pandas table
        if outfile == tmpfile.name:
            # if user did not set outfile, return pd.DataFrame
            column_names = points.columns.to_list() + [newcolname]
            result = pd.read_csv(tmpfile.name, sep="\t", names=column_names)
        elif outfile != tmpfile.name:
            # return None if outfile set, output in outfile
            result = None

    return result
ed6a09c4f925321520c99e2a941602e82852bec3
31,162
def add(x, y):
    """Return the sum of two values.

    :param x: first addend
    :param y: second addend
    :returns: x + y
    """
    return x + y
9c5fe5867e0c345bd3e7439b8fc76c21b20b2c35
31,163
def is_exponential(character: str) -> bool:
    """
    Whether an IPA character is written above the base line
    and to the right of the previous character,
    like how exponents of a power are written
    in mathematical notation.

    :param character: a single IPA character
    :returns: True if the character is a superscript ("exponential") one
    """
    # `exponentials` is a module-level collection of superscript IPA characters.
    return character in exponentials
0d263a0969454ad9d8e7a3a53e393b55b5c8d45c
31,164
def _union_items(baselist, comparelist): """Combine two lists, removing duplicates.""" return list(set(baselist) | set(comparelist))
782f325960db2482afc75e63dbc8a51fea24c8d0
31,165
import os


def setLanguage(languageName):
    """
    setLanguage(languageName)

    Set the language for the app. Loads qt and pyzo translations.
    Returns the QLocale instance to pass to the main widget.

    :param languageName: name of the language to activate (resolved via getLocale)
    :returns: the QtCore.QLocale instance for the selected language
    """
    # Get locale
    locale = getLocale(languageName)

    # Get paths were language files are
    qtTransPath = str(
        QtCore.QLibraryInfo.location(QtCore.QLibraryInfo.TranslationsPath)
    )
    pyzoTransPath = os.path.join(pyzo.pyzoDir, "resources", "translations")

    # Get possible names for language files
    # (because Qt's .tr files may not have the language component.)
    localeName1 = locale.name()
    localeName2 = localeName1.split("_")[0]

    # Uninstall translators installed by a previous call; the list is kept
    # on QtCore so it survives across calls to this function.
    if not hasattr(QtCore, "_translators"):
        QtCore._translators = []
    for trans in QtCore._translators:
        QtWidgets.QApplication.removeTranslator(trans)

    # The default language
    if localeName1 == "C":
        return locale

    # Set Qt translations
    # Note that the translator instances must be stored
    # Note that the load() method is very forgiving with the file name
    for what, where in [("qt", qtTransPath), ("pyzo", pyzoTransPath)]:
        trans = QtCore.QTranslator()
        # Try loading both names (full locale first, then language only)
        for localeName in [localeName1, localeName2]:
            success = trans.load(what + "_" + localeName + ".tr", where)
            if success:
                QtWidgets.QApplication.installTranslator(trans)
                QtCore._translators.append(trans)
                print("loading %s %s: ok" % (what, languageName))
                break
        else:
            print("loading %s %s: failed" % (what, languageName))

    # Done
    return locale
0b70e774636936f6c4df68440605c369788676fd
31,166
def broadcast_state(state=current_state, ip=ip_osc, port=port_client):
    """
    Broadcasts state over OSC (sending is currently commented out).

    NOTE(review): the defaults are bound once at import time to the
    module-level `current_state`, `ip_osc` and `port_client`; later changes
    to those globals are not picked up by calls relying on defaults —
    confirm this is intended.
    """
    print("Called Broadcast State Function")
    #client = udp_client.UDPClient(ip, port,1)
    #builder = osc_message_builder.OscMessageBuilder(address='/status')
    #for k,v in state.items():
    #    builder.add_arg(v)
    #client.send(builder.build())
    #print("sent {0} to {1}:{2}".format(builder.args, ip, port))
    return None
be8b391cbd033b7271796ec47529b8ca78f85bd5
31,167
def make_gaussian(shape, var):
    """Return a 2D Gaussian of the given shape and variance.

    The Gaussian is centred at (h//2, w//2) and min-max normalized to [0, 1].
    """
    height, width = shape
    xs = np.arange(width, dtype=float)
    ys = np.arange(height, dtype=float)[:, np.newaxis]
    center_x = width // 2
    center_y = height // 2
    # Isotropic Gaussian: exp(-(dx^2 + dy^2) / (2 * var))
    gaussian = np.exp(-0.5 * (pow(xs - center_x, 2) + pow(ys - center_y, 2)) / var)
    normalized_img = np.zeros((height, width))
    cv2.normalize(gaussian, normalized_img, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)
    return normalized_img
88500f08c43b446ea073fef322f2007ea4032875
31,168
from typing import Optional
from typing import Callable
import os


def add_button(_editor: Editor, /, *,  # Keyword-only arguments
               icon: Optional[str] = None,
               cmd: Optional[str] = None,  # Defaults to calling get_unique_cmd
               func: Callable[[Editor], None],
               tip: Optional[str] = "",
               label: Optional[str] = "",
               id: Optional[str] = None,
               toggleable: Optional[bool] = False,
               keys: Optional[str] = None,
               disables: Optional[bool] = True,
               rightside: Optional[bool] = True
               ) -> str:
    """
    Wraps Editor.addButton to make all arguments except `func` optional

    Also munges icon path into absolute path, adds hotkey to tooltip

    :param _editor: editor instance the button is added to
    :param icon: icon file name; relative paths resolve under USER_FILES_DIR
    :param cmd: command name; auto-generated via get_unique_cmd when omitted
    :param func: callback invoked with the editor when the button is pressed
    :returns: whatever Editor.addButton returns (the button markup)
    """
    return _editor.addButton(
        # Resolve relative icon paths against the add-on's user files dir.
        icon=(
            icon
            if os.path.isabs(icon)
            else os.path.join(USER_FILES_DIR, icon)
        ) if icon else icon,
        cmd=cmd if cmd else get_unique_cmd(),
        func=func,
        # Append the hotkey to the tooltip, e.g. "Do thing (Ctrl+T)".
        tip=tip + (" " if tip else "") + (f"({keys})" if keys else ""),
        label=label,
        id=id,
        toggleable=toggleable,
        keys=keys,
        disables=disables,
        rightside=rightside,
    )
bb4c740823fc15d8897d09abdf4dfbdf4938be43
31,169
def analysis_iou_on_miyazakidata(result_path, pattern="GT_o"):
    """Evaluate IoU on the final combined result images.

    (Translated from Chinese; the original note said "the final 4 combined
    images" but the loop covers cases 1-20 — confirm which is intended.)

    :param result_path: directory holding the result images
    :param pattern: "<a>_<b>" naming the two image variants to compare;
        each part is resolved through the module-level `pattern_dict`
    :returns: list of per-image IoU values (also prints the mean)
    """
    ious = []
    pattern1, pattern2 = pattern.strip().split("_")
    pattern1 = pattern_dict[pattern1]
    pattern2 = pattern_dict[pattern2]
    # Test cases are numbered 1..20.
    for i in range(1, 21):
        image1 = get_image(result_path, i, pattern1)
        image2 = get_image(result_path, i, pattern2)
        # handled_gt_path = os.path.join(result_path, "{}_mask.png".format(i))
        # gt_path = os.path.join(result_path, "{}_M.png".format(i))
        # image_path = os.path.join(result_path, "{}_testT.png".format(i))
        # gt_path = np.array(Image.open(gt_path))
        # image_path = np.array(Image.open(image_path))
        ious.append(calcu_iou(image1, image2))
    print(ious)
    print("average of iou is {:.4f}".format(np.mean(ious)))
    return ious
5099cde71284c0badae64e701a2362b30f87ad4c
31,170
def min_max_rdist(node_data, x: FloatArray):
    """Compute the squared minimum and maximum distance between a point and
    a node's axis-aligned bounding box.

    :param node_data: (lower_bounds, upper_bounds) pair describing the box
    :param x: query point
    :returns: (min_dist, max_dist) squared distances
    """
    lower_bounds, upper_bounds = node_data[0], node_data[1]

    min_dist = 0.0
    max_dist = 0.0

    for j in range(x.size):
        below = lower_bounds[j] - x[j]
        above = x[j] - upper_bounds[j]
        # Distance outside the box along this axis (0 when inside the slab).
        outside = max(below, 0) + max(above, 0)
        min_dist += outside * outside
        # Farthest box corner along this axis.
        extreme = max(abs(below), abs(above))
        max_dist += extreme * extreme

    return min_dist, max_dist
03f80210ada3530c692ce24db3bb92f0156a2d6f
31,171
def strip_newlines(s, nleading=0, ntrailing=0):
    """Strip at most nleading and ntrailing newlines from s.

    Horizontal whitespace (spaces/tabs) adjacent to a stripped newline is
    removed with it. Both "\\n" and "\\r\\n" line endings are handled.

    :param s: the string to strip
    :param nleading: maximum number of leading newlines to remove
    :param ntrailing: maximum number of trailing newlines to remove
    :returns: the stripped string
    """
    for _ in range(nleading):
        stripped = s.lstrip(' \t')
        # Fix: the original compared a single character against '\r\n',
        # so CRLF endings were never stripped (and empty input raised
        # IndexError on s[0]).
        if stripped.startswith('\r\n'):
            s = stripped[2:]
        elif stripped.startswith('\n'):
            s = stripped[1:]
        else:
            break
    for _ in range(ntrailing):
        stripped = s.rstrip(' \t')
        if stripped.endswith('\r\n'):
            s = stripped[:-2]
        elif stripped.endswith('\n'):
            s = stripped[:-1]
        else:
            break
    return s
cd9c55d4ac7828d9506567d879277a463d896c46
31,172
def xyz_to_lab(x_val, y_val, z_val):
    """
    Convert XYZ color to CIE-Lab color.

    :arg float x_val: XYZ value of X.
    :arg float y_val: XYZ value of Y.
    :arg float z_val: XYZ value of Z.
    :returns: Tuple (L, a, b) representing CIE-Lab color
    :rtype: tuple

    Uses the D65/2° standard illuminant white point.
    """
    white_point = (95.047, 100.0, 108.883)
    scaled = []
    for value, reference in zip((x_val, y_val, z_val), white_point):
        ratio = value / reference
        # Piecewise CIE transfer function: cube root above the threshold,
        # linear approximation below it.
        if ratio > 0.008856:
            ratio = ratio ** (1 / 3.0)
        else:
            ratio = 7.787 * ratio + 16 / 116.0
        scaled.append(ratio)
    fx, fy, fz = scaled
    cie_l = 116 * fy - 16
    cie_a = 500 * (fx - fy)
    cie_b = 200 * (fy - fz)
    return cie_l, cie_a, cie_b
c2478772659a5d925c4db0b6ba68ce98b6537a59
31,173
def get_core_v1_api():
    """
    Build a Kubernetes CoreV1Api client from the local kubeconfig.

    :return: api client (kubernetes client.CoreV1Api instance)
    """
    # Loads cluster/credential info from the default kubeconfig
    # (~/.kube/config or $KUBECONFIG).
    config.load_kube_config()
    core_api = client.CoreV1Api()
    return core_api
2169b3dfe7a603c1d870c0100d96640f4688023c
31,174
def ssbm(n: int, k: int, p: float, q: float, directed=False):
    """
    Generate a graph from the symmetric stochastic block model.

    Generates a graph with n vertices and k clusters; every cluster has
    floor(n/k) vertices. Each intra-cluster edge appears with probability p,
    each inter-cluster edge with probability q.

    :param n: The number of vertices in the graph.
    :param k: The number of clusters.
    :param p: The probability of an edge inside a cluster.
    :param q: The probability of an edge between clusters.
    :param directed: Whether to generate a directed graph.
    :return: The generated graph as an ``sgtl.Graph`` object.
    """
    # Reject non-numeric probabilities up front.
    try:
        p = float(p)
        q = float(q)
    except Exception:
        raise TypeError("The probabilities p and q must be numbers between 0 and 1.")

    # All k clusters share the same size.
    cluster_sizes = [int(n / k) for _ in range(k)]

    # k*k probability matrix: p on the diagonal, q everywhere else.
    prob_mat_q = [
        [p if col == row else q for col in range(k)]
        for row in range(k)
    ]

    # Delegate to the general sbm method.
    return sbm(cluster_sizes, prob_mat_q, directed=directed)
f849bb41afe617e6d66e043280b4606e1d5390c4
31,175
import torch


def get_norm(norm, out_channels):
    """
    Args:
        norm (str or callable): either one of BN, GN (empty string disables
            normalization); or a callable that takes a channel number and
            returns the normalization layer as a nn.Module.
        out_channels (int): number of channels the layer will normalize.

    Returns:
        nn.Module or None: the normalization layer
    """
    # GroupNorm uses fewer groups when the channel count is small.
    if out_channels == 32:
        N = 16
    else:
        N = 32
    if isinstance(norm, str):
        if len(norm) == 0:
            return None
        norm = {
            "BN": torch.nn.BatchNorm2d,
            #"SyncBN": NaiveSyncBatchNorm,
            #"FrozenBN": FrozenBatchNorm2d,
            # Fix: the original referenced the undefined name `nn`
            # (only `torch` is imported), raising NameError for "GN".
            "GN": lambda channels: torch.nn.GroupNorm(N, channels),
            #"nnSyncBN": nn.SyncBatchNorm,  # keep for debugging
        }[norm]
    return norm(out_channels)
63821ac90fdb6f9d9442a885e54d5569aa78f1ae
31,176
def std_normal_dist_cumulative(lower, upper):
    """
    Find the area under the standard normal distribution between the lower
    and upper bounds. Bounds that aren't specified are taken at infinity.
    """
    # `stats` is presumably scipy.stats (confirm the file's imports);
    # the integration itself is delegated to the project helper.
    return find_distribution_area(stats.norm, lower, upper)
210a16d29d8e07949dbe67c7486e9fea1656ce76
31,177
def ann_pharm_variant(dataframe, genome, knowledgebase, variant_type):
    """query with chr, start, stop, ref, alt, genome assembly version.
    Returns all the drugs targeting the observed variant.

    :param dataframe: variant table; rows are unpacked via get_variant_list
    :param genome: genome assembly version to match against annotations
    :param knowledgebase: annotation source passed to get_inner_dict
    :param variant_type: "MUT" for mutations or "CNA" for copy-number events
    :returns: dict mapping gene -> list of matching annotation dicts
    """
    all_direct_targets = {}
    rows = get_variant_list(dataframe)
    for r in rows:
        direct_target_list = []
        gene = r[0]
        # Field subset an annotation entry must fully contain to match.
        # NOTE(review): for any variant_type other than "MUT"/"CNA",
        # search_subset is left unbound and the loop below raises — confirm
        # callers only pass these two values.
        if variant_type == "MUT":
            search_subset = {"chromosome": r[1], "start": r[2], "stop": r[3],
                             "reference_base": r[4], "alteration_base": r[5],
                             "assembly_version": genome}
        elif variant_type == "CNA":
            search_subset = {"chromosome": r[1], "start": r[2], "stop": r[3],
                             "global_type": r[4], "assembly_version": genome}
        superset = get_inner_dict(gene, "variant_annotation", knowledgebase)
        if not superset:
            continue
        for variant in superset:
            v_index = superset.index(variant)
            search_set = superset[v_index]
            # Entry matches when every queried field is present and equal.
            coverage = all(item in search_set.items() for item in search_subset.items())
            if coverage:
                direct_target_list.append(search_set)
        if not direct_target_list:
            continue
        if gene in all_direct_targets:
            all_direct_targets[gene].extend(direct_target_list)
        else:
            all_direct_targets[gene] = direct_target_list
    return all_direct_targets
cf1b8be54bf5ccb1bfa29afbe2af5020246ac7ea
31,178
from typing import List
from typing import Tuple


def to_pier_settlement(
    config: Config,
    points: List[Point],
    responses_array: List[List[float]],
    response_type: ResponseType,
    pier_settlement: List[Tuple[PierSettlement, PierSettlement]],
) -> List[List[float]]:
    """Time series of responses to pier settlement.

    Args:
        config: simulation configuration object.
        points: points in the TrafficArray.
        responses_array: NumPY array indexed first by point then time.
        response_type: the sensor response type to add.
        pier_settlement: start and end settlements of piers.

    Returns: NumPY array of same shape as "responses_array" and considering
        the same points, but only containing the responses from pier
        settlement.
        (NOTE(review): despite the List[List[float]] annotations, the code
        reads responses_array.shape and returns a NumPy array — confirm.)
    """
    # No settlement: zero response everywhere.
    if len(pier_settlement) == 0:
        return np.zeros(responses_array.shape)
    assert len(responses_array) == len(points)
    # Each pair must describe the start/end state of the same pier.
    for ps in pier_settlement:
        assert ps[0].pier == ps[1].pier
    # Responses at each point to a UNIT settlement of each pier; scaled
    # below by the actual settlements (linear superposition).
    # Sorted by pier settlement index then by point index.
    unit_responses = [
        load(
            config=config,
            response_type=response_type,
            pier_settlement=[
                PierSettlement(pier=ps[0].pier, settlement=config.unit_pier_settlement)
            ],
        ).at_decks(points)
        for ps in pier_settlement
    ]
    assert len(unit_responses) == len(pier_settlement)
    assert len(unit_responses[0]) == len(points)
    start_responses, end_responses = [0 for _ in points], [0 for _ in points]
    for p_i, _ in enumerate(points):
        for ps_i, ps in enumerate(pier_settlement):
            start_responses[p_i] += unit_responses[ps_i][p_i] * (
                ps[0].settlement / config.unit_pier_settlement
            )
            end_responses[p_i] += unit_responses[ps_i][p_i] * (
                ps[1].settlement / config.unit_pier_settlement
            )
    # Interpolate each point's response linearly from start to end over time.
    ps_responses = np.zeros(responses_array.shape)
    for p, _ in enumerate(points):
        ps_responses[p] = np.interp(
            np.arange(ps_responses.shape[1]),
            [0, ps_responses.shape[1] - 1],
            [start_responses[p], end_responses[p]],
        )
    return ps_responses
48879533d392f89cb39e7bea4b6565b8ff172279
31,179
def subtract_something(a, b):
    """
    Subtracts something from something else.

    a: subtract-able
        First thing (the minuend)
    b: subtract-able
        Second thing (the subtrahend).

    Returns:
    --------
    Result of subtracting b from a.
    """
    # Fix: the docstring previously claimed the result was a sum.
    return a - b
e090d09fc3fd35b080b12584d663519b39ed24c9
31,180
from pathlib import Path


def last_rådata(datafil, track=None):
    """Load raw data from a gpx file or a GPSLogger csv file.

    (Docstring translated from Norwegian.)

    :param datafil: path to the data file (.gpx or .csv)
    :param track: track selector; only valid for gpx files
    :returns: parsed raw data from the matching loader
    :raises ValueError: if the extension is neither .csv nor .gpx
    """
    datafil = Path(datafil)
    if datafil.suffix == '.gpx':
        # gpxpy is optional; this helper fails early with a clear message.
        sjekk_at_gpxpy_er_installert()
        return last_rådata_gpx(datafil, track=track)
    elif datafil.suffix == '.csv':
        # `track` is meaningless for csv input — warn, then continue.
        if track is not None:
            warn("Du kan kun spesifisere track dersom du bruker gpx-filer.")
        return last_rådata_gpslogger(datafil)
    else:
        raise ValueError("Filtypen må enten være csv (for GPSLogger csv filer) eller gpx.")
c0e7d638c858ef97cde7c8c0ce2ea9796ff4c7de
31,181
from datetime import datetime


def getAverageStay(date1, date2, hop):
    """
    The average number of days a patient stays at a hospital (from admission
    to discharge). If hop is None, the stat is computed system wide.

    :param date1: start of the reporting window (datetime-like)
    :param date2: end of the reporting window (datetime-like)
    :param hop: hospital to restrict to, or None for all hospitals
    :returns: average stay length in days (0 if there are no qualifying stays)
    """
    total_days = 0
    num_stays = 0
    # Hoisted out of the loop: the window bounds are loop-invariant.
    # NOTE(review): `datetime.date(year=..., ...)` only works if the module
    # was imported as `import datetime`; confirm the file-level import style.
    window_start = datetime.date(year=date1.year, month=date1.month, day=date1.day)
    window_end = datetime.date(year=date2.year, month=date2.month, day=date2.day)
    for visit in Appointment.objects.all():
        doc = Doctor.objects.get(id=visit.doctorID.id)
        # `hop is None` means "system wide" (idiom fix; original used == None).
        if hop is None or doc.hospitalID == hop:
            if window_start <= visit.aptDate <= window_end:
                # Only visits with an ExtendedStay record count as stays.
                if ExtendedStay.objects.filter(appointmentID=visit.id).exists():
                    stay = ExtendedStay.objects.get(appointmentID=visit.id)
                    total_days += (stay.endDate - visit.aptDate).days
                    num_stays += 1
    if num_stays == 0:
        return 0
    return total_days / num_stays
9044d415d6231e51ee4eecb5e00e3a531fb209fb
31,182
def classify(inputTree, featLabels, testVec):
    """
    Decision tree classification function.

    :param inputTree: nested-dict decision tree ({feature: {value: subtree-or-label}})
    :param featLabels: list of feature names, positionally aligned with testVec
    :param testVec: feature values of the instance to classify
    :returns: the predicted class label, or None if no branch matches
    """
    # Fix: the original read the global `myTree` instead of the
    # `inputTree` parameter, so the argument was silently ignored.
    firstSides = list(inputTree.keys())
    firstStr = firstSides[0]
    secondDict = inputTree[firstStr]
    featIndex = featLabels.index(firstStr)
    # Return None (instead of UnboundLocalError) when no branch matches.
    classLabel = None
    for key in secondDict.keys():
        if testVec[featIndex] == key:
            if type(secondDict[key]).__name__ == 'dict':
                # Internal node: recurse into the matching subtree.
                classLabel = classify(secondDict[key], featLabels, testVec)
            else:
                # Leaf node: the stored value is the class label.
                classLabel = secondDict[key]
    return classLabel
3daee2b72332c14eab41fd00bf781f00b4e1e633
31,183
def update(sql, *args):
    """
    Execute an UPDATE statement; return the number of updated rows.

    (Docstring translated from Chinese.)

    :param sql: SQL statement to execute
    :param args: parameters bound into the statement
    :return: number of affected rows
    """
    # Thin public wrapper over the module-private _update helper.
    return _update(sql, *args)
700a155dbdcc77d824ce14eb95b7f041a0dda261
31,184
def videoSize():
    """
    videoSize() -> (width, height)

    Get the camera resolution.

    Returns:
        Two integer values, i.e. width and height (unpackable as a pair,
        per the usage below).

    Parameters:
        This method does not have any parameter.

    Usage:
        width, height = SIGBTools.videoSize()
        size = SIGBTools.videoSize()
    """
    # The resolution is owned by the capture-manager singleton.
    return CaptureManager.Instance.Size
58757851dbbde75a1e1fef201d3ed2decee8cd06
31,185
import hashlib def generate_md5_token_from_dict(input_params): """ Generate distinct md5 token from a dictionary. 初衷是为了将输入一个函数的输入参数内容编码为独一无二的md5编码, 方便在其变动的时候进行检测. Parameters ---------- input_params : dict Dictionary to be encoded. Returns ------- str Encoded md5 token from input_params. """ input_params_token = "" # print(">>"*88) # print(input_params) # print(">>"*88) for v in list(input_params["kwargs"].values()) + list(input_params["args"]) + list(input_params["feature_path"]): if type(v) in [pd.DataFrame, pd.Series]: input_params_token += "pandas_" + str(v.memory_usage().sum()) + "_" + str(v.shape) + "_" elif type(v) in [np.ndarray]: input_params_token += "numpy_" + str(v.mean()) + "_" + str(v.shape) + "_" elif type(v) in [list, tuple, set]: input_params_token += "list_" + str(v) + "_" elif type(v) == str: input_params_token += "str_" + v + "_" elif type(v) in [int, float]: input_params_token += "numeric_" + str(v) + "_" elif type(v) == bool: input_params_token += "bool_" + str(v) + "_" elif type(v) == dict: input_params_token += "dict_" + str(v) + "_" else: raise "Add type {}".format(type(v)) m = hashlib.md5(input_params_token.encode("gb2312")).hexdigest() return m
2faf66679a72c6fd4f2e02f3ddef728b3bab687e
31,186
import re


def create_range(range_, arrayTest, headers_suppresed = True):
    """
    Expand a spreadsheet-style range into a string of the cell VALUES.

    Walks every cell of `range_` (e.g. 'A1:B4') column by column and collects
    the corresponding entries of `arrayTest`, returning '[v1,v2,...]'.
    (The previous docstring described expanding the cell *names*; the code
    has always returned the values.)

    :param range_: range expression such as 'A1:A4'
    :param arrayTest: 2D array of cell values (rows of columns)
    :param headers_suppresed: when False, row indices are shifted up by one
        to account for a header row
    :returns: comma-separated values wrapped in square brackets
    """
    result_string = []
    first_cell = range_.split(':')[0]
    last_cell = range_.split(':')[1]
    # Split each corner cell into column letters and 1-based row number.
    column_start = re.search(r'[a-zA-Z]+', first_cell).group()
    column_end = re.search(r'[a-zA-Z]+', last_cell).group()
    row_start = int(re.search(r'\d+', first_cell).group())
    row_end = int(re.search(r'\d+', last_cell).group())
    # col_to_num (project helper) presumably maps column letters to 1-based
    # indices — confirm against its definition.
    for i in range(col_to_num(column_start), col_to_num(column_end)+1):
        for j in range(row_start, row_end+1):
            if headers_suppresed:
                result_string.append(str(arrayTest[j-1][i-1]))
            else:
                result_string.append(str(arrayTest[j-2][i-1]))
    return '[' + ','.join(result_string) + ']'
5aff1450946ee12ab943a24491a9947e6d2fa830
31,187
def find_largest_helper(n, maximum):
    """
    :param n: int, the number to find the largest digit of
    :param maximum: int, the largest digit seen so far
    :return: int, the largest digit
    """
    if n % 10 > maximum:
        maximum = n % 10
    # Fix: use integer division for the base case. The original tested
    # `n / 10 == 0`, which under Python 3 true division is only true for
    # n == 0 — a float comparison that forced one extra recursion per call.
    if n // 10 == 0:
        return maximum
    return find_largest_helper(n // 10, maximum)
ddd49839be6a3ab6ece7cabde41f3978df1ba6f3
31,188
def global_align(seq1_1hot, seq2_1hot):
    """Align two 1-hot encoded sequences.

    :param seq1_1hot: 1-hot encoding of the first DNA sequence
    :param seq2_1hot: 1-hot encoding of the second DNA sequence
    :returns: tuple of the two aligned sequences as strings (with gaps)
    """
    # NOTE(review): align_opts is built but never used — the same values are
    # passed explicitly to global_pairwise_align_nucleotide below; confirm
    # the dict can be removed.
    align_opts = {'gap_open_penalty':10, 'gap_extend_penalty':1,
                  'match_score':5, 'mismatch_score':-4}

    # Decode 1-hot arrays back to DNA sequence objects for the aligner.
    seq1_dna = DNA(dna_io.hot1_dna(seq1_1hot))
    seq2_dna = DNA(dna_io.hot1_dna(seq2_1hot))

    # seq_align = global_pairwise_align_nucleotide(seq1_dna, seq2_dna, *align_opts)[0]
    seq_align = global_pairwise_align_nucleotide(seq1_dna, seq2_dna,
                                                 gap_open_penalty=10,
                                                 gap_extend_penalty=1,
                                                 match_score=5,
                                                 mismatch_score=-4)[0]
    seq1_align = str(seq_align[0])
    seq2_align = str(seq_align[1])
    return seq1_align, seq2_align
35cc261f43c116a96fbec9e409875b8131ff76a3
31,189
import math


def get_points_dist(pt1, pt2):
    """
    Returns the distance between a pair of points.

    get_points_dist(Point, Point) -> number

    Parameters
    ----------
    pt1 : a point
    pt2 : the other point

    Examples
    --------
    >>> get_points_dist(Point((4, 4)), Point((4, 8)))
    4.0
    >>> get_points_dist(Point((0, 0)), Point((0, 0)))
    0.0
    """
    dx = pt1[0] - pt2[0]
    dy = pt1[1] - pt2[1]
    # math.hypot computes the Euclidean norm without overflow issues.
    return math.hypot(dx, dy)
c517833dec161585c336289149a8c56461fe7642
31,190
def get_source_type(*, db_session: Session = Depends(get_db), source_type_id: PrimaryKey):
    """Given its unique ID, retrieve details about a single source type.

    :param db_session: database session injected via FastAPI Depends
    :param source_type_id: primary key of the source type to fetch
    :raises HTTPException: 404 when no source type with that ID exists
    """
    source_type = get(db_session=db_session, source_type_id=source_type_id)
    if not source_type:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=[{"msg": "The requested source type does not exist."}],
        )
    return source_type
31eeda78aa3dff35618cc2e57092449e47645702
31,191
import os


def get_namefiles(pth, exclude=None):
    """Search through a path (pth) for all .nam files.

    Parameters
    ----------
    pth : str
        path to model files
    exclude : str or lst
        File or list of files to exclude from the search (default is None).
        Matching is a case-insensitive substring test against the full path.

    Returns
    -------
    namefiles : lst
        List of namefiles with paths
    """
    namefiles = [
        os.path.join(root, file)
        for root, _, files in os.walk(pth)
        for file in files
        if file.endswith(".nam")
    ]
    if exclude is not None:
        if isinstance(exclude, str):
            exclude = [exclude]
        # Filter in one pass instead of building and popping a second list.
        patterns = [e.lower() for e in exclude]
        namefiles = [
            namefile
            for namefile in namefiles
            if not any(p in namefile.lower() for p in patterns)
        ]
    return namefiles
c80da9dba1dbea8505d119ba61745c7c43b4f25b
31,192
def load_data(messages_filepath, categories_filepath):
    """
    Read the messages and categories CSV files and left-join them on 'id'.
    """
    messages_df = pd.read_csv(messages_filepath)
    categories_df = pd.read_csv(categories_filepath)
    merged = messages_df.merge(categories_df, on='id', how='left')
    return merged
7efafc6a51b1e3dd02e3df8926fbf4ea92f8cb39
31,193
def apply_meth(dna, meth):
    """Transforms DNA sequence to methylated DNA sequence

    Input:
        dna (np.array): array of DNA sequence
        meth (np.array): Methylation labels (len == len(dna))
    Output:
        dna_meth (np.array): array of methylated DNA sequence
    """
    dna_meth = dna.copy()
    # 4 channels -> one-hot ("image") encoding; otherwise integer base codes.
    img = True if dna.shape[1] == 4 else False
    assert is_complementary(dna, img=img), f'DNA needs to be unpacked first'
    # Positions carrying any methylation signal.
    mask = meth.sum(axis=1) > 0
    if img:
        assert dna.shape == meth.shape, f'{dna.shape} != {meth.shape}'
        # NOTE(review): this zeroing is discarded — the next line rebuilds
        # dna_meth from the untouched `dna`. Confirm whether the intent was
        # to concatenate `dna_meth` (masked) with `meth` instead.
        dna_meth[mask] = 0
        dna_meth = np.concatenate((dna, meth), axis=1)
    else:
        # Shift methylated bases into a second code range (0-3 -> 4-7).
        dna_meth[mask] = dna[mask]+4
    return dna_meth
e835047651970dd3add0a463a235209b772411fd
31,194
def apply_affine_3D(coords_3d, affine_matrix):
    """
    Apply an affine transformation to all coordinates.

    Parameters
    ----------
    coords_3d: numpy 2D array
        The source coordinates, given as a 2D numpy array with shape (n, 3).
        Each of the n rows represents a point in space, given by its x, y and
        z coordinates.

    affine_matrix: numpy 2D float array with shape (4, 4)
        The affine matrix

    Returns
    -------
    The coordinates after applying the matrix, 2D numpy array with shape
    (n, 3). Same shape as the input coords.
    """
    rotation = affine_matrix[:3, :3]
    translation = affine_matrix[:3, 3]
    # Vectorized: (n,3) @ (3,3)^T rotates every row at once, replacing the
    # original per-row Python loop; cast to float to match the old output
    # dtype (the loop filled an np.zeros float array).
    coords = np.asarray(coords_3d, dtype=float)
    return coords @ rotation.T + translation
2b06901c4428075800e919579ab07c7577c6d9a0
31,195
def weighted_degree_kernel_pos_inv(x1, x2, K=4, var=8, beacon=None, bin=None):
    """
    Weighted degree kernel with positional invariance.

    :param x1: Sequence of characters.
    :param x2: Sequence of characters.
    :param K: K-mers to be scanned.
    :param var: number of positional shifts considered (larger = more tolerant).
    :param beacon: Beacon sequence (tuple of characters). If set, K is equal
        to beacon length and only beacons are counted.
    :param bin: tuple (bin, number of all bins). Run kernel only in specified
        bin. Make sure sequences are of equal length!
    :return: kernel value (scalar).
    """
    G = 0
    if bin:
        assert len(x1) == len(x2)
        b, b_all = bin
    if not isinstance(beacon, type(None)):
        K = len(beacon)
        if isinstance(beacon, str):
            beacon = tuple(beacon)
    for Kt in range(2, K + 1):
        g = 0
        # Fix: materialize the k-mer streams as lists. zip() returns a
        # one-shot, non-subscriptable iterator in Python 3, which broke
        # both the bin slicing and the repeated zips below.
        kmers_i = list(zip(*[x1[k:] for k in range(Kt)]))
        kmers_j = list(zip(*[x2[k:] for k in range(Kt)]))
        if bin:
            start = int(float(b) / b_all * len(kmers_i))
            end = int(float(b + 1) / b_all * len(kmers_j))
            kmers_i = kmers_i[start:end]
            kmers_j = kmers_j[start:end]
        bin_norm = float(len(kmers_i)) if bin else 1
        for s in range(var):
            # Contribution decays with the size of the positional shift.
            delta = 1.0 / (2 * (s + 1))
            if isinstance(beacon, type(None)):
                mu_i = np.sum([ki == kj for ki, kj in zip(kmers_i, kmers_j[s:])])
                mu_j = np.sum([ki == kj for ki, kj in zip(kmers_j, kmers_i[s:])])
                g += delta * (mu_i + mu_j)
            else:
                # Only count k-mers equal to the beacon itself.
                if Kt != len(beacon):
                    continue
                mu_i = np.sum([beacon == ki == kj for ki, kj in zip(kmers_i, kmers_j[s:])])
                mu_j = np.sum([beacon == ki == kj for ki, kj in zip(kmers_j, kmers_i[s:])])
                g += delta * (mu_i + mu_j)
        # Standard weighted-degree weighting of each k-mer length.
        beta = 2.0 * (K - Kt + 1) / (Kt * (Kt + 1.0)) / bin_norm
        G += beta * g
    return G
01403c7ca780c272b7b6ea37d291d79f2515edb6
31,196
import ast


def is_valid_block_variable_definition(node: _VarDefinition) -> bool:
    """Check whether block variables are correctly defined.

    Tuple targets are valid iff every element passes the single-variable
    check; all other targets are checked directly.
    """
    if isinstance(node, ast.Tuple):
        return all(
            _is_valid_single(var_definition)
            for var_definition in node.elts
        )
    return _is_valid_single(node)
9be2e583353bed1910b2dfe70bba2dfd9fe96328
31,197
def julian_day_to_gregorian(julian_day):
    """Converts a Julian Day number to its (proleptic) Gregorian calendar equivalent

    Adapted from: https://en.wikipedia.org/wiki/Julian_day#Julian_or_Gregorian_calendar_from_Julian_day_number

    Note that the algorithm is only valid for Julian Day numbers greater than
    or equal to zero. Negative arguments for julian_day will raise a ValueError.

    Args:
        julian_day (int): Julian Day number to convert, must be greater than or equal to 0

    Returns:
        A GregorianDate (day, month, year) value representing the day, month,
        and year in the Gregorian calendar.
    """
    # The arithmetic lives in the shared converter; this wrapper only packs
    # the result into the GregorianDate type.
    day, month, year = _convert_julian_day(julian_day, mode="gregorian")
    return GregorianDate(day, month, year)
1544f6cd5abee1f5cef3b5befbf24f82c8d26e44
31,198
def isnotebook():
    """Identify the running shell environment.

    Returns True only inside a Jupyter notebook or qtconsole
    (ZMQInteractiveShell); False for terminal IPython, a plain Python
    interpreter, or any other environment.
    """
    try:
        shell_name = get_ipython().__class__.__name__
    except NameError:
        # get_ipython is undefined outside IPython: standard interpreter.
        return False
    # Jupyter notebook / qtconsole -> True; terminal IPython or anything
    # else -> False.
    return shell_name == 'ZMQInteractiveShell'
4620d8275c10f5aeb8e13406c897b5b93313de97
31,199