content: string (lengths 22 to 815k)
id: int64 (0 to 4.91M)
def get_predictions(single_stream, class_mapping_dict, ip, port, model_name): """Gets predictions for a single image using Tensorflow serving Arguments: single_stream (dict): A single prodigy stream class_mapping_dict (dict): with key as int and value as class name ip (str): tensorflow serving IP port (str): tensorflow serving port model_name (str): model name in tensorflow serving Returns: A tuple containing numpy arrays: (class_ids, class_names, scores, boxes) """ image_byte_stream = b64_uri_to_bytes(single_stream["image"]) encoded_image_io = io.BytesIO(image_byte_stream) image = Image.open(encoded_image_io) width, height = image.size filename = str(single_stream["meta"]["file"]) file_extension = filename.split(".")[1].lower() if file_extension == "png": image_format = b'png' elif file_extension in ("jpg", "jpeg"): image_format = b'jpg' else: log(("Only 'png', 'jpeg' or 'jpg' files are supported by ODAPI. " "Got {}. Thus treating it as `jpg` file. " "Might cause errors".format(file_extension) )) image_format = b'jpg' filename = filename.encode("utf-8") tf_example = tf.train.Example(features=tf.train.Features(feature={ 'image/height': dataset_util.int64_feature(height), 'image/width': dataset_util.int64_feature(width), 'image/filename': dataset_util.bytes_feature(filename), 'image/source_id': dataset_util.bytes_feature(filename), 'image/encoded': dataset_util.bytes_feature(image_byte_stream), 'image/format': dataset_util.bytes_feature(image_format), })) boxes, class_ids, scores = tf_odapi_client(tf_example.SerializeToString(), ip, port, model_name, "serving_default", input_name="serialized_example", timeout=300 ) class_names = np.array([class_mapping_dict[class_id] for class_id in class_ids]) return (class_ids, class_names, scores, boxes)
5,341,100
def interaction_graph(matrix):
    """Create a networkx graph object from a (square) matrix.

    Parameters
    ----------
    matrix : numpy.ndarray
        Matrix of mutual information, the information for the edges is taken
        from the upper matrix

    Returns
    -------
    graph : networkx.Graph()
        The graph with MI as weighted edges and positions as nodes

    Raises
    ------
    AssertionError
        If the matrix is not square
    """
    # Assert if the matrix is a square matrix.
    assert matrix.shape[0] == matrix.shape[1], "The matrix is not square"
    graph = nx.Graph()
    positions = len(matrix)
    for pos1, pos2 in itertools.combinations(range(positions), 2):
        graph.add_edge(pos1, pos2, weight=matrix[pos1, pos2])
    return graph
5,341,101
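A minimal usage sketch for interaction_graph above (assuming the module-level numpy, networkx and itertools imports that the snippet relies on): build a graph from a small symmetric mutual-information matrix.

import itertools
import numpy as np
import networkx as nx

mi = np.array([[0.0, 0.8, 0.1],
               [0.8, 0.0, 0.3],
               [0.1, 0.3, 0.0]])
g = interaction_graph(mi)
print(g.number_of_nodes(), g.number_of_edges())  # 3 3
print(g[0][1]["weight"])                         # 0.8, taken from the upper triangle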
def tags_filter(osm_pbf, dst_fname, expression, overwrite=True): """Extract OSM objects based on their tags. The function reads an input .osm.pbf file and uses `osmium tags-filter` to extract the relevant objects into an output .osm.pbf file. Parameters ---------- osm_pbf : str Path to input .osm.pbf file. dst_fname : str Path to output .osm.pbf file. expression : str Osmium tags-filter expression. See `osmium tags-filter` manpage for details. overwrite : bool, optional Overwrite existing file. Returns ------- dst_fname : str Path to output .osm.pbf file. """ expression_parts = expression.split(" ") command = ["osmium", "tags-filter", osm_pbf] command += expression_parts command += ["-o", dst_fname] if overwrite: command += ["--overwrite"] logger.info(f"Running command: {' '.join(command)}") run(command, check=True, stdout=DEVNULL, stderr=DEVNULL) src_size = human_readable_size(os.path.getsize(osm_pbf)) dst_size = human_readable_size(os.path.getsize(dst_fname)) logger.info( f"Extracted {os.path.basename(dst_fname)} ({dst_size}) " f"from {os.path.basename(osm_pbf)} ({src_size})." ) return dst_fname
5,341,102
def bound_n_samples_from_env(n_samples: int):
    """Bound number of samples from environment variable.

    Uses environment variable `PYPESTO_MAX_N_SAMPLES`.
    This is used to speed up testing, while in application it should not
    be used.

    Parameters
    ----------
    n_samples:
        Number of samples desired.

    Returns
    -------
    n_samples_new:
        The original number of samples, or the minimum with the environment
        variable, if exists.
    """
    if PYPESTO_MAX_N_SAMPLES not in os.environ:
        return n_samples
    n_samples_new = min(n_samples, int(os.environ[PYPESTO_MAX_N_SAMPLES]))
    logger.info(
        f"Bounding number of samples from {n_samples} to {n_samples_new} via "
        f"environment variable {PYPESTO_MAX_N_SAMPLES}"
    )
    return n_samples_new
5,341,103
def weight_inter_agg(num_relations, self_feats, neigh_feats, embed_dim, weight, alpha, n, cuda): """ Weight inter-relation aggregator Reference: https://arxiv.org/abs/2002.12307 :param num_relations: number of relations in the graph :param self_feats: batch nodes features or embeddings :param neigh_feats: intra-relation aggregated neighbor embeddings for each relation :param embed_dim: the dimension of output embedding :param weight: parameter used to transform node embeddings before inter-relation aggregation :param alpha: weight parameter for each relation used by Rio-Weight :param n: number of nodes in a batch :param cuda: whether use GPU :return: inter-relation aggregated node embeddings """ # transform batch node embedding and neighbor embedding in each relation with weight parameter center_h = weight.mm(self_feats.t()) neigh_h = weight.mm(neigh_feats.t()) # compute relation weights using softmax w = F.softmax(alpha, dim=1) # initialize the final neighbor embedding if cuda: aggregated = torch.zeros(size=(embed_dim, n)).cuda() else: aggregated = torch.zeros(size=(embed_dim, n)) # add weighted neighbor embeddings in each relation together for r in range(num_relations): aggregated += torch.mul(w[:, r].unsqueeze(1).repeat(1, n), neigh_h[:, r * n:(r + 1) * n]) # sum aggregated neighbor embedding and batch node embedding # feed them to activation function combined = F.relu(center_h + aggregated) return combined
5,341,104
def ho2ro(ho):
    """Homochoric vector to Rodrigues-Frank vector."""
    return Rotation.ax2ro(Rotation.ho2ax(ho))
5,341,105
def stop_listener():
    """
    Tell the currently running server to stop. is_listener_running() must be
    checked before calling.
    """
    f = open(SERVER_LOCK_FILE, 'r')
    port = int(f.readline())
    f.close()
    server = ('127.0.0.1', port)
    client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    client_socket.sendto(SERVER_STOP, server)
5,341,106
def get_ratio(old, new):
    # type: (unicode, unicode) -> float
    """Return a "similarity ratio" (in percent) representing the similarity
    between the two strings, where 0 means identical and larger values mean
    less similar.
    """
    if not all([old, new]):
        return VERSIONING_RATIO

    if IS_SPEEDUP:
        return Levenshtein.distance(old, new) / (len(old) / 100.0)
    else:
        return levenshtein_distance(old, new) / (len(old) / 100.0)
5,341,107
def test_roundtrip_run(mlflow_client: MlflowClient):
    """Creates an experiment and a run, logs a metric, reads it back, and deletes the run."""
    # Create experiment
    experiment_name = "Test Roundtrip Run"
    experiment_id = mlflow_client.create_experiment(experiment_name)

    # Create run
    run = mlflow_client.create_run(experiment_id)
    mlflow_client.log_metric(run.info.run_id, "test_metric", 1.0)
    recovered_run = mlflow_client.get_run(run.info.run_id)
    assert recovered_run.data.metrics["test_metric"] == 1.0
    mlflow_client.delete_run(run.info.run_id)
5,341,108
def test_wam_format(filename): """ Ensure the yaml file is valid and all required fields are present in each api block """ print('Checking ' + filename) with open(filename, 'rb') as fh: config = yaml.load(fh.read()) assert(config['version'] == '1.0') for name, webarchive in config['webarchives'].items(): assert_keys(('name', 'about', 'apis', 'domain_hint', 'collections'), webarchive) assert(name) assert(webarchive['name']) assert(webarchive['about']) has_collections = ('collections' in webarchive) if has_collections: # must be a list or a regex assert(isinstance(webarchive['collections'], list) or re.compile(webarchive['collections'])) domain_hint = webarchive.get('domain_hint') if domain_hint: assert(isinstance(domain_hint, list)) apis = webarchive.get('apis') if not apis: continue assert_keys(('memento', 'cdx', 'wayback'), apis) if 'cdx' in apis: assert_keys(('query'), apis['cdx']) assert(apis['cdx']['query']) if 'memento' in apis: assert_keys(('timegate', 'timemap'), apis['memento']) assert(apis['memento']['timegate']) assert(apis['memento']['timemap']) if 'wayback' in apis: assert_keys(('replay', 'calendar'), apis['wayback']) assert(apis['wayback']['replay']) for mode in ['raw', 'rewritten']: assert(mode in apis['wayback']['replay']) if apis['wayback']['replay'][mode] is None: continue assert('{url}' in apis['wayback']['replay'][mode]) assert('{timestamp}' in apis['wayback']['replay'][mode]) assert(('{collection}' in apis['wayback']['replay'][mode]) == has_collections)
5,341,109
def sample2D(F, X, Y, mask=None, undef_value=0.0, outside_value=None): """Bilinear sample of a 2D field *F* : 2D array *X*, *Y* : position in grid coordinates, scalars or compatible arrays *mask* : if present must be a 2D matrix with 1 at valid and zero at non-valid points *undef_value* : value to put at undefined points *outside_value* : value to return outside the grid defaults to None, raising ValueError if any points are outside Note reversed axes, for integers i and j we have ``sample2D(F, i, j) = F[j,i]`` If jmax, imax = F.shape then inside values requires 0 <= x < imax-1, 0 <= y < jmax-1 Using bilinear interpolation """ # --- Argument checking --- # X and Y should be broadcastable to the same shape Z = np.add(X, Y) # scalar is True if both X and Y are scalars scalar = np.isscalar(Z) if np.ndim(F) != 2: raise ValueError("F must be 2D") if mask is not None: if mask.shape != F.shape: raise ValueError("Must have mask.shape == F.shape") jmax, imax = F.shape # Broadcast X and Y X0 = X + np.zeros_like(Z) Y0 = Y + np.zeros_like(Z) # Find integer I, J such that # 0 <= I <= X < I+1 <= imax-1, 0 <= J <= Y < J+1 <= jmax-1 # and local increments P and Q I = X0.astype("int") J = Y0.astype("int") P = X0 - I Q = Y0 - J outside = (X0 < 0) | (X0 >= imax - 1) | (Y0 < 0) | (Y0 >= jmax - 1) if np.any(outside): if outside_value is None: raise ValueError("point outside grid") I = np.where(outside, 0, I) J = np.where(outside, 0, J) # try: # J[outside] = 0 # I[outside] = 0 # except TypeError: # Zero-dimensional # I = np.array(0) # J = np.array(0) # Weights for bilinear interpolation W00 = (1 - P) * (1 - Q) W01 = (1 - P) * Q W10 = P * (1 - Q) W11 = P * Q SW = 1.0 # Sum of weights if mask is not None: W00 = mask[J, I] * W00 W01 = mask[J + 1, I] * W01 W10 = mask[J, I + 1] * W10 W11 = mask[J + 1, I + 1] * W11 SW = W00 + W01 + W10 + W11 SW = np.where(SW == 0, -1.0, SW) # Avoid division by zero below S = np.where( SW <= 0, undef_value, (W00 * F[J, I] + W01 * F[J + 1, I] + W10 * F[J, I + 1] + W11 * F[J + 1, I + 1]) / SW, ) # Set in outside_values if outside_value: S = np.where(outside, outside_value, S) # Scalar input gives scalar output if scalar: S = float(S) return S
5,341,110
async def test_add_and_delete(client, basedn): """ Test adding and deleting an LDAP entry. """ async with client.connect(True) as conn: entry = LDAPEntry("cn=async_test,%s" % basedn) entry["objectclass"] = [ "top", "inetOrgPerson", "person", "organizationalPerson", ] entry["sn"] = "async_test" try: await conn.add(entry) except bonsai.errors.AlreadyExists: await conn.delete(entry.dn) await conn.add(entry) except: pytest.fail("Unexpected error.") res = await conn.search() assert entry in res await entry.delete() res = await conn.search() assert entry not in res
5,341,111
def read(file=None, timeout=10, wait=0.2, threshold=32):
    """Return the external temperature.

    Keyword arguments:
    file -- the path to the 1-wire serial interface file
    timeout -- number of seconds without a reading after which to give up
    wait -- number of seconds to wait after a failed read before retrying
    threshold -- log a warning if the temperature exceeds this threshold

    Although the DS18B20 only measures the temperature, this method returns
    a two-element tuple to allow easier interchangeability with the DHT22
    which returns temperature and humidity.
    """
    if file is None:
        file = _discover()
    logger.debug('Started reading sensor at {}'.format(file))
    t1 = time.time()
    try:
        temp = _read(file, timeout, wait)
    except (RuntimeError, FileNotFoundError) as e:
        logger.warning(e.args)
        raise
    t2 = time.time()
    if temp > threshold:
        logger.warning(
            'temp {:.1f}C exceeds threshold {:.1f}C'
            .format(temp, threshold)
        )
    logger.info('temp={:.1f}C'.format(temp))
    logger.debug('Finished reading sensor ({:.1f}s)'.format(t2 - t1))
    return temp, None
5,341,112
def rotations_to_radians(rotations):
    """ converts rotations to radians """
    return np.pi * 2 * rotations
5,341,113
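A quick sanity check for rotations_to_radians above (plain numpy arithmetic, no extra assumptions):

import numpy as np

assert np.isclose(rotations_to_radians(0.5), np.pi)      # half a turn is pi radians
assert np.isclose(rotations_to_radians(1.0), 2 * np.pi)  # one full turn is 2*pi radians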
def _expect_ket(oper, state):
    """Private function to calculate the expectation value of an operator
    with respect to a ket.
    """
    oper, ket = jnp.asarray(oper), jnp.asarray(state)
    return jnp.vdot(jnp.transpose(ket), jnp.dot(oper, ket))
5,341,114
def resnet152(pretrained=False, last_stride=1, model_path=''):
    """Constructs a ResNet-152 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    return _resnet(pretrained=pretrained,
                   last_stride=last_stride,
                   block=Bottleneck,
                   layers=[3, 8, 36, 3],
                   model_path=model_path,
                   model_name='resnet152')
5,341,115
def check_wheel_move_during_closed_loop(data, wheel_gain=None, **_):
    """ Check that the wheel moves by approximately 35 degrees during the closed-loop period
    on trials where a feedback (error sound or valve) is delivered.

    Metric: M = abs(w_resp - w_t0) - threshold_displacement, where w_resp = position at response
      time, w_t0 = position at go cue time, threshold_displacement = displacement required to
      move 35 visual degrees
    Criterion: displacement < 3 visual degrees
    Units: degrees angle of wheel turn

    :param data: dict of trial data with keys ('wheel_timestamps', 'wheel_position', 'choice',
    'intervals', 'goCueTrigger_times', 'response_times', 'feedback_times', 'position')
    :param wheel_gain: the 'STIM_GAIN' task setting
    """
    # Get the Bpod extracted wheel data
    timestamps = data['wheel_timestamps']
    position = data['wheel_position']

    return _wheel_move_during_closed_loop(timestamps, position, data, wheel_gain, tol=3)
5,341,116
def next_fast_len(target: int) -> int: """ Find the next fast size of input data to `fft`, for zero-padding, etc. SciPy's FFTPACK has efficient functions for radix {2, 3, 4, 5}, so this returns the next composite of the prime factors 2, 3, and 5 which is greater than or equal to `target`. (These are also known as 5-smooth numbers, regular numbers, or Hamming numbers.) Parameters ---------- target : int Length to start searching from. Must be a positive integer. Returns ------- out : int The first 5-smooth number greater than or equal to `target`. Notes ----- .. versionadded:: 0.18.0 Examples -------- On a particular machine, an FFT of prime length takes 133 ms: >>> from scipy import fftpack >>> min_len = 10007 # prime length is worst case for speed >>> a = np.random.randn(min_len) >>> b = fftpack.fft(a) Zero-padding to the next 5-smooth length reduces computation time to 211 us, a speedup of 630 times: >>> fftpack.helper.next_fast_len(min_len) 10125 >>> b = fftpack.fft(a, 10125) Rounding up to the next power of 2 is not optimal, taking 367 us to compute, 1.7 times as long as the 5-smooth size: >>> b = fftpack.fft(a, 16384) """ hams = (8, 9, 10, 12, 15, 16, 18, 20, 24, 25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80, 81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192, 200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384, 400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675, 720, 729, 750, 768, 800, 810, 864, 900, 960, 972, 1000, 1024, 1080, 1125, 1152, 1200, 1215, 1250, 1280, 1296, 1350, 1440, 1458, 1500, 1536, 1600, 1620, 1728, 1800, 1875, 1920, 1944, 2000, 2025, 2048, 2160, 2187, 2250, 2304, 2400, 2430, 2500, 2560, 2592, 2700, 2880, 2916, 3000, 3072, 3125, 3200, 3240, 3375, 3456, 3600, 3645, 3750, 3840, 3888, 4000, 4050, 4096, 4320, 4374, 4500, 4608, 4800, 4860, 5000, 5120, 5184, 5400, 5625, 5760, 5832, 6000, 6075, 6144, 6250, 6400, 6480, 6561, 6750, 6912, 7200, 7290, 7500, 7680, 7776, 8000, 8100, 8192, 8640, 8748, 9000, 9216, 9375, 9600, 9720, 10000) target = int(target) if target <= 6: return target # Quickly check if it's already a power of 2 if not (target & (target-1)): return target # Get result quickly for small sizes, since FFT itself is similarly fast. if target <= hams[-1]: return hams[bisect_left(hams, target)] match = float('inf') # Anything found will be smaller p5 = 1 while p5 < target: p35 = p5 while p35 < target: # Ceiling integer division, avoiding conversion to float # (quotient = ceil(target / p35)) quotient = -(-target // p35) # Quickly find next power of 2 >= quotient p2 = 2**((quotient - 1).bit_length()) N = p2 * p35 if N == target: return N elif N < match: match = N p35 *= 3 if p35 == target: return p35 if p35 < match: match = p35 p5 *= 5 if p5 == target: return p5 if p5 < match: match = p5 return match
5,341,117
def deformed(solver):
    """ Method: output the deformed state to a file """
    V = FunctionSpace(solver.domain.mesh, solver.Ve)
    y = SpatialCoordinate(solver.domain.mesh)
    write = dot(Constant(solver.F_macro), y) + solver.v
    filename = File(os.path.join(solver.work_dir, "deformation.pvd"))
    filename << project(write, V)

    ################################
    # Easy plotting for the 2D deformation
    ################################
    #y = SpatialCoordinate(solver.domain.mesh)
    ##F = Identity(solver.domain.dim) + grad(solver.v) + Constant(solver.F_macro)  # Deformation gradient
    #p = plot(dot(Constant(solver.F_macro),y)+solver.v, mode="displacement")
    ##p = plot(solver.v, mode="displacement")
    ##p = plot(solver.stress[0, 0])
    #plt.colorbar(p)
    #plt.savefig("rve_deformed.pdf")
5,341,118
def get_user_requests():
    """ endpoint for getting requests of a particular user.
    ---
    parameters:
      - name: x-access-token
        in: header
        type: string
        required: true
      - name: user_id
        required: true
        in: path
        type: integer
    """
5,341,119
def human_format(val: int, fmt='.1f') -> str:
    """ convert e.g. 1230 -> '1.2 K' (assumes a positive integer) """
    units = ['', 'K', 'M', 'G']
    # clamp to the largest available unit to avoid indexing past the end of `units`
    base = min(int(np.floor(np.log10(val))) // 3, len(units) - 1)
    if base == 0:
        return str(val)
    val = val / 10**(3 * base)
    res = ('{{:{}}} {}'.format(fmt, units[base])).format(val)
    return res
5,341,120
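A short usage sketch for human_format above, assuming numpy is imported as np at module level as the function expects:

import numpy as np

print(human_format(950))                       # '950'   (below 1 K, returned unchanged)
print(human_format(1230))                      # '1.2 K'
print(human_format(2_500_000))                 # '2.5 M'
print(human_format(7_000_000_000, fmt='.2f'))  # '7.00 G'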
def select_or_insert(conn, table, id_name, payload, name=None, multi=False, insert=True): """ Prepare the SQL statements, payload MUST be a list """ log.debug('payload: {}'.format(payload)) if multi is False: sql_str = ''.join(['SELECT ', id_name, ' FROM ', table, ' WHERE ', name, ' LIKE (%s);']) result = execute_sql(conn, sql_str, payload) log.debug('select: {}'.format(result)) if result is None and insert is True: sql_str = ''.join(['INSERT INTO ', table, '(', name, ') VALUES (%s) RETURNING ', id_name, ';']) result = execute_sql(conn, sql_str, payload, commit=True) log.debug('insert: {}'.format(result)) else: id1, id2 = id_name sql_str = ''.join(['SELECT ', id1, ',', id2, ' FROM ', table, ' WHERE ', id1, ' = (%s) AND ', id2, ' = (%s);']) result = execute_sql(conn, sql_str, payload) log.debug('select: {}'.format(result)) if result is None and insert is True: sql_str = ''.join(['INSERT INTO ', table, '(', id1, ',', id2, ') VALUES (%s, %s) RETURNING ', id1, ',', id2, ';']) result = execute_sql(conn, sql_str, payload, commit=True) log.debug('insert: {}'.format(result)) return result
5,341,121
def random_choice(number: int) -> bool:
    """
    Generate a random int and compare with the argument passed

    :param int number: number passed
    :return: whether the argument is greater than or equal to a randomly
        generated number between 1 and 100
    :rtype: bool
    """
    return number >= randint(1, 100)
5,341,122
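A brief usage sketch for random_choice above (assuming randint comes from the standard random module, as the call suggests):

from random import randint

print(random_choice(100))  # always True: 100 >= any roll in [1, 100]
print(random_choice(0))    # always False: 0 is below the minimum roll of 1
print(random_choice(30))   # True roughly 30% of the time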
def request_l3_attachments(session, apic) -> Any:
    """Request current policy information for encap for L3Outs"""
    root = None
    uri = f"https://{apic}/api/class/l3extRsPathL3OutAtt.xml"
    response = session.get(uri, verify=False)

    try:
        root = ET.fromstring(response.text)
    except ET.ParseError:
        print("Something went wrong. Please try again")

    # If response has a totalCount of 0, notify user that the encap wasn't found
    if response.text.rfind("totalCount=\"0\"") != -1 or response.text.rfind("error code") != -1:
        print("\n######## No External Policy Assigned ##########")

    return root
5,341,123
def main(args=None):
    """ Runs an Explorer node """
    rclpy.init(args=args)
    explorer = ExploreController()
    explorer.run()
    rclpy.spin(explorer)
    explorer.destroy_node()
    rclpy.shutdown()
5,341,124
def robot(ctxt, file_=None): """Extract data from a ``pybot`` report. :param ctxt: the build context :type ctxt: `Context` :param file\_: name of the file containing the pybot report """ assert file_, 'Missing required attribute "file"' results = xmlio.Fragment() for case in RobotFrameworkReportParser(ctxt.resolve(file_)).parse(): status, source, suite_name, message = case testcase = xmlio.Element('robot', status=status.encode('utf-8'), source=source.encode('utf-8'), suite_name=suite_name.encode('utf-8'), message=message.encode('utf-8')) results.append(testcase) ctxt.report('robot', results)
5,341,125
def bet_plot( pressure, bet_points, minimum, maximum, slope, intercept, p_monolayer, bet_monolayer, ax=None ): """ Draw a BET plot. Parameters ---------- pressure : array Pressure points which will make up the x axis. bet_points : array BET-transformed points which will make up the y axis. minimum : int Lower bound of the selected points. maximum : int Higher bound of the selected points. slope : float Slope of the chosen linear region. intercept : float Intercept of the chosen linear region. p_monolayer : float Pressure at which statistical monolayer is achieved. rol_monolayer : float BET transform of the point at which statistical monolayer is achieved. ax : matplotlib axes object, default None The axes object where to plot the graph if a new figure is not desired. Returns ------- matplotlib.axes Matplotlib axes of the graph generated. The user can then apply their own styling if desired. """ # Generate the figure if needed if ax is None: _, ax = plt.pyplot.subplots(figsize=(6, 4)) ax.plot( pressure, bet_points, label='all points', **POINTS_ALL_STYLE, ) ax.plot( pressure[minimum:maximum], bet_points[minimum:maximum], label='chosen points', **POINTS_SEL_STYLE, ) x_lim = [0, pressure[maximum]] y_lim = [slope * x_lim[0] + intercept, slope * x_lim[1] + intercept] ax.plot( x_lim, y_lim, linestyle='--', color='black', label='model fit', ) ax.plot( p_monolayer, bet_monolayer, marker='X', markersize=10, linestyle='', color='k', label='monolayer point' ) ax.set_ylim(bottom=0, top=bet_points[maximum] * 1.2) ax.set_xlim(left=0, right=pressure[maximum] * 1.2) ax.set_title("BET plot") ax.set_xlabel('p/p°', fontsize=15) ax.set_ylabel('$\\frac{p/p°}{n ( 1- p/p°)}$', fontsize=15) ax.legend(loc='best') return ax
5,341,126
def paged_response(
    *,
    view: viewsets.GenericViewSet,
    queryset: Optional[QuerySet] = None,
    status_code: Optional[int] = None,
):
    """
    paged_response can be used when there is a need to paginate a custom API endpoint.

    Usage:

        class UsersView(ModelViewSet):
            ...

            @action(
                ['get'],
                detail=True,
                serializer_class=PostSerializer,
                filterset_class=PostsFilterSet,
            )
            def posts(self, request: Request, pk: Optional[str] = None):
                queryset = Post.objects.filter(user=self.get_object())
                return paged_response(view=self, queryset=queryset)

    :param view: any instance that satisfies the GenericViewSet interface
    :param queryset: Optional django.db.models.QuerySet. Default: get_queryset output
    :param status_code: Optional int
    :return: rest_framework.response.Response
    """
    status_code = status_code or status.HTTP_200_OK
    # Compare against None so that an intentionally empty queryset is not
    # silently replaced by the view's default queryset.
    queryset = view.get_queryset() if queryset is None else queryset
    queryset = view.filter_queryset(queryset)

    page = view.paginate_queryset(queryset)
    if page is not None:
        serializer = view.get_serializer(page, many=True)
        return view.get_paginated_response(serializer.data)

    serializer = view.get_serializer(queryset, many=True)
    return response.Response(serializer.data, status=status_code)
5,341,127
def getObjectPositions(mapData, threshold, findCenterOfMass=True):
    """Creates a segmentation map and finds objects above the given threshold.

    Args:
        mapData (:obj:`numpy.ndarray`): The 2d map to segment.
        threshold (float): The threshold above which objects will be selected.
        findCenterOfMass: If True, return the object center weighted according to the values in
            mapData. If False, return the pixel that holds the maximum value.

    Returns:
        objIDs (:obj:`numpy.ndarray`): Array of object ID numbers.
        objPositions (list): List of corresponding (y, x) positions.
        objNumPix (:obj:`numpy.ndarray`): Array listing number of pixels per object.
        segmentationMap (:obj:`numpy.ndarray`): The segmentation map (2d array).
    """
    if threshold < 0:
        raise Exception("Detection threshold (thresholdSigma in the config file) cannot be negative unless in forced photometry mode.")
    sigPix = np.array(np.greater(mapData, threshold), dtype=int)
    sigPixMask = np.equal(sigPix, 1)
    segmentationMap, numObjects = ndimage.label(sigPix)
    objIDs = np.unique(segmentationMap)
    if findCenterOfMass == True:
        objPositions = ndimage.center_of_mass(mapData, labels=segmentationMap, index=objIDs)
    else:
        objPositions = ndimage.maximum_position(mapData, labels=segmentationMap, index=objIDs)
    objNumPix = ndimage.sum(sigPixMask, labels=segmentationMap, index=objIDs)

    return objIDs, objPositions, objNumPix, segmentationMap
5,341,128
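A hedged usage sketch for getObjectPositions above on a toy map (assuming numpy as np and scipy.ndimage as ndimage, the names the snippet uses): two bright blobs above the threshold become two labelled objects, plus the background label 0.

import numpy as np
from scipy import ndimage

mapData = np.zeros((6, 6))
mapData[1, 1] = 5.0
mapData[4, 3:5] = 7.0
objIDs, objPositions, objNumPix, segMap = getObjectPositions(mapData, threshold=3.0)
print(objIDs)     # [0 1 2] -- label 0 is the background
print(objNumPix)  # pixels per label, e.g. [33.  1.  2.]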
def specific_humidity(p, RH, t, A=17.625, B=-30.11, C=610.94, masked=False):
    """
    From Mark G. Lawrence, BAMS Feb 2005, eq. (6)

    q = specific_humidity(p,RH,t,A,B,C)

    inputs:
        p = pressure (Pa)
        RH = relative humidity (0-1)
        t = temperature (K)

    keywords:
        A, B and C are optional fitting parameters from Alduchov and
        Eskridge (1996).
        masked = False (if True, perform operation on masked arrays)

    output:
        q, specific humidity (kg/kg)

    p, RH and t can be arrays.
    """
    if masked == False:
        es = C * exp(A*(t-273.15)/(B+t))
        q = 0.62198*(RH*es)/(maximum(p, es)-(1-0.62198)*es)
    else:
        es = C * ma.exp(A*(t-273.15)/(B+t))
        q = 0.62198*(RH*es)/(maximum(p, es)-(1-0.62198)*es)
    return q
5,341,129
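A rough worked example for specific_humidity above (assuming the bare exp and maximum names resolve to numpy functions at module level, e.g. from numpy import exp, maximum): at 20 °C, 50 % relative humidity and standard pressure, the saturation vapour pressure is about 2.33 kPa and q comes out near 7.2 g/kg.

from numpy import exp, maximum

q = specific_humidity(p=101325.0, RH=0.5, t=293.15)
print(round(q * 1000, 1))  # approximately 7.2 (g/kg)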
def list_versions():
    """List the EMDB-SFF versions that are migratable to the current version

    :return: status
    :return: version_count
    """
    version_count = len(VERSION_LIST)
    for version in VERSION_LIST[:-1]:
        _print('* {version}'.format(version=version))
    return os.EX_OK, version_count
5,341,130
def build_parser(): """ Build a pyparsing parser for our custom topology description language. :return: A pyparsing parser. :rtype: pyparsing.MatchFirst """ ParserElement.setDefaultWhitespaceChars(' \t') nl = Suppress(LineEnd()) inumber = Word(nums).setParseAction(lambda l, s, t: int(t[0])) fnumber = ( Combine( Optional('-') + Word(nums) + '.' + Word(nums) + Optional('E' | 'e' + Optional('-') + Word(nums)) ) ).setParseAction(lambda toks: float(toks[0])) boolean = ( CaselessLiteral('true') | CaselessLiteral('false') ).setParseAction(lambda l, s, t: t[0].casefold() == 'true') comment = Literal('#') + restOfLine + nl text = QuotedString('"') identifier = Word(alphas, alphanums + '_') empty_line = LineStart() + LineEnd() item_list = ( (text | fnumber | inumber | boolean) + Optional(Suppress(',')) + Optional(nl) ) custom_list = ( Suppress('(') + Optional(nl) + Group(OneOrMore(item_list)) + Optional(nl) + Suppress(')') ).setParseAction(lambda tok: tok.asList()) attribute = Group( identifier('key') + Suppress(Literal('=')) + ( custom_list | text | fnumber | inumber | boolean | identifier )('value') + Optional(nl) ) attributes = ( Suppress(Literal('[')) + Optional(nl) + OneOrMore(attribute) + Suppress(Literal(']')) ) node = identifier('node') port = Group( node + Suppress(Literal(':')) + (identifier | inumber)('port') ) link = Group( port('endpoint_a') + Suppress(Literal('--')) + port('endpoint_b') ) environment_spec = ( attributes + nl ).setResultsName('env_spec', listAllMatches=True) nodes_spec = ( Group( Optional(attributes)('attributes') + Group(OneOrMore(node))('nodes') ) + nl ).setResultsName('node_spec', listAllMatches=True) ports_spec = ( Group( Optional(attributes)('attributes') + Group(OneOrMore(port))('ports') ) + nl ).setResultsName('port_spec', listAllMatches=True) link_spec = ( Group( Optional(attributes)('attributes') + link('links') ) + nl ).setResultsName('link_spec', listAllMatches=True) statements = OneOrMore( comment | link_spec | ports_spec | nodes_spec | environment_spec | empty_line ) return statements
5,341,131
def decode_owner(owner_id: str) -> str:
    """Decode an owner name from an 18-character hexadecimal string"""
    if len(owner_id) != 18:
        raise ValueError('Invalid owner id.')
    hex_splits = split_by(owner_id, num=2)
    bits = ''
    for h in hex_splits:
        bits += hex_to_bin(h)
    test_owner = ''
    for seq in split_by(bits, 6):
        num = bin_to_dec(seq)
        test_owner += get_ascii_val_from_bit_value(num)
    if test_owner[0] != '?':
        return test_owner[:math.ceil(MAX_OWNER_LENGTH/2)] + '..' + test_owner[-math.floor(MAX_OWNER_LENGTH/2):]
    while test_owner[0] == '?':
        test_owner = test_owner[1:]
    return test_owner
5,341,132
def test_inline_config():
    """:meth:`config.inline()` turns a shell command list into a string."""
    test_config = config.inline(ibefore_config)

    assert test_config == iafter_config
5,341,133
def _get_back_up_generator(frame_function, *args, **kwargs):
    """Create a generator for the provided animation function that backs up
    the cursor after a frame. Assumes that the animation function provides
    a generator that yields strings of constant width and height.

    Args:
        frame_function: A function that returns a FrameGenerator.
        args: Arguments for frame_function.
        kwargs: Keyword arguments for frame_function.

    Returns:
        a generator that generates backspace/backline characters for
        the animation func generator.
    """
    lines = next(frame_function(*args, **kwargs)).split('\n')
    width = len(lines[0])
    height = len(lines)
    if height == 1:
        return util.BACKSPACE_GEN(width)

    return util.BACKLINE_GEN(height)
5,341,134
def from_aiohttp( schema_path: str, app: Any, *, base_url: Optional[str] = None, method: Optional[Filter] = None, endpoint: Optional[Filter] = None, tag: Optional[Filter] = None, operation_id: Optional[Filter] = None, skip_deprecated_operations: bool = False, validate_schema: bool = True, force_schema_version: Optional[str] = None, data_generation_methods: DataGenerationMethodInput = DEFAULT_DATA_GENERATION_METHODS, code_sample_style: str = CodeSampleStyle.default().name, **kwargs: Any, ) -> BaseOpenAPISchema: """Load Open API schema from an AioHTTP app. :param str schema_path: An in-app relative URL to the schema. :param app: An AioHTTP app instance. """ from ...extra._aiohttp import run_server # pylint: disable=import-outside-toplevel port = run_server(app) app_url = f"http://127.0.0.1:{port}/" url = urljoin(app_url, schema_path) return from_uri( url, base_url=base_url, method=method, endpoint=endpoint, tag=tag, operation_id=operation_id, skip_deprecated_operations=skip_deprecated_operations, validate_schema=validate_schema, force_schema_version=force_schema_version, data_generation_methods=data_generation_methods, code_sample_style=code_sample_style, **kwargs, )
5,341,135
def wasb(connection_name, path_from, path_to, is_file, raise_errors, sync_fw):
    """Create wasb path context."""
    _download(
        connection_name=connection_name,
        connection_kind=V1ConnectionKind.WASB,
        path_from=path_from,
        path_to=path_to,
        is_file=is_file,
        raise_errors=raise_errors,
        sync_fw=sync_fw,
    )
5,341,136
def summarize(fname, start, stop,output_dir): """ Process file[start:stop] start and stop both point to first char of a line (or EOF) """ ls_1995_1996 = [] for i in range (1995,2006): ls_1995_1996.append([]) with open(fname, newline='', encoding='utf-8') as inf: # jump to start position pos = start inf.seek(pos) for line in inf: sys.stdout.write("\r" + random.choice(string.ascii_letters)) sys.stdout.flush() if "1995" in line: ls_1995_1996[0].append(line) elif "1996" in line: ls_1995_1996[1].append(line) elif "1997" in line: ls_1995_1996[2].append(line) elif "1998" in line: ls_1995_1996[3].append(line) elif "1999" in line: ls_1995_1996[4].append(line) elif "2000" in line: ls_1995_1996[5].append(line) elif "2001" in line: ls_1995_1996[6].append(line) elif "2002" in line: ls_1995_1996[7].append(line) elif "2003" in line: ls_1995_1996[8].append(line) elif "2004" in line: ls_1995_1996[9].append(line) elif "2005" in line: ls_1995_1996[10].append(line) pos += len(line) if pos >= stop: break write_to_file(fname, ls_1995_1996, output_dir, start, stop) return ls_1995_1996
5,341,137
def compute_recall(true_positives, false_negatives):
    """Compute recall

    >>> compute_recall(0, 10)
    0.0
    >>> compute_recall(446579, 48621)
    0.901815
    """
    return true_positives / (true_positives + false_negatives)
5,341,138
def get_highest_seat_id():
    """ Returns the highest seat ID from all of the boarding passes. """
    return max(get_seat_ids())
5,341,139
def error_function_index(gpu_series, result_series): """ utility function to compare GPU array vs CPU array Parameters ------ gpu_series: cudf.Series GPU computation result series result_series: pandas.Series Pandas computation result series Returns ----- double maximum error of the two arrays int maximum index value diff """ err = error_function(gpu_series, result_series) error_index = np.abs(gpu_series.index.to_array() - result_series.index.values).max() return err, error_index
5,341,140
def get_tipo_aqnext(tipo) -> int:
    """Resolve the data type used by Django."""

    tipo_ = 3
    # subtipo_ = None

    if tipo in ["int", "uint", "serial"]:
        tipo_ = 16
    elif tipo in ["string", "stringlist", "pixmap", "counter"]:
        tipo_ = 3
    elif tipo in ["double"]:
        tipo_ = 19
    elif tipo in ["bool", "unlock"]:
        tipo_ = 18
    elif tipo in ["date"]:
        tipo_ = 26
    elif tipo in ["time"]:
        tipo_ = 27

    return tipo_
5,341,141
def fetch_indicators_command(client: Client) -> List[Dict]:
    """Wrapper for fetching indicators from the feed to the Indicators tab.

    Args:
        client: Client object with request

    Returns:
        Indicators.
    """
    indicators = fetch_indicators(client)
    return indicators
5,341,142
def test_dtm_alt_min_max(): """ Test dtm alt min/max """ dtm_file = os.path.join(data_path(), "dtm", "srtm_ventoux", "srtm90_non_void_filled", "N44E005.hgt") geoid_file = os.path.join(data_path(), "dtm", "geoid", "egm96_15.gtx") dtm_ventoux = DTMIntersection(dtm_file, geoid_file, roi=[256, 256, 512, 512], roi_is_in_physical_space=False) alt_min = dtm_ventoux.alt_min_cell alt_max = dtm_ventoux.alt_max_cell alt_valid_min = os.path.join(data_path(), "srtm_ventoux_alt_min.npy") alt_valid_max = os.path.join(data_path(), "srtm_ventoux_alt_max.npy") alt_min_vt = np.load(alt_valid_min) alt_max_vt = np.load(alt_valid_max) np.testing.assert_array_equal(alt_min, alt_min_vt) np.testing.assert_array_equal(alt_max, alt_max_vt)
5,341,143
def hr_lr_ttest(hr, lr):
    """
    Returns the t-test (T statistic and p value), comparing the features
    for high- and low-risk entities.
    """
    res = stats.ttest_ind(hr.to_numpy(), lr.to_numpy(), axis=0, nan_policy="omit", equal_var=False)
    r0 = pd.Series(res[0], index=hr.columns)
    r1 = pd.Series(res[1], index=hr.columns)
    return pd.DataFrame({"ttest_T": r0, "ttest_p": r1})
5,341,144
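A small usage sketch for hr_lr_ttest above (assuming pandas as pd and scipy.stats as stats, matching the names in the snippet): two frames with identical feature columns give one t-statistic and p-value per column.

import pandas as pd
from scipy import stats

hr = pd.DataFrame({"feat_a": [5.0, 6.0, 7.0, 8.0], "feat_b": [1.0, 1.2, 0.9, 1.1]})
lr = pd.DataFrame({"feat_a": [1.0, 2.0, 1.5, 2.5], "feat_b": [1.0, 1.1, 1.0, 1.2]})
print(hr_lr_ttest(hr, lr))  # index: feat_a, feat_b; columns: ttest_T, ttest_p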
def _is_empty(str_: str) -> bool:
    """Check whether a string is empty.

    Args:
        str_ (str): The string to check.

    Returns:
        bool: True if the string is empty, False otherwise.
    """
    if str_:
        return False
    return True
5,341,145
def delete(val: str) -> None:
    """Deletes a word from the trie if it exists and displays the status of the operation

    Args:
        val (str): The word you wish to delete from the trie
    """
    print(request('Delete keyword', val))
5,341,146
def template(name, typename, value):
    """
    <configProperty>
      <name>${name}</name>
      <value>
        <type>
          <kind>tk_${typename}</kind>
        </type>
        <value>
          <${typename}>${value}</${typename}>
        </value>
      </value>
    </configProperty>
    """
5,341,147
def main():
    """
    Run this main function if this script is called directly.

    :return: None
    """
    working_directory = os.path.dirname(os.path.realpath(__file__))
    print(working_directory)
    repo_details = RepoDetails(working_directory, sub_paths=['\\README.md'], use_directory_hash=True)
    repo_details.print_summary()
5,341,148
def energy_generate_random_range_dim2(filepath,dim_1_low,dim_1_high,dim_2_low,dim_2_high,num=500): """ 6, 8 and 10 """ queryPool=[] query=[] for _ in range(num): left1 = random.randint(dim_1_low, dim_1_high) right1 = random.randint(left1, dim_1_high) query.append((left1, right1)) left2 = random.randint(dim_2_low, dim_2_high) # right2 = random.randint(left2, dim_2_high) query.append((left2, left2)) queryPool.append(query[:]) query.clear() with open(filepath,"w+") as f: f.write(str(queryPool)) return queryPool
5,341,149
def preprocess(data_folder): """ Runs the whole pipeline and returns NumPy data array""" SAMPLE_TIME = 30 CHANNELS = ['EEG Fpz-Cz', 'EEG Pz-Oz'] res_array = [] for path in os.listdir(data_folder): if path.endswith("PSG.edf"): full_path = os.path.join(data_folder, path) raw = mne.io.read_raw_edf(full_path, preload=True) mne_eeg = remove_sleepEDF(raw, CHANNELS) mne_filtered = filter(mne_eeg, CHANNELS) epochs = divide_epochs(mne_filtered, SAMPLE_TIME) epochs = downsample(epochs, CHANNELS) epochs = epochs.get_data() # turns into NumPy Array f_epochs = normalization(epochs) res_array.append([f_epochs, path[:path.index("-")]]) #save(f_epochs, path[:path.index("-")], output_folder) return res_array
5,341,150
def get_waas_policies(compartment_id: Optional[str] = None, display_names: Optional[Sequence[str]] = None, filters: Optional[Sequence[pulumi.InputType['GetWaasPoliciesFilterArgs']]] = None, ids: Optional[Sequence[str]] = None, states: Optional[Sequence[str]] = None, time_created_greater_than_or_equal_to: Optional[str] = None, time_created_less_than: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWaasPoliciesResult: """ This data source provides the list of Waas Policies in Oracle Cloud Infrastructure Web Application Acceleration and Security service. Gets a list of WAAS policies. ## Example Usage ```python import pulumi import pulumi_oci as oci test_waas_policies = oci.waas.get_waas_policies(compartment_id=var["compartment_id"], display_names=var["waas_policy_display_names"], ids=var["waas_policy_ids"], states=var["waas_policy_states"], time_created_greater_than_or_equal_to=var["waas_policy_time_created_greater_than_or_equal_to"], time_created_less_than=var["waas_policy_time_created_less_than"]) ``` :param str compartment_id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment. This number is generated when the compartment is created. :param Sequence[str] display_names: Filter policies using a list of display names. :param Sequence[str] ids: Filter policies using a list of policy OCIDs. :param Sequence[str] states: Filter policies using a list of lifecycle states. :param str time_created_greater_than_or_equal_to: A filter that matches policies created on or after the specified date and time. :param str time_created_less_than: A filter that matches policies created before the specified date-time. """ __args__ = dict() __args__['compartmentId'] = compartment_id __args__['displayNames'] = display_names __args__['filters'] = filters __args__['ids'] = ids __args__['states'] = states __args__['timeCreatedGreaterThanOrEqualTo'] = time_created_greater_than_or_equal_to __args__['timeCreatedLessThan'] = time_created_less_than if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('oci:waas/getWaasPolicies:getWaasPolicies', __args__, opts=opts, typ=GetWaasPoliciesResult).value return AwaitableGetWaasPoliciesResult( compartment_id=__ret__.compartment_id, display_names=__ret__.display_names, filters=__ret__.filters, id=__ret__.id, ids=__ret__.ids, states=__ret__.states, time_created_greater_than_or_equal_to=__ret__.time_created_greater_than_or_equal_to, time_created_less_than=__ret__.time_created_less_than, waas_policies=__ret__.waas_policies)
5,341,151
def _get_output_type(output): """Choose appropriate output data types for HTML and LaTeX.""" if output.output_type == 'stream': html_datatype = latex_datatype = 'ansi' text = output.text output.data = {'ansi': text[:-1] if text.endswith('\n') else text} elif output.output_type == 'error': html_datatype = latex_datatype = 'ansi' output.data = {'ansi': '\n'.join(output.traceback)} else: for datatype in DISPLAY_DATA_PRIORITY_HTML: if datatype in output.data: html_datatype = datatype break else: html_datatype = ', '.join(output.data.keys()) for datatype in DISPLAY_DATA_PRIORITY_LATEX: if datatype in output.data: latex_datatype = datatype break else: latex_datatype = ', '.join(output.data.keys()) return html_datatype, latex_datatype
5,341,152
def cat_train_validate_on_cv( logger, run_id, train_X, train_Y, test_X, metric, kf, features, params={}, num_class=None, cat_features=None, log_target=False, ): """Train a CatBoost model, validate using cross validation. If `test_X` has a valid value, creates a new model with number of best iteration found during holdout phase using training as well as validation data. Note: For CatBoost, categorical features need to be in String or Category data type. """ if num_class: # This should be true for multiclass classification problems y_oof = np.zeros(shape=(len(train_X), num_class)) y_predicted = np.zeros(shape=(len(test_X), num_class)) else: y_oof = np.zeros(shape=(len(train_X))) y_predicted = np.zeros(shape=(len(test_X))) cv_scores = [] result_dict = {} feature_importance = pd.DataFrame() best_iterations = [] fold = 0 n_folds = kf.get_n_splits() for train_index, validation_index in kf.split(train_X[features], train_Y): fold += 1 logger.info(f"fold {fold} of {n_folds}") X_train, X_validation, y_train, y_validation = _get_X_Y_from_CV( train_X, train_Y, train_index, validation_index ) if log_target: # feature_names accepts only list cat_train = Pool( data=X_train, label=np.log1p(y_train), feature_names=features, cat_features=cat_features, ) cat_eval = Pool( data=X_validation, label=np.log1p(y_validation), feature_names=features, cat_features=cat_features, ) else: # feature_names accepts only list cat_train = Pool( data=X_train, label=y_train, feature_names=features, cat_features=cat_features, ) cat_eval = Pool( data=X_validation, label=y_validation, feature_names=features, cat_features=cat_features, ) model = CatBoost(params=params) # List of categorical features have already been passed as a part of Pool # above. No need to pass via the argument of fit() model.fit(cat_train, eval_set=cat_eval, use_best_model=True) del train_index, X_train, y_train, cat_train gc.collect() if log_target: y_oof[validation_index] = np.expm1(model.predict(cat_eval)) else: y_oof[validation_index] = model.predict(cat_eval) if test_X is not None: cat_test = Pool( data=test_X, feature_names=features, cat_features=cat_features ) if log_target: y_predicted += np.expm1(model.predict(cat_test)) else: y_predicted += model.predict(cat_test) del cat_eval, cat_test best_iteration = model.best_iteration_ best_iterations.append(best_iteration) logger.info(f"Best number of iterations for fold {fold} is: {best_iteration}") cv_oof_score = _calculate_perf_metric( metric, y_validation, y_oof[validation_index] ) cv_scores.append(cv_oof_score) logger.info(f"CV OOF Score for fold {fold} is {cv_oof_score}") del validation_index, X_validation, y_validation gc.collect() feature_importance_on_fold = model.get_feature_importance() feature_importance = _capture_feature_importance_on_fold( feature_importance, features, feature_importance_on_fold, fold ) # util.update_tracking( # run_id, # "metric_fold_{}".format(fold), # cv_oof_score, # is_integer=False, # no_of_digits=5, # ) result_dict = _evaluate_and_log( logger, run_id, train_Y, y_oof, y_predicted, metric, n_folds, result_dict, cv_scores, best_iterations, ) del y_oof gc.collect() result_dict = _capture_feature_importance( feature_importance, n_important_features=10, result_dict=result_dict ) logger.info("Training/Prediction completed!") return result_dict
5,341,153
def find_left(char_locs, pt):
    """Finds the 'left' coord of a word that a character belongs to.
    Similar to find_top()
    """
    if pt not in char_locs:
        return []
    l = list(pt)
    while (l[0]-1, l[1]) in char_locs:
        l = [l[0]-1, l[1]]
    return l
5,341,154
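A quick usage sketch for find_left above, with character locations stored as (x, y) tuples:

char_locs = {(1, 1), (2, 1), (3, 1), (5, 4)}
print(find_left(char_locs, (3, 1)))  # [1, 1] -- walk left while neighbours exist
print(find_left(char_locs, (9, 9)))  # []     -- point is not a known character location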
def make_file_url(file_id, base_url):
    """Create URL to access record by ID."""
    url_parts = list(urlparse.urlparse(base_url))
    url_parts[2] = pathlib.posixpath.join(
        DATAVERSE_API_PATH, DATAVERSE_FILE_API
    )
    args_dict = {'persistentId': file_id}
    url_parts[4] = urllib.parse.urlencode(args_dict)
    return urllib.parse.urlunparse(url_parts)
5,341,155
def std_func(bins, mass_arr, vel_arr): """ Calculate std from mean = 0 Parameters ---------- bins: array Array of bins mass_arr: array Array of masses to be binned vel_arr: array Array of velocities Returns --------- std_arr: array Standard deviation from 0 of velocity difference values in each mass bin """ last_index = len(bins)-1 std_arr = [] for index1, bin_edge in enumerate(bins): cen_deltav_arr = [] for index2, stellar_mass in enumerate(mass_arr): if stellar_mass >= bin_edge and index1 == last_index: cen_deltav_arr.append(vel_arr[index2]) elif stellar_mass >= bin_edge and stellar_mass < bins[index1+1]: cen_deltav_arr.append(vel_arr[index2]) mean = 0 # mean = np.mean(cen_deltav_arr) diff_sqrd_arr = [] for value in cen_deltav_arr: diff = value - mean diff_sqrd = diff**2 diff_sqrd_arr.append(diff_sqrd) mean_diff_sqrd = np.mean(diff_sqrd_arr) std = np.sqrt(mean_diff_sqrd) std_arr.append(std) return std_arr
5,341,156
def test_transact_opcode(deploy_client):
    """ The receipt status field of a transaction that did not throw is 0x1 """
    contract_proxy = deploy_rpc_test_contract(deploy_client, "RpcTest")

    address = contract_proxy.contract_address
    assert len(deploy_client.web3.eth.getCode(to_checksum_address(address))) > 0

    check_block = deploy_client.get_checking_block()
    startgas = contract_proxy.estimate_gas(check_block, "ret") * 2

    transaction = contract_proxy.transact("ret", startgas)
    deploy_client.poll(transaction)

    assert check_transaction_threw(deploy_client, transaction) is None, "must be empty"
5,341,157
def log_neg(rho, mask=[1, 0]):
    """
    Calculate the logarithmic negativity for a density matrix

    Parameters:
    -----------
    rho : qobj/array-like
        Input density matrix

    Returns:
    --------
    logneg:
        Logarithmic Negativity
    """
    if rho.type != 'oper':
        raise TypeError("Input must be a density matrix")

    rhopt = partial_transpose(rho, mask)
    logneg = log2(rhopt.norm())

    return logneg
5,341,158
def test_remove(tmpdir, kind, driver, specify_driver): """Test various dataset removal operations""" extension = {"ESRI Shapefile": "shp", "GeoJSON": "json"}[driver] filename = "delete_me.{extension}".format(extension=extension) output_filename = str(tmpdir.join(filename)) create_sample_data(output_filename, driver=driver) if kind == "collection": to_delete = fiona.open(output_filename, "r") else: to_delete = output_filename assert os.path.exists(output_filename) if specify_driver: fiona.remove(to_delete, driver=driver) else: fiona.remove(to_delete) assert not os.path.exists(output_filename)
5,341,159
def create_polygon(pixels_selected: set, raster_path: str) -> gpd.GeoDataFrame:
    """
    Transforms each pixel index of the selected pixels into map coordinates
    and builds the resulting polygon.

    Parameters
    --------------
    pixels_selected: set
        Set with the pixels selected for the connected component
    raster_path: str
        Path to the source raster

    Return
    --------------
    polygon: geopandas.GeoDataFrame
        Polygon generated from the points
    """
    with rio.open(raster_path) as raster:
        pixels_cords = []
        for x, y in pixels_selected:
            cord = raster.xy(x, y)
            pixels_cords.append(cord)
        new_polygon_geometry = Polygon(pixels_cords)
        polygon_raw = gpd.GeoDataFrame(
            index=[0], crs=raster.meta["crs"], geometry=[new_polygon_geometry]
        ).unary_union.convex_hull
        new_polygon = gpd.GeoDataFrame(
            index=[0], crs=raster.meta["crs"], geometry=[polygon_raw]
        )
    return new_polygon
5,341,160
def test_delete_webhook(client):
    """Tests deletion of a webhook"""
    resp = client.delete_webhook(PROJECT_ID, WEBHOOK_ID)

    assert resp['project_id'] == PROJECT_ID
    assert resp['webhook_deleted']
5,341,161
def main(rows: int, columns: int, mines: int) -> None:
    """Your favorite sweeping game, terminal style."""
    ui = PySweeperUI(rows, columns, mines)
    ui.main()
5,341,162
def read_output(path_elec,path_gas): """ Used to read the building simulation I/O file Args: path_elec: file path where data is to be read from in minio. This is a mandatory parameter and in the case where only one simulation I/O file is provided, the path to this file should be indicated here. path_gas: This would be path to the gas output file. This is optional, if there is no gas output file to the loaded, then a value of path_gas ='' should be used Returns: btap_df: Dataframe containing the clean building parameters file. floor_sq: the square foot of the building """ # Load the data from blob storage. s3 = acm.establish_s3_connection(settings.MINIO_URL, settings.MINIO_ACCESS_KEY, settings.MINIO_SECRET_KEY) logger.info("read_output s3 connection %s", s3) btap_df_elec = pd.read_excel(s3.open(settings.NAMESPACE.joinpath(path_elec).as_posix())) if path_gas: btap_df_gas = pd.read_excel(s3.open(settings.NAMESPACE.joinpath(path_gas).as_posix())) btap_df = pd.concat([btap_df_elec, btap_df_gas], ignore_index=True) else: btap_df = copy.deepcopy(btap_df_elec) floor_sq = btap_df['bldg_conditioned_floor_area_m_sq'].unique() # dropping output features present in the output file and dropping columns with one unique value output_drop_list = ['Unnamed: 0', ':erv_package', ':template'] for col in btap_df.columns: if ((':' not in col) and (col not in ['energy_eui_additional_fuel_gj_per_m_sq', 'energy_eui_electricity_gj_per_m_sq', 'energy_eui_natural_gas_gj_per_m_sq', 'net_site_eui_gj_per_m_sq'])): output_drop_list.append(col) btap_df = btap_df.drop(output_drop_list,axis=1) btap_df = copy.deepcopy(clean_data(btap_df)) btap_df['Total Energy'] = copy.deepcopy(btap_df[['net_site_eui_gj_per_m_sq']].sum(axis=1)) drop_list=['energy_eui_additional_fuel_gj_per_m_sq','energy_eui_electricity_gj_per_m_sq','energy_eui_natural_gas_gj_per_m_sq','net_site_eui_gj_per_m_sq'] btap_df = btap_df.drop(drop_list,axis=1) return btap_df,floor_sq
5,341,163
def parse_lmap(filename, goal, values): """Parses an LMAP file into a map of literal weights, a LiteralDict object, the literal that corresponds to the goal variable-value pair, and the largest literal found in the file.""" weights = {} max_literal = 0 literal_dict = LiteralDict() for line in open(filename): if (line.startswith('cc$I') or line.startswith('cc$C') or line.startswith('cc$P')): components = line.split('$') literal = int(components[2]) weights[literal] = components[3] max_literal = max(max_literal, abs(literal)) if line.startswith('cc$I'): variable = components[5] value = int(components[6].rstrip()) literal_dict.add(variable, values[variable][value], literal=literal) if variable == goal.variable and value == goal.value_index: goal_literal = literal return weights, literal_dict, goal_literal, max_literal
5,341,164
def show_sample(sample): """Shows the sample with tasks and answers""" print("Train:") for i in range(len(sample["train"])): fig = plt.figure() ax1 = fig.add_subplot(121) ax1.matshow(np.array(sample["train"][i]["input"]), cmap="Set3", norm=mpl.colors.Normalize(vmin=0, vmax=9)) ax2 = fig.add_subplot(122) ax2.matshow(np.array(sample["train"][i]["output"]), cmap="Set3", norm=mpl.colors.Normalize(vmin=0, vmax=9)) plt.show() print("Test:") for i in range(len(sample["test"])): fig = plt.figure() ax1 = fig.add_subplot(121) ax1.matshow(np.array(sample["test"][i]["input"]), cmap="Set3", norm=mpl.colors.Normalize(vmin=0, vmax=9)) if "output" in sample["test"][i]: ax2 = fig.add_subplot(122) ax2.matshow(np.array(sample["test"][i]["output"]), cmap="Set3", norm=mpl.colors.Normalize(vmin=0, vmax=9)) plt.show()
5,341,165
def read_image(path: str):
    """
    Read an image file

    :param path: str. Path to image
    :return: The image
    """
    return imageio.imread(path)
5,341,166
def main():
    """
    Quality Control Workflow.
    """
    cfp = ConfigParser()

    if cfp.start is not None and cfp.end is not None:
        assert cfp.start <= cfp.end
    else:
        print("Error Start or End Time.")
        sys.exit()

    if cfp.index != "all":
        print("Index should be [ all ].")
        sys.exit()

    if cfp.model == "qc":
        qc_entrance(station=cfp.name, index=cfp.index, start=cfp.start, end=cfp.end)
    else:
        print("Model should be [ qc ].")
        sys.exit()
5,341,167
def test_send_invalid_apikey(
    api_options, email_id, recipient, email_data
):
    """ Test send with invalid API key. """
    invalid_api = sendwithus.api('INVALID_API_KEY', **api_options)
    result = invalid_api.send(
        email_id,
        recipient,
        email_data=email_data
    )
    assert result.status_code == 403
5,341,168
def electrolyte_conductivity_Capiglia1999(c_e, T, T_inf, E_k_e, R_g):
    """
    Conductivity of LiPF6 in EC:DMC as a function of ion concentration. The original
    data is from [1]. The fit is from Dualfoil [2].

    References
    ----------
    .. [1] C Capiglia et al. 7Li and 19F diffusion coefficients and thermal
           properties of non-aqueous electrolyte solutions for rechargeable
           lithium batteries. Journal of power sources 81 (1999): 859-862.
    .. [2] http://www.cchem.berkeley.edu/jsngrp/fortran.html

    Parameters
    ----------
    c_e: :class: `numpy.Array`
        Dimensional electrolyte concentration
    T: :class: `numpy.Array`
        Dimensional temperature
    T_inf: double
        Reference temperature
    E_k_e: double
        Electrolyte conductivity activation energy
    R_g: double
        The ideal gas constant

    Returns
    -------
    :`numpy.Array`
        Electrolyte conductivity
    """
    sigma_e = (
        0.0911
        + 1.9101 * (c_e / 1000)
        - 1.052 * (c_e / 1000) ** 2
        + 0.1554 * (c_e / 1000) ** 3
    )

    arrhenius = np.exp(E_k_e / R_g * (1 / T_inf - 1 / T))

    return sigma_e * arrhenius
5,341,169
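A quick numerical check of the Capiglia fit above (plain numpy arithmetic; at T = T_inf the Arrhenius factor is exactly 1, so the activation energy chosen here is irrelevant): at c_e = 1000 mol/m^3 the cubic polynomial alone gives about 1.10.

import numpy as np

sigma = electrolyte_conductivity_Capiglia1999(
    c_e=np.array([1000.0]), T=298.15, T_inf=298.15, E_k_e=1.0, R_g=8.314
)
print(sigma)  # ~[1.1046] = 0.0911 + 1.9101 - 1.052 + 0.1554, times exp(0) = 1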
def random_bdays(n):
    """Returns a list of integers between 1 and 365, with length n.

    n: int

    returns: list of int
    """
    t = []
    for i in range(n):
        bday = random.randint(1, 365)
        t.append(bday)
    return t
5,341,170
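A minimal usage sketch for random_bdays above (the standard random module assumed imported, as the function requires), in the birthday-paradox style experiment this kind of helper is often used for:

import random

bdays = random_bdays(23)
has_shared_birthday = len(set(bdays)) < len(bdays)
print(bdays[:5], has_shared_birthday)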
def int_to_charset(val, charset):
    """ Turn a non-negative integer into a string. """
    if not val >= 0:
        raise ValueError('"val" must be a non-negative integer.')
    if val == 0:
        return charset[0]
    output = ""
    while val > 0:
        val, digit = divmod(val, len(charset))
        output += charset[digit]
    # reverse the characters in the output and return
    return output[::-1]
5,341,171
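A brief usage sketch for int_to_charset above; it behaves as a base-N encoder where N is the charset length:

print(int_to_charset(255, "0123456789abcdef"))  # 'ff'  (hexadecimal)
print(int_to_charset(5, "01"))                  # '101' (binary)
print(int_to_charset(0, "abc"))                 # 'a'   (zero maps to the first symbol)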
def Reset(remove_obstacles=False, remove_weight=False):
    """ Resets all the nodes to normal nodes except the start and end nodes """
    global remaining_nodes_to_flip
    remaining_nodes_to_flip = 0
    screen.fill(background_color)
    for column in node_list:
        for node in column:
            if node.is_obstacle:
                if remove_obstacles:
                    node.Reset()
                node.Draw()
            elif node.is_weight:
                if remove_weight:
                    node.Reset()
                else:
                    node.ResetDistances()
                    node.ChangeColor(colors.NodeColors.normal.value)
            elif node not in [start_node, end_node]:
                node.Reset()
            else:
                node.ResetDistances()
            node.Draw()
5,341,172
def search(tabela, *, parms='*', clause=None):
    """
    Function that takes the name of the table to query as a required
    parameter, the search filters as keyword parameters, and returns all
    rows found
    """
    banco = Banco()
    banco.connect()
    banco.execute(f"SELECT {parms} FROM {tabela} {clause}")
    rows = banco.fetchall()
    banco.disconnect()
    return rows
5,341,173
def make_mask_3d(imagename, thresh, fl=False, useimage=False, pixelmin=0,
                 major=0, minor=0, pixelsize=0, line=False,
                 overwrite_old=True, closing_diameter=6, pbimage=None,
                 myresidual=None, myimage=None, extension='.fullmask',
                 spectral_closing=3):
    """
    Makes a mask on any image you want it to.

    Parameters
    ----------
    imagename : {casa image without file extension}
        Name of the image you want to mask, without the file extension.
    thresh : {float}
        Masking threshold, in whatever units the image is using.
    fl : {bool}
        If you want to combine the mask with a previous iteration of clean
        (True); if not (i.e. you are using the dirty image), then False.
    useimage : {bool}
        If you want to use the dirty image or the residual for the masking
        (I usually use the residual - so set to False).
    pixelmin : {float}
        Min number of pixels within a masked region to be taken into the
        final mask, i.e. if your beam size is 1 arcsec and pixel size is
        0.2 arcsec, then three beams would be pixelmin = 75.
    major : {float}
        Beam major axis, in arcsec.
    minor : {float}
        Beam minor axis, in arcsec.
    pixelsize : {float}
        Length of one side of a pixel, in arcsec.
    line : {bool}
        If the image is a line cube or continuum.

    Returns
    -------
    maskim : {str}
        The name of the final mask (hopefully), the ".fullmask" image by default.
    """
    import os
    from tasks import immath
    import numpy as np
    from scipy import ndimage
    from taskinit import iatool
    ia = iatool()

    mymask = imagename + '.mask'
    if myimage is None:
        myimage = imagename + '.image'
    maskim_nopb = imagename + '{}.nopb'.format(extension)
    maskim = imagename + extension
    threshmask = imagename + '.threshmask'
    if myresidual is None:
        myresidual = imagename + '.residual'
    if pbimage is None:
        pbimage = imagename + '.pb'

    if overwrite_old:
        os.system('rm -rf ' + maskim)
        os.system('rm -rf ' + maskim_nopb)
        os.system('rm -rf ' + threshmask)

    if useimage:
        print('Using Image')
        immath(imagename=[myimage], outfile=threshmask,
               expr='iif(IM0 > ' + str(thresh) + ',1.0,0.0)')
    else:
        immath(imagename=[myresidual], outfile=threshmask,
               expr='iif(IM0 > ' + str(thresh) + ',1.0,0.0)')

    if fl:
        print('Combining with previous mask...')
        immath(outfile=maskim_nopb,
               expr='iif(("' + threshmask + '" + "' + mymask + '") > 0.1,1.0,0.0)')
    else:
        print('Making fresh new mask from image/residual')
        os.system('cp -r ' + threshmask + ' ' + maskim_nopb)

    immath(imagename=[pbimage, maskim_nopb], outfile=maskim,
           expr='iif(IM0 > 0.0, IM1, 0.0)')

    print("Using pixelmin = {}".format(pixelmin))
    beamarea = (major * minor * np.pi / (4. * np.log(2.))) / (pixelsize**2)
    print('Beam area = {}'.format(beamarea))

    ia.open(maskim)
    mask = ia.getchunk()

    diam = closing_diameter  # Change for large beam dilation
    structure = np.ones((diam, diam))
    dist = ((np.indices((diam, diam)) - (diam - 1) / 2.)**2).sum(axis=0)**0.5
    # circularize the closing element
    structure[dist > diam / 2.] = 0

    if line:
        for k in range(mask.shape[3]):
            mask_temp = mask[:, :, 0, k]
            mask_temp = ndimage.binary_closing(mask_temp, structure=structure)
            labeled, j = ndimage.label(mask_temp)
            myhistogram = ndimage.measurements.histogram(labeled, 0, j + 1, j + 1)
            object_slices = ndimage.find_objects(labeled)
            threshold = pixelmin
            for i in range(j):
                if myhistogram[i + 1] < threshold:
                    mask_temp[object_slices[i]] = 0.0
            mask[:, :, 0, k] = mask_temp

        # add an additional closing run, this time with a 3d (4d?) st. element
        structure_3d = np.ones((diam, diam, 1, spectral_closing))
        dist = ((np.indices((diam, diam)) - (diam - 1) / 2.)**2).sum(axis=0)**0.5
        # circularize the closing element
        dist_3d = np.repeat(dist[:, :, None, None], spectral_closing, axis=3)
        structure_3d[dist_3d > diam / 2.] = 0
        mask_closed = ndimage.binary_closing(mask, structure=structure_3d)
    else:
        raise RuntimeError("3D closing operation can only operate on cubes.")

    ia.putchunk(mask_closed.astype(int))
    ia.done()
    print('Mask created.')
    return maskim
5,341,174
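A sketch of how `make_mask_3d` might be invoked from within a CASA session after an imaging run; the image base name, threshold, and beam/pixel values below are placeholders, not values from the original snippet.

# Hypothetical call inside a CASA session, after tclean produced
# mycube.image / mycube.residual / mycube.pb (all names are placeholders):
mask_image = make_mask_3d('mycube', thresh=0.005, fl=False, useimage=False,
                          pixelmin=75, major=1.0, minor=1.0, pixelsize=0.2,
                          line=True)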
def hash(data): """run the default hashing algorithm""" return _blacke2b_digest(data)
5,341,175
def _print_log_events( events: typing.List[dict], function_name: str, show_name: bool, ): """ Print out the given set of events from a `get_log_events` call. :param events: The raw events to print out. :param function_name: The name of the function from which the events came. :param show_name: Whether to include the function name when printing out the log message. """ base_prefix = f"[{function_name}]" if show_name else "" for event in events: timestamp = datetime.datetime.fromtimestamp(event["timestamp"] / 1000) prefix = f"{base_prefix}{timestamp.isoformat()} : " try: record = json.loads(event["message"]) message = json.dumps(record, indent=2) except json.decoder.JSONDecodeError: message = event["message"] message = message.strip() for line in message.split("\n"): print(f"{prefix}{line}") prefix = " " * len(prefix)
5,341,176
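An illustrative call to `_print_log_events` with fabricated events shaped like the records returned by CloudWatch's get_log_events; the function name is a placeholder.

# Fabricated events: one JSON-formatted message and one plain-text message.
sample_events = [
    {"timestamp": 1609459200000, "message": '{"level": "INFO", "msg": "started"}'},
    {"timestamp": 1609459201000, "message": "plain text line\nwith a second line"},
]
_print_log_events(sample_events, function_name="my-function", show_name=True)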
def _insert_text_func(s, readline): """Creates a function to insert text via readline.""" def inserter(): readline.insert_text(s) readline.redisplay() return inserter
5,341,177
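A possible use of `_insert_text_func` as a readline pre-input hook, so the next `input()` prompt comes pre-filled with an editable default; this assumes a GNU readline build where `set_pre_input_hook` is available.

import readline

# Pre-fill the next prompt with a default answer the user can edit.
readline.set_pre_input_hook(_insert_text_func("yes", readline))
answer = input("Continue? ")
readline.set_pre_input_hook(None)  # remove the hook afterwards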
def init_logging(verbose: bool) -> None: """ Initialize the logging system """ if verbose: logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.DEBUG) else: logging.basicConfig(format="%(levelname)s: %(message)s")
5,341,178
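Typical start-up usage of `init_logging`, for example wired to a --verbose CLI flag; the flag handling here is only illustrative.

import logging

# With verbose=True the DEBUG message below is shown; with False it is not.
init_logging(verbose=True)
logging.debug("debug output enabled")
logging.warning("warnings are always shown")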
def dumps(ndb_model, **kwargs): """Custom json dumps using the custom encoder above.""" return NdbEncoder(**kwargs).encode(ndb_model)
5,341,179
def classifier_on_rbm_scores(models, dataset_train, dataset_test, clfs=None):
    """
    TODO: store progress on hierarchical clustering; that is the slowest step
    clfs: list of classifiers
    """
    if clfs is None:
        clfs = [CLASSIFIER]

    features_order = list(models.keys())
    feature_dim = len(features_order)

    def get_X_y_features(dataset):
        X = np.zeros((len(dataset), feature_dim))
        y = np.zeros(len(dataset), dtype=int)
        for idx, pair in enumerate(dataset):
            elem_arr, elem_label = pair
            preprocessed_input = binarize_image_data(image_data_collapse(elem_arr), threshold=MNIST_BINARIZATION_CUTOFF)
            features = np.array([score_1digit_model(models[key], preprocessed_input) for key in features_order])
            #features = np.array([score_1digit_model(models[idx], preprocessed_input) for idx in range(10)])
            X[idx, :] = features
            y[idx] = elem_label
        return X, y

    print("[classifier_on_rbm_features] Step 1: get features for training")
    X_train_reduced, y_train = get_X_y_features(dataset_train)
    print("\tTraining data dimension", X_train_reduced.shape, y_train.shape)

    print("[classifier_on_rbm_features] Step 2: train classifier layer")
    for clf in clfs:
        print('fitting...')
        clf.fit(X_train_reduced, y_train)  # fit data

    print("[classifier_on_rbm_features] Step 3: get features for testing")
    X_test_reduced, y_test = get_X_y_features(dataset_test)

    print("[classifier_on_rbm_features] Step 4: classification metrics and confusion matrix")
    cms = [0] * len(clfs)
    accs = [0] * len(clfs)
    for idx, clf in enumerate(clfs):
        print('predicting...')
        predictions = clf.predict(X_test_reduced).astype(int)
        confusion_matrix, matches = confusion_matrix_from_pred(predictions, y_test)
        acc = float(matches.count(True) / len(matches))
        cms[idx] = confusion_matrix
        accs[idx] = acc
        print("Successful test cases: %d/%d (%.3f)" % (matches.count(True), len(matches), acc))
    return cms, accs
5,341,180
def _evolve_no_collapse_psi_out(config): """ Calculates state vectors at times tlist if no collapse AND no expectation values are given. """ global _cy_rhs_func global _cy_col_spmv_func, _cy_col_expect_func global _cy_col_spmv_call_func, _cy_col_expect_call_func num_times = len(config.tlist) psi_out = np.array([None] * num_times) expect_out = [] for i in range(config.e_num): if config.e_ops_isherm[i]: # preallocate real array of zeros expect_out.append(np.zeros(num_times, dtype=float)) else: # preallocate complex array of zeros expect_out.append(np.zeros(num_times, dtype=complex)) expect_out[i][0] = \ cy_expect_psi_csr(config.e_ops_data[i], config.e_ops_ind[i], config.e_ops_ptr[i], config.psi0, config.e_ops_isherm[i]) if debug: print(inspect.stack()[0][3]) if not _cy_rhs_func: _mc_func_load(config) opt = config.options if config.tflag in [1, 10, 11]: ODE = ode(_cy_rhs_func) code = compile('ODE.set_f_params(' + config.string + ')', '<string>', 'exec') exec(code) elif config.tflag == 2: ODE = ode(_cRHStd) ODE.set_f_params(config) elif config.tflag in [20, 22]: if config.options.rhs_with_state: ODE = ode(_tdRHStd_with_state) else: ODE = ode(_tdRHStd) ODE.set_f_params(config) elif config.tflag == 3: if config.options.rhs_with_state: ODE = ode(_pyRHSc_with_state) else: ODE = ode(_pyRHSc) ODE.set_f_params(config) else: ODE = ode(cy_ode_rhs) ODE.set_f_params(config.h_data, config.h_ind, config.h_ptr) # initialize ODE solver for RHS ODE.set_integrator('zvode', method=opt.method, order=opt.order, atol=opt.atol, rtol=opt.rtol, nsteps=opt.nsteps, first_step=opt.first_step, min_step=opt.min_step, max_step=opt.max_step) # set initial conditions ODE.set_initial_value(config.psi0, config.tlist[0]) psi_out[0] = Qobj(config.psi0, config.psi0_dims, config.psi0_shape) for k in range(1, num_times): ODE.integrate(config.tlist[k], step=0) # integrate up to tlist[k] if ODE.successful(): state = ODE.y / dznrm2(ODE.y) psi_out[k] = Qobj(state, config.psi0_dims, config.psi0_shape) for jj in range(config.e_num): expect_out[jj][k] = cy_expect_psi_csr( config.e_ops_data[jj], config.e_ops_ind[jj], config.e_ops_ptr[jj], state, config.e_ops_isherm[jj]) else: raise ValueError('Error in ODE solver') return expect_out, psi_out
5,341,181
def say_hello(): """Prints "Hello, world!" """ print("Hello, World!")
5,341,182
def get_version_from_package() -> str: """Read the package version from the source without importing it.""" path = os.path.join(os.path.dirname(__file__), "arcpy2foss/__init__.py") path = os.path.normpath(os.path.abspath(path)) with open(path) as f: for line in f: if line.startswith("__version__"): _, version = line.split(" = ", 1) version = version.replace('"', "").strip() return version
5,341,183
def delete_cert(resource, event, trigger, **kwargs):
    """Delete client certificate and private key"""
    if not verify_client_cert_on():
        return

    with get_certificate_manager(**kwargs) as cert:
        if cfg.CONF.nsx_v3.nsx_client_cert_storage.lower() == "none":
            filename = get_cert_filename(**kwargs)
            if not filename:
                LOG.info("Please specify file containing the certificate "
                         "using filename property")
                return
            cert.delete_pem(filename)
        else:
            if not cert.exists():
                LOG.info("Nothing to clean")
                return

            cert.delete()

        LOG.info("Client certificate deleted successfully")
5,341,184
def create_critic_train_op(hparams, critic_loss, global_step): """Create Discriminator train op.""" with tf.name_scope('train_critic'): critic_optimizer = tf.train.AdamOptimizer(hparams.critic_learning_rate) output_vars = [ v for v in tf.trainable_variables() if v.op.name.startswith('critic') ] if FLAGS.critic_update_dis_vars: if FLAGS.discriminator_model == 'bidirectional_vd': critic_vars = [ v for v in tf.trainable_variables() if v.op.name.startswith('dis/rnn') ] elif FLAGS.discriminator_model == 'seq2seq_vd': critic_vars = [ v for v in tf.trainable_variables() if v.op.name.startswith('dis/decoder/rnn/multi_rnn_cell') ] critic_vars.extend(output_vars) else: critic_vars = output_vars print('\nOptimizing Critic vars:') for v in critic_vars: print(v) critic_grads = tf.gradients(critic_loss, critic_vars) critic_grads_clipped, _ = tf.clip_by_global_norm(critic_grads, FLAGS.grad_clipping) critic_train_op = critic_optimizer.apply_gradients( zip(critic_grads_clipped, critic_vars), global_step=global_step) return critic_train_op, critic_grads_clipped, critic_vars
5,341,185
def interpolate(data, tstep): """Interpolate limit order data. Uses left-hand interpolation, and assumes that the data is indexed by timestamp. """ T, N = data.shape timestamps = data.index t0 = timestamps[0] - (timestamps[0] % tstep) # 34200 tN = timestamps[-1] - (timestamps[-1] % tstep) + tstep # 57600 timestamps_new = np.arange(t0 + tstep, tN + tstep, tstep) # [34200, ..., 57600] X = np.zeros((len(timestamps_new), N)) # np.array X[-1, :] = data.values[-1, :] t = timestamps_new[0] # keeps track of time in NEW sampling frequency for i in np.arange(0, T): # observations in data... if timestamps[i] > t: s = timestamps[i] - (timestamps[i] % tstep) tidx = int((t - t0) / tstep - 1) sidx = int((s - t0) / tstep) # plus one for python indexing (below) X[tidx:sidx, :] = data.values[i - 1, :] t = s + tstep else: pass return pd.DataFrame(X, index=timestamps_new, columns=data.columns)
5,341,186
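A small self-contained illustration of the left-hand resampling performed by `interpolate`; the timestamps and quotes are invented. Each grid timestamp carries the most recent observation at or before that timestamp.

import pandas as pd

# Three irregular observations resampled onto a 5-second grid.
raw = pd.DataFrame(
    {"bid": [100.0, 100.5, 101.0], "ask": [100.2, 100.7, 101.3]},
    index=[34201.0, 34206.5, 34213.0],
)
print(interpolate(raw, tstep=5))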
def test_aligner_9(): """ testing semi-global alignment example coming from https://ocw.mit.edu/courses/electrical-engineering-and-computer-science/6-096-algorithms-for-computational-biology-spring-2005/lecture-notes/lecture5_newest.pdf page 22. The example targets `end-gap-free` alignment. This implementation tests both `end-gap-free` and `semi-global` alignments. """ align_options = {'semi-global':('CAGCACTTGGATTCTCGG', 'CAGCGTGG '),'end-gap-free':('CAGCACTTGGATTCTCGG', 'CAGCGTGG')} gap_open = -2 match = 1 mismatch = -1 for align_type, seq_tup in align_options.items(): aligner = Aligner(align_type) s1, s2 = seq_tup print("s1: ", s1) print("s2: ", s2) ls1 = generate_char_seqnodes(s1) ls2 = generate_char_seqnodes(s2) score_matrix = custom_substitution_table(set(s1).union(set(s2)), match, mismatch) gaps_param = GapsParams(gap_open, gap_ext=None) res = aligner.align(ls1, ls2, gaps_param, score_matrix = score_matrix) all_paths = aligner.retrieve_alignments(*res) print("alignment_type: ", align_type) print("score_matrix \n", score_matrix) print("gap_open:{} , gap_ext:{}".format(gap_open, None)) print("alignment paths:") print(all_paths) for path in all_paths[-1]: o1=[] o2=[] for alignment in path: o1.append(alignment[0]) o2.append(alignment[-1]) print("".join(o1)) print("".join(o2)) print("~"*40) print("-"*40)
5,341,187
def doColorTransfer(org_content, output, raw_data, with_color_match = False):
    """
    org_content       path or 0-1 np array
    output            path or 0-1 np array
    raw_data          boolean | True if the inputs are already 0-1 arrays,
                      False if they are file paths that still need loading
    """
    if not raw_data:
        org_content = imageio.imread(org_content, pilmode="RGB").astype(float)/256
        output = imageio.imread(output, pilmode="RGB").astype(float)/256
    org_content = skimage.transform.resize(org_content, output.shape)
    if with_color_match:
        output = match_color(output, org_content)
    org_content = rgb2luv(org_content)
    org_content[:,:,0] = output.mean(2)
    output = luv2rgb(org_content)
    output[output < 0] = 0
    output[output > 1] = 1
    return output
5,341,188
def update_file(filename: str, variable_dict: dict) -> None:
    """Update the given file with the given data"""
    try:
        with open(f"data/gamedata/{filename}.json", "w", encoding="utf-8") as file:
            file.write(dumps(variable_dict, indent=3))
    except TypeError as error:
        print(f"TypeError while serializing {filename}: {error}")
5,341,189
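A minimal usage sketch for `update_file`; the filename and payload are invented, and the call assumes the data/gamedata/ directory already exists.

# Writes data/gamedata/players.json with the given dictionary.
update_file("players", {"alice": {"score": 12}, "bob": {"score": 7}})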
def test_chunked_full_blocks(): """ Test chunking when the input length is a multiple of the block length. """ chunks = list(_chunked("abcd", 2)) assert len(chunks) == 2 assert len(chunks[0]) == 2 assert len(chunks[1]) == 2
5,341,190
def tb_filename(tb): """Helper to get filename from traceback""" return tb.tb_frame.f_code.co_filename
5,341,191
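Typical usage of `tb_filename`: extracting the filename from the traceback of a caught exception.

import sys

try:
    1 / 0
except ZeroDivisionError:
    _, _, tb = sys.exc_info()
    print(tb_filename(tb))  # prints the path of the file that raised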
async def fetch_symbol(symbol: str):
    """ get symbol info """
    db = SessionLocal()
    try:
        s = db.query(SymbolSchema).filter(SymbolSchema.symbol == symbol).first()
        res = {"symbol": s.symbol, "name": s.name}
    finally:
        # make sure the session is released even if the lookup fails
        db.close()
    return res
5,341,192
def compile_to_json(expression: expr.Expression): """Compile expression tree to json-serializable python datatypes""" pass
5,341,193
def all_live_response_sessions(cb: CbResponseAPI) -> List: """List all LR sessions still in server memory.""" return [sesh for sesh in cb.get_object(f"{CBLR_BASE}/session")]
5,341,194
def unserialize_model_params(bin: bytes):
    """Unserializes model or checkpoint or diff stored in db to list of tensors"""
    state = StatePB()
    state.ParseFromString(bin)
    worker = sy.VirtualWorker(hook=None)
    state = protobuf.serde._unbufferize(worker, state)
    model_params = state.tensors()
    return model_params
5,341,195
def config(clazz): """Decorator allowing to transform a python object into a configuration file, and vice versa :param clazz: class to decorate :return: the decorated class """ return deserialize(serialize(dataclass(clazz)))
5,341,196
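A hedged sketch of applying the `config` decorator above to a dataclass-style class; the class and field names are invented, and the resulting behavior depends on the `serialize`/`deserialize` helpers the decorator composes, which are not shown here.

# Hypothetical configuration class; turned into a dataclass and wired for
# (de)serialization by the decorator defined above.
@config
class DatabaseConfig:
    host: str = "localhost"
    port: int = 5432
    use_tls: bool = True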
def wrap_method_once(func: t.Callable[..., t.Any]) -> t.Callable[..., t.Any]:
    """manage Runnable state for given method"""
    # we don't re-wrap methods that already have the state management wrapper
    if hasattr(func, 'handler_wrapped'):
        return func

    @functools.wraps(func)
    def wrapped_runnable_method(*args, **kw):
        # check whether the first arg is self; if not, call the wrapped
        # function directly, since we might have wrapped a callable that
        # is not a method
        if args and isinstance(args[0], Runnable):
            self, args = args[0], args[1:]
            return self._call_wrapped_method(func, *args, **kw)
        else:
            return func(*args, **kw)

    wrapped_runnable_method.handler_wrapped = True
    return wrapped_runnable_method
5,341,197
async def all(iterable: ty.AsyncIterator[T]) -> bool: """Return ``True`` if **all** elements of the iterable are true (or if the iterable is empty). :param iterable: The asynchronous iterable to be checked. :type iterable: ~typing.AsyncIterator :returns: Whether all elements of the iterable are true or if the iterable is empty. :rtype: bool """ async for x in iter(iterable): if not x: return False return True
5,341,198
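A usage sketch for the asynchronous `all` defined above (which shadows the builtin in its module); it assumes the module-level async `iter` helper it calls accepts async generators.

import asyncio

async def checks():
    for flag in (True, True, True):
        yield flag

# Prints True: every value produced by the async generator is truthy.
print(asyncio.run(all(checks())))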
def set_file_logger(filename: str, name: str = 'parsl', level: int = logging.DEBUG, format_string: Optional[str] = None):
    """Add a file log handler.

    Args:
        - filename (string): Name of the file to write logs to
        - name (string): Logger name
        - level (logging.LEVEL): Set the logging level.
        - format_string (string): Set the format string

    Returns:
        - None
    """
    if format_string is None:
        format_string = "%(asctime)s.%(msecs)03d %(name)s:%(lineno)d [%(levelname)s] %(message)s"

    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    handler = logging.FileHandler(filename)
    handler.setLevel(level)
    formatter = logging.Formatter(format_string, datefmt='%Y-%m-%d %H:%M:%S')
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    # see note in set_stream_logger for notes about logging
    # concurrent.futures
    futures_logger = logging.getLogger("concurrent.futures")
    futures_logger.addHandler(handler)
5,341,199
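A usage sketch for `set_file_logger`: attaching a file handler for Parsl's logs at INFO level; the filename is a placeholder and `logging` is assumed to be imported as in the module above.

# Send parsl (and concurrent.futures) log records at INFO and above
# to a local file.
set_file_logger("parsl_run.log", level=logging.INFO)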