content (string, lengths 35–762k) · sha1 (string, length 40) · id (int64, 0–3.66M)
def reversedict(dct):
    """ Reverse the {key:val} in dct to {val:key} """
    newmap = {}
    for (key, val) in dct.items():  # .iteritems() is Python 2 only
        newmap[val] = key
    return newmap
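# Example (illustrative): if two keys share a value, the last key seen wins.
assert reversedict({"a": 1, "b": 2}) == {1: "a", 2: "b"}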
f7a5a102546270a2e6aa7fb52d4fa6dd5e826753
3,638,900
from unfurl import yamlmanifest
import os


def createNewEnsemble(templateVars, project, targetPath, mono):
    """
    If "localEnv" is in templateVars, clone that ensemble;
    otherwise create one from a template with templateVars
    """
    # targetPath is relative to the project root
    assert not os.path.isabs(targetPath)
    if not targetPath or targetPath == ".":
        destDir, manifestName = DefaultNames.EnsembleDirectory, DefaultNames.Ensemble
    elif targetPath.endswith(".yaml") or targetPath.endswith(".yml"):
        destDir, manifestName = os.path.split(targetPath)
    else:
        destDir = targetPath
        manifestName = DefaultNames.Ensemble
    # choose a destDir that doesn't conflict with an existing folder
    # (i.e. if default ensemble already exists)
    destDir = project.getUniquePath(destDir)
    # destDir is now absolute
    targetPath = os.path.normpath(os.path.join(destDir, manifestName))

    if "localEnv" not in templateVars:
        # we found a template file to clone
        assert project
        sourceDir = os.path.normpath(
            os.path.join(project.projectRoot, templateVars["sourceDir"])
        )
        specRepo, relPath, revision, bare = project.findPathInRepos(sourceDir)
        if not specRepo:
            raise UnfurlError(
                '"%s" is not in a git repository. Cloning from plain file directories not yet supported'
                % os.path.abspath(sourceDir)
            )
        manifestPath = writeEnsembleManifest(
            os.path.join(project.projectRoot, destDir),
            manifestName,
            specRepo,
            sourceDir,
            templateVars,
        )
        localEnv = LocalEnv(manifestPath, project=project)
        manifest = yamlmanifest.ReadOnlyManifest(localEnv=localEnv)
    elif templateVars:
        # didn't find a template file
        # look for an ensemble at the given path or use the source project's default
        manifest = yamlmanifest.clone(templateVars["localEnv"], targetPath)
    else:
        raise UnfurlError("can't find anything to clone")
    _createEnsembleRepo(manifest, mono and project.projectRepo)
    return destDir, manifest  # XXX need to add manifest to unfurl.yaml
c08400deec0099f6833cdd54c2240b10c8d2fd1e
3,638,901
from typing import Iterable
from typing import Sequence
from typing import Optional
import logging
import time
import itertools

import numpy as np
import graph_tool.flow


def mincut_graph_tool(edges: Iterable[Sequence[np.uint64]],
                      affs: Sequence[np.uint64],
                      sources: Sequence[np.uint64],
                      sinks: Sequence[np.uint64],
                      logger: Optional[logging.Logger] = None) -> np.ndarray:
    """ Computes the min cut on a local graph

    :param edges: n x 2 array of uint64s
    :param affs: float array of length n
    :param sources: uint64
    :param sinks: uint64
    :return: m x 2 array of uint64s
        edges that should be removed
    """
    time_start = time.time()

    original_edges = edges

    # Stitch supervoxels across chunk boundaries and represent those that are
    # connected with a cross chunk edge with a single id. This may cause id
    # changes among sinks and sources that need to be taken care of.
    edges, affs, mapping, remapping = merge_cross_chunk_edges(edges.copy(),
                                                              affs.copy())

    dt = time.time() - time_start
    if logger is not None:
        logger.debug("Cross edge merging: %.2fms" % (dt * 1000))
    time_start = time.time()

    mapping_vec = np.vectorize(lambda a: mapping[a] if a in mapping else a)

    if len(edges) == 0:
        return []

    if len(mapping) > 0:
        assert np.unique(list(mapping.keys()), return_counts=True)[1].max() == 1

    remapped_sinks = mapping_vec(sinks)
    remapped_sources = mapping_vec(sources)

    sinks = remapped_sinks
    sources = remapped_sources

    # Assemble edges: Edges after remapping combined with edges between sinks
    # and sources
    sink_edges = list(itertools.product(sinks, sinks))
    source_edges = list(itertools.product(sources, sources))

    comb_edges = np.concatenate([edges, sink_edges, source_edges])
    comb_affs = np.concatenate([affs, [float_max, ] *
                                (len(sink_edges) + len(source_edges))])

    # To make things easier for everyone involved, we map the ids to
    # [0, ..., len(unique_ids) - 1].
    # Generate weighted graph with graph_tool
    weighted_graph, cap, gt_edges, unique_ids = \
        flatgraph_utils.build_gt_graph(comb_edges, comb_affs,
                                       make_directed=True)

    sink_graph_ids = np.where(np.in1d(unique_ids, sinks))[0]
    source_graph_ids = np.where(np.in1d(unique_ids, sources))[0]

    if logger is not None:
        logger.debug(f"{sinks}, {sink_graph_ids}")
        logger.debug(f"{sources}, {source_graph_ids}")

    dt = time.time() - time_start
    if logger is not None:
        logger.debug("Graph creation: %.2fms" % (dt * 1000))
    time_start = time.time()

    # # Get rid of connected components that are not involved in the local
    # # mincut
    # cc_prop, ns = graph_tool.topology.label_components(weighted_graph)
    #
    # if len(ns) > 1:
    #     cc_labels = cc_prop.get_array()
    #
    #     for i_cc in range(len(ns)):
    #         cc_list = np.where(cc_labels == i_cc)[0]
    #
    #         # If connected component contains no sources and/or no sinks,
    #         # remove its nodes from the mincut computation
    #         if not np.any(np.in1d(source_graph_ids, cc_list)) or \
    #                 not np.any(np.in1d(sink_graph_ids, cc_list)):
    #             weighted_graph.delete_vertices(cc)  # wrong

    # Compute mincut
    src, tgt = weighted_graph.vertex(source_graph_ids[0]), \
        weighted_graph.vertex(sink_graph_ids[0])

    res = graph_tool.flow.boykov_kolmogorov_max_flow(weighted_graph, src, tgt,
                                                     cap)

    part = graph_tool.flow.min_st_cut(weighted_graph, src, cap, res)

    labeled_edges = part.a[gt_edges]
    cut_edge_set = gt_edges[labeled_edges[:, 0] != labeled_edges[:, 1]]

    dt = time.time() - time_start
    if logger is not None:
        logger.debug("Mincut comp: %.2fms" % (dt * 1000))
    time_start = time.time()

    if len(cut_edge_set) == 0:
        return []

    time_start = time.time()

    # Make sure we did not do something wrong: Check if sinks and sources are
    # among each other and not in different sets
    for i_cc in np.unique(part.a):
        # Make sure to read real ids and not graph ids
        cc_list = unique_ids[np.array(np.where(part.a == i_cc)[0],
                                      dtype=int)]  # np.int is deprecated

        # if logger is not None:
        #     logger.debug("CC size = %d" % len(cc_list))

        if np.any(np.in1d(sources, cc_list)):
            assert np.all(np.in1d(sources, cc_list))
            assert ~np.any(np.in1d(sinks, cc_list))

        if np.any(np.in1d(sinks, cc_list)):
            assert np.all(np.in1d(sinks, cc_list))
            assert ~np.any(np.in1d(sources, cc_list))

    dt = time.time() - time_start
    if logger is not None:
        logger.debug("Verifying local graph: %.2fms" % (dt * 1000))

    # Extract original ids
    # This has potential to be optimized
    remapped_cutset = []
    for s, t in flatgraph_utils.remap_ids_from_graph(cut_edge_set, unique_ids):
        if s in remapping:
            s = remapping[s]
        else:
            s = [s]

        if t in remapping:
            t = remapping[t]
        else:
            t = [t]

        remapped_cutset.extend(list(itertools.product(s, t)))
        remapped_cutset.extend(list(itertools.product(t, s)))

    remapped_cutset = np.array(remapped_cutset, dtype=np.uint64)

    remapped_cutset_flattened_view = remapped_cutset.view(dtype='u8,u8')
    edges_flattened_view = original_edges.view(dtype='u8,u8')

    cutset_mask = np.in1d(remapped_cutset_flattened_view,
                          edges_flattened_view)

    return remapped_cutset[cutset_mask]
be29b44cdeaddd5f26605d9793ebe6b2e2fe71fc
3,638,902
def mock_graph_literal():
    """Creates a mock tree

    Metasyntactic variables: https://www.ietf.org/rfc/rfc3092.txt
    """
    graph_dict = [
        {
            "frame": {"name": "foo", "type": "function"},
            "metrics": {"time (inc)": 130.0, "time": 0.0},
            "children": [
                {
                    "frame": {"name": "bar"},
                    "metrics": {"time (inc)": 20.0, "time": 5.0},
                    "children": [
                        {"frame": {"name": "baz", "type": "function"},
                         "metrics": {"time (inc)": 5.0, "time": 5.0}},
                        {"frame": {"name": "grault"},
                         "metrics": {"time (inc)": 10.0, "time": 10.0}},
                    ],
                },
                {
                    "frame": {"name": "qux", "type": "function"},
                    "metrics": {"time (inc)": 60.0, "time": 0.0},
                    "children": [
                        {
                            "frame": {"name": "quux"},
                            "metrics": {"time (inc)": 60.0, "time": 5.0},
                            "children": [
                                {
                                    "frame": {"name": "corge", "type": "function"},
                                    "metrics": {"time (inc)": 55.0, "time": 10.0},
                                    "children": [
                                        {
                                            "frame": {"name": "bar"},
                                            "metrics": {"time (inc)": 20.0, "time": 5.0},
                                            "children": [
                                                {"frame": {"name": "baz", "type": "function"},
                                                 "metrics": {"time (inc)": 5.0, "time": 5.0}},
                                                {"frame": {"name": "grault"},
                                                 "metrics": {"time (inc)": 10.0, "time": 10.0}},
                                            ],
                                        },
                                        {"frame": {"name": "grault"},
                                         "metrics": {"time (inc)": 10.0, "time": 10.0}},
                                        {"frame": {"name": "garply", "type": "function"},
                                         "metrics": {"time (inc)": 15.0, "time": 15.0}},
                                    ],
                                }
                            ],
                        }
                    ],
                },
                {
                    "frame": {"name": "waldo", "type": "function"},
                    "metrics": {"time (inc)": 50.0, "time": 0.0},
                    "children": [
                        {
                            "frame": {"name": "fred", "type": "function"},
                            "metrics": {"time (inc)": 35.0, "time": 5.0},
                            "children": [
                                {"frame": {"name": "plugh", "type": "function"},
                                 "metrics": {"time (inc)": 5.0, "time": 5.0}},
                                {
                                    "frame": {"name": "xyzzy", "type": "function"},
                                    "metrics": {"time (inc)": 25.0, "time": 5.0},
                                    "children": [
                                        {
                                            "frame": {"name": "thud", "type": "function"},
                                            "metrics": {"time (inc)": 25.0, "time": 5.0},
                                            "children": [
                                                {"frame": {"name": "baz", "type": "function"},
                                                 "metrics": {"time (inc)": 5.0, "time": 5.0}},
                                                {"frame": {"name": "garply", "type": "function"},
                                                 "metrics": {"time (inc)": 15.0, "time": 15.0}},
                                            ],
                                        }
                                    ],
                                },
                            ],
                        },
                        {"frame": {"name": "garply", "type": "function"},
                         "metrics": {"time (inc)": 15.0, "time": 15.0}},
                    ],
                },
            ],
        },
        {
            "frame": {"name": "waldo", "type": "function"},
            "metrics": {"time (inc)": 30.0, "time": 10.0},
            "children": [
                {
                    "frame": {"name": "bar"},
                    "metrics": {"time (inc)": 20.0, "time": 5.0},
                    "children": [
                        {"frame": {"name": "baz", "type": "function"},
                         "metrics": {"time (inc)": 5.0, "time": 5.0}},
                        {"frame": {"name": "grault"},
                         "metrics": {"time (inc)": 10.0, "time": 10.0}},
                    ],
                }
            ],
        },
    ]
    return graph_dict
4b65f0dfffe705963c1041fbbef65d85af306f4f
3,638,903
def parse_ADD_ins(tokens):
    """Attempts to parse an ADD instruction."""
    failure = None
    assert len(tokens) > 0
    if tokens[0].text.upper() != 'ADD':
        return failure
    statement = Obj()
    statement.type = 'STATEMENT'
    statement.statement_type = 'INSTRUCTION'
    statement.instruction = 'ADD'
    operands = parse_operands_DR_SR1_SR2(tokens[1:])
    if operands:
        statement.operands = operands
        return statement
    operands = parse_operands_DR_SR1_imm5(tokens[1:])
    if operands:
        statement.operands = operands
        return statement
    return failure
ffc515d0079dbaf860a10de675542e798abfd4a3
3,638,904
import re


def commonIntegerPredicate(field):
    """return any integers"""
    return tuple(re.findall(r"\d+", field))
955dc61fa4293f21c707b538ea218b15d5a95fb2
3,638,905
import numpy as np


def spatialft(image, cosine_window=True, rmdc=True):
    """Take the fourier transform of an image (or flow field). Shift the
    quadrants around so that low spatial frequencies are in the center of
    the 2D fourier transformed image"""
    # raised cosine window on image to avoid border artifacts
    (dim1, dim2) = np.shape(image)
    if cosine_window:
        cosfilter = np.tile(np.hanning(dim2), (dim1, 1)) * \
            (np.tile(np.hanning(dim1), (dim2, 1)).T)
        image = image * cosfilter
    # remove DC component
    if rmdc:
        image = image - np.mean(image)
    ps = np.abs(np.fft.fftshift(np.fft.fft2(image)))**2
    fqs = [np.fft.fftshift(np.fft.fftfreq(np.shape(image)[0])),
           np.fft.fftshift(np.fft.fftfreq(np.shape(image)[1]))]
    return (ps, fqs)
cafca20ec79dcaca6d6dfb11c18156077b172ab0
3,638,906
import numpy as np


def _get_instrument_parameters(ufile, filemetadata):
    """ Return a dictionary containing instrument parameters. """
    # pulse width
    pulse_width = filemetadata('pulse_width')
    pulse_width['data'] = ufile.get_pulse_widths() / _LIGHT_SPEED  # m->sec

    # assume that the parameters in the first ray represent the beam widths,
    # bandwidth and frequency in the entire volume
    first_ray = ufile.rays[0]
    field_header = first_ray.field_headers[0]
    beam_width_h = field_header['beam_width_h'] / 64.
    beam_width_v = field_header['beam_width_v'] / 64.
    bandwidth = field_header['bandwidth'] / 16. * 1.e6
    wavelength_cm = field_header['wavelength_cm'] / 64.
    frequency_hz = _LIGHT_SPEED / (wavelength_cm / 100.)  # was misnamed wavelength_hz

    # radar_beam_width_h
    radar_beam_width_h = filemetadata('radar_beam_width_h')
    radar_beam_width_h['data'] = np.array([beam_width_h], dtype='float32')

    # radar_beam_width_v
    radar_beam_width_v = filemetadata('radar_beam_width_v')  # was '..._w', apparently a typo
    radar_beam_width_v['data'] = np.array([beam_width_v], dtype='float32')

    # radar_receiver_bandwidth
    radar_receiver_bandwidth = filemetadata('radar_receiver_bandwidth')
    radar_receiver_bandwidth['data'] = np.array([bandwidth], dtype='float32')

    # polarization_mode
    polarization_mode = filemetadata('polarization_mode')
    polarization_mode['data'] = ufile.get_sweep_polarizations()

    # frequency
    frequency = filemetadata('frequency')
    frequency['data'] = np.array([frequency_hz], dtype='float32')

    # prt
    prt = filemetadata('prt')
    prt['data'] = ufile.get_prts() / 1e6  # us->sec

    instrument_parameters = {
        'pulse_width': pulse_width,
        'radar_beam_width_h': radar_beam_width_h,
        'radar_beam_width_v': radar_beam_width_v,
        'radar_receiver_bandwidth': radar_receiver_bandwidth,
        'polarization_mode': polarization_mode,
        'frequency': frequency,
        'prt': prt,
    }

    # nyquist velocity if defined
    nyquist_velocity = filemetadata('nyquist_velocity')
    nyquist_velocity['data'] = ufile.get_nyquists()
    if nyquist_velocity['data'] is not None:
        instrument_parameters['nyquist_velocity'] = nyquist_velocity

    return instrument_parameters
af6ee2097848a672ec18c2199faece072f3990f1
3,638,907
import os

from netCDF4 import Dataset  # assumed source of `Dataset` for reading NetCDF files


def get_deployment_mode(path):
    """
    Work out the 'deployment mode' from the global attributes in a NetCDF file

    :param path: path to dataset
    :return: Mode as a value from `DeploymentModes` enumeration
    :raises ValueError: if mode cannot be determined or is invalid
    """
    fname = os.path.basename(path)
    d = Dataset(path)
    try:
        mode_str = d.deployment_mode
    except AttributeError:
        raise ValueError("Attribute 'deployment_mode' not found in '{}'".format(fname))

    for mode in DeploymentModes:
        if mode.value.lower() == mode_str:
            return mode
    raise ValueError(
        "Unrecognised deployment mode '{}' in '{}'".format(mode_str, fname)
    )
5f24adda063cb2efdc1853a5ce862c674dbb7afa
3,638,908
import re
from itertools import chain


def split_reaction(reac):
    """ split a CHEMKIN reaction into reactants and products

    :param reac: reaction string
    :type reac: str
    :returns: reactants and products
    :rtype: (tuple of strings, tuple of strings)
    """
    em_pattern = one_of_these([PAREN_PLUS_EM + STRING_END,
                               PLUS_EM + STRING_END])
    reactant_str, product_str = re.split(PADDED_ARROW, reac)
    reactant_str = re.sub(em_pattern, '', reactant_str)
    product_str = re.sub(em_pattern, '', product_str)
    en_reactants = tuple(map(_expand_en_reagents,
                             map(strip_spaces,
                                 re.split(PADDED_PLUS, reactant_str))))
    en_products = tuple(map(_expand_en_reagents,
                            map(strip_spaces,
                                re.split(PADDED_PLUS, product_str))))
    reactants = tuple(chain(*en_reactants))
    products = tuple(chain(*en_products))
    return reactants, products
d9ccbf02bd8f037d42f9de5f30612a99a1d7d918
3,638,909
def bond_stereo_parities(sgr):
    """ bond parities, as a dictionary """
    return mdict.by_key_by_position(bonds(sgr), bond_keys(sgr),
                                    BND_STE_PAR_POS)
7b80fdb861530e4389a83db1ef55f9552baf758d
3,638,910
def encode(text):
    """ Encode text to a list of UTF-8 byte values
    (despite the original docstring, this is not base64) """
    return [int(x) for x in text.encode('utf8')]
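# Example (illustrative): each character maps to its UTF-8 byte value(s).
assert encode("Hi") == [72, 105]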
af51272d8edc25d46695ea3b35fd395ad26321b5
3,638,911
import socket


def _get_ip():
    """
    :return: This computer's default AF_INET IP address as a string
    """
    # find ip using answer with 75 votes
    # https://stackoverflow.com/questions/166506/finding-local-ip-addresses-using-pythons-stdlib
    ip = ''
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # apparently any IP will work
        sock.connect(('192.168.1.1', 1))
        ip = sock.getsockname()[0]
    except Exception as e:
        print(e)
        print('Error: Couldn\'t get IP! Make sure you are connected to a network.')
    finally:
        sock.close()
    return str(ip)
f39c961877a1ec026596a7ced01679411962fca4
3,638,912
def _get_value(cav, _type):
    """Get value of custom attribute item"""
    if _type == 'Map:Person':
        return cav["attribute_object"]["id"] \
            if cav.get("attribute_object") else None
    if _type == 'Checkbox':
        return cav["attribute_value"] == '1'
    return cav["attribute_value"]
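# Examples (illustrative):
assert _get_value({"attribute_value": "1"}, "Checkbox") is True
assert _get_value({"attribute_object": {"id": 7}}, "Map:Person") == 7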
c8210579cf8b2a29dffc1f28a6e204fc9f89f274
3,638,913
import numpy as np


def get_inference(model, vectorizer, topics, text, threshold):
    """ runs inference on text input

    parameters
    ----------
        model: loaded model to use to transform the input
        vectorizer: instance of the vectorizer e.g TfidfVectorizer(ngram_range=(2, 3))
        topics: the list of topics in the model
        text: input string to be classified
        threshold: float of threshold to use to output a topic

    returns
    -------
        tuple => (top topic, top score, set of topics over the threshold)
    """
    v_text = vectorizer.transform([text])
    score = model.transform(v_text)

    labels = set()
    for i in range(len(score[0])):
        if score[0][i] > threshold:
            labels.add(topics[i])

    if not labels:
        return 'None', -1, set()

    # return a tuple matching the empty case above (the original returned only
    # the top topic here, which was inconsistent with its own docstring)
    return topics[np.argmax(score)], np.max(score), labels
e48ba018d372de317dd79fb678d69d2c83b4787b
3,638,914
def get_message_id(update: dict, status_update: str) -> int:
    """Get the id of a message received from the bot.

    Parameters
    ----------
    update : dict
        a new message from the bot
    status_update : str
        the message state: 'message' if new, 'edited_message' if edited

    Returns
    -------
    int
        the message id
    """
    return update[status_update]['message_id']
9b299c94e322ad9cea92fd73cb9e7a55f3364caa
3,638,915
import numpy as np


def gmm_clustering_predict(model, X):
    """ X is a (N, 1) array """
    X = np.clip(X, -2.5, 2.5)
    return model.predict(X)
9dcff9aa68fe008713dbb5142e16702bb68f65a0
3,638,916
import requests


def http_request(url, method='GET', timeout=2, **kwargs):
    """Generic task to make an http request."""
    headers = kwargs.get('headers', {})
    params = kwargs.get('params', {})
    data = kwargs.get('data', {})

    if method not in METHOD_CHOICES:
        raise ValueError(f'{method} not supported!')
    # lowercase before the 'post'/'put' check below, so that `data` is
    # attached regardless of the casing of the method argument (the original
    # lowercased only after the check, so 'POST' never got a body)
    method = method.lower()

    request_kwargs = {}
    if headers:
        request_kwargs['headers'] = headers
    if params:
        request_kwargs['params'] = params
    if method in ['post', 'put']:
        request_kwargs['data'] = data

    s = requests.Session()
    request = getattr(s, method)
    try:
        response = request(url, timeout=timeout, **request_kwargs)
        # response.raise_for_status()
    except requests.exceptions.RequestException:
        logger.exception('%s request to url %s failed!', method, url)
        return None
    else:
        logger.info('%s request to url %s successful!', method, url)
        return {
            'status_code': response.status_code,
            'headers': dict(response.headers),
            'text': response.text
        }
f7605d5b88bb7e23a1b541b7103185d229427270
3,638,917
from typing import Type


def unify_nest(args: Type[MultiNode], kwargs: Type[MultiNode], node_str, mode,
               axis=0, max_depth=1):
    """
    Unify the input nested arguments, which consist of sub-arrays spread
    across arbitrary nodes, to unified arrays on the single target node.

    :param args: The nested positional arguments to unify.
    :type args: MultiNode
    :param kwargs: The nested keyword arguments to unify.
    :type kwargs: MultiNode
    :param node_str: The node to unify the nested arguments to.
    :type node_str: str
    :param mode: The mode by which to unify, must be one of [ concat | mean | sum ]
    :type mode: str
    :param axis: The axis along which to concatenate the sub-arrays. Default is 0.
    :type axis: int, optional
    :param max_depth: The maximum nested depth to reach. Default is 1.
        Increase this if the nest is deeper.
    :type max_depth: int, optional
    :return: nested arguments unified to the target node
    """
    args = args._data if isinstance(args, MultiNodeIter) else args
    kwargs = kwargs._data if isinstance(kwargs, MultiNodeIter) else kwargs
    args_uni = ivy.nested_map(args, lambda x: unify(x, node_str, mode, axis),
                              max_depth=max_depth)
    kwargs_uni = ivy.nested_map(kwargs, lambda x: unify(x, node_str, mode, axis),
                                max_depth=max_depth)
    return args_uni, kwargs_uni
392491382b31c566db7eb8b13a98001158d8153a
3,638,918
def ast_for_inv_exp(inv: 'Ast', ctx: 'ReferenceDict'):
    """
    invExp ::= atomExpr (atomExpr | invTrailer)*;
    """
    assert inv.name is UNameEnum.invExp
    atom_expr, *inv_trailers = inv
    res = ast_for_atom_expr(atom_expr, ctx)
    if len(inv_trailers) == 1:  # `is 1` relied on small-int interning
        [each] = inv_trailers
        if each.name is UNameEnum.atomExpr:
            return res(ast_for_atom_expr(each, ctx))
        return ast_for_atom_expr(each[0], ctx)(res)

    stack = []
    for each in inv_trailers:
        if each.name is UNameEnum.atomExpr:
            stack.append(ast_for_atom_expr(each, ctx))
            continue
        if stack:
            res = res(*stack)
            stack.clear()
        res = (ast_for_atom_expr(each[0], ctx))(res)
    if stack:
        res = res(*stack)
    return res
77de8d7c1fd5dfc4a8fefa04ee6c0a5da17a6bc1
3,638,919
import tensorflow as tf


def create_input_pipeline(files, batch_size, n_epochs, shape, crop_shape=None,
                          crop_factor=1.0, n_threads=2):
    """Creates a pipeline from a list of image files.
    Includes batch generator/central crop/resizing options.
    The resulting generator will dequeue the images batch_size at a time until
    it throws tf.errors.OutOfRangeError when there are no more images left in
    the queue.

    Parameters
    ----------
    files : list
        List of paths to image files.
    batch_size : int
        Number of image files to load at a time.
    n_epochs : int
        Number of epochs to run before raising tf.errors.OutOfRangeError
    shape : list
        [height, width, channels]
    crop_shape : list
        [height, width] to crop image to.
    crop_factor : float
        Percentage of image to take starting from center.
    n_threads : int, optional
        Number of threads to use for batch shuffling

    Returns
    -------
    batch : tf.Tensor
        A shuffled batch of center-cropped, resized images.
    """
    # We first create a "producer" queue.  It creates a production line which
    # will queue up the file names and allow another queue to deque the file
    # names all using a tf queue runner.
    # Put simply, this is the entry point of the computational graph.
    # It will generate the list of file names.
    # We also specify its capacity beforehand.
    producer = tf.train.string_input_producer(
        files, capacity=len(files), num_epochs=n_epochs)

    # We need something which can open the files and read its contents.
    reader = tf.WholeFileReader()

    # We pass the filenames to this object which can read the file's contents.
    # This will create another queue running which dequeues the previous queue.
    keys, vals = reader.read(producer)

    # And then have to decode its contents as we know it is a jpeg image
    imgs = tf.image.decode_jpeg(
        vals, channels=3 if len(shape) > 2 and shape[2] == 3 else 0)

    # We have to explicitly define the shape of the tensor.
    # This is because the decode_jpeg operation is still a node in the graph
    # and doesn't yet know the shape of the image.  Future operations however
    # need explicit knowledge of the image's shape in order to be created.
    imgs.set_shape(shape)

    # Next we'll centrally crop the image to crop_shape.
    # This operation requires explicit knowledge of the image's shape.
    # (The original computed rsz_shape unconditionally, which crashed when
    # crop_shape was None; the resize is now guarded.)
    if crop_shape is not None:
        if shape[0] > shape[1]:
            rsz_shape = [int(shape[0] / shape[1] * crop_shape[0] / crop_factor),
                         int(crop_shape[1] / crop_factor)]
        else:
            rsz_shape = [int(crop_shape[0] / crop_factor),
                         int(shape[1] / shape[0] * crop_shape[1] / crop_factor)]
        rszs = tf.image.resize_images(imgs, rsz_shape)
        crops = tf.image.resize_image_with_crop_or_pad(rszs, crop_shape[0],
                                                       crop_shape[1])
    else:
        crops = imgs

    # Now we'll create a batch generator that will also shuffle our examples.
    # We tell it how many it should have in its buffer when it randomly
    # permutes the order.
    min_after_dequeue = len(files) // 100

    # The capacity should be larger than min_after_dequeue, and determines how
    # many examples are prefetched.  TF docs recommend setting this value to:
    # min_after_dequeue + (num_threads + a small safety margin) * batch_size
    capacity = min_after_dequeue + (n_threads + 1) * batch_size

    # Randomize the order and output batches of batch_size.
    batch = tf.train.shuffle_batch(
        [crops],
        enqueue_many=False,
        batch_size=batch_size,
        capacity=capacity,
        min_after_dequeue=min_after_dequeue,
        num_threads=n_threads)

    # alternatively, we could use shuffle_batch_join to use multiple reader
    # instances, or set shuffle_batch's n_threads to higher than 1.
    return batch
17c26de3659cccd7e32d8297ed0e31167ef05c38
3,638,920
def confirm_email_page():
    """Returns page for users that have not confirmed their email address"""
    if not g.loggedIn:
        return redirect(url_for('general.loginPage'))
    next = request.args.get('next')
    if general_db.is_activated(g.user):
        if next != '':  # `is not ''` compared identity, not equality
            return make_auth_token_response(g.user, g.email, next)
        return make_auth_token_response(g.user, g.email,
                                        url_for('articles.index'))
    if next:
        err = 'You must confirm your email to access this endpoint'
        flash(err, 'danger')
        return render_template('confirm_email.html', next=next, email=g.email)
    return render_template('confirm_email.html', email=g.email)
e37c59e9f1fa1d710d2795257be1cfb8bc9aa3df
3,638,921
from bs4 import BeautifulSoup


def get_movie_names(url_data):
    """Get all the movies from the webpage"""
    soup = BeautifulSoup(url_data, 'html.parser')
    # Get all the lines from HTML that are a part of ul with class = 'ctlg-holder'
    data = soup.findAll('ul', attrs={'class': 'ctlg-holder'})
    movie_list = []
    for div in data:
        links = div.findAll('a')  # Choose all the lines with links
        for a in links:
            # the original compared the tag itself to "#" with `is not`,
            # which never filters anything; compare the href instead
            href = a.get('href')
            if href is not None and href != "#":
                movie_list.append(href)
    print("Movie Names Obtained")
    return movie_list
1cae6b0093f0e0ca9e361bdc207be9ea654e7c2b
3,638,922
from flask import render_template, request


def message_results():
    """Shows the user their message, with the letters in sorted order."""
    message = request.form.get('message')
    encrypted_message = sort_letters(message)
    return render_template('message_results.html', message=encrypted_message)
8e0868330c318c958da496a742f630583822bbd0
3,638,923
def flatten_dict(dicts, keys):
    """
    Input is list of dicts. This operation pulls out the key in each dict and
    combines the values into a new list mapped to the original key. A new
    dictionary is formed with these key -> list mappings.
    """
    return {
        key: flatten_n([d[key] for d in dicts])
        for key in keys
    }
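# Example (illustrative, assuming flatten_n concatenates the collected lists):
# flatten_dict([{"a": [1]}, {"a": [2]}], ["a"])  ->  {"a": [1, 2]}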
ca037e47e2e6287145da693cd55f47719d463115
3,638,924
def render_url(fullpath, notebook=False):  # , prefix="files"):
    """Converts a path relative to the notebook (i.e. kernel) to a URL that
    can be served by the notebook server, by prepending the notebook directory"""
    if fullpath.startswith('http://'):
        url = fullpath
    else:
        url = (radiopadre.FILE_URL_ROOT if not notebook
               else radiopadre.NOTEBOOK_URL_ROOT) + fullpath
    # print "{} URL is {}".format(fullpath, url)
    return url
ee401c4521cf93fe4ec95b2d3c1fb7dbe337ff52
3,638,925
from flask import jsonify, request


def register():
    """Register User route."""
    email = request.form.get('email')
    password = request.form.get('password')
    new_user = User.register(email, password)
    if new_user:
        return jsonify({'message': 'Registration successful.'}), 201
    return jsonify({'message': 'Invalid username or password.'}), 400
a6148b514268e36fc28a69737718598fcc355460
3,638,926
import json


def route_sns_task(event, context):
    """
    Gets SNS Message, deserialises the message,
    imports the function, calls the function with args
    """
    record = event['Records'][0]
    message = json.loads(record['Sns']['Message'])
    return run_message(message)
1e7c8f774f62cddf633e51d631f74cad3fa1ec8e
3,638,927
import numpy as np


def release_dp_mean_absolute_deviation(x, bounds, epsilon):
    """Release the dp mean absolute deviation.
    Assumes dataset size len(`x`) is public.
    Theorem 27: https://arxiv.org/pdf/2001.02285.pdf
    """
    lower, upper = bounds
    sensitivity = (upper - lower) * 2. / len(x)

    x = np.clip(x, *bounds)
    mad = np.abs(x - x.mean()).mean()  # ndarray has no .abs() method; use np.abs

    base_lap = binary_search_chain(lambda s: make_base_laplace(s),
                                   sensitivity, epsilon)
    return base_lap(mad)
da49088e52fcd0ccf8358db072354fcd39de565e
3,638,928
import cloudpickle
import pickle
import os


def get_configuration(spec_path):
    """Get mrunner experiment specification and gin-config overrides."""
    try:
        with open(spec_path, 'rb') as f:
            specification = cloudpickle.load(f)
    except pickle.UnpicklingError:
        with open(spec_path) as f:
            vars_ = {'script': os.path.basename(spec_path)}
            exec(f.read(), vars_)  # pylint: disable=exec-used
            specification = vars_['experiments_list'][0].to_dict()
            print('NOTE: Only the first experiment from the list will be run!')

    parameters = specification['parameters']
    gin_bindings = []
    for key, value in parameters.items():
        if key == 'imports':
            for module_str in value:
                binding = f'import {module_str}'
                gin_bindings.append(binding)
            continue

        if isinstance(value, str) and value[0] not in ('@', '%', '{', '(', '['):
            binding = f'{key} = "{value}"'
        else:
            binding = f'{key} = {value}'
        gin_bindings.append(binding)

    return specification, gin_bindings
4e4d76b7fd9e3c27a16f9e1aeedaa21f5a97defd
3,638,929
import pandas as pd


def LoadScores(firstfile, prevfile):
    """Load the first and previous scores.
    For each peptide, compute a prize that is -log10(min p-value across all
    time points). Assumes the scores are p-values or equivalent scores in
    (0, 1]. Do not allow null or missing scores.

    Return: data frame with scores and prize for each peptide
    """
    first_df = pd.read_csv(firstfile, sep="\t", comment="#", header=None,
                           index_col=0)
    prev_df = pd.read_csv(prevfile, sep="\t", comment="#", header=None,
                          index_col=0)
    first_shape = first_df.shape
    assert first_shape == prev_df.shape, "First and previous score files must have the same number of peptides and time points"
    assert not first_df.isnull().values.any(), "First scores file contains N/A values. Replace with 1.0"
    assert not prev_df.isnull().values.any(), "Previous scores file contains N/A values. Replace with 1.0"
    print("Loaded {} peptides and {} scores in the first and previous score files".format(first_shape[0], first_shape[1]))  # was a Python 2 print statement

    # Merge the two types of scores
    merged_df = pd.concat([first_df, prev_df], axis=1, join="outer")
    merged_shape = merged_df.shape
    assert merged_shape[0] == first_shape[0], "First and previous significance scores contain different peptides"
    assert merged_shape[1] == 2 * first_shape[1], "Unexpected number of significance scores after merging first and previous scores"

    # Compute prizes
    merged_df["prize"] = merged_df.apply(CalcPrize, axis=1)
    return merged_df
b6a0d9769795937a21aee195d782060db73ec494
3,638,930
import os


def add_makeflags(job_core_count, cmd):
    """
    Correct for multi-core if necessary (especially important in case
    coreCount=1 to limit parallel make).

    :param job_core_count: core count from the job definition (int).
    :param cmd: payload execution command (string).
    :return: updated payload execution command (string).
    """
    # ATHENA_PROC_NUMBER is set in Node.py using the schedconfig value
    try:
        core_count = int(os.environ.get('ATHENA_PROC_NUMBER'))
    except Exception:
        core_count = -1
    if core_count == -1:
        try:
            core_count = int(job_core_count)
        except Exception:
            pass
    else:
        if core_count >= 1:
            # Note: the original request (AF) was to use j%d and not -j%d,
            # now using the latter
            cmd += "export MAKEFLAGS=\'-j%d QUICK=1 -l1\';" % (core_count)

    # make sure that MAKEFLAGS is always set
    if "MAKEFLAGS=" not in cmd:
        cmd += "export MAKEFLAGS=\'-j1 QUICK=1 -l1\';"

    return cmd
c559b703894fed48e21c616125166b697e32c817
3,638,931
def _gen_find(subseq, generator):
    """Returns the first position of `subseq` in the generator
    or -1 if there is no such position."""
    if isinstance(subseq, bytes):
        subseq = bytearray(subseq)
    subseq = list(subseq)
    pos = 0
    saved = []

    for c in generator:
        saved.append(c)
        if len(saved) > len(subseq):
            saved.pop(0)
            pos += 1
        if saved == subseq:
            return pos
    return -1
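# Example (illustrative): the position returned is where the match starts,
# just like bytes.find.
assert _gen_find(b"lo", iter(b"hello")) == 3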
ec89e787a61d684e2a7d0c8c2d0fb9c89cf73ada
3,638,932
import numpy as np


def fnCalculate_Bistatic_RangeAndDoppler(pos_target, vel_target, pos_rx,
                                         pos_tx, wavelength):
    """
    Calculate measurement vector consisting of bistatic range and Doppler
    shift for 3D bistatic case.
    pos_rx, pos_tx = position of Rx and Tx in [km].
    pos_target = position of target in [km].
    wavelength = wavelength of radar transmitter in [km].
    Validated in main_iss_bistatic_rangedopp_01.py
    Date: 27/12/16
    Edited: 22/01/17: fixed a bug in the expression for Doppler shift.
    Forgot to include the Doppler shift due to the transmitter.
    """
    target_rx = np.subtract(pos_target, pos_rx)
    target_tx = np.subtract(pos_target, pos_tx)
    y_radar = np.zeros([2], dtype=np.float64)
    # bistatic range
    y_radar[0] = np.linalg.norm(target_rx) + np.linalg.norm(target_tx)
    # Doppler shift
    pos_vel = np.hstack((pos_target, vel_target))
    y_radar[1] = fnCalculate_Doppler_Shift_3D(wavelength, pos_vel, pos_rx) + \
        fnCalculate_Doppler_Shift_3D(wavelength, pos_vel, pos_tx)  # fixed: 22/01/17
    return y_radar
4cd5177a8ad0be0732821f6af7cf9030cadab781
3,638,933
def all_permits(target_dynamo_table):
    """
    Simply return all data from DynamoDb Table
    :param target_dynamo_table:
    :return:
    """
    response = target_dynamo_table.scan()
    data = response['Items']
    while response.get('LastEvaluatedKey', False):
        response = target_dynamo_table.scan(
            ExclusiveStartKey=response['LastEvaluatedKey'])
        data.extend(response['Items'])
    return data
8efdaf4ff407d0e2ce8dd592eeac766b0ec2264b
3,638,934
def maximum_difference_sort_value(contributions):
    """
    Auxiliary function to sort the contributions for the compare_plot.
    Returns the value of the maximum difference between values in
    contributions[0].

    Parameters
    ----------
    contributions: list
        list containing 2 elements:
        a Numpy.ndarray of contributions of the indexes compared, and the
        features' names.

    Returns
    -------
    value_max_difference : float
        Value of the maximum difference contribution.
    """
    if len(contributions[0]) <= 1:
        max_difference = contributions[0][0]
    else:
        max_difference = max(
            [
                abs(contrib_i - contrib_j)
                for i, contrib_i in enumerate(contributions[0])
                for j, contrib_j in enumerate(contributions[0])
                if i <= j
            ]
        )
    return max_difference
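# Example (illustrative): the largest pairwise gap is |-0.2 - 0.4| ≈ 0.6.
# maximum_difference_sort_value([[0.1, -0.2, 0.4], ["f1", "f2", "f3"]])  ->  ~0.6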
cd7f66ec252199fb01b9891440d0f7da370c7b8e
3,638,935
import re
import os

import numpy as np


def load_test_val_train_files(version):
    """Load the test, validation and train labels and images from the data
    folder. Also does the basic preprocessing (converting to the right
    datatype, clamping and rescaling etc.)

    return images_train, images_validation, images_test,
           labels_train, labels_validation, labels_test"""
    # Load labels
    labels_pattern = re.compile(r'labels-(\d+).npy')
    labels_files = filter(lambda f: re.match(labels_pattern, f),
                          os.listdir(INPUT_DIR))

    images_train, images_validation, images_test = np.array([]), np.array([]), np.array([])
    labels_train, labels_validation, labels_test = list(), list(), list()
    for f in labels_files:
        # Load images (stack all frames vertically)
        # Loading images this way ensures that labels and images have the same order
        file_num = int(re.match(labels_pattern, f).group(1))
        new_labels = np.load(os.path.join(INPUT_DIR, f)).tolist()
        if file_num % 5 == 0:
            # Test file
            labels_test += new_labels
            images_test = add_images_from_file(images_test, file_num)
        elif file_num % 5 == 1:
            # Validation file
            labels_validation += new_labels
            images_validation = add_images_from_file(images_validation, file_num)
        else:
            # Train file
            labels_train += new_labels
            images_train = add_images_from_file(images_train, file_num)

    # Add new dimension (explicit mention that we have only one color channel)
    # Change range from 0-255 to 0-1 (datatype change from uint8 to float64)
    images_test = images_test[:, :, :, np.newaxis] / 255.0
    images_validation = images_validation[:, :, :, np.newaxis] / 255.0
    images_train = images_train[:, :, :, np.newaxis] / 255.0

    labels_test = np.array(labels_test, dtype=bool)
    labels_validation = np.array(labels_validation, dtype=bool)
    labels_train = np.array(labels_train, dtype=bool)

    return images_train, images_validation, images_test, \
        labels_train, labels_validation, labels_test
f85bb48840619a23c7a96ea5b5820ab8af6c9622
3,638,936
def get_primer_target_sequence(id, svStartChr, svStartPos, svEndChr, svEndPos,
                               svType, svComment, primerTargetSize,
                               primerOffset, blastdbcmd, genomeFile):
    """Get the sequences in which primers will be placed"""
    if svType in ["del", "inv3to3", "trans3to3", "trans3to5", "snv",
                  "invRefA", "invAltA"]:
        targetSeq1Start = svStartPos - primerOffset - primerTargetSize
        targetSeq1End = svStartPos - primerOffset
        targetSeq1 = get_DNA_sequence(svStartChr, targetSeq1Start,
                                      targetSeq1End, blastdbcmd,
                                      genomeFile).upper()
    elif svType in ["invRefB"]:
        targetSeq1Start = max(svEndPos - primerOffset - primerTargetSize,
                              svStartPos + primerOffset)
        targetSeq1End = svEndPos - primerOffset
        targetSeq1 = get_DNA_sequence(svStartChr, targetSeq1Start,
                                      targetSeq1End, blastdbcmd,
                                      genomeFile).upper()
    elif svType in ["trans5to3", "trans5to5"]:
        targetSeq1Start = svStartPos + primerOffset
        targetSeq1End = svStartPos + primerOffset + primerTargetSize
        targetSeq1 = reverseComplementSequence(
            get_DNA_sequence(svStartChr, targetSeq1Start, targetSeq1End,
                             blastdbcmd, genomeFile).upper())
    elif svType in ["dup", "inv5to5", "invAltB"]:
        targetSeq1Start = svStartPos + primerOffset
        targetSeq1End = min(svStartPos + primerOffset + primerTargetSize,
                            svEndPos - primerOffset)
        targetSeq1 = reverseComplementSequence(
            get_DNA_sequence(svStartChr, targetSeq1Start, targetSeq1End,
                             blastdbcmd, genomeFile).upper())

    if svType in ["del", "inv5to5", "snv", "invRefB", "invAltB"]:
        targetSeq2Start = svEndPos + primerOffset
        targetSeq2End = svEndPos + primerOffset + primerTargetSize
        targetSeq2 = get_DNA_sequence(svStartChr, targetSeq2Start,
                                      targetSeq2End, blastdbcmd,
                                      genomeFile).upper()
    elif svType in ["invRefA"]:
        targetSeq2Start = svStartPos + primerOffset
        targetSeq2End = min(svStartPos + primerOffset + primerTargetSize,
                            svEndPos - primerOffset)
        targetSeq2 = get_DNA_sequence(svStartChr, targetSeq2Start,
                                      targetSeq2End, blastdbcmd,
                                      genomeFile).upper()
    elif svType in ["dup", "inv3to3", "invAltA"]:
        targetSeq2Start = max(svEndPos - primerTargetSize - primerOffset,
                              svStartPos + primerOffset)
        targetSeq2End = svEndPos - primerOffset
        targetSeq2 = reverseComplementSequence(
            get_DNA_sequence(svStartChr, targetSeq2Start, targetSeq2End,
                             blastdbcmd, genomeFile).upper())
    elif svType in ["trans3to5", "trans5to5"]:
        targetSeq2Start = svEndPos + primerOffset
        targetSeq2End = svEndPos + primerOffset + primerTargetSize
        targetSeq2 = get_DNA_sequence(svEndChr, targetSeq2Start, targetSeq2End,
                                      blastdbcmd, genomeFile).upper()
    elif svType in ["trans3to3", "trans5to3"]:
        targetSeq2Start = svEndPos - primerTargetSize - primerOffset
        targetSeq2End = svEndPos - primerOffset
        targetSeq2 = reverseComplementSequence(
            get_DNA_sequence(svEndChr, targetSeq2Start, targetSeq2End,
                             blastdbcmd, genomeFile).upper())

    return (targetSeq1, targetSeq2)
b8b32319d6a37a2373a620b1be867ab838c54fdc
3,638,937
def human_format(num):
    """
    :param num: A number to print in a nice readable way.
    :return: A string representing this number in a readable way (e.g. 1000 --> 1K).
    """
    magnitude = 0
    while abs(num) >= 1000:
        magnitude += 1
        num /= 1000.0
    return '%.2f%s' % (num, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])
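# Examples (illustrative):
assert human_format(1234) == '1.23K'
assert human_format(1234000) == '1.23M'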
41e4f3823f756588c18b0fb926949a5aca9c6942
3,638,938
from typing import Optional
from typing import Dict


def torchserve(
    model_path: str,
    management_api: str,
    image: str = TORCHX_IMAGE,
    params: Optional[Dict[str, object]] = None,
) -> specs.AppDef:
    """Deploys the provided model to the given torchserve management API
    endpoint.

    >>> from torchx.components.serve import torchserve
    >>> torchserve(
    ...     model_path="s3://your-bucket/your-model.pt",
    ...     management_api="http://torchserve:8081",
    ... )
    AppDef(name='torchx-serve-torchserve', ...)

    Args:
        model_path: The fsspec path to the model archive file.
        management_api: The URL to the root of the torchserve management API.
        image: Container to use.
        params: torchserve parameters.
            See https://pytorch.org/serve/management_api.html#register-a-model

    Returns:
        specs.AppDef: the Torchx application definition
    """
    args = [
        "torchx/apps/serve/serve.py",
        "--model_path",
        model_path,
        "--management_api",
        management_api,
    ]
    if params is not None:
        for param, value in params.items():
            args += [
                f"--{param}",
                str(value),
            ]

    return specs.AppDef(
        name="torchx-serve-torchserve",
        roles=[
            specs.Role(
                name="torchx-serve-torchserve",
                image=image,
                entrypoint="python3",
                args=args,
                port_map={"model-download": 8222},
            ),
        ],
    )
b90ec26512525e3f23a54034a685be380ef0be96
3,638,939
def ExpandRange(r, s=1):
    """Expand a range string like "1-5" to [1, 2, 3, 4, 5];
    a step may be given with a suffix, e.g. "1-10/2"."""
    if REGEX_PATTERNS['step'].search(r):
        [r1, s] = r.split('/')
        s = int(s)
    else:
        r1 = r
    (start, end) = r1.split('-')
    return [i for i in range(int(start), int(end) + 1, s)]
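# Examples (illustrative, assuming REGEX_PATTERNS['step'] matches the "/N" suffix):
# ExpandRange("1-5")     ->  [1, 2, 3, 4, 5]
# ExpandRange("1-10/2")  ->  [1, 3, 5, 7, 9]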
1e8ca3b5b026c36817acfefd1666312b0bcccf23
3,638,940
def write_file_if_changed(name, data):
    """
    Write a file if the contents have changed.
    Returns True if the file was written.
    """
    if path_exists(name):
        old_contents = read_file(name)
    else:
        old_contents = ''
    if data != old_contents:
        write_file(name, data)
        return True
    return False
42962e9f9159d8cab121826e223bfa10467b8d5c
3,638,941
def _get_preprocessor_loader(plugin_name):
    """Get a class that loads a preprocessor class.

    This returns a class with a single class method, ``transform``, which,
    when called, finds a plugin and defers to its ``transform`` class method.

    This is necessary because ``convert()`` is called as a decorator at import
    time, but we cannot be confident that the ResourceType plugins have been
    loaded yet. (In fact, since ``convert()`` is used to decorate plugins, we
    can be confident that not all plugins are loaded when it is called.)

    This permits us to defer plugin searching until the moment when
    ``preprocess()`` calls the various preprocessors, at which point we can be
    certain that all plugins have been loaded and finding them by name will
    work.
    """
    def transform(cls, *args, **kwargs):
        plug = ResourceType.get(plugin_name)
        return plug.transform(*args, **kwargs)

    return type("PluginLoader_%s" % plugin_name,
                (object,),
                {"transform": classmethod(transform)})
5b2c1687be92b21f31c0e9e28a3566505831c876
3,638,942
def preprocess_sample(data, word_dict):
    """
    Args:
        data (dict)
    Returns:
        dict
    """
    processed = {}
    processed['Abstract'] = [sentence_to_indices(sent, word_dict)
                             for sent in data['Abstract'].split('$$$')]
    if 'Task 2' in data:
        processed['Label'] = label_to_onehot(data['Task 2'])
    return processed
0b1b50285be0afa1faf78917024b1e2cd01fb167
3,638,943
import yaml
import torch


def read_input_file(input_file_path):
    """
    read inputs from input_file_path

    :param input_file_path:
    :return:
    """
    cprint('[INFO]', bc.dgreen, "read input file: {}".format(input_file_path))
    with open(input_file_path, 'r') as input_file_read:
        dl_inputs = yaml.load(input_file_read, Loader=yaml.FullLoader)
        dl_inputs['gru_lstm']['learning_rate'] = \
            float(dl_inputs['gru_lstm']['learning_rate'])

        # initialize before checking if GPU actually exists
        device = torch.device("cpu")
        dl_inputs['general']['is_cuda'] = False
        if dl_inputs['general']['use_gpu']:
            # --- check cpu/gpu availability
            # returns a Boolean True if a GPU is available, else it'll return False
            is_cuda = torch.cuda.is_available()
            if is_cuda:
                device = torch.device(dl_inputs["general"]["gpu_device"])
                dl_inputs['general']['is_cuda'] = True
            else:
                cprint('[INFO]', bc.lred, 'GPU was requested but not available.')

        dl_inputs['general']['device'] = device
        cprint('[INFO]', bc.lgreen,
               'pytorch will use: {}'.format(dl_inputs['general']['device']))

    if "early_stopping_patience" not in dl_inputs["gru_lstm"]:
        dl_inputs["gru_lstm"]["early_stopping_patience"] = False
    if dl_inputs['gru_lstm']["early_stopping_patience"] <= 0:
        dl_inputs['gru_lstm']["early_stopping_patience"] = False

    # XXX separation in the input CSV file
    # Hardcoded, see issue #38
    dl_inputs['preprocessing']['csv_sep'] = "\t"

    return dl_inputs
ade3584937997798b496690d5799e23effae1cd1
3,638,944
def task_deploy_docs() -> DoitTask:
    """Deploy docs to the Github `gh-pages` branch.

    Returns:
        DoitTask: doit task
    """
    if _is_mkdocs_local():  # pragma: no cover
        return debug_task([
            (echo, ('ERROR: Not yet configured to deploy documentation without "use_directory_urls"',)),
        ])
    return debug_task([Interactive('poetry run mkdocs gh-deploy')])
52101c7321618e9a1f98e4bd16c5e88b291b38da
3,638,945
def plugin(version: str) -> 'Plugin':
    """Get the application plugin."""
    return XPXPlugin
8211b8b3f2aaedbbfe289184117e6964fad5cce5
3,638,946
def is_anaconda_5():
    """
    anaconda 5 has conda version 4.4.0 or greater... obviously :/
    """
    vers = conda_version()
    if not vers:
        return False
    # compare (major, minor) lexicographically; the original checked
    # major >= 4 and minor >= 4 separately, which wrongly rejects e.g. 5.0
    return (vers['major'], vers['minor']) >= (4, 4)
cc4701bb788867a6370c53b48994c166dffa7cd4
3,638,947
def relay_array_map(c, fn, *array):
    """Implementation of array_map for Relay."""
    assert fn.is_constant(Primitive)
    fn = fn.value
    if fn is P.switch:
        rfn = relay.where
    else:
        rfn = SIMPLE_MAP[fn]
    return rfn(*[c.ref(a) for a in array])
8d3d89ea131272f987054c198353ec7fc398e4a0
3,638,948
def get_multi_objects_dict(*args, params=None):
    """Convert an array of objects into a single merged dictionary."""
    object_group = []
    result = {}
    for data_object in args:
        if params is not None and params.get('fields'):
            fields = params['fields']
        else:
            fields = [attr for attr in data_object.__dict__.keys()
                      if not attr.startswith('_')]
        row = {}
        for field in fields:
            value = getattr(data_object, field)
            if field.startswith('date') and value is not None:
                row.update({field: value.strftime('%Y-%m-%d %H:%M:%S')})
            else:
                row.update({field: parse_value(value)})
        object_group.append(row)
    for data_object in object_group:
        result.update(**data_object)
    return result
2f4e2bc6e68bc77fedfae89ff4562cdab5fa91fb
3,638,949
import frappe

from btu.manual_tests import ping_now


def test_function_ping_now_bytes():
    """
    Pickle the 'ping_now' function and return it as bytes.
    """
    queue_args = {
        "site": frappe.local.site,
        "user": frappe.session.user,
        "method": ping_now,
        "event": None,
        "job_name": "ping_now",
        "is_async": True,  # always true; we want to run Tasks via the Redis Queue, not on the Web Server.
        "kwargs": {}  # if 'ping_now' had keyword arguments, we'd set them here.
    }

    new_sanchez = Sanchez()
    new_sanchez.build_internals(func=execute_job, _args=None,
                                _kwargs=queue_args)
    http_result: bytes = new_sanchez.get_serialized_rq_job()
    return http_result
0673efa3ff11aa9b55b470c4ac84a35f7878af98
3,638,950
def post_equals_form(post, json_response):
    """ Checks if the post object is equal to the json object """
    if post.title != json_response['title']:
        return False
    if post.deadline != json_response['deadline']:
        return False
    if post.details != json_response['details']:
        return False
    if post.category != json_response['category']:
        return False
    if post.preferred_contact != json_response['preferred_contact']:
        return False
    if post.zip_code != json_response['zip_code']:
        return False
    return True
965a533c7ebbb70001bcdcb0e143b617708807e3
3,638,951
def get_shield(plugin: str) -> dict:
    """
    Generate shield json for napari plugin.

    If the package is not a valid plugin, display 'plugin not found' instead.

    :param plugin: name of the plugin
    :return: shield json used in shields.io.
    """
    shield_schema = {
        "color": "#0074B8",
        "label": "napari hub",
        "logoSvg": "<svg width=\"512\" height=\"512\" viewBox=\"0 0 512 512\" fill=\"none\" "
                   "xmlns=\"http://www.w3.org/2000/svg\"><circle cx=\"256.036\" cy=\"256\" "
                   "r=\"85.3333\" fill=\"white\" stroke=\"white\" stroke-width=\"56.8889\"/>"
                   "<circle cx=\"256.036\" cy=\"42.6667\" r=\"42.6667\" fill=\"white\"/>"
                   "<circle cx=\"256.036\" cy=\"469.333\" r=\"42.6667\" fill=\"white\"/>"
                   "<path d=\"M256.036 28.4445L256.036 142.222\" stroke=\"white\" "
                   "stroke-width=\"56.8889\" stroke-linecap=\"round\" stroke-linejoin=\"round\"/>"
                   "<path d=\"M256.036 369.778L256.036 483.556\" stroke=\"white\" stroke-width=\"56.8889\" "
                   "stroke-linecap=\"round\" stroke-linejoin=\"round\"/>"
                   "<circle cx=\"71.2838\" cy=\"149.333\" r=\"42.6667\" transform=\"rotate(-60 71.2838 149.333)\" "
                   "fill=\"white\"/><circle cx=\"440.788\" cy=\"362.667\" r=\"42.6667\" "
                   "transform=\"rotate(-60 440.788 362.667)\" fill=\"white\"/>"
                   "<path d=\"M58.967 142.222L157.501 199.111\" stroke=\"white\" stroke-width=\"56.8889\" "
                   "stroke-linecap=\"round\" stroke-linejoin=\"round\"/><path d=\"M354.57 312.889L453.105 369.778\" "
                   "stroke=\"white\" stroke-width=\"56.8889\" stroke-linecap=\"round\" stroke-linejoin=\"round\"/>"
                   "<circle cx=\"71.2838\" cy=\"362.667\" r=\"42.6667\" transform=\"rotate(-120 71.2838 362.667)\" "
                   "fill=\"white\"/><circle cx=\"440.788\" cy=\"149.333\" r=\"42.6667\" "
                   "transform=\"rotate(-120 440.788 149.333)\" fill=\"white\"/>"
                   "<path d=\"M58.967 369.778L157.501 312.889\" stroke=\"white\" stroke-width=\"56.8889\" "
                   "stroke-linecap=\"round\" stroke-linejoin=\"round\"/><path d=\"M354.57 199.111L453.105 142.222\" "
                   "stroke=\"white\" stroke-width=\"56.8889\" stroke-linecap=\"round\" stroke-linejoin=\"round\"/>"
                   "</svg>",
        "schemaVersion": 1,
        "style": "flat-square"
    }
    plugins = get_valid_plugins()
    if plugin not in plugins:
        shield_schema['message'] = 'plugin not found'
    else:
        shield_schema['message'] = plugin
    return shield_schema
f1c7dadabd0b5fe6b1b0012188559b4958ca5fd0
3,638,952
def check_auth(username, password):
    """This function is called to check if a username / password
    combination is valid.
    """
    return username == expectedUN and password == expectedPW
e19759a1514fad47a085e3dad2180c5b8b49827c
3,638,953
import numpy as np
from scipy.integrate import quad


def Lambda(t, y):
    """Original Arnett 1982 dimensionless bolometric light curve expression

    Calculates the bolometric light curve due to radioactive decay of 56Ni,
    assuming no other energy input.
        t:  time since explosion in days
        y:  Arnett 1982 light curve width parameter (typical 0.7 < y < 1.4)
    Returns the dimensionless light curve shape function.
    """
    tm = 2 * tNi * y
    a, x = [], np.atleast_1d(t / tm)
    ig = lambda z: 2 * z * np.exp(-2 * z * y + z ** 2)
    for xi in x.ravel():
        a.append(np.exp(-xi ** 2) * quad(ig, 0, xi)[0])
    return np.array(a)
85752fa09f1189ca7e24a32d821e36c58379572d
3,638,954
from datetime import datetime


def get_utcnow_time(format: str = None) -> str:
    """
    Return string with current utc time in chosen format

    Args:
        format (str): format string. if None "%y%m%d.%H%M%S" will be used.

    Returns:
        str: formatted utc time string
    """
    if format is None:
        format = "%y%m%d.%H%M%S"
    result = datetime.utcnow().strftime(format)
    return result
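# Examples (illustrative; exact output depends on the current UTC time):
# get_utcnow_time()            ->  '210704.134455'
# get_utcnow_time("%Y-%m-%d")  ->  '2021-07-04'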
994e47abde4a4b56bd0f22ccc41d7d91c7b3b8d0
3,638,955
import numpy as np


def repair_branch(cmorph, cut, rmorph, rep, force=False):
    """Attempts to extend cut neurite using intact branch.

    Args:
        cmorph (treem.Morph): cut morphology.
        cut (treem.Node): cut node, from cmorph.
        rmorph (treem.Morph): repair morphology.
        rep (treem.Node): undamaged branch start node, from rmorph.
        force (bool): force repair if branch is too short.

    Returns:
        True if repaired.
    """
    done = 0
    cutsec = list(reversed(list(cut.section(reverse=True))))
    repsec = list(rep.section())
    cutlen = cmorph.length(cutsec)
    replen = rmorph.length(repsec)
    target = cut
    if replen > cutlen:
        for node in repsec[-1::-1]:
            if rmorph.length(node.section()) > replen - cutlen:
                break
        source = node  # pylint: disable=undefined-loop-variable
    elif rep.breadth() > 1 or force:
        source = rep
    else:
        source = None
    if source:
        tree = rmorph.copy(source)
        scale_z = -1
        scale_r = cmorph.radii(cutsec).mean() / rmorph.radii(repsec).mean()
        tree.data[:, SWC.XYZR] *= np.array([1, 1, scale_z, scale_r])
        u = np.mean(tree.data[:, SWC.XYZ], axis=0) - tree.root.coord()
        v = target.coord() - cmorph.root.coord()
        axis, angle = rotation(u, v)
        tree.rotate(axis, angle)
        shift = (target.coord() - tree.root.coord() +
                 target.coord() - target.parent.coord())
        tree.translate(shift)
        cmorph.graft(tree, target)
        done = 1
    return done
1e76ec2619f1b74791c1258c65c649c25261a740
3,638,956
import datetime  # the original imported the class, but the body uses
                 # module-qualified names (datetime.datetime)


def parse_patient_dob(dob):
    """
    Parse date string and sanity check.
    expects date string in YYYYMMDD format

    Parameters
    ----------
    dob : str
        dob as string YYYYMMDD

    Returns
    -------
    dob : datetime object
    """
    try:
        dob = datetime.datetime.strptime(dob, '%Y%m%d')
        if dob < datetime.datetime(1900, 1, 1):
            raise ValueError
    except (ValueError, TypeError):
        dob = None
    log.debug(dob)
    return dob
a6c5f76cc2f335bc91d94dc5372542342d2ae9ae
3,638,957
def us2cycles(us):
    """
    Converts microseconds to integer number of tProc clock cycles.

    :param us: Number of microseconds
    :type us: float
    :return: Number of tProc clock cycles
    :rtype: int
    """
    return int(us * fs_proc)
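# Example (illustrative, assuming fs_proc is the tProc clock frequency in MHz,
# i.e. cycles per microsecond; with fs_proc = 400.0):
# us2cycles(1.5)  ->  600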
51d405c512c146bdfda0a091470ad84593872819
3,638,958
import datetime  # the body uses datetime.datetime and datetime.timedelta


def date_range(begin_date, end_date):
    """ Return the list of dates in the interval [begin_date, end_date] """
    dates = []
    dt = datetime.datetime.strptime(begin_date, "%Y-%m-%d")
    date = begin_date[:]
    while date <= end_date:
        dates.append(date)
        dt = dt + datetime.timedelta(1)
        date = dt.strftime("%Y-%m-%d")
    return dates
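# Example (illustrative):
assert date_range("2021-01-01", "2021-01-03") == \
    ['2021-01-01', '2021-01-02', '2021-01-03']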
a3373ab76752423eaf1484e5d66dc5d6334c4360
3,638,959
from sympy import symbols  # assumed source of `symbols`


def scalar(name):
    """
    Create a scalar variable with the corresponding name. The 'name' will be
    used during code generation, so it should match the variable name used in
    the C++ code.
    """
    tname = name
    return symbols(tname)
8f1f7295d15b136be38383135729fe7717fd71b8
3,638,960
def add(number1, number2):
    """
    This function adds two numbers

    Arguments:
        number1 : first number to be passed
        number2 : second number to be passed

    Returns:
        number1 + number2, the sum of the two numbers

    Examples:
        >>> add(0,0)
        0
        >>> add(1,1)
        2
        >>> add(1.1,2.2)
        3.3000000000000003
    """
    return number1 + number2
5db1a461f65672d5fc1201a82657fada30220743
3,638,961
def calculate_timeout(start_point, end_point, planner):
    """
    Calculates the time limit between start_point and end_point considering
    a fixed speed of 5 km/hr.

    Args:
        start_point: initial position
        end_point: target_position
        planner: to get the shortest path between start_point and end_point

    Returns:
        time limit considering a fixed speed of 5 km/hr
    """
    path_distance = planner.get_shortest_path_distance(
        [start_point.location.x, start_point.location.y, 0.22],
        [start_point.orientation.x, start_point.orientation.y, 0.22],
        [end_point.location.x, end_point.location.y, end_point.location.z],
        [end_point.orientation.x, end_point.orientation.y, end_point.orientation.z])

    return ((path_distance / 1000.0) / 5.0) * 3600.0 + 10.0
cb7ae44df9b6a89d2e171046fa0bdfe3f81445c5
3,638,962
from datetime import date, timedelta


def overview(request):
    """Returns the overview for a daterange.

    GET parameters:
    * daterange - 7d, 1m, 3m, 6m or 1y (default: 1y)

    Returns an overview dict with a count for all action types.
    """
    form = OverviewAPIForm(request.GET)
    if not form.is_valid():
        return {'success': False, 'errors': form.errors}
    daterange = form.cleaned_data.get('daterange') or '1y'

    mgr = KarmaManager()
    overview = {}
    for t in KarmaManager.action_types.keys():
        overview[t] = mgr.count(daterange, type=t)

    # TODO: Maybe have a karma action not assigned to a user for this?
    num_days = KarmaManager.date_ranges[daterange]
    start_day = date.today() - timedelta(days=num_days)
    overview['question'] = Question.objects.filter(
        created__gt=start_day).count()

    return {
        'success': True,
        'overview': overview}
41d97127833d2c873ebedb1aaff9dcdfb31ae4dd
3,638,963
import multiprocessing as mp


def func_parallel(func, list_inputs, leave_cpu_num=1):
    """
    :param func: func(list_inputs[i])
    :param list_inputs: each element is the input of func
    :param leave_cpu_num: number of CPUs to leave unused
    :return: [return_of_func(list_inputs[0]), return_of_func(list_inputs[1]), ...]
    """
    cpu_cores = mp.cpu_count() - leave_cpu_num
    pool = mp.Pool(processes=cpu_cores)
    list_outputs = pool.map(func, list_inputs)
    pool.close()
    return list_outputs
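# Example (illustrative; func must be picklable, i.e. defined at module level):
# def square(x):
#     return x * x
# func_parallel(square, [1, 2, 3])  ->  [1, 4, 9]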
4642149db87236b444e26515747a18ccbc420e64
3,638,964
from numpy import mean  # per the docstring, the average is taken with numpy


def get_mean(jsondata):
    """Get average of list of items using numpy."""
    if len(jsondata['results']) > 1:
        # key name from itunes; pattern: [a.get('a') for a in alist if 'a' in a]
        return mean([float(price.get('price'))
                     for price in jsondata['results'] if 'price' in price])
    else:
        return float(jsondata['results'][0]['price'])
63851f6e89bea230549975eba68391421b57f087
3,638,965
from typing import Dict
import torch
import time


def evaluate_with_trajectory(
    sc_dataset: SingleCellDataset,
    n_samples: int,
    trajectory_type: str,
    trajectory_coef: Dict,
    types: DeconvolutionDatatypeParametrization,
    deconvolution_params: Dict,
    n_iters=5_000,
):
    """Evaluate L1_error and measure fit time for fitting on a simulated
    dataset from a given trajectory

    :param sc_dataset: SingleCellDataset to generate simulations from
    :param n_samples: number of samples along the time axis to generate
    :param trajectory_type: string indicating the trajectory type to which
        the `trajectory_coef` correspond
    :param trajectory_coef: trajectory coefficients
    :param types: DeconvolutionDatatypeParametrization identifying datatypes to use
    :param deconvolution_params: Dictionary with deconvolution parameters
    :param n_iters: Number of learning iterations for each execution

    :return: Dictionary with results
    """
    # Simulate bulk data
    sim_res = simulate_data(
        w_hat_gc=torch.Tensor(sc_dataset.w_hat_gc),
        num_samples=n_samples,
        trajectory_type=trajectory_type,
        dirichlet_alpha=10.0,
        trajectory_coef=trajectory_coef,
    )

    simulated_bulk = generate_anndata_from_sim(sim_res, sc_dataset)

    ebov_simulated_dataset = DeconvolutionDataset(
        types=types,
        parametrization=DeconvolutionDatasetParametrization(
            sc_anndata=sc_dataset.sc_anndata,
            sc_celltype_col="Subclustering_reduced",
            bulk_anndata=simulated_bulk,
            bulk_time_col="time",
        ),
    )

    # Prepare deconvolution object
    pseudo_time_reg_deconv_sim = TimeRegularizedDeconvolutionModel(
        dataset=ebov_simulated_dataset,
        types=types,
        **deconvolution_params,
    )

    # Deconvolve
    t_0 = time.perf_counter()
    pseudo_time_reg_deconv_sim.fit_model(
        n_iters=n_iters,
        verbose=True,
        log_frequency=1000,
        keep_param_store_history=False,
    )
    t_1 = time.perf_counter()

    # Calculate errors
    errors = calculate_trajectory_prediction_error(sim_res,
                                                   pseudo_time_reg_deconv_sim)

    # Return
    return {
        "n_samples": n_samples,
        "l1_error_norm": errors["L1_error_norm"],
        "fit_time": t_1 - t_0,
    }
bb82164f4ec9d79bcc675be1612a61ff5b209752
3,638,966
import sys


def main(argv=None):
    """ """
    if argv is None:  # `== None` replaced with the idiomatic identity check
        argv = sys.argv[1:]

    try:
        pdb_file = argv[0]
        data_file = argv[1]
    except IndexError:
        err = "Incorrect number of arguments!\n\n%s\n\n" % __usage__
        raise PerturbPdbError(err)

    out = perturbPdb(pdb_file, data_file)
    return "".join(out)
2d2c175430ebd953e2b363927c34b491c71d0737
3,638,967
def plot_diffraction_1d(result, deg):
    """
    Returns this result instance in PlotData1D representation.
    :param deg: if False the phase is expressed in radians, if True in degrees.
    """
    # Distinguish between the strings "phase in deg" and "phase in rad".
    if deg:
        phase_string = "Phase in deg"
    else:
        phase_string = "Phase in rad"

    # Retrieve setup information.
    info_dict = result.diffractionSetup().toDictionary()
    info_dict["Bragg angle"] = str(result.braggAngle())

    # Retrieve angles of the results.
    angles_in_um = [i * 1e+6 for i in result.angleDeviations()]

    # Define inner function to duplicate info for every plot.
    def addPlotInfo(info_dict, energy, angles_in_um, data):
        plot_data = PlotData1D(data[0], data[1], data[2])
        plot_data.set_x(angles_in_um)
        plot_data.set_y(data[3])
        for key, value in info_dict.items():
            plot_data.add_plot_info(key, value)
        plot_data.add_plot_info("Energy", str(energy))
        return plot_data

    plots = []
    for energy in result.energies():
        # Intensity S polarization.
        s_intensity = ("Intensity - Polarization S",
                       "Angle deviation in urad",
                       "Intensity",
                       result.sIntensityByEnergy(energy))
        plots.append(addPlotInfo(info_dict, energy, angles_in_um, s_intensity))

        p_intensity = ("Intensity - Polarization P",
                       "Angle deviation in urad",
                       "Intensity",
                       result.pIntensityByEnergy(energy))
        plots.append(addPlotInfo(info_dict, energy, angles_in_um, p_intensity))

        intensity_difference = ("Intensity difference",
                                "Angle deviation in urad",
                                "Intensity",
                                result.differenceIntensityByEnergy(energy))
        plots.append(addPlotInfo(info_dict, energy, angles_in_um, intensity_difference))

        s_phase = ("Phase - Polarization S",
                   "Angle deviation in urad",
                   phase_string,
                   result.sPhaseByEnergy(energy, deg))
        plots.append(addPlotInfo(info_dict, energy, angles_in_um, s_phase))

        p_phase = ("Phase - Polarization P",
                   "Angle deviation in urad",
                   phase_string,
                   result.pPhaseByEnergy(energy, deg))
        plots.append(addPlotInfo(info_dict, energy, angles_in_um, p_phase))

        phase_difference = ("Phase difference",
                            "Angle deviation in urad",
                            phase_string,
                            result.differencePhaseByEnergy(energy, deg))
        plots.append(addPlotInfo(info_dict, energy, angles_in_um, phase_difference))

    return plots
f316e1f02a5b5b295bfed22fc5307bcf908788c2
3,638,968
import logging
def prepare_go_environ():
    """Returns dict with environment variables to set to use Go toolset.

    Installs or updates the toolset and vendored dependencies if necessary.
    """
    bootstrap(LAYOUT, logging.INFO)
    return get_go_environ(LAYOUT)
cf7d6ee594193317a1201beb127e607139fd367f
3,638,969
def get_subnets(client, name='tag:project', values=[ec2_project_name,], dry=True):
    """
    Get subnet(s) by tag (note: create_tags not working via client api, use cidr or object_id instead)
    https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.describe_subnets
    """
    try:
        return client.describe_subnets(Filters=[{'Name': name, 'Values': values},], DryRun=dry)
    except Exception as err:
        handle(err)
4504a37689bce171d3d62a3cf6f66365c58f56e8
3,638,970
import requests
def get_object_handler(s3_client, request_context, user_request):
    """
    Handler for the GetObject operation
    :param s3_client: s3 client
    :param request_context: GetObject request context
    :param user_request: user request
    :return: WriteGetObjectResponse
    """
    # Validate user request and return error if invalid
    requests_validation = validator.validate_request(user_request)
    if not requests_validation.is_valid:
        return error.write_error_response(s3_client, request_context, requests.codes.bad_request,
                                          'InvalidRequest', requests_validation.error_msg)

    # Get the original object from Amazon S3
    s3_url = request_context["inputS3Url"]
    request_header = get_request_header(user_request["headers"])
    object_response = requests.get(s3_url, headers=request_header)

    # Check if the get original object request from S3 is successful
    if object_response.status_code != requests.codes.ok:
        # For 304 Not Modified, no error message needs to be sent
        if object_response.status_code == requests.codes.not_modified:
            return s3_client.write_get_object_response(
                RequestRoute=request_context["outputRoute"],
                RequestToken=request_context["outputToken"],
                StatusCode=object_response.status_code,
            )
        return error.write_error_response_for_s3(s3_client, request_context, object_response)

    # Transform the object
    original_object = object_response.content
    transformed_whole_object = transform.transform_object(original_object)

    # Handle range or partNumber if present in the request
    partial_object_response = apply_range_or_part_number(transformed_whole_object, user_request)
    if partial_object_response.hasError:
        return error.write_error_response(s3_client, request_context, requests.codes.bad_request,
                                          'InvalidRequest', partial_object_response.error_msg)
    transformed_object = partial_object_response.object

    # Send the transformed object back to Amazon S3 Object Lambda
    transformed_object_checksum = checksum.get_checksum(transformed_object)
    return s3_client.write_get_object_response(
        RequestRoute=request_context["outputRoute"],
        RequestToken=request_context["outputToken"],
        Body=transformed_object,
        Metadata={
            'body-checksum-algorithm': transformed_object_checksum.algorithm,
            'body-checksum-digest': transformed_object_checksum.digest
        })
fc98197f99e8751976245902eb4034a5b4930d3b
3,638,971
import os
import posixpath
def NormalizePath(path):
    """Returns a path normalized to how we write DEPS rules and compare paths."""
    return os.path.normcase(path).replace(os.path.sep, posixpath.sep)
e6a6c7a50176f6990841a48748e5951c4f40b8af
3,638,972
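# A quick illustrative check for NormalizePath above (not part of the
# original entry). On Windows, normcase lowercases and the separator is
# converted; on POSIX the call is effectively a no-op for posix paths.
print(NormalizePath(r'Src\Base\File.h'))  # 'src/base/file.h' on Windows
print(NormalizePath('src/base/file.h'))   # unchanged on POSIX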
import os
import shutil
from collections import defaultdict

import cv2
import torch
def eval_imgs_output_dets(opt,
                          data_loader,
                          data_type,
                          result_f_name,
                          out_dir,
                          save_dir=None,
                          show_image=True):
    """
    :param opt: options
    :param data_loader: image data loader yielding (path, img, img_0)
    :param data_type: dataset type used for result formatting
    :param result_f_name: path of the final result file
    :param out_dir: directory for per-image detection label files
    :param save_dir: optional directory for visualized frames
    :param show_image: whether to display frames while processing
    :return: (frame count, average time per frame, number of timer calls)
    """
    if save_dir:
        mkdir_if_missing(save_dir)

    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    else:
        shutil.rmtree(out_dir)
        os.makedirs(out_dir)

    # init tracker
    tracker = JDETracker(opt, frame_rate=30)
    timer = Timer()
    results_dict = defaultdict(list)

    frame_id = 0  # frame index (starts from 0)
    for path, img, img_0 in data_loader:
        if frame_id % 30 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'
                        .format(frame_id, 1.0 / max(1e-5, timer.average_time)))

        blob = torch.from_numpy(img).to(opt.device).unsqueeze(0)

        # ----- run detection
        timer.tic()

        # update detection results
        dets_dict = tracker.update_detection(blob, img_0)

        timer.toc()
        # -----

        # plot detection results
        if show_image or save_dir is not None:
            online_im = vis.plot_detects(image=img_0,
                                         dets_dict=dets_dict,
                                         num_classes=opt.num_classes,
                                         frame_id=frame_id,
                                         fps=1.0 / max(1e-5, timer.average_time))

        if frame_id > 0:
            # whether to show intermediate results
            if show_image:
                cv2.imshow('online_im', online_im)
            if save_dir is not None:
                cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)

        # ----- format detection results and write them (txt) to the output directory
        # format
        dets_list = format_dets_dict2dets_list(dets_dict, w=img_0.shape[1], h=img_0.shape[0])

        # write the label (txt) to the output directory
        out_img_name = os.path.split(path)[-1]
        out_f_name = out_img_name.replace('.jpg', '.txt')
        out_f_path = out_dir + '/' + out_f_name
        with open(out_f_path, 'w', encoding='utf-8') as w_h:
            w_h.write('class prob x y w h total=' + str(len(dets_list)) + '\n')

            for det in dets_list:
                w_h.write('%d %f %f %f %f %f\n' % (det[0], det[1], det[2], det[3], det[4], det[5]))

        print('{} written'.format(out_f_path))

        # one frame processed, update frame_id
        frame_id += 1

    print('Total {:d} detection result output.\n'.format(frame_id))

    # save final results
    write_results_dict(result_f_name, results_dict, data_type)

    # return results
    return frame_id, timer.average_time, timer.calls
d156450d42da57364e7f9037c4abbe8af7a18d4c
3,638,973
def decline_agreement(supplier_code):
    """Decline agreement (role=supplier)
    ---
    tags:
      - seller edit
    parameters:
      - name: supplier_code
        in: path
        type: number
        required: true
    responses:
      200:
        description: Agreement declined.
      400:
        description: Bad request.
      403:
        description: Unauthorised to decline agreement.
      404:
        description: Supplier not found.
      500:
        description: Unexpected error.
    """
    if current_user.supplier_code != supplier_code:
        return forbidden('Unauthorised to decline agreement')

    try:
        seller_edit_business.decline_agreement({
            'supplier_code': current_user.supplier_code,
            'email_address': current_user.email_address
        })
    except NotFoundError as nfe:
        return not_found(str(nfe))
    except DeletedError as de:
        abort(str(de))
    except UnauthorisedError as ue:
        abort(str(ue))

    return Response(status=200)
fa7f2186af9f7beb2b138eab3347cd34580557f0
3,638,974
import torch
def load_model(model, model_path):
    """
    Load model from saved weights.
    """
    if hasattr(model, "module"):
        model.module.load_state_dict(torch.load(model_path, map_location="cpu"),
                                     strict=False)
    else:
        model.load_state_dict(torch.load(model_path, map_location="cpu"),
                              strict=False)
    return model
0fbf34548474c4af89c25806f05d1e7d3170bbde
3,638,975
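# A minimal usage sketch for load_model above (not part of the original
# entry); the checkpoint path and Linear model are made up for illustration.
import torch
import torch.nn as nn

net = nn.Linear(4, 2)
torch.save(net.state_dict(), '/tmp/ckpt.pt')            # hypothetical path
restored = load_model(nn.Linear(4, 2), '/tmp/ckpt.pt')  # weights now match net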
def get_file_size(filepath: str):
    """
    Not exactly sure how os.stat or os.path.getsize work, but they seem to get the total allocated size
    of the file and return that while the file is still copying.  What we want is the actual file size
    written to disk during copying.

    With standard Windows file copying, we can just try open/close the file, and if that succeeds, the
    file is finished.  With Kongsberg systems writing to disk, we can actually open and read the .all
    file as it copies, so the try/except is not good enough.  This function will find the length of the
    actual readable data on disk.

    Parameters
    ----------
    filepath
        file path to a file being written

    Returns
    -------
    int
        file size in bytes
    """
    with open(filepath, "rb") as file:
        # move the pointer to the end of the file
        file.seek(0, 2)
        # retrieve the current position of the pointer;
        # this will be the file's size in bytes
        size = file.tell()
    return size
6936a8227a96e3ebc4b1146f8363f092d232cafd
3,638,976
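# A minimal usage sketch for get_file_size above (not part of the original
# entry): poll the readable size until it stops growing. The path is made up.
import time

last = -1
while True:
    size = get_file_size('incoming/line_0001.all')  # hypothetical path
    if size == last:
        break
    last = size
    time.sleep(1.0)
print('copy finished at %d bytes' % size)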
import json
def get_aws_regions_from_file(region_file):
    """
    Return the list of region names read from region_file.

    The format of region_file is as follows:
    {
        "regions": [
            "cn-north-1",
            "cn-northwest-1"
        ]
    }
    """
    with open(region_file) as r_file:
        region_data = json.load(r_file)
    return sorted(r for r in region_data.get("regions"))
639da8c6417295f97621f9fd5321d8499652b7b2
3,638,977
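# A minimal usage sketch for get_aws_regions_from_file above (not part of the
# original entry), writing the documented file format to a temp file first.
import json
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as f:
    json.dump({'regions': ['cn-northwest-1', 'cn-north-1']}, f)
print(get_aws_regions_from_file(f.name))  # ['cn-north-1', 'cn-northwest-1']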
def item_pack():
    """ RESTful CRUD controller """

    s3db.configure("supply_item_pack",
                   listadd=False,
                   )

    return s3_rest_controller()
e6bce829b441a08c98dc81fa6ac1ea432ef67c89
3,638,978
def inv_cipher(rkey, ct, Nk=4):
    """AES decryption cipher."""
    assert Nk in {4, 6, 8}
    Nr = Nk + 6

    rkey = rkey.reshape(4*(Nr+1), 32)
    ct = ct.reshape(128)

    # first round
    state = add_round_key(ct, rkey[4*Nr:4*(Nr+1)])

    for i in range(Nr-1, 0, -1):
        state = inv_shift_rows(state)
        state = inv_sub_bytes(state)
        state = add_round_key(state, rkey[4*i:4*(i+1)])
        state = inv_mix_columns(state)

    # final round
    state = inv_shift_rows(state)
    state = inv_sub_bytes(state)
    state = add_round_key(state, rkey[0:4])

    return state
477b32450b4fef060f936952d0af3115ca4b8add
3,638,979
import os
import ast
def get_version(module='spyder_terminal'):
    """Get version."""
    with open(os.path.join(HERE, module, '__init__.py'), 'r') as f:
        data = f.read()
    lines = data.split('\n')
    for line in lines:
        if line.startswith('VERSION_INFO'):
            version_tuple = ast.literal_eval(line.split('=')[-1].strip())
            version = '.'.join(map(str, version_tuple))
            break
    return version
085bdff77724f7962e506f735d8336b5b8ba63d8
3,638,980
def _ptrarray_to_list(ptrarray):
    """Converts a ptr_array structure from SimpLL into a Python list."""
    result = []
    for i in range(0, ptrarray.len):
        result.append(ptrarray.arr[i])
    lib.freePointerArray(ptrarray)
    return result
430c26f15ee41dbf5b4bdf562dd81c0167eead18
3,638,981
import copy
import random
def perform_modifications(statemachine, amount=1, possible_modifications=[]):
    """Starting point for modifications upon interfaces.

    Performs modifications as specified in the parameters.
    N (amount) modifications are selected at random from possible_modifications
    and then attempted to be applied upon the provided statemachine.

    Args:
        statemachine (generator.StateMachine object): An original statemachine object created using "generator.py"
        amount (int, optional): Amount of modifications to be applied upon interface. Defaults to 1.
        possible_modifications (list, optional): This should be a list of modification function references
            (from modifications.py). Defaults to [].

    Returns:
        (generator.StateMachine object, list): Returns a tuple containing the modified statemachine object
            and the ordered list of modifications applied. This can be "False" if no modifications could
            successfully be applied.
    """
    # Have to keep track of states already modified to prevent conflicts within AR file creation.
    global already_modified

    statemachine = copy.deepcopy(statemachine)
    done_modifications = []

    # Loop over the amount of modifications to be selected.
    for _ in range(amount):
        # Select at random a modification from the function references list and store it
        selected = random.choice(possible_modifications)
        done_modifications.append(selected.__name__)

        # If the selected modification is create:
        # The state upon which this may be applied cannot be a begin state.
        if selected == create:
            selected_state = random.choice([x for x in statemachine.states
                                            if x != statemachine.BeginState and x not in already_modified])
            already_modified.append(selected_state)
            if not selected(statemachine, selected_state):
                print("Something went wrong")  # Temporary debug
                return False

        # If the selected modification is delete or split, the selected transition must be an output on the server side.
        if selected == delete or selected == split:
            selected_state = random.choice([x.end for x in statemachine.transitions
                                            if x.output and x not in already_modified])
            already_modified.append(selected_state)
            if not selected(statemachine, selected_state):
                print("Something went wrong")  # Temporary debug
                return False

        # If the selected modification is merge, the same rules apply as for delete and split;
        # also have to select whether the merge must happen on 2 or 3 outputs.
        if selected == merge:
            selected_state = random.sample([x.end for x in statemachine.transitions
                                            if x.output and x not in already_modified],
                                           random.choice([2, 3]))
            already_modified.append(selected_state)
            if not merge(selected_state, statemachine):
                print("something went wrong")
                return False

    # TODO: UPDATE NUMBERS
    return (statemachine, done_modifications)
8cd1f21a3b7e74d3b9c3919c829e8156065e020f
3,638,982
from typing import Callable
def endpoint(path: str) -> Callable[[Callable], Endpoint]:
    """Decorator for creating an Endpoint from a method.

    Arguments:
        path: The path to the API endpoint (relative to the API's ``base_url``).

    Returns:
        The wrapper for the endpoint method.
    """
    def wrapper(method):
        return Endpoint(path, build_converter(method))

    return wrapper
1a0b9b836630f1ab4eea902861899c50303aa539
3,638,983
def from_string_to_bytes(a):
    """
    Based on project: https://github.com/chaeplin/dashmnb.
    """
    return a if isinstance(a, bytes) else bytes(a, 'utf-8')
e76509f1be8baf8df0bf3b7160615f9a9c04ff86
3,638,984
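# A quick illustrative check for from_string_to_bytes above (not part of the
# original entry): bytes pass through untouched, str is encoded as UTF-8.
assert from_string_to_bytes(b'abc') == b'abc'
assert from_string_to_bytes('abc') == b'abc'
assert from_string_to_bytes('é') == b'\xc3\xa9'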
def split(x, divider):
    """Split a string.

    Parameters
    ----------
    x : any
        A str object to be split. Anything else is returned as is.
    divider : str
        Divider string.
    """
    if isinstance(x, str):
        return x.split(divider)
    return x
e77a162777d9bb13262e4686ba1cb9732ebab221
3,638,985
def despesa_update(despesa_id):
    """
    Edit an expense.

    Args:
        despesa_id (int): ID of the expense to edit.
        The math logic is called from utils.py: adicionar_registro()

    Returns:
        Rendered template: despesa.html
        Redirect: aplication.transacoes
    """
    despesa = Despesa.query.get_or_404(despesa_id)
    if despesa.user != current_user:
        abort(403)
    form = DespesaForm()
    if form.validate_on_submit():
        valor_antigo = despesa.valor
        id_conta_bancaria_antiga = despesa.conta_bancaria.id

        despesa.valor = form.valor.data
        despesa.data_origem = form.data_origem.data
        despesa.descricao = form.descricao.data
        despesa.categoria_despesa = form.categoria.data
        despesa.conta_bancaria = form.conta.data

        if despesa.status:
            adicionar_registro(id_conta_bancaria_antiga, form.conta.data.id,
                               valor_antigo, form.valor.data, 1)

        db.session.commit()
        flash('Sua despesa foi alterada.', 'success')
        return redirect(url_for('aplication.transacoes', despesa_id=despesa.id))
    elif request.method == 'GET':
        form.valor.data = despesa.valor
        form.data_origem.data = despesa.data_origem
        form.descricao.data = despesa.descricao
        form.categoria.data = despesa.categoria_despesa
        form.conta.data = despesa.conta_bancaria
    return render_template('despesa.html', title='Atualizar despesa',
                           legend='Atualizar despesa', form=form)
bd848eacc19144c40822a7389ceecdae4f5c5532
3,638,986
import numpy as np
def _convert_format(partition):
    """
    Converts the format of the python-louvain output into a numpy array

    Parameters
    ----------
    partition : dict
        Standard output from the python-louvain package

    Returns
    -------
    partition : np.array
        Partition as a numpy array
    """
    return np.array([partition[val] for val in partition.keys()])
5afffe9745c0083829a2ce88f5842b295583e737
3,638,987
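# A minimal usage sketch for _convert_format above (not part of the original
# entry): python-louvain returns {node: community_id}, which becomes a flat
# label array ordered by the dict's keys.
partition = {0: 0, 1: 0, 2: 1, 3: 1}
print(_convert_format(partition))  # [0 0 1 1]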
def settingsdir():
    """In which directory to save the settings file."""
    return module_dir() + "/settings"
ac485b7d947cfa051adc9eeed6f194b9746d8401
3,638,988
def _is_iqn_attached(sess, iqn):
    """
    Verify if the oci volume with this iqn is attached to this instance.

    Parameters
    ----------
    sess: OCISession
        The OCISession instance.
    iqn: str
        The iSCSI qualified name.

    Returns
    -------
    str: the ocid, or None if the volume is not attached.
    """
    _logger.debug('Verifying if [%s] is attached to this instance.', iqn)
    volume_data = get_volume_by_iqn(sess, iqn)
    if volume_data is None:
        return None
    if volume_data.is_attached():
        return volume_data.get_ocid()
    return None
d0aff2d1ba1bc7f316f2cafbbca655449e63cc77
3,638,989
from copy import copy
from time import perf_counter
from typing import Dict
from typing import List
def run_range_mcraptor(
    timetable: Timetable,
    origin_station: str,
    dep_secs_min: int,
    dep_secs_max: int,
    max_rounds: int,
) -> Dict[str, List[Journey]]:
    """
    Perform the McRAPTOR algorithm for a range query
    """

    # Get stops for origins and destinations
    from_stops = timetable.stations.get_stops(origin_station)
    destination_stops = {
        st.name: timetable.stations.get_stops(st.name) for st in timetable.stations
    }
    destination_stops.pop(origin_station, None)

    # Find all trips leaving from stops within time range
    potential_trip_stop_times = timetable.trip_stop_times.get_trip_stop_times_in_range(
        from_stops, dep_secs_min, dep_secs_max
    )
    potential_dep_secs = sorted(
        {tst.dts_dep for tst in potential_trip_stop_times}, reverse=True
    )
    logger.info(
        "Potential departure times : {}".format(
            [sec2str(x) for x in potential_dep_secs]
        )
    )

    journeys_to_destinations = {
        station_name: [] for station_name, _ in destination_stops.items()
    }

    logger.info("Calculating journeys to all destinations")
    s = perf_counter()

    # Find Pareto-optimal journeys for all possible departure times
    for dep_index, dep_secs in enumerate(potential_dep_secs):
        logger.info(f"Processing {dep_index} / {len(potential_dep_secs)}")
        logger.info(f"Analyzing best journey for departure time {sec2str(dep_secs)}")

        # Run Round-Based Algorithm
        mcraptor = McRaptorAlgorithm(timetable)
        if dep_index == 0:
            bag_round_stop, actual_rounds = mcraptor.run(from_stops, dep_secs, max_rounds)
        else:
            bag_round_stop, actual_rounds = mcraptor.run(
                from_stops, dep_secs, max_rounds, last_round_bag
            )
        last_round_bag = copy(bag_round_stop[actual_rounds])

        # Determine the best destination ID, destination is a platform
        for destination_station_name, to_stops in destination_stops.items():
            destination_legs = best_legs_to_destination_station(
                to_stops, last_round_bag
            )

            if len(destination_legs) != 0:
                journeys = reconstruct_journeys(
                    from_stops, destination_legs, bag_round_stop, k=actual_rounds
                )
                journeys_to_destinations[destination_station_name].extend(journeys)

    logger.info(f"Journey calculation time: {perf_counter() - s}")

    # Keep unique journeys
    for destination_station_name, journeys in journeys_to_destinations.items():
        unique_journeys = []
        for journey in journeys:
            if journey not in unique_journeys:
                unique_journeys.append(journey)
        journeys_to_destinations[destination_station_name] = unique_journeys

    return journeys_to_destinations
d09a85fbe5f3e1a8e3081037e195298aed6e5fc8
3,638,990
def _choose_node_type(w_operator, w_constant, w_input, t):
    """
    Choose a random node (from operators, constants and input variables)
    :param w_operator: Weighting of choosing an operator
    :param w_constant: Weighting of choosing a constant
    :param w_input: Weighting of choosing an input
    :param t: Trace object
    :return: An operator, constant or input variable
    """
    w_sum = w_operator + w_constant + w_input
    r = t.random() * w_sum
    if r < w_operator:
        return BNode(_random_from_list(operators, t))
    elif r < w_operator + w_constant:
        return _random_constant(t)
    else:
        return input_var
7517d347b97bce2748e4ccd45a5f25120e074e9d
3,638,991
import os
from graphviz import Digraph
def plot_prisma_diagram(save_cfg=cfg.saving_config):
    """Plot diagram showing the number of selected articles.

    TODO:
    - Use first two colors of colormap instead of gray
    - Reduce white space
    - Reduce arrow width
    """
    # save_format = save_cfg['format'] if isinstance(save_cfg, dict) else 'svg'
    save_format = 'pdf'
    # save_format = 'eps'
    size = '{},{}!'.format(0.5 * save_cfg['page_width'], 0.2 * save_cfg['page_height'])

    dot = Digraph(format=save_format)
    dot.attr('graph', rankdir='TB', overlap='false', size=size, margin='0')
    dot.attr('node', fontname='Liberation Sans', fontsize=str(9), shape='box',
             style='filled', margin='0.15,0.07', penwidth='0.1')
    # dot.attr('edge', arrowsize=0.5)

    fillcolor = 'gray98'

    dot.node('A', 'PubMed (n=39)\nGoogle Scholar (n=409)\narXiv (n=105)',
             fillcolor='gray95')
    dot.node('B', 'Articles identified\nthrough database\nsearching\n(n=553)',
             fillcolor=fillcolor)
    # dot.node('B2', 'Excluded\n(n=446)', fillcolor=fillcolor)
    dot.node('C', 'Articles after content\nscreening and\nduplicate removal\n(n=105)',
             fillcolor=fillcolor)
    dot.node('D', 'Articles included in\nthe analysis\n(n=154)', fillcolor=fillcolor)
    dot.node('E', 'Additional articles\nidentified through\nbibliography search\n(n=49)',
             fillcolor=fillcolor)

    dot.edge('B', 'C')
    # dot.edge('B', 'B2')
    dot.edge('C', 'D')
    dot.edge('E', 'D')

    if save_cfg is not None:
        fname = os.path.join(save_cfg['savepath'], 'prisma_diagram')
        dot.render(filename=fname, view=False, cleanup=False)

    return dot
490123552f3c6c8428e9156947241a9f7edc5f49
3,638,992
import os
def get_script(software):
    """
    Gets the path of the post install script of a software package.

    :rtype: str
    """
    dir_scripts = get_scripts_location()
    scripts = os.listdir(dir_scripts)
    for script in scripts:
        if script == software:
            return os.path.join(dir_scripts, script)
    return None
b6d292d15bbd26a9f26a7f1c19fddb0c94207a30
3,638,993
def _plat_idx_to_val(idx: int,
                     edge: float = 0.5,
                     FIO_IO_U_PLAT_BITS: int = 6,
                     FIO_IO_U_PLAT_VAL: int = 64) -> float:
    """
    Taken from fio's stat.c for calculating the latency value of a bin
    from that bin's index.

        idx  : the value of the index into the histogram bins
        edge : fractional value in the range [0,1]** indicating how far into
               the bin we wish to compute the latency value of.

    ** edge = 0.0 and 1.0 computes the lower and upper latency bounds
       respectively of the given bin index.
    """
    # MSB <= (FIO_IO_U_PLAT_BITS-1), cannot be rounded off. Use
    # all bits of the sample as index
    if (idx < (FIO_IO_U_PLAT_VAL << 1)):
        return idx

    # Find the group and compute the minimum value of that group
    error_bits = (idx >> FIO_IO_U_PLAT_BITS) - 1
    base = 1 << (error_bits + FIO_IO_U_PLAT_BITS)

    # Find its bucket number of the group
    k = idx % FIO_IO_U_PLAT_VAL

    # Return the mean (if edge=0.5) of the range of the bucket
    return base + ((k + edge) * (1 << error_bits))
f992194492e031add3d14f0e145888303a5b4f06
3,638,994
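# A worked example for _plat_idx_to_val above (not part of the original
# entry), using the default parameters: the first 128 indices map to
# themselves, then bucket width (1 << error_bits) doubles per group of 64.
print(_plat_idx_to_val(100))       # 100 -- indices below 128 map to themselves
print(_plat_idx_to_val(130))       # 133.0 -- midpoint of the bin [132, 134)
print(_plat_idx_to_val(130, 0.0))  # 132.0 -- lower edge of the same bin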
def is_blank(value):
    """
    Returns True if ``value`` is ``None`` or an empty string.

    >>> is_blank("")
    True
    >>> is_blank(0)
    False
    >>> is_blank([])
    False
    """
    return value is None or value == ""
6a30f9f6726701a4b7a9df8957503111a5222558
3,638,995
import numpy as np
def overload_check(data, min_overload_samples=3):
    """Check data for overload

    :param data: one or two (time, samples) dimensional array
    :param min_overload_samples: number of samples that need to be equal to max for overload
    :return: overload status
    """
    if data.ndim > 2:
        raise Exception('Number of dimensions of data should be 2 or less')

    def _overload_check(x):
        s = np.sort(np.abs(x))[::-1]
        over = s == np.max(s)
        return bool(np.sum(over) >= min_overload_samples)

    if data.ndim == 2:
        return [_overload_check(d) for d in data.T]
    else:
        return _overload_check(data)
9c59bb2e105828afd93af193949a2ad01a34a32e
3,638,996
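# A minimal usage sketch for overload_check above (not part of the original
# entry): a ramp clipped at +/-1 repeats its extreme value many times and is
# flagged, while an unclipped ramp reaches its maximum only once.
import numpy as np

clean = np.linspace(-0.5, 0.9, 1000)
clipped = np.clip(np.linspace(-2.0, 2.0, 1000), -1.0, 1.0)
print(overload_check(clean))    # False
print(overload_check(clipped))  # True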
import time
from socket import socket, AF_PACKET, SOCK_RAW
def send_packet_to_capture_last_one():
    """
    Since we read packets from the stdout of tcpdump, we do not know when a
    packet is finished. Hence you should send an additional packet after you
    assume all interesting packets were sent.
    """
    def send():
        conf = get_netconfig()
        sock = socket(AF_PACKET, SOCK_RAW)
        sock.bind((conf.dev.name, 0))
        dst_mac = MAC("22:22:22:22:22:22")
        src_ip = IP("192.168.69.10")
        dst_ip = IP("192.168.69.20")
        src_mac = MAC("11:11:11:11:11:11")
        packet = arp_packet(dst_mac, src_mac, 2, src_mac, src_ip, dst_mac, dst_ip)
        sock.send(packet)
        time.sleep(0.05)
    return send
984848ec685273d97630dd6a95d93c939665969e
3,638,997
from pathlib import Path
from typing import Callable, Iterable, Optional
import random
import signal

import numpy as np
def diss(
    demos: Demos,
    to_concept: Identify,
    to_chain: MarkovChainFact,
    competency: CompetencyEstimator,
    lift_path: Callable[[Path], Path] = lambda x: x,
    n_iters: int = 25,
    reset_period: int = 5,
    cooling_schedule: Callable[[int], float] | None = None,
    size_weight: float = 1.0,
    surprise_weight: float = 1.0,
    sgs_temp: float = 2.0,
    synth_timeout: int = 15,
    example_drop_prob: float = 0.0,
) -> Iterable[tuple[LabeledExamples, Optional[Concept], dict]]:
    """Perform demonstration informed gradient guided search."""
    if cooling_schedule is None:
        def cooling_schedule(t: int) -> float:
            return 100 * (1 - t / n_iters) + 1

    sggs = GradientGuidedSampler.from_demos(
        demos=demos,
        to_chain=to_chain,
        competency=competency,
        temp=sgs_temp,
    )

    def handler(signum, frame):
        raise ConceptIdException

    signal.signal(signal.SIGALRM, handler)

    def drop_pred(example):
        if example_drop_prob == 0.0:
            return True
        elif example_drop_prob == 1.0:
            return False
        return example_drop_prob <= random.random()

    weights = np.array([size_weight, surprise_weight])
    concept2energy = {}  # Concepts seen so far + associated energies.
    concept2data = {}    # Concepts seen so far + associated data.
    energy, new_data = float('inf'), LabeledExamples()
    for t in range(n_iters):
        temp = cooling_schedule(t)

        # Sample from proposal distribution.
        if (t % reset_period) == 0:  # Reset to best example set.
            concept = None
            proposed_examples = reset(temp, concept2energy, concept2data)
        else:
            # Drop examples with some probability.
            examples2 = LabeledExamples(
                positive=filter(drop_pred, examples.positive),
                negative=filter(drop_pred, examples.negative),
            )
            proposed_examples = examples2 @ new_data

        try:
            signal.alarm(synth_timeout)
            concept = to_concept(proposed_examples, concept=concept)
            signal.alarm(0)  # Unset alarm.
            concept2data.setdefault(concept, proposed_examples)
        except ConceptIdException:
            new_data = LabeledExamples()  # Reject: New data caused problem.
            signal.alarm(0)  # Unset alarm.
            continue

        new_data, metadata = sggs(concept)
        new_data = new_data.map(lift_path)

        new_energy = weights @ [concept.size, metadata['surprisal']]
        metadata |= {
            'energy': new_energy,
            'conjecture': new_data,
            'data': proposed_examples,
        }
        yield (proposed_examples, concept, metadata)

        # DISS bookkeeping for resets.
        concept2energy[concept] = new_energy

        # Accept/Reject proposal based on energy delta.
        dE = new_energy - energy
        if (dE < 0) or (np.exp(-dE / temp) > np.random.rand()):
            energy, examples = new_energy, proposed_examples  # Accept.
        else:
            new_data = LabeledExamples()  # Reject.
016af4c38a890426fa148af78e1349b6bacdfa79
3,638,998
def expand_gelu(expand_info):
    """Gelu expander"""
    # get op info.
    input_desc = expand_info['input_desc'][0]
    graph_builder = builder.GraphBuilder()

    # generate a graph.
    with graph_builder.graph_scope('main') as graph_scope:
        # create tensor input.
        input_x = graph_builder.tensor(input_desc['shape'], input_desc['data_type'], input_desc['format'])
        dtype = input_x.dtype
        if dtype == 'float16':
            input_x = graph_builder.emit('Cast', [input_x], attrs={'dst_type': 'float32'})

        # compute x + CSVALUE * x^3 (the polynomial feeding the tanh approximation).
        mul_0 = graph_builder.emit('Mul', [input_x, input_x])
        pow_0 = graph_builder.emit('Mul', [mul_0, input_x])
        const_csvalue = graph_builder.value(pow_0.dtype, CSVALUE, input_desc['format'])
        mul_1 = graph_builder.emit('Mul', [pow_0, const_csvalue])
        tanh_res = graph_builder.emit('TensorAdd', [input_x, mul_1])

        const_csvalue_a = graph_builder.value(tanh_res.dtype, CSVALUE_A, input_desc['format'])
        mul_0 = graph_builder.emit('Mul', [tanh_res, const_csvalue_a])

        const_zero = graph_builder.value(mul_0.dtype, 0.0, input_desc['format'])
        mul_0_min = graph_builder.emit('Minimum', [mul_0, const_zero])
        right_mul = graph_builder.emit('Exp', [mul_0_min])

        mul_0_abs = graph_builder.emit('Abs', [mul_0])
        const_neg_one = graph_builder.value(mul_0_abs.dtype, -1.0, input_desc['format'])
        mul_0_abs_neg = graph_builder.emit('Mul', [mul_0_abs, const_neg_one])
        mul_0_abs_neg_exp = graph_builder.emit('Exp', [mul_0_abs_neg])

        const_one = graph_builder.value(mul_0_abs_neg_exp.dtype, 1.0, input_desc['format'])
        mul_0_abs_neg_exp_add = graph_builder.emit('TensorAdd', [mul_0_abs_neg_exp, const_one])
        left_mul = graph_builder.emit('RealDiv', [input_x, mul_0_abs_neg_exp_add])

        result = graph_builder.emit('Mul', [left_mul, right_mul])
        if dtype == 'float16':
            result = graph_builder.emit('Cast', [result], attrs={'dst_type': 'float16'})

        # set graph output.
        graph_scope.set_output(result)

    graph = graph_builder.get()[0]
    return graph
1237d4899ef0411b827efd930fb7e2e0fa5fddde
3,638,999