content: string, lengths 22 to 815k
id: int64, values 0 to 4.91M
def install_hook(profile="default"):
    """Install the hook in a given IPython profile.

    Args:
        profile: the IPython profile for which to install the hook.
    """
    hook_source_path = os.path.join(os.path.dirname(__file__), "hook.py")
    ipython_dir = get_ipython_dir()
    startup_dir = os.path.join(ipython_dir, "profile_" + profile, "startup")
    hook_target_path = os.path.join(startup_dir, "000-ipython-startup-hook.py")
    logger.info("Installing hook for IPython profile %s", profile)
    logger.debug("Writing hook at %s to %s", hook_source_path, hook_target_path)
    with open(hook_source_path) as source, open(hook_target_path, "w") as target:
        target.write(source.read().format(name=distname, version=version))
12,000
def testhsv():
    """Main function to test hsv problems"""
    from PIL import ImageFilter
    fnames = sys.argv[1:]
    fname = fnames[0]
    i = 0
    basemem = procmem()
    print 'Basemem is %s, using image %s of size %s' % (basemem, fname, Image.open(fname).size)
    #while 1:
    for fname in fnames:
        i += 1
        im = Image.open(fname).convert('RGB')  #.resize((100,100))
        t1 = time.time()
        #hsv = rgb2hsv(im)  # very slow (5s on medium sized image)
        #m = im.filter(ImageFilter.FIND_EDGES).convert('L')  # fast (0.22s)
        #o = getGradientOrientation(im.convert('L'))  # slow (2.7s)
        t2 = time.time()
        mem = procmem() - basemem
        print 'Finished iter %d, mem: %s, elapsed: %s' % (i, mem/1024.0, t2-t1)
    sys.exit()
12,001
def number_of_qucosa_metadata_in_elasticsearch(
    host: str = SLUB_ELASTICSEARCH_SERVER_URL,
    http_auth: Optional[Tuple[str, str]] = None,
    index_name: str = "fulltext_qucosa",
) -> int:
    """Return the number of qucosa documents currently available at the SLUB elastic search server.

    Parameters
    ----------
    host: str = SLUB_ELASTICSEARCH_SERVER_URL
        The hostname of the ElasticSearch server
    http_auth: Optional[Tuple[str, str]]
        Http basic auth parameters as tuple of username and password. If http_auth is None,
        but environment variables `SLUB_ELASTICSEARCH_SERVER_USER` and
        `SLUB_ELASTICSEARCH_SERVER_PASSWORD` are set, then these are used as username and password.
    index_name: str = "fulltext_qucosa"
        The name of the ElasticSearch index to be queried.

    Returns
    -------
    int
        the number of qucosa documents
    """
    es_server = _initialize_elasticsearch_connection(host, http_auth)
    return es_server.count(index=index_name, body={"query": {"match_all": {}}})["count"]
12,002
def print_png(filename):
    """Print a png file from the current viewport

    Parameters
    ----------
    filename : str
        The name of the output png file.
    """
    from abaqus import session
    from abaqusConstants import PNG
    viewport = session.viewports[session.currentViewportName]
    session.printToFile(fileName=filename, format=PNG, canvasObjects=(viewport,))
12,003
def choose(ctx, *choices: str):
    """Chooses between multiple choices."""
    if not choices:  # *choices is an empty tuple (never None) when no arguments are given
        return
    user = ctx.message.author
    yield from bot.say('Alright, **@{0}**, I choose: "{1}"'.format(user.display_name, random.choice(choices)))
12,004
def codegen_py(typeit_schema: TypeitSchema, top: bool = True, indent: int = 4) -> Tuple[str, Sequence[str]]: """ :param typ: A type (NamedTuple definition) to generate a source for. :param top: flag to indicate that a toplevel structure is to be generated. When False, a sub-structure of the toplevel structure is to be generated. :param indent: keep indentation for source lines. :return: """ typ = typeit_schema.typ overrides = typeit_schema.overrides wrappers = typeit_schema.sequence_wrappers overrides_source: List[str] = [] if typ is None: type_name = 'None' elif typ is Any: type_name = 'Any' else: type_name = typ.__name__ required_imports = [ '# ------- generated by typeit -------', 'from typing import Any, NamedTuple, Optional, Sequence', ] wrapped_type_literal = ('Sequence[' * wrappers) + type_name + (']' * wrappers) if typ in PythonPrimitives: required_imports.extend([ 'from typeit import TypeConstructor', ]) if wrappers: generated_definitions = [ f'Main = {wrapped_type_literal}' ] else: generated_definitions = [] elif typ is Any: required_imports.extend([ 'from typeit import TypeConstructor', ]) generated_definitions = [ f'Main = {wrapped_type_literal}' ] else: required_imports.extend([ 'from typeit import TypeConstructor', ]) ind = ' ' * indent generated_definitions = [f'class {type_name}(NamedTuple):'] hints = get_type_hints(typ) if not hints: generated_definitions.extend([ f'{ind}...', ]) for field_name, field_type in hints.items(): # 1. Generate source code for the field type_literal = literal_for_type(field_type) if field_type not in BUILTIN_LITERALS_FOR_TYPES: # field_type: Union[NamedTuple, Sequence] # TODO: Sequence/List/PVector flag-based folded_lists_count = type_literal.count('Sequence[') if folded_lists_count: # field_type: Sequence[T] # traverse to the folded object for __ in range(folded_lists_count): field_type = field_type.__args__[0] if field_type not in BUILTIN_LITERALS_FOR_TYPES: sub, folded_overrides = codegen_py( TypeitSchema(field_type, overrides, wrappers), False ) generated_definitions.insert(0, f'{sub}{NEW_LINE}{NEW_LINE}') overrides_source.extend(folded_overrides) else: # field_type: NamedTuple # Generate a folded structure definition in the global scope # and then use it for the current field sub, folded_overrides = codegen_py( TypeitSchema(field_type, overrides, wrappers), False ) generated_definitions.insert(0, f'{sub}{NEW_LINE}{NEW_LINE}') overrides_source.extend(folded_overrides) generated_definitions.append(f'{ind}{field_name}: {type_literal}') # 2. 
Check if the field included into overrides field_override: Optional[str] = overrides.get(getattr(typ, field_name)) if field_override: overrides_source.append( f"{ind}{type_name}.{field_name}: '{field_override}'," ) if top: if wrappers: type_literal = 'Main' else: type_literal = type_name if overrides_source: overrides_part = [ LINE_SKIP, LINE_SKIP, 'overrides = {' + NEW_LINE + NEW_LINE.join(overrides_source) + NEW_LINE + '}' ] constructor_part = f'TypeConstructor & overrides ^ {type_literal}' else: overrides_part = [] constructor_part = f'TypeConstructor ^ {type_literal}' generated_definitions.extend(overrides_part) constructor_serializer_def = ( f'mk_{inflection.underscore(type_literal)}, ' f'serialize_{inflection.underscore(type_literal)} = {constructor_part}' ) generated_definitions.extend([ LINE_SKIP, LINE_SKIP, constructor_serializer_def, LINE_SKIP, ]) # TODO: import Sequence/List/PVector flag-based generated_definitions = ( required_imports + [LINE_SKIP, LINE_SKIP] + generated_definitions ) return NEW_LINE.join(generated_definitions), overrides_source
12,005
def _tuple_to_string(tup):
    """
    Converts a tuple of pitches to a string

    Params:
    * tup (tuple): a tuple of pitch classes, like (11, 10, 5, 9, 3)

    Returns:
    * string: e.g., 'et593'
    """
    def _convert(pitch):
        pitch = mod_12(pitch)
        if pitch not in (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11):
            # should never happen
            raise ValueError('unexpected pitch found: %s' % pitch)
        if pitch == 10:
            return 't'
        elif pitch == 11:
            return 'e'
        else:
            return str(pitch)

    output = []
    for pitch in tup:
        output.append(_convert(pitch))
    return ''.join(output)
12,006
def update_file_structure(limit=999):
    """
    Opens the hardcoded file containing CIKs to collect data on, and ensures they
    have the associated directory.

    :return: None
    """
    # FUTURE Adapt to CIK_List.pkl
    f = open('objects/ref/CIK_List.txt', 'r')
    if not os.path.exists('/storage/cik'):
        os.mkdir('/storage/cik')
    # FUTURE Adapt to a limited child number directory structure
    for line in f:
        directory = '/storage/cik/' + line.replace('\n', '')
        if not os.path.exists(directory):
            os.mkdir(directory)
    f.close()
12,007
def dashboard():
    """Logged in Dashboard screen."""
    session["redis_test"] = "This is a session variable."
    return render_template(
        "dashboard.jinja2",
        title="Flask-Session Tutorial.",
        template="dashboard-template",
        current_user=current_user,
        body="You are now logged in!",
    )
12,008
def choose_top_k(scores_flat, config):
    """Chooses the top-k beams as successors."""
    next_beam_scores, word_indices = tf.nn.top_k(scores_flat, k=config.beam_width)
    return next_beam_scores, word_indices
12,009
def train_valid_test_split(data, proportions='50:25:25'):
    """
    Splits the data into 3 parts - training, validation and test sets
    :param proportions: proportions for the split, like 2:1:1 or 50:30:20
    :param data: preprocessed data
    :return: X_train, Y_train, target_rtns_train,
             X_valid, Y_valid, target_rtns_valid,
             X_test, Y_test, target_rtns_test
    """
    features = [c for c in data.columns if c not in ('ret', 'bin')]
    n = len(data)
    # use a numpy array so the proportions can be normalized element-wise
    borders = np.array([float(p) for p in proportions.split(':')])
    borders = borders / np.sum(borders)
    train_ids = (0, int(np.floor(n * borders[0])))
    valid_ids = (train_ids[1] + 1, int(np.floor(n * np.sum(borders[:2]))))
    test_ids = (valid_ids[1] + 1, n)

    X_train = data[features].iloc[train_ids[0]:train_ids[1], :]
    X_valid = data[features].iloc[valid_ids[0]:valid_ids[1], :]
    X_test = data[features].iloc[test_ids[0]:test_ids[1], :]

    Y_train = data.bin.iloc[train_ids[0]:train_ids[1]]
    Y_valid = data.bin.iloc[valid_ids[0]:valid_ids[1]]
    Y_test = data.bin.iloc[test_ids[0]:test_ids[1]]

    target_rtns_train = data.ret.iloc[train_ids[0]:train_ids[1]]
    target_rtns_valid = data.ret.iloc[valid_ids[0]:valid_ids[1]]
    target_rtns_test = data.ret.iloc[test_ids[0]:test_ids[1]]

    return (X_train, Y_train, target_rtns_train,
            X_valid, Y_valid, target_rtns_valid,
            X_test, Y_test, target_rtns_test)
12,010
def basis_ders_on_quad_grid(knots, degree, quad_grid, nders, normalization):
    """
    Evaluate B-Splines and their derivatives on the quadrature grid.

    If called with normalization='M', this uses M-splines instead of B-splines.

    Parameters
    ----------
    knots : array_like
        Knots sequence.
    degree : int
        Polynomial degree of B-splines.
    quad_grid: 2D numpy.ndarray (ne,nq)
        Coordinates of quadrature points of each element in 1D domain,
        which can be given by quadrature_grid() or chosen arbitrarily.
    nders : int
        Maximum derivative of interest.
    normalization : str
        Set to 'B' for B-splines, and 'M' for M-splines.

    Returns
    -------
    basis: 4D numpy.ndarray
        Values of B-Splines and their derivatives at quadrature points in each
        element of 1D domain. Indices are
        . ie: global element         (0 <= ie <  ne    )
        . il: local basis function   (0 <= il <= degree)
        . id: derivative             (0 <= id <= nders )
        . iq: local quadrature point (0 <= iq <  nq    )
    """
    # TODO: add example to docstring
    ne, nq = quad_grid.shape
    basis = np.zeros((ne, degree + 1, nders + 1, nq))
    if normalization == 'M':
        scaling = 1. / basis_integrals(knots, degree)

    for ie in range(ne):
        xx = quad_grid[ie, :]
        for iq, xq in enumerate(xx):
            span = find_span(knots, degree, xq)
            ders = basis_funs_all_ders(knots, degree, xq, span, nders)
            if normalization == 'M':
                ders *= scaling[None, span - degree:span + 1]
            basis[ie, :, :, iq] = ders.transpose()

    return basis
12,011
def analyze_jumps(jumps):
    """takes the list of Jump tuples from group_jumps. returns JumpCmp.
    fails if input is weird (tell me more).
    """
    # todo: more of a decompile, AST approach here? look at uncompyle.
    if jumps[-1].head is not None:
        raise BadJumpTable("last jump not an else")
    if len(jumps) < 3:
        raise BadJumpTable("too few, what's the point")
    head0 = jumps[0].head
    if head0[-2].code != opcode.opmap['COMPARE_OP'] or head0[-2].arg != 2:
        raise BadJumpTable('cmp not ==', 0)
    if head0[-3].code != opcode.opmap['LOAD_CONST']:
        raise BadJumpTable('cmp right not LOAD_CONST', 0)

    def compare_head(headi, i):
        if len(head0) != len(headi):
            raise BadJumpTable('length mismatch', i)
        if headi[-2].code != opcode.opmap['COMPARE_OP'] or headi[-2].arg != 2:
            raise BadJumpTable('cmp not ==', i)
        # todo below: it would be great if this didn't have to be a constant
        if headi[-3].code != opcode.opmap['LOAD_CONST']:
            raise BadJumpTable('cmp right not LOAD_CONST', i)
        if any(h0[1:] != hi[1:] for h0, hi in zip(head0[:-3], headi[:-3])):
            raise BadJumpTable('preamble mismatch', i)

    for i in range(1, len(jumps) - 1):
        compare_head(jumps[i].head, i)
    load_left = head0[:-3]  # sans the const, sans the compare, sans the jump
    const2offset = {j.head[-3].arg: j.head[0].pos for j in jumps[:-1]}
    return JumpCmp(load_left, const2offset)
12,012
def create_notification_handler(actor, recipient, verb, **kwargs):
    """
    Handler function to create a Notification instance.

    :requires:
    :param actor: User instance of that user who makes the action.
    :param recipient: User instance, a list of User instances or string
        'global' defining who should be notified.
    :param verb: Notification attribute with the right choice from the list.

    :optional:
    :param action_object: Model instance on which the verb was executed.
    :param key: String defining what kind of notification is going to be created.
    :param id_value: UUID value assigned to a specific element in the DOM.
    """
    key = kwargs.pop("key", "notification")
    id_value = kwargs.pop("id_value", None)
    if recipient == "global":
        users = get_user_model().objects.all().exclude(username=actor.username)
        for user in users:
            Notification.objects.create(
                actor=actor,
                recipient=user,
                verb=verb,
                action_object=kwargs.pop("action_object", None),
            )
        notification_broadcast(actor, key)
    elif isinstance(recipient, list):
        for user in recipient:
            Notification.objects.create(
                actor=actor,
                recipient=get_user_model().objects.get(username=user),
                verb=verb,
                action_object=kwargs.pop("action_object", None),
            )
    elif isinstance(recipient, get_user_model()):
        Notification.objects.create(
            actor=actor,
            recipient=recipient,
            verb=verb,
            action_object=kwargs.pop("action_object", None),
        )
        notification_broadcast(
            actor, key, id_value=id_value, recipient=recipient.username
        )
    else:
        pass
12,013
def data(*args, **kwargs):
    """
    The HTML <data> Element links a given content with a machine-readable
    translation. If the content is time- or date-related, the <time> element
    must be used.
    """
    return el('data', *args, **kwargs)
12,014
def process_dir(dir, doc_type = 'Annual Return', parallel = False): """ Process all document directories in a directory. Parameters ---------- dir : str Relative path to directory containing the document directories doc_type : str Type of documents (default = 'Annual Return') parallel : bool Process directories in parallel for faster performance Returns ------- data_df : pandas.DataFrame Dataframe containing information about all document directories processed successfully failed_df : pandas.DataFrame Dataframe containing information about all document directories processed unsuccessfully and their corresponding traceback """ doc_data_list = [] failed_list = [] if parallel: completed = 0 def worker(input, output, failed): nonlocal completed for doc_dir in iter(input.get, 'STOP'): completed += 1 try: doc_data = process_doc_dir(doc_dir, doc_type) assert (isinstance(doc_data, pd.DataFrame) or isinstance(doc_data, pd.Series)) output.put(doc_data) except: exception = traceback.format_exc(7) failed.put((doc_dir, exception)) print(f'\t\t****{mp.current_process().name} is at iteration {completed}****') NUMBER_OF_PROCESSES = mp.cpu_count() doc_list = [f'{dir}/{doc_dir}' for doc_dir in os.listdir(dir) if os.path.isdir(f'{dir}/{doc_dir}')] num_doc = len(doc_list) print(f"\t\t****Total documents to be processed: {num_doc}****\n\n") task_manager = mp.Manager() done_manager = mp.Manager() failed_manager = mp.Manager() task_queue = task_manager.Queue() done_queue = done_manager.Queue() failed_queue = failed_manager.Queue() for doc_dir in doc_list: task_queue.put(doc_dir) for i in range(NUMBER_OF_PROCESSES): task_queue.put('STOP') process_list = [mp.Process(name=f'Process {str(i)}', target=worker, args=(task_queue, done_queue, failed_queue)) for i in range(NUMBER_OF_PROCESSES)] for process in process_list: process.start() for process in process_list: process.join() while not done_queue.empty(): doc_data_list.append(done_queue.get()) while not failed_queue.empty(): failed_list.append(failed_queue.get()) else: doc_list = [f'{dir}/{doc_dir}'.replace('//', '/') for doc_dir in os.listdir(dir) if os.path.isdir(f'{dir}/{doc_dir}'.replace('//', '/'))] num_doc = len(doc_list) print(f"\t\t****Total documents to be processed: {num_doc}****\n\n") for count, doc_dir in enumerate(doc_list): print(f'\t\t****{count} items processed out of {num_doc}****') try: doc_data = process_doc_dir(doc_dir, doc_type = doc_type) doc_data_list.append(doc_data) except: exception = traceback.format_exc(7) failed_list.append((doc_dir, exception)) if len(failed_list) != 0: failed_df = pd.Series(dict(failed_list)) else: failed_df = pd.Series(['There were no exceptions']) if len(doc_data_list) != 0: data_df = pd.concat(doc_data_list, axis = 0, sort=False) else: data_df = pd.Series(['No documents were scraped successfully']) print('\t\t****Task completed****') print(data_df) return (data_df, failed_df)
12,015
def key_I(buf, input_line, cur, count):
    """Move cursor to first non-blank character and start Insert mode.

    See Also: `key_base()`.
    """
    pos, _, _ = motion_carret(input_line, cur, 0)
    set_cur(buf, input_line, pos)
    set_mode("INSERT")
12,016
def get_crops(nodules, fmt='raw', nodule_shape=(32, 64, 64), batch_size=20,
              share=0.5, histo=None, variance=(36, 144, 144),
              hu_lims=(-1000, 400), **kwargs):
    """ Get pipeline that performs preprocessing and crops cancerous/non-cancerous
    nodules in a chosen proportion.

    Parameters
    ----------
    nodules : pd.DataFrame
        contains:
        - 'seriesuid': index of patient or series.
        - 'z','y','x': coordinates of nodules center.
        - 'diameter': diameter, in mm.
    fmt : str
        can be either 'raw', 'blosc' or 'dicom'.
    nodule_shape : tuple, list or ndarray of int
        crop shape along (z,y,x).
    batch_size : int
        number of nodules in batch generated by pipeline.
    share : float
        share of cancer crops in the batch.
    histo : tuple
        :func:`numpy.histogramdd` output.
        Used for sampling non-cancerous crops
    variance : tuple, list or ndarray of float
        variances of normally distributed random shifts of
        nodules' start positions
    hu_lims : tuple, list of float
        seq of len=2, representing limits of hu-trimming in normalize_hu-action.
    **kwargs
        spacing : tuple
            (z,y,x) spacing after resize.
        shape : tuple
            (z,y,x) shape after crop/pad.
        method : str
            interpolation method ('pil-simd' or 'resize').
            See :func:`~radio.CTImagesBatch.resize`.
        order : None or int
            order of scipy-interpolation (<=5), if used.
        padding : str
            mode of padding, any supported by :func:`numpy.pad`.

    Returns
    -------
    pipeline
    """
    # update args of unify spacing
    args_unify_spacing = copy(kwargs_default)
    args_unify_spacing.update(kwargs)

    # set up other args-dicts
    args_sample_nodules = dict(nodule_size=nodule_shape, batch_size=batch_size,
                               share=share, histo=histo, variance=variance)

    # set up the pipeline
    pipeline = (Pipeline()
                .load(fmt=fmt)
                .fetch_nodules_info(nodules=nodules)
                .unify_spacing(**args_unify_spacing)
                .create_mask()
                .normalize_hu(min_hu=hu_lims[0], max_hu=hu_lims[1])
                .sample_nodules(**args_sample_nodules)
                .run(lazy=True, batch_size=RUN_BATCH_SIZE, shuffle=True)
                )

    return pipeline
12,017
def get_dict_from_args(args):
    """Extracts a dict from task argument string."""
    d = {}
    if args:
        for k, v in [p.strip().split('=') for p in args.split(',')]:
            d[k] = v
    return d
12,018
def plot_mean_feature_impact(
    explanations,
    features_name=None,
    max_display=None,
):
    """
    The same as plot_feature_importance, but we consider the mean explanation
    value grouped by feature. A more informative plot is the summary_plot_tabular.
    """
    # sanitize explanations to numpy array
    explanations = np.array(explanations)
    mean_explanation_per_feature = np.mean(explanations, axis=0)
    plot_feature_impact(
        mean_explanation_per_feature,
        features_name=features_name,
        max_display=max_display
    )
12,019
def multi_area_propagation_gpu(input_domain, net_model, thread_number=32): """ Propagation of the input domain through the network to obtain the OVERESTIMATION of the output bound. The process is performed applying the linear combination node-wise and the necessary activation functions. The process is on GPU, completely parallelized on NVIDIA CUDA GPUs and c++ code. Parameters ---------- input_domain : list the input domain expressed as a 3-dim matrix. (a) a list of list for each splitted domain; (b) a list of bound for each input node and (c) a list of two element for the node, lower and upper net_model : tf.keras.Model tensorflow model to analyze, the model must be formatted in the 'tf.keras.Model(inputs, outputs)' format thread_number : int number of CUDA thread to use for each CUDA block, the choice is free and does not effect the results, can however effect the performance Returns: -------- reshaped_bound : list the propagated bound in the same format of the input domain (3-dim) """ # Ignore the standard warning from CuPy import warnings warnings.filterwarnings("ignore") # Import the necessary library for the parallelization (Cupy) and also the c++ CUDA code. import cupy as cp from netver.utils.cuda_code import cuda_code # Load network shape, activations and weights layer_sizes = [] activations = [] full_weights = np.array([]) full_biases = np.array([]) # Iterate on each layer of the network, exluding the input (tf2 stuff) for layer in net_model.layers[1:]: # Obtain the activation function list if layer.activation == tf.keras.activations.linear: activations.append(0) elif layer.activation == tf.keras.activations.relu: activations.append(1) elif layer.activation == tf.keras.activations.tanh: activations.append(2) elif layer.activation == tf.keras.activations.sigmoid: activations.append(3) # Obtain the netowrk shape as a list layer_sizes.append(layer.input_shape[1]) # Obtain all the weights for paramters and biases weight, bias = layer.get_weights() full_weights = np.concatenate((full_weights, weight.T.reshape(-1))) full_biases = np.concatenate((full_biases, bias.reshape(-1))) # Fixe last layer size layer_sizes.append( net_model.output.shape[1] ) # Initialize the kernel loading the CUDA code my_kernel = cp.RawKernel(cuda_code, 'my_kernel') # Convert all the data in cupy array beore the kernel call max_layer_size = max(layer_sizes) results_cuda = cp.zeros(layer_sizes[-1] * 2 * len(input_domain), dtype=cp.float32) layer_sizes = cp.array(layer_sizes, dtype=cp.int32) activations = cp.array(activations, dtype=cp.int32) input_domain = cp.array(input_domain, dtype=cp.float32) full_weights = cp.array(full_weights, dtype=cp.float32) full_biases = cp.array(full_biases, dtype=cp.float32) # Define the number of CUDA block block_number = int(len(input_domain) / thread_number) + 1 # Create and launch the kernel, wait for the sync of all threads kernel_input = (input_domain, len(input_domain), layer_sizes, len(layer_sizes), full_weights, full_biases, results_cuda, max_layer_size, activations) my_kernel((block_number, ), (thread_number, ), kernel_input) cp.cuda.Stream.null.synchronize() # Reshape the results and convert in numpy array reshaped_bound = cp.asnumpy(results_cuda).reshape((len(input_domain), net_model.layers[-1].output_shape[1], 2)) # return reshaped_bound
12,020
def kernel():
    """Create an ipykernel conda environment separate from this test
    environment where jupyter console is installed.

    The environments must be separate, otherwise we cannot easily check if
    kernel start is activating the environment or if it was already active
    when the test suite started.
    """
    # unique name for the kernel and environment
    name = str(uuid4())
    env_path = '{}/kernel-env-{name}'.format(gettempdir(), name=name)
    stdout = subprocess.check_output(
        ["conda", "create", "--yes", "--quiet", "--prefix", env_path,
         "python=3.6", "ipykernel"])
    stdout = pexpect.run('/bin/bash -c "source activate {env_path} && \
                         python -m ipykernel install --user \
                         --name {name}"'.format(env_path=env_path, name=name))
    # query jupyter for the user data directory in a separate command to
    # make parsing easier
    stdout = pexpect.run('jupyter --data-dir')
    user_path = stdout.decode('utf-8').strip()
    # the kernel spec resides in the jupyter user data path
    spec_path = os.path.join(user_path, 'kernels', name)
    yield Kernel(name, os.path.join(spec_path, 'kernel.json'), env_path)
    shutil.rmtree(env_path)
12,021
def opensslCmsSignedDataCreate(conveyedInfoFile, cert, privateKey):
    """Create a signed CMS encoded object given a conveyed-info file and
    base64 encode the response."""
    opensslCmdArgs = ["openssl", "cms", "-sign", "-in", conveyedInfoFile,
                      "-signer", cert,
                      "-inkey", privateKey,
                      "-outform", "der", "-nodetach"]
    conveyedInfoCmsSignedDerBase64 = runOpensslCmd(opensslCmdArgs, ["base64"])
    return conveyedInfoCmsSignedDerBase64
12,022
def auth_error_handler() -> None:
    """Handle authorization error.

    Raises an exception that will be handled by Flask-RESTPlus error handling.
    """
    raise UnauthorizedException('Invalid credentials.')
12,023
def compute_kv(config):
    """Parse log data and call draw"""
    result = {}
    for _cfg in config['data']:
        data = data_parser.log_kv(_cfg['path'], _cfg['phase'], _cfg['keys'])

        # clip from start idx
        if 'start_iter' in _cfg:
            start_idx = 0
            for idx, iteration in enumerate(data['iter']):
                if iteration >= _cfg['start_iter']:
                    start_idx = idx
                    break
            data = utils.process_keys(utils.clip, data, start_idx)

        # downsampling all points including iter
        if 'iter_invl' in _cfg:
            invl = int(_cfg['iter_invl'] / (data['iter'][1] - data['iter'][0]))
            assert invl >= 1
            data = utils.process_keys(utils.downsampling, data, invl)

        res_list = {}
        # compute max
        if _cfg['task'] == 'max':
            idx, value = _kv_max(data, _cfg['sort_key'])
            # broadcast to other key
            res_list['iter'] = data['iter'][idx]
            for key in _cfg['keys']:
                res_list[key] = data[key][idx]
        elif _cfg['task'] == 'min':
            idx, value = _kv_min(data, _cfg['sort_key'])
            # broadcast to other key
            res_list['iter'] = data['iter'][idx]
            for key in _cfg['keys']:
                res_list[key] = data[key][idx]

        # print
        print(_cfg['path'])
        for res in res_list:
            print(' ', res, res_list[res])

        # add-in result
        result[os.path.basename(_cfg['path'])] = data

    return result
12,024
def cmd_abbreviation(ensoapi, query=None):
    """ Search for abbreviation meaning """
    ws = WebSearchCmd("http://www.urbandictionary.com/define.php?term=%(query)s")
    ws(ensoapi, query)
12,025
def change_app_header(uri, headers, body):
    """ Add Accept header for preview features of Github apps API """
    headers["Accept"] = "application/vnd.github.machine-man-preview+json"
    return uri, headers, body
12,026
def fib_fail(n: int) -> int:
    """doesn't work because it's missing the base case"""
    return fib_fail(n - 1) + fib_fail(n - 2)
12,027
def get_grad_hook(mod, grad_in, grad_out, mod_name=None, grad_map=None):
    """ The hook to collect gradient. """
    assert isinstance(mod_name, str)
    assert isinstance(grad_map, dict)
    assert len(grad_out) == 1
    grad_map[mod_name] = grad_out[0]
12,028
def int2str(num, radix=10, alphabet=BASE85):
    """helper function for quick base conversions from integers to strings"""
    return NumConv(radix, alphabet).int2str(num)
12,029
def randomize_onesample(a, n_iter=10000, h_0=0, corrected=True, random_seed=None, return_dist=False): """Nonparametric one-sample T test through randomization. On each iteration, randomly flip the signs of the values in ``a`` and test the mean against 0. If ``a`` is two-dimensional, it is assumed to be shaped as (n_observations, n_tests), and a max-statistic based approach is used to correct the p values for multiple comparisons over tests. Parameters ---------- a : array-like input data to test n_iter : int number of randomization iterations h_0 : float, broadcastable to tests in a null hypothesis for the group mean corrected : bool correct the p values in the case of multiple tests random_seed : int or None seed to use for random number generator return_dist : bool if True, return the null distribution of t statistics Returns ------- obs_t : float or array of floats group mean T statistic(s) corresponding to tests in input obs_p : float or array of floats one-tailed p value that the population mean is greater than h_0 (1 - percentile under the null) dist : ndarray, optional if return_dist is True, the null distribution of t statistics """ a = np.asarray(a, np.float) if a.ndim < 2: a = a.reshape(-1, 1) n_samp, n_test = a.shape a -= h_0 rs = np.random.RandomState(random_seed) flipper = (rs.uniform(size=(n_samp, n_iter)) > 0.5) * 2 - 1 flipper = (flipper.reshape(n_samp, 1, n_iter) * np.ones((n_samp, n_test, n_iter), int)) rand_dist = a[:, :, None] * flipper err_denom = np.sqrt(n_samp - 1) std_err = rand_dist.std(axis=0) / err_denom t_dist = rand_dist.mean(axis=0) / std_err obs_t = a.mean(axis=0) / (a.std(axis=0) / err_denom) if corrected: obs_p = 1 - percentile_score(t_dist.max(axis=0), obs_t) / 100 else: obs_p = [] for obs_i, null_i in zip(obs_t, t_dist): obs_p.append(1 - percentile_score(null_i, obs_i) / 100) obs_p = np.array(obs_p) if a.shape[1] == 1: obs_t = np.asscalar(obs_t) obs_p = np.asscalar(obs_p) t_dist = t_dist.squeeze() if return_dist: return obs_t, obs_p, t_dist return obs_t, obs_p
12,030
def git_get_project(
    directory: str, token: Optional[str] = None, revisions: Optional[Dict[str, str]] = None
) -> BuiltInCommand:
    """
    Create an Evergreen command to clone the tracked project and check the current revision.

    Also applies patches if the task was created by a patch build.

    :param directory: Directory to clone into.
    :param token: Use a token to clone instead of ssh key.
    :param revisions: Map of revisions to use for modules.
    """
    params = {
        "directory": directory,
    }
    add_if_exists(params, "token", token)
    add_if_exists(params, "revisions", revisions)

    return BuiltInCommand("git.get_project", params)
12,031
def main(args=None):
    """ec2mc script's entry point

    Args:
        args (list): Arguments for argparse. If None, set to sys.argv[1:].
    """
    if args is None:
        args = sys.argv[1:]

    try:
        # Classes of available commands in the commands directory
        commands = [
            configure_cmd.Configure,
            aws_setup_cmd.AWSSetup,
            server_cmds.Server,
            servers_cmds.Servers,
            address_cmds.Address,
            user_cmds.User
        ]

        # Use argparse to turn args into namedtuple of arguments
        cmd_args = _argv_to_cmd_args(args, commands)

        # If basic configuration being done, skip config validation
        if cmd_args.command != "configure":
            # Validate config's config.json
            validate_config.main()
            # Validate config's aws_setup.json and YAML instance templates
            validate_setup.main()

        # Create an instance from the appropriate command class
        chosen_cmd = next(cmd(cmd_args) for cmd in commands
                          if cmd.cmd_name() == cmd_args.command)

        # Validate IAM user has needed permissions to use the command
        halt.assert_empty(chosen_cmd.blocked_actions(cmd_args))

        # Use the command
        chosen_cmd.main(cmd_args)
    except SystemExit:
        return False

    return True
12,032
def body_contour(binary_image):
    """Helper function to get body contour"""
    contours = find_contours(binary_image)
    areas = [cv2.contourArea(cnt) for cnt in contours]
    body_idx = np.argmax(areas)
    return contours[body_idx]
12,033
def rule_like(rule, pattern): """ Check if JsonLogic rule matches a certain 'pattern'. Pattern follows the same structure as a normal JsonLogic rule with the following extensions: - '@' element matches anything: 1 == '@' "jsonlogic" == '@' [1, 2] == '@' {'+': [1, 2]} == '@' {'+': [1, 2]} == {'@': [1, 2]} {'+': [1, 2]} == {'+': '@'} {'+': [1, 2]} == {'+': ['@', '@']} {'+': [1, 2]} == {'@': '@'} - 'number' element matches any numeric value: 1 == 'number' 2.34 == 'number' [1, 2] == ['number', 'number'] {'+': [1, 2]} == {'+': ['number', 'number']} - 'string' element matches any string value: "name" == 'string' {'cat': ["json", "logic"]} = {'cat': ['string', 'string']} - 'array' element matches an array of any length: [] == 'array' [1, 2, 3] = 'array' {'+': [1, 2]} == {'+': 'array'} Use this method to make sure JsonLogic rule is correctly constructed. """ if pattern == rule: return True if pattern == '@': return True if pattern == 'number': return _is_numeric(rule) if pattern == 'string': return _is_string(rule) if pattern == "array": return _is_array(rule) if is_logic(pattern): if is_logic(rule): # Both pattern and rule are a valid JsonLogic rule, go deeper pattern_operator = _get_operator(pattern) rule_operator = _get_operator(rule) if pattern_operator == '@' or pattern_operator == rule_operator: # Operators match, go deeper and try matching values return rule_like( _get_values(rule, rule_operator, normalize=False), _get_values(pattern, pattern_operator, normalize=False)) return False # All above assumptions failed if _is_array(pattern): if _is_array(rule): # Both pattern and rule are arrays, go deeper if len(pattern) == len(rule): # Length of pattern and rule arrays are the same, # go deeper and try matching each value return all( rule_like(rule_elem, pattern_elem) for rule_elem, pattern_elem in zip(rule, pattern)) return False # All above assumptions failed return False
12,034
def apt_repo(module, *args):
    """run apt-repo with args and return its output"""
    # make args list to use in concatenation
    args = list(args)
    rc, out, err = module.run_command([APT_REPO_PATH] + args)

    if rc != 0:
        module.fail_json(msg="'%s' failed: %s" % (' '.join(['apt-repo'] + args), err))

    return out
12,035
async def get_data(
    *,
    config: Box,
    region: Region,
    start: Optional[int] = None,
    end: Optional[int] = None,
) -> Dict[Any, Any]:
    """Return a new consumer token."""
    lookup = f"awattar.{region.name.lower()}"
    awattar_config = config[lookup]
    endpoint = f"{awattar_config.host}{awattar_config.url}" + "{}"
    params = {}
    if start:
        params["start"] = str(start)
    if end:
        params["end"] = str(end)
    if params:
        url = endpoint.format("?" + urlencode(params))
    else:
        url = endpoint.format("")
    timeout = 10.0
    log.debug(f"Awattar URL: {url}")
    try:
        async with httpx.AsyncClient() as client:
            response = await client.get(url, timeout=timeout)
    except Exception as e:
        log.error(f"Caught an exception while fetching data from the Awattar API: {e}")
        raise
    try:
        data = response.json()
    except Exception as e:
        log.error(f"Could not JSON decode the Awattar response: {e}")
        raise
    return data
12,036
def deactivated_equalities_generator(block):
    """
    Generator which returns all deactivated equality Constraint components in a
    model.

    Args:
        block : model to be studied

    Returns:
        A generator which returns all deactivated equality Constraint
        components block
    """
    for c in total_equalities_generator(block):
        if not c.active:
            yield c
12,037
def get_rasterization_params() -> RasterizationParams:
    """
    Construct the RasterizationParams namedtuple from the static configuration file

    :return: the rasterization parameters
    """
    if cfg is None:
        load_cfg()

    # get rasterization section
    rasterization_dict = cfg[compute_dsm_tag][rasterization_tag]

    rasterization_params = RasterizationParams(*rasterization_dict.values())
    return rasterization_params
12,038
def compact(values):
    """Creates a generator (that can be iterated using next()), from a list of
    values, avoiding any adjacent duplicates

    Args:
        values: Iterator of integer values

    Returns:
        A generator without adjacent duplicates
    """
    # check that the iterator is not empty
    if values:
        # the iterator is not empty
        curr = object()  # use to keep track of the current (previous) value
        for val in values:  # iterating through iterator
            if curr != val:
                # adjacent values are different, yield this value too
                yield val
                # update current
                curr = val
12,039
def main():
    """Make a jazz noise here"""
    args = get_args()
    kmers1 = count_kmers(args.file1, args.kmer)
    kmers2 = count_kmers(args.file2, args.kmer)

    for common in set(kmers1).intersection(set(kmers2)):
        print('{:10} {:5} {:5}'.format(
            common, kmers1.get(common), kmers2.get(common)))
12,040
def rain_attenuation_probability(lat, lon, el, hs=None, Ls=None, P0=None): """ The following procedure computes the probability of non-zero rain attenuation on a given slant path Pr(Ar > 0). Parameters ---------- lat : number, sequence, or numpy.ndarray Latitudes of the receiver points lon : number, sequence, or numpy.ndarray Longitudes of the receiver points el : sequence, or number Elevation angle (degrees) hs : number, sequence, or numpy.ndarray, optional Heigh above mean sea level of the earth station (km). If local data for the earth station height above mean sea level is not available, an estimate is obtained from the maps of topographic altitude given in Recommendation ITU-R P.1511. Ls : number, sequence, or numpy.ndarray, optional Slant path length from the earth station to the rain height (km). If data about the rain height is not available, this value is estimated automatically using Recommendation ITU-R P.838 P0 : number, sequence, or numpy.ndarray, optional Probability of rain at the earth station, (0 ≤ P0 ≤ 1) Returns ------- p: Quantity Probability of rain attenuation on the slant path (%) References ---------- [1] Propagation data and prediction methods required for the design of Earth-space telecommunication systems: https://www.itu.int/dms_pubrec/itu-r/rec/p/R-REC-P.618-12-201507-I!!PDF-E.pdf """ type_output = get_input_type(lat) lat = prepare_input_array(lat) lon = prepare_input_array(lon) lon = np.mod(lon, 360) el = prepare_quantity(prepare_input_array(el), u.deg, 'Elevation angle') hs = prepare_quantity( hs, u.km, 'Heigh above mean sea level of the earth station') Ls = prepare_quantity( Ls, u.km, 'Heigh above mean sea level of the earth station') P0 = prepare_quantity(P0, u.pct, 'Point rainfall rate') val = __model.rain_attenuation_probability(lat, lon, el, hs, Ls, P0) return prepare_output_array(val, type_output) * 100 * u.pct
12,041
def discover(paths=None):
    """Get the full list of files found in the registered folders

    Args:
        paths (list, Optional): directories which host preset files or None.
            When None (default) it will list from the registered preset paths.

    Returns:
        list: valid .json preset file paths.
    """
    presets = []
    for path in paths or preset_paths():
        path = os.path.normpath(path)
        if not os.path.isdir(path):
            continue

        # check for json files
        glob_query = os.path.abspath(os.path.join(path, "*.json"))
        filenames = glob.glob(glob_query)
        for filename in filenames:
            # skip private files
            if filename.startswith("_"):
                continue

            # check for file size
            if not check_file_size(filename):
                log.warning("Filesize is smaller than 1 byte for file '%s'", filename)
                continue

            if filename not in presets:
                presets.append(filename)

    return presets
12,042
def largets_prime_factor(num):
    """
    Returns the largest prime factor of num.
    """
    prime_factors = []
    for n in itertools.count(2):
        if n > num:
            break
        if num % n == 0:
            prime_factors.append(n)
            while num % n == 0:
                num = num // n  # integer division keeps num an int
    return max(prime_factors)
12,043
def run(): """ entrypoint """ parser = ArgumentParser( "kimsufi-checker", description="tool to perform actions when Kimsufi availabilty changes", ) parser.add_argument( "-s", "--sleep", metavar="SECONDS", type=int, default=60, help="duration (in seconds) between checks, default: 60", ) parser.add_argument( "-z", "--zone", dest="zones", action="append", metavar="ZONE", help="check availability in specific zones (example: rbx or gra)", ) parser.add_argument( "-x", "--available", metavar="COMMAND", help="command to execute when plan becomes available", ) parser.add_argument( "-X", "--not-available", metavar="COMMAND", help="command to execute when plan is not available anymore", ) parser.add_argument( "-1", "--execute-on-init", action="store_true", help="execute -x/-X action on first check, by default actions are run when plan status change", ) parser.add_argument( "plans", nargs="*", help="plans to check, example 1801sk13 or 1801sk14" ) args = parser.parse_args() if len(args.plans) == 0: data = get_data() plans = set() zones = set() for pref in data["availability"]: plans.add(pref["reference"]) for zref in pref["zones"]: zones.add(zref["zone"]) print("List of plans:") for plan in sorted(plans): print(" ", plan) print("List of zones:") for zone in sorted(zones): print(" ", zone) else: availability = None while True: try: if availability is None: # first loop availability = OrderedDict([(p, None) for p in args.plans]) else: sleep(args.sleep) data = get_data() for plan, previous_zones in availability.items(): current_zones = get_available_zones(data, plan, args.zones or []) availability[plan] = current_zones if previous_zones is None: # No previous data if len(current_zones) == 0: message( Fore.YELLOW, f"Plan {plan} is initially not available", prefix="", ) if ( args.execute_on_init and execute(args.not_available, plan) is None ): args.not_available = None else: message( Fore.GREEN, f"Plan {plan} is initially available", prefix="", ) if ( args.execute_on_init and execute(args.available, plan) is None ): args.available = None elif previous_zones == current_zones: # No change dot(Fore.GREEN if len(previous_zones) else Fore.YELLOW) elif len(current_zones) == 0: # Not available anymore message(Fore.YELLOW, f"Plan {plan} is not available anymore") if execute(args.not_available, plan) is None: args.not_available = None else: # Becomes available message(Fore.GREEN, f"Plan {plan} is now available") if execute(args.available, plan) is None: args.available = None except KeyboardInterrupt: break except BaseException as e: # pylint: disable=broad-except,invalid-name message(Fore.RED, f"Error: {e}")
12,044
def delete_md5(md5):
    """Delete the data of the file that has the MD5 hash."""
    file = File.query.filter(File.md5 == md5).one_or_none()
    schema = FileSchema()
    result = schema.dump(file)

    if file is not None:
        filename = f"{result['file_name']}.{result['file_type']}"
        if not os.path.exists(app.config['UPLOAD_FOLDER']):
            os.makedirs(app.config['UPLOAD_FOLDER'])
        folder = app.config['UPLOAD_FOLDER']
        file_path = os.path.join(folder, filename)
        os.remove(file_path)
        db.session.delete(file)
        db.session.commit()
        return make_response(f"File with MD5 hash {md5} deleted.", 200)
    else:
        abort(404, f"File not found with MD5 hash: {md5}")
12,045
def compute_eigenvectors(exx, exy, eyy):
    """
    exx, eyy can be 1d arrays or 2D arrays

    :param exx: strain component, float or 1d array
    :param exy: strain component, float or 1d array
    :param eyy: strain component, float or 1d array
    :rtype: list
    """
    e1, e2 = np.zeros(np.shape(exx)), np.zeros(np.shape(exx))  # eigenvalues
    v00, v01 = np.zeros(np.shape(exx)), np.zeros(np.shape(exx))
    v10, v11 = np.zeros(np.shape(exx)), np.zeros(np.shape(exx))  # eigenvectors
    dshape = np.shape(exx)
    if len(dshape) == 1:
        for i in range(len(exx)):
            [e11, e22, v] = eigenvector_eigenvalue(exx[i], exy[i], eyy[i])
            # convention of this code returns negative eigenvalues compared to my other codes
            e1[i], e2[i] = e11, e22
            v00[i], v10[i] = v[0][0], v[1][0]
            v01[i], v11[i] = v[0][1], v[1][1]
    elif len(dshape) == 2:
        for j in range(dshape[0]):
            for i in range(dshape[1]):
                [e11, e22, v] = eigenvector_eigenvalue(exx[j][i], exy[j][i], eyy[j][i])
                e1[j][i], e2[j][i] = e11, e22
                v00[j][i], v01[j][i] = v[0][0], v[0][1]
                v10[j][i], v11[j][i] = v[1][0], v[1][1]
    return [e1, e2, v00, v01, v10, v11]
12,046
def name_looks_valid(name: str) -> bool:
    """
    Guesses if a name field is valid.

    Valid is defined as being at least two words, each beginning with a capital
    letter and ending with a lowercase letter.

    :param name: the name to check
    :return: whether this name is considered valid
    """
    existing_parts = name.split()
    parts_that_look_like_names = list(
        filter(lambda part: fullmatch(r"[A-Z](?:[A-Za-z-']+)?[a-z]", part), existing_parts)
    )
    if len(existing_parts) < 2 or len(parts_that_look_like_names) < 2:
        return False
    if len(parts_that_look_like_names) > 2 or len(existing_parts) == len(parts_that_look_like_names):
        return True
    return False
12,047
def convert_polygons_to_lines(src_polygons, dst_lines, crs=None, add_allone_col=False):
    """Convert polygons to lines.

    Arguments:
        src_polygons {path to geopandas-readable file} -- Filename of the polygon vector
            dataset to be converted to lines.
        dst_lines {[type]} -- Filename where to write the line vector dataset to.

    Keyword Arguments:
        crs {dict or str} -- Output projection parameters as string or in dictionary format.
            This will reproject the data when a crs is given (not {None}) (default: {None}).
        add_allone_col {bool} -- Add an additional attribute column with all ones.
            This is useful, e.g. in case you want to use the lines with gdal_proximity
            afterwards (default: {True}).

    Returns:
        int -- Exit code 0 if successful.
    """
    gdf = gpd.read_file(src_polygons)
    geom_coords = gdf["geometry"]  # featureset.get(5)["geometry"]["coordinates"]
    lines = []
    row_ids = []
    for i_row, pol in tqdm(enumerate(geom_coords), total=len(geom_coords)):
        boundary = pol.boundary
        if boundary.type == 'MultiLineString':
            for line in boundary:
                lines.append(line)
                row_ids.append(i_row)
        else:
            lines.append(boundary)
            row_ids.append(i_row)

    gdf_lines = gdf.drop("geometry", axis=1).iloc[row_ids, :]
    gdf_lines["Coordinates"] = lines
    gdf_lines = gpd.GeoDataFrame(gdf_lines, geometry='Coordinates', crs=gdf.crs)
    if crs is not None:
        gdf_lines = gdf_lines.to_crs(crs)
    if add_allone_col:
        gdf_lines["ALLONE"] = 1
    Path(dst_lines).parent.mkdir(exist_ok=True, parents=True)
    gdf_lines.to_file(dst_lines)
    return 0
12,048
def format_decimal(amount):
    """ jinja2 filter function for decimal number treatment """
    amt_whole = int(amount)
    amt_whole_len = len(str(amt_whole))
    if amount < 1:
        amt_str = '{:0.15f}'.format(amount).rstrip("0").rstrip(".")
    elif amt_whole_len < 4:
        amt_str = '{:0.3f}'.format(amount).rstrip("0").rstrip(".")
    elif amt_whole_len < 6:
        amt_str = '{:0.2f}'.format(amount).rstrip("0").rstrip(".")
    elif amt_whole_len < 9:
        amt_str = '{:0.1f}'.format(amount).rstrip("0").rstrip(".")
    else:
        amt_str = '{}'.format(amt_whole)
    return amt_str
12,049
def remove_special_char(df, col):
    """Removes special characters such as % and $ from numeric variables
    and converts them into float"""
    df[col] = df[col].replace(regex=True, to_replace=r'[^0-9.\-]', value=r'')
    df[col] = df[col].astype("float")
    return df[col]
12,050
def getNonlinearInfo(numHiddenLayers, numBinary, unaryPerBinary):
    """
    Generates a 2D list to be used as a nonlinearInfo argument in building an
    EQL/EQL-div model

    # Arguments
        numHiddenLayers: integer, number of hidden layers (i.e. layers
            including nonlinear keras layer components)
        numBinary: list of integers, available numbers to be used as number of
            binary functions in a nonlinear layer component
        unaryPerBinary: integer, number of unary functions per binary function
            in a nonlinear layer component

    # Returns
        A 2D list of integers with dimension numHiddenLayers x 2. Rows
        represent layers, first column is number of unary functions, second
        column is number of binary functions
    """
    nonlinearInfo = [0 for i in range(numHiddenLayers)]
    for i in range(numHiddenLayers):
        v = np.random.choice(numBinary)  # binary nodes
        u = unaryPerBinary * v  # unary nodes
        nonlinearInfo[i] = [u, v]
    return nonlinearInfo
12,051
def construct_magmad_gateway_payload(gateway_id: str, hardware_id: str) -> types.Gateway:
    """
    Returns a default development magmad gateway entity given a desired
    gateway ID and a hardware ID pulled from the hardware secrets.

    Args:
        gateway_id: Desired gateway ID
        hardware_id: Hardware ID pulled from the VM

    Returns:
        Gateway object with fields filled in with reasonable default values
    """
    return types.Gateway(
        name='TestGateway',
        description='Test Gateway',
        tier='default',
        id=gateway_id,
        device=types.GatewayDevice(
            hardware_id=hardware_id,
            key=types.ChallengeKey(
                key_type='ECHO',
            ),
        ),
        magmad=types.MagmadGatewayConfigs(
            autoupgrade_enabled=True,
            autoupgrade_poll_interval=60,
            checkin_interval=60,
            checkin_timeout=30,
        ),
    )
12,052
def endpoint(fun): """ REST HTTP method endpoints should use this decorator. It converts the return value of the underlying method to the appropriate output format and sets the relevant response headers. It also handles RestExceptions, which are 400-level exceptions in the REST endpoints, AccessExceptions resulting from access denial, and also handles any unexpected errors using 500 status and including a useful traceback in those cases. If you want a streamed response, simply return a generator function from the inner method. """ @wraps(fun) def endpointDecorator(self, *path, **params): _setCommonCORSHeaders() cherrypy.lib.caching.expires(0) cherrypy.request.girderRequestUid = str(uuid.uuid4()) setResponseHeader('Girder-Request-Uid', cherrypy.request.girderRequestUid) try: _preventRepeatedParams(params) val = fun(self, path, params) # If this is a partial response, we set the status appropriately if 'Content-Range' in cherrypy.response.headers: cherrypy.response.status = 206 val = _mongoCursorToList(val) if callable(val): # If the endpoint returned anything callable (function, # lambda, functools.partial), we assume it's a generator # function for a streaming response. cherrypy.response.stream = True _logRestRequest(self, path, params) return val() if isinstance(val, cherrypy.lib.file_generator): # Don't do any post-processing of static files return val if isinstance(val, types.GeneratorType): val = list(val) except RestException as e: val = _handleRestException(e) except AccessException as e: val = _handleAccessException(e) except GirderException as e: val = _handleGirderException(e) except ValidationException as e: val = _handleValidationException(e) except cherrypy.HTTPRedirect: raise except Exception: # These are unexpected failures; send a 500 status logger.exception('500 Error') cherrypy.response.status = 500 val = dict(type='internal', uid=cherrypy.request.girderRequestUid) if config.getServerMode() == ServerMode.PRODUCTION: # Sanitize errors in production mode val['message'] = 'An unexpected error occurred on the server.' else: # Provide error details in non-production modes t, value, tb = sys.exc_info() val['message'] = '%s: %s' % (t.__name__, repr(value)) val['trace'] = traceback.extract_tb(tb) resp = _createResponse(val) _logRestRequest(self, path, params) return resp return endpointDecorator
12,053
def test_secrets_provider_get_expired(mock_name, mock_value, config):
    """
    Test SecretsProvider.get() with a cached but expired value
    """
    # Create a new provider
    provider = parameters.SecretsProvider(config=config)

    # Inject value in the internal store
    provider.store[(mock_name, None)] = ExpirableValue(mock_value, datetime.now() - timedelta(seconds=60))

    # Stub the boto3 client
    stubber = stub.Stubber(provider.client)
    response = {
        "ARN": f"arn:aws:secretsmanager:us-east-1:132456789012:secret/{mock_name}",
        "Name": mock_name,
        "VersionId": "7a9155b8-2dc9-466e-b4f6-5bc46516c84d",
        "SecretString": mock_value,
        "CreatedDate": datetime(2015, 1, 1),
    }
    expected_params = {"SecretId": mock_name}
    stubber.add_response("get_secret_value", response, expected_params)
    stubber.activate()

    try:
        value = provider.get(mock_name)

        assert value == mock_value
        stubber.assert_no_pending_responses()
    finally:
        stubber.deactivate()
12,054
def gene_annotations(corpus_path, annotation_path, mer_path): """ Creates a gene annotation file for each article in the corpus :param corpus_path: article corpus path :param mer_path: mer data path :param annotation_path: genes annotation path :return: gene annotation file for each article in the corpus """ for (dir_path, dir_names, file_names) in os.walk(corpus_path): for filename in file_names: annotations = [] gene_annotations = [] save_line = None article_file = open(corpus_path + filename, 'r', encoding='utf-8') article = str(article_file.read().encode('utf-8')) article_file.close() annotations_genes = os.popen('./' + mer_path + ' ' + article + ' genes').readlines() # print (annotations_genes) for annotations_gene in annotations_genes: annotations_gene = annotations_gene.split('\t') # annotation line with index, entity name and id if len(annotations_gene) == 4: save_line = annotations_gene[0] + '\t' + annotations_gene[1] + '\t' + annotations_gene[2] gene_annotations.append(str(annotations_gene).replace('[', '').replace('\'', '').replace(']', '').replace(',', '\t').replace('\\', '')) # annotation with id only elif len(annotations_gene) == 1: extra_id = annotations_gene[0].strip('\n') if save_line: gene_annotations.append(save_line + '\t' + extra_id + '\n') for annotations_gene in gene_annotations: gene_index_1 = annotations_gene.split('\t')[0] gene_index_2 = annotations_gene.split('\t')[1] gene_name = annotations_gene.split('\t')[2] gene_id = annotations_gene.split('\t')[3][:-1] annotations.append((gene_index_1, gene_index_2, gene_name, gene_id)) # sort by position in text annotations = sorted(annotations, key=lambda position: int(position[0])) annotation_file = open(annotation_path + filename, 'w', encoding='utf-8') for annotation in annotations: annotation_file.write(annotation[0] + '\t' + annotation[1] + '\t' + annotation[2] + '\t' + annotation[3] + '\n') annotation_file.close() return
12,055
def script_rename_number(config): """ The scripting version of `rename_number`. This function applies the rename to the entire directory. It also adds the tags to the header file of each fits. Parameters ---------- config : ConfigObj The configuration object that is to be used for this function. Returns ------- None """ # Extract the configuration parameters. data_directory = core.config.extract_configuration( config_object=config, keys=['data_directory']) begin_garbage = core.config.extract_configuration( config_object=config, keys=['renaming','begin_garbage']) # Obtain the labels. labels, raw = rename_number(data_directory=data_directory, begin_garbage=begin_garbage) # Add to all file headers. Assume that the order has not # changed between renaming steps. core.error.ifas_info("Adding the file number under the `NUMBER` card " "in the headers of the fits files in {data_dir} " "based on the file order." .format(data_dir=data_directory)) fits_files = core.io.get_fits_filenames(data_directory=data_directory) for (filedex, headerdex) in zip(fits_files, raw): __ = core.io.append_astropy_header_card( file_name=filedex, header_cards={'NUMBER':headerdex}) # Finally rename the files based on parallel appending. Glob # provides the directory. core.error.ifas_info("Appending the file number to the end of " "the files in {data_dir}." .format(data_dir=data_directory)) core.io.rename_by_parallel_append(file_names=fits_files, appending_names=labels, directory=None) return None
12,056
def jasper10x4(**kwargs):
    """
    Jasper 10x4 model from 'Jasper: An End-to-End Convolutional Neural Acoustic Model,'
    https://arxiv.org/abs/1904.03288.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    return get_jasper(version=("jasper", "10x4"), model_name="jasper10x4", **kwargs)
12,057
def swap(lst, idx1, idx2):
    """
    >>> swap([0, 1, 2], 0, 1)
    [1, 0, 2]
    >>> swap([0, 1, 2], 0, 0)
    [0, 1, 2]
    """
    # print("Swapping [{}, {}] from {}".format(idx1, idx2, lst))
    lst[idx1], lst[idx2] = lst[idx2], lst[idx1]
    # print("resulting to {}".format(lst))
    return lst
12,058
def drawLine(queryDef):
    """Draw a separator line

    Args:
        queryDef: the query definition
    """
    formatStr = ""
    valueList = []
    for title in queryDef.titleList:
        colDef = queryDef.tableCols[title]
        size = colDef.size + 2
        formatStr += ('+' + formatString("string", size, False))
        valueList.append("-" * size)
    formatStr += "+"
    print(formatStr % tuple(valueList))
12,059
def debug_keytester() -> None:
    """Show a keytester widget."""
    global _keytester_widget
    if (_keytester_widget and
            not sip.isdeleted(_keytester_widget) and
            _keytester_widget.isVisible()):
        _keytester_widget.close()
    else:
        _keytester_widget = miscwidgets.KeyTesterWidget()
        _keytester_widget.show()
12,060
def substring_index(column, delim=' ', cnt=1):
    """
    Returns the substring from string ``column`` before ``cnt`` occurrences of
    the delimiter ``delim``. If ``cnt`` is positive, everything to the left of
    the final delimiter (counting from the left) is returned. If ``cnt`` is
    negative, everything to the right of the final delimiter (counting from
    the right) is returned. substring_index performs a case-sensitive match
    when searching for ``delim``.
    """
    return _with_expr(exprs.SubstringIndex, column, delim, cnt)
12,061
def askPrize(mon: int) -> str:
    """
    Args:
        mon: the draw period (month) to look up
    Returns:
        the query result as a string
    """
    (date, data) = initData(mon)
    date = f"{date}月\n"
    ssp_prize = f"特別獎:{data[0]}\n"
    sp_prize = f"特獎:{data[1]}\n"
    first_prize = f"頭獎:{data[2]}、{data[3]}、{data[4]}\n"
    six_prize = f"六獎:{data[2][5:]}、{data[3][5:]}、{data[4][5:]}、{data[5]}\n"
    return date + ssp_prize + sp_prize + first_prize + six_prize
12,062
def maintain_all(delta_day=1):
    """
    Maintain the partitions of all mysql tables.
    """
    storage_rt_list = model_manager.get_storage_rt_objs_by_type(MYSQL)
    count = 0
    start_time_ts = time.time()
    mysql_conns = {}
    maintain_failed_rts = []
    # Compute the dates up front so a run that crosses midnight does not change which partitions are added.
    add_date = util.get_date_by_diff(delta_day)
    add_next_date = util.get_date_by_diff(delta_day + 1)
    for rt_storage in storage_rt_list:
        try:
            # Process entries one by one, generating and executing the corresponding actions directly,
            # instead of collecting all actions into a dict and traversing them in a second pass.
            count += 1
            cluster_name = rt_storage.storage_cluster_config.cluster_name
            physical_tn = rt_storage.physical_table_name
            if cluster_name not in mysql_conns:
                mysql_cluster = model_manager.get_storage_cluster_config(cluster_name, MYSQL)
                if mysql_cluster:
                    # TODO handle the exception when mysql cannot be connected
                    conn = get_mysql_connection(mysql_cluster[CONNECTION])
                    mysql_conns[cluster_name] = conn
            mysql_conn = mysql_conns[cluster_name]
            table_info = get_sql_result(mysql_conn, SHOW_TABLE_SQL % physical_tn)
            if table_info:
                pattern = re.compile(r"PARTITION p(\d+) VALUES LESS THAN")
                partitions = pattern.findall(table_info[0][1])  # list of partition dates
                if not partitions:
                    maintain_failed_rts.append(rt_storage.result_table_id)
                    continue
                if add_date not in partitions:
                    if MAX_PARTITION in partitions:
                        add_sql = REORGANIZE_PARTITION % (
                            physical_tn,
                            MAX_PARTITION,
                            add_date,
                            add_next_date,
                            MAX_PARTITION,
                        )
                    else:
                        add_sql = ADD_PARTITION % (physical_tn, add_date, add_next_date, MAX_PARTITION)
                    get_sql_result(mysql_conn, add_sql)
                expires_day = util.translate_expires_day(rt_storage.expires)
                if expires_day > 0:
                    # The data has an expiry: compute the expiration date, then drop the matching partitions.
                    # Add one day: everything within expires must be kept, everything beyond it is dropped.
                    delete_date = util.get_date_by_diff(-(expires_day + 1))
                    drop_partitions = []
                    for partition in partitions:
                        if int(partition) <= int(delete_date):
                            drop_partitions.append(f"p{partition}")
                    if drop_partitions:
                        drop_sql = DROP_PARTITION % (physical_tn, ", ".join(drop_partitions))
                        get_sql_result(mysql_conn, drop_sql)
            else:
                maintain_failed_rts.append(rt_storage.result_table_id)
        except Exception:
            maintain_failed_rts.append(rt_storage.result_table_id)

    for conn in list(mysql_conns.values()):
        conn.close()

    # Log the total number of rts processed and the list of rts that failed.
    logger.info(
        f"{count} mysql rts maintain takes {int(time.time() - start_time_ts)}(s), "
        f"failed are {json.dumps(maintain_failed_rts)}"
    )
12,063
def get_letters_df(letters_dict_pickle): """Get the letters Pandas Dataframe Parameters ---------- letters_dict_pickle: string Path to the dict with the letters text Returns ------- Pandas DataFrame Pandas DataFrame with a columns with the tokens """ with open(letters_dict_pickle, 'rb') as handle: letters_dict = pickle.load(handle) letters_df = pd.DataFrame(letters_dict, index=[const.LETTER_TEXT]).T letters_df[const.TOKENIZED] = letters_df[const.LETTER_TEXT].apply(tokenize) return letters_df
12,064
def lorenzmod1(XYZ, t, a=0.1, b=4, dz=14, d=0.08): """ The Lorenz Mod 1 Attractor. x0 = (0,1,0) """ x, y, z = XYZ x_dt = -a * x + y**2 - z**2 + a * dz y_dt = x * (y - b * z) + d z_dt = -z + x * (b * y + z) return x_dt, y_dt, z_dt
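# Hedged usage sketch (not from the source): integrating the attractor with
# scipy's odeint, whose (state, t, *args) callback convention matches the
# signature above.
import numpy as np
from scipy.integrate import odeint

t = np.linspace(0, 100, 10000)
trajectory = odeint(lorenzmod1, (0.0, 1.0, 0.0), t)  # array of shape (10000, 3)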
12,065
def binomial_p(x, n, p0, reps=10**5, alternative='greater', keep_dist=False, seed=None):
    """
    Parameters
    ----------
    x : int
        observed number of successes in n trials
    n : int
        number of trials
    p0 : float
        hypothesized probability of success
    reps : int
        number of repetitions (default: 10**5)
    alternative : {'greater', 'less', 'two-sided'}
        alternative hypothesis to test (default: 'greater')
    keep_dist : boolean
        flag for whether to store and return the array of values
        of the test statistics (default: False)
    seed : RandomState instance or {None, int, RandomState instance}
        If None, the pseudorandom number generator is the RandomState
        instance used by `np.random`;
        If int, seed is the seed used by the random number generator;
        If RandomState instance, seed is the pseudorandom number generator

    Returns
    -------
    float
        estimated p-value
    float
        test statistic
    list
        distribution of test statistics (only if keep_dist == True)
    """
    if n < x:
        raise ValueError("Cannot observe more successes than the number of trials")
    prng = get_prng(seed)

    def generate():
        return prng.binomial(n, p0, 1)[0]

    if keep_dist:
        permutations = np.empty(reps)
        for i in range(reps):
            permutations[i] = generate()
        if alternative == 'two-sided':
            hits_up = np.sum(permutations >= x)
            hits_low = np.sum(permutations <= x)
            p_value = 2*np.min([hits_up/reps, hits_low/reps, 0.5])
        elif alternative == 'greater':
            p_value = np.mean(permutations >= x)
        else:
            p_value = np.mean(permutations <= x)
        return p_value, x, permutations
    else:
        hits_up = 0
        hits_low = 0
        for i in range(reps):
            ts = generate()
            hits_up += (ts >= x)
            hits_low += (ts <= x)
        if alternative == 'two-sided':
            p_value = 2*np.min([hits_up/reps, hits_low/reps, 0.5])
        elif alternative == 'greater':
            p_value = hits_up/reps
        else:
            p_value = hits_low/reps
        return p_value, x
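# Hedged usage sketch (not from the source): testing whether 60 successes in 100
# trials are consistent with a hypothesized success probability of 0.5.
p_value, observed, dist = binomial_p(60, 100, 0.5, reps=10**4,
                                     alternative='greater', keep_dist=True, seed=42)
# p_value is the simulated upper-tail probability, observed == 60,
# and dist holds the 10**4 simulated success counts.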
12,066
def test_docs_by_author1(flask_client, user4): """docs_by_author() displays appropriate docs if user logged in.""" response = flask_client.get("/all/author/2", follow_redirects=True) assert ( b"First user doc" in response.data and b"Third user doc" in response.data and b"Second user doc" not in response.data and b"Fourth user doc" not in response.data )
12,067
def is_guild_owner() -> commands.check:
    """
    Returns True under the following conditions:
    - **ctx.author** is the owner of the guild where this command was called from
    """
    def predicate(ctx):
        if ctx.guild is None:
            raise commands.NoPrivateMessage('This command can only be used in a server.')
        author: Member = ctx.author
        if author.id != ctx.guild.owner_id:
            # Reject anyone who is not the guild owner.
            raise commands.CheckFailure('This command can only be run by the owner of this guild.')
        return True
    return commands.check(predicate)
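# Hedged usage sketch (not from the source): wiring the check into a discord.py
# command. The bot instance, prefix, intents, and command name are illustrative
# assumptions.
import discord
from discord.ext import commands

bot = commands.Bot(command_prefix='!', intents=discord.Intents.default())

@bot.command(name='shutdown')
@is_guild_owner()
async def shutdown(ctx):
    await ctx.send('Shutting down...')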
12,068
def round_grade(grade: int) -> int: """ Round the grade according to policy. Parameters ---------- grade: int Raw grade. Returns ------- rounded_grade: int Rounded grade. """ if grade < 38: rounded_grade = grade else: closest_multiple_5 = (grade // 5 + 1) * 5 if (closest_multiple_5 - grade) >= 3: rounded_grade = grade else: rounded_grade = closest_multiple_5 return rounded_grade
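# Hedged illustration (not from the source): a few grades under the rounding policy.
assert round_grade(33) == 33  # below 38: never rounded
assert round_grade(38) == 40  # next multiple of 5 is only 2 away, so round up
assert round_grade(41) == 41  # next multiple of 5 (45) is 4 away, so keep
assert round_grade(73) == 75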
12,069
def patch_indecies(i_max: int, j_max: int, ps: int, pstr: int):
    """
    Given the sizes i_max and j_max of an image, extract the top-left corner pixel
    locations of all patches of size (ps, ps) that are "pstr" pixels apart.
    If pstr < ps, the patches overlap.
    Input: i_max, j_max - int, sizes of the image
           ps - int, patch size
           pstr - int, patch stride
    Output: idx - int, array of [total_num_patches, 2], pixel locations
    """
    idx = []
    for i in range(0, i_max - ps + 1, pstr):
        for j in range(0, j_max - ps + 1, pstr):
            idx.append([i, j])
    return tf.convert_to_tensor(idx)
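# Hedged usage sketch (not from the source): top-left corners of 8x8 patches
# taken every 4 pixels from a 16x16 image.
corners = patch_indecies(16, 16, ps=8, pstr=4)
# corners has shape (9, 2); the first rows are [0, 0], [0, 4], [0, 8], [4, 0], ...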
12,070
def model_fn():
    """
    Defines a convolutional neural network for steering prediction.
    """
    model = Sequential()
    # Input layer and normalization
    model.add(InputLayer(input_shape=(20, 80, 1)))
    model.add(Lambda(lambda x: (x / 255.0) - 0.5))
    # Convolutional layer 1
    model.add(Conv2D(filters=48, kernel_size=(3,3), strides=(1,1), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2,2)))
    # Convolutional layer 2
    model.add(Conv2D(filters=64, kernel_size=(3,3), strides=(1,1), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2,2)))
    # Dropout for regularization.
    model.add(Dropout(0.2))
    # Fully connected layer
    model.add(Flatten())
    model.add(Dense(100))
    # Predicted steering angle
    model.add(Dense(1))
    print(model.summary())
    return model
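# Hedged usage sketch (not from the source): compiling and fitting the model on
# dummy 20x80 grayscale crops; real training data would replace the random arrays.
import numpy as np

model = model_fn()
model.compile(optimizer='adam', loss='mse')
X_dummy = np.random.randint(0, 256, size=(32, 20, 80, 1)).astype('float32')
y_dummy = np.random.uniform(-1.0, 1.0, size=(32, 1))
model.fit(X_dummy, y_dummy, epochs=1, batch_size=8)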
12,071
def authenticate(request): """Return the user model instance associated with the given request If no user is retrieved, return an instance of `AnonymousUser` """ token, _ = get_token_from_request(request) jwt_info = { 'token': token, 'case': TokenCase.OK, 'payload': None, } if not token: jwt_info['case'] = TokenCase.NO_TOKEN return get_user(), jwt_info try: payload = decode(token) user_pk = payload[JWT_CONFIG.user_pk_key] return get_user(user_pk=user_pk), jwt_info except jwt.ExpiredSignatureError: jwt_info['case'] = TokenCase.EXPIRED except jwt.DecodeError: jwt_info['case'] = TokenCase.DECODE_ERROR except jwt.InvalidTokenError: jwt_info['case'] = TokenCase.INVALID_TOKEN except KeyError: jwt_info['case'] = TokenCase.MISSING_KEY return get_user(), jwt_info
12,072
def test_get_settings(config):
    """ Test function `get_settings` to get application settings. """
    settings = utils.get_settings()
    # Settings should be a non-empty dictionary
    assert isinstance(settings, dict)
    assert len(settings) > 0
12,073
def _urpc_test_func_2(buf): """! @brief u-RPC variable length data test function. @param buf A byte string buffer @return The same byte string repeated three times """ return buf*3
12,074
def test_normal(): """Test divide_sizes.""" # 8 / 2 res = divide_sizes(8, 2) assert res == [4, 4] # 8 / 3 res = divide_sizes(8, 3) assert res == [3, 3, 2] # 7 / 3 res = divide_sizes(7, 3) assert res == [3, 2, 2] # 1 / 3 res = divide_sizes(1, 3) assert res == [1, 0, 0] # 0 / 3 res = divide_sizes(0, 3) assert res == [0, 0, 0] # 3 / 0 res = divide_sizes(3, 0) assert res == []
12,075
def main():
    """
    Use Netmiko to connect to each of the devices. Execute
    'show version' on each device. Record the amount of time
    required to do this.
    """
    start_time = datetime.now()
    for device in devices:
        print()
        print('#' * 40)
        output = show_version(device)
        print(output)
        print()
        print('#' * 40)
    print("\nElapsed time: " + str(datetime.now() - start_time))
    return None
12,076
def infer_data_type(data_container: Iterable): """ For a given container of data, infer the type of data as one of continuous, categorical, or ordinal. For now, it is a one-to-one mapping as such: - str: categorical - int: ordinal - float: continuous There may be better ways that are not currently implemented below. For example, with a list of numbers, we can check whether the number of unique entries is less than or equal to 12, but has over 10000+ entries. This would be a good candidate for floats being categorical. :param data_container: A generic container of data points. :type data_container: `iterable` """ warnings.warn( "`infer_data_type` is deprecated! " "Please use `infer_data_family` instead!" ) # Defensive programming checks. # 0. Ensure that we are dealing with lists or tuples, and nothing else. assert isinstance(data_container, list) or isinstance( data_container, tuple ), "data_container should be a list or tuple." # 1. Don't want to deal with only single values. assert ( len(set(data_container)) > 1 ), "There should be more than one value in the data container." # 2. Don't want to deal with mixed data. assert is_data_homogenous(data_container), "Data are not of a homogenous type!" # Once we check that the data type of the container is homogenous, we only # need to check the first element in the data container for its type. datum = data_container[0] # Return statements below # treat binomial data as categorical # TODO: make tests for this. if len(set(data_container)) == 2: return "categorical" elif isinstance(datum, str): return "categorical" elif isinstance(datum, int): return "ordinal" elif isinstance(datum, float): return "continuous" else: raise ValueError("Not possible to tell what the data type is.")
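# Hedged illustration (not from the source): the one-to-one mapping described in
# the docstring (a DeprecationWarning is emitted on each call).
assert infer_data_type(['a', 'b', 'c']) == 'categorical'
assert infer_data_type([1, 2, 3]) == 'ordinal'
assert infer_data_type([0.1, 0.5, 0.9]) == 'continuous'
assert infer_data_type([0, 1, 0, 1]) == 'categorical'  # binary data is treated as categorical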
12,077
def test_loader(): """Test ChunkRequest and the ChunkLoader.""" if not config.async_loading: return # temporary until we add the @async_only pytest mark layer = _create_layer() layer_key = LayerKey.from_layer(layer, (0, 0)) key = ChunkKey(layer_key) shape = (64, 32) transpose_shape = (32, 64) # Just load one array. data = np.random.random(shape) chunks = {'image': data} # Give data2 different data. data2 = data * 2 # Create the ChunkRequest. layer_ref = LayerRef.create_from_layer(layer, (0, 0)) request = chunk_loader.create_request(layer_ref, key, chunks) # Should be compatible with the layer we made it from! # assert request.is_compatible(layer) # Load the ChunkRequest. request, _future = chunk_loader.load_chunk(request) # Data should only match data not data2. assert np.all(data == request.image.data) assert not np.all(data2 == request.image.data) # request.image is just short-hand for request.chunks['image'] assert np.all(request.image.data == request.chunks['image'].data) # Since we didn't ask for a thumbnail_source it should be the image. assert np.all(request.thumbnail_source.data == request.image.data) # KeyError for chunks that do not exist. with pytest.raises(KeyError): request.chunks['missing_chunk_name'] # Test transpose_chunks() request.transpose_chunks((1, 0)) assert request.image.shape == transpose_shape
12,078
def test_automatic_label_suffix(setup):
    """ Test %...% placeholder replacement with a single field set - error situation.
    Field email_1 should not be blank; the error is induced merely to assert that
    the field name is substituted correctly in the message.
    """
    post = deepcopy(setup)
    post.add(u'email_1', '')
    dynamic_form = WTFormsDynamicFields()
    dynamic_form.add_field('email','Email', TextField)
    dynamic_form.add_validator('email', InputRequired, message='Please fill in %email%.')
    form = dynamic_form.process(SimpleForm, post)
    form.validate()
    assert form.errors['email_1'] == ['Please fill in email_1.']
    assert form.email_1() == '<input id="email_1" name="email_1" type="text" value="">'
12,079
def Preprocess( src: str, cflags: typing.List[str], timeout_seconds: int = 60, strip_preprocessor_lines: bool = True, ): """Run input code through the compiler frontend to inline macros. This uses the repository clang binary. Args: src: The source code to preprocess. cflags: A list of flags to be passed to clang. timeout_seconds: The number of seconds to allow before killing clang. strip_preprocessor_lines: Whether to strip the extra lines introduced by the preprocessor. Returns: The preprocessed code. Raises: ClangException: In case of an error. ClangTimeout: If clang does not complete before timeout_seconds. """ cmd = [ "timeout", "-s9", str(timeout_seconds), str(CLANG), "-E", "-c", "-", "-o", "-", ] + cflags app.Log(2, "$ %s", " ".join(cmd)) process = subprocess.Popen( cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, ) stdout, stderr = process.communicate(src) if process.returncode == 9: raise errors.ClangTimeout( f"Clang preprocessor timed out after {timeout_seconds}s" ) elif process.returncode != 0: raise errors.ClangException(stderr) if strip_preprocessor_lines: return StripPreprocessorLines(stdout) else: return stdout
12,080
def mmgen2torchserver(config_file: str, checkpoint_file: str, output_folder: str, model_name: str, model_version: str = '1.0', model_type: str = 'unconditional', force: bool = False): """Converts MMGeneration model (config + checkpoint) to TorchServe `.mar`. Args: config_file (str): Path of config file. The config should in MMGeneration format. checkpoint_file (str): Path of checkpoint. The checkpoint should in MMGeneration checkpoint format. output_folder (str): Folder where `{model_name}.mar` will be created. The file created will be in TorchServe archive format. model_name (str): Name of the generated ``'mar'`` file. If not None, used for naming the `{model_name}.mar` file that will be created under `output_folder`. If None, `{Path(checkpoint_file).stem}` will be used. model_version (str, optional): Model's version. Defaults to '1.0'. model_type (str, optional): Type of the model to be convert. Handler named ``{model_type}_handler`` would be used to generate ``mar`` file. Defaults to 'unconditional'. force (bool, optional): If True, existing `{model_name}.mar` will be overwritten. Default to False. """ mmcv.mkdir_or_exist(output_folder) config = mmcv.Config.fromfile(config_file) with TemporaryDirectory() as tmpdir: config.dump(f'{tmpdir}/config.py') args = Namespace( **{ 'model_file': f'{tmpdir}/config.py', 'serialized_file': checkpoint_file, 'handler': f'{Path(__file__).parent}/mmgen_{model_type}_handler.py', 'model_name': model_name or Path(checkpoint_file).stem, 'version': model_version, 'export_path': output_folder, 'force': force, 'requirements_file': None, 'extra_files': None, 'runtime': 'python', 'archive_format': 'default' }) manifest = ModelExportUtils.generate_manifest_json(args) package_model(args, manifest)
12,081
def versionString(version): """Create version string.""" ver = [str(v) for v in version] numbers, rest = ver[:2 if ver[2] == '0' else 3], ver[3:] return '.'.join(numbers) + '-'.join(rest)
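# Hedged illustration (not from the source): the patch component is dropped when
# it is zero, and only the leading components are dot-joined.
assert versionString((1, 2, 0)) == '1.2'
assert versionString((1, 2, 3)) == '1.2.3'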
12,082
def any(iterable, pred): """Returns True if ANY element in the given iterable is True for the given pred function""" warnings.warn( "pipe.any is deprecated, use the builtin any(...) instead.", DeprecationWarning, stacklevel=4, ) return builtins.any(pred(x) for x in iterable)
12,083
def loadRatings(ratingstablename, ratingsfilepath, openconnection): """ Inserting Ratings.dat into the Database """ with openconnection.cursor() as cursor: sqlDropCommand = "DROP TABLE IF EXISTS {}".format(ratingstablename) sqlCreateCommand = ''' CREATE TABLE {} ( userid INT NOT NULL, movieid INT, rating NUMERIC(2,1), PRIMARY KEY(userid, movieid, rating))'''.format(ratingstablename) cursor.execute(sqlDropCommand) cursor.execute(sqlCreateCommand) ratingFile = open(ratingsfilepath, "r") lines = ratingFile.readlines() i = 0 for line in lines: fields = line.split("::") if (i == 20): break sqlInsertCommand = "INSERT INTO " + ratingstablename + "(userid, movieid, rating) VALUES ({}, {}, {})".format(str(fields[0]), str(fields[1]), str(fields[2])) cursor.execute(sqlInsertCommand) i += 1 ratingFile.close() cursor.close()
12,084
def main(): """ Main entry of program. Set arguments for argument parser, loads the settings and searches current directory for compilable tex files. At the end prints help or calls the task selector function to perform one of the tasks. """ parser = ap.ArgumentParser() root_logger = logging.getLogger() root_logger.handlers.append(logging.StreamHandler(sys.stdout)) config = load_settings() if not config.log_to_console: logging.getLogger().removeHandler(logging.getLogger().handlers[1]) compilable_tex_files = util.find_compilable_tex(".") if config.main_tex not in compilable_tex_files: logger.warning("Configures main tex file is not detected as compilable.") compilable_tex_files_names = [os.path.splitext(f)[0] for f in compilable_tex_files] if len(compilable_tex_files_names) == 0: compilable_tex_files_names.append(NO_TEX_FOUND_WARNING) # Reusable arguments parent_with_tex_file = ap.ArgumentParser(add_help=False) parent_with_tex_file.add_argument("--maintex", type=lambda x: ArgumentParserExtensions.is_valid_file(x, parent_with_tex_file), default=config.main_tex, help="Specify main tex file") parent_with_bib = ap.ArgumentParser(add_help=False) parent_with_bib.add_argument("-b", "--bib", action="store_const", const=True, default=False, help="Compile bibliography.") parent_force_action = ap.ArgumentParser(add_help=False) parent_force_action.add_argument("-f", action="store_true", help="Force action " + "(do not ask for confirmation when overwriting or deleting files).") verbose_print = ap.ArgumentParser(add_help=False) verbose_print.add_argument('-v', '--verbose', action="store_const", const=True, default=False, help="Activate verbose mode.") parent_number_ignore_latex_error = ap.ArgumentParser(add_help=False) parent_number_ignore_latex_error.add_argument('-n', '--number', type=int, default=config.number_ignore_latex_compile_errors, help="The number of attempts to ignore latex errors.") parent_double_translation = ap.ArgumentParser(add_help=False) parent_double_translation.add_argument("-d", "--double", action="store_const", const=True, default=False, help="Perform two tex typesetting compilations.") # Parsers for tasks task_parsers = parser.add_subparsers(dest='task') # Make config file # write_conf_parser = task_parsers.add_parser('conf', parents=[parent_force_action], help=f"Create configuration file ({CONF_FILE}).") # Language check lang_check_parser = task_parsers.add_parser('lang', help='Search for language errors.') lang_check_parser.add_argument("file", type=lambda x: ArgumentParserExtensions.is_valid_file(x, lang_check_parser), help="File to check for language errors.") error_identifiers = config.language_errors.identifiers if len(error_identifiers) > 0 and len(error_identifiers) != len(set(error_identifiers)): logging.warning("Multiple text errors with the same identifier.") for one_error_identifier in set(error_identifiers): associated_error_message = config.language_errors.get_error_for_identifier(one_error_identifier).error_message help_message = f"Exclude error: {associated_error_message}." 
lang_check_parser.add_argument(f"--{one_error_identifier.strip()}", action="store_const", const=True, default=False, help=help_message) # Free make pdf # make_parser = task_parsers.add_parser('cmake', parents=[parent_with_tex_file, parent_with_bib, verbose_print, parent_number_ignore_latex_error, parent_double_translation], help='Translate document.') # Make pdf make_compile_parser = task_parsers.add_parser('make', parents=[parent_with_bib, verbose_print, parent_number_ignore_latex_error, parent_double_translation], help='Translate document.') make_compile_parser.add_argument("texfile", nargs="?", default=filename_stem(config.main_tex), choices=compilable_tex_files_names, help="File to compile.") make_compile_parser.add_argument("--all", action="store_const", const=True, default=False, help="Translate all independent tex files in the directory.") make_compile_parser.add_argument("-l", "--listStatus", action="store_const", const=True, default=False, help="Produce a list of all compile outcomes.") # Clean clean_parser = task_parsers.add_parser('clean', parents=[parent_force_action], help='Cleans the directory.') clean_parser.add_argument("-d", type=int, default=config.clean_depth, help="Set the maximal depth into sub-folders for looking for files to delete.") clean_parser.add_argument("-l", action="store_const", const=True, default=False, help="Only list files and do not delete.") # Make bibliography file make_bib_file = task_parsers.add_parser('bibfile', parents=[parent_force_action], help='Create bib file from citations in aux file.') make_bib_file.add_argument("-a", "--auxFile", type=lambda x: ArgumentParserExtensions.is_valid_file(x, make_bib_file), default=config.main_aux, help="The aux file.") make_bib_file.add_argument("-b", "--bibsFolder", type=lambda x: ArgumentParserExtensions.is_valid_directory(x, make_bib_file), default=config.bibs_folder, help="The path to the folder containing the bib files to search for entries.") make_bib_file.add_argument("-o", "--output", type=str, default=config.main_bibliography_file, help="The file to write the bibliography entries into.") # Git help git_parser = task_parsers.add_parser("git", help="Help with git related operations.") git_action_parsers = git_parser.add_subparsers(dest='action') git_action_parsers.add_parser("ignore", help="Add default clean extensions to gitignore.") credentials_parser = git_action_parsers.add_parser("credentialUsername", help="Add credential username to local git.") credentials_parser.add_argument("username", type=str, help="The username to use.") argcomplete.autocomplete(parser) args = parser.parse_args() args = vars(args) if args["task"] is not None: select_task(args, compilable_tex_files) else: parser.print_help()
12,085
def get_parser():
    """
    Parser for the standalone Nuth & Kaab main.
    TODO: to clean up together with main. Keep a standalone main?
    """
    parser = argparse.ArgumentParser(
        os.path.basename(__file__),
        description="Universal co-registration method "
        "presented in Nuth & Kaab 2011."
        "NB : 1) It is supposed that both dsms share common reference"
        " (whether it is geoid or ellipsoid)."
        " 2) DSMs must be georeferenced.",
    )
    parser.add_argument("dsm_to", type=str, help="master dsm")
    parser.add_argument(
        "dsm_from", type=str, help="slave dsm you wish to coregister to dsm_to"
    )
    parser.add_argument(
        "-outfile", action="store_true", help="saves output coregistered DSM"
    )
    parser.add_argument(
        "-nb_iters",
        dest="nb_iters",
        type=int,
        default=6,
        help="number of iterations",
    )
    parser.add_argument(
        "-dirplot",
        dest="plot",
        type=str,
        default=None,
        help="path to output plot directory. "
        "Plots are printed if set to None (default)",
    )
    parser.add_argument(
        "-nodata1",
        dest="nodata1",
        type=str,
        default=None,
        help="no data value for DSM to compare "
        "(default value is read in metadata)",
    )
    parser.add_argument(
        "-nodata2",
        dest="nodata2",
        type=str,
        default=None,
        help="no data value for Reference DSM "
        "(default value is read in metadata)",
    )
    parser.add_argument(
        "-save_diff",
        action="store_true",
        help="store on file system a ./initial_dh.tiff and a ./final_dh.tiff "
        "with dsms differences before and after coregistration",
    )
    return parser
12,086
def build_prev_df_n( dispositions) -> pd.DataFrame: """Build admissions dataframe from Parameters.""" days = np.array(range(0, n_days)) data_dict = dict( zip( ["day", "hosp", "icu", "vent"], [days] + [disposition for disposition in dispositions], ) ) projection = pd.DataFrame.from_dict(data_dict) # New cases projection_admits = projection.iloc[:-1, :] - projection.shift(1) projection_admits["day"] = range(projection_admits.shape[0]) projection_admits.loc[0,'hosp'] = 25 return projection_admits
12,087
def main() -> VDOMNode: """Main entry point.""" vdom = html("<{Heading} />") return vdom
12,088
def GT(x=None, y=None): """ Compares two values and returns: true when the first value is greater than the second value. false when the first value is less than or equivalent to the second value. See https://docs.mongodb.com/manual/reference/operator/aggregation/gt/ for more details :param x: first value or expression :param y: second value or expression :return: Aggregation operator """ if x is None and y is None: return {'$gt': []} return {'$gt': [x, y]}
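# Hedged illustration (not from the source): the aggregation operator produced
# for a field/value comparison; '$qty' is a hypothetical field reference.
assert GT('$qty', 250) == {'$gt': ['$qty', 250]}
assert GT() == {'$gt': []}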
12,089
def pymongo_formatter(credentials): """Returns a DSN for a pymongo-MongoDB connection. Note that the username and password will still be needed separately in the constructor. Args: credentials (dict): The credentials dictionary from the relationships. Returns: (string) A formatted pymongo DSN. """ return '{0}:{1}/{2}'.format( credentials['host'], credentials['port'], credentials['path'] )
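# Hedged illustration (not from the source): the host, port and path values are
# made up for the example.
creds = {'host': 'mongodb.internal', 'port': 27017, 'path': 'main'}
assert pymongo_formatter(creds) == 'mongodb.internal:27017/main'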
12,090
def test_faulty_tessellation(): """Pass bad arguments to the tessellation.""" with pytest.raises(pqca.exceptions.IrregularCellSize): pqca.tessellation.Tessellation([[0], [1, 2]]) with pytest.raises(pqca.exceptions.PartitionUnevenlyCoversQubits): pqca.tessellation.Tessellation([[0], [0]]) with pytest.raises(pqca.exceptions.EmptyCellException): pqca.tessellation.Tessellation([[]]) with pytest.raises(pqca.exceptions.NoCellsException): pqca.tessellation.Tessellation([])
12,091
def sigma_disp_over_vcirc(gal, R=None): """The velocity dispersion over circular velocity computed at R=x*Rs [km/s]. Isotropic NFW is assumed. :param R: radius [kpc] :param gal: galaxy object """ # get Rs (rho, rs, c) = reconstruct_density_DM(gal, DM_profile='NFW') # make array of r, preferably with gal.R if R is None: x_arr = np.array(gal.R / rs) ratio_arr = sigma_over_vcirc(x_arr) else: R, is_scalar = tl.treat_as_arr(R) x_arr = np.array(R / rs) ratio_arr = sigma_over_vcirc(x_arr) if is_scalar: ratio_arr = np.squeeze(ratio_arr) return ratio_arr
12,092
def repack_model( inference_script, source_directory, dependencies, model_uri, repacked_model_uri, sagemaker_session, kms_key=None, ): """Unpack model tarball and creates a new model tarball with the provided code script. This function does the following: - uncompresses model tarball from S3 or local system into a temp folder - replaces the inference code from the model with the new code provided - compresses the new model tarball and saves it in S3 or local file system Args: inference_script (str): path or basename of the inference script that will be packed into the model source_directory (str): path including all the files that will be packed into the model dependencies (list[str]): A list of paths to directories (absolute or relative) with any additional libraries that will be exported to the container (default: []). The library folders will be copied to SageMaker in the same folder where the entrypoint is copied. Example The following call >>> Estimator(entry_point='train.py', dependencies=['my/libs/common', 'virtual-env']) results in the following inside the container: >>> $ ls >>> opt/ml/code >>> |------ train.py >>> |------ common >>> |------ virtual-env model_uri (str): S3 or file system location of the original model tar repacked_model_uri (str): path or file system location where the new model will be saved sagemaker_session (sagemaker.session.Session): a sagemaker session to interact with S3. kms_key (str): KMS key ARN for encrypting the repacked model file Returns: str: path to the new packed model """ dependencies = dependencies or [] with _tmpdir() as tmp: model_dir = _extract_model(model_uri, sagemaker_session, tmp) _create_or_update_code_dir( model_dir, inference_script, source_directory, dependencies, sagemaker_session, tmp ) tmp_model_path = os.path.join(tmp, "temp-model.tar.gz") with tarfile.open(tmp_model_path, mode="w:gz") as t: t.add(model_dir, arcname=os.path.sep) _save_model(repacked_model_uri, tmp_model_path, sagemaker_session, kms_key=kms_key)
12,093
def filters(param: str, default_value: str, base_key: str, key_manager: KeyManager) -> list: """Filter combo box selector for parameter""" update_type = '|filters|' row = combo_row(param, default_value, base_key, key_manager, update_type) return row
12,094
def get_scanner(fs_id): """ get scanner 3T or 1.5T""" sc = fs_id.split("_")[2] if sc in ("15T", "1.5T", "15t", "1.5t"): scanner = "15T" elif sc in ("3T", "3t"): scanner = "3T" else: print("scanner for subject " + fs_id + " cannot be identified as either 1.5T or 3T...") print("Please double check the IDs in the list of subjects") scanner = "false" return scanner
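# Hedged illustration (not from the source): subject IDs with the scanner field
# in the third underscore-separated position; the IDs themselves are made up.
assert get_scanner('STUDY_0001_3T_baseline') == '3T'
assert get_scanner('STUDY_0002_15T_baseline') == '15T'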
12,095
def _find_query_rank(similarities, library_keys, query_keys): """tf.py_func wrapper around _find_query_rank_helper. Args: similarities: [batch_size, num_library_elements] float Tensor. These are not assumed to be sorted in any way. library_keys: [num_library_elements] string Tensor, where each column j of similarities corresponds to library_key j. query_keys: [num_queries] string Tensor Returns: query_ranks: a dictionary with keys 'highest', 'lowest' and 'avg', where each value is a [batch_size] Tensor. The 'lowest' Tensor contains for each batch the lowest index of a library key that matches the query key for that batch element when the library keys are sorted in descending order by similarity score. The 'highest' and 'avg' Tensors are defined similarly. The first two are tf.int32 and the final is a tf.float32. Note that the behavior of these metrics is undefined when there are ties within a row of similarities. best_query_similarities: the value of the similarities evaluated at the lowest query rank. """ (highest_rank, lowest_rank, avg_rank, best_query_similarities) = tf.py_func( _find_query_rank_helper, [similarities, library_keys, query_keys], (tf.int32, tf.int32, tf.float32, tf.float32), stateful=False) query_ranks = { 'highest': highest_rank, 'lowest': lowest_rank, 'avg': avg_rank } return query_ranks, best_query_similarities
12,096
def random_adjust_brightness(image, max_delta=0.2, seed=None): """Randomly adjusts brightness. """ delta = tf.random_uniform([], -max_delta, max_delta, seed=seed) image = tf.image.adjust_brightness(image / 255, delta) * 255 image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=255.0) return image
12,097
def get_assign_ops_and_restore_dict(filename, restore_all=False): """Helper function to read variable checkpoints from filename. Iterates through all vars in restore_all=False else all trainable vars. It attempts to match variables by name and variable shape. Returns a possibly empty list of assign_ops, and a possibly empty dictionary for tf.train.Saver() """ def check_name_and_shape(name, var, shape_map): if name in shape_map: # Cannot check variables with unknown sizes such as cudnn rnns if str(var.shape) == "<unknown>": # Just return True and hope the shapes match return True if var.shape == shape_map[name]: return True return False assign_ops = [] restore_dict = {} try: reader = tf.train.NewCheckpointReader(filename) var_to_shape_map = reader.get_variable_to_shape_map() variables = tf.trainable_variables() if restore_all: variables = tf.get_collection(tf.GraphKeys.VARIABLES) for var in variables: idx = var.name.find(":") if idx != -1: true_name = var.name[:idx] loss_idx = re.search("Loss_Optimization", true_name) if 'EmbeddingMatrix' in true_name: embed_restore, assign = _restore_embed(var, var_to_shape_map, reader) if assign: assign_ops.append(embed_restore) else: restore_dict[true_name] = embed_restore if check_name_and_shape(true_name, var, var_to_shape_map): tensor = reader.get_tensor(true_name) if tensor.dtype != var.dtype.as_numpy_dtype(): assign_ops.append(var.assign(tf.cast(tensor, var.dtype))) else: restore_dict[true_name] = var elif loss_idx: loss_idx = loss_idx.end() if FP32_TEST.search(true_name): true_name = FP32_TEST.sub("", true_name) else: true_name = (true_name[:loss_idx] + "/Loss_Optimization/FP32-master-copy" + true_name[loss_idx:]) if check_name_and_shape(true_name, var, var_to_shape_map): tensor = reader.get_tensor(true_name) if tensor.dtype != var.dtype.as_numpy_dtype(): assign_ops.append(var.assign(tf.cast(tensor, var.dtype))) else: restore_dict[true_name] = var else: print("Not restoring {}".format(var.name)) if true_name not in var_to_shape_map: print("true name [{}] was not in shape map".format(true_name)) else: if var.shape != var_to_shape_map[true_name]: print(("var.shape [{}] does not match var_to_shape_map[true_name]" "[{}]").format(var.shape, var_to_shape_map[true_name])) print("WARNING: Run will mostly error out due to this") except Exception as e: # pylint: disable=broad-except print(str(e)) if "corrupted compressed block contents" in str(e): print("It's likely that your checkpoint file has been compressed " "with SNAPPY.") if ("Data loss" in str(e) and (any([e in filename for e in [".index", ".meta", ".data"]]))): proposed_file = ".".join(filename.split(".")[0:-1]) v2_file_error_template = """ It's likely that this is a V2 checkpoint and you need to provide the filename *prefix*. Try removing the '.' and extension. Try: inspect checkpoint --file_name = {}""" print(v2_file_error_template.format(proposed_file)) raise ValueError("Error in loading checkpoint") return assign_ops, restore_dict
12,098
def log_creations(model, **extra_kwargs_for_emit): """ Sets up signal handlers so that whenever an instance of `model` is created, an Entry will be emitted. Any further keyword arguments will be passed to the constructor of Entry as-is. As a special case, if you specify the sentinel value `INSTANCE` as the value of a keyword argument, the newly created instance of `model` will be passed instead. If the value of the keyword argument is a function, it will be called with the newly created instance to determine the value of the keyword argument to the Entry constructor. For examples on usage, see `feedback/handlers/feedback_message.py`. """ meta = model._meta entry_type_name = '{app_label}.{model_name}.created'.format( app_label=meta.app_label, model_name=meta.model_name, ) @receiver(post_save, sender=model, weak=False) def on_save_emit_event_log_entry(sender, instance, created, **kwargs): if not created: return kwargs_for_emit = dict() for key, value in extra_kwargs_for_emit.items(): if value is INSTANCE: value = instance elif callable(value): value = value(instance) kwargs_for_emit[key] = value emit(entry_type_name, **kwargs_for_emit) return on_save_emit_event_log_entry
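# Hedged usage sketch (not from the source): Message and its fields are
# hypothetical; INSTANCE forwards the newly created object, and a callable
# derives a value from it.
on_message_created = log_creations(
    Message,
    message=INSTANCE,
    author=lambda message: message.author,
    context='messages',
)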
12,099