content
stringlengths
22
815k
id
int64
0
4.91M
def compute_embeddings(image):
    """Mock stand-in for a call to a deep-learning model or web service.

    The input is ignored entirely; a fixed sentinel value is returned so
    callers can be exercised without the real embedding backend.
    """
    del image  # unused: this mock never inspects its input
    return 42
34,400
def dir(path: str) -> Path:
    """Get equivalent directory in cache.

    NOTE(review): this function shadows the builtin ``dir``; consider a
    rename when callers can be updated.

    :param path: a file or directory path; a path whose last component is
        non-empty is treated as a file, otherwise as a directory.
    :return: the cache directory corresponding to ``path``.
    """
    _, filename = os.path.split(path)
    if filename:
        # TODO fix; this won't work as intended
        # If file
        return __get_cache_filepath(path).parent
    else:
        # If directory: append a dummy 'tmp' component so the parent of the
        # mapped path is the cached directory itself.
        return __get_cache_filepath(os.path.join(path, 'tmp')).parent
34,401
def text_to_string(filename):
    """Return the entire contents of the text file *filename* as one string."""
    with open(filename) as handle:
        contents = handle.read()
    return contents
34,402
def lgsvlToScenicElevation(pos):
    """Convert LGSVL positions to Scenic elevations.

    The elevation is taken directly from the position's ``y`` component.
    """
    elevation = pos.y
    return elevation
34,403
def render_to_string(backend, filename, context):
    # type: (str, str, Dict) -> str
    """
    Render a template using the specified context

    :param backend: The backend for which the template is rendered
    :param filename: The template name
    :param context: The data to use when rendering the template
    :return: The rendered template as a string
    """
    # Prefer templates from the working directory; fall back to the installed
    # package's templates when the package is importable.
    template_directory = "./swagger_django_generator/templates/{}".format(backend)
    loaders = [jinja2.FileSystemLoader(template_directory)]
    try:
        import swagger_django_generator
        loaders.append(jinja2.PackageLoader("swagger_django_generator", "templates/{}".format(backend)))
    except ImportError:
        # Package not installed: only the filesystem loader is used.
        pass
    environment = jinja2.Environment(
        loader=jinja2.ChoiceLoader(loaders),
        trim_blocks=True,
        lstrip_blocks=True,
    )
    # Register the project's custom Jinja filters before rendering.
    environment.filters["clean_schema"] = clean_schema
    environment.filters["parse_array"] = parse_array
    environment.filters["capitalize_splitter"] = capitalize_splitter
    return environment.get_template(filename).render(context)
34,404
def work(commits, files, works, bug_commit):
    """Fill given files with random content and execute commits; plant a bug.

    For each of *commits* iterations, append between 1 and *works* random
    names to randomly chosen files from *files*, then call ``commit()``.
    During iteration *bug_commit* the marker ``!BUG!`` is written exactly
    once into one of the touched files.

    :param commits: number of commits to perform
    :param files: sequence of file paths to append to
    :param works: upper bound (inclusive) on writes per commit; must be >= 1
    :param bug_commit: 0-based commit index that receives the ``!BUG!`` marker
    """
    bug_committed = False
    for i in range(commits):
        for _ in range(randint(1, works)):
            filename = choice(files)
            # Context manager guarantees the handle is closed even if a
            # write fails (the original leaked handles on exceptions).
            with open(filename, 'a') as fh:
                fh.write(name())
                fh.write('\n')
                if not bug_committed and i == bug_commit:
                    fh.write('!BUG!\n')
                    bug_committed = True
        commit()
34,405
def customfield_by_name(self, name): """ Get the value of a customfield by name """ # Get all fields from Jira. This is expensive, so only do it once if not hasattr(self, '_fields'): response = self._session.get( self._base_url.format( server=self._options['server'], rest_path=self._options['rest_path'], rest_api_version=self._options['rest_api_version'], path='field', ), auth=self._session.auth, ) if response.status_code != 200: raise JIRAError(response.text) else: self._fields = response.json() for field in self._fields: if field.get('name') == name: break else: raise JIRAError('Could not find customfield') return getattr(self.fields, field.get('id'))
34,406
def download_images(imgs):
    """Save any images on page to local directory.

    :param imgs: iterable of parsed ``<img>`` tags (objects exposing
        ``.get('src')``), e.g. BeautifulSoup tags.
    :return: True when every image downloaded successfully, else False.
    """
    had_download_issue = False
    for img in imgs:
        image_url = 'https://projecteuler.net/{}'.format(img.get('src'))
        logger.info(f'downloading image {image_url}')
        # The final URL component is used as the local filename.
        image_name = Path(image_url).name
        image = get_the_response(image_url)
        if image:
            (LOCAL_IMAGES_DIR / image_name).write_bytes(image.content)
        else:
            # Keep going on failure; report the problem via the return value.
            had_download_issue = True
    return not had_download_issue
34,407
def create_data(arguments, event_log, preprocessor, cases_of_fold):
    """ Generates data to train and test/evaluate a model during hyper-parameter optimization (hpo) with Optuna.

    Parameters
    ----------
    arguments : Namespace
        Settings of the configuration parameters.
    event_log : list of dicts, where single dict represents a case
        pm4py.objects.log.log.EventLog object representing an event log.
    preprocessor : nap.preprocessor.Preprocessor
        Object to preprocess input data.
    cases_of_fold : list of dicts, where single dict represents a case
        Cases of the current fold.

    Notes
    -----
    Despite the original docstring's "Returns" section, this function returns
    nothing; it publishes its results through module-level globals
    (x_train/y_train/x_test/y_test and the parameter globals below), which
    the hpo objective apparently reads — confirm before refactoring.
    """
    global args
    global max_case_length
    global num_features
    global num_classes
    global iteration_cross_validation
    global x_train
    global y_train
    global x_test
    global y_test
    # init parameters
    args = arguments
    max_case_length = preprocessor.get_max_case_length(event_log)
    num_features = preprocessor.get_num_features(args)
    num_classes = preprocessor.get_num_outcome_classes()
    iteration_cross_validation = preprocessor.iteration_cross_validation
    # preprocess data
    train_indices, test_indices = train_test_split_for_hyperparameter_optimization(cases_of_fold)
    train_cases, test_cases = retrieve_train_test_instances(cases_of_fold, train_indices, test_indices)
    train_subseq_cases, test_subseq_cases = retrieve_train_test_subsequences(train_cases, test_cases, preprocessor)
    x_train = preprocessor.get_features_tensor(args, 'train', event_log, train_subseq_cases)
    y_train = preprocessor.get_labels_tensor(args, train_subseq_cases)
    # NOTE(review): mode 'train' is also passed when building the *test*
    # features — verify this is intended rather than a copy-paste slip.
    x_test = preprocessor.get_features_tensor(args, 'train', event_log, test_subseq_cases)
    y_test = preprocessor.get_labels_tensor(args, test_subseq_cases)
34,408
def admin_order_pdf(request, order_id):
    """Render an order as a PDF HTTP response.

    1. Get data (and templates for displaying data)
    2. Set type (cuz you'll need to download it, right?)
    3. Using the module (configuring stuff, e.g. the CSS :P)

    :param request: Django HTTP request.
    :param order_id: primary key of the Order to render.
    :return: HttpResponse with ``application/pdf`` content.
    """
    order = get_object_or_404(Order, id=order_id)
    html = render_to_string('orders/order/pdf.html', {
        'order': order
    })
    response = HttpResponse(content_type='application/pdf')
    # NOTE(review): header lacks a disposition type ('inline;'/'attachment;')
    # before 'filename=' — some browsers may ignore it; confirm intended.
    response['Content-Disposition'] = (
        'filename=order_{}.pdf'.format(order.id)
    )
    # NOTE(review): assumes settings.STATIC_ROOT ends with a path separator.
    weasyprint.HTML(string=html).write_pdf(
        response,
        stylesheets=[
            weasyprint.CSS(
                settings.STATIC_ROOT + 'css/pdf.css'
            )
        ]
    )
    return response
34,409
def delete_container(request, container):
    """ Deletes a container via the Swift client and redirects to the
    container view, flashing a message on success or failure.

    :param request: Django HTTP request carrying session credentials.
    :param container: name of the container to delete.
    """
    # NOTE(review): storage_url and username are read but never used below.
    storage_url = request.session.get('storage_url', '')
    #meta_storage_url = request.session.get('meta_storage_url', '')
    auth_token = request.session.get('auth_token', '')
    #meta_auth_token = request.session.get('meta_auth_token', '')
    username = request.session.get('username', '')
    project_id = request.session.get('project_id','')
    try:
        conn = EncSwiftclientAPI(auth_token, project_id)
        conn.delete_container(container)
        messages.add_message(request, messages.INFO, _("Container deleted."))
    except client.ClientException:
        # Deletion failed (e.g. non-empty container / access denied):
        # log the traceback and surface an error message to the user.
        traceback.print_exc()
        messages.add_message(request, messages.ERROR, _("Access denied. If there are some files in %s, before delete them!" % container))
    return redirect(containerview)
34,410
def dense_encoder(X, params): """Dense model encoder subgraph that produces latent matrix. Given data matrix tensor X and dictionary of parameters, process through dense model encoder subgraph and return encoder latent vector for each example in batch. Args: X: tf.float64 matrix tensor of input data. params: Dictionary of parameters. Returns: tf.float64 matrix tensor encoder latent vector for each example in batch. """ # Create the input layer to our DNN network = X # Add hidden layers with the given number of units/neurons per layer for units in params["enc_dnn_hidden_units"]: network = tf.layers.dense( inputs=network, units=units, activation=tf.nn.relu) return tf.layers.dense( inputs=network, units=params["latent_vector_size"], activation=tf.nn.relu)
34,411
def __asset_inventory_espanol(asset):
    """Rename the headers of the Datos Abiertos Colombia database inventory
    to Spanish terms.

    :param asset: (pandas.DataFrame) - Inventory table from the Colombian
        open-data portal (https://www.datos.gov.co).
    :return: the database as a dataframe with renamed/translated columns.
    """
    # Keep only the columns covered by the rename mapping (module constant).
    lista_columnas = list(DIC_RENAME.keys())
    asset = asset[lista_columnas].rename(columns=DIC_RENAME)
    # Normalize the dates: keep the first 10 characters (the date part).
    asset["fecha_creacion"] = asset["fecha_creacion"].apply(lambda x: x[0:10])
    asset["fecha_actualizacion"] = asset["fecha_actualizacion"].apply(
        lambda x: x[0:10])
    # Cast row and column counts to float.
    asset["filas"] = asset["filas"].astype(float)
    asset["columnas"] = asset["columnas"].astype(float)
    # Translate the 'base_publica' categories.
    asset["base_publica"] = asset["base_publica"].map(
        {"published": "Si", "unpublished": "No"})
    # Translate the asset-type categories to Spanish.
    asset["tipo"] = asset["tipo"].map({
        "dataset": "conjunto de datos",
        "federatet_href": "enlace externo",
        "href": "enlace externo",
        "map": "mapa",
        "chart": "grafico",
        "filter": "vista filtrada",
        "file": "archivo o documento",
        "visualization": "visualizacion",
        "story": "historia",
        "datalens": "lente de datos",
        "form": "formulario",
        "calendar": "calendario",
        "invalid_datatype": "tipo_invalido"})
    return asset
34,412
def end_of_sign_found(token: str, preceding_token: str):
    """Return whether *token* ends an Akkadian sign, given its predecessor.

    A token never ends a sign when there is no preceding token.  It always
    does when it contains a '-' or '.' separator.  Otherwise it ends a sign
    unless it visibly continues one (the predecessor ends with '-' or the
    token itself starts with '##').
    """
    if not preceding_token:
        return False
    has_separator = '-' in token or '.' in token
    continues_sign = preceding_token.endswith('-') or token.startswith('##')
    return has_separator or not continues_sign
34,413
def main(argv=None):
    """Execute the application from CLI.

    :param argv: CLI arguments; defaults to ``sys.argv[1:]``.  When empty,
        the current directory is analyzed.
    :return: process exit code 0.
    """
    if argv is None:
        argv = sys.argv[1:]
    if not argv:
        # No path given: analyze the current directory.
        argv = [curdir]
    args = _parse_args(argv)
    data = csft2data(args.path)
    if args.top:
        # Keep only the first `top` rows.
        data = data.head(args.top)
    if args.with_raw:
        # Preserve the raw byte counts in a 'raw' column before the size
        # column is replaced by human-readable strings.
        data['raw'] = data[column.SIZE]
        data[column.SIZE] = data[column.SIZE].map(format_size)
    print(data)
    return 0
34,414
def translateFrontendStrings(language_code):
    """
    Translate all strings used in frontend templates and code

    :param language_code: de|en
    """
    # Pick the pybabel executable: on PATH under Windows, otherwise from the
    # project's virtualenv location.
    if sys.platform == 'win32':
        pybabel = 'pybabel'
    else:
        pybabel = 'flask/bin/pybabel'
    # NOTE(review): the commands below use Windows-style backslash paths even
    # on the non-win32 branch — confirm this works in the target environment.
    # frontend pages
    os.system(pybabel + ' extract -F bin/babel.cfg -k gettext -o emonitor\\frontend\\translations\\frontend.pot emonitor\\frontend')
    os.system(pybabel + ' update -D frontend -i emonitor\\frontend\\translations\\frontend.pot -d emonitor\\frontend\\translations -l ' + language_code)
34,415
def build_gemini3d(targets: list[str]):
    """
    build targets from gemini3d program

    Specify environment variable GEMINI_ROOT to reuse existing development code

    :param targets: one or more CMake target names to build and locate.
    :raises RuntimeError: when a built target executable cannot be found.
    """
    # Accept a single target name as a convenience.
    if isinstance(targets, str):
        targets = [targets]
    gem_root = get_gemini_root()
    src_dir = Path(gem_root).expanduser()
    # Fetch the sources if they are not already present.
    if not (src_dir / "CMakeLists.txt").is_file():
        jmeta = json.loads(importlib.resources.read_text("gemini3d", "libraries.json"))
        git_download(src_dir, repo=jmeta["gemini3d"]["git"], tag=jmeta["gemini3d"]["tag"])
    build_dir = src_dir / "build"
    build(
        src_dir,
        build_dir,
        run_test=False,
        install=False,
        config_args=["-DBUILD_TESTING:BOOL=false", "-Dmsis2:BOOL=true"],
        build_args=["--target", *targets],
    )
    # Verify each target produced an executable in one of the usual
    # single/multi-config output directories.
    for t in targets:
        # NOTE(review): iterating a set literal has no guaranteed order;
        # harmless here since only existence is checked.
        for n in {"build", "build/Release", "build/Debug"}:
            exe = shutil.which(t, path=src_dir / n)
            if exe:
                break
        if not exe:
            raise RuntimeError(f"{t} not found in {build_dir}")
34,416
def get_base_path(node=None):
    """Return the base path for the system.

    Parameters
    ----------
    node : optional
        System node identifier; when None it is resolved via ``get_system()``.

    Returns
    -------
    str
        The ``sdss_catl_path`` environment variable when it is set and points
        to an existing location, otherwise the project's cookiecutter
        ``base_dir``.
    """
    if node is None:  # identity comparison per PEP 8 (was `node == None`)
        node = get_system()
    ##
    ## Base path
    # Explicit check instead of the original bare `except` around an
    # `assert` (asserts vanish under `python -O` and bare except hides bugs).
    path = os.environ.get('sdss_catl_path')
    if path is None or not os.path.exists(path):
        ##
        ## Path to `base`
        proj_dict = cookiecutter_paths(__file__)
        path = proj_dict['base_dir']
    return path
34,417
def get_loaders(
    dataset: str, batch_size: int, num_workers: Optional[int]
) -> Dict[str, DataLoader]:
    """Init loaders based on parsed parameters.

    Args:
        dataset: dataset for the experiment (key into the module-level
            ``datasets`` registry)
        batch_size: batch size for loaders
        num_workers: number of workers to process loaders

    Returns:
        {"train":..., "valid":...}
    """
    transforms = datasets[dataset]["train_transform"]
    transform_original = datasets[dataset]["valid_transform"]
    # The wrapper applies the self-supervised transforms; the underlying
    # dataset is created untransformed (transform=None) and downloaded if
    # missing.
    train_data = SelfSupervisedDatasetWrapper(
        datasets[dataset]["dataset"](root="data", train=True, transform=None, download=True),
        transforms=transforms,
        transform_original=transform_original,
    )
    valid_data = SelfSupervisedDatasetWrapper(
        datasets[dataset]["dataset"](root="data", train=False, transform=None, download=True),
        transforms=transforms,
        transform_original=transform_original,
    )
    train_loader = DataLoader(train_data, batch_size=batch_size, num_workers=num_workers)
    valid_loader = DataLoader(valid_data, batch_size=batch_size, num_workers=num_workers)
    return {"train": train_loader, "valid": valid_loader}
34,418
def pipeline():
    """ Creates a pipeline configured to use a given model with a specified
    configuration.

    Notes
    -----
    Pipeline can be executed only if its config contains the following
    parameters:

    model_class : TFModel
        Architecture of model. List of available models is defined at
        'AVAILABLE_MODELS'.
    model_config : Config
        Model parameters.

    Returns
    -------
    Pipeline
        A pipeline that contains model initialization and training with a
        given config.
    """
    # C(...) reads from the pipeline config, B(...) from the current batch,
    # V(...) from a pipeline variable.
    test_pipeline = (Pipeline()
                     .init_variable('current_loss')
                     .init_model('dynamic', C('model_class'), 'model', C('model_config'))
                     .to_array()
                     .train_model('model',
                                  fetches='loss',
                                  images=B('images'),
                                  labels=B('labels'),
                                  save_to=V('current_loss'))
                     )
    return test_pipeline
34,419
def get_screen_point_array(width: float, height: float):
    """Get screen points(corners) in pixels from normalized points_in_square

    :param width: screen width
    :param height: screen height
    :return: np.float32 array of triangle vertices in pixel coordinates.
    """
    # Deep-copy the module-level normalized points so scaling does not
    # mutate the shared template.
    points = copy.deepcopy(points_in_square)
    for i in range(len(points_in_square)):
        # Scale normalized (x, y) pairs into pixel coordinates.
        points[i] = points[i][0] * width, points[i][1] * height
    result = list_points_to_triangle(points)
    return np.array(result, dtype=np.float32)
34,420
def get_spacing_matrix(size, spacing, offset):
    """Returns a sparse matrix LinOp that spaces out an expression.

    Parameters
    ----------
    size : tuple
        (rows in matrix, columns in matrix)
    spacing : int
        The number of rows between each non-zero.
    offset : int
        The number of zero rows at the beginning of the matrix.

    Returns
    -------
    LinOp
        A sparse matrix constant LinOp.
    """
    val_arr = []
    row_arr = []
    col_arr = []
    # Selects from each column: place a single 1.0 per column, with the
    # non-zeros `spacing` rows apart starting at row `offset`.
    for var_row in range(size[1]):
        val_arr.append(1.0)
        row_arr.append(spacing*var_row + offset)
        col_arr.append(var_row)
    mat = sp.coo_matrix((val_arr, (row_arr, col_arr)), size).tocsc()
    return lu.create_const(mat, size, sparse=True)
34,421
def next_power2(x):
    """Return the smallest power of two that is greater than or equal to x.

    Note: when x is itself a power of two, x is returned unchanged.

    :param x: a positive number
    :return: the least power of 2 not smaller than x

    >>> result = next_power2(5)
    >>> np.testing.assert_equal(result, 8)
    """
    exponent = np.ceil(np.log2(x)).astype(int)
    return 2 ** exponent
34,422
def category_induced_page():
    """Form to compute the Category induced.

    Flask view: simply renders the static form template.
    """
    return render_template('category-induced.html')
34,423
def zext(value, n):
    """Extend `value` by `n` zeros (zero-extension).

    :param value: a UInt, SInt, Bits, or Array-of-Digital value.
    :param n: non-negative number of zero bits to append.
    :return: the zero-extended value, re-wrapped in the input's type
        (plain concat result for Array inputs).
    :raises TypeError: when n is not a non-negative integer.
    """
    assert (isinstance(value, (UInt, SInt, Bits)) or
            (isinstance(value, Array) and issubclass(value.T, Digital)))
    if not is_int(n) or n < 0:
        raise TypeError(f"Expected non-negative integer, got '{n}'")
    if n == 0:
        # Nothing to extend; return the input unchanged.
        return value
    # Build an n-wide zero constant of the matching kind.
    if isinstance(value, UInt):
        zeros = uint(0, n)
    elif isinstance(value, SInt):
        zeros = sint(0, n)
    elif isinstance(value, Bits):
        zeros = bits(0, n)
    elif isinstance(value, Array):
        zeros = array(0, n)
    result = concat(value, zeros)
    # Re-wrap the concatenation in the input's type; Array inputs fall
    # through to the raw concat result.
    if isinstance(value, UInt):
        return uint(result)
    elif isinstance(value, SInt):
        return sint(result)
    elif isinstance(value, Bits):
        return bits(result)
    return result
34,424
def _distance(point0, point1, point2, seg_len):
    """Compute distance between point0 and segment [point1, point2].

    Based on Mark McClure's PolylineEncoder.js.

    :param point0: (x, y) query point.
    :param point1: (x, y) segment start.
    :param point2: (x, y) segment end.
    :param seg_len: precomputed segment length term used as the projection
        denominator — presumably the *squared* segment length; confirm at
        the call site.
    """
    if (point1[0] == point2[0]) and (point1[1] == point2[1]):
        # Degenerate segment: distance to the (single) endpoint.
        out = _dist(point0, point2)
    else:
        # uuu is the normalized projection of point0 onto the segment.
        uuu = ((point0[0] - point1[0]) * (point2[0] - point1[0]) +
               (point0[1] - point1[1]) * (point2[1] - point1[1])) / seg_len
        if uuu <= 0:
            # Projection falls before point1.
            out = _dist(point0, point1)
        elif uuu >= 1:
            # Projection falls past point2.
            out = _dist(point0, point2)
        else:
            # Perpendicular distance to the interior of the segment.
            out = math.sqrt(math.pow((point0[0] - point1[0]) - (uuu * (point2[0] - point1[0])), 2) +
                            math.pow((point0[1] - point1[1]) - (uuu * (point2[1] - point1[1])), 2))
    return out
34,425
def delete_node(
        graph: xpb2.GraphProto,
        node_name: str = "",
        **kwargs):
    """ Remove the node(s) with the given name from graph and return the
    modified graph.

    Prints a message and returns False if fails.

    Args:
        graph: A graph, onnx.onnx_ml_pb2.GraphProto.
        node_name: Name of the node to remove.
        **kwargs

    Returns:
        The modified graph, or False on failure.
    """
    if type(graph) is not xpb2.GraphProto:
        _print("The graph is not a valid ONNX graph.")
        return False
    if not node_name:
        _print("Please specify a node name.")
        return False
    found = False
    try:
        # Iterate over a snapshot: removing from `graph.node` while
        # iterating it directly skips the element after each removal.
        for elem in list(graph.node):
            if elem.name == node_name:
                graph.node.remove(elem)
                found = True
    except Exception as e:
        _print("Unable to iterate the nodes. " + str(e))
        return False
    if not found:
        _print("Unable to find the node by name.")
        return False
    return graph
34,426
def image_to_term256(pil_image):
    """Convert image to a string that resembles it when printed on a terminal

    Needs a PIL image as input and a 256-color xterm for output.
    """
    result = []
    im = pil_image.convert('RGBA')
    # If PIL's Image module is importable, request antialiased thumbnailing;
    # otherwise fall back to the default filter.
    # NOTE(review): Image.ANTIALIAS is removed in Pillow 10+; newer code
    # should use Image.LANCZOS.
    try:
        from PIL import Image
    except ImportError:
        im.thumbnail((80, 80))
    else:
        im.thumbnail((80, 80), Image.ANTIALIAS)
    width, height = im.size
    # Each text row encodes two pixel rows: background color = upper pixel,
    # foreground color + lower-half-block glyph = lower pixel.
    for y in range(height // 2):
        try:
            for x in range(width):
                result.append('\033[48;5;%dm\033[38;5;%dm' % (
                    term256color(*im.getpixel((x, y * 2))),
                    term256color(*im.getpixel((x, y * 2 + 1)))))
                result.append('\N{LOWER HALF BLOCK}')
        finally:
            # Always reset attributes and end the line, even if a pixel
            # lookup raised.
            result.append('\033[0m\n')
    return ''.join(result)
34,427
def login(driver, user, pwd):
    """
    Type user email and password in the relevant fields and perform log in
    on linkedin.com by using the given credentials.

    :param driver: selenium chrome driver object
    :param user: str username, email
    :param pwd: str password
    :return: None
    """
    username = driver.find_element_by_id('session_key')
    username.send_keys(user)
    # Short pauses between interactions to mimic human typing cadence.
    sleep(0.5)
    password = driver.find_element_by_id('session_password')
    password.send_keys(pwd)
    sleep(0.5)
    sign_in_button = driver.find_element_by_xpath('//*[@type="submit"]')
    sign_in_button.click()
34,428
def false_discovery(alpha, beta, rho):
    """The false discovery rate.

    The false discovery rate is the probability that an observed edge is
    incorrectly identified, namely that is doesn't exist in the 'true'
    network. This is one measure of how reliable the results are.

    Parameters
    ----------
    alpha : float
        The estimate of the true-positive rate.
    beta : float
        The estimate of the false-positive rate.
    rho : float
        The estimate of network density.

    Returns
    -------
    float
        The false discovery rate (probability).

    References
    ----------
    .. [1] Newman, M.E.J. 2018. "Network structure from rich but noisy
       data." Nature Physics 14 6 (June 1): 542-545.
       doi:10.1038/s41567-018-0076-1.
    """
    # Expected rates of correctly and spuriously observed edges.
    true_edges = rho * alpha
    spurious_edges = (1 - rho) * beta
    return spurious_edges / (true_edges + spurious_edges)
34,429
def dis3(y=None):
    """frobnicate classes, methods, functions, or code.

    With no argument, frobnicate the last traceback.

    :param y: a class, method, function, code object, or source string.
    :raises TypeError: for objects that cannot be frobnicated.
    """
    if y is None:
        distb()
        return
    # Old-style instance check (Python 2 only); InstanceType does not exist
    # on Python 3, so fall back to an always-False isinstance.
    if isinstance(y, getattr(types, 'InstanceType', ())):
        y = y.__class__
    # Unwrap bound methods / functions down to the code object
    # (Python 2 attribute names; harmless no-ops on Python 3).
    if hasattr(y, 'im_func'):
        y = y.im_func
    if hasattr(y, 'func_code'):
        y = y.func_code
    if hasattr(y, '__dict__'):
        # Classes/modules: process each code-bearing attribute in name order.
        # (dict views have no .sort(); sorted() works on both 2 and 3.)
        items = sorted(y.__dict__.items())
        for name, y1 in items:
            if isinstance(y1, _have_code):
                print("Disassembly of %s:" % name)
                try:
                    dis(y1)
                except TypeError as msg:  # was `except TypeError(msg):`, which raised NameError
                    print("Sorry:", msg)
                print()
    elif hasattr(y, 'co_code'):
        frobnicate(y)
    elif isinstance(y, str):
        frobnicate_string(y)
    else:
        raise TypeError(
            "don't know how to frobnicate %s objects" %
            type(y).__name__)
34,430
def assert_almost_equal(
    actual: numpy.float64,
    desired: numpy.float64,
    err_msg: Literal["orth.legendre(1)"]
):
    """
    usage.scipy: 1
    """
    # Auto-generated API-usage stub: records one observed call signature
    # (from scipy's test suite); deliberately has no body.
    ...
34,431
def __keep_update_tweets(interval: int) -> None:
    """run update-tweets command with interval on background.

    :param interval: seconds to sleep between invocations.  This function
        never returns; it loops forever spawning a detached subprocess.
    """
    mzk.set_process_name(f'MocaTwitterUtils({core.VERSION}) -- keep-update-tweets')
    while True:
        # Fire-and-forget: nohup + trailing '&' detaches the command, so
        # this loop only paces the launches, not the runs.
        mzk.call(
            f'nohup {mzk.executable} "{core.TOP_DIR.joinpath("moca.py")}" update-tweets &> /dev/null &',
            shell=True
        )
        mzk.sleep(interval)
34,432
def task_rescheduled_handler(sender, **kwargs):
    """ Notify the admins when a task has failed and rescheduled """
    task = kwargs['task']
    notification.task_rescheduled_notify(
        task.verbose_name,
        task.attempts,
        task.last_error,
        task.run_at,
        task.task_name,
        task.task_params,
    )
34,433
def repeat_first(iterable):
    """Repeat the first item from an iterable on the end.

    Example:

        >>> ''.join(repeat_first("ABCD"))
        'ABCDA'

    Useful for making a closed cycle out of elements.

    If iterable is empty, the result will also be empty.

    Args:
        iterable: An iterable series of items.

    Yields:
        All items from iterable, followed by the first item from iterable.
    """
    iterator = iter(iterable)
    try:
        first = next(iterator)
    except StopIteration:
        # Empty input -> empty output.
        return
    yield first
    yield from iterator
    yield first
34,434
def add_default_legend(axes, subplots, traces): """ Add legend to the axes of the plot. This is needed to be done using matplotlib shapes rather than the build in matplotlib legend because otherwise the animation will add a legend at each time step rather than just once. Parameters ---------- axes: axes object the axes of the matplotlib figure subplots: int number of subplots in the figure traces: list of dictionaries a list of dictionaries where each dictionary corresponds to one of the passed in filenames or dataframes, the keys of the dictionaries are subplots (0-indexed), and the values are a list of values for that subplot from that filename (ex. traces = [{0: ["bg", "bg_sensor"], 1: ["iob"], 2: ["sbr"]}]) Returns ------- """ # Add the corresponding shape and label for each field in the plot to the legend for subplot in range(subplots): legend_items = [] for trace_dict in traces: if subplot in trace_dict.keys(): for field in trace_dict[subplot]: features = get_features_dictionary(field) legend_items.append( Line2D( [0], [0], color=features["color"], label=features["legend_label"], marker=features["marker"], markersize=3, linestyle=features["linestyle"], ) ) # Syntax is slightly different if there is only 1 subplot if subplots < 2: add_to = axes else: add_to = axes[subplot] add_to.legend(handles=legend_items, loc="upper right") # Return the updated axes return axes
34,435
def _conv2d(input,
            filter,
            bias=False,
            strides=[1, 1],
            pads=[1, 1, 1, 1],
            dilations=[1, 1],
            group=1,
            debugContext=''):
    """Encapsulation of function get_builder().aiOnnx.conv!

    NOTE(review): the list defaults (strides/pads/dilations) are mutable
    default arguments; they appear to be read-only here, but converting
    them to tuples at the call boundary would be safer.

    args:
        input: input tensor (wrapped; exposes getIpuIndex())
        filter: conv weight tensor (wrapped)
        bias: optional bias tensor, or False for no bias
        strides: per-axis conv strides
        pads: conv padding (begin/end per spatial axis)
        dilations: per-axis kernel dilations
        group: int, conv group nums,default:1
        debugContext: name scope string for the builder
    """
    args = [input.getIpuIndex(), filter.getIpuIndex()]
    if bias:
        args.append(bias.getIpuIndex())
    output = get_builder().aiOnnx.conv(args,
                                       strides=strides,
                                       pads=pads,
                                       dilations=dilations,
                                       group=group,
                                       debugContext=debugContext)
    # Optionally cap the on-chip memory used by this op.
    if get_memory_proportion() is not None:
        get_builder().setAvailableMemoryProportion(output, get_memory_proportion())
    return TTensor(output)
34,436
def remove_dir_content(dir_path: str, ignore_files: Iterable[str] = tuple(), force_reset: bool = False) -> None:
    """ Remove the content of dir_path ignoring the files under ignore_files.

    If force_reset is False, prompts the user for approval before the
    deletion.

    :param dir_path: path to dir. either relative or full
    :param ignore_files: list of files to ignore (don't delete them)
    :param force_reset: when False (default), asks the user for approval when
        deleting content. Else, delete without prompting.
    :return: None
    """
    entries = [entry for entry in os.listdir(dir_path) if entry not in ignore_files]
    num_files = len(entries)
    # Empty (after filtering): nothing to do.
    if num_files == 0:
        return
    # Decide whether deletion may proceed.
    if force_reset:
        force_remove = True
    else:
        force_remove = Misc.query_yes_no(
            question=f'Folder {dir_path} contains {num_files} files. Delete anyway?')
    failure = False
    if force_remove:
        for entry in entries:
            file_path = os.path.join(dir_path, entry)
            try:
                if os.path.isfile(file_path) or os.path.islink(file_path):
                    os.unlink(file_path)
                elif os.path.isdir(file_path):
                    shutil.rmtree(file_path)
            except Exception as e:
                print('Failed to delete %s. Reason: %s' % (file_path, e))
                failure = True
    # Deletion declined, or some entries could not be removed.
    if not force_remove or failure:
        msg = f'Folder {dir_path} is already used, please remove it and rerun the program'
        print(msg)
        raise Exception(msg)
34,437
def StartUpRedis(host_protos): """ Given a list of host protos, run external program 'mx' that allows one to start up a program on an running host container. This function will use that program to start up redis on the appropraite hosts. Arguments: host_protos: list of host protobuf messages as defined in QuaggaTopo.proto, only the one with type HT_LOOKUPSERVICE will have redis started on it. """ #find lookup service host proto host name lookupservice_host_name = '' redis_path = '' for host_proto in host_protos: if(host_proto.host_type == QuaggaTopo_pb2.HostType.Value('HT_LOOKUPSERVICE')): lookupservice_host_name = host_proto.host_name redis_path = host_proto.path_to_redis_executable break #run mx <host_name> <path_to_redis> commandline command = './' + kMxLocation + ' ' + lookupservice_host_name + ' ' + redis_path print command p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) # print 'starting redis command' # print p.stdout.readlines() # print p.stderr
34,438
def set_seed(seed: int):
    """Seed every RNG the project uses: random, numpy, torch, torch.cuda."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
34,439
def split_data(n_samps, percent_test):
    """Randomly partition sample indices into training and validation sets.

    :param n_samps: number of data samples
    :param percent_test: percent of data to hold out
    :return: two index arrays (training, validation)
    """
    # Shuffle all indices, then cut at the train/validation boundary.
    shuffled = np.arange(n_samps)
    np.random.shuffle(shuffled)
    cut = int(n_samps * (1 - percent_test))
    return shuffled[:cut], shuffled[cut:]
34,440
def compute_totals(songs, limit_n, save_file=None):
    """ Return array of shape (4, 3, 35) representing counts for each group
    of each context type of each label.

    :param songs: iterable of (song_path, beatmap_ids) pairs; song_path is
        an .npy spectrogram file.
    :param limit_n: forwarded to get_counts.
    :param save_file: optional path; when given the totals are also saved
        via np.save.
    """
    totals = np.zeros((4, 3, 35), dtype='int32')
    i = 0
    for song_path, beatmap_ids in songs:
        # Progress indicator (songs can take a while to process).
        print('song {}'.format(i))
        spectrogram = np.load(song_path)
        beatmap_data = [db.beatmap_data(beatmap_id) for beatmap_id in beatmap_ids]
        counts = get_counts(beatmap_data, spectrogram, limit_n=limit_n)
        # In-place accumulation into the preallocated array.
        totals[:] = totals + counts
        i += 1
    if save_file:
        np.save(save_file, totals)
    return totals
34,441
def _initialize_arrays(initial_values, num_steps):
    """Construct a structure of `TraceArray`s from initial values.

    Builds one TensorArray per leaf of `initial_values` (same nested
    structure), sized for `num_steps`, and writes each initial value at
    index 0.
    """
    trace_arrays = tf.nest.map_structure(
        lambda t: tf.TensorArray(  # pylint: disable=g-long-lambda
            dtype=t.dtype,
            size=num_steps,  # Initial size.
            clear_after_read=False,  # Allow reading->tiling final value.
            element_shape=t.shape),
        initial_values)
    # Seed each array with its corresponding initial value at step 0.
    return tf.nest.map_structure(
        lambda ta, t: ta.write(0, t), trace_arrays, initial_values)
34,442
def blend(image1, image2, factor):
    """Blend image1 and image2 using 'factor'.

    Factor can be above 0.0.  A value of 0.0 means only image1 is used.
    A value of 1.0 means only image2 is used.  A value between 0.0 and 1.0
    means we linearly interpolate the pixel values between the two images.
    A value greater than 1.0 "extrapolates" the difference between the two
    pixel values, and we clip the results to values between 0 and 255.

    Uses the TF1 `tf.to_float` API.

    Args:
        image1: An image Tensor of type uint8.
        image2: An image Tensor of type uint8.
        factor: A floating point value above 0.0.

    Returns:
        A blended image Tensor of type uint8.
    """
    # Fast paths: pure image1 or pure image2.
    if factor == 0.0:
        return tf.convert_to_tensor(image1)
    if factor == 1.0:
        return tf.convert_to_tensor(image2)
    image1 = tf.to_float(image1)
    image2 = tf.to_float(image2)
    difference = image2 - image1
    scaled = factor * difference
    # Do addition in float.
    temp = tf.to_float(image1) + scaled
    # Interpolate
    if factor > 0.0 and factor < 1.0:
        # Interpolation means we always stay within 0 and 255.
        return tf.cast(temp, tf.uint8)
    # Extrapolate:
    #
    # We need to clip and then cast.
    return tf.cast(tf.clip_by_value(temp, 0.0, 255.0), tf.uint8)
34,443
def AddReservationAffinityFlags(parser, for_node_pool=False):
    """Adds the argument to handle reservation affinity configurations.

    Args:
        parser: the argparse-style parser/group to extend.
        for_node_pool: when True, help texts refer to "node pool" instead
            of "default initial node pool".
    """
    target = 'node pool' if for_node_pool else 'default initial node pool'
    group_text = """\
Specifies the reservation for the {}.""".format(target)
    group = parser.add_group(help=group_text)
    affinity_text = """\
The type of the reservation for the {}.""".format(target)
    group.add_argument(
        '--reservation-affinity',
        choices=['any', 'none', 'specific'],
        default=None,
        help=affinity_text)
    # Only meaningful with --reservation-affinity=specific (see help text).
    group.add_argument(
        '--reservation',
        default=None,
        help="""
The name of the reservation, required when `--reservation-affinity=specific`.
""")
34,444
def handle_debug(drive_file, node_id, show_all):
    """Handle the debug verb by toggling the debug flag.

    When debugging is currently enabled, the call arguments are echoed
    before the flag is flipped.  Always returns True.
    """
    if drive_file.debug:
        print("# handle_debug(node_id: " + str(node_id) + ",")
        print("# show_all: " + str(show_all))
    toggled = not drive_file.get_debug()
    drive_file.set_debug(toggled)
    return True
34,445
def start_of_next_clk_period(time: float, clk_period: float):
    """
    :param time: current simulation time.
    :param clk_period: clock period length.
    :return: start time of next clk period
    """
    # Assumes start_clk(time, clk_period) yields the index of the current
    # clock period (so index+1 scaled by the period is the next period's
    # start) — TODO confirm against start_clk's definition.
    return (start_clk(time, clk_period) + 1) * clk_period
34,446
def eval_formula(formula, assignment):
    """Evaluate a formula given as a string under a parameter assignment.

    **Attention**: Be extremely careful about what to pass to this function.
    All parameters are plugged into the formula and evaluated using `eval()`
    which executes arbitrary python code.

    Parameters
    ----------
    formula : str
        String representation of the formula to be evaluated.
    assignment : dict
        Dictionary containing parameter names and values as keys and
        values, respectively.

    Returns
    -------
    float
        Evaluation result.

    Examples
    --------
    >>> eval_formula('a + (1 - b) * a', {'a': 0.1, 'b': 0.8})
    0.12
    """
    expression = formula
    # Substitute longer parameter names first so a short name never
    # clobbers part of a longer one.
    for param in sorted(assignment, reverse=True):
        expression = expression.replace(param, str(assignment[param]))
    # remove leading 0's
    expression = re.sub(r'\d-0\d', lambda x: re.sub(r'-0', '-', x[0]), expression)
    # pylint: disable=eval-used
    return eval(expression)
    # pylint: enable=eval-used
34,447
def verify_hash(path: Path, expected_hash: str) -> None:
    """Verify that the file at *path* exists and matches *expected_hash*.

    Raises:
        MissingFileError - path doesn't exist
        IntegrityError - path exists with bad hash
    """
    if not path.exists():
        raise MissingFileError(str(path))
    # hash_file computes the file digest in whatever format
    # expected_hash uses (project helper).
    h = hash_file(path)
    if h != expected_hash:
        raise IntegrityError(
            path.name, actual_hash=h, expected_hash=expected_hash
        )
34,448
def soil_temperature(jth: int, states: States, weather: Weather):  # j = 1,2,..,5
    """
    Equation 2.4 / 8.4
    cap_soil_j * soil_j_t = sensible_heat_flux_soil_j_minus_soil_j - sensible_heat_flux_soil_j_soil_j_plus
    0 is Floor, 6 is SoOut

    Returns the time derivative of the temperature of soil layer `jth`
    (W/m^2 divided by the layer's areal heat capacity).
    """
    # Thicknesses of the neighboring layers: the floor sits above layer 1,
    # and a fixed 1.28 m "outer soil" sits below layer 5.
    h_soil_j_minus = Coefficients.Floor.floor_thickness if jth == 1 else Coefficients.Soil.soil_thicknesses[jth - 2]
    h_soil_j = Coefficients.Soil.soil_thicknesses[jth - 1]
    h_soil_j_plus = 1.28 if jth == 5 else Coefficients.Soil.soil_thicknesses[jth]  # Assumed by GreenLight's authors, line 83, setGlParams
    # Areal heat capacity of the layer.
    cap_soil_j = h_soil_j * Coefficients.Soil.rho_c_p_So
    soil_heat_conductivity = Coefficients.Soil.soil_heat_conductivity
    # Conductive heat exchange coefficients with the layers above and below.
    HEC_soil_j_minus_soil_j = 2 * soil_heat_conductivity / (h_soil_j_minus + h_soil_j)
    HEC_soil_j_soil_j_plus = 2 * soil_heat_conductivity / (h_soil_j + h_soil_j_plus)
    # Temperatures of this layer and its neighbors (floor above layer 1,
    # outdoor soil below layer 5).
    soil_j_minus_t = states.floor_t if jth == 1 else states.soil_j_t[jth - 2]
    soil_j_t = states.soil_j_t[jth - 1]
    soil_j_plus_t = weather.soil_out_t if jth == 5 else states.soil_j_t[jth]
    sensible_heat_flux_soil_j_minus_soil_j = convective_and_conductive_heat_fluxes(HEC_soil_j_minus_soil_j, soil_j_minus_t, soil_j_t)
    sensible_heat_flux_soil_j_soil_j_plus = convective_and_conductive_heat_fluxes(HEC_soil_j_soil_j_plus, soil_j_t, soil_j_plus_t)
    return (sensible_heat_flux_soil_j_minus_soil_j - sensible_heat_flux_soil_j_soil_j_plus) / cap_soil_j
34,449
def test_metadata_default(metadata_class): """Test long_break_clock_count default value.""" metadata_default = metadata_class() assert metadata_default.long_break_clock_count == 4
34,450
def clean_user_data(model_fields):
    """
    Transforms the user data loaded from LDAP into a form suitable for
    creating a user.
    """
    # LDAP users never authenticate with a local password, so store an
    # unusable one.
    unusable_password = make_password(None)
    model_fields["password"] = unusable_password
    return model_fields
34,451
def main():
    """ Runs data processing scripts to turn raw data from (../raw) into
        cleaned data ready to be analyzed (saved in ../processed).

        If the local database already holds data it is exported directly;
        otherwise the user is asked whether to scrape from source.
    """
    save_path = 'data/raw/data.csv'
    model = RecipeModel()

    # Data already present locally: export it and finish.
    if model.all():
        table_to_csv(f'select * from {model.TABLE_NAME}', model.con, save_path)
        return

    user_input = input('Local database is empty. Would you like to start scrapping the data from source?(Y/N) : ')
    if user_input.lower() == 'y':
        start_scrapper()
        table_to_csv(f'select * from {model.TABLE_NAME}', model.con, save_path)
        sys.exit()
    if user_input.lower() == 'n':
        # Explicit decline: exit cleanly.
        sys.exit()
    # BUG FIX: previously only an empty answer was reported as invalid;
    # any other non-'y' answer (e.g. 'q') ended the script silently.
    print('Invalid input! Exiting script...')
    sys.exit()
34,452
def case_structure_generator(path):
    """Create test cases from reference data files.

    Args:
        path: path-like object (``str(path)`` must be openable, ``.name``
            must exist) pointing at a JSON reference file with
            ``namelists.SYSTEM`` and ``cell`` entries.  A file name of the
            form ``<case>-<qe_version>.<ext>`` carries the QE version used
            to generate the reference.

    Returns:
        ``(ins, outs, error)``: the keyword inputs for the structure
        builder, the expected cell parameters (or ``None``), and the
        expected exception type (``ValueError`` when ``ibrav == 0``,
        otherwise ``None``).
    """
    with open(str(path), 'r') as in_f:
        case_data = json.load(in_f)
    system_dict = case_data['namelists']['SYSTEM']
    ibrav = system_dict['ibrav']
    # Extract the QE version from names like 'case-6.5.json' -> '6.5'.
    if '-' in path.name:
        _, qe_version_with_suffix = path.name.split('-')
        qe_version, _ = qe_version_with_suffix.rsplit('.', 1)
    else:
        qe_version = None
    # BUG FIX: an earlier `ins` dict without 'qe_version' was built before
    # the version was parsed and then immediately overwritten; the dead
    # assignment has been removed.
    ins = {'ibrav': ibrav, 'cell': case_data['cell'], 'qe_version': qe_version}
    if ibrav == 0:
        return ins, None, ValueError
    outs = dict()
    for key in (['a', 'b', 'c', 'cosab', 'cosac', 'cosbc'] +
                ['celldm({})'.format(i) for i in range(1, 7)]):
        if key in system_dict:
            outs[key] = system_dict[key]
    return ins, outs, None
34,453
def draw_pixel(x, y, pixel, img):
    """ Draw `pixel` onto `img` at the specified coordinates.

    The pixel is alpha-blended with the existing pixel via `blend_color`.

    Args:
        x(int): x coordinate of draw
        y(int): y coordinate of draw
        pixel(tuple): red, green, blue, and optionally alpha
        img(Image): the Image to draw the pixel onto

    Raises:
        Exception: If (x, y) lies outside the image bounds -- negative
            *or* beyond the width/height.  (The docstring previously
            promised a CoordinateOffGridException that was never raised,
            and dead commented-out code for it has been removed.)
    """
    # Chained comparisons read clearer than the four-clause conjunction.
    if 0 <= x < img.width and 0 <= y < img.height:
        img.putpixel((x, y), blend_color(img.getpixel((x, y)), pixel))
    else:
        raise Exception(
            f'The coordinates ({x}, {y}) lay outside the image bounds '
            f'({img.width}, {img.height}).'
        )
34,454
async def source_feeder(tangled_object, source_name):
    """Update a Foo or Bar source node with values at random intervals.

    Performs four updates; before each one it sleeps for a random
    0.5--2 s, then assigns a random half-integer in [-10, 10] to the
    named source attribute.
    """
    for _ in range(4):
        delay = random.uniform(0.5, 2)
        await asyncio.sleep(delay)
        value = random.randint(-20, 20) / 2.0
        setattr(tangled_object, source_name, value)
34,455
def load_ascii(file: 'BinaryFile',  # pylint: disable=unused-argument,keyword-arg-before-vararg
               parser: 'Optional[Type[ASCIIParser]]' = None,
               type_hook: 'Optional[Dict[str, Type[BaseType]]]' = None,
               enum_namespaces: 'Optional[List[str]]' = None,
               bare: bool = False,
               *args: 'Any', **kwargs: 'Any') -> 'ASCIIInfo':
    """Parse ASCII log file.

    Args:
        file: Log file object opened in binary mode.
        parser (:class:`~zlogging.loader.ASCIIParser`, optional): Parser
            class; falls back to :class:`ASCIIParser` when ``None``.
        type_hook (:obj:`dict` mapping :obj:`str` and
            :class:`~zlogging.types.BaseType` class, optional): Bro/Zeek
            type parser hooks. User may customise subclasses of
            :class:`~zlogging.types.BaseType` to modify parsing behaviours.
        enum_namespaces (:obj:`List[str]`, optional): Namespaces to be
            loaded.
        bare (:obj:`bool`, optional): If ``True``, do not load ``zeek``
            namespace by default.
        *args: Variable length argument list (not used by this loader).
        **kwargs: Arbitrary keyword arguments (not used by this loader).

    Returns:
        The parsed ASCII log data.
    """
    parser_cls = ASCIIParser if parser is None else parser
    ascii_parser = parser_cls(type_hook, enum_namespaces, bare)
    return ascii_parser.parse_file(file)
34,456
def test_histcontrol(hist, xession):
    """Test HISTCONTROL=ignoredups,ignoreerr

    `hist.items()` only keeps commands that pass the HISTCONTROL filters,
    while the raw `hist.inps` / `hist.rtns` buffers record every appended
    command -- the assertions below exercise that distinction.
    """
    ignore_opts = ",".join(["ignoredups", "ignoreerr", "ignorespace"])
    xession.env["HISTCONTROL"] = ignore_opts
    assert len(hist) == 0

    # An error, items() remains empty
    hist.append({"inp": "ls foo", "rtn": 2})
    assert len(hist) == 0
    assert len(hist.inps) == 1
    assert len(hist.rtns) == 1
    assert 2 == hist.rtns[-1]

    # Success
    hist.append({"inp": "ls foobazz", "rtn": 0})
    assert len(hist) == 1
    assert len(hist.inps) == 2
    assert len(hist.rtns) == 2
    items = list(hist.items())
    assert "ls foobazz" == items[-1]["inp"]
    assert 0 == items[-1]["rtn"]
    assert 0 == hist.rtns[-1]

    # Error
    hist.append({"inp": "ls foo", "rtn": 2})
    assert len(hist) == 1
    items = list(hist.items())
    assert "ls foobazz" == items[-1]["inp"]
    assert 0 == items[-1]["rtn"]
    assert 2 == hist.rtns[-1]

    # File now exists, success
    hist.append({"inp": "ls foo", "rtn": 0})
    assert len(hist) == 2
    items = list(hist.items())
    assert "ls foo" == items[-1]["inp"]
    assert 0 == items[-1]["rtn"]
    assert 0 == hist.rtns[-1]

    # Success
    hist.append({"inp": "ls", "rtn": 0})
    assert len(hist) == 3
    items = list(hist.items())
    assert "ls" == items[-1]["inp"]
    assert 0 == items[-1]["rtn"]
    assert 0 == hist.rtns[-1]

    # Dup
    hist.append({"inp": "ls", "rtn": 0})
    assert len(hist) == 3

    # Success
    hist.append({"inp": "/bin/ls", "rtn": 0})
    assert len(hist) == 4
    items = list(hist.items())
    assert "/bin/ls" == items[-1]["inp"]
    assert 0 == items[-1]["rtn"]
    assert 0 == hist.rtns[-1]

    # Error
    hist.append({"inp": "ls bazz", "rtn": 1})
    assert len(hist) == 4
    items = list(hist.items())
    assert "/bin/ls" == items[-1]["inp"]
    assert 0 == items[-1]["rtn"]
    assert "ls bazz" == hist.inps[-1]
    assert 1 == hist.rtns[-1]

    # Error
    hist.append({"inp": "ls bazz", "rtn": -1})
    assert len(hist) == 4
    items = list(hist.items())
    assert "/bin/ls" == items[-1]["inp"]
    assert 0 == items[-1]["rtn"]
    assert -1 == hist.rtns[-1]

    # Success
    hist.append({"inp": "echo not secret", "rtn": 0, "spc": False})
    assert len(hist) == 5
    items = list(hist.items())
    assert "echo not secret" == items[-1]["inp"]
    assert 0 == items[-1]["rtn"]
    assert 0 == hist.rtns[-1]

    # Space
    hist.append({"inp": "echo secret command", "rtn": 0, "spc": True})
    assert len(hist) == 5
    items = list(hist.items())
    assert "echo not secret" == items[-1]["inp"]
    assert 0 == items[-1]["rtn"]
    assert 0 == hist.rtns[-1]
34,457
def differences_dict(input_dict):
    """Create a dictionary of combinations of readers to create bar graphs.

    For every case and every pair of reader formats within that case,
    records the absolute differences of the four sequence components
    (R0, X0, R1, X1) between the two readers.

    Args:
        input_dict: mapping case name -> {reader format -> {key -> 4-tuple}}.

    Returns:
        mapping case name -> {"<fmtA> vs <fmtB>" -> {"R0"/"X0"/"R1"/"X1":
        list of absolute differences}}.
    """
    # BUG FIX: comp_values was re-initialised inside the per-case loop,
    # so only the last case survived in the returned dict.
    comp_values = {}
    for each_case in input_dict.keys():
        comb = combinations(input_dict[each_case].keys(), 2)
        x = list(comb)
        comp_values[each_case] = {}
        for each in x:
            # Pair label uses only the part of each format name before '_'.
            name = each[0].split("_")[0] + " vs " + each[1].split("_")[0]
            comp_values[each_case][name] = {}
            comp_values[each_case][name]["R0"] = []
            comp_values[each_case][name]["X0"] = []
            comp_values[each_case][name]["R1"] = []
            comp_values[each_case][name]["X1"] = []
            for (k, v), (k1, v1) in zip(
                input_dict[each_case][each[0]].items(),
                input_dict[each_case][each[1]].items(),
            ):
                comp_values[each_case][name]["R0"].append(abs(v[0] - v1[0]))
                comp_values[each_case][name]["X0"].append(abs(v[1] - v1[1]))
                comp_values[each_case][name]["R1"].append(abs(v[2] - v1[2]))
                comp_values[each_case][name]["X1"].append(abs(v[3] - v1[3]))
    return comp_values
34,458
def available_memory():
    """ Returns total system wide available memory in bytes """
    import psutil
    vmem = psutil.virtual_memory()
    return vmem.available
34,459
def aslr_for_module(target, module):
    """ Get the aslr offset for a specific module

    - parameter target: lldb.SBTarget that is currently being debugged
    - parameter module: lldb.SBModule to find the offset for

    - returns: the offset as an int
    """
    # Offset = where the header actually landed minus where the file says
    # it should be.
    header = module.GetObjectFileHeaderAddress()
    return header.GetLoadAddress(target) - header.GetFileAddress()
34,460
def get_hypergraph_incidence_matrix(node_list: List[Node],
                                    hyperedge_list: List[Set[Node]]
                                    ) -> numpy.array:
    """Get the incidence matrix of a hypergraph.

    Entry (i, j) is 1 when node i belongs to hyperedge j, else 0.
    """
    index_of = {node: row for row, node in enumerate(node_list)}
    matrix = numpy.zeros((len(node_list), len(hyperedge_list)), dtype=int)
    for column, hyperedge in enumerate(hyperedge_list):
        rows = [index_of[node] for node in hyperedge]
        matrix[rows, column] = 1
    return matrix
34,461
def root_tree_by_phyla(T, phyla):
    """Root the tree next to the phylum that is as far apart as possible
    from the other phyla"""
    # Lowest common ancestor of each phylum's members.
    lca_of = {}
    for phylum in phyla.unique():
        members = tuple(phyla.index[phyla == phylum].values)
        lca_of[phylum] = T.get_common_ancestor(*members)

    # Pairwise distances between phylum LCAs.
    Dist = pd.DataFrame()
    for p1, lca1 in lca_of.items():
        for p2, lca2 in lca_of.items():
            Dist.loc[p1, p2] = T.get_distance(lca1, lca2)

    # Pick the phylum with the largest mean distance to all others.
    furthest_phylum = Dist.mean().idxmax()
    outgroup = lca_of[furthest_phylum]
    if not outgroup == T:
        T.set_outgroup(outgroup)
34,462
def fformat(last_data, last_records):
    """Build the HTML payload for the node overview page.

    @param last_data: dictionary(node_name => node's data segment)
    @param last_records: dictionary(node_name => timestamp, node when last
        transmitted); currently unused, kept for interface compatibility
    @return: html, utf-8 encoded bytes
    """
    nodelist = list(last_data.keys())
    # BUG FIX: on Python 3, repr(map(...)) yields '<map object ...>'
    # instead of the list literal the template expects.
    a = repr([str(node) for node in nodelist])
    b = ''.join(['<div id="' + x + '" class="node"></div>' for x in nodelist])
    return (X % (a, b)).encode('utf8')
34,463
def ceil(base):
    """Get the ceil of a number (accepts anything float() accepts)."""
    as_float = float(base)
    return math.ceil(as_float)
34,464
def process_raw_data(input_seqs,
                     scaffold_type=None,
                     percentile=None,
                     binarize_els=True,
                     homogeneous=False,
                     deflank=True,
                     insert_into_scaffold=True,
                     extra_padding=0,
                     pad_front=False,
                     report_loss=True,
                     report_times=True,
                     remove_files=True,
                     create_sample_of_size=None):
    """
    A wrapper function that:
    Takes raw data as retrieved from Carl de Boer's publication at
    https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE104878, and
    processes the sequences according to the custom arguments, pads them
    to same length, and writes them to an output file along with their
    expression levels (tab separated). The end of the file contains
    comments specifying the number of sequences in the file and the
    lengths of the padded sequences.

    Args:
    -----
        input_seqs (str) -- the absolute pathname of the file that
        contains all of the input sequences and their expression levels
        (tab separated).

        scaffold_type (str) -- the scaffold type (pTpA or Abf1TATA) that
        the input sequences had their expression levels measured in.

        percentile (float) -- the proportion of the raw input data to
        extract from the sequences with the highest and lowest expression
        levels. i.e if 'percentile=0.1' then the top 10 % of sequences
        with highest expression levels, and the bottom 10 % of sequences
        with lowest expression levels will be extracted from the raw
        input data. The resulting data file will contain ~ 20 % of the
        data as the raw input data.

        binarize_els (bool) -- if (and only if) a 'percentile' value is
        passed, this argument determines whether the expression level
        values (Els) will be binarized or not. If True (default),
        sequences with ELs in the top percentile will have their ELs
        binarized to 1, and sequences with ELs in the bottom percentile
        will have their ELs binarized to 0.

        homogeneous (bool) -- if True, only sequences of modal length
        will be processed. If False, all sequences will be processed
        regardless of length. Default: False.

        deflank (bool) -- if True, removes the constant flanking regions
        of the input sequences. Default: True.

        insert_into_scaffold (bool) -- if True inserts the input
        sequences into the appropriate scaffold. If False, the sequences
        are encoded as they are. Default: True.

        extra_padding (int) -- the number of 'P' characters greater than
        the maximum sequence length to pad each sequence to. Default: 0.

        pad_front (bool) -- whether to pad out the front (left hand side)
        or end (right hand side) of the sequences. If True, pads the
        front. Default: False (will pad the end).

        report_loss (bool) -- if True, reports the number of lines of
        data lost at each step in the process. Default: True.

        report_times (bool) -- if True, reports the time each step in the
        cleaning process takes. Default: True.

        remove_files (bool) -- if True, will remove intermediate files
        created in the process of processing raw data. Default: True.

        create_sample_of_size (int) -- if a number is passed, a sample of
        this size will be taken by pseudo-random from the file containing
        processed data, and written to a separate file.

    Returns:
    -----
        processed_data (str) -- the absolute path for the file containing
        processed sequences along with their expression levels.
    """
    # Assertions
    assert isinstance(input_seqs, str), ('Input file path name must be '
                                         'passed as a string.')
    assert os.path.exists(input_seqs), 'Input file does not exist.'
    assert isinstance(scaffold_type, str), ('Scaffold type must be passed as '
                                            'a string if specified.')
    assert scaffold_type == 'pTpA' or scaffold_type == 'Abf1TATA', 'Scaffold \
type must be specified as either "pTpA" or "Abf1TATA".'
    assert isinstance(percentile, (float, type(None))), ('The "percentile" '
                                                         'argument must be '
                                                         'passed as a float.')
    if percentile is not None:
        assert percentile < 0.5, '"percentile" must be less that 0.5'
    assert isinstance(homogeneous, bool), ('The homogeneous argument must be '
                                           'passed as a bool.')
    assert isinstance(deflank, bool), ('The deflank argument must be passed '
                                       'as a bool.')
    assert isinstance(insert_into_scaffold, bool), ('insert_into_scaffold '
                                                    'argument must be passed '
                                                    'as a bool.')
    assert isinstance(extra_padding, int), ('The number of extra vectors to '
                                            'pad each sequence by should be '
                                            'passed as an integer.')
    assert extra_padding >= 0, ('extra_padding must be passed as a non-'
                                'negative integer.')
    assert isinstance(pad_front, bool), ('The pad_front argument must be '
                                         'passed as a bool.')
    assert isinstance(report_loss, bool), ('The report_loss argument must be '
                                           'passed as a bool.')
    assert isinstance(report_times, bool), ('The report_times argument must '
                                            'be passed as a bool.')
    assert isinstance(remove_files, bool), ('The remove_files argument must '
                                            'be passed as a bool.')
    if create_sample_of_size is not None:
        assert isinstance(create_sample_of_size, int), ('Sample size must be '
                                                        'passed as an int')
    # Functionality
    print('Starting processing of raw data...')
    raw_data = input_seqs
    # Define final output file path
    time_stamp = get_time_stamp()
    relative_path = 'example/processed_data/' + time_stamp
    processed_data = os.path.join(ROOT_DIR, relative_path)
    # Create log file to write reports to
    if report_loss or report_times:
        report = smart_open(processed_data + '_process_report' + '.txt', 'w')
    # Initialize custom operations if specified (i.e loss + timing reports)
    if report_loss:
        loss_report = {}
        loss_report['Raw Data'] = get_seq_count(input_seqs)
    if report_times:
        t_init = t.time()
        t0 = t_init
    if remove_files:
        created_files = []  # keep track of the intermediate files created.
    # Pull out the top and bottom percentiles of data
    if percentile is not None:
        print('Pulling out the top and bottom percentiles...')
        df = organize.sort_by_exp_level(input_seqs)
        df = organize.discard_mid_data(df, percentile=percentile)
        processed_data += '_percentiles'
        if binarize_els:
            print('Binarizing expression levels...')
            df = organize.binarize_data(df)
            processed_data += '_els_binarized'
        input_seqs = organize.write_df_to_file(df)
        if report_loss:
            loss_report['Percentile Seqs'] = get_seq_count(input_seqs)
        if report_times:
            t1 = t.time()
            text = '\tFile created in %s s' % (t1 - t0)
            print(text)
            report.write('Top & bottom percentiles pulled...\n' + text + '\n')
            t0 = t1
        if remove_files:
            created_files.append(input_seqs)
    # Create new file of only homogeneous (same length) seqs
    if homogeneous:
        print('Pulling homogeneous sequences from input file...')
        input_seqs = organize.pull_homogeneous_seqs(input_seqs,
                                                    scaffold_type)
        processed_data += '_homogeneous'
        if report_loss:
            loss_report['Homogeneous Seqs'] = get_seq_count(input_seqs)
        if report_times:
            t1 = t.time()
            text = '\tFile created in %s s' % (t1 - t0)
            print(text)
            report.write('Homogeneous sequences pulled...\n' + text + '\n')
            t0 = t1
        if remove_files:
            created_files.append(input_seqs)
    # Remove all of the flanking regions from the input sequences
    if deflank:
        print('Removing flank regions from sequences...')
        input_seqs = build.remove_flanks_from_all_seqs(input_seqs,
                                                       scaffold_type)
        processed_data += '_deflanked'
        if report_loss:
            loss_report['Deflanked Seqs'] = get_seq_count(input_seqs)
        if report_times:
            t1 = t.time()
            text = '\tFile created in %s s' % (t1 - t0)
            print(text)
            report.write('Sequences deflanked...\n' + text + '\n')
            t0 = t1
        if remove_files:
            created_files.append(input_seqs)
    processed_data += '_sequences'
    # Insert sequences into appropriate scaffold
    if insert_into_scaffold:
        print('Inserting sequences into %s scaffold...' % (scaffold_type))
        input_seqs = build.insert_all_seq_into_one_scaffold(input_seqs,
                                                            scaffold_type)
        processed_data += '_inserted_into_%s_scaffold' % (scaffold_type)
        if report_loss:
            loss_report['Scaffold-Inserted Seqs'] = get_seq_count(input_seqs)
        if report_times:
            t1 = t.time()
            text = '\tFile created in %s s' % (t1 - t0)
            print(text)
            report.write('Seqs inserted into ' + scaffold_type +
                         'scaffold...\n')
            report.write(text + '\n')
            t0 = t1
        if remove_files:
            created_files.append(input_seqs)
    # Pad sequences
    if homogeneous and extra_padding == 0:
        # Homogeneous sequences with no extra padding are already uniform.
        pass
    else:
        print('Padding sequences...')
        input_seqs = build.pad_sequences(input_seqs,
                                         pad_front=pad_front,
                                         extra_padding=extra_padding)
        if not homogeneous:  # then they will have been padded
            processed_data += '_padded_at'
            if pad_front:
                processed_data += '_front'
            else:
                processed_data += '_back'
        if extra_padding != 0:
            processed_data += '_%s_extra' % (extra_padding)
        if report_loss:
            loss_report['Padded Seqs'] = get_seq_count(input_seqs)
        if report_times:
            t1 = t.time()
            text = '\tFile created in %s s' % (t1 - t0)
            print(text)
            report.write('Padded sequences...\n')
            report.write(text + '\n')
            t0 = t1
    # Remove intermediate files created in the process
    if remove_files:
        created_files.append(input_seqs)
    # Rename the final output file to reflect how data has been cleaned.
    processed_data += '_with_exp_levels.txt'
    # Report end of process and print final output file locations.
    if input_seqs != raw_data:  # i.e. if data has been processed in some way
        os.rename(input_seqs, processed_data)
        # Report end of process and print absolute path of processed data.
        text = ('\nRaw data successfully processed.\nLocation: %s\n'
                % (processed_data))
        print(text)
        if report_loss or report_times:
            report.write(text)
    else:  # If no processing was performed.
        text = '\nNo processing performed.\n'
        text += 'Change processing specifications and try again.'
        print(text)
        # NOTE(review): these two report.write calls are not guarded by
        # `report_loss or report_times`, so `report` would be unbound if
        # both flags were False -- confirm intended behaviour.
        report.write(text + '\n')
        text = 'Raw data remains unchanged.'
        print(text)
        report.write(text + '\n')
        text = 'Location : %s' % (raw_data)
        print(text)
        if report_loss or report_times:
            report.write(text + '\n')
    # Write the number of seqs and length of seqs to the start of file
    # NOTE(review): in the no-processing branch `processed_data` was never
    # created on disk -- confirm this call copes with a missing file.
    organize.write_num_and_len_of_seqs_to_file(processed_data)
    # Report loss
    if report_loss:
        report.write('\nLine counts at each step of the process:\n')
        for category in loss_report.keys():
            curr_count = loss_report[category]
            if category == 'Raw Data':
                report.write('\t%s : %s\n' % (category, curr_count))
                prev_count = curr_count
            else:
                report.write('\t%s : %s (%s lines lost since last step)\n'
                             % (category, curr_count,
                                (prev_count - curr_count)))
                prev_count = curr_count
    # Remove intermediate files
    if remove_files:
        print('\nRemoving intermediate files...')
        organize.remove_file_list(created_files)
        print('Files successfully removed.')
    print('Process complete.')
    # Report total time taken
    if report_times:
        t_final = t.time()
        text = '\nTotal processing time : %s s' % (t_final - t_init)
        print(text)
        report.write(text)
        print('Please find the process report in the same directory as the'
              ' output file for reports of data losses and timings.')
    if report_times or report_loss:
        report.close()
    # Create sample data
    if create_sample_of_size is not None:
        size = create_sample_of_size
        print('\n\nCreating sample of size %s ...' % str(size))
        sample_seqs = organize.create_sample_data(processed_data, size)
        print('\nSample data successfully created.')
        print('\nLocation: %s \n' % (sample_seqs))
    return processed_data
34,465
def train(
        dir,
        input_s3_dir,
        output_s3_dir,
        hyperparams_file,
        ec2_type,
        volume_size,
        time_out,
        docker_tag,
        aws_role,
        external_id,
        base_job_name,
        job_name,
        use_spot_instances=False,
        metric_names=None,
        tags=None
):
    """
    Trains ML model(s) on SageMaker

    :param dir: [str], source root directory
    :param input_s3_dir: [str], S3 location to input data
    :param output_s3_dir: [str], S3 location to save output (models, etc)
    :param hyperparams_file: [str], path to hyperparams json file
    :param ec2_type: [str], ec2 instance type. Refer to:
        https://aws.amazon.com/sagemaker/pricing/instance-types/
    :param volume_size: [int], size in GB of the EBS volume
    :param time_out: [int], time-out in seconds
    :param docker_tag: [str], the Docker tag for the image
    :param aws_role: [str], the AWS role assumed by SageMaker while training
    :param external_id: [str], Optional external id used when using an IAM role
    :param base_job_name: [str], Optional prefix for the SageMaker training job
    :param job_name: [str], Optional name for the SageMaker training job;
        overrides `base_job_name`
    :param use_spot_instances: [bool, default=False], whether to use SageMaker
        Managed Spot instances for training. More information:
        https://docs.aws.amazon.com/sagemaker/latest/dg/model-managed-spot-training.html
    :param metric_names: [list[str], default=None], Optional list of string
        metric names
    :param tags: [optional[list[dict]], default: None], List of tags for
        labeling a training job. For more, see
        https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html

    :return: [str], S3 model location
    """
    config = _read_config(dir)
    if hyperparams_file:
        hyperparams_dict = _read_hyperparams_config(hyperparams_file)
    else:
        hyperparams_dict = None

    sage_maker_client = sagemaker.SageMakerClient(
        config.aws_profile, config.aws_region, aws_role, external_id
    )
    return sage_maker_client.train(
        image_name='{}:{}'.format(config.image_name, docker_tag),
        input_s3_data_location=input_s3_dir,
        train_instance_count=1,
        train_instance_type=ec2_type,
        train_volume_size=volume_size,
        train_max_run=time_out,
        output_path=output_s3_dir,
        hyperparameters=hyperparams_dict,
        base_job_name=base_job_name,
        job_name=job_name,
        use_spot_instances=use_spot_instances,
        tags=tags,
        metric_names=metric_names
    )
34,466
def wikitext_page(d, e, title, fmt='wikitext'):
    """Create infobox with stats about a single page from a category.

    Create infobox with stats about a single page from a category.
    Currently only supports formatting as wikitext. Only returns the
    string of the text, does not save any files or modify other data
    structures.

    Note: `e` is accepted but not used by this function; for an
    unrecognised `fmt` the function implicitly returns None.
    """
    datum = d['stats']['scrape_start'][:10]
    date_from = d['stats']['date_from']
    date_to = d['stats']['date_to']
    date_days = d['stats']['pv_days']
    desc = f"Sidvisningsstatistik {datum} för tidsperioden {date_from}--{date_to} ({date_days} dagar)\n\n"
    page_stats = d['pages'][title]['stats']
    if fmt == 'wikitext':
        text = f"{desc}\n\n\n"
        # One wikitable row per statistic; optional language rows only when
        # the corresponding length key is present.
        table = table_start([colspan('Sidinformation')], [],
                            cellpadding=3, cls='wikitable')
        table += f"|Visningar || align='right' | {page_stats['pageviews_sv']}\n|-\n"
        table += f"|Längd || align='right' | {page_stats['len_sv']}\n|-\n"
        table += f"|Kvalitet || align='right' | {page_stats['quality']}\n|-\n"
        if 'len_fi' in page_stats:
            table += f"|Visningar Finska || align='right' | {page_stats['pageviews_fi']}\n|-\n"
            table += f"|Längd Finska || align='right' | {page_stats['len_fi']}\n|-\n"
        if 'len_en' in page_stats:
            table += f"|Visningar Engelska || align='right' | {page_stats['pageviews_en']}\n|-\n"
            table += f"|Längd Engelska || align='right' | {page_stats['len_en']}\n|-\n"
        if 'len_de' in page_stats:
            table += f"|Visningar Tyska || align='right' | {page_stats['pageviews_de']}\n|-\n"
            table += f"|Längd Tyska || align='right' | {page_stats['len_de']}\n|-\n"
        table += f"|Kategorier || align='right' | {page_stats['categories_cnt']}\n|-\n"
        table += f"|Kontributörer || align='right' | {page_stats['contributors_tot']}\n|-\n"
        table += f"|Antal andra språk || align='right' | {page_stats['langlinks_cnt']}\n|-\n"
        table += f"|Externa länkar || align='right' | {page_stats['extlinks_cnt']}\n|-\n"
        table += f"|Bilder || align='right' | {page_stats['images_cnt']}\n|-\n"
        table += f"|Länkar || align='right' | {page_stats['links_cnt']}\n|-\n"
        table += f"|Omdirigeringar || align='right' | {page_stats['redirects_cnt']}\n|-\n"
        table += f"|Länkar till denna sida || align='right' | {page_stats['linkshere_cnt']}\n|-\n"
        table += "|}\n\n"
        text += table
        text += """Kvalitet räknas ut med formeln:
Kvalitet = 3 * antalet kategorier + 4 * antalet bilder + 4 * antalet andra språk + 1 * antalet länkar + 1 * antalet länkar till denna sida + 2 * externa länkar + 3 * antalet omdirigeringar + 1 * antalet kontributörer
"""
        return text
    elif fmt == 'print':
        # Plain-text rendering of the same statistics.
        text = f"Visningar---------------{page_stats['pageviews_sv']}\n"
        text += f"Längd-------------------{page_stats['len_sv']}\n"
        text += f"Kvalitet----------------{page_stats['quality']}\n"
        if 'len_fi' in page_stats:
            text += f"Visningar Finska--------{page_stats['pageviews_fi']}\n"
            text += f"Längd Finska------------{page_stats['len_fi']}\n"
        if 'len_en' in page_stats:
            text += f"Visningar Engelska------{page_stats['pageviews_en']}\n"
            text += f"Längd Engelska----------{page_stats['len_en']}\n"
        if 'len_de' in page_stats:
            text += f"Visningar Tyska---------{page_stats['pageviews_de']}\n"
            text += f"Längd Tyska-------------{page_stats['len_de']}\n"
        text += f"Kategorier--------------{page_stats['categories_cnt']}\n"
        text += f"Kontributörer-----------{page_stats['contributors_tot']}\n"
        text += f"Antal andra språk-------{page_stats['langlinks_cnt']}\n"
        text += f"Externa länkar----------{page_stats['extlinks_cnt']}\n"
        text += f"Bilder------------------{page_stats['images_cnt']}\n"
        text += f"Länkar------------------{page_stats['links_cnt']}\n"
        text += f"Omdirigeringar----------{page_stats['redirects_cnt']}\n"
        text += f"Länkar till denna sida--{page_stats['linkshere_cnt']}\n"
        return text
34,467
def _quadratic(
    self: qp.utils.Minimize[Vector],
    direction: Vector,
    step_size_test: float,
    state: qp.utils.MinimizeState[Vector],
) -> Tuple[float, float, bool]:
    """Take a quadratic step calculated from an energy-only test step.
    Adjusts step size to back off if energy increases.

    Returns (final energy, cumulative step size taken along `direction`,
    success flag).
    """
    # Check initial point:
    step_size_prev = 0.0  # cumulative progress along direction
    E = self._sync(float(state.energy))
    E_orig = E
    g_d = self._sync(state.gradient.overlap(direction))
    if g_d >= 0.0:
        # Not a descent direction: abort without moving.
        qp.log.info(
            f"{self.name}: Bad step direction with positive"
            " gradient component"
        )
        return E_orig, step_size_prev, False

    # Test step and quadratic step size prediction:
    # NOTE(review): assumes self.step_size.n_adjust >= 1; with 0 the
    # np.isfinite(E_test) check after this loop would hit an unbound
    # local -- confirm n_adjust is validated elsewhere.
    for i_step in range(self.step_size.n_adjust):
        # Check test step size:
        if step_size_test < self.step_size.minimum:
            qp.log.info(f"{self.name}: Test step size below threshold.")
            return E, step_size_prev, False
        # Try test step (steps are relative to current position):
        self.step(direction, step_size_test - step_size_prev)
        step_size_prev = step_size_test
        E_test = self._compute(state, energy_only=True)  # gradient not needed
        # Check if step left valid domain:
        if not np.isfinite(E_test):
            # Back off from difficult region
            step_size_test *= self.step_size.reduce_factor
            qp.log.info(
                f"{self.name}: Test step failed with"
                f" {state.energy.name} = {E_test:.3e};"
                f" reducing test step size to {step_size_test:.3e}."
            )
            continue
        # Predict step size (quadratic based on gradient and two energies):
        step_size = (
            0.5 * (step_size_test ** 2) * g_d
            / (step_size_test * g_d + E - E_test)
        )
        # Check reasonableness of predicted step:
        if step_size < 0.0:
            # Curvature has wrong sign, but E_test < E, so accept step
            # for now and try descending further next time:
            step_size_test *= self.step_size.grow_factor
            qp.log.info(
                f"{self.name}: Wrong curvature in test step,"
                f" growing test step size to {step_size_test:.3e}."
            )
            E = self._compute(state, energy_only=False)
            return E, step_size_prev, True
        if step_size / step_size_test > self.step_size.grow_factor:
            step_size_test *= self.step_size.grow_factor
            qp.log.info(
                f"{self.name}: Predicted step size growth"
                f" > {self.step_size.grow_factor},"
                f" growing test step size to {step_size_test:.3e}."
            )
            continue
        if step_size / step_size_test < self.step_size.reduce_factor:
            step_size_test *= self.step_size.reduce_factor
            qp.log.info(
                f"{self.name}: Predicted step size reduction"
                f" < {self.step_size.reduce_factor},"
                f" reducing test step size to {step_size_test:.3e}."
            )
            continue
        # Successful test step:
        break
    if not np.isfinite(E_test):
        qp.log.info(
            f"{self.name}: Test step failed {self.step_size.n_adjust}"
            " times. Quitting step."
        )
        return E_orig, step_size_prev, False

    # Actual step (retry with reduced size until finite and non-increasing):
    for i_step in range(self.step_size.n_adjust):
        # Try the step:
        self.step(direction, step_size - step_size_prev)
        step_size_prev = step_size
        E = self._compute(state, energy_only=False)
        if not np.isfinite(E):
            step_size *= self.step_size.reduce_factor
            qp.log.info(
                f"{self.name}: Step failed with"
                f" {state.energy.name} = {E:.3e};"
                f" reducing step size to {step_size:.3e}."
            )
            continue
        if E > E_orig + self.energy_threshold:
            step_size *= self.step_size.reduce_factor
            qp.log.info(
                f"{self.name}: Step increased"
                f" {state.energy.name} by {E - E_orig:.3e};"
                f" reducing step size to {step_size:.3e}."
            )
            continue
        # Step successful:
        break
    if (not np.isfinite(E)) or (E > E_orig + self.energy_threshold):
        qp.log.info(
            f"{self.name}: Step failed to reduce {state.energy.name}"
            f" after {self.step_size.n_adjust} attempts."
            " Quitting step."
        )
        return E_orig, step_size_prev, False
    return E, step_size_prev, True
34,468
def create_image(ds: "Dataset", data_element: "DataElement") -> "gdcm.Image":
    """Return a ``gdcm.Image``.

    Parameters
    ----------
    ds : dataset.Dataset
        The :class:`~pydicom.dataset.Dataset` containing the Image Pixel
        module.
    data_element : gdcm.DataElement
        The ``gdcm.DataElement`` *Pixel Data* element.

    Returns
    -------
    gdcm.Image
    """
    image = gdcm.Image()
    # Single-frame data is 2D; multi-frame data gets a third dimension.
    number_of_frames = getattr(ds, 'NumberOfFrames', 1)
    image.SetNumberOfDimensions(2 if number_of_frames == 1 else 3)
    image.SetDimensions((ds.Columns, ds.Rows, number_of_frames))
    image.SetDataElement(data_element)
    pi_type = gdcm.PhotometricInterpretation.GetPIType(
        ds.PhotometricInterpretation
    )
    image.SetPhotometricInterpretation(
        gdcm.PhotometricInterpretation(pi_type)
    )
    tsyntax = ds.file_meta.TransferSyntaxUID
    # str.__str__ bypasses any __str__ override on the UID subclass so
    # gdcm receives the plain UID string value.
    ts_type = gdcm.TransferSyntax.GetTSType(str.__str__(tsyntax))
    image.SetTransferSyntax(gdcm.TransferSyntax(ts_type))
    pixel_format = gdcm.PixelFormat(
        ds.SamplesPerPixel, ds.BitsAllocated, ds.BitsStored, ds.HighBit,
        ds.PixelRepresentation
    )
    image.SetPixelFormat(pixel_format)
    # PlanarConfiguration is conditional in DICOM; only set when present.
    if 'PlanarConfiguration' in ds:
        image.SetPlanarConfiguration(ds.PlanarConfiguration)

    return image
34,469
def linear_interpolate_by_datetime(datetime_axis, y_axis, datetime_new_axis,
                                   enable_warning=True):
    """A datetime-version that takes datetime object list as x_axis"""
    # Convert both datetime axes to numeric timestamps, then delegate to
    # the numeric interpolator.
    numeric_axis = [totimestamp(point) for point in datetime_axis]
    numeric_new_axis = [totimestamp(point) for point in datetime_new_axis]
    return linear_interpolate(numeric_axis, y_axis, numeric_new_axis,
                              enable_warning=enable_warning)
34,470
def calculate_index(
    target_ts: pd.Timestamp, timestamps: pd.DatetimeIndex
) -> pd.Timestamp:
    """
    Return the first index value after the target timestamp if the exact
    timestamp is not available; the last index value if the target lies
    beyond every available timestamp.
    """
    if target_ts in timestamps:
        return target_ts
    later = timestamps[timestamps > target_ts]
    # Nothing after the target: fall back to the last available timestamp.
    if len(later) == 0:
        return timestamps[-1]
    return later[0]
34,471
def walk_through_package(package):
    """
    Get the documentation for each of the modules in the package.

    Args:
        package: An imported python package.

    Returns:
        output: An OrderedDict mapping each module name to the
        documentation produced by ``getmodule``.
    """
    docs = OrderedDict()
    for module_name, reference in pydoc.inspect.getmembers(
            package, pydoc.inspect.ismodule):
        docs[module_name] = getmodule(module_name, reference)
    return docs
34,472
def extract_images_2(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth].

    Args:
        f: A file object that can be passed into a gzip reader.

    Returns:
        data: A 4D uint8 numpy array [index, y, x, depth].

    Raises:
        ValueError: If the bytestream does not start with 2051.
    """
    print('Extracting', f.name)
    with gzip.GzipFile(fileobj=f) as stream:
        # MNIST image files start with the magic number 2051.
        magic = _read32(stream)
        if magic != 2051:
            raise ValueError('Invalid magic number %d in MNIST image file: %s' %
                             (magic, f.name))
        count = np.int64(_read32(stream))
        height = _read32(stream)
        width = _read32(stream)
        raw = stream.read(height * width * count)
    pixels = np.frombuffer(raw, dtype=np.uint8)
    return pixels.reshape(count, height, width, 1)
34,473
def slope_finder(station):
    """Compute the slope of a least-squares degree-1 polynomial fit to
    recent water level data and report its sign.

    Fetches the last 2 days of level measurements for *station*, fits a
    linear polynomial, and inspects the fitted coefficient.

    Returns:
        True if the inspected coefficient is >= 0, False if it is
        negative, or None when the data could not be fetched or fitted.
    """
    try:
        dt = 2  # look-back window in days
        dates, levels = fetch_measure_levels(station.measure_id,
                                             dt=datetime.timedelta(days=dt))
        slope = polyfit(dates, levels, 1)
        # NOTE(review): assumes this project's polyfit places the slope at
        # index 1 -- confirm against its return convention (numpy.polyfit
        # would put the degree-1 coefficient at index 0).
        if slope[1] >= 0:
            return True
        else:
            return False
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit). Fetch or fit failed.
        return None
34,474
def get_attr_counts(datas, attr):
    """
    Count occurrences of each value of ``attr`` across the samples.

    List-valued attributes are expanded: each element ``v`` is counted
    under the key ``"<attr>-<v>"``; scalar values are counted under the
    value itself.

    :param datas:
    :type datas: list[BaseDataSample]
    :param attr:
    :type attr: str
    :return: dict mapping value (or ``attr-element``) to its count.
    """
    counts = {}
    for sample in datas:
        value = sample.get_value(attr)
        if isinstance(value, list):
            keys = [attr + "-" + v for v in value]
        else:
            keys = [value]
        for key in keys:
            counts[key] = counts.get(key, 0) + 1
    return counts
34,475
def restart_processes():
    """
    Restart every configured service on the remote server via systemctl.
    """
    for svc in env.services_to_restart:
        command = "/bin/systemctl restart {}.service".format(svc)
        sudo(command, shell=False)
34,476
def split(string: str, separator: str = " ") -> list:
    """
    Split ``string`` on every occurrence of ``separator`` (a single
    character, defaulting to a space), mirroring ``str.split(separator)``:
    leading, trailing and consecutive separators yield empty fields.
    (The previous implementation silently dropped the final empty field
    after a trailing separator and returned ``[]`` for ``""``.)

    >>> split("apple#banana#cherry#orange",separator='#')
    ['apple', 'banana', 'cherry', 'orange']

    >>> split("Hello there")
    ['Hello', 'there']

    >>> split("11/22/63",separator = '/')
    ['11', '22', '63']

    >>> split("12:43:39",separator = ":")
    ['12', '43', '39']

    >>> split("a,b,", separator = ",")
    ['a', 'b', '']
    """
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
    # Always emit the final field, even when it is empty.
    split_words.append(string[last_index:])
    return split_words
34,477
def _find_class(name: str, target: ast.Module) -> t.Tuple[int, ast.ClassDef]: """Returns tuple containing index of classdef in the module and the ast.ClassDef object""" for idx, definition in enumerate(target.body): if isinstance(definition, ast.ClassDef) and definition.name == name: return idx, definition
34,478
def get_floor_reference_points():
    """Return 4 floor reference points previously calibrated with baxter's arm.

    Reads four calibration points (one per line, space-separated values,
    x first then y) from ``calibration.txt`` and sets the global floor
    height ``Z`` to a hard-coded calibrated value. All Z coordinates are
    assumed approximately equal (the table is assumed to be level).

    Returns:
        [[x1, y1], [x2, y2], [x3, y3], [x4, y4]] -- the XY coordinates of
        the four reference points.
    """
    global Z  # This declaration is needed to modify the global variable Z
    global floor_reference_points  # Maybe erase.
    global floor_reference_orientations  # Maybe erase.

    # Calibrated floor height (previously measured hand Z position).
    Z = -0.15113003072395247
    print(Z)  # print() works in both Python 2 and 3 for a single value

    filename = "/home/sampath/midca/examples/_gazebo_baxter/calibration.txt"
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(filename, 'r') as f:
        points = [f.readline().split(' ') for _ in range(4)]
    return [[float(p[0]), float(p[1])] for p in points]
34,479
def tidy_osx_command_line_tools_command(client: TidyClient, **kwargs) -> DemistoResult:
    """
    Install OSX command line tools

    Args:
        client: Tidy client object.
        **kwargs: command kwargs.

    Returns:
        DemistoResults: Demisto structured response.
    """
    return parse_response(
        response=client.osx_command_line_tools(),
        human_readable_name="OSx command line tools",
        installed_software="command line tools",
        additional_vars={},
    )
34,480
def grid_search(parameters, mlflow_client, experiment_name, use_cache=False, result_file_path=Path("./output/results_reader.csv"), gpu_id=-1, elasticsearch_hostname="localhost", elasticsearch_port=9200, yaml_dir_prefix="./output/pipelines/retriever_reader"):
    """
    Returns a generator of tuples [(id1, x1, v1), ...] where id1 is the run
    id, the lists xi are the parameter values for each evaluation and the
    dictionaries vi are the run results.

    The parameter values for each successive run are determined by a grid
    search method.

    Args:
        parameters: dict of parameter-name -> list of candidate values
            (expanded with sklearn's ParameterGrid).
        mlflow_client: MLflow client used to look up past runs/metrics.
        experiment_name: MLflow experiment to search past runs in.
        use_cache: when True, configurations already recorded in MLflow
            are not re-run; their stored metrics are yielded instead.
        result_file_path: CSV file where each new run's results are
            appended (debugging aid).
        gpu_id: GPU passed through to ``single_run`` (-1 presumably means
            CPU -- confirm against ``single_run``).
        elasticsearch_hostname: Elasticsearch host for the retriever.
        elasticsearch_port: Elasticsearch port for the retriever.
        yaml_dir_prefix: directory of the pipeline YAML definitions.

    Yields:
        (run_id, param_dict, results_dict) for every grid point.
    """
    parameters_grid = list(ParameterGrid(param_grid=parameters))

    # Deterministic ids let a configuration be matched to a past MLflow run.
    list_run_ids = create_run_ids(parameters_grid)
    list_past_run_names = get_list_past_run(mlflow_client, experiment_name)

    for idx, param in tqdm(
        zip(list_run_ids, parameters_grid),
        total=len(list_run_ids),
        desc="GridSearch",
        unit="config",
    ):
        enriched_param = add_extra_params(param)
        if (
            idx in list_past_run_names.keys() and use_cache
        ):  # run already done and caching enabled
            logging.info(
                f"Config {param} already done and found in mlflow. Not doing it again."
            )
            # Log again run with previous results
            previous_metrics = mlflow_client.get_run(list_past_run_names[idx]).data.metrics

            yield (idx, param, previous_metrics)

        else:  # run not already done, or USE_CACHE set to False or not set
            logging.info(f"Doing run with config : {param}")
            run_results = single_run(param,
                                     gpu_id = gpu_id,
                                     elasticsearch_hostname = elasticsearch_hostname,
                                     elasticsearch_port = elasticsearch_port,
                                     yaml_dir_prefix = yaml_dir_prefix)

            # For debugging purpose, we keep a copy of the results in a csv form
            save_results(result_file_path=result_file_path,
                         results_list={**run_results, **enriched_param})

            # update list of past experiments so later grid points can hit
            # the cache within this same generator
            list_past_run_names = get_list_past_run(mlflow_client, experiment_name)

            yield (idx, param, run_results)
34,481
def nav_entries(context):
    """
    Inject the dynamic nav bar registry into the template context and
    return the context.
    """
    context["nav_registry"] = nav_registry
    return context
34,482
def get_largest_component(graph: ig.Graph, **kwds: Any) -> ig.Graph:
    """Get largest component of a graph.

    ``**kwds`` are passed to :py:meth:`igraph.Graph.components`.
    On ties the first largest component wins (matching the strict ``>``
    scan of the original implementation).
    """
    largest = max(graph.components(**kwds), key=len, default=None)
    return graph.induced_subgraph(largest)
34,483
def test_download_file(app, default_user, _get_user_mock):
    """Test download_file view.

    Covers three cases: missing access token (401), wrong token (403),
    and a valid token where the request is proxied to the workflow
    service (mocked to return 200).
    """
    with app.test_client() as client:
        # No access token supplied -> unauthorized.
        with patch("reana_server.rest.workflows.requests"):
            res = client.get(
                url_for(
                    "workflows.download_file",
                    workflow_id_or_name="1",
                    file_name="test_download",
                ),
                query_string={"file_name": "test_upload.txt",},
            )
            assert res.status_code == 401

        # Invalid access token -> forbidden.
        with patch("reana_server.rest.workflows.requests"):
            res = client.get(
                url_for(
                    "workflows.download_file",
                    workflow_id_or_name="1",
                    file_name="test_download",
                ),
                query_string={
                    "file_name": "test_upload.txt",
                    "access_token": "wrongtoken",
                },
            )
            assert res.status_code == 403

        # Valid token: the view should forward the request downstream
        # exactly once; the downstream response is mocked.
        requests_mock = Mock()
        requests_response_mock = Mock()
        requests_response_mock.status_code = 200
        requests_response_mock.json = Mock(return_value={"message": "File downloaded."})
        requests_mock.get = Mock(return_value=requests_response_mock)
        with patch(
            "reana_server.rest.workflows.requests", requests_mock
        ) as requests_client:
            res = client.get(
                url_for(
                    "workflows.download_file",
                    workflow_id_or_name="1",
                    file_name="test_download",
                ),
                query_string={"access_token": default_user.access_token},
            )
            requests_client.get.assert_called_once()
            assert requests_client.get.return_value.status_code == 200
34,484
def hiring_contests():
    """Gets all the hiring challenges from all the available platforms."""
    contests_data = get_contests_data()
    hiring = []
    # A contest counts as a hiring challenge when "hiring" appears as a
    # whole word in its lower-cased name; active contests come first,
    # then pending ones.
    for contest in contests_data["active"] + contests_data["pending"]:
        if "hiring" in contest["contest_name"].lower().split():
            hiring.append(contest)
    return hiring
34,485
def _find_quantized_op_num(model, white_list, op_count=0): """This is a helper function for `_fallback_quantizable_ops_recursively` Args: model (object): input model white_list (list): list of quantizable op types in pytorch op_count (int, optional): count the quantizable op quantity in this module Returns: the quantizable op quantity in this module """ quantize_op_num = op_count for name_tmp, child_tmp in model.named_children(): if type(child_tmp) in white_list \ and not (isinstance(child_tmp, torch.quantization.QuantStub) or isinstance(child_tmp, torch.quantization.DeQuantStub)): quantize_op_num += 1 else: quantize_op_num = _find_quantized_op_num( child_tmp, white_list, quantize_op_num) return quantize_op_num
34,486
def make_withdrawal(account):
    """Adjusts account balance for withdrawal.

    Prompts for a withdrawal amount, validates that it is positive and
    covered by the current balance, then deducts it; otherwise exits.

    Arg:
        account(dict): contains pin and balance for account

    Return:
        account(dict): returns account with balance adjusted for withdrawal
    """
    # Prompt the user for the withdrawal amount.
    amount = float(questionary.text("How much would you like to withdraw?").ask())

    # Guard: non-positive amounts are invalid.
    if amount <= 0.0:
        sys.exit("This is not a valid withdrawal amount. Please try again.")

    # Guard: insufficient funds.
    if amount > account["balance"]:
        sys.exit(
            "You do not have enough money in your account to make this withdrawal. Please try again."
        )

    account["balance"] -= amount
    print("Your withdrawal was successful!")
    return account
34,487
def monta_reacao(coef, form):
    """
    Assemble the printable string for a chemical reaction from arrays
    produced by the reaction-drawing methods.

    General tuple index (combustion/salification):
    0: fuel/acid | 1: oxygen/base | 2: carbon dioxide/salt | 3: water

    :param coef: array with the coefficients of the substances.
    :param form: array with the formulas of the substances.
    :return: string ready to be printed adequately, or ``None`` (with a
        printed notice) when there are not exactly four substances.
    """
    if len(coef) != 4:
        print('Formatação de reações com mais ou menos que quatro substâncias ainda não gerada.')
        return None
    # Coefficient 1 is conventionally omitted from chemical equations.
    termos = [('' if c == 1 else str(c)) + ' ' + f for c, f in zip(coef, form)]
    return termos[0] + ' + ' + termos[1] + ' → ' + termos[2] + ' + ' + termos[3]
34,488
def test_process_bulk_queue_errors(app, queue):
    """Test error handling during indexing.

    One record contains an unresolvable JSON reference and should fail to
    index; the valid record must still be indexed, so the bulk call sees
    exactly one action.
    """
    with app.app_context():
        # Create a test record
        r1 = Record.create({
            'title': 'invalid',
            'reffail': {'$ref': '#/invalid'}})
        r2 = Record.create({
            'title': 'valid',
        })
        db.session.commit()

        RecordIndexer().bulk_index([r1.id, r2.id])

        ret = {}

        # Capture the actions the indexer would send to Elasticsearch.
        def _mock_bulk(client, actions_iterator, **kwargs):
            ret['actions'] = list(actions_iterator)
            return len(ret['actions'])

        with patch('invenio_indexer.api.bulk', _mock_bulk):
            # Exceptions are caught: only the valid record is indexed.
            assert RecordIndexer().process_bulk_queue() == 1
            assert len(ret['actions']) == 1
            assert ret['actions'][0]['_id'] == str(r2.id)
34,489
def list_to_string(the_list):
    """Converts list into one string.

    Each element is str()-converted and followed by ", "; note that the
    separator after the final element is kept (matching the original
    behavior).
    """
    return "".join(str(item) + ", " for item in the_list)
34,490
def pretty_print_subkey_scores(np_array, limit_rows=20, descending=True):
    """
    Print score matrix as a nice table.

    :param np_array: 2D score matrix (rows = subkeys, cols = guess values)
    :param limit_rows: number of top-ranked guesses printed per subkey
    :param descending: sort best-score-first when True
    :return:
    """
    if type(np_array) != np.ndarray:
        raise TypeError("Expected np.ndarray")
    if len(np_array.shape) != 2:
        raise ValueError("Expected 2D array")

    print('')
    num_subkeys, num_guess_values = np_array.shape

    # For every subkey keep the top `limit_rows` (score, guess_byte) pairs.
    ranked = []
    for subkey in range(num_subkeys):
        pairs = zip(np_array[subkey, :], range(num_guess_values))
        ranked.append(sorted(pairs, key=lambda f: f[0],
                             reverse=descending)[0:limit_rows])

    # Header: subkey column indices.
    for subkey in range(num_subkeys):
        print(" {:>2d} ".format(subkey), end='')
    print("\n" + "-"*192)

    # Body: one printed row per rank, one column per subkey.
    for rank in range(limit_rows):
        for subkey in range(num_subkeys):
            score, byte = ranked[subkey][rank]
            print(" {:>4.2f} ({:02x}) |".format(float(score), byte), end='')
        print('')
34,491
def test_new_feature():
    """
    Test that a valid feature function handle is returned when adding new feat.
    """
    # NOTE(review): test body not implemented yet -- currently an empty
    # placeholder that always passes.
34,492
def _op_select_format(kernel_info):
    """
    call op's op_select_format to get op supported format

    Args:
        kernel_info (dict): kernel info load by json string

    Returns:
        op supported format ("" when the op module does not expose an
        ``op_select_format`` function)

    Raises:
        TBEException: wraps any error raised while locating or calling
            the op implementation.
    """
    try:
        op_name = kernel_info['op_info']['name']
        te_set_version(kernel_info["op_info"]["socVersion"])
        impl_path = build_in_impl_path
        custom_flag = False
        # A custom implementation file overrides the built-in impl package.
        if 'impl_path' in kernel_info and kernel_info['impl_path'] is not None:
            op_impl_path = os.path.realpath(kernel_info['impl_path'])
            if os.path.isfile(op_impl_path):
                path, file_name = os.path.split(op_impl_path)
                op_name, _ = os.path.splitext(file_name)
                impl_path = path
                custom_flag = True
        if impl_path not in sys.path:
            sys.path.insert(0, impl_path)

        # Import the op module from the custom path or the built-in
        # "impl" package.
        if custom_flag:
            op_module = __import__(op_name)
        else:
            op_module = __import__("impl." + op_name, globals(), locals(),
                                   [op_name], 0)
        # get function
        if not hasattr(op_module, "op_select_format"):
            return ""
        op_func = getattr(op_module, "op_select_format", None)

        # call function
        inputs_args = get_args(kernel_info['op_info'], 'inputs')
        outputs_args = get_args(kernel_info['op_info'], 'outputs')
        attrs_args = get_args(kernel_info['op_info'], 'attrs')
        kernel_name = kernel_info['op_info']['kernel_name']
        ret = op_func(*inputs_args, *outputs_args, *attrs_args,
                      kernel_name=kernel_name)
    except Exception as e:
        # Chain the original exception so its traceback is preserved; the
        # previous version dropped the cause and had a no-op
        # `finally: pass` which has been removed.
        raise TBEException(str(e)) from e
    return ret
34,493
def test_k_command_line_mode(vim_bot):
    """Select line up.

    Starting from line 3, enters visual-line mode, extends the selection
    two lines up with ``2k``, yanks it, and checks the clipboard holds the
    three selected lines.
    """
    main, editor_stack, editor, vim, qtbot = vim_bot
    editor.stdkey_backspace()
    # Position the cursor on line 3, a couple of columns in.
    editor.go_to_line(3)
    editor.moveCursor(QTextCursor.StartOfLine, QTextCursor.KeepAnchor)
    qtbot.keyPress(editor, Qt.Key_Right)
    qtbot.keyPress(editor, Qt.Key_Right)
    cmd_line = vim.get_focus_widget()
    # V enters visual-line mode; 2k extends two lines up; y yanks.
    qtbot.keyClicks(cmd_line, 'V')
    qtbot.keyClicks(cmd_line, '2k')
    qtbot.keyClicks(cmd_line, 'y')

    clipboard = QApplication.clipboard().text()

#    editor.moveCursor(QTextCursor.EndOfLine, QTextCursor.KeepAnchor)
#    new_line, new_col = editor.get_cursor_line_column()
    # \u2029 is Qt's paragraph separator between yanked lines.
    assert clipboard == u'    123\u2029line 1\u2029line 2\u2029'
34,494
def get_rate_plan(apiproduct_id: Optional[str] = None,
                  organization_id: Optional[str] = None,
                  rateplan_id: Optional[str] = None,
                  opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRatePlanResult:
    """
    Gets the details of a rate plan.
    """
    __args__ = {
        'apiproductId': apiproduct_id,
        'organizationId': organization_id,
        'rateplanId': rateplan_id,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('google-native:apigee/v1:getRatePlan', __args__, opts=opts, typ=GetRatePlanResult).value

    return AwaitableGetRatePlanResult(
        apiproduct=__ret__.apiproduct,
        billing_period=__ret__.billing_period,
        consumption_pricing_rates=__ret__.consumption_pricing_rates,
        consumption_pricing_type=__ret__.consumption_pricing_type,
        created_at=__ret__.created_at,
        currency_code=__ret__.currency_code,
        description=__ret__.description,
        display_name=__ret__.display_name,
        end_time=__ret__.end_time,
        fixed_fee_frequency=__ret__.fixed_fee_frequency,
        fixed_recurring_fee=__ret__.fixed_recurring_fee,
        last_modified_at=__ret__.last_modified_at,
        name=__ret__.name,
        revenue_share_rates=__ret__.revenue_share_rates,
        revenue_share_type=__ret__.revenue_share_type,
        setup_fee=__ret__.setup_fee,
        start_time=__ret__.start_time,
        state=__ret__.state)
34,495
def detail_blotter(backtest, positions, holdings, mode='simplified'):
    """
    Build a per-symbol trading blotter merging market data, trades and
    account changes.

    Parameters:
        backtest, positions, holdings: objects returned by the backtest
            engine.
        mode: 'simplified' keeps only the 'close' column of the market
            data (indexed by datetime); any other value ('full') keeps
            the OHLCV columns.

    Returns:
        dict mapping symbol -> DataFrame.

    Example:
        blotter = detail_blotter(backtest, positions, holdings)
        blotter_rb = blotter['RB']
        blotter_rb.head()
    """
    blotter = dict()
    data_dict = backtest.data_handler.latest_symbol_data
    trades = backtest.trade_record()
    # Signed direction: BUY -> +1, SELL -> -1.
    trades['direction'] = [1 if d == 'BUY' else -1 for d in trades['direction']]
    # Signed cash outlay of each fill.
    trades['cost'] = trades['direction'] * trades['fill_price'] * trades['quantity']

    for symb in data_dict.keys():
        data = pd.DataFrame(data_dict[symb],
                            columns=['symbol', 'datetime', 'open', 'high',
                                     'low', 'close', 'volume'])
        if mode == 'simplified':
            data = data[['datetime', 'close']].set_index('datetime')
        else:  # 'full'
            data = data.set_index('datetime')

        trades_symb = trades[trades['symbol'] == symb][['direction', 'fill_price',
                                                        'commission', 'cost']]
        holdings_symb = pd.Series(holdings[symb], name='holdings')
        positions_symb = pd.Series(positions[symb], name='positions')
        merge = data.join([positions_symb, holdings_symb, trades_symb],
                          how='outer').iloc[1:, :].fillna(0.)

        # P&L realized at the close of each bar.
        merge['pnl'] = merge['holdings'] - merge['holdings'].shift(1) - \
            merge['cost'].shift(1) - merge['commission'].shift(1)
        # .iloc with a resolved column position replaces the DataFrame.ix
        # indexer, which was removed in pandas >= 1.0.
        pnl_col = merge.columns.get_loc('pnl')
        # First bar has no predecessor: zero out the NaN.
        merge.iloc[0, pnl_col] = 0.
        # Account for a possible forced liquidation on the final bar.
        merge.iloc[-1, pnl_col] = merge['holdings'].iloc[-1] - \
            merge['holdings'].iloc[-2] - merge['cost'].iloc[-1] - \
            merge['commission'].iloc[-1]
        # Use the close of the first bar as the starting capital.
        merge['adj_total'] = merge['pnl'].cumsum() + merge['close'].iloc[0]
        del merge['cost']
        blotter[symb] = merge
    return blotter
34,496
def parameters_create_lcdm(Omega_c, Omega_b, Omega_k, h, norm_pk, n_s, status):
    """parameters_create_lcdm(double Omega_c, double Omega_b, double Omega_k, double h, double norm_pk, double n_s, int * status) -> parameters"""
    # Auto-generated SWIG wrapper: forwards directly to the compiled
    # _ccllib extension; do not edit by hand.
    return _ccllib.parameters_create_lcdm(Omega_c, Omega_b, Omega_k, h, norm_pk, n_s, status)
34,497
def _split_header_params(s): """Split header parameters.""" result = [] while s[:1] == b';': s = s[1:] end = s.find(b';') while end > 0 and s.count(b'"', 0, end) % 2: end = s.find(b';', end + 1) if end < 0: end = len(s) f = s[:end] result.append(f.strip()) s = s[end:] return result
34,498
def test_wuch2_nllmatrix():
    """ test wuch2 nllmatrix

    Loads a pre-computed NLL matrix fixture and checks the first aligned
    pair found.
    """
    filename = r"nll_matrix_wuch2.pkl"
    with open(filename, "rb") as fhandle:
        nll_matrix = pickle.load(fhandle)
    triple_set = find_aligned_pairs(nll_matrix)
    res = triple_set[0][0] if triple_set else None
    # The previous message interpolated the *actual* value after
    # "Expected to be", which was misleading on failure.
    assert res == 6, "Expected 6, got {}".format(res)
34,499