Dataset columns: content (string, lengths 22 to 815k), id (int64, 0 to 4.91M)
def traffic(config, task):
    """Main function of Bulkdozer, performs the Bulkdozer job"""
    global clean_run

    if config.verbose:
        print('traffic')

    try:
        setup(config, task)
        logger.clear()
        logger.log('Bulkdozer traffic job starting')
        logger.flush()

        init_daos(config, task)
        assets(config, task)
        landing_pages(config, task)
        campaigns(config, task)
        event_tags(config, task)
        placement_groups(config, task)
        placements(config, task)
        creatives(config, task)
        ads(config, task)
        dynamic_targeting_keys(config, task)

        #if clean_run:
        #  store.clear()
    except Exception as error:
        stack = traceback.format_exc()
        print(stack)
        logger.log(str(error))
    finally:
        logger.log('Bulkdozer traffic job ended')
        logger.flush()
        #store.save_id_map()

    if clean_run:
        print('Done: Clean run.')
    else:
        raise Exception(
            'Done: Errors happened with some of the assets, check your sheet log.')
9,200
def test_complex_expression():
    """Ensures that add works correctly."""
    complex_expression = _MathExpression() * 2 + 1
    assert complex_expression(1) == 3
9,201
def parse_urdf_file(package_name, work_name):
    """
    Convert urdf file (xml) to python dict.

    Using the urdfpy package for now. Using the xml package from the standard
    library could be easier to understand. We can change this in the future if
    it becomes a mess.
    """
    rospack = rospkg.RosPack()
    filepath = rospack.get_path(package_name)
    filepath += REL_WORK_PATH
    urdf = urdfpy.URDF.load(filepath + work_name + ".urdf")
    d = {"links": {}, "joints": {}}
    for link in urdf.links:
        if link.name == "world" or link.name == "work":
            continue
        else:
            d["links"][link.name] = parse_link(link, filepath)
    for joint in urdf.joints:
        p = PoseStamped()
        p.header.frame_id = joint.parent
        p.pose = numpy_to_pose(joint.origin)
        d["joints"][joint.name] = {
            "pose": p,
            "parent": joint.parent,
            "child": joint.child
        }
    return d
9,202
def _get_mgmtif_mo_dn(handle):
    """
    Internal method to get the mgmt_if dn based on the type of platform
    """
    if handle.platform == IMC_PLATFORM.TYPE_CLASSIC:
        return("sys/rack-unit-1/mgmt/if-1")
    elif handle.platform == IMC_PLATFORM.TYPE_MODULAR:
        return("sys/chassis-1/if-1")
    else:
        raise ImcValidationException("Invalid platform detected:%s" %
                                     handle.platform)
9,203
def measure_cv_performance(gene_by_latent_train, data_test):
    """ Measure NMF model performance on held out data.

    Performance is evaluated based on the model's ability to reconstruct
    held out samples.

        \hat{u} := arg min_{u} || x - uV^T || s.t. u >= 0
        \hat{x} := \hat{u} V^T
        error = || x - \hat{x} ||
        normalized_error = error / ||x||

    Parameters
    ----------
    gene_by_latent_train : np.array
        V in the above equations
    data_test : np.array
        X in the above equations

    Returns
    -------
    error : np.array
        normalized error for each sample
    """
    m_samples, n_genes = data_test.shape
    error = np.zeros(m_samples,)
    # TODO multi-sample version of nnls?
    for m in range(m_samples):
        u_hat, err = scipy.optimize.nnls(gene_by_latent_train, data_test[m, :])
        error[m] = err / np.linalg.norm(data_test[m, :])
    return error
9,204
def _eject_vmedia(task, managers, boot_device=None):
    """Eject virtual CDs and DVDs

    :param task: A task from TaskManager.
    :param managers: A list of System managers.
    :param boot_device: sushy boot device e.g. `VIRTUAL_MEDIA_CD`,
        `VIRTUAL_MEDIA_DVD` or `VIRTUAL_MEDIA_FLOPPY` or `None` to eject
        everything (default).
    :raises: InvalidParameterValue, if no suitable virtual CD or DVD is
        found on the node.
    """
    for manager in managers:
        for v_media in manager.virtual_media.get_members():
            if boot_device and boot_device not in v_media.media_types:
                continue
            inserted = v_media.inserted
            if inserted:
                v_media.eject_media()
            LOG.info("Boot media is%(already)s ejected from "
                     "%(boot_device)s for node %(node)s"
                     "", {'node': task.node.uuid,
                          'already': '' if inserted else ' already',
                          'boot_device': v_media.name})
9,205
def sectionize(parts, first_is_heading=False):
    """Join parts of the text after splitting into sections with headings.

    This function assumes that a text was split at section headings,
    so every two list elements after the first one is a heading-section pair.
    This assumption is used to join sections with their corresponding headings.

    Parameters
    ----------
    parts : list of str
        List of text parts.
    first_is_heading : bool
        Should first element be treated as heading in lists of length
        greater than 1.
    """
    parts = parts.copy()
    if len(parts) <= 1:
        return parts
    first = []
    if not first_is_heading:
        first.append(parts[0])
        del parts[0]
    sections = first + [
        "\n".join(parts[i:i+2]) for i in range(0, len(parts), 2)
    ]
    return sections
9,206
def set_runner(runner):
    """
    Set the global runner instance

    :param runner: the runner instance to set as the global runner
    """
    global RUNNER
    RUNNER = runner
9,207
def byte_compare(stream_a, stream_b): """Byte compare two files (early out on first difference). Returns: (bool, int): offset of first mismatch or 0 if equal """ bufsize = 16 * 1024 equal = True ofs = 0 while True: b1 = stream_a.read(bufsize) b2 = stream_b.read(bufsize) if b1 != b2: equal = False if b1 and b2: # we have two different buffers: find first mismatch for a, b in zip(b1, b2): if a != b: break ofs += 1 break ofs += len(b1) if not b1: # both buffers empty break return (equal, ofs)
9,208
def update_variable(_outvars, _values):
    """Update the variable to the new value in the variable table."""
    pass
9,209
def get_resize_augmentation(image_size, keep_ratio=False, box_transforms=False): """ Resize an image, support multi-scaling :param image_size: shape of image to resize :param keep_ratio: whether to keep image ratio :param box_transforms: whether to augment boxes :return: albumentation Compose """ bbox_params = A.BboxParams( format='pascal_voc', min_area=0, min_visibility=0, label_fields=['class_labels']) if box_transforms else None if not keep_ratio: return A.Compose([ A.Resize( height=image_size[1], width=image_size[0] )], bbox_params=bbox_params) else: return A.Compose([ A.LongestMaxSize(max_size=max(image_size)), A.PadIfNeeded( min_height=image_size[1], min_width=image_size[0], p=1.0, border_mode=cv2.BORDER_CONSTANT), ], bbox_params=bbox_params)
9,210
def calc_theta_from_q(q, E):
    """
    =======================================================================
    >> @DATE:    02/06/2020 SS 1.0 original
    >> @AUTHOR:  Saransh Singh Lawrence Livermore national Lab
    >> @DETAIL:  converts q to theta given an energy
    >> @param    q scattering parameter = 4 * pi * sin(theta) / lambda (in A^-1)
    >> @param    E assumed energy of xray beam in keV
    >> @return   theta array which converts q to scattering angle (could have
                 NaNs if the scattering parameter is not allowed!)
    =======================================================================
    """
    # plancks constant times speed of light in kev A
    hc = 12.39841984
    theta = np.arcsin(q * hc / 4.0 / np.pi / E)
    return theta
9,211
def test_block_quotes_229fx(): """ Test case 229f: variation of 229 with different spacing """ # Arrange source_markdown = """> ``` > 2 >> ``` """ expected_tokens = [ "[block-quote(1,1)::> \n> \n>\n]", "[fcode-block(1,3):`:3::::::]", "[text(2,3):2\n\a>\a&gt;\a ```:]", "[end-fcode-block:::True]", "[end-block-quote:::True]", "[BLANK(4,1):]", ] expected_gfm = """<blockquote> <pre><code>2 &gt; ``` </code></pre> </blockquote>""" # Act & Assert act_and_assert(source_markdown, expected_gfm, expected_tokens)
9,212
def extract_question(metric):
    """Extracts the name and question from the given metric"""
    with open(metric) as f:
        data = f.readlines()

    data = [x.strip() for x in data]
    # filter out empty strings
    data = list(filter(None, data))

    # data[0] = '# Technical Fork'
    metric_name = data[0].split(maxsplit=1)[1]
    # data[1] = 'Question: question part of the metric'
    metric_question = spilt_by_colon(data[1])

    # Replace '&' to 'and' to prevent breaking of tables in pandoc
    metric_name = metric_name.replace('&', 'and')
    metric_question = metric_question.replace('&', 'and')

    return metric_name, metric_question
9,213
def get_side(node, vowels, matcher, r): """Get side to which char should be added. r means round (or repeat). Return 0 or plus int to add char to right, minus int to left, None if char node should be avoided. """ # check if node has both char neighbours if node.next is None: if node.prev is None: raise Exception() elif node.prev.syllable: return -1 else: return None elif node.prev is None: if node.next.syllable: return 1 else: return None # node has both left and right char neighbours # check if node has at least one syllable neighbour if node.prev.syllable is None and node.next.syllable is None: return None # char matching right_db = get_db_right_side(node, matcher) if right_db == 2: return right_db elif right_db == 1 and r < 3: return None # suffix suff = get_suffix_side(node, matcher) if suff != 0: syllable = node.prev.syllable if suff < 0 else node.next.syllable return suff if syllable is not None else None # prefix pre = get_prefix_side(node, matcher) if pre != 0: syllable = node.prev.syllable if pre < 0 else node.next.syllable return pre if syllable is not None else None # syllable matching if node.prev.syllable and node.next.syllable: sdb = get_db_syllable_side(node, matcher) / 2 + right_db if abs(sdb) >= 1: return sdb # no match in db nor suffixes nor prefixes if r < 3: return None if node.prev in vowels and node.prev.neighbours_consonants(2, syllabic=False): return -1 # this condition is for c in jablcko if node.prev.syllabic_consonant_in_the_middle() and node.neighbours_consonants(1): return -1 elif node.next.syllable: return 1 elif node.prev.syllable: return -1 return 0
9,214
def serve_build():
    """needed for building assets"""
    serve("build")
9,215
def gap_perform_pruning(model_path, pruned_save_path=None, mode='gap', slim_ratio=0.5, mask_len=False, full_save=False, full_save_path=None, var_scope='', ver=1): """ Interface for GAP pruning step (step2). Args: model_path: path to the saved checkpoint, including 3 files: `.meta', `.data' and `.index'. pruned_save_path: path to save the pruned data (file in pickle format) slim_ratio: ratio for model pruning. Return: data_dict: the pruned data dict """ graph = saver.import_meta_graph(model_path+'.meta', clear_devices=True) with open('graph_def.pbtxt', 'w') as f: f.write(str(ops.get_default_graph().as_graph_def(add_shapes=True))) key_graph = KeyGraph(ops.get_default_graph()) data_dict = key_graph.gap(model_path, pruned_save_path, mode, slim_ratio, mask_len, full_save, full_save_path, var_scope, ver) return data_dict
9,216
def html_wrap(ptext, owrapper, attribute=''): """ Wrap text with html tags. Input: ptext -- text to be wrapped owrapper -- tag to wrap ptext with attribute -- if set, attribute to add to ptext If owrapper ends with a newline, then the newline will appear after the bracket character in the last tag. Returns the wrapped string value. """ wrapper = owrapper.strip() hdr = '<%s>' % wrapper if attribute: hdr = add_attrib(attribute, hdr) trlr = '</%s>' % wrapper if owrapper.endswith('\n'): trlr += '\n' return hdr + ptext + trlr
9,217
def start_vsfm(port=None, vsfm_binary_path=default_path): """ Starts VSFM, binds it to a socket, opens the socket interface, sets up a logger and waits. :param port: Port number to open, defaults to a random one :param vsfm_binary_path: the path to VSFM.exe, defaults from the vsfm_data file :return: port that was opened """ # 'start program' if port is None: tmp_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) tmp_sock.bind(('',0)) port = int(tmp_sock.getsockname()[1]) tmp_sock.close() logger.info("Binding to port " + str(port)) cmd = '"{}" listen+log {}'.format(vsfm_binary_path, port) # Opens up VSFM and begins the socket. logger.debug("Sending cmd: " + cmd) vsfm_subprocess = subprocess.Popen(cmd, shell=True) # this needs changed from shell=True return port
9,218
def names(namespace):
    """Return extension names without loading the extensions."""
    if _PLUGINS:
        return _PLUGINS[namespace].keys()
    else:
        return _pkg_resources_names(namespace)
9,219
def create_tech_storage(connector, technology_list): """ This function writes the ``StorageDuration`` table. """ cursor = connector.cursor() storage_techs = [tech for tech in technology_list if tech.storage_tech] table_command = """CREATE TABLE "StorageDuration" ( "regions" text, "tech" text, "duration" real, "duration_notes" text, PRIMARY KEY("regions","tech") ); """ insert_command = """ INSERT INTO "StorageDuration" VALUES (?,?,?,?) """ cursor.execute(table_command) entries = [] for tech in storage_techs: db_entry = [(place, tech.tech_name, storage, '') for place, storage in zip(list(tech.storage_duration.keys()), list(tech.storage_duration.values()))] entries += db_entry cursor.executemany(insert_command, entries) connector.commit() return
9,220
def toggle_device_setting(daq, device):
    """
    Toggle a setting on the device: If it's enabled, disable the setting,
    and vice versa.
    """
    path = "/%s/sigouts/0/on" % device
    is_enabled = daq.getInt(path)
    print(f"Toggling setting '{path}'.")
    daq.setInt(path, not is_enabled)
    daq.sync()
9,221
def cfq_lstm_attention_multi(): """LSTM+attention hyperparameters tuned for CFQ.""" hparams = common_hparams.basic_params1() hparams.daisy_chain_variables = False hparams.batch_size = 1024 hparams.hidden_size = 128 hparams.num_hidden_layers = 2 hparams.initializer = 'uniform_unit_scaling' hparams.initializer_gain = 1.0 hparams.weight_decay = 0.0 hparams.add_hparam('attention_layer_size', hparams.hidden_size) hparams.add_hparam('output_attention', True) hparams.add_hparam('num_heads', 1) hparams.add_hparam('attention_mechanism', 'bahdanau') hparams.num_heads = 4 # The remaining hyperparameters were determined as described in the paper: hparams.batch_size = 2048 hparams.dropout = 0.4 hparams.hidden_size = 512 hparams.learning_rate = 0.03 hparams.num_hidden_layers = 2 return hparams
9,222
def create_modeling_tables(spi_historical, spi_fixtures, fd_historical, fd_fixtures, names_mapping): """Create tables for machine learning modeling.""" # Rename teams for col in ['team1', 'team2']: spi_historical = pd.merge(spi_historical, names_mapping, left_on=col, right_on='left_team', how='left').drop(columns=[col, 'left_team']).rename(columns={'right_team': col}) spi_fixtures = pd.merge(spi_fixtures, names_mapping, left_on=col, right_on='left_team', how='left').drop(columns=[col, 'left_team']).rename(columns={'right_team': col}) # Combine data historical = pd.merge(spi_historical, fd_historical, left_on=SPI_KEYS, right_on=FD_KEYS).dropna(subset=ODDS_COLS_MAPPING.keys(), how='any').reset_index(drop=True) fixtures = pd.merge(spi_fixtures, fd_fixtures, left_on=SPI_KEYS, right_on=FD_KEYS) # Extract training, odds and fixtures X = historical.loc[:, ['season'] + SPI_KEYS + INPUT_COLS] y = historical.loc[:, OUTPUT_COLS] odds = historical.loc[:, SPI_KEYS + list(ODDS_COLS_MAPPING.keys())].rename(columns=ODDS_COLS_MAPPING) X_test = fixtures.loc[:, SPI_KEYS + INPUT_COLS] odds_test = fixtures.loc[:, SPI_KEYS + list(ODDS_COLS_MAPPING.keys())].rename(columns=ODDS_COLS_MAPPING) # Add average scores columns for ind in (1, 2): avg_score = y[['adj_score%s' % ind, 'xg%s' % ind, 'nsxg%s' % ind]].mean(axis=1) avg_score[avg_score.isna()] = y['score%s' % ind] y['avg_score%s' % ind] = avg_score # Add combined odds columns for target in TARGETS: if '+' in target: targets = target.split('+') odds[target] = combine_odds(odds[targets]) odds_test[target] = combine_odds(odds_test[targets]) # Feature extraction with np.errstate(divide='ignore', invalid='ignore'): for df in (X, X_test): df['quality'] = hmean(df[['spi1', 'spi2']], axis=1) df['importance'] = df[['importance1', 'importance2']].mean(axis=1) df['rating'] = df[['quality', 'importance']].mean(axis=1) df['sum_proj_score'] = df['proj_score1'] + df['proj_score2'] return X, y, odds, X_test, odds_test
9,223
def restart_supervisord():
    """Reload supervisorctl conf to run celery and celerybeat"""
    sudo('supervisorctl reload', pty=True)
9,224
def daily_selection(): """ Select a random piece of material from what is available. A piece is defined by a newline; every line is a new piece of content. """ logger.log("Selecting today's material") with open(settings.CONTENT, "r") as file: content = file.readlines() lines = len(content) prev = get_previous(int(math.log10(lines))) selection_index = random.choice(list(range(prev)) + list(range(prev + 1, lines))) selection = content[selection_index] selection += ("\n" if selection[-1] != "\n" else "") logger.log("Selected: " + selection, newline=False) set_previous(selection_index) return selection
9,225
def pin_output(pin, state):
    """
    Wrapper function used to debug pin states.
    """
    GPIO.output(pin, state)
    log('debug', 'pin ', pin, ', state ', state)
9,226
def cli(): """ Easy and effective tooling for FIX Repository data. FIX2dict greatly simplifies working with FIX Repository data by leveraging open and combat-proven web technologies. The ultimate goal is to provide users with a consistent, authoritative and high-quality FIX reference in an accessible way. JSON is the preferred choice of format; JSON Schema [1] and JSON Patch [2] are internally used respectively for validation and Extension Packs (EPs). Type 'fix2dict <COMMAND> --help' for more information. You can submit bugs by sending an email to <filippocosta.italy+fix2dict@gmail.com>. \b Footnotes: [1]: https://json-schema.org/ https://json-schema.org/draft/2019-09/json-schema-core.html [2]: https://tools.ietf.org/html/rfc6902 \b Copyright (c) 2020, Filippo Costa. Released under Apache License 2.0: https://www.apache.org/licenses/LICENSE-2.0.txt Find me at <https://filippocosta.net>. """ pass
9,227
def center_image(image):
    """ Center the image's anchor point. """
    image.anchor_x = image.width // 2
    image.anchor_y = image.height // 2
9,228
def write_stored_taxa(stored_taxa: Dict): """Write taxon view history to file, along with stats on most frequently viewed taxa Args: Complete taxon history (including previously stored history) """ # Do a recount/resort before writing stored_taxa["frequent"] = OrderedDict(Counter(stored_taxa["history"]).most_common()) logger.info( 'Settings: Writing stored taxa: ' f'{len(stored_taxa["history"])} history items, ' f'{len(stored_taxa["starred"])} starred items, ' f'{len(stored_taxa["frequent"])} frequent items, ' f'{len(stored_taxa["observed"])} observed items' ) with open(STORED_TAXA_PATH, 'w') as f: json.dump(stored_taxa, f, indent=4) logger.info('Settings: Done')
9,229
def get_browser_version(): """ obtain the firefox browser version, this is necessary because zeus can only handle certain versions. """ logger.info(set_color( "attempting to get firefox browser version" )) try: firefox_version_command = shlex.split("firefox --version") output = subprocess.check_output(firefox_version_command) except (OSError, Exception): logger.error(set_color( "failed to run firefox", level=50 )) return "failed to start" try: major, minor = map(int, re.search(r"(\d+).(\d+)", output).groups()) except (ValueError, Exception): logger.error(set_color( "failed to parse '{}' for version number".format(output), level=50 )) return "failed to gather" return major, minor
9,230
def feature_evaluation(X: pd.DataFrame, y: pd.Series, output_path: str = ".") -> NoReturn: """ Create scatter plot between each feature and the response. - Plot title specifies feature name - Plot title specifies Pearson Correlation between feature and response - Plot saved under given folder with file name including feature name Parameters ---------- X : DataFrame of shape (n_samples, n_features) Design matrix of regression problem y : array-like of shape (n_samples, ) Response vector to evaluate against output_path: str (default ".") Path to folder in which plots are saved """ for feature in X.columns: feature_col = X.loc[:, feature] pearson = pearson_correlation( feature_col , y) x_axis = feature_col y_axis = y go.Figure([go.Scatter(x=x_axis, y=y_axis, name='correlation' + str(pearson), showlegend=True, # why need name here if i have title down there? marker=dict(color="black", opacity=.7), mode="markers", line=dict(color="black", width=1))], layout=go.Layout(title=r"$\text{(1) feature and price} $", xaxis={"title": feature}, yaxis={"title": "price"}, height=400)).show()
9,231
def is_checkpointing() -> bool:
    """Whether the current forward propagation is under checkpointing.

    Returns:
        bool: :data:`True` if it's under checkpointing.
    """
    return thread_local.is_checkpointing
9,232
def create_tile_assets_profile(iam, profile_name, locations): """ Creates a profile (and corresponding role) with read and write access to the tile assets bucket. """ profile = iam.create_instance_profile( InstanceProfileName=profile_name, Path='/', ) iam.create_role( RoleName=profile_name, AssumeRolePolicyDocument=json.dumps( assume_role_policy_document('ec2.amazonaws.com')), ) iam.add_role_to_instance_profile( InstanceProfileName=profile_name, RoleName=profile_name, ) assets_path = locations.assets.name + '/' + locations.assets.prefix + '/*' policy = { "Version": "2012-10-17", "Statement": [ { "Sid": "VisualEditor0", "Effect": "Allow", "Action": [ "s3:PutObject", "s3:GetObject", "s3:DeleteObject" ], "Resource": 'arn:aws:s3:::' + assets_path, }, { "Sid": "VisualEditor1", "Effect": "Allow", "Action": "s3:ListBucket", "Resource": 'arn:aws:s3:::' + locations.assets.name, } ] } iam.put_role_policy( RoleName=profile_name, PolicyName='AllowReadWriteAccessToTilesAssetsBucket', PolicyDocument=json.dumps(policy), ) return profile['InstanceProfile']
9,233
def cli(dir):
    """
    Sets up the project environment.

    This is called by default after `whisk create` and should be run manually
    after cloning an existing project.

    See :func:`whisk.setup.setup` for the full list of steps that are performed.
    """
    whisk.setup.setup(dir)
9,234
def noise_filter(rgb_array, coef=8, read_noise=2, shot_noise=246): """ Apply bilateral noise filter to RGB image""" h, w, _ = rgb_array.shape luma_img = rgb_array[:, :, 0] + rgb_array[:, :, 1] + rgb_array[:, :, 2] average = scipy.ndimage.filters.uniform_filter(luma_img, 5, mode='mirror') sigma_map = average * shot_noise + read_noise del average sigma_map[sigma_map < 1] = 1 sy, sx = sigma_map.strides sigma_tile = as_strided(sigma_map, strides=(sy, sx, 0, 0), shape=(h, w, 5, 5)) sigma_tile = sigma_tile[2:h-2, 2:w-2, :, :] del sigma_map sy, sx = luma_img.strides luma_tile = as_strided(luma_img, strides=(sy, sx, 0, 0), shape=(h, w, 5, 5)) luma_tile = luma_tile[2:h-2, 2:w-2, :, :] luma_box = as_strided(luma_img, strides=(sy, sx, sy, sx), shape=(h-4, w-4, 5, 5)) del luma_img diff = luma_box - luma_tile del luma_tile, luma_box diff = diff * diff weight = np.exp(-coef * diff / sigma_tile) del diff, sigma_tile weight_sum = weight.sum(axis=(2, 3)) sy, sx, sz, sw = weight.strides weight_extend = as_strided(weight, strides=(sy, sx, 0, sz, sw), shape=(h-4, w-4, 3, 5, 5)) del weight sy, sx = weight_sum.strides weight_sum_extend = as_strided(weight_sum, strides=(sy, sx, 0), shape=(h-4, w-4, 3)) del weight_sum sy, sx, sz = rgb_array.strides img_boxes = as_strided(rgb_array, strides=(sy, sx, sz, sy, sx), shape=(h-4, w-4, 3, 5, 5)) img_flt = (weight_extend * img_boxes).sum(axis=(3, 4)) / weight_sum_extend return img_flt
9,235
def test_flux_custom_units_correct(specviz_gui, qtbot):
    """ Accept valid custom units """
    ucd = get_ucd(specviz_gui)
    qtbot.addWidget(ucd)

    ucd.ui.comboBox_units.setCurrentIndex(0)
    assert ucd.ui.comboBox_units.currentText() == ucd.data_unit_equivalencies_titles[0]

    ucd.ui.comboBox_units.setCurrentIndex(ucd.ui.comboBox_units.count() - 1)
    assert ucd.ui.comboBox_units.currentText() == "Custom"

    ucd.ui.line_custom_units.setText("Jy")
    assert ucd.on_accepted() == True
9,236
def logout(): """ Logout of AVA Cloud. """ try: r = requests.get(url + '/logout') r.raise_for_status() except requests.exceptions.HTTPError as e: if r.status_code == 400: click.echo('Error: Bad credentials', err=True) elif r.status_code == 401: click.echo('Error: You are not logged in', err=True) else: click.echo('Error: Problem happenned', err=True) sys.exit(1) except requests.exceptions.RequestException as e: click.echo('Error: Unable to end the request with AVA', err=True) sys.exit(1) click.echo('Logged Out.')
9,237
def drop_entity(p_json: json):
    """
    Deletes an entity

    :param p_json: json specifying the id of the entity to delete
    """
    try:
        l_model = Model(p_json=p_json)
        l_model.drop_entity()
        return _JsonOutput(p_json_object=None, p_message="Entity has dropped successfully").body
    except Exception as e:
        return _JsonOutput(p_json_object=None, p_error=e.args[0]).body
9,238
def get_ssl_vpn_client_certs(ids: Optional[Sequence[str]] = None, name_regex: Optional[str] = None, output_file: Optional[str] = None, ssl_vpn_server_id: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSslVpnClientCertsResult: """ The SSL-VPN client certificates data source lists lots of SSL-VPN client certificates resource information owned by an Alicloud account. ## Example Usage ```python import pulumi import pulumi_alicloud as alicloud foo = alicloud.vpc.get_ssl_vpn_client_certs(ids=["fake-cert-id"], name_regex="^foo", output_file="/tmp/clientcert", ssl_vpn_server_id="fake-server-id") ``` :param Sequence[str] ids: IDs of the SSL-VPN client certificates. :param str name_regex: A regex string of SSL-VPN client certificate name. :param str output_file: Save the result to the file. :param str ssl_vpn_server_id: Use the SSL-VPN server ID as the search key. """ __args__ = dict() __args__['ids'] = ids __args__['nameRegex'] = name_regex __args__['outputFile'] = output_file __args__['sslVpnServerId'] = ssl_vpn_server_id if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('alicloud:vpc/getSslVpnClientCerts:getSslVpnClientCerts', __args__, opts=opts, typ=GetSslVpnClientCertsResult).value return AwaitableGetSslVpnClientCertsResult( certs=__ret__.certs, id=__ret__.id, ids=__ret__.ids, name_regex=__ret__.name_regex, names=__ret__.names, output_file=__ret__.output_file, ssl_vpn_server_id=__ret__.ssl_vpn_server_id)
9,239
def quantile(h: Distogram, value: float) -> Optional[float]: """ Returns a quantile of the distribution Args: h: A Distogram object. value: The quantile to compute. Must be between 0 and 1 Returns: An estimation of the quantile. Returns None if the Distogram object contains no element or value is outside of (0:1). """ if len(h.bins) == 0: return None if not (0 <= value <= 1): return None total_count = count(h) q_count = int(total_count * value) v0, f0 = h.bins[0] vl, fl = h.bins[-1] if q_count <= (f0 / 2): # left values fraction = q_count / (f0 / 2) result = h.min + (fraction * (v0 - h.min)) elif q_count >= (total_count - (fl / 2)): # right values base = q_count - (total_count - (fl / 2)) fraction = base / (fl / 2) result = vl + (fraction * (h.max - vl)) else: mb = q_count - f0 / 2 mids = [(fi + fj) / 2 for (_, fi), (_, fj) in zip(h.bins[:-1], h.bins[1:])] i, _ = next(filter(lambda i_f: mb < i_f[1], enumerate(accumulate(mids)))) (vi, _), (vj, _) = h.bins[i], h.bins[i + 1] fraction = (mb - sum(mids[:i])) / mids[i] result = vi + (fraction * (vj - vi)) return result
9,240
def convert_to_py(path):
    """Extract the code content from an ipynb notebook."""
    content = []
    cells = get_notebook_cells(path)
    for cell in cells:
        if cell["cell_type"] == "code":
            if content:
                content += ["\n"]
            for i in cell["source"]:
                if i.strip().startswith("!") or i.strip().startswith("%"):
                    content.append("# " + i)
                else:
                    content.append(i)
            content.append("\n")
    save_path = path.replace(".ipynb", "_lint.py")
    with open(save_path, "w", encoding="utf-8") as f:
        f.write("".join(content))
9,241
def arachni_del_vuln(request): """ The function Delete the Arachni Vulnerability. :param request: :return: """ if request.method == 'POST': vuln_id = request.POST.get("del_vuln", ) un_scanid = request.POST.get("scan_id", ) scan_item = str(vuln_id) value = scan_item.replace(" ", "") value_split = value.split(',') split_length = value_split.__len__() print "split_lenght", split_length for i in range(0, split_length): vuln_id = value_split.__getitem__(i) delete_vuln = arachni_scan_result_db.objects.filter(vuln_id=vuln_id) delete_vuln.delete() arachni_all_vul = arachni_scan_result_db.objects.filter(scan_id=un_scanid).values( 'name', 'severity', 'vuln_color' ).distinct() total_vul = len(arachni_all_vul) total_high = len(arachni_all_vul.filter(severity="high")) total_medium = len(arachni_all_vul.filter(severity="medium")) total_low = len(arachni_all_vul.filter(severity="low")) arachni_scan_db.objects.filter(scan_id=un_scanid).update( total_vul=total_vul, high_vul=total_high, medium_vul=total_medium, low_vul=total_low ) messages.success(request, "Deleted vulnerability") return HttpResponseRedirect("/webscanners/arachni_list_vuln?scan_id=%s" % un_scanid)
9,242
def pipe(*args, encoding="utf-8", print_output=False, raise_exception=False): """Every arg should be a subprocess command string which will be run and piped to any subsequent args in a linear process chain. Each arg will be split into command words based on whitespace so whitespace embedded within words is not possible. Returns stdout from the chain. """ pipes = [] for cmd in args: words = cmd.split() if pipes: p = subprocess.Popen(words, stdin=pipes[-1].stdout, stdout=subprocess.PIPE) pipes[-1].stdout.close() else: p = subprocess.Popen(words, stdout=subprocess.PIPE) pipes.append(p) output = p.communicate()[0] ret_code = p.wait() if ret_code and raise_exception: raise RuntimeError(f"Subprocess failed with with status: {ret_code}") output = output.decode(encoding) if encoding else output if print_output: print(output, end="") return output
9,243
def construct_imports(variables, imports): """Construct the list of imports by expanding all command line arguments.""" result = {} for i in imports: kv = i.split('=', 1) if len(kv) != 2: print 'Invalid value for --imports: %s. See --help.' % i sys.exit(1) result[kv[0]] = expand_template(kv[1], variables, result) return result
9,244
def is_regex(param):
    """
    Check whether the parameter is a valid regular expression string

    :param param: {String} the parameter to check
    :return: {Boolean} whether it is a valid regular expression
    """
    try:
        re.compile(param)
        return True
    except re.error:
        return False
9,245
def NetCDF_SHP_lat_lon(name_of_nc, box_values, name_of_lat_var, name_of_lon_var, correct_360): """ @ author: Shervan Gharari @ Github: https://github.com/ShervanGharari/candex @ author's email id: sh.gharari@gmail.com @license: Apache2 This function gets a NetCDF file the assosiated shapefile given the cordination of a given box if correct_360 is True then the code convert the lon values more than 180 to negative lon Arguments --------- name_of_nc: string, the name of the nc file box_values: the box to limit to a specific domain name_of_lat_var: string, the name of the variable lat name_of_lon_var: string, the name of the variable lon correct_360: logical, True or Flase Returns ------- result: a shapefile for the NetCDF file """ # open the nc file to read dataset = xr.open_dataset(name_of_nc, decode_times=False) # reading the lat and lon and converting them to np.array lat = dataset[name_of_lat_var].data lon = dataset[name_of_lon_var].data lat = np.array(lat) lon = np.array(lon) # check if lat and lon are 1 D, if yes then they should be converted to 2D lat and lon WARNING only for case 1 and 2 if len(lat.shape) == 1 and len(lon.shape) == 1: lat, lon = lat_lon_2D(lat, lon) # creating the shapefile result = lat_lon_SHP(lat, lon, box_values, correct_360) return result
9,246
def format_issues( input_issues: list, developer_ids: list, start_date: datetime.datetime, end_date: datetime.datetime, end_date_buffer: int = 0, ) -> list: """extract and formats key fields into an output list Args: input_issues: issues (tuples) from GitHub developer_ids: GitHub id strings to filter start_date: start date of report end_date: similar, passed in for testing end_date_buffer: number of days to add to 'end time' Returns: list issues_summary: list of tuples with select, reformatted fields """ logging.info("beginning format issues") issues_summary = [] len(input_issues) for issue in input_issues: logging.info(f"formatting issue #: {issue.number}") # determine branch based on common PR naming pattern with [X.Y] branch prefix if "[main]" in issue.title or "[3." not in issue.title: branch_name = "[main]" else: branch_name = str(issue.title).split(" ", 2)[0] match issue.state: case "open": # issues we authored if ( issue.user.login in developer_ids and check_if_issue_date_interesting( issue.updated_at, start_date, end_date, end_date_buffer ) ): issues_summary.append( tuple( ( f"{issue.updated_at}", "Issue", "opened", f"{branch_name.rjust(6)}", f"{issue.url}", f"{issue.title}", ) ) ) # issues we closed case "closed": if issue.closed_by.login in developer_ids: issues_summary.append( tuple( ( f"{issue.closed_at}", "Issue", "closed", f"{branch_name.rjust(6)}", f"{issue.url}", f"{issue.title}", ) ) ) # END match # END for issue in input_issues return issues_summary
9,247
def train_transforms(image_size, train_img_scale=(0.35, 1), normalize: bool = True,
                     mean=torch.tensor([0.485, 0.456, 0.406]),
                     std=torch.tensor([0.229, 0.224, 0.225])):
    """Transforms for train augmentation with Kornia."""
    transforms = [
        AccimageImageToTensorNN(),
        RandomResizedCrop((image_size, image_size), train_img_scale, keepdim=True),
        RandomHorizontalFlip(keepdim=True)]
    if normalize:
        transforms.append(Normalize(mean=mean, std=std, keepdim=True))
    return torch.nn.Sequential(*transforms)
9,248
def test_tmatrix_spheroid_is_sphere(material, atol):
    """tmatrix of spheroid with aspect ratio 1 is equal to tmatrix of sphere"""
    sphere = miepy.sphere([0,0,0], radius, material)
    spheroid = miepy.spheroid([0,0,0], radius, radius, material, tmatrix_lmax=4)

    T1 = sphere.compute_tmatrix(lmax, wavelength, eps_b)
    T2 = spheroid.compute_tmatrix(lmax, wavelength, eps_b)

    print(np.max(np.abs(T1-T2)))
    assert np.allclose(T1, T2, rtol=0, atol=atol)
9,249
def get_spans_bio(tags,id2label=None): """Gets entities from sequence. Args: tags (list): sequence of labels. Returns: list: list of (chunk_type, chunk_start, chunk_end). Example: >>> tags = ['B-PER', 'I-PER', 'O', 'B-LOC'] >>> get_spans_bio(tags) # output [['PER', 0,1], ['LOC', 3, 3]] """ chunks = [] chunk = [-1, -1, -1] for indx, tag in enumerate(tags): if not isinstance(tag, str): tag = id2label[tag] if tag.startswith("B-"): if chunk[2] != -1: chunks.append(chunk) chunk = [-1, -1, -1] chunk[1] = indx chunk[0] = tag.split('-')[1] elif tag.startswith('I-') and chunk[1] != -1: _type = tag.split('-')[1] if _type == chunk[0]: chunk[2] = indx if indx == len(tags) - 1: chunks.append(chunk) else: if chunk[2] != -1: chunks.append(chunk) chunk = [-1, -1, -1] return chunks
9,250
def tag_stamp(b_tag_after_update, repo_path_in_section, repo, branch='', commit=''): """ Tag with time stamp after clone or pull """ if b_tag_after_update: # store current path cwd = os.getcwd() # move to the repository path os.chdir(repo_path_in_section) # get latest hash value last_sha = git.get_last_sha(branch=branch) # decide tag string if branch: if '/' in branch: branch = branch[(branch.index('/')+1):] # add branch name tag_string = f'{get_timestamp_str()}__{branch}__{last_sha}' else: # just time stamp tag_string = f'{get_timestamp_str()}__{last_sha}' # Tag if the latest commit does not already have a tag if not git.has_a_tag(commit=commit): if not git.tag(tag_string, revision=commit): raise IOError('Unable to tag {name} {tag}'.format( tag=tag_string, name=repo['name'])) # return to the stored path os.chdir(cwd)
9,251
def min_count1(lst):
    """
    Get minimal value of list, version 1

    :param lst: Numbers list
    :return: Minimal value and its count on the list
    """
    if len(lst) == 0:
        return []

    count = 0
    min_value = lst[0]
    for num in lst:
        if num == min_value:
            count += 1
        elif num < min_value:
            count = 1
            min_value = num
    return [min_value, count]
9,252
def setJournal(filename = None): """ Method of open / close a journal file that records prompts and commands typed to a text file. param filename string the name of the journal file, if None will close any current open journal file. :param filename: the name of the journal file, (defaults to None which switches off journal) :type filename: str This is elementary fucntion at the moment and may be expanded in scope. """ global __journalFile if filename == None: # No file give. if __journalFile != None: # Close Journal file if open __journalFile.write(__tiocomment + " closed at {0:s}\n".format(str(datetime.now()))) __journalFile.close() __journalFile = None # Null journal file tprint("tio.info: Journal off.") return # All finished # File given, so try and open it fn = getExpandedFilename(filename) if not fn.endswith("tio"): fn += ".tio" try: __journalFile = open(fn,"w") __journalFile.write("# tio Journal file\n") __journalFile.write("# opened at {0:s}\n".format(str(datetime.now()))) tprint("tio.info: Journal on.") except IOError: __tioerr.write("setJournal.error: file open of {0:s} failed\n".format(fn)) if getBool("Manually open journal file",False): __journalFile = openFile("Journal File","w","tio") else: __journalFile = None
9,253
def create_build_from_docker_image( image_name, install_package, namespace, source_image="quay.io/ocsci/fedora", source_image_label="latest", ): """ Allows to create a build config using a Dockerfile specified as an argument, eg.:: $ oc new-build -D $'FROM centos:7\\nRUN yum install -y httpd' creates a build with ``httpd`` installed. Args: image_name (str): Name of the image to be created source_image (str): Source image to build docker image from, defaults to Centos as base image namespace (str): project where build config should be created source_image_label (str): Tag to use along with the image name, defaults to 'latest' install_package (str): package to install over the base image Returns: ocs_ci.ocs.ocp.OCP (obj): The OCP object for the image Fails on UnavailableBuildException exception if build creation fails """ base_image = source_image + ":" + source_image_label if config.DEPLOYMENT.get("disconnected"): base_image = mirror_image(image=base_image) cmd = f"yum install -y {install_package}" http_proxy, https_proxy, no_proxy = get_cluster_proxies() if http_proxy: cmd = ( f"http_proxy={http_proxy} https_proxy={https_proxy} " f"no_proxy='{no_proxy}' {cmd}" ) docker_file = f"FROM {base_image}\n " f" RUN {cmd}\n" f"CMD tail -f /dev/null" command = f"new-build -D $'{docker_file}' --name={image_name}" kubeconfig = os.getenv("KUBECONFIG") oc_cmd = f"oc -n {namespace} " if kubeconfig: oc_cmd += f"--kubeconfig {kubeconfig} " oc_cmd += command logger.info(f"Running command {oc_cmd}") result = run(oc_cmd, stdout=PIPE, stderr=PIPE, timeout=15, shell=True) if result.stderr.decode(): raise UnavailableBuildException( f"Build creation failed with error: {result.stderr.decode()}" ) out = result.stdout.decode() logger.info(out) if "Success" in out: # Build becomes ready once build pod goes into Completed state pod_obj = OCP(kind="Pod", resource_name=image_name) if pod_obj.wait_for_resource( condition="Completed", resource_name=f"{image_name}" + "-1-build", timeout=300, sleep=30, ): logger.info(f"build {image_name} ready") set_image_lookup(image_name) logger.info(f"image {image_name} can now be consumed") image_stream_obj = OCP(kind="ImageStream", resource_name=image_name) return image_stream_obj else: raise UnavailableBuildException("Build creation failed")
9,254
def waymo_data_prep(root_path, info_prefix, version, out_dir, workers, max_sweeps=5): """Prepare the info file for waymo dataset. Args: root_path (str): Path of dataset root. info_prefix (str): The prefix of info filenames. out_dir (str): Output directory of the generated info file. workers (int): Number of threads to be used. max_sweeps (int, optional): Number of input consecutive frames. Default: 5. Here we store pose information of these frames for later use. """ from tools.data_converter import waymo_converter as waymo splits = ['training', 'validation', 'testing'] for i, split in enumerate(splits): load_dir = osp.join(root_path, 'waymo_format', split) if split == 'validation': save_dir = osp.join(out_dir, 'kitti_format', 'training') else: save_dir = osp.join(out_dir, 'kitti_format', split) converter = waymo.Waymo2KITTI( load_dir, save_dir, prefix=str(i), workers=workers, test_mode=(split == 'testing')) converter.convert() # Generate waymo infos out_dir = osp.join(out_dir, 'kitti_format') kitti.create_waymo_info_file(out_dir, info_prefix, max_sweeps=max_sweeps) create_groundtruth_database( 'WaymoDataset', out_dir, info_prefix, f'{out_dir}/{info_prefix}_infos_train.pkl', relative_path=False, with_mask=False)
9,255
def exactly_one_topping(ketchup, mustard, onion):
    """Return whether the customer wants exactly one of the three available
    toppings on their hot dog.
    """
    return True if int(ketchup) + int(mustard) + int(onion) == 1 else False
9,256
def strip_line_endings(data: list) -> list:
    """Removes line endings(\n). Removes item if only contains \n."""
    return [i.rstrip("\n") for i in data if i != "\n"]
9,257
def calculate_afqt_scores(df): """This function calculates the AFQT scores. See information at https://www.nlsinfo.org/content/cohorts/nlsy79/topical-guide/education/aptitude-achievement-intelligence-scores for more details. In addition, we adjust the Numerical Operations score along the lines described in NLS Attachment 106. """ df["NUMERICAL_ADJ"] = df["ASVAB_NUMERICAL_OPERATIONS"] adjust_no = { 0: 0, 1: 0, 2: 1, 3: 2, 7: 8, 8: 9, 9: 10, 10: 11, 11: 12, 12: 14, 13: 15, 14: 16, 15: 17, 16: 18, 17: 19, 18: 21, 19: 22, 20: 23, 21: 24, 22: 25, 23: 26, 24: 27, 25: 28, 26: 29, 27: 30, 28: 31, 29: 33, 30: 34, 31: 35, 32: 36, 33: 37, 34: 38, 35: 39, 36: 39, 37: 40, 38: 41, 39: 42, 40: 43, 41: 44, 42: 45, 43: 46, 44: 47, 45: 48, 46: 49, 47: 49, 48: 50, 49: 50, 50: 50, } df["NUMERICAL_ADJ"].replace(adjust_no, inplace=True) df["AFQT_RAW"] = 0.00 df["AFQT_RAW"] += df["ASVAB_ARITHMETIC_REASONING"] df["AFQT_RAW"] += df["ASVAB_WORD_KNOWLEDGE"] df["AFQT_RAW"] += df["ASVAB_PARAGRAPH_COMPREHENSION"] df["AFQT_RAW"] += 0.5 * df["NUMERICAL_ADJ"] del df["NUMERICAL_ADJ"] # There are a couple of variables for which we can compute AFQT_RAW while there is no AFQT_1 # available. The variable AFQT_1 is set to NAN by the NLSY team if the test procedure was # altered, i.e. variable R06148 (ASVAB_ALTERED_TESTING) takes value 67. However, we noticed # that there are other indicators of problems as well. # # PROFILES, ASVAB VOCATIONAL TEST - NORMAL/ALTERED TESTING # # 11625 51 COMPLETED # 41 52 COMP-CONVERTED REFUSAL # 127 53 COMP-PROBLEM REPORTED # 85 54 COMP-SPANISH INSTR. CARDS # 36 67 COMP-PRODECURES ALTERED # # We followed up with the NLSY staff to get some guidance on how to deal with 51, 52, 53, # 54. The correspondence is available in ``correspondence-altered-testing.pdf'' in the sources # subdirectory. In a nutshell, not detailed information is available anymore on the meaning # of the different realizations. We decided to follow the original decision of the NLSY staff # to only set 67 to NAN. cond = df["ASVAB_ALTERED_TESTING"].isin([67]) df.loc[cond, "AFQT_RAW"] = np.nan # We have a little unit test, where we reconstruct the AFQT_1 variable from the inputs. assert_equal(_test_afqt(df), True) return df
9,258
def test_qubitop_to_paulisum():
    """
    Conversion of QubitOperator; accuracy test
    """
    hop_term = FermionOperator(((2, 1), (0, 0)))
    term = hop_term + hermitian_conjugated(hop_term)
    pauli_term = jordan_wigner(term)

    forest_term = qubitop_to_pyquilpauli(pauli_term)

    ground_truth = PauliTerm("X", 0) * PauliTerm("Z", 1) * PauliTerm("X", 2)
    ground_truth += PauliTerm("Y", 0) * PauliTerm("Z", 1) * PauliTerm("Y", 2)
    ground_truth *= 0.5

    assert ground_truth == forest_term
9,259
def weighting_system_z():
    """Z-weighting filter represented as polynomial transfer function.

    :returns: Tuple of `num` and `den`.

    Z-weighting is 0.0 dB for all frequencies and therefore corresponds to
    a multiplication of 1.
    """
    numerator = [1]
    denominator = [1]
    return numerator, denominator
9,260
def shutdown():
    """
    Shuts down the API (since there is no legit way to kill the thread)

    Pulled from
    https://stackoverflow.com/questions/15562446/how-to-stop-flask-application-without-using-ctrl-c
    """
    func = request.environ.get('werkzeug.server.shutdown')
    if func is None:
        raise RuntimeError('Not running with the Werkzeug Server')
    func()
    return 'Server shutting down...', 200
9,261
def add_glitch(psr, epoch, amp):
    """
    Like pulsar term BWM event, but now differently parameterized: just an
    amplitude (not log-amp) parameter, and an epoch. [source: piccard]

    :param psr:     pulsar object
    :param epoch:   TOA time (MJD) the burst hits the earth
    :param amp:     amplitude of the glitch
    """
    # Define the heaviside function
    heaviside = lambda x: 0.5 * (N.sign(x) + 1)

    # Glitches are spontaneous spin-up events.
    # Thus TOAs will be advanced, and residuals will be negative.
    psr.stoas[:] -= amp * heaviside(psr.toas() - epoch) * (psr.toas() - epoch) * 86400.0
9,262
def on_post_request():
    """This function triggers on every POST request to chosen endpoint"""
    data_sent = request.data.decode('utf-8')
    return Response(return_animal_noise(data_sent), mimetype='text/plain')
9,263
def settings(comid=None, community=None):
    """Modify a community."""
    pending_records = \
        len(CommunityRecordsCollection(community).filter({'status': 'P'}))
    return render_template(
        'invenio_communities/settings.html',
        community=community,
        comid=comid,
        pending_records=pending_records)
9,264
def product_of_basins(): """Return the product of the sizes of the three largest basins.""" max_x = len(values[0]) - 1 max_y = len(values) - 1 def heightmap(x, y): """Return the height value in (xth column, yth row).""" return values[y][x] def is_lowpoint(x, y): """Return True if (x, y) is a lowpoint, else False.""" value = heightmap(x, y) return all((x == 0 or value < heightmap(x - 1, y), # left x == max_x or value < heightmap(x + 1, y), # right y == 0 or value < heightmap(x, y - 1), # up y == max_y or value < heightmap(x, y + 1))) # down def basin_size(x, y): """Return the basin size of the low point (x, y).""" if (x, y) in visited or heightmap(x, y) == 9: return 0 visited.add((x, y)) value = heightmap(x, y) size = 1 if x > 0 and value <= heightmap(x - 1, y): # left size += basin_size(x - 1, y) if x < max_x and value <= heightmap(x + 1, y): # right size += basin_size(x + 1, y) if y > 0 and value <= heightmap(x, y - 1): # up size += basin_size(x, y - 1) if y < max_y and value <= heightmap(x, y + 1): # down size += basin_size(x, y + 1) return size visited = set() basin_sizes = [] lowpoints = ((x, y) for x in range(max_x + 1) for y in range(max_y + 1) if is_lowpoint(x, y)) for x, y in lowpoints: basin_sizes.append(basin_size(x, y)) basin_sizes.sort(reverse=True) return math.prod(basin_sizes[:3])
9,265
def test_io_forward():
    """Test IO for forward solutions
    """
    fwd = mne.read_forward_solution(fname)
    fwd = mne.read_forward_solution(fname, force_fixed=True)
    leadfield = fwd['sol']['data']
9,266
def close_consumer(consumer):
    """Close consumer"""
    consumer.close()
9,267
def singularity26(function):
    """Decorator to set the global singularity version"""
    def wrapper(*args, **kwargs):
        hpccm.config.g_ctype = container_type.SINGULARITY
        hpccm.config.g_singularity_version = StrictVersion('2.6')
        return function(*args, **kwargs)
    return wrapper
9,268
def testScanUpdate_always_updatesExistingScan(mocker, db_engine_path): """Test Agent save implementation.""" mocker.patch.object(models, 'ENGINE_URL', db_engine_path) models.Database().create_db_tables() models.Scan.create('test') database = models.Database() database.session.commit() assert database.session.query(models.Scan).count() == 1 scan = database.session.query(models.Scan).first() scan.title = 'test2' database.session.commit() assert database.session.query(models.Scan).count() == 1 scan = database.session.query(models.Scan).first() assert scan.title == 'test2'
9,269
def denormalize(series, last_value):
    """Denormalize the values for a given series.

    This uses the last value available (i.e. the last closing price of the
    week before our prediction) as a reference for scaling the predicted
    results.
    """
    result = last_value * (series + 1)
    return result
9,270
def tfidf( s: pd.Series, max_features=None, min_df=1, max_df=1.0, return_feature_names=False ) -> pd.Series.sparse: """ Represent a text-based Pandas Series using TF-IDF. *Term Frequency - Inverse Document Frequency (TF-IDF)* is a formula to calculate the _relative importance_ of the words in a document, taking into account the words' occurences in other documents. It consists of two parts: The *term frequency (tf)* tells us how frequently a term is present in a document, so tf(document d, term t) = number of times t appears in d. The *inverse document frequency (idf)* measures how _important_ or _characteristic_ a term is among the whole corpus (i.e. among all documents). Thus, idf(term t) = log((1 + number of documents) / (1 + number of documents where t is present)) + 1. Finally, tf-idf(document d, term t) = tf(d, t) * idf(t). Different from the `sklearn-implementation of tfidf <https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html>`, this function does *not* normalize the output in any way, so the result is exactly what you get applying the formula described above. The input Series should already be tokenized. If not, it will be tokenized before tfidf is calculated. If working with big pandas Series, you might want to limit the number of features through the max_features parameter. Parameters ---------- s : Pandas Series (tokenized) max_features : int, optional, default to None. If not None, only the max_features most frequent tokens are used. min_df : int, optional, default to 1. When building the vocabulary, ignore terms that have a document frequency (number of documents a term appears in) strictly lower than the given threshold. max_df : int or double, optional, default to 1.0 When building the vocabulary, ignore terms that have a document frequency (number of documents a term appears in) strictly higher than the given threshold. This arguments basically permits to remove corpus-specific stop words. When the argument is a float [0.0, 1.0], the parameter represents a proportion of documents. return_feature_names: Boolean, optional, default to False Whether to return the feature (i.e. word) names with the output. Examples -------- >>> import texthero as hero >>> import pandas as pd >>> s = pd.Series(["Hi Bye", "Test Bye Bye"]) >>> s = hero.tokenize(s) >>> hero.tfidf(s, return_feature_names=True) (document 0 [1.0, 1.4054651081081644, 0.0] 1 [2.0, 0.0, 1.4054651081081644] dtype: object, ['Bye', 'Hi', 'Test']) """ # Check if input is tokenized. Else, print warning and tokenize. if not isinstance(s.iloc[0], list): warnings.warn(_not_tokenized_warning_message, DeprecationWarning) s = preprocessing.tokenize(s) tfidf = TfidfVectorizer( use_idf=True, max_features=max_features, min_df=min_df, max_df=max_df, tokenizer=lambda x: x, preprocessor=lambda x: x, norm=None, # Disable l1/l2 normalization. ) tfidf_vectors_csr = tfidf.fit_transform(s) # Result from sklearn is in Compressed Sparse Row format. # Pandas Sparse Series can only be initialized from Coordinate format. tfidf_vectors_coo = coo_matrix(tfidf_vectors_csr) s_out = pd.Series.sparse.from_coo(tfidf_vectors_coo) # Map word index to word name and keep original index of documents. feature_names = tfidf.get_feature_names() s_out.index = s_out.index.map(lambda x: (s.index[x[0]], feature_names[x[1]])) s_out.rename_axis(["document", "word"], inplace=True) # NOTE: Currently: still convert to flat series instead of representation series. 
# Will change to return representation series directly in Version 2. s_out = representation_series_to_flat_series( s_out, fill_missing_with=0.0, index=s.index ) if return_feature_names: return s_out, feature_names else: return s_out
9,271
def decorator(fn: AsyncFn, *, expire: int, maxsize: int) -> AsyncFn: """Cache decorator.""" cache = LRUCache(maxsize=maxsize) @wraps(fn) async def wrapper(*args: Tuple[Any, ...], **kwds: Dict[str, Any]) -> Any: """Wrap the original async `fn`. Cached results will be returned if cache hit, otherwise (missing/expired) `fn` will be invoked and its result will be cached. Args: args: Positional arguments in function parameters. kwds: Keyword arguments in function parameters. Returns: The (maybe cached) result of `fn(*args, **kwds)`. """ key = CacheKey.make(args, kwds) value = cache[key] # cache miss/expired if value is None: result = await fn(*args, **kwds) cache[key] = CacheValue(expired=time.monotonic() + expire, data=result) return result return value.data wrapper.__dict__["cache"] = cache wrapper.__dict__["expire"] = expire return cast(AsyncFn, wrapper)
9,272
def _write_incon(
    f, labels, primary_variables, porosities, permeabilities,
    phase_compositions, eos
):
    """Write INCON block."""
    from ._helpers import _write_incon as writer

    # Write INCON block
    if permeabilities is not None:
        permeabilities = (
            permeabilities[:, None] if permeabilities.ndim == 1 else permeabilities
        )

    for record in writer(
        labels, primary_variables, porosities, permeabilities,
        phase_compositions, eos
    ):
        f.write(record)
9,273
def cleanQrc(uiFile): """ Looks for included resources files in provided .ui file If it doesn't find any, it returns the original file else: Adds all search paths to Qt Converts all paths turns this> :/images/C:/Users/mindd/Desktop/CircleOfFifths.jpg into this> images:CircleOfFifths.jpg Removes resources 'include' tag Creates and returns new _mpi.ui file or original """ # uiFile = os.path.join(os.getcwd(),uiFile) parsed = xml.parse(uiFile) # No resource.qrc files found # we return the original file if parsed.find('resources') is None: return uiFile # Add search paths for include in parsed.iter('include'): location = include.get('location')# qrc file if 'qrc' in location: qrcFile = xml.parse(location) for qresource in qrcFile.iter('qresource'): prefix = qresource.get('prefix')# prefix for file in qresource.findall('file'): QDir.addSearchPath(prefix, os.path.dirname(file.text)) # print(location,prefix, file.text) # fix resources paths def fixPath(path): """ turns this> :/images/C:/Users/mindd/Desktop/CircleOfFifths.jpg into this> images:CircleOfFifths.jpg """ path = path.replace(':/','') s = path.index('/') e = path.rindex('/') path = path.replace(path[s:e+1],':') return path for _any in parsed.iter(): txt = _any.text if txt: txt = txt.strip() if txt != '' and txt.count(':/'): newTxt = txt if 'url' in txt:# StyleSheet # All occurrences of 'url(:' in string for i in range(len(txt)): if txt.startswith('url(:', i): all_from_here = txt[i:] start = all_from_here.index(':/') end = all_from_here.index(')') actual_txt = all_from_here[start:end] newTxt = newTxt.replace(actual_txt,fixPath(actual_txt)) else: newTxt = newTxt.replace(txt,fixPath(txt)) _any.text = newTxt if parsed.find('resources') is not None: parsed.getroot().remove(parsed.find('resources')) mpi_file = uiFile.replace('.ui','_mpi.ui') xml.indent(parsed.getroot()) parsed.write(mpi_file) return mpi_file
9,274
def reindex(src_index, dst_index, type_list, chunk_size=None, time=None): """Reindex a set of indexes internally within ElasticSearch. All of the documents under the types that live in "type_list" under the index "src_index" will be copied into the documents under the same types in the index "dst_index". In other words, a perfect re-index! Instead of using the plugin API and consuming bandwidth to perform the re-index we will allow ElasticSearch to do some heavy lifting for us. Under the covers we are combining scan/scroll with bulk operations to do this re-indexing as efficient as possible. """ es_engine = searchlight.elasticsearch.get_api() # Create a Query DSL string to access all documents within the specified # document types. We will filter on the "_type" field in this index. Since # there are multiple docuent types, we will need to use the "terms" filter. # All of the document types will be added to the list for "_type". We need # to enable version to allow the search to return the version field. This # will be used by the reindexer. body = {"version": "true", "query": {"bool": {"filter": {"terms": {"_type": type_list}}}}} # Debug: Show all documents that ES will re-index. # LOG.debug(es_engine.search(index=src_index, body=body, size=500)) helper_reindex(client=es_engine, source_index=src_index, target_index=dst_index, query=body)
9,275
def pad(data, paddings, mode="CONSTANT", name=None, constant_value=0):
    """ PlaidML Pad """
    # TODO: use / implement other padding method when required
    # CONSTANT -> SpatialPadding ? | Doesn't support first and last axis +
    #             no support for constant_value
    # SYMMETRIC -> Requires implement ?
    if mode.upper() != "REFLECT":
        raise NotImplementedError("pad only supports mode == 'REFLECT'")
    if constant_value != 0:
        raise NotImplementedError("pad does not support constant_value != 0")
    return plaidml.op.reflection_padding(data, paddings)
9,276
def taxon_id(_): """ Always returns 10090, the mouse taxon id. """ return 10090
9,277
def rthread_if(data, *forms):
    """
    Similar to rthread, but each form must be a tuple with (test, fn, ...args)
    and the argument is only passed to fn if the boolean test is True.

    If test is callable, the current value is passed to the callable to decide
    whether fn must be executed or not.

    Like rthread, arguments are passed as tuples and the value is passed
    as the last argument.

    Examples:
        >>> sk.rthread_if(20, (True, op.truediv, 2), (False, op.mul, 4), (sk.is_even, op.add, 2))
        0.1

    See Also:
        :func:`thread`
        :func:`rthread`
    """
    for form in forms:
        do_it, func, *args = form
        if callable(do_it):
            do_it = do_it(data)
        if do_it:
            try:
                data = func(*args, data)
            except Exception as ex:
                raise _thread_error(ex, func, (*args, data)) from ex
    return data
9,278
def cell_edit(sender, app_data): """Click-handler. Function to enable editing in a table cell. The function shows a status update to the user including tag and content of the clicked cell. To achieve editing, the label within the table cell is removed and a text input widget is placed at the same position and updated with the same tag and content. Args: sender (obj): callback-sender app_data ([obj]): app_data """ # remember current cell for later global current_cell current_cell = app_data[1] # show clicked info to user show_status_update(f"clicked on cell tag {app_data[1]}, content {dpg.get_value(app_data[1])}") # store cell contents for later cell_content = dpg.get_value(app_data[1]) # save current parent parent = dpg.get_item_parent(current_cell) # iterate cells in current row and save the position cell_position = None cell_before = None for child in dpg.get_item_children(parent)[1]: # if we saved a cell position, we can save the next one for the before-parameter if cell_position: cell_before = dpg.get_item_alias(child) # escape to not overwrite any saved items break if dpg.get_item_alias(child) == current_cell: cell_position = dpg.get_item_alias(child) # delete current cell (meaning the label) dpg.delete_item(current_cell) # remove the alias if not already happened if dpg.does_alias_exist(current_cell): dpg.remove_alias(current_cell) # add an input text widget instead of the label one # if we saved a "before"-position, attach it before that position if cell_before: dpg.add_input_text(tag=current_cell, parent=parent, before=cell_before) else: dpg.add_input_text(tag=current_cell, parent=parent) dpg.set_value(current_cell, cell_content)
9,279
def decompose_matrices(Ks):
    """
    Apply Cholesky decomposition to each matrix in the given list

    :param Ks: a list of matrices
    :return: a list containing the lower-triangular Cholesky factor of each matrix
    """
    Ls = []
    for K_d in Ks:
        Ls.append(np.linalg.cholesky(K_d))
    return Ls
9,280
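# Small worked example for decompose_matrices above (assumes the function and its
# numpy import are in scope). For K = [[4, 2], [2, 3]] the lower-triangular factor
# is L = [[2, 0], [1, sqrt(2)]], since L @ L.T reproduces K.
import numpy as np

K = np.array([[4.0, 2.0], [2.0, 3.0]])
L = np.linalg.cholesky(K)
assert np.allclose(L @ L.T, K)

Ls = decompose_matrices([K, np.eye(3)])  # second factor is just the 3x3 identity
assert np.allclose(Ls[1], np.eye(3))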
async def plugin_remove_cmd(client, message):
    """remove an installed plugin.

    alemiBot plugins are git repos, cloned into the `plugins` folder as git submodules.
    This will call `git submodule deinit -f`, then remove the related folder in `.git/modules` and finally remove \
    plugin folder and all its content.
    If flag `-lib` is added, libraries installed with pip will be removed too (may break dependencies of other plugins!)
    """
    if not alemiBot.allow_plugin_install:
        return await edit_or_reply(message, "`[!] → ` Plugin management is disabled")
    out = message.text.markdown if is_me(message) else f"`→ ` {get_username(message.from_user)} requested plugin removal"
    msg = message if is_me(message) else await message.reply(out)
    try:
        if len(message.command) < 1:
            out += "\n`[!] → ` No input"
            return await msg.edit(out)
        plugin = message.command[0]
        out += f"\n`→ ` Uninstalling `{plugin}`"

        if "/" in plugin:  # If user passes <user>/<repo> here too, get just repo name
            plugin = plugin.split("/")[1]

        logger.info(f"Removing plugin \"{plugin}\"")

        if message.command["-lib"]:
            out += "\n` → ` Removing libraries"
            await msg.edit(out)
            if os.path.isfile(f"plugins/{plugin}/requirements.txt"):
                proc = await asyncio.create_subprocess_exec(
                    "pip", "uninstall", "-y", "-r", f"plugins/{plugin}/requirements.txt",
                    stdout=asyncio.subprocess.PIPE,
                    stderr=asyncio.subprocess.STDOUT)
                stdout, _stderr = await proc.communicate()
                logger.info(stdout.decode())
                if b"ERROR" in stdout:
                    out += " [`WARN`]"
                else:
                    out += f" [`{stdout.count(b'Uninstalling')} del`]"

        out += "\n` → ` Removing source code"
        await msg.edit(out)

        proc = await asyncio.create_subprocess_shell(
            f"git submodule deinit -f plugins/{plugin} && rm -rf .git/modules/plugins/{plugin} && git rm -f plugins/{plugin}",
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.STDOUT)

        stdout, _stderr = await proc.communicate()
        res = cleartermcolor(stdout.decode())
        logger.info(res)

        if not res.startswith("Cleared"):
            logger.error(res)
            out += f" [`FAIL`]\n`[!] → ` Could not deinit `{plugin}`"
            return await msg.edit(out)
        if f"rm 'plugins/{plugin}'" not in res:
            logger.error(res)
            out += f" [`FAIL`]\n`[!] → ` Could not delete `{plugin}`"
            return await msg.edit(out)

        out += f" [`OK`]\n` → ` Restarting process"
        await msg.edit(out)
        with open("data/lastmsg.json", "w") as f:
            json.dump({"message_id": msg.message_id, "chat_id": msg.chat.id}, f)
        asyncio.get_event_loop().create_task(client.restart())
    except Exception as e:
        logger.exception("Error while removing plugin")
        out += " [`FAIL`]\n`[!] → ` " + str(e)
        await msg.edit(out)
9,281
def ndcg_score(y_pre, y_true, k=20):
    """
    Get NDCG@k.
    :param y_pre: predicted scores, numpy array of shape (batch_size, x)
    :param y_true: ground truth, list[batch_size][ground_truth_num]
    :param k: cut-off rank k
    :return: NDCG@k
    """
    dcg = dcg_score(y_pre, y_true, k)
    idcg = dcg_score(y_true, y_true, k)
    return dcg / idcg
9,282
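# For reference, the conventional definitions behind ndcg_score above (the exact
# behaviour depends on the dcg_score helper, which is not shown here):
#
#   DCG@k  = sum_{i=1..k} rel_i / log2(i + 1)
#   IDCG@k = DCG@k computed on the ideal (ground-truth) ranking
#   NDCG@k = DCG@k / IDCG@k
#
# Note the ratio is undefined when IDCG@k is 0, i.e. when a sample has no relevant
# items in its ground truth; callers may want to filter such samples out first.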
def test_instantiate_8(): """Test value stored as int""" a = FixedPoint(0.1, 'Q0.4') assert a.value == 1
9,283
def get_discount_weights( discount_factor: float, traj_len: int, num_trajs: int = 1 ) -> Optional[npt.NDArray[np.float32]]: """ Return the trajectory discount weight array if applicable :param discount_factor: the discount factor by which the displacements corresponding to the k^th timestep will be discounted :param traj_len: len of traj :param optional num_trajs: num of ego trajs, default is set to 1, but it's generalized in case we need to compare multiple ego trajs with expert :return array of discount_weights. """ discount_weights = None if discount_factor != 1.0: # Compute discount_factors pow_arr = np.tile(np.arange(traj_len), (num_trajs, 1)) # type:ignore discount_weights = np.power(discount_factor, pow_arr) return discount_weights
9,284
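# Worked example for get_discount_weights above (assumes the function and numpy are
# in scope). With discount_factor=0.9 and traj_len=4 each trajectory row holds
# 0.9**[0, 1, 2, 3]; with discount_factor=1.0 the function returns None.
import numpy as np

w = get_discount_weights(discount_factor=0.9, traj_len=4, num_trajs=2)
assert w.shape == (2, 4)
assert np.allclose(w[0], [1.0, 0.9, 0.81, 0.729])
assert get_discount_weights(discount_factor=1.0, traj_len=4) is None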
def touch_to_square(touch_x, touch_y, num_rows, num_cols): """ Given a touch x and y, convert it to a coordinate on the square. """ x = clamp(maprange((PAD_Y_RANGE_MAX, PAD_Y_RANGE_MIN), (0, num_rows), touch_y) + random.randrange(-1, 2), 0, num_rows - 1) y = clamp(maprange((PAD_X_RANGE_MAX, PAD_X_RANGE_MIN), (0, num_cols), touch_x) + random.randrange(-1, 2), 0, num_cols - 1) return (int(x), int(y))
9,285
def is_valid_scheme(url): """Judge whether url is valid scheme.""" return urlparse(url).scheme in ["ftp", "gopher", "http", "https"]
9,286
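# Quick illustration of is_valid_scheme above (assumes urlparse comes from
# urllib.parse, as the snippet implies):
assert is_valid_scheme("https://example.com/index.html")
assert is_valid_scheme("ftp://ftp.example.com/pub/file.txt")
assert not is_valid_scheme("javascript:alert(1)")
assert not is_valid_scheme("mailto:someone@example.com")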
def test_binom_conf_badinput4():
    """Sterne: with the current implementation, Sterne can only be used with a
    two-sided CI.
    """
    pytest.raises(ValueError, binom_conf_interval, 10, 3, 0.95, 'upper',
                  None, 'sterne')
9,287
def test_one_library(logger):
    """
    Runs a test using one solution from the library as a quick litmus test for code health.
    """
    # Raw string keeps the literal backslash in the strategy syntax
    solution = r"\/ [TCP:dataofs:5]-drop-|"
    censor = "censor2"
    test_type = "echo"
    fitness = common.run_test(logger, solution, censor, test_type, log_on_fail=True)

    # If the fitness was less than 0, the strategy failed to beat the censor
    if fitness < 0:
        pytest.fail("Fitness was %d - censor beat strategy." % fitness)
9,288
def height_to_transmission(height, material, energy, rho=0, photo_only=False, source='nist'): """ Calculates the resulting x-ray transmission of an object based on the given height (thickness) and for a given material and energy. Parameters ========== height: grating height (thickness) [um] material: chemical formula ('Fe2O3', 'CaMg(CO3)2', 'La1.9Sr0.1CuO4') energy: x-ray energy [keV] rho: density in [g/cm3], default=0 (no density given) photo_only: boolean for returning photo cross-section component only, default=False source: material params LUT... default='nist' Returns ======= transmission: percentage of resulting x-ray transmission """ return 1 - height_to_absorption(height, material, energy, rho, photo_only, source)
9,289
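# Hedged usage sketch for height_to_transmission above: it simply returns
# 1 - height_to_absorption(...), so transmission and absorption always sum to 1.
# The material, height, energy and density below are arbitrary illustration values
# and assume the underlying material lookup accepts them; height_to_absorption must
# be available in the same module.
t = height_to_transmission(100.0, 'Au', 30.0, 19.32)
a = height_to_absorption(100.0, 'Au', 30.0, 19.32)
assert abs((t + a) - 1.0) < 1e-12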
def assignModelClusters(keyframe_model, colors): """ Map each colorspace segment to the closest color in the input. Parameters ---------- keyframe_model : FrameScorer colors : numpy array of int, shape (num_colors, 3) """ hsv_mean_img = keyframe_model.hsv_means.copy().reshape(1, keyframe_model.n_clusters, 3) hsv_mean_img_saturated = hsv_mean_img.copy() hsv_mean_img_saturated[:, :, 1] = 1 hsv_mean_img_saturated[:, :, 2] = 1 rgb_mean_img_saturated = imageprocessing.color.hsv2rgb(hsv_mean_img_saturated) # rgb_mean_img = imageprocessing.color.hsv2rgb(hsv_mean_img) # imageprocessing.displayImage(rgb_mean_img) # imageprocessing.displayImage(rgb_mean_img_saturated) rgb_means_saturated = rgb_mean_img_saturated.reshape(keyframe_model.n_clusters, 3) distances = np.array(tuple( np.linalg.norm(rgb_means_saturated - np.array(rgb_color), axis=1) for rgb_color in colors )).T best_idxs = distances.argmin(axis=1) keyframe_model.color_mappings = best_idxs return keyframe_model
9,290
def internal_path_exists(path): """ Validates that url path is registered and can properly be resolved in a django configuration. """ try: resolve(path) except Http404: raise ValidationError("'{0}' is not a valid url.".format(path))
9,291
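# Hedged usage sketch for internal_path_exists above: because it raises
# ValidationError on failure (and returns None otherwise), it can be attached to a
# Django form or model field as a validator. The form and field below are
# hypothetical.
from django import forms

class RedirectForm(forms.Form):
    next_path = forms.CharField(validators=[internal_path_exists])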
def sort_according_to_ref_list(fixturenames, param_names):
    """
    Sorts items in the first list, according to their position in the second. Items that are not in the second list
    stay in the same position, the others are just swapped. A new list is returned.

    :param fixturenames: the list of fixture names to reorder
    :param param_names: the reference list giving the desired relative order
    :return: a new, reordered list of fixture names
    """
    cur_indices = []
    for pname in param_names:
        try:
            cur_indices.append(fixturenames.index(pname))
        except (ValueError, IndexError):
            # can happen in case of indirect parametrization: a parameter is not in the fixture name.
            # TODO we should maybe rather add the pname to fixturenames in this case ?
            pass
    target_indices = sorted(cur_indices)
    sorted_fixturenames = list(fixturenames)
    for old_i, new_i in zip(cur_indices, target_indices):
        sorted_fixturenames[new_i] = fixturenames[old_i]
    return sorted_fixturenames
9,292
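# Worked example for sort_according_to_ref_list above: 'c' and 'a' are reordered to
# follow the order given in param_names, 'b' and 'd' keep their original positions,
# and a name that is not a fixture ('missing') is silently ignored.
result = sort_according_to_ref_list(['a', 'b', 'c', 'd'], ['c', 'a', 'missing'])
assert result == ['c', 'b', 'a', 'd']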
def calc_ef_from_bases(x,*args):
    """
    Calculate energies and forces of every sample using bases data.
    """
    global _hl1,_hl2,_ergs,_frcs,_wgt1,_wgt2,_wgt3,_aml,_bml,_cml

    #.....initialize variables
    if _nl == 1:
        _wgt1,_wgt2= vars2wgts(x)
    elif _nl == 2:
        _wgt1,_wgt2,_wgt3= vars2wgts(x)
    es=np.zeros(len(_samples))
    fs= []
    for smpl in _samples:
        fs.append(np.zeros((smpl.natm,3)))

    _hl1= []
    _hl2= []
    _aml= []
    _bml= []
    _cml= []
    if _nprcs == 1:
        for ismpl in range(len(_samples)):
            smpl= _samples[ismpl]
            if _nl == 1:
                est,fst,hl1s,ams,bms= calc_ef1(ismpl,x,*args)
                _hl1.append(hl1s)
                _aml.append(ams)
                _bml.append(bms)
            elif _nl == 2:
                est,fst,hl1s,hl2s,ams,bms,cms= calc_ef2(ismpl,x,*args)
                _hl1.append(hl1s)
                _hl2.append(hl2s)
                _aml.append(ams)
                _bml.append(bms)
                _cml.append(cms)
            es[ismpl]= est
            for ia in range(smpl.natm):
                fs[ismpl][ia,0] += fst[ia,0]
                fs[ismpl][ia,1] += fst[ia,1]
                fs[ismpl][ia,2] += fst[ia,2]
    else:
        # only spin up a worker pool when more than one process is requested
        p= mp.Pool(_nprcs)
        func_args=[]
        if _nl == 1:
            for ismpl in range(len(_samples)):
                func_args.append( (calc_ef1,ismpl,x) )
        elif _nl == 2:
            for ismpl in range(len(_samples)):
                func_args.append( (calc_ef2,ismpl,x) )
        results= p.map(arg_wrapper,func_args)
        p.close()
        p.join()
        for ismpl in range(len(_samples)):
            smpl= _samples[ismpl]
            if _nl == 1:
                est,fst,hl1s,ams,bms= results[ismpl]
                _hl1.append(hl1s)
                _aml.append(ams)
                _bml.append(bms)
            elif _nl == 2:
                est,fst,hl1s,hl2s,ams,bms,cms= results[ismpl]
                _hl1.append(hl1s)
                _hl2.append(hl2s)
                _aml.append(ams)
                _bml.append(bms)
                _cml.append(cms)
            es[ismpl]= est
            for ia in range(smpl.natm):
                fs[ismpl][ia,0] += fst[ia,0]
                fs[ismpl][ia,1] += fst[ia,1]
                fs[ismpl][ia,2] += fst[ia,2]
    # print ' es:'
    # print es
    _ergs= es
    _frcs= fs
    return (es,fs)
9,293
def test_wheel_no_compiles_pyc(
    script: PipTestEnvironment, shared_data: TestData, tmpdir: Path
) -> None:
    """
    Test installing from wheel with --no-compile on
    """
    shutil.copy(shared_data.packages / "simple.dist-0.1-py2.py3-none-any.whl", tmpdir)
    script.pip(
        "install",
        "--no-compile",
        "simple.dist==0.1",
        "--no-index",
        "--find-links",
        tmpdir,
    )

    # There are many locations for the __init__.pyc file so attempt to find
    # any of them
    exists = [
        os.path.exists(script.site_packages_path / "simpledist/__init__.pyc"),
        *glob.glob(script.site_packages_path / "simpledist/__pycache__/__init__*.pyc"),
    ]

    assert not any(exists)
9,294
def HIadj_post_anthesis(
    NewCond_DelayedCDs,
    NewCond_sCor1,
    NewCond_sCor2,
    NewCond_DAP,
    NewCond_Fpre,
    NewCond_CC,
    NewCond_fpost_upp,
    NewCond_fpost_dwn,
    Crop,
    Ksw):

    """
    Function to calculate adjustment to harvest index for post-anthesis water
    stress

    <a href="../pdfs/ac_ref_man_3.pdf#page=119" target="_blank">Reference Manual: harvest index calculations</a> (pg. 110-126)


    *Arguments:*

    `NewCond_*`: `float` : individual InitCond state variables
    (DelayedCDs, sCor1, sCor2, DAP, Fpre, CC, fpost_upp, fpost_dwn)

    `Crop`: `CropClass` : Crop object containing Crop parameters

    `Ksw`: `KswClass` : Ksw object containing water stress parameters


    *Returns:*

    Tuple of updated state variables:
    (`NewCond_sCor1`, `NewCond_sCor2`, `NewCond_fpost_upp`, `NewCond_fpost_dwn`, `NewCond_Fpost`)

    """

    ## Store initial conditions in a structure for updating ##
    # NewCond = InitCond

    InitCond_DelayedCDs = NewCond_DelayedCDs*1
    InitCond_sCor1 = NewCond_sCor1*1
    InitCond_sCor2 = NewCond_sCor2*1

    ## Calculate harvest index adjustment ##
    # 1. Adjustment for leaf expansion
    tmax1 = Crop.CanopyDevEndCD - Crop.HIstartCD
    DAP = NewCond_DAP - InitCond_DelayedCDs
    if (
        (DAP <= (Crop.CanopyDevEndCD + 1))
        and (tmax1 > 0)
        and (NewCond_Fpre > 0.99)
        and (NewCond_CC > 0.001)
        and (Crop.a_HI > 0)
    ):
        dCor = 1 + (1 - Ksw.Exp) / Crop.a_HI
        NewCond_sCor1 = InitCond_sCor1 + (dCor / tmax1)
        DayCor = DAP - 1 - Crop.HIstartCD
        NewCond_fpost_upp = (tmax1 / DayCor) * NewCond_sCor1

    # 2. Adjustment for stomatal closure
    tmax2 = Crop.YldFormCD
    DAP = NewCond_DAP - InitCond_DelayedCDs
    if (
        (DAP <= (Crop.HIendCD + 1))
        and (tmax2 > 0)
        and (NewCond_Fpre > 0.99)
        and (NewCond_CC > 0.001)
        and (Crop.b_HI > 0)
    ):
        # print(Ksw.Sto)
        dCor = np.power(Ksw.Sto, 0.1) * (1 - (1 - Ksw.Sto) / Crop.b_HI)
        NewCond_sCor2 = InitCond_sCor2 + (dCor / tmax2)
        DayCor = DAP - 1 - Crop.HIstartCD
        NewCond_fpost_dwn = (tmax2 / DayCor) * NewCond_sCor2

    # Determine total multiplier
    if (tmax1 == 0) and (tmax2 == 0):
        NewCond_Fpost = 1
    else:
        if tmax2 == 0:
            NewCond_Fpost = NewCond_fpost_upp
        else:
            if tmax1 == 0:
                NewCond_Fpost = NewCond_fpost_dwn
            elif tmax1 <= tmax2:
                NewCond_Fpost = NewCond_fpost_dwn * (
                    ((tmax1 * NewCond_fpost_upp) + (tmax2 - tmax1)) / tmax2
                )
            else:
                NewCond_Fpost = NewCond_fpost_upp * (
                    ((tmax2 * NewCond_fpost_dwn) + (tmax1 - tmax2)) / tmax1
                )

    return (
        NewCond_sCor1,
        NewCond_sCor2,
        NewCond_fpost_upp,
        NewCond_fpost_dwn,
        NewCond_Fpost)
9,295
def packify(fmt=u'8', fields=[0x00], size=None, reverse=False):
    """ Packs the sequence of bit fields in fields into a bytearray of size bytes
    using the fmt string.

    Each white-space-separated field of fmt is the length of the associated bit field.
    If not provided, size is the least integer number of bytes that holds the fmt.
    If reverse is True, reverse the order of the bytes in the bytearray before
    returning. This is useful for converting between big-endian and little-endian.

    Assumes unsigned field values.
    Assumes network (big-endian) byte order, so the first fields element occupies
    the high-order bits.
    Each field in the format string is the number of bits for the associated bit field.
    Fields with a length of 1 are treated as having boolean (truthy) field values,
    that is, nonzero packs as a 1.
    For bit fields of length 2+ the field element is truncated
    to the number of low-order bits in the bit field.
    If the sum of the number of bits in fmt is less than size bytes, then the last
    byte in the bytearray is right zero padded.
    If the sum of the number of bits in fmt is greater than size bytes, an exception
    is raised.
    To pad, just use a 0 value in the source field.

    example
    packify("1 3 2 2", (True, 4, 0, 3)) returns bytearray([0xc3])
    """
    tbfl = sum((int(x) for x in fmt.split()))
    if size is None:
        size = (tbfl // 8) + 1 if tbfl % 8 else tbfl // 8

    if not (0 <= tbfl <= (size * 8)):
        raise ValueError("Total bit field lengths in fmt not in [0, {0}]".format(size * 8))

    n = 0
    bfp = 8 * size  # starting bit field position
    bu = 0  # bits used

    for i, bfmt in enumerate(fmt.split()):
        bits = 0x00
        bfl = int(bfmt)
        bu += bfl

        if bfl == 1:
            if fields[i]:
                bits = 0x01
            else:
                bits = 0x00
        else:
            bits = fields[i] & (2**bfl - 1)  # bit-and mask out high order bits

        bits <<= (bfp - bfl)  # shift left to bit position less bit field size
        n |= bits  # bit-or in bits
        bfp -= bfl  # adjust bit field position for next element

    return bytify(n=n, size=size, reverse=reverse, strict=True)
9,296
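# Walkthrough of the docstring example for packify above, assuming the bytify helper
# it relies on is available in the same module:
#
#   fmt "1 3 2 2" with fields (True, 4, 0, 3) packs, high bits first, as
#   1 | 100 | 00 | 11  ->  0b11000011  ->  0xC3
#
packed = packify("1 3 2 2", (True, 4, 0, 3))
assert packed == bytearray([0xC3])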
def SetScore(objects: object) -> None: """Updates the score count and the render values.""" eaten = 0 for item in objects.items.keys(): if item.startswith("chip"): eaten += objects.items[item]["eaten"] objects.score_font = objects.font.render(f'Score: {eaten}', False, (0, 0, 0)) objects.score_width = objects.score_font.get_rect().width objects.score_height = objects.score_font.get_rect().height
9,297
def price(bot, update, args):
    """Receive a stock code and return the price.

    Parameters:
        *args: Accept only one str argument containing the stock code (e.g. 'PETR3')

    Examples:
        /price BBAS3

        This would return the current price for BBAS3 stock"""

    stock_code = str(args[0])
    stock = Stock(stock_code)
    if stock.is_valid:
        bot.send_message(chat_id=update.message.chat_id,
                         text=f"The {stock.code} price is: R${stock.price}")
    else:
        bot.send_message(chat_id=update.message.chat_id,
                         text="The stock you're looking for does not exist in our database.")
9,298
def fix_conf_params(conf_obj, section_name):
    """From a ConfigParser object, return a dictionary of all parameters for a given
    section in the expected format.
    Because ConfigParser defaults to values under [DEFAULT] if present, these values
    should always appear unless the file is really bad.

    :param conf_obj: ConfigParser instance
    :param section_name: string of section name in config file
        (e.g. "MyBank" matches "[MyBank]" in file)
    :return: dict with all parameters
    """
    config = {
        "input_columns": ["Input Columns", False, ","],
        "output_columns": ["Output Columns", False, ","],
        "input_filename": ["Source Filename Pattern", False, ""],
        "path": ["Source Path", False, ""],
        "ext": ["Source Filename Extension", False, ""],
        "regex": ["Use Regex For Filename", True, ""],
        "fixed_prefix": ["Output Filename Prefix", False, ""],
        "input_delimiter": ["Source CSV Delimiter", False, ""],
        "header_rows": ["Header Rows", False, ""],
        "footer_rows": ["Footer Rows", False, ""],
        "date_format": ["Date Format", False, ""],
        "delete_original": ["Delete Source File", True, ""],
        "cd_flags": ["Inflow or Outflow Indicator", False, ","],
        "payee_to_memo": ["Use Payee for Memo", True, ""],
        "plugin": ["Plugin", False, ""],
        "api_token": ["YNAB API Access Token", False, ""],
        "api_account": ["YNAB Account ID", False, "|"],
    }

    for key in config:
        config[key] = get_config_line(conf_obj, section_name, config[key])
    config["bank_name"] = section_name

    # quick n' dirty fix for tabs as delimiters
    if config["input_delimiter"] == "\\t":
        config["input_delimiter"] = "\t"

    return config
9,299
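# Hedged illustration of fix_conf_params above: the section name and option values
# are invented, and get_config_line is assumed to read each option with the
# fallback/split behaviour encoded in the config dict (e.g. splitting
# "Input Columns" on ","). Results noted in comments are what that behaviour would
# be expected to produce, not guaranteed output.
import configparser

cfg = configparser.ConfigParser()
cfg.read_string("""
[MyBank]
Input Columns = Date,Payee,Outflow,Inflow
Output Columns = Date,Payee,Memo,Outflow,Inflow
Source Filename Pattern = statement
Source Filename Extension = .csv
""")

params = fix_conf_params(cfg, "MyBank")
# params["bank_name"] == "MyBank"
# params["input_columns"] expected to be ["Date", "Payee", "Outflow", "Inflow"]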