content
stringlengths
22
815k
id
int64
0
4.91M
def search_cut(sentence):
    """Segment *sentence* using jieba's search-engine mode (HMM-based).

    :param sentence: Text to segment.
    :return: List of tokens produced by ``jieba.lcut_for_search``.
    """
    return jieba.lcut_for_search(sentence)
5,328,200
def count_items(column_list: list):
    """Count the distinct values in a list and how many times each occurs.

    Args:
        column_list (list): List of (hashable) items of arbitrary types.

    Returns:
        tuple: Two parallel lists — the distinct values (in first-seen
        order) and the occurrence count of each value.
    """
    tally = Counter(column_list)
    distinct_values = list(tally.keys())
    occurrence_counts = list(tally.values())
    return distinct_values, occurrence_counts
5,328,201
def abort(exc=None):
    """Raise an exception to terminate a transaction.

    If not specified, 'exceptions.NotFound' (404) is raised by default.

    :param exc: The exception class to raise (a subclass of
        ``exceptions.HTTPException``); ``None`` means 404.
    :raises: ``exc``, or ``exceptions.NotFound`` when ``exc`` is None.
    """
    if exc is None:
        raise exceptions.NotFound
    # Guard against misuse: redirects have a dedicated helper, and anything
    # raised here must be an HTTP exception the framework can render.
    # NOTE: these asserts are stripped under ``python -O``.
    assert not issubclass(exc, exceptions.RedirectException), \
        'Redirections should be handled by `redirect()` function'
    assert issubclass(exc, exceptions.HTTPException), \
        'exc is expected to be a subclass of exceptions.HTTPException'
    raise exc
5,328,202
def py_call(obj, inputs=(), direct_args=()):
    """Create a task that calls Python code.

    Example:

    >>> def hello(x): return b"Hello " + x.read()
    >>> a = tasks.const("Loom")
    >>> b = tasks.py_call((a,), hello)
    >>> client.submit(b)
    b'Hello Loom'

    NOTE(review): the example passes the inputs tuple first and the callable
    second, which does not match this signature (``obj`` first, then
    ``inputs``) — confirm the intended calling convention against the Loom
    task documentation.

    :param obj: Task (or object) providing the code to call; becomes the
        first element of ``task.inputs``.
    :param inputs: Additional input tasks appended after ``obj``.
    :param direct_args: Extra arguments, pickled into ``task.config`` via
        ``cloudpickle``.
    :return: A configured ``Task`` with a 1-CPU resource request.
    """
    task = Task()
    task.task_type = PY_CALL
    task.inputs = (obj,) + tuple(inputs)
    task.config = cloudpickle.dumps(tuple(direct_args))
    task.resource_request = cpu1
    return task
5,328,203
def to_graph(grid, land_val="1"):
    """Build an adjacency-list representation of the land cells in *grid*.

    Two land cells are connected iff they are vertically or horizontally
    adjacent (4-connectivity).

    Args:
        grid: 2D sequence of equal-length rows (e.g. list of strings or
            list of lists) of cell values.
        land_val: Value that marks a land cell. Defaults to "1", matching
            the original hard-coded marker, so existing callers are
            unaffected.

    Returns:
        dict: Maps the ``(row, col)`` of every land cell to the list of its
        neighboring land-cell coordinates, in up/down/left/right order.
        Returns an empty dict for an empty grid (the original raised
        ``IndexError`` on ``grid == []``).
    """
    adj_list = {}
    # Guard the empty grid / empty-row case before touching grid[0].
    if not grid or not grid[0]:
        return adj_list
    n_rows, n_cols = len(grid), len(grid[0])
    for i in range(n_rows):
        for j in range(n_cols):
            if grid[i][j] != land_val:
                continue
            neighbors = []
            # Probe the four orthogonal neighbors, staying in bounds;
            # order (up, down, left, right) matches the original.
            for di, dj in ((-1, 0), (1, 0), (0, -1), (0, 1)):
                ni, nj = i + di, j + dj
                if 0 <= ni < n_rows and 0 <= nj < n_cols and grid[ni][nj] == land_val:
                    neighbors.append((ni, nj))
            adj_list[(i, j)] = neighbors
    return adj_list
5,328,204
def validate_doc(doc):
    """Return True if *doc* is a valid dictionary document.

    A valid dictionary consists of exactly one element, and that element
    must be a panflute ``DefinitionList``.
    """
    content = doc.content
    if len(content) != 1:
        return False
    return isinstance(content[0], pf.DefinitionList)
5,328,205
def is_renderable(obj, quiet=True): """ Checks if object is renderable Args: obj (unicode): Name of object to verify quiet (bool): If the function should keep quiet (default=True) Returns: (bool) if its renderable or not """ # unit test # make sure we are not working with components/attributes obj = cmds.ls(obj, objectsOnly=True, l=True) if isinstance(obj, list) or isinstance(obj, tuple): if len(obj) == 1: obj = obj[0] else: LOG.error("isRenderable - {0} cannot be checked".format(obj)) return False if not cmds.objExists(obj): if not quiet: LOG.error("{0} does not exist, skipping it".format(obj)) return False # doIt if cmds.getAttr("{0}.template".format(obj)): if not quiet: LOG.error("{0} is a template object, skipping it".format(obj)) return False if not cmds.getAttr("{0}.visibility".format(obj)): # Let's check if it has any in-connection (its animated) if not cmds.listConnections("{0}.visibility".format(obj)): if not quiet: LOG.error("{0} is not visible, skipping it".format(obj)) return False if not cmds.getAttr("{0}.lodVisibility".format(obj)): # Let's check if it has any in-connection (its animated) if not cmds.listConnections("{0}.lodVisibility".format(obj)): if not quiet: LOG.error("{0} has no lodVisibility, skipping it".format(obj)) return False # TODO Display layer override check renderable = True # check parents parent = cmds.listRelatives(obj, parent=True, path=True) if parent: renderable = renderable and is_renderable(parent[0]) return renderable
5,328,206
def _refresh_database(bot, force=False, prune=True, callback=None, background=False, db=None):
    """
    Actual implementation of refresh_database. Refreshes the database of starsystems. Also rebuilds the bloom filter.

    :param bot: Bot instance
    :param force: True to force refresh
    :param prune: True to prune non-updated systems. Keep True unless performance testing.
    :param callback: Optional function that is called as soon as the system determines a refresh is needed.
    :param background: If True and a refresh is needed, it is submitted as a background task rather than running
        immediately.
    :param db: Database handle
    :return: False when no refresh was needed; a Future when scheduled in the
        background; True after a completed refresh.

    Note that this function executes some raw SQL queries (among other voodoo). This is for performance reasons
    concerning the insanely large dataset being handled, and should NOT serve as an example for implementation
    elsewhere.
    """
    eddb_url = bot.config.ratbot.edsm_url or "https://eddb.io/archive/v5/systems.csv"
    chunked = bot.config.ratbot.chunked_systems
    # Should really implement this, but until then
    if chunked:
        raise NotImplementedError("Chunked system loading is not implemented yet.")
    status = get_status(db)
    eddb_maxage = float(bot.config.ratbot.edsm_maxage or (7*86400))  # Once per week = 604800 seconds
    # Refresh only when forced, never refreshed before, or stale past maxage.
    if not (
        force or
        not status.starsystem_refreshed or
        (datetime.datetime.now(tz=datetime.timezone.utc) - status.starsystem_refreshed).total_seconds() > eddb_maxage
    ):
        # No refresh needed.
        # print('not force and no refresh needed')
        return False
    if callback:
        callback()
    if background:
        # Re-enter this function on the executor; force=True skips the
        # staleness check we already passed.
        print('Scheduling background refresh of starsystem data')
        return bot.memory['ratbot']['executor'].submit(
            _refresh_database, bot, force=True, callback=None, background=False
        )
    conn = db.connection()
    # Now in actual implementation beyond background scheduling
    # Counters for stats
    # All times in seconds
    stats = {
        'load': 0,      # Time spent retrieving the CSV file(s) and dumping it into a temptable in the db.
        'prune': 0,     # Time spent removing non-update updates.
        'systems': 0,   # Time spent merging starsystems into the db.
        'prefixes': 0,  # Time spent merging starsystem prefixes into the db.
        'stats': 0,     # Time spent (re)computing system statistics
        'bloom': 0,     # Time spent (re)building the system prefix bloom filter.
        'optimize': 0,  # Time spent optimizing/analyzing tables.
        'misc': 0,      # Miscellaneous tasks (total time - all other stats)
        'total': 0,     # Total time spent.
    }

    def log(fmt, *args, **kwargs):
        # Timestamped progress logging to stdout.
        print("[{}] ".format(datetime.datetime.now()) + fmt.format(*args, **kwargs))

    overall_timer = TimedResult()
    log("Starsystem refresh started")

    if chunked:
        # FIXME: Needs to be reimplemented.
        # NOTE(review): this branch is dead code (chunked raises
        # NotImplementedError above). It also has two latent bugs:
        # stats['index'] is not a key of `stats` (KeyError), and the log
        # format string below contains a stray brace ("(Took {}}").
        log("Retrieving starsystem index at {}", eddb_url)
        with timed() as t:
            response = requests.get(eddb_url)
            response.raise_for_status()
            urls = list(urljoin(eddb_url, chunk["SectorName"]) for chunk in response.json())
        stats['index'] += t.seconds
        log("{} file(s) queued for starsystem refresh. (Took {}}", len(urls), format_timestamp(t.delta))
    else:
        urls = [eddb_url]

    # Temporary staging table; dropped automatically at commit.
    temptable = sa.Table(
        '_temp_new_starsystem', sa.MetaData(),
        sa.Column('id', sa.Integer, primary_key=True, autoincrement=True),
        sa.Column('eddb_id', sa.Integer),
        sa.Column('name_lower', sa.Text(collation="C")),
        sa.Column('name', sa.Text(collation="C")),
        sa.Column('first_word', sa.Text(collation="C")),
        sa.Column('word_ct', sa.Integer),
        sa.Column('xz', SQLPoint),
        sa.Column('y', sa.Numeric),
        # sa.Index('_temp_id_ix', 'eddb_id'),
        prefixes=['TEMPORARY'],
        postgresql_on_commit='DROP'
    )
    temptable.create(conn)
    # Short table aliases substituted into every raw SQL statement below.
    sql_args = {
        'sp': StarsystemPrefix.__tablename__,
        's': Starsystem.__tablename__,
        'ts': temptable.name,
        'tsp': '_temp_new_prefixes'
    }
    buffer = io.StringIO()  # Temporary IO buffer for COPY FROM
    columns = ['eddb_id', 'name_lower', 'name', 'first_word', 'word_ct', 'xz', 'y']  # Columns to copy to temptable
    getter = operator.itemgetter(*columns)
    total_flushed = 0  # Total number of flushed items so far
    pending_flush = 0  # Number of items waiting to flush

    def exec(sql, *args, **kwargs):
        # Format the SQL with the table aliases and execute it, logging
        # failures before re-raising.
        try:
            conn.execute(sql.format(*args, **kwargs, **sql_args))
        except Exception as ex:
            log("Query failed.")
            import traceback
            traceback.print_exc()
            raise

    def flush():
        # Bulk-load the buffered rows into the temp table via COPY FROM,
        # then reset the buffer and counters.
        nonlocal buffer, total_flushed, pending_flush
        if not pending_flush:
            return
        log("Flushing system(s) {}-{}", total_flushed + 1, total_flushed + pending_flush)
        buffer.seek(0)
        cursor = conn.connection.cursor()
        cursor.copy_from(buffer, temptable.name, sep='\t', null='', columns=columns)
        buffer = io.StringIO()
        # systems = []
        total_flushed += pending_flush
        pending_flush = 0

    # Phase 1: stream the CSV(s) into the temp table.
    with timed() as t:
        for url in urls:
            log("Retrieving starsystem data at {}", url)
            try:
                response = requests.get(url, stream=True)
                reader = csv.DictReader(io.TextIOWrapper(response.raw))
                for row in reader:
                    # Parse and reformat system info from CSV
                    # subn collapses whitespace; its count is (words - 1).
                    name, word_ct = re.subn(r'\s+', ' ', row['name'].strip())
                    name_lower = name.lower()
                    first_word, *_ = name_lower.split(" ", 1)
                    word_ct += 1
                    # Store coordinates only when all three are present;
                    # xz is a Postgres point, y a separate numeric.
                    if all((row['x'], row['y'], row['z'])):
                        xz = "({x},{z})".format(**row)
                        y = row['y']
                    else:
                        xz = y = ''
                    system_raw = {
                        'eddb_id': str(row['id']),
                        'name_lower': name_lower,
                        'name': name,
                        'first_word': first_word,
                        'xz': xz,
                        'y': y,
                        'word_ct': str(word_ct)
                    }
                    pending_flush += 1
                    buffer.write("\t".join(getter(system_raw)))
                    buffer.write("\n")
                    if pending_flush >= FLUSH_THRESHOLD:
                        flush()
            except ValueError:
                # NOTE(review): silently swallowed — presumably malformed
                # rows/responses; confirm this is intentional best-effort.
                pass
            except Exception:
                log("Failed to retrieve data")
                import traceback
                traceback.print_exc()
        flush()
        log("Creating index")
        exec("CREATE INDEX ON {ts}(eddb_id)")
    stats['load'] += t.seconds

    # Phase 2: de-duplicate staged rows and optionally prune no-op updates.
    with timed() as t:
        log("Removing possible duplicates")
        # Keep only the last-seen row per eddb_id (MAX of the serial id).
        exec("DELETE FROM {ts} WHERE eddb_id NOT IN(SELECT MAX(id) AS id FROM {ts} GROUP BY eddb_id)")
        # No need for the temporary 'id' column at this point.
        exec("ALTER TABLE {ts} DROP id CASCADE")
        # Making this a primary key (or even just a unique key) apparently affects query planner performance vs the
        # non-existing unique key.
        exec("ALTER TABLE {ts} ADD PRIMARY KEY(eddb_id)")
        if prune:
            log("Removing non-updates to existing systems")
            # If a starsystem has been updated, at least one of 'name', 'xz' or 'y' are guaranteed to have changed.
            # (A change that effects word_ct would effect name as well, for instance.)
            # Delete any temporary systems that exist in the real table with matching attributes.
            exec("""
                DELETE FROM {ts} AS t
                USING {s} AS s
                WHERE
                    s.eddb_id=t.eddb_id
                    AND ROW(s.name, s.y) IS NOT DISTINCT FROM ROW(t.name, t.y)
                    AND ((s.xz IS NULL)=(t.xz IS NULL))
                    AND (s.xz~=t.xz OR s.xz IS NULL)
            """)
        else:
            log("Skipping non-update removal phase")
    stats['prune'] += t.seconds

    # Phase 3: merge new (first_word, word_ct) prefixes.
    with timed() as t:
        log("Building list of distinct prefixes")
        # Create list of unique prefixes in this batch
        exec("""
            CREATE TEMPORARY TABLE {tsp} ON COMMIT DROP
            AS SELECT DISTINCT first_word, word_ct FROM {ts}
        """)
        # Insert new prefixes
        exec("""
            INSERT INTO {sp} (first_word, word_ct)
            SELECT t.first_word, t.word_ct
            FROM {tsp} AS t
            LEFT JOIN {sp} AS sp ON sp.first_word=t.first_word AND sp.word_ct=t.word_ct
            WHERE sp.first_word IS NULL
        """)
    stats['prefixes'] += t.seconds

    # Phase 4: upsert starsystems (update existing, insert new).
    with timed() as t:
        log("Updating existing systems.")
        exec("""
            UPDATE {s} AS s
            SET name_lower=t.name_lower, name=t.name, first_word=t.first_word, word_ct=t.word_ct, xz=t.xz, y=t.y
            FROM {ts} AS t WHERE s.eddb_id=t.eddb_id
        """)
        log("Inserting new systems.")
        exec("""
            INSERT INTO {s} (eddb_id, name_lower, name, first_word, word_ct, xz, y)
            SELECT t.eddb_id, t.name_lower, t.name, t.first_word, t.word_ct, t.xz, t.y
            FROM {ts} AS t
            LEFT JOIN {s} AS s ON s.eddb_id=t.eddb_id
            WHERE s.eddb_id IS NULL
        """)
    stats['systems'] += t.seconds

    # Phase 5: recompute ratio/cume_ratio for prefixes touched by this batch,
    # using window functions over per-prefix system counts.
    with timed() as t:
        log('Computing prefix statistics')
        exec("""
            UPDATE {sp} SET ratio=t.ratio, cume_ratio=t.cume_ratio
            FROM (
                SELECT t.first_word, t.word_ct, ct/(SUM(ct) OVER w) AS ratio, (SUM(ct) OVER p)/(SUM(ct) OVER w) AS cume_ratio
                FROM (
                    SELECT sp.*, COUNT(s.eddb_id) AS ct
                    FROM {sp} AS sp
                    LEFT JOIN {s} AS s USING (first_word, word_ct)
                    WHERE
                        sp.first_word IN(SELECT first_word FROM {tsp})
                    GROUP BY sp.first_word, sp.word_ct
                    HAVING COUNT(*) > 0
                ) AS t
                WINDOW
                    w AS (PARTITION BY t.first_word ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING),
                    p AS (PARTITION BY t.first_word ORDER BY t.word_ct ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)
            ) AS t
            WHERE {sp}.first_word=t.first_word AND {sp}.word_ct=t.word_ct
        """)
    stats['stats'] += t.seconds

    # Phase 6: refresh planner statistics on the merged tables.
    with timed() as t:
        log("Analyzing tables")
        exec("ANALYZE {sp}")
        exec("ANALYZE {s}")
    stats['optimize'] += t.seconds

    log("Starsystem database update complete")
    # Update refresh time
    try:
        status = get_status(db)
        status.starsystem_refreshed = sql.func.clock_timestamp()
        db.add(status)
        db.commit()
    except Exception:
        import traceback
        traceback.print_exc()
        raise
    log("Starsystem database update committed")

    with timed() as t:
        log("Rebuilding bloom filter")
        refresh_bloom(bot)
    stats['bloom'] += t.seconds

    overall_timer.stop()
    stats['misc'] = overall_timer.seconds - sum(stats.values())
    stats['total'] = overall_timer.seconds
    bot.memory['ratbot']['stats']['starsystem_refresh'] = stats
    log("Starsystem refresh finished")
    return True
5,328,207
def refresh_emoji_text_map(pg: sql_query_tools.Postgres, pg_schema: str, table_name: str, columnspec: dict, logger: logging.Logger) -> None:
    """
    Refresh table emoji_text_map.

    Rebuilds the table from the ``emoji`` package's English emoji map,
    validating the frame against *columnspec* before writing.
    """
    logger.info(f'Refreshing table "{bold(table_name)}"', arrow='yellow')
    # One row per emoji: transpose the single-row frame, then promote the
    # index (the emoji characters) to a regular column.
    mapping = pd.DataFrame(emoji.UNICODE_EMOJI['en'], index=[0]).T
    mapping = mapping.reset_index().rename(columns={'index': 'emoji', 0: 'plain_text'})
    columns_match_expectation(mapping, table_name, columnspec)
    # Replace the existing table wholesale.
    mapping.to_sql(name=table_name, con=pg.dbcon, schema=pg_schema, index=False, if_exists='replace')
    logger.info(f'Rebuilt table "{bold(table_name)}", shape: {mapping.shape}', arrow='yellow')
5,328,208
def coverageone(ctx, source, test):
    """Run the unit tests and compute test coverage for one module.

    inv coverageone --source plum_tools.fib --test tests/test_fib.py
    """
    command = (
        "export PYTHONPATH=`pwd` && "
        "coverage run --rcfile=.coveragerc --source={source} -m pytest -vv -rsxS -q {test} && "
        "coverage report -m"
    ).format(source=covert_source(source), test=test)
    ctx.run(command, encoding="utf-8", pty=True, echo=True)
5,328,209
def V_bandpass(V, R_S, C, L, R_L, f):
    """Bandpass filter output voltage.

    The output is the input voltage minus the drop across the source
    impedance (circuit current times R_S).
    """
    # Total series impedance is the source resistance plus the filter's
    # complex impedance at frequency f.
    circuit_current = V / (R_S + Z_bandpass(C, L, R_L, f))
    return V - circuit_current * R_S
5,328,210
def move_to_element(driver, element, display_scaling=100, chrome_info_bar_shown=True):
    """Move the cursor to the middle of *element*. Works for Chrome and Firefox.

    :param driver: Chrome or Firefox driver
    :type driver: WebDriver
    :param element: Web element to move to
    :type element: WebElement
    :param display_scaling: Display scaling percentage (100, 125, 150 or 175),
        as found under 'Display settings'.
    :type display_scaling: int
    :param chrome_info_bar_shown: Whether Chrome's info bar is visible.
    :return: None
    """
    # Delegates to the Chrome-specific implementation (also handles Firefox).
    move_to_element_chrome(driver, element, display_scaling, chrome_info_bar_shown)
5,328,211
def get_server_url():
    """Return the current server URL; does not work inside a task."""
    env = os.environ
    # Prefer the forwarded host (reverse proxy) over the direct host header.
    host = env.get('HTTP_X_FORWARDED_HOST') or env['HTTP_HOST']
    scheme = env['wsgi.url_scheme']
    return u'%s://%s' % (scheme, host)
5,328,212
def celery():
    """Celery app test fixture."""
    # Imported lazily so the fixture module loads without the app installed.
    from admiral.celery import celery as celery_app
    return celery_app
5,328,213
def test_copy_provenance(tracked_file):
    """Test `esmvalcore._provenance.TrackedFile.copy_provenance`."""
    doc = ProvDocument()
    doc.add_namespace('task', uri=ESMVALTOOL_URI_PREFIX + 'task')
    task_activity = doc.activity('task:test-task-name')
    tracked_file.initialize_provenance(task_activity)

    clone = tracked_file.copy_provenance()
    assert clone.activity == tracked_file.activity
    assert clone.entity == tracked_file.entity
    # The provenance must be an equal but distinct document (a real copy,
    # not a shared reference).
    assert clone.provenance == tracked_file.provenance
    assert clone.provenance is not tracked_file.provenance
5,328,214
def perm_cache(func):
    """Cache a permission-check result, keyed on user + request parameters.

    Only read-only (GET) requests are cached; every other method always
    re-evaluates the wrapped check. Cached results expire after 60 seconds.

    :param func: permission method with signature ``func(self, request, view)``.
    :return: a wrapped method with the same signature and (fix) the same
        ``__name__``/``__doc__`` metadata, preserved via ``functools.wraps``.
    """
    from functools import wraps

    @wraps(func)  # preserve the wrapped function's metadata for debugging/introspection
    def _deco(self, request, view):
        # Only cache query (GET) permission checks.
        if request.method != "GET":
            return func(self, request, view)
        user = request.user.username
        # Key includes user, view action and all view kwargs so distinct
        # lookups never share a cache entry.
        kwargs = "_".join("{}:{}".format(_k, _w) for _k, _w in view.kwargs.items())
        cache_name = "{}__{}__{}".format(user, view.action, kwargs)
        perm = cache.get(cache_name)
        if perm is None:
            perm = func(self, request, view)
            cache.set(cache_name, perm, 60)
        return perm
    return _deco
5,328,215
def test_median_even():
    """Test the median for even number of items."""
    left = [0, 1, 2, 3, 4, 5, 6]
    right = [7, 8, 9]
    expected = 4.5
    # Both implementations must agree on the expected median.
    assert find_median(left, right) == expected
    assert find_median_simpler_bit_less_efficient(left, right) == expected
5,328,216
def ast_walker(handler):
    """Decorate ``handler`` as a generic AST walker.

    ``handler`` may be a function or a class (use a class when dispatching
    on node type is required); it is wrapped in a
    :py:class:`~peval.Dispatcher` instance — see :py:class:`~peval.Dispatcher`
    for the required class structure.

    Returns a callable with the signature::

        def walker(state, node, ctx=None)

    :param state: a dictionary of traversal state passed to every handler
        call; converted to a :class:`~peval.tools.immutableadict` at the
        start of the traversal. Handlers update it by returning a modified
        version.
    :param node: the ``ast.AST`` object to traverse (never mutated).
    :param ctx: a dictionary of global context, likewise converted to an
        :class:`~peval.tools.immutableadict`; values are also accessible as
        attributes (``ctx.value``).
    :returns: a tuple ``(state, new_node)``.

    ``handler`` is invoked for every node in a depth-first, pre-order
    traversal. If it is a function (or, for a class, each static method) it
    must accept — as keywords, in any order — a subset of ``state``,
    ``node``, ``ctx``, ``prepend``, ``visit_after``, ``visiting_after``,
    ``skip_fields``, ``walk_field``. For a class, the default handler is a
    pass-through that changes neither node nor state.

    * ``prepend(lst)`` prepends the given ``ast.AST`` nodes to the result of
      the closest enclosing statement block; these nodes are not traversed
      automatically.
    * ``visit_after()`` schedules a second handler call on this node after
      its fields are traversed (provided the handler returns an ``ast.AST``,
      not a list or ``None``); on that second call the parameter is ``None``
      and ``visiting_after`` is ``True`` (otherwise ``False``).
    * ``skip_fields()`` tells the walker not to traverse this node's fields.
    * ``walk_field(state, value, block_context=False) ->
      (new_state, new_value)`` traverses a field value explicitly; pass
      ``block_context=True`` for lists of statements so ``prepend`` works
      correctly.

    Each handler call must return ``(new_state, new_node)`` where
    ``new_node`` is one of:

    * ``None`` — the node is removed from the parent list or field;
    * the passed ``node`` unchanged — its fields are traversed by default
      (unless ``skip_fields()`` was called);
    * a new ``ast.AST`` replacing the node — its fields are *not* traversed
      automatically (use ``walk_field()`` if needed);
    * for a node that is a list element, a list of ``ast.AST`` objects to
      splice in its place — also not traversed automatically.
    """
    return _Walker(handler, transform=True, inspect=True)
5,328,217
def test_list_boolean_min_length_1_nistxml_sv_iv_list_boolean_min_length_2_4(mode, save_output, output_format):
    """
    Type list/boolean is restricted by facet minLength with value 6.
    """
    base = "nistData/list/boolean/Schema+Instance/"
    assert_bindings(
        schema=base + "NISTSchema-SV-IV-list-boolean-minLength-2.xsd",
        instance=base + "NISTXML-SV-IV-list-boolean-minLength-2-4.xml",
        class_name="NistschemaSvIvListBooleanMinLength2",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
5,328,218
def html_escape(text):
    """Produce entities within text."""
    # Map each character through the escape table, passing unknown
    # characters through unchanged.
    return "".join(html_escape_table.get(ch, ch) for ch in text)
5,328,219
def qs_without_parameter(arg1, arg2):
    """
    Removes an argument from the get URL.

    Use: {{ request|url_without_parameter:'page' }}

    Args:
        arg1: request
        arg2: parameter to remove
    """
    kept = {}
    for key, value in arg1.items():
        # Skip the removed parameter, and keep only the first non-None
        # occurrence of each key.
        if key == arg2 or kept.get(key, None) is not None:
            continue
        try:
            kept[key] = value[0]
        except IndexError:
            kept[key] = value
    return "&".join(k + "=" + v for k, v in kept.items())
5,328,220
def main(
    dataset: str = typer.Option(
        "mc4", help="The name of the hub dataset or local csv/tsv file."
    ),
    dataset_config: Optional[str] = typer.Option(
        "es",
        help="The configuration of the hub dataset, if any. Does not apply to local csv/tsv files.",
    ),
    dataset_split: Optional[str] = typer.Option(
        "train", help="The dataset split. Does not apply to local csv/tsv files."
    ),
    text_column: str = typer.Option("text", help="The text field name."),
    language: str = typer.Option(
        "es", help=f"The language of the text. Options: {LANGUAGES}"
    ),
    doc_type: str = typer.Option(
        "sentence",
        help=f"Whether to embed at the sentence or document level. Options: {DOCUMENT_TYPES}.",
    ),
    sample: int = typer.Option(1000, help="Maximum number of examples to use."),
    perplexity_model: str = typer.Option(
        "wikipedia",
        help=f"Dataset on which the perplexity model was trained on. Options: {PERPLEXITY_MODELS}",
    ),
    dimensionality_reduction: str = typer.Option(
        DIMENSIONALITY_REDUCTION_ALGORITHMS[0],
        help=f"Whether to use UMAP or t-SNE for dimensionality reduction. Options: {DIMENSIONALITY_REDUCTION_ALGORITHMS}.",
    ),
    model_name: str = typer.Option(
        EMBEDDING_MODELS[0],
        help=f"The sentence embedding model to use. Options: {EMBEDDING_MODELS}",
    ),
    output_file: str = typer.Option(
        "perplexity", help="The name of the output visualization files."
    ),
):
    """
    Perplexity Lenses: Visualize text embeddings in 2D using colors to represent perplexity values.
    """
    logger.info("Loading embedding model...")
    model = load_model(model_name)
    # Choose the 2D projection backend; both are seeded for reproducibility.
    dimensionality_reduction_function = (
        partial(get_umap_embeddings, random_state=SEED)
        if dimensionality_reduction.lower() == "umap"
        else partial(get_tsne_embeddings, random_state=SEED)
    )
    logger.info("Loading KenLM model...")
    kenlm_model = KenlmModel.from_pretrained(
        perplexity_model.lower(),
        language,
        lower_case=True,
        remove_accents=True,
        normalize_numbers=True,
        punctuation=1,
    )
    logger.info("Loading dataset...")
    # Local csv/tsv files are read directly; otherwise the hub dataset is
    # streamed and converted (which also computes perplexity per row).
    if dataset.endswith(".csv") or dataset.endswith(".tsv"):
        df = pd.read_csv(dataset, sep="\t" if dataset.endswith(".tsv") else ",")
        if doc_type.lower() == "sentence":
            # Explode documents into one row per sentence before scoring.
            df = documents_df_to_sentences_df(df, text_column, sample, seed=SEED)
        df["perplexity"] = df[text_column].map(kenlm_model.get_perplexity)
    else:
        df = hub_dataset_to_dataframe(
            dataset,
            dataset_config,
            dataset_split,
            sample,
            text_column,
            kenlm_model,
            seed=SEED,
            doc_type=doc_type,
        )
    # Round perplexity
    df["perplexity"] = df["perplexity"].round().astype(int)
    logger.info(
        f"Perplexity range: {df['perplexity'].min()} - {df['perplexity'].max()}"
    )
    plot, plot_registry = generate_plot(
        df,
        text_column,
        "perplexity",
        None,
        dimensionality_reduction_function,
        model,
        seed=SEED,
        hub_dataset=dataset,
    )
    logger.info("Saving plots")
    # Interactive bokeh scatter plot of the embeddings colored by perplexity.
    bokeh_output_file(f"{output_file}.html")
    save(plot)
    # The registry plot only exists for the registry dataset.
    if dataset == REGISTRY_DATASET:
        bokeh_output_file(f"{output_file}_registry.html")
        save(plot_registry)
    fig = draw_histogram(df["perplexity"].values)
    fig.savefig(f"{output_file}_histogram.png")
    logger.info("Done")
5,328,221
def test_dissolution_statement_type(session, test_status, legal_type, dissolution_type, dissolution_statement_type,
                                    identifier, expected_code, expected_msg):  # pylint: disable=too-many-arguments
    """Assert that a VD can be validated."""
    # setup — build a dissolution filing from the shared fixtures, deep-copied
    # so parametrized runs cannot contaminate each other.
    business = Business(identifier=identifier)
    filing = copy.deepcopy(FILING_HEADER)
    filing['filing']['header']['name'] = 'dissolution'
    filing['filing']['business']['legalType'] = legal_type
    filing['filing']['dissolution'] = copy.deepcopy(DISSOLUTION)
    filing['filing']['dissolution']['dissolutionStatementType'] = dissolution_statement_type
    filing['filing']['dissolution']['dissolutionType'] = dissolution_type
    # Make the second party's delivery address match its mailing address.
    filing['filing']['dissolution']['parties'][1]['deliveryAddress'] = \
        filing['filing']['dissolution']['parties'][1]['mailingAddress']
    # The statement type only applies to co-ops; drop it for other legal types.
    if legal_type != Business.LegalTypes.COOP.value:
        del filing['filing']['dissolution']['dissolutionStatementType']

    # perform test — affidavit validation is stubbed out so only the
    # statement-type rules are exercised.
    with patch.object(dissolution, 'validate_affidavit', return_value=None):
        err = validate(business, filing)

    # validate outcomes — parametrized cases either expect a specific error
    # (code + first message) or no error at all.
    if expected_code or expected_msg:
        assert expected_code == err.code
        assert expected_msg == err.msg[0]['error']
    else:
        assert not err
5,328,222
def find_power_graph(I, J, w_intersect=10, w_difference=1):
    """Compute a power graph from a graph with edges ``I, J``.

    Returns routing edges ``Ir, Jr`` and power edges ``Ip, Jp``. The graph
    is treated as undirected; edges are internally converted to undirected
    form if they are not already.
    """
    # Node count: highest endpoint index in either edge list, plus one.
    node_count = int(max(max(I), max(J)) + 1)
    result = cpp.routing_swig(node_count, I, J, w_intersect, w_difference)
    Ir, Jr, Ip, Jp = result
    return Ir, Jr, Ip, Jp
5,328,223
def getcomments(pyObject):
    """Get lines of comments immediately preceding an object's source code.

    Returns None when source can't be found, and (implicitly) None when no
    comment block precedes the object.

    NOTE: this uses the Python 2 ``string`` module functions
    (``string.strip``, ``string.join``, ...), so it is Python-2-only code.
    """
    try:
        lines, lnum = findsource(pyObject)
    except (IOError, TypeError):
        return None

    if ismodule(pyObject):
        # Look for a comment block at the top of the file.
        start = 0
        # Skip a shebang line if present.
        if lines and lines[0][:2] == '#!':
            start = 1
        # Skip leading blank lines and bare '#' lines.
        while start < len(lines) and string.strip(lines[start]) in ('', '#'):
            start = start + 1
        if start < len(lines) and lines[start][:1] == '#':
            comments = []
            end = start
            while end < len(lines) and lines[end][:1] == '#':
                comments.append(string.expandtabs(lines[end]))
                end = end + 1
            return string.join(comments, '')

    # Look for a preceding block of comments at the same indentation.
    elif lnum > 0:
        indent = indentsize(lines[lnum])
        end = lnum - 1
        if end >= 0 and string.lstrip(lines[end])[:1] == '#' and \
            indentsize(lines[end]) == indent:
            comments = [string.lstrip(string.expandtabs(lines[end]))]
            if end > 0:
                # Walk upwards collecting contiguous comment lines at the
                # same indentation, prepending to keep file order.
                end = end - 1
                comment = string.lstrip(string.expandtabs(lines[end]))
                while comment[:1] == '#' and indentsize(lines[end]) == indent:
                    comments[:0] = [comment]
                    end = end - 1
                    if end < 0: break
                    comment = string.lstrip(string.expandtabs(lines[end]))
            # Trim bare '#' lines from both ends of the collected block.
            while comments and string.strip(comments[0]) == '#':
                comments[:1] = []
            while comments and string.strip(comments[-1]) == '#':
                comments[-1:] = []
            return string.join(comments, '')
5,328,224
def crack_captcha(headers):
    """
    Crack a captcha — full demonstration pipeline: fetch the image, denoise
    and binarize it, split it into per-character images, then classify each
    character with a pre-trained SVM model.

    :param headers: HTTP headers to send with the captcha request.
    :return: the recognized captcha text (classified digits concatenated).
    """
    currentTime = str(int(time.time())*1000)
    # Request the captcha image from the given URL (timestamp busts caches).
    rand_captcha_url = 'http://59.49.77.231:81/getcode.asp?t=' + currentTime
    res = requests.get(rand_captcha_url, stream=True, headers=headers)
    f = io.BytesIO()
    for chunk in res.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            f.write(chunk)
            f.flush()
    img = Image.open(f)  # Load the captcha image fetched from the network, kept in memory
    bin_clear_img = get_clear_bin_image(img)  # Denoise and binarize the image
    child_img_list = get_crop_imgs(bin_clear_img)  # Split into single-character images (e.g. a 4-digit captcha yields 4 children), in memory
    # Load the SVM model for prediction
    svm_model_name = 'svm_model_file'
    model_path = data_root + '/svm_train/' + svm_model_name
    model = svm_load_model(model_path)
    img_ocr_name = ''
    for child_img in child_img_list:
        img_feature_list = get_feature(child_img)  # Use the feature algorithm to reduce the image to features
        yt = [0]  # test data label (dummy — prediction only)
        # xt = [{1: 1, 2: 1}]  # test data input vector
        xt = convert_feature_to_vector(img_feature_list)  # Convert all features into a standardized single-row SVM feature vector
        p_label, p_acc, p_val = svm_predict(yt, xt, model)
        img_ocr_name += ('%d' % p_label[0])  # Append the recognized digit to the result
    #uuid_tag = str(uuid.uuid1())  # Generate a random uuid string (easy for a developer to implement)
    #img_save_folder = data_root + '/crack_img_res'
    #img.save(img_save_folder + '/' + img_ocr_name + '__' + uuid_tag + '.png')
    # e.g.: __0067__77b10a28f73311e68abef0def1a6bbc8.png
    f.close()
    return img_ocr_name
5,328,225
def test_2d_sill():
    """test_2d_sill

    Tests against expected classic solution of shallow water equations over
    a sill. This is a nose-style generator test: it yields one sub-test per
    solver variant produced by ``gen_variants``.
    """
    from . import sill

    def verify_expected(expected):
        # Build a verifier closure comparing the final total mass against
        # the expected value.
        def sill_verify(claw):
            from clawpack.pyclaw.util import check_diff
            import numpy as np
            q0 = claw.frames[0].state.get_q_global()
            qfinal = claw.frames[claw.num_output_times].state.get_q_global()
            if q0 is not None and qfinal is not None:
                # Total mass = cell area times the L1 norm of the depth field.
                dx, dy = claw.solution.domain.grid.delta
                total_mass = dx * dy * np.linalg.norm(qfinal[0,:,:].reshape(-1), 1)
                return check_diff(expected, total_mass, reltol=1e-3)
            else:
                # Non-root MPI ranks get None from get_q_global(); skip.
                return
        return sill_verify

    from clawpack.pyclaw.util import gen_variants
    classic_tests = gen_variants(sill.setup, verify_expected(3.7439),
                                 kernel_languages=["Fortran"], solver_type='classic',
                                 outdir=None)
    from itertools import chain
    for test in chain(classic_tests):
        yield test
5,328,226
def nucleus_instance_segment(
    pretrained_model,
    pretrained_weights,
    img_input,
    file_types,
    masks,
    mode,
    output_path,
    batch_size,
    yaml_config_path,
    num_loader_workers,
    num_postproc_workers,
    auto_generate_mask,
    on_gpu,
    verbose,
):
    """Process an image/directory of input images with a patch classification CNN.

    CLI glue: resolves inputs, builds a NucleusInstanceSegmentor, runs the
    prediction and saves the collected results as JSON under *output_path*.
    """
    # Imported lazily so the CLI loads without pulling in the model stack.
    from tiatoolbox.models.engine.nucleus_instance_segmentor import (
        IOSegmentorConfig,
        NucleusInstanceSegmentor,
    )
    from tiatoolbox.utils.misc import save_as_json

    # Resolve input files / masks and normalize the output directory.
    files_all, masks_all, output_path = prepare_model_cli(
        img_input=img_input,
        output_path=output_path,
        masks=masks,
        file_types=file_types,
    )
    # Build the IO configuration from the YAML file (if one was provided).
    ioconfig = prepare_ioconfig_seg(
        IOSegmentorConfig, pretrained_weights, yaml_config_path
    )
    predictor = NucleusInstanceSegmentor(
        pretrained_model=pretrained_model,
        pretrained_weights=pretrained_weights,
        batch_size=batch_size,
        num_loader_workers=num_loader_workers,
        num_postproc_workers=num_postproc_workers,
        auto_generate_mask=auto_generate_mask,
        verbose=verbose,
    )
    output = predictor.predict(
        imgs=files_all,
        masks=masks_all,
        mode=mode,
        on_gpu=on_gpu,
        save_dir=output_path,
        ioconfig=ioconfig,
    )
    save_as_json(output, str(output_path.joinpath("results.json")))
5,328,227
def draw_box(
    canvas,
    layout,
    box_width=None,
    box_alpha=0,
    color_map=None,
    show_element_id=False,
    show_element_type=False,
    id_font_size=None,
    id_font_path=None,
    id_text_color=None,
    id_text_background_color=None,
    id_text_background_alpha=1,
):
    """Draw the layout region on the input canvas(image).

    Args:
        canvas (:obj:`~np.ndarray` or :obj:`~PIL.Image.Image`):
            The canvas to draw the layout boxes.
        layout (:obj:`Layout` or :obj:`list`):
            The layout of the canvas to show.
        box_width (:obj:`int`, optional):
            Set to change the width of the drawn layout box boundary.
            Defaults to None, when the boundary is automatically calculated
            as :const:`DEFAULT_BOX_WIDTH_RATIO` * max(height, width) of the
            canvas.
        box_alpha (:obj:`float`, optional):
            A float in [0, 1]. Set to change the alpha of the drawn layout
            box. Defaults to 0 - fully transparent.
        color_map (dict, optional):
            A map from `block.type` to colors, e.g. `{1: 'red'}`. Set it to
            `{}` to use only :const:`DEFAULT_OUTLINE_COLOR` for outlines.
            Defaults to None, when a color palette is automatically created
            based on the input layout.
        show_element_id (bool, optional):
            Whether to display `block.id` on the top-left corner of the
            block. Defaults to False.
        show_element_type (bool, optional):
            Whether to display `block.type` on the top-left corner of the
            block. Defaults to False.
        id_font_size (int, optional):
            Font size for drawing `block.id`. Defaults to None, when
            :const:`DEFAULT_FONT_SIZE` is used.
        id_font_path (:obj:`str`, optional):
            Font used for drawing `block.id`. Defaults to None, when
            :const:`DEFAULT_FONT_OBJECT` is used.
        id_text_color (:obj:`str`, optional):
            Text color used for drawing `block.id`. Defaults to None, when
            :const:`DEFAULT_TEXT_COLOR` is used.
        id_text_background_color (:obj:`str`, optional):
            Text region background for drawing `block.id`. Defaults to
            None, when :const:`DEFAULT_TEXT_BACKGROUND` is used.
        id_text_background_alpha (:obj:`float`, optional):
            A float in [0, 1]. Alpha of the drawn text background.
            Defaults to 1 - solid.

    Returns:
        :obj:`PIL.Image.Image`: An Image object containing the `layout`
        drawn upon the input `canvas`.

    Raises:
        ValueError: if either alpha argument falls outside [0, 1].
    """
    # Validate with explicit raises: the original used `assert`, which is
    # stripped under `python -O`, and its ValueError was only an assert
    # message that could never actually be raised.
    if not 0 <= box_alpha <= 1:
        raise ValueError(f"The box_alpha value {box_alpha} is not within range [0,1].")
    if not 0 <= id_text_background_alpha <= 1:
        raise ValueError(
            f"The id_text_background_alpha value {id_text_background_alpha} is not within range [0,1]."
        )

    draw = ImageDraw.Draw(canvas, mode="RGBA")

    id_text_background_color = id_text_background_color or DEFAULT_TEXT_BACKGROUND
    id_text_color = id_text_color or DEFAULT_TEXT_COLOR

    if box_width is None:
        box_width = _calculate_default_box_width(canvas)

    if show_element_id or show_element_type:
        font_obj = _create_font_object(id_font_size, id_font_path)

    if color_map is None:
        all_types = set(b.type for b in layout if hasattr(b, "type"))
        color_map = _create_color_palette(all_types)

    for idx, ele in enumerate(layout):

        if isinstance(ele, Interval):
            ele = ele.put_on_canvas(canvas)

        outline_color = (
            DEFAULT_OUTLINE_COLOR
            if not isinstance(ele, TextBlock)
            else color_map.get(ele.type, DEFAULT_OUTLINE_COLOR)
        )

        _draw_box_outline_on_handler(draw, ele, outline_color, box_width)

        _draw_transparent_box_on_handler(draw, ele, outline_color, box_alpha)

        if show_element_id or show_element_type:
            text = ""

            if show_element_id:
                # BUG FIX: `ele.id or idx` discarded a legitimate id of 0
                # (falsy) and fell back to the loop index.
                ele_id = ele.id if ele.id is not None else idx
                text += str(ele_id)
            if show_element_type:
                text = str(ele.type) if not text else text + ": " + str(ele.type)

            start_x, start_y = ele.coordinates[:2]
            # NOTE(review): `getsize` was removed in Pillow 10; confirm the
            # pinned Pillow version or migrate to `getbbox`/`getlength`.
            text_w, text_h = font_obj.getsize(text)
            text_box_object = Rectangle(
                start_x, start_y, start_x + text_w, start_y + text_h
            )
            # Add a small background for the text
            _draw_transparent_box_on_handler(
                draw,
                text_box_object,
                id_text_background_color,
                id_text_background_alpha,
            )
            # Draw the ids
            draw.text(
                (start_x, start_y),
                text,
                fill=id_text_color,
                font=font_obj,
            )

    return canvas
5,328,228
def gpio_connect(self):
    """Connect the two GPIO hardware interfaces of this connection.

    Validates that neither pin is already in use and that the pin types
    form a legal input/output pairing before recording the assignment.
    """
    # Reject pins that already belong to another connection.
    check_single_pin(self.hwint_1, self.hwint_2)

    # Equal types are only legal when both ends are bidirectional.
    types_match = self.hwint_1.type == self.hwint_2.type
    if types_match and self.hwint_1.type != GPIOType.BOTH:
        raise InvalidGpioError("Invalid connection. GPIO types should be "
                               "input-output")

    # Record the pin assignment on both interfaces.
    for hwint in (self.hwint_1, self.hwint_2):
        update_int(hwint, [hwint.pin])
5,328,229
def main(targets: str, cookies: str, outfile: str, customHeaders: str):
    """
    Main function, takes a target name/file and parses them, passes to
    thread pool and ultimately writes to the outfile in CSV format
    """
    parsed_targets = parseTargets(targets)

    if customHeaders:
        global HEADERS
        HEADERS += parseCustomHeaders(customHeaders)

    # Pair every target with the shared cookie string for the worker calls.
    work_items = ((target, cookies) for target in parsed_targets)

    with ThreadPoolExecutor(max_workers=5) as pool:
        results = pool.map(lambda item: getHeaders(*item), work_items, timeout=10)
        with open(outfile, 'w+') as f:
            # CSV header row: URL followed by one column per header checked.
            f.write("URL,{}\n".format(",".join(HEADERS)))
            for result in results:
                print(result)
                if len(result) > 0:
                    flags = ["Yes" if x else "No" for x in result[1:]]
                    f.write("{},{}\n".format(result[0], ",".join(flags)))

    print("[+] Results written to {}".format(outfile))
5,328,230
def login(request):
    """Home view: renders the login page."""
    template = 'duck/login.html'
    return render(request, template)
5,328,231
def make_user_role_table(table_name='user', id_column_name='id'):
    """
    Create the user-role association table so that it correctly references
    your own UserMixin subclass.

    :param table_name: name of the user table to reference.
    :param id_column_name: name of the user table's primary-key column.
    :return: the `fp_user_role` association table.
    """
    user_fk = '{}.{}'.format(table_name, id_column_name)
    user_column = db.Column('user_id', db.Integer, db.ForeignKey(user_fk))
    role_column = db.Column('role_id', db.Integer, db.ForeignKey('fp_role.id'))
    # extend_existing allows re-declaring the table on repeated calls.
    return db.Table('fp_user_role', user_column, role_column,
                    extend_existing=True)
5,328,232
def _remove_comments_inline(text): """Removes the comments from the string 'text'.""" if 'auto-ignore' in text: return text if text.lstrip(' ').lstrip('\t').startswith('%'): return '' match = re.search(r'(?<!\\)%', text) if match: return text[:match.end()] + '\n' else: return text
5,328,233
def wide_to_tall(df: pd.DataFrame) -> pd.DataFrame:
    """Convert a wide table to a tall table.

    Args:
        df (pd.DataFrame): wide table

    Returns:
        pd.DataFrame: tall table with one row per (column, index) pair;
        missing values are dropped.
    """
    stacked = df.unstack()
    stacked = stacked.dropna()
    return stacked.reset_index()
5,328,234
def is_pj_player_plus(value):
    """
    :param value: The value to be checked
    :type value: Any
    :return: whether or not the value is a PJ Player+, i.e. a list of
        exactly 3 or 4 elements
    :rtype: bool
    """
    # BUG FIX: the original expression parsed as
    # `(isinstance(value, list) and len(value) == 4) or len(value) == 3`,
    # which accepted any sized non-list of length 3 and raised TypeError
    # on unsized values (e.g. ints).
    return isinstance(value, list) and len(value) in (3, 4)
5,328,235
def pd_fuzz_partial_token_sort_ratio(col1, col2):
    """
    Calculate "partial token sort" ratio (`fuzz.partial_token_sort_ratio`)
    between two text columns.

    Args:
        col1 (Spark Column): 1st text column
        col2 (Spark Column): 2nd text column

    Returns:
        Spark Column (IntegerType): result of `fuzz.partial_token_sort_ratio`
        calculation.
    """
    left = col1.astype(str)
    right = col2.astype(str)
    # Pairwise ratio over the two columns, materialized as a pandas Series.
    scores = map(fuzz.partial_token_sort_ratio, left, right)
    return pd.Series(scores)
5,328,236
def setup(bot: StarBot) -> None:
    """Load the Configuration cog."""
    cog = Configuration(bot)
    bot.add_cog(cog)
5,328,237
def path_remove(x, ask=True):
    """
    Remove directories.

    :param x: a path (str), or a list/dict of paths, to remove.
    :param ask: prompt for confirmation before deleting (default True).
    """
    # Normalize the input into a flat list of candidate paths.
    if isinstance(x, str):
        candidates = [x]
    elif isinstance(x, list):
        candidates = list(x)
    elif isinstance(x, dict):
        candidates = list(x.values())
    else:
        log.info('Nothing removed, str, list, dict expected, got {}'.format(
            type(x).__name__))
        return

    del_list = []
    undel_list = []
    for f in candidates:
        # BUG FIX: the original list/dict branches tested `file_exists(x)`
        # (the whole container) instead of `file_exists(f)` (each item).
        if os.path.isdir(f) and file_exists(f):
            del_list.append(f)
        else:
            undel_list.append(f)

    # remove files
    if len(del_list) > 0:
        del_msg = ['{:>6s}: {}'.format('remove', i) for i in del_list]
        undel_msg = ['{:>6s}: {}'.format('skip', i) for i in undel_list]
        msg = '\n'.join(del_msg + undel_msg)
        log.info('Removing files: \n' + msg)
        if ask:
            ask_msg = input('Removing the files? [Y|n]: ')
        else:
            ask_msg = 'Y'
        # remove only after explicit confirmation (or ask=False)
        if ask_msg.lower() in ['y', 'yes']:
            for f in del_list:
                shutil.rmtree(f)
            log.info('{} files removed'.format(len(del_list)))
        else:
            log.info('Nothing removed, skipped')
5,328,238
def looterCanReinforce(mine: Game) -> bool:
    """
    Return True if, in the given game, the looter (the attack) can
    reinforce at this moment, regardless of whether its the first or
    the second time
    """
    status = getLooterReinforcementStatus(mine)
    return status != 0
5,328,239
def _get_normed_sym_np(X_, _eps=DEFAULT_EPS):
    """Compute the normalized, symmetrized probability matrix.

    Parameters
    ----------
    X_ : 2-d array_like, (N, N)
        Asymmetric probabilities, e.g. X_(i, j) = P(i|j).  NOTE: modified
        in place (diagonal zeroed) before normalization.
    _eps : float
        Small constant guarding against division by zero.

    Returns
    -------
    P : 2-d array_like, (N, N)
        Symmetric probabilities under the assumption P(i|j) = P(j|i);
        diagonal entries are all 0.
    """
    n = X_.shape[0]
    # Zero the diagonal in place: self-probabilities are excluded.
    X_ *= 1.0 - np.identity(n)
    # Normalize each column to sum to one.
    column_sums = np.sum(X_, axis=0, keepdims=True)
    X_ = X_ / (column_sums + _eps)
    # Symmetrize.
    return 0.5 * (X_ + np.transpose(X_))
5,328,240
def load_encoding_model():
    """Model to encode an image as a length-4096 vector, taken from the
    2nd-to-last ('fc2') layer of VGG16."""
    vgg = VGG16(weights='imagenet', include_top=True)
    fc2_output = vgg.get_layer('fc2').output
    return Model(inputs=vgg.input, outputs=fc2_output)
5,328,241
def get_geohash_radius_approximation(latitude, longitude, radius, precision,
                                     georaptor_flag=False, minlevel=1, maxlevel=12):
    """
    Get the list of geohashes that approximate a circle.

    :param latitude: Float the longitude to get the radius approximation for
    :param longitude: Float the latitude to get the radius approximation for
    :param radius: Integer Radius coverage in meters
    :param precision: Integer the geohash precision level
    :param georaptor_flag: Do you want to compress it with georaptor
    :param minlevel: minimal precision level possible
    :param maxlevel: maximal precision level possible
    :return: A list of geohashes
    """
    # Work in a local metre-based frame centred on the circle; (x, y) is
    # the circle centre, offsets below are metres from it.
    x = 0.0
    y = 0.0
    points = []
    geohashes = []

    # Approximate geohash cell width/height in metres for precision
    # levels 1..12 (index = precision - 1).
    grid_width = [5009400.0, 1252300.0, 156500.0, 39100.0, 4900.0, 1200.0,
                  152.9, 38.2, 4.8, 1.2, 0.149, 0.0370]
    grid_height = [4992600.0, 624100.0, 156000.0, 19500.0, 4900.0, 609.4,
                   152.4, 19.0, 4.8, 0.595, 0.149, 0.0199]

    # Half-cell dimensions at the requested precision.
    height = (grid_height[precision - 1]) / 2
    width = (grid_width[precision - 1]) / 2

    # Number of half-cell steps needed to span the radius in each axis.
    lat_moves = int(math.ceil(radius / height))  # 4
    lon_moves = int(math.ceil(radius / width))  # 2

    # Scan the first quadrant only; the other three are filled by mirroring
    # each accepted centroid below.
    for i in range(0, lat_moves):
        temp_lat = y + height * i
        for j in range(0, lon_moves):
            temp_lon = x + width * j
            # Keep only grid points that fall inside the circle.
            if in_circle_check(temp_lat, temp_lon, y, x, radius):
                x_cen, y_cen = get_centroid(temp_lat, temp_lon, height, width)
                # Mirror into all four quadrants.
                lat, lon = convert_to_latlon(y_cen, x_cen, latitude, longitude)
                points += [[lat, lon]]
                lat, lon = convert_to_latlon(-y_cen, x_cen, latitude, longitude)
                points += [[lat, lon]]
                lat, lon = convert_to_latlon(y_cen, -x_cen, latitude, longitude)
                points += [[lat, lon]]
                lat, lon = convert_to_latlon(-y_cen, -x_cen, latitude, longitude)
                points += [[lat, lon]]

    for point in points:
        geohashes += [pgh.encode(point[0], point[1], precision)]

    if georaptor_flag:
        # Optionally compress the set into fewer, coarser geohashes.
        georaptor_out = georaptor.compress(set(geohashes), int(minlevel),
                                           int(maxlevel))
        return list(georaptor_out)
    else:
        # De-duplicate: mirroring produces repeats on the axes.
        return list(set(geohashes))
5,328,242
def manage_products(request, category_id, template_name="manage/category/products.html"):
    """Render the product-management panel for a single category."""
    category = Category.objects.get(pk=category_id)
    inline = products_inline(request, category_id, True)

    # amount options: mark the page size currently stored in the session.
    selected_amount = request.session.get("category-products-amount")
    amount_options = [
        {"value": value, "selected": value == selected_amount}
        for value in (10, 25, 50, 100)
    ]

    context = RequestContext(request, {
        "category": category,
        "products_inline": inline,
        "amount_options": amount_options,
    })
    return render_to_string(template_name, context)
5,328,243
def update_progress(pid: int, increment: int = 1) -> None:
    """
    Updates a progress object.

    :param pid: The id of the progress.
    :param increment: The value to add to current.
    :raises ValueError: if no progress object exists for ``pid``.
    """
    p = get_progress_by_id(pid=pid)
    # Validate explicitly: the original `assert` is stripped under
    # `python -O`, silently turning a missing progress into an
    # AttributeError on None.
    if p is None:
        raise ValueError(f"No progress found for id {pid}")
    p.current += increment
    p.save()
5,328,244
def random_indices(X, size=None, p=None, sort_indices=True, **kwargs):
    """
    Get indices for a random subset of the data.

    Parameters
    ----------
    X : sized iterable
        The data set to sample indices for (only its length is used).
    size : int, optional
        Number of indices to sample (required if ``p`` is None).
    p : float, optional
        Percentage (0-100) of points to keep (required if ``size`` is None).
    sort_indices : bool, optional
        Return the sampled indices in ascending order (default True).
    **kwargs
        Ignored; accepted for interface compatibility.

    Returns
    -------
    indices : np.ndarray
        Indices of samples in the data set (no duplicates).

    Raises
    ------
    ValueError
        If neither ``size`` nor ``p`` is given.
    """
    # Validate explicitly instead of `assert(size or p)`: asserts are
    # stripped under -O, and the truthiness test also rejected size=0,
    # which is a legitimate request for an empty sample.
    if size is None and p is None:
        raise ValueError("Either `size` or `p` must be specified.")

    # Convert p (percentage of points) to an integer size.
    if size is None:
        size = int(p / 100. * len(X))

    # Sample without replacement from the original index range.
    indices = np.random.choice(np.arange(len(X)), int(size), replace=False)

    if sort_indices is True:
        indices = np.sort(indices)
    return indices
5,328,245
def get_facts(F5, uri):
    """
    Issue a GET of the URI specified to the F5 appliance and return the
    result as facts.  A leading slash is added to the URI if missing.

    The 'items' key is renamed to 'bigip_items' because in Ansible 2.2
    'items' clashes with a reserved name, see
    http://stackoverflow.com/questions/40281706/cant-read-custom-facts-with-list-array-of-items

    :param F5: appliance client exposing ``genericGET(uri)``.
    :param uri: REST path to query.
    :return: ``(status, result)`` where result holds ``ansible_facts``.
    """
    result = {'ansible_facts': {}}
    if uri[0] != "/":
        uri = "/" + uri
    status, result["ansible_facts"] = F5.genericGET(uri)
    try:
        # Rename 'items' -> 'bigip_items' to avoid the Ansible name clash.
        result["ansible_facts"]["bigip_items"] = result["ansible_facts"].pop("items")
    except (KeyError, AttributeError, TypeError):
        # Narrowed from a bare `except:`: only a missing 'items' key or a
        # non-dict payload is expected here; anything else should surface.
        result["ansible_facts"]["bigip_items"] = dict()
    return status, result
5,328,246
def _enter_beta_test_mode():
    """Called by conda-kapsel executable to do special things for beta."""
    # Flip the module-level flag; code elsewhere reads _beta_test_mode.
    global _beta_test_mode
    _beta_test_mode = True
5,328,247
def tesselated_wrangler(gdp, paramset=None, properties=None, override_node=None):
    """Wrangler for any geo that needs to be tesselated"""
    # Use the first prim's type name in the emitted scene comment.
    prim_name = gdp.iterPrims()[0].intrinsicValue("typename")
    message = (
        "%s prims is are not directly supported, they will be tesselated"
        % prim_name
    )
    api.Comment(message)
    mesh_wrangler(gdp, paramset, properties)
    return
5,328,248
def test_CNOT_in_X_basis(qvm): """ Testing the definition of CNOT in the X basis. """ # CNOT truth table true_truth_table = {(0, 0): (0, 0), (0, 1): (0, 1), (1, 0): (1, 1), (1, 1): (1, 0)} CNOTX = CNOT_X_basis(0, 1) for key, value in true_truth_table.items(): state_prep_prog = Program() meas_prog = Program() for qbit_idx, bit in enumerate(key): if bit == 1: state_prep_prog += X(qbit_idx) # Hadamard to get to the X basis state_prep_prog += H(qbit_idx) # Hadamard to get back to the Z basis before measurement meas_prog += H(qbit_idx) prog = state_prep_prog + CNOTX + meas_prog ro = prog.declare('ro', 'BIT', 3) for q in range(2): prog += MEASURE(q, ro[q]) exe = qvm.compiler.native_quil_to_executable(prog) result = qvm.run(exe) assert tuple(result[0]) == true_truth_table[key]
5,328,249
def parse_args(args: Optional[Sequence[str]] = None) -> Namespace:
    """
    Parse CLI arguments and validate the consistency of origin/target
    against the settings generator.
    """
    parser = ArgumentParser(
        prog="python -m luh3417.transfer",
        description="Transfers a WordPress to one location to the other",
    )
    parser.add_argument(
        "-g",
        "--settings-generator",
        help="A Python script that handles the transitions",
        type=generator_validator,
        required=True,
    )
    parser.add_argument("origin", help="Origin environment")
    parser.add_argument("target", help="Target environment")

    parsed = parser.parse_args(args)
    generator = parsed.settings_generator

    # Both environments must be known to the generator.
    for env_name in (parsed.origin, parsed.target):
        try:
            generator.get_source(env_name)
        except UnknownEnvironment as e:
            parser.error(
                f'Environment "{env_name}" not recognized by generator: {e.message}'
            )

    # The generator may veto specific transfer directions.
    if not generator.allow_transfer(parsed.origin, parsed.target):
        parser.error(
            f"Generator does not allow transfer from {parsed.origin} to {parsed.target}"
        )

    return parsed
5,328,250
def test_suite(session: nox.Session) -> None:
    """Run the Python-based test suite"""
    # Install test requirements, then the package under test itself.
    install_requirements_file(session, "test-env")
    session.install(".")
    # NOTE(review): chdir("") looks suspicious -- presumably a placeholder
    # for a real test directory; confirm against the repo layout.
    session.chdir("")
    # Placeholder command until a real test runner is wired up.
    session.run("no-tests-yet")
5,328,251
def parse_papers_plus_json(data):
    """
    Parse the papers_plus Solr JSON response into a pandas DataFrame.

    Solr Field definition shown below:
    <!-- Citing paper fields: papers, metadata, arxiv_metadata -->
    <!-- Papers -->
    <field name="sentencenum" type="pint" .../>
    <field name="sentence" type="text_classic" .../>
    <field name="arxiv_identifier" type="string" .../>
    <!-- arxiv metadata-->
    <field name="arxiv_url" type="string" .../>
    <field name="authors" type="text_classic" .../>
    <field name="title" type="text_classic" .../>
    <field name="published_date" type="pdate" .../>
    <field name="revision_dates" type="string" .../>
    <!-- meta field: dblp_url-->
    <field name="dblp_url" type="string" .../>
    """
    records = data['response']['docs']
    frame = pd.DataFrame(records)
    # Solr-internal '_version_' and 'id' fields are not useful downstream.
    return frame.drop(['_version_', 'id'], axis=1)
5,328,252
def get_credential(config_file: Path, credential_key: str = 'api_key') -> Optional[str]:
    """
    Get a single credential from yaml file. Usual case is 'api_key'.

    :param config_file: path to the YAML credentials file
    :param credential_key: key to look up under the 'credentials' section
    :return: the credential value, or None when the file or key is missing
    """
    config = load_credentials(config_file)
    if not config:
        return None
    credentials = config.get('credentials', {})
    return credentials.get(credential_key, None)
5,328,253
def authenticate_secondarily(endpoint):
    """Proper authentication for function views.

    Decorator: lets already-authenticated session users straight through;
    otherwise attempts personal-API-key authentication and returns a 401
    JSON response on failure.
    """
    @functools.wraps(endpoint)
    def wrapper(request: HttpRequest):
        if not request.user.is_authenticated:
            try:
                # DRF-style authenticators return a (user, token) tuple on
                # success; anything else is treated as a failure.
                auth_result = PersonalAPIKeyAuthentication.authenticate(request)
                if isinstance(auth_result, tuple) and auth_result[0].__class__.__name__ == "User":
                    request.user = auth_result[0]
                else:
                    raise AuthenticationFailed("Authentication credentials were not provided.")
            except AuthenticationFailed as e:
                return JsonResponse({"detail": e.detail}, status=401)
        return endpoint(request)
    return wrapper
5,328,254
def gather_emails_GUIDs(mailbox, search, folder):
    """Download GUIDs of messages matching ``search`` in ``folder``."""
    # Select the folder eagerly (happens at call time, not first iteration),
    # then hand back a lazy generator over the matching UIDs.
    mailbox.folder.set(folder)
    matching_uids = mailbox.uids(search)
    return (uid for uid in matching_uids)
5,328,255
def pyrolite_meltsutil_datafolder(subfolder=None):
    """
    Returns the path of the pyrolite-meltsutil data folder.

    Parameters
    -----------
    subfolder : :class:`str`
        Subfolder within the pyrolite data folder.

    Returns
    -------
    :class:`pathlib.Path`
    """
    module_name = "pyrolite_meltsutil"
    return get_module_datafolder(module=module_name, subfolder=subfolder)
5,328,256
def setup_stanford_corenlp(force=False):
    """Download and move Stanford CoreNLP to the expected place.

    Arguments:
        force (boolean): If ``False``, then don't download if
            ``CORENLP_LOCAL_PATH`` exists.  Otherwise, download anyway.
    """
    temp_corenlp_name = "corenlp.zip"
    if not force:
        if not os.path.isdir(CORENLP_LOCAL_PATH):
            force = True
    if force:
        # Converted Python 2 `print` statements to print() calls: the rest
        # of this file uses Python 3 syntax (f-strings), under which the
        # old statements are syntax errors.
        print("Installing CoreNLP")
        print("Downloading...")
        download_file(CORENLP, temp_corenlp_name)
        with zipfile.ZipFile(temp_corenlp_name, "r") as local_zip:
            print("Extracting...")
            local_zip.extractall()
        shutil.move(CORENLP_ZIP_DIRECTORY, CORENLP_LOCAL_PATH)
        print("Cleaning up...")
        # Use the variable rather than repeating the "corenlp.zip" literal.
        os.remove(temp_corenlp_name)
        print("Done")
    else:
        print("CoreNLP seems to be installed, skipping")
5,328,257
def get_resource_record_set_cloud_formation_dict_list(hosted_zone: ResourceRecordSetList,
                                                      with_soa: str,
                                                      client: botocore.client.BaseClient,
                                                      zone_id: str,
                                                      type_counter_aws_resource_record_set: dict) -> List[dict]:
    """
    Provide a dict representation of a resource record set that can be used
    to dump a cloud formation formatted YAML file.

    SOA/NS records are skipped unless ``with_soa`` is truthy.  The
    ``type_counter_aws_resource_record_set`` dict is updated in place with
    per-type counts of exported records.

    :return: a list of dicts, each in the form:
        {
            "Name": str,
            "Type": str,
            "TTL": str,
            "ResourceRecords": [str],
            "AliasTarget": {"DNSName": str, "HostedZoneId": str}
        }
    """
    resource_record_set_cloud_formation_dict_list = []
    # Walk all pages of the record-set listing; `hosted_zone` is replaced
    # with the next page at the bottom of the loop, None when exhausted.
    while hosted_zone is not None:
        for resource_record_set in hosted_zone.resource_record_sets:
            # Export everything except SOA/NS, which are only included when
            # `with_soa` is set.
            if ((resource_record_set.type != "SOA" and resource_record_set.type != "NS")
                    or (with_soa and (resource_record_set.type == "SOA" or resource_record_set.type == "NS"))):
                resource_record_values = [resource_record.value for resource_record in resource_record_set.resource_records]
                resource_record_set_cloud_formation_dict = {
                    "Name": resource_record_set.name,
                    "Type": resource_record_set.type
                }
                # Track how many records of each type were exported.
                update_type_counter_aws_resource_record_set(type_counter_aws_resource_record_set,
                                                            resource_record_set.type)
                if resource_record_set.ttl:
                    resource_record_set_cloud_formation_dict['TTL'] = resource_record_set.ttl
                if resource_record_values:
                    resource_record_set_cloud_formation_dict['ResourceRecords'] = resource_record_values
                # Alias records reference another AWS resource instead of
                # carrying literal values.
                if resource_record_set.alias_target:
                    resource_record_set_cloud_formation_dict['AliasTarget'] = {
                        "DNSName": resource_record_set.alias_target.dns_name,
                        "HostedZoneId": resource_record_set.alias_target.hosted_zone_id
                    }
                resource_record_set_cloud_formation_dict_list.append(resource_record_set_cloud_formation_dict)
        # Fetch the next page of results, if any.
        next_record_name = hosted_zone.next_record_name
        if next_record_name:
            hosted_zone = ResourceRecordSetList(client.list_resource_record_sets(HostedZoneId=zone_id,
                                                                                 StartRecordName=next_record_name))
        else:
            hosted_zone = None
    return resource_record_set_cloud_formation_dict_list
5,328,258
async def make_async_request(
        url: str,
        method: str = 'GET',
        **kwargs) -> dict:
    """
    Make an asynchronous request to the given URL with the supplied
    parameters and return the parsed JSON response as a dict.

    Keyword Args:
        headers: Request HTTP Headers
        params: URI HTTP request params
        data: POST HTTP data
        timeout: Timeout requests

    Raises:
        HttpError: network or webserver error
        IncorrectJsonError: JSON response parsing error
        FoundLocation: the server answered with a redirect Location header
    """
    try:
        async with aiohttp.client.ClientSession() as session:
            async with session.request(method, url, **kwargs) as response:
                logger.debug(f'Status {response.status} from {method} request to {url} with {kwargs}')
                response.raise_for_status()
                # A Location header signals a redirect the caller must
                # follow; surfaced as a control-flow exception.
                if response.status in (200, 302) and response.headers.get('Location'):
                    raise FoundLocation(location=response.headers.get('Location'))
                elif not response.content_type.startswith('application/json'):
                    text = await response.text()
                    logger.error(f'{response.content_type} -> {text}')
                    raise esia_client.exceptions.IncorrectJsonError(
                        f'Invalid content type -> {response.content_type}'
                    )
                return await response.json()
    except aiohttp.client.ClientError as e:
        logger.error(e, exc_info=True)
        raise esia_client.exceptions.HttpError(e)
    except ValueError as e:
        # response.json() raises ValueError on malformed bodies.
        logger.error(e, exc_info=True)
        raise esia_client.exceptions.IncorrectJsonError(e)
5,328,259
def pad_table(table, min_width=0, extra_pad=0):
    """Pad a 2-D list of strings in place so columns align when printed.

    Only rows with the maximal column count contribute to the measured
    widths, but every row gets padded.  Mutates ``table``; returns None.
    """
    # The widest row determines how many columns are measured.
    max_cols = 0
    for row in table:
        # naively assumes we're always passing in collections, not a string
        max_cols = max(len(row), max_cols)

    # Per-column maximum content width, taken over full-width rows only.
    widths = []
    n_cols = 0
    for row in table:
        if len(row) != max_cols:
            continue
        cell_lengths = [len(cell) for cell in row]
        if not widths:
            widths = cell_lengths
            n_cols = len(cell_lengths)
        else:
            for i in range(n_cols):
                if cell_lengths[i] > widths[i]:
                    widths[i] = cell_lengths[i]

    # pad step: left-justify every cell to its column's target width.
    for ri, row in enumerate(table):
        for ci, cell in enumerate(row):
            target = max(widths[ci] + extra_pad, min_width)
            row[ci] = "%-*s" % (target, cell)
        table[ri] = row
5,328,260
def test_check_levels():
    """Test _check_levels function of Fetch class.

    Exercises three paths: levels unset, a valid level list, and an
    out-of-range level.
    """
    era5 = initialize()
    era5.variables = "temperature"
    # No levels should raise
    with pytest.raises(ValueError):
        era5._check_levels()
    # Valid levels should pass
    era5.pressure_levels = [1000, 950]
    era5._check_levels()
    # Invalid levels should raise
    era5.pressure_levels = [777]
    with pytest.raises(ValueError):
        era5._check_levels()
5,328,261
def _prepare_line(edges, nodes): """prepare a plotly scatter3d line plot so that a set of disconnected edges can be drawn as a single line. `edges` are values associated with each edge (that get mapped to colors through a colorscale). `nodes` are pairs of (source, target) node indices for each edge. the color of a line segment in plotly is a mixture of the colors associated with the points it connects. Moreover, segments that begin or end at a point whose value is `null` are not drawn. given edges = [eab, ecd, eef] and nodes = [(a, b), (c, d), (e, f)], this function returns: path_edges: eab eab 0 ecd ecd 0 eef eef 0 path_nodes: a b 0 c d 0 e f 0 moreover the javascript code replaces every third element (the '0' in the lists above) with `null`, so only the a-b, c-d, and e-f segments will get plotted, and their colors are correct because both their start and end points are associated with the same value. """ path_edges = np.zeros(len(edges) * 3, dtype=int) path_edges[::3] = edges path_edges[1::3] = edges path_nodes = np.zeros(len(nodes) * 3, dtype=int) path_nodes[::3] = nodes[:, 0] path_nodes[1::3] = nodes[:, 1] return path_edges, path_nodes
5,328,262
def validate_password( password:str ) -> bool:
    """
    Validates the password again a password policy.

    Args:
        password ( str, required ): password to verify.

    Returns:
        valid ( bool ): True if the password meets validity requirements
        and the interactive confirmation matches.
    """
    # Minimum policy: estimated strength, entropy bits, and length.
    policy = PasswordPolicy.from_names(
        strength=0.20,
        entropybits=10,
        length=6,
    )
    if not password:
        return False

    tested_pass = policy.password(password)
    # test() returns the list of violated policy rules; empty means OK.
    result = tested_pass.test()
    if len(result) > 0:
        # NOTE(review): `colored` is called with no color argument here --
        # presumably intentional (default styling), but confirm.
        print(colored('Password not strong enough. Try increasing the length of the password or the password complexity'))
        return False

    # Interactive confirmation prompt (reads from the terminal).
    password_verification = getpass.getpass("Retype your password: ")
    if password != password_verification:
        print("Passwords do not match")
        return False
    return True
5,328,263
def check_stats(hist, values, dtype, error):
    """Check the statistics of a streaming histogram against numpy."""
    assert isinstance(hist, StreamingHistogram)
    n = values.size
    weights = np.ones(n, dtype=dtype)

    # Exact counts and extrema.
    assert hist.count() == n
    assert hist.size() == n
    assert hist.sum_of_weights() == n
    assert hist.max() == np.max(values)
    assert hist.min() == np.min(values)

    # Moments agree with numpy within the requested tolerance.
    assert hist.mean() == pytest.approx(np.mean(values), rel=error, abs=error)
    assert hist.quantile() == pytest.approx(np.quantile(values, 0.5),
                                            rel=error,
                                            abs=error)
    assert hist.var() == pytest.approx(np.var(values), rel=error, abs=error)
    assert hist.std() == pytest.approx(np.std(values), rel=error, abs=error)
    assert hist.kurtosis() == pytest.approx(weighted_mom4(values, weights),
                                            abs=error)
    assert hist.skewness() == pytest.approx(weighted_mom3(values, weights),
                                            rel=error, abs=error)
5,328,264
def build_webhooks(
        handlers_: Iterable[handlers.WebhookHandler],
        *,
        resources: Iterable[references.Resource],
        name_suffix: str,
        client_config: reviews.WebhookClientConfig,
        persistent_only: bool = False,
) -> List[Dict[str, Any]]:
    """
    Construct the content for ``[Validating|Mutating]WebhookConfiguration``.

    This function concentrates all conventions how Kopf manages the webhook.
    """
    return [
        {
            'name': _normalize_name(handler.id, suffix=name_suffix),
            # Dry-run requests only reach handlers that declare side effects.
            'sideEffects': 'NoneOnDryRun' if handler.side_effects else 'None',
            'failurePolicy': 'Ignore' if handler.ignore_failures else 'Fail',
            'matchPolicy': 'Equivalent',
            'rules': [
                {
                    'apiGroups': [resource.group],
                    'apiVersions': [resource.version],
                    # Subresource handlers match 'plural/subresource'.
                    'resources': (
                        [resource.plural]
                        if handler.subresource is None else
                        [f'{resource.plural}/{handler.subresource}']
                    ),
                    'operations': ['*'] if handler.operation is None else [handler.operation],
                    'scope': '*',  # doesn't matter since a specific resource is used.
                }
                for resource in resources
                if handler.selector is not None  # None is used only in sub-handlers, ignore here.
                if handler.selector.check(resource)
            ],
            'objectSelector': _build_labels_selector(handler.labels),
            # Handler id is threaded through so the server can route reviews.
            'clientConfig': _inject_handler_id(client_config, handler.id),
            'timeoutSeconds': 30,  # a permitted maximum is 30.
            'admissionReviewVersions': ['v1', 'v1beta1'],  # only those understood by Kopf itself.
        }
        for handler in handlers_
        if not persistent_only or handler.persistent
    ]
5,328,265
def ndo_real(data, n):
    """mimic of gmx_fio_ndo_real in gromacs: unpack ``n`` reals in order."""
    values = []
    for _ in range(n):
        values.append(data.unpack_real())
    return values
5,328,266
def build_string():
    """
    Demonstrates building a STRING by using the Accumulator pattern.
    We will later see a more efficient way to build/modify strings,
    namely, by using the split/join methods.
    """
    # ------------------------------------------------------------------
    # Accumulator pattern for STRINGs: identical to the LIST version,
    # except the accumulator starts as the empty STRING '' (not []) and
    # each step concatenates a small string (not a one-element list).
    #
    # The built-in str function converts its argument to a string.
    # ------------------------------------------------------------------
    """
    This example builds (and then prints) the STRING
    0 1 4 9 16 25 36 49 64 81
    """
    squares_text = ''
    for number in range(10):
        squares_text = squares_text + str(number ** 2) + ' '

    print(squares_text)
5,328,267
def test_iterable_becomes_attribute_if_passed_as_argument(eh):
    """An iterable in our case is limited to list, tuple."""
    from binary_heap import BinaryHeap
    # The heap should retain the exact iterable type it was constructed with.
    # NOTE(review): the `eh` fixture is unused here -- presumably kept for
    # symmetry with sibling tests; confirm.
    b = BinaryHeap([1, 2, 3])
    c = BinaryHeap((1, 2, 3))
    assert type(b.iterable) == list
    assert type(c.iterable) == tuple
5,328,268
def test_probe(mp3, wav):
    """Tests whether minimp3py.probe() returns the correct information"""
    samples, channels, sample_rate = minimp3py.probe(mp3)
    ref_pcm, ref_rate = wav
    # Reference PCM appears to be shaped (n_samples, n_channels) -- the
    # probe result must match its length, channel count, and sample rate.
    assert samples == len(ref_pcm)
    assert channels == ref_pcm.shape[1]
    assert sample_rate == ref_rate
5,328,269
def test_break_read_fhd():
    """Try various cases of incomplete file lists."""
    fhd_uv = UVData()
    # missing flags: drop the first entry (the flags file).
    with pytest.raises(ValueError, match="No flags file included in file list"):
        fhd_uv.read(testfiles[1:])
    # Missing params: everything except index 2 (the params file).
    subfiles = [item for sublist in [testfiles[0:2], testfiles[3:]] for item in sublist]
    with pytest.raises(ValueError, match="No params file included in file list"):
        fhd_uv.read(subfiles)
    # No data files at all.
    with pytest.raises(
        ValueError,
        match="No data files included in file list and read_data is True.",
    ):
        fhd_uv.read(["foo.sav"])
5,328,270
def get_field_type(field: Union[syntax.Field, syntax.Command], idl_file: syntax.IDLParsedSpec,
                   idl_file_path: str) -> Optional[Union[syntax.Enum, syntax.Struct, syntax.Type]]:
    """Resolve and get field type of a field from the IDL file."""
    ctxt = errors.ParserContext(idl_file_path, errors.ParserErrorCollection())
    resolved_type = idl_file.spec.symbols.resolve_field_type(
        ctxt, field, field.name, field.type)
    # Surface any resolution errors; the (possibly None) type is returned
    # regardless.
    if ctxt.errors.has_errors():
        ctxt.errors.dump_errors()
    return resolved_type
5,328,271
def query(context: models.Context, query_str: str) -> TimeSeriesCollection:
    """Do a monitoring query in the specified project.

    Note that the project can be either the project where the monitored
    resources are, or a workspace host project, in which case you will get
    results for all associated monitored projects.

    Args:
        context: inspection context; every project id in context.projects is
            queried in turn.
        query_str: MQL query string, passed verbatim to the Monitoring API.

    Returns:
        TimeSeriesCollection aggregated over all projects and result pages.
    """
    time_series = TimeSeriesCollection()

    for project_id in context.projects:
        mon_api = apis.get_api('monitoring', 'v3', project_id)
        try:
            request = mon_api.projects().timeSeries().query(
                name='projects/' + project_id, body={'query': query_str})
            logging.info('executing monitoring query (project: %s)', project_id)
            logging.debug('query: %s', query_str)
            pages = 0
            start_time = datetime.datetime.now()
            while request:
                pages += 1
                response = request.execute()
                time_series.add_api_response(response)
                # query_next returns None once the last page has been
                # consumed, which ends the loop.
                request = mon_api.projects().timeSeries().query_next(
                    previous_request=request, previous_response=response)
                if request:
                    logging.info('still executing monitoring query (project: %s)',
                                 project_id)
            end_time = datetime.datetime.now()
            logging.debug('query run time: %s, pages: %d', end_time - start_time,
                          pages)
        except googleapiclient.errors.HttpError as err:
            gcp_err = utils.GcpApiError(err)
            # Ignore 502 because we get that when the monitoring query times out.
            if gcp_err.status in [502]:
                logging.warning('error executing monitoring query: %s',
                                str(gcp_err.message))
            else:
                raise utils.GcpApiError(err) from err
    return time_series
5,328,272
def run_collector(service, metrics_factory, in_queue, pipe_to_server):
    """
    Manages Prometheus metrics.

    Receives changes to the metrics through a queue and emits their text
    representation (for HTTP export) over a pipe. Designed to be run as
    "target" in a multiprocessing.Process in conjunction with
    run_http_server(). NOTE: this function loops forever; it only returns
    when the process is terminated.

    Args:
        service: Slug of this checker instance's service.
        metrics_factory: Callable returning a dict of the metrics to use
            mapping from name to Metric object.
        in_queue: Queue over which MetricsMessages and HTTPGenMessages are
            received.
        pipe_to_server: Pipe to which text representations of the metrics
            are sent in response to HTTPGenMessages.
    """
    registry = prometheus_client.CollectorRegistry()
    metrics = metrics_factory(registry)

    def handle_metrics_message(msg):
        # Apply one MetricsMessage: look up the metric, bind labels,
        # then invoke the named instruction (e.g. 'inc', 'set') with the value.
        try:
            metric = metrics[msg.name]
        except KeyError:
            logging.error('Recevied message for unknown metric "%s", ignoring',
                          msg.name)
            return
        # Apparently, there is no nicer way to access the label names
        if 'service' in metric._labelnames:  # pylint: disable=protected-access
            msg.labels['service'] = service
        if len(msg.labels) > 0:
            try:
                metric = metric.labels(**(msg.labels))
            except ValueError:
                logging.error('Invalid labels specified for metric "%s", ignoring',
                              msg.name)
                return
        try:
            bound_method = getattr(metric, msg.instruction)
        except AttributeError:
            logging.error('Cannot use instruction "%s" on metric "%s", ignoring',
                          msg.instruction, msg.name)
            return
        try:
            bound_method(msg.value)
        except:  # noqa, pylint: disable=bare-except
            # Deliberately broad: a malformed message must never kill the
            # collector process.
            logging.exception('Could not update metric "%s":', msg.name)

    def send_metrics_text():
        # Render the whole registry in Prometheus exposition format and hand
        # it to the HTTP server process.
        metrics_text = prometheus_client.generate_latest(registry)
        pipe_to_server.send(metrics_text)

    while True:
        # Blocking get: the process sleeps until the next message arrives.
        message = in_queue.get(True)
        if isinstance(message, MetricsMessage):
            handle_metrics_message(message)
        elif isinstance(message, HTTPGenMessage):
            send_metrics_text()
        else:
            logging.error('Received unknown message on collector queue')
5,328,273
def get_status():
    """Get the node status and return data."""
    # No status details are gathered yet; wrap an empty payload.
    empty_status = {}
    return data(empty_status)
5,328,274
def read_orc(path, columns=None, storage_options=None, **kwargs):
    """Read cudf dataframe from ORC file(s).

    Note that this function is mostly borrowed from upstream Dask.

    Parameters
    ----------
    path: str or list(str)
        Location of file(s), which can be a full URL with protocol specifier,
        and may include glob character if a single string.
    columns: None or list(str)
        Columns to load. If None, loads all.
    storage_options: None or dict
        Further parameters to pass to the bytes backend.

    Returns
    -------
    cudf.DataFrame
    """
    storage_options = storage_options or {}
    fs, fs_token, paths = get_fs_token_paths(
        path, mode="rb", storage_options=storage_options
    )
    schema = None
    nstripes_per_file = []
    # First pass: verify all files share one schema and count stripes.
    for path in paths:
        with fs.open(path, "rb") as f:
            o = orc.ORCFile(f)
            if schema is None:
                schema = o.schema
            elif schema != o.schema:
                raise ValueError(
                    "Incompatible schemas while parsing ORC files"
                )
            nstripes_per_file.append(o.nstripes)
    schema = _get_pyarrow_dtypes(schema, categories=None)
    if columns is not None:
        ex = set(columns) - set(schema)
        if ex:
            raise ValueError(
                "Requested columns (%s) not in schema (%s)" % (ex, set(schema))
            )
    else:
        columns = list(schema)
    # Metadata (dtypes) comes from stripe 0 of the first file.
    with fs.open(paths[0], "rb") as f:
        meta = cudf.read_orc(f, stripe=0, columns=columns, **kwargs)

    # NOTE(review): the loop above rebound `path` — at this point it is the
    # *last* path, not the original argument, so the token is computed from
    # that single path. Verify against upstream Dask whether this is intended.
    name = "read-orc-" + tokenize(fs_token, path, columns, **kwargs)
    dsk = {}
    N = 0
    # One task per (file, stripe) pair.
    for path, n in zip(paths, nstripes_per_file):
        for stripe in range(n):
            dsk[(name, N)] = (
                _read_orc_stripe,
                fs,
                path,
                stripe,
                columns,
                kwargs,
            )
            N += 1
    # Divisions are unknown, hence all None.
    divisions = [None] * (len(dsk) + 1)
    return dd.core.new_dd_object(dsk, name, meta, divisions)
5,328,275
def test_multiple_invocations_parallel_same_flow_running(api_type, capsys):
    """Requires job to be setup allowing simultaneous executions"""
    # Imperative xfail: marks the test as expected-to-fail and aborts it
    # immediately — the concurrent-build scenario is still to be implemented.
    xfail("TODO Note: Test job allowing concurrent builds!")
5,328,276
def get_bm25_index(args, db, db_opts): """Form a sparse word to document count matrix (inverted index). M[i, j] = # times word i appears in document j. """ # Map doc_ids to indexes global DOC2IDX db_class = retriever.get_class(db) logging.info("Getting doc ids") with db_class(**db_opts) as doc_db: doc_ids = doc_db.get_doc_ids() DOC2IDX = {doc_id: i for i, doc_id in enumerate(doc_ids)} # Setup worker pool tok_class = tokenizers.get_class(args.tokenizer) #workers = ProcessPool( # args.num_workers, # initializer=init, # initargs=(tok_class, db_class, db_opts) #) logging.info(f"Documents ({len(doc_ids)}) are being tokenized") tok=tok_class() tokenized_corpus=[] bm25 = BM25Okapi() for i,doc_id in enumerate(doc_ids): #logging.info(doc_db.get_doc_text(doc_id)) #logging.info(tok.tokenize(retriever.utils.normalize(doc_db.get_doc_text(doc_id))).words())i tokens= tok.tokenize(retriever.utils.normalize(doc_db.get_doc_text(doc_id))) ngrams = tokens.ngrams(n=args.ngram, uncased=True, filter_fn=retriever.utils.filter_ngram) tokenized_corpus.append(ngrams) if i%10000==0: logging.info("%s/%s"%(i,len(doc_ids))) bm25.add(tokenized_corpus) logging.info(f"sizeof tokenized_corpus {sys.getsizeof(tokenized_corpus)}") logging.info(f"sizeof doc_freqs {sys.getsizeof(bm25.doc_freqs)}") logging.info(f"sizeof nd {sys.getsizeof(bm25.nd)}") logging.info(f"sizeof num_doc {sys.getsizeof(bm25.num_doc)}") del tokenized_corpus tokenized_corpus = [] gc.collect() if i==20000: pass #break bm25.add(tokenized_corpus) logging.info(f"sizeof tokenized_corpus {sys.getsizeof(tokenized_corpus)}") logging.info(f"sizeof doc_freqs {sys.getsizeof(bm25.doc_freqs)}") logging.info(f"sizeof nd {sys.getsizeof(bm25.nd)}") logging.info(f"num_doc {bm25.num_doc}") logging.info("Tokenization done") #logging.info(tokenized_corpus) bm25.finish() return bm25 , (DOC2IDX, doc_ids)
5,328,277
def dedup_model1():
    """Here, we use the w2v_wiki500_yelp_embed_nontrainable model to build
    indexes of block then save it to disk for further usage.
    """
    model_path = 'models'
    # Define block size (blocks of block_size_x * block_size_y weights)
    block_size_x = 10000
    block_size_y = 100

    # Build block indexes on w2v_wiki500_yelp_embed_nontrainable
    m1 = tf.keras.models.load_model(
        os.path.join(model_path, 'w2v_wiki500_yelp_embed_nontrainable.h5'),
        custom_objects={'KerasLayer': hub.KerasLayer})
    m1_ms = blocker.block_model_2d(m1, block_size_x=block_size_x,
                                   block_size_y=block_size_y)

    # Create indexer and register this model's blocks under its name
    blocks_indexer = indexer.Indexer(block_size_x=block_size_x,
                                     block_size_y=block_size_y)
    blocks_indexer.build_index(m1_ms, 'yelp_embed_nontrainable')

    # Persist the detector output and the index for later dedup runs.
    detector_output, num_unique_blocks = deduplicator.generate_detector_output(
        pd.DataFrame(), m1_ms, 0)
    np.save('exp_update_m2.npy', detector_output)
    blocks_indexer.save('exp_update_blocks_indexer_1.npy')
5,328,278
def get_bernoulli_sample(probs):
    """Conduct Bernoulli sampling according to a specific probability distribution.

    Args:
        probs: (torch.Tensor) A tensor in which each element denotes a
            probability of 1 in a Bernoulli distribution.

    Returns:
        A Tensor of binary samples (0 or 1) with the same shape, dtype and
        device as probs.
    """
    # torch.rand_like draws uniform noise on the SAME device and dtype as
    # `probs`. The old code chose the device from torch.cuda.is_available(),
    # which crashed with a device mismatch whenever CUDA existed but `probs`
    # lived on the CPU (and also ignored non-default CUDA devices).
    return torch.ceil(probs - torch.rand_like(probs))
5,328,279
def angle_load(root, ext='.angle'):
    """
    Load information from the :ref:`Output_angle` file previously created
    by :func:`.angle_save`.

    Args:
        root (str): root name for the file to be loaded
        ext (str, optional): default ".angle" -
            extension for the file to be loaded: name = root + ext

    Returns:
        (ndarray(float)): 2D array containing degrees and corresponding
        values of adf, or None if no 'DEGREE ADF' header line is found.
    """
    path = root + ext
    try:
        # Context manager guarantees the handle is closed; the old code
        # opened the file and never closed it.
        with open(path, 'r') as f:
            text = f.readlines()
    except IOError:
        utility.err_file('angle_load', path)
    # Split each line into a list of whitespace-separated tokens.
    rows = [line.split() for line in text]
    # Initialize so a missing header yields None instead of a NameError.
    data = None
    for i, row in enumerate(rows):
        # Find the beginning of the data section; everything after the
        # 'DEGREE ADF' header line is numeric data.
        if row == ['DEGREE', 'ADF']:
            data = np.array(rows[i + 1:], dtype=float)
            break
    return data
5,328,280
def write_data_to_gcs(csv_data):
    """Writes CSV to GCS.

    One CSV object is created per class, under a shared ISO-8601
    timestamp prefix.

    Args:
        csv_data: dict mapping a class name to an iterable of CSV rows.

    Raises:
        Re-raises (with original traceback) whatever the GCS client raises
        on a failed write, after logging it.
    """
    logging.info('Uploading data to GCS')
    now_iso8601 = datetime.datetime.utcnow().isoformat('T')
    # .items() works on both Python 2 and 3; the old .iteritems() call is
    # Python-2-only and raises AttributeError under Python 3.
    for classname, results in csv_data.items():
        filename_to_create = '{0}/{1}.csv'.format(now_iso8601, classname)
        bucket_with_filename = os.path.join('/', DATA_BUCKET, filename_to_create)  # noqa: E501
        try:
            with gcs.open(bucket_with_filename, 'w', content_type='text/csv') as gcs_file:
                writer = csv.writer(gcs_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)  # noqa: E501
                writer.writerows(results)
        except Exception as error:
            logging.error('An error occurred writing the file to GCS: {0}'.format(error))  # noqa: E501
            # Bare re-raise keeps the original traceback; 'raise error'
            # discarded it on Python 2.
            raise
5,328,281
def parse_env_file(filename, pattern):
    """Source a shell script and extract variables from it.

    Args:
        filename: path to the shell script to source.
        pattern: extended regex (grep -E) selecting which `set` output
            lines to keep, e.g. '^MY_PREFIX_'.

    Returns:
        dict mapping variable name to its (shell-quoted, as printed by
        `set`) value.

    Raises:
        subprocess.CalledProcessError if the shell pipeline fails.
    """
    # Use the shell to parse this so we can also read substitutions
    # like $() for example. '.' is the POSIX spelling of 'source', so the
    # command also works under dash/ash, not only bash.
    env = {}
    command = '. {}; set | grep -E "{}"'.format(filename, pattern)
    output = subprocess.check_output(['sh', '-c', command])
    output = output.decode(locale.getpreferredencoding())
    for line in output.splitlines():
        # `set` may emit continuation lines (multi-line values) that contain
        # no '='; the old find()==-1 handling turned those into bogus
        # entries. Skip anything without a '=' separator instead.
        name, sep, value = line.partition('=')
        if sep:
            env[name] = value
    return env
5,328,282
def dispatch(intent_request):
    """
    Called when the user specifies an intent for this bot.
    Routes the request to the handler registered for the intent name.
    """
    intent_name = intent_request['currentIntent']['name']
    logger.debug('dispatch userId={}, intentName={}'.format(
        intent_request['userId'], intent_name))

    # Table-driven dispatch to the bot's intent handlers.
    handlers = {
        'GetAccountDetail': get_balance,
        'GetLoanDetail': get_loan_balance,
        'GetLoanProducts': get_loan_offer,
    }
    if intent_name in handlers:
        return handlers[intent_name](intent_request)

    raise Exception('Intent with name ' + intent_name + ' not supported')
5,328,283
def load_newsdata_and_labels(): """ Read newsdata, return list of documents, each line in list is one document as string. And list of labels, each line in list is one-hot-encoded class """ # read newsdata which is pickled import pickle def read_pickle_one_by_one(pickle_file): with open(pickle_file, "rb") as t_in: while True: try: yield pickle.load(t_in) except EOFError: break #sentnos = [s for s in read_pickle_one_by_one("sentnos.pkl")] # sentence numbers labels = [l for l in read_pickle_one_by_one("data_own/labels.pkl")] #focuses = [f for f in read_pickle_one_by_one("focuses.pkl")] texts = [t for t in read_pickle_one_by_one("data_own/texts.pkl")] # assert == len(labels) == len(texts) # == len(sentnos) == len(focuses) #print("longest text") #print(max(len(t) for t in texts)) #print(sentnos[23]) #print(texts[23]) #print(focuses[23]) #print(labels[23]) # import copy # if need real copies, not just new pointers # new_texts = copy.deepcopy(texts) # empty list, same lenght as texts new_texts = [None] * len(texts) # go through list and for each document in list, join list of words to a string for documentnr, value in enumerate(texts): #print(document, value) new_texts[documentnr] = ' '.join(value) # labels are 5-6 classes. turn them into 1-hot-encoded. 6 classes mentioned in paper, only 5 present in data. new_labels = np.zeros((len(labels),5)) for labelnr, value in enumerate(labels): if value[0]==1: new_labels[labelnr][0]=1 #one hot to true elif value[0]==0.7: new_labels[labelnr][1]=1 elif value[0]==0.5: new_labels[labelnr][2]=1 elif value[1]==0.7: new_labels[labelnr][3]=1 elif value[0]==0: new_labels[labelnr][4]=1 x_text = new_texts y = new_labels return [x_text, y]
5,328,284
def CollectUniqueByOrderOfAppearance(dataset:list):
    """
    This method collect all unique in order of appearance and return it as list.
        :param dataset:list: dataset list
    """
    try:
        # Track what we've already emitted; binding the .add method keeps
        # the comprehension's membership-test-and-record in one expression.
        already_seen = set()
        remember = already_seen.add
        return [element for element in dataset
                if not (element in already_seen or remember(element))]
    except Exception as ex:
        template = "An exception of type {0} occurred in [ContentSupport.CollectUniqueByOrderOfAppearance]. Arguments:\n{1!r}"
        message = template.format(type(ex).__name__, ex.args)
        print(message)
5,328,285
async def list_slot_set_actions(current_user: User = Depends(Authentication.get_current_user_and_bot)):
    """
    Returns list of slot set actions for bot.
    """
    # current_user is injected by FastAPI's dependency system and scopes
    # the lookup to the authenticated user's bot.
    actions = mongo_processor.list_slot_set_actions(current_user.get_bot())
    return Response(data=actions)
5,328,286
def openfile_dialog(file_types="All files (*)", multiple_files=False, file_path='.',
                    caption="Select a file..."):
    """
    Opens a File dialog which is used in open_file() function
    This function uses pyQt5.

    Parameters
    ----------
    file_types : str, optional. Default = all types of files accepted
    multiple_files : bool, optional. Default = False
        Whether or not multiple files can be selected
    file_path: str, optional. Default = '.'
        path to starting or root directory
    caption: str, optional. Default = "Select a file..."
        caption of the open file dialog

    Returns
    -------
    filename : str or list(str) or None
        full filename(s) with absolute path and extension; None if the
        dialog was cancelled with multiple_files=True.

    Notes
    -----
    In jupyter notebooks use ``%gui Qt`` early in the notebook.
    """
    # Check whether QT is available
    try:
        from PyQt5 import QtGui, QtWidgets, QtCore
    except ImportError:
        raise ModuleNotFoundError('Required package PyQt5 not available')

    # try to find a parent the file dialog can appear on top of
    try:
        get_QT_app()
    except Exception:
        # Best effort only; a missing app instance is not fatal here.
        pass

    for param in [file_path, file_types, caption]:
        if param is not None:
            # The old check compared against the Python 2 name `unicode`,
            # which is a NameError on Python 3; plain `str` suffices.
            if not isinstance(param, str):
                raise TypeError('param must be a string')

    parent = None
    if multiple_files:
        func = QtWidgets.QFileDialog.getOpenFileNames
        fnames, file_filter = func(parent, caption, file_path,
                                   filter=file_types,
                                   options=[QtCore.Qt.WindowStaysOnTopHint])
        if len(fnames) == 0:
            # Dialog cancelled: nothing selected.
            return
        return fnames
    func = QtWidgets.QFileDialog.getOpenFileName
    fname, file_filter = func(parent, caption, file_path, filter=file_types)
    return str(fname)
5,328,287
def getBlocks(bal: "BKAlignedLayout"):
    """
    Finds all blocks of a given layout.

    :param bal: The layout of which the blocks shall be found
    :return: defaultdict mapping each block root to the list of its nodes
    """
    blocks = defaultdict(list)
    # Group every node under the root recorded for it in the layout.
    for layer in bal.layeredGraph.layers:
        for node in layer:
            blocks[bal.root[node]].append(node)
    return blocks
5,328,288
def read_mcmc(path_to_file):
    """
    Reads mcmc chain from file

    Parameters
    ----------
    path_to_file: string
        Path to mcmc chain file

    Returns
    ---------
    emcee_table: pandas dataframe
        Dataframe of mcmc chain values with NANs removed

    Notes
    -----
    Relies on the module-level globals ``mf_type`` and ``survey`` to pick
    the parsing branch — TODO confirm both are always defined before this
    is called.
    """
    colnames = ['mhalo_c', 'mstellar_c', 'lowmass_slope', 'highmass_slope',
                'scatter']

    if mf_type == 'smf' and survey == 'eco':
        # Clean numeric chain: parse directly as float64.
        emcee_table = pd.read_csv(path_to_file, names=colnames, sep='\s+',
                                  dtype=np.float64)
    else:
        # Chain may contain comment markers and malformed rows; read as
        # objects first, drop '#' rows, then coerce column by column.
        emcee_table = pd.read_csv(path_to_file, names=colnames,
                                  delim_whitespace=True, header=None)
        emcee_table = emcee_table[emcee_table.mhalo_c.values != '#']
        emcee_table.mhalo_c = emcee_table.mhalo_c.astype(np.float64)
        emcee_table.mstellar_c = emcee_table.mstellar_c.astype(np.float64)
        emcee_table.lowmass_slope = emcee_table.lowmass_slope.astype(np.float64)

    # Cases where last parameter was a NaN and its value was being written to
    # the first element of the next line followed by 4 NaNs for the other
    # parameters: pull the stray value back into the previous row.
    for idx, row in enumerate(emcee_table.values):
        if np.isnan(row)[4] == True and np.isnan(row)[3] == False:
            scatter_val = emcee_table.values[idx + 1][0]
            row[4] = scatter_val

    # Cases where rows of NANs appear: drop any row still containing a NaN.
    emcee_table = emcee_table.dropna(axis='index', how='any').\
        reset_index(drop=True)

    return emcee_table
5,328,289
def rotzV(x, theta):
    """Rotate a coordinate in the local z frame.

    :param x: 3-vector to rotate
    :param theta: rotation angle in radians about the z axis
    :return: rotated 3-vector
    """
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    # Standard right-handed rotation matrix about z.
    rotation = np.array([[cos_t, -sin_t, 0],
                         [sin_t,  cos_t, 0],
                         [0,      0,     1]])
    return np.dot(rotation, x)
5,328,290
def has_flock(fd):
    """
    Checks if fd has flock over it (held by another open file description).

    True if it is, False otherwise.

    :param fd: file descriptor to probe
    :return: whether an exclusive flock is already held elsewhere
    :rtype: bool
    """
    try:
        # Non-blocking probe: fails immediately if someone else holds it.
        fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except BlockingIOError:
        return True
    # The probe above ACQUIRED the lock when it was free; release it so a
    # mere check does not leave fd locked (the old code leaked the lock,
    # making every later probe on the same file report it as locked).
    fcntl.flock(fd, fcntl.LOCK_UN)
    return False
5,328,291
def check_header(in_path):
    """Logs an error message and raises an exception if the in_path file
    has no header (relies on the has_header function).
    """
    # Guard clause: nothing to do when the header is present.
    if has_header(in_path):
        return
    log(f"Error: the input file {in_path} must have a header")
    log_print("Make sure the first elements of the first two lines are of different lengths")
    raise Exception(g.E_MH)
5,328,292
def userprofile_set_date_joined_program_pre_save(sender, instance, **kwargs):
    """Set date_joined_program to today when empty."""
    # Leave an already-populated date untouched.
    if instance.date_joined_program:
        return
    instance.date_joined_program = timezone.now().date()
5,328,293
def test_atomic_duration_min_inclusive_nistxml_sv_iv_atomic_duration_min_inclusive_1_3(mode, save_output, output_format):
    """
    Type atomic/duration is restricted by facet minInclusive with value
    P1970Y01M01DT00H00M00S.
    """
    # Generated NIST conformance case: round-trips the instance document
    # against the schema's minInclusive duration facet via xsdata bindings.
    assert_bindings(
        schema="nistData/atomic/duration/Schema+Instance/NISTSchema-SV-IV-atomic-duration-minInclusive-1.xsd",
        instance="nistData/atomic/duration/Schema+Instance/NISTXML-SV-IV-atomic-duration-minInclusive-1-3.xml",
        class_name="NistschemaSvIvAtomicDurationMinInclusive1",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
5,328,294
def test_nested_list_comprehensions():
    """Nested List Comprehensions

    The initial expression in a list comprehension can be any arbitrary
    expression, including another list comprehension.
    """
    # A 3x4 matrix implemented as a list of 3 lists of length 4:
    matrix = [
        [1, 2, 3, 4],
        [5, 6, 7, 8],
        [9, 10, 11, 12],
    ]
    # Its transpose, used as the expected value throughout:
    expected = [
        [1, 5, 9],
        [2, 6, 10],
        [3, 7, 11],
        [4, 8, 12],
    ]

    # A nested listcomp transposes rows and columns in one expression.
    # The inner comprehension is evaluated in the context of the `for`
    # that follows it.
    assert [[row[i] for row in matrix] for i in range(4)] == expected

    # Re-stating the same expression shows it is deterministic and pure.
    assert [[row[i] for row in matrix] for i in range(4)] == expected

    # The equivalent explicit loop, one transposed row at a time:
    transposed = []
    for i in range(4):
        transposed_row = [row[i] for row in matrix]
        transposed.append(transposed_row)
    assert transposed == expected

    # In the real world, prefer built-ins: zip(*matrix) transposes too,
    # though it yields tuples rather than lists.
    assert list(zip(*matrix)) == [
        (1, 5, 9),
        (2, 6, 10),
        (3, 7, 11),
        (4, 8, 12),
    ]
5,328,295
def _check_data( dataset_name="celeba", train_batch_size=32, eval_batch_size=512, num_per_group=200, ): """Check the blond vs non-blond number of images.""" # TODO: Support black hair. train_loader, valid_loader, test_loader = _make_loaders( dataset_name, train_batch_size=train_batch_size, eval_batch_size=eval_batch_size, num_workers=0, drop_last=False, # Don't drop training images. ) # CelebA has ~160k train data, ~20k test and val data; total ~200k data. data_stats = dict() for loader_name, loader in utils.zip_( ('train', 'valid', 'test'), (train_loader, valid_loader, test_loader) ): size = num_blond = num_not_blond = 0 blond, not_blond = [], [] for tensors in tqdm.tqdm(loader, desc="batches"): images, labels = tensors labels = labels[:, 9] # blond hair. labels = labels.bool().cpu().tolist() size += images.size(0) for image, label in utils.zip_(images, labels): if label: num_blond += 1 if len(blond) < num_per_group: # Don't store too many. blond.append(image) else: num_not_blond += 1 if len(not_blond) < num_per_group: # Don't store too many. not_blond.append(image) data_stats[loader_name] = { 'blond': num_blond, 'not blond': num_not_blond, 'size': size, } # Show some. blond, not_blond = tuple( utils.denormalize(torch.stack(t), mean=CHANNEL_MEAN, std=CHANNEL_STD) for t in (blond, not_blond) ) # def show(imgs): # if not isinstance(imgs, list): # imgs = [imgs] # fix, axs = plt.subplots(ncols=len(imgs), squeeze=False) # for i, img in enumerate(imgs): # img = img.detach() # img = tvF.to_pil_image(img) # axs[0, i].imshow(np.asarray(img)) # axs[0, i].set(xticklabels=[], yticklabels=[], xticks=[], yticks=[]) # plt.show() # plt.close() torchvision.utils.save_image( blond, utils.join('.', 'explainx', 'plots', 'blond.png'), nrow=20, ) torchvision.utils.save_image( not_blond, utils.join('.', 'explainx', 'plots', 'not_blond.png'), nrow=20, ) utils.jdump( data_stats, utils.join('.', 'explainx', 'data_stats', 'blond_not_blond.json'), )
5,328,296
def ExtractCodeBySystem(codable_concept, system):
    """Extract code in codable_concept.

    Returns the code value of the first coding entry whose system matches
    `system` (and which has both fields set), or None if there is none.
    """
    matches = (
        coding.code.value
        for coding in codable_concept.coding
        if coding.HasField('system')
        and coding.HasField('code')
        and coding.system.value == system
    )
    return next(matches, None)
5,328,297
def add_item(data, type): """ Add an item to the data in ranked order This function handles the process of adding an item to the list. It first requests the item from the console. Items are nothing more than a line of text typed in. Next, this kicks off a type of binary search to find the proper location of the new item. Then it adds the item to the data and lastly it prompts to add another. Args: data The original list of items type A label describing the type of items being ranked Returns A new list containing the new item """ # prompt for item clear_console() print_header(' A D D I T E M') prompt_message = "\nAdd something new for {type}, leave blank to quit ==> ".format(type = type.lower()) thing = input(prompt_message) while thing != "": # search for placement place_at = search_for_spot(thing, data, 0, len(data) - 1) # add to list data.insert(place_at, thing) # prompt for another thing = input(prompt_message) return data
5,328,298
def write_phones_txt(orig_lines, highest_numbered_symbol, nonterminals, filename):
    """Writes updated phones.txt to 'filename'.

    'orig_lines' is the original lines in the phones.txt file as a list of
    strings (without the newlines); highest_numbered_symbol is the highest
    numbered symbol in the original phones.txt; nonterminals is a list of
    strings like '#nonterm:foo'.
    """
    # The built-in nonterminal markers come first, then the user-supplied
    # ones, each numbered consecutively after the existing symbols.
    extra_symbols = ['#nonterm_bos', '#nonterm_begin', '#nonterm_end',
                     '#nonterm_reenter'] + nonterminals
    with open(filename, 'w', encoding='latin-1') as f:
        for original_line in orig_lines:
            print(original_line, file=f)
        for offset, symbol in enumerate(extra_symbols):
            print("{0} {1}".format(symbol, highest_numbered_symbol + 1 + offset),
                  file=f)
5,328,299