content
stringlengths
22
815k
id
int64
0
4.91M
def parse(peaker):
    # type: (Peaker[Token]) -> Node
    """Parse the docstring.

    Args:
        peaker: A Peaker filled with the lexed tokens of the docstring.

    Raises:
        ParserException: If there is anything malformed with the docstring,
            or if anything goes wrong with parsing. # noqa

    Returns:
        The parsed docstring as an AST.

    """
    # Section keyword at the start of a line selects the section parser.
    keyword_parse_lookup = {
        'Args': parse_args,
        'Arguments': parse_args,
        'Returns': parse_returns,
        'Yields': parse_yields,
        'Raises': parse_raises,
    }
    # Every docstring opens with a (possibly empty) description.
    children = [
        parse_description(peaker)
    ]
    while peaker.has_next():
        next_value = peaker.peak().value
        if next_value in keyword_parse_lookup:
            children.append(
                keyword_parse_lookup[next_value](peaker)
            )
        else:
            # Anything that is not a recognized section keyword is folded
            # into a long-description node.
            children.append(
                parse_long_description(peaker)
            )
    return Node(
        node_type=NodeType.DOCSTRING,
        children=children,
    )
31,300
def listify(what, *, debug=False):
    """Coerce *what* into a list, discarding reversibility information.

    Non-reversible version of listify_safe(). In this case "None" always
    means "no columns".

    Returns:
        list
    """
    values, _ = listify_safe(what, debug=debug)
    return values
31,301
def check_server_url(url):
    """Validate that *url* carries an explicit http or https scheme.

    Args:
        url: Server address supplied by the user.

    Raises:
        ValueError: If the address does not start with ``http://`` or
            ``https://``.
    """
    # startswith accepts a tuple of prefixes -- a single call instead of
    # two chained with `or`.
    if not url.startswith(('http://', 'https://')):
        msg = "You must include http(s):// in your server's address, %s doesn't" % url
        raise ValueError(msg)
31,302
def sphrec(r, colat, lon):
    """
    Convert from spherical coordinates to rectangular coordinates.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/sphrec_c.html

    :param r: Distance of a point from the origin.
    :type r: float
    :param colat: Angle of the point from the positive Z-axis.
    :type colat: float
    :param lon: Angle of the point from the XZ plane in radians.
    :type lon: float
    :return: Rectangular coordinates of the point.
    :rtype: 3-Element Array of floats
    """
    # Marshal the Python floats into C doubles for the CSPICE call.
    r = ctypes.c_double(r)
    colat = ctypes.c_double(colat)
    lon = ctypes.c_double(lon)
    # Output buffer; sphrec_c fills it in place.
    rectan = stypes.emptyDoubleVector(3)
    libspice.sphrec_c(r, colat, lon, rectan)
    return stypes.cVectorToPython(rectan)
31,303
def counter_endpoint(event=None, context=None):
    """
    API endpoint that returns the total number of UFO sightings.

    An example request might look like:

    .. sourcecode:: http

        GET www.x.com/counter HTTP/1.1
        Host: example.com
        Accept: application/json, text/javascript

    Results will be returned as JSON object with the following format:

    .. code-block:: json

        {
            "count": <number>
        }
    """
    # Serialize the row count as {"count": <number>} and return it as a
    # JSON response.
    return app.response_class(
        json.dumps(count_rows_in_table()),
        mimetype='application/json'
    )
31,304
def search_trie(result, trie):
    """Collect trie matches for every token in *result*.

    Args:
        result: Tokenized query; must expose ``is_null()``, iteration over
            token strings, ``has_memory()`` and ``is_memorized()``.
        trie: Trie exposing ``find(token)`` which returns a list of matches.

    Returns:
        List of matched strings. When the result carries a memory, strings
        already memorized are filtered out.
    """
    if result.is_null():
        return []

    # output
    ret_vals = []
    for token_str in result:
        ret_vals += trie.find(token_str)

    if result.has_memory():
        # Idiom fix: use `not ...` instead of comparing `== False`, which
        # also correctly treats other falsy returns (e.g. None) as
        # "not memorized".
        ret_vals = [
            one_string for one_string in ret_vals
            if not result.is_memorized(one_string)
        ]
    return ret_vals
31,305
def get_nb_entry(path_to_notes: str = None,
                 nb_name: str = None,
                 show_index: bool = True) -> str:
    """Returns the entry of a notebook.

    This entry is to be used for the link to the notebook from the
    table of contents and from the navigators.

    Depending on the value of the argument `show_index`, the entry can
    be either the full entry provided by the function
    `get_nb_full_entry()` or simply the title of the notebook, provided
    by the function `get_nb_title()`.

    Parameters
    ----------
    path_to_notes : str
        The path to the directory that contains the notebooks, either
        absolute or relative to the script that calls `nbbinder.bind()`.
    nb_name : str
        The name of the jupyter notebook file.
    show_index : boolean
        Indicates whether to include the chapter and section numbers of
        the notebook in the table of contents (if True) or just the
        title (if False).

    Returns
    -------
    entry : str
        A string with the entry name.
    """
    if show_index:
        # get_nb_full_entry returns a 3-tuple; elements [1:3] hold the
        # index prefix and the title, which are joined into one string.
        entry = ''.join(list(get_nb_full_entry(path_to_notes, nb_name)[1:3]))
    else:
        entry = get_nb_title(path_to_notes, nb_name)
    return entry
31,306
def get_label_encoder(config):
    """Look up and instantiate the label encoder for a model config.

    Args:
        config (ModelConfig): A model configuration.

    Returns:
        LabelEncoder: The appropriate LabelEncoder object for the given
            config.
    """
    encoder_cls = LABEL_MAP[config.label_type]
    return encoder_cls(config)
31,307
def get_lesson_comment_by_sender_user_id():
    """Return lesson comments filtered by the sender's user id.

    Expected query parameters::

        {
            "page": "Long",
            "size": "Long",
            "sender_user_id": "Long"
        }
    """
    # Flatten Flask query args into a plain dict for the service layer.
    domain = request.args.to_dict()
    return lesson_comment_service.get_lesson_comment_by_sender_user_id(domain)
31,308
def output(pin, dir):
    """
    Writes state (HIGH, LOW) based on pin number.

    Param 'pin': Any pin from 1 to 16.
    Param 'dir': Defines pin state, on (HIGH) or off (LOW).

    Raises:
        ValueError: if pin or dir are invalid.
    """
    global LATA, LATB
    # Pins 1-8 map to port B (register 0x15); pins 9-16 to port A (0x14).
    if (str(pin).isdigit() and (0 < pin < 17)):
        if (dir == HIGH or dir == LOW):
            if (0 < pin < 9):
                # Patch the one bit inside the cached latch bit-string,
                # then write the full byte over I2C.
                LATB = LATB[0:pin - 1] + str(dir) + LATB[pin:]
                i2c.write_byte_data(ADDRESS, 0x15, int(LATB, 2))
            elif (8 < pin < 17):
                LATA = LATA[0:(pin - 8) - 1] + str(dir) + LATA[(pin - 8):]
                i2c.write_byte_data(ADDRESS, 0x14, int(LATA, 2))
        else:
            raise ValueError(str(dir) + " is not a valid state")
    else:
        raise ValueError(str(pin) + " is not a valid number")
31,309
def _inertia_grouping(stf): """Grouping function for class inertia. """ if hasattr(stf[2], 'inertia_constant'): return True else: return False
31,310
def parse_input(raw_input: str) -> nx.DiGraph:
    """Parses Day 12 puzzle input.

    Each line ``a-b`` is an undirected cave connection; both directions
    are added as directed edges, except that no edge may leave END or
    enter START, so every walk through the graph is forced to terminate.
    """
    graph = nx.DiGraph()
    graph.add_nodes_from([START, END])
    for line in raw_input.strip().splitlines():
        edge = line.split('-')
        # Consider both orientations of the undirected input edge.
        for candidate in [edge, list(reversed(edge))]:
            if candidate[0] == END:
                continue
            if candidate[1] == START:
                continue
            graph.add_edge(*candidate)
    return graph
31,311
def zonal_mode_extract(infield, mode_keep, low_pass = False):
    """
    Subfunction to extract or swipe out zonal modes (mode_keep) of (y, x) data.

    Assumes here that the data is periodic in axis = 1 (in the x-direction)
    with the end point missing. If mode_keep = 0 then this is just the
    zonal averaged field.

    Input:
      infield      2d layer input field
      mode_keep    the zonal mode of the data to be extracted from

    Opt input:
      low_pass     get rid of all modes from mode_keep + 1 onwards

    Output:
      outfield     zonal mode of the data
    """
    # Forward real FFT along x.
    # NOTE(review): the exact coefficient layout depends on which `rfft`
    # is imported at module level (numpy.fft vs scipy.fftpack order the
    # real/imaginary parts differently) -- confirm against the file's
    # imports before relying on the slicing below.
    outfield_h = rfft(infield, axis = 1)
    # Zero all modes above mode_keep.
    outfield_h[:, mode_keep+1::] = 0
    if not low_pass:
        # Band-pass: also zero all modes below mode_keep, leaving only
        # the requested mode.
        outfield_h[:, 0:mode_keep] = 0
    return irfft(outfield_h, axis = 1)
31,312
def create_room(game_map, room):
    """Carve out a room: mark its interior tiles passable and see-through.

    Walks every tile strictly inside the rectangle (the x1/y1 and x2/y2
    edges are left as walls) and flags it walkable and transparent.
    """
    for col in range(room.x1 + 1, room.x2):
        for row in range(room.y1 + 1, room.y2):
            cell = (col, row)
            game_map.walkable[cell] = True
            game_map.transparent[cell] = True
31,313
def clone():
    """Clone model

    PUT /models

    Parameters:
    {
        "model_name": <model_name_to_clone>,
        "new_model_name": <name_for_new_model>
    }

    Returns:
    - {"model_names": <list_of_model_names_in_session>}
    """
    request_json = request.get_json()
    name = request_json["model_name"]
    new_name = request_json["new_model_name"]
    models = None
    if 'models' in session:
        # Models are stored pickled inside the server-side session.
        # NOTE(review): pickle.loads on session data is safe only if the
        # session store is trusted/signed -- confirm the session backend.
        models = pickle.loads(session["models"])
    else:
        models = {}
    if name in models:
        # Silently a no-op when the source model does not exist.
        models[new_name] = models[name].clone()
        session["models"] = pickle.dumps(models)
    res = {"model_names": get_model_names()}
    return jsonify(res)
31,314
def GetFlagFromDest(dest):
    """Returns a conventional flag name given a dest name."""
    flag_name = dest.replace('_', '-')
    return '--' + flag_name
31,315
def updated_clicked_recipe_maker(w):
    """Updates the maker display data with the selected recipe.

    Reads the recipe selected in the maker list widget, gathers its
    machine-added and hand-added ingredients from the DB, and fills the
    maker view with the ingredient list and total volume.
    """
    if not w.LWMaker.selectedItems():
        return
    DP_HANDLER.clear_recipe_data_maker(w)
    handle_alcohollevel_change(w)
    cocktailname = w.LWMaker.currentItem().text()
    machineadd_data, handadd_data = DB_COMMANDER.get_recipe_ingredients_by_name_seperated_data(cocktailname)
    # Each ingredient row is [name, volume]; sum the volumes of both parts.
    total_volume = sum([v[1] for v in machineadd_data] + [v[1] for v in handadd_data])
    ingredient_data = machineadd_data
    if handadd_data:
        # German UI separator row: "Selbst hinzufügen:" == "add yourself:".
        ingredient_data.extend([["", ""], ["Selbst hinzufügen:", ""]])
        ingredient_data.extend(handadd_data)
    DP_HANDLER.fill_recipe_data_maker(w, ingredient_data, total_volume, cocktailname)
31,316
def get_rst_export_elements(
    file_environment, environment, module_name, module_path_name,
    skip_data_value=False, skip_attribute_value=False, rst_elements=None
):
    """Return :term:`reStructuredText` from exported elements within
    *file_environment*.

    *environment* is the full :term:`Javascript` environment processed
    in :mod:`~champollion.parser`.

    *module_name* is the module alias that should be added to each
    directive.

    *module_path_name* is the module path alias that should be added to
    each directive.

    *skip_data_value* indicate whether data value should not be displayed.

    *skip_attribute_value* indicate whether attribute value should not be
    displayed.

    *rst_elements* can be an initial dictionary that will be updated and
    returned.
    """
    export_environment = file_environment["export"]
    import_environment = file_environment["import"]

    if rst_elements is None:
        rst_elements = {}

    for _exported_env_id, _exported_env in export_environment.items():
        from_module_id = _exported_env["module"]
        line_number = _exported_env["line_number"]
        # Elements are grouped per source line number.
        if line_number not in rst_elements.keys():
            rst_elements[line_number] = []

        name = _exported_env["name"]
        alias = _exported_env["alias"]
        if alias is None:
            alias = name

        # Update module origin and name from import if necessary
        if (from_module_id is None and
                _exported_env_id in import_environment.keys()):
            name = import_environment[_exported_env_id]["name"]
            from_module_id = import_environment[_exported_env_id]["module"]

        # Ignore element if the origin module can not be found
        if from_module_id not in environment["module"].keys():
            continue

        from_module_environment = environment["module"][from_module_id]
        from_file_id = from_module_environment["file_id"]
        from_file_env = environment["file"][from_file_id]

        if name == "default":
            # Default export: resolved against the origin file environment.
            rst_element = get_rst_default_from_file_environment(
                from_file_env, alias, module_name, module_path_name,
                skip_data_value=skip_data_value,
                skip_attribute_value=skip_attribute_value,
            )
            if rst_element is None:
                continue
            rst_elements[line_number].append(rst_element)

        elif name == "*":
            # Wildcard export: emit an automodule directive for the whole
            # origin module.
            extra_options = [
                ":force-partial-import:",
                ":members:",
                ":skip-description:"
            ]
            if skip_data_value:
                extra_options.append(":skip-data-value:")
            if skip_attribute_value:
                extra_options.append(":skip-attribute-value:")
            rst_element = rst_generate(
                directive="automodule",
                element_id=from_module_id,
                module_alias=module_name,
                module_path_alias=module_path_name,
                extra_options=extra_options
            )
            rst_elements[line_number].append(rst_element)

        else:
            # Named export.
            rst_element = get_rst_name_from_file_environment(
                name, from_file_env, alias, module_name, module_path_name,
                skip_data_value=skip_data_value,
                skip_attribute_value=skip_attribute_value,
            )
            if rst_element is None:
                continue
            rst_elements[line_number].append(rst_element)

    return rst_elements
31,317
def oriented_tree():
    """
    Number of unique oriented trees with n unlabeled nodes

    OEIS A000238

    NOTE(review): stub -- no implementation yet; calling this returns
    None.
    """
31,318
def get_insight_comments():
    """Fetch the comments attached to an insight and post them to the
    war room.

    Reads ``insight-id`` from the command arguments, queries the
    ``alert/<id>/comments`` endpoint and returns the comments as a
    markdown table plus entry context.
    """
    alert_id = demisto.getArg('insight-id')
    resp_json = req('GET', 'alert/%s/comments' % alert_id, QUERY)
    comments = [to_readable(comment, ['id', 'alert_id', 'author', 'body', 'last_updated', 'timestamp'],
                            {'alert_id': 'InsightId'}) for comment in resp_json['objects']]
    # BUG FIX: the context key previously contained a literal "%s" -- the
    # alert id was never interpolated, so the DT filter matched nothing.
    ec = {'Jask.Insight(val.Id == "%s").CommentList' % alert_id: comments}
    md = tableToMarkdown('Insight Comments:', comments,
                         ['Id', 'InsightId', 'Author', 'Body', 'LastUpdated', 'Timestamp'])
    demisto.results({
        'Type': entryTypes['note'],
        'EntryContext': ec,
        'HumanReadable': md,
        'Contents': resp_json,
        'ContentsFormat': formats['json']
    })
31,319
def ganache_url(host='127.0.0.1', port='7445'):
    """Return URL for Ganache test server."""
    return "http://{}:{}".format(host, port)
31,320
def to_news_detail_list_by_period(uni_id_list: list, start: str, end: str) -> list:
    """Fetch news details for companies over a date range (serial version).

    For every unified social credit code in *uni_id_list*, collect the
    news summaries between *start* and *end* and resolve each summary
    into a full news-detail record, one request at a time.

    :param uni_id_list: unified social credit codes of the companies
    :param start: start date of the period
    :param end: end date of the period
    :return: list of news detail records
    """
    detail_list = []
    for uni_id in uni_id_list:
        for summary in to_news_summary_list_by_period(uni_id, start, end):
            detail_list.append(to_news_detail_by_summary(summary))
    return detail_list
31,321
def positions_to_df(positions: List[alp_api.entity.Asset]) -> pd.DataFrame:
    """Generate a df from alpaca api assests

    Parameters
    ----------
    positions : List[alp_api.entity.Asset]
        List of alpaca trade assets

    Returns
    -------
    pd.DataFrame
        Processed dataframe with Symbol, MarketValue, Quantity,
        CostBasis and a constant Broker column.
    """
    df = pd.DataFrame(columns=["Symbol", "MarketValue", "Quantity", "CostBasis"])
    sym = []
    mv = []
    qty = []
    cb = []
    # Collect the per-position fields into parallel column lists,
    # coercing the numeric strings the API returns into floats.
    for pos in positions:
        sym.append(pos.symbol)
        mv.append(float(pos.market_value))
        qty.append(float(pos.qty))
        cb.append(float(pos.cost_basis))
    df["Symbol"] = sym
    df["MarketValue"] = mv
    df["Quantity"] = qty
    df["CostBasis"] = cb
    # Tag every row with the broker identifier.
    df["Broker"] = "alp"
    return df
31,322
def get_all_pages(date):
    """For the specific date, get all page URLs."""
    r = requests.get(URL, params={"search": date})
    soup = BeautifulSoup(r.text, "html.parser")
    # Deduplicate the pagination hrefs, then make each one absolute.
    return [
        f"https://www.courts.phila.gov/{url}"
        for url in set([a["href"] for a in soup.select(".pagination li a")])
    ]
31,323
def boundary_condition(
    outer_bc_geometry: List[float],
    inner_bc_geometry: List[float],
    bc_num: List[int],
    T_end: float,
):
    """
    Generate BC points for outer and inner boundaries.

    Each returned array has rows of the form (t, x, y): boundary points
    replicated over N_t time slices and then randomly subsampled down to
    roughly N_bc/4 points per boundary pair.

    NOTE(review): uses the global numpy RNG without a seed -- results are
    not reproducible unless the caller seeds np.random.
    """
    x_l, x_r, y_d, y_u = outer_bc_geometry
    xc_l, xc_r, yc_d, yc_u = inner_bc_geometry
    N_x, N_y, N_t, N_bc = bc_num
    # Split the total BC budget evenly over the four boundary groups.
    N_bc = N_bc // 4 + 1
    # generate bc for outer boundary
    left_points = np.stack((np.ones(N_y) * x_l, np.linspace(y_d, y_u, N_y)), 1)
    right_points = np.stack((np.ones(N_y) * x_r, np.linspace(y_d, y_u, N_y)), 1)
    t_lr = np.repeat(np.linspace(0, T_end, N_t), N_y).reshape(-1, 1)
    X_left = np.hstack((t_lr, np.vstack([left_points for _ in range(N_t)])))
    X_right = np.hstack((t_lr, np.vstack([right_points for _ in range(N_t)])))
    X_lr = np.concatenate((X_left, X_right), 1)
    # Random subsample of the left/right points.
    lr_idx = np.random.choice(len(X_lr), size=N_bc, replace=False)
    X_lr = X_lr[lr_idx]
    down_points = np.stack((np.linspace(x_l, x_r, N_x), np.ones(N_x) * y_d), 1)
    up_points = np.stack((np.linspace(x_l, x_r, N_x), np.ones(N_x) * y_u), 1)
    t_du = np.repeat(np.linspace(0, T_end, N_t), N_x).reshape(-1, 1)
    X_down = np.hstack((t_du, np.vstack([down_points for _ in range(N_t)])))
    X_up = np.hstack((t_du, np.vstack([up_points for _ in range(N_t)])))
    X_du = np.concatenate((X_down, X_up), 1)
    ud_idx = np.random.choice(len(X_du), size=N_bc, replace=False)
    X_du = X_du[ud_idx]
    X_bc_outer = (X_lr, X_du)
    # generate bc for inner boundary (same construction with the inner
    # rectangle's corners)
    left_points = np.stack((np.ones(N_y) * xc_l, np.linspace(yc_d, yc_u, N_y)), 1)
    right_points = np.stack((np.ones(N_y) * xc_r, np.linspace(yc_d, yc_u, N_y)), 1)
    t_lr = np.repeat(np.linspace(0, T_end, N_t), N_y).reshape(-1, 1)
    X_left = np.hstack((t_lr, np.vstack([left_points for _ in range(N_t)])))
    X_right = np.hstack((t_lr, np.vstack([right_points for _ in range(N_t)])))
    X_lr = np.concatenate((X_left, X_right), 1)
    lr_idx = np.random.choice(len(X_lr), size=N_bc, replace=False)
    X_lr = X_lr[lr_idx]
    down_points = np.stack((np.linspace(xc_l, xc_r, N_x), np.ones(N_x) * yc_d), 1)
    up_points = np.stack((np.linspace(xc_l, xc_r, N_x), np.ones(N_x) * yc_u), 1)
    t_du = np.repeat(np.linspace(0, T_end, N_t), N_x).reshape(-1, 1)
    X_down = np.hstack((t_du, np.vstack([down_points for _ in range(N_t)])))
    X_up = np.hstack((t_du, np.vstack([up_points for _ in range(N_t)])))
    X_du = np.concatenate((X_down, X_up), 1)
    ud_idx = np.random.choice(len(X_du), size=N_bc, replace=False)
    X_du = X_du[ud_idx]
    X_bc_inner = (X_lr, X_du)
    return X_bc_outer, X_bc_inner
31,324
def test_upsert_return_created_values():
    """
    Tests that values that are created are returned properly when
    returning is True.
    """
    results = pgbulk.upsert(
        models.TestModel,
        [
            models.TestModel(int_field=1),
            models.TestModel(int_field=3),
            models.TestModel(int_field=4),
        ],
        ['int_field'],    # unique columns used for conflict detection
        ['float_field'],  # columns updated on conflict
        returning=True,
    )

    assert len(list(results.created)) == 3
    # Created rows should come back with their int_field values and a
    # database-assigned primary key.
    for test_model, expected_int in zip(
        sorted(results.created, key=lambda k: k.int_field), [1, 3, 4]
    ):
        assert test_model.int_field == expected_int
        assert test_model.id is not None
    assert models.TestModel.objects.count() == 3
31,325
def extractInfiniteNovelTranslations(item):
    """
    # Infinite Novel Translations

    Map a scraped release item to a release message based on its tags.
    Returns None for previews/untyped items, False when no tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Skip items with no chapter/volume info and preview posts.
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None

    # (tag on the post, canonical series name, translation type)
    tagmap = [
        ('Ascendance of a Bookworm', 'Ascendance of a Bookworm', 'translated'),
        ('Yomigaeri no Maou', 'Yomigaeri no Maou', 'translated'),
        ('Kakei Senki wo Kakageyo!', 'Kakei Senki wo Kakageyo!', 'translated'),
        ('Kuro no Shoukan Samurai', 'Kuro no Shoukan Samurai', 'translated'),
        ('Nidoume no Jinsei wo Isekai de', 'Nidoume no Jinsei wo Isekai de', 'translated'),
        ('Hachi-nan', 'Hachinan tte, Sore wa Nai Deshou!', 'translated'),
        ('Summoned Slaughterer', 'Yobidasareta Satsuriku-sha', 'translated'),
        ('maou no utsuwa', 'Maou no Utsuwa', 'translated'),
        ('Maou no Ki', 'Maou no Ki', 'translated'),
        ('Imperial wars and my stratagems', 'Imperial Wars and my Stratagems', 'translated'),
        ('Kuro no Shoukanshi', 'Kuro no Shoukanshi', 'translated'),
        ('I work as Healer in Another World\'s Labyrinth City', 'I work as Healer in Another World\'s Labyrinth City', 'translated'),
        ('The Spearmaster and The Black Cat', 'The Spearmaster and The Black Cat', 'translated'),
        ('Hakai no Miko', 'Hakai no Miko', 'translated'),
    ]

    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)

    return False
31,326
def set_output_dir(
    data: dict, rel_path: str, default: str = DEFAULT_DIR
) -> None:
    """Set the 'output_dir' key correctly on a dictionary.

    Resolves the directory via get_output_dir(), creates it on disk if
    missing, and stores the resulting path under ``data["output_dir"]``.
    """
    out_dir = get_output_dir(data, rel_path, default)
    # Ensure the directory exists before anything writes into it.
    os.makedirs(out_dir, exist_ok=True)
    data["output_dir"] = out_dir
31,327
def fill_diagonal(matrix, value, k=0, unpadded_dim=None):
    """
    Returns a matrix identical to `matrix` except that the `k'th`
    diagonal has been overwritten with the value `value`.

    Args:
        matrix: Matrix whose diagonal to fill.
        value: The value to fill the diagonal with.
        k: The diagonal to fill.
        unpadded_dim: If specified, only the `unpadded_dim x unpadded_dim`
            top left block will be filled.

    Returns:
        A copy of `matrix`, with the `k'th` diagonal replaced by `value`.
    """
    # Boolean mask selecting the k'th diagonal (project helper).
    replace_here = on_kth_diagonal(matrix.shape, k=k, unpadded_dim=unpadded_dim)
    replace_with = jnp.full(replace_here.shape[1], value)
    # Functional update: jnp.where builds a new array; `matrix` itself is
    # never mutated (JAX arrays are immutable).
    return jnp.where(replace_here, x=replace_with, y=matrix)
31,328
def loop_filter(data, images, features, matches, pairs):
    """
    if there's an edge between (i, j) where i and j are sequence numbers
    far apart, check that there also exists an edge
    (i plus/minus k, j plus/minus k), where k is a small integer, and that
    the loop formed by the four nodes pass the multiplying-to-identity
    check. if so, this is a valid "quad". we then merge quads into
    clusters. each cluster is a loop candidate. we perform checks on the
    candidates to filter out bad ones, and remove all edges in them.

    :param data: dataset providing the filtering config
    :param images: image collection
    :param features: per-image features
    :param matches: dict of pairwise matches, mutated in place
    :param pairs: all candidate pairs (used for percentage logging)
    :return: the filtered `matches` dict
    """
    logger.debug("loop pass 1 filtering start")
    common_feature_thresh = data.config['filtering_common_feature_thresh']
    # TODO: cren optionize the following thresholds
    gap = 6
    edges_to_remove = []
    all_valid_triplets = []
    # Pass 1: any long-range edge must be supported by a valid triplet,
    # otherwise it is dropped.
    for (im1, im2) in matches:
        if abs(_shot_id_to_int(im1) - _shot_id_to_int(im2)) > gap:
            valid_triplets = get_valid_triplets(im1, im2, matches, pairs)
            if valid_triplets:
                all_valid_triplets.extend(valid_triplets)
            else:
                edges_to_remove.append((im1, im2))

    for edge in sorted(edges_to_remove):
        logger.debug("loop pass 1 removing edge {} -{}".format(edge[0], edge[1]))
        matches.pop(edge)

    logger.debug("loop pass 1 filtering end, removed {} edges, {:2.1f}% of all".
                 format(len(edges_to_remove), 100*len(edges_to_remove)/len(pairs)))

    logger.debug("loop pass 2 filtering start")
    radius = gap/2
    valid_triplets_set = set(tuple(triplet) for triplet in all_valid_triplets)

    # cluster quads into loop candidates
    loop_candidates = cluster_triplets(valid_triplets_set, radius)

    # apply various checks to figure out bad loop candidates
    bad_candidates = filter_candidates(images, loop_candidates, matches, features, pairs, common_feature_thresh)

    # remove matches in bad loop candidates
    edges_to_remove = set()
    for cand in bad_candidates:
        loop_candidates.remove(cand)
        for im1 in cand.get_ids_0():
            for im2 in cand.get_ids_1():
                if abs(_shot_id_to_int(im1) - _shot_id_to_int(im2)) > radius:
                    # The matches dict is keyed on ordered pairs; check
                    # both orientations.
                    if (im1, im2) in matches:
                        edges_to_remove.add((im1, im2))
                    elif (im2, im1) in matches:
                        edges_to_remove.add((im2, im1))

    for edge in sorted(edges_to_remove):
        #logger.debug("loop removing edge {} -{}".format(edge[0], edge[1]))
        matches.pop(edge)

    logger.debug("loop pass 2 filtering end, removed {} edges, {:2.1f}% of all".
                 format(len(edges_to_remove), 100*len(edges_to_remove)/len(pairs)))
    return matches
31,329
def container_model(*, model: type, caption: str, icon: Optional[str]) -> Callable:
    """Class decorator declaring a container for a :func:`data_model`.

    Like :func:`data_model`, a container groups plugin-defined properties
    for declarative UI building; in addition it holds a reference to a
    child model class, making the container the parent of every instance
    of that model created through the UI. The application automatically
    wires "create new model" and "remove" actions for the contained model
    (available from the Tree context menu and the Model Explorer).

    :param model: A reference to a class decorated with :func:`data_model`.
    :param caption: A text to be displayed over the Tree.
    :param icon: Name of the icon to be used over the Tree. Required, but
        not currently used by the application.
    :return: The decorator that turns the class into a container model.
    """

    def apply(class_):
        # functools.wraps transfers the decorated class' metadata onto the
        # builder before it is immediately invoked.
        @functools.wraps(class_)
        def build(cls, cap, ico):
            return get_attr_class(cls, cap, ico, model)

        return build(class_, caption, icon)

    return apply
31,330
def ward_quick(G, feature, verbose = 0):
    """
    Agglomerative function based on a topology-defining graph
    and a feature matrix.

    Parameters
    ----------
    G graph instance,
      topology-defining graph
    feature: array of shape (G.V,dim_feature):
      some vectorial information related to the graph vertices

    Returns
    -------
    t: weightForest instance,
       that represents the dendrogram of the data

    NOTE
    ----
    Hopefully a quicker version
    A euclidean distance is used in the feature space
    Caveat : only approximate

    NOTE(review): this is Python 2 code (print statement, old-style
    raise); it will not run under Python 3 without porting.
    """
    # basic check
    if feature.ndim==1:
        feature = np.reshape(feature, (-1, 1))
    if feature.shape[0]!=G.V:
        raise ValueError, "Incompatible dimension for the\
        feature matrix and the graph"

    # Running sums per (possibly merged) node: count, sum, sum of squares.
    Features = [np.ones(2*G.V), np.zeros((2*G.V, feature.shape[1])),
                np.zeros((2*G.V, feature.shape[1]))]
    Features[1][:G.V] = feature
    Features[2][:G.V] = feature**2
    """
    Features = []
    for i in range(G.V):
        Features.append(np.reshape(feature[i],(1,feature.shape[1])))
    """
    n = G.V
    nbcc = G.cc().max()+1

    # prepare a graph with twice the number of vertices
    K = _auxiliary_graph(G,Features)
    parent = np.arange(2*n-nbcc).astype(np.int)
    pop = np.ones(2*n-nbcc).astype(np.int)
    height = np.zeros(2*n-nbcc)
    linc = K.left_incidence()
    rinc = K.right_incidence()

    # iteratively merge clusters
    q = 0
    while (q<n-nbcc):
        # 1. find the lightest edges
        aux = np.zeros(2*n)
        ape = np.nonzero(K.weights<np.infty)
        ape = np.reshape(ape,np.size(ape))
        idx = np.argsort(K.weights[ape])
        # Greedily pick a batch of disjoint cheap edges (no shared
        # endpoints) to merge in one sweep.
        for e in range(n-nbcc-q):
            i,j = K.edges[ape[idx[e]],0], K.edges[ape[idx[e]],1]
            if aux[i]==1: break
            if aux[j]==1: break
            aux[i]=1
            aux[j]=1
        emax = np.maximum(e,1)
        for e in range(emax):
            m = ape[idx[e]]
            cost = K.weights[m]
            k = q+n
            #if K.weights[m]>=stop: break
            i = K.edges[m,0]
            j = K.edges[m,1]
            height[k] = cost
            if verbose:
                print q,i,j, m,cost
            # 2. remove the current edge
            K.edges[m,:] = -1
            K.weights[m] = np.infty
            linc[i].remove(m)
            rinc[j].remove(m)
            ml = linc[j]
            if np.sum(K.edges[ml,1]==i)>0:
                m = ml[np.flatnonzero(K.edges[ml,1]==i)]
                K.edges[m,:] = -1
                K.weights[m] = np.infty
                linc[j].remove(m)
                rinc[i].remove(m)
            # 3. merge the edges with third part edges
            parent[i] = k
            parent[j] = k
            for p in range(3):
                Features[p][k] = Features[p][i] + Features[p][j]
            """
            totalFeatures = np.vstack((Features[i], Features[j]))
            Features.append(totalFeatures)
            Features[i] = []
            Features[j] = []
            """
            linc,rinc = _remap(K, i, j, k, Features, linc, rinc)
            q+=1

    # build a tree to encode the results
    t = WeightedForest(2*n-nbcc, parent, height)
    return t
31,331
def conv3x3(in_planes, out_planes, stride=1, output_padding=0):
    """3x3 convolution transpose with padding"""
    return nn.ConvTranspose2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        output_padding=output_padding,
        bias=False,
    )
31,332
def unwrap(func):
    """Returns the object wrapped by decorators.

    Follows the ``__wrapped__`` chain (set by ``functools.wraps``) until
    reaching an object that carries no wrapper attribute.
    """
    current = func
    while hasattr(current, '__wrapped__'):
        current = current.__wrapped__
    return current
31,333
def get_payload_from_scopes(scopes):
    """
    Get a dict to be used in JWT payload.

    Just merge this dict with the JWT payload.

    :type scopes: list[rest_jwt_permission.scopes.Scope]
    :return: dictionary to be merged with the JWT payload
    :rtype: dict
    """
    # The payload key name is configurable via project settings.
    return {
        get_setting("JWT_PAYLOAD_SCOPES_KEY"): [
            scope.identifier for scope in scopes
        ]
    }
31,334
def missing_keys_4(data: Dict, lprint=print, eprint=print):
    """ Add keys: _max_eval_all_epoch, _max_seen_train, _max_seen_eval,
    _finished_experiment

    Migrates an experiment-results dict in place. Returns 1 when the keys
    were added, 0 when the dict was already migrated.
    """
    if "_finished_experiment" not in data:
        lprint(f"Add keys _finished_experiment ...")

        # Count evaluation entries across all epochs/tasks.
        # NOTE(review): counters start at -1, so these are counts minus
        # one -- consistent with the `max_train + 1` comparison below.
        max_eval = -1
        for k1, v1 in data["_eval_trace"].items():
            for k2, v2 in v1.items():
                max_eval += len(v2)

        max_train = -1
        for k1, v1 in data["_train_trace"].items():
            for k2, v2 in v1.items():
                max_train += len(v2)

        data["_max_eval_all_epoch"] = max_eval
        data["_max_train_all_epoch"] = max_train
        data["_max_seen_train"] = max_seen_train = max(data["_train_trace"].keys())
        data["_max_seen_eval"] = max_seen_eval = max(data["_eval_trace"].keys())

        # Check if finished or no
        no_tasks = len(data["_task_info"])
        epochs_per_task = data["_args"]["train"]["epochs_per_task"]
        should_train = no_tasks * epochs_per_task
        reached_max_train = should_train == max_train + 1
        same_seen = data["_max_seen_train"] == data["_max_seen_eval"]
        all_final_tasks_evaluated = len(data["_eval_trace"][max_seen_eval]) == no_tasks
        data["_finished_experiment"] = reached_max_train \
            and same_seen and all_final_tasks_evaluated
        return 1
    return 0
31,335
def translate(txt):
    """Takes a plain czech text as an input and returns its phonetic
    transcription.

    The text is lowercased and then piped through the replacement stages
    in order: simple, regex, chain, and finally grind.
    """
    result = txt.lower()
    for stage in (simple_replacement, regex_replacement, chain_replacement, grind):
        result = stage(result)
    return result
31,336
def struct_getfield_longlong(ffitype, addr, offset):
    """
    Return the field of type ``ffitype`` at ``addr+offset``, casted to
    lltype.LongLong.
    """
    # NOTE(review): `ffitype` is unused in the body; it appears to exist
    # for dispatch-signature uniformity with the sibling getters --
    # confirm against the other struct_getfield_* variants.
    value = _struct_getfield(lltype.SignedLongLong, addr, offset)
    return value
31,337
def test_connect(loop, conn):
    """Verify the default attributes of a freshly created connection."""
    assert conn.loop is loop
    assert conn.timeout == 5
    assert not conn.closed
    assert not conn.autocommit
    # BUG FIX: `is ''` compared identity, which is implementation-defined
    # for strings and a SyntaxWarning on CPython >= 3.8; equality is what
    # is meant here.
    assert conn.isolation_level == ''
    assert conn.row_factory is None
    assert conn.text_factory is str
31,338
def subscribe(request):
    """View to subscribe the logged in user to a channel.

    POST keys of the form ``youtube-<id>`` / ``twitch-<id>`` select the
    channels; the ``action`` key chooses Subscribe, Unsubscribe, or
    Remove from history. Always redirects back to the subscriptions page.
    """
    if request.method == "POST":
        channels = set()
        users = set()
        # Collect the targeted channels/users from the checkbox keys.
        for key in request.POST:
            if key.startswith("youtube-"):
                channel_id = key[8:]
                if models.YoutubeChannel.objects.filter(id=channel_id).exists():
                    channels.add(models.YoutubeChannel.objects.get(id=channel_id))
            elif key.startswith("twitch-"):
                user_id = key[7:]
                if models.TwitchUser.objects.filter(id=user_id).exists():
                    users.add(models.TwitchUser.objects.get(id=user_id))
        action = request.POST.get("action")
        if action == "Subscribe":
            # Create subscriptions, skipping ones that already exist.
            for channel in channels:
                if not models.YoutubeSubscription.objects.filter(channel=channel, user=request.user).exists():
                    models.YoutubeSubscription.objects.create(channel=channel, user=request.user)
            for user in users:
                if not models.TwitchSubscription.objects.filter(channel=user, user=request.user).exists():
                    models.TwitchSubscription.objects.create(channel=user, user=request.user)
        elif action == "Unsubscribe" or action == "Remove from history":
            for channel in channels:
                for entry in models.YoutubeSubscription.objects.filter(channel=channel, user=request.user):
                    entry.delete()
            for user in users:
                for entry in models.TwitchSubscription.objects.filter(channel=user, user=request.user):
                    entry.delete()
            history = getattr(request.user, "subscriptionhistory", None)
            if action == "Remove from history" and history is not None:
                for channel in channels:
                    history.youtube.remove(channel)
                for user in users:
                    history.twitch.remove(user)
    return redirect("notifpy:subscriptions")
31,339
def generate_orders(events, sell_delay=5, sep=','):
    """Generate CSV orders based on events indicated in a DataFrame

    Arguments:
      events (pandas.DataFrame): Table of NaNs or 1's, one column for
        each symbol. 1 indicates a BUY event. -1 a SELL event. nan or 0
        is a nonevent.
      sell_delay (float): Number of days to wait before selling back the
        shares bought
      sep (str or None): if sep is None, orders will be returns as tuples
        of `int`s, `float`s, and `str`s otherwise the separator will be
        used to join the order parameters into the yielded str

    Returns:
      generator of str: yielded CSV rows in the format
      (yr, mo, day, symbol, Buy/Sell, shares)

    NOTE(review): Python 2 code (`unicode`, `iteritems`, `basestring`).
    Also, `sep.join(order)` is applied to a tuple containing ints, which
    would raise TypeError when sep is a string, and both the joined row
    AND the tuple are yielded -- confirm intended behaviour before reuse.
    """
    sell_delay = float(unicode(sell_delay)) or 1
    for i, (t, row) in enumerate(events.iterrows()):
        for sym, event in row.to_dict().iteritems():
            # print sym, event, type(event)
            # return events
            if event and not np.isnan(event):
                # add a sell event `sell_delay` in the future within the existing `events` DataFrame
                # modify the series, but only in the future and be careful not to step on existing events
                if event > 0:
                    sell_event_i = min(i + sell_delay, len(events) - 1)
                    sell_event_t = events.index[sell_event_i]
                    sell_event = events[sym][sell_event_i]
                    if np.isnan(sell_event):
                        events[sym][sell_event_t] = -1
                    else:
                        events[sym][sell_event_t] += -1
                order = (t.year, t.month, t.day, sym, 'Buy' if event > 0 else 'Sell', abs(event) * 100)
                if isinstance(sep, basestring):
                    yield sep.join(order)
                yield order
31,340
def test__parser__match_construct(method, input_func, raw_seg): """Test construction of MatchResults.""" # Let's make our input src = input_func(raw_seg) # Test construction getattr(MatchResult, method)(src)
31,341
def RunCommand(cmd, print_cmd=True, error_ok=False, error_message=None,
               exit_code=False, redirect_stdout=False, redirect_stderr=False,
               cwd=None, input=None, enter_chroot=False, num_retries=0,
               log_to_file=None, combine_stdout_stderr=False):
  """Runs a shell command.

  Arguments:
    cmd: cmd to run. Should be input to subprocess.Popen. If a string,
      converted to an array using split().
    print_cmd: prints the command before running it.
    error_ok: does not raise an exception on error.
    error_message: prints out this message when an error occurs.
    exit_code: returns the return code of the shell command.
    redirect_stdout: returns the stdout.
    redirect_stderr: holds stderr output until input is communicated.
    cwd: the working directory to run this cmd.
    input: input to pipe into this command through stdin.
    enter_chroot: this command should be run from within the chroot. If set,
      cwd must point to the scripts directory.
    num_retries: the number of retries to perform before dying
    log_to_file: Redirects all stderr and stdout to file specified by this
      path.
    combine_stdout_stderr: Combines stdout and stderr streams into stdout.
      Auto set to true if log_to_file specifies a file.

  Returns:
    If exit_code is True, returns the return code of the shell command.
    Else returns the output of the shell command.

  Raises:
    Exception: Raises RunCommandException on error with optional
      error_message, but only if exit_code and error_ok are both False.
  """
  # Set default for variables.
  stdout = None
  stderr = None
  stdin = None
  file_handle = None
  output = ''
  # Modify defaults based on parameters.
  if log_to_file:
    # Logging to a file overrides any redirect_* request: both streams go
    # to the same file handle.
    file_handle = open(log_to_file, 'w+')
    stdout = file_handle
    stderr = file_handle
  else:
    if redirect_stdout:
      stdout = subprocess.PIPE
    if redirect_stderr:
      stderr = subprocess.PIPE
    if combine_stdout_stderr:
      stderr = subprocess.STDOUT
  if input:
    stdin = subprocess.PIPE
  if enter_chroot:
    # Re-invoke the command inside the chroot wrapper.
    cmd = ['cros_sdk', '--'] + cmd
  # Print out the command before running.
  cmd_string = 'PROGRAM(%s) -> RunCommand: %r in dir %s' % (_GetCallerName(),
                                                            cmd, cwd)
  if print_cmd:
    if not log_to_file:
      _Info(cmd_string)
    else:
      _Info('%s -- Logging to %s' % (cmd_string, log_to_file))
  # NOTE(review): on retries the same `stdin`/file handles are reused; if
  # `log_to_file` is set, later attempts append after earlier output.
  for retry_count in range(num_retries + 1):
    # If it's not the first attempt, it's a retry
    if retry_count > 0 and print_cmd:
      _Info('PROGRAM(%s) -> RunCommand: retrying %r in dir %s' %
            (_GetCallerName(), cmd, cwd))
    proc = subprocess.Popen(cmd, cwd=cwd, stdin=stdin, stdout=stdout,
                            stderr=stderr, close_fds=True)
    (output, error) = proc.communicate(input)
    # if the command worked, don't retry any more.
    if proc.returncode == 0:
      break
  if file_handle:
    file_handle.close()
  # If they asked for an exit_code, give it to them on success or failure
  if exit_code:
    return proc.returncode
  # If the command (and all retries) failed, handle error result
  if proc.returncode != 0 and not error_ok:
    if output:
      # Python 2 print statement: dump captured output to stderr.
      print >> sys.stderr, output
      sys.stderr.flush()
    # `error` may be None when stderr was not piped.
    error_info = ('Command "%r" failed.\n' % (cmd) +
                  (error_message or error or ''))
    if log_to_file:
      error_info += '\nOutput logged to %s' % log_to_file
    raise RunCommandException(error_info)
  # return final result
  return output
31,342
def eval_accuracies(hypotheses, references, sources=None, filename=None, mode='dev'):
    """An unofficial evaluation helper.

    Computes corpus-level BLEU, ROUGE-L and (in 'test' mode only) METEOR over
    the given predictions, optionally dumping one JSON log line per instance.

    Arguments:
        hypotheses: A mapping from instance id to predicted sequences.
        references: A mapping from instance id to ground truth sequences.
        sources: Optional map of id --> input text sequence.
        filename: Optional path for the per-instance JSON log.
        mode: 'test' enables the (slower) METEOR computation.
    """
    assert sorted(references.keys()) == sorted(hypotheses.keys())

    # Corpus-level and per-instance BLEU.
    _, bleu, ind_bleu = corpus_bleu(hypotheses, references)

    # ROUGE-L, corpus and per instance.
    rouge_l, ind_rouge = Rouge().compute_score(references, hypotheses)

    # METEOR only in test mode -- it is expensive.
    meteor = Meteor().compute_score(references, hypotheses)[0] if mode == 'test' else 0

    if filename:
        with open(filename, 'w') as log_file:
            for key in references.keys():
                entry = OrderedDict()
                entry['id'] = key
                if sources is not None:
                    entry['code'] = sources[key]
                entry['predictions'] = hypotheses[key]
                entry['references'] = references[key]
                entry['bleu'] = ind_bleu[key]
                entry['rouge_l'] = ind_rouge[key]
                print(json.dumps(entry), file=log_file)

    return bleu * 100, rouge_l * 100, meteor * 100
31,343
def perform_login(db: Session, user: FidesopsUser) -> ClientDetail:
    """Perform a login: stamp the user's last_login_at and return the
    associated ClientDetail, creating one (with the user's scopes) when the
    user has no client yet."""
    client: ClientDetail = user.client
    if not client:
        logger.info("Creating client for login")
        client, _ = ClientDetail.create_client_and_secret(
            db,
            user.permissions.scopes,
            user_id=user.id,
        )

    user.last_login_at = datetime.utcnow()
    user.save(db)

    return client
31,344
def check(e, compiler_params, cmd=None, time_error=False, nerror=-1, nground=-1,
          nsamples=0, precompute_samples=None, print_command=False, log_rho=False,
          do_copy=True, extra_args='', do_run=True, do_compile=True,
          get_command=False, skip_save=False, ndims=-1, our_id=None,
          sanity_code=None, code_only=False):
    """
    Convert Expr to finalized source code and (by default) run to check the
    correctness or performance of the output code.

    If time_error is True then return time and error information in a dict.
    Here nerror and nground control the number of samples to estimate the error
    and estimate the ground truth by convolution (if -1, use default).

    If log_rho is True then return log_rho as a key in a dict containing
    correlation coefficients.

    If precompute_samples is an integer then set the #define
    PRECOMPUTE_SAMPLES to the given integer in problems.cpp.

    If do_compile is True then generate and compile C++ code.

    If do_run is True then run the target program (unless get_command is True,
    in which case, do not run but instead return the command to run (with full
    path-name included).
    """
    ans = {}
    # Generated sources and the runner script live in the apps directory.
    csolver_path = '../apps'
    if our_id is None:
        # Unique per machine/process so parallel checks don't clobber files.
        our_id = util.machine_process_id()
    h_filename = our_id + COMPILER_PROBLEM_PY
    orig_h_filename = 'compiler_problem_orig.py'
    if ndims <= 0:
        # Infer the dimensionality from the expression's argument array,
        # falling back to 1-D when there is none.
        try:
            arg_array = locate_argument_array(e)
            arg_array_ndims = arg_array.ndims
        except NoArgumentArray:
            arg_array_ndims = 1
    if cmd is None:
        # Build the default runner command line.
        check_g_int = 3
        if nsamples != 0:
            check_g_int = 0
        check_command_begin = 'python tf_parser.py '
        check_command = check_command_begin + '--ndims %d --check_g %d --samples %d' % (
            ndims if ndims > 0 else arg_array_ndims, check_g_int, nsamples)
    else:
        check_command = cmd
    if time_error:
        check_command += ' --error %d --ground %d' % (nerror, nground)
    if log_rho:
        check_command += ' --print_rho 1'
    if len(extra_args):
        check_command += ' ' + extra_args
    if skip_save:
        check_command += ' --skip_save 1'
    check_command += ' --our_id %s' % our_id
    if do_compile:
        T0 = time.time()
        source = to_source(e, compiler_params, info_d=ans, do_copy=do_copy,
                           sanity_code=sanity_code)
        T1 = time.time()
        h_filename_full = os.path.join(csolver_path, h_filename)
        # Only rewrite the generated file when its content changed, to avoid
        # needless rebuilds.
        current_source = ''
        if os.path.exists(h_filename_full):
            with open(h_filename_full, 'rt') as f:
                current_source = f.read()
        if current_source != source:
            with open(h_filename_full, 'wt') as f:
                f.write(source)
        # NOTE(review): assumed to run on every compile (not only on change);
        # confirm against the original formatting.
        shutil.copyfile(h_filename_full, orig_h_filename)
        if print_benchmark:
            print('Generated C++ code in %f seconds' % (T1 - T0))
    # The runner must execute from the apps directory; restore cwd afterwards.
    old_path = os.getcwd()
    os.chdir(csolver_path)
    if print_command and do_run:
        print(check_command)
    if get_command:
        # Caller only wants the command line, not an execution.
        ans = check_command
        do_run = False
    if do_run:
        T0 = time.time()
        check_out = subprocess.check_output(check_command, shell=True)
        T1 = time.time()
        check_out = check_out.decode('utf-8')
    os.chdir(old_path)
    if get_command:
        return ans
    return ans
31,345
def _bytestring_to_textstring(bytestring: str, number_of_registers: int = 16) -> str:
    """Convert a bytestring read from the slave to a text string.

    Each 16-bit register in the slave is interpreted as two characters
    (1 byte = 8 bits), so the input must be exactly
    2 * number_of_registers characters long. Not much conversion is done --
    this is mostly error checking; the text comes back unchanged.

    Args:
        bytestring: The string from the slave
            (length = 2 * number_of_registers).
        number_of_registers: The number of registers allocated for the string.

    Returns:
        The text string (str).

    Raises:
        TypeError, ValueError
    """
    _check_int(
        number_of_registers,
        minvalue=1,
        maxvalue=_MAX_NUMBER_OF_REGISTERS_TO_READ,
        description="number of registers",
    )
    expected_length = _NUMBER_OF_BYTES_PER_REGISTER * number_of_registers
    _check_string(
        bytestring, "byte string", minlength=expected_length, maxlength=expected_length
    )
    return bytestring
31,346
def sigma_XH(elem, Teff=4500., M_H=0., SNR=100., dr=None):
    """
    NAME:
       sigma_XH
    PURPOSE:
       Return the uncertainty in a given element at the specified effective
       temperature, metallicity and signal-to-noise ratio (functional form
       taken from Holtzman et al 2015).
    INPUT:
       elem - string element name following the ASPCAP star naming convention,
              i.e. for DR12 carbon the string is 'C_H'
       Teff - effective temperature (K) or array thereof, defaults to 4500
       M_H - metallicity or array thereof, defaults to 0
       SNR - signal to noise ratio or array thereof, defaults to 100
       dr - data release
    OUTPUT:
       float or array depending on the shape of the Teff, M_H and SNR input
    HISTORY:
       2017-07-24 - Written - Price-Jones (UofT)
    """
    if dr is None:
        dr = appath._default_dr()
    A, B, C, D = drcoeffs[dr][elem]
    # Linear model in scaled temperature, metallicity and SNR offsets.
    log_sigma = (A
                 + B * ((Teff - 4500.) / 1000.)
                 + C * M_H
                 + D * (SNR - 100))
    return numpy.exp(log_sigma)
31,347
def search(outputpath: str, query: Optional[str] = None, since: Optional[datetime.date] = None,
           until: Optional[datetime.date] = None, limit: Optional[int] = None,
           limit_per_database: Optional[int] = None, databases: Optional[List[str]] = None,
           publication_types: Optional[List[str]] = None, scopus_api_token: Optional[str] = None,
           ieee_api_token: Optional[str] = None, proxy: Optional[str] = None,
           verbose: Optional[bool] = False):
    """
    When you have a query and need to get papers using it, this is the method
    that you'll need to call. This method will find papers from some databases
    based on the provided query and save the result to ``outputpath``.

    Parameters
    ----------
    outputpath : str
        A valid file path where the search result file will be placed.
    query : str, optional
        A query string used to perform the papers search. If not provided, it
        is loaded from the environment variable FINDPAPERS_QUERY.
        All query terms need to be enclosed in quotes and can be associated
        using uppercase boolean operators and grouped using parentheses, e.g.:
        [term A] AND ([term B] OR [term C]) AND NOT [term D].
        Wildcards: ? replaces a single character, * any number of characters
        ("son?" matches song, sons, ...; "son*" matches sonar, songwriting...).
        The operator "NOT" must be preceded by an "AND" operator.
    since : Optional[datetime.date], optional
        A lower bound (inclusive) date used to filter the results, by default None.
    until : Optional[datetime.date], optional
        An upper bound (inclusive) date used to filter the results, by default None.
    limit : Optional[int], optional
        The max number of papers to collect, by default None.
    limit_per_database : Optional[int], optional
        The max number of papers to collect per database, by default None.
    databases : List[str], optional
        List of databases where the search should be performed; all databases
        are used if not specified, by default None.
    publication_types : List[str], optional
        Publication types to keep (case insensitive): journal, conference
        proceedings, book, other. All types are collected if not specified.
    scopus_api_token : Optional[str], optional
        API token used to fetch data from the Scopus database
        (see https://dev.elsevier.com), by default None.
    ieee_api_token : Optional[str], optional
        API token used to fetch data from the IEEE database
        (see https://developer.ieee.org), by default None.
    proxy : Optional[str], optional
        Proxy URL used during requests; can also be defined by the
        environment variable FINDPAPERS_PROXY. By default None.
    verbose : Optional[bool], optional
        If you want verbose logging.
    """
    common_util.logging_initialize(verbose)
    if proxy is not None:
        os.environ['FINDPAPERS_PROXY'] = proxy
    logging.info('Let\'s find some papers, this process may take a while...')
    # Normalize user-provided filters before validating them.
    if databases is not None:
        databases = [x.lower() for x in databases]
    if publication_types is not None:
        publication_types = [x.lower().strip() for x in publication_types]
        for publication_type in publication_types:
            if publication_type not in ['journal', 'conference proceedings', 'book', 'other']:
                raise ValueError(f'Invalid publication type: {publication_type}')
    if query is None:
        query = os.getenv('FINDPAPERS_QUERY')
    if query is not None:
        query = _sanitize_query(query)
    if query is None or not _is_query_ok(query):
        raise ValueError('Invalid query format')
    # Fail early if the output file cannot be written.
    common_util.check_write_access(outputpath)
    if ieee_api_token is None:
        ieee_api_token = os.getenv('FINDPAPERS_IEEE_API_TOKEN')
    if scopus_api_token is None:
        scopus_api_token = os.getenv('FINDPAPERS_SCOPUS_API_TOKEN')
    search = Search(query, since, until, limit, limit_per_database,
                    databases=databases, publication_types=publication_types)
    # Run each searcher inside a guard so one database failing does not
    # abort the whole search.
    if databases is None or arxiv_searcher.DATABASE_LABEL.lower() in databases:
        _database_safe_run(lambda: arxiv_searcher.run(search),
                           search, arxiv_searcher.DATABASE_LABEL)
    if databases is None or pubmed_searcher.DATABASE_LABEL.lower() in databases:
        _database_safe_run(lambda: pubmed_searcher.run(search),
                           search, pubmed_searcher.DATABASE_LABEL)
    if databases is None or acm_searcher.DATABASE_LABEL.lower() in databases:
        _database_safe_run(lambda: acm_searcher.run(search),
                           search, acm_searcher.DATABASE_LABEL)
    # IEEE and Scopus require an API token; skip them (with a note) otherwise.
    if ieee_api_token is not None:
        if databases is None or ieee_searcher.DATABASE_LABEL.lower() in databases:
            _database_safe_run(lambda: ieee_searcher.run(search, ieee_api_token),
                               search, ieee_searcher.DATABASE_LABEL)
    else:
        logging.info('IEEE API token not found, skipping search on this database')
    if scopus_api_token is not None:
        if databases is None or scopus_searcher.DATABASE_LABEL.lower() in databases:
            _database_safe_run(lambda: scopus_searcher.run(search, scopus_api_token),
                               search, scopus_searcher.DATABASE_LABEL)
    else:
        logging.info('Scopus API token not found, skipping search on this database')
    if databases is None or medrxiv_searcher.DATABASE_LABEL.lower() in databases:
        _database_safe_run(lambda: medrxiv_searcher.run(search),
                           search, medrxiv_searcher.DATABASE_LABEL)
    if databases is None or biorxiv_searcher.DATABASE_LABEL.lower() in databases:
        _database_safe_run(lambda: biorxiv_searcher.run(search),
                           search, biorxiv_searcher.DATABASE_LABEL)
    logging.info('Enriching results...')
    _enrich(search, scopus_api_token)
    logging.info('Filtering results...')
    _filter(search)
    logging.info('Finding and merging duplications...')
    search.merge_duplications()
    logging.info('Flagging potentially predatory publications...')
    _flag_potentially_predatory_publications(search)
    logging.info(f'It\'s finally over! {len(search.papers)} papers retrieved. Good luck with your research :)')
    persistence_util.save(search, outputpath)
31,348
def test_ipow_special_cases_two_args_equal__less_equal_1(arg1, arg2):
    """
    Special case test for `__ipow__(self, other, /)`:

        - If `x1_i` is `-infinity`, `x2_i` is less than `0`, and
          `x2_i` is an odd integer value, the result is `-0`.
    """
    res = asarray(arg1, copy=True)
    ipow(res, arg2)
    # Select positions where the base is -inf and the exponent is a
    # negative odd integer.
    base_is_neg_inf = exactly_equal(arg1, -infinity(arg1.shape, arg1.dtype))
    exp_is_neg = less(arg2, zero(arg2.shape, arg2.dtype))
    mask = logical_and(base_is_neg_inf, logical_and(exp_is_neg, isodd(arg2)))
    expected = (-zero(arg1.shape, arg1.dtype))[mask]
    assert_exactly_equal(res[mask], expected)
31,349
def plot_graph(graphs, title=None, xlabel='x', ylabel='y'):
    """
    Plot graphs using the matplotlib library.

    Parameters
    ----------
    graphs : list
        List of created graphs; each is a dict with 'x', 'y', 'color'
        and 'label' entries.
    title : str
        Title of the graph (omitted when None).
    xlabel : str
        Name of the x axis.
    ylabel : str
        Name of the y axis.
    """
    for graph in graphs:
        plt.plot(graph['x'], graph['y'], graph['color'], label=graph['label'])
    # Fix: the `title` parameter was previously accepted but never applied.
    if title is not None:
        plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.legend()
    plt.show()
31,350
def lazy_load_command(import_path: str) -> Callable:
    """Create a lazy loader for command"""
    # Keep only the final attribute name for the wrapper's __name__.
    _, _, command_name = import_path.rpartition('.')

    def command(*args, **kwargs):
        # Resolve the target callable only when actually invoked.
        target = import_string(import_path)
        return target(*args, **kwargs)

    command.__name__ = command_name  # type: ignore

    return command
31,351
def tlam(func, tup):
    """Call *func* with the elements of *tup* spread as positional arguments."""
    return func(*tup)
31,352
def sax_df_reformat(sax_data, sax_dict, meter_data, space_btw_saxseq=3):
    """Format a SAX timeseries original data for SAX heatmap plotting.

    Groups the rows of ``sax_data[meter_data]`` by their SAX sequence
    (``sax_dict[meter_data]``), orders the groups by frequency and inserts
    blank spacer rows between them; also returns a mapping used to label
    each group at its median row position.
    """
    counts_nb = Counter(sax_dict[meter_data])
    # Sort the counter dictionnary per value
    # source: https://stackoverflow.com/questions/613183/how-do-i-sort-a-dictionary-by-value
    counter = {k: v for k, v in sorted(counts_nb.items(), key=lambda item: item[1])}
    keys = counter.keys()
    new_sax_df = pd.DataFrame(columns=sax_data[meter_data].columns)
    empty_sax_df = pd.DataFrame(columns=sax_data[meter_data].columns,
                                index=[' '] * space_btw_saxseq)
    for sax_seq in keys:
        # Spacer height: full for frequent sequences (>10), proportionally
        # smaller for rare ones.
        if counter[sax_seq] > 10:
            empty_sax_df = pd.DataFrame(columns=sax_data[meter_data].columns,
                                        index=[' '] * space_btw_saxseq)
        else:
            s2 = min(int(round(space_btw_saxseq * (counter[sax_seq] / 5))),
                     space_btw_saxseq)
            empty_sax_df = pd.DataFrame(columns=sax_data[meter_data].columns,
                                        index=[' '] * s2)
        # Obtaining sax indexes of corresponding profiles within dataframe
        indexes = [i for i, x in enumerate(sax_dict[meter_data]) if x == sax_seq]  # returns all indexes
        # Formating a new dataframe from selected sax_seq
        df_block = sax_data[meter_data].iloc[indexes].copy()
        df_block["SAX"] = [sax_seq] * len(indexes)
        new_sax_df = pd.concat([df_block, empty_sax_df, new_sax_df], axis=0)
    # Reformated dataframe
    # Mapping the sax sequence to the data
    index_map_dictionary = dict()
    index_map_dictionary["SAX_seq"], index_map_dictionary["SAX_idx"] = [], []
    for sax_seq in counter:
        indexes = [i for i, x in enumerate(new_sax_df["SAX"]) if x == sax_seq]  # returns all indexes
        # Only frequent sequences (>10) get a visible label; rare ones are
        # labelled with a blank so the heatmap axis stays readable.
        if counter[sax_seq] > 10:
            index_map_dictionary["SAX_seq"].append(sax_seq)
        else:
            index_map_dictionary["SAX_seq"].append(" ")
        index_map_dictionary["SAX_idx"].append(np.median(indexes))
    # Droping the SAX column of the dataframe now that we have a mapping variable for it
    new_sax_df.drop("SAX", axis=1, inplace=True)
    return new_sax_df, index_map_dictionary
31,353
def rm_action():
    """TODO: merge with clean_action (@pgervais)"""
    parser = ArgumentParser(usage='mprof rm [options] numbers_or_filenames')
    parser.add_argument('--version', action='version', version=mp.__version__)
    parser.add_argument("--dry-run", dest="dry_run", default=False,
                        action="store_true",
                        help="""Show what will be done, without actually doing it.""")
    parser.add_argument("numbers_or_filenames", nargs='*',
                        help="""numbers or filenames removed""")
    args = parser.parse_args()

    if not args.numbers_or_filenames:
        print("A profile to remove must be provided (number or filename)")
        sys.exit(1)

    targets = get_profile_filenames(args.numbers_or_filenames)
    if args.dry_run:
        print("Files to be removed: ")
        for target in targets:
            print(target)
    else:
        for target in targets:
            os.remove(target)
31,354
def filter_not_t(func):
    """
    Transformation for Sequence.filter_not

    :param func: filter_not function
    :return: transformation
    """
    label = "filter_not({0})".format(name(func))
    return Transformation(label,
                          partial(filterfalse, func),
                          {ExecutionStrategies.PARALLEL})
31,355
def get_task_by_id(id):
    """Return the task whose ID is *id* (delegates to TaskJson).

    Note: the parameter name shadows the ``id`` builtin, but it is kept for
    caller (keyword-argument) compatibility.
    """
    return TaskJson.json_by_id(id)
31,356
def priority(floors, elevator):
    """Priority for a State."""
    # Weight every item by its floor's distance from the top (index 3),
    # and add the elevator's own distance from the top.
    item_weight = sum((3 - level) * len(contents)
                      for level, contents in enumerate(floors))
    return (3 - elevator) + item_weight
31,357
def get_user_groups(user_id: Union[int, str]) -> List[UserSerializer]:
    """
    Fetch all Groups of the given User.

    Args:
        user_id: the {login} or {id} of the target User.

    Returns:
        A list of Groups; Yuque treats Groups as Users here.
    """
    uri = f'/users/{user_id}/groups'
    # Anonymous GET -- no authentication token is attached.
    return Request.send('GET', uri, anonymous=True)
31,358
def user_news_list():
    """
    Render the current user's paginated news list.
    """
    user = g.user
    # Parse the requested page number, falling back to the first page on
    # any missing/invalid value.
    try:
        page = int(request.args.get("page"))
    except Exception as e:
        current_app.logger.error(e)
        page = 1

    # Defaults used when pagination fails.
    news_list = []
    current_page = 1
    total_page = 1
    try:
        paginate = user.news_list.paginate(page, constants.OTHER_NEWS_PAGE_MAX_COUNT, False)
        news_list = paginate.items
        current_page = paginate.page
        total_page = paginate.pages
    except Exception as e:
        current_app.logger.error(e)

    news_dict_li = [news.to_review_dict() for news in news_list]
    data = {
        "news_dict_li": news_dict_li,
        "current_page": current_page,
        "total_page": total_page
    }
    # Removed a leftover debug print(news_list) that spammed the server log.
    return render_template("news/user_news_list.html", data=data)
31,359
def unificate_link(link):
    """Process whitespace, make first letter upper."""
    link = process_link_whitespace(link)
    # Slicing covers every length: '' stays '', a single character is
    # uppercased whole, longer strings get only their first letter raised.
    return link[:1].upper() + link[1:]
31,360
def compute_vel_acc( robo, symo, antRj, antPj, forced=False, gravity=True, floating=False ): """Internal function. Computes speeds and accelerations usitn Parameters ========== robo : Robot Instance of robot description container symo : symbolmgr.SymbolManager Instance of symbolic manager """ #init velocities and accelerations w = ParamsInit.init_w(robo) wdot, vdot = ParamsInit.init_wv_dot(robo, gravity) # decide first link first_link = 1 if floating or robo.is_floating or robo.is_mobile: first_link = 0 #init auxilary matrix U = ParamsInit.init_u(robo) for j in xrange(first_link, robo.NL): if j == 0: w[j] = symo.mat_replace(w[j], 'W', j) wdot[j] = symo.mat_replace(wdot[j], 'WP', j) vdot[j] = symo.mat_replace(vdot[j], 'VP', j) dv0 = ParamsInit.product_combinations(w[j]) symo.mat_replace(dv0, 'DV', j) hatw_hatw = Matrix([ [-dv0[3]-dv0[5], dv0[1], dv0[2]], [dv0[1], -dv0[5]-dv0[0], dv0[4]], [dv0[2], dv0[4], -dv0[3]-dv0[0]] ]) U[j] = hatw_hatw + tools.skew(wdot[j]) symo.mat_replace(U[j], 'U', j) else: jRant = antRj[j].T qdj = Z_AXIS * robo.qdot[j] qddj = Z_AXIS * robo.qddot[j] wi, w[j] = _omega_ij(robo, j, jRant, w, qdj) symo.mat_replace(w[j], 'W', j) symo.mat_replace(wi, 'WI', j) _omega_dot_j(robo, j, jRant, w, wi, wdot, qdj, qddj) symo.mat_replace(wdot[j], 'WP', j, forced) _v_dot_j(robo, symo, j, jRant, antPj, w, wi, wdot, U, vdot, qdj, qddj) symo.mat_replace(vdot[j], 'VP', j, forced) return w, wdot, vdot, U
31,361
def _sample_n_k(n, k): """Sample k distinct elements uniformly from range(n)""" if not 0 <= k <= n: raise ValueError("Sample larger than population or is negative") if 3 * k >= n: return np.random.choice(n, k, replace=False) else: result = np.random.choice(n, 2 * k) selected = set() selected_add = selected.add j = k for i in range(k): x = result[i] while x in selected: x = result[i] = result[j] j += 1 if j == 2 * k: # This is slow, but it rarely happens. result[k:] = np.random.choice(n, k) j = k selected_add(x) return result[:k]
31,362
def plot_bout_start_end(
    body_speed,
    start: int = 0,
    end: int = -1,
    is_moving: np.ndarray = None,
    precise_end: int = None,
    precise_start: int = None,
    speed_th: float = 1,
    **other_speeds,
):
    """
    Plot speeds zoomed in at the start and at the end of a bout,
    side by side on a shared y axis.
    """
    f, axes = plt.subplots(ncols=2, sharey=True, figsize=(16, 9))

    # Left panel: +/-60 frames around the bout start (clamped at 0);
    # right panel: +/-60 frames around the bout end.
    window_start = start - 60 if start > 60 else 0
    plot_speeds(
        body_speed,
        start=window_start,
        end=start + 60,
        ax=axes[0],
        show=False,
        **other_speeds,
    )
    plot_speeds(
        body_speed,
        start=end - 60,
        end=end + 60,
        ax=axes[1],
        show=False,
        **other_speeds,
    )

    # Mark the refined start/end frames when provided (offset from the
    # window centre at x=60).
    if precise_start is not None:
        axes[0].axvline(
            60 + (precise_start - start), lw=4, alpha=0.5, color="salmon"
        )
    if precise_end is not None:
        axes[1].axvline(
            60 + (precise_end - end), lw=4, alpha=0.5, color="salmon"
        )

    for name, ax in zip(("start", "end"), axes):
        ax.axvline(60, ls="--", color=[0.4, 0.4, 0.4], zorder=-1)
        ax.axhline(speed_th, color=[0.4, 0.4, 0.4], zorder=-1)
        ax.set(title=name)
    plt.show()
31,363
def test_nelder_mead_max_iter(check_error):
    """
    Test the OptimizationWorkChain with the Nelder-Mead engine in the case
    when the set `max_iter` is reached. This triggers the 202 exit status.
    """
    engine_kwargs = dict(
        simplex=[[1.2, 0.9], [1., 2.], [2., 1.]],
        xtol=1e-1,
        ftol=1e-1,
        max_iter=10,
    )
    check_error(
        engine=NelderMead,
        engine_kwargs=engine_kwargs,
        func_workchain_name='rosenbrock',
        exit_status=202,
        output_port_names=['engine_outputs__last_simplex'],
    )
31,364
def test_search_not_found():
    """test search incorrect project function"""
    outcome = CliRunner().invoke(
        cli,
        ["search", "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"],
        catch_exceptions=False,
    )
    # The CLI reports a missing project without failing.
    assert outcome.exit_code == 0
    assert "Project not found!" in outcome.output
31,365
def _all_usage_keys(descriptors, aside_types):
    """
    Return a set of all usage_ids for the `descriptors` and for as all asides
    in `aside_types` for those descriptors.
    """
    usage_ids = set()
    for descriptor in descriptors:
        base_id = descriptor.scope_ids.usage_id
        usage_ids.add(base_id)
        # Each aside type contributes both a V1 and a V2 key.
        for aside_type in aside_types:
            usage_ids.update((
                AsideUsageKeyV1(base_id, aside_type),
                AsideUsageKeyV2(base_id, aside_type),
            ))
    return usage_ids
31,366
def svn_client_invoke_get_commit_log2(*args):
    """svn_client_invoke_get_commit_log2(svn_client_get_commit_log2_t _obj, apr_array_header_t commit_items, void * baton, apr_pool_t pool) -> svn_error_t"""
    # Thin SWIG-generated wrapper; all work happens in the `_client`
    # C extension module.
    return _client.svn_client_invoke_get_commit_log2(*args)
31,367
def round_extent(extent, cellsize):
    """Increases the extent until all sides lie on a coordinate divisible by cellsize."""

    def _snap_down(value):
        # Lower bounds move down to the nearest multiple of cellsize.
        return np.floor(value / cellsize) * cellsize

    def _snap_up(value):
        # Upper bounds move up to the nearest multiple of cellsize.
        return np.ceil(value / cellsize) * cellsize

    xmin, ymin, xmax, ymax = extent
    return _snap_down(xmin), _snap_down(ymin), _snap_up(xmax), _snap_up(ymax)
31,368
def add_numbers():
    """Return the chatbot's reply to the query passed in the 'a' request arg.

    NOTE(review): despite the name and the old docstring ("Add two numbers
    server side"), this endpoint forwards the input text to the chatbot and
    returns its response as JSON. Removed the dead commented-out code and the
    unused `returned` variable.
    """
    query = request.args.get('a')
    print(query)
    result = chatbot.main(query)
    print("Result: ", result)
    return jsonify(''.join(result))
31,369
def _parse_stack_info(line, re_obj, crash_obj, line_num):
    """
    Parse one line of a crash report's stack section.

    :param line: line string
    :param re_obj: re compiled object (lazily created on first call)
    :param crash_obj: CrashInfo object, mutated with any parsed stack item
    :param line_num: line number recorded on the stack item
    :return: (crash_obj, re_obj, complete) where complete is True once the
             image-section header is reached
    """
    if re_obj is None:
        re_obj = re.compile(_match_stack_item_re())
    complete = False
    match = re_obj.match(line)
    if match is not None:
        item = StackItemInfo()
        item.name = match.group(1)
        item.invoke_address = match.group(2)
        item.load_address = match.group(3)
        item.line_num = line_num
        crash_obj.function_stacks.append(item)
    elif re.match(_match_image_header_re(), line) is not None:
        # An image-section header terminates the stack listing.
        complete = True
        re_obj = None
    return (crash_obj, re_obj, complete)
31,370
def list_findings(DetectorId=None, FindingCriteria=None, SortCriteria=None, MaxResults=None, NextToken=None):
    """
    Lists Amazon GuardDuty findings for the specified detector ID.

    See also: AWS API Documentation.

    :type DetectorId: string
    :param DetectorId: [REQUIRED] The ID of the detector that specifies the
        GuardDuty service whose findings you want to list.

    :type FindingCriteria: dict
    :param FindingCriteria: Criteria used for querying findings. A map
        ``{'Criterion': {<JSON field name>: {condition: value, ...}}}`` where
        conditions include Eq, Neq, Gt, Gte, Lt, Lte, Equals, NotEquals,
        GreaterThan, GreaterThanOrEqual, LessThan and LessThanOrEqual.
        Queryable JSON field names include accountId, region, confidence, id,
        the resource.accessKeyDetails.* / resource.instanceDetails.* fields,
        resource.resourceType, the service.action.* fields,
        service.additionalInfo.threatListName, service.archived (set to
        'true'/'false' to list only archived/unarchived findings; unset lists
        all), service.resourceRole, severity, type and updatedAt
        (timestamp in Unix epoch milliseconds, e.g. 1486685375000).

    :type SortCriteria: dict
    :param SortCriteria: Criteria used for sorting findings:
        ``{'AttributeName': 'string', 'OrderBy': 'ASC'|'DESC'}``.

    :type MaxResults: integer
    :param MaxResults: Maximum number of items in the response
        (default 50, maximum 50).

    :type NextToken: string
    :param NextToken: Pagination token; pass null on the first call and the
        previous response's NextToken on subsequent calls.

    :rtype: dict
    :return: ``{'FindingIds': ['string', ...], 'NextToken': 'string'}``

    Raises GuardDuty.Client.exceptions.BadRequestException and
    GuardDuty.Client.exceptions.InternalServerErrorException.
    """
    # Auto-generated documentation stub; the actual implementation lives on
    # the boto3 GuardDuty client.
    pass
31,371
def Orbiter(pos, POS, veloc, MASS, mass):
    """
    Advance an orbiter one time step under the gravity of a central body.

    Parameters
    ----------
    pos : list
        Position vector [x, y] of the orbiter (mutated in place).
    POS : list
        Position vector [x, y] of the central object.
    veloc : list
        Velocity vector [vx, vy] of the orbiter (mutated in place).
    MASS : float
        Mass of the central object.
    mass : float
        Mass of the orbiter.  Currently unused (test-particle approximation),
        kept for interface compatibility.

    Returns
    -------
    list
        ``[pos, veloc]`` — the updated position and velocity vectors.

    Notes
    -----
    Relies on module-level ``G`` (gravitational constant) and
    ``timeFrameLength`` (integration step).
    """
    # Separation vector from the central body to the orbiter.
    dx = pos[0] - POS[0]
    dy = pos[1] - POS[1]
    # BUGFIX: the radius previously used (pos[0] + POS[0]) in the x term,
    # which gives a wrong distance whenever the centre is not at the origin.
    rad = math.sqrt(dx ** 2 + dy ** 2)

    # Gravitational acceleration, pointing from the orbiter toward the
    # centre: a = -G*M/r^2 * (unit vector toward the orbiter).
    acc = [(-(G * MASS) / (rad ** 2)) * (dx / rad),
           (-(G * MASS) / (rad ** 2)) * (dy / rad)]

    # BUGFIX: the old code both list-extended ``veloc`` with two extra
    # elements (``veloc += [ax*dt, ay*dt]``) AND added the acceleration in a
    # loop, corrupting the vector and double-applying the kick.  Apply the
    # acceleration exactly once (semi-implicit Euler).
    for i in range(2):
        veloc[i] += acc[i] * timeFrameLength

    # Advance the position using the updated velocity.
    for i in range(2):
        pos[i] += veloc[i] * timeFrameLength

    return [pos, veloc]
31,372
def remove_queue(queue):
    """
    Delete the given SQS queue.

    When run against an AWS account, it can take up to 60 seconds before
    the queue is actually deleted.

    :param queue: The queue to delete.
    :return: None
    """
    try:
        queue.delete()
        logger.info("Deleted queue with URL=%s.", queue.url)
    except ClientError as err:
        # Log the full traceback before propagating to the caller.
        logger.exception("Couldn't delete queue with URL=%s!", queue.url)
        raise err
31,373
def build_heading(win, readonly=False):
    """Generate heading text for screen"""
    parent = win.parent()
    albumtype = TYPETXT[parent.albumtype]
    artist = parent.albumdata['artist']
    titel = parent.albumdata['titel']
    # Without both artist and title this is a brand-new album entry.
    if not artist or not titel:
        return 'Opvoeren nieuw {}'.format(albumtype)
    # Derive the trailing ": tracks"/": opnames" suffix from the current
    # heading text, whether it matches exactly or only ends with the word.
    wintext = win.heading.text()
    newtext = ''
    for word in ('tracks', 'opnames'):
        if wintext == word or wintext.endswith(word):
            newtext = ': {}'.format(word)
            break
    prefix = 'G' if readonly else 'Wijzigen g'
    return '{}egevens van {} {} - {} {}'.format(
        prefix, albumtype, artist, titel, newtext)
31,374
def GetTraceValue():
    """Return a value to be used for the trace header."""
    # Read all three trace-related properties up front, exactly as before.
    # Token to be used to route service request traces.
    token = properties.VALUES.core.trace_token.Get()
    # Username to which service request traces should be sent.
    email = properties.VALUES.core.trace_email.Get()
    # Enable/disable server side logging of service requests.
    log_enabled = properties.VALUES.core.trace_log.GetBool()

    # Precedence: token, then email, then plain logging.
    if token:
        return 'token:{0}'.format(token)
    if email:
        return 'email:{0}'.format(email)
    if log_enabled:
        return 'log'
    return None
31,375
def get_file_size(file_obj):
    """Return the size in bytes of a file-like object, or None if unknown.

    get_file_size(open('/home/ubuntu-14.04.3-desktop-amd64.iso'))

    :param file_obj: file-like object.
    """
    # Objects without seek/tell (or non-seekable Py3 streams) have no
    # determinable size.
    if not (hasattr(file_obj, 'seek') and hasattr(file_obj, 'tell')):
        return
    if not (six.PY2 or six.PY3 and file_obj.seekable()):
        return
    try:
        # Remember the current offset, jump to the end to measure the size,
        # then restore the original offset.
        origin = file_obj.tell()
        file_obj.seek(0, os.SEEK_END)
        size = file_obj.tell()
        file_obj.seek(origin)
    except IOError as e:
        if e.errno == errno.ESPIPE:
            # Illegal seek. This means the file object
            # is a pipe (e.g. the user is trying
            # to pipe image data to the client,
            # echo testdata | bin/glance add blah...), or
            # that file object is empty, or that a file-like
            # object which doesn't support 'seek/tell' has
            # been supplied.
            return
        raise
    return size
31,376
def test_io_set_raw_more(tmpdir):
    """Test importing EEGLAB .set files.

    Exercises several degenerate .set layouts: events with zero/negative
    sample latencies, duplicated (overlapping) events, a single-channel
    recording, and channel-location structs that lack position fields.
    Relies on the module-level fixtures ``raw_fname_mat`` / ``base_dir``.
    """
    tmpdir = str(tmpdir)

    # test reading file with one event (read old version)
    eeg = io.loadmat(raw_fname_mat, struct_as_record=False,
                     squeeze_me=True)['EEG']

    # test negative event latencies
    negative_latency_fname = op.join(tmpdir, 'test_negative_latency.set')
    evnts = deepcopy(eeg.event[0])
    evnts.latency = 0
    io.savemat(negative_latency_fname,
               {'EEG': {'trials': eeg.trials, 'srate': eeg.srate,
                        'nbchan': eeg.nbchan,
                        'data': 'test_negative_latency.fdt',
                        'epoch': eeg.epoch, 'event': evnts,
                        'chanlocs': eeg.chanlocs, 'pnts': eeg.pnts}},
               appendmat=False, oned_as='row')
    # the .set header references an external .fdt data file; provide one
    shutil.copyfile(op.join(base_dir, 'test_raw.fdt'),
                    negative_latency_fname.replace('.set', '.fdt'))
    # latency 0 (1-based in EEGLAB) maps to sample index -1 -> warning only
    with pytest.warns(RuntimeWarning, match="has a sample index of -1."):
        read_raw_eeglab(input_fname=negative_latency_fname, preload=True)
    # a truly negative latency must raise, not just warn
    evnts.latency = -1
    io.savemat(negative_latency_fname,
               {'EEG': {'trials': eeg.trials, 'srate': eeg.srate,
                        'nbchan': eeg.nbchan,
                        'data': 'test_negative_latency.fdt',
                        'epoch': eeg.epoch, 'event': evnts,
                        'chanlocs': eeg.chanlocs, 'pnts': eeg.pnts}},
               appendmat=False, oned_as='row')
    with pytest.raises(ValueError, match='event sample index is negative'):
        with pytest.warns(RuntimeWarning, match="has a sample index of -1."):
            read_raw_eeglab(input_fname=negative_latency_fname, preload=True)

    # test overlapping events
    overlap_fname = op.join(tmpdir, 'test_overlap_event.set')
    io.savemat(overlap_fname,
               {'EEG': {'trials': eeg.trials, 'srate': eeg.srate,
                        'nbchan': eeg.nbchan,
                        'data': 'test_overlap_event.fdt',
                        'epoch': eeg.epoch,
                        'event': [eeg.event[0], eeg.event[0]],
                        'chanlocs': eeg.chanlocs, 'pnts': eeg.pnts}},
               appendmat=False, oned_as='row')
    shutil.copyfile(op.join(base_dir, 'test_raw.fdt'),
                    overlap_fname.replace('.set', '.fdt'))

    # test reading file with one channel
    # NOTE(review): 'event': eeg.epoch looks like a copy of the epoch field
    # being reused as the event field — presumably intentional filler data.
    one_chan_fname = op.join(tmpdir, 'test_one_channel.set')
    io.savemat(one_chan_fname,
               {'EEG': {'trials': eeg.trials, 'srate': eeg.srate,
                        'nbchan': 1, 'data': np.random.random((1, 3)),
                        'epoch': eeg.epoch, 'event': eeg.epoch,
                        'chanlocs': {'labels': 'E1', 'Y': -6.6069,
                                     'X': 6.3023, 'Z': -2.9423},
                        'times': eeg.times[:3], 'pnts': 3}},
               appendmat=False, oned_as='row')
    read_raw_eeglab(input_fname=one_chan_fname, preload=True)

    # test reading file with 3 channels - one without position information
    # first, create chanlocs structured array
    ch_names = ['F3', 'unknown', 'FPz']
    x, y, z = [1., 2., np.nan], [4., 5., np.nan], [7., 8., np.nan]
    dt = [('labels', 'S10'), ('X', 'f8'), ('Y', 'f8'), ('Z', 'f8')]
    nopos_dt = [('labels', 'S10'), ('Z', 'f8')]
    chanlocs = np.zeros((3,), dtype=dt)
    nopos_chanlocs = np.zeros((3,), dtype=nopos_dt)
    for ind, vals in enumerate(zip(ch_names, x, y, z)):
        for fld in range(4):
            chanlocs[ind][dt[fld][0]] = vals[fld]
            # the "no position" variant keeps only 'labels' (fld 0) and
            # 'Z' (fld 3)
            if fld in (0, 3):
                nopos_chanlocs[ind][dt[fld][0]] = vals[fld]
    # In theory this should work and be simpler, but there is an obscure
    # SciPy writing bug that pops up sometimes:
    # nopos_chanlocs = np.array(chanlocs[['labels', 'Z']])

    if LooseVersion(np.__version__) == '1.14.0':
        # There is a bug in 1.14.0 (or maybe with SciPy 1.0.0?) that causes
        # this write to fail!
        raise SkipTest('Need to fix bug in NumPy 1.14.0!')

    # test reading channel names but not positions when there is no X (only Z)
    # field in the EEG.chanlocs structure
    nopos_fname = op.join(tmpdir, 'test_no_chanpos.set')
    io.savemat(nopos_fname,
               {'EEG': {'trials': eeg.trials, 'srate': eeg.srate, 'nbchan': 3,
                        'data': np.random.random((3, 2)), 'epoch': eeg.epoch,
                        'event': eeg.epoch, 'chanlocs': nopos_chanlocs,
                        'times': eeg.times[:2], 'pnts': 2}},
               appendmat=False, oned_as='row')
    # load the file
    raw = read_raw_eeglab(input_fname=nopos_fname, preload=True)
    # test that channel names have been loaded but not channel positions
    for i in range(3):
        assert_equal(raw.info['chs'][i]['ch_name'], ch_names[i])
        assert_array_equal(raw.info['chs'][i]['loc'][:3],
                           np.array([np.nan, np.nan, np.nan]))
31,377
def sql(
    where: str, parameters: Optional[Parameters] = None
) -> Union[str, Tuple[str, Parameters]]:
    """
    Return a SQL query, usable for querying the TransitMaster database.

    :param where: WHERE clause interpolated into the module-level ``SQL``
        template.
    :param parameters: Optional bind parameters for the WHERE clause.
    :return: The formatted query string, or a ``(query, parameters)`` tuple
        when parameters were supplied.

    If provided, parameters are returned duplicated, to account for the fact
    that the WHERE clause is also duplicated (the template interpolates
    ``where`` twice — see the module-level ``SQL`` constant).
    """
    formatted = SQL.format(where=where)
    if parameters is None:
        return formatted
    # Double the parameters so each occurrence of the WHERE clause gets
    # its own copy of the bind values.
    return (formatted, parameters + parameters)
31,378
def test_encoding_mismatch(tmp_path):
    """Render a simple template that lies about its encoding.

    Header says us-ascii, but it contains utf-8.
    """
    # Write a template whose declared charset (us-ascii) cannot represent
    # its body ("Laȝamon" contains a non-ASCII yogh).
    template_path = tmp_path / "template.txt"
    template_path.write_text(textwrap.dedent("""\
        TO: to@test.com
        FROM: from@test.com
        Content-Type: text/plain; charset="us-ascii"

        Hello Laȝamon
        """))

    _, _, message = TemplateMessage(template_path).render({})

    # The renderer must silently upgrade the charset to utf-8 ...
    assert message.get_charset() == "utf-8"
    assert message.get_content_charset() == "utf-8"
    # ... and the body must round-trip intact.
    decoded = message.get_payload(decode=True).decode("utf-8")
    assert decoded == "Hello Laȝamon"
31,379
def write_out_results(output_dir, ofile_prefix, param_suffixes, results):
    """ Write the results to the disk.

    The output filename is compiled in the following way:

        output_dir + "/" + prefix + "_" + dataframe_type + "_" + suffix.csv

    with:
        prefix: string, can be used to index results computed in parallel,
            e.g. "_00", "_01", ...
        dataframe_type: string, \in {"accuracies", "correlations", ...}
            (see 'evaluate_system_pairs', etc.)
        suffix: string, identifies the "flavor" of the experiment,
            e.g. "_embso=False"

    :param output_dir: string, output directory for the results.
    :param ofile_prefix: string, id of the result, e.g. to allow parallelization
    :param param_suffixes: dictionary, containing the "flavors" of the
        experiment, e.g. {"use_feature_scaler": True, ...}
    :param results: dictionary, keys are result types (e.g. "accuracies"),
        values are pandas.DataFrame tables with the actual results
    """
    # The flavor suffix is the same for every table; build it once.
    flavor = dict2str(param_suffixes, sep="_")
    for result_type, table in results.items():
        out_path = "{}/{}{}_{}.csv".format(
            output_dir, ofile_prefix, result_type, flavor)
        table.to_csv(out_path, index=False, sep="\t", float_format="%.4f")
31,380
def clip(a, a_min, a_max):
    """Clips the values of an array to a given interval.

    Given an interval, values outside the interval are clipped to the
    interval edges. For example, if an interval of ``[0, 1]`` is specified,
    values smaller than 0 become 0, and values larger than 1 become 1.

    Args:
        a (~chainerx.ndarray): Array containing elements to clip.
        a_min (scalar): Maximum value.
        a_max (scalar): Minimum value.

    Returns:
        ~chainerx.ndarray: An array with the elements of ``a``, but where
        values < ``a_min`` are replaced with ``a_min``, and those > ``a_max``
        with ``a_max``.

    Note:
        The :class:`~chainerx.ndarray` typed ``a_min`` and ``a_max`` are
        not supported yet.

    Note:
        During backpropagation, this function propagates the gradient of the
        output array to the input array ``a``.

    .. seealso:: :func:`numpy.clip`
    """
    # At least one bound must be given for clipping to be meaningful.
    if a_min is None and a_max is None:
        raise ValueError('Must set either a_min or a_max.')
    result = a
    # Lower bound: raise everything below a_min up to a_min.
    if a_min is not None:
        result = chainerx.maximum(result, a_min)
    # Upper bound: lower everything above a_max down to a_max.
    if a_max is not None:
        result = chainerx.minimum(result, a_max)
    return result
31,381
def hole_eigenvalue_residual(
    energy: floatarray, particle: "CoreShellParticle"
) -> float:
    """This function returns the residual of the hole energy level eigenvalue
    equation. Used with root-finding methods to calculate the lowest energy
    state.

    Parameters
    ----------
    energy : float, eV
        The energy for which to calculate the wavevector of a hole in in the
        nanoparticle.

    particle : CoreShellParticle
        The particle for which to calculate the hole wavevectors. We pass in
        the particle directly since there are a lot of parameters to pass in
        and this keeps the interface clean.

    References
    ----------
    .. [1] Piryatinski, A., Ivanov, S. A., Tretiak, S., & Klimov, V. I.
        (2007). Effect of Quantum and Dielectric Confinement on the
        Exciton−Exciton Interaction Energy in Type II Core/Shell
        Semiconductor Nanocrystals. Nano Letters, 7(1), 108–115.
        https://doi.org/10.1021/nl0622404

    .. [2] Li, L., Reiss, P., & Protie, M. (2009). Core / Shell Semiconductor
        Nanocrystals, (2), 154–168. https://doi.org/10.1002/smll.200800841
    """
    # The hole's confining potential offset (particle.uh) is applied to the
    # core or the shell depending on the band alignment of the particle.
    core_hole_wavenumber, shell_hole_wavenumber = (None, None)
    if particle.type_one:
        core_hole_wavenumber = wavenumber_from_energy(energy, particle.cmat.m_h)
        shell_hole_wavenumber = wavenumber_from_energy(
            energy, particle.smat.m_h, potential_offset=particle.uh
        )
    elif particle.type_one_reverse:
        core_hole_wavenumber = wavenumber_from_energy(
            energy, particle.cmat.m_h, potential_offset=particle.uh
        )
        shell_hole_wavenumber = wavenumber_from_energy(energy, particle.smat.m_h)
    elif particle.type_two:
        # Type-II particles split further by which carrier localizes where
        # (electron-in-core/hole-in-shell vs the reverse).
        if particle.e_h:
            core_hole_wavenumber = wavenumber_from_energy(
                energy, particle.cmat.m_h, potential_offset=particle.uh
            )
            shell_hole_wavenumber = wavenumber_from_energy(energy, particle.smat.m_h)
        elif particle.h_e:
            core_hole_wavenumber = wavenumber_from_energy(energy, particle.cmat.m_h)
            shell_hole_wavenumber = wavenumber_from_energy(
                energy, particle.smat.m_h, potential_offset=particle.uh
            )
    # NOTE(review): if the particle matches none of the types above, the
    # wavenumbers stay None and the multiplications below raise TypeError —
    # presumably CoreShellParticle guarantees one flag is set; confirm.
    # Dimensionless arguments x = k * width for the core and shell regions.
    core_x = core_hole_wavenumber * particle.core_width
    shell_x = shell_hole_wavenumber * particle.shell_width
    core_width = particle.core_width
    shell_width = particle.shell_width
    mass_ratio = particle.smat.m_h / particle.cmat.m_h
    # Scalar inputs go through the scalar helper _tanxdivx; array inputs use
    # the vectorized tanxdivx. The residual is zero at an energy eigenvalue.
    if type(core_x) in [np.float64, np.complex128]:
        return np.real(
            (1 - 1 / _tanxdivx(core_x)) * mass_ratio
            - 1
            - 1 / _tanxdivx(shell_x) * core_width / shell_width
        )
    else:
        return np.real(
            (1 - 1 / tanxdivx(core_x)) * mass_ratio
            - 1
            - 1 / tanxdivx(shell_x) * core_width / shell_width
        )
31,382
def open_raster(filename):
    """Open a raster file with GDAL and return the dataset.

    :param filename: path to the raster file.
    :return: the opened ``gdal.Dataset`` (read-only).

    Exits the process with status 1 if the file cannot be opened
    (behavior preserved from the original implementation).
    """
    # register all of the GDAL drivers
    gdal.AllRegister()

    # open the image read-only
    img = gdal.Open(filename, GA_ReadOnly)
    if img is None:
        # BUGFIX: was a Python 2 print statement, a SyntaxError under Py3.
        print('Could not open %s' % filename)
        sys.exit(1)
    return img
31,383
def reduce2latlon_seasonal(mv, season=seasonsyr, region=None, vid=None,
                           exclude_axes=None, seasons=seasonsyr):
    """as reduce2lat_seasonal, but both lat and lon axes are retained.

    Axis names (ids) may be listed in exclude_axes, to exclude them from the
    averaging process.

    :param mv: variable to reduce.
    :param season: season selection (defaults to the whole year).
    :param region: optional region restriction.
    :param vid: optional id for the resulting variable.
    :param exclude_axes: list of axis ids to exclude from averaging
        (None means no exclusions).
    :param seasons: deprecated alias for ``season``, kept for backwards
        compatibility.
    """
    # BUGFIX: exclude_axes previously defaulted to a mutable list ([]),
    # which is shared across calls; use None as the sentinel instead.
    if exclude_axes is None:
        exclude_axes = []
    # backwards compatibility with old keyword 'seasons':
    if seasons != seasonsyr:
        season = seasons
    return reduce2any(mv, target_axes=['x', 'y'], season=season, region=region,
                      vid=vid, exclude_axes=exclude_axes)
31,384
def GetFreshAccessTokenIfEnabled(account=None,
                                 scopes=None,
                                 min_expiry_duration='1h',
                                 allow_account_impersonation=True):
  """Returns a fresh access token of the given account or the active account.

  Same as GetAccessTokenIfEnabled except that the access token returned by
  this function is valid for at least min_expiry_duration.

  Args:
    account: str, The account to get the access token for. If None, the
      account stored in the core.account property is used.
    scopes: tuple, Custom auth scopes to request. By default CLOUDSDK_SCOPES
      are requested.
    min_expiry_duration: Duration str, Refresh the token if they are within
      this duration from expiration. Must be a valid duration between 0
      seconds and 1 hour (e.g. '0s' >x< '1h').
    allow_account_impersonation: bool, True to allow use of impersonated
      service account credentials (if that is configured).
  """
  # Respect the global kill switch for credential usage.
  credentials_disabled = properties.VALUES.auth.disable_credentials.GetBool()
  if credentials_disabled:
    return None
  return GetFreshAccessToken(account, scopes, min_expiry_duration,
                             allow_account_impersonation)
31,385
def my_plot(true, ass):
    """Scatter-plot true vs. located positions and save the figure.

    :param true: (N, 2) array of true positions.
    :param ass: (N, 2) array of located (estimated) positions.

    Saves the figure to 'mds-map.png' and then displays it.
    """
    plt.figure()
    plt.scatter(true[:, 0], true[:, 1], marker='+', color='c')
    plt.scatter(ass[:, 0], ass[:, 1], marker='o', color='r')
    plt.legend(['True Position', 'Located Position'])
    # BUGFIX: savefig must come BEFORE show() — with interactive backends
    # the figure is released when the window closes, so saving afterwards
    # writes an empty image.
    plt.savefig('mds-map.png')
    plt.show()
31,386
def unicode_to_xes(uni):
    """Convert unicode characters to our ASCII representation of patterns."""
    # Strip the invisible filler characters first, then map each remaining
    # box character through the BOXES lookup table.
    cleaned = uni.replace(INVISIBLE_CRAP, '')
    return ''.join([BOXES[ch] for ch in cleaned])
31,387
def AvailableSteps():
    """(read-only) Number of Steps available in cap bank to be switched ON."""
    # Thin wrapper over the native capacitors API.
    steps = lib.Capacitors_Get_AvailableSteps()
    return steps
31,388
def alembicConnectAttr(source, target):
    """
    Make sure ``target`` is driven by ``source``: disconnect any other
    incoming connection and connect ``source``, skipping the connect when
    it is already in place.

    :param source: plug to connect from (e.g. "node.outAttr").
    :param target: plug to connect to (e.g. "node.inAttr").
    """
    cmds.ExocortexAlembic_profileBegin(f="Python.ExocortexAlembic._functions.alembicConnectIfUnconnected")
    currentSource = cmds.connectionInfo(target, sfd=True)
    if currentSource != source:
        # BUGFIX: previously connectAttr was called even when the desired
        # connection already existed, which Maya rejects with an
        # "already connected" error.
        if currentSource != "":
            cmds.disconnectAttr(currentSource, target)
        cmds.connectAttr(source, target)
    cmds.ExocortexAlembic_profileEnd(f="Python.ExocortexAlembic._functions.alembicConnectIfUnconnected")
31,389
def modify_list(result, guess, answer):
    """
    Reveal every occurrence of a guessed letter in the displayed word.

    Arguments:
        result -- the displayed pattern (list of characters), mutated in place.
        guess -- the letter of user's guess.
        answer -- the answer of word.

    Returns:
        result -- the list of word after modification.

    Side effects: prints a miss message when the letter is absent, and
    always prints the (space-joined) current pattern.
    """
    guess = guess.lower()
    answer = answer.lower()
    if guess in answer:
        # BUGFIX: was re.finditer(guess, answer), which misbehaves if the
        # guess is a regex metacharacter (e.g. '.'); a plain character scan
        # is equivalent for letters and always safe.
        for i, letter in enumerate(answer):
            if letter == guess:
                result[i] = guess.upper()
    else:
        print("Letter '{}' is not in the word".format(guess.upper()))
    print(' '.join(result))
    return result
31,390
def test_DCT():
    """
    Test the opDCT operator

    Visual smoke test: DCT-transform an image, hard-threshold the
    coefficients, reconstruct, and display original / transform /
    reconstruction side by side.

    NOTE(review): ``scipy.misc.lena`` was removed from SciPy (deprecated in
    0.17, gone by 1.0) — this test presumably requires an old SciPy; confirm.
    NOTE(review): ``softThreshold`` is imported but never used here.
    """
    from scipy.misc import lena
    import matplotlib.pyplot as plt
    from compsense.utilities import softThreshold, hardThreshold

    # Normalize the test image to [0, 1].
    img = lena().astype(np.double)
    img /= img.max()

    op = opDCT(img.shape)
    # op.T is the forward DCT here; op applies the inverse/adjoint.
    img_conv = op.T(img)
    img_recon = op(hardThreshold(img_conv, 0.5))

    plt.figure()
    plt.gray()
    plt.imshow(img)
    plt.figure()
    plt.imshow(img_conv)
    plt.figure()
    plt.imshow(img_recon)
    plt.show()
31,391
def f(p, snm, sfs):
    """
    Sum of squared deviations between the modified SFS and the neutral model.

    p: proportion of all SNP's on the X chromosome [float, 0<p<1]
    snm: standard neutral model spectrum (optimally scaled)
    sfs: observed SFS
    """
    # Apply the proportion-based modification to the observed spectrum.
    adjusted = modify(p, sfs)
    # Squared residuals against the standard neutral model spectrum.
    residuals = adjusted - snm
    return np.sum(residuals ** 2)
31,392
def get_N_intransit(tdur, cadence):
    """Estimates number of in-transit points for transits in a light curve.

    Parameters
    ----------
    tdur: float
        Full transit duration
    cadence: float
        Cadence/integration time for light curve

    Returns
    -------
    n_intransit: int
        Number of flux points in each transit
    """
    # BUGFIX: float // float yields a float (e.g. 3.0); the documented
    # contract is an integer count, so convert explicitly.
    n_intransit = int(tdur // cadence)
    return n_intransit
31,393
def assert_invalid_dimensions(deviceType, serial_interface, width, height):
    """
    Assert an invalid resolution raises a
    :py:class:`luma.core.error.DeviceDisplayModeError`.
    """
    expected_fragment = f"Unsupported display mode: {width} x {height}"
    # Constructing the device with unsupported dimensions must fail.
    with pytest.raises(luma.core.error.DeviceDisplayModeError) as ex:
        deviceType(serial_interface, width=width, height=height,
                   framebuffer=full_frame())
    assert expected_fragment in str(ex.value)
31,394
def test_build_docs():
    """Test that the documentation builds.

    We do this in a subprocess since the Sphinx configuration file activates
    the veneer and has other side-effects that aren't reset afterward.
    """
    # Run make inside the docs directory via cwd= instead of os.chdir(),
    # so the test never mutates the process-wide working directory
    # (the old chdir/try/finally dance was error-prone).
    subprocess.run(['make', 'clean', 'html', 'SPHINXOPTS=-W'],
                   check=True, cwd='docs')
31,395
def version_callback(print_version: bool) -> None:
    """Print the version of the package."""
    # Typer invokes this for every run; do nothing unless the flag was set.
    if not print_version:
        return
    console.print(f"[yellow]google_img[/] version: [bold blue]{version}[/]")
    raise typer.Exit()
31,396
def extract_keys(keys, dic, drop=True):
    """
    Extract keys from a dictionary and return a dictionary with the
    extracted values.

    If a key is not present in ``dic``, it is simply absent from the output.
    When ``drop`` is true (the default), extracted keys are removed from
    ``dic`` in place.
    """
    extracted = {}
    for key in keys:
        try:
            # pop() removes the entry; plain indexing leaves dic untouched.
            extracted[key] = dic.pop(key) if drop else dic[key]
        except KeyError:
            continue
    return extracted
31,397
def jsonresolver_loader(url_map):
    """Resolve the referred EItems for a Document record."""
    from flask import current_app

    def eitems_resolver(document_pid):
        """Search and return the EItems that reference this Document."""
        hits = []
        search = current_app_ils.eitem_search_cls()
        for hit in search.search_by_document_pid(document_pid).scan():
            record = hit.to_dict()
            hits.append({
                "pid": record.get("pid"),
                "description": record.get("description"),
                "internal_notes": record.get("internal_notes"),
                "open_access": record.get("open_access"),
                "bucket_id": record.get("bucket_id", None),
                "files": record.get("files", []),
            })
        return {
            "total": len(hits),
            "hits": hits
        }

    # Register the resolver endpoint on the JSON-ref resolver URL map.
    url_map.add(
        Rule(
            "/api/resolver/documents/<document_pid>/eitems",
            endpoint=eitems_resolver,
            host=current_app.config.get("JSONSCHEMAS_HOST"),
        )
    )
31,398
def getStations(options, type):
    """Query stations by specific type ('GHCND', 'ASOS', 'COOP', 'USAF-WBAN').

    :param options: object with a ``database`` attribute holding the path to
        the SQLite database file.
    :param type: station type to filter on, or "ALL" for every station.
        (Name shadows the builtin ``type``; kept for interface compatibility.)
    :return: list of (rowid, id, name, lat, lon) tuples.
    """
    conn = sqlite3.connect(options.database)
    try:
        c = conn.cursor()
        if type == "ALL":
            c.execute("select rowid, id, name, lat, lon from stations")
        else:
            # Parameterized query — never interpolate the type string.
            c.execute(
                "select rowid, id, name, lat, lon from stations where type = ?",
                (type,))
        stations = c.fetchall()
    finally:
        # BUGFIX: the connection previously leaked if execute() raised.
        conn.close()
    return stations
31,399