Columns: content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M)
import ctypes

def sphrec(r, colat, lon):
    """
    Convert from spherical coordinates to rectangular coordinates.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/sphrec_c.html

    :param r: Distance of a point from the origin.
    :type r: float
    :param colat: Angle of the point from the positive Z-axis.
    :type colat: float
    :param lon: Angle of the point from the XZ plane in radians.
    :type lon: float
    :return: Rectangular coordinates of the point.
    :rtype: 3-Element Array of floats
    """
    r = ctypes.c_double(r)
    colat = ctypes.c_double(colat)
    lon = ctypes.c_double(lon)
    rectan = stypes.emptyDoubleVector(3)
    libspice.sphrec_c(r, colat, lon, rectan)
    return stypes.cVectorToPython(rectan)
d633b26cd6776d13b6d0e66a9366676c8b8ac962
21,400
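The SpiceyPy wrapper above defers the math to CSPICE's sphrec_c; a minimal pure-Python sketch of the same identity (assuming the NAIF convention, colatitude measured from +Z), handy as a sanity check without the toolkit:

import math

def sphrec_pure(r, colat, lon):
    # x = r*sin(colat)*cos(lon), y = r*sin(colat)*sin(lon), z = r*cos(colat)
    return [r * math.sin(colat) * math.cos(lon),
            r * math.sin(colat) * math.sin(lon),
            r * math.cos(colat)]

print(sphrec_pure(2.0, 0.0, 0.0))  # [0.0, 0.0, 2.0]: a point on the +Z axis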
import json

def counter_endpoint(event=None, context=None):
    """
    API endpoint that returns the total number of UFO sightings.

    An example request might look like:

    .. sourcecode:: http

       GET www.x.com/counter HTTP/1.1
       Host: example.com
       Accept: application/json, text/javascript

    Results will be returned as a JSON object with the following format:

    .. code-block:: json

       {
           "count": <number>
       }
    """
    return app.response_class(
        json.dumps(count_rows_in_table()),
        mimetype='application/json'
    )
e492253f36736c112dcafc15d0e5c30cf27d5560
21,401
def search_trie(result, trie):
    """trie search"""
    if result.is_null():
        return []
    # output
    ret_vals = []
    for token_str in result:
        ret_vals += trie.find(token_str)
    if result.has_memory():
        ret_vals = [one_string for one_string in ret_vals
                    if not result.is_memorized(one_string)]
    return ret_vals
75ad08db7962b47ea6402e866cf4a7a9861037c9
21,402
def get_nb_entry(path_to_notes: str = None,
                 nb_name: str = None,
                 show_index: bool = True) -> str:
    """Returns the entry of a notebook.

    This entry is to be used for the link to the notebook from the
    table of contents and from the navigators. Depending on the value
    of the argument `show_index`, the entry can be either the full entry
    provided by the function `get_nb_full_entry()` or simply the title
    of the notebook, provided by the function `get_nb_title()`.

    Parameters
    ----------
    path_to_notes : str
        The path to the directory that contains the notebooks, either
        absolute or relative to the script that calls `nbbinder.bind()`.

    nb_name : str
        The name of the jupyter notebook file.

    show_index : boolean
        Indicates whether to include the chapter and section numbers of
        the notebook in the table of contents (if True) or just the
        title (if False).

    Returns
    -------
    entry : str
        A string with the entry name.
    """
    if show_index:
        entry = ''.join(list(get_nb_full_entry(path_to_notes, nb_name)[1:3]))
    else:
        entry = get_nb_title(path_to_notes, nb_name)
    return entry
ec216cae586d2746af80ca88a428c6b907ad5240
21,403
def get_label_encoder(config):
    """Gets a label encoder given the label type from the config

    Args:
        config (ModelConfig): A model configuration

    Returns:
        LabelEncoder: The appropriate LabelEncoder object for the given config
    """
    return LABEL_MAP[config.label_type](config)
8c7d6e9058af81c94cde039030fed12c4a65b8e6
21,404
def get_lesson_comment_by_sender_user_id():
    """
    {
        "page": "Long",
        "size": "Long",
        "sender_user_id": "Long"
    }
    """
    domain = request.args.to_dict()
    return lesson_comment_service.get_lesson_comment_by_sender_user_id(domain)
7a20e44af39e2efc5cb83eedd9dfb74124a2777f
21,405
def _inertia_grouping(stf):
    """Grouping function for class inertia."""
    return hasattr(stf[2], 'inertia_constant')
a7689324ccabf601bf8beaec4c1826e8df25880b
21,406
import networkx as nx

def parse_input(raw_input: str) -> nx.DiGraph:
    """Parses Day 12 puzzle input."""
    graph = nx.DiGraph()
    graph.add_nodes_from([START, END])
    for line in raw_input.strip().splitlines():
        edge = line.split('-')
        for candidate in [edge, list(reversed(edge))]:
            if candidate[0] == END:
                continue
            if candidate[1] == START:
                continue
            graph.add_edge(*candidate)
    return graph
1c38124fed386829d712041074cc76c891981498
21,407
from numpy.fft import rfft, irfft

def zonal_mode_extract(infield, mode_keep, low_pass=False):
    """
    Subfunction to extract or wipe out zonal modes (mode_keep) of (y, x) data.

    Assumes here that the data is periodic in axis = 1 (in the x-direction)
    with the end point missing. If mode_keep = 0 then this is just the
    zonally averaged field.

    Input:
        infield     2d layer input field
        mode_keep   the zonal mode of the data to be extracted

    Opt input:
        low_pass    get rid of all modes from mode_keep + 1 onwards

    Output:
        outfield    zonal mode of the data
    """
    outfield_h = rfft(infield, axis=1)
    outfield_h[:, mode_keep+1::] = 0
    if not low_pass:
        outfield_h[:, 0:mode_keep] = 0
    return irfft(outfield_h, axis=1)
a73015ac000668d11dd97ef0c8f435181fb0b9f7
21,408
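A quick check of the mode-selection logic in zonal_mode_extract above, on synthetic data (NumPy only): a field built from zonal modes 1 and 3 reduces to pure mode 3 when mode_keep=3.

import numpy as np
from numpy.fft import rfft, irfft

x = np.linspace(0, 2 * np.pi, 64, endpoint=False)          # periodic in x, end point missing
field = np.tile(np.cos(x) + 0.5 * np.cos(3 * x), (4, 1))   # (y, x) layer

h = rfft(field, axis=1)
h[:, 4:] = 0   # drop modes above 3
h[:, :3] = 0   # drop modes below 3 (the low_pass=False branch)
mode3 = irfft(h, axis=1)
print(np.allclose(mode3, 0.5 * np.tile(np.cos(3 * x), (4, 1))))  # True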
import pickle

def clone():
    """Clone model

    PUT /models

    Parameters:
      - {"model_name": <model_name_to_clone>,
         "new_model_name": <name_for_new_model>}

    Returns:
      - {"model_names": <list_of_model_names_in_session>}
    """
    request_json = request.get_json()
    name = request_json["model_name"]
    new_name = request_json["new_model_name"]
    models = None
    if 'models' in session:
        models = pickle.loads(session["models"])
    else:
        models = {}
    if name in models:
        models[new_name] = models[name].clone()
    session["models"] = pickle.dumps(models)
    res = {"model_names": get_model_names()}
    return jsonify(res)
49aaf81371f197858e4347efdfa04136e3342dc7
21,409
def GetFlagFromDest(dest):
    """Returns a conventional flag name given a dest name."""
    return '--' + dest.replace('_', '-')
021ab8bca05afbb2325d865a299a2af7c3b939c9
21,410
def get_rst_export_elements(
    file_environment, environment, module_name, module_path_name,
    skip_data_value=False, skip_attribute_value=False, rst_elements=None
):
    """Return :term:`reStructuredText` from exported elements within
    *file_environment*.

    *environment* is the full :term:`Javascript` environment processed in
    :mod:`~champollion.parser`.

    *module_name* is the module alias that should be added to each directive.

    *module_path_name* is the module path alias that should be added to each
    directive.

    *skip_data_value* indicates whether data values should not be displayed.

    *skip_attribute_value* indicates whether attribute values should not be
    displayed.

    *rst_elements* can be an initial dictionary that will be updated and
    returned.
    """
    export_environment = file_environment["export"]
    import_environment = file_environment["import"]
    if rst_elements is None:
        rst_elements = {}

    for _exported_env_id, _exported_env in export_environment.items():
        from_module_id = _exported_env["module"]
        line_number = _exported_env["line_number"]
        if line_number not in rst_elements.keys():
            rst_elements[line_number] = []

        name = _exported_env["name"]
        alias = _exported_env["alias"]
        if alias is None:
            alias = name

        # Update module origin and name from import if necessary
        if (from_module_id is None and
                _exported_env_id in import_environment.keys()):
            name = import_environment[_exported_env_id]["name"]
            from_module_id = import_environment[_exported_env_id]["module"]

        # Ignore element if the origin module can not be found
        if from_module_id not in environment["module"].keys():
            continue

        from_module_environment = environment["module"][from_module_id]
        from_file_id = from_module_environment["file_id"]
        from_file_env = environment["file"][from_file_id]

        if name == "default":
            rst_element = get_rst_default_from_file_environment(
                from_file_env, alias, module_name, module_path_name,
                skip_data_value=skip_data_value,
                skip_attribute_value=skip_attribute_value,
            )
            if rst_element is None:
                continue
            rst_elements[line_number].append(rst_element)

        elif name == "*":
            extra_options = [
                ":force-partial-import:",
                ":members:",
                ":skip-description:"
            ]
            if skip_data_value:
                extra_options.append(":skip-data-value:")
            if skip_attribute_value:
                extra_options.append(":skip-attribute-value:")

            rst_element = rst_generate(
                directive="automodule",
                element_id=from_module_id,
                module_alias=module_name,
                module_path_alias=module_path_name,
                extra_options=extra_options
            )
            rst_elements[line_number].append(rst_element)

        else:
            rst_element = get_rst_name_from_file_environment(
                name, from_file_env, alias, module_name, module_path_name,
                skip_data_value=skip_data_value,
                skip_attribute_value=skip_attribute_value,
            )
            if rst_element is None:
                continue
            rst_elements[line_number].append(rst_element)

    return rst_elements
4b3a055e47b7c859216b26ec50bf21bcec3af076
21,411
def ganache_url(host='127.0.0.1', port='7445'):
    """Return URL for Ganache test server."""
    return f"http://{host}:{port}"
9de6e2c26c0e1235a14c8dd28040fcdfb8a36a7f
21,412
def to_news_detail_list_by_period(uni_id_list: list, start: str, end: str) -> list:
    """
    Given a list of unified social credit codes, fetch each company's news
    detail list for the given date range, serially.
    :param end: end date
    :param start: start date
    :param uni_id_list: list of unified social credit codes
    :return: list of news details
    """
    detail_list = []
    for uni_id in uni_id_list:
        for summary in to_news_summary_list_by_period(uni_id, start, end):
            detail_list.append(to_news_detail_by_summary(summary))
    return detail_list
27493cc0f50a443cea69be74cd2bb1c494e1687f
21,413
from typing import List

import pandas as pd

def positions_to_df(positions: List[alp_api.entity.Asset]) -> pd.DataFrame:
    """Generate a df from alpaca api assets

    Parameters
    ----------
    positions : List[alp_api.entity.Asset]
        List of alpaca trade assets

    Returns
    -------
    pd.DataFrame
        Processed dataframe
    """
    df = pd.DataFrame(columns=["Symbol", "MarketValue", "Quantity", "CostBasis"])
    sym = []
    mv = []
    qty = []
    cb = []
    for pos in positions:
        sym.append(pos.symbol)
        mv.append(float(pos.market_value))
        qty.append(float(pos.qty))
        cb.append(float(pos.cost_basis))
    df["Symbol"] = sym
    df["MarketValue"] = mv
    df["Quantity"] = qty
    df["CostBasis"] = cb
    df["Broker"] = "alp"
    return df
5f77f4862f0244ba66e3d99e8a34e2dd8a56d91d
21,414
import requests
from bs4 import BeautifulSoup

def get_all_pages(date):
    """For the specific date, get all page URLs."""
    r = requests.get(URL, params={"search": date})
    soup = BeautifulSoup(r.text, "html.parser")
    return [
        f"https://www.courts.phila.gov/{url}"
        for url in set([a["href"] for a in soup.select(".pagination li a")])
    ]
f74e2167498fa8eb95e81c07c49a79b690adfcb2
21,415
from typing import List

import numpy as np

def boundary_condition(
    outer_bc_geometry: List[float],
    inner_bc_geometry: List[float],
    bc_num: List[int],
    T_end: float,
):
    """Generate BC points for outer and inner boundaries."""
    x_l, x_r, y_d, y_u = outer_bc_geometry
    xc_l, xc_r, yc_d, yc_u = inner_bc_geometry
    N_x, N_y, N_t, N_bc = bc_num
    N_bc = N_bc // 4 + 1

    # generate bc for outer boundary
    left_points = np.stack((np.ones(N_y) * x_l, np.linspace(y_d, y_u, N_y)), 1)
    right_points = np.stack((np.ones(N_y) * x_r, np.linspace(y_d, y_u, N_y)), 1)
    t_lr = np.repeat(np.linspace(0, T_end, N_t), N_y).reshape(-1, 1)
    X_left = np.hstack((t_lr, np.vstack([left_points for _ in range(N_t)])))
    X_right = np.hstack((t_lr, np.vstack([right_points for _ in range(N_t)])))
    X_lr = np.concatenate((X_left, X_right), 1)
    lr_idx = np.random.choice(len(X_lr), size=N_bc, replace=False)
    X_lr = X_lr[lr_idx]

    down_points = np.stack((np.linspace(x_l, x_r, N_x), np.ones(N_x) * y_d), 1)
    up_points = np.stack((np.linspace(x_l, x_r, N_x), np.ones(N_x) * y_u), 1)
    t_du = np.repeat(np.linspace(0, T_end, N_t), N_x).reshape(-1, 1)
    X_down = np.hstack((t_du, np.vstack([down_points for _ in range(N_t)])))
    X_up = np.hstack((t_du, np.vstack([up_points for _ in range(N_t)])))
    X_du = np.concatenate((X_down, X_up), 1)
    ud_idx = np.random.choice(len(X_du), size=N_bc, replace=False)
    X_du = X_du[ud_idx]
    X_bc_outer = (X_lr, X_du)

    # generate bc for inner boundary
    left_points = np.stack((np.ones(N_y) * xc_l, np.linspace(yc_d, yc_u, N_y)), 1)
    right_points = np.stack((np.ones(N_y) * xc_r, np.linspace(yc_d, yc_u, N_y)), 1)
    t_lr = np.repeat(np.linspace(0, T_end, N_t), N_y).reshape(-1, 1)
    X_left = np.hstack((t_lr, np.vstack([left_points for _ in range(N_t)])))
    X_right = np.hstack((t_lr, np.vstack([right_points for _ in range(N_t)])))
    X_lr = np.concatenate((X_left, X_right), 1)
    lr_idx = np.random.choice(len(X_lr), size=N_bc, replace=False)
    X_lr = X_lr[lr_idx]

    down_points = np.stack((np.linspace(xc_l, xc_r, N_x), np.ones(N_x) * yc_d), 1)
    up_points = np.stack((np.linspace(xc_l, xc_r, N_x), np.ones(N_x) * yc_u), 1)
    t_du = np.repeat(np.linspace(0, T_end, N_t), N_x).reshape(-1, 1)
    X_down = np.hstack((t_du, np.vstack([down_points for _ in range(N_t)])))
    X_up = np.hstack((t_du, np.vstack([up_points for _ in range(N_t)])))
    X_du = np.concatenate((X_down, X_up), 1)
    ud_idx = np.random.choice(len(X_du), size=N_bc, replace=False)
    X_du = X_du[ud_idx]
    X_bc_inner = (X_lr, X_du)

    return X_bc_outer, X_bc_inner
5941e213a7c48e39b79969d70b9a53a52207272f
21,416
def extractInfiniteNovelTranslations(item):
    """
    # Infinite Novel Translations
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None

    tagmap = [
        ('Ascendance of a Bookworm', 'Ascendance of a Bookworm', 'translated'),
        ('Yomigaeri no Maou', 'Yomigaeri no Maou', 'translated'),
        ('Kakei Senki wo Kakageyo!', 'Kakei Senki wo Kakageyo!', 'translated'),
        ('Kuro no Shoukan Samurai', 'Kuro no Shoukan Samurai', 'translated'),
        ('Nidoume no Jinsei wo Isekai de', 'Nidoume no Jinsei wo Isekai de', 'translated'),
        ('Hachi-nan', 'Hachinan tte, Sore wa Nai Deshou!', 'translated'),
        ('Summoned Slaughterer', 'Yobidasareta Satsuriku-sha', 'translated'),
        ('maou no utsuwa', 'Maou no Utsuwa', 'translated'),
        ('Maou no Ki', 'Maou no Ki', 'translated'),
        ('Imperial wars and my stratagems', 'Imperial Wars and my Stratagems', 'translated'),
        ('Kuro no Shoukanshi', 'Kuro no Shoukanshi', 'translated'),
        ('I work as Healer in Another World\'s Labyrinth City',
         'I work as Healer in Another World\'s Labyrinth City', 'translated'),
        ('The Spearmaster and The Black Cat', 'The Spearmaster and The Black Cat', 'translated'),
        ('Hakai no Miko', 'Hakai no Miko', 'translated'),
    ]

    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag,
                                               postfix=postfix, tl_type=tl_type)

    return False
0b31fcb764840242869eb3aa224b5f04d28beff8
21,417
def fill_diagonal(matrix, value, k=0, unpadded_dim=None):
    """
    Returns a matrix identical to `matrix` except that the `k'th` diagonal
    has been overwritten with the value `value`.

    Args:
        matrix: Matrix whose diagonal to fill.
        value: The value to fill the diagonal with.
        k: The diagonal to fill.
        unpadded_dim: If specified, only the `unpadded_dim x unpadded_dim`
            top left block will be filled.

    Returns:
        A copy of `matrix`, with the `k'th` diagonal replaced by `value`.
    """
    replace_here = on_kth_diagonal(matrix.shape, k=k, unpadded_dim=unpadded_dim)
    replace_with = jnp.full(replace_here.shape[1], value)
    return jnp.where(replace_here, x=replace_with, y=matrix)
d3dddf35b70788d832b6df119de8ba2760bb7fa7
21,418
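on_kth_diagonal is a helper from the enclosing module; a hypothetical sketch of the boolean mask it presumably builds, using jnp.eye's k offset, shows why jnp.where then suffices:

import jax.numpy as jnp

def on_kth_diagonal_sketch(shape, k=0):
    # True exactly on the k'th diagonal of a (rows, cols) matrix.
    return jnp.eye(shape[0], shape[1], k=k, dtype=bool)

m = jnp.zeros((3, 3))
print(jnp.where(on_kth_diagonal_sketch(m.shape, k=1), 7.0, m))
# [[0. 7. 0.]
#  [0. 0. 7.]
#  [0. 0. 0.]]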
def loop_filter(data, images, features, matches, pairs):
    """
    if there's an edge between (i, j) where i and j are sequence numbers far
    apart, check that there also exists an edge (i plus/minus k, j plus/minus
    k), where k is a small integer, and that the loop formed by the four nodes
    passes the multiplying-to-identity check. if so, this is a valid "quad".

    we then merge quads into clusters. each cluster is a loop candidate. we
    perform checks on the candidates to filter out bad ones, and remove all
    edges in them.

    :param data:
    :param images:
    :param matches:
    :param pairs:
    :return:
    """
    logger.debug("loop pass 1 filtering start")
    common_feature_thresh = data.config['filtering_common_feature_thresh']

    # TODO: cren optionize the following thresholds
    gap = 6

    edges_to_remove = []
    all_valid_triplets = []
    for (im1, im2) in matches:
        if abs(_shot_id_to_int(im1) - _shot_id_to_int(im2)) > gap:
            valid_triplets = get_valid_triplets(im1, im2, matches, pairs)
            if valid_triplets:
                all_valid_triplets.extend(valid_triplets)
            else:
                edges_to_remove.append((im1, im2))

    for edge in sorted(edges_to_remove):
        logger.debug("loop pass 1 removing edge {} -{}".format(edge[0], edge[1]))
        matches.pop(edge)

    logger.debug("loop pass 1 filtering end, removed {} edges, {:2.1f}% of all".
                 format(len(edges_to_remove), 100*len(edges_to_remove)/len(pairs)))

    logger.debug("loop pass 2 filtering start")
    radius = gap/2

    valid_triplets_set = set(tuple(triplet) for triplet in all_valid_triplets)

    # cluster quads into loop candidates
    loop_candidates = cluster_triplets(valid_triplets_set, radius)

    # apply various checks to figure out bad loop candidates
    bad_candidates = filter_candidates(images, loop_candidates, matches,
                                       features, pairs, common_feature_thresh)

    # remove matches in bad loop candidates
    edges_to_remove = set()
    for cand in bad_candidates:
        loop_candidates.remove(cand)
        for im1 in cand.get_ids_0():
            for im2 in cand.get_ids_1():
                if abs(_shot_id_to_int(im1) - _shot_id_to_int(im2)) > radius:
                    if (im1, im2) in matches:
                        edges_to_remove.add((im1, im2))
                    elif (im2, im1) in matches:
                        edges_to_remove.add((im2, im1))

    for edge in sorted(edges_to_remove):
        #logger.debug("loop removing edge {} -{}".format(edge[0], edge[1]))
        matches.pop(edge)

    logger.debug("loop pass 2 filtering end, removed {} edges, {:2.1f}% of all".
                 format(len(edges_to_remove), 100*len(edges_to_remove)/len(pairs)))

    return matches
27a15edb5cae636ce6df3f64517cb6089e2a34f4
21,419
from typing import Optional
from typing import Callable
import functools

def container_model(*, model: type, caption: str, icon: Optional[str]) -> Callable:
    """
    ``container_model`` is an object that keeps together many different
    properties defined by the plugin and allows developers to build user
    interfaces in a declarative way similar to :func:`data_model`.

    ``container_model`` can also hold a reference to a :func:`data_model`
    declared from the plugin, making this object a parent for all new
    :func:`data_model` created.

    .. rubric:: **Application Required**:

    The following options are required when declaring a ``container_model``.

    :param caption: A text to be displayed over the Tree.
    :param icon: Name of the icon to be used over the Tree.
    :param model: A reference to a class decorated with :func:`data_model`.

    .. note::

        Even though the icon parameter is required, it's not currently
        being used.

    .. rubric:: **Plugin defined**:

    Visual elements that allow the user to input information into the
    application, or to better arrange the user interface.

    :Input Fields: Visual elements that allow the user to provide input
        information into the application.
    :Layout: Elements that assist the developer to arrange input fields
        in a meaningful way.

    Check the section :ref:`visual elements <api-types-section>` to see all
    inputs available, and :ref:`layout elements <api-layout-section>` to see
    all layouts available.

    .. rubric:: Example

    myplugin.py

    .. code-block:: python

        @data_model(icon="", caption="My Child")
        class ChildModel:
            distance = Quantity(value=1, unit="m", caption="Distance")

        @container_model(icon='', caption='My Container', model=ChildModel)
        class MyModelContainer:
            my_string = String(value='Initial Value', caption='My String')

        @alfasim_sdk.hookimpl
        def alfasim_get_data_model_type():
            return [MyModelContainer]

    .. image:: /_static/images/api/container_model_example_1_1.png
        :scale: 70%

    .. image:: /_static/images/api/container_model_example_1_2.png
        :scale: 70%

    .. image:: /_static/images/api/container_model_example_1_3.png
        :scale: 70%

    Container data also automatically includes two actions for the model:

    .. rubric:: Action: Create new Model

    An action that creates a new model inside the selected container. You can
    activate this action by right-clicking the container over the Tree, or by
    clicking on the "Plus" icon available at the ``Model Explorer``.

    .. image:: /_static/images/api/container_model_new_model_1.png
        :scale: 80%

    .. image:: /_static/images/api/container_model_new_model_2.png
        :scale: 80%

    .. rubric:: Action: Remove

    An action that removes the selected model, only available for models
    inside a container. You can activate this action by right-clicking the
    model over the Tree, or by clicking on the "Trash" icon available at the
    ``Model Explorer``.

    .. image:: /_static/images/api/container_model_remove_1.png
        :scale: 80%

    .. image:: /_static/images/api/container_model_remove_2.png
        :scale: 80%
    """
    def apply(class_):
        @functools.wraps(class_)
        def wrap_class(class_, caption, icon):
            return get_attr_class(class_, caption, icon, model)

        return wrap_class(class_, caption, icon)

    return apply
e06ad5ab45f75fcc02550497e290fb8c07193645
21,420
def ward_quick(G, feature, verbose=0):
    """
    Agglomerative function based on a topology-defining graph
    and a feature matrix.

    Parameters
    ----------
    G graph instance,
      topology-defining graph
    feature: array of shape (G.V, dim_feature):
      some vectorial information related to the graph vertices

    Returns
    -------
    t: weightForest instance,
       that represents the dendrogram of the data

    NOTE
    ----
    Hopefully a quicker version
    A euclidean distance is used in the feature space
    Caveat : only approximate
    """
    # basic check
    if feature.ndim == 1:
        feature = np.reshape(feature, (-1, 1))

    if feature.shape[0] != G.V:
        raise ValueError, "Incompatible dimension for the\
        feature matrix and the graph"

    Features = [np.ones(2 * G.V), np.zeros((2 * G.V, feature.shape[1])),
                np.zeros((2 * G.V, feature.shape[1]))]
    Features[1][:G.V] = feature
    Features[2][:G.V] = feature ** 2
    """
    Features = []
    for i in range(G.V):
        Features.append(np.reshape(feature[i], (1, feature.shape[1])))
    """
    n = G.V
    nbcc = G.cc().max() + 1

    # prepare a graph with twice the number of vertices
    K = _auxiliary_graph(G, Features)
    parent = np.arange(2 * n - nbcc).astype(np.int)
    pop = np.ones(2 * n - nbcc).astype(np.int)
    height = np.zeros(2 * n - nbcc)
    linc = K.left_incidence()
    rinc = K.right_incidence()

    # iteratively merge clusters
    q = 0
    while q < n - nbcc:
        # 1. find the lightest edges
        aux = np.zeros(2 * n)
        ape = np.nonzero(K.weights < np.infty)
        ape = np.reshape(ape, np.size(ape))
        idx = np.argsort(K.weights[ape])
        for e in range(n - nbcc - q):
            i, j = K.edges[ape[idx[e]], 0], K.edges[ape[idx[e]], 1]
            if aux[i] == 1:
                break
            if aux[j] == 1:
                break
            aux[i] = 1
            aux[j] = 1

        emax = np.maximum(e, 1)

        for e in range(emax):
            m = ape[idx[e]]
            cost = K.weights[m]
            k = q + n
            # if K.weights[m] >= stop: break
            i = K.edges[m, 0]
            j = K.edges[m, 1]
            height[k] = cost
            if verbose:
                print q, i, j, m, cost

            # 2. remove the current edge
            K.edges[m, :] = -1
            K.weights[m] = np.infty
            linc[i].remove(m)
            rinc[j].remove(m)

            ml = linc[j]
            if np.sum(K.edges[ml, 1] == i) > 0:
                m = ml[np.flatnonzero(K.edges[ml, 1] == i)]
                K.edges[m, :] = -1
                K.weights[m] = np.infty
                linc[j].remove(m)
                rinc[i].remove(m)

            # 3. merge the edges with third part edges
            parent[i] = k
            parent[j] = k
            for p in range(3):
                Features[p][k] = Features[p][i] + Features[p][j]
            """
            totalFeatures = np.vstack((Features[i], Features[j]))
            Features.append(totalFeatures)
            Features[i] = []
            Features[j] = []
            """
            linc, rinc = _remap(K, i, j, k, Features, linc, rinc)
            q += 1

    # build a tree to encode the results
    t = WeightedForest(2 * n - nbcc, parent, height)

    return t
a5c4e847bf6c70acfee1b5d5466b5310d40b528d
21,421
import torch.nn as nn

def conv3x3(in_planes, out_planes, stride=1, output_padding=0):
    """3x3 convolution transpose with padding"""
    return nn.ConvTranspose2d(in_planes, out_planes, kernel_size=3,
                              stride=stride, padding=1,
                              output_padding=output_padding, bias=False)
00c9b5123eaf408a1c4432b962739ec519851a59
21,422
def unwrap(func):
    """
    Returns the object wrapped by decorators.
    """
    def _is_wrapped(f):
        return hasattr(f, '__wrapped__')

    unwrapped_f = func
    while _is_wrapped(unwrapped_f):
        unwrapped_f = unwrapped_f.__wrapped__
    return unwrapped_f
17aa0c8cc91578fd1187784ad0396ed91c5ec9b8
21,423
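functools.wraps records each layer in __wrapped__, which is what the loop in unwrap above walks; a quick self-contained check:

import functools

def deco(f):
    @functools.wraps(f)
    def inner(*args, **kwargs):
        return f(*args, **kwargs)
    return inner

@deco
@deco
def greet():
    return "hi"

print(unwrap(greet) is greet.__wrapped__.__wrapped__)  # True: both are the bare function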
def get_payload_from_scopes(scopes):
    """
    Get a dict to be used in JWT payload.

    Just merge this dict with the JWT payload.

    :type scopes: list[rest_jwt_permission.scopes.Scope]
    :return: dictionary to be merged with the JWT payload
    :rtype: dict
    """
    return {
        get_setting("JWT_PAYLOAD_SCOPES_KEY"): [
            scope.identifier for scope in scopes
        ]
    }
d2192d2eef5cf6e5cc28d2125bef94c438075884
21,424
from typing import Dict

def missing_keys_4(data: Dict, lprint=print, eprint=print):
    """
    Add keys: _max_eval_all_epoch, _max_seen_train, _max_seen_eval,
    _finished_experiment
    """
    if "_finished_experiment" not in data:
        lprint(f"Add keys _finished_experiment ...")

        max_eval = -1
        for k1, v1 in data["_eval_trace"].items():
            for k2, v2 in v1.items():
                max_eval += len(v2)

        max_train = -1
        for k1, v1 in data["_train_trace"].items():
            for k2, v2 in v1.items():
                max_train += len(v2)

        data["_max_eval_all_epoch"] = max_eval
        data["_max_train_all_epoch"] = max_train
        data["_max_seen_train"] = max_seen_train = max(data["_train_trace"].keys())
        data["_max_seen_eval"] = max_seen_eval = max(data["_eval_trace"].keys())

        # Check whether the experiment finished or not
        no_tasks = len(data["_task_info"])
        epochs_per_task = data["_args"]["train"]["epochs_per_task"]
        should_train = no_tasks * epochs_per_task
        reached_max_train = should_train == max_train + 1
        same_seen = data["_max_seen_train"] == data["_max_seen_eval"]
        all_final_tasks_evaluated = len(data["_eval_trace"][max_seen_eval]) == no_tasks

        data["_finished_experiment"] = reached_max_train \
            and same_seen and all_final_tasks_evaluated
        return 1
    return 0
ad8d3f7c19dd4eefa0db465dd52b5e8dc8f0bd1e
21,425
def translate(txt):
    """Takes a plain Czech text as input and returns its phonetic
    transcription."""
    txt = txt.lower()
    txt = simple_replacement(txt)
    txt = regex_replacement(txt)
    txt = chain_replacement(txt)
    txt = grind(txt)
    return txt
a7f35b7be14dfed0d9a4e68e2e3113d97f2468cb
21,426
def struct_getfield_longlong(ffitype, addr, offset):
    """
    Return the field of type ``ffitype`` at ``addr+offset``, casted to
    lltype.LongLong.
    """
    value = _struct_getfield(lltype.SignedLongLong, addr, offset)
    return value
24ff5e6b35de48bccf810bb9b723852f1ab16fb6
21,427
def subscribe(request):
    """View to subscribe the logged in user to a channel"""
    if request.method == "POST":
        channels = set()
        users = set()
        for key in request.POST:
            if key.startswith("youtube-"):
                channel_id = key[8:]
                if models.YoutubeChannel.objects.filter(id=channel_id).exists():
                    channels.add(models.YoutubeChannel.objects.get(id=channel_id))
            elif key.startswith("twitch-"):
                user_id = key[7:]
                if models.TwitchUser.objects.filter(id=user_id).exists():
                    users.add(models.TwitchUser.objects.get(id=user_id))
        action = request.POST.get("action")
        if action == "Subscribe":
            for channel in channels:
                if not models.YoutubeSubscription.objects.filter(
                        channel=channel, user=request.user).exists():
                    models.YoutubeSubscription.objects.create(
                        channel=channel, user=request.user)
            for user in users:
                if not models.TwitchSubscription.objects.filter(
                        channel=user, user=request.user).exists():
                    models.TwitchSubscription.objects.create(
                        channel=user, user=request.user)
        elif action == "Unsubscribe" or action == "Remove from history":
            for channel in channels:
                for entry in models.YoutubeSubscription.objects.filter(
                        channel=channel, user=request.user):
                    entry.delete()
            for user in users:
                for entry in models.TwitchSubscription.objects.filter(
                        channel=user, user=request.user):
                    entry.delete()
        history = getattr(request.user, "subscriptionhistory", None)
        if action == "Remove from history" and history is not None:
            for channel in channels:
                history.youtube.remove(channel)
            for user in users:
                history.twitch.remove(user)
    return redirect("notifpy:subscriptions")
6ee833eb6536f7958f74f274a776b31fab7051dc
21,428
import subprocess
import sys

def RunCommand(cmd, print_cmd=True, error_ok=False, error_message=None,
               exit_code=False, redirect_stdout=False, redirect_stderr=False,
               cwd=None, input=None, enter_chroot=False, num_retries=0,
               log_to_file=None, combine_stdout_stderr=False):
    """Runs a shell command.

    Arguments:
        cmd: cmd to run. Should be input to subprocess.Popen. If a string,
            converted to an array using split().
        print_cmd: prints the command before running it.
        error_ok: does not raise an exception on error.
        error_message: prints out this message when an error occurs.
        exit_code: returns the return code of the shell command.
        redirect_stdout: returns the stdout.
        redirect_stderr: holds stderr output until input is communicated.
        cwd: the working directory to run this cmd.
        input: input to pipe into this command through stdin.
        enter_chroot: this command should be run from within the chroot. If
            set, cwd must point to the scripts directory.
        num_retries: the number of retries to perform before dying.
        log_to_file: redirects all stderr and stdout to the file specified by
            this path.
        combine_stdout_stderr: combines the stdout and stderr streams into
            stdout. Automatically set to true if log_to_file specifies a file.

    Returns:
        If exit_code is True, returns the return code of the shell command.
        Else returns the output of the shell command.

    Raises:
        Exception: Raises RunCommandException on error with optional
            error_message, but only if exit_code and error_ok are both False.
    """
    # Set defaults for variables.
    stdout = None
    stderr = None
    stdin = None
    file_handle = None
    output = ''

    # Modify defaults based on parameters.
    if log_to_file:
        file_handle = open(log_to_file, 'w+')
        stdout = file_handle
        stderr = file_handle
    else:
        if redirect_stdout:
            stdout = subprocess.PIPE
        if redirect_stderr:
            stderr = subprocess.PIPE
        if combine_stdout_stderr:
            stderr = subprocess.STDOUT
    if input:
        stdin = subprocess.PIPE
    if enter_chroot:
        cmd = ['cros_sdk', '--'] + cmd

    # Print out the command before running.
    cmd_string = 'PROGRAM(%s) -> RunCommand: %r in dir %s' % (_GetCallerName(),
                                                              cmd, cwd)
    if print_cmd:
        if not log_to_file:
            _Info(cmd_string)
        else:
            _Info('%s -- Logging to %s' % (cmd_string, log_to_file))

    for retry_count in range(num_retries + 1):
        # If it's not the first attempt, it's a retry.
        if retry_count > 0 and print_cmd:
            _Info('PROGRAM(%s) -> RunCommand: retrying %r in dir %s' %
                  (_GetCallerName(), cmd, cwd))
        proc = subprocess.Popen(cmd, cwd=cwd, stdin=stdin, stdout=stdout,
                                stderr=stderr, close_fds=True)
        (output, error) = proc.communicate(input)
        # If the command worked, don't retry any more.
        if proc.returncode == 0:
            break

    if file_handle:
        file_handle.close()

    # If they asked for an exit_code, give it to them on success or failure.
    if exit_code:
        return proc.returncode

    # If the command (and all retries) failed, handle the error result.
    if proc.returncode != 0 and not error_ok:
        if output:
            print >> sys.stderr, output
            sys.stderr.flush()
        error_info = ('Command "%r" failed.\n' % (cmd) +
                      (error_message or error or ''))
        if log_to_file:
            error_info += '\nOutput logged to %s' % log_to_file
        raise RunCommandException(error_info)

    # Return the final result.
    return output
16a033fce354f232159aaad51d4ffa1a5cc94e22
21,429
from collections import OrderedDict
import json

def eval_accuracies(hypotheses, references, sources=None, filename=None,
                    mode='dev'):
    """An unofficial evaluation helper.

    Arguments:
        hypotheses: A mapping from instance id to predicted sequences.
        references: A mapping from instance id to ground truth sequences.
        sources: Map of id --> input text sequence.
        filename: If given, per-instance predictions are logged to this
            file as JSON.
    """
    assert (sorted(references.keys()) == sorted(hypotheses.keys()))

    # Compute BLEU scores
    _, bleu, ind_bleu = corpus_bleu(hypotheses, references)

    # Compute ROUGE scores
    rouge_calculator = Rouge()
    rouge_l, ind_rouge = rouge_calculator.compute_score(references, hypotheses)

    # Compute METEOR scores
    if mode == 'test':
        meteor_calculator = Meteor()
        meteor, _ = meteor_calculator.compute_score(references, hypotheses)
    else:
        meteor = 0

    fw = open(filename, 'w') if filename else None
    for key in references.keys():
        if fw:
            pred_i = hypotheses[key]
            logobj = OrderedDict()
            logobj['id'] = key
            if sources is not None:
                logobj['code'] = sources[key]
            logobj['predictions'] = pred_i
            logobj['references'] = references[key]
            logobj['bleu'] = ind_bleu[key]
            logobj['rouge_l'] = ind_rouge[key]
            print(json.dumps(logobj), file=fw)

    if fw:
        fw.close()

    return bleu * 100, rouge_l * 100, meteor * 100
ede152bb51fcb53574eec0dfb84b6ca971289d5d
21,430
from datetime import datetime

def perform_login(db: Session, user: FidesopsUser) -> ClientDetail:
    """Performs a login by updating the FidesopsUser instance and creating
    and returning an associated ClientDetail."""
    client: ClientDetail = user.client
    if not client:
        logger.info("Creating client for login")
        client, _ = ClientDetail.create_client_and_secret(
            db, user.permissions.scopes, user_id=user.id
        )

    user.last_login_at = datetime.utcnow()
    user.save(db)

    return client
013875ba1ca30615690d8d477e1246174bdc6279
21,431
import time
import os
import shutil
import subprocess

def check(e, compiler_params, cmd=None, time_error=False, nerror=-1,
          nground=-1, nsamples=0, precompute_samples=None, print_command=False,
          log_rho=False, do_copy=True, extra_args='', do_run=True,
          do_compile=True, get_command=False, skip_save=False, ndims=-1,
          our_id=None, sanity_code=None, code_only=False):
    """
    Convert Expr to finalized source code and (by default) run to check the
    correctness or performance of the output code.

    If time_error is True then return time and error information in a dict.
    Here nerror and nground control the number of samples to estimate the
    error and estimate the ground truth by convolution (if -1, use default).

    If log_rho is True then return log_rho as a key in a dict containing
    correlation coefficients.

    If precompute_samples is an integer then set the #define
    PRECOMPUTE_SAMPLES to the given integer in problems.cpp.

    If do_compile is True then generate and compile C++ code. If do_run is
    True then run the target program (unless get_command is True, in which
    case, do not run but instead return the command to run, with full
    path-name included).
    """
    ans = {}
    csolver_path = '../apps'
    if our_id is None:
        our_id = util.machine_process_id()
    h_filename = our_id + COMPILER_PROBLEM_PY
    orig_h_filename = 'compiler_problem_orig.py'

    if ndims <= 0:
        try:
            arg_array = locate_argument_array(e)
            arg_array_ndims = arg_array.ndims
        except NoArgumentArray:
            arg_array_ndims = 1

    if cmd is None:
        check_g_int = 3
        if nsamples != 0:
            check_g_int = 0
        check_command_begin = 'python tf_parser.py '
        check_command = check_command_begin + '--ndims %d --check_g %d --samples %d' % (
            ndims if ndims > 0 else arg_array_ndims, check_g_int, nsamples)
    else:
        check_command = cmd

    if time_error:
        check_command += ' --error %d --ground %d' % (nerror, nground)
    if log_rho:
        check_command += ' --print_rho 1'
    if len(extra_args):
        check_command += ' ' + extra_args
    if skip_save:
        check_command += ' --skip_save 1'
    check_command += ' --our_id %s' % our_id

    if do_compile:
        T0 = time.time()
        source = to_source(e, compiler_params, info_d=ans, do_copy=do_copy,
                           sanity_code=sanity_code)
        T1 = time.time()
        h_filename_full = os.path.join(csolver_path, h_filename)
        current_source = ''
        if os.path.exists(h_filename_full):
            with open(h_filename_full, 'rt') as f:
                current_source = f.read()
        if current_source != source:
            with open(h_filename_full, 'wt') as f:
                f.write(source)
        shutil.copyfile(h_filename_full, orig_h_filename)
        if print_benchmark:
            print('Generated C++ code in %f seconds' % (T1 - T0))

    old_path = os.getcwd()
    os.chdir(csolver_path)
    if print_command and do_run:
        print(check_command)
    if get_command:
        ans = check_command
        do_run = False
    if do_run:
        T0 = time.time()
        check_out = subprocess.check_output(check_command, shell=True)
        T1 = time.time()
        check_out = check_out.decode('utf-8')
    os.chdir(old_path)
    if get_command:
        return ans
    return ans
56c80ab7bbdd10e1bb3c16ffba62a940a07fb0eb
21,432
def _bytestring_to_textstring(bytestring: str, number_of_registers: int = 16) -> str:
    """Convert a bytestring to a text string.

    Each 16-bit register in the slave is interpreted as two characters
    (1 byte = 8 bits). For example 16 consecutive registers can hold
    32 characters (32 bytes).

    Not much conversion is done here; it is mostly error checking.

    Args:
        * bytestring (str): The string from the slave.
          Length = 2 * number_of_registers
        * number_of_registers (int): The number of registers allocated
          for the string.

    Returns:
        The text string (str).

    Raises:
        TypeError, ValueError
    """
    _check_int(
        number_of_registers,
        minvalue=1,
        maxvalue=_MAX_NUMBER_OF_REGISTERS_TO_READ,
        description="number of registers",
    )
    max_characters = _NUMBER_OF_BYTES_PER_REGISTER * number_of_registers
    _check_string(
        bytestring, "byte string", minlength=max_characters, maxlength=max_characters
    )
    textstring = bytestring
    return textstring
474ac26b8fb3e454ce2747300c42b86df988ecc8
21,433
import numpy

def sigma_XH(elem, Teff=4500., M_H=0., SNR=100., dr=None):
    """
    NAME:
       sigma_XH
    PURPOSE:
       return the uncertainty in a given element at specified effective
       temperature, metallicity and signal to noise ratio (functional form
       taken from Holtzman et al 2015)
    INPUT:
       elem - string element name following the ASPCAP star naming convention,
              i.e. for DR12 carbon the string is 'C_H'
       Teff - effective temperature or array thereof in K, defaults to 4500 K
       M_H - metallicity or array thereof, defaults to 0
       SNR - signal to noise ratio or array thereof, defaults to 100
       dr - data release
    OUTPUT:
       float or array depending on shape of Teff, M_H and SNR input
    HISTORY:
       2017-07-24 - Written - Price-Jones (UofT)
    """
    if dr is None:
        dr = appath._default_dr()
    A, B, C, D = drcoeffs[dr][elem]
    logsig = A + B * ((Teff - 4500.) / 1000.) + C * M_H + D * (SNR - 100)
    return numpy.exp(logsig)
186974970505b21cb9978c8afcfbee1a9c0bf17c
21,434
from typing import Callable

def lazy_load_command(import_path: str) -> Callable:
    """Create a lazy loader for command"""
    _, _, name = import_path.rpartition('.')

    def command(*args, **kwargs):
        func = import_string(import_path)
        return func(*args, **kwargs)

    command.__name__ = name  # type: ignore

    return command
273e482412a079e5a59b84422ee409df7b3a7a1c
21,435
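import_string is assumed to come from the surrounding module (Airflow ships one in airflow.utils.module_loading); a standard-library sketch of the same resolve-on-first-call pattern:

import importlib

def import_string_sketch(import_path):
    # Resolve 'pkg.mod.attr' to the attribute, importing the module on demand.
    module_path, _, attr = import_path.rpartition('.')
    return getattr(importlib.import_module(module_path), attr)

join = import_string_sketch('os.path.join')  # os.path is imported only here
print(join('a', 'b'))  # a/b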
def tlam(func, tup):
    """Split tuple into arguments"""
    return func(*tup)
0e3a9b93b36795e6c11631f8c8852aba59724f88
21,436
from collections import Counter

import numpy as np
import pandas as pd

def sax_df_reformat(sax_data, sax_dict, meter_data, space_btw_saxseq=3):
    """Function to format SAX timeseries original data for SAX heatmap
    plotting."""
    counts_nb = Counter(sax_dict[meter_data])

    # Sort the counter dictionary per value
    # source: https://stackoverflow.com/questions/613183/how-do-i-sort-a-dictionary-by-value
    counter = {k: v for k, v in sorted(counts_nb.items(), key=lambda item: item[1])}
    keys = counter.keys()

    new_sax_df = pd.DataFrame(columns=sax_data[meter_data].columns)
    empty_sax_df = pd.DataFrame(columns=sax_data[meter_data].columns,
                                index=[' '] * space_btw_saxseq)

    for sax_seq in keys:
        if counter[sax_seq] > 10:
            empty_sax_df = pd.DataFrame(columns=sax_data[meter_data].columns,
                                        index=[' '] * space_btw_saxseq)
        else:
            s2 = min(int(round(space_btw_saxseq * (counter[sax_seq] / 5))),
                     space_btw_saxseq)
            empty_sax_df = pd.DataFrame(columns=sax_data[meter_data].columns,
                                        index=[' '] * s2)

        # Obtaining sax indexes of corresponding profiles within dataframe
        indexes = [i for i, x in enumerate(sax_dict[meter_data]) if x == sax_seq]  # returns all indexes

        # Formatting a new dataframe from the selected sax_seq
        df_block = sax_data[meter_data].iloc[indexes].copy()
        df_block["SAX"] = [sax_seq] * len(indexes)
        new_sax_df = pd.concat([df_block, empty_sax_df, new_sax_df], axis=0)  # Reformatted dataframe

    # Mapping the sax sequence to the data
    index_map_dictionary = dict()
    index_map_dictionary["SAX_seq"], index_map_dictionary["SAX_idx"] = [], []
    for sax_seq in counter:
        indexes = [i for i, x in enumerate(new_sax_df["SAX"]) if x == sax_seq]  # returns all indexes
        #index_map_dictionary["SAX_seq"].append(sax_seq)
        if counter[sax_seq] > 10:
            index_map_dictionary["SAX_seq"].append(sax_seq)
        else:
            index_map_dictionary["SAX_seq"].append(" ")
        index_map_dictionary["SAX_idx"].append(np.median(indexes))

    # Dropping the SAX column of the dataframe now that we have a mapping
    # variable for it
    new_sax_df.drop("SAX", axis=1, inplace=True)

    return new_sax_df, index_map_dictionary
6e241979d673910da2acfd522d1c32a3f1d815a8
21,437
def filter_not_t(func):
    """
    Transformation for Sequence.filter_not

    :param func: filter_not function
    :return: transformation
    """
    return Transformation(
        "filter_not({0})".format(name(func)),
        partial(filterfalse, func),
        {ExecutionStrategies.PARALLEL},
    )
af548f7cfa60f5b598ad3527d8eaabca09aed4e6
21,438
def get_task_by_id(id):
    """Return task by its ID"""
    return TaskJson.json_by_id(id)
c1b1a4137cdab853e7d6c02167b914367120972a
21,439
def priority(floors, elevator):
    """Priority for a State."""
    priority = 3 - elevator
    for i, floor in enumerate(floors):
        priority += (3 - i) * len(floor)
    return priority
b65abac24fb85f50425f2adfd4d98786b41c9a2d
21,440
from typing import Union
from typing import List

def get_user_groups(user_id: Union[int, str]) -> List[UserSerializer]:
    """
    Get all Groups of the given User.

    Args:
        user_id: the {login} or {id} of the User.

    Returns:
        A list of Groups. Yuque treats Groups as Users here.
    """
    uri = f'/users/{user_id}/groups'
    method = 'GET'
    anonymous = True
    return Request.send(method, uri, anonymous=anonymous)
de02631693c6b31c566f93ee4cdc96bee3db024a
21,441
def user_news_list():
    """
    News list
    :return:
    """
    user = g.user
    page = request.args.get("page")
    try:
        page = int(page)
    except Exception as e:
        current_app.logger.error(e)
        page = 1

    # query
    news_list = []
    current_page = 1
    total_page = 1
    try:
        paginate = user.news_list.paginate(page, constants.OTHER_NEWS_PAGE_MAX_COUNT, False)
        news_list = paginate.items
        current_page = paginate.page
        total_page = paginate.pages
    except Exception as e:
        current_app.logger.error(e)

    news_dict_li = [news.to_review_dict() for news in news_list]
    data = {
        "news_dict_li": news_dict_li,
        "current_page": current_page,
        "total_page": total_page
    }
    print(news_list)
    return render_template("news/user_news_list.html", data=data)
adc202bfdbf2c2e2d7d60c949fdad028a56b63c0
21,442
def unificate_link(link):
    """Process whitespace, make first letter upper."""
    link = process_link_whitespace(link)
    if len(link) < 2:
        return link.upper()
    else:
        return link[0].upper() + link[1:]
4d9a5a4a88141f2a8e6400186c607615470cabde
21,443
def compute_vel_acc(robo, symo, antRj, antPj, forced=False, gravity=True,
                    floating=False):
    """Internal function. Computes speeds and accelerations.

    Parameters
    ==========
    robo : Robot
        Instance of robot description container
    symo : symbolmgr.SymbolManager
        Instance of symbolic manager
    """
    # init velocities and accelerations
    w = ParamsInit.init_w(robo)
    wdot, vdot = ParamsInit.init_wv_dot(robo, gravity)

    # decide first link
    first_link = 1
    if floating or robo.is_floating or robo.is_mobile:
        first_link = 0

    # init auxiliary matrix
    U = ParamsInit.init_u(robo)

    for j in xrange(first_link, robo.NL):
        if j == 0:
            w[j] = symo.mat_replace(w[j], 'W', j)
            wdot[j] = symo.mat_replace(wdot[j], 'WP', j)
            vdot[j] = symo.mat_replace(vdot[j], 'VP', j)
            dv0 = ParamsInit.product_combinations(w[j])
            symo.mat_replace(dv0, 'DV', j)
            hatw_hatw = Matrix([
                [-dv0[3]-dv0[5], dv0[1], dv0[2]],
                [dv0[1], -dv0[5]-dv0[0], dv0[4]],
                [dv0[2], dv0[4], -dv0[3]-dv0[0]]
            ])
            U[j] = hatw_hatw + tools.skew(wdot[j])
            symo.mat_replace(U[j], 'U', j)
        else:
            jRant = antRj[j].T
            qdj = Z_AXIS * robo.qdot[j]
            qddj = Z_AXIS * robo.qddot[j]
            wi, w[j] = _omega_ij(robo, j, jRant, w, qdj)
            symo.mat_replace(w[j], 'W', j)
            symo.mat_replace(wi, 'WI', j)
            _omega_dot_j(robo, j, jRant, w, wi, wdot, qdj, qddj)
            symo.mat_replace(wdot[j], 'WP', j, forced)
            _v_dot_j(robo, symo, j, jRant, antPj, w, wi, wdot, U, vdot,
                     qdj, qddj)
            symo.mat_replace(vdot[j], 'VP', j, forced)

    return w, wdot, vdot, U
474729b9329ee21d4bcfffb33916d8d85a21ea62
21,444
import numpy as np

def _sample_n_k(n, k):
    """Sample k distinct elements uniformly from range(n)"""
    if not 0 <= k <= n:
        raise ValueError("Sample larger than population or is negative")
    if 3 * k >= n:
        return np.random.choice(n, k, replace=False)
    else:
        result = np.random.choice(n, 2 * k)
        selected = set()
        selected_add = selected.add
        j = k
        for i in range(k):
            x = result[i]
            while x in selected:
                x = result[i] = result[j]
                j += 1
                if j == 2 * k:
                    # This is slow, but it rarely happens.
                    result[k:] = np.random.choice(n, k)
                    j = k
            selected_add(x)
        return result[:k]
3aad3ed36590655ef079a4d39745d6c59ec499a8
21,445
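The rejection branch above avoids the O(n) setup cost of replace=False when k is much smaller than n, resampling only on collisions; a quick distinctness check:

import numpy as np

np.random.seed(0)
sample = _sample_n_k(10**6, 100)         # k << n: takes the rejection branch
print(len(set(sample.tolist())) == 100)  # True: all k elements are distinct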
def _all_usage_keys(descriptors, aside_types):
    """
    Return a set of all usage_ids for the `descriptors` and for all asides
    in `aside_types` for those descriptors.
    """
    usage_ids = set()
    for descriptor in descriptors:
        usage_ids.add(descriptor.scope_ids.usage_id)
        for aside_type in aside_types:
            usage_ids.add(AsideUsageKeyV1(descriptor.scope_ids.usage_id, aside_type))
            usage_ids.add(AsideUsageKeyV2(descriptor.scope_ids.usage_id, aside_type))
    return usage_ids
75652e9468e6a61763b407bf11d644b1d08dd38c
21,446
def svn_client_invoke_get_commit_log2(*args):
    """svn_client_invoke_get_commit_log2(svn_client_get_commit_log2_t _obj,
    apr_array_header_t commit_items, void * baton, apr_pool_t pool)
    -> svn_error_t"""
    return _client.svn_client_invoke_get_commit_log2(*args)
fe7652c7e1573c3d688ddde40630b9b24e5bb48c
21,447
import numpy as np

def round_extent(extent, cellsize):
    """Increases the extent until all sides lie on a coordinate divisible
    by cellsize."""
    xmin, ymin, xmax, ymax = extent
    xmin = np.floor(xmin / cellsize) * cellsize
    ymin = np.floor(ymin / cellsize) * cellsize
    xmax = np.ceil(xmax / cellsize) * cellsize
    ymax = np.ceil(ymax / cellsize) * cellsize
    return xmin, ymin, xmax, ymax
384cf262f5dd206b0755623ce6d859e4f82efa86
21,448
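Because the minima are floored and the maxima are ceiled, the snapped box always contains the original extent; for example:

print(round_extent((0.3, -1.2, 10.7, 4.1), cellsize=0.5))
# (0.0, -1.5, 11.0, 4.5)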
def add_numbers():
    """Add two numbers server side, ridiculous but well..."""
    # a = request.args.get('a', 0, type=str)  # input from html
    a = request.args.get('a')
    print(a)
    result = chatbot.main(a)
    print("Result: ", result)
    # input from html
    returned = a
    # return jsonify(returned)
    return jsonify(''.join(result))
    # return jsonify(result=returned[0])  # return something back
d0e670ea0fc7bff33d5419316f5ebddf12cecea0
21,449
import re

def _parse_stack_info(line, re_obj, crash_obj, line_num):
    """
    :param line: line string
    :param re_obj: re compiled object
    :param crash_obj: CrashInfo object
    :return: (crash_obj, re_obj, complete: bool)
    """
    if re_obj is None:
        re_obj = re.compile(_match_stack_item_re())
    complete = False
    match_obj = re_obj.match(line)
    if match_obj is not None:
        stack_item = StackItemInfo()
        stack_item.name = match_obj.group(1)
        stack_item.invoke_address = match_obj.group(2)
        stack_item.load_address = match_obj.group(3)
        stack_item.line_num = line_num
        crash_obj.function_stacks.append(stack_item)
    elif re.match(_match_image_header_re(), line) is not None:
        complete = True
        re_obj = None
    return (crash_obj, re_obj, complete)
b360ef9c6d96092f59952fec90fdc41b2463c780
21,450
import math

def Orbiter(pos, POS, veloc, MASS, mass):
    """
    Find the new position and velocity of an Orbiter

    Parameters
    ----------
    pos : list
        Position vector of the orbiter.
    POS : list
        Position vector of the centre object.
    veloc : list
        Velocity of the orbiter.
    MASS : int
        Mass of the centre object.
    mass : int
        Mass of the orbiter.

    Returns
    -------
    list
        Returns a list of two vectors, the first being the new position
        vector and the second being the new velocity vector.
    """
    # finding the orbital radius
    rad = math.sqrt(((pos[0] - POS[0]) ** 2) + ((pos[1] - POS[1]) ** 2))

    # getting the acceleration; ((pos[i] - POS[i]) / rad) is the unit vector
    # pointing from the centre object to the orbiter
    acc = [(-(G * MASS) / (rad ** 2)) * ((pos[0] - POS[0]) / rad),
           (-(G * MASS) / (rad ** 2)) * ((pos[1] - POS[1]) / rad)]

    # getting the new velocity vector
    for i in range(2):
        veloc[i] += acc[i] * timeFrameLength

    # getting the new position
    for i in range(2):
        pos[i] += veloc[i] * timeFrameLength

    return [pos, veloc]
8d6c08fc1a7fa1165550e13944c1dbda414e6e62
21,451
def build_heading(win, readonly=False):
    """Generate heading text for screen"""
    if not win.parent().albumdata['artist'] or not win.parent().albumdata['titel']:
        text = 'Opvoeren nieuw {}'.format(TYPETXT[win.parent().albumtype])
    else:
        wintext = win.heading.text()
        newtext = ''
        for text in ('tracks', 'opnames'):
            if wintext == text:
                newtext = ': {}'.format(wintext)
                break
            elif wintext.endswith(text):
                newtext = ': {}'.format(text)
                break
        text = 'G' if readonly else 'Wijzigen g'
        text = '{}egevens van {} {} - {} {}'.format(
            text, TYPETXT[win.parent().albumtype],
            win.parent().albumdata['artist'],
            win.parent().albumdata['titel'], newtext)
    return text
133f4111171ab0bd04bed82455ced9aa9dcc010b
21,452
def GetTraceValue():
    """Return a value to be used for the trace header."""
    # Token to be used to route service request traces.
    trace_token = properties.VALUES.core.trace_token.Get()
    # Username to which service request traces should be sent.
    trace_email = properties.VALUES.core.trace_email.Get()
    # Enable/disable server side logging of service requests.
    trace_log = properties.VALUES.core.trace_log.GetBool()

    if trace_token:
        return 'token:{0}'.format(trace_token)
    elif trace_email:
        return 'email:{0}'.format(trace_email)
    elif trace_log:
        return 'log'
    return None
67c1fc9d0602ca25c02dd088e1abba1ad951022f
21,453
import six
import os
import errno

def get_file_size(file_obj):
    """Get the size of a file object.

    get_file_size(open('/home/ubuntu-14.04.3-desktop-amd64.iso'))

    :param file_obj: file-like object.
    """
    if (hasattr(file_obj, 'seek') and hasattr(file_obj, 'tell') and
            (six.PY2 or six.PY3 and file_obj.seekable())):
        try:
            curr = file_obj.tell()
            file_obj.seek(0, os.SEEK_END)
            size = file_obj.tell()
            file_obj.seek(curr)
            return size
        except IOError as e:
            if e.errno == errno.ESPIPE:
                # Illegal seek. This means the file object
                # is a pipe (e.g. the user is trying
                # to pipe image data to the client,
                # echo testdata | bin/glance add blah...), or
                # that file object is empty, or that a file-like
                # object which doesn't support 'seek/tell' has
                # been supplied.
                return
            else:
                raise
3fd9a4ae91fb302237739da319a53f0b8db04c49
21,454
from typing import Optional
from typing import Union
from typing import Tuple

def sql(
    where: str, parameters: Optional[Parameters] = None
) -> Union[str, Tuple[str, Parameters]]:
    """
    Return a SQL query, usable for querying the TransitMaster database.

    If provided, parameters are returned duplicated, to account for the fact
    that the WHERE clause is also duplicated.
    """
    formatted = SQL.format(where=where)
    if parameters is None:
        return formatted
    return (formatted, parameters + parameters)
22ca2194f355deaa4fc55b458c1f1a013ab2902e
21,455
def clip(a, a_min, a_max):
    """Clips the values of an array to a given interval.

    Given an interval, values outside the interval are clipped to the
    interval edges. For example, if an interval of ``[0, 1]`` is specified,
    values smaller than 0 become 0, and values larger than 1 become 1.

    Args:
        a (~chainerx.ndarray): Array containing elements to clip.
        a_min (scalar): Minimum value.
        a_max (scalar): Maximum value.

    Returns:
        ~chainerx.ndarray: An array with the elements of ``a``, but where
        values < ``a_min`` are replaced with ``a_min``, and those > ``a_max``
        with ``a_max``.

    Note:
        The :class:`~chainerx.ndarray` typed ``a_min`` and ``a_max`` are not
        supported yet.

    Note:
        During backpropagation, this function propagates the gradient of the
        output array to the input array ``a``.

    .. seealso:: :func:`numpy.clip`
    """
    if a_min is None and a_max is None:
        raise ValueError('Must set either a_min or a_max.')
    if a_min is not None:
        a = chainerx.maximum(a, a_min)
    if a_max is not None:
        a = chainerx.minimum(a, a_max)
    return a
0394b68329c48198ade9a3131c6c26940f09a154
21,456
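The same maximum/minimum composition works verbatim in NumPy, which makes the clipping behaviour easy to verify:

import numpy as np

a = np.array([-2.0, 0.5, 3.0])
clipped = np.minimum(np.maximum(a, 0.0), 1.0)  # maximum enforces a_min, minimum enforces a_max
print(clipped)                                 # [0.  0.5 1. ]
print(np.array_equal(clipped, np.clip(a, 0.0, 1.0)))  # True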
def hole_eigenvalue_residual(
    energy: floatarray, particle: "CoreShellParticle"
) -> float:
    """This function returns the residual of the hole energy level eigenvalue
    equation. Used with root-finding methods to calculate the lowest energy
    state.

    Parameters
    ----------
    energy : float, eV
        The energy for which to calculate the wavevector of a hole in the
        nanoparticle.

    particle : CoreShellParticle
        The particle for which to calculate the hole wavevectors. We pass in
        the particle directly since there are a lot of parameters to pass in
        and this keeps the interface clean.

    References
    ----------
    .. [1] Piryatinski, A., Ivanov, S. A., Tretiak, S., & Klimov, V. I.
        (2007). Effect of Quantum and Dielectric Confinement on the
        Exciton−Exciton Interaction Energy in Type II Core/Shell
        Semiconductor Nanocrystals. Nano Letters, 7(1), 108–115.
        https://doi.org/10.1021/nl0622404

    .. [2] Li, L., Reiss, P., & Protie, M. (2009). Core/Shell Semiconductor
        Nanocrystals, (2), 154–168. https://doi.org/10.1002/smll.200800841
    """
    core_hole_wavenumber, shell_hole_wavenumber = (None, None)

    if particle.type_one:
        core_hole_wavenumber = wavenumber_from_energy(energy, particle.cmat.m_h)
        shell_hole_wavenumber = wavenumber_from_energy(
            energy, particle.smat.m_h, potential_offset=particle.uh
        )
    elif particle.type_one_reverse:
        core_hole_wavenumber = wavenumber_from_energy(
            energy, particle.cmat.m_h, potential_offset=particle.uh
        )
        shell_hole_wavenumber = wavenumber_from_energy(energy, particle.smat.m_h)
    elif particle.type_two:
        if particle.e_h:
            core_hole_wavenumber = wavenumber_from_energy(
                energy, particle.cmat.m_h, potential_offset=particle.uh
            )
            shell_hole_wavenumber = wavenumber_from_energy(energy, particle.smat.m_h)
        elif particle.h_e:
            core_hole_wavenumber = wavenumber_from_energy(energy, particle.cmat.m_h)
            shell_hole_wavenumber = wavenumber_from_energy(
                energy, particle.smat.m_h, potential_offset=particle.uh
            )

    core_x = core_hole_wavenumber * particle.core_width
    shell_x = shell_hole_wavenumber * particle.shell_width
    core_width = particle.core_width
    shell_width = particle.shell_width
    mass_ratio = particle.smat.m_h / particle.cmat.m_h

    if type(core_x) in [np.float64, np.complex128]:
        return np.real(
            (1 - 1 / _tanxdivx(core_x)) * mass_ratio
            - 1
            - 1 / _tanxdivx(shell_x) * core_width / shell_width
        )
    else:
        return np.real(
            (1 - 1 / tanxdivx(core_x)) * mass_ratio
            - 1
            - 1 / tanxdivx(shell_x) * core_width / shell_width
        )
500033e927c29595c67d2e2327ebe1ae6d39cfd0
21,457
def open_raster(filename):
    """Take a file path as a string and return a gdal datasource object"""
    # register all of the GDAL drivers
    gdal.AllRegister()

    # open the image
    img = gdal.Open(filename, GA_ReadOnly)
    if img is None:
        print 'Could not open %s' % filename
        sys.exit(1)
    else:
        return img
b1c002be50b59e74a327943af8613b11cddf9b88
21,458
def reduce2latlon_seasonal(mv, season=seasonsyr, region=None, vid=None,
                           exclude_axes=[], seasons=seasonsyr):
    """As reduce2lat_seasonal, but both lat and lon axes are retained.
    Axis names (ids) may be listed in exclude_axes, to exclude them from
    the averaging process."""
    # backwards compatibility with old keyword 'seasons':
    if seasons != seasonsyr:
        season = seasons
    return reduce2any(mv, target_axes=['x', 'y'], season=season,
                      region=region, vid=vid, exclude_axes=exclude_axes)
7f101ce4ac5d4382d287901607c455b4d922f847
21,459
def GetFreshAccessTokenIfEnabled(account=None,
                                 scopes=None,
                                 min_expiry_duration='1h',
                                 allow_account_impersonation=True):
    """Returns a fresh access token of the given account or the active account.

    Same as GetAccessTokenIfEnabled except that the access token returned by
    this function is valid for at least min_expiry_duration.

    Args:
        account: str, The account to get the access token for. If None, the
            account stored in the core.account property is used.
        scopes: tuple, Custom auth scopes to request. By default
            CLOUDSDK_SCOPES are requested.
        min_expiry_duration: Duration str, Refresh the token if it is within
            this duration from expiration. Must be a valid duration between
            0 seconds and 1 hour (e.g. '0s' >x< '1h').
        allow_account_impersonation: bool, True to allow use of impersonated
            service account credentials (if that is configured).
    """
    if properties.VALUES.auth.disable_credentials.GetBool():
        return None
    return GetFreshAccessToken(account, scopes, min_expiry_duration,
                               allow_account_impersonation)
7716b44802d84aac1952e936166f3414459cbc4b
21,460
def unicode_to_xes(uni):
    """Convert unicode characters to our ASCII representation of patterns."""
    uni = uni.replace(INVISIBLE_CRAP, '')
    return ''.join(BOXES[c] for c in uni)
4c6eebcf562804340ef683eec84e28002202d833
21,461
def AvailableSteps():
    """(read-only) Number of Steps available in cap bank to be switched ON."""
    return lib.Capacitors_Get_AvailableSteps()
210f1316beafcdef266858490411bb9f737cb3de
21,462
import re

def modify_list(result, guess, answer):
    """
    Reveal guessed letters in the displayed word pattern.

    Arguments:
        result -- a list of the shown pattern word.
        guess -- the letter of the user's guess.
        answer -- the answer word.

    Returns:
        result -- the list of the word after modification.
    """
    guess = guess.lower()
    answer = answer.lower()
    if guess in answer:
        index_list = [x.start() for x in re.finditer(guess, answer)]
        for i in index_list:
            result[i] = guess.upper()
    else:
        print("Letter '{}' is not in the word".format(guess.upper()))
    print(' '.join(result))
    return result
9384ecd09659c55808a859dd613641ccac46c760
21,463
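Exercising the reveal logic of modify_list above, hangman-style:

pattern = ['_'] * 4
pattern = modify_list(pattern, 'a', 'java')  # prints "_ A _ A"
pattern = modify_list(pattern, 'z', 'java')  # prints the miss message, then "_ A _ A" again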
import numpy as np

def f(p, snm, sfs):
    """
    p: proportion of all SNPs on the X chromosome [float, 0 < p < 1]
    snm: standard neutral model spectrum (optimally scaled)
    sfs: observed SFS
    """
    # modify sfs
    fs = modify(p, sfs)
    # return sum of squared deviations of the modified SFS from the snm spectrum:
    return np.sum((fs - snm) ** 2)
b7d3c8ef188a5126fe7b817c78949fb9feec5b62
21,464
def get_N_intransit(tdur, cadence):
    """Estimates number of in-transit points for transits in a light curve.

    Parameters
    ----------
    tdur : float
        Full transit duration
    cadence : float
        Cadence/integration time for light curve

    Returns
    -------
    n_intransit : int
        Number of flux points in each transit
    """
    n_intransit = tdur // cadence
    return n_intransit
d126b5590a8997b8695c1a86360421f2bf4b8357
21,465
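For example, a 6-hour transit observed at 30-minute cadence (both in hours; any consistent unit works):

n = get_N_intransit(6.0, 0.5)
print(n)  # -> 12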
def extract_keys(keys, dic, drop=True): """ Extract keys from dictionary and return a dictionary with the extracted values. If key is not included in the dictionary, it will also be absent from the output. """ out = {} for k in keys: try: if drop: out[k] = dic.pop(k) else: out[k] = dic[k] except KeyError: pass return out
15a66fff5207df18d8ece4959e485068f1bd3c9c
21,466
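Usage sketch showing the drop behavior (keys and values are made up):

params = {"lr": 0.01, "epochs": 10, "verbose": True}
popped = extract_keys(["lr", "momentum"], params)  # missing keys are skipped
print(popped)  # {'lr': 0.01}
print(params)  # {'epochs': 10, 'verbose': True} -- 'lr' was popped out
kept = extract_keys(["epochs"], params, drop=False)
print(params)  # unchanged this time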
from flask import current_app
from werkzeug.routing import Rule

from invenio_app_ils.proxies import current_app_ils

def jsonresolver_loader(url_map):
    """Resolve the referred EItems for a Document record."""
    def eitems_resolver(document_pid):
        """Search and return the EItems that reference this Document."""
        eitems = []
        eitem_search = current_app_ils.eitem_search_cls()
        for hit in eitem_search.search_by_document_pid(document_pid).scan():
            eitem = hit.to_dict()
            eitems.append({
                "pid": eitem.get("pid"),
                "description": eitem.get("description"),
                "internal_notes": eitem.get("internal_notes"),
                "open_access": eitem.get("open_access"),
                "bucket_id": eitem.get("bucket_id", None),
                "files": eitem.get("files", []),
            })
        return {
            "total": len(eitems),
            "hits": eitems
        }
    url_map.add(
        Rule(
            "/api/resolver/documents/<document_pid>/eitems",
            endpoint=eitems_resolver,
            host=current_app.config.get("JSONSCHEMAS_HOST"),
        )
    )
9da05e92850cbdbedb8d49cdf2cdf3763d0b1ab6
21,467
import sqlite3 def getStations(options, type): """Query stations by specific type ('GHCND', 'ASOS', 'COOP', 'USAF-WBAN') """ conn = sqlite3.connect(options.database) c = conn.cursor() if type == "ALL": c.execute("select rowid, id, name, lat, lon from stations") else: c.execute("select rowid, id, name, lat, lon from stations where type = ?",(type,)) stations = [] for r in c: stations.append(r) conn.close() return stations
59d45a79542e68cd691cf848f3d4fe250389732c
21,468
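A self-contained sketch; it builds a throwaway database matching the schema the query assumes (the real station file and its columns are not shown here):

import sqlite3
from types import SimpleNamespace

conn = sqlite3.connect("/tmp/stations_demo.db")  # illustrative path
conn.execute("create table stations (id text, name text, lat real, lon real, type text)")
conn.execute("insert into stations values "
             "('USW00094728', 'NYC Central Park', 40.78, -73.97, 'GHCND')")
conn.commit()
conn.close()

options = SimpleNamespace(database="/tmp/stations_demo.db")
print(getStations(options, "GHCND"))
# [(1, 'USW00094728', 'NYC Central Park', 40.78, -73.97)]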
def test_name(request): """Returns (module_name, function_name[args]) for a given test""" return ( request.module.__name__, request._parent_request._pyfuncitem.name, # pylint: disable=protected-access )
4ef40de8a2c917c0b12cb83db9fd39f6b59777a0
21,469
import textwrap

def inputwrap(x, ARG_indented: bool=False, ARG_end_with: str=" "):
    """Textwrapping for regular 'input' commands.

    Parameters
    ----------
    x
        The text to be wrapped.
    ARG_indented : bool (default is 'False')
        Whether or not the textwrapped string should be indented.
    ARG_end_with : str (default is ' ')
        The string that the textwrapped string will end with.

    Returns
    -------
    str
        User input.
    """
    if ARG_indented:
        return input(textwrap.fill(x, width=70, subsequent_indent=" ") + ARG_end_with)
    return input(textwrap.fill(x, width=70) + ARG_end_with)
af0ab3b69205965b40d3e03bdcfe3148889f7080
21,470
import numpy as np
import matplotlib.pyplot as plt

from .utils import phys_size

def SBP_single(ell_fix, redshift, pixel_scale, zeropoint, ax=None, offset=0.0,
               x_min=1.0, x_max=4.0, alpha=1, physical_unit=False, show_dots=False,
               show_grid=False, show_banner=True, vertical_line=None,
               linecolor='firebrick', linestyle='-', linewidth=3, labelsize=25,
               ticksize=30, label='SBP', labelloc='lower left'):
    """Display the 1-D profiles, without showing PA and ellipticity.

    Parameters:
        ell_fix: astropy Table or numpy table, should be the output of IRAF ELLIPSE.
        redshift (float): redshift of the object.
        pixel_scale (float): pixel scale in arcsec/pixel.
        zeropoint (float): zeropoint of the photometry system.
        ax (``matplotlib.pyplot.axes`` object): The user could provide axes on which the figure will be drawn.
        offset (float): offset of single surface brightness profile, in the unit of ``count``.
        x_min (float): Minimum value of x-axis, in ``$x^{1/4}$`` scale.
        x_max (float): Maximum value of x-axis, in ``$x^{1/4}$`` scale.
        alpha (float): transparency.
        physical_unit (bool): If true, the figure will be shown in physical scale.
        show_dots (bool): If true, it will show all the data points.
        show_grid (bool): If true, it will show a grid.
        vertical_line (list of floats): positions of vertical lines. Maximum length is three.
        linecolor (str): Color of surface brightness profile.
        linestyle (str): Style of surface brightness profile. Could be "--", "-.", etc.
        label (string): Label of surface brightness profile.

    Returns:
        ax: If the input ``ax`` is not ``None``.
    """
    if ax is None:
        fig = plt.figure(figsize=(10, 10))
        fig.subplots_adjust(left=0.0, right=1.0, bottom=0.0, top=1.0,
                            wspace=0.00, hspace=0.00)
        ax1 = fig.add_axes([0.08, 0.07, 0.85, 0.88])
        ax1.tick_params(direction='in')
    else:
        ax1 = ax
        ax1.tick_params(direction='in')

    # Calculate physical size at this redshift
    phys_scale = phys_size(redshift, verbose=False)

    # 1-D profile
    if physical_unit is True:
        x = ell_fix['sma'] * pixel_scale * phys_scale
        y = -2.5 * np.log10((ell_fix['intens'] + offset) / (pixel_scale)**2) + zeropoint
        y_upper = -2.5 * np.log10((ell_fix['intens'] + offset + ell_fix['int_err']) / (pixel_scale)**2) + zeropoint
        y_lower = -2.5 * np.log10((ell_fix['intens'] + offset - ell_fix['int_err']) / (pixel_scale)**2) + zeropoint
        upper_yerr = y_lower - y
        lower_yerr = y - y_upper
        asymmetric_error = [lower_yerr, upper_yerr]
        xlabel = r'$(R/\mathrm{kpc})^{1/4}$'
        ylabel = r'$\mu\,[\mathrm{mag/arcsec^2}]$'
    else:
        x = ell_fix['sma'] * pixel_scale
        y = -2.5 * np.log10((ell_fix['intens'] + offset) / (pixel_scale)**2) + zeropoint
        y_upper = -2.5 * np.log10((ell_fix['intens'] + offset + ell_fix['int_err']) / (pixel_scale)**2) + zeropoint
        y_lower = -2.5 * np.log10((ell_fix['intens'] + offset - ell_fix['int_err']) / (pixel_scale)**2) + zeropoint
        upper_yerr = y_lower - y
        lower_yerr = y - y_upper
        asymmetric_error = [lower_yerr, upper_yerr]
        xlabel = r'$(R/\mathrm{arcsec})^{1/4}$'
        ylabel = r'$\mu\,[\mathrm{mag/arcsec^2}]$'

    if show_grid:
        ax1.grid(linestyle='--', alpha=0.4, linewidth=2)
    if show_dots:
        ax1.errorbar((x ** 0.25), y, yerr=asymmetric_error,
                     color='k', alpha=0.2, fmt='o', capsize=4, capthick=1, elinewidth=1)

    if label is not None:
        ax1.plot(x**0.25, y, color=linecolor, linewidth=linewidth, linestyle=linestyle,
                 label=r'$\mathrm{' + label + '}$', alpha=alpha)
        leg = ax1.legend(fontsize=labelsize, frameon=False, loc=labelloc)
        for l in leg.legendHandles:
            l.set_alpha(1)
    else:
        ax1.plot(x**0.25, y, color=linecolor, linewidth=linewidth,
                 linestyle=linestyle, alpha=alpha)
    ax1.fill_between(x**0.25, y_upper, y_lower, color=linecolor,
alpha=0.3*alpha) for tick in ax1.xaxis.get_major_ticks(): tick.label.set_fontsize(ticksize) for tick in ax1.yaxis.get_major_ticks(): tick.label.set_fontsize(ticksize) ax1.set_xlim(x_min, x_max) ax1.set_xlabel(xlabel, fontsize=ticksize) ax1.set_ylabel(ylabel, fontsize=ticksize) ax1.invert_yaxis() # Twin axis with linear scale if physical_unit and show_banner is True: ax4 = ax1.twiny() ax4.tick_params(direction='in') lin_label = [1, 2, 5, 10, 50, 100, 150, 300] lin_pos = [i**0.25 for i in lin_label] ax4.set_xticks(lin_pos) ax4.set_xlim(ax1.get_xlim()) ax4.set_xlabel(r'$\mathrm{kpc}$', fontsize=ticksize) ax4.xaxis.set_label_coords(1, 1.025) ax4.set_xticklabels([r'$\mathrm{'+str(i)+'}$' for i in lin_label], fontsize=ticksize) for tick in ax4.xaxis.get_major_ticks(): tick.label.set_fontsize(ticksize) # Vertical line if vertical_line is not None: if len(vertical_line) > 3: raise ValueError('Maximum length of vertical_line is 3.') ylim = ax1.get_ylim() style_list = ['-', '--', '-.'] for k, pos in enumerate(vertical_line): ax1.axvline(x=pos**0.25, ymin=0, ymax=1, color='gray', linestyle=style_list[k], linewidth=3, alpha=0.75) plt.ylim(ylim) # Return if ax is None: return fig return ax1
5aab69adfc38afad84b6f45972ffb7ce05d547ac
21,471
def min_conflicts_value(csp, var, current): """Return the value that will give var the least number of conflicts. If there is a tie, choose at random.""" return argmin_random_tie(csp.domains[var], key=lambda val: csp.nconflicts(var, val, current))
ab338ce8b0abd7a77078193fcac3041155ed3e78
21,472
from app.model import TokenRepository def init(jwt): """Initialize the JWTManager. Parameters: jwt (JWTManager): an instance of the jwt manager. """ @jwt.token_in_blacklist_loader def check_if_token_in_blacklist(decoded_token): """Callback to check if a token is in the blacklist. Parameters: decrypted_token (dict): a jwt token decrypted into a dictionary. """ return TokenRepository().is_token_revoked(decoded_token)
406ff6e8ce6169dff6559b141f5c4453cce68f1e
21,473
from typing import Callable import inspect def get_component_rst_string(module: ModuleType, component: Callable, level: int) -> str: """Get a rst string, to autogenerate documentation for a component (class or function) :param module: the module containing the component :param component: the component (class or function) :param level: the level in nested directory structure """ object_name = f"{module.__name__}.{component.__name__}" rst_documentation = "" level_underline = RST_LEVEL_SYMBOLS[level] * 6 if inspect.isclass(component): rst_documentation = SPHINX_CLASS_STRING.format( object_name=object_name, var=component.__name__, level=level_underline ) elif inspect.isfunction(component): rst_documentation = SPHINX_FUNC_STRING.format( object_name=object_name, var=component.__name__, level=level_underline ) elif type(component).__name__ == "Dispatcher": rst_documentation = get_multidispatch_string(component, module, level_underline) return rst_documentation
511c718610456b4c5df5df2bc9e0ae5e7ac6823c
21,474
import tensorflow as tf

def log_mse_loss(source, separated, max_snr=1e6, bias_ref_signal=None):
  """Negative log MSE loss, the negated log of SNR denominator."""
  err_pow = tf.math.reduce_sum(tf.math.square(source - separated), axis=-1)
  snrfactor = 10.**(-max_snr / 10.)
  if bias_ref_signal is None:
    ref_pow = tf.math.reduce_sum(tf.math.square(source), axis=-1)
  else:
    ref_pow = tf.math.reduce_sum(tf.math.square(bias_ref_signal), axis=-1)
  bias = snrfactor * ref_pow
  return 10. * _stabilized_log_base(bias + err_pow)
46203582f0d0ec2a98248ec000805c9e43f54091
21,475
from typing import Union def crack(password: str) -> Union[str, None]: """ Crack the given password """ # found 96% by caesar return caesar(password)
058779267c400501eecac2d3e43d691e2152ef8d
21,476
def fetchOne(query): """ Returns a dict result from the fetch of one query row """ return sqliteRowToDict(query.fetchone())
5be4753ea541a6e27ece16fb375c8a0664487a71
21,477
import matplotlib.pyplot

def _get_class_rgb(num_classes, predicted_class):
  """Map from class to RGB value for a specific colormap.

  Args:
    num_classes: Integer, the total number of classes.
    predicted_class: Integer, the predicted class, in [0, num_classes).

  Returns:
    Tuple of 3 floats in [0.0, 1.0] representing an RGB color.

  Raises:
    ValueError: If predicted class is not in [0, num_classes).
  """
  if not 0 <= predicted_class < num_classes:
    raise ValueError('Predicted class %d must be in [0, %d).' %
                     (predicted_class, num_classes))
  # Map [0, num_classes) to [0, 255)
  colormap_index = int(predicted_class * 255.0 / num_classes)
  # Return just the RGB values of the colormap.
  return matplotlib.pyplot.cm.get_cmap(CLASS_ANNOTATION_COLORMAP)(colormap_index)[0:3]
914824eef57a829a7d67e74f45f56088d73ea34e
21,478
from datetime import datetime def PUT(request): """Update a project's name.""" request.check_required_parameters(body={'project': {'name': 'name'}}, path={'projectId': 'string'}) project = Project.from_id(request.params_path['projectId']) project.check_exists() project.check_user_access(request.google_id, True) project.set_property('name', request.params_body['project']['name']) project.set_property('datetime_last_edited', Database.datetime_to_string(datetime.now())) project.update() return Response(200, 'Successfully updated project.', project.obj)
ab5cc9bea5f5a933293761a852532f2c7c6004ec
21,479
import pandas as pd

def load_book_details(file_path):
    """ Read book details from a csv file into a pandas DataFrame. """
    books_df = pd.read_csv(file_path, index_col='book_id')
    return books_df
9240efd9778198e34464fe6f95d312a82fd3894e
21,480
import random import string def random_string_fx() -> str: """ Creates a 16 digit alphanumeric string. For use with logging tests. Returns: 16 digit alphanumeric string. """ result = "".join(random.sample(string.ascii_letters, 16)) return result
835c2dc2716c6ef0ad37f5ae03cfc9dbe2e16725
21,481
from typing import Dict from typing import List import logging def parse_scheduler_nodes( pbscmd: PBSCMD, resource_definitions: Dict[str, PBSProResourceDefinition] ) -> List[Node]: """ Gets the current state of the nodes as the scheduler sees them, including resources, assigned resources, jobs currently running etc. """ ret: List[Node] = [] for ndict in pbscmd.pbsnodes_parsed("-a"): node = parse_scheduler_node(ndict, resource_definitions) if not node.available.get("ccnodeid"): node.metadata["override_resources"] = False logging.fine( "'ccnodeid' is not defined so %s has not been joined to the cluster by the autoscaler" + " yet or this is not a CycleCloud managed node", node, ) ret.append(node) return ret
bceec54b302b0b70e77181d3940fd6e41b8922c4
21,482
def GaugeSet(prefix, *, name, index, **kwargs): """ Factory function for Gauge Set. Parameters ---------- prefix : str Gauge base PV (up to 'GCC'/'GPI'). name : str Name to refer to the gauge set. index : str or int Index for gauge (e.g. '02' or 3). prefix_controller : str, optional Base PV for the controller. onlyGCC : optional If defined and not :keyword:`False`, set has no Pirani. """ onlyGCC = kwargs.pop('onlyGCC', None) if onlyGCC: if 'prefix_controller' in kwargs: return GaugeSetMks( prefix, name=name, index=index, prefix_controller=kwargs.pop('prefix_controller'), **kwargs) else: return GaugeSetBase(prefix, name=name, index=index, **kwargs) else: if 'prefix_controller' in kwargs: return GaugeSetPiraniMks( prefix, name=name, index=index, prefix_controller=kwargs.pop('prefix_controller'), **kwargs) else: return GaugeSetPirani(prefix, name=name, index=index, **kwargs)
ef856c77e414d8bc4532483e5b65aa3ebb0cc132
21,483
def user_rating(user, object, category=""): """ Usage: {% user_rating user obj [category] as var %} """ return user_rating_value(user, object, category)
09ac3ea8d1efcc3dc70d82bf5266f3e768a35c3b
21,484
import numpy def weighted_mean( x: NumericOrIter, w: NumericOrIter = None, na_rm: bool = False, ) -> NumericType: """Calculate weighted mean""" if is_scalar(x): x = [x] # type: ignore if w is not None and is_scalar(w): w = [w] # type: ignore x = Array(x) if w is not None: w = Array(w) if len(x) != len(w): raise ValueError("'x' and 'w' must have the same length") if na_rm: notna = ~numpy.isnan(x) x = x[notna] if w is not None: w = w[notna] if w is not None and sum(w) == 0: return NA return numpy.average(x, weights=w)
4034d642629696f1be73c62384bf6633ccb6efe1
21,485
import socket

def internet(host="8.8.8.8", port=53, timeout=10):
    """
    Check Internet Connections.
    :param host: the host to check the connection against
    :param port: the port to connect to on that host
    :param timeout: connection timeout in seconds
    :type host:str
    :type port:int
    :type timeout:int
    :return bool: True if Connection is Stable
    >>> internet() # if there is stable internet connection
    True
    >>> internet() # if there is no stable internet connection
    False
    """
    try:
        socket.setdefaulttimeout(timeout)
        socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port))
        return True
    except Exception:
        return False
c0ee11cf7aa9699a077e238d993136aeb4efcead
21,486
def parse_date(td):
    """Convert a timedelta into an age string of the form '<years>y<months>m'."""
    resYear = float(td.days) / 364.0                     # fractional number of years (364-day years)
    resMonth = int((resYear - int(resYear)) * 364 / 30)  # remaining whole months (30-day months)
    resYear = int(resYear)
    return str(resYear) + "y" + str(resMonth) + "m"
bda78b0968b59c13f763e51f5f15340a377eeb35
21,487
import math

import numpy as np

def hue_angle(a, b):
    """
    Returns the *hue* angle :math:`h` in degrees.

    Parameters
    ----------
    a : numeric
        Opponent colour dimension :math:`a`.
    b : numeric
        Opponent colour dimension :math:`b`.

    Returns
    -------
    numeric
        *Hue* angle :math:`h` in degrees.

    Examples
    --------
    >>> a = -0.0006241120682426434
    >>> b = -0.0005062701067729668
    >>> hue_angle(a, b)  # doctest: +ELLIPSIS
    219.0484326...
    """
    h = math.degrees(np.arctan2(b, a)) % 360
    return h
2f508be9ed0cdcb0e8b193eefb441a6281a464c7
21,488
def perform_similarity_checks(post, name):
    """
    Performs 4 similarity tests between each link's domain in the post and the user name
    :param post: Text of the post
    :param name: Username to compare against
    :return: Tuple of (max similarity ratio, list of similar domains)
    """
    max_similarity, similar_links = 0.0, []

    # Keep checking links until one is deemed "similar"
    for link in post_links(post):
        domain = get_domain(link)

        # Straight comparison
        s1 = similar_ratio(domain, name)
        # Strip all spaces
        s2 = similar_ratio(domain, name.replace(" ", ""))
        # Strip all hyphens
        s3 = similar_ratio(domain.replace("-", ""), name.replace("-", ""))
        # Strip all hyphens and all spaces
        s4 = similar_ratio(domain.replace("-", "").replace(" ", ""),
                           name.replace("-", "").replace(" ", ""))

        similarity = max(s1, s2, s3, s4)
        max_similarity = max(max_similarity, similarity)
        if similarity >= SIMILAR_THRESHOLD:
            similar_links.append(domain)

    return max_similarity, similar_links
78813c3b2223072a4a5b15a5a71837424a648470
21,489
from operator import itemgetter

def create_getters(tuples):
    """Create a series of itemgetters that return tuples

    :param tuples: a list of tuples
    :type tuples: collections.Iterable
    :returns: a generator of item getters
    :rtype: generator

    ::

        >>> gs = list(create_getters([(0, 2), (), (1,)]))
        >>> d = ['a', 'b', 'c', 'd']
        >>> gs[0](d)
        ('a', 'c')
        >>> gs[1](d)
        ()
        >>> gs[2](d)
        ('b',)
    """
    def tgetter0():
        return lambda x: ()

    def tgetter1(key):
        it = itemgetter(key)
        return lambda x: (it(x),)

    for t in tuples:
        if not t:
            yield tgetter0()
        elif len(t) == 1:
            yield tgetter1(*t)
        else:
            yield itemgetter(*t)
43d6fed8233ee56b91a52c024c533ae72c8e6890
21,490
def report_cots_cv2x_bsm(bsm: dict) -> str:
    """A function to report the BSM information contained in an SPDU from a COTS C-V2X device

    :param bsm: a dictionary containing BSM fields from a C-V2X SPDU
    :type bsm: dict
    :return: a string representation of the BSM fields
    :rtype: str
    """
    report = ""
    for key, value in bsm.items():
        report += key + "\t\t\t" + str(value) + "\n"
    report += "\n"
    return report
df0aa5ae4b50980088fe69cb0b776abbf0b0998d
21,491
import logging

import numpy as np

def get_level_matrix(matrix, level):
    """Returns a binary matrix with positions exceeding a threshold.

    matrix = numpy array object
    level = floating number

    The matrix it returns has 1 in the positions where matrix has values
    above level and 0 elsewhere."""
    logging.info("Selecting the amino acids contacts.")
    (n1, n2) = matrix.shape
    out_matrix = np.empty([n1, n2], dtype=float, order='F')
    for i in range(n1):
        for j in range(n2):
            if i == j:
                out_matrix[i, j] = 0
            elif matrix[i, j] >= level:
                out_matrix[i, j] = 1
            else:
                out_matrix[i, j] = 0
    return out_matrix
1516f14970471c4f9402fcbf2cfb2a0d017e754e
21,492
def bookmark(repo, subset, x): """``bookmark([name])`` The named bookmark or all bookmarks. If `name` starts with `re:`, the remainder of the name is treated as a regular expression. To match a bookmark that actually starts with `re:`, use the prefix `literal:`. """ # i18n: "bookmark" is a keyword args = getargs(x, 0, 1, _('bookmark takes one or no arguments')) if args: bm = getstring(args[0], # i18n: "bookmark" is a keyword _('the argument to bookmark must be a string')) kind, pattern, matcher = _stringmatcher(bm) bms = set() if kind == 'literal': bmrev = repo._bookmarks.get(pattern, None) if not bmrev: raise error.RepoLookupError(_("bookmark '%s' does not exist") % bm) bms.add(repo[bmrev].rev()) else: matchrevs = set() for name, bmrev in repo._bookmarks.iteritems(): if matcher(name): matchrevs.add(bmrev) if not matchrevs: raise error.RepoLookupError(_("no bookmarks exist" " that match '%s'") % pattern) for bmrev in matchrevs: bms.add(repo[bmrev].rev()) else: bms = set([repo[r].rev() for r in repo._bookmarks.values()]) bms -= set([node.nullrev]) return subset & bms
71fd382ad0710e2e54a80b0b739d04c6d5410719
21,493
from functools import partial

def indel_protein_processor(df, refgene, proteincdd=None):
    """Calculate protein features

    Features not used in the final model are commented out

    Args:
        df (pandas.DataFrame)
        refgene (str): path to refCodingExon.bed.gz
        proteincdd (str): optional, path to proteinConservedDomains.txt
    Returns:
        df (pandas.DataFrame)
    """
    # cds length & indel location
    acc_len = acc_len_dict(refgene)
    df["cds_length"], df["indel_location"] = zip(
        *df.apply(partial(len_loc, d=acc_len), axis=1)
    )

    # check if the indel is in conserved domain (CDD)
    # acc_dom = acc_domain_dict(proteincdd)
    # df['is_in_cdd'] = df.apply(partial(is_in_conserved_domain, d=acc_dom), axis=1)

    return df
721b4b19838ac2d6cd21f471d647c34c3586ebb2
21,494
def perdict_raw(model, *args, **kwargs):
    """
    Tries to call model.predict(*args, **kwargs, prediction_type="RawFormulaVal").
    If that fails, falls back to model.predict(*args, **kwargs).
    """
    try:
        return model.predict(*args, **kwargs, prediction_type="RawFormulaVal")
    except TypeError:
        return model.predict(*args, **kwargs)
2ab7790c0cd48cc9b26f6e7888dd61436cb728b4
21,495
from functools import wraps

from flask import jsonify, request

def login_required(arg):
    """ Decorator to check if a user is logged in"""
    @wraps(arg)
    def wrap(*args, **kwargs):
        """Checking if token exists in the request header"""
        if request.headers.get('Authorization'):
            auth_token = request.headers.get('Authorization')
            token = auth_token.split(" ")[1]
            resp = User.decode_token(token)
            # ``User`` is the application's user model, defined elsewhere
            user = User.query.filter_by(id=resp).first()
            if user:
                return arg(*args, **kwargs)
        # missing or invalid token: reject the request
        response = jsonify({
            'status': 'error',
            'message': "Unauthorized"
        })
        response.status_code = 401
        return response
    return wrap
2d41e2cd41621a0ce6015182badd7a4117c1daf6
21,496
# Constants from the Advent of Code 2015 day 25 puzzle, which this function
# implements; the doctest values below are the puzzle's published sequence.
BASE = 20151125
FACTOR = 252533
MOD = 33554393

def calc_manual_numbers(n):
    """
    >>> calc_manual_numbers(1)
    20151125
    >>> calc_manual_numbers(2)
    31916031
    >>> calc_manual_numbers(3)
    18749137
    >>> calc_manual_numbers(21)
    33511524
    """
    return (BASE * pow(FACTOR, n - 1, MOD)) % MOD
d0a276da3eb931afcf6b60b1b6172b468e59b95c
21,497
import numpy as np

def accuracy(y0, y1):
    """
    compute accuracy for y0 and y1;
    it does not matter whether either is given as labels or as per-class probabilities
    :param y0: list of labels or matrix of probabilities
    :param y1: list of labels or matrix of probabilities
    :return: accuracy
    """
    # np.int/np.float were removed in NumPy >= 1.24; use the abstract scalar types
    if not isinstance(y0[0], (int, float, np.integer, np.floating)):
        y0 = np.argmax(y0, axis=1)
    elif isinstance(y0, list):
        y0 = np.array(y0)
    if not isinstance(y1[0], (int, float, np.integer, np.floating)):
        y1 = np.argmax(y1, axis=1)
    elif isinstance(y1, list):
        y1 = np.array(y1)
    out = np.sum(y0 == y1) / len(y0)
    return out
b0e1077a8443e3d325b3238355c1a578af8823e3
21,498
import random

def random_function(*args):
    """Picks one of its arguments uniformly at random, calls it, and returns
    the result.

    Example usage:

    >>> random_function(lambda: numpy.random.uniform(-2, -1),
    ...                 lambda: numpy.random.uniform(1, 2))
    """
    choice = random.randint(0, len(args) - 1)
    return args[choice]()
3f8d11becc52fde5752671e3045a9c64ddfeec97
21,499