def test_data_viewer(dataframe):
    """Test DataViewer functions."""
    dt_viewer = DataViewer(data=dataframe)
    check.equal(len(dataframe), len(dt_viewer.filtered_data))

    _apply_filter(dt_viewer.data_filter, "Account", "contains", "MSTICAlertsWin1")
    dt_viewer._apply_filter(btn=None)
    check.equal(3, len(dt_viewer.filtered_data))

    col_subset = list(dataframe.columns)[:5]
    dt_viewer.column_chooser = DataTableColumnChooser(
        data=dataframe, selected_cols=col_subset
    )
    dt_viewer._update_columns(btn=None)
    check.equal((3, 5), dt_viewer.filtered_data.shape)

def test_query_xdd():
    """Assert number of records equals expected hits."""
    s.build_query_urls()
    # Grab first search term
    s.next_url = s.search_urls[0]
    s.query_xdd()
    if s.response_status == "success":
        assert s.response_hits == len(s.response_data)

def deceptivemultimodal(x: np.ndarray) -> float:
    """Infinitely many local optima, as we get closer to the optimum."""
    assert len(x) >= 2
    distance = np.sqrt(x[0]**2 + x[1]**2)
    if distance == 0.:
        return 0.
    angle = np.arctan(x[0] / x[1]) if x[1] != 0. else np.pi / 2.
    invdistance = int(1. / distance) if distance > 0. else 0.
    if np.abs(np.cos(invdistance) - angle) > 0.1:
        return 1.
    return float(distance)

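A quick probe of the landscape this objective describes; a minimal sketch assuming only that numpy is available and the function above is in scope:

import numpy as np

# The optimum is at the origin; almost every far-away point scores 1.0
# unless it happens to fall inside one of the narrow angular basins.
print(deceptivemultimodal(np.array([0.0, 0.0])))  # 0.0 at the optimum
print(deceptivemultimodal(np.array([3.0, 4.0])))  # 1.0: off-basin point
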
def get_basins_scores(memory_array, binarized_cluster_dict, basinscore_method="default"):
    """
    Args:
        - memory_array: i.e. xi matrix, will be N x K (one memory from each cluster)
        - binarized_cluster_dict: {k: N x M array for k in 0 ... K-1 (i.e. cluster index)}
        - basinscore_method: options for different basin scoring algos (one based on
          crawling the basin exactly, other via low temp dynamics)
    Returns:
        - score_dict: {k: M x 1 array for k in 0 ... K-1 (i.e. cluster index)}
    """
    assert basinscore_method in ['crawler', 'trajectories']
    num_genes, num_clusters = memory_array.shape
    cells_each_cluster = [binarized_cluster_dict[idx].shape[1] for idx in range(num_clusters)]
    num_cells = np.sum(cells_each_cluster)
    print("num_genes, num_clusters, num_cells:\n%d %d %d" % (num_genes, num_clusters, num_cells))

    def basin_score_pairwise(basin_k, memory_vector, data_vector):
        # OPTION 1 -- is cell in basin yes/no
        # OPTION 2 -- is cell in basin - some scalar value e.g. projection onto that mem
        # OPTION 3 -- based on compare if data vec in set of basin states (from aux fn)
        hd = hamming(memory_vector, data_vector)
        if tuple(data_vector) in basin_k[hd]:
            print("data_vector in basin_k[hd]")
            return 1.0
        else:
            print("data_vector NOT in basin_k[hd]")
            return 0.0

    # 1 is build J_ij from Xi
    _, a_inv_arr = memory_corr_matrix_and_inv(memory_array, check_invertible=True)
    eta = predictivity_matrix(memory_array, a_inv_arr)
    intxn_matrix = interaction_matrix(memory_array, a_inv_arr, "projection")

    # 2 is score each cell in each cluster based on method
    score_dict = {k: 0 for k in range(num_clusters)}
    # setup io
    io_dict = run_subdir_setup(run_subfolder=ANALYSIS_SUBDIR)

    if basinscore_method == 'crawler':
        for k in range(num_clusters):
            print("Scoring basin for cluster:", k)
            binary_cluster_data = binarized_cluster_dict[k]
            memory_k = memory_array[:, k]
            basin_k = build_basin_states(intxn_matrix, memory_k)
            for cell_data in binary_cluster_data.T:  # TODO make sure this gives columns (len is N)
                print(len(cell_data), num_genes, cell_data.shape)
                score_dict[k] += basin_score_pairwise(basin_k, memory_k, cell_data)
            print(score_dict)
    else:
        assert basinscore_method == 'trajectories'
        for k in range(num_clusters):
            print("Scoring basin for cluster:", k)
            # init_conds = binarized_cluster_dict[k]
            print("WARNING: only looking at first 10 cells in each cluster")
            init_conds = binarized_cluster_dict[k][:, 0:10]
            trajectories = basin_projection_timeseries(
                k, memory_array, intxn_matrix, eta, init_conds,
                io_dict['plotdir'], num_steps=3, plot=True, flag_write=False)
            print(trajectories)
            score_dict[k] = np.mean(trajectories[-1, :])

    # save to file
    scores = [score_dict[k] for k in range(num_clusters)]
    np.savetxt(data_folder + os.sep + "scores.txt", scores)
    return score_dict, io_dict

def generate_inputs_ph(fixture_sandbox, fixture_localhost, fixture_code, generate_remote_data, generate_kpoints_mesh):
    """Generate default inputs for a `PhCalculation`."""

    def _generate_inputs_ph():
        """Generate default inputs for a `PhCalculation`."""
        from aiida.orm import Dict
        from aiida_quantumespresso.utils.resources import get_default_options

        inputs = {
            'code': fixture_code('quantumespresso.matdyn'),
            'parent_folder': generate_remote_data(fixture_localhost, fixture_sandbox.abspath, 'quantumespresso.pw'),
            'qpoints': generate_kpoints_mesh(2),
            'parameters': Dict(dict={'INPUTPH': {}}),
            'metadata': {
                'options': get_default_options()
            }
        }
        return inputs

    return _generate_inputs_ph

def escape_url(raw):
    """
    Escape urls to prevent code injection craziness. (Hopefully.)
    """
    from urllib.parse import quote
    return quote(raw, safe="/#:")

def split_sample(labels):
    """
    Split the 'Sample' column of a DataFrame into a list.

    Parameters
    ----------
    labels: DataFrame
        The Dataframe should contain a 'Sample' column for splitting.

    Returns
    -------
    DataFrame
        Updated DataFrame has 'Sample' column with a list of strings.
    """
    sample_names = labels["Sample"].str.split(" ", n=1, expand=False)
    labels['Sample'] = sample_names
    return labels

def pmi_odds(pnx, pn, nnx, nn):
    """
    Computes the PMI with odds

    Args:
        pnx (int): number of POSITIVE news with the term x
        pn (int): number of POSITIVE news
        nnx (int): number of NEGATIVE news with the term x
        nn (int): number of NEGATIVE news

    Returns:
        float: PMI
    """
    # print(pnx, pn, nnx, nn)
    return _pmi_odds_(p_p(pnx, pn), p_n(nnx, nn))

def berDecodeLength(m, offset=0):
    """
    Return a tuple of (length, lengthLength).
    m must be at least one byte long.
    """
    l = ber2int(m[offset + 0:offset + 1])
    ll = 1
    if l & 0x80:
        ll = 1 + (l & 0x7F)
        need(m, offset + ll)
        l = ber2int(m[offset + 1:offset + ll], signed=0)
    return (l, ll)

def hasAspect(obj1, obj2, aspList):
    """
    Returns if there is an aspect between objects
    considering a list of possible aspect types.
    """
    aspType = aspectType(obj1, obj2, aspList)
    return aspType != const.NO_ASPECT

def gen_decomposition(denovo_name, basis_names, weights, output_path, project,
                      mtype, denovo_plots_dict, basis_plots_dict, reconstruction_plot_dict,
                      reconstruction=False, statistics=None, sig_version=None, custom_text=None):
    """
    Generate the correct plot based on mtype.

    Parameters:
    ----------
    denovo_name: (String) Name of denovo signature
    basis_names: (List of Strings) Names of basis signatures
    weights: (List of Strings) Percentile contribution for each basis signature
    output_path: (String) Path to existing output directory
    project: (String) Project name appended to file names
    mtype: (String) The context; 'mtype_options' has valid values
    denovo_plots_dict: (Dictionary) Signatures are keys, ByteIO plots are values
    basis_plots_dict: (Dictionary) Signatures are keys, ByteIO plots are values
    reconstruction_plot_dict: (Dictionary) Signatures are keys, ByteIO plots are values
    reconstruction: (Boolean) True to generate plot w/ reconstruction
    statistics: (Pandas Dataframe) Output from calculate_similarities()
    """
    if mtype == "6":
        print("Need to add support for SBS6 Decomposition")
    elif mtype == "24":
        print("Need to add support for SBS24 Decomposition")
    elif mtype == "96":
        byte_plot = spd_96.gen_decomposition(denovo_name, basis_names, weights, output_path,
                                             project, denovo_plots_dict, basis_plots_dict,
                                             reconstruction_plot_dict, reconstruction,
                                             statistics, sig_version, custom_text)
        return byte_plot
    elif mtype == "288":
        byte_plot = spd_288.gen_decomposition(denovo_name, basis_names, weights, output_path,
                                              project, denovo_plots_dict, basis_plots_dict,
                                              reconstruction_plot_dict, reconstruction,
                                              statistics, sig_version, custom_text)
        return byte_plot
    elif mtype == "384":
        print("Need to add support for SBS384 Decomposition")
    elif mtype == "1536":
        byte_plot = spd_1536.gen_decomposition(denovo_name, basis_names, weights, output_path,
                                               project, denovo_plots_dict, basis_plots_dict,
                                               reconstruction_plot_dict, reconstruction,
                                               statistics, sig_version, custom_text)
        return byte_plot
    elif mtype == "6144":
        print("Need to add support for SBS6144 Decomposition")
    elif mtype == "28":
        print("Need to add support for ID28 Decomposition")
    elif mtype == "83":
        byte_plot = spd_83.gen_decomposition(denovo_name, basis_names, weights, output_path,
                                             project, denovo_plots_dict, basis_plots_dict,
                                             reconstruction_plot_dict, reconstruction,
                                             statistics, sig_version, custom_text)
        return byte_plot
    elif mtype == "415":
        print("Need to add support for ID415 Decomposition")
    elif mtype == "78":
        byte_plot = spd_78.gen_decomposition(denovo_name, basis_names, weights, output_path,
                                             project, denovo_plots_dict, basis_plots_dict,
                                             reconstruction_plot_dict, reconstruction,
                                             statistics, sig_version, custom_text)
        return byte_plot
    elif mtype == "186":
        print("Need to add support for DBS186 Decomposition")
    elif mtype == "1248":
        print("Need to add support for DBS1248 Decomposition")
    elif mtype == "2976":
        print("Need to add support for DBS2976 Decomposition")
    elif mtype == "48":
        byte_plot = cnv_48.gen_decomposition(denovo_name, basis_names, weights, output_path,
                                             project, denovo_plots_dict, basis_plots_dict,
                                             reconstruction_plot_dict, reconstruction,
                                             statistics, sig_version, custom_text)
        return byte_plot

def org(gh):
    """Creates an Org instance and adds a spy attribute to check for calls"""
    ret = Organization(gh, name=ORG_NAME)
    ret._gh = Mock(wraps=ret._gh)
    ret.spy = ret._gh
    return ret

def merge_regions(
    out_path: str,
    sample1_id: int,
    regions1_file: File,
    sample2_id: int,
    regions2_file: File
) -> File:
    """
    Merge two sorted region files into one.
    """
    def iter_points(regions):
        for start, end, depth in regions:
            yield (start, "start", depth)
            yield (end, "end", -depth)

    def iter_regions(points):
        first_point = next(points, None)
        if first_point is None:
            return
        start, _, depth = first_point
        for pos, kind, delta in points:
            if pos > start:
                yield (start, pos, depth)
                start = pos
            depth += delta

    regions1 = read_regions(regions1_file)
    regions2 = read_regions(regions2_file)
    points1 = iter_points(regions1)
    points2 = iter_points(regions2)
    points = iter_merge(points1, points2)
    regions = iter_regions(points)

    region_path = f"{out_path}/regions/{sample1_id}_{sample2_id}.regions"
    return write_regions(region_path, regions)

def get_text_hexdigest(data):
    """returns md5 hexadecimal checksum of string/unicode data

    NOTE
    ----
    The md5 sum of get_text_hexdigest can differ from get_file_hexdigest.
    This will occur if the line ending character differs from being read
    in 'rb' versus 'r' modes.
    """
    data_class = data.__class__
    # fmt: off
    if data_class in ("".__class__, u"".__class__):
        data = data.encode("utf-8")
    elif data.__class__ != b"".__class__:
        raise TypeError("can only checksum string, unicode or bytes data")
    # fmt: on
    md5 = hashlib.md5()
    md5.update(data)
    return md5.hexdigest()

async def test_get_rdf_turtle(client: Any, fs: Any) -> None:
    """Should return status 200 OK and RDF as turtle."""
    contents = '<http://example.com/drewp> <http://example.com/says> "Hello World" .'
    fs.create_file(
        "/srv/www/static-rdf-server/data/ontology-type-1/ontology-1/ontology-1.ttl",
        contents=contents,
    )

    headers = {hdrs.ACCEPT: "text/turtle"}

    response = await client.get("/ontology-type-1/ontology-1", headers=headers)

    assert response.status == 200
    assert "text/turtle" in response.headers[hdrs.CONTENT_TYPE]
    text = await response.text()
    assert text == contents

def rbInsertFixup(T, z):
    """Recolor and rotate nodes to restore the red-black tree properties."""
    RED = RBTree.COLOR_RED
    BLACK = RBTree.COLOR_BLACK
    while z.p.color == RED:
        if z.p == z.p.p.left:
            y = z.p.p.right
            if y.color == RED:
                z.p.color = BLACK
                y.color = BLACK
                z.p.p.color = RED
                z = z.p.p
            else:
                if z == z.p.right:
                    z = z.p
                    leftRotate(T, z)
                z.p.color = BLACK
                z.p.p.color = RED
                rightRotate(T, z.p.p)
        elif z.p == z.p.p.right:
            y = z.p.p.left
            if y.color == RED:
                z.p.color = BLACK
                y.color = BLACK
                z.p.p.color = RED
                z = z.p.p
            else:
                if z == z.p.left:
                    z = z.p
                    rightRotate(T, z)
                z.p.color = BLACK
                z.p.p.color = RED
                leftRotate(T, z.p.p)
    T.root.color = BLACK

def recipe(recipe_id):
    """
    Display the recipe on-page for each recipe id that was requested
    """
    # Update the rating if it's an AJAX call
    if request.method == "POST":
        # check if the user is logged in in order to proceed with rating
        if not session:
            return json.dumps({'status': 'not logged in'})
        # check that the recipe id hasn't been changed
        if not is_valid(recipe_id):
            return json.dumps({'status': 'error'})
        # the query for the specific recipe that has to be rated
        recipe = mongo.db.recipes.find_one({"_id": ObjectId(recipe_id)})
        # if the user wants to rate their own recipe, return denied
        if recipe["created_by"] == session["user"]:
            return json.dumps({'status': 'denied'})
        # check that the user didn't alter the form value
        new_rating = request.form.get("stars")
        if int(new_rating) > 0 and int(new_rating) <= 5:
            # update the recipe rating
            rating = update_recipe_rating(mongo, new_rating, recipe)
            return json.dumps({'status': 'success', 'rating': rating})
        return json.dumps({'status': 'error'})

    # check that the recipe id hasn't been changed
    if not is_valid(recipe_id):
        return redirect(url_for('error', code=404))
    # get the categories that are in use for the navigation menu
    nav_categories = mongo.db.recipes.distinct("category_name")
    # the query for the specific recipe that the user wants to access
    recipe = mongo.db.recipes.find_one({"_id": ObjectId(recipe_id)})
    # added in case the owner decides to delete the recipe while other users
    # might be on this recipe page, which would cause an error after refreshing
    # the page, as page_set would access None["recipe_name"]
    if not recipe:
        return redirect(url_for('error', code=404))
    # set up the page_set object
    page_set = {
        "title": recipe["recipe_name"].title(),
        "type": "recipe"
    }
    return render_template("pages/recipe.html",
                           recipe=recipe,
                           page_set=page_set,
                           nav_categories=nav_categories)

def api_program_ordering(request, program):
    """Returns program-wide RF-aware ordering (used after indicator deletion on program page)"""
    try:
        data = ProgramPageIndicatorUpdateSerializer.load_for_pk(program).data
    except Program.DoesNotExist:
        logger.warning('attempt to access program page ordering for bad pk {}'.format(program))
        return JsonResponse({'success': False, 'msg': 'bad Program PK'})
    return JsonResponse(data)

def isinteger(x):
    """determine if a string can be converted to an integer"""
    try:
        a = int(x)
    except ValueError:
        return False
    else:
        return True

def unmarshal_tools_pcr_values(
    buf: bytes, selections: TPML_PCR_SELECTION
) -> Tuple[int, List[bytes]]:
    """Unmarshal PCR digests from tpm2_quote using the values format.

    Args:
        buf (bytes): content of tpm2_quote PCR output.
        selections (TPML_PCR_SELECTION): The selected PCRs.

    Returns:
        A tuple of the number of bytes consumed from buf and a list of digests.
    """
    trs = list()
    for sel in selections:
        digsize = _get_digest_size(sel.hash)
        pb = bytes(reversed(bytes(sel.pcrSelect)))
        pi = int.from_bytes(pb, "big")
        for i in range(0, sel.sizeofSelect * 8):
            if pi & (1 << i):
                trs.append(digsize)

    n = 0
    digs = list()
    for s in trs:
        dig = buf[:s]
        n += s
        digs.append(dig)
        buf = buf[s:]

    return n, digs

def new_topic(request):
    """Add a new topic."""
    if request.method != 'POST':
        # No data submitted; create a blank form.
        form = TopicForm()
    else:
        # POST data submitted; process the data.
        form = TopicForm(request.POST)
        if form.is_valid():
            new_topic = form.save(commit=False)
            new_topic.owner = request.user
            new_topic.save()
            return HttpResponseRedirect(reverse('learning_logs:topics'))

    context = {'form': form}
    return render(request, 'learning_logs/new_topic.html', context)

def save_model(model, model_filepath):
    """
    Save the classifier model to the location specified

    Args:
        model (model): Classifier Model to save
        model_filepath (string): path to model pickle file
    """
    with open(model_filepath, 'wb') as handle:
        pickle.dump(model, handle)

def two_lot_2bid_2com_2win(self):
    """Create tender with 2 lots and 2 bids."""
    self.create_tender(initial_lots=self.test_lots_data * 2)
    tenderers = self.create_tenderers(2)
    # create bid
    self.app.authorization = ("Basic", ("broker", ""))
    self.app.post_json(
        "/tenders/{}/bids".format(self.tender_id),
        {
            "data": {
                "selfEligible": True,
                "selfQualified": True,
                "tenderers": tenderers[0],
                "lotValues": [{"value": {"amount": 500}, "relatedLot": lot["id"]} for lot in self.initial_lots],
            }
        },
    )
    # create second bid
    self.app.authorization = ("Basic", ("broker", ""))
    self.app.post_json(
        "/tenders/{}/bids".format(self.tender_id),
        {
            "data": {
                "selfEligible": True,
                "selfQualified": True,
                "tenderers": tenderers[1],
                "lotValues": [{"value": {"amount": 500}, "relatedLot": lot["id"]} for lot in self.initial_lots],
            }
        },
    )
    # switch to active.pre-qualification
    self.time_shift("active.pre-qualification")
    self.check_chronograph()
    response = self.app.get("/tenders/{}/qualifications?acc_token={}".format(self.tender_id, self.tender_token))
    self.assertEqual(response.content_type, "application/json")
    qualifications = response.json["data"]
    self.assertEqual(len(qualifications), 4)
    for qualification in qualifications:
        response = self.app.patch_json(
            "/tenders/{}/qualifications/{}?acc_token={}".format(
                self.tender_id, qualification["id"], self.tender_token),
            {"data": {"status": "active", "qualified": True, "eligible": True}},
        )
        self.assertEqual(response.status, "200 OK")
        self.assertEqual(response.json["data"]["status"], "active")
    response = self.app.patch_json(
        "/tenders/{}?acc_token={}".format(self.tender_id, self.tender_token),
        {"data": {"status": "active.pre-qualification.stand-still"}},
    )
    self.assertEqual(response.status, "200 OK")
    # switch to active.auction
    self.time_shift("active.auction")
    self.check_chronograph()
    # get auction info
    self.app.authorization = ("Basic", ("auction", ""))
    response = self.app.get("/tenders/{}/auction".format(self.tender_id))
    auction_bids_data = response.json["data"]["bids"]
    for lot in self.initial_lots:
        # posting auction urls
        self.app.patch_json(
            "/tenders/{}/auction/{}".format(self.tender_id, lot["id"]),
            {
                "data": {
                    "lots": [
                        {"id": i["id"], "auctionUrl": "https://tender.auction.url"}
                        for i in response.json["data"]["lots"]
                    ],
                    "bids": [
                        {
                            "id": i["id"],
                            "lotValues": [
                                {
                                    "relatedLot": j["relatedLot"],
                                    "participationUrl": "https://tender.auction.url/for_bid/{}".format(i["id"]),
                                }
                                for j in i["lotValues"]
                            ],
                        }
                        for i in auction_bids_data
                    ],
                }
            },
        )
        # posting auction results
        self.app.authorization = ("Basic", ("auction", ""))
        response = self.app.post_json(
            "/tenders/{}/auction/{}".format(self.tender_id, lot["id"]), {"data": {"bids": auction_bids_data}}
        )
    # for first lot
    lot_id = self.initial_lots[0]["id"]
    # get awards
    self.app.authorization = ("Basic", ("broker", ""))
    response = self.app.get("/tenders/{}/awards?acc_token={}".format(self.tender_id, self.tender_token))
    # get pending award
    award_id = [i["id"] for i in response.json["data"] if i["status"] == "pending" and i["lotID"] == lot_id][0]
    # set award as active
    self.app.patch_json(
        "/tenders/{}/awards/{}?acc_token={}".format(self.tender_id, award_id, self.tender_token),
        {"data": {"status": "active", "qualified": True, "eligible": True}},
    )
    # get contract id
    response = self.app.get("/tenders/{}".format(self.tender_id))
    contract_id = response.json["data"]["contracts"][-1]["id"]
    # after stand still period
    self.set_status("complete", {"status": "active.awarded"})
    # time travel
    tender = self.db.get(self.tender_id)
    for i in tender.get("awards", []):
        i["complaintPeriod"]["endDate"] = i["complaintPeriod"]["startDate"]
    self.db.save(tender)
    # sign contract
    self.app.authorization = ("Basic", ("broker", ""))
    self.app.patch_json(
        "/tenders/{}/contracts/{}?acc_token={}".format(self.tender_id, contract_id, self.tender_token),
        {"data": {"status": "active", "value": {"valueAddedTaxIncluded": False}}},
    )
    # for second lot
    lot_id = self.initial_lots[1]["id"]
    # get awards
    self.app.authorization = ("Basic", ("broker", ""))
    response = self.app.get("/tenders/{}/awards?acc_token={}".format(self.tender_id, self.tender_token))
    # get pending award
    award_id = [i["id"] for i in response.json["data"] if i["status"] == "pending" and i["lotID"] == lot_id][0]
    # set award as unsuccessful
    self.app.patch_json(
        "/tenders/{}/awards/{}?acc_token={}".format(self.tender_id, award_id, self.tender_token),
        {"data": {"status": "unsuccessful"}},
    )
    # get awards
    self.app.authorization = ("Basic", ("broker", ""))
    response = self.app.get("/tenders/{}/awards?acc_token={}".format(self.tender_id, self.tender_token))
    # get pending award
    award_id = [i["id"] for i in response.json["data"] if i["status"] == "pending" and i["lotID"] == lot_id][0]
    # set award as active
    self.app.patch_json(
        "/tenders/{}/awards/{}?acc_token={}".format(self.tender_id, award_id, self.tender_token),
        {"data": {"status": "active", "qualified": True, "eligible": True}},
    )
    # get contract id
    response = self.app.get("/tenders/{}".format(self.tender_id))
    contract_id = response.json["data"]["contracts"][-1]["id"]
    # after stand still period
    self.set_status("complete", {"status": "active.awarded"})
    # time travel
    tender = self.db.get(self.tender_id)
    for i in tender.get("awards", []):
        i["complaintPeriod"]["endDate"] = i["complaintPeriod"]["startDate"]
    self.db.save(tender)
    # sign contract
    self.app.authorization = ("Basic", ("broker", ""))
    self.app.patch_json(
        "/tenders/{}/contracts/{}?acc_token={}".format(self.tender_id, contract_id, self.tender_token),
        {"data": {"status": "active", "value": {"valueAddedTaxIncluded": False}}},
    )
    # check status
    self.app.authorization = ("Basic", ("broker", ""))
    response = self.app.get("/tenders/{}".format(self.tender_id))
    self.assertTrue(all([i["status"] == "complete" for i in response.json["data"]["lots"]]))
    self.assertEqual(response.json["data"]["status"], "complete")

def address(lst: Union[List[Any], str], dim: Optional[int] = None) -> Address:
    """
    Similar to :meth:`Address.fromList`, except the name is shorter, and the
    dimension is inferred if possible. Otherwise, an exception is thrown.
    Here are some examples:

    >>> address('*')
    Address(*, 0)
    >>> address([['*'], [], ['*', '*']])
    Address([[*][][**]], 2)
    """
    def dimension(k: Any) -> Optional[int]:
        """
        Tries to infer the dimension.
        """
        if k == []:
            return None
        elif k == '*':
            return 0
        elif isinstance(k, list):
            i = None  # type: Optional[int]
            for a in k:
                j = dimension(a)
                if i is None:
                    i = j
                elif j is not None and i != j:
                    # Contradictory dim inferences
                    return None
            if i is None:
                return None
            else:
                return i + 1
        else:
            raise NotImplementedError("[Address from list] Incompatible type: "
                                      "a list representation of an address "
                                      "(LA) for short, is either the string "
                                      "'*', or a list of LA")

    if isinstance(lst, str):
        if lst == '*':
            return Address.epsilon(0)
        else:
            raise DerivationError(
                "Address from list",
                "The following expression does not represent an address: "
                "{lst}",
                lst=lst)
    elif dim is not None:
        return Address.fromList(lst, dim)
    d = dimension(lst)
    if d is None:
        raise DerivationError("Address from list",
                              "Cannot infer dimension of list {lst}",
                              lst=lst)
    else:
        return Address.fromList(lst, d)

def import_olx(self, user_id, course_key_string, archive_path, archive_name, language):
    """
    Import a course or library from a provided OLX .tar.gz archive.
    """
    current_step = 'Unpacking'
    courselike_key = CourseKey.from_string(course_key_string)
    set_code_owner_attribute_from_module(__name__)
    set_custom_attributes_for_course_key(courselike_key)
    log_prefix = f'Course import {courselike_key}'
    self.status.set_state(current_step)

    data_root = path(settings.GITHUB_REPO_ROOT)
    subdir = base64.urlsafe_b64encode(repr(courselike_key).encode('utf-8')).decode('utf-8')
    course_dir = data_root / subdir

    def validate_user():
        """Validate if the user exists otherwise log error."""
        try:
            return User.objects.get(pk=user_id)
        except User.DoesNotExist as exc:
            with translation_language(language):
                self.status.fail(UserErrors.USER_PERMISSION_DENIED)
            LOGGER.error(f'{log_prefix}: Unknown User: {user_id}')
            monitor_import_failure(courselike_key, current_step, exception=exc)
            return

    def user_has_access(user):
        """Return True if user has studio write access to the given course."""
        has_access = has_course_author_access(user, courselike_key)
        if not has_access:
            message = f'User permission denied: {user.username}'
            with translation_language(language):
                self.status.fail(UserErrors.COURSE_PERMISSION_DENIED)
            LOGGER.error(f'{log_prefix}: {message}')
            monitor_import_failure(courselike_key, current_step, message=message)
        return has_access

    def file_is_supported():
        """Check if it is a supported file."""
        file_is_valid = archive_name.endswith('.tar.gz')
        if not file_is_valid:
            message = f'Unsupported file {archive_name}'
            with translation_language(language):
                self.status.fail(UserErrors.INVALID_FILE_TYPE)
            LOGGER.error(f'{log_prefix}: {message}')
            monitor_import_failure(courselike_key, current_step, message=message)
        return file_is_valid

    def file_exists_in_storage():
        """Verify archive path exists in storage."""
        archive_path_exists = course_import_export_storage.exists(archive_path)
        if not archive_path_exists:
            message = f'Uploaded file {archive_path} not found'
            with translation_language(language):
                self.status.fail(UserErrors.FILE_NOT_FOUND)
            LOGGER.error(f'{log_prefix}: {message}')
            monitor_import_failure(courselike_key, current_step, message=message)
        return archive_path_exists

    def verify_root_name_exists(course_dir, root_name):
        """Verify root xml file exists."""

        def get_all_files(directory):
            """
            For each file in the directory, yield a 2-tuple of (file-name, directory-path)
            """
            for directory_path, _dirnames, filenames in os.walk(directory):
                for filename in filenames:
                    yield (filename, directory_path)

        def get_dir_for_filename(directory, filename):
            """
            Returns the directory path for the first file found in the directory
            with the given name. If there is no file in the directory with
            the specified name, return None.
            """
            for name, directory_path in get_all_files(directory):
                if name == filename:
                    return directory_path
            return None

        dirpath = get_dir_for_filename(course_dir, root_name)
        if not dirpath:
            message = UserErrors.FILE_MISSING.format(root_name)
            with translation_language(language):
                self.status.fail(message)
            LOGGER.error(f'{log_prefix}: {message}')
            monitor_import_failure(courselike_key, current_step, message=message)
            return
        return dirpath

    user = validate_user()
    if not user:
        return

    if not user_has_access(user):
        return

    if not file_is_supported():
        return

    is_library = isinstance(courselike_key, LibraryLocator)
    is_course = not is_library
    if is_library:
        root_name = LIBRARY_ROOT
        courselike_module = modulestore().get_library(courselike_key)
        import_func = import_library_from_xml
    else:
        root_name = COURSE_ROOT
        courselike_module = modulestore().get_course(courselike_key)
        import_func = import_course_from_xml

    # Locate the uploaded OLX archive (and download it from S3 if necessary)
    # Do everything in a try-except block to make sure everything is properly cleaned up.
    try:
        LOGGER.info(f'{log_prefix}: unpacking step started')

        temp_filepath = course_dir / get_valid_filename(archive_name)
        if not course_dir.isdir():
            os.mkdir(course_dir)

        LOGGER.info(f'{log_prefix}: importing course to {temp_filepath}')

        # Copy the OLX archive from where it was uploaded to (S3, Swift, file system, etc.)
        if not file_exists_in_storage():
            return

        with course_import_export_storage.open(archive_path, 'rb') as source:
            with open(temp_filepath, 'wb') as destination:

                def read_chunk():
                    """
                    Read and return a sequence of bytes from the source file.
                    """
                    return source.read(FILE_READ_CHUNK)

                for chunk in iter(read_chunk, b''):
                    destination.write(chunk)

        LOGGER.info(f'{log_prefix}: Download from storage complete')
        # Delete from source location
        course_import_export_storage.delete(archive_path)

        # If the course has an entrance exam then remove it and its corresponding milestone.
        # current course state before import.
        if is_course:
            if courselike_module.entrance_exam_enabled:
                fake_request = RequestFactory().get('/')
                fake_request.user = user
                from .views.entrance_exam import remove_entrance_exam_milestone_reference
                # TODO: Is this really ok? Seems dangerous for a live course
                remove_entrance_exam_milestone_reference(fake_request, courselike_key)
                LOGGER.info(f'{log_prefix}: entrance exam milestone content reference has been removed')
    # Send errors to client with stage at which error occurred.
    except Exception as exception:  # pylint: disable=broad-except
        if course_dir.isdir():
            shutil.rmtree(course_dir)
            LOGGER.info(f'{log_prefix}: Temp data cleared')

        self.status.fail(UserErrors.UNKNOWN_ERROR_IN_UNPACKING)
        LOGGER.exception(f'{log_prefix}: Unknown error while unpacking', exc_info=True)
        monitor_import_failure(courselike_key, current_step, exception=exception)
        return

    # try-finally block for proper clean up after receiving file.
    try:
        tar_file = tarfile.open(temp_filepath)  # lint-amnesty, pylint: disable=consider-using-with
        try:
            safetar_extractall(tar_file, (course_dir + '/'))
        except SuspiciousOperation as exc:
            with translation_language(language):
                self.status.fail(UserErrors.UNSAFE_TAR_FILE)
            LOGGER.error(f'{log_prefix}: Unsafe tar file')
            monitor_import_failure(courselike_key, current_step, exception=exc)
            return
        finally:
            tar_file.close()

        current_step = 'Verifying'
        self.status.set_state(current_step)
        self.status.increment_completed_steps()
        LOGGER.info(f'{log_prefix}: Uploaded file extracted. Verification step started')

        dirpath = verify_root_name_exists(course_dir, root_name)
        if not dirpath:
            return

        if not validate_course_olx(courselike_key, dirpath, self.status):
            return

        dirpath = os.path.relpath(dirpath, data_root)

        current_step = 'Updating'
        self.status.set_state(current_step)
        self.status.increment_completed_steps()
        LOGGER.info(f'{log_prefix}: Extracted file verified. Updating course started')

        courselike_items = import_func(
            modulestore(),
            user.id,
            settings.GITHUB_REPO_ROOT,
            [dirpath],
            load_error_modules=False,
            static_content_store=contentstore(),
            target_id=courselike_key,
            verbose=True,
        )

        new_location = courselike_items[0].location
        LOGGER.debug('new course at %s', new_location)
        LOGGER.info(f'{log_prefix}: Course import successful')
        set_custom_attribute('course_import_completed', True)
    except (CourseImportException, InvalidProctoringProvider, DuplicateCourseError) as known_exe:
        handle_course_import_exception(courselike_key, known_exe, self.status)
    except Exception as exception:  # pylint: disable=broad-except
        handle_course_import_exception(courselike_key, exception, self.status, known=False)
    finally:
        if course_dir.isdir():
            shutil.rmtree(course_dir)
            LOGGER.info(f'{log_prefix}: Temp data cleared')

        if self.status.state == 'Updating' and is_course:
            # Reload the course so we have the latest state
            course = modulestore().get_course(courselike_key)
            if course.entrance_exam_enabled:
                entrance_exam_chapter = modulestore().get_items(
                    course.id,
                    qualifiers={'category': 'chapter'},
                    settings={'is_entrance_exam': True}
                )[0]
                metadata = {'entrance_exam_id': str(entrance_exam_chapter.location)}
                CourseMetadata.update_from_dict(metadata, course, user)
                from .views.entrance_exam import add_entrance_exam_milestone
                add_entrance_exam_milestone(course.id, entrance_exam_chapter)
                LOGGER.info(f'Course import {course.id}: Entrance exam imported')

async def initialize_agent(request):
    """Initialize agent."""
    agent = request.app['agent']
    data = await request.post()
    agent.owner = data['agent_name']
    agent.endpoint = data['endpoint']
    wallet_name = '%s-wallet' % agent.owner

    # pylint: disable=bare-except
    # TODO: better handle potential exceptions.
    try:
        await wallet.create_wallet('pool1', wallet_name, None, None, None)
    except:
        pass

    try:
        agent.wallet_handle = await wallet.open_wallet(wallet_name, None, None)
    except:
        print("Could not open wallet!")
        raise web.HTTPBadRequest()

    agent.initialized = True
    raise web.HTTPFound('/')

def create_scenario_dataframes_geco(scenario):
    """
    Reads GECO dataset and creates a dataframe of the given scenario
    """
    df_sc = pd.read_csv(io["scenario_geco_path"])
    df_sc_europe = df_sc.loc[df_sc["Country"] == "EU28"]
    df_scenario = df_sc_europe.loc[df_sc_europe["Scenario"] == scenario]
    return df_scenario

def escape_env_var(varname):
    """
    Convert a string to a form suitable for use as an environment variable.

    The result will be all uppercase, and will have all invalid characters
    replaced by an underscore.

    The result will match the following regex: [a-zA-Z_][a-zA-Z0-9_]*

    Example:
        "my.private.registry/cat/image" will become
        "MY_PRIVATE_REGISTRY_CAT_IMAGE"
    """
    varname = list(varname.upper())
    if not varname[0].isalpha():
        varname[0] = "_"
    for i, c in enumerate(varname):
        if not c.isalnum() and c != "_":
            varname[i] = "_"
    return "".join(varname)

def from_file(handle, output):
    """
    Convert a handle of JSON formatted objects and write a GFF3 file to the
    given output handle.
    """
    parsed = coord.from_file(handle)
    features = regions_as_features(parsed)
    write_gff_text(features, output)

def rms(signal):
    """
    rms(signal)

    Measures root mean square of a signal

    Parameters
    ----------
    signal : 1D numpy array
    """
    return np.sqrt(np.mean(np.square(signal)))

def get_template_page(page):
    """Method used to get a page based on the theme.

    It will check the options.theme defined theme for the page first,
    and if it isn't found it will fall back to options.theme_dir/cling
    for the template page.
    """
    templates = ['%s.html' % os.path.join(options.theme_dir, 'cling', page)]
    page_template = templates[0]
    if options.theme is not None:
        templates.append('%s.html' % os.path.join(options.theme_dir, options.theme, page))
    for pt in reversed(templates):
        if os.path.isfile(pt):
            page_template = pt
            break
    return page_template

def named_masks(module, prefix=""):
    """Returns an iterator over all masks in the network, yielding
    both the name of a maskable submodule as well as its current mask.

    Parameters
    ----------
    module : torch.nn.Module
        The network, which is scanned for variational modules.

    prefix : string, default empty
        The prefix for the yielded names.

    Yields
    ------
    (string, torch.Tensor):
        Name and the mask used to activate/deactivate the parameters.

    Note
    ----
    Masks from duplicate (shared or recurrent) modules are returned only once.
    """
    # yields own mask and masks of every descendant
    for name, mod in module.named_modules(prefix=prefix):
        if isinstance(mod, BaseMasked):
            yield name, mod.mask

def handle_with_item(items: List[_T],
                     item_handler: Callable[[_T], _U],
                     result_handler: Callable[[_T, _U], Any],
                     pool: Optional[ThreadPool] = None) -> None:
    """
    Processes all the items in a separate thread using `item_handler` and
    post-processes the item - return value pairs of `item_handler` with
    `result_handler`.

    Example:
        >>> def pow2(x):
        ...     return x**2
        >>> def printer(x, y = "None"):
        ...     print(x, y)
        >>> handle_with_item([1, 2, 3], pow2, printer)
        1 1
        2 4
        3 9

    Arguments:
        items (List[_T]): The items to process with `item_handler`.
        item_handler (Callable[[_T], _U]): Function that can handle one item
            from `items`.
        result_handler (Callable[[_T, _U], Any]): Function that handles the
            item - return value pairs of `item_handler`.
        pool (Optional[ThreadPool]): An optional thread pool to use to process
            the given items. If `None`, then the default thread pool will be used.
    """
    from functools import partial

    if pool is None:
        pool = get_default_pool()
    for data in pool.imap_unordered(partial(_with_item, item_handler), items):
        result_handler(*data)

def get_config(
    config_path, trained: bool = False, runner="d2go.runner.GeneralizedRCNNRunner"
):
    """
    Returns a config object for a model in model zoo.

    Args:
        config_path (str): config file name relative to d2go's "configs/" directory,
            e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
        trained (bool): If True, will set ``MODEL.WEIGHTS`` to trained model zoo weights.
            If False, the checkpoint specified in the config file's ``MODEL.WEIGHTS`` is used
            instead; this will typically (though not always) initialize a subset of weights using
            an ImageNet pre-trained model, while randomly initializing the other weights.

    Returns:
        CfgNode: a config object
    """
    cfg_file = get_config_file(config_path)
    runner = create_runner(runner)
    cfg = runner.get_default_cfg()
    cfg.merge_from_file(cfg_file)
    if trained:
        cfg.MODEL.WEIGHTS = get_checkpoint_url(config_path)
    return cfg

def make_game(
    width: int = defaults.WIDTH,
    height: int = defaults.HEIGHT,
    max_rooms: int = defaults.MAX_ROOMS,
    seed: Optional[int] = defaults.SEED,
    slippery_coefficient: float = defaults.SLIPPERY_COEFFICIENT,
    default_reward: float = defaults.DEFAULT_REWARD,
    goal_reward: float = defaults.GOAL_REWARD,
    catastrophe_reward: float = defaults.CATASTROPHE_REWARD,
) -> Engine:
    """Builds a gridworld `pycolab` game.

    Returns:
        A `pycolab` game.
    """
    maze = labmaze.RandomMaze(
        width=width,
        height=height,
        max_rooms=max_rooms,
        random_seed=seed,
        spawns_per_room=1,
        spawn_token="P",
        objects_per_room=1,
        object_token="G",
    )

    # Keep only one agent position.
    agent_positions = np.asarray(np.where(maze.entity_layer == "P"))
    I_p = np.random.choice(agent_positions.shape[-1])
    maze.entity_layer[maze.entity_layer == "P"] = " "
    maze.entity_layer[tuple(agent_positions[:, I_p])] = "P"

    # Keep only one goal and one catastrophe tile.
    goal_positions = np.asarray(np.where(maze.entity_layer == "G"))
    I_g, I_c = np.random.choice(goal_positions.shape[-1], size=2, replace=False)
    maze.entity_layer[maze.entity_layer == "G"] = " "
    maze.entity_layer[tuple(goal_positions[:, I_g])] = "G"
    maze.entity_layer[tuple(goal_positions[:, I_c])] = "C"

    art = str(maze.entity_layer).split("\n")[:-1]
    sprites = {
        "P": ascii_art.Partial(
            AgentSprite,
            default_reward=default_reward,
            slippery_coefficient=slippery_coefficient,
            seed=seed,
        )
    }
    drapes = {
        "G": ascii_art.Partial(
            BoxDrape,
            reward=goal_reward,
            terminal=True,
        ),
        "C": ascii_art.Partial(
            BoxDrape,
            reward=catastrophe_reward,
            terminal=True,
        )
    }

    return ascii_art.ascii_art_to_game(
        art,
        what_lies_beneath=" ",
        sprites=sprites,
        drapes=drapes,
    )

def weighted_var(x, weights=None):
    """Unbiased weighted variance (sample variance) for the components of x.

    The weights are assumed to be non random (reliability weights).

    Parameters
    ----------
    x : np.ndarray
        1d or 2d with observations in rows
    weights : np.ndarray or None
        1d array of weights. None defaults to standard variance.

    Returns
    -------
    s2 : np.array
        1d vector of component variances

    References
    ----------
    [1] https://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance
    """
    if weights is None:
        weights = np.ones(len(x))

    V_1 = np.sum(weights)
    V_2 = np.sum(weights ** 2)

    xbar = np.average(x, weights=weights, axis=0)
    numerator = weights.dot((x - xbar) ** 2)
    s2 = numerator / (V_1 - (V_2 / V_1))
    return s2

def start_detailed_result_worker_route():
    """
    Add detailed result worker if one does not exist.

    :return: JSON
    """
    # check if worker already exists
    if check_worker_result(RABBITMQ_DETAILED_RESULT_QUEUE_NAME) == env.HTML_STATUS.OK.value:
        return jsonify(status=env.HTML_STATUS.OK.value)

    if 'db_name' in request.json:
        db_name = request.json["db_name"]
    else:
        return jsonify(status=env.HTML_STATUS.ERROR.value, message="No database selected")

    Process(target=start_result_worker,
            args=(RABBITMQ_DETAILED_RESULT_QUEUE_NAME,
                  DB_DETAILED_RESULT_COLLECTION_NAME,
                  db_name)).start()

    return jsonify(status=env.HTML_STATUS.OK.value,
                   detailed_result_worker=check_worker_result(RABBITMQ_DETAILED_RESULT_QUEUE_NAME))

def test_get_http_response_querystring_payload():
    """Test get_http_response_querystring_payload"""
    function = {
        "name": "code_test",
        "header": "api",
        "method": "get",
        "path": "/test/api",
        "querystring": {},
        "payload": {},
    }
    assert functions.get_http_response(function) == [
        ' response = requests.get(f"https://{self.hostname}/test/api", headers=self.api, params=querystring, data=payload)',
        "",
    ]

def _find_next_pickup_item(not_visited_neighbors, array_of_edges_from_node):
    """
    Args:
        not_visited_neighbors:
        array_of_edges_from_node:

    Returns:
    """
    # last node in visited_nodes is where the traveling salesman is.
    cheapest_path = np.argmin(array_of_edges_from_node[not_visited_neighbors])
    return not_visited_neighbors[cheapest_path]

def deformable_conv(input,
                    offset,
                    mask,
                    num_filters,
                    filter_size,
                    stride=1,
                    padding=0,
                    dilation=1,
                    groups=None,
                    deformable_groups=None,
                    im2col_step=None,
                    param_attr=None,
                    bias_attr=None,
                    modulated=True,
                    name=None):
    """
    :api_attr: Static Graph

    **Deformable Convolution op**

    Compute 2-D deformable convolution on 4-D input.
    Given input image x, output feature map y, the deformable convolution operation can be expressed as follow:


    Deformable Convolution v2:

    .. math::

        y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k) * \Delta m_k}

    Deformable Convolution v1:

    .. math::

        y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k)}

    Where :math:`\Delta p_k` and :math:`\Delta m_k` are the learnable offset and modulation scalar for the k-th location,
    which :math:`\Delta m_k` is one in deformable convolution v1. Please refer to
    `Deformable ConvNets v2: More Deformable, Better Results <https://arxiv.org/abs/1811.11168v2>`_
    and `Deformable Convolutional Networks <https://arxiv.org/abs/1703.06211>`_.

    Example:
        - Input:

          Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`

          Filter shape: :math:`(C_{out}, C_{in}, H_f, W_f)`

          Offset shape: :math:`(N, 2 * deformable\_groups * H_f * H_w, H_{in}, W_{in})`

          Mask shape: :math:`(N, deformable\_groups * H_f * H_w, H_{in}, W_{in})`

        - Output:

          Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`

        Where

        .. math::

            H_{out}&= \\frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\\\
            W_{out}&= \\frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1

    Args:
        input (Variable): The input image with [N, C, H, W] format. A Tensor with type
            float32, float64.
        offset (Variable): The input coordinate offset of deformable convolution layer.
            A Tensor with type float32, float64.
        mask (Variable, Optional): The input mask of deformable convolution layer.
            A Tensor with type float32, float64. It should be None when you use
            deformable convolution v1.
        num_filters(int): The number of filter. It is as same as the output
            image channel.
        filter_size (int|tuple): The filter size. If filter_size is a tuple,
            it must contain two integers, (filter_size_H, filter_size_W).
            Otherwise, the filter will be a square.
        stride (int|tuple): The stride size. If stride is a tuple, it must
            contain two integers, (stride_H, stride_W). Otherwise, the
            stride_H = stride_W = stride. Default: stride = 1.
        padding (int|tuple): The padding size. If padding is a tuple, it must
            contain two integers, (padding_H, padding_W). Otherwise, the
            padding_H = padding_W = padding. Default: padding = 0.
        dilation (int|tuple): The dilation size. If dilation is a tuple, it must
            contain two integers, (dilation_H, dilation_W). Otherwise, the
            dilation_H = dilation_W = dilation. Default: dilation = 1.
        groups (int): The groups number of the deformable conv layer. According to
            grouped convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
            the first half of the filters is only connected to the first half
            of the input channels, while the second half of the filters is only
            connected to the second half of the input channels. Default: groups=1.
        deformable_groups (int): The number of deformable group partitions.
            Default: deformable_groups = 1.
        im2col_step (int): Maximum number of images per im2col computation;
            The total batch size should be divisible by this value or smaller
            than this value; if you face out of memory problem, you can try
            to use a smaller value here.
            Default: im2col_step = 64.
        param_attr (ParamAttr, Optional): The parameter attribute for learnable parameters/weights
            of deformable conv. If it is set to None or one attribute of ParamAttr,
            deformable conv will create ParamAttr as param_attr.
            If the Initializer of the param_attr is not set, the parameter is
            initialized with :math:`Normal(0.0, std)`, and the
            :math:`std` is :math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. Default: None.
        bias_attr (ParamAttr|bool, Optional): The parameter attribute for the bias of
            deformable conv layer. If it is set to False, no bias will be added
            to the output units. If it is set to None or one attribute of ParamAttr, conv2d
            will create ParamAttr as bias_attr. If the Initializer of the bias_attr
            is not set, the bias is initialized zero. Default: None.
        modulated (bool): Make sure which version should be used between v1 and v2, where v2 is \
            used while True. Default: True.
        name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
            Generally, no setting is required. Default: None.

    Returns:
        Variable: The tensor variable storing the deformable convolution \
                  result. A Tensor with type float32, float64.

    Raises:
        ValueError: If the shapes of input, filter_size, stride, padding and
                    groups mismatch.

    Examples:
        .. code-block:: python

          #deformable conv v2:

          import paddle.fluid as fluid
          C_in, H_in, W_in = 3, 32, 32
          filter_size, deformable_groups = 3, 1
          data = fluid.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
          offset = fluid.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
          mask = fluid.data(name='mask', shape=[None, deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
          out = fluid.layers.deformable_conv(input=data, offset=offset, mask=mask,
                                             num_filters=2, filter_size=filter_size, padding=1, modulated=True)

          #deformable conv v1:

          import paddle.fluid as fluid
          C_in, H_in, W_in = 3, 32, 32
          filter_size, deformable_groups = 3, 1
          data = fluid.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
          offset = fluid.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
          out = fluid.layers.deformable_conv(input=data, offset=offset, mask=None,
                                             num_filters=2, filter_size=filter_size, padding=1, modulated=False)
    """

    check_variable_and_dtype(input, "input", ['float32', 'float64'],
                             'deformable_conv')
    check_variable_and_dtype(offset, "offset", ['float32', 'float64'],
                             'deformable_conv')
    check_type(mask, 'mask', (Variable, type(None)), 'deformable_conv')

    num_channels = input.shape[1]
    assert param_attr is not False, "param_attr should not be False here."

    helper = LayerHelper('deformable_conv', **locals())
    dtype = helper.input_dtype()

    if not isinstance(input, Variable):
        raise TypeError("Input of deformable_conv must be Variable")
    if not isinstance(offset, Variable):
        raise TypeError("Input Offset of deformable_conv must be Variable")

    if groups is None:
        num_filter_channels = num_channels
    else:
        if num_channels % groups != 0:
            raise ValueError("num_channels must be divisible by groups.")
        num_filter_channels = num_channels // groups

    filter_size = utils.convert_to_list(filter_size, 2, 'filter_size')
    stride = utils.convert_to_list(stride, 2, 'stride')
    padding = utils.convert_to_list(padding, 2, 'padding')
    dilation = utils.convert_to_list(dilation, 2, 'dilation')

    input_shape = input.shape
    filter_shape = [num_filters, int(num_filter_channels)] + filter_size

    def _get_default_param_initializer():
        filter_elem_num = filter_size[0] * filter_size[1] * num_channels
        std = (2.0 / filter_elem_num)**0.5
        return Normal(0.0, std, 0)

    filter_param = helper.create_parameter(
        attr=helper.param_attr,
        shape=filter_shape,
        dtype=dtype,
        default_initializer=_get_default_param_initializer())

    pre_bias = helper.create_variable_for_type_inference(dtype)

    if modulated:
        helper.append_op(
            type='deformable_conv',
            inputs={
                'Input': input,
                'Filter': filter_param,
                'Offset': offset,
                'Mask': mask,
            },
            outputs={"Output": pre_bias},
            attrs={
                'strides': stride,
                'paddings': padding,
                'dilations': dilation,
                'groups': groups,
                'deformable_groups': deformable_groups,
                'im2col_step': im2col_step,
            })
    else:
        helper.append_op(
            type='deformable_conv_v1',
            inputs={
                'Input': input,
                'Filter': filter_param,
                'Offset': offset,
            },
            outputs={"Output": pre_bias},
            attrs={
                'strides': stride,
                'paddings': padding,
                'dilations': dilation,
                'groups': groups,
                'deformable_groups': deformable_groups,
                'im2col_step': im2col_step,
            })

    output = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
    return output

def ieee():
    """IEEE fixture."""
    return t.EUI64.deserialize(b"ieeeaddr")[0]

def test_no_log_none(stdin, capfd):
    """Allow Ansible to make the decision by matching the argument name
    against PASSWORD_MATCH."""
    arg_spec = {
        "arg_pass": {}
    }
    am = basic.AnsibleModule(arg_spec)
    # Omitting no_log is only picked up by _log_invocation, so the value never
    # makes it into am.no_log_values. Instead we can check for the warning
    # emitted by am._log_invocation.
    assert len(get_warning_messages()) > 0

def is_in(a_list):
    """Returns a *function* that checks if its argument is in list.

    Avoids recalculation of list at every comparison."""
    def check(arg):
        return arg in a_list
    return check

def get_log_record_extra_fields(record):
    """Taken from `common` repo logging module"""
    # The list contains all the attributes listed in
    # http://docs.python.org/library/logging.html#logrecord-attributes
    skip_list = (
        'args', 'asctime', 'created', 'exc_info', 'exc_text', 'filename',
        'funcName', 'id', 'levelname', 'levelno', 'lineno', 'module',
        'msecs', 'message', 'msg', 'name', 'pathname', 'process',
        'processName', 'relativeCreated', 'thread', 'threadName', 'extra',
        'stack_info', 'exc_type', 'exc_msg')
    easy_types = (str, bool, dict, float, int, list, type(None))

    fields = {}

    for key, value in record.__dict__.items():
        if key not in skip_list:
            if isinstance(value, easy_types):
                fields[key] = value
            else:
                fields[key] = repr(value)

    return fields

def make_ideal(psr):
    """Adjust the TOAs so that the residuals are zero, then refit."""
    psr.stoas[:] -= psr.residuals() / 86400.0
    psr.fit()

def test_should_raise_if_wrong_key_dvc_extra_meta():
    """Test that an unexpected keyword in dvc extra meta raises."""
    with pytest.raises(MlVToolException):
        DocstringDvcExtra.from_meta(args=['dvc-wrong'], description='--extra p')

def test_valid_allocation_transfer_agency(database):
    """If File C (award financial) record has a valid allocation transfer agency, rule always passes."""
    cgac = CGACFactory(cgac_code='good')
    af = AwardFinancialFactory(
        piid='some_piid', parent_award_id='some_parent_award_id',
        allocation_transfer_agency=cgac.cgac_code)
    ap = AwardProcurementFactory(piid='some_other_piid', parent_award_id='some_parent_award_id')

    assert number_of_errors(_FILE, database, models=[af, ap, cgac]) == 0

def test_enum():
    """Test manipulating an Enum member."""
    e = Enum('a', 'b')
    assert e.items == ('a', 'b')
    assert e.default_value_mode[1] == 'a'

    e_def = e('b')
    assert e_def is not e
    assert e_def.default_value_mode[1] == 'b'
    with pytest.raises(TypeError):
        e('c')

    e_add = e.added('c', 'd')
    assert e_add is not e
    assert e_add.items == ('a', 'b', 'c', 'd')

    e_rem = e.removed('a')
    assert e_rem is not e
    assert e_rem.items == ('b',)
    assert e_rem.default_value_mode[1] == 'b'
    with pytest.raises(ValueError):
        e.removed('a', 'b')

    with pytest.raises(ValueError):
        Enum()

def ldap_is_intromember(member):
    """
    :param member: A CSHMember instance
    """
    return _ldap_is_member_of_group(member, 'intromembers')

def test_dump_metadata(arterynetwork_def, param):
    """Test correct execution of dump_metadata.

    :param arterynetwork_def: Artery network object
    :param param: Config parameters
    """
    an = arterynetwork_def
    order, rc, qc, Ru, Rd, L, k1, k2, k3, rho, Re, nu, p0, R1, R2, CT,\
        Nt, Nx, T, N_cycles, output_location, theta, Nt_store,\
        N_cycles_store, store_area, store_pressure, q0, q_half = param
    an.dump_metadata(Nt_store, N_cycles_store, store_area, store_pressure)
    order, Nx, Nt, T0, T, L, rc, qc, rho, mesh_locations, names, locations = \
        read_output(an.output_location+'/data.cfg')
    assert(order == an.order)
    assert(Nx == an.Nx)
    assert(Nt == Nt_store*N_cycles_store)
    assert(near(T0, an.T*(an.N_cycles-N_cycles_store)))
    assert(near(T, an.T*an.N_cycles))
    for i in range(len(L)):
        assert(near(L[i], an.arteries[i].L))
    assert(near(rc, an.rc))
    assert(near(qc, an.qc))
    assert(near(rho, an.rho))
    for i in range(len(mesh_locations)):
        assert(mesh_locations[i] ==
               ('%s/mesh_%i.xml.gz' % (an.output_location, i)))
    i = 0
    assert(names[i] == 'flow')
    if store_area:
        i += 1
        assert(names[i] == 'area')
    if store_pressure:
        i += 1
        assert(names[i] == 'pressure')
    for i in range(len(locations)):
        assert(locations[i] == ('%s/%s' % (an.output_location, names[i])))

def details(request, slug):
    """
    Show product set
    """
    productset = get_object_or_404(models.ProductSet, slug=slug)
    context = {}
    response = []
    variant_instances = productset.variant_instances()
    signals.product_view.send(
        sender=type(productset), instances=variant_instances,
        request=request, response=response, extra_context=context)
    if len(response) == 1:
        return response[0]
    elif len(response) > 1:
        raise ValueError("Multiple responses returned.")
    context['variants'] = variant_instances
    context['productset'] = productset
    return direct_to_template(request, 'satchless/productset/details.html',
                              context)

def applies(platform_string, to='current'):
    """Returns True if the given platform string applies to the platform
    specified by 'to'."""
    def _parse_component(component):
        component = component.strip()

        parts = component.split("-")
        if len(parts) == 1:
            if parts[0] in VALID_PLATFORMS_FILTER:
                return parts[0], None
            elif parts[0] in _ARCHBITS_TO_ARCH:
                return "all", parts[0]
            else:
                raise ValueError(
                    "Invalid filter string: '{}'".format(component)
                )
        elif len(parts) == 2:
            if (
                parts[0] not in VALID_PLATFORMS_FILTER
                or parts[1] not in _ARCHBITS_TO_ARCH
            ):
                raise ValueError(
                    "Invalid filter string: '{}'".format(component)
                )
            return parts[0], parts[1]
        else:
            raise ValueError(
                "Invalid filter string: '{}'".format(component)
            )

    def _are_compatible(short_left, short_right):
        return short_left == short_right or \
            short_left == "rh" and short_right.startswith("rh") \
            or short_right == "rh" and short_left.startswith("rh") \
            or short_left == "all"

    if isinstance(to, str):
        if to == 'current':
            full = EPDPlatform.from_running_system()
            to_platform = full.platform_name
            to_arch_bits = full.arch_bits
        elif '-' in to:
            full = EPDPlatform.from_epd_string(to)
            to_platform = full.platform_name
            to_arch_bits = full.arch_bits
        else:
            if not (to in PLATFORM_NAMES or to == 'rh'):
                raise ValueError("Invalid 'to' argument: {0!r}".format(to))
            to_platform = to
            to_arch_bits = None
    else:
        to_platform = to.platform_name
        to_arch_bits = to.arch_bits

    conditions = []

    platform_string = platform_string.strip()
    if platform_string.startswith("!"):
        invert = True
        platform_string = platform_string[1:]
    else:
        invert = False

    platform_strings = [s for s in platform_string.split(",")]

    for platform_string in platform_strings:
        short, bits = _parse_component(platform_string)
        if _are_compatible(short, to_platform):
            if bits is None:
                conditions.append(True)
            else:
                conditions.append(bits == to_arch_bits or to_arch_bits is None)
        else:
            conditions.append(False)

    if invert:
        return not any(conditions)
    else:
        return any(conditions)

def _GetClassLock(cls):
    """Returns the lock associated with the class."""
    with _CLASS_LOCKS_LOCK:
        if cls not in _CLASS_LOCKS:
            _CLASS_LOCKS[cls] = threading.Lock()
        return _CLASS_LOCKS[cls]

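The function presumes two module-level globals; a minimal sketch of how they would be declared and used (the Worker class is hypothetical):

import threading

_CLASS_LOCKS_LOCK = threading.Lock()  # guards the registry itself
_CLASS_LOCKS = {}                     # maps class -> its dedicated lock

class Worker:
    pass

# Repeated calls hand back the same lock object for a given class.
assert _GetClassLock(Worker) is _GetClassLock(Worker)
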
def _get_expression_table_name() -> TableName:
    """
    Get an expression table name. This value is switched depending on
    whether the current scope is an event handler's one or not.

    Returns
    -------
    table_name : str
        Target expression table name.
    """
    from apysc._expression import event_handler_scope
    event_handler_scope_count: int = \
        event_handler_scope.get_current_event_handler_scope_count()
    if event_handler_scope_count == 0:
        return TableName.EXPRESSION_NORMAL
    return TableName.EXPRESSION_HANDLER

def log(config, results):
    """
    analysis.py plugin: Analyze mongod.log files.

    :param ConfigDict config: The global config.
    :param ResultsFile results: Object to add results to.
    """
    LOGGER.info("Checking log files.")
    reports = config["test_control"]["reports_dir_basename"]
    perf_json = config["test_control"]["perf_json"]["path"]
    task = config["test_control"]["task_name"]
    rules = config["analysis"]["rules"]
    new_results, _ = analyze_logs(reports, rules, perf_file_path=perf_json, task=task)
    results.extend(new_results)

def measure_option(mode, number=1, repeat=1, timeout=60, parallel_num=1, pack_size=1, check_correctness=False, build_option=None, replay_db=None, save_to_replay_db=True, rpc_device_key=None, rpc_priority=1, rpc_timeout=60, rpc_tracker_addr=None, use_ndk=False, custom_measure_batch=None): """Configure how to do measurement Parameters ---------- mode: str 'local': use the local device for measurement. In this mode, the tuner starts a tracker and a RPC server silently for the user. 'rpc': request devices for measurement from rpc tracker. In this mode, you should start a rpc tracker in a separate processing. 'custom': use custom measure function 'local-nofork': use local device for measure but does not use multiprocessing. This mode is suitable for debug, but does not support timeout and parallel. number : int, optional Number of times to do the measurement for average repeat : int, optional Number of times to repeat the measurement. In total, the generated code will be run (1 + number x repeat) times, where the first one is warm up. The returned result contains `repeat` costs, each of which is the average of `number` test run. timeout: int, optional Timeout for a whole batch. TimeoutError will be returned as the result if a task timeouts. parallel_num: int, optional The number of measurement task that can run in parallel. Set this according to the number of cpu cores (for compilation) and the number of devices you have (for measuring generate code). pack_size : int, optional Number of configs to measure in one RPC call. Usually this can be set to 1. If your device has high cost to establish a rpc connection, set this higher. check_correctness: bool Whether check correctness after measurement. build_option: Dict, optional Build options for tvm.build_config replay_db : Database, optional The database that we retrieve saved MeasureResults from save_to_replay_db: bool, optional Whether save measure result to database. This is useless when replay_db is None rpc_priority: int, optional Priority of this task, used by scheduler in tracker rpc_device_key: str, optional The device key of registered devices in tracker rpc_timeout: int, optional Timeout of rpc session rpc_tracker_addr: Tuple(str, int), optional The address of rpc tracker in Tuple(host, port) format. If is set, will use this address. If is not set, will use environment variable "TVM_TRACKER_HOST" and "TVM_TRACKER_PORT" use_ndk: bool, option Whether export requires ndk custom_measure_batch: callable, optional custom measure function Returns ------- options: dict A dict to store all options """ return { 'mode': mode, 'number': number, 'repeat': repeat, 'timeout': timeout, 'parallel_num': parallel_num, 'pack_size': pack_size, 'check_correctness': check_correctness, 'build_option': build_option, 'replay_db': replay_db, 'save_to_replay_db': save_to_replay_db, 'rpc_device_key': rpc_device_key, 'rpc_priority': rpc_priority, 'rpc_timeout': rpc_timeout, 'rpc_tracker_addr': rpc_tracker_addr, 'use_ndk': use_ndk, 'custom_measure_batch': custom_measure_batch }
5,331,355
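# A minimal usage sketch (not from the original source): building measurement
# options for local tuning. The tuner that would consume this dict is assumed
# to follow an autotvm-style API; measure_option itself just returns the dict.
options = measure_option(mode='local', number=5, repeat=2, timeout=20,
                         parallel_num=4, check_correctness=True)
assert options['mode'] == 'local' and options['repeat'] == 2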
def get_contributions(user, latest, org=None):
    """
    Traverses the latest array and prints a table of contributions.
    If the org argument is present, only the repos belonging to that
    org are added to the table.
    """
    print("Contributions Today: ")
    if latest:
        table = PrettyTable(["Type", "Repository", "Time", "Details"])
        for event in latest:
            repo_name = event["repo"]["name"]
            # the org is the part of the repo name before the first '/'
            curr_org = repo_name.split('/')[0]
            if not org or curr_org == org:
                table.add_row([
                    get_event(event["type"]),
                    repo_name,
                    get_local_time(event["created_at"]),
                    get_details(event)
                ])
        print(table)
    print(user + " has made " + str(len(latest)) + " public contribution(s) today.\n")
5,331,356
def make_inference(input_data, model): """ input_data is assumed to be a pandas dataframe, and model uses standard sklearn API with .predict """ input_data['NIR_V'] = m.calc_NIR_V(input_data) input_data = input_data.replace([np.nan, np.inf, -np.inf, None], np.nan) input_data = input_data.dropna(subset=m.features) gc.collect() print(f'predicting on {len(input_data)} records') t0 = time.time() with joblib.parallel_backend('threading', n_jobs=8): model.n_jobs = 8 input_data['biomass'] = model.predict(input_data) t1 = time.time() print(f'took {round(t1-t0)} seconds') return input_data[['x', 'y', 'biomass']]
5,331,357
def property_elements(rconn, redisserver, name, device):
    """Returns a list of dictionaries of element attributes for the given
    property and device. The dictionaries are placed in the list in order
    of their label.

    :param rconn: A redis connection
    :type rconn: redis.client.Redis
    :param redisserver: The redis server parameters
    :type redisserver: namedtuple
    :param name: The property name
    :type name: String
    :param device: The device name
    :type device: String
    :return: A list of element attributes dictionaries.
    :rtype: List
    """
    element_name_list = elements(rconn, redisserver, name, device)
    if not element_name_list:
        return []
    element_dictionary_list = list( elements_dict(rconn, redisserver, elementname, name, device) for elementname in element_name_list )
    # sort element_dictionary_list by label
    element_dictionary_list.sort(key=_split_element_labels)
    return element_dictionary_list
5,331,358
def run_forever(config: Dict[str, Any]):
    """
    Run and block until a signal is sent to the process.
    The application, services, and gRPC server are all created and
    initialized when the application starts.
    """
    def run_stuff(config: Dict[str, Any]):
        resources = initialize_all(config)
        cherrypy.engine.subscribe(
            'stop', lambda: release_all(*resources), priority=20)

    cherrypy.engine.subscribe(
        'start', lambda: run_stuff(config), priority=60)

    cherrypy.server.unsubscribe()
    cherrypy.engine.signals.subscribe()
    cherrypy.engine.start()
    cherrypy.engine.block()
5,331,359
def _dtype(a, b=None): """Utility for getting a dtype""" return getattr(a, 'dtype', getattr(b, 'dtype', None))
5,331,360
def parse_garmin_tcx(filename):
    """
    Parses a tcx activity file from Garmin Connect into a pandas DataFrame.

    Args:
        filename (str) - tcx file

    Returns:
        a tuple of activity_id (datetime.date) and data (DataFrame) with columns
        ['time' (datetime), 'distance' (float, m), 'HR' (int),
         'speed' (float, m/s), 'cadence' (int),
         'latitude', 'longitude', 'altitude' (float)]
    """
    tree = etree.parse(str(filename))
    # set namespaces for garmin tcx file
    ns = {'ns0': '{http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2}',
          'ns3': '{http://www.garmin.com/xmlschemas/ActivityExtension/v2}'}
    activity_id = to_datetime(tree.find('.//' + ns['ns0'] + 'Id').text).date()
    trackpoints = tree.findall('.//' + ns['ns0'] + 'Trackpoint')
    data = DataFrame(columns='time,distance,HR,speed,cadence,latitude,longitude,altitude'.split(','))
    for n, trackpoint in enumerate(trackpoints):
        data.loc[n, 'time'] = trackpoint.find('.//' + ns['ns0'] + 'Time').text
        data.loc[n, 'distance'] = float(trackpoint.find('.//' + ns['ns0'] + 'DistanceMeters').text)
        data.loc[n, 'altitude'] = float(trackpoint.find('.//' + ns['ns0'] + 'AltitudeMeters').text)
        # the trailing slash selects the Value child of HeartRateBpm
        data.loc[n, 'HR'] = int(trackpoint.find('.//' + ns['ns0'] + 'HeartRateBpm/').text)
        # optional fields: find() returns None when the element is missing, so
        # accessing .text raises AttributeError; fall back to nan in that case
        try:
            data.loc[n, 'latitude'] = float(trackpoint.find('.//' + ns['ns0'] + 'LatitudeDegrees').text)
        except (AttributeError, ValueError):
            data.loc[n, 'latitude'] = nan
        try:
            data.loc[n, 'longitude'] = float(trackpoint.find('.//' + ns['ns0'] + 'LongitudeDegrees').text)
        except (AttributeError, ValueError):
            data.loc[n, 'longitude'] = nan
        try:
            data.loc[n, 'speed'] = float(trackpoint.find('.//' + ns['ns3'] + 'Speed').text)
        except (AttributeError, ValueError):
            data.loc[n, 'speed'] = nan
        try:
            data.loc[n, 'cadence'] = int(trackpoint.find('.//' + ns['ns3'] + 'RunCadence').text) * 2
        except (AttributeError, ValueError):
            data.loc[n, 'cadence'] = nan
    data.loc[:, 'time'] = to_datetime(data['time'])
    return (activity_id, data)
5,331,361
def get_decay_fn(initial_val, final_val, start, stop): """ Returns function handle to use in torch.optim.lr_scheduler.LambdaLR. The returned function supplies the multiplier to decay a value linearly. """ assert stop > start def decay_fn(counter): if counter <= start: return 1 if counter >= stop: return final_val / initial_val time_range = stop - start return 1 - (counter - start) * (1 - final_val / initial_val) / time_range assert decay_fn(start) * initial_val == initial_val assert decay_fn(stop) * initial_val == final_val return decay_fn
5,331,362
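# A usage sketch (assumes PyTorch is installed): the returned multiplier plugs
# straight into torch.optim.lr_scheduler.LambdaLR, as the docstring suggests.
# The values below are illustrative and chosen so the internal asserts hold exactly.
import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
# scale the base lr of 0.1 down to 0.025 linearly between steps 100 and 1000
decay_fn = get_decay_fn(initial_val=1.0, final_val=0.25, start=100, stop=1000)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=decay_fn)
for _ in range(1200):
    optimizer.step()
    scheduler.step()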
async def is_logged(jwt_cookie: Optional[str] = Cookie(None, alias=config.login.jwt_cookie_name)): """ Check if user is logged """ result = False if jwt_cookie: try: token = jwt.decode( jwt_cookie, smart_text(orjson.dumps(config.secret_key)), algorithms=[config.login.jwt_algorithm], audience="auth", ) result = isinstance(token, dict) and "sub" in token except JWTError: pass return JSONResponse(result, status_code=200)
5,331,363
def cached_query_molecules( client_address: str, molecule_ids: List[str] ) -> List[QCMolecule]: """A cached version of ``FractalClient.query_molecules``. Args: client_address: The address of the running QCFractal instance to query. molecule_ids: The ids of the molecules to query. Returns: The returned molecules. """ return _cached_client_query( client_address, molecule_ids, "query_molecules", _molecule_cache, )
5,331,364
def _domain_to_json(domain): """Translates a Domain object into a JSON dict.""" result = {} # Domain names and bounds are not populated yet if isinstance(domain, sch.IntDomain): result['ints'] = { 'min': str(domain.min_value), 'max': str(domain.max_value), 'isCategorical': domain.is_categorical, 'vocabularyFile': domain.vocabulary_file } elif isinstance(domain, sch.FloatDomain): result['floats'] = {} elif isinstance(domain, sch.StringDomain): result['strings'] = {} elif isinstance(domain, sch.BoolDomain): result['bools'] = {} return result
5,331,365
def draw_point(state, x, y, col=COLORS["WHITE"], symb="▓"):
    """Places a point at (x, y) and returns the modified state"""
    state[y][x] = renderObject(symb, col)
    return state
5,331,366
def remove_read_metadata(meta, field_ids):
    """Delete any read metadata for removed fields."""
    covs = defaultdict(dict)
    for field_id in field_ids:
        if field_id.endswith("_cov"):
            if field_id.endswith("_read_cov"):
                root = field_id.replace("_read_cov", "")
                covs[root].update({"read": True})
            else:
                root = field_id.replace("_cov", "")
                covs[root].update({"base": True})
    for key, value in covs.items():
        if "base" in value and "read" in value:
            meta.reads.pop(key, None)
5,331,367
def _vars_to_add(new_query_variables, current_query_variables): """ Return list of dicts representing Query Variables not yet persisted Keyword Parameters: new_query_variables -- Dict, representing a new inventory of Query Variables, to be associated with a DWSupport Query current_query_variables -- Dict, representing the Query Variables currently associated with the 'new_query_variables' Query mapped by tuple(table_name, column_name) >>> from pprint import pprint >>> test_new_vars = { 'great_fact': ['measure_a', 'measure_b'] ... ,'useful_dim': ['field_one'] ... ,'occasionally_useful_dim': ['field_two']} >>> persisted_vars = { ('great_fact', 'measure_a'): object() #fake ... ,('useful_dim', 'field_one'): object()#objects ... ,('useful_dim', 'field_two'): object()} >>> out = _vars_to_add(test_new_vars, persisted_vars) >>> pprint(out) # check detected additions {'great_fact': ['measure_b'], 'occasionally_useful_dim': ['field_two']} """ additional_fields_by_table_name = {} # Values to return # detect additions for new_variable_table_name, table_columns in new_query_variables.items(): for column_name in table_columns: key = (new_variable_table_name, column_name) #table+column tuple if key not in current_query_variables: # New Query Variable - add variable name to table's list table_variables = additional_fields_by_table_name.setdefault( new_variable_table_name ,list()) #default to new, empty list (if none exists yet) table_variables.append(column_name) return additional_fields_by_table_name
5,331,368
def Ak(Y2d, H, k):
    """
    Calculate Ak for Sk(x)

    Parameters
    ----------
    Y2d : list
        list of second derivatives of the y values
    H : list
        list of h values (knot spacings) from the spline
    k : int
        index into Y2d and H

    Returns
    -------
    float
        Ak for the cubic spline
    """
    return (Y2d[k] - Y2d[k - 1]) / (6 * H[k - 1])
5,331,369
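# A small worked example (values are illustrative only): Ak for the second
# segment of an evenly spaced spline.
Y2d = [0.0, 1.2, 0.8, 0.0]  # second derivatives at the knots
H = [0.5, 0.5, 0.5]         # knot spacings
print(Ak(Y2d, H, 1))        # (1.2 - 0.0) / (6 * 0.5) ≈ 0.4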
def cycle_list_next(vlist, current_val):
    """Return the element after *current_val* in *vlist*, wrapping around
    to the beginning at the list boundary.
    """
    return vlist[(vlist.index(current_val) + 1) % len(vlist)]
5,331,370
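# Usage sketch: cycling through a fixed palette wraps at the boundary.
colors = ["red", "green", "blue"]
assert cycle_list_next(colors, "green") == "blue"
assert cycle_list_next(colors, "blue") == "red"  # wraps to the beginning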
def _cal_hap_stats(gt, hap, pos, src_variants, src_hom_variants, src_het_variants, sample_size):
    """
    Description: Helper function for calculating statistics for a haplotype.

    Arguments:
        gt allel.GenotypeArray: Genotype data for all the haplotypes within the same window of the haplotype to be analyzed.
        hap allel.GenotypeVector: Genotype data for the haplotype to be analyzed.
        pos list: List containing positions of variants on the haplotype.
        src_variants list: List containing positions of variants on the individual from the source population.
        src_hom_variants list: List containing positions of homozygous variants on the individual from the source population.
        src_het_variants list: List containing positions of heterozygous variants on the individual from the source population.
        sample_size int: Number of individuals analyzed.

    Returns:
        hap_variants_num int: Number of SNPs with derived alleles on the haplotype.
        hap_site_num int: Number of SNPs with derived alleles either on the haplotype or the source genomes.
        hap_match_src_allele_num float: Number of SNPs with derived alleles both on the haplotype and the source genomes (heterozygous source sites count as 0.5).
        hap_sfs float: Average number of derived variants per site per haplotype.
        hap_match_pct float: Match percent of the haplotype.
        All five values are the string 'NA' when hap is None; hap_match_pct is also 'NA' when no sites are shared.
    """
    if hap is None:
        return 'NA', 'NA', 'NA', 'NA', 'NA'
    else:
        hap_variants = pos[np.equal(hap, 1)]
        hap_variants_num = len(hap_variants)
        # Assume the alternative allele is the derived allele
        hap_shared_src_hom_site_num = len(np.intersect1d(hap_variants, src_hom_variants))
        hap_shared_src_het_site_num = len(np.intersect1d(hap_variants, src_het_variants))
        hap_site_num = len(np.union1d(hap_variants, src_variants))
        hap_match_src_allele_num = hap_shared_src_hom_site_num + 0.5*hap_shared_src_het_site_num
        hap_shared_src_site_num = hap_shared_src_hom_site_num + hap_shared_src_het_site_num
        if hap_site_num != 0:
            hap_match_pct = round(hap_match_src_allele_num/hap_site_num, 6)
        else:
            hap_match_pct = 'NA'
        hap_sfs = np.sum(np.sum(gt[hap == 1], axis=2), axis=1)
        if hap_sfs.size != 0:
            hap_sfs_mean = np.mean(hap_sfs)
            # See https://stackoverflow.com/questions/10825926/python-3-x-rounding-behavior
            #if not np.isnan(sfs_mean): sfs_mean = int(round(sfs_mean))
            #if not np.isnan(hap_sfs_mean): hap_sfs = int(int(py2round(hap_sfs_mean))/10*108)
            #if not np.isnan(hap_sfs_mean): hap_sfs = int(py2round(hap_sfs_mean))/(2*sample_size)
            if not np.isnan(hap_sfs_mean):
                hap_sfs = round(hap_sfs_mean/(2*sample_size), 6)
        else:
            hap_sfs = np.nan

        return hap_variants_num, hap_site_num, hap_match_src_allele_num, hap_sfs, hap_match_pct
5,331,371
def read_cfg(file):
    """Read configuration file and return list of (start, end) tuples
    """
    result = []
    if isfile(file):
        with open(file) as f:
            cfg = json.load(f)
            for entry in cfg:
                if "start" in entry:
                    span = (entry["start"], entry.get("end", None))
                    result.append(span)
    return result
5,331,372
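# A usage sketch with a hypothetical config file. Given filters.json
# containing [{"start": 10, "end": 20}, {"start": 42}], the call below
# returns [(10, 20), (42, None)]; entries without a "start" key are skipped.
spans = read_cfg("filters.json")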
def test_pragmas_03(): """ Test the case where we specify a 'disable-next-line' pragma, but specify no id to disable. """ # Arrange scanner = MarkdownScanner() supplied_arguments = [ "scan", "test/resources/pragmas/atx_heading_with_multiple_spaces_disable_with_no_id.md", ] expected_return_code = 1 expected_output = ( "test/resources/pragmas/atx_heading_with_multiple_spaces_disable_with_no_id.md:1:1: " + "INLINE: Inline configuration command 'disable-next-line' specified a plugin with a blank id.\n" + "test/resources/pragmas/atx_heading_with_multiple_spaces_disable_with_no_id.md:2:1: " + "MD019: Multiple spaces are present after hash character on Atx Heading. (no-multiple-space-atx)\n" ) expected_error = "" # Act execute_results = scanner.invoke_main(arguments=supplied_arguments) # Assert execute_results.assert_results( expected_output, expected_error, expected_return_code )
5,331,373
def least_l2_affine(
    source: np.ndarray, target: np.ndarray, shift: bool = True, scale: bool = True
) -> AffineParameters:
    """Finds the squared-error minimizing affine transform.

    Args:
        source: a 1D array consisting of the reward to transform.
        target: a 1D array consisting of the target to match.
        shift: affine includes constant shift.
        scale: affine includes rescale.

    Returns:
        (shift, scale) such that (scale * reward + shift) has minimal
        squared-error from target.

    Raises:
        ValueError if source or target are not 1D arrays, or if neither
        shift or scale are True.
    """
    if source.ndim != 1:
        raise ValueError("source must be vector.")
    if target.ndim != 1:
        raise ValueError("target must be vector.")
    if not (shift or scale):
        raise ValueError("At least one of shift and scale must be True.")

    a_vals = []
    if shift:
        # Positive and negative constant columns. nnls constrains coefficients
        # to be nonnegative, so the net shift is the *difference* of the two
        # coefficients, which allows negative shifts.
        a_vals += [np.ones_like(source), -np.ones_like(source)]
    if scale:
        a_vals += [source]
    a_vals = np.stack(a_vals, axis=1)

    # Find x such that a_vals.dot(x) has least-squared error from target, where x >= 0.
    coefs, _ = scipy.optimize.nnls(a_vals, target)

    shift_param = 0.0
    scale_idx = 0
    if shift:
        shift_param = coefs[0] - coefs[1]
        scale_idx = 2

    scale_param = 1.0
    if scale:
        scale_param = coefs[scale_idx]

    return AffineParameters(shift=shift_param, scale=scale_param)
5,331,374
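# A quick numeric check (illustrative; assumes numpy and the AffineParameters
# tuple used above): recovering a known affine map from noiseless data.
import numpy as np

source = np.array([0.0, 1.0, 2.0, 3.0])
target = 2.0 * source - 1.0
params = least_l2_affine(source, target)
# params.scale ~= 2.0 and params.shift ~= -1.0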
def mark_item_as_read(
    client: EWSClient, item_ids, operation="read", target_mailbox=None
):
    """
    Marks items as read or unread, depending on the operation.

    :param client: EWS Client
    :param item_ids: ids of the items to mark
    :param (Optional) operation: operation to execute, "read" or "unread"
    :param (Optional) target_mailbox: target mailbox
    :return: Output tuple
    """
    marked_items = []
    item_ids = argToList(item_ids)
    items = client.get_items_from_mailbox(target_mailbox, item_ids)
    items = [x for x in items if isinstance(x, Message)]

    for item in items:
        item.is_read = operation == "read"
        item.save()

        marked_items.append(
            {
                ITEM_ID: item.id,
                MESSAGE_ID: item.message_id,
                ACTION: "marked-as-{}".format(operation),
            }
        )

    readable_output = tableToMarkdown(
        f"Marked items ({operation} marked operation)", marked_items
    )
    output = {CONTEXT_UPDATE_EWS_ITEM: marked_items}

    return readable_output, output, marked_items
5,331,375
def step(state, iidx, arrayTimeIndex, globalTimeStep):
    """This is the method that will be called by the swept solver.
    state - 4D numpy array (t, v, x, y), where v is the number of variables
    iidx - an iterable of indices
    arrayTimeIndex - the current time step
    globalTimeStep - a step counter that allows implementation of the scheme
    """
    if scheme:  # pseudo forward Euler
        for idx, idy in iidx:
            state[arrayTimeIndex+1, :, idx, idy] = state[arrayTimeIndex, :, idx, idy] + 1
    else:  # pseudo RK2
        # even global steps are final steps, odd steps are intermediate steps
        addition, timeChange = (2, 1) if globalTimeStep % 2 == 0 else (1, 0)
        for idx, idy in iidx:
            state[arrayTimeIndex+1, :, idx, idy] = state[arrayTimeIndex-timeChange, :, idx, idy] + addition
5,331,376
def rerank(args: argparse.Namespace):
    """
    Reranks a list of hypotheses according to a sentence-level metric.
    Writes all output to STDOUT.

    :param args: Namespace object holding CLI arguments.
    """
    reranker = Reranker(args.metric)

    with utils.smart_open(args.reference) as reference, utils.smart_open(args.hypotheses) as hypotheses:
        for reference_line, hypothesis_line in zip(reference, hypotheses):
            reference_line = reference_line.strip()
            # avoid shadowing the `hypotheses` file handle above
            hypothesis_list = json.loads(hypothesis_line)

            utils.check_condition(len(hypothesis_list) > 1, "Reranking strictly needs more than 1 hypothesis.")

            if args.output_best:
                rank_output = reranker.rerank_top1(hypothesis_list, reference_line)
                sys.stdout.write(rank_output.hypotheses[0] + "\n")
            else:
                rank_output = reranker.rerank_hypotheses(hypothesis_list, reference_line)
                sys.stdout.write(json.dumps(rank_output.hypotheses) + "\n")
5,331,377
def test_qualbase(): """-q with low qualities, using ascii(quality+64) encoding""" run("-q 10 --quality-base 64 -a XXXXXX", "illumina64.fastq", "illumina64.fastq")
5,331,378
def modeling_viz_examples(df: pd.DataFrame) -> None: """ Purpose: Shows examples for modeling Args: N/A Returns: N/A """ # classification_report example st.write("Classification Report Example") with st.echo(): fancylit.yellowbrick_funcs.show_classification_report(df) st.write("Feature Correlation") with st.echo(): fancylit.yellowbrick_funcs.feature_correlation(df) st.write("Class balance") with st.echo(): fancylit.yellowbrick_funcs.class_balance(df) # st.write("UMAP Corpus Visualization Example") # with st.echo(): # fancylit.yellowbrick_funcs.umap_viz(df) st.write("Prediction Error Plot") with st.echo(): fancylit.yellowbrick_funcs.prediction_error(df, Lasso())
5,331,379
def AIC_score(y_true, y_pred, model=None, df=None):
    """ calculate Akaike Information Criterion (AIC)
    Input:
        y_true: actual values
        y_pred: predicted values
        model (optional): predictive model
        df (optional): degrees of freedom of model

    One of model or df is required
    """
    if df is None and model is None:
        raise ValueError('You need to provide either model or df')
    n = len(y_pred)
    p = len(model.coef_) + 1 if df is None else df
    resid = np.array(y_true) - np.array(y_pred)
    sse = np.sum(resid ** 2)
    constant = n + n * np.log(2 * np.pi)
    return n * math.log(sse / n) + constant + 2 * (p + 1)
5,331,380
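# A usage sketch (assumes scikit-learn; data is synthetic and illustrative):
import numpy as np
from sklearn.linear_model import LinearRegression

X = np.arange(20, dtype=float).reshape(-1, 1)
y = 3.0 * X[:, 0] + np.random.default_rng(1).normal(scale=0.5, size=20)
model = LinearRegression().fit(X, y)
print(AIC_score(y, model.predict(X), model=model))
# equivalently, pass the degrees of freedom directly:
print(AIC_score(y, model.predict(X), df=2))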
def to_rgb(data, output=None, vmin=None, vmax=None, pmin=2, pmax=98,
           categorical=False, mask=None, size=None, cmap=None):
    """Turn some data into a numpy array representing an RGB image.

    Parameters
    ----------
    data : DataArray or list of DataArray
        The data to render; one DataArray per channel.
    output : str, optional
        A file path. If given, the image is written to that path and
        nothing is returned.
    vmin : float or list of float, optional
        minimum value, or list of values per channel (default: None).
    vmax : float or list of float, optional
        maximum value, or list of values per channel (default: None).
    pmin : float
        lowest percentile to plot (default: 2). Ignored if vmin is passed.
    pmax : float
        highest percentile to plot (default: 98). Ignored if vmax is passed.
    categorical : bool, optional
        Whether to colorize the data per category (default: False).
    mask : np.ndarray, optional
        Boolean mask; pixels where it is False are set to black.
    size : tuple, optional
        Output size as (height, width); either entry may be None to
        preserve the aspect ratio.
    cmap : str, optional
        Colormap applied to single-channel images.

    Returns
    -------
    np.ndarray or None
        Returns the generated RGB image if output is None, else returns None.
    """
    if isinstance(data, list):
        n_channels = len(data)
    elif isinstance(data, xr.DataArray) or isinstance(data, np.ndarray):
        n_channels = 1
        data = [data]
    else:
        raise ValueError("`data` must be a DataArray or list of DataArrays")

    values = [np.asarray(d) for d in data]
    shape = data[0].shape + (n_channels,)

    if vmin is not None:
        if isinstance(vmin, (int, float)):
            vmin = [vmin] * n_channels
    if vmax is not None:
        if isinstance(vmax, (int, float)):
            vmax = [vmax] * n_channels

    if categorical:
        colored = colorize(values[0], nan_vals=[0])

    else:
        im = np.empty(shape)

        for i in range(n_channels):
            channel = values[i]

            # Stretch
            if vmin is not None:
                minval = vmin[i]
            else:
                minval = np.percentile(channel, pmin)
            if vmax is not None:
                maxval = vmax[i]
            else:
                maxval = np.percentile(channel, pmax)
            if maxval > minval:
                channel = (channel - minval) / (maxval - minval) * 255

            im[:, :, i] = channel

        im = np.clip(im, 0, 255).astype(np.uint8)

        if n_channels == 1:
            colored = cv2.cvtColor(im[:, :, 0], cv2.COLOR_GRAY2BGR)
            if cmap is not None:
                # colored is now in BGR
                colored = cv2.applyColorMap(colored, _cmap_from_str(cmap))
        else:
            # im is in RGB
            colored = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)

    # if output is not None:
    #     colored = cv2.cvtColor(colored, cv2.COLOR_RGB2BGR)

    if mask is not None:
        colored[~mask] = 0

    if size is not None:
        if size[0] is None:
            size = (int(colored.shape[0] * size[1] / colored.shape[1]), size[1])
        elif size[1] is None:
            size = (size[0], int(colored.shape[1] * size[0] / colored.shape[0]))
        colored = cv2.resize(colored, (size[1], size[0]))

    if output is None:
        return cv2.cvtColor(colored, cv2.COLOR_BGR2RGB)
    else:
        cv2.imwrite(output, colored)
5,331,381
def id_convert(values, idtype=None): """ Get data from the id converter API. https://www.ncbi.nlm.nih.gov/pmc/tools/id-converter-api/ """ base = 'http://www.pubmedcentral.nih.gov/utils/idconv/v1.0/' params = { 'ids': values, 'format': 'json', } if idtype is not None: params['idtype'] = idtype resp = requests.get(base, params=params) raw = resp.json() records = raw.get('records') if records is None: return None status = records[0].get('status') if status == u"error": return None return raw['records'][0]
5,331,382
def testOneSightline(l=0., b=4., Rv=3.1, useCoarse=False): """Try a single sight line""" # Import the bovy et al. map so we don't have to re-initialize it # for each sight-line combined19 = mwdust.Combined19() los = lineofsight(l, b, objBovy=combined19, Rv=Rv) if useCoarse: los.generateDistances(Verbose=True) los.getLallementEBV() los.getBovyEBV() # show the line of sight los.showLos(showPoints=True)
5,331,383
def authenticate_user():
    """Authenticate user"""
    username = request.form['username']
    password = request.form['password']
    user = User.query.filter_by(username=username, password=password).first()
    if user is not None:
        ma_schema = UserSchema()
        user_data = ma_schema.dump(user)
        user_data['id'] = user.pk
        user_data['token'] = base64.b64encode(bytes(user.token, 'utf-8')).decode("utf-8")
        del user_data['pk']
        return jsonify(user_data)
    else:
        return jsonify({"message": "Invalid credentials"}), 404
5,331,384
def map_signature(
        r_func: SignatureTranslatedFunction,
        is_method: bool = False,
        map_default: typing.Optional[
            typing.Callable[[rinterface.Sexp], typing.Any]
        ] = _map_default_value
) -> typing.Tuple[inspect.Signature, typing.Optional[int]]:
    """
    Map the signature of an R function to the signature of a Python function.

    While mapping the signature, it will report the presence, if any, of an
    R ellipsis.

    Args:
        r_func (SignatureTranslatedFunction): an R function
        is_method (bool): Whether the function should be treated as a method
            (adds a `self` param to the signature if so).
        map_default (function): Function to map default values in the Python
            signature. No mapping to default values is done if None.
    Returns:
        A tuple (inspect.Signature, int or None).
    """
    params = []
    r_ellipsis = None
    if is_method:
        params.append(inspect.Parameter('self',
                                        inspect.Parameter.POSITIONAL_ONLY))
    r_params = r_func.formals()
    rev_prm_transl = {v: k for k, v in r_func._prm_translate.items()}
    if r_params.names is not rinterface.NULL:
        for i, (name, default_orig) in enumerate(zip(r_params.names, r_params)):
            if default_orig == '...':
                r_ellipsis = i
                warnings.warn('The R ellipsis is not yet well supported.')
            transl_name = rev_prm_transl.get(name)
            default_orig = default_orig[0]
            if map_default and not rinterface.MissingArg.rsame(default_orig):
                default_mapped = map_default(default_orig)
            else:
                default_mapped = inspect.Parameter.empty
            prm = inspect.Parameter(
                transl_name if transl_name else name,
                inspect.Parameter.POSITIONAL_OR_KEYWORD,
                default=default_mapped
            )
            params.append(prm)
    return (inspect.Signature(params), r_ellipsis)
5,331,385
def get_random(X): """Get a random sample from X. Parameters ---------- X: array-like, shape (n_samples, n_features) Returns ------- array-like, shape (1, n_features) """ size = len(X) idx = np.random.choice(range(size)) return X[idx]
5,331,386
def _save_update(update): """Save one update in firestore db.""" location = {k: v for k, v in update.items() if k in _location_keys} status = {k: v for k, v in update.items() if k in _update_keys} # Save location in status to enable back referencing location from a status status["location"] = _location_doc_name(location) location_doc_ref = _save_location(location) updated = _save_status(location_doc_ref, status) if updated: logger.debug(f"{location} updated") return updated
5,331,387
def get_secondary_connections(network, user):
    """
    Finds all the secondary connections (i.e. connections of connections)
    of a given user.

    Arguments:
        network: the gamer network data structure.
        user: a string containing the name of the user.

    Returns:
        A list containing the secondary connections (connections of connections).
        - If the user is not in the network, returns None.
        - If a user has no primary connections to begin with, returns an empty list.

    NOTE:
        It is OK if a user's list of secondary connections includes the user
        himself/herself. It is also OK if the list contains a user's primary
        connection that is a secondary connection as well.
    """
    if user not in network:
        return None
    if network[user][0] == []:
        return []
    return [person
            for connection in network[user][0]
            for person in network[connection][0]]
5,331,388
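# A small sketch of the assumed network data structure: each user maps to a
# pair of lists, the first being that user's connections (the second list is
# left empty here). Note "alice" appears in her own secondary connections,
# which the docstring explicitly allows.
network = {
    "alice": [["bob"], []],
    "bob": [["carol", "alice"], []],
    "carol": [[], []],
}
print(get_secondary_connections(network, "alice"))  # ['carol', 'alice']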
def get_regression_function(model, model_code):
    """
    Returns the prediction function for a trained regression model.
    The model_code argument is accepted for interface compatibility but
    is not used.

    :param model: trained model object
    :param model_code: code identifying the model type (unused)
    :return: regression predictor function
    """
    return model.predict
5,331,389
def signal_handler(signal, frame): """Handler for Ctrl-C""" sys.exit(0)
5,331,390
def beam_motion_banding_filter(img, padding=20):
    """
    :param img: numpy.array.
        2d projection image or sinogram. The left and right sides of the image
        should be empty, so that `padding` pixels on each side can be used to
        estimate a beam motion banding image, which is then subtracted from
        the original.
    :param padding: int.
        The size of the empty area on the left and right used to find the
        average value where there is no object.
    :return img_new: numpy.array
        Smoothed image.
    """
    nx = img.shape[1]
    mean_left = img[:, 0:padding].mean(axis=1)
    mean_right = img[:, -padding:].mean(axis=1)
    mean_middle = (mean_left + mean_right) / 2
    slope = (mean_right - mean_left) / (nx - padding)

    # Make an image with only the bandings.
    img_banding = img * 0.0
    for i in range(img_banding.shape[1]):  # iterate over columns
        img_banding[:, i] = mean_middle + (i - nx / 2) * slope

    # Subtract the banding from the original.
    img_new = img - img_banding
    return img_new
5,331,391
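# A synthetic smoke test (illustrative only): inject a per-row offset into a
# noise image with empty margins and check that the filter removes it.
import numpy as np

rng = np.random.default_rng(0)
sino = rng.normal(scale=0.01, size=(64, 256))
sino += np.linspace(-0.5, 0.5, 64)[:, None]  # simulated banding
clean = beam_motion_banding_filter(sino, padding=20)
print(np.abs(clean.mean(axis=1)).max())  # close to zero after correction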
def Din(traceFile, cycle, accessType, memAddress): """ Din: prints the memory reference in the "traditional dinero" format used by the DineroIV cache simulator""" traceFile.write("%d 0x%x\n" % (accessType, memAddress))
5,331,392
def i2c(debug=False, reset=20, req=16): """Yield a i2c device.""" try: yield PN532_I2C(debug=debug, reset=reset, req=req) finally: GPIO.cleanup()
5,331,393
def log(session): """Clear nicos log handler content""" handler = session.testhandler handler.clear() return handler
5,331,394
def load_mnist(path, kind='train'):
    """Load MNIST data from `path`"""
    labels_path = os.path.join(path, '%s-labels-idx1-ubyte.gz' % kind)
    images_path = os.path.join(path, '%s-images-idx3-ubyte.gz' % kind)

    with gzip.open(labels_path, 'rb') as lbpath:
        lbpath.read(8)  # skip the 8-byte idx header (magic number + item count)
        buffer = lbpath.read()
        labels = np.frombuffer(buffer, dtype=np.uint8)

    with gzip.open(images_path, 'rb') as imgpath:
        imgpath.read(16)  # skip the 16-byte idx header (magic, count, rows, cols)
        buffer = imgpath.read()
        images = np.frombuffer(buffer, dtype=np.uint8).reshape(len(labels), 784).astype(np.float64)

    return images, labels
5,331,395
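# Usage sketch: the path is hypothetical and must contain the gzipped idx
# files, e.g. train-labels-idx1-ubyte.gz and train-images-idx3-ubyte.gz.
X_train, y_train = load_mnist("data/fashion", kind="train")
print(X_train.shape, y_train.shape)  # (60000, 784) (60000,) for the full set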
def add_barostat(system): """Add Monte Carlo barostat""" system.addForce(mm.MonteCarloBarostat(simulation_parameters["pressure"], simulation_parameters["temperature"]))
5,331,396
def calculate_dvh(dose_grid, label, bins=1001): """Calculates a dose-volume histogram Args: dose_grid (SimpleITK.Image): The dose grid. label (SimpleITK.Image): The (binary) label defining a structure. bins (int | list | np.ndarray, optional): Passed to np.histogram, can be an int (number of bins), or a list (specifying bin edges). Defaults to 1001. Returns: bins (numpy.ndarray): The points of the dose bins values (numpy.ndarray): The DVH values """ if dose_grid.GetSize() != label.GetSize(): print("Dose grid size does not match label, automatically resampling.") dose_grid = sitk.Resample(dose_grid, label) dose_arr = sitk.GetArrayViewFromImage(dose_grid) label_arr = sitk.GetArrayViewFromImage(label) dose_vals = dose_arr[np.where(label_arr)] counts, bin_edges = np.histogram(dose_vals, bins=bins) # Get mid-points of bins bins = (bin_edges[1:] + bin_edges[:-1]) / 2.0 # Calculate the actual DVH values values = np.cumsum(counts[::-1])[::-1] values = values / values.max() return bins, values
5,331,397
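# A usage sketch with hypothetical file names (assumes SimpleITK and numpy):
import numpy as np
import SimpleITK as sitk

dose = sitk.ReadImage("dose.nii.gz")
target = sitk.ReadImage("target_mask.nii.gz")
bins, values = calculate_dvh(dose, target)
# approximate D95 from the curve: the dose at which the cumulative volume
# fraction first drops to 95%
d95 = bins[np.argmax(values <= 0.95)]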
def build_target(output, gt_data, H, W):
    """
    Build the training target for output tensor

    Arguments:

    output_data -- tuple (delta_pred_batch, conf_pred_batch, class_pred_batch), output data of the yolo network
    gt_data -- tuple (gt_boxes_batch, gt_classes_batch, num_boxes_batch), ground truth data

    delta_pred_batch -- tensor of shape (B, H * W * num_anchors, 4), predictions of delta σ(t_x), σ(t_y), σ(t_w), σ(t_h)
    conf_pred_batch -- tensor of shape (B, H * W * num_anchors, 1), prediction of IoU score σ(t_c)
    class_score_batch -- tensor of shape (B, H * W * num_anchors, num_classes), prediction of class scores (cls1, cls2, ..)

    gt_boxes_batch -- tensor of shape (B, N, 4), ground truth boxes, normalized values (x1, y1, x2, y2) range 0~1
    gt_classes_batch -- tensor of shape (B, N), ground truth classes (cls)
    num_obj_batch -- tensor of shape (B, 1). number of objects

    Returns:
    iou_target -- tensor of shape (B, H * W * num_anchors, 1)
    iou_mask -- tensor of shape (B, H * W * num_anchors, 1)
    box_target -- tensor of shape (B, H * W * num_anchors, 4)
    box_mask -- tensor of shape (B, H * W * num_anchors, 1)
    class_target -- tensor of shape (B, H * W * num_anchors, 1)
    class_mask -- tensor of shape (B, H * W * num_anchors, 1)
    """
    delta_pred_batch = output[0]
    conf_pred_batch = output[1]
    class_score_batch = output[2]

    gt_boxes_batch = gt_data[0]
    gt_classes_batch = gt_data[1]
    num_boxes_batch = gt_data[2]

    bsize = delta_pred_batch.size(0)

    num_anchors = 5  # hard code for now

    # initialize the output tensors
    # we use `tensor.new()` so that the created tensors have the same device and
    # data type as the input tensor; which tensor is used doesn't matter
    iou_target = delta_pred_batch.new_zeros((bsize, H * W, num_anchors, 1))
    iou_mask = delta_pred_batch.new_ones((bsize, H * W, num_anchors, 1)) * cfg.noobject_scale

    box_target = delta_pred_batch.new_zeros((bsize, H * W, num_anchors, 4))
    box_mask = delta_pred_batch.new_zeros((bsize, H * W, num_anchors, 1))

    class_target = conf_pred_batch.new_zeros((bsize, H * W, num_anchors, 1))
    class_mask = conf_pred_batch.new_zeros((bsize, H * W, num_anchors, 1))

    # get all the anchors
    anchors = torch.FloatTensor(cfg.anchors)

    # note: all anchors' xywh scale is normalized by the grid width and height, i.e. 13 x 13.
    # this is very crucial because the predicted output is normalized to 0~1, which is also
    # normalized by the grid width and height
    all_grid_xywh = generate_all_anchors(anchors, H, W)  # shape: (H * W * num_anchors, 4), format: (x, y, w, h)
    all_grid_xywh = delta_pred_batch.new(*all_grid_xywh.size()).copy_(all_grid_xywh)
    all_anchors_xywh = all_grid_xywh.clone()
    all_anchors_xywh[:, 0:2] += 0.5
    if cfg.debug:
        print('all grid: ', all_grid_xywh[:12, :])
        print('all anchor: ', all_anchors_xywh[:12, :])
    all_anchors_xxyy = xywh2xxyy(all_anchors_xywh)

    # process over batches
    for b in range(bsize):
        num_obj = num_boxes_batch[b].item()
        delta_pred = delta_pred_batch[b]
        gt_boxes = gt_boxes_batch[b][:num_obj, :]
        gt_classes = gt_classes_batch[b][:num_obj]

        # rescale ground truth boxes
        gt_boxes[:, 0::2] *= W
        gt_boxes[:, 1::2] *= H

        # step 1: process IoU target
        # apply delta_pred to pre-defined anchors
        all_anchors_xywh = all_anchors_xywh.view(-1, 4)
        box_pred = box_transform_inv(all_grid_xywh, delta_pred)
        box_pred = xywh2xxyy(box_pred)

        # for each anchor, its iou target corresponds to the max iou with any gt box
        ious = box_ious(box_pred, gt_boxes)  # shape: (H * W * num_anchors, num_obj)
        ious = ious.view(-1, num_anchors, num_obj)
        max_iou, _ = torch.max(ious, dim=-1, keepdim=True)  # shape: (H * W, num_anchors, 1)
        if cfg.debug:
            print('ious', ious)

        # iou_target[b] = max_iou

        # we ignore the gradient of predicted boxes whose IoU with any gt box is greater than cfg.threshold
        iou_thresh_filter = max_iou.view(-1) > cfg.thresh
        n_pos = torch.nonzero(iou_thresh_filter).numel()

        if n_pos > 0:
            iou_mask[b][max_iou >= cfg.thresh] = 0

        # step 2: process box target and class target
        # calculate overlaps between anchors and gt boxes
        overlaps = box_ious(all_anchors_xxyy, gt_boxes).view(-1, num_anchors, num_obj)
        gt_boxes_xywh = xxyy2xywh(gt_boxes)

        # iterate over all objects
        for t in range(gt_boxes.size(0)):
            # compute the center of each gt box to determine which cell it falls on
            # assign it to a specific anchor by choosing max IoU
            gt_box_xywh = gt_boxes_xywh[t]
            gt_class = gt_classes[t]
            cell_idx_x, cell_idx_y = torch.floor(gt_box_xywh[:2])
            cell_idx = cell_idx_y * W + cell_idx_x
            cell_idx = cell_idx.long()

            # update box_target, box_mask
            overlaps_in_cell = overlaps[cell_idx, :, t]
            argmax_anchor_idx = torch.argmax(overlaps_in_cell)

            assigned_grid = all_grid_xywh.view(-1, num_anchors, 4)[cell_idx, argmax_anchor_idx, :].unsqueeze(0)
            gt_box = gt_box_xywh.unsqueeze(0)
            target_t = box_transform(assigned_grid, gt_box)
            if cfg.debug:
                print('assigned_grid, ', assigned_grid)
                print('gt: ', gt_box)
                print('target_t, ', target_t)
            box_target[b, cell_idx, argmax_anchor_idx, :] = target_t.unsqueeze(0)
            box_mask[b, cell_idx, argmax_anchor_idx, :] = 1

            # update cls_target, cls_mask
            class_target[b, cell_idx, argmax_anchor_idx, :] = gt_class
            class_mask[b, cell_idx, argmax_anchor_idx, :] = 1

            # update iou target and iou mask
            iou_target[b, cell_idx, argmax_anchor_idx, :] = max_iou[cell_idx, argmax_anchor_idx, :]
            if cfg.debug:
                print(max_iou[cell_idx, argmax_anchor_idx, :])
            iou_mask[b, cell_idx, argmax_anchor_idx, :] = cfg.object_scale

    return iou_target.view(bsize, -1, 1), \
           iou_mask.view(bsize, -1, 1), \
           box_target.view(bsize, -1, 4), \
           box_mask.view(bsize, -1, 1), \
           class_target.view(bsize, -1, 1).long(), \
           class_mask.view(bsize, -1, 1)
5,331,398
def dataframe_with_new_calendar(df: pd.DataFrame, new_calendar: pd.DatetimeIndex): """ Returns a new DataFrame where the row data are based on the new calendar (similar to Excel's VLOOKUP with approximate match) :param df: DataFrame :param new_calendar: DatetimeIndex :return: DataFrame """ # find the position in the old calendar that closest represents the new calendar dates original_calendar = df.index date_index_list = np.searchsorted(original_calendar, new_calendar, side='right') date_index_list = [d_i - 1 for d_i in date_index_list if d_i > 0] data_for_new_calendar = df.to_numpy()[date_index_list, :] # in case the first dates in the new calendar are before the first available date in the DataFrame, add nans to the # first rows if data_for_new_calendar.shape[0] != len(new_calendar): num_missing_rows = len(new_calendar) - data_for_new_calendar.shape[0] nan_array = np.empty((num_missing_rows, data_for_new_calendar.shape[1])) nan_array[:] = np.nan # add the data after the nan rows data_for_new_calendar = np.vstack([nan_array, data_for_new_calendar]) return pd.DataFrame(data=data_for_new_calendar, index=new_calendar, columns=df.columns)
5,331,399
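# A worked example (illustrative data) of the VLOOKUP-style behavior described
# in the docstring: dates before the first observation become NaN, and all
# other dates take the most recent available value.
import pandas as pd

df = pd.DataFrame({"price": [100.0, 101.5, 99.8]},
                  index=pd.DatetimeIndex(["2021-01-04", "2021-01-06", "2021-01-08"]))
daily = pd.date_range("2021-01-03", "2021-01-08", freq="D")
print(dataframe_with_new_calendar(df, daily))
# 2021-01-03 -> NaN; 2021-01-04 and 2021-01-05 -> 100.0; and so on.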