content
stringlengths
22
815k
id
int64
0
4.91M
def test_FeatureSetSelector_3():
    """Transform with two subset names keeps all rows and exactly the 7
    features named by the two subsets."""
    selector = FeatureSetSelector(
        subset_list="tests/subset_test.csv",
        sel_subset=["test_subset_1", "test_subset_2"],
    )
    selector.fit(test_X, y=None)
    result = selector.transform(test_X)
    # Row count preserved, column count reduced to the selected 7 features.
    assert result.shape[0] == test_X.shape[0]
    assert result.shape[1] != test_X.shape[1]
    assert result.shape[1] == 7
    assert np.array_equal(result, test_X[selector.feat_list].values)
29,200
def initialize_builtin_extensions():
    """Initialize third-party extensions."""
    # Bind every extension to the concrete app object behind the proxy.
    flask_app = current_app._get_current_object()
    db.init_app(flask_app)
    migrate.init_app(flask_app, db=db)
    babel.init_app(flask_app)
    initialize_security(flask_app)
29,201
def collaspe_fclusters(data=None, t=None, row_labels=None, col_labels=None, linkage='average', pdist='euclidean', standardize=3, log=False): """a function to collaspe flat clusters by averaging the vectors within each flat clusters achieved from hierarchical clustering""" ## preprocess data if log: data = np.log2(data + 1.0) if standardize == 1: # Standardize along the columns of data data = zscore(data, axis=0) elif standardize == 2: # Standardize along the rows of data data = zscore(data, axis=1) if row_labels is not None and col_labels is None: ## only get fclusters for rows d = dist.pdist(data, metric=pdist) axis = 1 ##!!! haven't checked whether this is correct yet elif row_labels is None and col_labels is not None: ## only get fclusters for cols d = dist.pdist(data.T, metric=pdist) axis = 0 D = dist.squareform(d) Y = sch.linkage(D, method=linkage, metric=pdist) fclusters = sch.fcluster(Y, t, 'distance') fcluster_set = set(fclusters) data_cf = [] for fc in fcluster_set: mask = np.where(fclusters==fc) data_t = data.T vector_avg = np.average(data_t[mask],axis=axis) data_cf.append(vector_avg) data_cf = np.array(data_cf).T return data_cf
29,202
def iapproximate_add_fourier_state(self, lhs: Union[int, QuantumRegister], rhs: QRegisterPhaseLE, qcirc: QuantumCircuit, approximation: int = None) -> ApproximateAddFourierStateGate:
    """Subtract two registers, with rhs held in quantum Fourier state."""
    if isinstance(lhs, QuantumRegister):
        self._check_qreg(lhs)
        self._check_dups([lhs, rhs])
    self._check_qreg(rhs)
    # Subtraction is realised as the inverse of the approximate addition gate.
    add_gate = ApproximateAddFourierStateGate(lhs, rhs, qcirc, approximation)
    return self._attach(add_gate.inverse())
29,203
def _get_indices(A): """Gets the index for each element in the array.""" dim_ranges = [range(size) for size in A.shape] if len(dim_ranges) == 1: return dim_ranges[0] return itertools.product(*dim_ranges)
29,204
def test_prediction(prediction_gen, mail_info, api_url="http://127.0.0.1:5000/test-api?prediction_id=", result_wait_time=7):
    """Post a prediction, wait, fetch the result and alert by mail on failure.

    An alert mail is sent when the prediction status is not 200, or when
    the health-check itself raises.
    """
    try:
        # make prediction
        prediction_id = prediction_gen.post_prediction()

        # wait and get the prediction result
        time.sleep(result_wait_time)
        prediction = json.loads(requests.get("{0}{1}".format(api_url, prediction_id)).text)
        prediction_status = prediction["prediction_status"]
        logger.info(prediction_status)

        if prediction_status != 200:
            # send the error
            logger.warning("prediction failed with status {0}".format(prediction_status))
            send_mail(
                mail_info["sender_mail"],
                mail_info["sender_password"],
                mail_info["receiver_mail"],
                "deep_predictor:prediction error {0}".format(prediction_status),
                mail_body="",
                smtp_server_incoming="smtp.gmail.com",
            )
    except Exception:
        # `except Exception` instead of the original bare `except:` so that
        # KeyboardInterrupt/SystemExit still propagate; this remains a
        # deliberate best-effort catch-all with logging + alert mail.
        logger.error("test died", exc_info=True)
        send_mail(
            mail_info["sender_mail"],
            mail_info["sender_password"],
            mail_info["receiver_mail"],
            "deep_predictor:is up test died",
            mail_body="",
            smtp_server_incoming="smtp.gmail.com",
        )
29,205
def calculateOriginalVega(f, k, r, t, v, cp):
    """Raw vega: central finite-difference sensitivity of price to volatility."""
    price_up = calculatePrice(f, k, r, t, v * STEP_UP, cp)
    price_down = calculatePrice(f, k, r, t, v * STEP_DOWN, cp)
    return (price_up - price_down) / (v * STEP_DIFF)
29,206
def test_string_representation() -> None:
    """repr() and str() of a Job must both render function, args and kwargs."""
    def dummy(msg):
        pass

    expected = "Job(function=dummy, args=('a',), kwargs={'msg': 'a'})"
    assert expected == repr(Job(dummy, ('a',), {'msg': 'a'}))
    assert expected == str(Job(dummy, ('a',), {'msg': 'a'}))
29,207
def run_ansible_lint(
    *argv: str,
    cwd: Optional[str] = None,
    executable: Optional[str] = None,
    env: Optional[Dict[str, str]] = None
) -> CompletedProcess:
    """Run ansible-lint on a given path and returns its output.

    Args:
        argv: extra command-line arguments for ansible-lint.
        cwd: working directory for the subprocess.
        executable: binary to run; defaults to ``python -m ansiblelint``.
        env: base environment for the subprocess; never mutated.
    """
    if not executable:
        executable = sys.executable
        args = [sys.executable, "-m", "ansiblelint", *argv]
    else:
        args = [executable, *argv]

    # It is not safe to pass entire env for testing as other tests would
    # pollute the env, causing weird behaviors, so we pass only a safe list of
    # vars.
    safe_list = [
        "HOME",
        "LANG",
        "LC_ALL",
        "LC_CTYPE",
        "NO_COLOR",
        "PATH",
        "PYTHONIOENCODING",
        "PYTHONPATH",
        "TERM",
    ]

    # Copy the caller's mapping: the original aliased it (`_env = env`) and
    # then wrote into it, mutating the caller's dict as a side effect.
    _env = {} if env is None else dict(env)
    for v in safe_list:
        if v in os.environ and v not in _env:
            _env[v] = os.environ[v]

    return subprocess.run(
        args,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        shell=False,  # needed when command is a list
        check=False,
        cwd=cwd,
        env=_env,
        universal_newlines=True,
    )
29,208
def to_numpy(tensor):
    """Convert a PyTorch tensor (or any array-like) to a numpy array."""
    if isinstance(tensor, np.ndarray):
        return tensor
    # CUDA tensors must be moved to host memory before conversion.
    if getattr(tensor, 'is_cuda', False):
        return tensor.cpu().detach().numpy()
    if hasattr(tensor, 'detach'):
        return tensor.detach().numpy()
    if hasattr(tensor, 'numpy'):
        return tensor.numpy()
    # Plain sequences/scalars: let numpy do the conversion.
    return np.array(tensor)
29,209
async def save_image_urls(channel, filename=_CARD_IMG_URLS_PATH):
    """Like upload_images but persists the resulting dict as JSON on disk."""
    url_map = await upload_images(channel)
    with open(filename, 'w') as fp:
        json.dump(url_map, fp, indent=4, separators=(',', ': '))
29,210
def gradual_light_on(seconds):
    """
    Gradually increases the light brightness from minimum to maximum in
    inputted amount of time.
    :param seconds: Time in seconds for the entire procedure to take.
    """
    # NOTE(review): sleep_time divides by 255 but the loop below performs
    # only 253 steps (range(1, 254)), so the ramp finishes early at
    # brightness 253 rather than the bulb maximum -- confirm whether
    # range(1, 255) was intended (Hue brightness tops out at 254).
    sleep_time = seconds / 255.0
    light_bulb = Light(__connected_bridge(), ROOM_LIGHT_BULB_ID)
    if light_bulb.reachable is False:
        # Physically switched off: cannot be controlled over the bridge.
        print('Light %s switch is OFF' % ROOM_LIGHT_BULB_ID)
    else:
        # Start from dark, power on if necessary, then ramp up.
        light_bulb.brightness = 0
        if light_bulb.on is False:
            light_bulb.on = True
        print('Increasing the light brightness for %s seconds.' % seconds)
        for x in range(1, 254):
            time.sleep(sleep_time)
            light_bulb.brightness = x
29,211
def test_delete_request_db_contents(test_client):
    """
    A fixture to simply delete the db contents (starting at Request() as
    the root - does not delete User() contents) after each test runs.
    """
    print('----------Setup test_delete_request_db_contents fixture ----------')
    yield  # this is where the test is run
    print('-------Teardown test_delete_request_db_contents fixture --------')
    # Teardown: wipe the request-related tables; deletion order may matter
    # for dependent rows, so keep Request() first.
    db_lightsheet.Request().delete()
    db_lightsheet.AntibodyHistory().delete()
    db_lightsheet.AntibodyOverview().delete()
    # db_admin.UserActionLog().delete()
    # db_admin.LightsheetPipelineSpockJob().delete()
29,212
def show_tables():
    """Render the IC50 and EC50 subsets of the binding data as HTML tables.

    Loads the bundled TSV plus every CSV found in the uploads/ directory;
    every file is assumed to share the same 8-column header. Only rows whose
    affinity type contains IC50 or EC50 are shown.
    """
    header_format = ['target_name', 'uniprot_id', 'smiles', 'bindingdb_id',
                     'affinity_type', 'affinity_value', 'source', 'price']
    frames = []

    base = pd.read_csv('bindingDB_purchase_target_subset.tsv', sep='\t',
                       names=header_format, header=0)  # for tsv file
    # data = pd.read_excel(data_path) # for Excel file
    base.set_index(['bindingdb_id'], inplace=True)
    frames.append(base)

    pattern = os.path.join(app.config['UPLOAD_FOLDER'], "*.csv")
    uploaded_files = glob.glob(pattern)
    app.logger.warning("Loading files:")
    app.logger.warning(pattern)
    app.logger.warning(uploaded_files)
    for upload_file in uploaded_files:
        app.logger.warning("Loading uploaded file %s" % upload_file)
        extra = pd.read_csv(upload_file, names=header_format, header=0)  # for csv file
        extra.set_index(['bindingdb_id'], inplace=True)
        frames.append(extra)

    df = pd.concat(frames)
    df.index.name = None
    ic50_data = df.loc[df['affinity_type'].str.contains("IC50")]
    ec50_data = df.loc[df['affinity_type'].str.contains("EC50")]
    return render_template('view.html',
                           tables=[ic50_data.to_html(classes='IC50'),
                                   ec50_data.to_html(classes='EC50')],
                           titles=['na', 'IC50 data', 'EC50 data'])
29,213
def _redirect_io(inp, out, f): """Calls the function `f` with ``sys.stdin`` changed to `inp` and ``sys.stdout`` changed to `out`. They are restored when `f` returns. This function returns whatever `f` returns. """ import os import sys oldin, sys.stdin = sys.stdin, inp oldout, sys.stdout = sys.stdout, out try: x = f() finally: sys.stdin = oldin sys.stdout = oldout if os.environ.get('PYPNG_TEST_TMP') and hasattr(out,'getvalue'): name = mycallersname() if name: w = open(name+'.png', 'wb') w.write(out.getvalue()) w.close() return x
29,214
def _maybe_to_dense(obj): """ try to convert to dense """ if hasattr(obj, 'to_dense'): return obj.to_dense() return obj
29,215
def update_visit_counter(visit_counter_matrix, observation, action):
    """Update the visit counter.

    Counts how many times a state-action pair has been visited; this
    information can be used during the update.

    @param visit_counter_matrix a matrix initialised with zeros
    @param observation the state observed (three indices)
    @param action the action taken
    @return the updated (in-place) counter matrix
    """
    x, y, z = observation[0], observation[1], observation[2]
    visit_counter_matrix[x, y, z, action] += 1.0
    return visit_counter_matrix
29,216
def writeFileHashIndex(Data, Path, Filename=Cfg.HASH_INDEX_FILENAME):
    """Write the file hash index to the specified path.

    @param Data: hash-index mapping to persist
    @param Path: target directory
    @param Filename: output file name (defaults to the configured
        hash-index file name; evaluated once at import time)
    """
    # Delegates serialization entirely to the shared YAML writer.
    writeYaml(Path, Filename, Data)
29,217
def pandas_time_safe(series):
    """Map datetime.time values to seconds; pass any other series through.

    The first element decides: if it is a datetime.time, the whole series
    is converted with dt_seconds.
    """
    if isinstance(series.iloc[0], datetime.time):
        return series.map(dt_seconds)
    return series
29,218
def binary_cross_entropy_loss(predicted_y, true_y):
    """Element-wise binary cross entropy between probabilities and labels.

    Parameters
    ----------
    predicted_y : numpy array of shape (N, 1)
        The predicted probabilities
    true_y : numpy array of shape (N, )
        The true labels

    Returns
    -------
    numpy array of shape (N, ) with the per-sample loss
    """
    probs = np.squeeze(predicted_y)
    # -[y*log(p) + (1-y)*log(1-p)], vectorised over the whole batch.
    return -(true_y * np.log(probs) + (1 - true_y) * np.log(1 - probs))
29,219
def money_flow_index(high, low, close, volume, n=14, fillna=False):
    """Money Flow Index (MFI)

    Uses both price and volume to measure buying and selling pressure: a
    ratio of positive and negative money flow over *n* periods is plugged
    into an RSI-style formula, yielding an oscillator between 0 and 100.

    http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:money_flow_index_mfi

    Args:
        high(pandas.Series): dataset 'High' column.
        low(pandas.Series): dataset 'Low' column.
        close(pandas.Series): dataset 'Close' column.
        volume(pandas.Series): dataset 'Volume' column.
        n(int): n period.
        fillna(bool): if True, fill nan values (with the neutral value 50).

    Returns:
        pandas.Series: New feature generated.
    """
    # 0 Assemble a working frame from the four input series.
    df = pd.DataFrame([high, low, close, volume]).T
    df.columns = ['High', 'Low', 'Close', 'Volume']

    # Direction of the close vs. the previous bar: 1 = up, 2 = down, 0 = flat.
    df['Up_or_Down'] = 0
    df.loc[(df['Close'] > df['Close'].shift(1)), 'Up_or_Down'] = 1
    df.loc[(df['Close'] < df['Close'].shift(1)), 'Up_or_Down'] = 2

    # 1 typical price and 2 raw money flow.
    typical_price = (df['High'] + df['Low'] + df['Close']) / 3.0
    raw_money_flow = typical_price * df['Volume']

    # 3 positive and negative money flow summed over n periods.
    df['1p_Positive_Money_Flow'] = 0.0
    df.loc[df['Up_or_Down'] == 1, '1p_Positive_Money_Flow'] = raw_money_flow
    positive_mf = df['1p_Positive_Money_Flow'].rolling(n).sum()
    df['1p_Negative_Money_Flow'] = 0.0
    df.loc[df['Up_or_Down'] == 2, '1p_Negative_Money_Flow'] = raw_money_flow
    negative_mf = df['1p_Negative_Money_Flow'].rolling(n).sum()

    # 4 money flow index.
    money_ratio = positive_mf / negative_mf
    mfi = 100 - (100 / (1 + money_ratio))
    if fillna:
        mfi = mfi.fillna(50)
    return pd.Series(mfi, name='mfi_' + str(n))
29,220
def insert_extracted_sources(image_id, results, extract_type='blind', ff_runcat_ids=None, ff_monitor_ids=None):
    """Insert all sourcefinder detections into the extractedsource table.

    Besides the source properties from sourcefinder, additional attributes
    are calculated that increase performance in other tasks.

    Each entry of ``results`` follows this strict sequence (mind the units):
        (0) ra [deg], (1) decl [deg],
        (2) ra_fit_err [deg], (3) decl_fit_err [deg],
        (4) peak_flux [Jy], (5) peak_flux_err [Jy],
        (6) int_flux [Jy], (7) int_flux_err [Jy],
        (8) significance detection level,
        (9) beam major width [arcsec], (10) beam minor width [arcsec],
        (11) parallactic angle [deg],
        (12) ew_sys_err [arcsec], (13) ns_sys_err [arcsec],
        (14) error_radius [arcsec], (15) gaussian fit (bool),
        (16) chisq, (17) reduced_chisq

    ra_fit_err / decl_fit_err are the 1-sigma gaussian-fit errors in
    degrees (for a source towards the poles ra_fit_err increases with
    absolute declination).  error_radius is a pessimistic on-sky error in
    arcsec; ew_sys_err / ns_sys_err are telescope-dependent systematic
    errors in arcsec.  Derived quantities (all in degrees):

        uncertainty_ew^2 = ew_sys_err^2 + error_radius^2
        uncertainty_ns^2 = ns_sys_err^2 + error_radius^2
        ra_err^2   = ra_fit_err^2 + alpha_inflate(ew_sys_err, decl)^2
        decl_err^2 = decl_fit_err^2 + ns_sys_err^2

    where alpha_inflate() converts an angular on-sky distance into an RA
    distance at the given declination.

    ``extract_type`` tells where the detections originate from:
        'blind': blind source extraction
        'ff_nd': forced fits at null-detection locations
        'ff_ms': forced fits at monitoringlist positions

    ``ff_runcat_ids`` (resp. ``ff_monitor_ids``), when given, holds one
    runningcatalog (monitor) id per entry of ``results``; these are stored
    alongside each extracted source to simplify further null-detection /
    monitoring processing.  For blind extractions both are None.

    Appended per source before insertion: the image id, the declination
    zone (zoneheight of 1 degree, i.e. floor(decl)), the Cartesian
    coordinates of the position, and ra * cos(radians(decl)), which is
    used frequently in source-distance calculations.
    """
    if not len(results):
        logger.debug("No extract_type=%s sources added to extractedsource for"
                     " image %s" % (extract_type, image_id))
        return
    xtrsrc = []
    for i, src in enumerate(results):
        r = list(src)
        # Drop any fits with infinite flux errors
        if math.isinf(r[5]) or math.isinf(r[7]):
            logger.warn("Dropped source fit with infinite flux errors "
                        "at position {} {} in image {}".format(
                            r[0], r[1], image_id))
            continue
        # Use 360 degree rather than infinite uncertainty for
        # unconstrained positions.
        r[14] = substitute_inf(r[14], 360.0)
        # fit_type is stored as an integer column.
        r[15] = int(r[15])
        # ra_err: sqrt of quadratic sum of fitted and systematic errors.
        r.append(math.sqrt(r[2]**2 + alpha_inflate(r[12]/3600., r[1])**2))
        # decl_err: sqrt of quadratic sum of fitted and systematic errors.
        r.append(math.sqrt(r[3]**2 + (r[13]/3600.)**2))
        # uncertainty_ew: sqrt of quadratic sum of systematic error and
        # error_radius; /3600 because the result is in degrees while the
        # inputs are in arcsec.
        r.append(math.sqrt(r[12]**2 + r[14]**2)/3600.)
        # uncertainty_ns: likewise, in degrees.
        r.append(math.sqrt(r[13]**2 + r[14]**2)/3600.)
        r.append(image_id)  # id of the image
        r.append(int(math.floor(r[1])))  # zone (1-degree declination bands)
        r.extend(eq_to_cart(r[0], r[1]))  # Cartesian x,y,z
        r.append(r[0] * math.cos(math.radians(r[1])))  # ra * cos(radians(decl))
        if extract_type == 'blind':
            r.append(0)
        elif extract_type == 'ff_nd':
            r.append(1)
        elif extract_type == 'ff_ms':
            r.append(2)
        else:
            raise ValueError("Not a valid extractedsource insert type: '%s'"
                             % extract_type)
        if ff_runcat_ids is not None:
            assert len(results)==len(ff_runcat_ids)
            r.append(ff_runcat_ids[i])
        else:
            r.append(None)
        if ff_monitor_ids is not None:
            assert len(results)==len(ff_monitor_ids)
            r.append(ff_monitor_ids[i])
        else:
            r.append(None)
        xtrsrc.append(r)
    # Column order must match the per-row append order built above.
    insertion_query = """\
INSERT INTO extractedsource
  (ra ,decl ,ra_fit_err ,decl_fit_err ,f_peak ,f_peak_err ,f_int ,f_int_err
  ,det_sigma ,semimajor ,semiminor ,pa ,ew_sys_err ,ns_sys_err ,error_radius
  ,fit_type ,chisq ,reduced_chisq ,ra_err ,decl_err ,uncertainty_ew
  ,uncertainty_ns ,image ,zone ,x ,y ,z ,racosdecl ,extract_type
  ,ff_runcat ,ff_monitor
  )
VALUES {placeholder}
"""
    if xtrsrc:
        # Build one multi-row VALUES clause: (%s,...),(%s,...),... so the
        # whole batch goes in with a single execute.
        cols_per_row = len(xtrsrc[0])
        placeholder_per_row = '('+ ','.join(['%s']*cols_per_row) +')'
        placeholder_full = ','.join([placeholder_per_row]*len(xtrsrc))
        query = insertion_query.format(placeholder= placeholder_full)
        cursor = tkp.db.execute(query,
                                tuple(itertools.chain.from_iterable(xtrsrc)),
                                commit=True)
        insert_num = cursor.rowcount
        #if insert_num == 0:
        #    logger.info("No forced-fit sources added to extractedsource for "
        #                "image %s" % (image_id,))
        if extract_type == 'blind':
            logger.debug("Inserted %d sources in extractedsource for image %s"
                         % (insert_num, image_id))
        elif extract_type == 'ff_nd':
            logger.debug("Inserted %d forced-fit null detections in extractedsource"
                         " for image %s" % (insert_num, image_id))
        elif extract_type == 'ff_ms':
            logger.debug("Inserted %d forced-fit for monitoring in extractedsource"
                         " for image %s" % (insert_num, image_id))
29,221
def main(**kwargs):
    """Entry point for GravityBee CLI."""
    print("GravityBee CLI,", gravitybee.__version__)
    arguments = gravitybee.Arguments(**kwargs)
    generator = gravitybee.PackageGenerator(arguments)
    # Propagate the generator's status as the process exit code.
    sys.exit(generator.generate())
29,222
def finalize():
    """Cleans up fault injection visualization."""
    # Python 3 print function (the original used a Python 2 print
    # statement, which is a SyntaxError on Python 3).
    print("Finalizing fault injection visualization...")
    # conn.commit()
    # conn.close()
29,223
def parse_args(args=None, doc=False):
    """
    Handle parsing of arguments and flags. Generates docs using help from `ArgParser`

    Args:
        args (list): argv passed to the binary; defaults to an empty list.
            (The original used the mutable default ``args=[]``; ``None``
            with an in-body fallback is equivalent and avoids the shared
            mutable-default pitfall.)
        doc (bool): If the function should generate and return manpage

    Returns:
        Processed args and a copy of the `ArgParser` object if not `doc`
        else a `string` containing the generated manpage
    """
    if args is None:
        args = []
    parser = ArgParser(prog=__COMMAND__, description=f"{__COMMAND__} - {__DESCRIPTION__}")
    parser.add_argument("file")
    parser.add_argument("--version", action="store_true", help="print program version")
    args = parser.parse_args(args)

    # De-duplicate parser actions while preserving order (the original used
    # a side-effecting list comprehension for this).
    arg_helps = []
    for action in parser._actions:
        if action not in arg_helps:
            arg_helps.append(action)

    NAME = f"**NAME*/\n\t{__COMMAND__} - {__DESCRIPTION__}"
    SYNOPSIS = f"**SYNOPSIS*/\n\t{__COMMAND__} [OPTION]... "
    DESCRIPTION = f"**DESCRIPTION*/\n\t{__DESCRIPTION_LONG__}\n\n"

    for item in arg_helps:
        # Its a positional argument
        if len(item.option_strings) == 0:
            # If the argument is optional:
            if item.nargs == "?":
                SYNOPSIS += f"[{item.dest.upper()}] "
            elif item.nargs == "+":
                SYNOPSIS += f"[{item.dest.upper()}]... "
            else:
                SYNOPSIS += f"{item.dest.upper()} "
        else:
            # Boolean flag
            if item.nargs == 0:
                if len(item.option_strings) == 1:
                    DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/\t{item.help}\n\n"
                else:
                    DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/\n\t\t{item.help}\n\n"
            elif item.nargs == "+":
                DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/=[{item.dest.upper()}]...\n\t\t{item.help}\n\n"
            else:
                DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/={item.dest.upper()}\n\t\t{item.help}\n\n"

    if doc:
        return f"{NAME}\n\n{SYNOPSIS}\n\n{DESCRIPTION}\n\n"
    return args, parser
29,224
def gaussian_kernel_dx_i_dx_j(x, y, sigma=1.):
    r"""Matrix of \frac{\partial k}{\partial x_i \partial x_j} for the
    Gaussian kernel evaluated at the 1-D points x and y."""
    assert len(x.shape) == 1
    assert len(y.shape) == 1
    dim = x.size
    diff_outer = np.outer(y - x, y - x)
    # Kernel value for the single pair, via the batched kernel helper.
    k = gaussian_kernel(x[np.newaxis, :], y[np.newaxis, :], sigma)
    scale = 2.0 / sigma
    return k * diff_outer * scale ** 2 - k * np.eye(dim) * scale
29,225
def clone_compressed_repository(base_path, name):
    """Decompress a fixture tarball and clone the bare repository inside it."""
    fixture_path = Path(__file__).parent / "tests" / "fixtures" / f"{name}.tar.gz"
    working_dir = base_path / name
    bare_base_path = working_dir / "bare"

    with tarfile.open(fixture_path, "r") as fixture:
        fixture.extractall(str(bare_base_path))

    repository_path = working_dir / "repository"
    bare_repo = Repo(bare_base_path / name, search_parent_directories=True)
    return bare_repo.clone(repository_path)
29,226
def slices(series, length):
    """
    Given a string of digits, output all the contiguous substrings of
    length n in that string in the order that they appear.

    :param series string - string of digits.
    :param length int - the length of the series to find.
    :return list - List of substrings of specified length from series.
    """
    if len(series) < length:
        raise ValueError("Length requested is shorter than series.")
    if length < 1:
        raise ValueError("Length requested is less than 1.")
    # Every start position that still leaves room for a full-length slice.
    return [series[i:i + length] for i in range(len(series) - length + 1)]
29,227
def get_flight_time(dset):
    """Get flight time of GNSS signal between satellite and receiver

    Args:
        dset(Dataset):    Model data

    Return:
        numpy.ndarray:    Flight time of GNSS signal between satellite and
                          receiver in [s]
    """
    from where.models.delay import gnss_range  # Local import to avoid cyclical import

    # Flight time = geometric range / speed of light.
    return gnss_range.gnss_range(dset) / constant.c
29,228
def get_salary(request, responder):
    """
    If a user asks for the salary of a specific person, this function
    returns their hourly salary by querying into the knowledge base
    according to the employee name.
    """
    # Populates the responder's slots (name/money) from the knowledge base.
    responder = _get_person_info(request, responder, 'money')
    try:
        responder.reply("{name}'s hourly salary is {money}")
    except KeyError:
        # Slot fill failed: the requested person is not a known employee.
        responder.reply(NOT_AN_EMPLOYEE)
29,229
def textToTuple(text, defaultTuple):
    """This will convert the text representation of a tuple into a real
    tuple. No checking for type or number of elements is done. See
    textToTypeTuple for that.

    On any parse failure -- including empty input -- *defaultTuple* is
    returned.
    """
    # first make sure that the text starts and ends with brackets
    text = text.strip()
    if not text:
        # The original indexed text[0] and crashed on empty input.
        return defaultTuple
    if text[0] != '(':
        text = '(%s' % (text,)
    if text[-1] != ')':
        text = '%s)' % (text,)
    try:
        # ast.literal_eval replaces eval(): it only accepts Python
        # literals, so arbitrary code in *text* can no longer execute.
        # tuple(...) keeps the original semantics: a parenthesised scalar
        # like "(5)" is not iterable and falls through to the default,
        # exactly as eval('tuple((5))') did.
        return tuple(ast.literal_eval(text))
    except Exception:
        return defaultTuple
29,230
def main():
    """Main module execution code path"""
    # Instantiating the info class runs the Ansible module logic end-to-end.
    AzureRMManagedDiskInfo()
29,231
def draw_bboxes_with_labels(img, bboxes, label_indices, probs, labels):
    """Drawing bounding boxes with labels on given image.
    inputs:
        img = (height, width, channels)
        bboxes = (total_bboxes, [y1, x1, y2, x2]) in denormalized form
        label_indices = (total_bboxes)
        probs = (total_bboxes)
        labels = [labels string list]
    """
    # One random 4-component color per class (ints in [0, 256)).
    colors = tf.random.uniform((len(labels), 4), maxval=256, dtype=tf.int32)
    image = tf.keras.preprocessing.image.array_to_img(img)
    width, height = image.size
    draw = ImageDraw.Draw(image)
    for index, bbox in enumerate(bboxes):
        y1, x1, y2, x2 = tf.split(bbox, 4)
        # NOTE(review): these rebind the image-size names above with the
        # box size; harmless here since the originals are unused afterwards.
        width = x2 - x1
        height = y2 - y1
        if width <= 0 or height <= 0:
            # Skip degenerate boxes.
            continue
        label_index = int(label_indices[index])
        color = tuple(colors[label_index].numpy())
        label_text = "{0} {1:0.3f}".format(labels[label_index], probs[index])
        draw.text((x1 + 4, y1 + 2), label_text, fill=color)
        draw.rectangle((x1, y1, x2, y2), outline=color, width=3)
    # plt.figure()
    plt.imshow(image)
    plt.show()
29,232
def m2m_bi2uni(m2m_list):
    """Splits a bigram word model into a unique unigram word model.

    Consecutive bigram entries overlap by one word; the overlap is located
    by scanning the current entry backwards (index i) and the next entry
    forwards (index j) until a matching run of symbols followed by the '#'
    padding is found:

        i=11, j=3       i=10, j=3       i=9,10,11,12, j=3,4,5,6
        ###leilatem###  ###leilatem###  ###leilatem###
        ###temum###     ###temum###     ###temum###
        ^               ^                ^^^^
        m: mismatch     m                mMMm    M: match
    """
    q = Queue(maxsize=2)
    phonemes_list = []
    while len(m2m_list):  # NOTE can be optmised removing this while
        while not q.full():
            bigram = m2m_list.pop(0)
            # Pad both ends so boundary scans can rely on '#' sentinels.
            q.put(PADD + bigram + PADD)
        curr_word = q.get()
        next_word = q.get()
        i = len(curr_word) - 1 - len(PADD)  # to decrease backwards
        j = len(PADD)  # to increase forward
        unmatch_count = 0
        match = False
        while not match:
            # scan the first section: mismatch (m)
            while curr_word[i] != next_word[j]:
                i -= 1
                unmatch_count += 1
            # workaround ("gambiarra") to avoid false matches like in
            # 's e j s': never accept an immediate consonant match.
            if unmatch_count == 0 and not is_vowel(curr_word[i][0]):
                i -= 1
                unmatch_count += 1
                continue
            for k in range(unmatch_count + len(PADD)):
                # scan the second section: a match (M)
                if curr_word[i + k] == next_word[j + k]:
                    continue
                else:
                    # found third section: right mismatch with PADD (m)
                    if curr_word[i + k] == '#':  # check immediate mismatch
                        match = True
                    else:
                        # mismatch inside the supposed overlap: back up one
                        # more symbol in curr_word and retry the scan.
                        i -= 1
                        unmatch_count += 1
                    break
        # Emit the non-overlapping prefix of the current entry, then keep
        # the next entry queued for the following iteration.
        phonemes_list.append(curr_word[len(PADD):i])
        q.put(next_word)
    # Flush the final queued entry: its overlap section and its tail.
    phonemes_list.append(next_word[len(PADD):j + k])
    phonemes_list.append(next_word[j + k:-len(PADD)])
    return phonemes_list
29,233
async def run_server() -> None:
    """Begin listening for and handling connections on all endpoints.

    Works directly on top of transport-layer sockets (TCP/IP or UNIX
    domain); HTTP parsing and handler dispatch are delegated to glob.app
    (cmyui's web framework,
    https://github.com/cmyui/cmyui_pkg/blob/master/cmyui/web.py).
    """
    # fetch our server's endpoints; gulag supports
    # osu!'s handlers across multiple domains.
    from domains.cho import domain as c_ppy_sh  # /c[e4-6]?.ppy.sh/
    from domains.osu import domain as osu_ppy_sh
    from domains.ava import domain as a_ppy_sh
    from domains.map import domain as b_ppy_sh
    glob.app.add_domains({c_ppy_sh, osu_ppy_sh, a_ppy_sh, b_ppy_sh})

    # support both INET and UNIX sockets
    if misc.utils.is_inet_address(glob.config.server_addr):
        sock_family = socket.AF_INET
    elif isinstance(glob.config.server_addr, str):
        sock_family = socket.AF_UNIX
    else:
        raise ValueError('Invalid socket address.')

    if sock_family == socket.AF_UNIX:
        # using unix socket - remove from filesystem if it exists
        if os.path.exists(glob.config.server_addr):
            os.remove(glob.config.server_addr)

    # create our transport layer socket; osu! uses tcp/ip
    with socket.socket(sock_family, socket.SOCK_STREAM) as listening_sock:
        listening_sock.setblocking(False)  # asynchronous
        listening_sock.bind(glob.config.server_addr)

        if sock_family == socket.AF_UNIX:
            # using unix socket - give the socket file
            # appropriate (read, write) permissions
            os.chmod(glob.config.server_addr, 0o666)

        listening_sock.listen(glob.config.max_conns)
        log(f'-> Listening @ {glob.config.server_addr}', RGB(0x00ff7f))

        glob.ongoing_conns = []
        glob.shutting_down = False

        while not glob.shutting_down:
            # TODO: this timeout based-solution can be heavily
            # improved and refactored out.
            # Wake every 0.25s so the shutdown flag is rechecked even when
            # no connections arrive.
            try:
                conn, _ = await asyncio.wait_for(
                    fut=loop.sock_accept(listening_sock),
                    timeout=0.25
                )
            except asyncio.TimeoutError:
                pass
            else:
                # Each connection is handled as its own task; the callback
                # removes it from the ongoing list when done.
                task = loop.create_task(glob.app.handle(conn))
                task.add_done_callback(misc.utils._conn_finished_cb)
                glob.ongoing_conns.append(task)

    if sock_family == socket.AF_UNIX:
        # using unix socket - remove from filesystem
        os.remove(glob.config.server_addr)
29,234
def time_func(func):
    """Times how long a function takes to run.

    Returns a wrapper that calls *func*, discards its return value, and
    returns the elapsed time in seconds.  It doesn't do anything clever to
    avoid the various pitfalls of timing a function's runtime.

    Uses time.perf_counter() -- a monotonic, high-resolution clock intended
    for interval measurement -- instead of time.time(), whose wall-clock
    value can jump (NTP adjustments, DST) and skew short timings.
    """
    def timed(*args, **kwargs):
        start = time.perf_counter()
        func(*args, **kwargs)
        return time.perf_counter() - start
    return timed
29,235
def GetPhiPsiChainsAndResiduesInfo(MoleculeName, Categorize = True):
    """Get phi and psi torsion angle information for residues across chains
    in a molecule containing amino acids.

    The phi and psi angles are optionally categorized into groups
    corresponding to the four Ramachandran plot types: General (all residues
    except glycine, proline, or pre-proline), Glycine, Proline, and
    Pre-Proline (residues before proline, excluding glycine and proline).

    Arguments:
        MoleculeName (str): Name of a PyMOL molecule object.

    Returns:
        dict: Sorted residue numbers per chain plus per-residue name, phi,
        psi (and category when Categorize is True); None for an empty name.

    Examples:

        PhiPsiInfoMap = GetPhiPsiChainsAndResiduesInfo(MolName)
        for ChainID in PhiPsiInfoMap["ChainIDs"]:
            for ResNum in PhiPsiInfoMap["ResNums"][ChainID]:
                ResName = PhiPsiInfoMap["ResName"][ChainID][ResNum]
                Phi = PhiPsiInfoMap["Phi"][ChainID][ResNum]
                Psi = PhiPsiInfoMap["Psi"][ChainID][ResNum]
                Category = PhiPsiInfoMap["Category"][ChainID][ResNum]
                MiscUtil.PrintInfo("ChainID: %s; ResNum: %s; ResName: %s; Phi: %8.2f; Psi: %8.2f; Category: %s" % (ChainID, ResNum, ResName, Phi, Psi, Category))
    """
    if not len(MoleculeName):
        return None
    # The whole molecule is the selection; the shared helper does the work.
    return _GetSelectionPhiPsiChainsAndResiduesInfo("%s" % (MoleculeName), Categorize)
29,236
def generate_data(n_samples=30):
    """Generate synthetic dataset. Returns `data_train`, `data_test`, `target_train`."""
    x_min, x_max = -3, 3
    features = rng.uniform(x_min, x_max, size=n_samples)
    # Cubic signal with a quadratic dip plus Gaussian noise, then scaled
    # to unit standard deviation.
    target = features ** 3 - 0.5 * (features + 1) ** 2 + 4.0 * rng.randn(n_samples)
    target /= target.std()

    data_train = pd.DataFrame(features, columns=["Feature"])
    grid = np.linspace(x_max, x_min, num=300)
    data_test = pd.DataFrame(grid, columns=["Feature"])
    target_train = pd.Series(target, name="Target")
    return data_train, data_test, target_train
29,237
def get_element_event(element_key):
    """Return the queryset of event records triggered by the given element."""
    event_model = apps.get_model(settings.WORLD_DATA_APP, "event_data")
    return event_model.objects.filter(trigger_obj=element_key)
29,238
def test_md020_good_with_html_blocks():
    """
    Verify that a closed atx heading with bad spacing inside the start
    hashes is NOT reported when it is part of an html block (file from the
    test/resources/rules/md020 directory).
    """
    # Arrange
    scanner = MarkdownScanner()
    supplied_arguments = [
        "--disable-rules",
        "md033",
        "scan",
        "test/resources/rules/md020/with_html_blocks.md",
    ]

    # Act
    execute_results = scanner.invoke_main(arguments=supplied_arguments)

    # Assert: clean run -- no output, no errors, return code 0.
    execute_results.assert_results("", "", 0)
29,239
def mock_os_path_join():
    """Fixture to mock join."""
    # Patch os.path.join for the duration of the test; the yielded MagicMock
    # records calls and lets tests control the joined-path return value.
    with patch("os.path.join") as mock_path_join:
        yield mock_path_join
29,240
def func_3(csidata):
    """CSI: time-phase

    Plot the calibrated, unwrapped CSI phase of subcarrier 15 over time for
    the first three rx/tx antenna pairs.
    """
    s_index = 15            # subcarrier index
    csi = csidata.get_scaled_csi_sm()
    # Timestamps are in microseconds; convert to seconds relative to the
    # first packet.
    t = csidata.timestamp_low/1000000 - csidata.timestamp_low[0]/1000000
    # Unwrap along the subcarrier axis, then apply the calibration helper.
    phase = np.unwrap(np.angle(csi), axis=1)
    phase = calib(phase)
    plt.figure()
    plt.plot(t, phase[:, s_index, 0, 0], linewidth=0.3, label='subcarrier_15_0_0')
    plt.plot(t, phase[:, s_index, 1, 0], linewidth=0.3, label='subcarrier_15_1_0')
    plt.plot(t, phase[:, s_index, 2, 0], linewidth=0.3, label='subcarrier_15_2_0')
    plt.legend()
    plt.title('csi-phase')
    plt.xlabel('time(s)')
    plt.ylabel('phase')
    plt.show()
29,241
def get_seller_price(sellers,seller_id,core_request):
    """Compute the total price of a deal via a second-price auction.

    Each entry of `sellers` is a list: [name, available cores, price per
    core, list-of-core-slots].  The seller identified by `seller_id` is
    priced against the asks of the sellers above it in ascending price
    order; if it already has the largest ask, its own price is used.

    Returns the total price for `core_request` cores.
    """
    # Ascending price order (index 2 is the per-core price); sorted() is
    # stable, matching the original list.sort behavior.
    ordered = sorted(sellers, key=operator.itemgetter(2))
    # Position of this seller's entry in the ordered list (first match,
    # compared by value, exactly as the original loop did).
    pos = ordered.index(sellers[seller_id])

    # Largest ask: charge the seller's own price.
    if pos == len(sellers) - 1:
        return ordered[pos][2] * core_request

    # Otherwise walk up the order book, filling the request at each higher
    # ask; any remainder is charged at the top ask.
    total = 0
    remaining = core_request
    while remaining > 0:
        pos += 1
        if pos == len(sellers) - 1:
            total += remaining * ordered[pos][2]
            remaining = 0
        elif remaining > ordered[pos][1]:
            remaining -= ordered[pos][1]
            total += ordered[pos][1] * ordered[pos][2]
        else:
            total += remaining * ordered[pos][2]
            remaining = 0
    return total
29,242
def create_affiliation_ttl(noun_uri: str, noun_text: str, affiliated_text: str, affiliated_type: str) -> list:
    """
    Creates the Turtle for an Affiliation.

    @param noun_uri: String holding the entity/URI to be affiliated
    @param noun_text: String holding the sentence text for the entity
    @param affiliated_text: String specifying the entity (organization, group, etc.) to which the
                            noun is affiliated
    @param affiliated_type: String specifying the class type of the entity
    @return: An array of strings holding the Turtle representation of the Affiliation
    """
    # URI fragments use underscores in place of spaces.
    entity_ref = affiliated_text.replace(" ", "_")
    affiliated_uri = f':{entity_ref}'
    affiliation_uri = f'{noun_uri}{entity_ref}Affiliation'
    noun_str = f"'{noun_text}'"
    ttl = [
        f'{affiliation_uri} a :Affiliation ; :affiliated_with {affiliated_uri} ; '
        f':affiliated_agent {noun_uri} .',
        f'{affiliation_uri} rdfs:label "Relationship based on the text, {noun_str}" .',
        f'{affiliated_uri} a {affiliated_type} ; rdfs:label "{affiliated_text}" .',
    ]
    # Enrich with a definition from Wikipedia when one is available.
    wikidata_desc = get_wikipedia_description(affiliated_text)
    if wikidata_desc:
        ttl.append(f'{affiliated_uri} :definition "{wikidata_desc}" .')
    return ttl
29,243
def get_subpixel_indices(col_num: int) -> Tuple[int, int, int]:
    """Return a 3-tuple of 1-indexed column indices representing subpixels
    of a single pixel.

    For pixel column ``c`` the subpixels occupy columns 3c-2, 3c-1 and 3c.

    NOTE(review): the tuple is returned in (red, blue, green) order —
    (3c-2, 3c, 3c-1) — exactly as the original implementation did; confirm
    callers expect this ordering.
    """
    base = (col_num - 1) * 3
    return base + 1, base + 3, base + 2
29,244
def sigmoid(x):
    """ This function computes the sigmoid of x for NeuralNetwork"""
    # Thin wrapper delegating to the NeuralNetwork module's implementation.
    return NN.sigmoid(x)
29,245
def extractTranslatingSloth(item):
    """
    Parse a 'Translating Sloth' feed item into a release message.

    Returns None for non-chapter or preview posts, a release message when a
    known series tag matches, and False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Skip items with no volume/chapter/fragment info and preview posts.
    if not (chp or vol or frag) or 'preview' in item['title'].lower():
        return None

    # (feed tag, canonical series name, translation type) triples.
    tagmap = [
        ('娘子我才是娃的爹', 'Wife, I Am the Baby\'s Father', 'translated'),
        ('Wife, I Am the Baby\'s Father', 'Wife, I Am the Baby\'s Father', 'translated'),
        ('I want to eat meat Wife', 'I want to eat meat Wife', 'translated'),
        ('My Lord is a Stone', 'My Lord is a Stone', 'translated'),
    ]

    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)

    return False
29,246
def set_planet_count(request): """ """ # Get the ip ip = get_ip(request) # Try to fetch count from cache. planets = cache.get(ip) if planets is None: # Get latest blog posts date = now() - timedelta(weeks=1) planets = BlogPost.objects.filter(rank__gte=date)[:100].count() # Expire counts cache in 24 hours expire = 3600 * 24 cache.set(ip, planets, expire) counts = dict(planet_count=planets) # Set the session. request.session[settings.SESSION_COUNT_KEY] = counts
29,247
def human(number: int, suffix='B') -> str:
    """Return a human readable memory size in a string.

    Initially written by Fred Cirera, modified and shared by Sridhar
    Ratnakumar (https://stackoverflow.com/a/1094933/6167478), edited by
    Victor Domingos.
    """
    # Step through binary prefixes until the value fits below 1024.
    for prefix in ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z'):
        if abs(number) < 1024.0:
            return f"{number:3.1f} {prefix}{suffix}"
        number = number / 1024.0
    # Anything still >= 1024 after zetta lands in yobibytes.
    return f"{number:.1f}Yi{suffix}"
29,248
def _get_field_names(field: str, aliases: dict): """ Override this method to customize how :param field: :param aliases: :return: """ trimmed = field.lstrip("-") alias = aliases.get(trimmed, trimmed) return alias.split(",")
29,249
def get_answer(question_with_context):
    """
    Get answer for question and context.

    Runs the HuggingFace question-answering pipeline over the supplied
    question/context payload and returns its result.
    """
    qa = pipeline('question-answering')
    return qa(question_with_context)
29,250
def model_init():
    """ Initialize models. """
    # Currently a stub: only logs that model initialization was requested.
    LOGGER.info("model-init")
29,251
def undiskify(z):
    """Maps SL(2)/U(1) poincare disk coord to Lie algebra generator-factor."""
    # Conventions match (2.13) in https://arxiv.org/abs/1909.10969:
    # radial part is inverted via arctanh, the angular part is kept.
    magnitude = numpy.arctanh(abs(z))
    direction = numpy.exp(1j * numpy.angle(z))
    return 2 * magnitude * direction
29,252
def mean_vertex_normals(vertex_count, faces, face_normals, **kwargs):
    """
    Find vertex normals from the mean of the
    faces that contain that vertex.

    Parameters
    -----------
    vertex_count : int
      The number of vertices faces refer to
    faces : (n, 3) int
      List of vertex indices
    face_normals : (n, 3) float
      Normal vector for each face

    Returns
    -----------
    vertex_normals : (vertex_count, 3) float
      Normals for every vertex
      Vertices unreferenced by faces will be zero.
    """
    def summed_sparse():
        # use a sparse matrix of which face contains each vertex to
        # figure out the summed normal at each vertex
        # allow cached sparse matrix to be passed
        if 'sparse' in kwargs:
            sparse = kwargs['sparse']
        else:
            sparse = index_sparse(vertex_count, faces)
        summed = sparse.dot(face_normals)
        return summed

    def summed_loop():
        # loop through every face, in tests was ~50x slower than
        # doing this with a sparse matrix
        summed = np.zeros((vertex_count, 3))
        for face, normal in zip(faces, face_normals):
            summed[face] += normal
        return summed

    try:
        summed = summed_sparse()
    except BaseException:
        # fall back to the slow-but-dependable loop if sparse construction
        # or the dot product fails for any reason
        log.warning(
            'unable to generate sparse matrix! Falling back!',
            exc_info=True)
        summed = summed_loop()

    # invalid normals will be returned as zero
    vertex_normals = util.unitize(summed)

    return vertex_normals
29,253
def fast_scan(root, path=None, search_filter=fast_scan_regex_filter()):
    """
    Recursively scan `root`, yielding a FileScan for every file (and
    symlink) whose path relative to `root` passes `search_filter`.

    NOTE(review): the default `search_filter=fast_scan_regex_filter()` is
    evaluated once at import time and shared by all calls — confirm the
    filter object is stateless.

    >>> import tempfile
    >>> import pathlib
    >>> tempdir = tempfile.TemporaryDirectory()
    >>> for p in (map(partial(pathlib.Path, tempdir.name), (
    ...     'test/folder/1/file1.txt',
    ...     'test/folder/1/file2.txt',
    ...     'test/folder/3/file1.txt',
    ...     'test/folder/file4.json',
    ...     'file5.csv',
    ... ))):
    ...     p.parent.mkdir(parents=True, exist_ok=True)
    ...     p.touch()
    >>> files = tuple(fast_scan(tempdir.name))
    >>> sorted(f.relative for f in files)
    ['file5.csv', 'test/folder/1/file1.txt', 'test/folder/1/file2.txt', 'test/folder/3/file1.txt', 'test/folder/file4.json']
    >>> files[0].stats.st_size
    0

    >>> tempdir.cleanup()
    """
    path = path or ''
    _path = os.path.join(root, path)
    if not os.path.isdir(_path):
        log.warning(f'{path} is not an existing directory - aborting scan')
        return
    with os.scandir(_path) as scanner:
        for dir_entry in scanner:
            if (dir_entry.is_file() or dir_entry.is_symlink()):  # BUG: .is_symlink is dangerous, as symlinks can also be folders
                _filescan = FileScan(root, path, dir_entry)
                if search_filter(_filescan.relative):
                    yield _filescan
            if dir_entry.is_dir():
                # Recurse into subdirectories, extending the relative path.
                yield from fast_scan(root, os.path.join(path, dir_entry.name), search_filter)
29,254
def _silence_resource_warning(popen): """Silence Popen's ResourceWarning. Note this should only be used if the process was created as a daemon. """ # Set the returncode to avoid this warning when popen is garbage collected: # "ResourceWarning: subprocess XXX is still running". # See https://bugs.python.org/issue38890 and # https://bugs.python.org/issue26741. # popen is None when mongocryptd spawning fails if popen is not None: popen.returncode = 0
29,255
def migrate():
    """Run all migrations."""
    # Delegates to the migrations helper, applying every pending upgrade.
    migrations.upgrade()
29,256
def get_beam_jobs():
    """Returns the list of all registered Apache Beam jobs.

    Returns:
        list(BeamJob). The list of registered Apache Beam jobs.
    """
    # Wrap each registered job class in its domain object.
    registered = jobs_registry.get_all_jobs()
    return [beam_job_domain.BeamJob(job) for job in registered]
29,257
def p_exprlist(p):
    """exprlist : empty
                | expr
                | expr ',' exprlist
    """
    # PLY grammar action: the docstring above IS the grammar rule and must
    # not be reworded.  The list is built as the right-recursive tail is
    # reduced first, then each head expression is appended, so the result
    # ends up in reverse source order.
    if len(p) > 2:
        p[0] = p[3]
        p[0].append(p[1])
    else:
        # Single expr, or the `empty` production (falsy p[1]) -> [].
        p[0] = [p[1]] if p[1] else []
29,258
def z_norm(dataset, max_seq_len=50):
    """Normalize data in the dataset.

    Each modality (vision, audio, text) is truncated to `max_seq_len` time
    steps and z-scored per sample along the time axis; NaNs from
    zero-variance features are replaced with zeros.  Labels pass through
    unchanged.
    """
    text = dataset['text'][:, :max_seq_len, :]
    vision = dataset['vision'][:, :max_seq_len, :]
    audio = dataset['audio'][:, :max_seq_len, :]

    # Normalize sample-by-sample; modalities are processed in the same
    # order as before (vision, audio, text).
    for idx in range(dataset["text"].shape[0]):
        for modality in (vision, audio, text):
            sample = modality[idx]
            centered = sample - sample.mean(0, keepdims=True)
            modality[idx] = np.nan_to_num(
                centered / np.std(sample, axis=0, keepdims=True))

    return {
        'vision': vision,
        'audio': audio,
        'text': text,
        'labels': dataset['labels'],
    }
29,259
def days_upto(year):
    """
    Return the number of days from the beginning of the test period
    (year 2000) to the beginning of the year specified
    """
    return sum(days_in_year(y) for y in range(2000, year))
29,260
def read_DEM(fn=None, fjord=None): """ Reads in the DEM (only accepts GeoTiffs right now) into an XArray Dataarray with the desired format. """ # intake.open_rasterio accepts a list of input files and may effectively do what this function does! # try using cropped versions of the input files. Doesn't seem to make a difference r.e. crashing ''' cropped_fn = fn.rpartition(".tif")[0] + "_cropped.tif" print(cropped_fn) if os._exists(cropped_fn): fn = cropped_fn elif fjord != None: bbox = fjord_props.get_fjord_bounds(fjord) ds = rioxarray.open_rasterio(fn) trimmed_ds = ds.rio.slice_xy(*bbox) trimmed_ds.rio.to_raster(fn.rpartition(".tif")[0] + "_cropped.tif") del ds del trimmed_ds fn = cropped_fn ''' # try bringing in the rasters as virtual rasters (i.e. lazily loading) with rasterio.open(fn) as src: # print('Source CRS:' +str(src.crs)) # print(src.is_tiled) # print(src.block_shapes) with WarpedVRT(src,src_crs=src.crs,crs=src.crs) as vrt: # warp_mem_limit=12000,warp_extras={'NUM_THREADS':2}) as vrt: # print('Destination CRS:' +str(vrt.crs)) darr = xr.open_rasterio(vrt) # ds = rioxarray.open_rasterio(vrt).chunk({'x':1500,'y':1500,'band':1}).to_dataset(name='HLS_Red') # Rasterio automatically checks that the file exists # ultimately switch to using rioxarray, but it causes issues down the pipeline so it will need to be debugged through # with rioxarray.open_rasterio(fn) as src: # with xr.open_rasterio(fn) as darr: # darr = src # open_rasterio automatically brings the geotiff in as a DataArray with 'band' as a dimensional coordinate # we rename it and remove the band as a coordinate, since our DEM only has one dimension # squeeze removes dimensions of length 0 or 1, in this case our 'band' # Then, drop('band') actually removes the 'band' dimension from the Dataset darr = darr.rename('elevation').squeeze().drop_vars('band') # darr = darr.rename({'band':'dtime'}) # if we wanted to instead convert it to a dataset # attr = darr.attrs # darr = 
darr.to_dataset(name='elevation').squeeze().drop('band') # darr.attrs = attr # attr=None # newest version of xarray (0.16) has promote_attrs=True kwarg. Earlier versions don't... # darr = darr.to_dataset(name='elevation', promote_attrs=True).squeeze().drop('band') # mask out the nodata values, since the nodatavals attribute is wrong darr = darr.where(darr != -9999.) # the gdalwarp geoid files have this extra attribute in the geoTiff, which when brought in # ultimately causes a "__module__" related error when trying to plot with hvplot try: del darr.attrs["units"] except KeyError: pass if fjord != None: # USE RIOXARRAY - specifically, slicexy() which can be fed the bounding box # darr = darr.rio.slice_xy(fjord_props.get_fjord_bounds(fjord)) bbox = fjord_props.get_fjord_bounds(fjord) if pd.Series(darr.y).is_monotonic_increasing: darr = darr.sel(x=slice(bbox[0], bbox[2]), y=slice(bbox[1], bbox[3])) else: darr = darr.sel(x=slice(bbox[0], bbox[2]), y=slice(bbox[3], bbox[1])) return darr
29,261
def choose_field_size():
    """Prompt the user for a board size (3-5) and build the playing field.

    Returns a tuple ``(field, rows, columns)`` where ``field`` is a
    size x size grid of single-element cell lists, ``rows`` maps row
    letters to 1-based indices and ``columns`` lists the 1-based column
    numbers.

    Bug fix: previously the size-4 and size-5 branches never assigned
    ``field``/``rows``/``columns``, so the final return raised
    UnboundLocalError; they are now populated for every accepted size.
    """
    row_letters = ['a', 'b', 'c', 'd', 'e']
    while True:
        print('Пожалуйста, задайте размер поля (число от 3 до 5):')
        try:
            field_size = int(input())
        except ValueError:
            continue
        if field_size not in (3, 4, 5):
            continue
        # Build the data structures for any accepted size.
        rows = {letter: i + 1 for i, letter in enumerate(row_letters[:field_size])}
        columns = list(range(1, field_size + 1))
        field = [[[' '] for _ in range(field_size)] for _ in range(field_size)]
        if field_size == 3:
            print('\nПоле для игры:\n')
            rows_name = ['a', 'b', 'c']
            print(' 1 2 3\n')
            for row_num in range(len(field)):
                print(rows_name[row_num], sep='', end='')
                for cell in field[row_num]:
                    print(cell[0], '|', sep='', end='')
                print('\n --------------', sep='', end='')
                print('\n')
        elif field_size == 4:
            print(""" 1 2 3 4
a | | |
--------------
b | | |
--------------
c | | |
--------------
d | | |""")
        else:
            print(""" 1 2 3 4 5
a | | | |
------------------
b | | | |
------------------
c | | | |
------------------
d | | | |
------------------
e | | | |""")
        break
    return field, rows, columns
29,262
def mbt_merge(source, *more_sources:str, dest:str, name='', description='', log=print):
    """
    Does a "real" merge, relying on [Upsert](https://www.sqlite.org/lang_UPSERT.html),
    which was added to SQLite with version 3.24.0 (2018-06-04).
    It relies on an index for the conflict detection, but at least `gdal` and *Atlas Creator* have none, so:

    * we deduplicate, keep *last*
    * we create the index as needed (!)

    Assumes same image format

    Args:
        source: first .mbtiles file; copied to `dest` if dest is missing.
        more_sources: additional .mbtiles files merged into dest.
        dest: target .mbtiles path (keyword-only).
        name / description: metadata overrides for the merged file.
        log: logging callable (defaults to print).
    """
    assert dest.endswith('.mbtiles')
    if not os.path.exists(dest):
        # No destination yet: seed it with a straight copy of `source`.
        log(f'cp {source} {dest}')
        shutil.copyfile(source, os.path.expanduser(dest))
        log('<<>>', source[:-8], ':', mbt_info(source))
    else:
        more_sources = (source, *more_sources)

    db = sqlite3.connect(dest)
    dbc = db.cursor()
    try:
        # Ensure no duplicates in base file
        dbc.executescript('''
            DELETE FROM tiles WHERE rowid NOT IN
                (SELECT MAX(rowid) FROM tiles GROUP BY zoom_level, tile_column, tile_row);
            CREATE UNIQUE INDEX IF NOT EXISTS zxy ON tiles (zoom_level, tile_column, tile_row);
        ''')
        meta = dict(dbc.execute('SELECT * FROM metadata').fetchall())
        name = name or dest[:-8]
        desc = f"Merge of the following files:\n* {meta['name']} : {meta['description']}\n"
        bounds = parse_bounds(meta['bounds'])
        for source in more_sources:
            log('<<', source[:-8], ':', mbt_info(source))
            # >> Merge tiles
            # NOTE(review): file paths and metadata are interpolated into
            # SQL via f-strings — safe only for trusted local inputs.
            dbc.executescript(f'''
                ATTACH "{source}" AS source;
                INSERT INTO main.tiles SELECT * FROM source.tiles WHERE true
                    ON CONFLICT (zoom_level, tile_column, tile_row)
                    DO UPDATE SET tile_data=excluded.tile_data;
            ''')
            # >> Merge description and bounds
            smeta = dict(dbc.execute('SELECT * FROM source.metadata').fetchall())
            desc += f"* {smeta['name']} : {smeta['description']}\n"
            sbounds = parse_bounds(smeta['bounds'])
            bounds = merge_bounds(*bounds, *sbounds)
            set_bounds(dbc, *bounds)
            # >> Detach to make room for next source
            dbc.execute(f'DETACH source;')
            log('>>', dest[:-8], ':', mbt_info(dbc))
        print(name, desc)
        dbc.execute(f"UPDATE metadata SET value = '{name}' WHERE name = 'name'")
        dbc.execute(f"UPDATE metadata SET value = '{description or desc}' WHERE name = 'description'")
    finally:
        dbc.close()
        db.commit()
        db.close()
29,263
def delete_submission_change(id):
    """Delete the submission_change row with the given id.

    NOTE(review): despite the docstring this was copied from, no existence
    or author/permission check is performed here — the DELETE simply
    matches zero or one rows.  Confirm callers enforce authorization.
    """
    db = get_db()
    db.execute('DELETE FROM submission_change WHERE id = ?', (id,))
    db.commit()
    return jsonify(status='ok')
29,264
async def test_cache_garbage_collection():
    """Test that the template cache entry survives while any Template
    instance for the string is alive and is dropped once all are deleted."""
    template_string = (
        "{% set dict = {'foo': 'x&y', 'bar': 42} %} {{ dict | urlencode }}"
    )
    tpl = template.Template(
        (template_string),
    )
    tpl.ensure_valid()
    assert template._NO_HASS_ENV.template_cache.get(
        template_string
    )  # pylint: disable=protected-access

    tpl2 = template.Template(
        (template_string),
    )
    tpl2.ensure_valid()
    assert template._NO_HASS_ENV.template_cache.get(
        template_string
    )  # pylint: disable=protected-access

    # Dropping one reference must keep the cached entry alive.
    del tpl
    assert template._NO_HASS_ENV.template_cache.get(
        template_string
    )  # pylint: disable=protected-access

    # Dropping the last reference must evict the cache entry.
    del tpl2
    assert not template._NO_HASS_ENV.template_cache.get(
        template_string
    )
29,265
def get_search_response(db, search_term):
    """Method to get search result from db or google api.

    Args:
        db: The database object.
        search_term: The search term.

    Returns:
        String: List of relevant links separated by line break.
    """
    # Return the cached result if a previous search is stored in mongo.
    cached = db["SearchResults"].find_one({"searchTerm": search_term}) or {}
    response = cached.get("result")
    if response:
        return response

    # Cache miss: fetch from the Google API and store the result.
    # lastSearchedOn is a TTL index with a timeout of 3600 seconds.
    response = get_google_search_response(search_term)
    db["SearchResults"].insert_one({
        "searchTerm": search_term,
        "lastSearchedOn": datetime.now(),
        "result": response,
    })
    return response
29,266
async def test_form_user_discover_fails_aborts_already_configured(hass):
    """Test if we manually configure an existing host we abort after failed discovery."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    # Pre-register an entry so a second flow for the same BLID must abort.
    entry = MockConfigEntry(domain=DOMAIN, data=VALID_CONFIG, unique_id="BLID")
    entry.add_to_hass(hass)

    # Force discovery to fail so the flow falls through to manual entry.
    with patch(
        "homeassistant.components.roomba.config_flow.RoombaDiscovery",
        _mocked_failed_discovery,
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": config_entries.SOURCE_USER}
        )
        await hass.async_block_till_done()

    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] is None
    assert result["step_id"] == "manual"

    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {CONF_HOST: MOCK_IP, CONF_BLID: "blid"},
    )
    await hass.async_block_till_done()

    assert result2["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result2["reason"] == "already_configured"
29,267
def lattice_2d_rescale_wave_profile(kfit, X, dT, Z_C, Y_C, v, dx=1.):
    """
    Fit the wave profile (X, dT) to the ODE solution (X_C, dT_C)

    The profile is recentered on its maximum, rescaled by `kfit`, clipped
    to the overlap window with Z_C, and its amplitude normalized by
    v*kfit/2.  Returns the rescaled coordinates and amplitudes (Z, Y).
    """
    # recenter the rescaled coordinate on the profile maximum
    peak = np.argmax(dT)
    Z = kfit * (X.copy() - X[peak])

    # keep only the window overlapping the input ODE solution
    zlo = max(np.min(Z_C), np.min(Z))
    zhi = min(np.max(Z_C), np.max(Z))
    window = (Z >= zlo) & (Z <= zhi)
    Z = Z[window]
    Y = dT.copy()[window]
    if len(Z) > len(Z_C):
        raise ValueError("Increase resolution of ODE solution!")

    # normalize the amplitude
    Y /= (v * kfit / 2.)
    return Z, Y
29,268
def extract_upload_zip4_text_files(zip_file_path, extract_file_path, s3_connection=None):
    """ Extracts text files from the specified zip4 zip file, uploads the files to s3 if bucket provided

        Args:
            zip_file_path: file path of the extracted zip4 zip
            extract_file_path: file path where the extracted text files will be place
            s3_connection: s3 connection to AWS region

        Returns:
            None
    """
    zip_folder = CONFIG_BROKER["zip_folder"] + "/"
    password = CONFIG_BROKER['usps']['zip4']['password']
    # The outer archive contains password-protected nested zips, each of
    # which holds the actual text files.
    with zipfile.ZipFile(os.path.join(extract_file_path, zip_file_path), 'r') as zip_group:
        for zip_individual in zip_group.namelist():
            with zip_group.open(zip_individual, 'r', password.encode('utf-8')) as zip_nested:
                # Read the nested zip fully into memory before opening it.
                file_data = BytesIO(zip_nested.read())
                with zipfile.ZipFile(file_data) as zip_text_files:
                    for zip_text in zip_text_files.namelist():
                        logger.info('Extracting file {}'.format(zip_text))
                        zip_text_files.extract(zip_text, extract_file_path, password.encode('utf-8'))
                        if s3_connection:
                            upload_extracted_file_to_s3(s3_connection, zip_folder + zip_text,
                                                        extract_file_path + '/' + zip_text)
29,269
def extent_switch_ijk_kji(
        extent_in: npt.NDArray[np.int_]) -> npt.NDArray[np.int_]:  # reverse order of elements in extent
    """Returns equivalent grid extent switched either way between simulator and python protocols.

    The conversion is simply an element reversal, which is its own inverse,
    so the same call maps ijk -> kji and kji -> ijk.

    Args:
        extent_in: 1-D array (or sequence) of axis extents.

    Returns:
        New int ndarray with the elements of extent_in in reverse order.
    """
    # Vectorized reversal replaces the original element-by-element loop;
    # astype('int') preserves the original dtype and copies the data.
    return np.asarray(extent_in)[::-1].astype('int')
29,270
def create_data(f, x_vals):
    """Assumes f is a function of one argument
       x_vals is an array of suitable arguments for f
       Returns array containing results of applying f to the
       elements of x_vals"""
    # Bug fix: the original looped `for i in x_vals` but then evaluated
    # `f(x_vals[i])`, treating element *values* as indices.  Apply f to
    # each element directly, as the docstring specifies.
    return np.array([f(x) for x in x_vals])
29,271
def merge(
    left: pandas.core.frame.DataFrame,
    right: pandas.core.frame.DataFrame,
    how: Literal["left"],
):
    """
    usage.dask: 4
    """
    # Auto-generated API-usage stub: records that dask invokes this
    # signature 4 times.  The body is intentionally empty.
    ...
29,272
def register():
    """Register a new user account via phone number and SMS code."""
    req_dict = request.get_json()

    phone = req_dict.get("phone")
    password = req_dict.get("password")
    password2 = req_dict.get("password2")
    sms_code = req_dict.get("sms_code")
    phone = str(phone)
    sms_code = str(sms_code)
    # Validate parameters
    if not all([phone, password, password2, sms_code]):
        return jsonify(code=400, msg="参数不完整")

    if password != password2:
        return jsonify(code=400, msg="两次密码不一致")

    # Fetch the SMS verification code from redis
    try:
        real_sms_code = redis_store.get("sms_code_%s" % phone)
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(code=4001, msg="读取真实短信验证码异常")

    # Check whether the SMS code has expired
    if real_sms_code is None:
        return jsonify(code=4002, msg="短信验证码失效")

    # Delete the SMS code from redis to prevent replayed verification
    try:
        redis_store.delete("sms_code_%s" % phone)
    except Exception as e:
        current_app.logger.error(e)

    # Verify the SMS code the user entered
    if real_sms_code != sms_code:
        return jsonify(code=4003, msg="短信验证码错误")

    # Check whether this phone number is already registered
    try:
        user = User.query.filter_by(phone=phone).first()
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(code=400, msg="数据库异常")
    else:
        if user is not None:
            # Already registered
            return jsonify(code=400, msg="手机已被注册")

    # Persist the new user's registration data
    avatar = constant.ADMIN_AVATAR_URL  # default user avatar
    user = User(username=phone, phone=phone, password=password, avatar=avatar)
    try:
        db.session.add(user)
        db.session.commit()
    except Exception as e:
        db.session.rollback()
        current_app.logger.error(e)
        return jsonify(code=400, msg="查询数据库异常")

    # Save the login state into the session
    session["username"] = phone
    session["phone"] = phone
    session["user_id"] = user.id
    session["avatar"] = user.avatar

    # Return the result
    return jsonify(code=200, msg="注册成功")
29,273
def print_exception(t, v, tb, limit=None, file=None, as_html=False,
                    with_filenames=True):
    """Print exception up to 'limit' stack trace entries from 'tb' to 'file'.

    Similar to 'traceback.print_exception', but adds supplemental
    information to the traceback and accepts two options, 'as_html' and
    'with_filenames'.
    """
    if file is None:  # pragma: no cover
        file = sys.stderr
    for formatted in format_exception(t, v, tb, limit, as_html, with_filenames):
        file.write(formatted)
29,274
def cpp_flag(compiler):
    """Return the -std=c++[11/14/17] compiler flag.

    The newer version is prefered over c++11 (when it is available).
    """
    # Probe from newest to oldest and take the first flag the compiler accepts.
    for candidate in ("-std=c++17", "-std=c++14", "-std=c++11"):
        if has_flag(compiler, candidate):
            return candidate
    raise RuntimeError(
        "Unsupported compiler -- at least C++11 support is needed!"
    )
29,275
def make_model_vs_obs_plots(
        cfg,
        metadata,
        model_filename,
        obs_filename):
    """
    Make a figure showing four maps and the other shows a scatter plot.

    The four pane image is a latitude vs longitude figures showing:

    * Top left: model
    * Top right: observations
    * Bottom left: model minus observations
    * Bottom right: model over observations

    Parameters
    ----------
    cfg: dict
        the opened global config dictionairy, passed by ESMValTool.
    metadata: dict
        the input files dictionairy
    model_filename: str
        the preprocessed model file.
    obs_filename: str
        the preprocessed observations file.
    """
    filenames = {'model': model_filename, 'obs': obs_filename}
    logger.debug('make_model_vs_obs_plots filenames: %s', filenames)
    # ####
    # Load the data for each layer as a separate cube
    layers = {}
    cubes = {}
    for model_type, input_file in filenames.items():
        logger.debug('loading: \t%s, \t%s', model_type, input_file)
        cube = iris.load_cube(input_file)
        cube = diagtools.bgc_units(cube, metadata[input_file]['short_name'])
        cubes[model_type] = diagtools.make_cube_layer_dict(cube)
        for layer in cubes[model_type]:
            layers[layer] = True

    logger.debug('layers: %s', layers)
    logger.debug('cubes: %s', ', '.join(cubes.keys()))

    # ####
    # load names:
    model = metadata[filenames['model']]['dataset']
    obs = metadata[filenames['obs']]['dataset']

    long_name = cubes['model'][list(layers.keys())[0]].long_name

    # Load image format extention
    image_extention = diagtools.get_image_format(cfg)

    # Make a plot for each layer
    for layer in layers:
        fig = plt.figure()
        fig.set_size_inches(9, 6)

        # Create the cubes
        cube221 = cubes['model'][layer]
        cube222 = cubes['obs'][layer]
        cube223 = cubes['model'][layer] - cubes['obs'][layer]
        cube224 = cubes['model'][layer] / cubes['obs'][layer]

        # create the z axis for plots 2, 3, 4.
        zrange12 = diagtools.get_cube_range([cube221, cube222])
        zrange3 = diagtools.get_cube_range_diff([cube223])

        # Clip the ratio panel to [0.1, 10] so the log scale stays readable.
        cube224.data = np.ma.clip(cube224.data, 0.1, 10.)

        n_points = 12
        linspace12 = np.linspace(
            zrange12[0], zrange12[1], n_points, endpoint=True)
        linspace3 = np.linspace(
            zrange3[0], zrange3[1], n_points, endpoint=True)
        logspace4 = np.logspace(-1., 1., 12, endpoint=True)

        # Add the sub plots to the figure.
        add_map_subplot(221, cube221, linspace12, cmap='viridis', title=model)
        add_map_subplot(
            222, cube222, linspace12, cmap='viridis', title=' '.join([
                obs,
            ]))
        add_map_subplot(
            223,
            cube223,
            linspace3,
            cmap='bwr',
            title=' '.join([model, 'minus', obs]))
        add_map_subplot(
            224,
            cube224,
            logspace4,
            cmap='bwr',
            title=' '.join([model, 'over', obs]),
            log=True)

        # Add overall title
        fig.suptitle(long_name, fontsize=14)

        # Determine image filename:
        fn_list = ['model_vs_obs', long_name, model, obs, str(layer), 'maps']
        path = diagtools.folder(cfg['plot_dir']) + '_'.join(fn_list)
        path = path.replace(' ', '') + image_extention

        # Saving files:
        if cfg['write_plots']:
            logger.info('Saving plots to %s', path)
            plt.savefig(path)

        plt.close()
29,276
def _validate_package_name(package_name: str):
    """Check that the package name matches the pattern r"[a-zA-Z_][a-zA-Z0-9_]*".

    >>> _validate_package_name("this_is_a_good_package_name")
    >>> _validate_package_name("this-is-not")
    Traceback (most recent call last):
    ...
    click.exceptions.BadParameter: this-is-not is not a valid package name.
    """
    # fullmatch against the project-wide package-name regex; raise a click
    # error (rather than ValueError) so CLI callers get proper usage output.
    if re.fullmatch(PublicId.PACKAGE_NAME_REGEX, package_name) is None:
        raise click.BadParameter("{} is not a valid package name.".format(package_name))
29,277
def to_null(string):
    """
    Usage::

        {{ string|to_null}}

    Template filter: render None as the literal string 'null', pass
    everything else through unchanged.
    """
    if string is None:
        return 'null'
    return string
29,278
def get_mtime(path, mustExist=True):
    """ Get mtime of a path, even if it is inside a zipfile """
    # Deprecated shim — callers should use zipio.getmtime directly.
    warnings.warn("Don't use this function", DeprecationWarning)
    try:
        return zipio.getmtime(path)
    except IOError:
        # When mustExist is False, a missing path yields -1 instead of
        # propagating the IOError.
        if not mustExist:
            return -1
        raise
29,279
def get_table_8():
    """Table 8: correction coefficients for dimming control of lighting
    equipment installed in the lighting zones of the main living room.

    (Original docstring was Japanese with a mojibake character; translated.)

    Returns:
        list: three rows of (coefficient, coefficient) pairs.
        NOTE(review): the meaning of the two columns is not stated in the
        source — confirm against the referenced standard's Table 8.
    """
    return [
        (0.9, 1.0),
        (0.9, 1.0),
        (1.0, 1.0),
    ]
29,280
def csv_file_iterator(root_directory):
    """Returns a generator (iterator) of file paths for CSV files in a
    given directory tree (symlinked directories are followed).

    Paths are absolute when root_directory is absolute.
    """
    for root_path, _, files in os.walk(root_directory, followlinks=True):
        for f in files:
            # Bug fix: match the ".csv" extension explicitly; the previous
            # endswith("csv") also matched names like "notacsv".
            if f.endswith(".csv"):
                yield os.path.join(root_path, f)
29,281
def query_epmc(query):
    """Query the Europe PMC REST search API.

    Parameters
    ----------
    query : str
        Search expression appended to the service URL.

    Returns
    -------
    requests.Response on HTTP 200, otherwise None (after emitting a
    warning with the failing status code).
    """
    base_url = "https://www.ebi.ac.uk/europepmc/webservices/rest/search?query="
    page_term = "&pageSize=999"  ## Usual limit is 25
    response = requests.get(base_url + query + page_term)
    if response.status_code == 200:
        return response
    warnings.warn("request to " + str(query) +
                  " has failed to return 200, and has returned " +
                  str(response.status_code))
    return None
29,282
def register_unwinder(locus, unwinder, replace=False):
    """Register unwinder in given locus.

    The unwinder is prepended to the locus's unwinders list. Unwinder
    name should be unique.

    Arguments:
        locus: Either an objfile, progspace, or None (in which case
               the unwinder is registered globally).
        unwinder: An object of a gdb.Unwinder subclass
        replace: If True, replaces existing unwinder with the
                 same name. Otherwise, raises exception if unwinder
                 with the same name already exists.

    Returns:
        Nothing.

    Raises:
        RuntimeError: Unwinder name is not unique
        TypeError: Bad locus type
    """
    if locus is None:
        if gdb.parameter("verbose"):
            gdb.write("Registering global %s unwinder ...\n" % unwinder.name)
        locus = gdb
    elif isinstance(locus, gdb.Objfile) or isinstance(locus, gdb.Progspace):
        if gdb.parameter("verbose"):
            gdb.write("Registering %s unwinder for %s ...\n" %
                      (unwinder.name, locus.filename))
    else:
        raise TypeError("locus should be gdb.Objfile or gdb.Progspace or None")

    # Scan for an existing unwinder of the same name.
    # NOTE(review): when `replace` deletes an entry, the list is mutated
    # while being iterated and `i` is still incremented — this only works
    # because names are unique (at most one match); confirm before reuse.
    i = 0
    for needle in locus.frame_unwinders:
        if needle.name == unwinder.name:
            if replace:
                del locus.frame_unwinders[i]
            else:
                raise RuntimeError("Unwinder %s already exists." % unwinder.name)
        i += 1
    # Prepend so the new unwinder takes priority over older ones.
    locus.frame_unwinders.insert(0, unwinder)
    gdb.invalidate_cached_frames()
29,283
def is_default_array_type(f, type_map=TYPE_MAP):
    """
    Check whether the field is an array and is made up of default types,
    e.g. u8 or s16.

    Returns False for non-array fields; otherwise returns the mapped
    value for the array's fill type (None when the type is not a default).
    """
    if f.type_id != 'array':
        return False
    return type_map.get(f.options['fill'].value, None)
29,284
def delete_police_station_collection():
    """
    Helper function to delete station collection in db.
    """
    # Deletes every PoliceStation document; the return value is whatever
    # mongoengine's QuerySet.delete() yields (presumably the number of
    # documents removed — confirm for the installed mongoengine version).
    result = PoliceStation.objects().delete()
    return result
29,285
def compute_asvspoof_tDCF( asv_target_scores, asv_nontarget_scores, asv_spoof_scores, cm_bonafide_scores, cm_spoof_scores, cost_model, ): """ Compute t-DCF curve as in ASVSpoof2019 competition: Fix ASV threshold to EER point and compute t-DCF curve over thresholds in CM. Code for this is mainly taken from the ASVSpoof2019 competition t-DCF implementation: https://www.asvspoof.org/ Parameters: asv_target_scores (ndarray): Array of ASV target (bonafide) scores (should be high) asv_nontarget_scores (ndarray): Array of ASV nontarget (bonafide) scores (should be low) asv_spoof_scores (ndarray): Array of ASV spoof scores (should be low) cm_bonafide_scores (ndarray): Array of CM target (bonafide) scores (should be high) cm_spoof_scores (ndarray): Array of CM nontarget (spoof) scores (should be low) cost_model (CostParameters): CostParameters object containing cost parameters Returns: tdcf_curve (ndarray): Array of normalized t-DCF values at different CM thresholds cm_thresholds (ndarray): Array of different CM thresholds, corresponding to values in tdcf_curve. """ # Fix ASV FAR and miss to values at EER (with legit samples) asv_frr, asv_far, asv_thresholds = compute_det(asv_target_scores, asv_nontarget_scores) asv_frr_eer, asv_far_eer, asv_eer_threshold = compute_eer(asv_frr, asv_far, asv_thresholds) p_asv_miss = asv_frr_eer p_asv_fa = asv_far_eer # Fraction of spoof samples that were rejected by asv. 
# Note that speaker labels are not used here, just raw number # of spoof samples rejected by asv in general p_asv_spoof_miss = np.sum(asv_spoof_scores < asv_eer_threshold) / len(asv_spoof_scores) # Copy/pasta from t-DCF implementation in ASVSpoof2019 competition # Obtain miss and false alarm rates of CM p_cm_miss, p_cm_fa, cm_thresholds = compute_det(cm_bonafide_scores, cm_spoof_scores) # See ASVSpoof2019 evaluation plan for more information on these C1 = cost_model.p_tar * (cost_model.c_cm_miss - cost_model.c_asv_miss * p_asv_miss) - \ cost_model.p_nontar * cost_model.c_asv_fa * p_asv_fa # Cost for CM false-accept: # How often we have spoof samples * # Cost of accepting a spoof * # how often ASV accepts spoof C2 = cost_model.c_cm_fa * cost_model.p_spoof * (1 - p_asv_spoof_miss) # Obtain t-DCF curve for all thresholds tDCF = C1 * p_cm_miss + C2 * p_cm_fa # Normalized t-DCF tDCF_norm = tDCF if min(C1, C2) == 0: tDCF_norm = tDCF else: tDCF_norm = tDCF / np.minimum(C1, C2) return tDCF_norm, cm_thresholds
29,286
def generate_ul_fragment_patch(e, depth):
    """Variant of generate_fragment_patch that renders a <ul> as a bullet list."""
    indent = ' ' * depth
    bullet_prefix = indent + '* '
    continuation_prefix = indent + ' '
    for child in e:
        # Only <li> children are legal inside a <ul>
        if child.tag != '{http://www.w3.org/1999/xhtml}li':
            raise ValueError("unrecognized element: " + child.tag)
        pairs = generate_fragment_patch(child, extra_rules=EXTRA_RULES_FOR_EE)
        seen_content = False
        for kind, text in dedent_pairs(pairs):
            stripped = text.strip()
            # Skip blank lines until the first real line of the item
            if not seen_content and stripped == '':
                continue
            if seen_content:
                yield kind, continuation_prefix + stripped
            else:
                seen_content = True
                yield kind, bullet_prefix + stripped
29,287
def print_stderr(exc_info: Any, stream: Optional[TextIO] = None) -> None:
    """
    Print the captured stderr of *exc_info* (e.g. a
    ``subprocess.CalledProcessError``) to *stream* (default: ``sys.stderr``).

    Nothing is printed when the object has no ``stderr`` attribute or when
    that attribute is ``None``.

    >>> class ExcInfo(object):
    ...     pass
    >>> exc_info = ExcInfo()
    >>> # test no stderr attribute
    >>> print_stderr(exc_info)
    >>> # test stderr=None
    >>> exc_info.stderr = None
    >>> print_stderr(exc_info)
    >>> # test stderr
    >>> exc_info.stderr = b'test'
    >>> print_stderr(exc_info, stream=sys.stdout)
    STDERR: test
    """
    if stream is None:
        stream = sys.stderr
    stderr = getattr(exc_info, "stderr", None)
    if stderr is not None:
        assert isinstance(stderr, bytes)
        # Decode before printing: printing the bytes object directly would
        # emit its repr (b'STDERR: test') rather than the actual text.
        print("STDERR: " + stderr.decode(errors="replace"), file=stream)
29,288
def all_divisor(n, includeN=True):
    """
    Return all positive divisors of ``n`` in ascending order.

    :param n: positive integer to factor
    :param includeN: when False, ``n`` itself is omitted from the result
    :return: sorted list of divisors

    >>> all_divisor(28)
    [1, 2, 4, 7, 14, 28]
    >>> all_divisor(28, includeN=False)
    [1, 2, 4, 7, 14]
    >>> all_divisor(1, includeN=False)
    []

    Derived from https://qiita.com/LorseKudos/items/9eb560494862c8b4eb56
    """
    lower_divisors, upper_divisors = [], []
    i = 1
    # Trial-divide up to sqrt(n); each divisor i pairs with n // i.
    while i * i <= n:
        if n % i == 0:
            lower_divisors.append(i)
            if i != n // i:
                # Skip the pair for perfect squares so sqrt(n) appears once
                upper_divisors.append(n // i)
        i += 1
    divisors = lower_divisors + upper_divisors[::-1]
    if not includeN:
        # The last entry is always n itself. Popping from the combined list
        # fixes an IndexError the old code raised for n == 1, where
        # upper_divisors was empty.
        divisors.pop()
    return divisors
29,289
def isTrue(value, noneIsFalse=True):
    """
    Return True when <value> is one of the valid string representations
    of True (see TRUE_STRINGS).

    Falsy inputs (None, empty string, ...) give False by default, or None
    when noneIsFalse is False.
    """
    if value:
        return value.lower() in TRUE_STRINGS
    return False if noneIsFalse else None
29,290
def test_MMParser_Attributes_NoChannel():
    """Does MMParser extract acquisition info when no channel identifier is present?"""
    filename = 'Cos7_Microtubules_12_MMStack_Pos1_locResults.dat'
    dsType = 'TestType'

    parser = leb.MMParser()
    parser.parseFilename(filename, dsType)

    ids = parser.dataset.datasetIDs
    assert_equal(ids['acqID'], 12)
    assert_equal(ids['posID'], (1,))
    assert_equal(ids['prefix'], 'Cos7_Microtubules')
    assert_equal(parser.dataset.datasetType, 'TestType')
    assert_equal(ids['channelID'], None)
29,291
def account_factory(app, company_factory):
    """
    Factory fixture for creating fake companies.

    Currently a stub: it accepts the app and company factory fixtures but
    performs no work and returns None.
    """
    return None
29,292
def test_inspec_package_installed(host):
    """Check that the inspec package is installed on the target host."""
    package = host.package(PACKAGE)
    assert package.is_installed
29,293
def read_doc_labels(input_dir):
    """
    Load pickled document labels.

    :param input_dir: path prefix to which "doc_labels.pkl" is appended
        verbatim (so a directory must end with a path separator)
    :return: the unpickled doc labels object
    """
    labels_path = input_dir + "doc_labels.pkl"
    with open(labels_path, 'rb') as handle:
        return pickle.load(handle)
29,294
def which(bin_dir, program):
    """
    Rough equivalent of the shell 'which' command for locating external
    programs. Candidates are tried in order: an explicit path contained in
    *program*, then *bin_dir*, then every directory on the PATH envvar.
    Returns the full path of the first executable match, or None.
    """

    def _runnable(candidate):
        # Must both exist and carry an execute permission bit we can use
        return os.path.exists(candidate) and os.access(candidate, os.X_OK)

    directory, _basename = os.path.split(program)
    if directory:
        # Caller supplied a path component: try it as-is first
        if _runnable(program):
            return program
    else:
        # Bare program name: look next to the current script first
        local_candidate = os.path.join(bin_dir, program)
        if _runnable(local_candidate):
            return local_candidate

    # Fall back to searching the PATH environment variable
    for folder in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(folder, program)
        if _runnable(candidate):
            return candidate

    return None
29,295
def search(catalog_number):
    """
    Top-level `catalog_number` search returning a list of result dicts.
    Catalog numbers are usually unique, but not always — hence the list.
    """
    return [
        result_filter(vars(match)["data"])
        for match in query(catalog_number)
    ]
29,296
def login_user(force_login=False, no_browser=False, no_local_server=False):
    """
    Authenticate the user with Globus Auth and cache the resulting tokens.

    Arguments:
    force_login -- Force a login flow with Globus Auth, even if tokens are valid
    no_browser -- Disable automaically opening a browser for login
    no_local_server -- Disable local server for automatically copying auth code
    """
    client = CfdeClient()
    # Warn when we are not talking to the production service
    if client.service_instance != "prod":
        click.secho(f"Running on service '{client.service_instance}'", fg="yellow")
    try:
        if not client.is_logged_in():
            client.login(force=force_login, no_browser=no_browser,
                         no_local_server=no_local_server)
        click.secho("You are authenticated and your tokens have been cached.", fg='green')
        client.check()
    except exc.CfdeClientException as error:
        click.secho(str(error), fg='red', err=True)
29,297
def fetch_csv_for_date(dt, session=None):
    """
    Fetch the production CSV report for the month containing the given datetime
    and return it as a DataFrame.

    Parameters:
        dt: datetime whose month/day/year selects the report to request.
        session: optional requests.Session to reuse; a fresh one is created
            when omitted.

    Raises an exception when the data for that date is not (yet) available,
    or when the HTTP request itself fails.
    """
    if not session:
        session = requests.session()

    # build the parameters and fill in the requested date
    # TODO find something prettier than string concatenation which works
    # TODO find out whether VIEWSTATE stays valid or needs to be fetched before making the post request
    datestr = dt.strftime("%m/%d/%Y")
    # NOTE(review): __VIEWSTATE / __EVENTVALIDATION below appear to be tokens
    # captured from a real browser session with this ASP.NET page (dated
    # 2019) — presumably they remain accepted by the server, but confirm; if
    # they expire, they would need to be scraped from a fresh GET first.
    parameters = {
        "__EVENTTARGET": "",
        "__EVENTARGUMENT": "",
        "__VIEWSTATE": "/wEPDwUKLTM2ODQwNzIwMw9kFgJmD2QWAgIDD2QWAgIBD2QWCAIBD2QWAmYPZBYCAgMPDxYCHgRUZXh0BTNTaXN0ZW1hIGRlIEluZm9ybWFjacOzbiBkZWwgTWVyY2Fkby4gw4FyZWEgUMO6YmxpY2FkZAIFDzwrABEDAA8WBB4LXyFEYXRhQm91bmRnHgtfIUl0ZW1Db3VudGZkARAWABYAFgAMFCsAAGQCCQ9kFgJmD2QWAgIDD2QWAmYPZBYEZg9kFgYCAQ8PFgQFBE1pbkQGAECJX4pw0wgFBE1heEQGAMBI0Tg61wgPFg4eB01pbkRhdGUGAECJX4pw0wgeDFNlbGVjdGVkRGF0ZQYAwEjRODrXCB4HTWF4RGF0ZQYAwEjRODrXCB4VRW5hYmxlRW1iZWRkZWRTY3JpcHRzZx4cRW5hYmxlRW1iZWRkZWRCYXNlU3R5bGVzaGVldGceElJlc29sdmVkUmVuZGVyTW9kZQspclRlbGVyaWsuV2ViLlVJLlJlbmRlck1vZGUsIFRlbGVyaWsuV2ViLlVJLCBWZXJzaW9uPTIwMTQuMi43MjQuNDUsIEN1bHR1cmU9bmV1dHJhbCwgUHVibGljS2V5VG9rZW49MTIxZmFlNzgxNjViYTNkNAEeF0VuYWJsZUFqYXhTa2luUmVuZGVyaW5naGQWBGYPFCsACA8WEB8ABRMyMDE5LTA5LTE2LTAwLTAwLTAwHhFFbmFibGVBcmlhU3VwcG9ydGgfBmceDUxhYmVsQ3NzQ2xhc3MFB3JpTGFiZWwfCWgfB2ceBFNraW4FB0RlZmF1bHQfCAsrBAFkFggeBVdpZHRoGwAAAAAAAFlABwAAAB4KUmVzaXplTW9kZQspclRlbGVyaWsuV2ViLlVJLlJlc2l6ZU1vZGUsIFRlbGVyaWsuV2ViLlVJLCBWZXJzaW9uPTIwMTQuMi43MjQuNDUsIEN1bHR1cmU9bmV1dHJhbCwgUHVibGljS2V5VG9rZW49MTIxZmFlNzgxNjViYTNkNAAeCENzc0NsYXNzBRFyaVRleHRCb3ggcmlIb3Zlch4EXyFTQgKCAhYIHw0bAAAAAAAAWUAHAAAAHw4LKwUAHw8FEXJpVGV4dEJveCByaUVycm9yHxACggIWCB8NGwAAAAAAAFlABwAAAB8OCysFAB8PBRNyaVRleHRCb3ggcmlGb2N1c2VkHxACggIWBh8NGwAAAAAAAFlABwAAAB8PBRNyaVRleHRCb3ggcmlFbmFibGVkHxACggIWCB8NGwAAAAAAAFlABwAAAB8OCysFAB8PBRRyaVRleHRCb3ggcmlEaXNhYmxlZB8QAoICFggfDRsAAAAAAABZQAcAAAAfDgsrBQAfDwURcmlUZXh0Qm94IHJpRW1wdHkfEAKCAhYIHw0bAAAAAAAAWUAHAAAAHw4LKwUAHw8FEHJpVGV4dEJveCByaVJlYWQfEAKCAmQCAg8PFgQfDwUxUmFkQ2FsZW5kYXJNb250aFZpZXcgUmFkQ2FsZW5kYXJNb250aFZpZXdfRGVmYXVsdB8QAgJkFgRmDw8WAh4MVGFibGVTZWN0aW9uCyopU3lzdGVtLldlYi5VSS5XZWJDb250cm9scy5UYWJsZVJvd1NlY3Rpb24AFgIeBXN0eWxlBQ1kaXNwbGF5Om5vbmU7FgJmDw9kFgIeBXNjb3BlBQNjb2xkAgcPZBYCZg8PFgYeCkNvbHVtblNwYW4CBB8PBQlyY0J1dHRvbnMfEAICZGQCBQ8PFgQFBE1pbkQGAECJX4pw0wgFBE1heEQGAMBI0Tg61wgPFg4fAwYAQIlfinDTCB8EBgDASNE4OtcIHwUGAMBI0Tg61wgfBmcfB2cfCAsrBAEfCWhkFgRmDxQrAAgPFhAfAAUTMjAxOS0wOS0xNi0wMC0wMC0wMB8KaB8GZx8LBQdyaUxhYmVsHwloHwdnHwwFB0RlZmF1bHQfCAsrBAFkFggfDRsAAAAAAABZQAcAAAAfDgsrBQAfDwURcmlUZXh0Qm94IHJpSG92ZXIfEAKCAhYIHw0bAAAAAAAAWUAHAAAAHw4LKwUAHw8FEXJpVGV4dEJveCByaUVycm9yHxACggIWCB8NGwAAAAAAAFlABwAAAB8OCysFAB8PBRNyaVRleHRCb3ggcmlGb2N1c2VkHxACggIWBh8NGwAAAAAAAFlABwAAAB8PBRNyaVRleHRCb3ggcmlFbmFibGVkHxACggIWCB8NGwAAAAAAAFlABwAAAB8OCysFAB8PBRRyaVRleHRCb3ggcmlEaXNhYmxlZB8QAoICFggfDRsAAAAAAABZQAcAAAAfDgsrBQAfDwURcmlUZXh0Qm94IHJpRW1wdHkfEAKCAhYIHw0bAAAAAAAAWUAHAAAAHw4LKwUAHw8FEHJpVGV4dEJveCByaVJlYWQfEAKCAmQCAg8PFgQfDwUxUmFkQ2FsZW5kYXJNb250aFZpZXcgUmFkQ2FsZW5kYXJNb250aFZpZXdfRGVmYXVsdB8QAgJkFgRmDw8WAh8RCysGABYCHxIFDWRpc3BsYXk6bm9uZTsWAmYPD2QWAh8TBQNjb2xkAgcPZBYCZg8PFgYfFAIEHw8FCXJjQnV0dG9ucx8QAgJkZAIHDw8WBAUETWluRAYAQIlfinDTCAUETWF4RAYAwEjRODrXCA8WDh8DBgBAiV+KcNMIHwQGAMBI0Tg61wgfBQYAwEjRODrXCB8GZx8HZx8ICysEAR8JaGQWBGYPFCsACA8WEB8ABRMyMDE5LTA5LTE2LTAwLTAwLTAwHwpoHwZnHwsFB3JpTGFiZWwfCWgfB2cfDAUHRGVmYXVsdB8ICysEAWQWCB8NGwAAAAAAAFlABwAAAB8OCysFAB8PBRFyaVRleHRCb3ggcmlIb3Zlch8QAoICFggfDRsAAAAAAABZQAcAAAAfDgsrBQAfDwURcmlUZXh0Qm94IHJpRXJyb3IfEAKCAhYIHw0bAAAAAAAAWUAHAAAAHw4LKwUAHw8FE3JpVGV4dEJveCByaUZvY3VzZWQfEAKCAhYGHw0bAAAAAAAAWUAHAAAAHw8FE3JpVGV4dEJveCByaUVuYWJsZWQfEAKCAhYIHw0bAAAAAAAAWUAHAAAAHw4LKwUAHw8FFHJpVGV4dEJveCByaURpc2FibGVkHxACggIWCB8NGwAAAAAAAFlABwAAAB8OCysFAB8PBRFyaVRleHRCb3ggcmlFbXB0eR8QAoICFggfDRsAAAAAAABZQAcAAAAfDgsrBQAfDwUQcmlUZXh0Qm94IHJpUmVhZB8QAoICZAICDw8WBB8PBTFSYWRDYWxlbmRhck1vbnRoVmlldyBSYWRDYWxlbmRhck1vbnRoVmlld19EZWZhdWx0HxACAmQWBGYPDxYCHxELKwYAFgIfEgUNZGlzcGxheTpub25lOxYCZg8PZBYCHxMFA2NvbGQCBw9kFgJmDw8WBh8UAgQfDwUJcmNCdXR0b25zHxACAmRkAgEPZBYCAgEPPCsADgIAFCsAAg8WDB8BZx8HZx8GZx8CAgEfCWgfCAsrBAFkFwIFD1NlbGVjdGVkSW5kZXhlcxYABQtFZGl0SW5kZXhlcxYAARYCFgsPAgYUKwAGPCsABQEAFgQeCERhdGFUeXBlGSsCHgRvaW5kAgI8KwAFAQAWBB8VGSsCHxYCAxQrAAUWAh8WAgRkZGQFBmNvbHVtbhQrAAUWAh8WAgVkZGQFB2NvbHVtbjEUKwAFFgIfFgIGZGRkBQdjb2x1bW4yPCsABQEAFgQfFRkrAh8WAgdkZRQrAAALKXlUZWxlcmlrLldlYi5VSS5HcmlkQ2hpbGRMb2FkTW9kZSwgVGVsZXJpay5XZWIuVUksIFZlcnNpb249MjAxNC4yLjcyNC40NSwgQ3VsdHVyZT1uZXV0cmFsLCBQdWJsaWNLZXlUb2tlbj0xMjFmYWU3ODE2NWJhM2Q0ATwrAAcACyl0VGVsZXJpay5XZWIuVUkuR3JpZEVkaXRNb2RlLCBUZWxlcmlrLldlYi5VSSwgVmVyc2lvbj0yMDE0LjIuNzI0LjQ1LCBDdWx0dXJlPW5ldXRyYWwsIFB1YmxpY0tleVRva2VuPTEyMWZhZTc4MTY1YmEzZDQBZGQWDB8BZx4USXNCb3VuZFRvRm9yd2FyZE9ubHloHgVfcWVsdBkpZ1N5c3RlbS5EYXRhLkRhdGFSb3dWaWV3LCBTeXN0ZW0uRGF0YSwgVmVyc2lvbj00LjAuMC4wLCBDdWx0dXJlPW5ldXRyYWwsIFB1YmxpY0tleVRva2VuPWI3N2E1YzU2MTkzNGUwODkeCERhdGFLZXlzFgAeBV8hQ0lTFwAfAgIBZGYWBGYPFCsAA2RkZGQCAQ8WBRQrAAIPFgwfAWcfF2gfGBkrCR8ZFgAfGhcAHwICAWQXAwULXyFJdGVtQ291bnQCAQUIXyFQQ291bnRkBQZfIURTSUMCARYCHgNfc2UWAh4CX2NmZBYGZGRkZGRkFgJnZxYCZg9kFghmD2QWAmYPZBYQZg8PFgQfAAUGJm5ic3A7HgdWaXNpYmxlaGRkAgEPDxYEHwAFBiZuYnNwOx8daGRkAgIPDxYCHwAFEU1lcyBkZSBPcGVyYWNpw7NuZGQCAw8PFgIfAAUcTm8uIGRlIExpcXVpZGFjacOzbiBBc29jaWFkYWRkAgQPDxYCHwAFA0NzdmRkAgUPDxYCHwAFA1BkZmRkAgYPDxYCHwAFBEh0bWxkZAIHDw8WAh8ABRVGZWNoYSBkZSBQdWJsaWNhY2nDs25kZAIBDw8WAh8daGQWAmYPZBYQZg8PFgIfAAUGJm5ic3A7ZGQCAQ8PFgIfAAUGJm5ic3A7ZGQCAg8PFgIfAAUGJm5ic3A7ZGQCAw8PFgIfAAUGJm5ic3A7ZGQCBA8PFgIfAAUGJm5ic3A7ZGQCBQ8PFgIfAAUGJm5ic3A7ZGQCBg8PFgIfAAUGJm5ic3A7ZGQCBw8PFgIfAAUGJm5ic3A7ZGQCAg8PFgIeBF9paWgFATBkFhBmDw8WAh8daGRkAgEPDxYEHwAFBiZuYnNwOx8daGRkAgIPDxYCHwAFD1NlcHRpZW1icmUgMjAxOWRkAgMPDxYCHwAFATBkZAIED2QWAmYPDxYEHg1BbHRlcm5hdGVUZXh0ZR4HVG9vbFRpcGVkZAIFD2QWAmYPDxYEHx9lHyBlZGQCBg9kFgJmDw8WBB8fZR8gZWRkAgcPDxYCHwAFGTE0LzEwLzIwMTkgMDU6MDA6MDEgYS4gbS5kZAIDD2QWAmYPDxYCHx1oZGQCCw8PFggfB2cfCWgfCAsrBAEfBmdkFgRmDw8WBh8JaB8GZx8ICysEAWRkAgEPFCsAAhQrAAIUKwACDxYOHwdnHhNFbmFibGVFbWJlZGRlZFNraW5zZx8JaB4URW5hYmxlUm91bmRlZENvcm5lcnNnHg1FbmFibGVTaGFkb3dzaB8GZx8ICysEAWRkZGRkGAIFHl9fQ29udHJvbHNSZXF1aXJlUG9zdEJhY2tLZXlfXxYMBSdjdGwwMCRDb250ZW50UGxhY2VIb2xkZXIxJEZlY2hhQ29uc3VsdGEFJmN0bDAwJENvbnRlbnRQbGFjZUhvbGRlcjEkRmVjaGFJbmljaWFsBSRjdGwwMCRDb250ZW50UGxhY2VIb2xkZXIxJEZlY2hhRmluYWwFK2N0bDAwJENvbnRlbnRQbGFjZUhvbGRlcjEkRGVzY2FyZ2FyUmVwb3J0ZXMFKmN0bDAwJENvbnRlbnRQbGFjZUhvbGRlcjEkR3JpZFJhZFJlc3VsdGFkbwVAY3RsMDAkQ29udGVudFBsYWNlSG9sZGVyMSRHcmlkUmFkUmVzdWx0YWRvJGN0bDAwJGN0bDA0JGdiY2NvbHVtbgVBY3RsMDAkQ29udGVudFBsYWNlSG9sZGVyMSRHcmlkUmFkUmVzdWx0YWRvJGN0bDAwJGN0bDA0JGdiY2NvbHVtbjEFQWN0bDAwJENvbnRlbnRQbGFjZUhvbGRlcjEkR3JpZFJhZFJlc3VsdGFkbyRjdGwwMCRjdGwwNCRnYmNjb2x1bW4yBSVjdGwwMCRDb250ZW50UGxhY2VIb2xkZXIxJE5vdGlmQXZpc29zBS5jdGwwMCRDb250ZW50UGxhY2VIb2xkZXIxJE5vdGlmQXZpc29zJFhtbFBhbmVsBS9jdGwwMCRDb250ZW50UGxhY2VIb2xkZXIxJE5vdGlmQXZpc29zJFRpdGxlTWVudQUoY3RsMDAkQ29udGVudFBsYWNlSG9sZGVyMSRidG5DZXJyYXJQYW5lbAUfY3RsMDAkQ29udGVudFBsYWNlSG9sZGVyMSRjdGwwMA88KwAMAQhmZHAKRKrT54JyF09yAgRL16DIn42vcyspzOtg86mdF/6Z",
        "__VIEWSTATEGENERATOR": "5B6503FA",
        "__EVENTVALIDATION": "/wEdABPIFpMnlAgkSZvMhE+vOQYa0gsvRcXibJrviW3Dmsx0G+jYKkdCU41GOhiZPOlFyBecIegvepvm5r48BtByTWSkIC/PSPgmtogq3vXUp+YNvsMPaGT0F8ZMY05tsTP7KXY5p77wXhhk2nxxmhBw8yYO6yoq09PpCPpnHhKGI5XXqN0NAXFS9Kcv7U1TgXuCACxTET4yjIt6nVt9qCHIyzbla16U6SvCvrhBDl88f4l+A2AwM+Efhx0eY7z5UUNUDwDoCL/OENuuNNFPCRAmSpT1/nxKmb/ucFs0tCWRV4G4iLScixGy8IhVeNkOJJPR8q4msGM8DGO6o6g/gMszmMRrbD50rXo0f8u6b2IB+RzVpsHxVceaRLBN56ddyVdqKV1RL0jZlTtb1Prpo6YdA7cH301O2Ez19CJOtDoyAWUZ982dVJTM6fLOsQokHcEDIxQ=",
        # The requested date is injected into the calendar widget's client state
        "ctl00_ContentPlaceHolder1_FechaConsulta_ClientState": "{\"minDateStr\":\"" + datestr + "+0:0:0\",\"maxDateStr\":\"" + datestr + "+0:0:0\"}",
        # Simulated click coordinates on the CSV download button
        "ctl00$ContentPlaceHolder1$GridRadResultado$ctl00$ctl04$gbccolumn.x": "10",
        "ctl00$ContentPlaceHolder1$GridRadResultado$ctl00$ctl04$gbccolumn.y": "9",
    }

    # urlencode the data in the weird form which is expected by the API
    # plus signs MUST be contained in the date strings but MAY NOT be contained in the VIEWSTATE...
    data = urllib.parse.urlencode(parameters, quote_via=urllib.parse.quote).replace("%2B0", "+0")
    response = session.post(MX_PRODUCTION_URL, data=data, headers={"Content-Type": "application/x-www-form-urlencoded"})
    response.raise_for_status()

    # API returns normally status 200 but content type text/html when data is missing
    if "Content-Type" not in response.headers or response.headers["Content-Type"] != 'application/octet-stream':
        raise Exception("Error while fetching csv for date {}: No CSV was returned by the API. Probably the data for this date has not yet been published.".format(datestr))

    # skip non-csv data, the header starts with "Sistema"
    csv_str = response.text
    csv_str = csv_str[csv_str.find("\"Sistema\""):]
    # Columns 1 and 2 hold date and time; merge them into one 'instante' column
    return pd.read_csv(StringIO(csv_str), parse_dates={'instante' : [1, 2]}, date_parser=parse_date)
29,298
def get_stats_binary(stats, img, var_img, roi, suffix="", ignore_nan=True, ignore_inf=True,
                     ignore_zerovar=True, min_nvoxels=10, mask=None):
    """
    Get a set of statistics for a 3D image within an roi

    Results are written into the ``stats`` mapping (in place); nothing is
    returned.

    :param stats: dict-like accumulator that receives one entry per statistic
                  (key is the statistic name plus ``suffix``) and "Nvoxels"
    :param img: 3D Numpy array
    :param var_img: 3D Numpy array of per-voxel variances, same shape as img
    :param roi: 3D Numpy array with same dimensions as img and boolean data type
    :param suffix: String appended to every key written into ``stats``
    :param ignore_nan: Voxels which are NaN in img are ignored
    :param ignore_inf: Voxels which are infinite in img are ignored
    :param ignore_zerovar: Voxels with non-positive variance are ignored
    :param min_nvoxels: If the number of voxels in the ROI is less than this number
                        (after removing NaN and infinite values) no value will be
                        recorded (None is stored instead)
    :param mask: Optional additional 3D mask, same shape as roi; only voxels
                 inside it are considered
    :raises ValueError: if img, var_img or mask dimensions do not match roi
    """
    if list(img.shape) != list(roi.shape):
        raise ValueError("Image must have same dimensions as ROI")
    if list(var_img.shape) != list(roi.shape):
        raise ValueError("Variance image must have same dimensions as ROI")
    if mask is not None and list(mask.shape) != list(roi.shape):
        raise ValueError("Mask must have same dimensions as ROI")

    if mask is None:
        # Use builtin bool: the np.int alias was removed in NumPy 1.24 and the
        # mask is only ever combined logically below, so a boolean array is
        # the natural (and equivalent) representation.
        mask = np.ones(roi.shape, dtype=bool)
    if ignore_nan:
        mask = np.logical_and(mask, ~np.isnan(img))
    if ignore_inf:
        mask = np.logical_and(mask, np.isfinite(img))
    if ignore_zerovar:
        mask = np.logical_and(mask, var_img > 0)

    # Only take voxels where at least one of the ROIs has non-zero percentage
    effective_roi = np.logical_and(roi, mask)

    sample_data = img[effective_roi]
    sample_var = var_img[effective_roi]
    # Variance should not be zero but sometimes is - maybe masking?
    # (sample_var is a fancy-indexed copy, so this does not touch var_img)
    sample_var[sample_var == 0] = 1e-6
    nvoxels = len(sample_data)
    stats["Nvoxels" + suffix] = nvoxels
    for stat, fn in STATS_FNS.items():
        if nvoxels < min_nvoxels:
            # Explicitly record None (not NaN/0) when too few voxels remain
            stats[stat + suffix] = None
        else:
            stats[stat + suffix] = fn(sample_data, sample_var)
29,299