Dataset columns: content (string, lengths 35–762k) · sha1 (string, length 40) · id (int64, 0–3.66M)
import io
import logging
from importlib.util import find_spec
from distutils.version import LooseVersion

import cplex
import pandas as pd

logger = logging.getLogger(__name__)


def run_and_read_cplex(n, problem_fn, solution_fn, solver_logfile,
                       solver_options, warmstart=None, store_basis=True):
    """
    Solving function. Reads the linear problem file and passes it to the
    cplex solver. If the solution is successful it returns variable
    solutions and constraint dual values. Cplex must be installed for
    using this function.
    """
    if find_spec('cplex') is None:
        raise ModuleNotFoundError(
            "Optional dependency 'cplex' not found. "
            "Install via 'conda install -c ibmdecisionoptimization cplex' "
            "or 'pip install cplex'")
    _version = LooseVersion(cplex.__version__)
    m = cplex.Cplex()
    if solver_logfile is not None:
        if _version >= "12.10":
            log_file_or_path = open(solver_logfile, "w")
        else:
            log_file_or_path = solver_logfile
        m.set_log_stream(log_file_or_path)
    if solver_options is not None:
        for key, value in solver_options.items():
            param = m.parameters
            for key_layer in key.split("."):
                param = getattr(param, key_layer)
            param.set(value)
    m.read(problem_fn)
    if warmstart:
        m.start.read_basis(warmstart)
    m.solve()
    is_lp = m.problem_type[m.get_problem_type()] == 'LP'
    if solver_logfile is not None:
        if isinstance(log_file_or_path, io.IOBase):
            log_file_or_path.close()
    termination_condition = m.solution.get_status_string()
    if 'optimal' in termination_condition:
        status = 'ok'
        termination_condition = 'optimal'
    else:
        status = 'warning'
    if (status == 'ok') and store_basis and is_lp:
        n.basis_fn = solution_fn.replace('.sol', '.bas')
        try:
            m.solution.basis.write(n.basis_fn)
        except cplex.exceptions.errors.CplexSolverError:
            logger.info('No model basis stored')
            del n.basis_fn
    objective = m.solution.get_objective_value()
    variables_sol = pd.Series(m.solution.get_values(),
                              m.variables.get_names()).pipe(set_int_index)
    if is_lp:
        constraints_dual = pd.Series(
            m.solution.get_dual_values(),
            m.linear_constraints.get_names()).pipe(set_int_index)
    else:
        logger.warning("Shadow prices of MILP couldn't be parsed")
        constraints_dual = pd.Series(
            index=m.linear_constraints.get_names()).pipe(set_int_index)
    del m
    return (status, termination_condition, variables_sol,
            constraints_dual, objective)
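A minimal usage sketch (illustrative only: `network` stands in for the problem-holding object that receives `basis_fn`, and the file names and solver options are hypothetical):

# Hypothetical call; `network` must accept a `basis_fn` attribute.
status, condition, x, duals, objective = run_and_read_cplex(
    network, 'problem.lp', 'problem.sol',
    solver_logfile='cplex.log',
    solver_options={'threads': 4})
if status == 'ok':
    print('objective value:', objective)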
77a9e12509cde2e40287bab49ffb7f226ce3c2e2
3,633,500
import numpy as np

# get_decomposed_audio, calc_audio_rfft and calc_audio_irfft are helpers
# from the same module.


def deharmonize(audio_data, sfreq, shift, high=False, audio_min_freq=200.0,
                decompose="none"):
    """Deharmonize audio data using full signal FFT

    Args:
        audio_data(numpy.ndarray): Audio data in a NumPy array
        sfreq(float): Sampling frequency in Hz
        shift(float): Linear shift in Hz
        high(bool): True if shifting towards high frequencies
        audio_min_freq(float): Low cut off of shift in Hz
        decompose(str): Decomposition type:
            "none" -- No decomposition
            "harmonic" -- Perform operation only on harmonic components
            "percussive" -- Perform operation only on percussive components

    Returns:
        (numpy.ndarray): The deharmonized audio in a NumPy array
    """
    # Do the decomposition (if requested)
    audio_data, leftover = get_decomposed_audio(audio_data, decompose)
    # Calculate single-sided FFT
    fft_data, freq_scale = calc_audio_rfft(audio_data, sfreq)
    # Find the index of the low cut off frequency
    audio_cut_index = np.where(freq_scale >= audio_min_freq)[0][0]
    if not high:
        # Linear shift to low frequencies
        # The upper limit of the cut
        freq_cut_index = np.where(freq_scale >= (audio_min_freq + shift))[0][0]
        # Zero padding to keep the signal the same length
        padding = np.zeros(freq_cut_index - audio_cut_index)
        d_fft = []
        # Cut and append zero-padding to each channel
        for channel in fft_data:
            d_fft.append(np.concatenate([channel[:audio_cut_index],
                                         channel[freq_cut_index:], padding]))
    else:
        # Linear shift to high frequencies
        # The upper limit of the cut and its index
        max_freq = np.max(freq_scale)
        freq_cutoff = max_freq - shift
        freq_cut_index = np.where(freq_scale >= freq_cutoff)[0][0]
        # Zero padding to keep the signal the same length
        padding = np.zeros(len(freq_scale) - freq_cut_index)
        d_fft = []
        # Cut and append zero-padding to each channel
        for channel in fft_data:
            d_fft.append(np.concatenate([channel[:audio_cut_index], padding,
                                         channel[audio_cut_index:freq_cut_index]]))
    d_fft = np.array(d_fft)
    # Mix back the decomposed left-over part
    if decompose in ["percussive", "harmonic"]:
        decomposed, fs = calc_audio_rfft(leftover, sfreq)
        d_fft += decomposed
    # Transform back to time-domain
    deharmonized_audio = calc_audio_irfft(d_fft)
    return deharmonized_audio
4e7f05d42673ca7cb9c468a9de14d35d83166ee0
3,633,501
import numpy as np


def get_max_sushi(m, features, combs, rank_dict):
    """
    Specifically for DTS
    :param m: gpflow model
    :param features: sushi features
    :param combs: pairwise combinations of discrete points to predict on
    :param rank_dict: dictionary from sushi idx to place in ranking
    :return: tuple (index of max sushi, rank)
    """
    y_vals = m.predict_y(combs)[0]
    num_discrete_points = int(np.sqrt(y_vals.shape[0]))
    # shape: (num_discrete_points,)
    soft_copeland = np.mean(
        np.reshape(y_vals, [num_discrete_points, num_discrete_points]), axis=1)
    max_idx = np.argmax(soft_copeland)
    return (max_idx, rank_dict[features[max_idx].data.tobytes()])
a1e214c00db7df45d231e9a3f4aa8da544037dc9
3,633,502
import os
import logging
import time

from flask import request
from PIL import Image, ImageFile


def get_multi_media(shelter_id=0, category_id=2, section='Identification'):
    """
    Get pictures for the shelter sent by Dropzone via a POST request.
    """
    first = True
    ImageFile.LOAD_TRUNCATED_IMAGES = True
    imgwidth = 1280
    shelter = Shelter.query.filter(Shelter.id == shelter_id).first()
    shelter_id_query = db.session.query(Value.name)\
        .join(Association, Property, Shelter)\
        .filter(Shelter.id == shelter_id, Property.attribute_id == 1)\
        .first()
    shelter_id_attribute = shelter_id_query[0]
    if not shelter:
        return 'no such shelter', 400
    for f in request.files:
        if request.files[f] and request.files[f].filename == '':
            return 'No selected file', 400
        if request.files[f] and allowed_file(
                request.files[f].filename,
                conf.ALLOWED_EXTENSIONS_PICTURE.union(
                    conf.ALLOWED_EXTENSIONS_DOCUMENT)):
            path = os.path.join(conf.SHELTERS_PICTURES_SERVER_PATH,
                                str(shelter.id))
            logging.debug('path:' + path)
            if not os.path.exists(path):
                os.makedirs(path)
            file_extension = os.path.splitext(request.files[f].filename)[1]
            exist_main = db.session.query(ShelterPicture.is_main_picture)\
                .filter(ShelterPicture.is_main_picture == True,
                        ShelterPicture.shelter_id == shelter.id).first()
            if not exist_main:
                filename = str(shelter_id_attribute) + '_' + section + "_" + "Facade" + file_extension
                thumbname = str(shelter_id_attribute) + '_' + section + "_" + "Facade_thumbnail" + file_extension
            else:
                filename = str(shelter_id_attribute) + '_' + section + "_" + str(time.time()) + file_extension
                thumbname = str(shelter_id_attribute) + '_' + section + "_" + "thumbnail" + file_extension
                first = False
            im = Image.open(request.files[f])
            if im.size[0] > imgwidth:
                ratio = imgwidth / float(im.size[0])
                hsize = int(float(im.size[1]) * float(ratio))
                try:
                    imagefile = im.resize((imgwidth, hsize), Image.BICUBIC)
                except OSError:
                    return ('Failed to resize picture, please resize to '
                            + str(imgwidth) + ' pixels width'), 400
            else:
                imagefile = im
            imagefile.save(os.path.join(path, filename), "JPEG",
                           quality=70, optimize=True, progressive=True)
            create_thumbnail(filename, thumbname, path)
            # save backup image:
            backup_dir = os.path.join(conf.SHELTERS_PICTURES_BACKUP_PATH,
                                      str(shelter_id_attribute))
            if not os.path.exists(backup_dir):
                os.makedirs(backup_dir)
            im.save(os.path.join(backup_dir, filename), "JPEG",
                    quality=70, optimize=True, progressive=True)
            if category_id:
                new_media = ShelterPicture(
                    file_name=filename, is_main_picture=first,
                    shelter_id=shelter.id, category_id=category_id)
                new_thumbnail = ShelterPicture(
                    file_name=thumbname, is_main_picture=first,
                    shelter_id=shelter.id, category_id=category_id)
                db.session.add_all([new_media, new_thumbnail])
                db.session.commit()
                return str(new_media.id), 200
2d70f9318d062e9b98ff3413fa76c1e683ef20a4
3,633,503
def is_watchman_supported():
    """Return ``True`` if watchman is available."""
    if WIN:
        # for now we aren't bothering with windows sockets
        return False
    try:
        sockpath = get_watchman_sockpath()
        return bool(sockpath)
    except Exception:
        return False
7681ba911456196ad01774e0607bd81872e4b82a
3,633,504
import pandas as pd
from geopy import distance
from geopy.geocoders import Nominatim
from geopy.extra.rate_limiter import RateLimiter

# memoize_and_write is a project-local caching helper.


def geolocation(data_base, year, latitude, longitude, geofunc):
    """
    Function for geolocating points from database and calculating distance
    from them to the given user point.

    >>> 33.5 <= geolocation(pd.DataFrame([["Film1", 2020, "Some info",\
    "Los Angeles California USA"]], columns \
    = ["name", "year", "addinfo", "place"]), 2020, 30, 30,\
    memoize_and_write(RateLimiter(Nominatim(user_agent="Films map").geocode),\
    {})).iloc[0, 4][0] <= 34.5 and -118.5 <= geolocation(pd.DataFrame(\
    [["Film1", 2020, "Some info", "Los Angeles California USA"]], columns \
    = ["name", "year", "addinfo", "place"]), 2020, 30, 30,\
    memoize_and_write(RateLimiter(Nominatim(user_agent="Films map").geocode),\
    {})).iloc[0, 4][1] <= -117.5
    True
    """
    valid_films = data_base[data_base['year'] == year]
    valid_films["points"] = valid_films['place'].apply(geofunc)
    valid_films["distance_to_the_current_point"] = 0
    for i in range(len(valid_films["points"])):
        point = valid_films.iloc[i, 4]
        if point is not None:
            valid_films.iloc[i, 5] = distance.distance(
                point, (latitude, longitude)).miles
        else:
            valid_films.iloc[i, 5] = 10**5
    return valid_films
ecd619b23d0f72c29b164f7fdbf498b41f25c0f0
3,633,505
import os
from typing import Tuple

import cv2


def get_clip_details(file_path: str) -> Tuple[int, float, int, int]:
    """
    Gets a clip's duration, frame rate and dimensions (width, height).

    :param file_path: The absolute path to a clip.
    :return: Duration in seconds, frame rate in FPS and width and height
        in pixels, given as a tuple (duration, frame rate, width, height).
    """
    # Check if clip exists
    if not os.path.isfile(path=file_path):
        raise FileNotFoundError(file_path)
    cap = cv2.VideoCapture(file_path)
    fps = cap.get(cv2.CAP_PROP_FPS)
    frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    return int(frames / fps), fps, width, height
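A quick usage sketch (the clip path is hypothetical):

duration, fps, width, height = get_clip_details('/videos/clip.mp4')
print(f'{duration}s at {fps:.2f} FPS, {width}x{height}px')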
9c49a79cbf93481f68ea34b43b550842598bbca1
3,633,506
import os
import shutil
from contextlib import contextmanager

import nbformat
import wandb


def notebook(live_mock_server, test_dir):
    """This launches a live server, configures a notebook to use it,
    and enables devs to execute arbitrary cells.
    See tests/test_notebooks.py
    """
    @contextmanager
    def notebook_loader(nb_path, kernel_name="wandb_python",
                        save_code=True, **kwargs):
        with open(utils.notebook_path("setup.ipynb")) as f:
            setupnb = nbformat.read(f, as_version=4)
        setupcell = setupnb["cells"][0]
        # Ensure the notebook talks to our mock server
        new_source = setupcell["source"].replace(
            "__WANDB_BASE_URL__",
            live_mock_server.base_url,
        )
        if save_code:
            new_source = new_source.replace("__WANDB_NOTEBOOK_NAME__", nb_path)
        else:
            new_source = new_source.replace("__WANDB_NOTEBOOK_NAME__", "")
        setupcell["source"] = new_source

        nb_path = utils.notebook_path(nb_path)
        shutil.copy(nb_path, os.path.join(os.getcwd(), os.path.basename(nb_path)))
        with open(nb_path) as f:
            nb = nbformat.read(f, as_version=4)
        nb["cells"].insert(0, setupcell)

        try:
            client = utils.WandbNotebookClient(nb, kernel_name=kernel_name)
            with client.setup_kernel(**kwargs):
                # Run setup commands for mocks
                client.execute_cells(-1, store_history=False)
                yield client
        finally:
            with open(os.path.join(os.getcwd(), "notebook.log"), "w") as f:
                f.write(client.all_output_text())
            wandb.termlog("Find debug logs at: %s" % os.getcwd())
            wandb.termlog(client.all_output_text())

    notebook_loader.base_url = live_mock_server.base_url
    return notebook_loader
1930792965a3b3492cefca8f1a583754164b103f
3,633,507
import os.path as op
from collections import OrderedDict


def read_dig_polhemus_isotrak(fname, ch_names=None, unit='m'):
    """Read Polhemus digitizer data from a file.

    Parameters
    ----------
    fname : str
        The filepath of Polhemus ISOTrak formatted file.
        File extension is expected to be '.hsp', '.elp' or '.eeg'.
    ch_names : None | list of str
        The names of the points. This will make the points
        considered as EEG channels. If None, channels will be assumed
        to be HPI if the extension is ``'.elp'``, and extra headshape
        points otherwise.
    unit : 'm' | 'cm' | 'mm'
        Unit of the digitizer file. Polhemus ISOTrak systems data is
        usually exported in meters. Defaults to 'm'.

    Returns
    -------
    montage : instance of DigMontage
        The montage.

    See Also
    --------
    DigMontage
    make_dig_montage
    read_polhemus_fastscan
    read_dig_captrak
    read_dig_dat
    read_dig_egi
    read_dig_fif
    """
    VALID_FILE_EXT = ('.hsp', '.elp', '.eeg')
    _scale = _check_unit_and_get_scaling(unit)

    _, ext = op.splitext(fname)
    _check_option('fname', ext, VALID_FILE_EXT)

    if ext == '.elp':
        data = _read_isotrak_elp_points(fname)
    else:
        # Default case: read points as hsp since it is the most likely scenario
        data = _read_isotrak_hsp_points(fname)

    if _scale != 1:
        data = {key: val * _scale for key, val in data.items()}

    if ch_names is None:
        keyword = 'hpi' if ext == '.elp' else 'hsp'
        data[keyword] = data.pop('points')
    else:
        points = data.pop('points')
        if points.shape[0] == len(ch_names):
            data['ch_pos'] = OrderedDict(zip(ch_names, points))
        else:
            raise ValueError((
                "Length of ``ch_names`` does not match the number of points"
                " in {fname}. Expected ``ch_names`` length {n_points:d},"
                " given {n_chnames:d}"
            ).format(
                fname=fname, n_points=points.shape[0],
                n_chnames=len(ch_names)
            ))

    return make_dig_montage(**data)
d048f1f83844bc591a301c046f3c25494ffd0339
3,633,508
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from scipy.integrate import quad


def gt_comparison_plot(data, mu=None, sig=None, k=3.3e11, x_c=None):
    """
    Generate the comparison Zipf plot for the data.

    Parameters
    ----------
    data : array_like
        Size of each firm, where size is measured by sales, value added,
        number of employees or some other variable.
    mu : float, optional
        Mean of log(data); estimated from the data if not given.
    sig : float, optional
        Standard deviation of log(data); estimated from the data if not given.
    k : float
        Smoothness parameter in gradually truncated lognormal. Default value
        is from equation (5) of Hari, et al. (2007, Physica A)
    x_c : float
        Truncation parameter
    """
    N = len(data)
    log_data = np.log(data)
    sorted_data = sorted(data, reverse=True)  # Largest to smallest
    if mu is None:
        mu = np.mean(log_data)
    if sig is None:
        sig = np.std(log_data)
    norm_rv = stats.norm()

    # === Zipf plot for the log-normal distribution === #
    xvals = lambda i: np.exp(sig * norm_rv.ppf(1 - (i / N)) + mu)

    # rank the companies by data
    rf = [(i + 1, x) for i, x in enumerate(sorted_data)]
    # separate the ranking and the data
    x1, x2 = zip(*rf)

    if x_c is None:
        # === determine the truncation point: x_c === #
        Tol = 1e5
        for i, x in enumerate(sorted_data):
            if abs(x - xvals(float(i + 1))) < Tol:
                break
        x_c = x

    # The probability density function for y <= x_c
    pdf_1 = lambda y: np.exp(- ((y - mu)**2) / (2 * (sig**2)))
    # The probability density function for y > x_c
    pdf_2 = lambda y: np.exp(- ((y - mu)**2) / (2 * (sig**2))) \
        * np.exp(- ((np.exp(y) - x_c) / k)**2)

    # === compute the value of the constant C in the density === #
    # compute the integration for [-inf, log(x_c)]
    lower_1 = mu - 6.0 * sig
    upper_1 = np.log(x_c)
    integral_1, error_1 = quad(pdf_1, lower_1, upper_1)
    # compute the integration for [log(x_c), inf]
    lower_2 = np.log(x_c)
    upper_2 = mu + 6.0 * sig
    integral_2, error_2 = quad(pdf_2, lower_2, upper_2)
    # Finally,
    C = np.sqrt(2 * np.pi) * sig / (integral_1 + integral_2)

    def gt_lognormal(y):
        """
        The Zipf plot for the Gradually Truncated Log-normal distribution.
        See equation (8) of Hari, et al. (2007, Physica A)
        """
        integrand_1 = lambda y: np.exp(- ((y - mu)**2) / (2 * (sig**2)))
        integrand_2 = lambda y: np.exp(- ((y - mu)**2) / (2 * (sig**2))) \
            * np.exp(- ((np.exp(y) - x_c) / k)**2)
        constant = N * C / (np.sqrt(2 * np.pi) * sig)
        if y <= np.log(x_c):
            part_1, error_1 = quad(integrand_1, y, upper_1)
            part_2, error_2 = quad(integrand_2, lower_2, upper_2)
            g_y = constant * (part_1 + part_2)
        else:
            part_3, error_3 = quad(integrand_2, y, upper_2)
            g_y = constant * part_3
        return g_y

    # === equation (8) of Hari, et al. (2007, Physica A) === #
    rank_of_firms = np.empty(N)
    for i in range(N):
        rank_of_firms[i] = gt_lognormal(np.log(sorted_data[i]))

    # == Plots == #
    fig, ax = plt.subplots(figsize=(10, 6.2))
    ax.set_xlabel('rank', fontsize=14)
    ax.set_ylabel('revenue', fontsize=14)
    # === use log(10) scale on both axes === #
    plt.xscale('log')
    plt.yscale('log')
    # === set the range of the horizontal axis === #
    xmin, xmax = 1.0, N * 1.2
    ax.set_xlim(xmin, xmax)
    # === set the range of the vertical axis === #
    # ymin, ymax = 10.0**2, 10.0**14
    # ax.set_ylim(ymin, ymax)

    # Zipf plot for the log-normal distribution
    grid = np.linspace(xmin, xmax, 400)
    ax.plot(grid, [xvals(r) for r in grid], 'k-', lw=2, alpha=0.8,
            label='lognormal')
    # Zipf plot for the empirical data
    ax.plot(x1, x2, 'ko', label='empirical', alpha=0.4)
    # Zipf plot for the gradually truncated log-normal distribution
    ax.plot(rank_of_firms, sorted_data, 'b--', lw=2, alpha=0.8,
            label='G.T. lognormal')
    ax.legend(frameon=False, loc='center')
    plt.show()
64a30fd6bf77edbd89ed3194838e518a1022cc11
3,633,509
from uuid import uuid4


def make_cache_key(instance):
    """Construct a unique cache key for the instance."""
    prefix = '{}:{}:{}'.format(
        instance._meta.app_label,
        instance._meta.model_name,
        instance.pk
    )
    return '{}:{}'.format(prefix, str(uuid4()))
6a83d20c94e26ece5ca3d98ad8cb70dd17fa5ea7
3,633,510
import os
import pickle

from torch import nn

# `config` is a project-local settings module providing MODEL_DIR.


def load_model(filename: str, model_dir=config.MODEL_DIR) -> nn.Module:
    """
    Load the model from a pickle.

    :param filename: name of the file
    :param model_dir: directory in which the file is located
    :return: the unpickled model
    """
    with open(os.path.join(model_dir, filename), 'rb') as f:
        return pickle.load(f)
02bdefdaf5ba24bcd9174ad57cd895a6bcf0a551
3,633,511
def CalculateMediationPEEffect(PointEstimate2, PointEstimate3):
    """Calculate derived effects from simple mediation model.

    Given parameter estimates from a simple mediation model, calculate
    the indirect effect, the total effect and the direct effect.

    Parameters
    ----------
    PointEstimate2 : array
        This is an array of parameter estimates for the regression equation
        of A on B. With no covariates, this will be an array of length 1
    PointEstimate3 : array
        This is an array of parameter estimates for the regression equation
        of A and B on C. With no covariates, this will be an array of length 2

    Returns
    -------
    IE
        The indirect effect, parameter a times b
    TE
        The total effect, which is IE plus DE
    DE
        The direct effect, the effect of A on C, when B is in the model
    a
        The effect of A on B
    b
        The effect of B on C, when A is in the model
    """
    # Indirect Effect
    a = PointEstimate2[0]  # The model of B with A has one beta, at index 0
    b = PointEstimate3[1]  # The model of C with A and B has two betas; b is at index 1
    IE = a * b
    # Direct Effect
    DE = PointEstimate3[0]  # This is c'
    # Total Effect
    TE = DE + IE
    return IE, TE, DE, a, b
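A worked example with hypothetical point estimates (a = 0.5 for the A-on-B model; c' = 0.2 and b = 0.4 for the model of C):

IE, TE, DE, a, b = CalculateMediationPEEffect([0.5], [0.2, 0.4])
print(IE)  # 0.2  (indirect effect a*b = 0.5 * 0.4)
print(TE)  # 0.4  (total effect c' + a*b = 0.2 + 0.2)
print(DE)  # 0.2  (direct effect c')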
d2247985e46a78bc3333983e09a1030fd59f139d
3,633,512
from sqlalchemy import create_engine


def get_engine(db_dir_name, echo=False, path_str=None):
    """Create a SQLAlchemy engine for the SQLite database."""
    if path_str:
        path = path_str
    else:
        path = db_path(db_dir_name)
    engine = create_engine('sqlite:///' + path, echo=echo)
    return engine
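A usage sketch (assuming the project-local `db_path` resolves the directory name, here the hypothetical 'stock_data', to a SQLite file on disk):

from sqlalchemy import text

engine = get_engine('stock_data', echo=True)
with engine.connect() as conn:
    print(conn.execute(text('SELECT 1')).scalar())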
c3f35e7a52619c9ef5e1414efdbebcaebb8b8bd3
3,633,513
def init_websauna(config_uri: str, sanity_check: bool = False,
                  console_app=False, extra_options=None) -> Request:
    """Initialize Websauna WSGI application for a command line oriented script.

    :param config_uri: Path to config INI file

    :param sanity_check: Perform database sanity check on start

    :param console_app: Set true to setup console-mode logging. See
        :func:`setup_console_logging`

    :param extra_options: Passed through bootstrap() and is available as
        :attr:`websauna.system.Initializer.global_options`.

    :return: Faux Request object pointing to a site root, with the registry
        and everything else configured.
    """
    monkey_patch_paster_config_parser()

    if console_app:
        setup_console_logging()
    else:
        setup_logging(config_uri)

    # Paster thinks we are a string
    if sanity_check:
        sanity_check = "true"
    else:
        sanity_check = "false"

    options = {"sanity_check": sanity_check}
    if extra_options:
        options.update(extra_options)

    bootstrap_env = bootstrap(config_uri, options=options)
    app = bootstrap_env["app"]
    initializer = getattr(app, "initializer", None)
    assert initializer is not None, \
        "Configuration did not yield a Websauna application with an Initializer set up"

    pyramid_env = scripting.prepare(registry=app.initializer.config.registry)
    request = pyramid_env["request"]

    # Export application object for testing
    request.app = app

    return pyramid_env["request"]
2d56ce6afa1ede2c69c92422cb360e856d84b007
3,633,514
def ts_glm_ridge_pipeline():
    """
    Return pipeline with the following structure:

        glm \
             -> ridge -> final forecast
        lagged - ridge /

    Where glm - Generalized linear model
    """
    node_glm = PrimaryNode("glm")
    node_lagged = PrimaryNode("lagged")

    node_ridge_1 = SecondaryNode("ridge", nodes_from=[node_lagged])
    node_ridge_2 = SecondaryNode("ridge", nodes_from=[node_ridge_1, node_glm])

    return Pipeline(node_ridge_2)
41a6ce2e280ca6a89ba482715ab9bcdc807c0d29
3,633,515
import os


def read_validation_annotations(validation_dir):
    """Reads validation data annotations."""
    return read_tiny_imagenet_annotations(
        os.path.join(validation_dir, 'val_annotations.txt'),
        os.path.join(validation_dir, 'images'))
6e586a1321db4c13c3ca0d2fb34f92ebfbd62c7b
3,633,516
import numpy as _np


def norm1to1(operator, n_samples=10000, mxBasis="gm", return_list=False):
    """
    Returns the Hermitian 1-to-1 norm of a superoperator represented in
    the standard basis, calculated via Monte-Carlo sampling. The definition
    of the Hermitian 1-to-1 norm can be found in arXiv:1109.6887.
    """
    if mxBasis == 'gm':
        std_operator = change_basis(operator, 'gm', 'std')
    elif mxBasis == 'pp':
        std_operator = change_basis(operator, 'pp', 'std')
    elif mxBasis == 'std':
        std_operator = operator
    else:
        raise ValueError("mxBasis should be 'gm', 'pp' or 'std'!")
    rand_dim = int(_np.sqrt(float(len(std_operator))))
    vals = [norm1(unvec(_np.dot(std_operator, vec(random_hermitian(rand_dim)))))
            for n in range(n_samples)]
    if return_list:
        return vals
    else:
        return max(vals)
f0ad0d6a89ab9c3ec275c5ea5ce1a343d275f625
3,633,517
import numpy as np
from osgeo import gdal


def _load_image_gdal(image_path, value_scale=1.0):
    """
    Use GDAL to read an image, especially remote-sensing multi-spectral images.

    :param image_path: string, image path
    :param value_scale: float, default 1.0. The data array will be divided
        by ``value_scale``.
    :return: array of shape (height, width, band)
    """
    ds = gdal.Open(image_path, gdal.GA_ReadOnly)
    col = ds.RasterXSize
    row = ds.RasterYSize
    band = ds.RasterCount

    img = np.zeros((row, col, band))
    for i in range(band):
        dt = ds.GetRasterBand(i + 1)
        img[:, :, i] = dt.ReadAsArray(0, 0, col, row)
    return img / value_scale
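A usage sketch (the GeoTIFF path and reflectance scale are hypothetical):

# e.g. a scene stored with reflectance values multiplied by 10000
img = _load_image_gdal('scene.tif', value_scale=10000.0)
print(img.shape)   # (height, width, bands), values scaled back down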
94d8ce48f069bc311637237d805fd140604cffc2
3,633,518
def f1_chantler(element, energy, _larch=None, **kws):
    """returns real part of anomalous x-ray scattering factor for
    a selected element and input energy (or array of energies) in eV.
    Data is from the Chantler tables.

    Values returned are in units of electrons

    arguments
    ---------
    element:  atomic number, atomic symbol for element
    energy:   energy or array of energies in eV
    """
    xdb = get_xraydb(_larch)
    return xdb._from_chantler(element, energy, column='f1', **kws)
76b5143e3d9be69ae6f7f8ee246669ee3da9fe08
3,633,519
def rgb_to_hex(red_component=None, green_component=None, blue_component=None):
    """Return color as #rrggbb for the given color tuple or component values.

    Can be called as
    TUPLE VERSION:
        rgb_to_hex(COLORS['white']) or rgb_to_hex((128, 63, 96))
    COMPONENT VERSION
        rgb_to_hex(64, 183, 22)
    """
    if isinstance(red_component, tuple):
        red_component, green_component, blue_component = red_component
    return '#{:02X}{:02X}{:02X}'.format(
        red_component, green_component, blue_component)
37f5216f7f22f82072db6980541a815d87d02ef3
3,633,520
from itertools import filterfalse


def remove(predicate, seq):
    """ Return those items of sequence for which predicate(item) is False

    >>> def iseven(x):
    ...     return x % 2 == 0
    >>> list(remove(iseven, [1, 2, 3, 4]))
    [1, 3]
    """
    return filterfalse(predicate, seq)
2953386f289894e4f5a052d1f67087dcf4631a3a
3,633,521
import numpy as np


def average_coords(coords_list):
    """Calculate average coords

    Parameters
    ----------
    coords_list : list[skrobot.coordinates.Coordinates]

    Returns
    -------
    coords_average : skrobot.coordinates.Coordinates
    """
    q_list = [c.quaternion for c in coords_list]
    q_average = averageQuaternions(q_list)
    pos_average = np.mean([c.worldpos() for c in coords_list], axis=0)
    coords_average = coordinates.Coordinates(pos_average, q_average)
    return coords_average
3a3e59685311042a91295760ec025e12916c34d5
3,633,522
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline


def lowpassfilter(input_vect, width=101):
    """
    Computes a low-pass filter of an input vector.

    This is done while properly handling NaN values, but at the same time
    being reasonably fast.

    Algorithm: provide an input vector of an arbitrary length and compute
    a running NaN median over a box of a given length (width value). The
    running median is NOT computed at every pixel but at steps of 1/4th of
    the width value. This provides a vector of points where the nan-median
    has been computed (ymed) and mean position along the input vector (xmed)
    of valid (non-NaN) pixels. This xmed/ymed combination is then used in a
    spline to recover a vector for all pixel positions within the input
    vector. When there are no valid pixels in a 'width' domain, the value is
    skipped in the creation of xmed and ymed, and the domain is splined over.

    :param input_vect: numpy 1D vector, vector to low pass
    :param width: int, width (box size) of the low pass filter

    :return:
    """
    # indices along input vector
    index = np.arange(len(input_vect))

    # placeholders for x and y position along vector
    xmed = []
    ymed = []

    # loop through the length of the input vector
    for it in np.arange(-width // 2, len(input_vect) + width // 2, width // 4):
        # if we are at the start or end of vector, we go 'off the edge' and
        # define a box that goes beyond it. It will lead to an effectively
        # smaller 'width' value, but will provide a consistent result at edges.
        low_bound = it
        high_bound = it + int(width)

        # deal with lower bounds out of bounds --> set to zero
        if low_bound < 0:
            low_bound = 0
        # deal with upper bounds out of bounds --> set to max
        if high_bound > (len(input_vect) - 1):
            high_bound = (len(input_vect) - 1)

        # get the pixel bounds
        pixval = index[low_bound:high_bound]

        # do not low pass if not enough points
        if len(pixval) < 3:
            continue
        # if no finite value, skip
        if np.max(np.isfinite(input_vect[pixval])) == 0:
            continue

        # mean position along vector and NaN median value of
        # points at those positions
        xmed.append(np.nanmean(pixval))
        ymed.append(np.nanmedian(input_vect[pixval]))

    # convert to arrays
    xmed = np.array(xmed, dtype=float)
    ymed = np.array(ymed, dtype=float)

    # we need at least 3 valid points to return a low-passed vector.
    if len(xmed) < 3:
        return np.zeros_like(input_vect) + np.nan

    # low pass with a mean
    if len(xmed) != len(np.unique(xmed)):
        xmed2 = np.unique(xmed)
        ymed2 = np.zeros_like(xmed2)
        for i in range(len(xmed2)):
            ymed2[i] = np.mean(ymed[xmed == xmed2[i]])
        xmed = xmed2
        ymed = ymed2

    # splining the vector
    spline = InterpolatedUnivariateSpline(xmed, ymed, k=1, ext=3)
    lowpass = spline(np.arange(len(input_vect)))

    # return the low pass filtered input vector
    return lowpass
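A quick sanity check on synthetic data (illustrative only):

import numpy as np
x = np.sin(np.linspace(0, 4 * np.pi, 1000)) + np.random.normal(0, 0.1, 1000)
x[100:140] = np.nan                      # a gap the filter should spline over
baseline = lowpassfilter(x, width=101)
print(np.isnan(baseline).sum())          # 0: the gap is interpolated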
1b71ac8f0a2fc61b0cd3d5ab9e1f218471b3569c
3,633,523
import numpy as np


def n_keywords(data):
    """Return the average number of keywords per record.

    Arguments
    ---------
    data: asreview.data.ASReviewData
        An ASReviewData object with the records.

    Return
    ------
    float:
        The statistic
    """
    if data.keywords is None:
        return None
    return np.average([len(keywords) for keywords in data.keywords])
d2692c1e040cf659dcc6eb1aa7c5718d52a345d8
3,633,524
import tensorflow as tf


def flatten_reshape(variable, name=''):
    """Reshapes high-dimension input to a vector.
    [batch_size, mask_row, mask_col, n_mask] --->
    [batch_size, mask_row * mask_col * n_mask]

    Parameters
    ----------
    variable : a tensorflow variable
    name : a string or None
        An optional name to attach to this layer.

    Examples
    --------
    >>> W_conv2 = weight_variable([5, 5, 100, 32])   # 32 features for each 5x5 patch
    >>> b_conv2 = bias_variable([32])
    >>> W_fc1 = weight_variable([7 * 7 * 32, 256])
    >>> h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    >>> h_pool2 = max_pool_2x2(h_conv2)
    >>> h_pool2.get_shape()[:].as_list() = [batch_size, 7, 7, 32]
    ...   [batch_size, mask_row, mask_col, n_mask]
    >>> h_pool2_flat = tl.layers.flatten_reshape(h_pool2)
    ...   [batch_size, mask_row * mask_col * n_mask]
    >>> h_pool2_flat_drop = tf.nn.dropout(h_pool2_flat, keep_prob)
    """
    dim = 1
    for d in variable.get_shape()[1:].as_list():
        dim *= d
    return tf.reshape(variable, shape=[-1, dim], name=name)
933ec231c15f91122db755f9bac98679cdf9864e
3,633,525
from flask import jsonify


def register_command(data):
    """Remote command registration service.
    This has to be enabled by liquer.commands.enable_remote_registration()

    WARNING: Remote command registration allows deploying arbitrary python
    code on the LiQuer server, therefore it is a HUGE SECURITY RISK and it
    should only be used if other security measures are taken (e.g. on
    localhost or an intranet where only trusted users have access).
    This is on by default on the Jupyter server extension.
    """
    return jsonify(command_registry().register_remote_serialized(data.encode("ascii")))
c9b764e6f2758ad4cc90aced854421d7d83ece9e
3,633,526
import json


def add_noise(dgen_list, noise):
    """Add noise decorators to the DataGenerators from `dgen_list` list.

    Parameters
    ----------
    dgen_list : list of IDataGenerator
        A list of DataGenerators to be decorated.
    noise : list of dict or dict or None
        Noise configuration. If dict then `noise` will be simply passed
        to the `DataNoise` constructor. If list of dict then multiple
        `DataNoise` decorators will be applied to the `dgen_list`, one for
        each dict in `noise`. If None then `dgen_list` will not be modified.

    Returns
    -------
    list of IDataGenerator
        DataGenerators from `dgen_list` decorated by `DataNoise`.

    See Also
    --------
    DataNoise
    """
    if noise is None:
        return dgen_list

    if isinstance(noise, list):
        for n in noise:
            dgen_list = add_noise(dgen_list, n)
        return dgen_list

    LOGGER.info(
        "Adding noise: %s", json.dumps(noise, sort_keys=True, indent=4)
    )

    return [DataNoise(x, **noise) for x in dgen_list]
f716211ad9f1d35e66845e0887aafa5955575b4b
3,633,527
import json
import random

import plotly
from flask import render_template
from plotly.graph_objs import Bar, Scatter
from wordcloud import WordCloud


def index():
    """
    Web app index page. It renders two figures from the data:
    one bar chart and one word cloud.
    """
    # extract data needed for visuals
    genre_counts = df.groupby('genre').count()['message']
    genre_names = list(genre_counts.index)

    # Plot message cloud; this part needs optimization, it is very slow
    allwords = " ".join(df['message'])
    wordcloud = WordCloud().generate(allwords)
    alltexts = list(wordcloud.words_.keys())[:30]
    num_words = 30
    colors = [plotly.colors.DEFAULT_PLOTLY_COLORS[random.randrange(1, 10)]
              for i in range(num_words)]
    weights = [int(num * 60) for num in list(wordcloud.words_.values())[:30]]

    # create visuals
    graphs = [
        {
            'data': [
                Bar(
                    x=genre_names,
                    y=genre_counts
                )
            ],
            'layout': {
                'title': 'Distribution of Message Genres',
                'yaxis': {'title': "Count"},
                'xaxis': {'title': "Genre"},
                'width': '500',
                'height': '600',
                'automargin': 'True',
                'titlefont': {'size': '30'}
            }
        },
        {
            'data': [
                Scatter(
                    x=[random.random() for i in range(num_words)],
                    y=[random.random() for i in range(num_words)],
                    mode='text',
                    text=alltexts,
                    marker={'opacity': 0.3},
                    textfont={'size': weights, 'color': colors}
                )
            ],
            'layout': {
                'title': 'Message Cloud',
                'width': '600',
                'height': '600',
                'automargin': 'True',
                'titlefont': {'size': '30'},
                'xaxis': {'showgrid': False, 'showticklabels': False,
                          'zeroline': False},
                'yaxis': {'showgrid': False, 'showticklabels': False,
                          'zeroline': False}
            }
        }
    ]

    # encode plotly graphs in JSON
    ids = ["graph-{}".format(i) for i, _ in enumerate(graphs)]
    graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)

    # render web page with plotly graphs
    return render_template('master.html', ids=ids, graphJSON=graphJSON)
10509856618e9f09c273d20b617e948448623a8e
3,633,528
from pathlib import Path

import pandas as pd


def open(table_file: str, table_map_file: str = None) -> pd.DataFrame:
    """
    Opens a dynamo table file, returning a DataFrame.

    :param table_file: path to the dynamo table file
    :param table_map_file: optional path to a table map file
    :return: dataframe
    """
    # Read into dataframe
    df = pd.read_csv(table_file, header=None, delim_whitespace=True)
    n_cols = df.shape[1]
    if n_cols <= len(COLUMN_NAMES):
        column_names = COLUMN_NAMES[0:n_cols]
    else:
        # In case table has extra columns
        extra_columns_needed = n_cols - len(COLUMN_NAMES)
        column_names = list(COLUMN_NAMES) + ['' for x in range(extra_columns_needed)]
    df.columns = column_names

    # Coerce to numeric where possible (the daxis column sometimes has
    # complex values, which are left untouched)
    df = df.apply(pd.to_numeric, errors='ignore')

    # Add table map info
    if table_map_file is not None and Path(table_map_file).exists():
        table_map_dict = table_map_read(table_map_file)
        tomo_file = [table_map_dict[tomo_idx] for tomo_idx in df['tomo']]
        df['tomo_file'] = tomo_file

    return df
7dca5cfc2c3c6201730b99db13680bed81b51a4b
3,633,529
def _evaluate_tags(pcluster_config, preferred_tags=None):
    """
    Merge given tags with the ones defined in the configuration file and
    convert them into the Key/Value format.

    :param pcluster_config: PclusterConfig, it can contain tags
    :param preferred_tags: tags that must take precedence over the configured ones
    :return: a merge of the tags + version tag
    """
    tags = {}

    configured_tags = pcluster_config.get_section("cluster").get_param_value("tags")
    if configured_tags:
        tags.update(configured_tags)

    if preferred_tags:
        # add tags from command line parameter, by overriding configured ones
        tags.update(preferred_tags)

    # add pcluster version
    tags["Version"] = utils.get_installed_version()

    # convert to CFN tags
    return [{"Key": tag, "Value": tags[tag]} for tag in tags]
d23e4c29b463736fa23a65c977e16734b235c4c9
3,633,530
def dict_view(request):
    """Dictionary management view."""
    return render_mako_context(request, '/system_permission/dictmgr.html')
56b8b80fb56c032f319f23754c3926cadf74ddc6
3,633,531
import numpy as np


def rot_ETA(eta: float) -> np.ndarray:
    """Return rotation matrix corresponding to the eta axis.

    Parameters
    ----------
    eta: float
        eta axis angle

    Returns
    -------
    np.ndarray
        Rotation matrix as a NumPy array.
    """
    return z_rotation(-eta)
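A small check of the convention (this assumes `z_rotation(theta)` is the standard counter-clockwise rotation about z, so `rot_ETA(eta)` undoes a +eta rotation):

import numpy as np
v = np.array([1.0, 0.0, 0.0])
print(rot_ETA(np.pi / 2) @ v)   # approximately [0, -1, 0]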
07e3c42f40bba0d73b4718eaa2d56b17e3dffd8e
3,633,532
def show_lists(chat_id):
    """
    Show all the lists of the given user.

    :param chat_id: identifier of the user's chat
    :return: dict with the rendered message text and parse mode
    """
    lists = notelistmodel.find_all_lists(mongodb, chat_id)
    return {"text": monkeyview.lists_view(lists), "parse_mode": "Markdown"}
b72df596357e0174294589eab413fa1f8f8da840
3,633,533
import logging

import dtoolcore


def test_params_from_fw_spec(tempdir, files, dtool_config,
                             default_create_dataset_task_spec,
                             default_freeze_dataset_task_spec):
    """Will create dataset with some task parameters pulled from fw_spec."""
    logger = logging.getLogger(__name__)

    t_spec = {
        **default_create_dataset_task_spec,
        'creator_username': {'key': 'deeply->deeply->nested->username'},
    }
    t = CreateDatasetTask(**t_spec)
    fw_action = t.run_task({
        'deeply': {
            'deeply': {
                'nested': {
                    'username': 'unittest'
                }
            }
        }
    })
    logger.debug("FWAction:")
    _log_nested_dict(logger.debug, fw_action.as_dict())
    uri = fw_action.stored_data['uri']

    logger.debug("Instantiate FreezeDatasetTask with '{}'".format(
        {'uri': uri, **default_freeze_dataset_task_spec}))
    t = FreezeDatasetTask(uri=uri, **default_freeze_dataset_task_spec)
    fw_action = t.run_task({})
    logger.debug("FWAction:")
    _log_nested_dict(logger.debug, fw_action.as_dict())
    uri = fw_action.stored_data['uri']

    with TemporaryOSEnviron(_read_json(files['dtool_config_path'])):
        ret = verify(True, uri)
    assert ret

    # check creator_username
    dataset = dtoolcore.DataSet.from_uri(uri)
    assert dataset._admin_metadata["creator_username"] == 'unittest'

    return uri
16bff2b0c79b332211283c53e9ca6630b0ce809a
3,633,534
import typing as T
from importlib import import_module


def import_string(path: str) -> T.Any:
    """
    Import a dotted Python path to a class or other module attribute.
    ``import_string('foo.bar.MyClass')`` will return the class ``MyClass``
    from the package ``foo.bar``.
    """
    name, attr = path.rsplit('.', 1)
    return getattr(import_module(name), attr)
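For example, resolving a standard-library attribute from its dotted path:

OrderedDictCls = import_string('collections.OrderedDict')
print(OrderedDictCls([('a', 1)]))   # OrderedDict([('a', 1)])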
3475a6081d64f656ec2c50b74f9314d519d18dee
3,633,535
# T, auth, s3, s3db, session, redirect, URL, Storage, c and
# s3_rest_controller are injected by the web2py / Sahana Eden environment.
def inbox():
    """
    RESTful CRUD controller for the Inbox
    - all Inbound Messages are visible here
    """
    if not auth.s3_logged_in():
        session.error = T("Requires Login!")
        redirect(URL(c="default", f="user",
                     args="login",
                     ))

    table = s3db.msg_message
    s3.filter = (table.inbound == True)
    table.inbound.readable = False

    tablename = "msg_message"
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Message Details"),
        title_list = T("InBox"),
        label_list_button = T("View InBox"),
        label_delete_button = T("Delete Message"),
        msg_record_deleted = T("Message deleted"),
        msg_list_empty = T("No Messages currently in InBox"),
    )

    s3db.configure(tablename,
                   # Permissions-based
                   #deletable = False,
                   editable = False,
                   insertable = False,
                   list_fields = ["date",
                                  "channel_id",
                                  "from_address",
                                  "body",
                                  ],
                   )

    return s3_rest_controller(c, "message")
486640ebdb1a142f22ac146479fa36c289a8e1be
3,633,536
import numpy as np


def glorot_uniform_sigm(shape):
    """
    Glorot style weight initializer for sigmoid activations.

    Like keras.initializations.glorot_uniform(), but with uniform random
    interval like in the Deeplearning.net tutorials. They claim that the
    initialization random interval should be

      * +/- sqrt(6 / (fan_in + fan_out)) (like Keras' glorot_uniform())
        when tanh activations are used,
      * +/- 4 sqrt(6 / (fan_in + fan_out)) when sigmoid activations are used.

    See: http://deeplearning.net/tutorial/mlp.html#going-from-logistic-regression-to-mlp
    """
    fan_in, fan_out = get_fans(shape)
    s = 4. * np.sqrt(6. / (fan_in + fan_out))
    return uniform(shape, s)
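For instance, a 784x256 dense layer gets a sigmoid-adjusted Glorot bound of 4*sqrt(6/(784+256)) ~ 0.304, i.e. weights drawn from U(-0.304, 0.304):

import numpy as np
print(4. * np.sqrt(6. / (784 + 256)))   # ~0.3038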
4cd3a3f40e276aba5b16726af4ce28adefe25748
3,633,537
def pytest_report_header(config):
    """Display cachedir with --cache-show and if non-default."""
    if config.option.verbose > 0 or config.getini("cache_dir") != ".pytest_cache":
        cachedir = config.cache._cachedir
        # TODO: evaluate generating upward relative paths
        # starting with .., ../.. if sensible
        try:
            displaypath = cachedir.relative_to(config.rootdir)
        except ValueError:
            displaypath = cachedir
        return "cachedir: {}".format(displaypath)
d859b89b11015623a9dd2bc159c855b896d27ee7
3,633,538
def params(kernels, time, target, target_frame, observer, corr):
    """Input parameters from WGC API example."""
    return {
        'kernels': kernels,
        'times': time,
        'target': target,
        'target_frame': target_frame,
        'observer': observer,
        'aberration_correction': corr,
    }
d030ad459b294a268c8bc3a851a32495dcbf5c02
3,633,539
import re
import os

import numpy as np
from joblib import Parallel, delayed


def extract_thresholds_of_intensity_criteria(data_path, sub_ses_test, patch_side, new_spacing,
                                             out_folder, n_parallel_jobs, overlapping, prints=True):
    """This function computes the threshold to use for the extraction of the vessel-like negative
    patches (i.e. the negative patches that roughly have the same average intensity of the positive
    patches and include vessels)

    Args:
        data_path (str): path to BIDS dataset
        sub_ses_test (list): sub_ses of the test set; we use it to take only the sub_ses of the training set
        patch_side (int): side of cubic patches
        new_spacing (list): desired voxel spacing
        out_folder (str): path to output folder; during ds creation it is where we create the output
            dataset; at inference, it is where we save segmentation outputs
        n_parallel_jobs (int): number of jobs to run in parallel
        overlapping (float): amount of overlapping between patches in sliding-window approach
        prints (bool): whether to print numerical thresholds that were found; defaults to True

    Returns:
        intensity_thresholds (tuple): it contains the values to use for the extraction of the
            vessel-like negative patches (i.e. negative patches that have an overall intensity
            that resembles the one of positive patches)
    """
    regexp_sub = re.compile(r'sub')  # create a substring template to match
    ext_gz = '.gz'  # type: str  # set zipped files extension

    # create new input lists to create positive patches in parallel
    all_subdirs = []
    all_files = []
    for subdir, dirs, files in os.walk(data_path):
        for file in files:
            ext = os.path.splitext(file)[-1].lower()  # get the file extension
            if regexp_sub.search(file) and ext == ext_gz and "Lesion" in file \
                    and "registrations" not in subdir and "Treated" not in file:
                sub = re.findall(r"sub-\d+", subdir)[0]
                ses = re.findall(r"ses-\w{6}\d+", subdir)[0]  # extract ses
                sub_ses = "{}_{}".format(sub, ses)
                # only use training sub_ses, otherwise we might introduce a bias towards
                # the intensities of the aneurysms in the test set
                if sub_ses not in sub_ses_test:
                    all_subdirs.append(subdir)
                    all_files.append(file)

    assert all_subdirs and all_files, "Input lists must be non-empty"

    out_list = Parallel(n_jobs=n_parallel_jobs, backend='loky')(
        delayed(retrieve_intensity_conditions_one_sub)(all_subdirs[idx], all_files[idx], data_path,
                                                       new_spacing, patch_side, out_folder, overlapping)
        for idx in range(len(all_subdirs)))

    out_list = [x for x in out_list if x]  # remove None values from list if present
    out_list_np = np.asarray(out_list)  # type: np.ndarray  # convert from list to numpy array

    # extract local intensity thresholds of vessel mni volume
    q5_local_vessel_mni, q7_local_vessel_mni = np.percentile(out_list_np[:, 0], [5, 7])
    # extract global intensity thresholds of vessel mni volume
    q5_global_vessel_mni, q7_global_vessel_mni = np.percentile(out_list_np[:, 1], [5, 7])
    # extract local intensity thresholds of tof-bet-n4bfc volume
    q5_local_tof_bet, q7_local_tof_bet = np.percentile(out_list_np[:, 2], [5, 7])
    # extract global intensity thresholds of tof-bet-n4bfc volume
    q5_global_tof_bet, q7_global_tof_bet = np.percentile(out_list_np[:, 3], [5, 7])
    # extract threshold related to number of non-zero voxels in vessel-mni patch
    q5_nz_vessel_mni = np.percentile(out_list_np[:, 4], [5])[0]

    if prints:
        print("\nIntensity thresholds with patch_side={}".format(patch_side))
        print("\nMean-Max local intensity ratio in vesselMNI positive patches:")
        print("5th percentile = {}".format(q5_local_vessel_mni))
        print("7th percentile = {}".format(q7_local_vessel_mni))
        print("\nMean-Max global intensity ratio in vesselMNI positive patches:")
        print("5th percentile = {}".format(q5_global_vessel_mni))
        print("7th percentile = {}".format(q7_global_vessel_mni))
        print("\nMean-Max local intensity ratio in bet TOF positive patches:")
        print("5th percentile = {}".format(q5_local_tof_bet))
        print("7th percentile = {}".format(q7_local_tof_bet))
        print("\nMean-Max global intensity ratio in bet TOF positive patches:")
        print("5th percentile = {}".format(q5_global_tof_bet))
        print("7th percentile = {}".format(q7_global_tof_bet))
        print("\nNumber of non-zero voxels in vesselMNI positive patches:")
        print("5th percentile = {}".format(q5_nz_vessel_mni))

    intensity_thresholds = (q5_local_vessel_mni, q5_global_vessel_mni,
                            q5_local_tof_bet, q5_global_tof_bet, q5_nz_vessel_mni)
    return intensity_thresholds
8da250d5c25b338d19d1e68b1b27b12a8221b467
3,633,540
def group_activity_list(
    group_id: str,
    limit: int,
    offset: int,
    include_hidden_activity: bool = False,
) -> list[Activity]:
    """Return the given group's public activity stream.

    Returns activities where the given group or one of its datasets is the
    object of the activity, e.g.:

    "{USER} updated the group {GROUP}"
    "{USER} updated the dataset {DATASET}"
    etc.
    """
    q = _group_activity_query(group_id, include_hidden_activity)
    return _activities_at_offset(q, limit, offset)
aaab03202571e3eb562fc3b8ec663ac58cc69ab0
3,633,541
import numpy as np


def rotMatrixfromXYZ(station, mode='LBA'):
    """Return a rotation matrix which will rotate a station to (0, 0, 1)."""
    loc = station.antField.location[mode]
    longRotMat = rotationMatrix(0., 0., -1. * np.arctan(loc[1] / loc[0]))
    loc0 = np.dot(longRotMat, loc)
    latRotMat = rotationMatrix(0., np.arctan(loc0[0, 2] / loc0[0, 0]), 0.)
    return np.dot(latRotMat, longRotMat)
a6df1bc5bc0cd8752cbd71c025a1b5208d1b8a34
3,633,542
import logging

from geopy import geocoders


def geo_info_for_geo_name(
    geo_name: str, username: str = CONFIG["geonames_username"]
) -> GeoInfo:
    """Get geo information (latitude and longitude) for a given region name."""
    logging.info("Decoding latitude and longitude of '{}'...".format(geo_name))
    gn = geocoders.GeoNames(username=username, timeout=10)
    location = gn.geocode(geo_name)
    return GeoInfo(geo_name=geo_name, lat=location.latitude, long=location.longitude)
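A usage sketch (this needs a registered GeoNames account; the username here is a placeholder):

info = geo_info_for_geo_name("Berlin", username="your_geonames_user")
print(info.lat, info.long)   # roughly 52.52, 13.40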
e6827ae4b0e3297311dd16fc3a98865bcc7fc252
3,633,543
import tensorflow as tf


def char_accuracy(predictions, targets, rej_char, streaming=False):
    """Computes character level accuracy.

    Both predictions and targets should have the same shape
    [batch_size x seq_length].

    Args:
        predictions: predicted character ids.
        targets: ground truth character ids.
        rej_char: the character id used to mark an empty element
            (end of sequence).
        streaming: if True, uses the streaming mean from the slim.metric
            module.

    Returns:
        an update_ops for execution and a value tensor whose value on
        evaluation returns the total character accuracy.
    """
    with tf.variable_scope("CharAccuracy"):
        predictions.get_shape().assert_is_compatible_with(targets.get_shape())
        targets = tf.to_int32(targets)
        const_rej_char = tf.constant(rej_char, shape=targets.get_shape())
        # Mask out positions holding the reject/padding character
        weights = tf.to_float(tf.not_equal(targets, const_rej_char))
        correct_chars = tf.to_float(tf.equal(predictions, targets))
        accuracy_per_example = tf.div(
            tf.reduce_sum(tf.multiply(correct_chars, weights), 1),
            tf.reduce_sum(weights, 1))
        if streaming:
            return tf.contrib.metrics.streaming_mean(accuracy_per_example)
        else:
            return tf.reduce_mean(accuracy_per_example)
caccf28fab0aa4127da7b30d95f380452b713974
3,633,544
import json


def read_cities_db(fname="world-cities_json.json"):
    """Read a database file containing names of cities from different countries.

    Source: https://pkgstore.datahub.io/core/world-cities/world-cities_json/data/5b3dd46ad10990bca47b04b4739a02ba/world-cities_json.json
    """
    with open(fname) as f:
        world_cities = json.load(f)

    country_city_pairs = set()
    processed_sub_countries = []
    for city_record in world_cities:
        country = city_record['country'].lower()
        if country == "south korea":
            # See my comment above regarding the special handling of South Korea
            country = "korea"
        city = city_record['name'].lower()
        country_city_pairs.add((country, city))
        subcountry = (city_record['subcountry'].lower()
                      if city_record['subcountry'] is not None else None)
        if subcountry is not None and subcountry not in processed_sub_countries:
            # Add (country, subcountry)
            processed_sub_countries.append(subcountry)
            country_city_pairs.add((country, subcountry))

    # People use these abbreviations, so we can't ignore them
    country_city_pairs.add(('united states', 'usa'))
    country_city_pairs.add(('united states', 'u.s.a.'))
    country_city_pairs.add(('united kingdom', 'uk'))
    country_city_pairs.add(('united kingdom', 'u.k.'))
    country_city_pairs.add(('china', 'prc'))
    country_city_pairs.add(('china', 'p.r.c.'))

    # Sort by longest city name first, because later we want to do long-string-match
    country_city_pairs = sorted(country_city_pairs,
                                key=lambda pair: len(pair[1]), reverse=True)
    return country_city_pairs
1edb970e329e7781cebb61853a13a6f45d349250
3,633,545
import tensorflow as tf


def ConcatWith(x, dim, tensor):
    """
    A wrapper around ``tf.concat`` (legacy TF signature) to support
    ``LinearWrap``.

    :param x: the input tensor
    :param dim: the dimension along which to concatenate
    :param tensor: a tensor or list of tensors to concatenate with x.
        x will be at the beginning.
    :return: tf.concat(dim, [x] + [tensor])
    """
    if type(tensor) != list:
        tensor = [tensor]
    return tf.concat(dim, [x] + tensor)
8d15e008f8e2ec70c2d875a9bb5dcb1786d011ed
3,633,546
import numpy as np
import pandas as pd


def strip_df(data: pd.DataFrame) -> np.ndarray:
    """Strip dataframe of all index levels so it only contains values.

    Parameters
    ----------
    data : :class:`~pandas.DataFrame`
        input dataframe

    Returns
    -------
    :class:`~numpy.ndarray`
        array of stripped dataframe without index
    """
    return np.array(data.reset_index(drop=True).values)
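A small demonstration:

import pandas as pd
df = pd.DataFrame({'a': [1, 2]}, index=['x', 'y'])
print(strip_df(df))   # [[1] [2]] -- the 'x'/'y' index labels are gone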
3cd04b6b6cf144ac63854fbbab5ffa3784ccd707
3,633,547
def isRef(obj):
    """Return the value under '_REF' if obj is a dict containing that key,
    otherwise False."""
    if isinstance(obj, dict) and '_REF' in obj:
        return obj['_REF']
    else:
        return False
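For example:

print(isRef({'_REF': 'node-42'}))   # 'node-42'
print(isRef({'id': 1}))             # False
print(isRef('_REF'))                # False (not a dict)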
0f1ad92cfafff5dcbc9e90e8544956b05c3452ec
3,633,548
import logging
import pickle
from collections import defaultdict
from pathlib import Path
from typing import Dict, Tuple

logger = logging.getLogger(__name__)


def collect_genes_with_confidence(
    query: str,
    *,
    cache_file: Path = None,
    client: Neo4jClient,
) -> Dict[Tuple[str, str], Dict[str, Tuple[float, int]]]:
    """Collect gene sets based on the given query.

    Parameters
    ----------
    query:
        A cypher query
    cache_file :
        Path used to cache the query results as a pickle.
    client :
        The Neo4j client.

    Returns
    -------
    :
        A dictionary whose keys are 2-tuples of CURIE and name of each
        queried item and whose values are dicts of HGNC gene identifiers
        (as strings) pointing to the maximum belief and evidence count
        associated with the given HGNC gene.
    """
    if cache_file.as_posix() in GENE_SET_CACHE:
        logger.info("Returning %s from in-memory cache" % cache_file.as_posix())
        return GENE_SET_CACHE[cache_file.as_posix()]
    elif cache_file.exists():
        logger.info("Loading %s" % cache_file.as_posix())
        with open(cache_file, "rb") as fh:
            curie_to_hgnc_ids = pickle.load(fh)
    else:
        logger.info(
            "Running new query and caching results into %s" % cache_file.as_posix()
        )
        curie_to_hgnc_ids = defaultdict(dict)
        max_beliefs = {}
        max_ev_counts = {}
        for result in client.query_tx(query):
            curie = result[0]
            name = result[1]
            hgnc_ids = set()
            for hgnc_curie, belief, ev_count in result[2]:
                hgnc_id = (
                    hgnc_curie.lower().replace("hgnc:", "")
                    if hgnc_curie.lower().startswith("hgnc:")
                    else hgnc_curie.lower()
                )
                max_beliefs[(curie, name, hgnc_id)] = max(
                    belief, max_beliefs.get((curie, name, hgnc_id), 0.0)
                )
                max_ev_counts[(curie, name, hgnc_id)] = max(
                    ev_count, max_ev_counts.get((curie, name, hgnc_id), 0)
                )
                hgnc_ids.add(hgnc_id)
            curie_to_hgnc_ids[(curie, name)] = {
                hgnc_id: (
                    max_beliefs[(curie, name, hgnc_id)],
                    max_ev_counts[(curie, name, hgnc_id)],
                )
                for hgnc_id in hgnc_ids
            }
        curie_to_hgnc_ids = dict(curie_to_hgnc_ids)
        with open(cache_file, "wb") as fh:
            pickle.dump(curie_to_hgnc_ids, fh)

    GENE_SET_CACHE[cache_file.as_posix()] = curie_to_hgnc_ids
    return curie_to_hgnc_ids
18f949c1613f05b242a27dff7d16722af4d6bbf6
3,633,549
from copy import copy


def intervals_disjoint(intvs):
    """
    Given a list of complex intervals, check whether they are pairwise
    disjoint.

    EXAMPLES::

        sage: from sage.rings.polynomial.complex_roots import intervals_disjoint
        sage: a = CIF(RIF(0, 3), 0)
        sage: b = CIF(0, RIF(1, 3))
        sage: c = CIF(RIF(1, 2), RIF(1, 2))
        sage: d = CIF(RIF(2, 3), RIF(2, 3))
        sage: intervals_disjoint([a,b,c,d])
        False
        sage: d2 = CIF(RIF(2, 3), RIF(2.001, 3))
        sage: intervals_disjoint([a,b,c,d2])
        True
    """
    # This may be quadratic in perverse cases, but will take only
    # n log(n) time in typical cases.
    intvs = sorted(copy(intvs))

    column = []
    prev_real = None

    def column_disjoint():
        column.sort()

        row = []
        prev_imag = None

        def row_disjoint():
            for a in range(len(row)):
                for b in range(a + 1, len(row)):
                    if row[a].overlaps(row[b]):
                        return False
            return True

        for (y_imag, y) in column:
            if prev_imag is not None and y_imag > prev_imag:
                if not row_disjoint():
                    return False
                row = []
            prev_imag = y_imag
            row.append(y)
        if not row_disjoint():
            return False
        return True

    for x in intvs:
        x_real = x.real()
        if prev_real is not None and x_real > prev_real:
            if not column_disjoint():
                return False
            column = []
        prev_real = x_real
        column.append((x.imag(), x))

    if not column_disjoint():
        return False
    return True
ebe3208f1af22f7001d3dee10a2dca6a68558cc8
3,633,550
import win32api
import win32con


def _get_registered_typelibs(match='HEC River Analysis System'):
    """
    adapted from pywin32
    # Copyright (c) 1996-2008, Greg Stein and Mark Hammond.
    """
    # Explicit lookup in the registry.
    result = []
    key = win32api.RegOpenKey(win32con.HKEY_CLASSES_ROOT, "TypeLib")
    try:
        num = 0
        while 1:
            try:
                key_name = win32api.RegEnumKey(key, num)
            except win32api.error:
                break
            # Enumerate all version info
            sub_key = win32api.RegOpenKey(key, key_name)
            name = None
            try:
                sub_num = 0
                best_version = 0.0
                while 1:
                    try:
                        version_str = win32api.RegEnumKey(sub_key, sub_num)
                    except win32api.error:
                        break
                    try:
                        version_flt = float(version_str)
                    except ValueError:
                        version_flt = 0  # ????
                    if version_flt > best_version:
                        best_version = version_flt
                        name = win32api.RegQueryValue(sub_key, version_str)
                    sub_num = sub_num + 1
            finally:
                win32api.RegCloseKey(sub_key)
            if name is not None and match in name:
                fname, lcid = _get_typelib_info(key_name, version_str)
                # Split version
                major, minor = version_str.split('.')
                result.append({'name': name, 'filename': fname,
                               'iid': key_name, 'lcid': lcid,
                               'major': int(major), 'minor': int(minor)})
            num = num + 1
    finally:
        win32api.RegCloseKey(key)
    return result
88d5cf576454793678b275826d4087e5bcd263e4
3,633,551
def head(content, accesskey: str = "", class_: str = "", contenteditable: str = "",
         data_key: str = "", data_value: str = "", dir_: str = "", draggable: str = "",
         hidden: str = "", id_: str = "", lang: str = "", spellcheck: str = "",
         style: str = "", tabindex: str = "", title: str = "", translate: str = ""):
    """
    Returns a head.

    `content`: The contents of the `head`.
    """
    g_args = global_args(accesskey, class_, contenteditable, data_key,
                         data_value, dir_, draggable, hidden, id_, lang,
                         spellcheck, style, tabindex, title, translate)
    return f"<head {g_args}>{content}</head>\n"
6ed2622a53b3e3df8254cd6bfbc41cad296dea8c
3,633,552
import numpy as np


def standardize(dataset, verbose=True):
    """
    Remove all source-specific columns, keeping only those that occur in
    all repo sources; also adds extra columns with default values.
    """
    found = False
    for source, extra_features in EXTRA_FEATURES.items():
        if all(feat in dataset.features for feat in extra_features):
            found = True
            break
    assert found, f"unable to detect dataset type for features {dataset.features}"

    features_to_add_and_defaults = {'stars': '-1', 'source': source}

    features_to_keep = set(BASE_FEATURES.keys()) | set(features_to_add_and_defaults.keys())
    features_to_remove = [
        feature for feature in dataset.features.keys()
        if feature not in features_to_keep
    ]
    if verbose:
        print(f"removing features {features_to_remove}")
    dataset = dataset.remove_columns(features_to_remove)

    features_to_add = {
        k: v for k, v in features_to_add_and_defaults.items()
        if k not in dataset.features
    }
    if verbose:
        print(f"adding features with defaults: {features_to_add}")
    N = len(dataset)
    # could also do this with a map call but it's much slower
    for feat, value in features_to_add.items():
        values = np.full((N,), value)
        dataset = dataset.add_column(feat, values)
    if verbose:
        print(f"resulting dataset features: {dataset.features}")
    return dataset
f057c9d98c0525f4536053c20c0467ae2e8b6287
3,633,553
from astropy.time import Time
from astropy.table import Table, Column


def mark(tv, stars=None, rad=3, auto=False, color='m', new=False, exit=False):
    """ Interactive mark stars on TV, or recenter current list

    Args:
        tv: TV instance from which user will mark stars
        stars=: existing star table
        auto= (bool): if True, recentroid from existing position
        radius= (int): radius to use for centroiding and for size of circles (default=3)
        color= (char): color for circles (default='m')
    """
    # clear display and mark current star list (if not new)
    if new:
        tv.tvclear()

    try:
        dateobs = Time(tv.hdr['DATE-OBS'], format='fits')
    except:
        dateobs = None
    #try: exptime = tv.hdr['EXPTIME']
    #except: exptime = None
    #try: filt = tv.hdr['FILTER']
    #except: filt = None
    cards = ['EXPTIME', 'FILTER', 'AIRMASS']
    types = ['f4', 'S', 'f4']

    if stars is None:
        stars = Table(names=('id', 'x', 'y'), dtype=('i4', 'f4', 'f4'))
        stars['x'].info.format = '.2f'
        stars['y'].info.format = '.2f'
        if dateobs is not None:
            stars.add_column(Column([], name='MJD', dtype=('f8')))
            stars['MJD'].info.format = '.6f'
        #if exptime is not None:
        #    stars.add_column(Column([], name='EXPTIME', dtype=('f4')))
        #if filt is not None:
        #    stars.add_column(Column([], name='FILTER', dtype=('S')))
        for icard, card in enumerate(cards):
            try:
                stars.add_column(Column([], name=card, dtype=(types[icard])))
            except:
                pass
    else:
        if auto:
            # with auto option, recentroid and update from current header
            for star in stars:
                x, y = centroid(tv.img, star['x'], star['y'], rad)
                star['x'] = x
                star['y'] = y
                if dateobs is not None:
                    star['MJD'] = dateobs.mjd
                #if exptime is not None: star['EXPTIME'] = exptime
                #if filt is not None: star['FILTER'] = filt
                for icard, card in enumerate(cards):
                    try:
                        star[card] = tv.hdr[card]
                    except:
                        pass
        # display stars
        for star in stars:
            tv.tvcirc(star['x'], star['y'], rad, color=color)
        if exit:
            return stars

    istar = len(stars) + 1
    while True:
        key, x, y = tv.tvmark()
        if key == 'q' or key == 'e':
            break
        if key == 'i':
            # add at nearest integer pixel
            x = round(x)
            y = round(y)
        elif key == 'c':
            # centroid around marked position
            x, y = centroid(tv.img, x, y, rad)
        # add blank row, recognizing that we may have added other columns
        stars.add_row()
        stars[len(stars) - 1]['id'] = istar
        stars[len(stars) - 1]['x'] = x
        stars[len(stars) - 1]['y'] = y
        tv.tvcirc(x, y, rad, color=color)
        if dateobs is not None:
            stars[len(stars) - 1]['MJD'] = dateobs.mjd
        for icard, card in enumerate(cards):
            try:
                stars[len(stars) - 1][card] = tv.hdr[card]
            except:
                pass
        #if exptime is not None:
        #    stars[len(stars)-1]['EXPTIME'] = exptime
        #if filt is not None:
        #    stars[len(stars)-1]['FILTER'] = filt
        istar += 1

    return stars
66a291c564329a878aea7658cd9fc071cc303d0b
3,633,554
from django.db import connection
from django.shortcuts import get_object_or_404, render_to_response


def result_summary_info(request, object_id):
    """Present a result summary"""
    object = get_object_or_404(Results.ResultSummaryList, pk=object_id)

    protocolfields = {}
    for (fieldname, text, cond) in [
            ("version_intolerant", "Version Intolerant",
             Results.ResultCondition.RESULTC_VERSION_INTOLERANT),
            ("extension_intolerant", "Extension Intolerant",
             Results.ResultCondition.RESULTC_EXTENSION_INTOLERANT),
            ("extension_and_version_intolerant", "Extension and Version Intolerant",
             Results.ResultCondition.RESULTC_VERANDEXT_INTOLERANT),
            ("extension_or_version_intolerant", "Extension or Version Intolerant",
             Results.ResultCondition.RESULTC_VEROREXT_INTOLERANT),
            ("bad_version", "Require Bad Version",
             Results.ResultCondition.RESULTC_BADVERSION),
            ("bad_check", "No Version check",
             Results.ResultCondition.RESULTC_NOVERSION),
            ]:
        cond_item = object.conditions.get(condition=cond)
        for ver in [(3, 0), (3, 1), (3, 2), (3, 3), (3, 4), (3, 11), (4, 1), (0, 3), (0, 4)]:
            if ver == (3, 0):
                title = "SSL v3"
            elif ver[0] == 0:
                title = "Summary TLS %d.x" % (ver[1] - 2,)
            else:
                title = "TLS %d.%d" % (ver[0] - 2, (ver[1] - 1 if ver[0] == 3 else ver[1]))
            Q = cond_item.resultentryprotocol_set.filter(
                version_tested_major=ver[0], version_tested_minor=ver[1])
            protocolfields.setdefault(str(ver), {"title": title, "values": {}})[
                "values"][fieldname] = {"header": text, "count": Q.count()}

    summary_fields = []
    for (fieldname, text, cond) in [
            ("renego", "Renego Patched",
             Results.ResultCondition.RESULTC_RENEGO),
            ("renego_noncompliant", "Renego Patched but non-compliant",
             Results.ResultCondition.RESULTC_RENEGONONCOMPLIANT),
            ("renego_unpatched", "Not Renego Patched",
             Results.ResultCondition.RESULTC_NONRENEGO),
            ("renego_unstable", "Renego Patched, but unstable",
             Results.ResultCondition.RESULTC_RENEGOUNSTABLE),
            ]:
        cond_item = object.conditions.get(condition=cond)
        Q = cond_item.resultentry_set.all()
        summary_fields.append({"header": text, "count": Q.count()})

    return render_to_response("summary.html", {
        "object": object,
        "protocolfields": protocolfields,
        "summary_fields": summary_fields,
        "debug_output": [x for x in connection.queries],
    })
1036fad0d52435912579e9680f5037a8aa052e53
3,633,555
import numpy as np
from matplotlib import pyplot as plt


def stackplot(data, add, xlabel='', ylabel='', cmap='Spectral', figsize=(3, 4.5), lw=1, plot=True):
    """
    Plots a stack plot of selected spectra.

    :type data: list[float]
    :param data: Data to include in the plot.
    :type add: float
    :param add: Displacement, or offset, between consecutive curves.
    :type xlabel: str
    :param xlabel: Label of the x axis.
    :type ylabel: str
    :param ylabel: Label of the y axis.
    :type cmap: str
    :param cmap: Colormap, according to matplotlib options.
    :type figsize: tuple
    :param figsize: Size of the plot. Default is (3, 4.5).
    :type lw: float
    :param lw: Linewidth of the curves.
    :type plot: bool
    :param plot: If True, draw and show the plot. Only disabled for test purposes.

    :returns: plot
    :rtype: bool
    """
    base = [add for _ in range(len(data[0]))]
    cmap = plt.cm.get_cmap(cmap)
    color = []
    for i in range(len(data)):
        # guard against division by zero when only one curve is plotted
        color.append(cmap(i / max(len(data) - 1, 1)))

    if plot:
        plt.figure(figsize=figsize)
        for i in range(len(data)):
            plt.plot(np.array(data[i]) + np.array(base) * i, color=color[i], lw=lw)
        plt.tick_params(left=False, bottom=False, labelleft=False, labelbottom=False)
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)
        plt.show()
    return plot
959118905abbfada9d4af3ac0a8ab414b00a9ffe
3,633,556
import numpy as np
import torch


def batch_detect(net, img_batch, device):
    """
    Inputs:
        - img_batch: a numpy array or torch.Tensor of shape
          (Batch size, Channels, Height, Width)
    """
    B, C, H, W = img_batch.shape
    orig_size = min(H, W)

    if isinstance(img_batch, torch.Tensor):
        img_batch = img_batch.cpu().numpy()

    img_batch = img_batch.transpose((0, 2, 3, 1))

    imgs, (xshift, yshift) = resize_and_crop_batch(img_batch, 128)
    preds = net.predict_on_batch(imgs)
    bboxlists = []
    for pred in preds:
        shift = np.array([xshift, yshift] * 2)
        scores = pred[:, -1:]
        # swap coordinate pairs (e.g. (y1, x1, y2, x2) -> (x1, y1, x2, y2))
        locs = np.concatenate((pred[:, 1:2], pred[:, 0:1], pred[:, 3:4], pred[:, 2:3]), axis=1)
        bboxlists.append(np.concatenate((locs * orig_size + shift, scores), axis=1))

    if len(bboxlists) == 0:
        bboxlists = np.zeros((1, 1, 5))

    return bboxlists
e20de1e4f3915e2e377e790f7e6cb3df5d76b5cb
3,633,557
def int_to_binary(x, n):
    """Convert an integer into its binary representation

    Args:
        x (int): input integer
        n (int): minimum width of the result; shorter representations are
            left-padded with zeros

    Returns:
        (str) binary representation

    Raises:
        ValueError: if x is not an integer
    """
    if not isinstance(x, int):
        raise ValueError('x must be an integer.')
    return format(x, 'b').zfill(n)
c3d68a798f84988290bd4e845a5bcc015872b054
3,633,558
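# Illustrative usage check for int_to_binary above (added example, not part of
# the original record): zfill pads to a minimum total width, so values that
# are already wider come back unpadded.
assert int_to_binary(5, 8) == '00000101'
assert int_to_binary(0, 4) == '0000'
assert int_to_binary(255, 4) == '11111111'   # already wider than n=4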
import os def read_requirements(): """Parse requirements from requirements.txt.""" requirements_path = os.path.join('.', 'requirements.txt') with open(requirements_path, 'r') as f: requirements = [line.rstrip() for line in f] return requirements
bc4282532c74d5c2f2bd9cd225b7604d0924035d
3,633,559
def convert_examples_to_features(examples, tokenizer, max_seq_length, max_program_length, is_training, op_list, op_list_size, const_list, const_list_size, verbose=True): """Converts a list of DropExamples into InputFeatures.""" unique_id = 1000000000 res = [] for (example_index, example) in enumerate(examples): features = example.convert_single_example( is_training=is_training, tokenizer=tokenizer, max_seq_length=max_seq_length, max_program_length=max_program_length, op_list=op_list, op_list_size=op_list_size, const_list=const_list, const_list_size=const_list_size, cls_token=tokenizer.cls_token, sep_token=tokenizer.sep_token) for feature in features: feature.unique_id = unique_id feature.example_index = example_index res.append(feature) unique_id += 1 return res
d7024a0ff97d94a5c2aa32e63230e972584fb1d2
3,633,560
# `nn` was not imported in the original snippet; jax.nn is assumed here since
# the code needs an array-valued sigmoid alongside cumsum/mean on arrays.
from jax import nn


def negative_mean_successiness(c):
    """Negative mean successiness over the course of the trial."""
    if c.needed_control_arm_events.size > 1:
        center = float(c.needed_control_arm_events.mean())
        width = float(c.needed_control_arm_events.std())
    else:
        center = float(c.needed_control_arm_events)
        width = center / 3
    cum_events = c.control_arm_events.cumsum(axis=-1)
    successiness = nn.sigmoid((cum_events - center) / width)
    return -successiness.mean(axis=-1)
ac6e07076c422ef524f46583edd41eff48f6733e
3,633,561
def determina_putere(n):
    """
    Determine the first power of 2 that is large enough for n addresses.

    :param (int) n: the number of required IP addresses, read from input
    :return (int) putere: the suitable power of 2
    """
    putere = 1
    # n+2 leaves room for the two extra (network/broadcast) addresses
    while 2**putere < n+2:
        putere += 1
    return putere
85e2c1dcd2ea5d86b5db3c6ced28dd65e244c467
3,633,562
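# Illustrative usage (added example): sizing a subnet for 5 hosts needs
# 2**3 = 8 addresses once the two extra slots from the n+2 term are counted.
assert determina_putere(5) == 3
assert determina_putere(2) == 2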
def attribute_rename_cmd(oldattr, newattr):
    """
    Rename an attribute; its value is kept. If the attribute is not present,
    nothing is done.

        $ cjio myfile.city.json attribute_rename oldAttr newAttr info
    """
    def processor(cm):
        utils.print_cmd_status('Rename attribute: "%s" => "%s"' % (oldattr, newattr))
        cm.rename_attribute(oldattr, newattr)
        return cm
    return processor
b48e0e90bbb75cdf9828e21c115b758250f433e2
3,633,563
from datetime import datetime


def editItemInCategory(item_id):
    """ Edit an item in a given category."""
    if 'username' not in login_session:
        sMsg = "You are not authorized to perform this '%s' " % ('edit item category')
        sMsg += "action because you are not logged in. You are being redirected to login."
        flash(sMsg)
        return redirect('/login')

    oEditItem = session.query(Item).filter_by(id=item_id).first()
    if not oEditItem:
        sMsg = "Invalid item_id value, '%s', used in " % (item_id)
        sMsg += "'EditCategoryItem' operation."
        flash(sMsg)
        return redirect(url_for('showCategories'))

    iSessionUser = getUserID(login_session['email'])
    if iSessionUser is None:
        sMsg = "Fatal error!!! Unable to find a matching Owner.id value for "
        sMsg += "unique session email address, '%s' " % (login_session['email'])
        sMsg += "during 'EditCategoryItem' operation."
        flash(sMsg)
        return redirect(url_for('showItemsInCategory', category_id=oEditItem.category_id))

    if oEditItem.owner_id != iSessionUser:
        sMsg = "You cannot edit item '%s' because you are not " % (oEditItem.name)
        sMsg += "the owner of it."
        flash(sMsg)
        return redirect(url_for('showItemsInCategory', category_id=oEditItem.category_id))

    if request.method == 'POST':
        if request.form['ItemName'] == "":
            sMsg = "You must have an item name in order to complete the edit."
            flash(sMsg)
            return render_template('categoryItemEdit.html', ItemToEdit=oEditItem)

        sOrigName = oEditItem.name
        oEditItem.name = request.form['ItemName']
        oEditItem.desc = request.form['ItemDesc']
        # `datetime` is the class imported above, so call utcnow() on it
        # directly (the original `datetime.datetime.utcnow()` would raise an
        # AttributeError)
        oEditItem.modifyDt = datetime.utcnow()
        updateDBRec(oEditItem)
        if sOrigName == oEditItem.name:
            sMsg = "Item, '%s', content edited." % (oEditItem.name)
        else:
            sMsg = "Item '%s' name changed to '%s'." % (sOrigName, oEditItem.name)
        flash(sMsg)
        return redirect(url_for('showItemsInCategory', category_id=oEditItem.category_id))
    else:
        return render_template('categoryItemEdit.html', ItemToEdit=oEditItem)
ed2f51eea2713271f82440599edd90f015820b24
3,633,564
def factorial(n, show=False):
    """
    -> Computes the factorial of a number.

    :param n: the number to compute.
    :param show: (optional) whether to display the computation.
    :return: the factorial of n.
    """
    f = 1
    for c in range(n, 0, -1):
        if show:
            print(c, end='')
            if c > 1:
                print(' x ', end='')
            else:
                print(' = ', end='')
        f *= c
    return f
4e2928b2e2b197e40aacd8ec1b18c9afee42e229
3,633,565
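# Illustrative usage (added example): factorial(5) returns 120; with
# show=True the steps "5 x 4 x 3 x 2 x 1 = " are printed before the caller
# prints the result.
assert factorial(5) == 120
print(factorial(5, show=True))   # 5 x 4 x 3 x 2 x 1 = 120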
def parse_commands(log_content):
    """
    parse cwl commands from the line-by-line generator of log file content
    and returns the commands as a list of command line lists, each
    corresponding to a step run.
    """
    command_list = []
    command = []
    in_command = False
    # use a default so the iterator's exhaustion ends the loop instead of
    # raising StopIteration
    line = next(log_content, '')
    while line:
        line = line.strip('\n')
        if '[job' in line and line.endswith('docker \\'):
            line = 'docker \\'  # remove the other stuff
            in_command = True
        if in_command:
            command.append(line.strip('\\').rstrip(' '))
            if not line.endswith('\\'):
                in_command = False
                command_list.append(command)
                command = []
        line = next(log_content, '')
    return command_list
dff555cd0ec84619425fc05e4c8892c603bcc994
3,633,566
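# Illustrative usage (added example): a minimal fake log iterator; the
# "[job ...]" line is collapsed to "docker \" and the backslash-continued
# lines are gathered into one command list.
log = iter([
    "[job step1] /tmp$ docker \\\n",
    "    run \\\n",
    "    image:tag\n",
])
print(parse_commands(log))
# [['docker', '    run', '    image:tag']]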
import datetime
import logging
import ssl
import time


def wait_for_operation(client,
                       project,
                       op_id,
                       timeout=datetime.timedelta(hours=1),
                       polling_interval=datetime.timedelta(seconds=5),
                       status_callback=log_operation_status):
    """Wait for the specified operation to complete.

    Args:
        client: Client for the API that owns the operation.
        project: project
        op_id: Operation id.
        timeout: A datetime.timedelta expressing the amount of time to wait before
            giving up.
        polling_interval: A datetime.timedelta to represent the amount of time to wait
            between requests polling for the operation status.

    Returns:
        op: The final operation.

    Raises:
        TimeoutError: if we timeout waiting for the operation to complete.
    """
    # note: `import datetime` (the module) is required here, since both
    # datetime.timedelta and datetime.datetime are used
    endtime = datetime.datetime.now() + timeout
    while True:
        try:
            op = client.operations().get(project=project, operation=op_id).execute()
            if status_callback:
                status_callback(op)
            status = op.get("status", "")
            # Need to handle other status's
            if status == "DONE":
                return op
        except ssl.SSLError as e:
            logging.error("Ignoring error %s", e)
        if datetime.datetime.now() > endtime:
            raise TimeoutError(
                "Timed out waiting for op: {0} to complete.".format(op_id))
        time.sleep(polling_interval.total_seconds())

    # Linter complains if we don't have a return here even though its unreachable
    return None
a7487beda110d1b5d52d8073d58fb6a33b0997a2
3,633,567
import re


def regex_closest_match(regex: str, string: str) -> str:
    """Find the longest prefix of regex that matches something in string."""
    modified_regex = regex
    while True:
        # TODO: there may be a better way to do this rather than a try-except
        # (perhaps using sre_parse to strategically remove sections of the
        # regex and/or test if the regex is valid before trying to match on it)
        try:
            match = re.match(modified_regex, string)
        except re.error:
            # trimming produced an invalid pattern; keep trimming
            modified_regex = modified_regex[:-1]
        else:
            if match is not None:
                return modified_regex
            modified_regex = modified_regex[:-1]
2154fe51c874fd910e8f5da8b1c687cc598e5cab
3,633,568
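# Illustrative usage (added example): the pattern is trimmed one character at
# a time, skipping over trims that leave an invalid pattern (here, a lone
# trailing backslash), until a prefix matches.
assert regex_closest_match(r'\d+-\d+', '123x') == r'\d+'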
import networkx as nx


def bij_connected_comps(components):
    """Map a set of connected planar graphs (possibly derived) to a single
    nx.PlanarEmbedding."""
    res = nx.PlanarEmbedding()
    for g in components:
        g = g.underive_all()
        g = g.to_planar_embedding()
        res = nx.PlanarEmbedding(nx.compose(res, g))
    return res
7f705f25756e114c91bbff9a09880d5bfb8d37ee
3,633,569
from typing import Tuple def render_responses(intent: Intent, language_data: IntentLanguageData) -> Tuple[IntentResponseDict, str]: """ Return a copy of responses in `language_data` where intent parameter references are replaced with their values from the given :class:`Intent` instance. Args: intent: The intent to read parameters from language_data: A collection of responses for the given intent Return: Intent responses, and plaintext version """ result_messages: IntentResponseDict = IntentResponseDict() for group, response_list in language_data.responses.items(): result_messages[group] = [r.render(intent) for r in response_list] rendered_plaintext = [r.random() for r in result_messages.get(IntentResponseGroup.DEFAULT, [])] result_plaintext = " ".join(rendered_plaintext) return result_messages, result_plaintext
4e83d1b75b25d8e3ab7d030890744f0081c02c10
3,633,570
import os def sp_cpu(file): """Read single-point output for cpu time.""" spe, program, data, cpu = None, None, [], None if os.path.exists(os.path.splitext(file)[0] + '.log'): with open(os.path.splitext(file)[0] + '.log') as f: data = f.readlines() elif os.path.exists(os.path.splitext(file)[0] + '.out'): with open(os.path.splitext(file)[0] + '.out') as f: data = f.readlines() else: raise ValueError("File {} does not exist".format(file)) for line in data: if line.find("Gaussian") > -1: program = "Gaussian" break if line.find("* O R C A *") > -1: program = "Orca" break if line.find("NWChem") > -1: program = "NWChem" break for line in data: if program == "Gaussian": if line.strip().startswith('SCF Done:'): spe = float(line.strip().split()[4]) if line.strip().find("Job cpu time") > -1: days = int(line.split()[3]) hours = int(line.split()[5]) mins = int(line.split()[7]) secs = 0 msecs = int(float(line.split()[9]) * 1000.0) cpu = [days, hours, mins, secs, msecs] if program == "Orca": if line.strip().startswith('FINAL SINGLE POINT ENERGY'): spe = float(line.strip().split()[4]) if line.strip().find("TOTAL RUN TIME") > -1: days = int(line.split()[3]) hours = int(line.split()[5]) mins = int(line.split()[7]) secs = int(line.split()[9]) msecs = float(line.split()[11]) cpu = [days, hours, mins, secs, msecs] if program == "NWChem": if line.strip().startswith('Total DFT energy ='): spe = float(line.strip().split()[4]) if line.strip().find("Total times") > -1: days = 0 hours = 0 mins = 0 secs = float(line.split()[3][0:-1]) msecs = 0 cpu = [days,hours,mins,secs,msecs] return cpu
ca9cb22b0981b3a14eafdd2637eccbe448597432
3,633,571
def cropseq(indexes, l, stride):
    """generate chunked silencer sequences according to the loaded index"""
    print('Generating silencer samples with length {} bps...'.format(l))
    silencers = list()
    for index in indexes:
        # indexes may carry an extra trailing field; accept both layouts
        try:
            [sampleid, chrkey, startpos, endpos, _] = index
        except ValueError:
            [sampleid, chrkey, startpos, endpos] = index
        l_orig = endpos - startpos
        if l_orig < l:
            for shift in range(0, l - l_orig, stride):
                start = startpos - shift
                end = start + l
                seq, legal = checkseq(chrkey, start, end)
                if legal:
                    silencers.append([sampleid, chrkey, start, end])
        elif l_orig >= l:
            chunks_ = chunks(range(startpos, endpos), l, l - stride)
            for chunk in chunks_:
                start = chunk[0]
                end = chunk[-1] + 1
                if (end - start) == l:
                    seq, legal = checkseq(chrkey, start, end)
                    # only keep chunks whose sequence check passes, matching
                    # the short-index branch above
                    if legal:
                        silencers.append([sampleid, chrkey, start, end])
                elif (end - start) < l:
                    break
    print('Data augmentation: from {} indexes to {} samples'.format(len(indexes), len(silencers)))
    return silencers
09bac76a6209398cdb5cb230be7aa18f5f895204
3,633,572
from collections import defaultdict

from tqdm import tqdm


def generate_claims(model, gen_dset, dl, tokenizer, device):
    """
    Run generation using the given model on the given dataset
    :param model: BART model to use for generation
    :param gen_dset: The original dataset
    :param dl: A dataloader to use for generation
    :param tokenizer: A tokenizer for the given model
    :param device: What device to run on
    :return: The set of generated claims to use for the generative model, and the same
        claims formatted for input to the fact checking model
    """
    # Predict
    all_samples = []
    j = 0
    for b in tqdm(dl):
        input_ids = b['input_ids'].to(device)
        # Get the number of return sequences for this batch
        n_return_sequences = gen_dset['num_return_sequences'][j:j + input_ids.shape[0]]
        n_gen_seq = max(n_return_sequences)
        j += input_ids.shape[0]
        # Generate the max number of needed sequences for the batch
        samples = model.generate(
            input_ids,
            max_length=tokenizer.model_max_length,
            early_stopping=True,
            do_sample=True,
            num_return_sequences=n_gen_seq,
            top_k=n_gen_seq
        )
        samples = samples.reshape((input_ids.shape[0], n_gen_seq, -1))
        # Just get the number of sequences needed for each sample
        all_samples.extend([s[:n_seq] for s, n_seq in
                            zip(list(samples.detach().cpu().numpy()), n_return_sequences)])

    fc_claim_inputs = []
    generated_claims = []
    count = defaultdict(int)
    for id, context, claims, evidence, orig_context, citance, paper_id, num_return_sequences in zip(
            gen_dset['doc_id'], gen_dset['context'], all_samples, gen_dset['evidence'],
            gen_dset['orig_context'], gen_dset['citance'], gen_dset['paper_id'],
            gen_dset['num_return_sequences']):
        gen_claims = set([tokenizer.decode(c, skip_special_tokens=True,
                                           clean_up_tokenization_spaces=False) for c in claims])
        for c in gen_claims:
            n = count[id]
            generated_claims.append(
                {'id': f"{id}_{n}", 'context': context, 'generated_claim': c,
                 'evidence': evidence, 'orig_context': orig_context, 'citance': citance,
                 'paper_id': paper_id, 'num_return_sequences': num_return_sequences})
            fc_claim_inputs.append({'id': f"{id}_{n}", 'claim': c, 'evidence': {},
                                    'cited_doc_ids': evidence, 'retrieved_doc_ids': evidence})
            count[id] += 1

    return generated_claims, fc_claim_inputs
0f042101ca6c864249c62e1934a9c58f0b21f9e7
3,633,573
def to_geojson(series): """Return a GeoJSON geometry collection from the series (must be in EPSG:4326). Did not use the builtin for the series since it introduces a lot of bloat. """ return { "type": "GeometryCollection", "geometries": series.apply(lambda x: x.__geo_interface__).to_list(), }
2ebdc001ed7a6fb3ee6e6cac9fc7722e19518e20
3,633,574
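# Illustrative usage for to_geojson (added example, assumes pandas and
# shapely are available): any series of objects exposing __geo_interface__
# works, e.g. shapely geometries.
import pandas as pd
from shapely.geometry import Point

pts = pd.Series([Point(0, 0), Point(1, 1)])   # already in EPSG:4326
collection = to_geojson(pts)
print(collection["type"])                     # GeometryCollection
print(collection["geometries"][0])            # {'type': 'Point', 'coordinates': (0.0, 0.0)}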
import os
import subprocess


def check_kafka_ready(expected_brokers, timeout, config, bootstrap_broker_list=None,
                      zookeeper_connect=None, security_protocol=None):
    """Waits for a Kafka cluster to be ready and have at least the
    expected_brokers present. This command uses the Java docker-utils library
    to get the Kafka status.

    This command supports a secure Kafka cluster. If SSL is enabled, it expects
    the client_properties file to have the relevant SSL properties. If SASL is
    enabled, the command expects the JAAS config to be present in the KAFKA_OPTS
    environment variable and the SASL properties to be present in the
    client_properties file.

    Args:
        expected_brokers: expected number of brokers in the cluster.
        timeout: Time in secs to wait for the Kafka cluster to be ready.
        config: properties file with client config for SSL and SASL.
        security_protocol: Security protocol to use.
        bootstrap_broker_list: Kafka bootstrap broker list string (host:port, ....)
        zookeeper_connect: Zookeeper connect string.

    Returns:
        False, if the timeout expires and the Kafka cluster is unreachable,
        True otherwise.
    """
    cmd_template = """
    java {jvm_opts} \
         -cp {classpath} \
         io.confluent.admin.utils.cli.KafkaReadyCommand \
         {expected_brokers} \
         {timeout_in_ms}"""

    # CLASSPATH is a module-level constant
    cmd = cmd_template.format(
        classpath=CLASSPATH,
        jvm_opts=os.environ.get("KAFKA_OPTS") or "",
        expected_brokers=expected_brokers,
        timeout_in_ms=timeout * 1000)

    if config:
        cmd = "{cmd} --config {config_path}".format(cmd=cmd, config_path=config)

    if security_protocol:
        cmd = "{cmd} --security-protocol {protocol}".format(cmd=cmd, protocol=security_protocol)

    if bootstrap_broker_list:
        cmd = "{cmd} -b {broker_list}".format(cmd=cmd, broker_list=bootstrap_broker_list)
    else:
        cmd = "{cmd} -z {zookeeper_connect}".format(cmd=cmd, zookeeper_connect=zookeeper_connect)

    exit_code = subprocess.call(cmd, shell=True)
    return exit_code == 0
ca8aa8a1c8a51b0885bd887bb4a8bc8854160c3c
3,633,575
import copy

import discord
from discord.ext import commands


async def copy_context(ctx: commands.Context, *, author=None, channel=None, **kwargs):
    """
    Returns a new Context with changed message properties.
    """
    # copy the message and update the attributes
    alt_message: discord.Message = copy.copy(ctx.message)
    alt_message._update(kwargs)

    if author is not None:
        alt_message.author = author
    if channel is not None:
        alt_message.channel = channel

    # obtain and return a context of the same type
    return await ctx.bot.get_context(alt_message, cls=type(ctx))
78a82922a7740cfcdad0e17a0f85a16ee53a068e
3,633,576
def getConstraintWeightAttr(leader, constraint):
    """
    Return the weight attribute from a constraint that corresponds to a
    specific leader node, or None if the leader is not a target of the
    constraint.

    Args:
        leader (PyNode): A node that is one of the leaders of a constraint
        constraint (PyNode): A constraint node
    """
    for i, target in enumerate(constraint.getTargetList()):
        if leader == target:
            return constraint.getWeightAliasList()[i]
    return None
e53ef981f505f1c8fc21fff7b71605764d6da3e0
3,633,577
import sys

import networkx as nx
import numpy as np
import pickle as pkl
import scipy.sparse as sp


def load_data(dataset_str):
    """
    Loads input data from gcn/data directory

    ind.dataset_str.x => the feature vectors of the training instances as scipy.sparse.csr.csr_matrix object;
    ind.dataset_str.tx => the feature vectors of the test instances as scipy.sparse.csr.csr_matrix object;
    ind.dataset_str.allx => the feature vectors of both labeled and unlabeled training instances
        (a superset of ind.dataset_str.x) as scipy.sparse.csr.csr_matrix object;
    ind.dataset_str.y => the one-hot labels of the labeled training instances as numpy.ndarray object;
    ind.dataset_str.ty => the one-hot labels of the test instances as numpy.ndarray object;
    ind.dataset_str.ally => the labels for instances in ind.dataset_str.allx as numpy.ndarray object;
    ind.dataset_str.graph => a dict in the format {index: [index_of_neighbor_nodes]} as collections.defaultdict
        object;
    ind.dataset_str.test.index => the indices of test instances in graph, for the inductive setting as list object.

    All objects above must be saved using python pickle module.

    :param dataset_str: Dataset name
    :return: All data input files loaded (as well as the training/test data).
    """
    # Use masks to translate a fully supervised setting to a semi-supervised setting
    names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
    objects = []
    for i in range(len(names)):
        with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
            if sys.version_info > (3, 0):
                objects.append(pkl.load(f, encoding='latin1'))
            else:
                objects.append(pkl.load(f))

    x, y, tx, ty, allx, ally, graph = tuple(objects)
    test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
    test_idx_range = np.sort(test_idx_reorder)

    if dataset_str == 'citeseer':
        # Fix citeseer dataset (there are some isolated nodes in the graph)
        # Find isolated nodes, add them as zero-vecs into the right position
        test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder) + 1)
        tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
        tx_extended[test_idx_range - min(test_idx_range), :] = tx
        tx = tx_extended
        ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
        ty_extended[test_idx_range - min(test_idx_range), :] = ty
        ty = ty_extended

    # combine all training and testing features as one sparse matrix
    features = sp.vstack((allx, tx)).tolil()
    # reorder the testing features so the testing instances follow the training instances
    features[test_idx_reorder, :] = features[test_idx_range, :]
    # convert the graph to a sparse adjacency matrix
    adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))

    # correspondingly adjust the testing labels
    labels = np.vstack((ally, ty))
    labels[test_idx_reorder, :] = labels[test_idx_range, :]
    # attributes, labels = pollute_data_2(labels, features)

    idx_test = test_idx_range.tolist()
    idx_train = range(len(y))
    # Just choose another 500 training instances as the validation set
    idx_val = range(len(y), len(y) + 500)

    '''
    idx_train = range(1208)
    idx_val = range(1208, 1208+ 500)
    attributes, labels = pollute_data(labels, features, idx_train, idx_val, idx_test)
    '''
    # check the label rate of the cora dataset
    if dataset_str == 'cora':
        num_train = len(y)
        total_num = len(ally) + len(ty)
        label_ratio_cora = num_train * 1.0 / total_num
        print(label_ratio_cora)
    if dataset_str == 'citeseer':
        num_train = len(y)
        total_num = len(ally) + len(ty)
        label_ratio_citeseer = num_train * 1.0 / total_num
        print(label_ratio_citeseer)

    # boolean vectors over all nodes (e.g. 2708 for cora), True on the given indices
    train_mask = sample_mask(idx_train, labels.shape[0])
    val_mask = sample_mask(idx_val, labels.shape[0])
    test_mask = sample_mask(idx_test, labels.shape[0])

    y_train = np.zeros(labels.shape)
    y_val = np.zeros(labels.shape)
    y_test = np.zeros(labels.shape)
    # only assign label values where the corresponding mask is True
    y_train[train_mask, :] = labels[train_mask, :]
    y_val[val_mask, :] = labels[val_mask, :]
    # testing instances start from 1708
    y_test[test_mask, :] = labels[test_mask, :]

    # convert adj and features to dense numpy arrays
    adj_np = adj.toarray()
    features_np = features.toarray()

    # generate the graph, id_map and class_map
    G, IDMap, classMap = create_G_idM_classM(adj_np, features_np, test_mask, val_mask, labels)
    # at this stage, validation and test nodes keep their labels, but the masks
    # make them all [0 0 0 0 0 0 0]
    num_edges = len(G.edges())
    print(num_edges)
    print(G.number_of_edges())

    # Dump everything into .json files and one .npy
    if dataset_str == 'cora':
        graphFile_prefix = '/Users/april/Downloads/GraphSAGE_Benchmark-master/processed/cora'
        dataset_name = 'cora_process'
        dumpJSON(graphFile_prefix, dataset_name, G, IDMap, classMap, features_np)
    if dataset_str == 'citeseer':
        graphFile_prefix = '/Users/april/Downloads/GraphSAGE_Benchmark-master/processed/citeseer'
        dataset_name = 'citeseer_process'
        dumpJSON(graphFile_prefix, dataset_name, G, IDMap, classMap, features_np)

    return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask
d629e3ceb8f8b030526f15294309b95636e90838
3,633,578
def approximate_mds(dists): """Approximate multidimensional scaling (MDS) Estimate the inter-node distance matrix from source node distances as described in "Iterative Geometry Calibration from Distance Estimates for Wireless Acoustic Sensor Networks" (https://arxiv.org/abs/2012.06142). Subsequently the inter-node distance matrix is used to estimate the source positions via MDS. Args: dists (ndarray (shape=(n_srcs, n_nodes))): Array containing the source node distances """ _, n_nodes = dists.shape approx_dist_mat = np.zeros((n_nodes, n_nodes)) for id1 in range(n_nodes): for id2 in range(id1 + 1, n_nodes): approx_dist = (np.max(np.abs(dists[:, id1] - dists[:, id2])) + np.min(dists[:, id1] + dists[:, id2])) / 2 approx_dist_mat[id1, id2] = approx_dist approx_dist_mat[id2, id1] = approx_dist b = np.eye(n_nodes) - 1 / n_nodes * np.ones((n_nodes, n_nodes)) b = - (b @ approx_dist_mat ** 2 @ b) / 2 b = (b + b.T) / 2 v, e = np.linalg.eigh(b) idx = v.argsort()[::-1] v = v[idx] e = e[:, idx] node_positions = e[:, :2] @ np.diag(np.sqrt(v[:2])) return node_positions.T
6f2aef5e71c439990840143089fa9cc941a81c2c
3,633,579
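# Illustrative usage for approximate_mds (added example): build source-to-node
# distances from a known 2-D geometry and recover the node layout; the MDS
# result matches the true positions only up to rotation, reflection and
# translation.
import numpy as np

rng = np.random.default_rng(0)
nodes = rng.uniform(0.0, 5.0, size=(4, 2))    # 4 sensor nodes in the plane
srcs = rng.uniform(0.0, 5.0, size=(50, 2))    # 50 source positions
dists = np.linalg.norm(srcs[:, None, :] - nodes[None, :, :], axis=-1)

est = approximate_mds(dists)                  # shape (2, 4)
print(est.shape)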
def build_resnet_fpnindi_backbone(cfg, input_shape: ShapeSpec): """ Args: cfg: a detectron2 CfgNode Returns: backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`. """ bottom_up = build_resnet_backbone(cfg, input_shape) in_features = cfg.MODEL.FPN.IN_FEATURES out_channels = cfg.MODEL.FPN.OUT_CHANNELS backbone = FPNINDI( bottom_up=bottom_up, in_features=in_features, out_channels=out_channels, norm=cfg.MODEL.FPN.NORM, top_block=LastLevelMaxPool(), fuse_type=cfg.MODEL.FPN.FUSE_TYPE, ) return backbone
40329cfb0d414305e7d38966b41e352cc44c6535
3,633,580
from typing import Tuple from typing import Any def _get_min_max_outputs(node: BaseNode, fw_info: FrameworkInfo) -> Tuple[Any, Any]: """ Return the min/max output values of a node if known. If one of them (or both of them) is unknown - return None instead of a value. Args: node: Node to create its prior info. fw_info: Information about a specific framework the node was generated from. Returns: Min/max output values if known. """ min_output, max_output = None, None if node.type == ReLU: min_output = node.framework_attr[THRESHOLD] if node.framework_attr[NEGATIVE_SLOPE] == 0 else None max_output = node.framework_attr[RELU_MAX_VALUE] elif fw_info.layers_has_min_max(node.type): min_output, max_output = fw_info.layer_min_max_mapping[node.type] elif node.type == Activation and fw_info.activation_has_min_max(node.framework_attr[ACTIVATION]): min_output, max_output = fw_info.activation_min_max_mapping[node.framework_attr[ACTIVATION]] return min_output, max_output
c43992c9b2cd64b9970766fe06d6d0c6af3a6954
3,633,581
def remove_stop_words(document): """Returns document without stop words""" document = ' '.join([i for i in document.split() if i not in stop]) return document
c4385790901f09eadeac67dc1035a12bedf8cb45
3,633,582
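# Illustrative usage for remove_stop_words (added example): `stop` is assumed
# to be a module-level collection of stop words (e.g.
# nltk.corpus.stopwords.words('english')); a small literal set is used here
# to keep the sketch self-contained.
stop = {'the', 'is', 'on', 'a'}
print(remove_stop_words('the cat is on a mat'))   # -> 'cat mat'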
def Ion_Flux_Relabeling(h,q): """ Oh no! Commander Lambda's latest experiment to improve the efficiency of her LAMBCHOP doomsday device has backfired spectacularly. She had been improving the structure of the ion flux converter tree, but something went terribly wrong and the flux chains exploded. Some of the ion flux converters survived the explosion intact, but others had their position labels blasted off. She's having her henchmen rebuild the ion flux converter tree by hand, but you think you can do it much more quickly - quickly enough, perhaps, to earn a promotion! Flux chains require perfect binary trees, so Lambda's design arranged the ion flux converters to form one. To label them, she performed a post-order traversal of the tree of converters and labeled each converter with the order of that converter in the traversal, starting at 1. For example, a tree of 7 converters would look like the following: 7 3 6 1 2 4 5 Write a function answer(h, q) - where h is the height of the perfect tree of converters and q is a list of positive integers representing different flux converters - which returns a list of integers p where each element in p is the label of the converter that sits on top of the respective converter in q, or -1 if there is no such converter. For example, answer(3, [1, 4, 7]) would return the converters above the converters at indexes 1, 4, and 7 in a perfect binary tree of height 3, which is [3, 6, -1]. The domain of the integer h is 1 <= h <= 30, where h = 1 represents a perfect binary tree containing only the root, h = 2 represents a perfect binary tree with the root and two leaf nodes, h = 3 represents a perfect binary tree with the root, two internal nodes and four leaf nodes (like the example above), and so forth. The lists q and p contain at least one but no more than 10000 distinct integers, all of which will be between 1 and 2^h-1, inclusive. Test case 1: Inputs: (int) h = 3 (int list) q = [7, 3, 5, 1] Output: (int list) [-1, 7, 6, 3] Test case 2: Inputs: (int) h = 5 (int list) q = [19, 14, 28] Output: (int list) [21, 15, 29] """ # Getting all 2^k-1, trains of 1s in binary # for k = 1, 2, 3, ..., h. trains = {n+1:(1<<(n+1))-1 for n in range(h)} def largest_train(x): length = int.bit_length(x) thetrain = trains[length] # In a lower level language one can # simultaneously check if x is a train # and get its bit length. Not sure # if it is worth it to write such loop # in Python. if x == thetrain: return thetrain return trains[length-1] def parent(x): # The longest train of 1s in # binary is the root. if x >= trains[h]: return -1 # Reducing # We subtract the largest train # smaller than the number until # we end up with nothing. diff = int(x) lp = largest_train(x) if x == lp: return (x<<1)+1 continuing = True while continuing: prev_lp = lp lp = largest_train(diff) diff -= lp continuing = not diff == 0 # If the last two trains subtracted # were equal, then we are on a right # child. This means that the parent # is one up. # Otherwise, we are on a left child. # This means that the parent is obtained # by adding the last train subtracted, # this gives the right sibling, and then # adding 1 to get the parent. if prev_lp == lp: return x + 1 else: return x + lp + 1 result = [parent(x) for x in q] return result
8d8694722c8a8d6dcf4aabad3d677fb059d252d9
3,633,583
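# Illustrative check (added example) using the test cases quoted in the
# docstring of Ion_Flux_Relabeling above.
assert Ion_Flux_Relabeling(3, [7, 3, 5, 1]) == [-1, 7, 6, 3]
assert Ion_Flux_Relabeling(5, [19, 14, 28]) == [21, 15, 29]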
def process_line(line, previous_state):
    """
    Read line, split it before opening brackets if not in quotes nor escaped
    and add '\n' at the end of new lines.
    """
    # an opening bracket and/or quote may have been left open on a previous line
    brackets = previous_state.brackets
    in_quotes = previous_state.in_quotes
    escaped = False
    processed_line = []
    start = 0
    for i in range(len(line)):
        if line[i] == '\\':
            escaped = not escaped
        elif line[i] == '\'':
            if escaped:
                escaped = False
            else:
                in_quotes = not in_quotes
        elif line[i] == '(':
            if not in_quotes:
                if brackets == 0:
                    # don't append an additional empty line
                    if i > 0:
                        processed_line.append(line[start:i] + "\n")
                    start = i
                brackets += 1
            if escaped:
                escaped = False
        elif line[i] == ')':
            if not in_quotes:
                if brackets > 0:
                    brackets -= 1
                else:
                    print("Warning: closing ')' without opening '('!")
                    print(line)
            if escaped:
                escaped = False
        # any character other than those handled above clears the escape flag
        elif escaped:
            escaped = False

    # append the remaining part
    processed_line.append(line[start:])
    current_state = State()
    current_state.in_quotes = in_quotes
    current_state.brackets = brackets
    return processed_line, current_state
24b997f61263563a67f58a65f2e070c1b21ee478
3,633,584
import numpy as np


def parse_tape6(tape6="TAPE6.OUT"):
    """Parses an ORIGEN 2.2 TAPE6.OUT file.

    Parameters
    ----------
    tape6 : str or file-like object
        Path or file to read the tape6 file from.

    Returns
    -------
    results : dict
        Dictionary of parsed values.

    Warnings
    --------
    This method currently only functions to extract neutronic data from TAPE6
    files. It does not yet parse out photonic data. If you would like to see
    this feature added, please contact the developers.

    Notes
    -----
    The results dictionary that is returned is highly structured and generally
    matches the layout of the TAPE6 file. Data is stored as 1d numpy float arrays
    which (if the TAPE6 is well-formed) will all be of the same length and match
    the time vector. The possible layout of results is as follows::

      |- 'time_sec': time per index in [seconds]
      |- 'flux': neutron flux at this time [n/cm^2/s]
      |- 'specific_power_MW': reactor specific power at this time [MW]
      |- 'burnup_MWD': reactor burnup since last time step [MWd/input mass [g] from TAPE4]
      |- 'k_inf': infinite multiplication factor [unitless]
      |- 'neutron_production_rate': Total reactor neutron production rate [n/s]
      |- 'neutron_destruction_rate': Total reactor neutron destruction rate [n/s]
      |- 'total_burnup': Cumulative burnup over all time [MWd/input mass [g] from TAPE4]
      |- 'average_flux': average neutron flux over preceding time interval [n/cm^2/s]
      |- 'average_specific_power': reactor specific power over preceding time interval [MW]
      |- 'materials': list of Materials of same length as 'time_sec', only present if
      |               'table_3' or 'table_5' exist and have 'nuclide' output.
      |- 'alpha_neutron_source': dict
      |  |- 'title': str
      |  |- 'units': str
      |  |- nuclide or element str: (alpha, n) neutron source [n/s]
      |- 'spont_fiss_neutron_source': dict
      |  |- 'title': str
      |  |- 'units': str
      |  |- nuclide or element str: spontaneous fission neutron source [n/s]
      |- 'table_{n}': dict
      |  |- 'nuclide': dict
      |  |  |- 'title': str
      |  |  |- 'units': str
      |  |  |- 'activation_products': dict of (nuc-zzaaam, data) pairs
      |  |  |- 'actinides': dict of (nuc-zzaaam, data) pairs
      |  |  |- 'fission_products': dict of (nuc-zzaaam, data) pairs
      |  |- 'element': dict
      |  |  |- 'title': str
      |  |  |- 'units': str
      |  |  |- 'activation_products': dict of (elem str, data) pairs
      |  |  |- 'actinides': dict of (elem str, data) pairs
      |  |  |- 'fission_products': dict of (elem str, data) pairs
      |  |- 'summary': dict
      |  |  |- 'title': str
      |  |  |- 'units': str
      |  |  |- 'activation_products': dict of (elem or nuc str, data) pairs
      |  |  |- 'actinides': dict of (elem or nuc str, data) pairs
      |  |  |- 'fission_products': dict of (elem or nuc str, data) pairs
    """
    # `basestring` and the module-level regexes/maps (_rx_bu_data_line etc.)
    # come from the surrounding module (Python 2 compatible)

    # Read the TAPE6 file
    opened_here = False
    if isinstance(tape6, basestring):
        tape6 = open(tape6, 'r')
        opened_here = True

    lines = tape6.readlines()

    if opened_here:
        tape6.close()

    # Prep to parse the file
    results = {}

    # Defaults
    table_key = None
    table_type = None
    table_group = None

    # Read in the file line-by-line
    for i, line in enumerate(lines):
        # Get reactivity and burnup data
        m = _rx_bu_data_line.match(line)
        if m is not None:
            key, data = m.groups()
            new_key = _rx_bu_key_map[key]
            arr_data = np.array(data.split(), dtype=float)
            curr_data = results.get(new_key, [])
            results[new_key] = np.append(curr_data, arr_data)
            continue

        # Get table species group
        m = _species_group_line.match(line)
        if m is not None:
            table_group = _group_key_map[m.group(1)]
            continue

        # Get table header info
        m = _table_header_line.match(line) or _table_header_alpha_line.match(line)
        if m is not None:
            tnum, ttype, ttitle, tunits = m.groups()

            table_key = "table_{0}".format(tnum)
            if table_key not in results:
                results[table_key] = {}

            table_type = ttype.lower()
            if table_type not in results[table_key]:
                results[table_key][table_type] = {}

            results[table_key][table_type]["title"] = ttitle.strip().lower()
            results[table_key][table_type]["units"] = tunits.strip().lower()
            if table_group not in results[table_key][table_type]:
                results[table_key][table_type][table_group] = {}
            continue

        # Grab nuclide data lines
        m = _nuclide_line.match(line)
        if (m is not None) and (table_key is not None):
            nuc, data = m.groups()
            nuc_name = nuc.replace(' ', '')

            # Don't know WTF element 'SF' is supposed to be!
            # (Spent fuel, spontaneous fission)
            if nuc_name == 'SF250':
                continue

            nuc_zz = nucname.zzaaam(nuc_name)
            nuc_key = nuc_zz if table_type == 'nuclide' else nuc_name
            nuc_data = np.array(data.split(), dtype=float)

            if table_key.startswith('table_'):
                curr_data = results[table_key][table_type][table_group].get(nuc_key, [])
                results[table_key][table_type][table_group][nuc_key] = np.append(curr_data, nuc_data)
            else:
                curr_data = results[table_key].get(nuc_key, [])
                results[table_key][nuc_key] = np.append(curr_data, nuc_data)
            continue

        # Grab element data lines
        m = _element_line.match(line)
        if (m is not None) and (table_key is not None):
            elem, data = m.groups()
            elem = elem.replace(' ', '')

            # Still don't know WTF element 'SF' is supposed to be!
            # (Spent fuel, spontaneous fission)
            if elem == 'SF':
                continue

            elem_data = np.array(data.split(), dtype=float)

            if table_key.startswith('table_'):
                curr_data = results[table_key][table_type][table_group].get(elem, [])
                results[table_key][table_type][table_group][elem] = np.append(curr_data, elem_data)
            else:
                curr_data = results[table_key].get(elem, [])
                results[table_key][elem] = np.append(curr_data, elem_data)
            continue

        # Grab (alpha, n) and spontaneous fission headers
        m = _alpha_n_header_line.match(line) or _spont_fiss_header_line.match(line)
        if m is not None:
            ttitle, tunits = m.groups()

            table_key = _n_source_key_map[ttitle]
            if table_key not in results:
                results[table_key] = {}

            table_type = None
            table_group = None

            results[table_key]["title"] = ttitle.strip().lower()
            results[table_key]["units"] = tunits.strip().lower()
            continue

        # Photon spectra parsing is not yet supported
        m = _photon_spec_header_line.match(line)
        if m is not None:
            table_key = None
            table_type = None
            table_group = None

    # Done with parsing, try to convert to material
    tbl = None
    if ('table_5' in results) and ('nuclide' in results['table_5']):
        tbl = 'table_5'
        mat_gen = Material
    elif ('table_3' in results) and ('nuclide' in results['table_3']):
        tbl = 'table_3'
        mat_gen = from_atom_frac

    if tbl is not None:
        T = len(results['time_sec'])
        mats = [Material() for _ in range(T)]

        for grp in _group_key_map.values():
            if grp in results[tbl]['nuclide']:
                mats = [m + mat_gen(dict([(nuc, arr[i]) for nuc, arr in
                                          results[tbl]['nuclide'][grp].items()]))
                        for i, m in enumerate(mats)]

        results['materials'] = mats

    return results
5082ee35ce8198db680c0b7be86d703c4c349402
3,633,585
def property_values_to_string(pv, extra_indentation=0):
    """
    Parameters
    ----------
    pv : list
        Flat sequence of alternating property names and values:
        [key0, value0, key1, value1, ...]
    extra_indentation : int
        Extra spaces to prepend to every key.
    """
    keys = pv[::2]
    values = pv[1::2]
    # _Quotes is a module-level marker type for values that should be
    # rendered inside double quotes
    values = ['"%s"' % x if isinstance(x, _Quotes) else x for x in values]

    # right-align the keys to the longest one
    key_lengths = [len(x) for x in keys]
    max_key_length = max(key_lengths) + extra_indentation
    space_padding = [max_key_length - x for x in key_lengths]
    key_display_strings = [' ' * x + y for x, y in zip(space_padding, keys)]

    out = u''
    for (key, value) in zip(key_display_strings, values):
        out += '%s: %s\n' % (key, value)

    return out
0a8f5b188f74d1779c871a843eb7394631162fc4
3,633,586
from argparse import ArgumentParser, FileType


def parse_cmdline():
    """parse command line arguments"""
    # QCDT_VERSION, PAGE_SIZE_DEF and QCDT_DT_TAG are module-level constants
    parser = ArgumentParser(
        description="dtbTool version " + str(QCDT_VERSION))

    parser.add_argument("input_dir",
                        help="Input directory")

    parser.add_argument("-o", "--output-file", type=FileType('wb'),
                        required=True,
                        help="Output file")

    parser.add_argument("-p", "--dtc-path", default="",
                        help="path to dtc")

    parser.add_argument("-s", "--page-size", default=PAGE_SIZE_DEF, type=int,
                        help="page size in bytes")

    parser.add_argument("-d", "--dt-tag", default=QCDT_DT_TAG,
                        help="alternate QCDT_DT_TAG")

    parser.add_argument("-2", "--force-v2", action="store_true",
                        help="output dtb v2 format")

    parser.add_argument("-3", "--force-v3", action="store_true",
                        help="output dtb v3 format")

    return parser.parse_args()
4f2cf506c4463b19859403de0aac0707d4ad5050
3,633,587
def __build_vocab(nlp, datasets): """ Generates the encoder vocabulary (natural language tokens), decoder vocabulary (programming language tokens) and stack vocabulary (terminal and non-terminal symbols, tokens) by parsing each source and target example in each split. :param nlp: nl processing and parsing utils. :param datasets: all dataset splits with source and target samples. """ inp_i2w, inp_w2i = __input_vocab(nlp, datasets) out_i2w, out_w2i = __output_vocab(nlp, datasets) vocab_dicts = { 'src': { 'i2w': inp_i2w, 'w2i': inp_w2i, }, 'tgt': { 'i2w': out_i2w, 'w2i': out_w2i, } } src_vocab = Vocab(vocab_dicts['src']) tgt_vocab = Vocab(vocab_dicts['tgt']) vocab = { 'src': src_vocab, 'tgt': tgt_vocab } nlp.collect_tokens(vocab) stack_i2w, stack_w2i = __stack_vocab(nlp) vocab_dicts.update({'stack': { 'i2w': stack_i2w, 'w2i': stack_w2i }}) stack_vocab = Vocab({ 'i2w': stack_i2w, 'w2i': stack_w2i }) op_i2w, op_w2i = __operator_vocab(nlp, tgt_vocab) vocab_dicts.update({'operator': { 'i2w': op_i2w, 'w2i': op_w2i }}) op_vocab = Vocab({ 'i2w': op_i2w, 'w2i': op_w2i }) vocab.update({'stack': stack_vocab}) vocab.update({'operator': op_vocab}) return vocab, vocab_dicts
d162169874a1f82779641615658aa5be52aafb81
3,633,588
import os


def file_contains_exact_text(filename, text):
    """Returns True iff the file exists and it already contains the given text."""
    if not os.path.isfile(filename):
        return False
    with open(filename, "r") as infile:
        intext = infile.read()
    return text == intext
49bbb86c30a5df5d41e78cd64ddb58d44eaaf899
3,633,589
from typing import Optional from typing import Dict def get_profile(key: str) -> Optional[Dict]: """Fetch user profile. Arguments: --------- key: User's database key. Returns: --------- Profile dictionary if exists else None. """ return BASE_PROFILE.get(key=key)
0bf1d86707b14735afb6b832d41b63a25650985e
3,633,590
from typing import Optional


def rectangle_centered(
    w: float = 1, h: float = 1, x: Optional[float] = None, y: Optional[float] = None, layer: int = 0
) -> Component:
    """ a rectangle of size (w, h) centered at the origin, in layer

    x and y were poorly named and have been replaced with w and h; they are
    kept for now as deprecated aliases for backwards compatibility.

    .. plot::
      :include-source:

      import pp

      c = pp.c.rectangle_centered(w=1, h=1, layer=0)
      pp.plotgds(c)
    """
    c = pp.Component()
    if x:
        w = x
    if y:
        h = y

    points = [
        [-w / 2.0, -h / 2.0],
        [-w / 2.0, h / 2],
        [w / 2, h / 2],
        [w / 2, -h / 2.0],
    ]
    c.add_polygon(points, layer=layer)
    return c
a61c07a87c6b1a6d347ddc104863657eafd305ad
3,633,591
from datetime import datetime, timedelta


def sched_time_to_dt(timeStr, targetDate):
    """Converts a GTFS schedule time string to a datetime

    Note that a GTFS time may be more than 24 hours, in which case the
    function removes 24 from the hours part of the time and increases the
    date part of the datetime by 1

    Args:
        timeStr: time part of the datetime object
        targetDate: date part of the datetime object
    """
    if targetDate is None:
        targetDate = datetime.today()
    tkns = timeStr.split(':')
    h = int(tkns[0])
    if h > 23:
        h -= 24
        delta = timedelta(days=1)
    else:
        delta = timedelta(days=0)
    dt = datetime(
        year=targetDate.year,
        month=targetDate.month,
        day=targetDate.day,
        hour=h,
        minute=int(tkns[1]),
        second=int(tkns[2])
    ) + delta
    return dt
9fb43c4e19d050480649b39f1ac9a79f617338b7
3,633,592
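# Illustrative usage (added example): GTFS allows hours >= 24 for trips that
# run past midnight; such times roll over to the next calendar day.
print(sched_time_to_dt('25:30:00', datetime(2021, 6, 1)))
# 2021-06-02 01:30:00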
def get_arxiv_csl(*, arxiv_id): """ Generate a CSL Item for an unversioned arXiv identifier using arXiv's OAI_PMH v2.0 API <https://arxiv.org/help/oa>. This endpoint does not support versioned `arxiv_id`. """ # XML namespace prefixes ns_oai = "{http://www.openarchives.org/OAI/2.0/}" ns_arxiv = "{http://arxiv.org/OAI/arXiv/}" xml_tree = query_arxiv_api( url="https://export.arxiv.org/oai2", params={ "verb": "GetRecord", "metadataPrefix": "arXiv", "identifier": f"oai:arXiv.org:{arxiv_id}", }, timeout=5, ) # Extract parent XML elements (header_elem,) = xml_tree.findall( f"{ns_oai}GetRecord/{ns_oai}record/{ns_oai}header" ) (metadata_elem,) = xml_tree.findall( f"{ns_oai}GetRecord/{ns_oai}record/{ns_oai}metadata" ) (arxiv_elem,) = metadata_elem.findall(f"{ns_arxiv}arXiv") # Set identifier fields response_arxiv_id = arxiv_elem.findtext(f"{ns_arxiv}id") if arxiv_id != response_arxiv_id: raise ValueError( f"arXiv oai2 query returned a different arxiv_id:" f" {arxiv_id} became {response_arxiv_id}" ) csl_item = { "id": arxiv_id, "URL": f"https://arxiv.org/abs/{arxiv_id}", "number": arxiv_id, "container-title": "arXiv", "publisher": "arXiv", "type": "manuscript", } # Set title and date title = arxiv_elem.findtext(f"{ns_arxiv}title") if title: csl_item["title"] = " ".join(title.split()) datestamp = header_elem.findtext(f"{ns_oai}datestamp") date_parts = date_to_date_parts(datestamp) if date_parts: csl_item["issued"] = {"date-parts": [date_parts]} # Extract authors author_elems = arxiv_elem.findall(f"{ns_arxiv}authors/{ns_arxiv}author") authors = list() for author_elem in author_elems: author = {} given = author_elem.findtext(f"{ns_arxiv}forenames") family = author_elem.findtext(f"{ns_arxiv}keyname") if given: author["given"] = given if family: author["family"] = family authors.append(author) csl_item["author"] = authors abstract = arxiv_elem.findtext(f"{ns_arxiv}abstract") if abstract: csl_item["abstract"] = ( abstract.replace("\n", " ").replace("\r", "").strip() ) license = arxiv_elem.findtext(f"{ns_arxiv}license") if license: csl_item["license"] = ( license.replace("\n", " ").replace("\r", "").strip() ) doi = arxiv_elem.findtext(f"{ns_arxiv}doi") if doi: csl_item["DOI"] = doi return csl_item
54fcf1df4b6963a95788a1cdf3306d583fe0b8e6
3,633,593
import math
from numbers import Number
from typing import Mapping, Sequence

import numpy as np


def _equivalent_data_structures(reference, struct_2):
    """Compare arbitrary data structures for equality.

    ``reference`` is expected to be the reference data structure. Cannot
    handle set like data structures.
    """
    if isinstance(reference, np.ndarray):
        return np.allclose(reference, struct_2)
    if isinstance(reference, Mapping):
        # if the non-reference value does not have all the keys
        # we don't check for the exact same keys, since some values may have
        # defaults.
        if set(reference.keys()) - set(struct_2.keys()):
            return False
        return all(
            _equivalent_data_structures(reference[key], struct_2[key])
            for key in reference)
    if isinstance(reference, Sequence):
        if len(reference) != len(struct_2):
            return False
        return all(
            _equivalent_data_structures(value_1, value_2)
            for value_1, value_2 in zip(reference, struct_2))
    if isinstance(reference, Number):
        return math.isclose(reference, struct_2)
    # unhandled types fall through and return None (falsy)
57ecaa315a1f9a516b4ac2c2528f0f207fffab6f
3,633,594
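# Illustrative usage (added example): extra keys on the non-reference side
# are tolerated (they may carry defaults), while numeric leaves are compared
# with tolerances.
ref = {'w': np.array([1.0, 2.0]), 'opts': [1, 2.0]}
other = {'w': np.array([1.0, 2.0]), 'opts': [1, 2.0], 'extra': 'ignored'}
assert _equivalent_data_structures(ref, other)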
import cv2
import numpy as np


def _find_line_bounding_boxes(line_segmentation: np.ndarray) -> np.ndarray:
    """Given a line segmentation, find bounding boxes for connected-component regions corresponding to non-0 labels."""

    def _find_line_bounding_boxes_in_channel(line_segmentation_channel: np.ndarray) -> np.ndarray:
        line_activation_image = cv2.dilate(line_segmentation_channel, kernel=np.ones((3, 3)), iterations=1)
        line_activation_image = (line_activation_image * 255).astype("uint8")
        line_activation_image = cv2.threshold(line_activation_image, 0.5, 1, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
        bounding_cnts, _ = cv2.findContours(line_activation_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        return np.array([cv2.boundingRect(cnt) for cnt in bounding_cnts])

    bboxes_xywh = np.concatenate(
        [_find_line_bounding_boxes_in_channel(line_segmentation[:, :, i]) for i in [1, 2]], axis=0
    )
    # sort boxes top-to-bottom by their y coordinate
    return bboxes_xywh[np.argsort(bboxes_xywh[:, 1])]
f0a41d5b569601db6eaa4ca061bddbfe6b6c88fa
3,633,595
import numpy as np


def split_items(items, num_groups):
    """Splits a list of items into ``num_groups`` groups fairly (i.e. every
    item is assigned to exactly one group and no group is more than one item
    larger than any other)."""
    per_set = len(items) / float(num_groups)
    assert per_set >= 1, "At least one set will be empty"
    small = int(np.floor(per_set))
    big = small + 1
    # the remainder modulo *num_groups* is how many groups must take an extra
    # item (the original `len(items) % small` under-counted whenever
    # small != num_groups and tripped the allocation assertion below)
    num_oversized = len(items) % num_groups

    rv_items = []
    total_allocated = 0
    for i in range(num_groups):
        if i < num_oversized:
            group = items[total_allocated:total_allocated + big]
            total_allocated += big
        else:
            group = items[total_allocated:total_allocated + small]
            total_allocated += small
        rv_items.append(group)

    assert total_allocated == len(items), "Did not assign exactly 100% of " \
                                          "items to a group"
    assert len(rv_items) == num_groups, "Wrong number of groups"
    return rv_items
0e5af3c5d3e394b328bef63b5287bd673fef0241
3,633,596
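# Illustrative check (added example): 10 items over 4 groups come out as
# sizes [3, 3, 2, 2] — the case the original modulo computation tripped on.
groups = split_items(list(range(10)), 4)
assert [len(g) for g in groups] == [3, 3, 2, 2]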
from typing import OrderedDict import json def generate_tool_flow(tool: GladierBaseTool, modifiers): """Generate a flow definition for a Gladier Tool based on the defined ``funcx_functions``. Accepts modifiers for funcx functions""" flow_moder = FlowModifiers([tool], modifiers, cls=tool) flow_states = OrderedDict() for fx_func in tool.funcx_functions: fx_state = generate_funcx_flow_state(fx_func) flow_states.update(fx_state) if not flow_states: raise FlowGenException(f'Tool {tool} has no flow states. Add a list of python functions ' f'as "{tool}.funcx_functions = [myfunction]" or set a custom flow ' f'definition instead using `{tool}.flow_definition = mydef`') flow_def = ToolChain.combine_flow_states(flow_states, flow_comment=tool.__doc__) flow_def = flow_moder.apply_modifiers(flow_def) return json.loads(json.dumps(flow_def))
1ae457676ee2bfaa872f237036261bbfbdc644fb
3,633,597
from matplotlib import pyplot


def paralellLines(M, axis=1, labels=(), interactive=True, title="", show=True):
    """
    Makes an optionally interactive parallel lines plot.

    M: Matrix to visualise.
    axis: Axis of data values to plot. Needs to be either 1 or 0. Defaults to 1.
    labels: Labels of the axes to plot.
    interactive: Determines if the plot is interactive. Defaults to True.
    title: Title of the plot to make. Defaults to "".
    show: Should the figure be shown immediately. Defaults to True.
    """
    # ensure axis is of the right type.
    if axis != 1 and axis != 0:
        raise ValueError("Axis must be 0 or 1")
    if axis == 0:
        # the original recursive call passed an undefined `components` kwarg
        # and dropped `interactive`; both are fixed here
        return paralellLines(M.T, axis=1, labels=labels, interactive=interactive, title=title, show=show)

    # do we have enough labels for all components?
    if M.shape[0] != len(labels):
        raise ValueError("Not enough labels for all components. ")

    # make a figure with a title.
    fig = pyplot.figure()
    fig.suptitle(title)
    # and an axis
    ax = fig.add_subplot(111)

    xs = range(M.shape[0])
    ys = range(M.shape[1])

    # plot all the lines.
    for y in ys:
        ax.plot(xs, M[:, y], 'k-', picker=5)

    xlim = ax.get_xlim()
    ylim = ax.get_ylim()
    zorder = ax.get_zorder()

    # add a bunch of new axes
    for (x, l) in zip(xs, labels):
        # add another axis.
        newaxis = ax.twinx()
        zorder = max(zorder, newaxis.get_zorder())

        # set its x and y limits
        newaxis.set_xlim(xlim)
        newaxis.set_ylim(ylim)

        # set the color
        newaxis.spines['left'].set_color('none')
        newaxis.spines['right'].set_position(('data', x))

        # hide the x axis
        newaxis.get_xaxis().set_visible(False)

        # and add a label
        newaxis.text(x, ylim[1], l, horizontalalignment='right', verticalalignment='top', rotation='horizontal')

    # Hide all the other axes.
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

    # clear that plot (removing the lines one by one also works on newer
    # matplotlib versions where `ax.lines` is read-only)
    for ln in list(ax.lines):
        ln.remove()

    # add a new plot on top
    finalaxis = ax.twinx()
    # set the xaxis and yaxis
    finalaxis.set_xlim(xlim)
    finalaxis.set_ylim(ylim)
    # hide the splines
    finalaxis.get_xaxis().set_visible(False)
    finalaxis.get_yaxis().set_visible(False)

    # add all the lines (again)
    for y in ys:
        finalaxis.plot(xs, M[:, y], 'k-', picker=5 if interactive else False, zorder=1)

    # add interactivity
    def onclick(event):
        thisline = event.artist
        if thisline.get_color() == 'red':
            for l in finalaxis.lines:
                l.set_color('k')
                l.set_zorder(1)
        else:
            for l in finalaxis.lines:
                l.set_color('gray')
                l.set_zorder(1)
            thisline.set_color('red')
            thisline.set_zorder(2)
        pyplot.draw()

    if interactive:
        fig.canvas.mpl_connect('pick_event', onclick)

    # show it if we want to.
    if show:
        pyplot.show()

    # return the figure.
    return fig
a3ff1f1faa0e1704df24aac0024330458b037027
3,633,598
import types import pandas def hpat_pandas_series_dropna(self, axis=0, inplace=False): """ Intel Scalable Dataframe Compiler User Guide ******************************************** Pandas API: pandas.Series.dropna Limitations ----------- - Parameter ``inplace`` is currently unsupported by Intel Scalable Dataframe Compiler Examples -------- .. literalinclude:: ../../../examples/series/series_dropna.py :language: python :lines: 34- :caption: Return a new Series with missing values removed. :name: ex_series_dropna .. command-output:: python ./series/series_dropna.py :cwd: ../../../examples .. seealso:: :ref:`Series.isna <pandas.Series.isna>` Indicate missing values. :ref:`Series.notna <pandas.Series.notna>` Indicate existing (non-missing) values. :ref:`Series.fillna <pandas.Series.fillna>` Replace missing values. :ref:`DataFrame.dropna <pandas.DataFrame.dropna>` Drop rows or columns which contain NA values. `pandas.absolute <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Index.dropna.html#pandas.Index.dropna>`_ Return Index without NA/NaN values Intel Scalable Dataframe Compiler Developer Guide ************************************************* Pandas Series method :meth:`pandas.Series.dropna` implementation. .. only:: developer Tests: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_dropna* """ _func_name = 'Method dropna().' ty_checker = TypeChecker(_func_name) ty_checker.check(self, SeriesType) if not (isinstance(axis, (types.Integer, types.StringLiteral, types.UnicodeType, types.Omitted)) or axis == 0): ty_checker.raise_exc(axis, 'int or str', 'axis') if not (inplace is False or isinstance(inplace, types.Omitted)): ty_checker.raise_exc(inplace, 'bool', 'inplace') if isinstance(self.data.dtype, types.Number) and isinstance(self.index, (types.Number, types.NoneType)): def hpat_pandas_series_dropna_impl(self, axis=0, inplace=False): index = self.index return numpy_like.dropna(self._data, index, self._name) return hpat_pandas_series_dropna_impl else: def hpat_pandas_series_dropna_str_impl(self, axis=0, inplace=False): # generate Series index if needed by using SeriesType.index (i.e. not self._index) na_data_arr = sdc.hiframes.api.get_nan_mask(self._data) data = self._data[~na_data_arr] index = self.index[~na_data_arr] return pandas.Series(data, index, self._name) return hpat_pandas_series_dropna_str_impl
9389f3cb90d22435133f04b3c48f761c65eceee3
3,633,599