content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
def vectorize(sentence, idf_weight, vocab, convey='idf'):
    """Turn a token sequence into a dense bag-of-words vector.

    idf_weight: {word: weight}
    vocab: {word: index}
    convey: 'idf' accumulates idf weights per occurrence,
            'count' accumulates raw occurrence counts.
    Out-of-vocabulary tokens are silently skipped.
    """
    vec = np.zeros(len(vocab), dtype=np.float32)
    for token in sentence:
        try:
            slot = vocab[token]
        except KeyError:
            # token not in the vocabulary: ignore it
            continue
        if convey == 'idf':
            vec[slot] += idf_weight[token]
        elif convey == 'count':
            vec[slot] += 1
        else:
            raise NotImplementedError
    return vec
a245bfb82be2193dabb219fd24fd8cf035d3a1a9
3,636,400
import re


async def director_v2_service_mock(
    aioresponses_mocker: AioResponsesMock,
) -> AioResponsesMock:
    """mocks responses of director-v2

    Registers aioresponses patterns for the director-v2 computation,
    project-network and cluster endpoints so tests never reach the real
    service.  NOTE(review): declared ``async`` but contains no ``await`` —
    presumably to match the fixture conventions of the suite; confirm.
    """
    # computations
    create_computation_pattern = re.compile(
        r"^http://[a-z\-_]*director-v2:[0-9]+/v2/computations$"
    )
    get_computation_pattern = re.compile(
        r"^http://[a-z\-_]*director-v2:[0-9]+/v2/computations/.*$"
    )
    stop_computation_pattern = re.compile(
        r"^http://[a-z\-_]*director-v2:[0-9]+/v2/computations/.*:stop$"
    )
    # NOTE(review): delete reuses the broad "get" pattern, so it also matches
    # ":stop" URLs — confirm that is intended.
    delete_computation_pattern = get_computation_pattern
    projects_networks_pattern = re.compile(
        r"^http://[a-z\-_]*director-v2:[0-9]+/v2/dynamic_services/projects/.*/-/networks$"
    )
    aioresponses_mocker.post(
        create_computation_pattern,
        callback=create_computation_cb,
        status=web.HTTPCreated.status_code,
        repeat=True,
    )
    aioresponses_mocker.post(
        stop_computation_pattern,
        status=web.HTTPAccepted.status_code,
        repeat=True,
    )
    aioresponses_mocker.get(
        get_computation_pattern,
        status=web.HTTPAccepted.status_code,
        callback=get_computation_cb,
        repeat=True,
    )
    aioresponses_mocker.delete(delete_computation_pattern, status=204, repeat=True)
    aioresponses_mocker.patch(projects_networks_pattern, status=204, repeat=True)
    # clusters
    # NOTE(review): built but never registered below — looks like dead code;
    # confirm before removing.
    cluster_route_pattern = re.compile(
        r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters(/[0-9]+)?\?(\w+(?:=\w+)?\&?){1,}$"
    )
    aioresponses_mocker.post(
        re.compile(
            r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters\?(\w+(?:=\w+)?\&?){1,}$"
        ),
        callback=create_cluster_cb,
        status=web.HTTPCreated.status_code,
        repeat=True,
    )
    aioresponses_mocker.get(
        re.compile(
            r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters\?(\w+(?:=\w+)?\&?){1,}$"
        ),
        callback=list_clusters_cb,
        status=web.HTTPCreated.status_code,
        repeat=True,
    )
    aioresponses_mocker.get(
        re.compile(
            r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters(/[0-9]+)\?(\w+(?:=\w+)?\&?){1,}$"
        ),
        callback=get_cluster_cb,
        status=web.HTTPCreated.status_code,
        repeat=True,
    )
    aioresponses_mocker.get(
        re.compile(
            r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters/[0-9]+/details\?(\w+(?:=\w+)?\&?){1,}$"
        ),
        callback=get_cluster_details_cb,
        status=web.HTTPCreated.status_code,
        repeat=True,
    )
    aioresponses_mocker.patch(
        re.compile(
            r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters(/[0-9]+)\?(\w+(?:=\w+)?\&?){1,}$"
        ),
        callback=patch_cluster_cb,
        status=web.HTTPCreated.status_code,
        repeat=True,
    )
    aioresponses_mocker.delete(
        re.compile(
            r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters(/[0-9]+)\?(\w+(?:=\w+)?\&?){1,}$"
        ),
        status=web.HTTPNoContent.status_code,
        repeat=True,
    )
    aioresponses_mocker.post(
        re.compile(r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters:ping$"),
        status=web.HTTPNoContent.status_code,
        repeat=True,
    )
    aioresponses_mocker.post(
        re.compile(
            r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters(/[0-9]+):ping\?(\w+(?:=\w+)?\&?){1,}$"
        ),
        status=web.HTTPNoContent.status_code,
        repeat=True,
    )
    return aioresponses_mocker
2b09e504fc3d1520be220dc81fc403ef63b1e797
3,636,401
def test_validation_unset_type_hints():
    """Test that unset type hints are handled correctly (and treated as Any)."""

    @my_registry.optimizers("test_optimizer.v2")
    def test_optimizer_v2(rate, steps: int = 10) -> None:
        return None

    # `rate` carries no annotation, so resolve() must accept the float
    # without a validation error (it is treated as Any).
    config = {"test": {"@optimizers": "test_optimizer.v2", "rate": 0.1, "steps": 20}}
    my_registry.resolve(config)
d4816ee06e05fb2332f35a60f336dd1bf75eb2bd
3,636,402
def get_online_featurestore_connector(featurestore=None):
    """
    Gets a JDBC connector for the online feature store

    Args:
        :featurestore: the feature store name; defaults to the project's
                       own feature store when None

    Returns:
        a DTO object of the JDBC connector for the online feature store
    """
    if featurestore is None:
        featurestore = project_featurestore()
    try:
        # try with metadata cache; refresh first when the module default
        # asks for updated metadata
        if update_cache_default:
            core._get_featurestore_metadata(featurestore, update_cache=True)
        return core._do_get_online_featurestore_connector(
            featurestore,
            core._get_featurestore_metadata(
                featurestore, update_cache=update_cache_default
            ),
        )
    except Exception:
        # fix: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed; retry once with refreshed
        # metadata, which is the intended best-effort behavior
        return core._do_get_online_featurestore_connector(
            featurestore,
            core._get_featurestore_metadata(featurestore, update_cache=True),
        )
ada51764b823c0959571ea928d6bddc6b2dbee7b
3,636,403
def create_multipoint_geometry(u, v, osr_spref):
    """
    wrapper; creates multipoint geometry in given projection

    Parameters
    ----------
    u : list of numbers
        input coordinates ("Rechtswert")
    v : list of numbers
        input coordinates ("Hochwert"); must be the same length as `u`
    osr_spref : OGRSpatialReference
        spatial reference of given coordinates

    Returns
    -------
    OGRGeometry
        a geometry holding all points defined by (u, v)
    """
    point_geom = ogr.Geometry(ogr.wkbMultiPoint)
    point_geom.AssignSpatialReference(osr_spref)
    # zip pairs the coordinate lists directly instead of indexing by
    # position (note: a shorter `v` is now truncated rather than raising
    # IndexError; the docstring requires equal lengths anyway)
    for easting, northing in zip(u, v):
        point = ogr.Geometry(ogr.wkbPoint)
        point.AddPoint(easting, northing, 0)
        point_geom.AddGeometry(point)
    return point_geom
cea52b749e7e60a9fea192fd5ff288bced7be388
3,636,404
def open_tif_image(input_path):
    # type: (str) -> np.array
    """Open a .tif image file.

    Parameters:
        input_path (string): path where the image file is located.

    Returns:
        np.array of the tif image.
    """
    # delegate decoding to scikit-image's tifffile plugin
    return skimage.io.imread(input_path, plugin="tifffile")
ab834b5b2ab983cab3f79dd1fc1acfe1394df50b
3,636,405
import requests


def get_icd(url: str) -> requests.Response:
    """Get an ICD API endpoint and return the raw HTTP response."""
    headers = get_icd_api_headers()
    return requests.get(url, headers=headers)
2a7ce491004cd0b69e7d988e8aaca56b4130b261
3,636,406
def evaluate(words,labels_pred, labels): """ labels_pred, labels, words: are sent-level list eg: words --> [[i love shanghai],[i love u],[i do not know]] words,pred, right: is a sequence, is label index or word index. Evaluates performance on test set """ # true_tags = ['PER', 'LOC', 'ORG', 'PERSON', 'person', 'loc', 'company'] accs = [] correct_preds, total_correct, total_preds = 0., 0., 0. for lab, lab_pred, word_sent in zip(labels, labels_pred, words): accs += [a == b for (a, b) in zip(lab, lab_pred)] lab_chunks = set(get_chunks(lab)) lab_pred_chunks = set(get_chunks(lab_pred)) correct_preds += len(lab_chunks & lab_pred_chunks) total_preds += len(lab_pred_chunks) total_correct += len(lab_chunks) p = correct_preds / total_preds if correct_preds > 0 else 0 r = correct_preds / total_correct if correct_preds > 0 else 0 f1 = 2 * p * r / (p + r) if correct_preds > 0 else 0 acc = np.mean(accs) return acc, f1, p, r
d1c98e5c7cbe94fdc5bd502e6ea33673cead1a7d
3,636,407
import string


def getApproximateArialStringWidth(st: str) -> float:
    """Calculate rough width of a word in a variable width font.

    By https://stackoverflow.com/users/234270/speedplane

    Args:
        st (str): The string you need a width for

    Returns:
        float: The rough width in picas

    To make sure that the names will fit in the space, at the given font
    size etc., if the space can fit 13 M chars across, then
    getApproximateArialStringWidth("M"*13) gives 10.53, so the space is
    10 picas wide, and we can exclude wider names.
    """
    # (character class, width in milli-inches); checked in order,
    # first match wins, unknown characters fall back to 50
    width_classes = (
        ("lij|' ", 37),
        ("![]fI.,:;/\\t", 50),
        ('`-(){}r"', 60),
        ("*^zcsJkvxy", 85),
        ("aebdhnopqug#$L+<>=?_~FZT" + string.digits, 95),
        ("BSPEAKVXY&UwNRCHD", 112),
        ("QGOMm%W@", 135),
    )
    milinches = 0
    for ch in st:
        for chars, width in width_classes:
            if ch in chars:
                milinches += width
                break
        else:
            milinches += 50
    return milinches * 6 / 1000.0
d37cc49e4ffd347ddace5de1d420bc8c3c37b615
3,636,408
def prepare_text(input_string):
    """Converts an input string into a list containing strings.

    Parameters
    ----------
    input_string : string
        String to convert to a list of string.

    Returns
    -------
    out_list : list
        Lower-cased, whitespace-delimited tokens of the input string.
    """
    # lower-case first, then split on any run of whitespace
    return input_string.lower().split()
ddf060728127380ef3ec689f7ee8104b9c12ebea
3,636,409
def TDF_Tool_TagList(*args):
    """
    * Returns the entry of <aLabel> as list of integers in <aTagList>.

    :param aLabel:
    :type aLabel: TDF_Label &
    :param aTagList:
    :type aTagList: TColStd_ListOfInteger &
    :rtype: void

    * Returns the entry expressed by <anEntry> as list of integers in <aTagList>.

    :param anEntry:
    :type anEntry: TCollection_AsciiString &
    :param aTagList:
    :type aTagList: TColStd_ListOfInteger &
    :rtype: void
    """
    # thin SWIG wrapper: overload resolution between the two documented
    # signatures happens inside the native _TDF layer
    return _TDF.TDF_Tool_TagList(*args)
92cc1ffb20dad5bd0d49c818cae899fdbc9fafc0
3,636,410
import re


def img(header, body=None):
    """Alternate to Markdown's image tag.

    See http://octopress.org/docs/plugins/image-tag/ for usage.

    Returns an ``<img ...>`` HTML string built from the parsed tag
    attributes, or an error message when the header does not match the
    expected syntax.
    """
    attrs = re.match(__img_re, header).groupdict()
    m = re.match(__img_re_title, attrs['title'])
    if m:
        # explicit "title" ["alt"] pair supplied in the tag
        attrs['title'] = m.groupdict()['title']
        attrs['alt'] = m.groupdict()['alt']
    elif 'title' in attrs:
        # fix: the HTML entity for '"' is '&#34;' — the original dropped
        # the trailing semicolon, emitting invalid markup
        attrs['alt'] = attrs['title'].replace('"', '&#34;')
    if 'class' in attrs:
        attrs['class'] = attrs['class'].replace('"', '')
    if attrs:
        return '<img ' + ' '.join('%s="%s"' % (k, v)
                                  for k, v in iteritems(attrs) if v) + ' />'
    return ("Error processing input, expected syntax: "
            "{% img [class name(s)] [http[s]:/]/path/to/image [width [height]] "
            "[title text | \"title text\" [\"alt text\"]] %}")
8745d00f576bb24d94dbeff465be9f2d82388034
3,636,411
def conexao_bd(): """ Função que se conecta a um banco de dados MySQL """ # Pedido de senha caso o acesso ao BD necessite # caso contrario so dar ENTER conexao = sql.connect( host='localhost', user='root', password=senha ) cursor = conexao.cursor() return cursor
2db6a7aaef02639d506c4ec393851abc3c9f278d
3,636,412
def fill_bin_content(ax, sens, energy_bin, gb, tb):
    """Annotate every (gammaness, theta2) cell of a sensitivity plot.

    Parameters
    ----------
    ax : matplotlib axes to annotate
    sens : 3-D array-like of sensitivities, indexed [energy_bin][i][j]
    energy_bin : int
        Index of the energy bin to display.
    gb : int
        Number of gammaness bins.
    tb : int
        Number of theta2 bins.

    Returns
    -------
    ax : the annotated axes
    """
    for i in range(gb):
        for j in range(tb):
            # cell-centre coordinates; constants presumably mirror the
            # binning used upstream (theta2 in [0.005, 0.05]) — confirm
            theta2 = 0.005 + 0.005 / 2 + ((0.05 - 0.005) / tb) * j
            gammaness = 0.1 / 2 + (1 / gb) * i
            # original bound the Text object to an unused local; the
            # return value is not needed
            ax.text(theta2, gammaness,
                    "%.2f %%" % sens[energy_bin][i][j],
                    ha="center", va="center", color="w", size=8)
    return ax
aa2121697429d330da3ec18f08f36248e3f57152
3,636,413
def blackbody1d(temperature, radius, distance=10*u.pc,
                lambda_min=2000, lambda_max=10000, dlambda=1):
    """
    One dimensional blackbody spectrum.

    Parameters
    ----------
    temperature : float or `~astropy.units.Quantity`
        Blackbody temperature. If not a Quantity, it is assumed to be in Kelvin.
    radius : `~astropy.units.Quantity`
        Radius of spherical blackbody. Must be a Quantity.
    distance : `~astropy.units.Quantity`
        Distance of blackbody source. Must be a Quantity. Default is 10 pc
        so absolute and apparent magnitudes will be the same.
    lambda_min : float
        Minimum wavelength for spectrum (in Angstroms).
    lambda_max : float
        Maximum wavelength for spectrum (in Angstroms).
    dlambda : float
        Wavelength interval for spectrum (in Angstroms).

    Returns
    -------
    bb : `~starkit.fix_spectrum1d.SKSpectrum1D`
        Blackbody spectrum.

    Raises
    ------
    ValueError
        If `radius` or `distance` is not a Quantity.
    """
    # duck-typed Quantity check: anything with a `.unit` attribute passes
    if not hasattr(radius, 'unit'):
        raise ValueError("radius needs to be a quantity (e.g., 1 * u.cm)")
    if not hasattr(distance, 'unit'):
        raise ValueError("distance needs to be a quantity (e.g., 1 * u.pc)")
    wavelength = np.arange(lambda_min, lambda_max, dlambda) * u.AA
    # the factor of pi sr is from the angular integral
    flux = np.pi * u.sr * (radius/distance)**2 * blackbody_lambda(wavelength, temperature)
    # theoretical quantity has no uncertainty
    uncertainty = np.zeros_like(flux)
    bb = SKSpectrum1D.from_array(wavelength, flux, uncertainty)
    return bb
80bef199c3a11d19f60204913cb44dbf6f40b47f
3,636,414
def expm1_op_tensor(x):
    """
    See :func:`oneflow.expm1`
    """
    # thin Tensor-method wrapper: delegates to the Expm1 functor
    # (element-wise exp(x) - 1, per the referenced oneflow.expm1 docs)
    return Expm1()(x)
dc844e014a806ae507052618eccd889a9a0b589d
3,636,415
import os


def get_unique_filepath(stem):
    """NOT thread-safe!

    Return `stem`, or `stem<N>` where N is the smallest positive integer
    for which the path does not exist. Useful for temp dirs where the
    client code wants an obvious ordering.
    """
    # fast path: the bare stem is still free
    if not os.path.exists(stem):
        return stem
    n = 1
    candidate = stem + str(n)
    while os.path.exists(candidate):
        n += 1
        candidate = stem + str(n)
    return candidate
29f853bcb1df4bd2b989948ad2b7b8985bff83e9
3,636,416
from onnxruntime import __version__ as onnxruntime_version
import os


def optimize_model(input,
                   model_type='bert',
                   num_heads=0,
                   hidden_size=0,
                   optimization_options=None,
                   opt_level=None,
                   use_gpu=False,
                   only_onnxruntime=False):
    """ Optimize Model by OnnxRuntime and/or python fusion logic.

    ONNX Runtime has graph optimizations (https://onnxruntime.ai/docs/resources/graph-optimizations.html).
    However, the coverage is limited. We also have graph fusions that implemented in Python to improve the coverage.
    They can combined: ONNX Runtime will run first when opt_level > 0, then graph fusions in Python will be applied.

    To use ONNX Runtime only and no Python fusion logic, use only_onnxruntime flag and a positive opt_level like
        optimize_model(input, opt_level=1, use_gpu=False, only_onnxruntime=True)

    When opt_level is None, we will choose default optimization level according to model type.

    When opt_level is 0 and only_onnxruntime is False, only python fusion logic is used and onnxruntime is disabled.

    When opt_level > 1, use_gpu shall set properly since the optimized graph might contain operators for GPU or CPU only.
    If your model is intended for GPU inference only (especially float16 or mixed precision model), it is recommended to
    set use_gpu to be True, otherwise the model is not optimized for GPU inference.

    For BERT model, num_heads and hidden_size are optional. For other model types, you need specify these parameters.

    Args:
        input (str): input model path.
        model_type (str, optional): model type - like bert, bert_tf, bert_keras or gpt2. Defaults to 'bert'.
        num_heads (int, optional): number of attention heads. Defaults to 0.
                                   0 allows detect the parameter from graph automatically (for model_type "bert" only).
        hidden_size (int, optional): hidden size. Defaults to 0.
                                     0 allows detect the parameter from graph automatically (for model_type "bert" only).
        optimization_options (FusionOptions, optional): optimization options that turn on/off some fusions. Defaults to None.
        opt_level (int, optional): onnxruntime graph optimization level (0, 1, 2 or 99) or None. Defaults to None.
                                   When the value is None, default value (1 for bert and gpt2, 0 for other model types) will be used.
                                   When the level > 0, onnxruntime will be used to optimize model first.
        use_gpu (bool, optional): use gpu or not for onnxruntime. Defaults to False.
        only_onnxruntime (bool, optional): only use onnxruntime to optimize model, and no python fusion. Defaults to False.

    Returns:
        object of an optimizer class.
    """
    assert opt_level is None or opt_level in [0, 1, 2, 99]

    if model_type != "bert" and (num_heads == 0 or hidden_size == 0):
        logger.warning("Please specify parameters of num_heads and hidden_size when model_type is not 'bert'")

    # per-model-type optimizer class, expected producer name and default level
    (optimizer_class, producer, default_opt_level) = MODEL_TYPES[model_type]

    if opt_level is None:
        opt_level = default_opt_level

    temp_model_path = None
    if opt_level > 1:
        temp_model_path = optimize_by_onnxruntime(input, use_gpu=use_gpu, opt_level=opt_level)
    elif opt_level == 1:
        # basic optimizations (like constant folding and cast elimination) are not specific to an execution provider.
        # CPU provider is used here so that there is no extra node for GPU memory copy.
        temp_model_path = optimize_by_onnxruntime(input, use_gpu=False, opt_level=1)

    if only_onnxruntime and not temp_model_path:
        logger.warning("Please specify a positive value for opt_level when only_onnxruntime is True")

    # fall back to the original input when onnxruntime did not run
    model = load_model(temp_model_path or input, format=None, load_external_data=True)

    if model.producer_name and producer != model.producer_name:
        logger.warning(
            f"Model producer not matched: Expect {producer}, Got {model.producer_name} {model.producer_version}. Please specify correct --model_type parameter."
        )

    if optimization_options is None:
        optimization_options = FusionOptions(model_type)

    optimizer = optimizer_class(model, num_heads, hidden_size)

    if not only_onnxruntime:
        optimizer.optimize(optimization_options)

    # Remove the temporary model produced by the onnxruntime pass.
    if temp_model_path:
        os.remove(temp_model_path)
        logger.debug("Remove tempoary model: {}".format(temp_model_path))

    optimizer.model.producer_name = "onnxruntime.transformers"
    optimizer.model.producer_version = onnxruntime_version

    return optimizer
71a2780da98aedda2072fabb5a38cf7999968c53
3,636,417
def transect_rotate(adcp_transect,rotation,xy_line=None): """ Calculates all possible distances between a list of ADCPData objects (twice...ineffcient) Inputs: adcp_obs = list ADCPData objects, shape [n] Returns: centers = list of centorids of ensemble locations of input ADCPData objects, shape [n] distances = xy distance between centers, shape [n-1] """ """ Rotates ADCPTransectData U and V velocities. Inputs: adcp_transect = ADCPTransectData object rotation = one of: None - no rotation of averaged velocity profiles 'normal' - rotation based upon the normal to the plotline (default rotation type) 'pricipal flow' - uses the 1st principal component of variability in uv flow direction 'Rozovski' - individual rotation of each verticle velocity to maximize U 'no transverse flow' - rotation by the net flow vector is used to minnumize V xy_line = numpy array of line defined by 2 points: [[x1,y1],[x2,y2]], or None Returns adcp_transect = ADCPTransectData object with rotated uv velocities """ if rotation == "normal": # find angle of line: if xy_line is None: if adcp_transect.xy is None: raise Exception,"transect_rotate() error: ADCPData must be xy projected, or input xy_line must be supplied for normal rotation" xy_line = adcpy.util.map_xy_to_line(adcp_transect.xy) theta = adcpy.util.calc_normal_rotation(xy_line) elif rotation == "no transverse flow": flows = adcp_transect.calc_ensemble_flow(range_from_velocities=True) theta = adcpy.util.calc_net_flow_rotation(flows[:,0],flows[:,1]) elif rotation == "Rozovski": flows = adcp_transect.calc_ensemble_flow(range_from_velocities=True) theta = adcpy.util.calc_Rozovski_rotation(flows[:,0],flows[:,1]) elif rotation == "principal flow": flows = adcp_transect.calc_ensemble_flow(range_from_velocities=True) theta = adcpy.util.principal_axis(flows[:,0],flows[:,1],calc_type='EOF') elif type(rotation) is str: raise Exception,"In transect_rotate(): input 'rotation' string not understood: %s"%rotation else: theta = rotation 
adcp_transect.set_rotation(theta,'uv') return adcp_transect
bc493c8cf93cbfdbe614ed39f973ee735fbe3294
3,636,418
def F(x, t, *args, **kwds):
    """
    F(x) = ddx

    Inverse-square central force on position vector `x`; strength taken
    from the keyword 'μ' (default 1). `t` and extra args are ignored.
    """
    mu = kwds.get('μ', 1)
    r_cubed = np.sum(x ** 2) ** 1.5
    return -mu * x / r_cubed
bbe4afa78dab5aafa27c26a09f31fa8bcc37d989
3,636,419
from typing import List
from typing import Dict


def get_capacity_potential_per_country(countries: List[str], is_onshore: bool,
                                       filters: Dict, power_density: float,
                                       processes: int = None):
    """
    Return capacity potentials (GW) in a series of countries.

    Parameters
    ----------
    countries: List[str]
        List of ISO codes.
    is_onshore: bool
        Whether the technology is onshore located.
    filters: Dict
        Dictionary containing a set of values describing the filters to apply to obtain land availability.
    power_density: float
        Power density in MW/km2
    processes: int (default: None)
        Number of parallel processes

    Returns
    -------
    pd.Series
        Series containing the capacity potentials (GW) for each code.
    """
    # annotation corrected from `float` to `bool` to match the docstring
    which = 'onshore' if is_onshore else 'offshore'
    shapes = get_shapes(countries, which=which, save=True)["geometry"]
    land_availability = get_land_availability_for_shapes(shapes, filters, processes)
    # divide by 1e3 to convert MW to GW
    return pd.Series(land_availability*power_density/1e3, index=shapes.index)
f68285a349c147c773d8053afa377e674b0e585a
3,636,420
import ast
from typing import Set


def all_statements(tree: ast.AST) -> Set[ast.stmt]:
    """
    Return the set of all ast.stmt nodes in a tree.
    """
    statements = set()
    for node in ast.walk(tree):
        if isinstance(node, ast.stmt):
            statements.add(node)
    return statements
9f7cc367f01ec3bb90869879e79eb9cbe6636820
3,636,421
def calc_predicted_points_for_pos(
    pos, gw_range, team_model, player_model, season, tag, session
):
    """
    Calculate points predictions for all players in a given position
    and put into the DB

    Returns a dict mapping player_id -> prediction from
    calc_predicted_points().
    """
    predictions = {}
    df_player = None
    if pos != "GK":  # don't calculate attacking points for keepers.
        df_player = get_fitted_player_model(player_model, pos, season, session)
    for player in list_players(position=pos, dbsession=session):
        predictions[player.player_id] = calc_predicted_points(
            player, team_model, df_player, season, tag, session, gw_range
        )
    return predictions
da30553d3cfe0bacd4198f3ae949466596d130a5
3,636,422
def image_preprocess2(img):
    """
    image preprocess version 2 using: yellow threshold, white threshold,
    sobelX, sobelY, ROI

    Parameters
    ----------
    img: image (np.array())

    Return
    ----------
    binary lane mask restricted to the region of interest
    """
    # set white and yellow threshold
    white = rgb_threshold(img, r_threshold=(200, 255), g_threshold=(200, 255),
                          b_threshold=(200, 255))
    yellow = hsv_threshold(img, h_threshold=(20, 34), s_threshold=(43, 255),
                           v_threshold=(46, 255))
    # set sobelX and sobelY threshold
    gradx = abs_sobel_thresh(img, orient='x', thresh=(35, 120))
    grady = abs_sobel_thresh(img, orient='y', thresh=(30, 120))
    # filtrate the image using color information (white lane -- RGB space)
    # (yellow lane -- HSV space) and gradient information (SobelX, SobelY)
    combined = np.zeros((img.shape[0], img.shape[1]), dtype=np.uint8)
    combined[(white == 1) | (yellow == 1) | ((gradx == 1) & (grady == 1))] = 1
    # region of interest: trapezoid covering the lower half of the frame
    height = img.shape[0]
    width = img.shape[1]
    vertices = np.array([[(0, height), (width, height),
                          (width * 0.6, height * 0.5),
                          (width * 0.4, height * 0.5)]], dtype=np.int32)
    # fix: apply the ROI to the combined binary mask — the original passed
    # the raw `img`, leaving `combined` computed but unused
    roi_image = region_of_interest(combined, vertices)
    return roi_image
8581865049d7c0b9e33936e09bd034d4981f4d57
3,636,423
from typing import Dict


def get_all_feeds(cb: CbThreatHunterAPI, include_public=True) -> Dict:
    """Retrieve all feeds owned by the caller.

    Provide include_public=true parameter to also include public
    community feeds.
    """
    org_key = cb.credentials.org_key
    response = cb.get_object(
        f"/threathunter/feedmgr/v2/orgs/{org_key}/feeds",
        query_parameters={"include_public": include_public},
    )
    # missing "results" key yields an empty list rather than an error
    return response.get("results", [])
e8cfea478a43919cf8753e0c1c9b8bb3228db736
3,636,424
from typing import Tuple
import ctypes


def spkltc(
    targ: int, et: float, ref: str, abcorr: str, stobs: ndarray
) -> Tuple[ndarray, float, float]:
    """
    Return the state (position and velocity) of a target body
    relative to an observer, optionally corrected for light time,
    expressed relative to an inertial reference frame.

    https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkltc_c.html

    :param targ: Target body.
    :param et: Observer epoch.
    :param ref: Inertial reference frame of output state.
    :param abcorr: Aberration correction flag.
    :param stobs: State of the observer relative to the SSB.
    :return: Target state, one way light time between observer and target,
            Derivative of light time with respect to time
    """
    # NOTE(review): `assert` vanishes under `python -O`; an explicit
    # ValueError would validate stobs unconditionally.
    assert len(stobs) == 6
    # marshal Python values into the ctypes forms CSPICE expects
    targ = stypes.c_int(targ)
    et = ctypes.c_double(et)
    ref = stypes.string_to_char_p(ref)
    abcorr = stypes.string_to_char_p(abcorr)
    stobs = stypes.to_double_vector(stobs)
    # output buffers filled by the C call
    starg = stypes.empty_double_vector(6)
    lt = ctypes.c_double()
    dlt = ctypes.c_double()
    libspice.spkltc_c(
        targ, et, ref, abcorr, stobs, starg, ctypes.byref(lt), ctypes.byref(dlt)
    )
    return stypes.c_vector_to_python(starg), lt.value, dlt.value
46ad18c4fbf0c654771a7e6568831e6551f52e44
3,636,425
def remove_outlier_from_time_average(df, time=4, multiplier=3):
    """
    Remove outliers when averaging transients before performing the fitting
    routines, used to improve the signal to noise ratio in low biomass systems.

    The function sets a time window to average over, using upper and lower
    limits for outlier detection. The upper and lower limits are determined
    by mean ± std * [1]. The multiplier [1] can be adjusted by the user.

    Parameters
    ----------
    df : pandas.DataFrame
        A dataframe of the raw data, can either be imported from
        pandas.read_csv or the output from phyto_photo_utils.load
    time : int, default=4
        The time window to average over, e.g. 4 = 4 minute averages
    multiplier : int, default=3
        The multiplier to apply to the standard deviation for determining
        the upper and lower limits.

    Returns
    -------
    df : pandas.DataFrame
        A dataframe of the time averaged data with outliers excluded.

    Example
    -------
    >>> ppu.remove_outlier_from_time_average(df, time=2, multiplier=3)
    """
    # NOTE(review): mutates the caller's DataFrame in place (datetime cast
    # and NaN assignment below) before rebinding `df` — confirm intended.
    # Convert time window to a pandas offset alias, e.g. 4 -> '4T' (minutes)
    dt = str(time) + 'T'
    # Convert dtype of the datetime column
    df['datetime'] = df.datetime.astype('datetime64')
    # Group data by time window and flashlet number
    grp = df.groupby([Grouper(key='datetime', freq=dt), 'flashlet_number'])
    # Calculate means, standard deviations and counts of the groups
    mean = grp.mean()
    std = grp.std()
    c = grp.count()
    # Calculate upper and lower limits of each group, and repeat each value
    # by its count so the limits align row-for-row with the raw data
    ulim = repeat((mean.flevel.values + std.flevel.values * multiplier), c.flevel.values)
    llim = repeat((mean.flevel.values - std.flevel.values * multiplier), c.flevel.values)
    # Get indexes of data used to create each group
    idx = []
    for i, items in enumerate(grp.indices.items()):
        idx.append(items[-1])
    idx = concatenate(idx, axis=0)
    # Create pandas DataFrame of upper and lower using original indexes of data
    mask = DataFrame([ulim, llim, idx]).T
    mask.columns = ['ulim', 'llim', 'index']
    mask = mask.set_index('index').sort_index()
    # Create boolean array using mask DataFrame
    m = (df.flevel.values > mask.ulim) | (df.flevel.values < mask.llim)
    # Where condition is True, set values of fluorescence yield to NaN
    df.loc[m.values, 'flevel'] = nan
    # Group data that is now corrected
    df = df.groupby([Grouper(key='datetime', freq=dt), 'flashlet_number']).mean().reset_index()
    # Return number of measurements that is used to create each average
    df['nseq'] = c.flevel.values
    return df
c3c92e25514e02b6baa425672b31c8ec45b4f7fc
3,636,426
import json


def parse_tb_file(path, module):
    """
    Parse a translation block coverage file generated by S2E's
    ``TranslationBlockCoverage`` plugin.

    Returns the coverage data for ``module``, or None when the file is
    unparsable, empty, or does not mention the module.
    """
    with open(path, 'r') as f:
        try:
            coverage = json.load(f)
        except Exception:
            logger.warning('Failed to parse translation block JSON file %s', path)
            return None

    # guard clauses for the two "valid JSON but unusable" cases
    if not coverage:
        logger.warning('Translation block JSON file %s is empty', path)
        return None
    if module not in coverage:
        logger.warning('Target %s not found in translation block JSON file %s',
                       module, path)
        return None

    return coverage[module]
dac9567c0c931ce9921eb5c766d00b3faa305887
3,636,427
def load(filename):
    """
    Load nifti2 single or pair from `filename`

    Parameters
    ----------
    filename : str
        filename of image to be loaded

    Returns
    -------
    img : Nifti2Image or Nifti2Pair
        nifti2 single or pair image instance

    Raises
    ------
    ImageFileError: if `filename` doesn't look like nifti2
    IOError : if `filename` does not exist
    """
    # try single-file form first; fall back to the image/header pair
    try:
        return Nifti2Image.load(filename)
    except ImageFileError:
        return Nifti2Pair.load(filename)
e537f81883b27da4add0a7c16addc3c4f7f66e4b
3,636,428
from os import path  # fix: was `from sys import path` — sys.path is a list and has no .exists()
import pickle
import time


def run_sklearn(args, out_dir, out_flp, ldrs):
    """
    Trains an sklearn model according to the supplied parameters.

    Returns the test error (lower is better) and the training time in
    seconds (0 when a cached model was loaded).
    """
    # Unpack the dataloaders.
    ldr_trn, _, ldr_tst = ldrs
    # Construct the model.
    print("Building model...")
    net = models.MODELS[args["model"]](out_dir)
    net.log(f"\n\nArguments: {args}")
    if path.exists(out_flp):
        # The output file already exists with these parameters, so do not
        # retrain the model.
        print(
            "Skipping training because a trained model already exists with "
            f"these parameters: {out_flp}")
        print(f"Loading model: {out_flp}")
        # NOTE: pickle.load is only safe because the file was produced by
        # this same pipeline; never load untrusted pickles.
        with open(out_flp, "rb") as fil:
            net.net = pickle.load(fil)
        tim_trn_s = 0
    else:
        net.new(**{param: args[param] for param in net.params})
        # Extract the training data from the training dataloader.
        print("Extracting training data...")
        dat_in, dat_out = list(ldr_trn)[0]
        print("Training data:")
        utils.visualize_classes(net, dat_out)
        # Training.
        print("Training...")
        tim_srt_s = time.time()
        net.train(ldr_trn.dataset.fets, dat_in, dat_out)
        tim_trn_s = time.time() - tim_srt_s
        print(f"Finished training - time: {tim_trn_s:.2f} seconds")
        # Save the model.
        print(f"Saving final model: {out_flp}")
        with open(out_flp, "wb") as fil:
            pickle.dump(net.net, fil)
    # Testing.
    #
    # Use .raw() instead of loading the dataloader because we need dat_extra.
    fets, dat_in, dat_out, dat_extra = ldr_tst.dataset.raw()
    print("Test data:")
    utils.visualize_classes(net, dat_out)
    print("Testing...")
    tim_srt_s = time.time()
    acc_tst = net.test(
        fets, dat_in, dat_out, dat_extra,
        graph_prms={
            "out_dir": out_dir, "sort_by_unfairness": True, "dur_s": None})
    print(f"Finished testing - time: {time.time() - tim_srt_s:.2f} seconds")
    # Optionally perform feature elimination.
    if args["analyze_features"]:
        utils.select_fets(
            utils.analyze_feature_correlation(
                net, out_dir, dat_in, args["clusters"]),
            utils.analyze_feature_importance(
                net, out_dir, dat_in, dat_out, args["fets_to_pick"],
                args["perm_imp_repeats"]))
    return acc_tst, tim_trn_s
d1d6929eed42c53ac43b15d4f4e7702e57c24738
3,636,429
def create_softmax_loss(scores, target_values):
    """
    :param scores: [batch_size, num_candidates] logit scores
    :param target_values: [batch_size, num_candidates] vector of 0/1 target values.
    :return: [batch_size] vector of losses (or single number of total loss).
    """
    # NOTE(review): softmax_cross_entropy_with_logits expects `labels` to be
    # a probability distribution over candidates; 0/1 multi-hot targets are
    # only valid when exactly one candidate is 1 — confirm with callers.
    return tf.nn.softmax_cross_entropy_with_logits(logits=scores, labels=target_values)
a4b10b9f72f0e7e38474c5ec887ed3be215fc7fb
3,636,430
def page_not_found(e):
    """
    Catches 404 errors and render a 404 page stylized with the design
    of the web app.

    Returns 404 static page.
    """
    # second tuple element sets the HTTP status code of the response
    return render_template('404.html'), 404
abf420f299f63a2ab3bccfca578f46be040590fd
3,636,431
def effort_remaining_after_servicing_tier_2_leads():
    """
    Real Name: Effort Remaining after Servicing Tier 2 Leads
    Original Eqn: MAX(Effort Remaining after Servicing Existing Clients - Effort Devoted to Tier 2 Leads, 0)
    Units: Hours/Month
    Limits: (None, None)
    Type: component
    Subs: None

    How much effort remains after higher priority sales and maintenance
    activities are complete?
    """
    # np.maximum(..., 0) clamps the remaining effort at zero, mirroring
    # the MAX(..., 0) in the original model equation
    return np.maximum(
        effort_remaining_after_servicing_existing_clients()
        - effort_devoted_to_tier_2_leads(),
        0,
    )
2ab3ee8968bb6e667bdf53cf4629ad0b1ecd732d
3,636,432
def sliceThreshold(volume, block_size = 5):
    """
    Convert each slice of a volume into binary using the adaptive local
    Otsu method.

    Parameters
    ----------
    volume : np.ndarray
        3D volume; thresholding is applied slice-by-slice along the last axis.
    block_size : int
        Neighbourhood size passed to the adaptive threshold.

    Returns
    -------
    np.ndarray
        Segmented volume of the same shape and dtype as the input.

    Raises
    ------
    TypeError
        If `volume` is not a numpy array.
    """
    # fix: isinstance() instead of a type() equality test, so ndarray
    # subclasses (memmaps, masked arrays) are accepted as well
    if not isinstance(volume, np.ndarray):
        raise TypeError('the input must be numpy array!')
    x, y, z = volume.shape
    segImg = np.empty_like(volume)
    for i in range(z):
        binary_adaptive = threshold_adaptive(volume[:, :, i], block_size, offset=0)
        segImg[:, :, i] = binary_adaptive
    return segImg
b66d4a46025ccc9fe6c15e61dcd57e060437f91e
3,636,433
def rastrigin_d_dim(x: chex.Array) -> chex.Array:
    """
    D-Dim. Rastrigin function.
    x_i ∈ [-5.12, 5.12]
    f(x*)=0 - Minimum at x*=[0,...,0]
    """
    amplitude = 10
    oscillation = jnp.sum(x ** 2 - amplitude * jnp.cos(2 * jnp.pi * x))
    # constant offset A*d keeps the global minimum value at exactly 0
    return amplitude * x.shape[0] + oscillation
a7ac23b0a2b76afceb193629aad265186664c012
3,636,434
def fMaxConfEV(arr3_EvtM_bol, arr3_Evt, arr3_Conf):
    """
    Return highest confidence and its corresponding timing, given
    arr3_EvtM_bol already masked to year of interest.
    Something in this fuction or calling it is broken.
    """
    print('\t\tStats (max conf)...', end='')
    # zero out confidences outside the mask, then take the per-pixel max
    # over the leading (band) axis
    masked_conf = arr3_EvtM_bol * arr3_Conf
    conf_max = masked_conf.max(axis=0)
    print('\tGet matching event...', end='')
    matching_event = np.zeros(conf_max.shape)
    # scan bands in order; a later band that ties the max overwrites
    # earlier ones, matching the original behavior
    for band in range(masked_conf.shape[0]):
        matching_event = np.where(conf_max == masked_conf[band, :, :],
                                  arr3_Evt[band, :, :], matching_event)
    return matching_event, conf_max
84a701fde4243c588de43582b3c7aa6c37dd434c
3,636,435
def regex_validation_recursion(node: dict) -> (bool, str):
    """Validate every regex inside a single Spcht Descriptor node.

    Recurses into the node's ``fallback`` chain.

    :param dict node: the Spcht node to validate
    :return: (True, "none") when all regexes are valid, otherwise
        (False, name-of-the-offending-key)
    :rtype: (bool, str)
    """
    # * mapping settings
    # Bug fix: the guard previously tested the non-existent key
    # 'map_setting' while the body read 'mapping_settings', so the
    # mapping-key regex check was never actually performed.
    if 'mapping_settings' in node:
        if '$regex' in node['mapping_settings']:
            if node['mapping_settings']['$regex'] and 'mapping' in node:
                for key in node['mapping']:
                    if not validate_regex(key):
                        return False, "mapping"
    if 'cut' in node:
        if not validate_regex(node['cut']):
            return False, "cut"
    if 'match' in node:
        if not validate_regex(node['match']):
            return False, "match"
    if 'fallback' in node:
        return regex_validation_recursion(node['fallback'])
    return True, "none"
53f7e605c7bcd83cacba85e8aa0c5dc25e26d05c
3,636,436
import os


def get_beat_times(audio_file, beats_folder, include_beat_numbers=False):
    """Read beat times from an annotation file, extracting them first if absent.

    Looks for ``<stem>.beats.txt`` in ``beats_folder``; if it does not exist,
    DBNDownBeatTracker is invoked to create it.

    :param audio_file: path to the audio file
    :param beats_folder: folder with pre-analysed beat times
        (one ``.beats.txt`` per track, tab separated: time, beat number)
    :param include_beat_numbers: also return the beat-number column
    :return: beat times in seconds, optionally with the beat numbers
    """
    stem = os.path.splitext(os.path.basename(audio_file))[0]
    beats_file = os.path.join(beats_folder, stem + '.beats.txt')
    if not os.path.isfile(beats_file):
        print(f"Extracting beat times for {audio_file}")
        os.system(f"DBNDownBeatTracker single '{audio_file}' -o '{beats_file}'")
    table = pd.read_table(beats_file, header=None)
    if include_beat_numbers:
        return table[0].values, table[1].values
    return table[0].values
a996513c5ec535be0f092a05f2ccd0d433a77296
3,636,437
def build_probability_matrix(graph):
    """Build the uniform transition-probability matrix for PageRank.

    Every entry is ``1 / n`` where ``n`` is the number of nodes, i.e. each
    node is equally likely to be jumped to.

    Parameters
    ----------
    graph : :class:`~gensim.summarization.graph.Graph`
        Given graph.

    Returns
    -------
    numpy.ndarray, shape = [n, n]
        Matrix filled with the uniform probability, n is the number of
        nodes of `graph`.
    """
    n = len(graph.nodes())
    transition = empty_matrix((n, n))
    transition.fill(1.0 / float(n))
    return transition
44cf85a02d95df8d2d1a7580714e90cab0f087dc
3,636,438
def yuanshanweir_transfer_loss_amount():
    """
    Real Name: YuanShanWeir Transfer Loss Amount
    Original Eqn: (Tranfer From YuanShanWeir To DaNanWPP+Transfer From YuanShanWeir To BanXinWPP)/(1-WPP Transfer Loss Rate)*WPP Transfer Loss Rate
    Units: m3
    Limits: (None, None)
    Type: component
    Subs: None

    Water lost in transit: delivered volume is grossed up to the volume
    sent, and the difference (the loss fraction) is returned.
    """
    delivered = (
        tranfer_from_yuanshanweir_to_dananwpp()
        + transfer_from_yuanshanweir_to_banxinwpp()
    )
    loss_rate = wpp_transfer_loss_rate()
    return delivered / (1 - loss_rate) * loss_rate
d5e028fb4450258f7fbdd708e7948f80eda04d2f
3,636,439
def create_request(request: Request) -> Request:
    """Persist a brewtils Request as a database entry.

    The supplied Request is left untouched: a copy is transformed (its
    "bytes" type parameters may be rewritten during file processing) and
    that copy is what gets stored and returned.

    Args:
        request: The brewtils Request object from which a database entry
            will be created

    Returns:
        Request: A brewtils Request model based on the newly created
        database entry.
    """
    # TODO: This deepcopy could be very memory intensive if the request contains
    # large file parameters. Revisit whether remote requests can be persisted
    # locally without the base64 encoded data, avoiding this copy.
    working_copy = deepcopy(request)
    # Only requests targeted at the local garden get raw-file replacement.
    use_raw_file = working_copy.namespace == config.get("garden.name")
    remove_bytes_parameter_base64(working_copy.parameters, use_raw_file)
    return db.create(working_copy)
fc4ec6033545ad26db1b5ef375b38f79e5879e9a
3,636,440
def _gifti_to_array(gifti):
    """Load each image in the tuple `gifti` and horizontally stack the data
    into a single numpy array.
    """
    per_image_data = [load_gifti(image).agg_data() for image in gifti]
    return np.hstack(per_image_data)
363cf55a7509acf842b1d2dcbfb4ff45980e6692
3,636,441
from typing import List
from typing import Type
from typing import Union


def learn_naive_factorization(
    data: np.ndarray,
    distributions: List[Type[Leaf]],
    domains: List[Union[list, tuple]],
    scope: List[int],
    learn_leaf_func: LearnLeafFunc,
    **learn_leaf_kwargs
) -> Node:
    """Learn a naive factorized model: a product of one leaf per variable.

    :param data: The data (one column per random variable).
    :param distributions: The distribution of each random variable.
    :param domains: The domain of each random variable.
    :param scope: The scope of the leaf.
    :param learn_leaf_func: Function used to fit each leaf's parameters.
    :param learn_leaf_kwargs: Additional parameters for learn_leaf_func.
    :return: A naive factorized model.
    :raises ValueError: If there are inconsistencies between the data,
        distributions and domains.
    """
    _, _n_features = data.shape
    n_vars = len(distributions)
    if len(scope) != n_vars or len(domains) != n_vars:
        raise ValueError("Each data column should correspond to a random variable having a distribution and a domain")

    product = Product(scope)
    for idx, var in enumerate(scope):
        # Fit one univariate leaf on the corresponding data column.
        child = learn_leaf_func(
            data[:, [idx]], [distributions[idx]], [domains[idx]], [var],
            **learn_leaf_kwargs
        )
        child.id = idx + 1  # set the leaves ids sequentially
        product.children.append(child)
    return product
ef79c457d7c3a0630b8b8734bb892d1b1937e6ab
3,636,442
def opt_IA_search_assist(fun, lbounds, ubounds, budget):
    """Minimize `fun` within `lbounds`/`ubounds` using the OptIA immune
    algorithm with search assistance enabled.

    Note: the previous docstring incorrectly described this as uniform
    random search; it delegates entirely to :class:`optIA.OptIA`.

    :param fun: objective function to minimize
    :param lbounds: lower bounds per dimension
    :param ubounds: upper bounds per dimension
    :param budget: evaluation budget passed to the optimizer
    :return: the best solution found
    """
    # Bug fix: removed dead locals (dim, x_min, f_min, max_chunk_size) that
    # were computed but never used.
    lbounds, ubounds = np.array(lbounds), np.array(ubounds)
    opt_ia = optIA.OptIA(fun, lbounds, ubounds, ssa=True)
    return opt_ia.opt_ia(budget)
b8d496ce403ae4882dc5a54947febdf1a7f298b4
3,636,443
def load_investigation(fp):
    """Load an ISA-Tab investigation file and validate its section labels.

    Used for rules 0005.  Each section's column labels are checked against
    the labels the ISA-Tab specification expects; extra labels must be
    ``Comment[...]`` style.  Failures are logged via ``log.fatal`` /
    ``log.warning`` and appended to the module-level ``validator_errors`` /
    ``validator_warnings`` lists (defined elsewhere in this file) — loading
    still continues and the parsed DataFrames are returned.

    :param fp: A file-like buffer object pointing to an investigation file
    :return: Dictionary of DataFrames for each section
    """
    def check_labels(section, labels_expected, df):
        """Checks each section is syntactically structured correctly

        :param section: The section of interest
        :param labels_expected: The set of expected labels in the section
        :param df: The DataFrame slice of the investigation file we are checking
        :return: None
        """
        # Only string column labels are considered; non-string (e.g. numeric
        # pandas-generated) columns are ignored.
        labels_found = set([x for x in df.columns if isinstance(x, str)])
        if not labels_expected.issubset(labels_found):
            missing_labels = labels_expected - labels_found
            log.fatal("(F) In {} section, expected labels {} not found in {}"
                      .format(section, missing_labels, labels_found))
        if len(labels_found - labels_expected) > 0:
            # check extra labels, i.e. make sure they're all comments
            extra_labels = labels_found - labels_expected
            for label in extra_labels:
                if _RX_COMMENT.match(label) is None:
                    # Not a Comment[...] label at all -> hard error.
                    log.fatal("(F) In {} section, label {} is not allowed"
                              .format(section, label))
                    validator_errors.append({
                        "message": "Invalid label found in investigation file",
                        "supplemental": "In {} section, label {} is not "
                                        "allowed".format(section, label),
                        "code": 5
                    })
                elif len(_RX_COMMENT.findall(label)) == 0:
                    # Comment[] matched but with an empty captured name
                    # -> warning only.
                    log.warning("(W) In {} section, label {} is missing a "
                                "name".format(section, label))
                    validator_warnings.append({
                        "message": "Missing name in Comment[] label",
                        "supplemental": "In {} section, label {} is missing a "
                                        "name".format(section, label),
                        "code": 4014
                    })

    # Read in investigation file into DataFrames first
    df_dict = read_investigation_file(fp)
    log.debug("Loading ONTOLOGY SOURCE REFERENCE section")
    labels_expected = {'Term Source Name', 'Term Source File',
                       'Term Source Version', 'Term Source Description'}
    check_labels('ONTOLOGY SOURCE REFERENCE', labels_expected,
                 df_dict['ontology_sources'])
    # One iteration per STUDY block in the investigation file.
    for i in range(0, len(df_dict['studies'])):
        log.debug("Loading STUDY section")
        labels_expected = {'Study Identifier', 'Study Title',
                           'Study Description', 'Study Submission Date',
                           'Study Public Release Date', 'Study File Name'}
        check_labels('STUDY', labels_expected, df_dict['studies'][i])
        log.debug("Loading STUDY DESIGN DESCRIPTORS section")
        labels_expected = {'Study Design Type',
                           'Study Design Type Term Accession Number',
                           'Study Design Type Term Source REF'}
        check_labels('STUDY DESIGN DESCRIPTORS', labels_expected,
                     df_dict['s_design_descriptors'][i])
        log.debug("Loading STUDY PUBLICATIONS section")
        labels_expected = {'Study PubMed ID', 'Study Publication DOI'}
        check_labels('STUDY PUBLICATIONS', labels_expected,
                     df_dict['s_publications'][i])
        log.debug("Loading STUDY ASSAYS section")
        labels_expected = {
            'Study Assay Measurement Type',
            'Study Assay Measurement Type Term Accession Number',
            'Study Assay Measurement Type Term Source REF',
            'Study Assay Technology Type',
            'Study Assay Technology Type Term Accession Number',
            'Study Assay Technology Type Term Source REF',
            'Study Assay Technology Platform', 'Study Assay File Name'}
        check_labels('STUDY ASSAYS', labels_expected, df_dict['s_assays'][i])
        log.debug("Loading STUDY PROTOCOLS section")
        labels_expected = {
            'Study Protocol Name', 'Study Protocol Type',
            'Study Protocol Type Term Accession Number',
            'Study Protocol Type Term Source REF',
            'Study Protocol Description', 'Study Protocol Parameters Name',
            'Study Protocol Parameters Name Term Accession Number',
            'Study Protocol Parameters Name Term Source REF',
            'Study Protocol Components Name',
            'Study Protocol Components Type',
            'Study Protocol Components Type Term Accession Number',
            'Study Protocol Components Type Term Source REF'}
        check_labels('STUDY PROTOCOLS', labels_expected,
                     df_dict['s_protocols'][i])
        log.debug("Loading STUDY CONTACTS section")
        # NOTE(review): 'Study Person Roles' appears twice in this set
        # literal (duplicates collapse in a set); the second entry may have
        # been intended as 'Study Person Roles Term Source REF' variant --
        # confirm against the ISA-Tab spec.
        labels_expected = {
            'Study Person Last Name', 'Study Person First Name',
            'Study Person Mid Initials', 'Study Person Email',
            'Study Person Phone', 'Study Person Fax',
            'Study Person Address', 'Study Person Affiliation',
            'Study Person Roles', 'Study Person Roles',
            'Study Person Roles Term Accession Number',
            'Study Person Roles Term Source REF'}
        check_labels('STUDY CONTACTS', labels_expected,
                     df_dict['s_contacts'][i])
    return df_dict
a53928a2b6e13cb21d9e76db2792fdba349aba98
3,636,444
def _fi18n(text): """Used to fake translations to ensure pygettext retrieves all the strings we want to translate. Outside of the aforementioned use case, this is exceptionally useless, since this just returns the given input string without any modifications made. """ return text
e505b58f4ff1e64c07b4496f69bee8b6e86b5129
3,636,445
def is_required_version(version, specified_version):
    """Check whether `version` satisfies a Pipfile version requirement.

    Only exact pins ("==x.y") are enforced; any other specifier (ranges,
    "*", missing) is treated as satisfied.  `specified_version` may also be
    a dict entry with a "version" key.
    """
    # Certain packages may be defined with multiple values.
    if isinstance(specified_version, dict):
        specified_version = specified_version.get("version", "")
    if not specified_version.startswith("=="):
        return True
    pinned = specified_version.split("==")[1].strip()
    return version.strip() == pinned
6c8bfe0fe77f7a7d14e1ca2dd8005a8d82d0998c
3,636,446
async def cmd_project_uninstall(ls: TextXLanguageServer, params) -> bool:
    """Uninstall a textX language project and report progress to the client.

    Args:
        params: list whose first element is the project name

    Returns:
        True if the textX project is uninstalled successfully, otherwise
        False

    Raises:
        None
    """
    name = params[0]
    ls.show_message("Uninstalling project {}".format(name))
    try:
        await uninstall_project_async(name, ls.python_path,
                                      ls.show_message_log)
        ls.show_message("Project {} is successfully uninstalled.".format(name))
        return True
    except UninstallTextXProjectError as err:
        # Surface both the short and detailed error messages to the client.
        ls.show_errors(str(err), err.detailed_err_msg)
        return False
cc460057b5ecaf0c97bd715fc98020c9cfbe960f
3,636,447
def _check_load_mat(fname, uint16_codec):
    """Read an EEGLAB .mat/.set file and normalize it to a Bunch.

    Parameters
    ----------
    fname : path-like
        Path to the EEGLAB file.
    uint16_codec : str | None
        Codec forwarded to the pymatreader-based loader for decoding
        uint16-coded strings.

    Returns
    -------
    eeg : Bunch
        The 'EEG' structure with ``trials``, ``nbchan`` and ``pnts``
        coerced to int.

    Raises
    ------
    NotImplementedError
        If the file contains an ALLEEG array.
    """
    read_mat = _import_pymatreader_funcs('EEGLAB I/O')
    eeg = read_mat(fname, uint16_codec=uint16_codec)
    if 'ALLEEG' in eeg:
        # Bug fix: adjacent string literals were missing a separating space,
        # producing "...Please contactmne-python developers...".
        raise NotImplementedError(
            'Loading an ALLEEG array is not supported. Please contact '
            'mne-python developers for more information.')
    if 'EEG' in eeg:  # fields are contained in EEG structure
        eeg = eeg['EEG']
    eeg = eeg.get('EEG', eeg)  # handle nested EEG structure
    eeg = Bunch(**eeg)
    eeg.trials = int(eeg.trials)
    eeg.nbchan = int(eeg.nbchan)
    eeg.pnts = int(eeg.pnts)
    return eeg
384c0034230167ccf66c91aa048a0ef048d2e2bd
3,636,448
def local_desired_velocity(env, veh_ids, fail=False):
    """Reward proximity of the observed vehicles to the desired velocity.

    Only the specified vehicles are observed.  Returns 0 on collision or
    failure, when no vehicles are observed, or when any observed speed is
    below -100 (invalid reading).  Otherwise returns a value in [0, 1]
    that is 1 when every vehicle is exactly at the target velocity.
    """
    speeds = np.array(env.k.vehicle.get_speed(veh_ids))
    n_veh = len(veh_ids)
    if fail or n_veh == 0 or np.any(speeds < -100):
        return 0.

    target = env.env_params.additional_params['target_velocity']
    # Worst possible deviation: every vehicle at speed 0.
    worst = np.linalg.norm(np.array([target] * n_veh))
    deviation = np.linalg.norm(speeds - target)

    # epsilon term (to deal with ZeroDivisionError exceptions)
    eps = np.finfo(np.float32).eps
    return max(worst - deviation, 0) / (worst + eps)
6df0ba2c2bc481ca7364aafc1cb05cfd197cfba2
3,636,449
def All(q, value):
    """Build a MongoDB ``$all`` condition.

    Selects documents where the field at ``q``'s path is a list containing
    every element of ``value`` (references are resolved via ``to_refs``).
    """
    resolved = to_refs(value)
    return Condition(q._path, resolved, '$all')
b31db5f1c6cf26b339a5de6656db3318eff0c5f1
3,636,450
def key(i):
    """Return a deterministic, human-readable key for index *i*.

    Example: ``key(3) -> 'key3'``.
    """
    return f'key{i}'
04658ebead9581ff97406111c9b85e361ee49ff8
3,636,451
def svn_repos_fs_change_rev_prop3(*args):
    """
    svn_repos_fs_change_rev_prop3(svn_repos_t repos, svn_revnum_t rev, char author, char name,
        svn_string_t new_value, svn_boolean_t use_pre_revprop_change_hook,
        svn_boolean_t use_post_revprop_change_hook, svn_repos_authz_func_t authz_read_func,
        apr_pool_t pool) -> svn_error_t
    """
    # Auto-generated SWIG wrapper: forwards all arguments directly to the
    # underlying C binding in the _repos extension module.
    return _repos.svn_repos_fs_change_rev_prop3(*args)
7c49ab3ff13a3b078831a6ad0214849ae8ee5d8b
3,636,452
def pretty_ct(ct):
    """ Pretty-print a contingency table

    Parameters
    ----------
    ct : the contingency table

    Returns
    -------
    pretty_table : a fancier string representation of the table
    """
    # NOTE: this function uses Python 2 syntax (print >> stream, three-arg
    # raise) and will not parse under Python 3.
    output = StringIO()
    rich_ct(ct).to_csv(output)
    output.seek(0)
    try:
        pretty_table = prettytable.from_csv(output)
        pretty_table.padding_width = 0
        pretty_table.align = 'r'
        # Left-align the first (row label) column only.
        pretty_table.align[pretty_table.field_names[0]] = 'l'
        return pretty_table
    except _csv.Error:
        exc_info = sys.exc_info()
        print >> sys.stderr, "[Warning] pretty_table raised an exception :", \
            exc_info[1]
        if exc_info[1].message == "Could not determine delimiter":
            # Fallback: build the table manually with an explicit delimiter.
            # NOTE(review): the fallback table ``pt`` is built but never
            # returned, so this branch ends up returning None -- confirm
            # whether ``return pt`` was intended here.
            pt = None
            output.seek(0)
            rd = csv.reader(output, delimiter=',')
            pt = prettytable.PrettyTable(next(rd))
            for row in rd:
                pt.add_row(row)
        else:
            # Re-raise with the original traceback (Python 2 three-arg raise).
            raise exc_info[0], exc_info[1], exc_info[2]
547e3d36bb91f2ab2c53783099da04ef3bda1497
3,636,453
def mark(symbol):
    """Wrap `symbol`'s parse result in a tuple tagged with `symbol` itself.

    Used where the information about "which branch of the grammar was used"
    must be propagated upwards for further checks.  Relies on the parser
    combinator's ``<<`` operator to attach the action to the symbol.
    """
    def tag(result):
        return (symbol, result)

    return tag << symbol
3180c96d4d2a68df2909f23a544879918016fb37
3,636,454
def restore_dimensions(array, from_dims, result_like, result_attrs=None):
    """
    Restores a numpy array to a DataArray with similar dimensions to a
    reference Data Array. This is meant to be the reverse of get_numpy_array.

    Parameters
    ----------
    array : ndarray
        The numpy array from which to create a DataArray
    from_dims : list of str
        The directions describing the numpy array. If being used to reverse
        a call to get_numpy_array, this should be the same as the out_dims
        argument used in the call to get_numpy_array.
        'x', 'y', and 'z' indicate any axes
        registered to those directions with
        :py:function:`~sympl.set_direction_names`. '*' indicates an axis
        which is the flattened collection of all dimensions not explicitly
        listed in out_dims, including any dimensions with unknown direction.
    result_like : DataArray
        A reference array with the desired output dimensions of the
        DataArray. If being used to reverse a call to get_numpy_array, this
        should be the same as the data_array argument used in the call to
        get_numpy_array.
    result_attrs : dict, optional
        A dictionary with the desired attributes of the output DataArray. If
        not given, no attributes will be set.

    Returns
    -------
    data_array : DataArray
        The output DataArray with the same dimensions as the reference
        DataArray.

    See Also
    --------
    :py:function:~sympl.get_numpy_array: : Retrieves a numpy array with
        desired dimensions from a given DataArray.
    """
    # Map each explicit direction to itself; '*' (the flattened axis) is
    # resolved by get_input_array_dim_names from result_like instead.
    current_dim_names = {}
    for dim in from_dims:
        if dim != '*':
            current_dim_names[dim] = [dim]
    direction_to_names = get_input_array_dim_names(
        result_like, from_dims, current_dim_names)
    # Rebuild the original (unflattened) shape, dim names and coords in
    # the order given by from_dims.
    original_shape = []
    original_dims = []
    original_coords = []
    for direction in from_dims:
        if direction in direction_to_names.keys():
            for name in direction_to_names[direction]:
                original_shape.append(len(result_like.coords[name]))
                original_dims.append(name)
                original_coords.append(result_like.coords[name])
    # The reshape below is only valid if element counts match exactly.
    # NOTE: np.product is deprecated in newer numpy; np.prod is the modern
    # spelling.
    if np.product(array.shape) != np.product(original_shape):
        raise ShapeMismatchError
    # Reshape into the unflattened layout, then transpose into the
    # reference DataArray's dimension order.
    data_array = DataArray(
        np.reshape(array, original_shape),
        dims=original_dims,
        coords=original_coords).transpose(
            *list(result_like.dims))
    if result_attrs is not None:
        data_array.attrs = result_attrs
    return data_array
401015b3e33f17bb7e5be078270391efb0543bfa
3,636,455
import collections


def _combine_qc_samples(samples):
    """Merge split QC analyses back into single samples, grouped by BAM file.

    Samples sharing the same (alignment BAM, batch) pair have their QC
    algorithm lists concatenated and their summary qc/metrics dictionaries
    merged onto the group's first sample.
    """
    grouped = collections.defaultdict(list)
    for data in [utils.to_single_data(x) for x in samples]:
        batch = dd.get_batch(data) or dd.get_sample_name(data)
        if not isinstance(batch, (list, tuple)):
            batch = [batch]
        bam_key = dd.get_align_bam(data) or dd.get_work_bam(data)
        grouped[(bam_key, tuple(batch))].append(data)
    out = []
    for group in grouped.values():
        merged = group[0]
        combined_alg_qc = []
        combined_qc = {}
        combined_metrics = {}
        for d in group:
            combined_qc.update(dd.get_summary_qc(d))
            combined_metrics.update(dd.get_summary_metrics(d))
            combined_alg_qc.extend(dd.get_algorithm_qc(d))
        merged["config"]["algorithm"]["qc"] = combined_alg_qc
        merged["summary"]["qc"] = combined_qc
        merged["summary"]["metrics"] = combined_metrics
        out.append([merged])
    return out
b9fb88f7fae9c6dda8f2435b8c7fcfab5ab15ad2
3,636,456
import os
import shutil


def prepare_outdir():
    """Create a clean output directory on disk.

    Any pre-existing output directory is removed first so stale files never
    leak into a new run.

    :return: tuple of (output directory path, s3 destination folder)
    """
    target_dir, s3_folder = file_destination()
    if os.path.exists(target_dir):
        shutil.rmtree(target_dir)
    os.makedirs(target_dir)
    return target_dir, s3_folder
c16143441d7c589f589925cad217ecdeb8ba99fc
3,636,457
import re


def doc(
    package_name: str,
    plugin_name: str,
    long_doc: bool = True,
    include_details: bool = False,
) -> str:
    """Document one plug-in from its module doc-string.

    If the plug-in is not part of the package an UnknownPluginError is
    raised (by ``load``).

    Args:
        package_name: Name of package containing plug-ins.
        plugin_name: Name of the plug-in (module).
        long_doc: Use long doc-string or short one-line string.
        include_details: Include development details like parameters and
            return values?

    Returns:
        Documentation of the plug-in.
    """
    docstring = load(package_name, plugin_name).doc

    if not long_doc:
        # Short description: first paragraph collapsed onto one line.
        return docstring.split("\n\n")[0].replace("\n", " ").strip()

    # Long description: drop the first paragraph, strip indentation.
    body = "\n\n".join(docstring.split("\n\n")[1:])
    lines = [line.strip() for line in body.split("\n")]

    # Stop before Args:, Returns: etc if details should not be included.
    cutoff = len(lines)
    if not include_details:
        detail_marker = re.compile("(Args:|Returns:|Details:|Attributes:)$")
        for idx, line in enumerate(lines):
            if detail_marker.match(line) is not None:
                cutoff = idx
                break
    return "\n".join(lines[:cutoff]).strip()
a6c3a1c03936262815299657c6264f70be1e92ba
3,636,458
def microsecond(dt):
    """:yaql:property microsecond

    Returns the microseconds component of the given datetime.

    :signature: datetime.microsecond
    :returnType: integer

    .. code::

        yaql> datetime(2006, 11, 21, 16, 30, 2, 123).microsecond
        123
    """
    return dt.microsecond
31d195fa4ceb468bb5666751e56b836fbec8f822
3,636,459
import math


def decompose_label_vector(label_vector, n_xgrids, n_ygrids, mean_lwh, xlim=(0.0, 70.0), ylim=(-50.0,50.0), zlim=(-10.0,10.0), conf_thres=0.5, nms=True, iou_thres=0.1):
    """
    Build the ground-truth label vector given a set of poses, classes, and number of grids.

    Input:
        label_vector: label vector outputted from the model
        n_xgrids: number of grids in the x direction
        n_ygrids: number of grids in the y direction
        mean_lwh: per-class mean (length, width, height) used to denormalize
            the log-scaled box dimensions
        xlim, ylim, zlim: metric extents of the grid
        conf_thres: minimum confidence for a cell to produce a detection
        nms: apply non-max suppression to the resulting detections
        iou_thres: IoU threshold used by non-max suppression
    Output:
        list of detection dicts with keys
        conf, x, y, z, l, w, h, yaw, class
    """
    # NOTE(review): conf/poses/classes are initialized but never used below.
    conf = []
    poses = []
    classes = []
    label_dict_list = []
    # metric size of one grid cell in x
    xstop = (xlim[1] - xlim[0]) / float(n_xgrids)
    # metric size of one grid cell in y
    ystop = (ylim[1] - ylim[0]) / float(n_ygrids)
    # length of each object label: pose vector plus one-hot class scores
    obj_label_len = pose_vec_len + len(label_map)  # 8 for poses, rest for object classes
    # reshape the flat vector into one row per grid cell
    label_vector_reshaped = np.reshape(label_vector, (-1, obj_label_len))
    # split each row into confidence, normalized pose and class one-hot
    obj_confidences = label_vector_reshaped[:, 0]
    obj_poses = label_vector_reshaped[:, 1:pose_vec_len]
    obj_class_one_hot = label_vector_reshaped[:, pose_vec_len:]
    # iterate through each grid cell, keeping only confident detections
    for i, obj_conf in enumerate(obj_confidences):
        if obj_conf > conf_thres:
            # normalized pose vector for this cell
            x_norm, y_norm, z_norm, l_norm, w_norm, h_norm, cos_yaw_norm, sin_yaw_norm = obj_poses[i]
            cls_ = idx_to_label(np.argmax(obj_class_one_hot[i]))
            mean_lwh_cls = mean_lwh[cls_]
            # recover 2-D grid indices from the flat cell index
            # NOTE(review): both the row index and the remainder use
            # n_xgrids; check whether n_ygrids was intended in one of these
            # (this is only equivalent when n_xgrids == n_ygrids).
            x_idx = math.floor(i / n_xgrids)
            y_idx = i - (x_idx * n_xgrids)
            # denormalize pose: cell-relative offset + cell origin + extent origin
            x = (x_norm * xstop) + (x_idx * xstop) + xlim[0]
            y = (y_norm * ystop) + (y_idx * ystop) + ylim[0]
            z = (z_norm * (zlim[1] - zlim[0])) + zlim[0]
            # box sizes are log-ratios relative to the per-class mean size
            l = mean_lwh_cls[0]*math.exp(l_norm)
            w = mean_lwh_cls[1]*math.exp(w_norm)
            h = mean_lwh_cls[2]*math.exp(h_norm)
            # yaw encoded as cos/sin scaled from [0,1] back to [-1,1]
            cos_yaw = (cos_yaw_norm * 2.0) - 1.0
            sin_yaw = (sin_yaw_norm * 2.0) - 1.0
            yaw = np.arctan2(sin_yaw, cos_yaw)
            # assemble the detection dictionary
            label_dict = {}
            label_dict['conf'] = obj_conf
            label_dict['x'] = x
            label_dict['y'] = y
            label_dict['z'] = z
            label_dict['l'] = l
            label_dict['w'] = w
            label_dict['h'] = h
            label_dict['yaw'] = yaw
            label_dict['class'] = idx_to_label(np.argmax(obj_class_one_hot[i]))
            # label_dict['conf'] = np.max(obj_class_one_hot[i])
            label_dict_list.append(label_dict)
    # non-max suppression
    if nms == True:
        label_dict_list = non_max_suppression(label_dict_list, iou_threshold=iou_thres)
    # return label dictionary
    return label_dict_list
0cf34bad28a5c8dc335110be95ace5e41d8fa534
3,636,460
def gen_delay_phs(fqs, ants, dly_rng=(-20, 20)): """ Produce a set of mock complex phasors corresponding to cables delays. Args: fqs (array-like): shape=(NFREQS,), GHz the spectral frequencies of the bandpasses ants (iterable): the indices/names of the antennas dly_range (2-tuple): ns the range of the delay Returns: g (dictionary): a dictionary of ant:exp(2pi*i*tau*fqs) pairs where keys are elements of ants and values are complex arrays with shape (NFREQS,) See Also: :meth:`~gen_gains`: uses this function to generate full gains. """ phs = {} for ai in ants: dly = np.random.uniform(dly_rng[0], dly_rng[1]) phs[ai] = np.exp(2j * np.pi * dly * fqs) return phs
3e9d2b6bab886c8d6b7b3ef5869d74cf21689e06
3,636,461
def sharpen(img, bg=None, t='laplace', blur_radius=30, blur_guided_eps=1e-8,
            use_guidedfilter='if_large_img'):
    """Use distortion model to deblur image.  Equivalent to usharp mask:
        1/t * img - (1-1/t) * blurry(img)
    Then, apply guided filter to smooth result but preserve edges.

    img - image to sharpen, assume normalized in [0,1]
    bg - image background
    t - the transmission map (inverse amount of sharpening) can be scalar,
      matrix of same (h, w) as img, or 3 channel image.  By default, use a
      multi-channel sharpened laplace filter on a smoothed image with 10x10
      kernel.  For enhancing fine details in large images.
    use_guidedfilter - a bool or the string 'if_large_img' determining
      whether to clean up the resulting sharpened image.  If the min image
      dimension is less that 1500, this cleanup operation may blur the
      image, ruining its quality.
    """
    # Background handling: default to "no background"; otherwise zero the
    # background pixels out of a copy so they don't bleed into the blur.
    if bg is None:
        bg = np.zeros(img.shape[:2], dtype='bool')
    else:
        img = img.copy()
        img[bg] = 0
    # assert np.isnan(img).sum() == 0
    # assert np.isnan(t).sum() == 0
    # blurring (faster than ndi.gaussian_filter(I)
    A = cv2.ximgproc.guidedFilter(
        # radiance.astype('float32'),
        img.astype('float32'),
        img.astype('float32'), blur_radius, blur_guided_eps)
    if t == 'laplace':
        # Recursive call: sharpen the morphological laplace with a fixed
        # transmission of 0.15, then normalize to [0,1] and invert to get
        # the per-pixel transmission map.
        t = 1-util.norm01(sharpen(ndi.morphological_laplace(
            img, (2,2,1), mode='wrap'), bg, 0.15), bg)
        # t = 1-util.norm01(ndi.morphological_laplace(
        #     img, (2,2,1), mode='wrap'), bg)
        # todo note: laplace t is 01 normalized.  should we keep the max
        # and just normalize the lower range (or vice versa or something)?
        # note2: if laplace is all zeros (due to bad input img), t will be all nan.
    # Broadcast a (h, w) transmission map over the channel axis if needed.
    if len(np.shape(t)) + 1 == len(img.shape):
        t_refined = np.expand_dims(t, -1).astype('float')
    else:
        t_refined = t
    if np.shape(t):
        t_refined[bg] = 1  # ignore background, but fix division by zero
    # Unsharp via the distortion model; the inner maximum floors the
    # transmission to avoid amplifying noise / dividing by ~0.
    J = (
        img.astype('float')-A) / np.maximum(
            1e-8, np.maximum(t_refined, np.min(t_refined)/2)) + A
    # assert np.isnan(J).sum() == 0
    if bg is not None:
        J[bg] = 0

    # applying a guided filter for smoothing image at this point can be
    # problematic to the image quality, significantly blurring it.
    if use_guidedfilter == 'if_large_img':
        # note: at some point, find a better threshold?  This works.
        use_guidedfilter = min(J.shape[0], J.shape[1]) >= 1500
    if not use_guidedfilter:
        J = check_and_fix_nan(J, img)
        return J
    # Final edge-preserving cleanup pass for large images only.
    r2 = cv2.ximgproc.guidedFilter(
        img.astype('float32'),
        J.astype('float32'), 2, 1e-8)
    r2 = check_and_fix_nan(r2, img)
    if bg is not None:
        r2[bg] = 0
    return r2
fd6b3a5e3464cf1948d2dc9de94b1924f484a3e8
3,636,462
def what_to_add(qtype, origword, newword, terminate):
    """Return the suffix needed to complete a partially quoted word.

    For example, given an origword of '\"frog' and a newword of
    '\"frogston', returns either:
       terminate=False: 'ston'
       terminate=True:  'ston\"'

    This is useful when calculating tab completion strings for readline.

    Args:
      qtype: the type of quoting to use (ie. the first character of origword)
      origword: the original word that needs completion.
      newword: the word we want it to be after completion.  Must start with
          origword.
      terminate: true if we should add the actual quote character at the end.

    Returns:
      The string to append to origword to produce (quoted) newword, or ''
      when newword does not extend origword.
    """
    if not newword.startswith(origword):
        return ''
    quoted_orig = quotify(qtype, origword, terminate=False)
    quoted_new = quotify(qtype, newword, terminate=terminate)
    return quoted_new[len(quoted_orig):]
c5b06aa1db322e0f6c6d041562ea3585482d789b
3,636,463
def intersect(start1, end1, start2, end2):
    """Return the intersection point of two lines, else return None.

    Ideas: For parallel lines to intercept (equal slope and y-intercept),
    they must be overlapping segments of the same infinite line.

    Intersection point is given by solving line equation 1 = line equation 2,
        m1 * x + c1 = m2 * x + c2
        x = (c2 - c1) / (m1 - m2)

    Additionally, the intersection must exist within the x-y boundaries of
    the two lines.
    """
    # Normalize each segment so its start is the leftmost endpoint.
    if start1.x > end1.x:
        tmp = end1
        end1 = start1
        start1 = tmp
    if start2.x > end2.x:
        tmp = end2
        end2 = start2
        start2 = tmp
    # Swap the two segments (both endpoint pairs together) so that
    # segment 1 is the one that starts further left.
    if start1.x > start2.x:
        tmp = start2
        start2 = start1
        start1 = tmp
        tmp = end2
        end2 = end1
        end1 = tmp
    # NOTE(review): Line presumably computes slope/intercept from the two
    # points; vertical segments (equal x) would make the slope undefined
    # or divide by zero -- confirm how Line handles that case.
    l1 = Line(start1, end1)
    l2 = Line(start2, end2)
    if l1.slope == l2.slope:
        # Parallel: they only "intersect" if collinear AND overlapping;
        # the left start of the right-hand segment is then a shared point.
        if l1.intercept == l2.intercept and start2.is_between(start1, end1):
            return start2
        return None
    # Solve m1*x + c1 = m2*x + c2 for the crossing point.
    x = (l2.intercept - l1.intercept) / (l1.slope - l2.slope)
    y = x * l1.slope + l1.intercept
    res = Point(x, y)
    # The infinite-line crossing must lie within both segments.
    if res.is_between(start1, end1) and res.is_between(start2, end2):
        return res
    return None
cd5affbdc57d48783cf50f188b979ad24f117c37
3,636,464
def start(update, context):
    """Display the welcome message and command list, then hand off to
    language selection.

    On a fresh start the user's Telegram language code is stored in
    ``context.user_data[LANG]`` (falling back to English when it is missing
    or unsupported) and the welcome/commands messages are sent.  User data
    is then cleared and START_OVER is set so repeated calls skip the
    greeting.
    """
    # If we're starting over we don't need do send a new message
    if not context.user_data.get(START_OVER):
        user = update.message.from_user
        try:
            context.user_data[LANG] = user.language_code
            logger.info(
                f'User language: {texts.LANGUAGE[context.user_data[LANG]]["name"]}')
        except Exception:
            # Bug fix: was a bare ``except:``, which also swallowed
            # SystemExit/KeyboardInterrupt.  Fall back to the default
            # language when the code is missing or not in texts.LANGUAGE.
            context.user_data[LANG] = 'en'

        update.message.reply_text(
            texts.WELCOME[context.user_data[LANG]] + ' \U0001F5FA',
            parse_mode=ParseMode.HTML)

        text = texts.COMMANDS[context.user_data[LANG]]
        update.message.reply_text(
            text=text,
            parse_mode=ParseMode.HTML,
            reply_markup=keyboard
        )

    # Clear user context
    context.user_data.clear()
    context.user_data[START_OVER] = True

    return select_lang(update, context)
cbb8e0f49f35de1dbd0f47e71b114b2c22ed5ec0
3,636,465
def model_dir_str(model_dir, hidden_units, logits, processor=lambda: pc.IdentityProcessor(), activation=tf.nn.relu, uuid=None):
    """Return a model-directory string describing the network layout.

    Note that it only encodes the information that describes the layout of
    the network -- training hyperparameters (in particular dropout rate)
    are not included.
    """
    pieces = [model_dir]
    # Collapse runs of equal layer sizes: "64_" or "64x3_" for repeats.
    for size, group in it.groupby(hidden_units):
        repeat = sum(1 for _ in group)
        if repeat == 1:
            pieces.append('{}_'.format(size))
        else:
            pieces.append('{}x{}_'.format(size, repeat))
    pieces.append('{}__'.format(logits))
    pieces.append(processor().__class__.__name__)
    # A functools.partial wraps a parameterized activation (leaky_relu with
    # an explicit alpha); otherwise assume the default alpha of 0.2.
    if isinstance(activation, ft.partial):
        activation_fn = activation.func
        alpha = str(activation.keywords['alpha']).replace('.', '')
    else:
        activation_fn = activation
        alpha = '02'
    pieces.append('_' + activation_fn.__name__.replace('_', ''))
    if activation_fn is tf.nn.leaky_relu:
        pieces.append(alpha)
    if uuid not in (None, ''):
        pieces.append('_' + str(uuid))
    return ''.join(pieces)
00ee6a98dfc1f614f335a187f3f998edc908e25d
3,636,466
def validate_search_inputs(row_id, search_column, search_value):
    """Validate that exactly one of 'row_id' or the
    'search_column'/'search_value' pair is defined.

    :param row_id: direct row identifier (mutually exclusive with search)
    :param search_column: column to search in
    :param search_value: value to search for
    :return: dict with "valid" (bool) and "msg" (str or None when valid)
    """
    return_value = {
        "valid": True,
        "msg": None
    }

    # Idiom fix: was ``True if ... else False`` -- bool() says the same thing.
    a_search_var_defined = bool(search_column or search_value)

    if row_id and a_search_var_defined:
        return_value["valid"] = False
        return_value["msg"] = "Only 'row_id' or the 'search_column and search_value' pair can be defined"

    elif not row_id and not a_search_var_defined:
        return_value["valid"] = False
        return_value["msg"] = "You must define either 'row_id' or the 'search_column and search_value' pair"

    return return_value
ce85ce1b973beab6b0476dfc05edc594fac8c420
3,636,467
def B1(i, n, t):
    """Return the Bernstein polynomial B_{i,n} evaluated at t, using the
    binomial definition.

    Indices outside [0, n] evaluate to 0.
    """
    if 0 <= i <= n:
        return binom(n, i) * t**i * (1 - t)**(n - i)
    return 0
ac97d943494e3b194d71de9ae1864633268499ec
3,636,468
def get_text_between(text, before_text, after_text):
    """Return the stripped substring of *text* between *before_text* and
    *after_text*.

    Raises VersionParsingError when either delimiter cannot be found.
    """
    start = text.find(before_text)
    if start == -1:
        error_message = f"Can't find '{before_text}' within a longer text."
        raise VersionParsingError(error_message)
    start += len(before_text)
    end = text.find(after_text, start)
    if end == -1:
        error_message = f"Can't find '{after_text}' within a longer text."
        raise VersionParsingError(error_message)
    return text[start:end].strip()
4ec7f1900881422599b05f64b1c8eec8c992452d
3,636,469
import os


def avi_common_argument_spec():
    """Return the argument spec shared by all Avi Ansible modules.

    Controller/username/password defaults are taken from the
    AVI_CONTROLLER/AVI_USERNAME/AVI_PASSWORD environment variables at call
    time.

    :return: dict
    """
    default_controller = os.environ.get('AVI_CONTROLLER', '')
    default_username = os.environ.get('AVI_USERNAME', '')
    default_password = os.environ.get('AVI_PASSWORD', '')
    credentials_spec = dict(
        controller=dict(default=default_controller),
        username=dict(default=default_username),
        password=dict(default=default_password, no_log=True),
        api_version=dict(default='16.4.4', type='str'),
        tenant=dict(default='admin'),
        tenant_uuid=dict(default='', type='str'),
        port=dict(type='int'),
        token=dict(default='', type='str', no_log=True),
        timeout=dict(default=300, type='int'),
        session_id=dict(default='', type='str', no_log=True),
        csrftoken=dict(default='', type='str', no_log=True)
    )
    return dict(
        controller=dict(default=default_controller),
        username=dict(default=default_username),
        password=dict(default=default_password, no_log=True),
        tenant=dict(default='admin'),
        tenant_uuid=dict(default=''),
        api_version=dict(default='16.4.4', type='str'),
        avi_credentials=dict(default=None, type='dict',
                             options=credentials_spec),
        api_context=dict(type='dict'),
        avi_disable_session_cache_as_fact=dict(default=False, type='bool'))
dfac1913e3b5af435ce8e9e8b53bf2d0d00aad11
3,636,470
def _get_functional_form_section(input_string):
    """Return the text of the '$functional_form' ... '$end' keyword section.

    Builds the pattern with the project's regex-helper primitives and asserts
    that the section is actually present in the input.
    """
    body = capturing(one_or_more(WILDCARD, greedy=False))
    pattern = (escape('$functional_form') + LINE_FILL + NEWLINE
               + body + escape('$end'))
    section = first_capture(pattern, input_string)
    assert section is not None
    return section
d4f2061f355c6a09ec564b0d60b0cf6b82d022b8
3,636,471
def rfftn(a, s=None, axes=None):
    """Multi-dimensional discrete Fourier transform for real input.

    Thin wrapper around :func:`pyfftw.interfaces.numpy_fft.rfftn` with an
    interface similar to :func:`numpy.fft.rfftn`.

    Parameters
    ----------
    a : array_like
        Input array (taken to be real)
    s : sequence of ints, optional (default None)
        Shape of the output along each transformed axis (input is cropped
        or zero-padded to match).
    axes : sequence of ints, optional (default None)
        Axes over which to compute the DFT.

    Returns
    -------
    complex ndarray
        DFT of the input array.
    """
    # pyfftw_threads is configured at module level elsewhere in this file.
    return pyfftw.interfaces.numpy_fft.rfftn(
        a,
        s=s,
        axes=axes,
        overwrite_input=False,
        planner_effort='FFTW_MEASURE',
        threads=pyfftw_threads,
    )
9df68b5655d624d6f095b8a33ce31bc706c7ac7a
3,636,472
from pathlib import Path
import pathlib
import requests
import asyncio


# NOTE(review): the import block above is reproduced as found in the dump;
# `Path` is used as a parameter marker below (FastAPI style), which
# `from pathlib import Path` would shadow — confirm against the real module.
async def ChannelLogoAPI(
    channel_id:str = Path(..., description='チャンネル ID 。ex:gr011'),
):
    """
    チャンネルのロゴを取得する。
    """

    # Look up the channel record for the requested ID.
    channel = await Channels.filter(channel_id=channel_id).get_or_none()

    # The requested channel ID does not exist.
    if channel is None:
        raise HTTPException(
            status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
            detail='Specified channel_id was not found',
        )

    # Ask browsers to cache the logo for 30 days.
    # ref: https://qiita.com/yuuuking/items/4f11ccfc822f4c198ab0
    header = {
        'Cache-Control': 'public, max-age=2592000',
    }

    # ***** Bundled logo (preferred when present) *****
    # Logos extracted from the broadcast stream are low quality and may be
    # missing entirely, so a bundled PNG (256x256) named like
    # NID32736-SID1024.png takes precedence.
    if pathlib.Path.exists(LOGO_DIR / f'{channel.id}.png'):
        return FileResponse(LOGO_DIR / f'{channel.id}.png', headers=header)

    # ***** Nationwide channels: choose the bundled logo by name prefix *****
    ## NHK General
    if channel.channel_type == 'GR' and channel.channel_name.startswith('NHK総合'):
        return FileResponse(LOGO_DIR / 'NID32736-SID1024.png', headers=header)
    ## NHK E-tele
    if channel.channel_type == 'GR' and channel.channel_name.startswith('NHKEテレ'):
        return FileResponse(LOGO_DIR / 'NID32737-SID1032.png', headers=header)

    # Community channels of cable operators serving several regions have
    # region-specific NID/SID pairs.
    # ref: https://youzaka.hatenablog.com/entry/2013/06/30/154243
    # J:COM TV
    if channel.channel_type == 'GR' and channel.channel_name.startswith('J:COMテレビ'):
        return FileResponse(LOGO_DIR / 'NID32397-SID23656.png', headers=header)
    # J:COM Channel
    if channel.channel_type == 'GR' and channel.channel_name.startswith('J:COMチャンネル'):
        return FileResponse(LOGO_DIR / 'NID32399-SID23672.png', headers=header)
    # eo Hikari Channel
    if channel.channel_type == 'GR' and channel.channel_name.startswith('eo光チャンネル'):
        return FileResponse(LOGO_DIR / 'NID32127-SID41080.png', headers=header)
    # ZTV
    if channel.channel_type == 'GR' and channel.channel_name.startswith('ZTV'):
        return FileResponse(LOGO_DIR / 'NID32047-SID46200.png', headers=header)

    # ***** Sub-channel logos *****
    # Terrestrial sub-channel: fall back to the main channel's logo.
    if channel.channel_type == 'GR' and channel.is_subchannel is True:
        # Main channel = the lowest service ID sharing this network ID.
        main_channel = await Channels.filter(network_id=channel.network_id).order_by('service_id').first()
        if main_channel is not None and pathlib.Path.exists(LOGO_DIR / f'{main_channel.id}.png'):
            return FileResponse(LOGO_DIR / f'{main_channel.id}.png', headers=header)

    # BS sub-channel: fall back to the main channel's logo.
    if channel.channel_type == 'BS' and channel.is_subchannel is True:
        # Derive the main channel's service ID. NHKBS1 and NHKBS Premium are
        # special-cased; otherwise the service ID ending in 1 is the main one.
        if channel.service_id == 102:
            main_service_id = 101
        elif channel.service_id == 104:
            main_service_id = 103
        else:
            main_service_id = int(channel.channel_number[0:2] + '1')
        main_channel = await Channels.filter(network_id=channel.network_id, service_id=main_service_id).first()
        if main_channel is not None and pathlib.Path.exists(LOGO_DIR / f'{main_channel.id}.png'):
            return FileResponse(LOGO_DIR / f'{main_channel.id}.png', headers=header)

    # ***** Fetch the logo from Mirakurun *****
    if CONFIG['general']['backend'] == 'Mirakurun':

        # Mirakurun-style service ID: NID and SID each zero-padded to 5 digits
        # and concatenated into one integer.
        mirakurun_service_id = int(str(channel.network_id).zfill(5) + str(channel.service_id).zfill(5))

        # Query the Mirakurun logo API (reached only when no bundled logo exists).
        mirakurun_logo_api_url = f'{CONFIG["general"]["mirakurun_url"]}/api/services/{mirakurun_service_id}/logo'
        mirakurun_logo_api_response:requests.Response = await asyncio.to_thread(requests.get, mirakurun_logo_api_url)

        # 200 = logo data returned; 503 means no logo data exists.
        if mirakurun_logo_api_response.status_code == 200:
            mirakurun_logo = mirakurun_logo_api_response.content
            return Response(content=mirakurun_logo, media_type='image/png', headers=header)

    # ***** Fetch the logo from EDCB *****
    if CONFIG['general']['backend'] == 'EDCB':

        edcb = CtrlCmdUtil()

        # Look the logo up in EDCB's LogoData folder.
        logo = None
        files = await edcb.sendFileCopy2(['LogoData.ini', 'LogoData\\*.*']) or []
        if len(files) == 2:
            logo_data_ini = EDCBUtil.convertBytesToString(files[0]['data'])
            logo_dir_index = EDCBUtil.convertBytesToString(files[1]['data'])
            logo_id = EDCBUtil.getLogoIDFromLogoDataIni(logo_data_ini, channel.network_id, channel.service_id)
            if logo_id >= 0:
                # Try logo types in order of decreasing image quality.
                for logo_type in [5, 2, 4, 1, 3, 0]:
                    logo_name = EDCBUtil.getLogoFileNameFromDirectoryIndex(logo_dir_index, channel.network_id, logo_id, logo_type)
                    if logo_name is not None:
                        files = await edcb.sendFileCopy2(['LogoData\\' + logo_name]) or []
                        if len(files) == 1:
                            logo = files[0]['data']
                            logo_media_type = 'image/bmp' if logo_name.upper().endswith('.BMP') else 'image/png'
                            break

        # Return the logo data when something was found.
        if logo is not None and len(logo) > 0:
            return Response(content=logo, media_type=logo_media_type, headers=header)

    # ***** Fall back to the default logo image *****
    # Reached only when neither a bundled logo nor a backend logo exists.
    return FileResponse(LOGO_DIR / 'default.png', headers=header)
ab0e149141cd678b7890927b5b9e50bb9c34a91e
3,636,473
import pathlib


def ImportFromNpb(
    db: bytecode_database.Database, cmake_build_root: pathlib.Path
) -> int:
    """Import the cmake files from the given build root.

    Returns the index of the last bytecode written (0 when none were found).
    """
    bytecodes_to_process = FindBitcodesToImport(cmake_build_root)
    i = 0
    with sqlutil.BufferedDatabaseWriter(db, max_buffer_length=10) as writer:
        # Process all bitcodes up front, then stream them into the writer.
        processed = [ProcessBitcode(b) for b in bytecodes_to_process]
        for i, bytecode in enumerate(processed):
            app.Log(1, "%s:%s", bytecode.source_name, bytecode.relpath)
            writer.AddOne(bytecode)
    return i
3f8635fe64c7bfcd306e847334badcc5a2c5b2e0
3,636,474
def _partial_ema_scov_init(n_dim=None, r:float=0.025, n_emp=None, target:float=None)->dict:
    """ Initialize object to track partial moments

           r:      Importance of current data point
           n_emp:  Discouraged. Really only used for tests. This is the number
                   of samples for which empirical is used, rather than running
                   updates. By default n_emp ~ 1/r
    """
    # One EMA sample-covariance tracker per quadrant.
    state = {quadrant: _ema_scov_init(n_dim=n_dim, r=r, n_emp=n_emp)
             for quadrant in QUADRANTS}
    # Copy the shared scalars from an arbitrary quadrant (they are identical).
    any_q = next(iter(state))
    state['n_dim'] = state[any_q]['n_dim']
    state['n_emp'] = state[any_q]['n_emp']
    state['rho'] = state[any_q]['rho']
    state['target'] = target
    state['sma'] = sma({}, n_dim, r=r)
    return state
5c73db5f3758781a7a47cc72ad85784ada6e57fa
3,636,475
def inner(thing):
    """Descend one level: wrap a lone DataPackage in a tuple, otherwise
    materialise the iterable as a list."""
    return (thing,) if isinstance(thing, DataPackage) else list(thing)
17eb8b2a272144b4a1732d8f6ce1f40c18f79b8a
3,636,476
def load_dataset(data_name):
    """Load dataset.

    Args:
        data_name (str): The name of dataset.

    Returns:
        dataset (pgl.dataset): The corresponding dataset, containing graph
            information, feature, etc.
        data_mode (str): 's' (small) or 'm' (medium) dataset mode.
    """
    data_name = data_name.lower()
    if data_name == 'reddit':
        data_mode = 'm'
        dataset = pgl.dataset.RedditDataset()
        # Assemble one full label vector from the train/val/test splits.
        y = np.zeros(dataset.graph.num_nodes, dtype="int64")
        y[dataset.train_index] = dataset.train_label
        y[dataset.val_index] = dataset.val_label
        y[dataset.test_index] = dataset.test_label
        dataset.y = y
    elif data_name == 'arxiv':
        data_mode = 'm'
        dataset = pgl.dataset.OgbnArxivDataset()
        dataset.graph = to_undirected(dataset.graph, copy_node_feat=False)
        dataset.graph = add_self_loops(dataset.graph, copy_node_feat=False)
    elif data_name == 'cora':
        data_mode = 's'
        dataset = pgl.dataset.CoraDataset()
    elif data_name == 'pubmed':
        data_mode = 's'
        dataset = pgl.dataset.CitationDataset("pubmed", symmetry_edges=True)
    elif data_name == 'citeseer':
        data_mode = 's'
        dataset = pgl.dataset.CitationDataset("citeseer", symmetry_edges=True)
    else:
        raise ValueError(data_name + " dataset doesn't exist currently.")

    if data_mode == 's':
        # Small datasets: row-normalize features and build boolean split masks.
        def normalize(feat):
            return feat / np.maximum(np.sum(feat, -1, keepdims=True), 1)

        indegree = dataset.graph.indegree()
        dataset.graph.node_feat["words"] = normalize(
            dataset.graph.node_feat["words"])
        dataset.feature = dataset.graph.node_feat["words"]
        dataset.train_mask = generate_mask(dataset.graph.num_nodes,
                                           dataset.train_index)
        dataset.val_mask = generate_mask(dataset.graph.num_nodes,
                                         dataset.val_index)
        dataset.test_mask = generate_mask(dataset.graph.num_nodes,
                                          dataset.test_index)

    return dataset, data_mode
f99dcc9d64085ef545658d34deb4936f37305f11
3,636,477
def require_dataset(hdf5_data, path, shape, dtype, maxshape=(None)):
    """
    Create or update a dataset, making sure that its shape is resized if needed.

    Args:
        hdf5_data: an already opened hdf5 file (or group)
        path: string, the path to the dataset
        shape: tuple of integers, the shape of the dataset
        dtype: string or numpy dtype, the type of the dataset
        maxshape: unused (kept for interface compatibility). A new dataset is
            always created fully resizable (maxshape of all None), and an
            existing dataset is resized as-is.

    Returns:
        The dataset newly created or updated.
    """
    dset = hdf5_data.get(path, default=None)
    if dset is None:
        # Dataset does not exist yet: create it resizable along every axis.
        # Bug fix: was `xrange`, a Python 2 builtin that raises NameError on
        # Python 3; also build the tuple directly rather than via a list.
        dset = hdf5_data.create_dataset(
            path, shape, dtype, maxshape=(None,) * len(shape))
    else:
        # Dataset already exists: resize it in place to the requested shape.
        dset.resize(shape)
    return dset
dc9b3b4db56854cc2c770875a754474bbc5f56a3
3,636,478
def findquote(lrrbot, conn, event, respond_to, query):
    """
    Command: !findquote QUERY
    Section: quotes

    Search for a quote in the quote database.
    """
    quotes = lrrbot.metadata.tables["quotes"]
    with lrrbot.engine.begin() as pg_conn:
        # Full-text search over the quote text, excluding deleted quotes.
        fts_column = sqlalchemy.func.to_tsvector('english', quotes.c.quote)
        query = sqlalchemy.select([
            quotes.c.id,
            quotes.c.quote,
            quotes.c.attrib_name,
            quotes.c.attrib_date,
            quotes.c.context,
        ]).where(
            (fts_column.op("@@")(sqlalchemy.func.plainto_tsquery('english', query)))
            & (~quotes.c.deleted)
        )
        # Pick one match at random.
        row = common.utils.pick_random_elements(pg_conn.execute(query), 1)[0]
    if row is None:
        return conn.privmsg(respond_to, "Could not find any matching quotes.")
    qid, quote, name, date, context = row
    conn.privmsg(respond_to, format_quote("Quote", qid, quote, name, date, context))
1d61f7c416d51d6c362b212b0217d1717ef79aa4
3,636,479
import re


def find_first_in_register_stop(seq):
    """Return the end index of the first in-frame stop codon in lowercase seq.

    An in-frame (in-register) stop codon ends at an index divisible by three,
    i.e. it starts on a codon boundary. Returns -1 when none is found.
    Matches are non-overlapping, as with re.finditer.
    """
    for match in re.finditer(r'(taa|tag|tga)', seq):
        if match.end() % 3 == 0:
            return match.end()
    return -1
56741828c42ecf0cb96044d03c8d1b6bc4994e01
3,636,480
from scipy import integrate as scint
import os


def computeScaling(filt1, filt2, camera1=None, camera2=None):
    """Determine the flux scaling factor that should be multiplied into filt1
    to match the throughput of filt2.

    Returns a single number: the ratio of integrated throughputs. This
    effectively assumes the source SED is flat across the bandpass, so only
    total throughput is corrected, not the shape of the filter.
    """
    # Resolve filter names (e.g. 'F160W') to .dat transmission-curve files.
    if filt1.lower().startswith('f'):
        filt1 = filtername2datfile(filt1, camera=camera1)
    if filt2.lower().startswith('f'):
        filt2 = filtername2datfile(filt2, camera=camera2)
    if not filt1.endswith('.dat') or not filt2.endswith('.dat'):
        print("Must specify a filter name (e.g. F160W) or a .dat file.")
        return (None)

    # Read both transmission curves from $SNDATA_ROOT/filters/HST,
    # restoring the working directory afterwards.
    topdir = os.path.abspath('.')
    sndataroot = os.environ['SNDATA_ROOT']
    os.chdir(sndataroot + '/filters/HST')
    w1, f1 = np.loadtxt(filt1, unpack=True)
    w2, f2 = np.loadtxt(filt2, unpack=True)
    os.chdir(topdir)

    # Integrate each curve and return the throughput ratio.
    int1 = scint.simps(f1, w1)
    int2 = scint.simps(f2, w2)
    return (int2 / int1)
4ac04ff6e2013b0898e7414c743d72e7e0e6afba
3,636,481
def greet_person(person: Person) -> str:
    """Return a greeting message for the given person.

    The message has the form 'Hello, <given_name> <family_name>!'

    >>> david = Person('David', 'Liu', 110, '110 St. George Street')
    >>> greet_person(david)
    'Hello, David Liu!'
    """
    given = person.given_name
    family = person.family_name
    return f'Hello, {given} {family}!'
3050e78295dfeee2d80c4d17fa7acc4bbfcb4d41
3,636,482
def sw(s1, s2, pen, matrix):
    """
    Takes as input two sequences, gap penalty, BLOSUM or PAM dictionary
    and returns the scoring matrix(F) and traceback matrix(P)
    """
    cols = len(s1) + 1
    rows = len(s2) + 1
    # Scoring matrix F and traceback matrix P, both (rows x cols), zero-filled.
    F = [[0] * cols for _ in range(rows)]
    P = [[0] * cols for _ in range(rows)]
    # First column traces up, first row traces left.
    for i in range(1, rows):
        P[i][0] = 'u'
    for j in range(1, cols):
        P[0][j] = 'l'
    for i in range(1, rows):
        for j in range(1, cols):
            # Candidate scores: gap from above, gap from the left, or a
            # (mis)match scored by the substitution matrix.
            up = F[i - 1][j] + pen
            left = F[i][j - 1] + pen
            diag = F[i - 1][j - 1] + int(matrix[s1[j - 1] + s2[i - 1]])
            # On ties the later entry wins: up < left < diag, matching the
            # original insertion order of the direction lookup.
            directions = {up: 'u', left: 'l', diag: 'd'}
            best = max(up, left, diag)
            # Local alignment: negative scores are clamped to zero.
            F[i][j] = best if best >= 0 else 0
            P[i][j] = directions.get(best)
    return (F, P)
c466997476259c4f2736ae0dec892f5f8e5f20e7
3,636,483
from datetime import datetime
from sys import path
import re
import os


# NOTE(review): `from sys import path` above is kept as found (never delete an
# import), but it was previously misused below as if it were os.path.
def open_data(num=None, folder=None, groupname="main", datasetname="data", date=None):
    """Convenience function to load data from an `AuspexDataContainer` given a
    file number and folder. Assumes that files are named with the convention
    `ExperimentName-NNNNN.auspex`.

    Parameters:
        num (int)
            File number to be loaded.
        folder (string)
            Base folder where the file is stored. If the `date` parameter is
            not None, assumes the file is in a dated subfolder. If no folder
            is specified, a dialogue box is opened instead.
        groupname (string)
            Group name of data to be loaded.
        datasetname (string, optional)
            Data set name to be loaded. Default is "data".
        date (string, optional)
            Date folder from which data is to be loaded. Format is "YYMMDD".
            Defaults to today's date.

    Returns:
        data (numpy.array)
            Data loaded from file.
        desc (DataSetDescriptor)
            Dataset descriptor loaded from file.

    Examples:
        Loading a data container

        >>> data, desc = open_data(42, '/path/to/my/data', "q1-main", date="190301")
    """
    # Bug fix: `date is None` used to be part of this guard, which sent the
    # caller to the interactive dialogue and made the documented
    # today's-date default below unreachable.
    if num is None or folder is None:
        return load_data()
    if date is None:
        # Bug fix: was `datetime.date.today()`, which is invalid on the
        # imported `datetime` class (AttributeError at runtime).
        date = datetime.today().strftime('%y%m%d')
    # Bug fix: `path` here was `sys.path` (a list, no join/isdir); use os.path.
    folder = os.path.join(folder, date)
    assert os.path.isdir(folder), f"Could not find data folder: {folder}"

    # Find the unique entry matching `ExperimentName-NNNNN.auspex` with NNNNN == num.
    p = re.compile(r".+-(\d+).auspex")
    files = [x.name for x in os.scandir(folder) if x.is_dir()]
    data_file = [x for x in files if p.match(x) and int(p.match(x).groups()[0]) == num]
    if len(data_file) == 0:
        raise ValueError("Could not find file!")
    elif len(data_file) > 1:
        raise ValueError(f"Ambiguous file information: found {data_file}")

    data_container = AuspexDataContainer(os.path.join(folder, data_file[0]))
    return data_container.open_dataset(groupname, datasetname)
1c70d9c8b81a7ca40e0ee5e691a0f811cfb02d33
3,636,484
def random_multiplex_ER(n, l, p, directed=False):
    """Build a random multiplex ER network: l layers of independent G(n, p)
    graphs, wrapped in a py3plex multi-layer container."""
    G = nx.MultiDiGraph() if directed else nx.MultiGraph()
    for layer in range(l):
        # One Erdos-Renyi graph per layer; nodes are tagged with their layer.
        layer_graph = nx.fast_gnp_random_graph(n, p, seed=None, directed=directed)
        for u, v in layer_graph.edges():
            G.add_edge((u, layer), (v, layer), type="default")
    ## construct the ppx object
    return multi_layer_network(network_type="multiplex").load_network(
        G, input_type="nx", directed=directed)
9a70997fb3de5db225b0282a3b217eb4e33f0a8c
3,636,485
def compare_structures(structure_a, structure_b):
    """Compare two StructureData objects A, B and return a delta (A - B) of the relevant properties."""
    delta = AttributeDict()
    delta.absolute = AttributeDict()
    delta.relative = AttributeDict()

    # Cell volume difference, absolute and relative to A.
    volume_a = structure_a.get_cell_volume()
    volume_b = structure_b.get_cell_volume()
    delta.absolute.volume = np.absolute(volume_a - volume_b)
    delta.relative.volume = np.absolute(volume_a - volume_b) / volume_a

    # Per-site position differences (A - B), in the same site order.
    pos_a = np.array([site.position for site in structure_a.sites])
    pos_b = np.array([site.position for site in structure_b.sites])
    delta.absolute.pos = pos_a - pos_b

    # Displacement magnitudes, absolute and relative to |position in A|.
    site_vectors = [delta.absolute.pos[i, :] for i in range(delta.absolute.pos.shape[0])]
    a_lengths = np.linalg.norm(pos_a, axis=1)
    delta.absolute.pos_lengths = np.array([np.linalg.norm(vector) for vector in site_vectors])
    delta.relative.pos_lengths = np.array([np.linalg.norm(vector) for vector in site_vectors]) / a_lengths

    # Cell vector lengths.
    cell_lengths_a = np.array(structure_a.cell_lengths)
    delta.absolute.cell_lengths = np.absolute(cell_lengths_a - np.array(structure_b.cell_lengths))
    delta.relative.cell_lengths = np.absolute(cell_lengths_a - np.array(structure_b.cell_lengths)) / cell_lengths_a

    # Cell angles.
    cell_angles_a = np.array(structure_a.cell_angles)
    delta.absolute.cell_angles = np.absolute(cell_angles_a - np.array(structure_b.cell_angles))
    delta.relative.cell_angles = np.absolute(cell_angles_a - np.array(structure_b.cell_angles)) / cell_angles_a

    return delta
93a7b2a5d28abe844b9daabce840afe275ed851e
3,636,486
from typing import Callable


def parse_response(expected: str) -> Callable:
    """
    Decorator factory for functions returning a requests.Response object.

    The wrapped function's response is parsed according to <expected>:
    failed requests (status >= 400) always yield the JSON body (a dict with
    status and message); otherwise 'json' yields a dict/list, 'text' a string
    and 'content' a binary string. The wrapped function's return annotation
    is updated to match the expected return value type.
    """
    def _parser(f):
        @wraps(f)
        def _f(*args, **kwargs):
            response = f(*args, **kwargs)
            # Error responses are always reported as parsed JSON.
            if not response.ok:
                return response.json()
            if expected == "content":
                return response.content
            if expected == "text":
                return response.text
            # 'json' and any unrecognized value fall through to JSON parsing.
            return response.json()
        # wraps() shares f's annotation dict with _f, so mutating it here
        # updates the wrapper's signature as well.
        f.__annotations__["return"] = _get_expected_return(expected)
        return _f
    return _parser
2d50fb98553e1803ef86056a0455a864c17bb065
3,636,487
def find_parents(candidate, branches):
    """Find parent genres of a given genre, ordered from the closest to the
    furthest parent. Falls back to [candidate] when no branch contains it."""
    needle = candidate.lower()
    for branch in branches:
        if needle in branch:
            idx = branch.index(needle)
            # Slice from the match back to the root, closest parent first.
            return list(branch[idx::-1])
    return [candidate]
17934d9ee1d3098cc3d08f38d9e3c387df6b7c19
3,636,488
import torch


def swig_ptr_from_FloatTensor(x):
    """ gets a Faiss SWIG float pointer from a pytorch tensor (on CPU or GPU) """
    assert x.is_contiguous()
    assert x.dtype == torch.float32
    storage = x.storage()
    # storage_offset() counts elements; float32 is 4 bytes, hence the * 4.
    return faiss.cast_integer_to_float_ptr(
        storage.data_ptr() + x.storage_offset() * 4)
d1cdf905fcd45053e9cf42306a68408fa68d1ddf
3,636,489
def generate_reference_user_status(user, references):
    """Generate ReferenceUserStatus instances for a given set of references.

    WARNING: the new instances are not saved in the database!
    """
    new_ref_status = []
    for ref in references:
        # Pick the earliest-published source belonging to this user's profile.
        source_query = ref.sources.filter(userprofile=user.userprofile)\
                                  .distinct().order_by("pub_date")
        try:
            source = source_query.get()
        except MultipleObjectsReturned:
            source = source_query.all()[0]
        except ObjectDoesNotExist:
            source = get_unknown_reference()
        rust = ReferenceUserStatus()
        rust.main_source = source
        rust.owner = user
        rust.reference = ref
        rust.reference_pub_date = ref.pub_date
        new_ref_status.append(rust)
    return new_ref_status
a0d859d06ee4f4a8f47aaad4e6814ae232e6d751
3,636,490
def binned_bitsets_by_chrom(f, chrom, chrom_col=0, start_col=1, end_col=2):
    """Read a whitespace-delimited interval file into a bitset, keeping only
    lines for the given chrom and skipping '#' comment lines."""
    bitset = BinnedBitSet(MAX)
    for line in f:
        if line.startswith("#"):
            continue
        fields = line.split()
        if fields[chrom_col] != chrom:
            continue
        start = int(fields[start_col])
        end = int(fields[end_col])
        bitset.set_range(start, end - start)
    return bitset
4e45b58d56f0dcb290995814666db36fa0fca0c7
3,636,491
def timeperiod_contains(
    timeperiod: spec.Timeperiod,
    other_timeperiod: spec.Timeperiod,
) -> bool:
    """Return whether timeperiod fully contains other_timeperiod (inclusive)."""
    outer_start, outer_end = timeperiod_crud.compute_timeperiod_start_end(timeperiod)
    inner_start, inner_end = timeperiod_crud.compute_timeperiod_start_end(
        other_timeperiod
    )
    return outer_start <= inner_start and inner_end <= outer_end
62c0f48b30e550a6c223aa46f0e63bf7baac9f4d
3,636,492
import copy


def asdict(obj, dict_factory=dict, filter_field_type=None):
    """
    Version of dataclasses.asdict that can use field type information.

    Args:
        obj: the object to convert. Dataclass instances, namedtuples, lists,
            tuples and dicts are recursed into; anything else is deep-copied.
        dict_factory: callable used to build each mapping of field values.
        filter_field_type: when given, dataclass fields whose
            ``metadata['type']`` is set to a *different* value are dropped;
            fields with no ``metadata['type']`` are always kept.

    Returns:
        A recursively converted copy of ``obj``.
    """
    from dataclasses import fields, is_dataclass

    if is_dataclass(obj) and not isinstance(obj, type):
        result = []
        for f in fields(obj):
            # Bug fix: the filter must only apply when a filter type was
            # requested. Previously `if filter_field_type is None: continue`
            # skipped EVERY field, so an unfiltered call returned an
            # empty dict.
            if filter_field_type is not None:
                field_type_from_metadata = f.metadata.get('type', None)
                if field_type_from_metadata is not None and field_type_from_metadata != filter_field_type:
                    continue
            value = asdict(getattr(obj, f.name), dict_factory, filter_field_type)
            result.append((f.name, value))
        return dict_factory(result)
    elif isinstance(obj, tuple) and hasattr(obj, '_fields'):
        # Namedtuple: rebuild positionally.
        return type(obj)(*[asdict(v, dict_factory, filter_field_type) for v in obj])
    elif isinstance(obj, (list, tuple)):
        return type(obj)(asdict(v, dict_factory, filter_field_type) for v in obj)
    elif isinstance(obj, dict):
        return type(obj)((asdict(k, dict_factory, filter_field_type),
                          asdict(v, dict_factory, filter_field_type))
                         for k, v in obj.items())
    else:
        return copy.deepcopy(obj)
2f5f60bbe7cef89cd13dbde1ffc1c3e11f8e2152
3,636,493
def process_pdb_file(pdb_file, atom_info_only=False):
    """
    Reads pdb_file data and returns it in a dictionary format.

    :param pdb_file: str, the location of the file to be read
    :param atom_info_only: boolean, whether to read the atom coordinates only
        or all atom data
    :return: pdb_data, dict organizing pdb data by section
    """
    # NOTE(review): column constants (PDB_*_LAST_CHAR, SEC_*, NUM_ATOMS, ...)
    # are defined elsewhere in this module; string literals below are kept
    # as found in the source.
    pdb_data = {NUM_ATOMS: 0, SEC_HEAD: [], SEC_ATOMS: [], SEC_TAIL: []}
    if atom_info_only:
        # Coordinates-only mode keys atoms by id instead of keeping a list.
        pdb_data[SEC_ATOMS] = {}
    atom_id = 0
    with open(pdb_file) as f:
        for line in f:
            line = line.strip()
            if len(line) == 0:
                continue
            line_head = line[:PDB_LINE_TYPE_LAST_CHAR]
            # Everything before the 'Atoms' section goes into the header.
            # Match the first 5 letters so numbered variants (ORIGX1..) hit;
            # noinspection SpellCheckingInspection
            if line_head[:-1] in ['HEADE', 'TITLE', 'REMAR', 'CRYST', 'MODEL',
                                  'COMPN', 'NUMMD', 'ORIGX', 'SCALE', 'SOURC',
                                  'AUTHO', 'CAVEA', 'EXPDT', 'MDLTY', 'KEYWD',
                                  'OBSLT', 'SPLIT', 'SPRSD', 'REVDA', 'JRNL ',
                                  'DBREF', 'SEQRE', 'HET ', 'HETNA', 'HETSY',
                                  'FORMU', 'HELIX', 'SHEET', 'SSBON', 'LINK ',
                                  'CISPE', 'SITE ',
                                  ]:
                # noinspection PyTypeChecker
                pdb_data[SEC_HEAD].append(line)
            elif line_head == 'ATOM ' or line_head == 'HETATM':
                # Renumber atoms so templates with '*****' after id 99999
                # still parse; keep the fixed-width formatting.
                atom_id += 1
                if atom_id > 99999:
                    atom_num = format(atom_id, 'x')
                else:
                    atom_num = '{:5d}'.format(atom_id)
                # Fixed-column slices of the ATOM/HETATM record.
                atom_type = line[PDB_ATOM_NUM_LAST_CHAR:PDB_ATOM_TYPE_LAST_CHAR]
                res_type = line[PDB_ATOM_TYPE_LAST_CHAR:PDB_RES_TYPE_LAST_CHAR]
                mol_num = int(line[PDB_RES_TYPE_LAST_CHAR:PDB_MOL_NUM_LAST_CHAR])
                pdb_x = float(line[PDB_MOL_NUM_LAST_CHAR:PDB_X_LAST_CHAR])
                pdb_y = float(line[PDB_X_LAST_CHAR:PDB_Y_LAST_CHAR])
                pdb_z = float(line[PDB_Y_LAST_CHAR:PDB_Z_LAST_CHAR])
                last_cols = line[PDB_Z_LAST_CHAR:]
                element_type = line[PDB_BEFORE_ELE_LAST_CHAR:PDB_ELE_LAST_CHAR]
                if atom_info_only:
                    atom_xyz = np.array([pdb_x, pdb_y, pdb_z])
                    pdb_data[SEC_ATOMS][atom_id] = {ATOM_TYPE: element_type,
                                                    ATOM_COORDS: atom_xyz}
                else:
                    line_struct = [line_head, atom_num, atom_type, res_type,
                                   mol_num, pdb_x, pdb_y, pdb_z, last_cols]
                    # noinspection PyTypeChecker
                    pdb_data[SEC_ATOMS].append(line_struct)
            elif line_head == 'END':
                pdb_data[SEC_TAIL].append(line)
                break
            else:
                # Everything after the 'Atoms' section goes into the tail.
                # noinspection PyTypeChecker
                pdb_data[SEC_TAIL].append(line)
    pdb_data[NUM_ATOMS] = len(pdb_data[SEC_ATOMS])
    return pdb_data
c3328ec0123d49e2776aee84a1fdce56fb9dc84c
3,636,494
def get_insns(*, cls=None, variant: Variant = RV32I):
    """
    Get all Instructions. This is based on all known subclasses of `cls`.
    If none is given, all Instructions are returned. Only instructions that
    can be generated (i.e. that have a mnemonic, opcode, etc.) are returned,
    so abstract classes in the hierarchy are not matched.

    :param cls: Base class to get list
    :type cls: Instruction
    :return: List of instruction classes
    """
    if cls is None:
        cls = Instruction
    matched = []
    # Abstract classes have no mnemonic and are filtered out here.
    if cls.mnemonic and (variant is None or cls.variant <= variant):
        matched = [cls]
    for subcls in cls.__subclasses__():
        matched += get_insns(cls=subcls, variant=variant)
    # Deduplicate while preserving discovery order.
    return list(dict.fromkeys(matched))
8f0947ebd5750e19f557959f9ccbe6c9e0ee944e
3,636,495
def _assert_all_equal_and_return(tensors, name=None):
    """Asserts that all tensors are equal and returns the first one."""
    with backend.name_scope(name or 'assert_all_equal'):
        # A single tensor is trivially equal to itself.
        if len(tensors) == 1:
            return tensors[0]
        # Compare every remaining tensor against the first.
        checks = [check_ops.assert_equal(tensors[0], t) for t in tensors[1:]]
        with ops.control_dependencies(checks):
            return array_ops.identity(tensors[0])
2c3043aceebd3bf44a0c2aecb4ed188a4a3d6629
3,636,496
def _get_igraph(G, edge_weights=None, node_weights=None):
    """
    Transforms a NetworkX graph into an iGraph graph.

    Parameters
    ----------
    G : NetworkX DiGraph or Graph
        The graph to be converted.
    edge_weights: list or string
        weights stored in edges in the original graph to be kept in new graph.
        If None, no weight will be carried. See get_full_igraph to get all
        weights and attributes into the graph.
    node_weights: list or string
        weights stored in nodes in the original graph to be kept in new graph.
        If None, no weight will be carried. See get_full_igraph to get all
        weights and attributes into the graph.

    Returns
    -------
    iGraph graph
    """
    if type(edge_weights) == str:
        edge_weights = [edge_weights]
    if type(node_weights) == str:
        node_weights = [node_weights]

    # Work on an integer-labelled copy so node ids match igraph vertex ids.
    G = G.copy()
    G = nx.relabel.convert_node_labels_to_integers(G)

    Gig = ig.Graph(directed=True)
    Gig.add_vertices(list(G.nodes()))
    Gig.add_edges(list(G.edges()))

    if 'kind' not in G.graph.keys():
        # Bug fix: `primal` was an unquoted bare name here, raising NameError
        # whenever 'kind' was absent; the intended value is the string.
        G.graph['kind'] = 'primal'  # if not specified, assume graph is primal
    if G.graph['kind'] == 'primal':
        Gig.vs['osmid'] = list(nx.get_node_attributes(G, 'osmid').values())
    elif G.graph['kind'] == 'dual':
        Gig.vs['osmid'] = list(G.edges)

    # Carry over the requested edge and node weights.
    if edge_weights != None:
        for weight in edge_weights:
            Gig.es[weight] = [n for _, _, n in G.edges(data=weight)]
    if node_weights != None:
        for weight in node_weights:
            Gig.vs[weight] = [n for _, n in G.nodes(data=weight)]

    for v in Gig.vs:
        v['name'] = v['osmid']
    return Gig
f444eac372d11c289bf157a24e9fccb5583ce500
3,636,497
def rename(isamAppliance, instance_id, id, new_name, check_mode=False, force=False):
    """
    Rename a directory in the administration pages root.

    :param isamAppliance: appliance connection object
    :param instance_id: reverse-proxy instance identifier
    :param id: path of the directory to rename
    :param new_name: the new directory name
    :param check_mode: when True, report the would-be change without applying it
    :param force: when True, skip the existence check and attempt the rename
    :return: return object from the appliance (unchanged when nothing to do)
    """
    # Resolve the directory only when not forcing; force bypasses the check.
    dir_id = _check(isamAppliance, instance_id, id, '') if force is False else None
    if force is True or dir_id != None:
        if check_mode is True:
            return isamAppliance.create_return_object(changed=True)
        return isamAppliance.invoke_put(
            "Renaming a directory in the administration pages root",
            "/wga/reverseproxy/{0}/management_root/{1}".format(instance_id, id),
            {
                'id': dir_id,
                'new_name': new_name,
                'type': 'directory'
            })
    return isamAppliance.create_return_object()
a9d645bdbdc4d5804b57fe93625eba558a9c9c14
3,636,498
def deepset_update_global_fn(feats: jnp.ndarray) -> jnp.ndarray:
    """Global update function for graph net."""
    # NOTE(review): a sum-pool over encoded nodes (`feats.sum(axis=-1)`) was
    # present but commented out in the original; features are passed through
    # the MLP as-is.
    mlp = hk.Sequential([
        hk.Linear(128), jax.nn.elu,
        hk.Linear(30), jax.nn.elu,
        hk.Linear(11),  # number of variabilities
    ])
    return mlp(feats)
34fd3038ed56a494d2a09fa829cfe48d583cea49
3,636,499