content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
def findRef(pattern, haystack):
    """Return a reference to the matching subexpression within the original
    structure (for in-place modifications) or None if there is no match."""
    match = searchFirst(pattern, haystack)
    if match is None:
        return None
    # match[0] is the path of the first hit; resolve it in the haystack.
    return get(haystack, match[0])
a0d42658fd2d0b04b6b80f873697ee5c75dc44f3
3,633,100
def parse_record(filename):
    """ This function parses house related data into a dictionary """
    parsed = {}
    with open(filename) as handle:
        all_lines = handle.readlines()
        # Skip the first three lines as well as the last one, and ignore
        # whitespace-only lines in between.
        for line in all_lines[3:-1]:
            if line.isspace():
                continue
            key, value = _parse_line(line)
            parsed[key] = value
    return parsed
994352f78bf333c6986927e5f0da37186deb663f
3,633,101
import socket
import logging


def get_hostname():
    """
    :return: safely return the hostname
    """
    hostname = "<Host Undetermined>"
    try:
        hostname = socket.gethostname()
    except Exception as e:
        # Never propagate: callers always get a string back.
        logging.error(f"Could not get hostname.\nException: {e}")
    return hostname
d5420b275c336b1295b16216a473557a24d54a61
3,633,102
def dict_get(d, key, default=None):
    """:yaql:get

    Returns value of a dictionary by given key or default if there is
    no such key.

    :signature: dict.get(key, default => null)
    :receiverArg dict: input dictionary
    :argType dict: dictionary
    :arg key: key
    :argType key: keyword
    :arg default: default value to be returned if key is missing in
        dictionary. null by default
    :argType default: any
    :returnType: any (appropriate value type)

    .. code::

        yaql> {"a" => 1, "b" => 2}.get("c")
        null
        yaql> {"a" => 1, "b" => 2}.get("c", 3)
        3
    """
    try:
        return d[key]
    except KeyError:
        return default
5fb6a71e507f62eb530215385c97c56a75765df7
3,633,103
def expect_keys_middleware(f):
    """
    Returns a 400 error to the client if the route function tries to get a key
    from an object and cause a KeyError

    :param f: function
    :return: function
    """
    def handle(*args, **kwargs):
        try:
            outcome = f(*args, **kwargs)
        except KeyError:
            # abort() raises, so this assignment is only a defensive default.
            outcome = None
            abort(400)
        return response_from(outcome)

    return handle
01f0c00158147ec43a4d6f67c1adf776ce05f6aa
3,633,104
def _Request(endpoint, params):
    """Sends a request to an endpoint and returns JSON data."""
    # Only permitted for unaltered (non-internal-only) queries.
    assert datastore_hooks.IsUnalteredQueryPermitted()

    # Authenticated, uncached POST; extra request options come from `params`.
    return request.RequestJson(
        endpoint, method='POST', use_cache=False, use_auth=True, **params)
0a998021638bcb1a04b13db5bbd5f2513e8a10ab
3,633,105
def dist(p, q):
    """
    Euclidean distance for multi-dimensional data

    Examples
    --------
    >>> dist((1, 2), (5, 5))
    5.0
    """
    squared_deltas = [(a - b) ** 2 for a, b in zip(p, q)]
    return sqrt(sum(squared_deltas))
1de287b22c892ad11d14c4dda1300029a23acf67
3,633,106
from typing import cast


def create_item(metadata_href: str) -> pystac.Item:
    """Creates a STAC Item from modis data.

    Args:
        metadata_href (str): The href to the metadata for this hdf. This
            function will read the metadata file for information to place in
            the STAC item.

    Returns:
        pystac.Item: A STAC Item representing this MODIS image.
    """
    metadata_root = ET.parse(metadata_href).getroot()

    # Item id
    name = metadata_root.find('GranuleURMetaData/CollectionMetaData/ShortName')
    assert name is not None
    version = metadata_root.find(
        'GranuleURMetaData/CollectionMetaData/VersionID')
    assert version is not None
    # NOTE(review): '00{}' assumes a single-digit version id -- confirm.
    short_item_id = '{}/00{}/{}'.format('MODIS', version.text, name.text)
    image_name = metadata_root.find(
        'GranuleURMetaData/DataFiles/DataFileContainer/DistributedFileName')
    assert image_name is not None
    assert image_name.text is not None
    # The item id is the distributed file name without its .hdf extension.
    item_id = image_name.text.replace('.hdf', '')

    # Geometry: build the polygon ring from the GPolygon boundary points.
    coordinates = []
    point_ele = '{}/{}'.format(
        'GranuleURMetaData/SpatialDomainContainer/',
        'HorizontalSpatialDomainContainer/GPolygon/Boundary/Point')
    for point in metadata_root.findall(point_ele):
        lon = point.find('PointLongitude')
        assert lon is not None
        assert lon.text is not None
        lat = point.find('PointLatitude')
        assert lat is not None
        assert lat.text is not None
        coordinates.append([float(lon.text), float(lat.text)])
    geom = {'type': 'Polygon', 'coordinates': [coordinates]}
    bounds = shape(geom).bounds

    # Item date
    prod_node = 'GranuleURMetaData/ECSDataGranule/ProductionDateTime'
    prod_dt_text = metadata_root.find(prod_node)
    assert prod_dt_text is not None
    assert prod_dt_text.text is not None
    prod_dt = str_to_datetime(prod_dt_text.text)

    item = pystac.Item(id=item_id,
                       geometry=geom,
                       bbox=bounds,
                       datetime=prod_dt,
                       properties=ADDITIONAL_MODIS_PROPERTIES[short_item_id])

    # Common metadata
    item.common_metadata.providers = [
        cast(Provider, MODIS_CATALOG_ELEMENTS[short_item_id]['provider'])
    ]
    item.common_metadata.description = cast(
        str, MODIS_CATALOG_ELEMENTS[short_item_id]['description'])
    instrument_short_name = metadata_root.find(
        'GranuleURMetaData/Platform/Instrument/InstrumentShortName')
    assert instrument_short_name is not None
    assert instrument_short_name.text is not None
    item.common_metadata.instruments = [instrument_short_name.text]
    platform_short_name = metadata_root.find(
        'GranuleURMetaData/Platform/PlatformShortName')
    assert platform_short_name is not None
    item.common_metadata.platform = platform_short_name.text
    item.common_metadata.title = cast(
        str, MODIS_CATALOG_ELEMENTS[short_item_id]['title'])

    # Hdf
    item.add_asset(
        ITEM_TIF_IMAGE_NAME,
        pystac.Asset(href=image_name.text,
                     media_type=pystac.MediaType.HDF,
                     roles=['data'],
                     title="hdf image"))

    # Metadata
    item.add_asset(
        ITEM_METADATA_NAME,
        pystac.Asset(href=image_name.text + '.xml',
                     media_type=pystac.MediaType.TEXT,
                     roles=['metadata'],
                     title='FGDC Metdata'))

    # Bands
    eo = EOExtension.ext(item, add_if_missing=True)
    if item_id in MODIS_BAND_DATA:
        eo.bands = MODIS_BAND_DATA[item_id]

    return item
3e646852d3cb03215d86643253e17cd90da0241b
3,633,107
import argparse


def get_args():
    """Parse and return the command-line arguments for NewP evaluation."""
    parser = argparse.ArgumentParser('NewP eval')
    parser.add_argument('--pred_path', required=True)
    parser.add_argument('--golden_path', required=True)
    return parser.parse_args()
db9073bfc4d41c387b52fe5253a287940401c45a
3,633,108
def getQRdataImg(file: str = "img.jpg") -> dict:
    """
    Searches the given image file for a QR code and, when one is found,
    returns it parsed as {'barkod': barkod, 'tckn': tckn}.
    """
    decoded = readQRImg(file)
    return parseQRdata(decoded)
3207faf0cb23c259db0c16b0164797b1978916ab
3,633,109
def removeHandler(handler):
    """
    Remove a handler from the internal logger

    Parameters
    ----------
    handler: :class:`python.logging.Handler`
        Handler to be removed
    """
    # Delegate to the module's internal logger instance.
    result = __logger__.removeHandler(handler)
    return result
b156d9b34759e5062e7079e7dcb15110bd6e12ab
3,633,110
import subprocess


def find_telomeres(seq, telomere="ACACCCTA", minlength=24):
    """Find telomere sequences with NCRF in a list of sequences

    Assumes that NCRF is in the PATH.

    Parameters
    ----------
    seq : str
        Sequence to be scanned
    telomere : str
        Sequence of the telomere repeat. Default is ACACCCTA, corresponding
        to Blepharisma and Stentor telomere.
    minlength : int
        Minimum length of consecutive telomeres to detect (bp)
    """
    command = ["NCRF", f"telomere:{telomere}", f"--minlength={str(minlength)}"]
    # NCRF reads the sequence from stdin; capture its stdout/stderr.
    return subprocess.run(command, capture_output=True, input=str.encode(seq))
3ae2e190a86b39ca21f59dba13aa98d6eb8247a5
3,633,111
def has_prefix(sub_s, d):
    """
    :param sub_s: (str) A substring that is constructed by neighboring letters
        on a 4x4 square grid
    :param d: (dict) Dictionary bucketing words by their first letter
        (d[letter] is an iterable of words starting with that letter)
    :return: (bool) True if any word with prefix sub_s exists, else False
    """
    # Only words bucketed under sub_s's first letter can possibly match.
    for word in d[sub_s[0]]:
        if word.startswith(sub_s):
            return True
    # Bug fix: the original fell off the end and implicitly returned None;
    # return an explicit bool as the docstring promises.
    return False
e5dfca7177362b7ec8d1e5ca67d43f2e963a1a46
3,633,112
def empty_transition_values():
    """Return an empty transition value."""
    zeroed = TransitionValues(0, 0)
    return zeroed
837f66baaf8142f3e9bab764dbf6810f147cee3f
3,633,113
def float32(img, start_range, end_range):
    """Convert image to data type float32"""
    # Min-max normalize into [start_range, end_range] with float32 output.
    return cv2.normalize(
        img, None, start_range, end_range, cv2.NORM_MINMAX, cv2.CV_32F)
34921a34d13e5369f20be22ed87b1213b7e2dc3f
3,633,114
from typing import Iterable
import uuid


def loadtest_name(prefix: str, scenario_name: str,
                  uniquifiers: Iterable[str]) -> str:
    """Constructs and returns a valid name for a LoadTest resource."""
    base_name = loadtest_base_name(scenario_name, uniquifiers)
    # Deterministic UUID derived from the base name.
    suffix = str(uuid.uuid5(uuid.NAMESPACE_DNS, base_name))
    name = '-'.join([prefix, suffix]) if prefix else suffix
    validate_loadtest_name(name)
    return name
f449bba90ddb4bc58ff1de9268a535be34414377
3,633,115
def get_STP_in_multi_cameras(obj_single_camera_stp_cam_1,
                             obj_single_camera_stp_cam_2, associate_dict):
    """Collect matched predicted/observed positions between two cameras.

    For every associated object pair, predicts where the cam-1 observation
    should appear after the cam-1/cam-2 time offset (using the per-object
    motion parameters) and pairs it with the actual cam-2 observation.

    Parameters:
        obj_single_camera_stp_cam_1: STP in cam 1
        obj_single_camera_stp_cam_2: STP in cam 2
        associate_dict: mapping relation between objects in cam_1 and cam_2

    Returns:
        (cam_1_x, cam_2_x, cam_1_y, cam_2_y): (N, 1) arrays of predicted
        cam-1 positions and observed cam-2 positions.
    """
    # chosen_id_cam_1 = 2
    # chosen_id_cam_2 = associate_dict[chosen_id_cam_1]
    # pt_box_1,frame_cam_1 = zip(*pt_trace_1[str(chosen_id_cam_1)])
    # pt_box_2,frame_cam_2 = zip(*pt_trace_2[str(chosen_id_cam_2)])

    # Per-object and global motion parameters for each camera.
    # NOTE(review): the *_2 parameter sets are read but never used below.
    STP_S_4_each_1 = obj_single_camera_stp_cam_1.motion_params_4_each
    STP_S_4_all_1 = obj_single_camera_stp_cam_1.motion_params_4_all
    STP_S_4_each_2 = obj_single_camera_stp_cam_2.motion_params_4_each
    STP_S_4_all_2 = obj_single_camera_stp_cam_2.motion_params_4_all
    pt_trace_1 = obj_single_camera_stp_cam_1.perspective_trace
    pt_trace_2 = obj_single_camera_stp_cam_2.perspective_trace

    # # ===== Method 1: Predict location using motion parameters for all objects =====
    # objs_associate_info = []
    # for k in associate_dict:
    #     for i in range(min(len(pt_trace_1[str(k)]),len(pt_trace_2[str(k)]))):
    #         objs_associate_info.append([pt_trace_1[str(k)][i],pt_trace_2[str(k)][i]])
    # for i,v in enumerate(objs_associate_info):
    #     x,y = v[0][0][0],v[0][0][1]
    #     delta_t = v[1][1]-v[0][1]
    #     pred_x = x + delta_t*STP_S_4_all_1['mean_x']
    #     pred_y = y + delta_t*STP_S_4_all_1['mean_y']
    #     v.append([pred_x,pred_y])
    # src_x,src_y = np.array([elem[2][0] for elem in objs_associate_info]),np.array([elem[2][1] for elem in objs_associate_info])
    # dst_x,dst_y = np.array([elem[1][0][0] for elem in objs_associate_info]),np.array([elem[1][0][1] for elem in objs_associate_info])

    # ===== Method 2 : Predict location using each object's parameter
    # Pair up trace entries for each associated object id; keys may be stored
    # as str or int, so both forms are handled.
    objs_associate_info = []
    for k in associate_dict:  # mapping dict
        if isinstance(k, (str)):
            if k in pt_trace_1 and k in pt_trace_2:
                # Trace lengths differ between cameras; iterate over the
                # shorter of the two so indexing stays in range.
                for i in range(min(len(pt_trace_1[k]), len(pt_trace_2[k]))):
                    objs_associate_info.append(
                        [k, pt_trace_1[k][i], pt_trace_2[k][i]])
        else:
            for i in range(min(len(pt_trace_1[str(k)]),
                               len(pt_trace_2[str(k)]))):
                objs_associate_info.append(
                    [k, pt_trace_1[str(k)][i], pt_trace_2[str(k)][i]])

    # Each entry is [obj_id, cam_1_sample, cam_2_sample] where a sample is
    # ((x, y), frame) -- TODO confirm against perspective_trace's producer.
    for i, v in enumerate(objs_associate_info):
        x, y = v[1][0][0], v[1][0][1]
        # Time offset between the paired cam-2 and cam-1 samples.
        delta_t = v[2][1]-v[1][1]
        try:
            pred_x = x + delta_t*STP_S_4_each_1[str(objs_associate_info[i][0])]['mean_x']
            pred_y = y + delta_t*STP_S_4_each_1[str(objs_associate_info[i][0])]['mean_y']
            v.append([pred_x, pred_y])
        except:
            # NOTE(review): bare except silently skips objects without motion
            # params; such entries keep length 3 and elem[3] below will raise
            # IndexError -- verify all ids have parameters.
            pass

    # Predicted cam-1 positions (index 3) vs observed cam-2 positions.
    cam_1_x, cam_1_y = np.array([elem[3][0] for elem in objs_associate_info]), np.array([elem[3][1] for elem in objs_associate_info])
    cam_2_x, cam_2_y = np.array([elem[2][0][0] for elem in objs_associate_info]), np.array([elem[2][0][1] for elem in objs_associate_info])

    # for i in range(len(src_y)):
    #     print(cam_1_y[i],cam_2_y[i])
    # plt.scatter(src_y,dst_y)
    # plt.show()

    # Reshape to column vectors for downstream regression models.
    cam_1_x = np.reshape(cam_1_x, (-1, 1))
    cam_2_x = np.reshape(cam_2_x, (-1, 1))
    cam_1_y = np.reshape(cam_1_y, (-1, 1))
    cam_2_y = np.reshape(cam_2_y, (-1, 1))

    # pred_x = model_x.predict(src_x)
    # pred_y = model_y.predict(src_y)
    # plt.plot(src_y,dst_y,'b-',linewidth=3)
    # plt.scatter(pred_x,dst_x)
    # plt.show()
    # for i in range(len(pred_y)):
    #     print(pred_y[i],dst_y[i])
    # err = pred_x-dst_x
    # e_i = np.linspace(1,len(src_x),len(src_x))
    # plt.plot(e_i,err,'r',linewidth=3)
    # plt.show()

    # # ===== FAIL: error too large, cause not identified; abandoned for now =====
    # # Get affine transform matrix using least square algorithm
    # trans_matrix,_ = cv2.findHomography(src_pts,dst_pts,cv2.RANSAC,1.0)
    # input_vector = np.array(src_pts)
    # pt = cv2.transform(input_vector[None,:,:],trans_matrix)
    # M = np.mat(trans_matrix[:2])
    # for elem in src_pts:
    #     im = np.mat((elem[0],elem[1],1)).T
    #     rim = M*im
    #     print(rim)

    return cam_1_x, cam_2_x, cam_1_y, cam_2_y
4f664158d5d8a6c04a659566ce7f207740925fa0
3,633,116
def resize(I, scale):
    """
    Resize the image `I` by a factor of `scale`, while keeping the original
    aspect ratio. If `scale` is smaller than `1.0`, cubic interpolation is
    used, otherwise nearest-neighbor interpolation.
    """
    if scale < 1.0:
        interpolation = cv2.INTER_CUBIC
    else:
        interpolation = cv2.INTER_NEAREST
    return cv2.resize(I, None, None, scale, scale, interpolation)
81aa82b8926ea241a1ab17a86ce4b1ac6eb7e0ad
3,633,117
def collisioncallback(report, fromphysics):
    """Whenever a collision or physics detects a collision, this function is called"""
    s = 'collision callback with '
    if report.plink1 is not None:
        s += '%s:%s ' % (report.plink1.GetParent().GetName(), report.plink1.GetName())
    else:
        s += '(NONE)'
    s += ' x '
    if report.plink2 is not None:
        s += ' %s:%s' % (report.plink2.GetParent().GetName(), report.plink2.GetName())
    else:
        s += '(NONE)'
    # Bug fix: the original used Python 2 print statements ("print s"), which
    # are a SyntaxError on Python 3; converted to print() calls.
    print(s)
    print('from physics: ', fromphysics)
    return CollisionAction.DefaultAction
168aec1aba472f5fed1c6ad19821d8aa32ee9897
3,633,118
def flip_3D_bb(x, image_width):
    """
    Flips the annotation of the image around y axis.

    Input:
        x: coordinates of points fbr, rbr, fbl, rbl, ftr, rtr, ftl, rtl
           (row 0 holds the x coordinates, one column per point)
        image_width: width of the flipped image
    Return:
        x_out - flipped coordinates; the caller's matrix is left unmodified
    """
    # Bug fix: the original wrote the mirrored x coordinates back into the
    # caller's matrix (x[0,:] = ...) before copying; work on a copy instead.
    x = np.matrix(np.copy(x))

    # First flip the x coordinates of the points
    x[0, :] = image_width - x[0, :]

    # Now switch left and right points:
    # (fbr,rbr) <-> (fbl,rbl) and (ftr,rtr) <-> (ftl,rtl)
    x_out = np.matrix(np.copy(x))
    column_swap = [2, 3, 0, 1, 6, 7, 4, 5]
    for dst, src in enumerate(column_swap):
        x_out[:, dst] = x[:, src]
    return x_out
846f3ae9edf4927b2ab58e374085122056d967c4
3,633,119
import torch


def train_model(bert_model, dataloader_train, optimizer, scheduler, device):
    """The architecture's training routine.

    Runs one pass over ``dataloader_train``, updating ``bert_model`` with
    ``optimizer``/``scheduler``, and returns the average training loss.
    """
    bert_model.train()
    loss_train_total = 0
    for batch_idx, batch in enumerate(dataloader_train):
        # set gradient to 0
        bert_model.zero_grad()
        # Batch layout: (input_ids, token_type_ids, attention_mask, labels).
        batch = tuple(b.to(device) for b in batch)
        inputs = {
            "input_ids": batch[0],
            "token_type_ids": batch[1],
            "attention_mask": batch[2],
            "labels": batch[3],
        }
        # return_dict=False so the model returns a (loss, logits) tuple.
        loss, _ = bert_model(
            inputs["input_ids"],
            token_type_ids=inputs["token_type_ids"],
            attention_mask=inputs["attention_mask"],
            labels=inputs["labels"],
            return_dict=False,
        )
        # Compute train loss
        loss_train_total += loss.item()
        loss.backward()
        # Clip gradients (max norm 1.0) to stabilise training.
        torch.nn.utils.clip_grad_norm_(bert_model.parameters(), 1.0)
        optimizer.step()
        scheduler.step()
    # torch.save(bert_model.state_dict(), f"models/ BERT_ft_epoch{epoch}.model")
    loss_train_avg = loss_train_total / len(dataloader_train)
    return loss_train_avg
fafa12c999d3c2d9716298b1ae64bd2f53dd9d09
3,633,120
from typing import Tuple
from typing import Set
from typing import List
import random
import re


def get_content(num_pages: int = constants.NUM_PAGES) -> Tuple[Set[str], List[str]]:
    """Retrieves page contents from random Wikipedia articles

    Args:
        num_pages: maximum number of pages to generate output from

    Returns:
        A tuple of (set of unique words seen, list of raw page contents).
    """
    all_words = set()  # type: Set[str]
    all_content = []  # type: List[str]
    for i in range(0, num_pages):
        page = None
        # Retry until a usable page is fetched for this slot.
        while page is None:
            try:
                topic = wikipedia.search(wikipedia.random(pages=1), results=1)
                page = wikipedia.page(topic)
            except wikipedia.exceptions.DisambiguationError as e:
                topic = wikipedia.search(random.choice(e.options), results=1)
                # use the first suggested page
                try:
                    page = wikipedia.page(topic)
                except wikipedia.exceptions.DisambiguationError:
                    # some topics always throw DisambiguationError like "Reference point"
                    continue
                except wikipedia.exceptions.PageError:
                    continue
            except wikipedia.exceptions.PageError:
                continue
            except Exception:
                # NOTE(review): broad catch-all keeps the loop alive on any
                # network/API failure -- intentional best-effort behavior.
                continue
        content = page.content
        # extract only words, not numbers
        words = re.findall(r"\d*[a-zA-Z]+\d*[a-zA-Z]+\d*", content)
        all_content.append(content)
        all_words.update(words)
    return all_words, all_content
c5ef011d7e6a8cc0cffdcdc2b4260de9eda847e5
3,633,121
import requests


def find_flight(location_from, location_to, date_from, date_to=None,
                num_adults=1, num_children=0, round=True):
    """Returns list of flights with given parameters

    Args:
        location_from (str): IATA code of departing airport
        location_to (str): IATA code of arriving airport
        date_from (str): date of departing  format: DD/MM/YYYY
        date_to (str): date of arriving  format: DD/MM/YYYY
        num_adults (int): number of adults traveling
        num_children (int): number of children traveling
        round (bool): True for a round trip, False for one-way
            (NOTE(review): this parameter shadows the builtin ``round``)

    Returns:
        list: a list of dictionaries containing airlines, link and price
    """
    url = 'https://kiwicom-prod.apigee.net/v2/search'
    params = {
        # NOTE(review): hardcoded API key committed to source -- should be
        # moved to configuration / environment.
        'apikey': 'sUjIKIVbdlm8cseNdWGsxUAVCBs47Mjh',
        'fly_from': location_from,
        'fly_to': location_to,
        'date_from': date_from,
        # NOTE(review): 'date_to' is set to date_from, making the departure
        # search window a single day -- confirm this is intended.
        'date_to': date_from,
        'return_from': date_to,
        'return_to': date_to,
        'adults': num_adults,
        'children': num_children,
        'select_airlines': 'LH,JU,TK,AF,TP',
        'max_stopovers': 2,
        'flight_type': 'round' if round else 'oneway'
    }
    response = requests.get(url, params=params)
    json_response = response.json()
    result = []
    data = json_response['data']
    # Keep only the first 10 itineraries.
    for option in data[:10]:
        price = option['conversion']
        link = option['deep_link']
        airlinesFrom = []
        airlinesReturn = []
        route = option['route']
        # 'return' == 0 marks outbound legs, otherwise return legs.
        for flight in route:
            if flight['return'] == 0:
                airlinesFrom.append(flight['airline'])
            else:
                airlinesReturn.append(flight['airline'])
        duration_from = create_time_string(option['duration']['departure'])
        # NOTE(review): 'return' duration is read unconditionally -- verify
        # the API provides it for oneway searches too.
        duration_return = create_time_string(option['duration']['return'])
        result.append({'price': price,
                       'link': link,
                       'airlines_from': airlinesFrom,
                       'airlines_return': airlinesReturn,
                       'duration_from': duration_from,
                       'duration_return': duration_return})
    return result
d5b5e68d3cebb05d4cece6c4c66ca4ab9b22d056
3,633,122
def get_date_row(repositories):
    """Generate row with dates."""
    triple = ["Sources", "Issues", "Correct"]
    # One column triple per repository, plus one more for the summary.
    return ["Date"] + triple * (len(repositories) + 1)
460b5d47e631dac4e918965eca9c11d4f6085bb1
3,633,123
import yaml


def get_extensions() -> list:
    """
    Gets extensions from the `features.yml` to be loaded into the bot

    :return: list of dotted extension paths (internal ones prefixed "cogs.")
    """
    log.info("Getting extensions...")
    exts = []
    # features_path is a module-level path to features.yml -- only load when
    # the file actually exists.
    if osp.isfile(features_path):
        with open(features_path, 'r') as file:
            data = yaml.full_load(file)
            extensions = data["extensions"]
            for e in extensions:
                e_name = e["extension"]
                # Optional subdirectory prefix for the extension module.
                if "directory" in e:
                    e_name = f"{e['directory']}.{e_name}"
                # Extensions are enabled unless explicitly disabled.
                e_enabled = e["enabled"] if "enabled" in e else True
                if e_enabled:
                    # External extensions are loaded by their raw module path;
                    # internal ones live under the "cogs" package.
                    e_external = e["external"] if "external" in e else False
                    if e_external:
                        exts.append(e_name)
                        log.debug(f"Extension Found | External | {e_name}")
                    else:
                        exts.append(f"cogs.{e_name}")
                        log.debug(f"Extension Found | Internal | {e_name}")
                    # else:
                    #     exts.append(f"cogs.{category}.{e_name}")
                    #     log.debug(f"Extension Found | Cog | {category}.{e_name}")
    log.info(f"Found *{len(exts)}* extensions.")
    # log.debug(exts)
    return exts
c5ca8f7d797e14bf52eb7c3edc7e3c43dd3f4662
3,633,124
def mini_batch_grad_descent(X, y, mini_batch_size=64, seed=0):
    """Split (X, y) into shuffled mini-batches for mini-batch gradient descent.

    Args:
        X: input array of shape (m, n_features)
        y: target array of shape (m, n_outputs)
        mini_batch_size: number of samples per mini-batch
        seed: seed for the shuffling permutation (reproducibility)

    Returns:
        list of (X_mini_batch, y_mini_batch) tuples; the last batch may be
        smaller when m is not a multiple of mini_batch_size.
    """
    np.random.seed(seed)
    m = X.shape[0]
    num_mini_batches = m // mini_batch_size
    mini_batches = []
    permutation = list(np.random.permutation(m))
    # shuffle the order of inputs and outputs (same permutation for both)
    X_shuffled = X[permutation, :]
    y_shuffled = y[permutation, :]
    for i in range(num_mini_batches):
        X_mini_batch = X_shuffled[i * mini_batch_size:(i + 1) * mini_batch_size, :]
        y_mini_batch = y_shuffled[i * mini_batch_size:(i + 1) * mini_batch_size, :]
        mini_batches.append((X_mini_batch, y_mini_batch))
    # Handle the leftover samples when m is not a multiple of mini_batch_size.
    # Bug fix: the original tested `m & mini_batch_size != 0`, which parses as
    # `m & (mini_batch_size != 0)` == `m & 1` (an odd/even check on m), so
    # leftover samples were silently dropped for even m; use modulo instead.
    if m % mini_batch_size != 0:
        X_mini_batch = X_shuffled[num_mini_batches * mini_batch_size:m, :]
        y_mini_batch = y_shuffled[num_mini_batches * mini_batch_size:m, :]
        mini_batches.append((X_mini_batch, y_mini_batch))
    return mini_batches
4c14dbde155dfc1090505dd5711b66bcf9947f90
3,633,125
def light_percentage_schema(gateway, child, value_type_name):
    """Return a validation schema for V_PERCENTAGE."""
    schema = {
        "V_PERCENTAGE": cv.string,
        "V_STATUS": cv.string,
    }
    return get_child_schema(gateway, child, value_type_name, schema)
22fdf2eb6b4b1d55d628448389cae924cd709aae
3,633,126
def get_generator(ds_root,  # dataset root directory (where to find meta.db file)
                  batch_size=8192,  # how many samples per batch to load
                  mode='train',  # mode of use of the dataset object (may be 'train', 'validation' or 'test')
                  num_workers=None,  # how many subprocesses to use for data loading by the Dataloader
                  n_samples=-1,  # number of samples to consider (-1 if you want to consider them all)
                  use_malicious_labels=True,  # whether to return the malicious label for the data points or not
                  use_count_labels=True,  # whether to return the counts for the data points or not
                  use_tag_labels=True,  # whether to return the tags for the data points or not
                  return_shas=False,  # whether to return the sha256 of the data points or not
                  features_lmdb='ember_features',  # name of the file containing the ember_features for the data
                  remove_missing_features='scan',  # whether to remove data points with missing features or not; it can be False/None/'scan'/filepath
                  # in case it is 'scan' a scan will be performed on the database in order to remove the data points
                  # with missing features
                  # in case it is a filepath then a file (in Json format) will be used to determine the data points
                  # with missing features
                  shuffle=False):  # set to True to have the data reshuffled at every epoch
    """Initialize generator factory.

    Args:
        ds_root: Dataset root directory (where to find meta.db file)
        batch_size: How many samples per batch to load (default: 8192)
        mode: Mode of use of the dataset object (may be 'train', 'validation'
            or 'test') (default: 'train')
        num_workers: How many subprocesses to use for data loading by the
            Dataloader (default: None)
        n_samples: Number of samples to consider (-1 if you want to consider
            them all) (default: -1)
        use_malicious_labels: Whether to return the malicious label for the
            data points or not (default: True)
        use_count_labels: Whether to return the counts for the data points or
            not (default: True)
        use_tag_labels: Whether to return the tags for the data points or not
            (default: True)
        return_shas: Whether to return the sha256 of the data points or not
            (default: False)
        features_lmdb: Name of the file containing the ember_features for the
            data (default: 'ember_features')
        remove_missing_features: Whether to remove data points with missing
            features or not; it can be False/None/'scan'/filepath. In case it
            is 'scan' a scan will be performed on the database in order to
            remove the data points with missing features; in case it is a
            filepath then a file (in Json format) will be used to determine
            the data points with missing features (default: 'scan')
        shuffle: Set to True to have the data reshuffled at every epoch
            (default: False)

    Returns:
        The data generator (a.k.a. Dataloader) produced by GeneratorFactory.
    """
    # if num_workers was not defined (it is None) then set it to the maximum
    # number of workers previously defined as the current system cpu_count
    if num_workers is None:
        num_workers = max_workers

    # return the Generator (a.k.a. Dataloader)
    return GeneratorFactory(ds_root=ds_root,
                            batch_size=batch_size,
                            mode=mode,
                            num_workers=num_workers,
                            n_samples=n_samples,
                            use_malicious_labels=use_malicious_labels,
                            use_count_labels=use_count_labels,
                            use_tag_labels=use_tag_labels,
                            return_shas=return_shas,
                            features_lmdb=features_lmdb,
                            remove_missing_features=remove_missing_features,
                            shuffle=shuffle)()
616ec6e7709aaeb5dbfde61bf516a0444fc3513c
3,633,127
def nodeclass_from_tag(tag):
    """Get the class for a reST node given its tag name.

    This searches in antidox's pseudo-elements, docutils' nodes and
    sphinx' addnodes.
    """
    try:
        return PseudoElementMeta.tag_map[tag]
    except KeyError:
        pass
    try:
        return getattr(addnodes, tag)
    except AttributeError:
        return getattr(_nodes, tag)
d0403de978cb900b60423e3827fe238d321999ec
3,633,128
def observation_space():
    """Return observation space.

    The state is (susceptible, exposed, infected, recovered).
    """
    dim = State.num_variables()
    low = np.zeros(dim)
    high = np.inf * np.ones(dim)
    return spaces.Box(low, high, dtype=np.float64)
b988db328736166c23817a2f2ccb19f7b3fed6bf
3,633,129
def generate_sun_hits_products(dataset, prdcfg):
    """
    generates sun hits products. Accepted product types:
        'PLOT_SUN_HITS': Plots in a sun-radar azimuth difference-sun-radar
            elevation difference grid the values of all sun hits obtained
            during the processing period
        'PLOT_SUN_RETRIEVAL': Plots in a sun-radar azimuth difference-sun-
            radar elevation difference grid the retrieved sun pattern
        'PLOT_SUN_RETRIEVAL_TS': Plots time series of the retrieved sun
            pattern parameters
            User defined parameters:
                dpi: int
                    The pixel density of the plot. Default 72
                add_date_in_fname: Bool
                    If true the year is added in the plot file name
        'PLOT_SUNSCAN': Plots a constant range radar azimuth-elevation of the
            sunscan field data
        'WRITE_SUN_HITS': Writes the information concerning possible sun hits
            in a csv file
        'WRITE_SUN_RETRIEVAL': Writes the retrieved sun pattern parameters in
            a csv file.
            User defined parameters:
                add_date_in_fname: Bool
                    If true the year is added in the csv file name
        'WRITE_SUNSCAN': Writes the sunscan parameters in a csv file
        All the products of the 'VOL' dataset group

    Parameters
    ----------
    dataset : tuple
        radar object and sun hits dictionary

    prdcfg : dictionary of dictionaries
        product configuration dictionary of dictionaries

    Returns
    -------
    filename : str
        the name of the file created. None otherwise
    """
    dssavedir = prdcfg['dsname']
    if 'dssavename' in prdcfg:
        dssavedir = prdcfg['dssavename']

    prdcfg['timeinfo'] = dataset['timeinfo']

    if prdcfg['type'] == 'WRITE_SUN_HITS':
        if 'sun_hits' not in dataset:
            return None

        savedir = get_save_dir(
            prdcfg['basepath'], prdcfg['procname'], dssavedir,
            prdcfg['prdname'], timeinfo=dataset['timeinfo'])

        fname = make_filename(
            'info', prdcfg['dstype'], 'detected', ['csv'],
            timeinfo=dataset['timeinfo'], timeformat='%Y%m%d')[0]

        fname = savedir+fname

        write_sun_hits(dataset['sun_hits'], fname)

        print('saved sun hits file: {}'.format(fname))

        # Bug fix: fname is a single path string here; the original returned
        # fname[0], i.e. only its first character.
        return fname

    if prdcfg['type'] == 'PLOT_SUN_HITS':
        if 'sun_hits_final' not in dataset:
            return None

        field_name = get_fieldname_pyart(prdcfg['voltype'])

        if prdcfg['voltype'] not in dataset['sun_hits_final']:
            warn(
                ' Field type ' + prdcfg['voltype'] +
                ' not available in data set. Skipping product ' +
                prdcfg['type'])
            return None

        savedir = get_save_dir(
            prdcfg['basepath'], prdcfg['procname'], dssavedir,
            prdcfg['prdname'], timeinfo=dataset['timeinfo'])

        fname_list = make_filename(
            'detected', prdcfg['dstype'], prdcfg['voltype'],
            prdcfg['imgformat'], timeinfo=dataset['timeinfo'],
            timeformat='%Y%m%d')

        for i, fname in enumerate(fname_list):
            fname_list[i] = savedir+fname

        field = create_sun_hits_field(
            dataset['sun_hits_final']['rad_el'],
            dataset['sun_hits_final']['rad_az'],
            dataset['sun_hits_final']['sun_el'],
            dataset['sun_hits_final']['sun_az'],
            dataset['sun_hits_final'][prdcfg['voltype']],
            prdcfg['sunhitsImageConfig'])

        if field is None:
            warn(
                'Unable to create field '+prdcfg['voltype'] +
                ' Skipping product ' + prdcfg['type'])
            return None

        plot_sun_hits(field, field_name, fname_list, prdcfg)

        print('----- save to '+' '.join(fname_list))

        return fname_list

    if prdcfg['type'] == 'WRITE_SUN_RETRIEVAL':
        if 'sun_retrieval' not in dataset:
            return None

        # Optionally stamp the year into the file name.
        timeinfo = None
        timeformat = None
        if prdcfg.get('add_date_in_fname', False):
            timeinfo = dataset['timeinfo']
            timeformat = '%Y'

        savedir = get_save_dir(
            prdcfg['basepath'], prdcfg['procname'], dssavedir,
            prdcfg['prdname'], timeinfo=None)

        fname = make_filename(
            'info', prdcfg['dstype'], 'retrieval', ['csv'],
            timeinfo=timeinfo, timeformat=timeformat,
            runinfo=prdcfg['runinfo'])[0]

        fname = savedir+fname

        write_sun_retrieval(dataset['sun_retrieval'], fname)

        print('saved sun retrieval file: {}'.format(fname))

        return fname

    if prdcfg['type'] == 'PLOT_SUN_RETRIEVAL':
        if 'sun_retrieval' not in dataset:
            return None

        field_name = get_fieldname_pyart(prdcfg['voltype'])
        # Map the requested field onto the retrieval parameter set it uses.
        par = None
        if field_name == 'sun_est_power_h':
            par = 'par_h'
        elif field_name == 'sun_est_power_v':
            par = 'par_v'
        elif field_name == 'sun_est_differential_reflectivity':
            par = 'par_zdr'

        if par not in dataset['sun_retrieval']:
            warn(
                ' Field type ' + prdcfg['voltype'] +
                ' not available in data set. Skipping product ' +
                prdcfg['type'])
            # Bug fix: the original fell through after the warning and later
            # indexed dataset['sun_retrieval'][par] with a missing key.
            return None

        savedir = get_save_dir(
            prdcfg['basepath'], prdcfg['procname'], dssavedir,
            prdcfg['prdname'], timeinfo=dataset['timeinfo'])

        fname_list = make_filename(
            'retrieval', prdcfg['dstype'], prdcfg['voltype'],
            prdcfg['imgformat'], timeinfo=dataset['timeinfo'],
            timeformat='%Y%m%d')

        for i, fname in enumerate(fname_list):
            fname_list[i] = savedir+fname

        if dataset['sun_retrieval'][par] is None:
            warn(
                ' Invalid retrieval parameters. Skipping product ' +
                prdcfg['type'])
            return None

        field = create_sun_retrieval_field(
            dataset['sun_retrieval'][par], field_name,
            prdcfg['sunhitsImageConfig'],
            lant=dataset['sun_retrieval']['lant'])

        if field is not None:
            plot_sun_hits(field, field_name, fname_list, prdcfg)

        print('----- save to '+' '.join(fname_list))

        return fname_list

    if prdcfg['type'] == 'PLOT_SUN_RETRIEVAL_TS':
        if 'sun_retrieval' not in dataset:
            return None

        dpi = prdcfg.get('dpi', 72)

        timeinfo = None
        timeformat = None
        if prdcfg.get('add_date_in_fname', False):
            timeinfo = dataset['timeinfo']
            timeformat = '%Y'

        # Read the accumulated retrieval time series back from disk.
        savedir = get_save_dir(
            prdcfg['basepath'], prdcfg['procname'], dssavedir,
            prdcfg['prdid'], timeinfo=None)

        fname = make_filename(
            'info', prdcfg['dstype'], 'retrieval', ['csv'],
            timeinfo=timeinfo, timeformat=timeformat,
            runinfo=prdcfg['runinfo'])

        fname = savedir + fname[0]

        sun_retrieval = read_sun_retrieval(fname)

        if sun_retrieval[0] is None:
            warn(
                'Unable to read sun retrieval file '+fname)
            return None

        if len(sun_retrieval[0]) < 2:
            warn(
                'Unable to plot sun retrieval time series. ' +
                'Not enough data points.')
            return None

        savedir = get_save_dir(
            prdcfg['basepath'], prdcfg['procname'], dssavedir,
            prdcfg['prdname'], timeinfo=None)

        fname_list = make_filename(
            'retrieval_ts', prdcfg['dstype'], prdcfg['voltype'],
            prdcfg['imgformat'], timeinfo=timeinfo, timeformat=timeformat,
            runinfo=prdcfg['runinfo'])

        for i, fname in enumerate(fname_list):
            fname_list[i] = savedir+fname

        titl = (prdcfg['runinfo']+' Sun Retrieval ' +
                sun_retrieval[1][0].strftime('%Y%m%d')+'-' +
                sun_retrieval[1][-1].strftime('%Y%m%d'))

        figfname = plot_sun_retrieval_ts(
            sun_retrieval, prdcfg['voltype'], fname_list, titl=titl, dpi=dpi)

        if figfname is None:
            return None

        print('----- save to '+' '.join(fname_list))
        return fname_list

    if prdcfg['type'] == 'WRITE_SUNSCAN':
        if 'sun_retrieval' not in dataset:
            return None

        # Column descriptions written as a header into the csv file.
        text = [
            "SunScan info",
            "sun_az: [deg] Azimuth sun position ",
            "sun_el: [deg] Elevation sun position",
            "noise_pwr: [dBm] Noise power",
            "sun_maxpwr_noise: [dBm]"
            " sun maximal power sample (including noise)",
            "sun_maxpwr_nonoise: [dBm]"
            " sun maximal power sample without noise",
            "sun_maxpwr_fit: [dBm]"
            " sun maximal fitted power (without noise)",
            "sun_maxpwr_toa: [dBm]"
            " sun maximal power at top of atmosphere",
            "az_offset: [deg]"
            " Azimuth shift of fitted maxima to sun azimuth",
            "el_offset: [deg]"
            " Elevation shift of fitted maxima to sun elevation",
            "az_phi3db: [deg]"
            " Half-power beam width in azimuth",
            "el_phi3db: [deg]"
            " Half-power beam width in elevation",
            "fit_stddev: [dBm]"
            " Standard deviation (fit to samples)",
            "num_samples: [#]"
            " Number of samples used for the sun power fitting"
        ]

        sunRdata = dataset['sun_retrieval']

        if dataset['field_name'] == 'noisedBm_hh':
            data = {
                'dstype': prdcfg['dstype'],
                'unit': 'dBm',
                'time': sunRdata['sunscan_time'],
                'label': [
                    "sun_az", "sun_el", "noise_pwr", "sun_maxpwr_noise",
                    "sun_maxpwr_nonoise", "sun_maxpwr_fit", "sun_maxpwr_toa",
                    "az_offset", "el_offset", "az_phi3db", "el_phi3db",
                    "fit_stddev", "num_samples"],
                'value': [
                    sunRdata['sunpos_az'], sunRdata['sunpos_el'],
                    sunRdata['noise_pwr'], sunRdata['sun_maxpwr_noise'],
                    sunRdata['sun_maxpwr_nonoise'], sunRdata['dBm_sun_est'],
                    sunRdata['dBm_sun_est_toa'], sunRdata['az_bias_h'],
                    sunRdata['el_bias_h'], sunRdata['az_width_h'],
                    sunRdata['el_width_h'], sunRdata['std(dBm_sun_est)'],
                    sunRdata['nhits_h']]
            }
        elif dataset['field_name'] == 'noisedBm_vv':
            data = {
                'dstype': prdcfg['dstype'],
                'unit': 'dBm',
                'time': sunRdata['sunscan_time'],
                'label': [
                    "sun_az", "sun_el", "noise_pwr", "sun_maxpwr_noise",
                    "sun_maxpwr_nonoise", "sun_maxpwr_fit", "sun_maxpwr_toa",
                    "az_offset", "el_offset", "az_phi3db", "el_phi3db",
                    "fit_stddev", "num_samples"],
                'value': [
                    sunRdata['sunpos_az'], sunRdata['sunpos_el'],
                    sunRdata['noise_pwr'], sunRdata['sun_maxpwr_noise'],
                    sunRdata['sun_maxpwr_nonoise'], sunRdata['dBmv_sun_est'],
                    sunRdata['dBmv_sun_est_toa'], sunRdata['az_bias_v'],
                    sunRdata['el_bias_v'], sunRdata['az_width_v'],
                    sunRdata['el_width_v'], sunRdata['std(dBmv_sun_est)'],
                    sunRdata['nhits_v']]
            }
        else:
            warn('ERROR: No valid datatype for WRITE_SUNSCAN product.')
            # Bug fix: the original fell through with `data` undefined,
            # producing a NameError below.
            return None

        savedir = get_save_dir(
            prdcfg['basepath'], prdcfg['procname'], dssavedir,
            prdcfg['prdname'], prdcfg['timeinfo'])

        fname1 = make_filename(
            'ts', prdcfg['dstype'], dataset['field_name'], ['csv'],
            timeinfo=prdcfg['timeinfo'], timeformat='%Y%m%d',
            runinfo=prdcfg['runinfo'])[0]

        fname1 = savedir+fname1

        write_timeseries_point(fname1, data, prdcfg['dstype'], text)

        print('saved sunscan file: {}'.format(fname1))

        return fname1

    if prdcfg['type'] == 'PLOT_SUNSCAN':
        radar = dataset['radar_out']
        sun_hits = dataset['sun_hits']
        field_name = dataset['field_name']

        if field_name not in radar.fields:
            warn(
                ' Field type ' + field_name +
                ' not available in data set. Skipping product ' +
                prdcfg['type'])
            return None

        # user defined parameters
        azi_res = prdcfg.get('azi_res', None)
        ele_res = prdcfg.get('ele_res', None)
        vmin = prdcfg.get('vmin', None)
        vmax = prdcfg.get('vmax', None)
        angtol = prdcfg.get('ang_tol', 0.5)

        savedir = get_save_dir(
            prdcfg['basepath'], prdcfg['procname'], dssavedir,
            prdcfg['prdname'], prdcfg['timeinfo'])

        fname_list = make_filename(
            'constr', prdcfg['dstype'], prdcfg['dsname'],
            prdcfg['imgformat'],
            prdcfginfo='rng'+'{:.1f}'.format(
                dataset['radar_out'].range['data'][0]),
            timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])

        for i, fname in enumerate(fname_list):
            fname_list[i] = savedir+fname

        # Bug fix: azi_res/ele_res were read from the configuration above but
        # the original passed hard-coded None values to the plotting call.
        plot_fixed_rng_sun(
            radar, field_name, sun_hits, prdcfg, fname_list, azi_res=azi_res,
            ele_res=ele_res, ang_tol=angtol, vmin=vmin, vmax=vmax)

        print('----- save to '+' '.join(fname_list))

        return fname_list

    # Fall back to the generic volume products for any other product type.
    if 'radar_out' in dataset:
        return generate_vol_products(dataset, prdcfg)

    return None
ecf86308a2a4a3d4e9dca0dd4884e79748e46473
3,633,130
import requests


def get_option_chains(symbol: str, expiry: str) -> pd.DataFrame:
    """
    Request the option chain for one ticker/expiration pair from the
    Tradier sandbox API.

    Parameters
    ----------
    symbol : str
        Ticker to get options for
    expiry : str
        Expiration date in the form of "YYYY-MM-DD"

    Returns
    -------
    chains: pd.DataFrame
        Dataframe with options for the given Symbol and Expiration date,
        or None when the request is rejected (bad token).
    """
    response = requests.get(
        "https://sandbox.tradier.com/v1/markets/options/chains",
        params={"symbol": symbol, "expiration": expiry, "greeks": "true"},
        headers={
            "Authorization": f"Bearer {TRADIER_TOKEN}",
            "Accept": "application/json",
        },
    )

    # 401 means the bearer token was not accepted.
    if response.status_code == 401:
        print("Error in request.get -- check token")
        return

    return process_chains(response)
ac955c87543ac03f1019bc0ba359e2d251e3bd39
3,633,131
def _fid(mu1, sigma1, mu2, sigma2, eps=1e-6): """Numpy implementation of the Frechet Distance. The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1) and X_2 ~ N(mu_2, C_2) is d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)). Stable version by Dougal J. Sutherland. Params: -- mu1 : Numpy array containing the activations of a layer of the inception net (like returned by the function 'get_predictions') for generated samples. -- mu2 : The sample mean over activations, precalculated on an representive data set. -- sigma1: The covariance matrix over activations for generated samples. -- sigma2: The covariance matrix over activations, precalculated on an representive data set. Returns: -- : The Frechet Distance. """ mu1 = np.atleast_1d(mu1) mu2 = np.atleast_1d(mu2) sigma1 = np.atleast_2d(sigma1) sigma2 = np.atleast_2d(sigma2) assert mu1.shape == mu2.shape, \ 'Training and test mean vectors have different lengths' assert sigma1.shape == sigma2.shape, \ 'Training and test covariances have different dimensions' diff = mu1 - mu2 # Product might be almost singular covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False) if not np.isfinite(covmean).all(): msg = ('fid calculation produces singular product; ' 'adding %s to diagonal of cov estimates') % eps print(msg) offset = np.eye(sigma1.shape[0]) * eps covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset)) # Numerical error might give slight imaginary component if np.iscomplexobj(covmean): if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3): m = np.max(np.abs(covmean.imag)) raise ValueError('Imaginary component {}'.format(m)) covmean = covmean.real tr_covmean = np.trace(covmean) return (diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean)
ac70f17f30160e34f40dd3fbd953303175684263
3,633,132
from typing import Tuple


def gen_structures_n(size: Tuple[int, int], n: int) -> Pylist:
    """Generate every symmetry-reduced placement of n adsorbates on a grid.

    Each returned numpy array is a 0/1 grid of shape ``size`` where 1 marks
    a site occupied by an adsorbate and 0 an empty site.

    Args:
        size (Tuple[int, int]): the size of the adsorbate grid
        n (int): the number of adsorbates to fill

    Returns:
        List[np.array]: symmetry-inequivalent occupation grids
    """
    n_sites = size[0] * size[1]

    # Enumerate every raw occupation pattern as a 0/1 grid.
    raw_grids = []
    for occupied in combinations(range(n_sites), n):
        flags = [1 if i in occupied else 0 for i in range(n_sites)]
        raw_grids.append(np.array(flags).reshape(size))

    # Keep exactly one representative per symmetry class.
    seen_itcoords = []
    unique_grids = []
    for grid in raw_grids:
        itcoords = intranslatable_coords(perm_to_coords(grid))
        if any(test_symmetry(itcoords, ref) for ref in seen_itcoords):
            continue  # symmetry-equivalent to an already kept grid
        seen_itcoords.append(itcoords)
        unique_grids.append(grid)
    return unique_grids
b437469247b34fff5e57aef8d69a2e566837544a
3,633,133
def get_neutron_subnetpool_name(subnet_cidr):
    """Returns a Neutron subnetpool name.

    :param subnet_cidr: The subnetpool allocation cidr
    :returns: the Neutron subnetpool_name name formatted appropriately
    """
    # Name is "<configured prefix>-<cidr>".
    name_parts = [cfg.CONF.subnetpool_name_prefix, subnet_cidr]
    return '-'.join(name_parts)
2f438ad9f53e76d5ff8cfe11e7eb613e8c2eab08
3,633,134
def verify(ui, repo):
    """verify the integrity of the repository

    Verify the integrity of the current repository. This will perform an
    extensive check of the repository's integrity, validating the hashes and
    checksums of each entry in the changelog, manifest, and tracked files, as
    well as the integrity of their crosslinks and indices.
    """
    # NOTE(review): in Mercurial command functions the docstring above is
    # typically surfaced as user-facing help text -- confirm before editing it.
    # `ui` is unused here; hg.verify only needs the repository object.
    return hg.verify(repo)
07689fb0f75408f3a976218d08f20fa0783586dc
3,633,135
def error500(request):
    """Render the HTTP 500 error page.

    :param request: incoming HTTP request
    :return: "50x.html" rendered with status code 500
    """
    template_name = "50x.html"
    return render(request, template_name, status=500)
3ee60a73a526dc1337bbac01d3515c332febb74b
3,633,136
def apply_anti_area(
    self: Player, target: Player, rules: dict, left: bool
) -> EffectReturn:
    """
    Apply the effects of anti_area: the acting player takes 100 damage
    when its chosen action is "area".
    """
    attacker = inflict_damage(100, self) if self.action == "area" else self
    return attacker, target, rules
65bd6f39e88b93fd03bce54db0ead906ac8cf924
3,633,137
from datetime import timezone


def as_utc(time):
    """Convert a datetime to UTC.

    Uses the standard-library ``datetime.timezone.utc`` instead of the
    third-party ``pytz`` module: for ``astimezone()`` the two UTC objects
    are interchangeable, and dropping pytz removes a deprecated dependency.

    :param time: a datetime (aware, or naive and interpreted as local time
        by ``astimezone``).
    :return: the same instant expressed in UTC.
    """
    return time.astimezone(timezone.utc)
716858e88daa43b61f5cedae72e74dafcf67d423
3,633,138
import math


def gelu_new(x):
    """
    Implementation of the gelu activation function currently in Google Bert
    repo (identical to OpenAI GPT). Also see https://arxiv.org/abs/1606.08415
    """
    # tanh-approximation of GELU: 0.5*x*(1 + tanh(sqrt(2/pi)*(x + c*x^3)))
    inner = math.sqrt(2 / math.pi) * (x + 0.044715 * th.pow(x, 3))
    return 0.5 * x * (1 + th.tanh(inner))
7cb1108b33617341c71c08f470c68a273b654fe1
3,633,139
def message():
    """Mimic message abstraction"""
    def _message(fields):
        # Defaults for every required Message field; caller-supplied
        # fields override them.
        defaults = {
            "is_direct": False,
            "is_private_chat": False,
            "is_group_chat": True,
            "will_is_mentioned": False,
            "will_said_it": False,
            "sender": "TBD",
            "backend_supports_acl": True,
            "content": "TBD",
            "backend": "TBD",
            "original_incoming_event": "TBD"
        }
        defaults.update(fields)
        return Message(**defaults)
    return _message
d17cc18562d76cce4dbc77409e9b53d2c04b5d40
3,633,140
def detail_url(product_id):
    """Return product detail Url"""
    route_name = 'api:products-detail'
    return reverse(route_name, args=[product_id])
362b6101b352cdc8b241af6cc4e4747b4feed282
3,633,141
import itertools


def encode(data):
    """Encode data using LZ78 compression.

    Args:
        data: A str to encode.

    Returns:
        A list of two-element tuples of int and str, each pairing the
        dictionary index of the longest known prefix (0 = empty) with
        the following character.
    """
    phrase_table = {}
    next_index = itertools.count(1)
    current = ''
    output = []
    for ch in data:
        candidate = current + ch
        if candidate in phrase_table:
            # Keep extending the current phrase.
            current = candidate
        else:
            output.append((phrase_table.get(current, 0), ch))
            phrase_table[candidate] = next(next_index)
            current = ''

    # Corner-case: flush a trailing phrase that matched the dictionary
    # exactly, otherwise the encoding would be incomplete.
    if current:
        output.append((phrase_table.get(current[:-1], 0), current[-1:]))
    return output
fcfe0b294eed92812380a60d1ec1b642084c8dfe
3,633,142
import ray


def start_router(router_class, router_name):
    """Wrapper for starting a router and register it.

    Args:
        router_class: The router class to instantiate.
        router_name: The name to give to the router.

    Returns:
        A handle to newly started router actor.
    """
    # Create, register under the given name, then kick off the actor loop.
    actor_handle = router_class.remote(router_name)
    ray.experimental.register_actor(router_name, actor_handle)
    actor_handle.start.remote()
    return actor_handle
e309b318d48991bc9fca86c4cfbd49a675a01a39
3,633,143
def completeness_scores(include_commutative=False):
    """
    Provide a dict with the completeness scores of rings in database

    :param include_commutative: if False, it will filter out the specialized
       commutative Properties. If True, it will include all Properties.
    :return: A dict keyed by Ring ids providing scores for each Ring
    """
    # Per (ring, property) row: a symmetric property scores 1 when recorded
    # (0 when not); an asymmetric property scores one point per recorded
    # side (left/right). The outer query sums the per-row scores per ring.
    query = """
    SELECT id, sum(score) FROM
    (SELECT "ringapp_ringproperty"."ring_id" AS id,
    CASE
        WHEN "ringapp_property"."symmetric" IS TRUE
             AND "ringapp_ringproperty"."has_on_left" IS NOT NULL THEN 1
        WHEN "ringapp_property"."symmetric" IS TRUE
             AND "ringapp_ringproperty"."has_on_left" IS NULL THEN 0
        WHEN "ringapp_property"."symmetric" IS FALSE
            THEN ("ringapp_ringproperty"."has_on_left" IS NOT NULL)::int
                 + ("ringapp_ringproperty"."has_on_right" IS NOT NULL)::int
    END AS score
    FROM "ringapp_ringproperty"
    INNER JOIN "ringapp_property"
        ON ("ringapp_ringproperty"."property_id" = "ringapp_property"."id")
    {}) AS t1
    GROUP BY id
    """
    # Optionally restrict to non-commutative-only properties.
    if include_commutative is False:
        query = query.format('WHERE "ringapp_property"."commutative_only" = FALSE')
    else:
        query = query.format('')

    query = Ring.objects.raw(query)
    data = {ring.id: ring.sum for ring in query}
    # Rings with no scored rows come back with a NULL sum; report them as 0.
    for key, datum in data.items():
        if datum is None:
            data[key] = 0
    return data
376b748da0ebff964b73059c9a2e2b1e3793487e
3,633,144
def _is_activity_overlapping(df):
    """
    Return True when at least one activity overlaps its predecessor.

    Parameters
    ----------
    df : pd.DataFrame
        Three-column frame with START_TIME and END_TIME columns; assumed
        ordered by start time (TODO confirm with callers).
    """
    assert df.shape[1] == 3
    zero_delta = pd.Timedelta('0ms')
    # Previous row's end strictly later than this row's start => overlap.
    overlap_mask = (df[END_TIME].shift() - df[START_TIME]) > zero_delta
    return not df[overlap_mask].empty
4037966bfcb3453a0cd528cb3fc2a5e345db8b74
3,633,145
def logarithmic(x, y, new_x):
    """
    Linearly interpolates values in new_x based in the log space of y.

    Parameters
    ----------
    x : array_like
        Independent values.
    y : array_like
        Dependent values.
    new_x : array_like
        The x values to return interpolated y values at.

    Raises
    ------
    ValueError
        If `y` has more than two dimensions.
    """
    x = atleast_1d_and_contiguous(x, np.float64)
    y = atleast_1d_and_contiguous(y, np.float64)
    new_x = atleast_1d_and_contiguous(new_x, np.float64)

    if y.ndim > 2:
        # Fixed: the message previously said "`linear`", which is the
        # sibling function, not this one.
        raise ValueError("`logarithmic` only works with 1-D or 2-D arrays.")
    if y.ndim == 2:
        # Interpolate each row of a 2-D array independently.
        new_y = np.zeros((y.shape[0], len(new_x)), np.float64)
        for i in range(len(new_y)):
            _interpolate.loginterp_dddd(x, y[i], new_x, new_y[i])
    else:
        new_y = np.zeros(len(new_x), np.float64)
        _interpolate.loginterp_dddd(x, y, new_x, new_y)

    return new_y
0df18f969ef0baba082410f8794592a2f7cc5b8e
3,633,146
def replace_num(s):
    """Remove the ASCII digits 0-9 from a tweet string and return the result.

    (Original docstring, translated from Spanish: "Removes the numbers
    from the tweets".)
    """
    # str.translate deletes every digit in one C-level pass instead of
    # ten chained str.replace calls.
    return s.translate(str.maketrans('', '', '0123456789'))
8d46f3e8d44cfc80f1efbb685b4a952c253763bd
3,633,147
def rotation_phi_beta(x, y, L, phi_deg, beta_deg, scale):
    """Returns horizontal and vertical components of the scattering vector
    in units of scale (k)

    x, y can be arrays, L-scalar in the same units,
    ex. scale = k[1/A] or in number of pixels etc.
    """
    x_arr = np.array(x)
    y_arr = np.array(y)
    # First rotate in-plane by phi, then apply the Fraser correction.
    xrot, yrot = rotation(x_arr, y_arr, phi_deg)
    return fraser_xyz(xrot, yrot, L, beta_deg, scale)
5d971c86fd8ecbfe076281cb4689e747f95be68c
3,633,148
def _sample_rois(self, all_rois, gt_boxes, gt_labels, fg_rois_per_image, rois_per_image, num_classes): """Generate a random sample of RoIs comprising foreground and background examples. """ # overlaps: (rois x gt_boxes) overlaps = bbox_overlaps( np.ascontiguousarray(all_rois[:, 1:5], dtype=np.float), np.ascontiguousarray(gt_boxes[:, :4], dtype=np.float)) overlaps = overlaps.numpy() gt_assignment = overlaps.argmax(axis=1) max_overlaps = overlaps.max(axis=1) #labels = gt_boxes[gt_assignment, 4] labels = gt_labels[gt_assignment] # Select foreground RoIs as those with >= FG_THRESH overlap fg_inds = np.where(max_overlaps >= self.fg_threshold)[0] # Guard against the case when an image has fewer than fg_rois_per_image # foreground RoIs fg_rois_per_this_image = min(fg_rois_per_image, fg_inds.size) # Sample foreground regions without replacement if fg_inds.size > 0: fg_inds = np.random.choice(fg_inds, size=fg_rois_per_this_image, replace=False) # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI) bg_inds = np.where((max_overlaps < self.bg_threshold[1]) & (max_overlaps >= self.bg_threshold[0]))[0] # Compute number of background RoIs to take from this image (guarding # against there being fewer than desired) bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image bg_rois_per_this_image = min(bg_rois_per_this_image, bg_inds.size) # Sample background regions without replacement if bg_inds.size > 0: bg_inds = np.random.choice(bg_inds, size=bg_rois_per_this_image, replace=False) # The indices that we're selecting (both fg and bg) keep_inds = np.append(fg_inds, bg_inds) # Select sampled values from various arrays: labels = labels[keep_inds] # Clamp labels for the background RoIs to 0 labels[fg_rois_per_this_image:] = 0 rois = all_rois[keep_inds] bbox_target_data = _compute_targets(rois[:, 1:5], gt_boxes[gt_assignment[keep_inds], :4], labels) bbox_targets = _get_bbox_regression_labels(bbox_target_data, num_classes) return labels, rois, bbox_targets
4acbf052dddd5fed0c3d024728c8893341f4fc1c
3,633,149
import math


def hough_line (im, nr=512, na=512, yc=None, xc=None, threshold=10,\
                disp=False, dispacc=False):
    """ Perform the Hough transform for lines of the image 'im'.

    This routine performs a straight-line Hough transform of the image 'im',
    which should normally contain output from an edge detector. It returns a
    list of the significant peaks found (see find_peaks for a description of
    its content) and the image that forms the accumulator. The accumulator
    is of dimension [na, nr], the distance from the origin (yc, xc) being
    plotted along the x-direction and the corresponding angle along the
    y-direction.

    Arguments:
    im          image for which the Hough transform is to be performed
    nr          number of radial values (x-direction of the accumulator)
    na          number of angle values (y-direction of the accumulator)
    yc          y-value of the origin on the image array
                (default: image centre)
    xc          x-value of the origin on the image array
                (default: image centre)
    threshold   minimum value for a significant peak in the accumulator
                (default: 10)
    disp        if True, draw the lines found over the image
                (default: false)
    dispacc     if True, display the accumulator array
                (default: false)
    """
    # NOTE(review): `xrange` below means this code is Python 2 only.
    ny, nx, nc = sizes (im)  # nc (channel count) is unused here
    if yc is None: yc = ny / 2
    if xc is None: xc = nx / 2
    acc = image ((na, nr, 1))
    ainc = math.pi / na
    # NOTE(review): rinc is computed but never used; the radius is instead
    # offset by `r += ny` below -- confirm this is intentional.
    rinc = nr / math.sqrt (ny**2 + nx**2)

    # Find edge points and update the Hough array.
    for y in xrange (0, ny):
        for x in xrange (0, nx):
            v = im[y,x,0]
            if v > 0:
                # Vote for every line (angle, radius) through this edge pixel.
                for a in xrange (0, na):
                    ang = a * ainc
                    r = ((x - xc) * math.cos(ang) + (y - yc) * math.sin (ang))
                    # Shift radii so negative values index into the array;
                    # NOTE(review): r is a float used as an index
                    # (Python 2 truncates on __getitem__ via the image type,
                    # presumably -- confirm).
                    r += ny
                    if r >= 0 and r < nr:
                        acc[a,r,0] += 1

    # Now find peaks in the accumulator.
    peaks = find_peaks (acc, threshold=threshold)

    # If the user wants to display what has been found, draw the lines over
    # the image.  (This implmentation is ugly.)
    if dispacc:
        display (acc)
    if disp:
        # Build a dimmed RGB copy of the input and overdraw peak lines in red.
        d = image ((ny, nx, 3))
        d[:,:,0] = im[:,:,0] * 0.5
        d[:,:,1] = im[:,:,0] * 0.5
        d[:,:,2] = im[:,:,0] * 0.5
        for h, a, r in peaks:
            da = a
            # Brute-force scan: mark pixels whose distance matches radius r.
            for y in range (0, ny):
                for x in range (0, nx):
                    t = (x - xc) * math.cos (da) + (y - yc) * math.sin (da)
                    if abs(t - r) < 1.0e-3:
                        d[y,x,0] = max_image_value
        display (d)
    return peaks, acc
d546e2684f06ac0fe99111a64c17ecbf7b0708f5
3,633,150
def build_response(session_attributes, speechlet_response):
    """Assemble the top-level Alexa-style response envelope."""
    envelope = {'version': '1.0'}
    envelope['sessionAttributes'] = session_attributes
    envelope['response'] = speechlet_response
    return envelope
96bc754e1a58300b2861851be678aa0e984845a8
3,633,151
def insertion_sort(L):
    """Sort the list L in place using insertion sort and return it.

    Fixes an inconsistency in the original: it returned L for inputs of
    length < 2 but implicitly returned None otherwise. Returning the
    (sorted) list in every case keeps the result uniform; the length < 2
    early-exit is unnecessary because the loop simply does not run.

    :param L: mutable sequence of comparable items; modified in place.
    :return: the same list, sorted ascending.
    """
    for i in range(1, len(L)):
        key = L[i]
        j = i
        # Shift larger elements right until key's slot is found.
        while j > 0 and key < L[j - 1]:
            L[j] = L[j - 1]
            j -= 1
        L[j] = key
    return L
ca7cbb5c676173ad10ce98d8b9e579a65afad0fb
3,633,152
def get_bgp_peer():
    """
    collects local and bgp neighbor ip along with device name in below format
    {
        'local_addr1':['neighbor_device1_name', 'neighbor_device1_ip'],
        'local_addr2':['neighbor_device2_name', 'neighbor_device2_ip']
    }
    """
    config_db = ConfigDBConnector()
    config_db.connect()
    neighbors = config_db.get_table('BGP_NEIGHBOR')

    bgp_peer = {}
    for neighbor_ip, attrs in neighbors.items():
        # setdefault keeps the first mapping when two neighbors share the
        # same local address.
        bgp_peer.setdefault(attrs['local_addr'], [attrs['name'], neighbor_ip])
    return bgp_peer
681b70cbef1b2c551c9d5040f9921f25edea6a17
3,633,153
def _do_nothing(string):
    """Return *string* unchanged.

    Makes the ConfigParser case sensitive: ConfigParser lower-cases option
    names by default, so supplying this identity function (presumably as
    ``optionxform`` -- confirm at the call site) preserves the original case.
    """
    return string
4011cbe5b00bef5fe9fc13420bbd988729641676
3,633,154
def show_game(game_id):
    """Return the rendered template for a game with game_id.

    Args:
        game_id(int)
    Returns:
        Rendered html template
    """
    game = session.query(Games).filter_by(id=game_id).one()
    category = session.query(Category).filter_by(id=game.category_id).one()
    release = game.release_date.date().strftime("%d, %B %Y")
    return render_template('game_page.html',
                           game=game,
                           category=category,
                           date=release,
                           embed_link=embed_link(game.video_path))
63a9fc5240164953e37de7dbc3e323a60bc46364
3,633,155
def is_symmetric(mat):
    """Return whether a sy.Matrix is symmetric (equal to its transpose)."""
    rows, cols = mat.shape
    if cols != rows:
        return False
    # Only the strict upper triangle needs checking against its mirror.
    return all(mat[j, i] == mat[i, j]
               for i in range(rows)
               for j in range(i + 1, rows))
e23cb03ec06f16584a99c82d66770491a6635ef5
3,633,156
def download_ftp_text(file_url: str) -> str:
    """Download FTP file to memory return it as string (ASCII encoding is
    assumed).

    :param file_url: file URL which must start with ftp://
    """
    with open_ftp_file(file_url) as remote:
        raw = remote.read()
    return raw.decode('ASCII')
daf8dbe8f0d06b42d228b58084e074e6543fc216
3,633,157
def choice_likeFestival(user_id):
    """ Record the festival the user selected as one they want to visit. """
    print('choice_likeFestival')

    for row in userInfoDB.rows:
        if row[0].value == user_id:
            # User has a prior visit record; rows are stored as lists, so
            # the cell is reached through list access.
            userRow = row[0].row()
            index = contentListCode.index(stateDB.loc[user_id, 'contentCode'])
            # Bump the counter column matching the user's current
            # contentCode (presumably columns 3.. are per-content counters
            # -- confirm the sheet layout).
            userInfoDB.cell(row=userRow, column=index+3).value += 1

    # Persist the workbook and signal the caller (always False).
    db.save(EXCEL_FILE_NAME)

    return False
46b0ea332c19a5b2998c4a04c665d6f59d32cd17
3,633,158
def least_square_regression(x, y, xlabel = "x", ylabel = "y", prefix="", suffix=""):
    """
    Perform least square regression to find the best fit line and returns the
    slope of the line.

    **Parameters**

    x : List of values along x axis.
    y : List of values along y axis.
    xlabel, ylabel : axis labels used in the generated plot.
    prefix, suffix : text prepended/appended to the saved figure filename
        (and suffix also to the plot title).

    Side effects: prints fit statistics, saves the figure as
    "<prefix>least_square_regression_fit<suffix>.png" and opens a
    (blocking) matplotlib window via plt.show().
    """
    # Reshape into the (n_samples, 1) column vectors scikit-learn expects.
    X = np.asarray(x).reshape((len(x), 1))
    Y = np.asarray(y).reshape((len(y), 1))
    regr = linear_model.LinearRegression()
    regr.fit(X, Y)
    label_string = "Best fit line, y = "+str(regr.coef_[0][0])+" * x + "+str(regr.intercept_[0])
    print(label_string)
    print("Residual sum of squares: %.2f" % np.mean((regr.predict(X) - Y) ** 2))
    print("Variance score: %.2f" % regr.score(X, Y))

    # Plot outputs
    original_data, = plt.plot(X, Y,'go', label="original data")
    # best_fit_line, = plt.plot(X, map(lambda x: pow(e, -x), X), 'bo', label=label_string)
    best_fit_line, = plt.plot(X, regr.predict(X), color='blue', linewidth=3, label=label_string)
    plt.title("Least Square Regression"+suffix)
    plt.ylabel(ylabel)
    plt.xlabel(xlabel)
    curves = [original_data, best_fit_line]
    labels = [curve.get_label() for curve in curves]
    plt.legend(curves, labels)
    plt.savefig(prefix+"least_square_regression_fit"+suffix+".png")
    plt.show()
    # Slope of the fitted line.
    return regr.coef_[0][0]
88d4601203031659e905be210e8621a9f1f72ab1
3,633,159
def build_dict(*param_dicts, **param_dict):
    """
    Create a merged dictionary from the supplied dictionaries and keyword
    parameters. Keyword arguments are applied first, then each positional
    dictionary in order (later ones win); None entries are skipped.
    """
    merged = dict(param_dict)
    for extra in param_dicts:
        if extra is not None:
            merged.update(extra)
    return merged
1f77e1ca81051913ed32ea5bfdb684cf468e27af
3,633,160
def calc_volume(all_vertices, element):
    """Calculate the volume for the given element based on type.
    Returns length between points (2D) and volume within points (3D).

    Parameters
    ----------
    all_vertices : array indexable by element.vertice_ids, yielding vertex
        coordinate vectors.
    element : object with `element_type` (an ElementType) and `vertice_ids`.

    Raises
    ------
    TypeError  : for an unsupported element type.
    ValueError : if the computed volume is negative.
    """
    vertices = all_vertices[element.vertice_ids]

    # 1D case with lines (volume) and points (area)
    if element.element_type == ElementType.LINE:
        # Length of the segment between the two endpoints.
        a_v = vertices[1] - vertices[0]
        volume = len_vector(a_v)
    # 2D case with quadrangles or triangles (volume) and lines (area)
    elif element.element_type == ElementType.QUADRANGLE:
        # Split the quad into two triangles sharing vertex 0; each
        # cross-product magnitude is twice a triangle's area.
        a_v = vertices[1] - vertices[0]
        b_v = vertices[2] - vertices[0]
        c_v = vertices[3] - vertices[0]
        volume = len_vector(cross_product(a_v, b_v))
        volume += len_vector(cross_product(b_v, c_v))
        volume *= 0.5
    elif element.element_type == ElementType.TRIANGLE:
        a_v = vertices[1] - vertices[0]
        b_v = vertices[2] - vertices[0]
        volume = len_vector(cross_product(a_v, b_v))
        volume *= 0.5
    # 3D case with heaxahedrons, prisms, pyramids or tetrahedrons (volume)
    # and quadrangles or triangles (area)
    elif element.element_type == ElementType.HEXAEDER:
        # Decompose the hexahedron into five tetrahedra; each scalar triple
        # product is six times a tetrahedron's volume.
        volume = abs(np.dot(vertices[5] - vertices[4], cross_product(
            vertices[1] - vertices[4], vertices[6] - vertices[4])))
        volume += abs(np.dot(vertices[1] - vertices[4], cross_product(
            vertices[3] - vertices[4], vertices[6] - vertices[4])))
        volume += abs(np.dot(vertices[1] - vertices[4], cross_product(
            vertices[0] - vertices[4], vertices[3] - vertices[4])))
        volume += abs(np.dot(vertices[6] - vertices[4], cross_product(
            vertices[3] - vertices[4], vertices[7] - vertices[4])))
        volume += abs(np.dot(vertices[3] - vertices[1], cross_product(
            vertices[6] - vertices[1], vertices[2] - vertices[1])))
        volume *= 1.0/6.0
    elif element.element_type == ElementType.PRISM:
        # Three tetrahedra spanning the prism, all anchored at vertex 0.
        a_v = vertices[1] - vertices[0]
        b_v = vertices[2] - vertices[0]
        c_v = vertices[3] - vertices[0]
        d_v = vertices[4] - vertices[0]
        e_v = vertices[5] - vertices[0]
        volume = abs(np.dot(a_v, cross_product(b_v, e_v)))
        volume += abs(np.dot(a_v, cross_product(e_v, d_v)))
        volume += abs(np.dot(d_v, cross_product(e_v, c_v)))
        volume *= 1.0/6.0
    elif element.element_type == ElementType.PYRAMID:
        # Two tetrahedra splitting the quadrilateral base.
        a_v = vertices[1] - vertices[0]
        b_v = vertices[2] - vertices[0]
        c_v = vertices[3] - vertices[0]
        d_v = vertices[4] - vertices[0]
        volume = np.abs(np.dot(a_v, cross_product(b_v, d_v)))
        volume += np.abs(np.dot(b_v, cross_product(c_v, d_v)))
        volume *= 1.0/6.0
    elif element.element_type == ElementType.TETRAEDER:
        # Standard scalar-triple-product formula: V = |a . (b x c)| / 6.
        a_v = vertices[1] - vertices[0]
        b_v = vertices[2] - vertices[0]
        c_v = vertices[3] - vertices[0]
        volume = np.abs(np.dot(a_v, cross_product(b_v, c_v)))
        volume *= 1.0/6.0
    else:
        raise TypeError(element.element_type)

    # All terms pass through abs(), so a negative volume indicates a bug.
    if volume < 0.0:
        raise ValueError('Volume', volume)
    return volume
39c631c8dc4bc8e00776ee14237fc054a4d6546e
3,633,161
def Mode(hist):
    """Returns the value with the highest frequency.

    Ties are broken in favor of the larger value (tuple comparison).

    hist: Hist object

    returns: value from Hist
    """
    best_freq, best_val = max((freq, val) for val, freq in hist.Items())
    return best_val
2db7d658ad58a80041c3f450104aada006b11eaf
3,633,162
def get_img_space(wsp, img):
    """
    Find out what image space an image is in

    Note that this only compares the voxel->world transformation matrix to
    the reference image for each space. It is quite possible for two images
    to be in the same space but not be registered to one another. In this
    case, the returned space may not be accurate when determining whether a
    registration is required.

    :param wsp: Workspace object
    :param img: Image
    :return: Name of image space for ``img``, e.g. ``native``, ``struc``
    """
    for space in ('native', 'calib', 'struc', 'std', 'custom'):
        ref = getattr(wsp.reg, "%sref" % space)
        # Match against the first reference image sharing the same grid.
        if ref is not None and img.sameSpace(ref):
            return space
    raise RuntimeError("Could not determine space for image: %s" % str(img))
b455dd6300cf13cbba5d8e2d44685e06d8fb4cad
3,633,163
import os


def get_dir(algorithm, mode):
    """
    Used to determine the plotting folder

    Parameters
    ----------
    algorithm: String
        The name of the current ANC algorithm running
    mode: String
        The MODE (precorded or anc) the server is currently running

    Returns
    -------
    path : Path
        The path of the plots directory of diminish

    Raises
    ------
    None
    """
    results_dir = os.path.join(os.getcwd(), f'plots/{algorithm}/{mode}/')
    # exist_ok=True avoids the check-then-create (TOCTOU) race of the
    # previous isdir()/makedirs() sequence.
    os.makedirs(results_dir, exist_ok=True)
    return results_dir
350e47f729bddbc7d85e306b6b6af5d6a14222ea
3,633,164
def get_comma_delimiter(include_a_space=True):
    """Return the comma delimiter appropriate for the current language
    (Arabic or English).

    When include_a_space is True (the default), the delimiter includes a
    space to make the returned value easy to use with join() when
    constructing a list.
    """
    if get_language() == 'ar':
        delimiter = ARABIC_COMMA
    else:
        delimiter = ','
    return delimiter + ' ' if include_a_space else delimiter
60622204de1ccf381b4c87abd2dedc5406f99a1a
3,633,165
def str_to_bool(string):
    """Used as a type in argparse so that we get back a proper bool
    instead of always True
    """
    truthy = {"y", "yes", "1", "true"}
    return string.lower() in truthy
ab54e7cff5721f91f78c90498c16d68f2760ee11
3,633,166
import re def _run_canned_tests(env,osenv): """Run the tests from the tests subdirectory""" retval = 0 # success env['test_dir'] = env.escape_string(mbuild.join(env['src_dir'],'tests')) cmd = "%(python)s %(test_dir)s/run-cmd.py --build-dir %(build_dir)s/examples " dirs = ['tests-base', 'tests-knc', 'tests-avx512', 'tests-xop'] if env['cet']: dirs.append('tests-cet') for d in dirs: x = env.escape_string(mbuild.join(env['test_dir'],d)) cmd += " --tests %s " % (x) # add test restriction/subetting codes codes = [] if env['encoder']: codes.append('ENC') if env['decoder']: codes.append('DEC') if env['avx']: codes.append('AVX') if env['knc']: codes.append('KNC') if env['skx']: codes.append('AVX512X') if env['knm'] or env['knl']: codes.append('AVX512PF') if env['hsw']: codes.append('HSW') if env['amd_enabled'] and env['avx']: codes.append('XOP') for c in codes: cmd += ' -c ' + c output_file = env.build_dir_join('TEST.OUT.txt') cmd = env.expand_string(cmd) if mbuild.verbose(2): mbuild.msgb("TEST COMMAND", "%s > %s" %(cmd,str(output_file))) (retcode, stdout, stderr) = mbuild.run_command_output_file(cmd, output_file, osenv=osenv) if retcode == 1: for l in stdout: print(l.rstrip()) for l in stdout: l = l.rstrip() if re.search(r'^[[](TESTS|ERRORS|SKIPPED|PASS_PCT|FAIL)[]]',l): mbuild.msgb("TESTSUMMARY", l) if retcode == 0: mbuild.msgb("CANNED TESTS", "PASSED") else: mbuild.msgb("CANNED TESTS", "FAILED") retval = 1 # failure return retval
17b85edeacbfeff77ebdd238da817a869b04ebde
3,633,167
def new(nbits, prefix=b"", suffix=b"", initial_value=1, little_endian=False, allow_wraparound=False):
    """Create a stateful counter block function suitable for CTR encryption
    modes.

    Each counter block is laid out as: prefix | counter value | suffix,
    with the counter incremented by 1 per call.

    Args:
      nbits (integer): counter length in bits; must be a multiple of 8.
      prefix (byte string): constant prefix of the block (default: none).
      suffix (byte string): constant postfix of the block (default: none).
      initial_value (integer): starting counter value (default 1).
      little_endian (boolean): encode the counter little-endian when True,
        big-endian (default) otherwise.
      allow_wraparound (boolean): accepted for API compatibility; ignored.

    Returns:
      An object usable as the :data:`counter` parameter of a CTR mode
      cipher. It must hold that *len(prefix) + nbits//8 + len(suffix)*
      matches the cipher's block size.
    """
    if nbits % 8 != 0:
        raise ValueError("'nbits' must be a multiple of 8")

    # 'allow_wraparound' is intentionally ignored.
    return {
        "counter_len": nbits // 8,
        "prefix": prefix,
        "suffix": suffix,
        "initial_value": initial_value,
        "little_endian": little_endian,
    }
a0cdadcf6eb81ad323c205e08db97d094326f513
3,633,168
from typing import Tuple


def scoreNF(shelf: Shelf, item: Item, self=None) -> Tuple[int, Shelf, bool]:
    """ Next Fit: try only the most recently opened shelf. """
    if not self.shelves:
        return (0, None, False)
    current = self.shelves[-1]
    # Prefer the upright orientation; only try rotated when enabled.
    if self._item_fits_shelf(item, current):
        return (0, current, False)
    if self.rotation and self._item_fits_shelf(item, current, True):
        return (0, current, True)
    return (0, None, False)
33205a6b87ae894156778d515b2bf68635c15586
3,633,169
def parse_str_to_date(date_str):
    """Parse an ISO-style date string (e.g. "2019-10-09") into a datetime.

    Only the year, month and day fields are read; any trailing time/offset
    text (e.g. "T22:25:00-03:00") is ignored.

    :param date_str: the date string; anything that is not a str yields None.
    :return: a dt.datetime at midnight of the given date, or None.
    """
    # isinstance (rather than `type(...) != str`) also accepts str
    # subclasses and is the idiomatic type check.
    if not isinstance(date_str, str):
        return None

    return dt.datetime(
        int(date_str[0:4]),   # Year
        int(date_str[5:7]),   # Month
        int(date_str[8:10]),  # Day
    )
42083fbcc37fb9739154759ae7f278a1f958b0ca
3,633,170
def size(imageOrFilter):
    """Return the size of an image, or of the output image of a filter

    This method take care of updating the needed informations
    """
    # Only the metadata is needed, so avoid computing the full output.
    imageOrFilter.UpdateOutputInformation()
    return image(imageOrFilter).GetLargestPossibleRegion().GetSize()
1459e7d5f8b1c762e2a351a0b933d3bc9c651e48
3,633,171
def fetch_eigenv(odb_name, step_name, n_eigen):
    """
    Get eigenvalues.

    Return the eigenvalues of a perturbation buckling analysis from an
    abaqus database.

    Parameters
    ----------
    odb_name : string
        Abaqus odb path, given without the '.odb' suffix.
    step_name : string
        Name of the step
    n_eigen : int
        Number of eigenvalues to return
    """
    odb = odbAccess.openOdb(path=odb_name + '.odb')
    step = odb.steps[step_name]

    # Gather the eigenvalues; each frame description's last token holds one.
    eigenvalues = ()
    eigen_string = ""
    for frame_no in range(1, n_eigen + 1):
        value = float(step.frames[frame_no].description.split()[-1])
        eigenvalues += (value,)
        eigen_string += "%.3E " % value

    # Close the odb
    odbAccess.closeOdb(odb)

    # Return variables
    return eigenvalues, eigen_string
603c0b40e95a181437c5f6b615f282db6c9eea90
3,633,172
def _not_exhausted(last_fetched): """Check if the last fetched tasks were the last available.""" return len(last_fetched) == 100
570cf94ba9c723cced8ec3a746f2ce070d780fd5
3,633,173
import os def files_by_extension(root, extensions): """ Returns a list of files that match the extensions given after crawling the root directory """ assert(os.path.isdir(root)) file_list = [] for roots, _, files in os.walk(root): for f in files: ext = os.path.splitext(f)[1][1:].strip().lower() if ext in extensions: file_list.append(os.path.join(roots, f)) return file_list
637d8f2fc8d35f1f78e81c328541519b32b34d7b
3,633,174
import requests


def playonyt(topic: str, use_api: bool = False, open_video: bool = True) -> str:
    """Play a YouTube Video

    Args:
        topic: search phrase to look up on YouTube.
        use_api: when True, resolve the URL via the pywhatkit web API
            instead of scraping the YouTube results page.
        open_video: when True, also open the resolved URL in the browser.

    Returns:
        The URL of the first matching video (ASCII-decoded API response
        body in the use_api branch).

    Raises:
        Exception: when scraping finds no video for the topic.
    """
    if use_api:
        response = requests.get(
            f"https://pywhatkit.herokuapp.com/playonyt?topic={topic}"
        )
        if open_video:
            web.open(response.content.decode("ascii"))

        return response.content.decode("ascii")
    else:
        url = f"https://www.youtube.com/results?q={topic}"
        count = 0
        cont = requests.get(url)
        data = cont.content
        data = str(data)
        lst = data.split('"')
        # Walk the quoted tokens of the results page markup; the token
        # "WEB_PAGE_TYPE_WATCH" appears a fixed offset after the watch URL,
        # so lst[count - 5] is presumed to be the video path. This is
        # fragile scraping -- any change to YouTube's markup breaks it.
        for i in lst:
            count += 1
            if i == "WEB_PAGE_TYPE_WATCH":
                break
        # "/results" at that offset means no watch link was found.
        if lst[count - 5] == "/results":
            raise Exception("No Video Found for this Topic!")

        if open_video:
            web.open(f"https://www.youtube.com{lst[count - 5]}")

        return f"https://www.youtube.com{lst[count - 5]}"
b897fb7271b2aaf6b421702dc851fa90fcc79b17
3,633,175
def get_data(filename: str = "test", roi: str = []) -> list:
    """
    Return data (pixel values) from an ROI in an image for every extension.
    NOT FINISHED!

    Args:
        filename: image filename.
        roi: Region-Of-Interest.
    Returns:
        list of pixel values.
    """
    # NOTE(review): the mutable default `roi=[]` is risky if callees mutate
    # it; confirm _get_data_roi treats it as read-only.
    full_name = azcam.utils.make_image_filename(filename)
    region = _get_data_roi(roi)

    with pyfits.open(full_name) as hdulist:
        num_ext, first_ext, last_ext = get_extensions(full_name)
        if num_ext == 0:
            # Single-extension image: slice the primary HDU directly.
            return hdulist[0].data[region[0]:region[1], region[2]:region[3]]
        # Multi-extension image: one slice per channel.
        return [
            hdulist[chan].data[region[0]:region[1], region[2]:region[3]]
            for chan in range(first_ext, last_ext)
        ]
1c4fd0634853f06a82525245374eec24016769df
3,633,176
import logging


def execute_select_dataframe_columns(dataframe, select_dataframe_columns):
    """
    Filter dataframe using the provided columns

    Args:
        dataframe (pandas.Dataframe): input dataframe
        select_dataframe_columns (array[str]): names of the columns to
            keep; an empty list keeps every column

    Returns:
        pandas.Dataframe: Dataframe filtered

    Raises:
        ValueError: if any requested column is absent from the dataframe
    """
    logging.debug(
        '>>>>>>>>> Using select dataframe columns strategy <<<<<<<<<<<<')

    missing = set(select_dataframe_columns) - set(dataframe.columns)
    if missing:
        raise ValueError('The provided columns [%s] does not exists.' % ', '.join(
            missing))

    # Empty selection means "keep everything".
    if not select_dataframe_columns:
        return dataframe
    return dataframe[select_dataframe_columns]
12ce4343e97886820a3fecb3d79d5eddd593a7fa
3,633,177
import os
import argparse


def extant_file(x):
    """
    'Type' for argparse - checks that file exists but does not open.
    """
    if os.path.exists(x):
        return x
    # Argparse uses the ArgumentTypeError to give a rejection message like:
    # error: argument input: x does not exist
    raise argparse.ArgumentTypeError("{0} does not exist".format(x))
2572516acbc1b6a661e4d85f36d8adb96f832d0f
3,633,178
def test_from_format(schema, to_fn, buf_cls):
    """
    Test that check_types-guarded function reads data from source
    serialization format.

    Args:
        schema: pandera schema model used to annotate the guarded function.
        to_fn: serializer, invoked as ``to_fn(df)`` or ``to_fn(df, buf)``.
        buf_cls: buffer class (e.g. ``io.BytesIO``) for serializers that
            write into a buffer, or None when the payload is returned
            directly.
    """
    @pa.check_types
    def fn(df: pa.typing.DataFrame[schema]):
        return df

    for df, invalid in [
        (mock_dataframe(), False),
        (invalid_input_dataframe(), True),
    ]:
        buf = None if buf_cls is None else buf_cls()
        arg = to_fn(df, *([buf] if buf else []))
        if buf:
            if buf.closed:
                # Older pandas closes user-provided buffers on to_pickle,
                # leaving nothing to read back — skip in that case.
                pytest.skip(
                    "skip test for older pandas versions where to_pickle "
                    "closes user-provided buffers: "
                    "https://github.com/pandas-dev/pandas/issues/35679"
                )
            buf.seek(0)
            arg = buf
        if invalid:
            # Invalid input must be rejected by the @check_types guard.
            # NOTE(review): this `return` ends the test after the first
            # invalid case; any later entries in the list above would be
            # silently skipped.
            with pytest.raises(pa.errors.SchemaError):
                fn(arg)
            return

        # Round-trip: the deserialized frame must equal the original.
        out = fn(arg)
        assert df.equals(out)
d1ce34befb2255ee15b86d36d28d24075c4638f0
3,633,179
def has_oxidation_states(comp):
    """Check if a composition object has oxidation states for each element.

    Args:
        comp (Composition): Composition to check.

    Returns:
        (boolean) Whether every element carries a non-None ``oxi_state``.
    """
    # getattr with a None default covers both "attribute missing" and
    # "attribute explicitly None".
    return all(
        getattr(element, "oxi_state", None) is not None
        for element in comp.elements
    )
702595070b588761142055bc1532ce26acd287fb
3,633,180
def check_fsig_int(quad_int, cryst_ptgrp, sigma, *args):
    """
    For specific sigma rotations, a function of m, U, V, W (fsig) is computed.
    The ratio of fsig and sigma should be a divisor of kmax. This condition
    is checked and those integer quadruples that satisfy this condition are
    returned

    Parameters
    ----------------
    quad_int: numpy.array
        Integer quadruples
    cryst_ptgrp: str
        Proper point group in Schoenflies notation
    sigma: float
        sigma number
    args[0]: dic
        keys: 'nu', 'mu', 'kmax'

    Returns
    -----------
    quad_int: numpy.array
        Integer quadruple array that satisfy the above mentioned condition
    """
    mu = args[0]['mu']
    nu = args[0]['nu']
    kmax = args[0]['kmax']

    # Rows of quad_int are the quadruple components (m, u, v, w).
    m = quad_int[0, :]
    u = quad_int[1, :]
    v = quad_int[2, :]
    w = quad_int[3, :]

    sigma = float(sigma)
    if cryst_ptgrp == 'D3':
        # $\frac{F}{$\Sigma$}$ should be a divisor of kmax
        # $\in (12\mu\nu, 6\mu\nu, 3\mu\nu)$
        # Keep only those quadruples for which the above condition is met
        fsig = ((mu * (m ** 2) +
                 (mu - 2 * nu) * (u ** 2 + v ** 2 + w ** 2) +
                 2 * nu * (u * v + v * w + w * u)) / sigma)
        # cond1: fsig is (numerically) an integer; cond2: it divides kmax.
        cond1 = np.where(abs(fsig - np.round(fsig)) < 1e-06)[0]
        cond2 = np.where(np.remainder(kmax, fsig[cond1]) == 0)[0]
        quad_int = quad_int[:, cond1[cond2]]

    if cryst_ptgrp == 'D4':
        # $\frac{F}{$\Sigma$}$ should be a divisor of kmax
        # $\in (12\mu\nu, 6\mu\nu, 3\mu\nu)$
        # Keep only those quadruples for which the above condition is met
        fsig = (mu * (m ** 2 + w ** 2) + nu * (u ** 2 + v ** 2)) / sigma
        cond1 = np.where(abs(fsig - np.round(fsig)) < 1e-06)[0]
        cond2 = np.where(np.remainder(kmax, fsig[cond1]) == 0)[0]
        quad_int = quad_int[:, cond1[cond2]]

    if cryst_ptgrp == 'D6':
        # $\frac{F}{$\Sigma$}$ should be a divisor of kmax
        # $\in (12\mu\nu, 6\mu\nu, 3\mu\nu)$
        # Keep only those quadruples for which the above condition is met
        fsig = ((mu * (3 * (m ** 2) + w ** 2) +
                 nu * (u ** 2 - u * v + v ** 2)) / sigma)
        cond1 = np.where(abs(fsig - np.round(fsig)) < 1e-06)[0]
        # NOTE(review): this drops *index* 0 from cond1, not entries where
        # fsig itself is zero. If the intent was to avoid a zero divisor in
        # the remainder below, the filter should probably be
        # ``fsig[cond1] != 0`` — confirm.
        cond1 = cond1[cond1!=0]
        cond2 = np.where(np.remainder(kmax, fsig[cond1]) == 0)[0]
        quad_int = quad_int[:, cond1[cond2]]

    return quad_int
c610da0a1055354fd1a0f27b73cf251b7a7db458
3,633,181
def exit_flow(exit_inputs: Tensor) -> Tensor: """ Exit flow Implements the second of the three broad parts of the model. Includes the optional fully-connected layers, and the logistic regression segment of the model. :param exit_inputs: Tensor output generated by the Middle Flow segment, having shape [*, new_rows, new_cols, 728] :return: Output tensor of shape [*, 1000], representing the classifier output for 1000 classes """ # Block 5 - Conv C (Orange) res = convolutional_unit(exit_inputs, 1024, (1, 1), (2, 2), post_activation=False) exit_outputs = exit_inputs for num_filters in [728, 1024]: exit_outputs = separable_convolutional_unit(exit_outputs, num_filters) exit_outputs = MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding="same")(exit_outputs) exit_outputs = Add()([res, exit_outputs]) for num_filters in [1536, 2048]: exit_outputs = separable_convolutional_unit(exit_outputs, num_filters, pre_activation=False, post_activation=True) # Block 6 Global Average Pool (Gray) exit_outputs = GlobalAveragePooling2D()(exit_outputs) # Optional fully-connected layer(s) (Blue) exit_outputs = Dense(units=2048)(exit_outputs) exit_outputs = Activation("relu")(exit_outputs) # Logistic regression (Blue) exit_outputs = Dense(units=1000)(exit_outputs) exit_outputs = Activation("softmax")(exit_outputs) return exit_outputs
fa73280187448f0acd89ea834d78b261e9f95415
3,633,182
def check_Latitude(observation):
    """
    Validates that observation contains a valid Latitude value.

    A valid Latitude is either a numeric value (int or float) or null/None
    (the field being absent counts as null).

    Returns:
        - assertion value: True if Latitude is valid, False otherwise
        - error message: empty if Latitude is valid, descriptive otherwise
    """
    value = observation.get("Latitude")
    # Original code only rejected strings, letting lists/dicts/bools slip
    # through.  None stays allowed; everything else must be numeric.
    # bool is a subclass of int, so it is excluded explicitly.
    if value is not None and (
        isinstance(value, bool) or not isinstance(value, (int, float))
    ):
        error = "Field `Latitude` is not an float or a null value. Current value: " + str(value)
        return False, error
    return True, ""
65582eea8a5c40a08054eb5b4889aa3bc6d0af68
3,633,183
def read_and_reshape_data(filename):
    """
    Read in the Snake grid output and reshape it into a 3d array of
    (ncycles, ncells, ncols).

    Parameters
    ----------
    filename: str
        Name of the Snake grid output file.

    Returns
    -------
    sgrid: (ncycles, ncells, ncols) array of str
        The reshaped grid output from Snake. The columns are as follows:
          col 0: cell number
          col 1: spatial coordinate
          col 2: cell density
          col 3: cell rosseland mean opacity
          col 4: cell optical depth
          col 5: cumulative optical depth
          col 6: cell temperature
    """
    raw_grid = SpectrumUtils.read_spec(filename, numpy=True)

    # Column 0 holds 0-based cell indices, so the cell count is max + 1;
    # the cycle count follows from the total number of rows.
    n_cells = int(raw_grid[:, 0].max()) + 1
    n_cycles = int(raw_grid.shape[0] / n_cells)
    n_cols = raw_grid.shape[1]

    return np.reshape(raw_grid, (n_cycles, n_cells, n_cols))
f9be8e78d359e4c74d7122c11294ca478f1958c7
3,633,184
def chart(start: str, stop: str):
    """Render the data-chart page for the given time range."""
    context = {'start': start, 'stop': stop}
    return flask.render_template('chart.html', **context)
745ec2f593cdac79494da36e33e7c29ae7facb01
3,633,185
def toa_error_cross_corr(snr, bandwidth, pulse_len, bandwidth_rms=None): """ Computes the timing error for a Cross-Correlation time of arrival estimator, given the input signal's bandwidth, pulse length, and RMS bandwidth. Ported from MATLAB Code Nicholas O'Donoughue 11 March 2021 :param snr: Signal-to-Noise ratio [dB] :param bandwidth: Input signal bandwidth [Hz] :param pulse_len: Length of the input signal [s] :param bandwidth_rms: RMS bandwidth of input signal [Hz] :return: Expected time of arrival error variance [s^2] """ # Convert input SNR to linear units snr_lin = unit_conversions.db_to_lin(snr) # Compute the product of SNR, bandwidth, pulse length, and RMS bandwidth a = snr_lin * bandwidth * pulse_len * bandwidth_rms # Invert and apply 8*pi scale factor return 1/(8*np.pi*a)
0fe5fc399108bc6a57ce3e5e5b340e1b5dfa45eb
3,633,186
import re


def search_projects():
    """ Search for projects

    - When given the name of a known project (*modulo* normalization),
      redirect to that project's page

    - When given an unknown project name, search for all known project names
      that have it as a prefix

    - When given a search term with a ``*`` in it, normalize the rest of the
      search term and perform file glob matching against all known
      normalized project names
    """
    search_term = request.args.get('q', '').strip()
    if search_term:
        per_page = current_app.config["WHEELODEX_SEARCH_RESULTS_PER_PAGE"]
        # Normalize: lowercase and collapse runs of '-', '_', '.' to '-'.
        normterm = re.sub(r'[-_.]+', '-', search_term.lower())
        # Only search projects that have wheels:
        q = Project.query.filter(Project.has_wheels)
        if '*' in normterm or '?' in normterm:
            # Glob search: translate the pattern to SQL LIKE syntax.
            q = q.filter(Project.name.like(glob2like(normterm), escape='\\'))
        elif q.filter(Project.name == normterm).one_or_none() is not None:
            # Exact (normalized) match: 307 preserves the request method.
            return redirect(url_for('.project', project=normterm), code=307)
        else:
            # Fall back to a prefix search on the normalized name.
            q = q.filter(
                Project.name.like(like_escape(normterm) + '%', escape='\\')
            )
        results = q.order_by(Project.name.asc()).paginate(per_page=per_page)
    else:
        # No query string: render the page with an empty result set.
        results = None
    return render_template(
        'search_projects.html',
        search_term = search_term,
        results = results,
    )
3d4823f6c26324e80f8250f51d32fa466b25ca61
3,633,187
from typing import Optional


def parse_opt_int(s: Optional[str]) -> Optional[int]:
    """Parse an optional string into an optional integer.

    A string is parsed with ``int`` (a ``ValueError`` propagates if it is
    not a valid integer literal); ``None`` is passed through unchanged; any
    other type raises ``TypeError``.
    """
    if isinstance(s, str):
        return int(s)
    if s is None:
        return None
    raise TypeError(f"value must be a string or None: {type(s).__name__}")
91a102c8c8e6a6ee109e9c88c56d9a6959f1f838
3,633,188
def get_record_parser(config, is_test=False):
    """
    Get the tfrecords sample parser.

    :param config: Contains the configurations to be used (sequence-length
        limits for paragraphs, questions and characters).
    :param is_test: Indicate if the data_type is test.
    :return: The parser method (a closure over *config* / *is_test*).
    """
    def parse(example):
        """
        Extract features from a single tfrecords sample.

        :param example: Contain the example (serialized tf.Example proto).
        :return: context_idxs, ques_idxs, context_char_idxs,
            ques_char_idxs, y1, y2, qa_id
        """
        # Test data may use different (typically longer) limits.
        para_limit = config.test_para_limit if is_test else config.para_limit
        ques_limit = config.test_ques_limit if is_test else config.ques_limit
        char_limit = config.char_limit

        # All tensors were serialized as raw byte strings; the id is an int.
        features = tf.parse_single_example(example,
                                           features={
                                               "context_idxs": tf.FixedLenFeature([], tf.string),
                                               "ques_idxs": tf.FixedLenFeature([], tf.string),
                                               "context_char_idxs": tf.FixedLenFeature([], tf.string),
                                               "ques_char_idxs": tf.FixedLenFeature([], tf.string),
                                               "y1": tf.FixedLenFeature([], tf.string),
                                               "y2": tf.FixedLenFeature([], tf.string),
                                               "id": tf.FixedLenFeature([], tf.int64)
                                           })

        # Decode the raw bytes back into fixed-shape int32/float32 tensors.
        context_idxs = tf.reshape(tf.decode_raw(
            features["context_idxs"], tf.int32), [para_limit])
        ques_idxs = tf.reshape(tf.decode_raw(
            features["ques_idxs"], tf.int32), [ques_limit])
        context_char_idxs = tf.reshape(tf.decode_raw(
            features["context_char_idxs"], tf.int32), [para_limit, char_limit])
        ques_char_idxs = tf.reshape(tf.decode_raw(
            features["ques_char_idxs"], tf.int32), [ques_limit, char_limit])
        # y1/y2 are the (one-hot style) start/end answer-position vectors.
        y1 = tf.reshape(tf.decode_raw(
            features["y1"], tf.float32), [para_limit])
        y2 = tf.reshape(tf.decode_raw(
            features["y2"], tf.float32), [para_limit])
        qa_id = features["id"]
        return context_idxs, ques_idxs, context_char_idxs, ques_char_idxs, y1, y2, qa_id
    return parse
0daec653596082160872d2222569849c6caa6fdd
3,633,189
from typing import Optional


def get_model_loader(namespace: Optional[str] = None) -> ModelConfigLoader[SegmentationModelBase]:
    """
    Create a ModelConfigLoader for segmentation models, searching under the
    given non-default namespace when one is provided.
    """
    loader = ModelConfigLoader[SegmentationModelBase](
        model_configs_namespace=namespace
    )
    return loader
cd57837a21b49d04876ef9162b5b47984a38b109
3,633,190
from typing import Optional


def beam_search(mat: np.ndarray, chars: str, beam_width: int = 25,
                lm: Optional[LanguageModel] = None) -> str:
    """Beam search decoder.

    See the paper of Hwang et al. and the paper of Graves et al.

    Args:
        mat: Output of neural network of shape TxC (per-timestep character
            probabilities; the last column is the CTC-blank).
        chars: The set of characters the neural network can recognize,
            excluding the CTC-blank.
        beam_width: Number of beams kept per iteration.
        lm: Character level language model if specified.

    Returns:
        The decoded text.
    """
    blank_idx = len(chars)
    max_T, max_C = mat.shape

    # initialise beam state with the empty labeling at probability 1
    last = BeamList()
    labeling = ()
    last.entries[labeling] = BeamEntry()
    last.entries[labeling].pr_blank = log(1)
    last.entries[labeling].pr_total = log(1)

    # go over all time-steps
    for t in range(max_T):
        curr = BeamList()

        # get beam-labelings of best beams
        best_labelings = last.sort_labelings()[:beam_width]

        # go over best beams
        # NOTE(review): `curr.entries` presumably auto-creates a BeamEntry
        # on first access (defaultdict-like) — confirm in BeamList.
        for labeling in best_labelings:

            # probability of paths ending with a non-blank
            pr_non_blank = log(0)
            # in case of non-empty beam
            if labeling:
                # probability of paths with repeated last char at the end
                pr_non_blank = last.entries[labeling].pr_non_blank + log(mat[t, labeling[-1]])

            # probability of paths ending with a blank
            pr_blank = last.entries[labeling].pr_total + log(mat[t, blank_idx])

            # fill in data for current beam (log-domain accumulation)
            curr.entries[labeling].labeling = labeling
            curr.entries[labeling].pr_non_blank = np.logaddexp(curr.entries[labeling].pr_non_blank, pr_non_blank)
            curr.entries[labeling].pr_blank = np.logaddexp(curr.entries[labeling].pr_blank, pr_blank)
            curr.entries[labeling].pr_total = np.logaddexp(curr.entries[labeling].pr_total,
                                                           np.logaddexp(pr_blank, pr_non_blank))
            curr.entries[labeling].pr_text = last.entries[labeling].pr_text
            curr.entries[labeling].lm_applied = True  # LM already applied at previous time-step for this beam-labeling

            # extend current beam-labeling
            for c in range(max_C - 1):
                # add new char to current beam-labeling
                new_labeling = labeling + (c,)

                # if new labeling contains duplicate char at the end, only
                # consider paths ending with a blank
                if labeling and labeling[-1] == c:
                    pr_non_blank = last.entries[labeling].pr_blank + log(mat[t, c])
                else:
                    pr_non_blank = last.entries[labeling].pr_total + log(mat[t, c])

                # fill in data
                curr.entries[new_labeling].labeling = new_labeling
                curr.entries[new_labeling].pr_non_blank = np.logaddexp(curr.entries[new_labeling].pr_non_blank,
                                                                       pr_non_blank)
                curr.entries[new_labeling].pr_total = np.logaddexp(curr.entries[new_labeling].pr_total, pr_non_blank)

                # apply LM
                apply_lm(curr.entries[labeling], curr.entries[new_labeling], chars, lm)

        # set new beam state
        last = curr

    # normalise LM scores according to beam-labeling-length
    last.normalize()

    # sort by probability
    best_labeling = last.sort_labelings()[0]  # get most probable labeling

    # map label string to char string
    res = ''.join([chars[label] for label in best_labeling])
    return res
fe6575a42c02dd0174125b588590d9d46318b614
3,633,191
def generate_hyperparameters(k):
    """
    Draw k random hyperparameter settings:
    (mutate_prob, elite, alpha, beta).

    Args:
        k: number of settings to generate.

    Returns:
        list of settings; each setting is a list
        [mutate_prob in [0,1), elite int in [3,20), alpha in [0,1),
        beta in [0,1)].
    """
    return [
        [np.random.rand(),
         np.random.randint(3, 20),
         np.random.rand(),
         np.random.rand()]
        for _ in range(k)
    ]
7e326170b872ba8bcd82b0411a4d88f004f1f773
3,633,192
def unparse_point_sources(point_sources, strict=False, expand_env_vars=False,
                          properties=lambda x:''):
    """ Convert a list (or other iterable) of PointSource objects into XML.

    strict : bool
        set True to generate exception, error message identifying offending
        source, reason
    properties : a function
        the function, if specified, returns a string for the source element
        with properties, like TS

    Example:

    >>> def ps2xml(ps, expand_env_vars=False):
    ...     ret=unparse_point_sources([ps], expand_env_vars=expand_env_vars)
    ...     return ret[0].strip().replace('\\t', ' '*4)
    >>> ps = PointSource(name='test', model=Models.Constant(), skydir=SkyDir(-30,30))
    >>> print (ps2xml(ps))
    <source name="test" type="PointSource"  >
        <spectrum  type="ConstantValue">
            <parameter name="Value" value="1.0" free="1" max="10" min="0.001" scale="1" />
        </spectrum>
        <spatialModel type="SkyDirFunction">
            <parameter name="RA"  value="330.0" free="0" max="360.0" min="-360.0" scale="1.0" />
            <parameter name="DEC" value="30.0" free="0" max="90" min="-90" scale="1.0" />
        </spatialModel>
    </source>

    Previously, this was buggy. The expand_env_vars flag would case the
    Prefactor to be nan. This doctest protects against that edge case.

    >>> ps = PointSource(name='test', model=Models.PowerLaw(), skydir=SkyDir(20,-88))
    >>> print (ps2xml(ps, expand_env_vars=True))
    <source name="test" type="PointSource"  >
        <spectrum  type="PowerLaw">
            <parameter name="Prefactor" value="1.0" free="1" max="100.0" min="0.01" scale="1e-11" />
            <parameter name="Index" value="2.0" free="1" max="5" min="-5" scale="-1" />
            <parameter name="Scale" value="1000.0" free="0" max="1000.0" min="1000.0" scale="1" />
        </spectrum>
        <spatialModel type="SkyDirFunction">
            <parameter name="RA"  value="20.0" free="0" max="360.0" min="-360.0" scale="1.0" />
            <parameter name="DEC" value="-88.0" free="0" max="90" min="-90" scale="1.0" />
        </spatialModel>
    </source>
    """
    xml_blurbs = Stack()
    m2x = Model_to_XML(strict=strict)
    for ps in point_sources:
        # Spatial part: a SkyDirFunction element built from the source dir.
        skyxml = makePSSpatialModel(ps.skydir)
        try:
            m2x.process_model(ps.model, expand_env_vars=expand_env_vars)
        except Exception as emsg:
            # NOTE(review): on failure the error is only printed and
            # getXML() below is still called — the emitted spectrum XML may
            # then reflect m2x's previous/default state; confirm intended.
            print ('Failed to process source %s: %s' %(ps.name, emsg))
        specxml = m2x.getXML()
        # Wrap spectrum + spatial model in a <source> element, optionally
        # annotated by the caller-supplied properties() string.
        s1 = '\n<source name="%s" type="PointSource" %s >\n'%(ps.name, properties(ps))
        s2 = '</source>'
        xml_blurbs.push(''.join([s1,specxml,skyxml,s2]))
    return xml_blurbs
03fcc44b85523b7f082eed774fc1f4a271d30867
3,633,193
def pion_to_muon_avg(x_lower, x_upper):
    """
    Energy distribution of a numu from the decay of pi

    Args:
        x_lower,x_upper (numpy.ndarray): energy fraction transferred to the
            secondary, lower/upper bin edges (must have the same shape)

    Returns:
        numpy.ndarray: average probability density in bins (xmin,xmax)

    Raises:
        Exception: if x_lower and x_upper have different shapes
    """
    if x_lower.shape != x_upper.shape:
        raise Exception('different grids for xmin, xmax provided')

    bins_width = x_upper - x_lower
    res = np.zeros(x_lower.shape)

    # The density is the constant 1/(1 - r) on r <= x <= 1, with
    # r = (m_mu / m_pi)^2, and zero outside; below we average that density
    # over each bin.
    m_muon = spec_data[7]['mass']
    m_pion = spec_data[2]['mass']
    r = m_muon**2 / m_pion**2
    xmin = r
    xmax = 1.

    # lower bin edge not contained: only the bin fraction above xmin counts
    cond = np.where(np.logical_and(xmin > x_lower, xmin < x_upper))
    res[cond] = 1 / (1 - r) * (x_upper[cond] - xmin) / bins_width[cond]

    # upper bin edge not contained: only the bin fraction below xmax counts
    cond = np.where(np.logical_and(x_lower < xmax, x_upper > xmax))
    res[cond] = 1 / (1 - r) * (xmax - x_lower[cond]) / bins_width[cond]

    # bins fully contained in [xmin, xmax]: the full constant density
    cond = np.where(np.logical_and(xmin <= x_lower, x_upper <= xmax))
    res[cond] = 1 / (1 - r)

    return res
d6be736cb901555e3539c892941e887bda0ac0e2
3,633,194
def secord_update(t, x, u, params=None):
    """Second order system dynamics: x'' + 2*zeta*omega0*x' + omega0^2*x = u.

    Args:
        t: time (unused; kept for ODE-solver signature compatibility).
        x: state [position, velocity].
        u: input (scalar or array-like; only u[0] is used).
        params: optional dict with keys 'omega0' (natural frequency,
            default 1.0) and 'zeta' (damping ratio, default 0.5).

    Returns:
        numpy.ndarray: state derivative [velocity, acceleration].
    """
    # Avoid the mutable-default-argument pitfall: use None as sentinel.
    if params is None:
        params = {}
    omega0 = params.get('omega0', 1.)
    zeta = params.get('zeta', 0.5)
    u = np.array(u, ndmin=1)
    return np.array([
        x[1],
        -2 * zeta * omega0 * x[1] - omega0*omega0 * x[0] + u[0]
    ])
b5af03a1b1da8a3b5d7ca9ef9f50d77279f567b2
3,633,195
from javax.swing.event import ChangeListener


def addChangeListener(target, listener, *args, **kwargs):
    """
    Shortcut for addEventListener(target, ChangeListener, 'stateChanged',
    listener).

    Registers *listener* as a Swing ChangeListener on *target*; any extra
    positional/keyword arguments are forwarded to ``addEventListener``.
    (Jython helper: ``javax.swing`` is only available on the JVM.)
    """
    return addEventListener(target, ChangeListener, 'stateChanged', listener,
                            *args, **kwargs)
c9cde3efe17f321b79097c84887f2b1c852c7345
3,633,196
from typing import List


def drop_no_image(df: pd.DataFrame, imaged_samples: List[int]) -> pd.DataFrame:
    """
    Pandas pipe function that keeps only the rows whose 'Sample' id appears
    in the list of imaged samples (i.e. drops samples with no images).
    """
    keep_mask = df['Sample'].isin(imaged_samples)
    return df[keep_mask]
796f23720828c265e0d96b330f23802b7f14dd02
3,633,197
from typing import OrderedDict


def parse_fasta(handle):
    """Parse sequences in a FASTA file.

    Sequence headers are trimmed after the first whitespace.  Handles both
    text and binary file handles.  Duplicate headers are skipped (with a
    warning); sequence lines appearing before any header are ignored
    (the original raised a NameError on such input).

    Args:
        handle: iterable of str or bytes lines.

    Returns:
        Sequences in FASTA file keyed on their headers (i.e. > line)
    """
    sequences = OrderedDict()
    header = None
    skip = False
    for line in handle:
        # Accept both text and binary handles.
        try:
            line = line.decode().strip()
        except AttributeError:
            line = line.strip()
        if line.startswith(">"):
            header = line[1:].split(" ", 1)[0]
            skip = header in sequences
            if skip:
                LOG.warning("Skipping duplicate sequence: %s", header)
            else:
                sequences[header] = ""
        elif header is not None and not skip:
            # Concatenate wrapped sequence lines under the current header.
            sequences[header] += line
    return sequences
acab430c325aaca5de0bc5851ffe948830dc21f0
3,633,198
def assign_cat(plugin):
    """Assigns `symbols` module mapping to the `Warp` plugin."""
    return [
        plugin.create_item(
            category=plugin.CATEGORY_SYMBOLS,
            label=entry[0],
            short_desc=entry[1],
            target=entry[1],
            args_hint=kp.ItemArgsHint.FORBIDDEN,
            hit_hint=kp.ItemHitHint.IGNORE)
        for entry in MAPPING_SYMBOLS
    ]
f5ec84d0e3662cafe69ac85d7e52965720109528
3,633,199