Columns: content (string, length 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
def get_tokenizer_from_saved_model(saved_model: SavedModel) -> SentencepieceTokenizer: """ Get tokenizer from tf SavedModel. :param SavedModel saved_model: tf SavedModel. :return: tokenizer. :rtype: SentencepieceTokenizer """ # extract functions that contain SentencePiece somewhere in there functions_with_sp = [ f for f in saved_model.meta_graphs[0].graph_def.library.function if "sentencepiecetokenizeop" in str(f).lower() ] assert len(functions_with_sp) == 1 # find SentencePieceOp (contains the model) in the found function nodes_with_sp = [ n for n in functions_with_sp[0].node_def if n.op == "SentencepieceOp" ] assert len(nodes_with_sp) == 1 # we can pretty much save the model into a file since it does not change model = nodes_with_sp[0].attr["model"].s # instantiate the model tokenizer = SentencepieceTokenizer(model) return tokenizer
6b524f9f14e286aa6ef43fe77773f9ec6503cf75
21,300
import heapq def heapq_merge(*iters, **kwargs): """Drop-in replacement for heapq.merge with key support""" if kwargs.get('key') is None: return heapq.merge(*iters) def wrap(x, key=kwargs.get('key')): return key(x), x def unwrap(x): _, value = x return value iters = tuple((wrap(x) for x in it) for it in iters) return (unwrap(x) for x in heapq.merge(*iters))
0693f667fb6b495680066488347d9894e84f6f0a
21,301
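For illustration, a minimal usage sketch of the heapq_merge helper from the row above, assuming that function is in scope; the input lists here are hypothetical.
a = [(1, "a"), (3, "c")]
b = [(2, "b"), (4, "d")]
merged = list(heapq_merge(a, b, key=lambda pair: pair[0]))
# merged == [(1, "a"), (2, "b"), (3, "c"), (4, "d")]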
import argparse def parse_args(): """ parse CLI arguments Parameters ---------- args : :class: `list` A list of arguments; generally, this should be ``sys.argv``. Returns ---------- :class: `argparse.Namespace` An object returned by ``argparse.parse_args``. """ parser = argparse.ArgumentParser(description='RPA to create language cards on Anki', prog='clac', usage='%(prog)s <word-or-list> [-y|--yes-rpa] [-r|--auto-remove]', epilog='Do you want to help? Collaborate on my project! :)') parser.add_argument('word_or_list', help='A single word or a path to a line-separated .txt file in which each line is a single word. CLAC will first assume that the argument is a path. If it fails, the argument will be treated as a single word.') parser.add_argument('-y', '--yes-rpa', help='Optional argument to run the RPA as soon as it saves the word.', action='store_true') # optional argument # parser.add_argument('-r', '--auto-remove', help='Optional argument to remove the folders used to save the data.', action='store_true') # TODO return parser.parse_args()
70c0db9745125ca040cf054b15f8840803738176
21,302
def parse_archive_links(html): """Parse the HTML of an archive links page.""" parser = _ArchiveLinkHTMLParser() parser.feed(html) return parser.archive_links
7894052d602cbe0db195b6fb9a9c1252163d5266
21,303
import json def processing_requests(): """ Handles the request for what is in processing. :return: JSON """ global processing global processing_mutex rc = [] response.content_type = "application/json" with processing_mutex: if processing: rc.append(processing) return json.dumps(rc)
76334b997efb659fb9d7502ec14357e8e6660293
21,304
def detect_feature(a, b=None): """ Detect the feature used in a relay program. Parameters ---------- a : Union[tvm.relay.Expr, tvm.IRModule] The input expression or module. b : Optional[Union[tvm.relay.Expr, tvm.IRModule]] The input expression or module. The two arguments cannot both be expression or module. Returns ------- features : Set[Feature] Features used in the program. """ if isinstance(a, IRModule): a, b = b, a return {Feature(int(x)) for x in _ffi_api.detect_feature(a, b)}
2b9bf11d9b37da7b4473a6da83867911b22586ec
21,305
def get_urls_from_loaded_sitemapindex(sitemapindex): """Get all the webpage urls in a retrieved sitemap index XML""" urls = set() # for loc_elem in sitemapindex_elem.findall('/sitemap/loc'): for loc_elem in sitemapindex.findall('//{http://www.sitemaps.org/schemas/sitemap/0.9}loc'): urls.update(get_urls_from_sitemap(loc_elem.text)) for loc_elem in sitemapindex.findall('//loc'): urls.update(get_urls_from_sitemap(loc_elem.text)) return urls
1a94166272385768929e1db70b643293e7c325b5
21,306
def genLinesegsnp(verts, colors = [], thickness = 2.0): """ gen objmnp :param objpath: :return: """ segs = LineSegs() segs.setThickness(thickness) if len(colors) == 0: segs.setColor(Vec4(.2, .2, .2, 1)) else: segs.setColor(colors[0], colors[1], colors[2], colors[3]) for i in range(len(verts)-1): segs.moveTo(verts[i][0], verts[i][1], verts[i][2]) segs.drawTo(verts[i+1][0], verts[i+1][1], verts[i+1][2]) objmnp = NodePath('linesegs') objmnp.attachNewNode(segs.create()) objmnp.setTransparency(TransparencyAttrib.MAlpha) return objmnp
71fc5c936fbe5dfdc528fc14fd6c0dd10d15ff3c
21,307
import os from datetime import datetime import time def ncores_traditional_recommendation_process(user_model_df, user_model_genres_distr_df, user_expected_items_df, items_mapping_dict, user_blocked_items_df, recommender_label, popularity_df, transaction_mean, control_count=None, start_time=None): """ A user by time Responsible for: the recommender algorithm prediction, the models to be used in the pos process and the pos processing :param start_time: :param control_count: :param user_blocked_items_df: :param transaction_mean: the users transactions mean :param popularity_df: DataFrame with items popularity :param user_model_df: All user transactions :param user_model_genres_distr_df: The user genres distribution :param user_expected_items_df: The user expected items in the final recommendation :param items_mapping_dict: A dict with all items in the system :param recommender_label: The recommender algorithm label :return: None """ # Get known items ids by the user items_ids = items_mapping_dict.keys() know_items_ids = user_model_df[ITEM_LABEL].unique().tolist() blocked_items_ids = user_blocked_items_df[ITEM_LABEL].unique().tolist() items_ids = set(items_ids) - set(blocked_items_ids) # Get unknown items ids by the user unknowing_items_ids = list(set(items_ids) - set(know_items_ids)) user_candidate_items_max_df = popularity_df[popularity_df[ITEM_LABEL].isin(unknowing_items_ids)] user_candidate_items_max_df.sort_values(by=[TRANSACTION_VALUE_LABEL], ascending=False) user_candidate_items_max_df = user_candidate_items_max_df[:CANDIDATES_LIST_SIZE] user_candidate_items_max_dict = user_transactions_df_to_item_mapping(user_candidate_items_max_df, items_mapping_dict) user_evaluation_results_df = pos_processing_calibration(user_model_genres_distr_df=user_model_genres_distr_df, candidates_items_mapping=user_candidate_items_max_dict, user_expected_items_ids=user_expected_items_df[ ITEM_LABEL].tolist(), recommender_label=recommender_label, transaction_mean=transaction_mean) if control_count is not None and control_count % 100 == 0: logger.info(' '.join(['PId:', str(os.getpid()), '->', 'Total of users done:', str(control_count), '->', 'Total time:', str(datetime.timedelta(seconds=time.time() - start_time))])) return user_evaluation_results_df
18a808bac73fc51596f89a3219180431e0109cfa
21,308
import numpy as np import pywt from skimage.transform import resize def enhance_puncta(img, level=7): """ Removing low frequency wavelet signals to enhance puncta. Depending on image size, try level 6~8. """ if level == 0: return img wp = pywt.WaveletPacket2D(data=img, wavelet='haar', mode='sym') back = resize(np.array(wp['d'*level].data), img.shape, order=3, mode='reflect')/(2**level) cimg = img - back cimg[cimg < 0] = 0 return cimg
7c05531bd85dd42296871f884a04cd30c187346e
21,309
def thumbnail(img, size = (1000,1000)): """Converts Pillow images to a different size without modifying the original image """ img_thumbnail = img.copy() img_thumbnail.thumbnail(size) return img_thumbnail
4eb49869a53d9ddd42ca8c184a12f0fedb8586a5
21,310
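For illustration, a small usage sketch of the thumbnail helper from the row above, assuming Pillow is installed; the source image created here is a hypothetical stand-in.
from PIL import Image
img = Image.new("RGB", (4000, 3000))       # hypothetical source image
small = thumbnail(img, size=(1000, 1000))  # the original image is left untouched
# small.size == (1000, 750): the aspect ratio is preserved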
def calculate_new_ratings(P1, P2, winner, type): """ calculate and return the new rating/rating_deviation for both songs Args: P1 (tuple or float): rating data for song 1 P2 (tuple or float): rating data for song 2 winner (str): left or right type (str): elo or glicko Returns: tuple: newly calculated ratings, rating_deviations """ s1, s2 = None, None if winner == 'left': s1, s2 = 1, 0 elif winner == 'right': s1, s2 = 0, 1 if type == 'elo': return calculate_elo(P1, P2, s1), calculate_elo(P2, P1, s2) elif type == 'glicko': return calculate_glicko_rating(P1, P2, s1), calculate_glicko_rating(P2, P1, s2)
23853c6fd4d6a977e0c0f28b5665baebcab3ae86
21,311
def age(a): """age in yr - age(scale factor)""" return _cosmocalc.age(a)
7f4cb143c1b5e56f3f7b1ebc0a916a371070740d
21,312
import argparse def cli_parser() -> argparse.ArgumentParser: """Create parser with set arguments.""" parser = argparse.ArgumentParser( # Also possible to add prog title to output, # if ommitted the filename is used (e.g. cli-simple.py) prog="CLI-COPERNICUS-DOWNLOAD", description="A simple example app to aid download of data from Copernicus", epilog=" --- ", formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) # Arguments in argparse can be optional, positional or required # Add named arguments (that is required for the tool to run) # Set the argument type and limit choices from a list parser.add_argument( "--portal", type=str, help="Data source portal", required=True, choices=["COP"] ) parser.add_argument( "--dataset", type=str, help="Dataset to be downloaded", required=True, choices=["E-OBS", "OTHER"], ) parser.add_argument( "--dryrun", type=str, help="Dry run of dataset to be downloaded, " "test only, no data will be downloaded", required=True, choices=["True", "False"], ) # Returns a parser object return parser
849a44cfd6ec32f3082a96f4b74c48865644b5ec
21,313
def _read_array(raster, band, bounds): """ Read array from raster """ if bounds is None: return raster._gdal_dataset.ReadAsArray() else: x_min, y_min, x_max, y_max = bounds forward_transform = affine.Affine.from_gdal(*raster.geo_transform) reverse_transform = ~forward_transform px_min, py_max = reverse_transform * (x_min, y_min) px_max, py_min = reverse_transform * (x_max, y_max) x_size = int(px_max - px_min) + 1 y_size = int(py_max - py_min) + 1 if band is not None: return raster._gdal_dataset.GetRasterBand(band).ReadAsArray(int(px_min), int(py_min), x_size, y_size) else: return raster._gdal_dataset.ReadAsArray(int(px_min), int(py_min), x_size, y_size)
12ad55500950d89bdc84ab29157de9faac17e76a
21,314
def makePlayerInfo(pl_name): """ Retrieves all the info for a player :param arg1: player name :type arg1: string :return: player info: budget, profit & sales (since the start of the game), drinks to sell this day :rtype: Json """ info = calculeMoneyInfo(pl_name, 0) drinkInfo = makeDrinkOffered(pl_name) return ({ "cash" : info['cash'], "profit" : info['profit'], "sales" : info['sales'], "drinksOffered" : drinkInfo })
4ebc7f11397091fa3d0c62db7fcfd82720eac530
21,315
def _FinalizeHeaders(found_fields, headers, flags): """Helper to organize the final headers that show in the report. The fields discovered in the user objects are kept separate from those created in the flattening process in order to allow checking the found fields against a list of those expected. Unexpected fields are identified. If the report is a subset of all fields, the headers are trimmed. Args: found_fields: A set of the fields found in all the user objects. headers: A set of the fields created in the flattening helpers. Will return with the complete set of fields to be printed. flags: Argparse flags object with csv_fields. Returns: Sorted list of headers. """ # Track known fields to notify user if/when fields change. A few are known # but not printed (they are denormalized and replaced below): expected_fields = set(_UserDictionaryParser.GetExpectedUserFields()) if found_fields > expected_fields: unexpected_fields = ', '.join(found_fields - expected_fields) log_utils.LogWarning( 'Unexpected user fields noticed: %s.' % unexpected_fields) headers |= found_fields headers -= set(['emails', 'name', 'nonEditableAliases']) # Prune the headers reference object that is used outside this # function by using discard() if a subset of fields is desired. if flags.csv_fields: extra_csv_fields = set(flags.csv_fields) - headers if extra_csv_fields: print '** Ignoring unknown csv_fields: %s.' % ', '.join( sorted(extra_csv_fields)) for field in list(headers): if field not in flags.csv_fields: headers.discard(field) return sorted(headers)
9d44f10c4890ca48cc00f79b24e0019e346028d0
21,316
import zipfile import os def create_zip(archive, compression, cmd, verbosity, interactive, filenames): """Create a ZIP archive with the zipfile Python module.""" try: with zipfile.ZipFile(archive, 'w') as zfile: for filename in filenames: if os.path.isdir(filename): write_directory(zfile, filename) else: zfile.write(filename) except Exception as err: msg = "error creating %s: %s" % (archive, err) raise util.PatoolError(msg) return None
2fcb5bd079762d16fca8f3c0998f54812e8d9122
21,317
import numpy as np def get_outmost_points(contours): """Get the bounding rectangle of all the contours""" all_points = np.concatenate(contours) return get_bounding_rect(all_points)
173631e3397226459d0bf3a91157d2e74660e506
21,318
def dhcp_release_packet(eth_dst='ff:ff:ff:ff:ff:ff', eth_src='00:01:02:03:04:05', ip_src='0.0.0.0', ip_dst='255.255.255.255', src_port=68, dst_port=67, bootp_chaddr='00:01:02:03:04:05', bootp_ciaddr='1.2.3.4', dhcp_server_ip='1.2.3.4'): """ Return a dhcp release packet Supports a few parameters: @param eth_dst Destination MAC, should be broadcast address @param eth_src Source MAC, should be address of client @param ip_src Source IP, should be default route IP address @param ip_dst Destination IP, broadcast IP address @param src_port Source Port, 68 for DHCP client @param dst_port Destination Port, 67 for DHCP Server @param bootp_chaddr MAC Address of client @param bootp_ciaddr Client IP Address @param dhcp_server_ip IP address of DHCP server """ pkt = scapy.Ether(dst=eth_dst, src=eth_src)/ \ scapy.IP(src=ip_src, dst=ip_dst)/ \ scapy.UDP(sport=src_port, dport=dst_port)/ \ scapy.BOOTP(chaddr=bootp_chaddr, ciaddr=bootp_ciaddr)/ \ scapy.DHCP(options=[('message-type', 'release'), ('server_id', dhcp_server_ip), ('end')]) return pkt
63885cb982fbea5f5ff45c850b1bbf00e1154004
21,319
import os def get_unique_dir(log_dir='', max_num=100, keep_original=False): """Get a unique dir name based on log_dir. If keep_original is True, it checks the list {log_dir, log_dir-0, log_dir-1, ..., log_dir-[max_num-1]} and returns the first non-existing dir name. If keep_original is False then log_dir is excluded from the list. """ if keep_original and not os.path.exists(log_dir): if log_dir == '': raise ValueError('log_dir cannot be empty with keep_original=True.') return log_dir else: for i in range(max_num): _dir = '{}-{}'.format(log_dir, i) if not os.path.exists(_dir): return _dir raise ValueError('Too many dirs starting with {}.'.format(log_dir))
0cc517f67929fd29e1a38d0dc0aae73e3e4c9252
21,320
from typing import Optional import datetime from decimal import Decimal def dcfc_30_e_plus_360(start: datetime.date, asof: datetime.date, end: datetime.date, freq: Optional[Decimal] = None) -> Decimal: """ Computes the day count fraction for the "30E+/360" convention. :param start: The start date of the period. :param asof: The date as of which the day count fraction is to be calculated. :param end: The end date of the period (a.k.a. termination date). :return: Day count fraction. >>> ex1_start, ex1_asof = datetime.date(2007, 12, 28), datetime.date(2008, 2, 28) >>> ex2_start, ex2_asof = datetime.date(2007, 12, 28), datetime.date(2008, 2, 29) >>> ex3_start, ex3_asof = datetime.date(2007, 10, 31), datetime.date(2008, 11, 30) >>> ex4_start, ex4_asof = datetime.date(2008, 2, 1), datetime.date(2009, 5, 31) >>> round(dcfc_30_e_plus_360(start=ex1_start, asof=ex1_asof, end=ex1_asof), 14) Decimal('0.16666666666667') >>> round(dcfc_30_e_plus_360(start=ex2_start, asof=ex2_asof, end=ex2_asof), 14) Decimal('0.16944444444444') >>> round(dcfc_30_e_plus_360(start=ex3_start, asof=ex3_asof, end=ex3_asof), 14) Decimal('1.08333333333333') >>> round(dcfc_30_e_plus_360(start=ex4_start, asof=ex4_asof, end=ex4_asof), 14) Decimal('1.33333333333333') """ ## Get the new start date, if required: if start.day == 31: start = datetime.date(start.year, start.month, 30) ## Get the new asof date, if required: if asof.day == 31: asof = asof + datetime.timedelta(days=1) ## Compute number of days: nod = (asof.day - start.day) + 30 * (asof.month - start.month) + 360 * (asof.year - start.year) ## Done, compute and return the day count fraction: return nod / Decimal(360)
99cc53d69eb1151056475967459be072cff4f773
21,321
from traceback import extract_stack def get_current_func_info_by_traceback(self=None, logger=None) -> None: """ Get the calling function's execution info via traceback and print it usage example: class A: def a(self): def cc(): def dd(): get_current_func_info_by_traceback(self=self) dd() cc() def b(): get_current_func_info_by_traceback() aa = A() aa.a() b() # -> A.a.cc.dd in line_num: 131 invoked # -> <module>.b in line_num: 136 invoked :param self: the instance (self) of the calling class :param logger: :return: """ try: extract_stack_info = extract_stack() # pprint(extract_stack_info) # chain of invoked function names, excluding the class name detail_func_invoked_info = '' for item in extract_stack_info[1:-1]: # extract_stack_info[1:-1] excludes get_current_func_info_by_traceback itself tmp_str = '{}' if detail_func_invoked_info == '' else '.{}' detail_func_invoked_info += tmp_str.format(item[2]) # print(detail_func_invoked_info) line_num = extract_stack_info[-2][1] _print(msg='-> {}.{} in line_num: {} invoked'.format( # class name extract_stack_info[0][2] if self is None else self.__class__.__name__, detail_func_invoked_info, line_num,), logger=logger, log_level=1,) except Exception as e: _print(msg='Encountered an error:', logger=logger, exception=e, log_level=2) return None
c89496eb7303acb91ef64587d10d5b7350e9a00e
21,322
def augment_timeseries_shift(x: tf.Tensor, max_shift: int = 10) -> tf.Tensor: """Randomly shift the time series. Parameters ---------- x : tf.Tensor (T, ...) The tensor to be augmented. max_shift : int The maximum shift to be randomly applied to the tensor. Returns ------- x : tf.Tensor The augmented tensor. """ # shift the data by removing a random number of later time points dt = tf.random.uniform(shape=[], minval=0, maxval=max_shift, dtype=tf.int32) return x[:-dt, ...]
2a9265ea72478d9c860f549637ea629e4b86f4f0
21,323
def endpoint(fun): """Decorator to denote a method which returns some result to the user""" if not hasattr(fun, '_zweb_post'): fun._zweb_post = [] fun._zweb = _LEAF_METHOD fun._zweb_sig = _compile_signature(fun, partial=False) return fun
8050a6d1c6e23c1feeec4744edd45b7ae589aab8
21,324
import torch def focal_prob(attn, batch_size, queryL, sourceL): """ consider the confidence g(x) for each fragment as the sqrt of their similarity probability to the query fragment sigma_{j} (xi - xj)gj = sigma_{j} xi*gj - sigma_{j} xj*gj attn: (batch, queryL, sourceL) """ # -> (batch, queryL, sourceL, 1) xi = attn.unsqueeze(-1).contiguous() # -> (batch, queryL, 1, sourceL) xj = attn.unsqueeze(2).contiguous() # -> (batch, queryL, 1, sourceL) xj_confi = torch.sqrt(xj) xi = xi.view(batch_size*queryL, sourceL, 1) xj = xj.view(batch_size*queryL, 1, sourceL) xj_confi = xj_confi.view(batch_size*queryL, 1, sourceL) # -> (batch*queryL, sourceL, sourceL) term1 = torch.bmm(xi, xj_confi) term2 = xj * xj_confi funcF = torch.sum(term1-term2, dim=-1) # -> (batch*queryL, sourceL) funcF = funcF.view(batch_size, queryL, sourceL) fattn = torch.where(funcF > 0, torch.ones_like(attn), torch.zeros_like(attn)) return fattn
968baad0fa6f78b49eeca1056556a6c2ff3a9cef
21,325
def get_fibonacci_iterative(n: int) -> int: """ Calculate the fibonacci number at position 'n' in an iterative way :param n: position number :return: position n of Fibonacci series """ a = 0 b = 1 for i in range(n): a, b = b, a + b return a
0ece23b00d810ce1c67cf5434cf26e1e21685c20
21,326
def get_sample_content(filename): """Return sample content form file.""" with open( "tests/xml/{filename}".format( filename=filename), encoding="utf-8") as file: return file.read()
2ba60ad6473ec53f6488b42ceb7090b0f7c8f985
21,327
def create_contrasts(task): """ Create a contrasts list """ contrasts = [] contrasts += [('Go', 'T', ['GO'], [1])] contrasts += [('GoRT', 'T', ['GO_rt'], [1])] contrasts += [('StopSuccess', 'T', ['STOP_SUCCESS'], [1])] contrasts += [('StopUnsuccess', 'T', ['STOP_UNSUCCESS'], [1])] contrasts += [('StopUnsuccessRT', 'T', ['STOP_UNSUCCESS_rt'], [1])] contrasts += [('Go-StopSuccess', 'T', ['GO', 'STOP_SUCCESS'], [1, -1])] contrasts += [('Go-StopUnsuccess', 'T', ['GO', 'STOP_UNSUCCESS'], [1, -1])] contrasts += [('StopSuccess-StopUnsuccess', 'T', ['STOP_SUCCESS', 'STOP_UNSUCCESS'], [1, -1])] # add negative repl_w_neg = [] for con in contrasts: if '-' not in con[0]: newname = 'neg_%s' % con[0] else: newname = "-".join(con[0].split("-")[::-1]) new = (newname, 'T', con[2], [-x for x in con[3]]) repl_w_neg.append(con) repl_w_neg.append(new) return repl_w_neg
221b1b1ebcc6c8d0e2fcb32d004794d1b0a47522
21,328
def project(raster_path, boxes): """Project boxes into utm""" with rasterio.open(raster_path) as dataset: bounds = dataset.bounds pixelSizeX, pixelSizeY = dataset.res #subtract origin. Recall that numpy origin is top left! Not bottom left. boxes["left"] = (boxes["xmin"] * pixelSizeX) + bounds.left boxes["right"] = (boxes["xmax"] * pixelSizeX) + bounds.left boxes["top"] = bounds.top - (boxes["ymin"] * pixelSizeY) boxes["bottom"] = bounds.top - (boxes["ymax"] * pixelSizeY) # combine column to a shapely Box() object, save shapefile boxes['geometry'] = boxes.apply( lambda x: shapely.geometry.box(x.left, x.top, x.right, x.bottom), axis=1) boxes = geopandas.GeoDataFrame(boxes, geometry='geometry') #set projection, (see dataset.crs) hard coded here boxes.crs = {'init': "{}".format(dataset.crs)} #Select columns boxes = boxes[["left", "bottom", "right", "top", "score", "label", "geometry"]] return boxes
92e7bc01492b3370767ac56b18b2f937caafc6c3
21,329
def mean_relative_error(preds: Tensor, target: Tensor) -> Tensor: """ Computes mean relative error Args: preds: estimated labels target: ground truth labels Return: Tensor with mean relative error Example: >>> from torchmetrics.functional import mean_relative_error >>> x = torch.tensor([0., 1, 2, 3]) >>> y = torch.tensor([0., 1, 2, 2]) >>> mean_relative_error(x, y) tensor(0.1250) .. deprecated:: v0.4 Use :func:`torchmetrics.functional.mean_absolute_percentage_error`. Will be removed in v0.5. """ warn( "Function `mean_relative_error` was deprecated v0.4 and will be removed in v0.5." "Use `mean_absolute_percentage_error` instead.", DeprecationWarning ) sum_rltv_error, n_obs = _mean_absolute_percentage_error_update(preds, target) return _mean_absolute_percentage_error_compute(sum_rltv_error, n_obs)
23c7efe3a91179c670383b1687583dc903052a54
21,330
from typing import Dict from typing import Any def render_dendrogram(dend: Dict["str", Any], plot_width: int, plot_height: int) -> Figure: """ Render a missing dendrogram. """ # list of lists of dcoords and icoords from scipy.dendrogram xs, ys, cols = dend["icoord"], dend["dcoord"], dend["ivl"] # if the number of columns is greater than 20, make the plot wider if len(cols) > 20: plot_width = 28 * len(cols) fig = Figure( plot_width=plot_width, plot_height=plot_height, toolbar_location=None, tools="", ) # round the coordinates to integers, and plot the dendrogram xs = [[round(coord) for coord in coords] for coords in xs] ys = [[round(coord, 2) for coord in coords] for coords in ys] fig.multi_line(xs=xs, ys=ys, line_color="#8073ac") # extract the horizontal lines for the hover tooltip h_lns_x = [coords[1:3] for coords in xs] h_lns_y = [coords[1:3] for coords in ys] null_mismatch_vals = [coord[0] for coord in h_lns_y] source = ColumnDataSource(dict(x=h_lns_x, y=h_lns_y, n=null_mismatch_vals)) h_lns = fig.multi_line(xs="x", ys="y", source=source, line_color="#8073ac") hover_pts = HoverTool( renderers=[h_lns], tooltips=[("Average distance", "@n{0.1f}")], line_policy="interp", ) fig.add_tools(hover_pts) # shorten column labels if necessary, and override coordinates with column names cols = [f"{col[:16]}..." if len(col) > 18 else col for col in cols] axis_coords = list(range(5, 10 * len(cols) + 1, 10)) axis_overrides = dict(zip(axis_coords, cols)) fig.xaxis.ticker = axis_coords fig.xaxis.major_label_overrides = axis_overrides fig.xaxis.major_label_orientation = np.pi / 3 fig.yaxis.axis_label = "Average Distance Between Clusters" fig.grid.visible = False return fig
1dc61a5ddffc85e6baa9bfbb28620a3039dc8993
21,331
from typing import List import logging def sort_by_fullname(data: List[dict]) -> List[dict]: """ sort data by full name :param data: :return: """ logging.info("Sorting data by fullname...") try: data.sort(key=lambda info: info["FULL_NAME"], reverse=False) except Exception as exception: logging.exception(exception) raise logging.info("Sort data by fullname successfully!") return data
0b4ecf53893bda7d226b3c26fe51b9abc073294b
21,332
def get_vrf_interface(device, vrf): """ Gets the subinterfaces for vrf Args: device ('obj'): device to run on vrf ('str'): vrf to search under Returns: interfaces('list'): List of interfaces under specified vrf None Raises: None """ log.info("Getting the interfaces under vrf {vrf}".format(vrf=vrf)) try: out = device.parse("show vrf {vrf}".format(vrf=vrf)) except SchemaEmptyParserError: return None if out and "vrf" in out and vrf in out["vrf"]: return out["vrf"][vrf].get("interfaces", None)
57dedbd148f208038bd523c1901827ac7eca8754
21,333
def rsptext(rsp,subcode1=0,subcode2=0,erri='',cmd='',subcmd1='',subcmd2=''): """ Adabas response code to text conversion """ global rspplugins if rsp in rspplugins: plugin = rspplugins[rsp] # get the plugin function return plugin(rsp, subcode1=subcode1, subcode2=subcode2, cmd=cmd,subcmd1=subcmd1,subcmd2=subcmd2) c1=chr(subcode1 & 0xff) c2=chr( (subcode1 >> 8)& 0xff) c3=chr(subcode2 & 0xff) c4=chr( (subcode2 >> 8)& 0xff) if subcode2 == 0: if subcode1>>16: c1=chr( (subcode1 >> 24)& 0xff) c2=chr( (subcode1 >> 16)& 0xff) if c1 > '\x80' and c2 > '\x80': c1 = str2asc(c1) c2 = str2asc(c2) if c1>' ' and c2>' ': # ff = field name if both bytes > ' ' ff='"'+c1+c2+'"' elif c3>' ' and c4>' ': ff='"'+c3+c4+'"' else: ff='' if subcode2==0 and subcode1==0: ss='' else: ss=' sub=%d,%d X%04X,%04X %s' % (subcode1,subcode2,subcode1,subcode2,ff) if erri: ss+=' errinf=%08X %r' % (erri,erri) if rsp in rspdict: subx='' # subcode text rspx = rspdict[rsp] if type(rspx) == type( (1,)) : # tuple type ? subdict = rspx[1] # subcode dictionary rspx=rspx[0] # response code text sx2 = subcode2 & 0xffff sx1 = subcode1 & 0xffff subx = '' if sx2 and sx2 in subdict: subx += ' - \n\tSubcode %d: %s' % (sx2,subdict[sx2]) elif sx1 and sx1 in subdict: subx = ' - \n\tSubcode %d: %s' % (sx1,subdict[sx1]) elif rsp==132: # if LOB resp & subcode not listed subx = ' - \n\t'+rspdict.get(subcode2,'No details for subcode') return 'Adabas Response %d%s: %s%s' %\ (rsp, ss, rspx, subx) else: return 'Adabas Response %s: no explanation available' % rsp
3cc817e812ea7bba346338e09965e025639631eb
21,334
def to_dataframe(data: xr.DataArray, *args, **kwargs) -> pd.DataFrame: """ Replacement for `xr.DataArray.to_dataframe` that adds the attrs for the given DataArray into the resultant DataFrame. Parameters ---------- data : xr.DataArray the data to convert to DataFrame Returns ------- pd.DataFrame a pandas DataFrame containing the data in the given DataArray, including the global attributes """ df = data.to_dataframe(*args, **kwargs) for k, v in data.attrs.items(): df[k] = v return df
69179fc48ce9ca04e8ee99967ce44b15946f9a57
21,335
def check_position(position): """Determines if the transform is valid. That is, not off-keypad.""" if position == (0, -3) or position == (4, -3): return False if (-1 < position[0] < 5) and (-4 < position[1] < 1): return True else: return False
f95ab22ce8da386284040626ac90c908a17b53fa
21,336
def mobilenet_wd4_cub(num_classes=200, **kwargs): """ 0.25 MobileNet-224 model for CUB-200-2011 from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861. Parameters: ---------- num_classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_mobilenet(num_classes=num_classes, width_scale=0.25, model_name="mobilenet_wd4_cub", **kwargs)
f9e367058261da89a3714b543270628ab3941e12
21,337
import torch def base_plus_copy_indices(words, dynamic_vocabs, base_vocab, volatile=False): """Compute base + copy indices. Args: words (list[list[unicode]]) dynamic_vocabs (list[HardCopyDynamicVocab]) base_vocab (HardCopyVocab) volatile (bool) Returns: MultiVocabIndices """ unk = base_vocab.UNK copy_seqs = [] for seq, dyna_vocab in izip(words, dynamic_vocabs): word_to_copy = dyna_vocab.word_to_copy_token normal_copy_seq = [] for w in seq: normal_copy_seq.append(word_to_copy.get(w, unk)) copy_seqs.append(normal_copy_seq) # each SeqBatch.values has shape (batch_size, seq_length) base_indices = SequenceBatch.from_sequences(words, base_vocab, volatile=volatile) copy_indices = SequenceBatch.from_sequences(copy_seqs, base_vocab, volatile=volatile) assert_tensor_equal(base_indices.mask, copy_indices.mask) # has shape (batch_size, seq_length, 2) concat_values = torch.stack([base_indices.values, copy_indices.values], 2) return MultiVocabIndices(concat_values, base_indices.mask)
e6e9d42186c05d33a04c58c506e0e9b97eadac6a
21,338
def font_encoding(psname): """Return encoding name given a psname""" return LIBRARY.encoding(psname)
fd5d2b000624a4d04980c88cc78cd97bf49bca94
21,339
def shader_with_tex_offset(offset): """Returns a vertex FileShader using a texture access with the given offset.""" return FileShader(shader_source_with_tex_offset(offset), ".vert")
0df316dd97889b3b2541d6d21970768e1cb70fe6
21,340
def braycurtis(u, v): """ d = braycurtis(u, v) Computes the Bray-Curtis distance between two n-vectors u and v, \sum{|u_i-v_i|} / \sum{|u_i+v_i|}. """ u = np.asarray(u) v = np.asarray(v) return abs(u-v).sum() / abs(u+v).sum()
693b7f0108f9f99e0950d81c2be1e9dc0bd25d86
21,341
def _load_pyfunc(path): """ Load PyFunc implementation. Called by ``pyfunc.load_pyfunc``. :param path: Local filesystem path to the MLflow Model with the ``fastai`` flavor. """ return _FastaiModelWrapper(_load_model(path))
f8349d3580c5ca407a47b24f901c8f9a7f532c77
21,342
def fit_pseudo_voigt(x,y,p0=None,fit_alpha=True,alpha_guess=0.5): """Fits the data with a pseudo-voigt peak. Parameters ----------- x: np.ndarray Array with x values y: np.ndarray Array with y values p0: list (Optional) It contains a initial guess the for the pseudo-voigt variables, in the order: p0 = [x0,sigma,amplitude,constant,alpha]. If None, the code will create a guess. fit_alpha: boolean (Optional) Option to fit the alpha parameter. alpha_guess: float (Optional) If alpha is being fitted, then this will be the initial guess. Otherwise it will be the fixed parameter used. For lorenzian: alpha = 1, for gaussian: alpha = 0. Returns ----------- popt: np.ndarray Array with the optimized pseudo-voigt parameters. """ if p0 is None: width = (x.max()-x.min())/10. index = y == y.max() p0 = [x[index][0],width,y.max()*width*np.sqrt(np.pi/np.log(2)),y[0],alpha_guess] if fit_alpha is False: popt,pcov = curve_fit(lambda x,x0,sigma,amplitude,constant: pseudo_voigt(x,x0,sigma,amplitude,constant,alpha_guess), x,y,p0=p0[:-1]) popt = np.append(popt,alpha_guess) else: popt,pcov = curve_fit(pseudo_voigt,x,y,p0=p0) return popt
8abd61b44665632cc4e2ae21f52116757e00d2b9
21,343
import os import logging def get_ready_directories(directory): """Returns a dict of file lists keyed by build uuid. Each build directory should have a 'buildinfo' and an 'inventory.yaml' file which are not empty. """ log_files = {} for root, _, files in os.walk(directory): build_uuid = root.split('/')[-1] if check_info_files(root, files): files.remove("buildinfo") files.remove("inventory.yaml") log_files[build_uuid] = files else: logging.info("Skipping build with uuid %s. Probably not all " "files are downloaded yet." % build_uuid) continue return log_files
5ffca6fd995d3ed977967b8090d2478d42be919b
21,344
from datetime import datetime def get_name_of_day(str_date): """ Return the name of the day of the week. """ day = datetime.fromisoformat(str_date).weekday() return DAYS_NAME.get(day)
ae12d7b8ec44c2fb6edcf252ed3463a385353f30
21,345
def k_fold_split(ratings, min_num_ratings=10, k=4): """ Creates the k (training set, test_set) used for k_fold cross validation :param ratings: initial sparse matrix of shape (num_items, num_users) :param min_num_ratings: all users and items must have at least min_num_ratings per user and per item to be kept :param k: number of fold :return: a list fold of length k such that - fold[l][0] is a list of tuples (i,j) of the entries of 'ratings' that are the l-th testing set - fold[l][1] is a list of tuples (i,j) of the entries of 'ratings' that are the l-th training set """ num_items_per_user = np.array((ratings != 0).sum(axis=0)).flatten() num_users_per_item = np.array((ratings != 0).sum(axis=1).T).flatten() # set seed np.random.seed(988) # select user and item based on the condition. valid_users = np.where(num_items_per_user >= min_num_ratings)[0] valid_items = np.where(num_users_per_item >= min_num_ratings)[0] valid_ratings = ratings[valid_items, :][:, valid_users] nnz_row, nnz_col = valid_ratings.nonzero() nnz = list(zip(nnz_row, nnz_col)) nnz = np.random.permutation(nnz) len_splits = int(len(nnz) / k) splits = [] for i in range(k): splits.append(nnz[i * len_splits: (i + 1) * len_splits]) splits = [f.tolist() for f in splits] folds = [] for i in range(k): tmp = [] for j in range(k): if j != i: tmp = tmp + splits[j] folds.append([splits[i], tmp]) return folds
8b151e291e3365d7986cdc7b876ef630efcb60b4
21,346
def merge_dict(a, b, path:str=None): """ Merges b into a. Args: a: destination dict b: source dict path(str, optional): (Default value = None) Returns: the merged dict a Raises: """ if path is None: path = [] for key in b: if key in a: if isinstance(a[key], dict) and isinstance(b[key], dict): merge_dict(a[key], b[key], path + [str(key)]) else: a[key] = b[key] else: a[key] = b[key] return a
cd260c005b07c9c84b14a14cae3d4dc54fe26b8c
21,347
import json def validateSignedOfferData(adat, ser, sig, tdat, method="igo"): """ Returns deserialized version of serialization ser which Offer if offer request is correctly formed. Otherwise returns None adat is thing's holder/owner agent resource ser is json encoded unicode string of request sig is base64 encoded signature from request header "signer" tag tdat is thing data resource offer request fields { "uid": offeruniqueid, "thing": thingDID, "aspirant": AgentDID, "duration": timeinsecondsofferisopen, } """ try: try: # get signing key of request from thing resource (adid, index, akey) = extractDatSignerParts(tdat) except ValueError as ex: raise ValidationError("Missing or invalid signer") # get agent key at index from signer data. assumes that resource is valid try: averkey = adat["keys"][index]["key"] except (TypeError, KeyError, IndexError) as ex: raise ValidationError("Missing or invalid signer key") if len(averkey) != 44: raise ValidationError("Invalid signer key") # invalid length for base64 encoded key # verify request using agent signer verify key if not verify64u(sig, ser, averkey): raise ValidationError("Unverifiable signatrue") # signature fails # now validate offer data try: dat = json.loads(ser, object_pairs_hook=ODict) except ValueError as ex: raise ValidationError("Invalid json") # invalid json if not dat: # offer request must not be empty raise ValidationError("Empty body") if not isinstance(dat, dict): # must be dict subclass raise ValidationError("JSON not dict") requireds = ("uid", "thing", "aspirant", "duration") for field in requireds: if field not in dat: raise ValidationError("Missing missing required field {}".format(field)) if not dat["uid"]: # uid must not be empty raise ValidationError("Empty uid") if dat["thing"] != tdat['did']: raise ValidationError("Not same thing") aspirant = dat["aspirant"] try: # correct did format pre:method:keystr pre, meth, keystr = aspirant.split(":") except ValueError as ex: raise ValidationError("Invalid aspirant") if pre != "did" or meth != method: raise ValidationError("Invalid aspirant") # did format bad try: duration = float(dat["duration"]) except ValueError as ex: raise ValidationError("Invalid duration") if duration < PROPAGATION_DELAY * 2.0: raise ValidationError("Duration too short") except ValidationError: raise except Exception as ex: # unknown problem raise ValidationError("Unexpected error") return dat
4aebe14b8a90dc1c3e47763ce49e142d77f99bd9
21,348
def get_relevant_phrases(obj=None): """ Get all phrases to be searched for. This includes all SensitivePhrases, and any RelatedSensitivePhrases that refer to the given object. :param obj: A model instance to check for sensitive phrases made specifically for that instance. :return: a dictionary of replacement phrases keyed by the phrases being replaced. """ replacements = [] content_type = ContentType.objects.get_for_model(obj) related_sensitive_phrases = RelatedSensitivePhrase.objects.filter( content_type__pk=content_type.id, object_id=obj.id ).extra(select={'length': 'Length(phrase)'}).order_by('-length', 'phrase') for phrase in related_sensitive_phrases: replacements.append({ 'phrase': phrase.phrase, 'replacement': phrase.replace_phrase, 'start_boundary': phrase.check_for_word_boundary_start, 'end_boundary': phrase.check_for_word_boundary_end }) sensitive_phrases = SensitivePhrase.objects.all() \ .extra(select={'length': 'Length(phrase)'}).order_by('-length', 'phrase') for phrase in sensitive_phrases: replacements.append({ 'phrase': phrase.phrase, 'replacement': phrase.replace_phrase, 'start_boundary': phrase.check_for_word_boundary_start, 'end_boundary': phrase.check_for_word_boundary_end }) return replacements
951166c89dc8e257bce512d13cee592e1266efae
21,349
import struct def _prepare_cabal_inputs( hs, cc, posix, dep_info, cc_info, direct_cc_info, component, package_id, tool_inputs, tool_input_manifests, cabal, setup, setup_deps, setup_dep_info, srcs, compiler_flags, flags, generate_haddock, cabal_wrapper, package_database, verbose, transitive_haddocks, dynamic_binary = None): """Compute Cabal wrapper, arguments, inputs.""" with_profiling = is_profiling_enabled(hs) # Haskell library dependencies or indirect C library dependencies are # already covered by their corresponding package-db entries. We only need # to add libraries and headers for direct C library dependencies to the # command line. direct_libs = get_ghci_library_files(hs, cc.cc_libraries_info, cc.cc_libraries) # The regular Haskell rules perform mostly static linking, i.e. where # possible all C library dependencies are linked statically. Cabal has no # such mode, and since we have to provide dynamic C libraries for # compilation, they will also be used for linking. Hence, we need to add # RUNPATH flags for all dynamic C library dependencies. Cabal also produces # a dynamic and a static Haskell library in one go. The dynamic library # will link other Haskell libraries dynamically. For those we need to also # provide RUNPATH flags for dynamic Haskell libraries. (_, dynamic_libs) = get_library_files( hs, cc.cc_libraries_info, cc.transitive_libraries, dynamic = True, ) # Executables build by Cabal will link Haskell libraries statically, so we # only need to include dynamic C libraries in the runfiles tree. (_, runfiles_libs) = get_library_files( hs, cc.cc_libraries_info, get_cc_libraries(cc.cc_libraries_info, cc.transitive_libraries), dynamic = True, ) # Setup dependencies are loaded by runghc. setup_libs = get_ghci_library_files(hs, cc.cc_libraries_info, cc.setup_libraries) # The regular Haskell rules have separate actions for linking and # compilation to which we pass different sets of libraries as inputs. The # Cabal rules, in contrast, only have a single action for compilation and # linking, so we must provide both sets of libraries as inputs to the same # action. 
transitive_compile_libs = get_ghci_library_files(hs, cc.cc_libraries_info, cc.transitive_libraries) transitive_link_libs = _concat(get_library_files(hs, cc.cc_libraries_info, cc.transitive_libraries)) env = dict(hs.env) env["PATH"] = join_path_list(hs, _binary_paths(tool_inputs) + posix.paths) if hs.toolchain.is_darwin: env["SDKROOT"] = "macosx" # See haskell/private/actions/link.bzl if verbose: env["CABAL_VERBOSE"] = "True" args = hs.actions.args() package_databases = dep_info.package_databases transitive_headers = cc_info.compilation_context.headers direct_include_dirs = depset(transitive = [ direct_cc_info.compilation_context.includes, direct_cc_info.compilation_context.quote_includes, direct_cc_info.compilation_context.system_includes, ]) direct_lib_dirs = [file.dirname for file in direct_libs] args.add_all([component, package_id, generate_haddock, setup, cabal.dirname, package_database.dirname]) args.add_joined([ arg for package_id in setup_deps for arg in ["-package-id", package_id] ] + [ arg for package_db in setup_dep_info.package_databases.to_list() for arg in ["-package-db", "./" + _dirname(package_db)] ], join_with = " ", format_each = "--ghc-arg=%s", omit_if_empty = False) args.add("--flags=" + " ".join(flags)) args.add_all(compiler_flags, format_each = "--ghc-option=%s") if dynamic_binary: args.add_all( [ "--ghc-option=-optl-Wl,-rpath," + create_rpath_entry( binary = dynamic_binary, dependency = lib, keep_filename = False, prefix = relative_rpath_prefix(hs.toolchain.is_darwin), ) for lib in dynamic_libs ], uniquify = True, ) args.add("--") args.add_all(package_databases, map_each = _dirname, format_each = "--package-db=%s") args.add_all(direct_include_dirs, format_each = "--extra-include-dirs=%s") args.add_all(direct_lib_dirs, format_each = "--extra-lib-dirs=%s", uniquify = True) if with_profiling: args.add("--enable-profiling") # Redundant with _binary_paths() above, but better be explicit when we can. args.add_all(tool_inputs, map_each = _cabal_tool_flag) inputs = depset( [setup, hs.tools.ghc, hs.tools.ghc_pkg, hs.tools.runghc], transitive = [ depset(srcs), depset(cc.files), package_databases, setup_dep_info.package_databases, transitive_headers, depset(setup_libs), depset(transitive_compile_libs), depset(transitive_link_libs), depset(transitive_haddocks), setup_dep_info.interface_dirs, setup_dep_info.hs_libraries, dep_info.interface_dirs, dep_info.hs_libraries, tool_inputs, ], ) input_manifests = tool_input_manifests + hs.toolchain.cc_wrapper.manifests return struct( cabal_wrapper = cabal_wrapper, args = args, inputs = inputs, input_manifests = input_manifests, env = env, runfiles = depset(direct = runfiles_libs), )
4d42e6b772a64bc721e30907417dd8c734ce79e6
21,350
def split_kp(kp_joined, detach=False): """ Split the given keypoints into two sets(one for driving video frames, and the other for source image) """ if detach: kp_video = {k: v[:, 1:].detach() for k, v in kp_joined.items()} kp_appearance = {k: v[:, :1].detach() for k, v in kp_joined.items()} else: kp_video = {k: v[:, 1:] for k, v in kp_joined.items()} kp_appearance = {k: v[:, :1] for k, v in kp_joined.items()} return {'kp_driving': kp_video, 'kp_source': kp_appearance}
0396003a17172a75b121ddb43c9b9cf14ee3e458
21,351
def low_shelve(signal, frequency, gain, order, shelve_type='I', sampling_rate=None): """ Create and apply first or second order low shelve filter. Uses the implementation of [#]_. Parameters ---------- signal : Signal, None The Signal to be filtered. Pass None to create the filter without applying it. frequency : number Characteristic frequency of the shelve in Hz gain : number Gain of the shelve in dB order : number The shelve order. Must be ``1`` or ``2``. shelve_type : str Defines the characteristic frequency. The default is ``'I'`` ``'I'`` defines the characteristic frequency 3 dB below the gain value if the gain is positive and 3 dB above the gain value otherwise ``'II'`` defines the characteristic frequency at 3 dB if the gain is positive and at -3 dB if the gain is negative. ``'III'`` defines the characteristic frequency at gain/2 dB sampling_rate : None, number The sampling rate in Hz. Only required if signal is ``None``. The default is ``None``. Returns ------- signal : Signal The filtered signal. Only returned if ``sampling_rate = None``. filter : FilterIIR Filter object. Only returned if ``signal = None``. References ---------- .. [#] https://github.com/spatialaudio/digital-signal-processing-lecture/\ blob/master/filter_design/audiofilter.py """ output = _shelve( signal, frequency, gain, order, shelve_type, sampling_rate, 'low') return output
130fd593988d1fd0b85795389dab554d59fedb97
21,352
from typing import Union import cv2 import numpy as np def get_breast_zone(mask: np.ndarray, convex_contour: bool = False) -> Union[np.ndarray, tuple]: """ Gets the breast region of an image from the largest area contained in a mask. :param mask: mask over which the search for contours and the largest regions is performed. :param convex_contour: boolean to apply convex contours. :return: mask containing the largest-area contour, together with the x and y vertex and the width and height of the rectangle that bounds the largest-area region of the mask. """ # Get the contours of the white regions of the image. contours = get_contours(img=mask) # Get the largest contour based on the area it encloses largest_countour = sorted(contours, key=cv2.contourArea, reverse=True)[0] # Adjust the contours if convex contours were requested. if convex_contour: largest_countour = cv2.convexHull(largest_countour) # Build the mask from the obtained area and contour. breast_zone = cv2.drawContours( image=np.zeros(mask.shape, np.uint8), contours=[largest_countour], contourIdx=-1, color=(255, 255, 255), thickness=-1 ) # Get the rectangle that bounds the breast x, y, w, h = cv2.boundingRect(largest_countour) return breast_zone, (x, y, w, h)
429344c0645fa7bcfa49abcaf9b022f61c48bc35
21,353
def replace(temporaryans, enterword, answer): """ :param temporaryans: str, temporary answer. :param enterword: str, the character that user guesses. :param answer: str, the answer for this hangman game. :return: str, the temporary answer after hyphens replacement. """ # s = replace('-----', 'A', answer) while True: i = answer.find(enterword) if i >= 0: y = temporaryans[:i] # --- y += enterword # ---A y += temporaryans[i+1:] # ---A- temporaryans = y answer = answer[:i] + '-' + answer[i+1:] else: ans = temporaryans break return ans
80d8625dca573744e9945190ee169438754b1829
21,354
def extract_timestamp(line): """Extract timestamp and convert to a form that gives the expected result in a comparison """ # return unixtime value return line.split('\t')[6]
84618f02e4116c70d9f6a1518aafb0691a29ef07
21,355
def svn_stream_from_stringbuf(*args): """svn_stream_from_stringbuf(svn_stringbuf_t str, apr_pool_t pool) -> svn_stream_t""" return _core.svn_stream_from_stringbuf(*args)
9710061adb6d80527a3f3afa84bf41e0fa6406c6
21,356
def get_autoencoder_model(hidden_units, target_predictor_fn, activation, add_noise=None, dropout=None): """Returns a function that creates a Autoencoder TensorFlow subgraph. Args: hidden_units: List of values of hidden units for layers. target_predictor_fn: Function that will predict target from input features. This can be logistic regression, linear regression or any other model, that takes x, y and returns predictions and loss tensors. activation: activation function used to map inner latent layer onto reconstruction layer. add_noise: a function that adds noise to tensor_in, e.g. def add_noise(x): return(x + np.random.normal(0, 0.1, (len(x), len(x[0])))) dropout: When not none, causes dropout regularization to be used, with the specified probability of removing a given coordinate. Returns: A function that creates the subgraph. """ def dnn_autoencoder_estimator(x): """Autoencoder estimator with target predictor function on top.""" encoder, decoder = autoencoder_ops.dnn_autoencoder( x, hidden_units, activation, add_noise=add_noise, dropout=dropout) return encoder, decoder, target_predictor_fn(x, decoder) return dnn_autoencoder_estimator
88c58b2c43c26aa8e71baf684688d27db251cdb6
21,357
def plot_histogram(ax,values,bins,colors='r',log=False,xminmax=None): """ plot 1 histogram """ #print (type(values)) ax.hist(values, histtype="bar", bins=bins,color=colors,log=log, alpha=0.8, density=False, range=xminmax) # Add a small annotation. # ax.annotate('Annotation', xy=(0.25, 4.25), # xytext=(0.9, 0.9), textcoords=ax.transAxes, # va="top", ha="right", # bbox=dict(boxstyle="round", alpha=0.2), # arrowprops=dict( # arrowstyle="->", # connectionstyle="angle,angleA=-95,angleB=35,rad=10"), # ) return ax
d11e89c005275a176fd00d0e2ac5173ee8f490b1
21,358
def build_model(): """Build the model. Returns ------- tensorflow.keras.Model The model. """ input_x = tf.keras.Input( shape=(30,), name='input_x' ) # shape does not include the batch size. layer1 = tf.keras.layers.Dense(5, activation=tf.keras.activations.tanh) layer2 = tf.keras.layers.Dense( 1, activation=tf.keras.activations.sigmoid, name='output_layer' ) h = layer1(input_x) output = layer2(h) return tf.keras.Model(inputs=[input_x], outputs=[output])
81e2ee2533903beaa4a087613e63ea383d8a746b
21,359
import torch def evaluate_generator(generator, backbone_pool, lookup_table, CONFIG, device, val=True): """ Evaluate kendetall and hardware constraint loss of generator """ total_loss = 0 evaluate_metric = {"gen_macs":[], "true_macs":[]} for mac in range(CONFIG.low_macs, CONFIG.high_macs, 10): hardware_constraint = torch.tensor(mac, dtype=torch.float32) hardware_constraint = hardware_constraint.view(-1, 1) hardware_constraint = hardware_constraint.to(device) backbone = backbone_pool.get_backbone(hardware_constraint.item()) backbone = backbone.to(device) normalize_hardware_constraint = min_max_normalize(CONFIG.high_macs, CONFIG.low_macs, hardware_constraint) noise = torch.randn(*backbone.shape) noise = noise.to(device) noise *= 0 arch_param = generator(backbone, normalize_hardware_constraint, noise) arch_param = lookup_table.get_validation_arch_param(arch_param) layers_config = lookup_table.decode_arch_param(arch_param) print(layers_config) gen_mac = lookup_table.get_model_macs(arch_param) hc_loss = cal_hc_loss(gen_mac.cuda(), hardware_constraint.item(), CONFIG.alpha, CONFIG.loss_penalty) evaluate_metric["gen_macs"].append(gen_mac.item()) evaluate_metric["true_macs"].append(mac) total_loss += hc_loss.item() tau, _ = stats.kendalltau(evaluate_metric["gen_macs"], evaluate_metric["true_macs"]) return evaluate_metric, total_loss, tau
53941e29cae9a89c46ce598291638d7df28db4ff
21,360
import os import pickle def load_data(config, var_mode): """Main data loading routine""" print("Loading {} data".format(var_mode)) # use only the first two characters for shorter abbrv var_mode = var_mode[:2] # Now load data. var_name_list = [ "xs", "ys", "Rs", "ts", "img1s", "cx1s", "cy1s", "f1s", "img2s", "cx2s", "cy2s", "f2s", ] data_folder = config.data_dump_prefix if config.use_lift: data_folder += "_lift" # Let's unpickle and save data data = {} data_names = getattr(config, "data_" + var_mode) data_names = data_names.split(".") for data_name in data_names: cur_data_folder = "/".join([ data_folder, data_name, "numkp-{}".format(config.obj_num_kp), "nn-{}".format(config.obj_num_nn), ]) if not config.data_crop_center: cur_data_folder = os.path.join(cur_data_folder, "nocrop") suffix = "{}-{}".format( var_mode, getattr(config, "train_max_" + var_mode + "_sample") ) cur_folder = os.path.join(cur_data_folder, suffix) ready_file = os.path.join(cur_folder, "ready") if not os.path.exists(ready_file): # data_gen_lock.unlock() raise RuntimeError("Data is not prepared!") for var_name in var_name_list: cur_var_name = var_name + "_" + var_mode in_file_name = os.path.join(cur_folder, cur_var_name) + ".pkl" with open(in_file_name, "rb") as ifp: if var_name in data: data[var_name] += pickle.load(ifp) else: data[var_name] = pickle.load(ifp) return data
9fcc7138494ed3e91567364fe3fa968d44ec69ba
21,361
def getCameras(): """Return a list of cameras in the current maya scene.""" return cmds.listRelatives(cmds.ls(type='camera'), p=True)
a3a6f202250f92c1cab46df92c78b53b15fd5cae
21,362
import re def convert_quotes(text): """ Convert quotes in *text* into HTML curly quote entities. >>> print(convert_quotes('"Isn\\'t this fun?"')) &#8220;Isn&#8217;t this fun?&#8221; """ punct_class = r"""[!"#\$\%'()*+,-.\/:;<=>?\@\[\\\]\^_`{|}~]""" # Special case if the very first character is a quote # followed by punctuation at a non-word-break. Close the quotes by brute # force: text = re.sub(r"""^'(?=%s\\B)""" % (punct_class,), '&#8217;', text) text = re.sub(r"""^"(?=%s\\B)""" % (punct_class,), '&#8221;', text) # Special case for double sets of quotes, e.g.: # <p>He said, "'Quoted' words in a larger quote."</p> text = re.sub(r""""'(?=\w)""", '&#8220;&#8216;', text) text = re.sub(r"""'"(?=\w)""", '&#8216;&#8220;', text) # Special case for decade abbreviations (the '80s): text = re.sub(r"""\b'(?=\d{2}s)""", '&#8217;', text) close_class = r'[^\ \t\r\n\[\{\(\-]' dec_dashes = '&#8211;|&#8212;' # Get most opening single quotes: opening_single_quotes_regex = re.compile(r""" ( \s | # a whitespace char, or &nbsp; | # a non-breaking space entity, or -- | # dashes, or &[mn]dash; | # named dash entities %s | # or decimal entities &\#x201[34]; # or hex ) ' # the quote (?=\w) # followed by a word character """ % (dec_dashes,), re.VERBOSE) text = opening_single_quotes_regex.sub(r'\1&#8216;', text) closing_single_quotes_regex = re.compile(r""" (%s) ' (?!\s | s\b | \d) """ % (close_class,), re.VERBOSE) text = closing_single_quotes_regex.sub(r'\1&#8217;', text) closing_single_quotes_regex = re.compile(r""" (%s) ' (\s | s\b) """ % (close_class,), re.VERBOSE) text = closing_single_quotes_regex.sub(r'\1&#8217;\2', text) # Any remaining single quotes should be opening ones: text = re.sub("'", '&#8216;', text) # Get most opening double quotes: opening_double_quotes_regex = re.compile(r""" ( \s | # a whitespace char, or &nbsp; | # a non-breaking space entity, or -- | # dashes, or &[mn]dash; | # named dash entities %s | # or decimal entities &\#x201[34]; # or hex ) " # the quote (?=\w) # followed by a word character """ % (dec_dashes,), re.VERBOSE) text = opening_double_quotes_regex.sub(r'\1&#8220;', text) # Double closing quotes: closing_double_quotes_regex = re.compile(r""" #(%s)? # character that indicates the quote should be closing " (?=\s) """ % (close_class,), re.VERBOSE) text = closing_double_quotes_regex.sub('&#8221;', text) closing_double_quotes_regex = re.compile(r""" (%s) # character that indicates the quote should be closing " """ % (close_class,), re.VERBOSE) text = closing_double_quotes_regex.sub(r'\1&#8221;', text) # Any remaining quotes should be opening ones. text = re.sub('"', '&#8220;', text) return text
82b5cabc2f4b77f5c39ab785c02e04ff4ca4f517
21,363
import warnings def time_shift(signal, n_samples_shift, circular_shift=True, keepdims=False): """Shift a signal in the time domain by n samples. This function will perform a circular shift by default, inherently assuming that the signal is periodic. Use the option `circular_shift=False` to pad with nan values instead. Notes ----- This function is primarily intended to be used when processing impulse responses. Parameters ---------- signal : ndarray, float Signal to be shifted n_samples_shift : integer Number of samples by which the signal should be shifted. A negative number of samples will result in a left-shift, while a positive number of samples will result in a right shift of the signal. circular_shift : bool, True Perform a circular or non-circular shift. If a non-circular shift is performed, the data will be padded with nan values at the respective beginning or ending of the data, corresponding to the number of samples the data is shifted. keepdims : bool, False Do not squeeze the data before returning. Returns ------- shifted_signal : ndarray, float Shifted input signal """ n_samples_shift = np.asarray(n_samples_shift, dtype=np.int) if np.any(signal.shape[-1] < n_samples_shift): msg = "Shifting by more samples than length of the signal." if circular_shift: warnings.warn(msg, UserWarning) else: raise ValueError(msg) signal = np.atleast_2d(signal) n_samples = signal.shape[-1] signal_shape = signal.shape signal = np.reshape(signal, (-1, n_samples)) n_channels = np.prod(signal.shape[:-1]) if n_samples_shift.size == 1: n_samples_shift = np.broadcast_to(n_samples_shift, n_channels) elif n_samples_shift.size == n_channels: n_samples_shift = np.reshape(n_samples_shift, n_channels) else: raise ValueError("The number of shift samples has to match the number \ of signal channels.") shifted_signal = signal.copy() for channel in range(n_channels): shifted_signal[channel, :] = \ np.roll( shifted_signal[channel, :], n_samples_shift[channel], axis=-1) if not circular_shift: if n_samples_shift[channel] < 0: # index is negative, so index will reference from the # end of the array shifted_signal[channel, n_samples_shift[channel]:] = np.nan else: # index is positive, so index will reference from the # start of the array shifted_signal[channel, :n_samples_shift[channel]] = np.nan shifted_signal = np.reshape(shifted_signal, signal_shape) if not keepdims: shifted_signal = np.squeeze(shifted_signal) return shifted_signal
f5017a5b9988ff5dc10e49b1f2d4127293564607
21,364
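A brief usage sketch of the time_shift helper above; it assumes numpy is importable as np and that np.int, which the function uses internally, is still available in the installed NumPy version:

import numpy as np

# Hypothetical example: shift a 4-sample impulse response right by one sample.
ir = np.array([1.0, 0.5, 0.25, 0.125])
time_shift(ir, 1)                          # circular: [0.125, 1.0, 0.5, 0.25]
time_shift(ir, 1, circular_shift=False)    # padded:   [nan, 1.0, 0.5, 0.25]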
def get_avgerr(l1_cols_train,l2_cols_train,own_cols_xgb,own_cols_svm,own_cols_bay,own_cols_adab,own_cols_lass,df_train,df_test,experiment,fold_num=0): """ Use mae as an evaluation metric and extract the appropiate columns to calculate the metric Parameters ---------- l1_cols_train : list list with names for the Layer 1 training columns l2_cols_train : list list with names for the Layer 2 training columns own_cols_xgb : list list with names for the Layer 1 xgb columns own_cols_svm : list list with names for the Layer 1 svm columns own_cols_bay : list list with names for the Layer 1 brr columns own_cols_adab : list list with names for the Layer 1 adaboost columns own_cols_lass : list list with names for the Layer 1 lasso columns df_train : pd.DataFrame dataframe for training predictions df_test : pd.DataFrame dataframe for testing predictions experiment : str dataset name fold_num : int number for the fold Returns ------- float best mae for Layer 1 float best mae for Layer 2 float best mae for Layer 3 float best mae for all layers float mae for xgb float mae for svm float mae for brr float mae for adaboost float mae for lasso list selected predictions Layer 2 list error for the selected predictions Layer 2 float train mae for Layer 3 """ # Get the mae l1_scores = [x/float(len(df_train["time"])) for x in list(df_train[l1_cols_train].sub(df_train["time"].squeeze(),axis=0).apply(abs).apply(sum,axis="rows"))] l2_scores = [x/float(len(df_train["time"])) for x in list(df_train[l2_cols_train].sub(df_train["time"].squeeze(),axis=0).apply(abs).apply(sum,axis="rows"))] own_scores_xgb = [x/float(len(df_train["time"])) for x in list(df_train[own_cols_xgb].sub(df_train["time"].squeeze(),axis=0).apply(abs).apply(sum,axis="rows"))] own_scores_svm = [x/float(len(df_train["time"])) for x in list(df_train[own_cols_svm].sub(df_train["time"].squeeze(),axis=0).apply(abs).apply(sum,axis="rows"))] own_scores_bay = [x/float(len(df_train["time"])) for x in list(df_train[own_cols_bay].sub(df_train["time"].squeeze(),axis=0).apply(abs).apply(sum,axis="rows"))] own_scores_lass = [x/float(len(df_train["time"])) for x in list(df_train[own_cols_lass].sub(df_train["time"].squeeze(),axis=0).apply(abs).apply(sum,axis="rows"))] own_scores_adab = [x/float(len(df_train["time"])) for x in list(df_train[own_cols_adab].sub(df_train["time"].squeeze(),axis=0).apply(abs).apply(sum,axis="rows"))] own_scores_l2 = [x/float(len(df_train["time"])) for x in list(df_train[l2_cols_train].sub(df_train["time"].squeeze(),axis=0).apply(abs).apply(sum,axis="rows"))] selected_col_l1 = l1_cols_train[l1_scores.index(min(l1_scores))] selected_col_l2 = l2_cols_train[l2_scores.index(min(l2_scores))] # Set mae to 0.0 if not able to get column try: selected_col_own_xgb = own_cols_xgb[own_scores_xgb.index(min(own_scores_xgb))] except KeyError: selected_col_own_xgb = 0.0 try: selected_col_own_svm = own_cols_svm[own_scores_svm.index(min(own_scores_svm))] except KeyError: selected_col_own_svm = 0.0 try: selected_col_own_bay = own_cols_bay[own_scores_bay.index(min(own_scores_bay))] except KeyError: selected_col_own_bay = 0.0 try: selected_col_own_lass = own_cols_lass[own_scores_lass.index(min(own_scores_lass))] except KeyError: selected_col_own_lass = 0.0 try: selected_col_own_adab = own_cols_adab[own_scores_adab.index(min(own_scores_adab))] except KeyError: selected_col_own_adab = 0.0 # Remove problems with seemingly duplicate columns getting selected try: cor_l1 = sum(map(abs,df_test["time"]-df_test[selected_col_l1]))/len(df_test["time"]) except KeyError: 
selected_col_l1 = selected_col_l1.split(".")[0] cor_l1 = sum(map(abs,df_test["time"]-df_test[selected_col_l1]))/len(df_test["time"]) try: cor_l2 = sum(map(abs,df_test["time"]-df_test[selected_col_l2]))/len(df_test["time"]) except KeyError: selected_col_l2 = selected_col_l2.split(".")[0] cor_l2 = sum(map(abs,df_test["time"]-df_test[selected_col_l2]))/len(df_test["time"]) try: cor_own_xgb = sum(map(abs,df_test["time"]-df_test[selected_col_own_xgb]))/len(df_test["time"]) except KeyError: selected_col_own_xgb = selected_col_own_xgb.split(".")[0] cor_own_xgb = sum(map(abs,df_test["time"]-df_test[selected_col_own_xgb]))/len(df_test["time"]) try: cor_own_svm = sum(map(abs,df_test["time"]-df_test[selected_col_own_svm]))/len(df_test["time"]) except KeyError: selected_col_own_svm = selected_col_own_svm.split(".")[0] cor_own_svm = sum(map(abs,df_test["time"]-df_test[selected_col_own_svm]))/len(df_test["time"]) try: cor_own_bay = sum(map(abs,df_test["time"]-df_test[selected_col_own_bay]))/len(df_test["time"]) except KeyError: selected_col_own_bay = selected_col_own_bay.split(".")[0] cor_own_bay = sum(map(abs,df_test["time"]-df_test[selected_col_own_bay]))/len(df_test["time"]) try: cor_own_lass = sum(map(abs,df_test["time"]-df_test[selected_col_own_lass]))/len(df_test["time"]) except KeyError: selected_col_own_lass = selected_col_own_lass.split(".")[0] cor_own_lass = sum(map(abs,df_test["time"]-df_test[selected_col_own_lass]))/len(df_test["time"]) try: cor_own_adab = sum(map(abs,df_test["time"]-df_test[selected_col_own_adab]))/len(df_test["time"]) except KeyError: selected_col_own_adab = selected_col_own_adab.split(".")[0] cor_own_adab = sum(map(abs,df_test["time"]-df_test[selected_col_own_adab]))/len(df_test["time"]) cor_l3 = sum(map(abs,df_test["time"]-df_test["preds"]))/len(df_test["time"]) # Variables holding all predictions across experiments all_preds_l1.extend(zip(df_test["time"],df_test[selected_col_l1],[experiment]*len(df_test[selected_col_l1]),[len(df_train.index)]*len(df_test[selected_col_l1]),[fold_num]*len(df_test[selected_col_l1]),df_test[selected_col_own_xgb],df_test[selected_col_own_bay],df_test[selected_col_own_lass],df_test[selected_col_own_adab])) all_preds_l2.extend(zip(df_test["time"],df_test[selected_col_l2],[experiment]*len(df_test[selected_col_l2]),[len(df_train.index)]*len(df_test[selected_col_l2]),[fold_num]*len(df_test[selected_col_l2]))) all_preds_l3.extend(zip(df_test["time"],df_test["preds"],[experiment]*len(df_test["preds"]),[len(df_train.index)]*len(df_test["preds"]),[fold_num]*len(df_test["preds"]))) # Also get the mae for the training models train_cor_l1 = sum(map(abs,df_train["time"]-df_train[selected_col_l1]))/len(df_train["time"]) train_cor_l2 = sum(map(abs,df_train["time"]-df_train[selected_col_l2]))/len(df_train["time"]) train_cor_l3 = sum(map(abs,df_train["time"]-df_train["preds"]))/len(df_train["time"]) print() print("Error l1: %s,%s" % (train_cor_l1,cor_l1)) print("Error l2: %s,%s" % (train_cor_l2,cor_l2)) print("Error l3: %s,%s" % (train_cor_l3,cor_l3)) print(selected_col_l1,selected_col_l2,selected_col_own_xgb) print() print() print("-------------") # Try to select the best Layer, this becomes Layer 4 cor_l4 = 0.0 if (train_cor_l1 < train_cor_l2) and (train_cor_l1 < train_cor_l3): cor_l4 = cor_l1 elif (train_cor_l2 < train_cor_l1) and (train_cor_l2 < train_cor_l3): cor_l4 = cor_l2 else: cor_l4 = cor_l3 
return(cor_l1,cor_l2,cor_l3,cor_l4,cor_own_xgb,cor_own_svm,cor_own_bay,cor_own_adab,cor_own_lass,list(df_test[selected_col_l2]),list(df_test["time"]-df_test[selected_col_l2]),train_cor_l3)
74bfe9ce91f04a2ce3955098f9d1145c5c60ef4a
21,365
from operator import and_


def get_repository_metadata_by_changeset_revision( trans, id, changeset_revision ):
    """Get metadata for a specified repository change set from the database."""
    # Make sure there are no duplicate records, and return the single unique record for the changeset_revision.  Duplicate records were somehow
    # created in the past.  The cause of this issue has been resolved, but we'll leave this method as is for a while longer to ensure all duplicate
    # records are removed.
    all_metadata_records = trans.sa_session.query( trans.model.RepositoryMetadata ) \
                                           .filter( and_( trans.model.RepositoryMetadata.table.c.repository_id == trans.security.decode_id( id ),
                                                          trans.model.RepositoryMetadata.table.c.changeset_revision == changeset_revision ) ) \
                                           .order_by( trans.model.RepositoryMetadata.table.c.update_time.desc() ) \
                                           .all()
    if len( all_metadata_records ) > 1:
        # Delete all records older than the last one updated.
        for repository_metadata in all_metadata_records[ 1: ]:
            trans.sa_session.delete( repository_metadata )
            trans.sa_session.flush()
        return all_metadata_records[ 0 ]
    elif all_metadata_records:
        return all_metadata_records[ 0 ]
    return None
33f5da869f8fde08e2f83d7a60a708e4848664a1
21,366
import pickle import gc def generate_encounter_time(t_impact=0.495*u.Gyr, graph=False): """Generate fiducial model at t_impact after the impact""" # impact parameters M = 5e6*u.Msun rs = 10*u.pc # impact parameters Tenc = 0.01*u.Gyr dt = 0.05*u.Myr # potential parameters potential = 3 Vh = 225*u.km/u.s q = 1*u.Unit(1) rhalo = 0*u.pc par_pot = np.array([Vh.to(u.m/u.s).value, q.value, rhalo.to(u.m).value]) pkl = pickle.load(open('../data/fiducial_at_encounter.pkl', 'rb')) model = pkl['model'] xsub = pkl['xsub'] vsub = pkl['vsub'] # generate perturbed stream model potential_perturb = 2 par_perturb = np.array([M.to(u.kg).value, rs.to(u.m).value, 0, 0, 0]) #print(vsub.si, par_perturb) x1, x2, x3, v1, v2, v3 = interact.general_interact(par_perturb, xsub.to(u.m).value, vsub.to(u.m/u.s).value, Tenc.to(u.s).value, t_impact.to(u.s).value, dt.to(u.s).value, par_pot, potential, potential_perturb, model.x.to(u.m).value, model.y.to(u.m).value, model.z.to(u.m).value, model.v_x.to(u.m/u.s).value, model.v_y.to(u.m/u.s).value, model.v_z.to(u.m/u.s).value) stream = {} stream['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc) stream['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s) c = coord.Galactocentric(x=stream['x'][0], y=stream['x'][1], z=stream['x'][2], v_x=stream['v'][0], v_y=stream['v'][1], v_z=stream['v'][2], **gc_frame_dict) cg = c.transform_to(gc.GD1) wangle = 180*u.deg if graph: plt.close() plt.figure(figsize=(10,5)) plt.plot(cg.phi1.wrap_at(180*u.deg), cg.phi2, 'k.', ms=1) plt.xlim(-80,0) plt.ylim(-10,10) plt.tight_layout() return cg
0871e3b6f09e9bf1154182a3d0a24713a90f2fbb
21,367
def get_census_centroid(census_tract_id): """ Gets a pair of decimal coordinates representing the geographic center (centroid) of the requested census tract. :param census_tract_id: :return: """ global _cached_centroids if census_tract_id in _cached_centroids: return _cached_centroids[census_tract_id] tracts = census_tracts_db.as_dictionary() for tract in tracts: if tract_id_equals(census_tract_id, tract[census_tracts_db.ROW_GEOID]): _cached_centroids[census_tract_id] = float(tract[census_tracts_db.ROW_LATITUDE]), float(tract[census_tracts_db.ROW_LONGITUDE]) return _cached_centroids[census_tract_id]
ba3dde30ce9bd3eab96f8419580edfda051c5564
21,368
def configure_context(args: Namespace, layout: Layout, stop_event: Event) -> Context: """Creates the application context, manages state""" context = Context(args.file) context.layout = layout sensors = Sensors(context, stop_event) context.sensors = sensors listener = KeyListener(context.on_key, stop_event, sensors.get_lock()) context.listener = listener context.change_state("normal") context.load_config() return context
b24ee704939cf3f02774b6fe3c9399042247500a
21,369
def offsetEndpoint(points, distance, beginning=True): """ Pull back end point of way in order to create VISSIM intersection. Input: list of nodes, distance, beginning or end of link Output: transformed list of nodes """ if beginning: a = np.array(points[1], dtype='float') b = np.array(points[0], dtype='float') if not beginning: a = np.array(points[-2], dtype='float') b = np.array(points[-1], dtype='float') if np.sqrt(sum((b-a)**2)) < distance: distance = np.sqrt(sum((b-a)**2)) * 0.99 db = (b-a) / np.linalg.norm(b-a) * distance return b - db
a6733d5670221fbd14b527d63430ebd94e022a5a
21,370
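A minimal sketch of offsetEndpoint in use, assuming the function and a numpy import as above; the coordinates are made up for illustration:

import numpy as np

# Pull the final node of a straight two-node way back by 1 unit toward its neighbour.
way = [(0.0, 0.0), (10.0, 0.0)]
offsetEndpoint(way, distance=1.0, beginning=False)   # -> array([9., 0.])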
def _remove_parenthesis(word): """ Examples -------- >>> _remove_parenthesis('(ROMS)') 'ROMS' """ try: return word[word.index("(") + 1 : word.rindex(")")] except ValueError: return word
f47cce7985196b1a9a12284e888b4097b26c32f4
21,371
def check_closed(f): """Decorator that checks if connection/cursor is closed.""" def g(self, *args, **kwargs): if self.closed: raise exceptions.Error(f"{self.__class__.__name__} already closed") return f(self, *args, **kwargs) return g
fcb7f8399ae759d644e47b6f8e8a6b887d9315fc
21,372
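A hedged usage sketch of the check_closed decorator above; the exceptions module it references is not shown, so a stand-in Error class is stubbed here for illustration only, assuming everything lives in one module:

class exceptions:                      # stand-in for the real exceptions module
    class Error(Exception):
        pass


class Cursor:
    def __init__(self):
        self.closed = False

    @check_closed
    def fetchall(self):
        return []


cur = Cursor()
cur.fetchall()        # works while the cursor is open
cur.closed = True
# cur.fetchall()      # would now raise exceptions.Error("Cursor already closed")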
def get_box_filter(b: float, b_list: np.ndarray, width: float) -> np.ndarray: """ Returns the values of a box function filter centered on b, with specified width. """ return np.heaviside(width/2-np.abs(b_list-b), 1)
2885133af9f179fa5238d4fc054abfd48f317709
21,373
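A quick numerical sketch of the box filter above, assuming numpy as np and the function as defined:

import numpy as np

b_list = np.linspace(-2, 2, 5)             # [-2, -1, 0, 1, 2]
get_box_filter(0.0, b_list, width=2.0)     # -> array([0., 1., 1., 1., 0.])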
def search(ra=None, dec=None, radius=None, columns=None, offset=None, limit=None, orderby=None):
    """Creates a query for the Carpyncho database; you can specify a cone search
    (ra, dec, radius), the columns to return, and offset/limit/orderby options."""
    query = CarpynchoQuery(ra, dec, radius, columns, offset, limit, orderby)
    return query
aad189695ef93e44aa455635dae2791843f7d174
21,374
def repetitions(seq: str) -> int: """ [Easy] https://cses.fi/problemset/task/1069/ [Solution] https://cses.fi/paste/659d805082c50ec1219667/ You are given a DNA sequence: a string consisting of characters A, C, G, and T. Your task is to find the longest repetition in the sequence. This is a maximum-length substring containing only one type of character. The only input line contains a string of n characters. Print one integer, the length of the longest repetition. Constraints: 1 ≤ n ≤ 10^6 Example Input: ATTCGGGA Output: 3 """ res, cur = 0, 0 fir = '' for ch in seq: if ch == fir: cur += 1 else: res = max(res, cur) fir = ch cur = 1 return max(res, cur)
4dde2ec4a6cd6b13a54c2eafe4e8db0d87381faa
21,375
from uuid import getnode as getmac from gmusicapi.utils import utils import netifaces def get_gmusicmanager( useMobileclient = False, verify = True, device_id = None ): """ Returns a GmusicAPI_ manager used to perform operations on one's `Google Play Music`_ account. If the Musicmanager is instantiated but cannot find the device (hence properly authorize for operation), then the attribute ``error_device_ids`` is a non-empty :py:class:`set` of valid device IDs. :param bool useMobileClient: optional argument. If ``True``, use the :py:class:`MobileClient <gmusicapi.MobileClient>` manager, otherwise use the :py:class:`Musicmanager <gmusicapi.MusicManager>` manager. Default is ``False``. :param bool verify: optional argument, whether to verify SSL connections. Default is ``True``. :param str device_id: optional argument. If defined, then attempt to use this MAC ID to register the music manager. :raise ValueError: if cannot instantiate the Musicmanager. :raise AssertionError: if cannot get machine's MAC id. .. seealso:: :py:meth:`gmusicmanager <howdy.music.music.gmusicmanager>`. """ # ## first copy this code from gmusic.mobileclient ## because base method to determine device id by gmusicapi fails when cannot be found def return_deviceid( replace_colons = True ): try: mac_int = getmac( ) if (mac_int >> 40) % 2: raise OSError("a valid MAC could not be determined." " Provide an android_id (and be" " sure to provide the same one on future runs).") device_id = utils.create_mac_string( mac_int ) if replace_colons: return device_id.replace( ':', '' ) else: return device_id except Exception: pass try: valid_ifaces = list( filter(lambda iface: iface.lower( ) != 'lo', netifaces.interfaces( ) ) ) if len( valid_ifaces ) == 0: return None valid_iface = max( valid_ifaces ) iface_tuples = netifaces.ifaddresses( valid_iface )[ netifaces.AF_LINK ] if len( iface_tuples ) == 0: return None hwaddr = max( iface_tuples )[ 'addr' ].upper( ) if replace_colons: return hwaddr.replace(':', '') else: return hwaddr except Exception: return None if not useMobileclient: if device_id is None: device_id = return_deviceid( replace_colons = False ) assert( device_id is not None ), "error, could not determine the local MAC id" mmg = gmusicapi.Musicmanager( debug_logging = False, verify_ssl = verify ) credentials = core.oauthGetOauth2ClientGoogleCredentials( ) if credentials is None: raise ValueError( "Error, do not have Google Music credentials." ) mmg.login( oauth_credentials = credentials, uploader_id = device_id ) mmg.error_device_ids = { } else: if device_id is None: device_id = return_deviceid( ) assert( device_id is not None ), "error, could not determine the local MAC id" mmg = gmusicapi.Mobileclient( debug_logging = False, verify_ssl = verify ) credentials = oauth_get_google_credentials( ) if credentials is None: raise ValueError( "Error, do not have GMusicAPI Mobileclient credentials." ) try: mmg.oauth_login( oauth_credentials = credentials, device_id = device_id ) mmg.error_device_ids = { } except gmusicapi.exceptions.InvalidDeviceId as exc: # tack on some error messages mmg.error_device_ids = set( exc.valid_device_ids ) return mmg
546567e498a183e289f9b77931e10c6114121bc2
21,376
def measure(data, basis, gaussian=0, poisson=0):
    """Function computes the dot product <x,phi> for a given measurement basis phi
    Args:
        - data (n-size, numpy 1D array): the initial, uncompressed data
        - basis (nxm numpy 2D array): the measurement basis
    Returns:
        - An m-sized numpy 1D array containing the dot products"""
    data = np.float_(data)
    if gaussian != 0 or poisson != 0:
        # Create the original matrix
        data = np.repeat([data], basis.shape[0], 0)
    if gaussian != 0:
        # Add Gaussian noise
        data += np.random.normal(scale=gaussian, size=data.shape)
    if poisson != 0:
        data = np.float_(np.random.poisson(np.abs(data)))
    if gaussian != 0 or poisson != 0:
        return np.diag((data).dot(basis.transpose()))
    else:
        return (data).dot(basis.transpose())
0a25ea52a67441972b65cdad7c76cb772ec6bc6d
21,377
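An illustrative noise-free call of measure above (assumes numpy as np); with gaussian=0 and poisson=0 it reduces to a plain matrix-vector product against the measurement basis:

import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0])
basis = np.array([[1.0, 0.0, 0.0, 0.0],
                  [0.5, 0.5, 0.5, 0.5]])
measure(x, basis)    # -> array([1., 5.])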
def getUserByMail(email):
    """Get User by mail."""
    try:
        user = db_session.query(User).filter_by(email=email).one()
        return user
    except Exception:
        return None
423d5dc969d43e0f4a1aafc51b5a05671a1fc3e1
21,378
def parse_headers(headers, data): """ Given a header structure and some data, parse the data as headers. """ return {k: f(v) for (k, (f, _), _), v in zip(headers, data)}
456c2ab2d2f7832076a7263be8815b9abeec56dd
21,379
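A hypothetical sketch of the header structure parse_headers expects; the (name, (decode_fn, encode_fn), doc) tuple layout is an assumption inferred from how the comprehension unpacks each entry:

headers = [
    ("length", (int, str), "payload length"),
    ("flags", (lambda v: v.split(","), None), "comma-separated flags"),
]
parse_headers(headers, ["42", "a,b"])
# -> {"length": 42, "flags": ["a", "b"]}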
def dataset_parser(value, A): """Parse an ImageNet record from a serialized string Tensor.""" # return value[:A.shape[0]], value[A.shape[0]:] return value[:A.shape[0]], value
0b07b6eec9e3e23f470970c489ad83c416d650e7
21,380
def default_csv_file(): """ default name for csv files """ return 'data.csv'
e0a1267e1e8e463d435f3116e970132c4eab949d
21,381
import sys


def get_gc_alt(alt, unit='km'):
    """
    Return index of nearest altitude (km) of GEOS-Chem box (global value)
    """
    if unit == 'km':
        alt_c = gchemgrid('c_km_geos5_r')
    elif unit == 'hPa':
        alt_c = gchemgrid('c_hPa_geos5')
    else:
        err_str = 'No case setup for altitude unit ({})'.format(unit)
        sys.exit(err_str)
    return find_nearest(alt_c, alt)
47744e39b3bcfecd2b3be60f71ad6de8eb85722a
21,382
def download(object_client, project_id, datasets_path): """Download the contents of file from the object store. Parameters ---------- object_client : faculty.clients.object.ObjectClient project_id : uuid.UUID datasets_path : str The target path to download to in the object store Returns ------- bytes The content of the file """ chunk_generator = download_stream(object_client, project_id, datasets_path) return b"".join(chunk_generator)
91da7409b4cc518d87b6502e193a4174c045be0e
21,383
from pathlib import Path from typing import Any def get_assets_of_dataset( db: Session = Depends(deps.get_db), dataset_id: int = Path(..., example="12"), offset: int = 0, limit: int = settings.DEFAULT_LIMIT, keyword: str = Query(None), viz_client: VizClient = Depends(deps.get_viz_client), current_user: models.User = Depends(deps.get_current_active_user), current_workspace: models.Workspace = Depends(deps.get_current_workspace), ) -> Any: """ Get asset list of specific dataset, pagination is supported by means of offset and limit """ dataset = crud.dataset.get_with_task(db, user_id=current_user.id, id=dataset_id) if not dataset: raise DatasetNotFound() assets = viz_client.get_assets( user_id=current_user.id, repo_id=current_workspace.hash, # type: ignore branch_id=dataset.task_hash, # type: ignore keyword=keyword, limit=limit, offset=offset, ) result = { "keywords": assets.keywords, "items": assets.items, "total": assets.total, } return {"result": result}
344095c94884059ed37662521f346d6c03bb4c7f
21,384
def check_rule(body, obj, obj_string, rule, only_body): """ Compare the argument with a rule. """ if only_body: # Compare only the body of the rule to the argument retval = (body == rule[2:]) else: retval = ((body == rule[2:]) and (obj == obj_string)) return retval
9237da310ebcc30f623211e659ac2247efb36f69
21,385
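A small sketch of check_rule above; it assumes a rule is a sequence whose first two entries are metadata and whose tail (rule[2:]) is the body being compared:

rule = ["r1", "head", "cond_a", "cond_b"]
check_rule(body=["cond_a", "cond_b"], obj="goal", obj_string="goal",
           rule=rule, only_body=False)    # -> True
check_rule(body=["cond_a"], obj="goal", obj_string="goal",
           rule=rule, only_body=True)     # -> False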
from warnings import warn def pdm_auto_arima(df, target_column, time_column, frequency_data, epochs_to_forecast = 12, d=1, D=0, seasonal=True, m =12, start_p = 2, start_q = 0, max_p=9, max_q=2, start_P = 0, start_Q = 0, max_P = 2, max_Q = 2, validate = False, epochs_to_test = 1): """ This function finds the best order parameters for a SARIMAX model, then makes a forecast Parameters: - df_input (pandas.DataFrame): Input Time Series. - target_column (str): name of the column containing the target feature - time_column (str): name of the column containing the pandas Timestamps - frequency_data (str): string representing the time frequency of record, e.g. "h" (hours), "D" (days), "M" (months) - epochs_to_forecast (int): number of steps for predicting future data - epochs_to_test (int): number of steps corresponding to most recent records to test on - d, D, m, start_p, start_q, max_p, max_q, start_P, start_Q, max_P, max_Q (int): SARIMAX parameters to be set for reseach - seasonal (bool): seasonality flag - validate (bool): if True, epochs_to_test rows are used for validating, else forecast without evaluation Returns: - forecast_df (pandas.DataFrame): Output DataFrame with best forecast found """ assert isinstance(target_column, str) assert isinstance(time_column, str) external_features = [col for col in df if col not in [time_column, target_column]] if epochs_to_test == 0: warn("epochs_to_test=0 and validate=True is not correct, setting validate=False instead") validate = False if frequency_data is not None: df = df.set_index(time_column).asfreq(freq=frequency_data, method="bfill").reset_index() if len(external_features) > 0: #Scaling all exogenous features scaler = MinMaxScaler() scaled = scaler.fit_transform(df.set_index(time_column).drop([target_column], axis = 1).values) train_df = df.dropna() train_df.set_index(time_column, inplace=True) if frequency_data is not None: date = pd.date_range(start=df[time_column].min(), periods=len(train_df)+epochs_to_forecast, freq=frequency_data) else: date = pd.date_range(start=df[time_column].min(), end=df[time_column].max(), periods=len(df)) ### Finding parameter using validation set if validate: train_df_validation = train_df[:-epochs_to_test] if len(external_features) > 0: exog_validation = scaled[:(len(train_df)-epochs_to_test)] model_validation = pmd_arima.auto_arima(train_df_validation[target_column],exogenous = exog_validation, max_order = 30, m=m, d=d,start_p=start_p, start_q=start_q,max_p=max_p, max_q=max_q, # basic polynomial seasonal=seasonal,D=D, start_P=start_P, max_P = max_P,start_Q = start_Q, max_Q= max_Q, #seasonal polynomial trace=False,error_action='ignore', suppress_warnings=True, stepwise=True) exog_validation_forecast = scaled[(len(train_df)-epochs_to_test):len(train_df)] forecast_validation, forecast_validation_ci = model_validation.predict(n_periods = epochs_to_test,exogenous= exog_validation_forecast, return_conf_int=True) validation_df = pd.DataFrame({target_column:train_df[target_column].values[(len(train_df)-epochs_to_test):len(train_df)],'Forecast':forecast_validation}) rmse = np.sqrt(mean_squared_error(validation_df[target_column].values, validation_df.Forecast.values)) print(f'RMSE: {rmse}') exog = scaled[:len(train_df)] model = pmd_arima.ARIMA( order = list(model_validation.get_params()['order']), seasonal_order = list(model_validation.get_params()['seasonal_order']), trace=False,error_action='ignore', suppress_warnings=True) model.fit(y = train_df[target_column],exogenous = exog) training_prediction = 
model.predict_in_sample(exogenous = exog_validation) else: model_validation = pmd_arima.auto_arima(train_df_validation[target_column], max_order = 30, m=m, d=d,start_p=start_p, start_q=start_q,max_p=max_p, max_q=max_q, # basic polynomial seasonal=seasonal,D=D, start_P=start_P, max_P = max_P,start_Q = start_Q, max_Q= max_Q, #seasonal polynomial trace=False,error_action='ignore', suppress_warnings=True, stepwise=True) forecast_validation, forecast_validation_ci = model_validation.predict(n_periods = epochs_to_test, return_conf_int=True) validation_df = pd.DataFrame({target_column:train_df[target_column].values[(len(train_df)-epochs_to_test):len(train_df)],'Forecast':forecast_validation}) rmse = np.sqrt(mean_squared_error(validation_df[target_column].values, validation_df.Forecast.values)) print(f'RMSE: {rmse}') #exog = scaled[:len(train_df)] model = pmd_arima.ARIMA( order = list(model_validation.get_params()['order']), seasonal_order = list(model_validation.get_params()['seasonal_order']), trace=False,error_action='ignore', suppress_warnings=True) model.fit(y = train_df[target_column]) training_prediction = model.predict_in_sample() else: if len(external_features) > 0: #Select exogenous features for training exog = scaled[:len(train_df)] #Search for best model model = pmd_arima.auto_arima(train_df[target_column],exogenous = exog, max_order = 30, m=m, d=d,start_p=start_p, start_q=start_q,max_p=max_p, max_q=max_q, # basic polynomial seasonal=seasonal,D=D, start_P=start_P, max_P = max_P,start_Q = start_Q, max_Q= max_Q, #seasonal polynomial trace=False,error_action='ignore', suppress_warnings=True, stepwise=True) training_prediction = model.predict_in_sample(exogenous = exog) #Training set predictions else: #Search for best model model = pmd_arima.auto_arima(train_df[target_column], max_order = 30, m=m, d=d,start_p=start_p, start_q=start_q,max_p=max_p, max_q=max_q, # basic polynomial seasonal=seasonal,D=D, start_P=start_P, max_P = max_P,start_Q = start_Q, max_Q= max_Q, #seasonal polynomial trace=False,error_action='ignore', suppress_warnings=True, stepwise=True) training_prediction = model.predict_in_sample() #Training set predictions ### Forecasting if len(external_features) > 0: exog_forecast = scaled[len(train_df):len(train_df)+epochs_to_forecast] #Forecast if len(exog_forecast)==0: exog_forecast = np.nan * np.ones((epochs_to_forecast,exog.shape[1])) if epochs_to_forecast > 0: if len(external_features) > 0: forecast, forecast_ci = model.predict(n_periods = len(exog_forecast),exogenous= exog_forecast, return_conf_int=True) else: forecast, forecast_ci = model.predict(n_periods = epochs_to_forecast, return_conf_int=True) #Building output dataset forecast_df=pd.DataFrame() forecast_df[target_column] = df[target_column].values[:len(train_df)+epochs_to_forecast]#df[target_column].values forecast_df['forecast'] = np.nan forecast_df['forecast_up'] = np.nan forecast_df['forecast_low'] = np.nan if validate and epochs_to_forecast > 0: forecast_df['forecast'].iloc[-epochs_to_forecast-epochs_to_test:-epochs_to_forecast] = forecast_validation forecast_df['forecast_up'].iloc[-epochs_to_forecast-epochs_to_test:-epochs_to_forecast] = forecast_validation_ci[:,1] forecast_df['forecast_low'].iloc[-epochs_to_forecast-epochs_to_test:-epochs_to_forecast] = forecast_validation_ci[:,0] elif validate and epochs_to_forecast == 0: forecast_df['forecast'].iloc[-epochs_to_forecast-epochs_to_test:] = forecast_validation forecast_df['forecast_up'].iloc[-epochs_to_forecast-epochs_to_test:] = forecast_validation_ci[:,1] 
forecast_df['forecast_low'].iloc[-epochs_to_forecast-epochs_to_test:] = forecast_validation_ci[:,0] if epochs_to_forecast > 0: forecast_df['forecast'].iloc[-epochs_to_forecast:] = forecast forecast_df['forecast_up'].iloc[-epochs_to_forecast:] = forecast_ci[:,1] forecast_df['forecast_low'].iloc[-epochs_to_forecast:] = forecast_ci[:,0] forecast_df[time_column] = date return forecast_df
d5dd6d8ddf01358cde26f9e467e410419290da2e
21,386
def get_lines(filename): """ Returns a list of lines of a file. Parameters filename : str, name of control file """ with open(filename, "r") as f: lines = f.readlines() return lines
1307b169733b50517b26ecbf0414ca3396475360
21,387
def _check_socket_state(realsock, waitfor="rw", timeout=0.0): """ <Purpose> Checks if the given socket would block on a send() or recv(). In the case of a listening socket, read_will_block equates to accept_will_block. <Arguments> realsock: A real socket.socket() object to check for. waitfor: An optional specifier of what to wait for. "r" for read only, "w" for write only, and "rw" for read or write. E.g. if timeout is 10, and wait is "r", this will block for up to 10 seconds until read_will_block is false. If you specify "r", then write_will_block is always true, and if you specify "w" then read_will_block is always true. timeout: An optional timeout to wait for the socket to be read or write ready. <Returns> A tuple, (read_will_block, write_will_block). <Exceptions> As with select.select(). Probably best to wrap this with _is_recoverable_network_exception and _is_terminated_connection_exception. Throws an exception if waitfor is not in ["r","w","rw"] """ # Check that waitfor is valid if waitfor not in ["rw","r","w"]: raise Exception, "Illegal waitfor argument!" # Array to hold the socket sock_array = [realsock] # Generate the read/write arrays read_array = [] if "r" in waitfor: read_array = sock_array write_array = [] if "w" in waitfor: write_array = sock_array # Call select() (readable, writeable, exception) = select.select(read_array,write_array,sock_array,timeout) # If the socket is in the exception list, then assume its both read and writable if (realsock in exception): return (False, False) # Return normally then return (realsock not in readable, realsock not in writeable)
f4f493f03a2cd824a2bdc343f9367611011558eb
21,388
def str_to_pauli_term(pauli_str: str, qubit_labels=None): """ Convert a string into a pyquil.paulis.PauliTerm. >>> str_to_pauli_term('XY', []) :param str pauli_str: The input string, made of of 'I', 'X', 'Y' or 'Z' :param set qubit_labels: The integer labels for the qubits in the string, given in reverse order. If None, default to the range of the length of pauli_str. :return: the corresponding PauliTerm :rtype: pyquil.paulis.PauliTerm """ if qubit_labels is None: labels_list = [idx for idx in reversed(range(len(pauli_str)))] else: labels_list = sorted(qubit_labels)[::-1] pauli_term = PauliTerm.from_list(list(zip(pauli_str, labels_list))) return pauli_term
3a5be0f84006979f9b7dbb6ed436e98e7554cf68
21,389
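A hedged usage sketch of str_to_pauli_term, assuming pyquil is installed and PauliTerm is imported as above; the comments describe how letters are paired with qubits rather than pyquil's exact printed form:

term = str_to_pauli_term('XY')           # labels default to reversed range: X on qubit 1, Y on qubit 0
term2 = str_to_pauli_term('ZZ', {0, 2})  # explicit labels: Z on qubit 2 and Z on qubit 0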
from typing import Type def _GetNextPartialIdentifierToken(start_token): """Returns the first token having identifier as substring after a token. Searches each token after the start to see if it contains an identifier. If found, token is returned. If no identifier is found returns None. Search is abandoned when a FLAG_ENDING_TYPE token is found. Args: start_token: The token to start searching after. Returns: The token found containing identifier, None otherwise. """ token = start_token.next while token and token.type not in Type.FLAG_ENDING_TYPES: match = javascripttokenizer.JavaScriptTokenizer.IDENTIFIER.search( token.string) if match is not None and token.type == Type.COMMENT: return token token = token.next return None
e486ba1f5e9ee1b2d6c01aa5aa9d5d5270e0db10
21,390
def _challenge_transaction(client_account): """ Generate the challenge transaction for a client account. This is used in `GET <auth>`, as per SEP 10. Returns the XDR encoding of that transaction. """ builder = Builder.challenge_tx( server_secret=settings.STELLAR_ACCOUNT_SEED, client_account_id=client_account, archor_name=ANCHOR_NAME, network=settings.STELLAR_NETWORK, ) builder.sign(secret=settings.STELLAR_ACCOUNT_SEED) envelope_xdr = builder.gen_xdr() return envelope_xdr.decode("ascii")
a1762788077d7e9403c7e5d3b94e78cca11f0ce8
21,391
def mapCtoD(sys_c, t=(0, 1), f0=0.): """Map a MIMO continuous-time to an equiv. SIMO discrete-time system. The criterion for equivalence is that the sampled pulse response of the CT system must be identical to the impulse response of the DT system. i.e. If ``yc`` is the output of the CT system with an input ``vc`` taken from a set of DACs fed with a single DT input ``v``, then ``y``, the output of the equivalent DT system with input ``v`` satisfies: ``y(n) = yc(n-)`` for integer ``n``. The DACs are characterized by rectangular impulse responses with edge times specified in the t list. **Input:** sys_c : object the LTI description of the CT system, which can be: * the ABCD matrix, * a list-like containing the A, B, C, D matrices, * a list of zpk tuples (internally converted to SS representation). * a list of LTI objects t : array_like The edge times of the DAC pulse used to make CT waveforms from DT inputs. Each row corresponds to one of the system inputs; [-1 -1] denotes a CT input. The default is [0 1], for all inputs except the first. f0 : float The (normalized) frequency at which the Gp filters' gains are to be set to unity. Default 0 (DC). **Output:** sys : tuple the LTI description for the DT equivalent, in A, B, C, D representation. Gp : list of lists the mixed CT/DT prefilters which form the samples fed to each state for the CT inputs. **Example:** Map the standard second order CT modulator shown below to its CT equivalent and verify that its NTF is :math:`(1-z^{-1})^2`. .. image:: ../doc/_static/mapCtoD.png :align: center :alt: mapCtoD block diagram It can be done as follows:: from __future__ import print_function import numpy as np from scipy.signal import lti from deltasigma import * LFc = lti([[0, 0], [1, 0]], [[1, -1], [0, -1.5]], [[0, 1]], [[0, 0]]) tdac = [0, 1] LF, Gp = mapCtoD(LFc, tdac) LF = lti(*LF) ABCD = np.vstack(( np.hstack((LF.A, LF.B)), np.hstack((LF.C, LF.D)) )) NTF, STF = calculateTF(ABCD) print("NTF:") # after rounding to a 1e-6 resolution print("Zeros:", np.real_if_close(np.round(NTF.zeros, 6))) print("Poles:", np.real_if_close(np.round(NTF.poles, 6))) Prints:: Zeros: [ 1. 1.] Poles: [ 0. 0.] Equivalent to:: (z -1)^2 NTF = ---------- z^2 .. seealso:: R. Schreier and B. Zhang, "Delta-sigma modulators employing \ continuous-time circuitry," IEEE Transactions on Circuits and Systems I, \ vol. 43, no. 4, pp. 324-332, April 1996. """ # You need to have A, B, C, D specification of the system Ac, Bc, Cc, Dc = _getABCD(sys_c) ni = Bc.shape[1] # Sanitize t if hasattr(t, 'tolist'): t = t.tolist() if (type(t) == tuple or type(t) == list) and np.isscalar(t[0]): t = [t] # we got a simple list, like the default value if not (type(t) == tuple or type(t) == list) and \ not (type(t[0]) == tuple or type(t[0]) == list): raise ValueError("The t argument has an unrecognized shape") # back to business t = np.array(t) if t.shape == (1, 2) and ni > 1: t = np.vstack((np.array([[-1, -1]]), np.dot(np.ones((ni - 1, 1)), t))) if t.shape != (ni, 2): raise ValueError('The t argument has the wrong dimensions.') di = np.ones(ni).astype(bool) for i in range(ni): if t[i, 0] == -1 and t[i, 1] == -1: di[i] = False # c2d assumes t1=0, t2=1. # Also c2d often complains about poor scaling and can even produce # incorrect results. A, B, C, D, _ = cont2discrete((Ac, Bc, Cc, Dc), 1, method='zoh') Bc1 = Bc[:, ~di] # Examine the discrete-time inputs to see how big the # augmented matrices need to be. 
B1 = B[:, ~di] D1 = D[:, ~di] n = A.shape[0] t2 = np.ceil(t[di, 1]).astype(np.int_) esn = (t2 == t[di, 1]) and (D[0, di] != 0).T # extra states needed? npp = n + np.max(t2 - 1 + 1*esn) # Augment A to npp x npp, B to np x 1, C to 1 x np. Ap = padb(padr(A, npp), npp) for i in range(n + 1, npp): Ap[i, i - 1] = 1 Bp = np.zeros((npp, 1)) if npp > n: Bp[n, 0] = 1 Cp = padr(C, npp) Dp = np.zeros((1, 1)) # Add in the contributions from each DAC for i in np.flatnonzero(di): t1 = t[i, 0] t2 = t[i, 1] B2 = B[:, i] D2 = D[:, i] if t1 == 0 and t2 == 1 and D2 == 0: # No fancy stuff necessary Bp = Bp + padb(B2, npp) else: n1 = np.floor(t1) n2 = np.ceil(t2) - n1 - 1 t1 = t1 - n1 t2 = t2 - n2 - n1 if t2 == 1 and D2 != 0: n2 = n2 + 1 extraStateNeeded = 1 else: extraStateNeeded = 0 nt = n + n1 + n2 if n2 > 0: if t2 == 1: Ap[:n, nt - n2:nt] = Ap[:n, nt - n2:nt] + np.tile(B2, (1, n2)) else: Ap[:n, nt - n2:nt - 1] = Ap[:n, nt - n2:nt - 1] + np.tile(B2, (1, n2 - 1)) Ap[:n, (nt-1)] = Ap[:n, (nt-1)] + _B2formula(Ac, 0, t2, B2) if n2 > 0: # pulse extends to the next period Btmp = _B2formula(Ac, t1, 1, B2) else: # pulse ends in this period Btmp = _B2formula(Ac, t1, t2, B2) if n1 > 0: Ap[:n, n + n1 - 1] = Ap[:n, n + n1 - 1] + Btmp else: Bp = Bp + padb(Btmp, npp) if n2 > 0: Cp = Cp + padr(np.hstack((np.zeros((D2.shape[0], n + n1)), D2*np.ones((1, n2)))), npp) sys = (Ap, Bp, Cp, Dp) if np.any(~di): # Compute the prefilters and add in the CT feed-ins. # Gp = inv(sI - Ac)*(zI - A)/z*Bc1 n, m = Bc1.shape Gp = np.empty_like(np.zeros((n, m)), dtype=object) # !!Make this like stf: an array of zpk objects ztf = np.empty_like(Bc1, dtype=object) # Compute the z-domain portions of the filters ABc1 = np.dot(A, Bc1) for h in range(m): for i in range(n): if Bc1[i, h] == 0: ztf[i, h] = (np.array([]), np.array([0.]), -ABc1[i, h]) # dt=1 else: ztf[i, h] = (np.atleast_1d(ABc1[i, h]/Bc1[i, h]), np.array([0.]), Bc1[i, h]) # dt = 1 # Compute the s-domain portions of each of the filters stf = np.empty_like(np.zeros((n, n)), dtype=object) # stf[out, in] = zpk for oi in range(n): for ii in range(n): # Doesn't do pole-zero cancellation stf[oi, ii] = ss2zpk(Ac, np.eye(n), np.eye(n)[oi, :], np.zeros((1, n)), input=ii) # scipy as of v 0.13 has no support for LTI MIMO systems # only 'MISO', therefore you can't write: # stf = ss2zpk(Ac, eye(n), eye(n), np.zeros(n, n))) for h in range(m): for i in range(n): # k = 1 unneded, see below for j in range(n): # check the k values for a non-zero term if stf[i, j][2] != 0 and ztf[j, h][2] != 0: if Gp[i, h] is None: Gp[i, h] = {} Gp[i, h].update({'Hs':[list(stf[i, j])]}) Gp[i, h].update({'Hz':[list(ztf[j, h])]}) else: Gp[i, h].update({'Hs':Gp[i, h]['Hs'] + [list(stf[i, j])]}) Gp[i, h].update({'Hz':Gp[i, h]['Hz'] + [list(ztf[j, h])]}) # the MATLAB-like cell code for the above statements would have # been: #Gp[i, h](k).Hs = stf[i, j] #Gp[i, h](k).Hz = ztf[j, h] #k = k + 1 if f0 != 0: # Need to correct the gain terms calculated by c2d # B1 = gains of Gp @f0; for h in range(m): for i in range(n): B1ih = np.real_if_close(evalMixedTF(Gp[i, h], f0)) # abs() used because ss() whines if B has complex entries... # This is clearly incorrect. # I've fudged the complex stuff by including a sign.... 
B1[i, h] = np.abs(B1ih) * np.sign(np.real(B1ih)) if np.abs(B1[i, h]) < 1e-09: B1[i, h] = 1e-09 # This prevents NaN in "line 174" below # Adjust the gains of the pre-filters for h in range(m): for i in range(n): for j in range(max(len(Gp[i, h]['Hs']), len(Gp[i, h]['Hz']))): # The next is "line 174" Gp[i, h]['Hs'][j][2] = Gp[i, h]['Hs'][j][2]/B1[i, h] sys = (sys[0], # Ap np.hstack((padb(B1, npp), sys[1])), # new B sys[2], # Cp np.hstack((D1, sys[3]))) # new D return sys, Gp
6aa83119efcad68b1fdf3a0cbc5467c53d2a30bb
21,392
def normalize_type(type: str) -> str: """Normalize DataTransfer's type strings. https://html.spec.whatwg.org/multipage/dnd.html#dom-datatransfer-getdata 'text' -> 'text/plain' 'url' -> 'text/uri-list' """ if type == 'text': return 'text/plain' elif type == 'url': return 'text/uri-list' return type
887c532218a7775ea55c6a39953ec244183af455
21,393
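A tiny sketch of normalize_type above:

normalize_type('text')         # -> 'text/plain'
normalize_type('url')          # -> 'text/uri-list'
normalize_type('text/html')    # -> 'text/html' (passed through unchanged)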
def _parity(N, j): """Private function to calculate the parity of the quantum system. """ if j == 0.5: pi = np.identity(N) - np.sqrt((N - 1) * N * (N + 1) / 2) * _lambda_f(N) return pi / N elif j > 0.5: mult = np.int32(2 * j + 1) matrix = np.zeros((mult, mult)) foo = np.ones(mult) for n in np.arange(-j, j + 1, 1): for l in np.arange(0, mult, 1): foo[l] = (2 * l + 1) * qutip.clebsch(j, l, j, n, 0, n) matrix[np.int32(n + j), np.int32(n + j)] = np.sum(foo) return matrix / mult
5afc399cc6f303ba35d7e7c6b6b039130fcd1b17
21,394
def get_log(id): """Returns the log for the given ansible play. This works on both live and finished plays. .. :quickref: Play; Returns the log for the given ansible play :param id: play id **Example Request**: .. sourcecode:: http GET /api/v2/plays/345835/log HTTP/1.1 **Example Response**: .. sourcecode:: http HTTP/1.1 200 OK ... log file from the given play ... """ # For security, send_from_directory avoids sending any files # outside of the specified directory return send_from_directory(get_log_dir_abs(), str(id) + ".log")
7a67f7b9d89df39824e566fcb11083be9d3f76e8
21,395
def filterLinesByCommentStr(lines, comment_str='#'): """ Filter all lines from a file.readlines output which begins with one of the symbols in the comment_str. """ comment_line_idx = [] for i, line in enumerate(lines): if line[0] in comment_str: comment_line_idx.append(i) for j in comment_line_idx[::-1]: del lines[j] return lines
8a6ce56187afc2368ec81d11c38fe7af2eacb14f
21,396
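A short sketch of filterLinesByCommentStr above; note that it also mutates the passed-in list in place:

lines = ["# header\n", "value = 1\n", "; ini-style comment\n", "other = 2\n"]
filterLinesByCommentStr(lines, comment_str="#;")
# -> ["value = 1\n", "other = 2\n"]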
def assemble_result_from_graph(type_spec, binding, output_map): """Assembles a result stamped into a `tf.Graph` given type signature/binding. This method does roughly the opposite of `capture_result_from_graph`, in that whereas `capture_result_from_graph` starts with a single structured object made up of tensors and computes its type and bindings, this method starts with the type/bindings and constructs a structured object made up of tensors. Args: type_spec: The type signature of the result to assemble, an instance of `types.Type` or something convertible to it. binding: The binding that relates the type signature to names of tensors in the graph, an instance of `pb.TensorFlow.Binding`. output_map: The mapping from tensor names that appear in the binding to actual stamped tensors (possibly renamed during import). Returns: The assembled result, a Python object that is composed of tensors, possibly nested within Python structures such as anonymous tuples. Raises: TypeError: If the argument or any of its parts are of an uexpected type. ValueError: If the arguments are invalid or inconsistent witch other, e.g., the type and binding don't match, or the tensor is not found in the map. """ type_spec = computation_types.to_type(type_spec) py_typecheck.check_type(type_spec, computation_types.Type) py_typecheck.check_type(binding, pb.TensorFlow.Binding) py_typecheck.check_type(output_map, dict) for k, v in output_map.items(): py_typecheck.check_type(k, str) if not tf.is_tensor(v): raise TypeError( 'Element with key {} in the output map is {}, not a tensor.'.format( k, py_typecheck.type_string(type(v)))) binding_oneof = binding.WhichOneof('binding') if isinstance(type_spec, computation_types.TensorType): if binding_oneof != 'tensor': raise ValueError( 'Expected a tensor binding, found {}.'.format(binding_oneof)) elif binding.tensor.tensor_name not in output_map: raise ValueError('Tensor named {} not found in the output map.'.format( binding.tensor.tensor_name)) else: return output_map[binding.tensor.tensor_name] elif isinstance(type_spec, computation_types.NamedTupleType): if binding_oneof != 'tuple': raise ValueError( 'Expected a tuple binding, found {}.'.format(binding_oneof)) else: type_elements = anonymous_tuple.to_elements(type_spec) if len(binding.tuple.element) != len(type_elements): raise ValueError( 'Mismatching tuple sizes in type ({}) and binding ({}).'.format( len(type_elements), len(binding.tuple.element))) result_elements = [] for (element_name, element_type), element_binding in zip(type_elements, binding.tuple.element): element_object = assemble_result_from_graph(element_type, element_binding, output_map) result_elements.append((element_name, element_object)) if not isinstance(type_spec, computation_types.NamedTupleTypeWithPyContainerType): return anonymous_tuple.AnonymousTuple(result_elements) container_type = computation_types.NamedTupleTypeWithPyContainerType.get_container_type( type_spec) if (py_typecheck.is_named_tuple(container_type) or py_typecheck.is_attrs(container_type)): return container_type(**dict(result_elements)) return container_type(result_elements) elif isinstance(type_spec, computation_types.SequenceType): if binding_oneof != 'sequence': raise ValueError( 'Expected a sequence binding, found {}.'.format(binding_oneof)) else: sequence_oneof = binding.sequence.WhichOneof('binding') if sequence_oneof == 'variant_tensor_name': variant_tensor = output_map[binding.sequence.variant_tensor_name] return make_dataset_from_variant_tensor(variant_tensor, type_spec.element) else: 
raise ValueError( 'Unsupported sequence binding \'{}\'.'.format(sequence_oneof)) else: raise ValueError('Unsupported type \'{}\'.'.format(type_spec))
a25b4d935dfcb62acad15da5aeafee390b03a38c
21,397
def parse(peaker): # type: (Peaker[Token]) -> Node """Parse the docstring. Args: peaker: A Peaker filled with the lexed tokens of the docstring. Raises: ParserException: If there is anything malformed with the docstring, or if anything goes wrong with parsing. # noqa Returns: The parsed docstring as an AST. """ keyword_parse_lookup = { 'Args': parse_args, 'Arguments': parse_args, 'Returns': parse_returns, 'Yields': parse_yields, 'Raises': parse_raises, } children = [ parse_description(peaker) ] while peaker.has_next(): next_value = peaker.peak().value if next_value in keyword_parse_lookup: children.append( keyword_parse_lookup[next_value](peaker) ) else: children.append( parse_long_description(peaker) ) return Node( node_type=NodeType.DOCSTRING, children=children, )
abd8b495281c159f070a890a392d7a80da740fa4
21,398
def listify(what, *, debug=False): """ non-reversible version of listify_safe(). In this case "None" always means "no columns". output: list """ l, _ = listify_safe(what, debug=debug) return l
677c7f9cacc270a2346e3ee002911b31825fbad9
21,399