Dataset columns: content (string, lengths 22 to 815k) · id (int64, 0 to 4.91M)
def load_indice_file(file, net_config):
    """Take the idlfile, data mean and net configuration and create a generator
    that outputs a jittered version of a random image from the annolist that is
    mean corrected."""
    indice_list = []
    with open(file, 'r') as f:
        while True:
            line = f.readline()
            if line:
                indice = line.strip().split()
                indice = [int(i) for i in indice]
                indice_list.append(indice)
            else:
                break
    print(file, len(indice_list))

    batch_input_indice = np.zeros((net_config["batch_size"], net_config["max_len"], 1, 1))
    batch_output_indice = np.zeros((net_config["batch_size"], net_config["max_len"], 1, 1))
    batch_wordvec_layer = np.zeros((net_config["batch_size"], net_config["vocab_size"], net_config["max_len"], 1))

    batch_id = 0
    while True:
        random.shuffle(indice_list)
        for indice in indice_list:
            output_indice = indice[:net_config['max_len']] + \
                [net_config['zero_symbol']] * (net_config['max_len'] - len(indice[:net_config['max_len']]))
            input_indice = [net_config["start_symbol"]] + output_indice[:-1]
            batch_input_indice[batch_id, :, 0, 0] = input_indice
            batch_output_indice[batch_id, :, 0, 0] = output_indice
            for i in range(net_config["max_len"]):
                ii = input_indice[i]
                vec = [0] * net_config["vocab_size"]
                vec[ii] = 1
                batch_wordvec_layer[batch_id, :, i, 0] = vec
            if batch_id == net_config["batch_size"] - 1:
                yield {"input_words": batch_input_indice,
                       "target_words": batch_output_indice,
                       "wordvec_layer": batch_wordvec_layer}
                batch_id = 0
            else:
                batch_id += 1
5,342,500
def test_valid_args(voltage, expected_byte):
    """
    Test if the return value of voltage_to_byte() is correct
    when passing a valid voltage.
    """
    byte = voltage_to_byte(voltage)
    assert type(byte) is int
    assert byte == expected_byte
5,342,501
def context_list_entities(context):
    """
    Returns list of entities to be displayed in list view
    """
    # log.info(context['List_rows'])
    if 'List_rows' in context:
        return context['List_rows']['field_value']
    elif 'entities' in context:
        return context['entities']
    log.warning("No entity list found in context %r"%(context.keys()))
    return None
5,342,502
def run(arg):
    """Entry point"""
    error_map = {}
    validate_path(arg, None, error_map)
    if len(error_map) > 0:
        error_count = 0
        for file, errors in error_map.items():
            print(f"Error in {file}:")
            for error in errors:
                print(f"  {error}")
                error_count += 1
            print("")
        print(f"{error_count} error(s) found in {len(error_map)} file(s)")
        return 1
    return 0
5,342,503
def test_retrieve_events_where_is_admin_only_includes_events_where_is_admin(
    user, member_of_organizer, organizer_type, membership_type, expected_events_amount
):
    """When retrieving events where is admin, only events where is admin should be returned"""
    hs = GroupFactory(type=GroupType.BOARD, name=AdminGroup.HS, slug=AdminGroup.HS)
    index = GroupFactory(
        type=GroupType.SUBGROUP, name=AdminGroup.INDEX, slug=AdminGroup.INDEX
    )
    nok = GroupFactory(
        type=GroupType.SUBGROUP, name=AdminGroup.NOK, slug=AdminGroup.NOK
    )
    sosialen = GroupFactory(
        type=GroupType.SUBGROUP, name=AdminGroup.SOSIALEN, slug=AdminGroup.SOSIALEN
    )
    promo = GroupFactory(
        type=GroupType.SUBGROUP, name=AdminGroup.PROMO, slug=AdminGroup.PROMO
    )
    kontkom = GroupFactory(type=GroupType.COMMITTEE, name="KontKom", slug="kontkom")
    pythons = GroupFactory(type=GroupType.INTERESTGROUP, name="Pythons", slug="pythons")

    EventFactory(organizer=hs)
    EventFactory(organizer=index)
    EventFactory(organizer=nok)
    EventFactory(organizer=nok)
    EventFactory(organizer=sosialen)
    EventFactory(organizer=promo)
    EventFactory(organizer=kontkom)
    EventFactory(organizer=pythons)
    EventFactory()

    client = get_api_client(user=user)
    add_user_to_group_with_name(
        user=user,
        group_name=member_of_organizer,
        group_type=organizer_type,
        membership_type=membership_type,
    )

    url = f"{API_EVENTS_BASE_URL}admin/"
    response = client.get(url)

    if expected_events_amount > 0:
        assert int(response.json().get("count")) == expected_events_amount
    else:
        assert response.status_code == 403
5,342,504
def _bitcode_symbols_partial_impl( *, actions, binary_artifact, bitcode_symbol_maps, dependency_targets, label_name, output_discriminator, package_bitcode, platform_prerequisites): """Implementation for the bitcode symbols processing partial.""" bitcode_dirs = [] bitcode_symbols = {} if bitcode_symbol_maps: bitcode_symbols.update(bitcode_symbol_maps) if binary_artifact and bitcode_symbols: bitcode_files = [] copy_commands = [] for arch in bitcode_symbols: bitcode_file = bitcode_symbols[arch] if not bitcode_file: continue bitcode_files.append(bitcode_file) # Get the UUID of the arch slice and use that to name the bcsymbolmap file. copy_commands.append( ("cp {bitcode_file} " + "${{OUTPUT_DIR}}/$(dwarfdump -u {binary} " + "| grep \"({arch})\" | cut -d' ' -f2).bcsymbolmap").format( arch = arch, binary = binary_artifact.path, bitcode_file = bitcode_file.path, ), ) if bitcode_files: bitcode_dir = intermediates.directory( actions = actions, target_name = label_name, output_discriminator = output_discriminator, dir_name = "bitcode_files", ) bitcode_dirs.append(bitcode_dir) apple_support.run_shell( actions = actions, apple_fragment = platform_prerequisites.apple_fragment, inputs = [binary_artifact] + bitcode_files, outputs = [bitcode_dir], command = "mkdir -p ${OUTPUT_DIR} && " + " && ".join(copy_commands), env = {"OUTPUT_DIR": bitcode_dir.path}, mnemonic = "BitcodeSymbolsCopy", xcode_config = platform_prerequisites.xcode_version_config, ) transitive_bitcode_files = depset( direct = bitcode_dirs, transitive = [ x[_AppleBitcodeInfo].bitcode for x in dependency_targets if _AppleBitcodeInfo in x ], ) if package_bitcode: bundle_files = [(processor.location.archive, "BCSymbolMaps", transitive_bitcode_files)] else: bundle_files = [] return struct( bundle_files = bundle_files, providers = [_AppleBitcodeInfo(bitcode = transitive_bitcode_files)], )
5,342,505
def _str_trim_left(x):
    """
    Remove leading whitespace.
    """
    return x.str.replace(r"^\s*", "")
5,342,506
def fabric_host(docker_client):
    """Keep this session scoped to save time"""
    container = docker_client.containers.run(
        "efagerberg/pytest-fabric-sshd:latest",
        name="pytest-fabric-test-container",
        ports={'22': '2222'},
        detach=True,
    )
    env.disable_known_hosts = True
    env.password = 'root'
    env.hosts.append('root@{}:2222'.format(get_docker_host()))
    yield container
    container.stop()
    container.remove()
    env.disable_known_hosts = True
    env.hosts = []
    env.password = None
5,342,507
def zipcompress(items_list, flags_list):
    """
    SeeAlso: vt.zipcompress
    """
    return [compress(list_, flags) for list_, flags in zip(items_list, flags_list)]
5,342,508
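A minimal usage sketch for the zipcompress function above, assuming that `compress` is itertools.compress (the original import is not shown in this snippet); each inner list is filtered by its own flag list.

from itertools import compress

items_list = [[1, 2, 3], [4, 5, 6]]
flags_list = [[True, False, True], [False, True, False]]
# Keep only the items whose flag is True, list by list.
result = [list(compress(list_, flags)) for list_, flags in zip(items_list, flags_list)]
print(result)  # [[1, 3], [5]]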
def test_config_file_fails_missing_value(monkeypatch, presence, config):
    """Check if test fails with missing value in database configuration."""
    def mock_file_config(self):
        return {'database': {}}

    monkeypatch.setattr(presence.builder, "fetch_file_config", mock_file_config)
    status, msg = presence.check_configuration_file()
    assert status == "Skipping"
    assert "No configuration" in msg
    assert presence.db_config == {}
5,342,509
def construct_run_config(iterations_per_loop):
    """Construct the run config."""
    # Parse hparams
    hparams = ssd_model.default_hparams()
    hparams.parse(FLAGS.hparams)
    return dict(
        hparams.values(),
        num_shards=FLAGS.num_shards,
        num_examples_per_epoch=FLAGS.num_examples_per_epoch,
        resnet_checkpoint=FLAGS.resnet_checkpoint,
        val_json_file=FLAGS.val_json_file,
        model_dir=FLAGS.model_dir,
        iterations_per_loop=iterations_per_loop,
        steps_per_epoch=FLAGS.num_examples_per_epoch // FLAGS.train_batch_size,
        eval_samples=FLAGS.eval_samples,
        transpose_input=False if FLAGS.input_partition_dims is not None else True,
        use_spatial_partitioning=True if FLAGS.input_partition_dims is not None else False,
        dataset_threadpool_size=FLAGS.dataset_threadpool_size
    )
5,342,510
def bezier_curve(points, nTimes=1000):
    """
    Given a set of control points, return the bezier curve defined by the
    control points.

    Control points should be a list of lists, or list of tuples
    such as [ [1,1], [2,3], [4,5], ..[Xn, Yn] ]
    nTimes is the number of time steps, defaults to 1000

    See http://processingjs.nihongoresources.com/bezierinfo/
    """
    nPoints = len(points)
    xPoints = np.array([p[0] for p in points])
    yPoints = np.array([p[1] for p in points])

    t = np.linspace(0.0, 1.0, nTimes)

    polynomial_array = np.array([
        bernstein_poly(i, nPoints-1, t) for i in range(0, nPoints)
    ])

    xvals = np.dot(xPoints, polynomial_array)
    yvals = np.dot(yPoints, polynomial_array)

    return xvals, yvals
5,342,511
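bezier_curve relies on a bernstein_poly helper that is not included in this snippet. Below is a small sketch of such a helper plus a call, assuming the standard Bernstein basis built from scipy.special.comb; the exact form in the original code may differ.

import numpy as np
from scipy.special import comb

def bernstein_poly(i, n, t):
    # i-th Bernstein basis polynomial of degree n evaluated at t (an assumption,
    # since the original helper is not shown here)
    return comb(n, i) * (t ** i) * ((1 - t) ** (n - i))

# Evaluate a quadratic Bezier curve through three control points.
xvals, yvals = bezier_curve([[0, 0], [1, 2], [2, 0]], nTimes=100)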
def _compact_temporaries(exprs):
    """
    Drop temporaries consisting of isolated symbols.
    """
    # First of all, convert to SSA
    exprs = makeit_ssa(exprs)

    # What's gonna be dropped
    mapper = {e.lhs: e.rhs for e in exprs
              if e.lhs.is_Symbol and (q_leaf(e.rhs) or e.rhs.is_Function)}

    processed = []
    for e in exprs:
        if e.lhs not in mapper:
            # The temporary is retained, and substitutions may be applied
            expr = e
            while True:
                handle = uxreplace(expr, mapper)
                if handle == expr:
                    break
                else:
                    expr = handle
            processed.append(handle)

    return processed
5,342,512
def print_formula(elements):
    """
    The input dictionary, atoms and their amount, is processed to produce
    the chemical formula as a string

    Parameters
    ----------
    elements : dict
        The elements that form the metabolite and their corresponding amount

    Returns
    -------
    formula : str
        The formula of the metabolite
    """
    formula = "".join([f"{k}{int(v)}" for k, v in elements.items()])
    return formula
5,342,513
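A quick usage example of print_formula above; the element counts of glucose produce the expected formula string.

# Glucose: 6 carbon, 12 hydrogen, 6 oxygen
elements = {"C": 6, "H": 12, "O": 6}
print(print_formula(elements))  # C6H12O6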
def try_get_code(url):
    """Returns code of URL if exists in database, else None"""
    command = """SELECT short FROM urls WHERE full=?;"""
    result = __execute_command(command, (url,))
    if result is None:
        return None
    return result[0]
5,342,514
def is_chinese_char(cc):
    """
    Check if the character is Chinese
    args:
        cc: char
    output:
        boolean
    """
    return unicodedata.category(cc) == 'Lo'
5,342,515
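A short check of is_chinese_char above, assuming unicodedata is imported as in the original module. Note that Unicode category 'Lo' ("Letter, other") also covers many non-Chinese scripts, so this is a rough heuristic rather than a strict test.

print(is_chinese_char("中"))  # True  (category 'Lo')
print(is_chinese_char("a"))   # False (category 'Ll')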
def _get_ec2_on_demand_prices(region_name: str) -> pd.DataFrame: """ Returns a dataframe with columns instance_type, memory_gb, logical_cpu, and price where price is the on-demand price """ # All comments about the pricing API are based on # https://www.sentiatechblog.com/using-the-ec2-price-list-api # us-east-1 is the only region this pricing API is available and the pricing # endpoint in us-east-1 has pricing data for all regions. pricing_client = boto3.client("pricing", region_name="us-east-1") filters = [ # only get prices for the specified region { "Type": "TERM_MATCH", "Field": "regionCode", "Value": region_name, }, # filter out instance types that come with SQL Server pre-installed { "Type": "TERM_MATCH", "Field": "preInstalledSw", "Value": "NA", }, # limit ourselves to just Linux instances for now # TODO add support for Windows eventually { "Type": "TERM_MATCH", "Field": "operatingSystem", "Value": "Linux", }, # Shared is a "regular" EC2 instance, as opposed to Dedicated and Host {"Type": "TERM_MATCH", "Field": "tenancy", "Value": "Shared"}, # This relates to EC2 capacity reservations. Used is correct for when we don't # have any reservations {"Type": "TERM_MATCH", "Field": "capacitystatus", "Value": "Used"}, ] records = [] for product_json in _boto3_paginate( pricing_client.get_products, Filters=filters, ServiceCode="AmazonEC2", FormatVersion="aws_v1", ): product = json.loads(product_json) attributes = product["product"]["attributes"] instance_type = attributes["instanceType"] # We don't expect the "warnings" to get hit, we just don't want to get thrown # off if the data format changes unexpectedly or something like that. if "physicalProcessor" not in attributes: print( f"Warning, skipping {instance_type} because physicalProcessor is not " "specified" ) continue # effectively, this skips Graviton (ARM-based) processors # TODO eventually support Graviton processors. 
if ( "intel" not in attributes["physicalProcessor"].lower() and "amd" not in attributes["physicalProcessor"].lower() ): # only log if we see non-Graviton processors if "AWS Graviton" not in attributes["physicalProcessor"]: print( "Skipping non-Intel/AMD processor " f"{attributes['physicalProcessor']} in {instance_type}" ) continue if "OnDemand" not in product["terms"]: print( f"Warning, skipping {instance_type} because there was no OnDemand terms" ) continue on_demand = list(product["terms"]["OnDemand"].values()) if len(on_demand) != 1: print( f"Warning, skipping {instance_type} because there was more than one " "OnDemand SKU" ) continue price_dimensions = list(on_demand[0]["priceDimensions"].values()) if len(price_dimensions) != 1: print( f"Warning, skipping {instance_type} because there was more than one " "priceDimensions" ) continue pricing = price_dimensions[0] if pricing["unit"] != "Hrs": print( f"Warning, skipping {instance_type} because the pricing unit is not " f"Hrs: {pricing['unit']}" ) continue if "USD" not in pricing["pricePerUnit"]: print( f"Warning, skipping {instance_type} because the pricing is not in USD" ) continue usd_price = pricing["pricePerUnit"]["USD"] try: usd_price_float = float(usd_price) except ValueError: print( f"Warning, skipping {instance_type} because the price is not a float: " f"{usd_price}" ) continue memory = attributes["memory"] if not memory.endswith(" GiB"): print( f"Warning, skipping {instance_type} because memory doesn't end in GiB: " f"{memory}" ) continue try: memory_gb_float = float(memory[: -len(" GiB")]) except ValueError: print( f"Warning, skipping {instance_type} because memory isn't an float: " f"{memory}" ) continue try: vcpu_int = int(attributes["vcpu"]) except ValueError: print( f"Warning, skipping {instance_type} because vcpu isn't an int: " f"{attributes['vcpu']}" ) continue records.append((instance_type, memory_gb_float, vcpu_int, usd_price_float)) return pd.DataFrame.from_records( records, columns=["instance_type", "memory_gb", "logical_cpu", "price"] )
5,342,516
def resize_image(image, min_dim=None, max_dim=None, padding=False):
    """
    Resizes an image keeping the aspect ratio.

    min_dim: if provided, resizes the image such that it's smaller
        dimension == min_dim
    max_dim: if provided, ensures that the image longest side doesn't
        exceed this value.
    padding: If true, pads image with zeros so it's size is max_dim x max_dim

    Returns:
    image: the resized image
    window: (y1, x1, y2, x2). If max_dim is provided, padding might
        be inserted in the returned image. If so, this window is the
        coordinates of the image part of the full image (excluding
        the padding). The x2, y2 pixels are not included.
    scale: The scale factor used to resize the image
    padding: Padding added to the image [(top, bottom), (left, right), (0, 0)]
    """
    # Default window (y1, x1, y2, x2) and default scale == 1.
    h, w = image.shape[:2]
    window = (0, 0, h, w)
    scale = 1

    # Scale?
    if min_dim:
        # Scale up but not down
        scale = max(1, min_dim / min(h, w))
    # Does it exceed max dim?
    if max_dim:
        image_max = max(h, w)
        if round(image_max * scale) > max_dim:
            scale = max_dim / image_max
    # Resize image and mask
    if scale != 1:
        image = cv2.resize(
            image, (round(w * scale), round(h * scale)))
    # Need padding?
    if padding:
        # Get new height and width
        h, w = image.shape[:2]
        top_pad = (max_dim - h) // 2
        bottom_pad = max_dim - h - top_pad
        left_pad = (max_dim - w) // 2
        right_pad = max_dim - w - left_pad
        padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]
        image = np.pad(image, padding, mode='constant', constant_values=0)
        window = (top_pad, left_pad, h + top_pad, w + left_pad)
    return image, window, scale, padding
5,342,517
def _handle_add_fifo(pool, to_add: transaction.Transaction):
    """
    FIFO is defined by putting the BUY transactions at the end.
    For split coins, they need to be sold first.
    """
    if to_add.operation == transaction.Operation.SPLIT:
        pool.insert(0, to_add)
    else:
        assert to_add.operation in [
            transaction.Operation.BUY,
            transaction.Operation.MINING,
            transaction.Operation.GIFT_RECEIVED,
            transaction.Operation.TRADE_INPUT,
        ]
        pool.append(to_add)
5,342,518
def save_gradients_images(gradients, file_name):
    """
    Exports the original gradients image

    Args:
        gradients (np arr): Numpy array of the gradients with shape (3, 224, 224)
        file_name (str): File name to be exported
    """
    if not os.path.exists('results'):
        os.makedirs('results')
    # Normalize
    gradients = gradients - gradients.min()
    gradients /= gradients.max()
    # Save image
    path_to_file = os.path.join('results', file_name + '.jpg')
    save_image(gradients, path_to_file)
5,342,519
def _set_no_data(gdal_ds, no_data):
    """
    Set no data value into gdal dataset

    Description
    -----------

    Parameters
    ----------
    gdal_ds: gdal.Dataset
        gdal dataset
    no_data: list or tuple
        list of no data values corresponding to each raster band
    """
    for band in range(gdal_ds.RasterCount):
        try:
            gdal_ds.GetRasterBand(band + 1).SetNoDataValue(no_data)
        except TypeError:
            pass
5,342,520
def idwt(approx, wavelets,
         h=np.array([1.0 / np.sqrt(2), -1.0 / np.sqrt(2)]),
         g=np.array([1.0 / np.sqrt(2), 1.0 / np.sqrt(2)])):
    """
    Simple inverse discrete wavelet transform.
    for good reference: http://www.mathworks.com/help/wavelet/ref/dwt.html

    @param approx: approximation of signal at low resolution
    @param h: high pass filter (for details space)
    @param g: low pass filter (for approximation space)
    @return: recovered signal
    """
    wave_level = iter(wavelets[::-1])
    h, g = g[::-1], h[::-1]
    recovered = approx
    for wave in wave_level:
        # upsample
        recovered = np.column_stack([recovered, np.zeros(recovered.size)]).flatten()
        wave_up = np.column_stack([wave, np.zeros(wave.size)]).flatten()
        recovered = np.convolve(recovered, h)[:-(h.size - 1)]
        recovered = recovered + np.convolve(wave_up, g)[:-(g.size - 1)]
    return recovered
5,342,521
def app_durations():
    """Generate JavaScript for appDurations."""
    return 'appDurations = ' + json.dumps(supported_durations)
5,342,522
def generic_cc(mag=10, dmag=8, band='K'):
    """Returns a generic contrast curve.

    Keyword arguments:
    mag -- magnitude of target star in passband
    dmag -- can currently be either 8 or 4.5 (two example generic cc's being used)
    band -- passband of observation.
    """
    if dmag == 8:
        return fpp.ContrastCurveFromFile('%s/data/contrast_curves/ex8_K.txt' % KEPLERDIR, band, mag)
    elif dmag == 4.5:
        return fpp.ContrastCurveFromFile('%s/data/contrast_curves/ex4.5_K.txt' % KEPLERDIR, band, mag)
5,342,523
def read_routes(
    *,
    db: Session = Depends(deps.get_db),
    data_in: schemas.DictDataCreate,
    current_user: models.User = Depends(deps.get_current_active_user)
) -> Any:
    """
    Retrieve Mock Data.
    """
    db.add(models.Dict_Data(**jsonable_encoder(data_in)))
    return {
        "code": 20000,
        "data": "",
        "message": "修改成功",  # literal kept as-is; means "update successful"
    }
5,342,524
def plotTruePreds(tr_gamma, pred_gamma, tr_ele, pred_ele, tr_pi0, pred_pi0, tr_chPi, pred_chPi):
    """
    Plots 4 True X Pred energy plots, one for each kind of particle

    :parameter tr_gamma: array containing the true values of the energy for photons.
    :parameter pred_gamma: array containing the predicted energies for photons.
    :parameter tr_ele: array containing the true values of the energy for electrons.
    :parameter pred_ele: array containing the predicted energies for electrons.
    :parameter tr_pi0: array containing the true values of the energy for neutral pions.
    :parameter pred_pi0: array containing the predicted energies for neutral pions.
    :parameter tr_chPi: array containing the true values of the energy for charged pions.
    :parameter pred_chPi: array containing the predicted energies for charged pions.
    """
    # 4 subplots sharing both x/y axes
    f, axes2d = plt.subplots(nrows=2, ncols=2, sharex=True, sharey=True, figsize=(5, 5))
    # f.suptitle('Predicted energy X True energy', fontsize=14)

    ax1 = axes2d[0, 0]
    ax2 = axes2d[0, 1]
    ax3 = axes2d[1, 0]
    ax4 = axes2d[1, 1]

    ax1.hist2d(tr_gamma, pred_gamma, bins=200, norm=LogNorm(), cmap="cool")
    ax1.set_title('Photons')

    ax2.hist2d(tr_ele, pred_ele, bins=200, norm=LogNorm(), cmap="cool")
    ax2.set_title('Electrons')

    ax3.hist2d(tr_pi0, pred_pi0, bins=200, norm=LogNorm(), cmap="cool")
    ax3.set_title('Neutral pions')

    ax4.hist2d(tr_chPi, pred_chPi, bins=200, norm=LogNorm(), cmap="cool")
    ax4.set_title('Charged pions')

    plt.xticks(np.arange(0, 600, 100.0))
    plt.yticks(np.arange(0, 600, 100.0))
    # tick.label.set_fontsize(14)
    # axes2d.set_xlabel("True energy (GeV)", fontsize=14)
    # axes2d.set_ylabel("Predicted energy (GeV)", fontsize=14)

    f.text(0.5, 0, "True energy (GeV)", ha='center', va='center', fontsize=14)
    f.text(0, 0.5, "Predicted energy (GeV)", ha='center', va='center',
           rotation='vertical', fontsize=14)
    # plt.show()
5,342,525
def get_companies_pagination_from_lagou(city_id=0, finance_stage_id=0, industry_id=0, page_no=1):
    """
    Crawl paginated company data from Lagou.

    :param city_id: city id
    :param finance_stage_id: financing stage id
    :param industry_id: industry id
    :param page_no: page number
    :return: paginated Lagou company data
    :rtype: utils.pagination.Pagination
    """
    url = constants.COMPANIES_URL.format(city_id=city_id,
                                         finance_stage_id=finance_stage_id,
                                         industry_id=industry_id)
    params = {'pn': page_no, 'sortField': constants.SORTED_BY_JOBS_COUNT}
    response_json = utils.http_tools.requests_get(url=url, params=params).json()
    pagination = utils.pagination.Pagination(per_page=int(response_json['pageSize']),
                                             total=int(response_json['totalCount']))
    return pagination
5,342,526
def test_d3_3_10v01_d3_3_10v01i(mode, save_output, output_format):
    """
    A day is a calendar (or "local time") day in each timezone, including
    the timezones outside of +12:00 through -11:59 inclusive.
    """
    assert_bindings(
        schema="ibmData/valid/D3_3_10/d3_3_10v01.xsd",
        instance="ibmData/valid/D3_3_10/d3_3_10v01.xml",
        class_name="Root",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
5,342,527
def _onTextReceive(iface, asDict):
    """Special text auto parsing for received messages"""
    # We don't throw if the utf8 is invalid in the text message. Instead we just don't populate
    # the decoded.data.text and we log an error message. This at least allows some delivery to
    # the app and the app can deal with the missing decoded representation.
    #
    # Usually btw this problem is caused by apps sending binary data but setting the payload type to
    # text.
    try:
        asBytes = asDict["decoded"]["payload"]
        asDict["decoded"]["text"] = asBytes.decode("utf-8")
    except Exception as ex:
        logging.error(f"Malformatted utf8 in text message: {ex}")
5,342,528
def is_quant_contam(contam_model):
    """Get the flag for quantitative contamination"""
    # the list of quantitative models
    quant_models = ['GAUSS', 'FLUXCUBE']

    # set the default value
    isquantcont = True

    # check whether the contamination is not quantitative
    if not contam_model.upper() in quant_models:
        # re-set the flag
        isquantcont = False

    # return the flag
    return isquantcont
5,342,529
def nms_wrapper(scores, boxes, threshold=0.7, class_sets=None):
    """
    post-process the results of im_detect
    :param scores: N * K numpy
    :param boxes: N * (K * 4) numpy
    :param class_sets: e.g. CLASSES = ('__background__','person','bike','motorbike','car','bus')
    :return: a list of K-1 dicts, no background, each is
        {'class': classname, 'dets': None | [[x1,y1,x2,y2,score],...]}
    """
    num_class = scores.shape[1] if class_sets is None else len(class_sets)
    assert num_class * 4 == boxes.shape[1], \
        'Detection scores and boxes dont match'
    class_sets = ['class_' + str(i) for i in range(0, num_class)] if class_sets is None else class_sets

    res = []
    for ind, cls in enumerate(class_sets[1:]):
        ind += 1  # skip background
        cls_boxes = boxes[:, 4*ind: 4*(ind+1)]
        cls_scores = scores[:, ind]
        dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, thresh=0.3)
        dets = dets[keep, :]
        dets = dets[np.where(dets[:, 4] > threshold)]
        r = {}
        if dets.shape[0] > 0:
            r['class'], r['dets'] = cls, dets
        else:
            r['class'], r['dets'] = cls, None
        res.append(r)

    return res
5,342,530
def Rbf( gamma: float = 1.0) -> InternalLayer: """Dual activation function for normalized RBF or squared exponential kernel. Dual activation function is `f(x) = sqrt(2)*sin(sqrt(2*gamma) x + pi/4)`. NNGP kernel transformation correspond to (with input dimension `d`) `k = exp(- gamma / d * ||x - x'||^2) = exp(- gamma*(q11 + q22 - 2 * q12))`. Args: gamma: related to characteristic length-scale (l) that controls width of the kernel, where `gamma = 1 / (2 l^2)`. Returns: `(init_fn, apply_fn, kernel_fn)`. """ def fn(x): return np.sqrt(2) * np.sin(np.sqrt(2 * gamma) * x + np.pi/4) @_requires(diagonal_spatial=_Diagonal()) # pytype:disable=wrong-keyword-args def kernel_fn(k: Kernel) -> Kernel: """Compute new kernels after an `Rbf` layer.""" cov1, nngp, cov2, ntk = k.cov1, k.nngp, k.cov2, k.ntk sum11, sum12, sum22 = _get_diagonal_outer_prods(cov1, cov2, k.diagonal_batch, k.diagonal_spatial, op.add) def nngp_ntk_fn(nngp, sum_, ntk): nngp = np.exp(gamma * (-sum_ + 2 * nngp)) if ntk is not None: ntk *= 2 * gamma * nngp return nngp, ntk def nngp_fn_diag(nngp): return np.ones_like(nngp) nngp, ntk = nngp_ntk_fn(nngp, sum12, ntk) if k.diagonal_batch and k.diagonal_spatial: cov1 = nngp_fn_diag(cov1) if cov2 is not None: cov2 = nngp_fn_diag(cov2) else: cov1, _ = nngp_ntk_fn(cov1, sum11, None) if cov2 is not None: cov2, _ = nngp_ntk_fn(cov2, sum22, None) return k.replace(cov1=cov1, nngp=nngp, cov2=cov2, ntk=ntk) return _elementwise(fn, f'Rbf({gamma})', kernel_fn)
5,342,531
def upgrade_state_dict_with_xlm_weights(
    state_dict: Dict[str, Any], pretrained_xlm_checkpoint: str,
) -> Dict[str, Any]:
    """
    Load XLM weights into a Transformer encoder or decoder model.

    Args:
        state_dict: state dict for either TransformerEncoder or
            TransformerDecoder
        pretrained_xlm_checkpoint: checkpoint to load XLM weights from

    Raises:
        AssertionError: If architecture (num layers, attention heads, etc.)
            does not match between the current Transformer encoder or
            decoder and the pretrained_xlm_checkpoint
    """
    if not os.path.exists(pretrained_xlm_checkpoint):
        raise IOError("Model file not found: {}".format(pretrained_xlm_checkpoint))

    state = checkpoint_utils.load_checkpoint_to_cpu(pretrained_xlm_checkpoint)
    xlm_state_dict = state["model"]
    for key in xlm_state_dict.keys():
        for search_key in ["embed_tokens", "embed_positions", "layers"]:
            if search_key in key:
                subkey = key[key.find(search_key):]
                assert subkey in state_dict, (
                    "{} Transformer encoder / decoder "
                    "state_dict does not contain {}. Cannot "
                    "load {} from pretrained XLM checkpoint "
                    "{} into Transformer.".format(
                        str(state_dict.keys()), subkey, key, pretrained_xlm_checkpoint
                    )
                )
                state_dict[subkey] = xlm_state_dict[key]
    return state_dict
5,342,532
def test_get_current(client):
    """Assert that the business info for regular (not xpro) business is correct to spec."""
    rv = client.get('/api/v1/businesses/CP0001965/directors')

    assert 200 == rv.status_code
    is_valid, errors = validate(rv.json, 'directors', validate_schema=True)
    if errors:
        for err in errors:
            print('\nERROR MESSAGE:')
            print(err.message)
    assert is_valid
5,342,533
def create_returns_tear_sheet(returns, positions=None, transactions=None, live_start_date=None, cone_std=(1.0, 1.5, 2.0), benchmark_rets=None, bootstrap=False, turnover_denom='AGB', header_rows=None, return_fig=False): """ Generate a number of plots for analyzing a strategy's returns. - Fetches benchmarks, then creates the plots on a single figure. - Plots: rolling returns (with cone), rolling beta, rolling sharpe, rolling Fama-French risk factors, drawdowns, underwater plot, monthly and annual return plots, daily similarity plots, and return quantile box plot. - Will also print the start and end dates of the strategy, performance statistics, drawdown periods, and the return range. Parameters ---------- returns : pd.Series Daily returns of the strategy, noncumulative. - See full explanation in create_full_tear_sheet. positions : pd.DataFrame, optional Daily net position values. - See full explanation in create_full_tear_sheet. transactions : pd.DataFrame, optional Executed trade volumes and fill prices. - See full explanation in create_full_tear_sheet. live_start_date : datetime, optional The point in time when the strategy began live trading, after its backtest period. cone_std : float, or tuple, optional If float, The standard deviation to use for the cone plots. If tuple, Tuple of standard deviation values to use for the cone plots - The cone is a normal distribution with this standard deviation centered around a linear regression. benchmark_rets : pd.Series, optional Daily noncumulative returns of the benchmark. - This is in the same style as returns. bootstrap : boolean, optional Whether to perform bootstrap analysis for the performance metrics. Takes a few minutes longer. turnover_denom : str, optional Either AGB or portfolio_value, default AGB. - See full explanation in txn.get_turnover. header_rows : dict or OrderedDict, optional Extra rows to display at the top of the perf stats table. return_fig : boolean, optional If True, returns the figure that was plotted on. 
""" if benchmark_rets is not None: returns = utils.clip_returns_to_benchmark(returns, benchmark_rets) plotting.show_perf_stats(returns, benchmark_rets, positions=positions, transactions=transactions, turnover_denom=turnover_denom, bootstrap=bootstrap, live_start_date=live_start_date, header_rows=header_rows) plotting.show_worst_drawdown_periods(returns) vertical_sections = 11 if live_start_date is not None: vertical_sections += 1 live_start_date = ep.utils.get_utc_timestamp(live_start_date) if benchmark_rets is not None: vertical_sections += 1 if bootstrap: vertical_sections += 1 fig = plt.figure(figsize=(14, vertical_sections * 6)) gs = gridspec.GridSpec(vertical_sections, 3, wspace=0.5, hspace=0.5) ax_rolling_returns = plt.subplot(gs[:2, :]) i = 2 ax_rolling_returns_vol_match = plt.subplot(gs[i, :], sharex=ax_rolling_returns) i += 1 ax_rolling_returns_log = plt.subplot(gs[i, :], sharex=ax_rolling_returns) i += 1 ax_returns = plt.subplot(gs[i, :], sharex=ax_rolling_returns) i += 1 if benchmark_rets is not None: ax_rolling_beta = plt.subplot(gs[i, :], sharex=ax_rolling_returns) i += 1 ax_rolling_volatility = plt.subplot(gs[i, :], sharex=ax_rolling_returns) i += 1 ax_rolling_sharpe = plt.subplot(gs[i, :], sharex=ax_rolling_returns) i += 1 ax_drawdown = plt.subplot(gs[i, :], sharex=ax_rolling_returns) i += 1 ax_underwater = plt.subplot(gs[i, :], sharex=ax_rolling_returns) i += 1 ax_monthly_heatmap = plt.subplot(gs[i, 0]) ax_annual_returns = plt.subplot(gs[i, 1]) ax_monthly_dist = plt.subplot(gs[i, 2]) i += 1 ax_return_quantiles = plt.subplot(gs[i, :]) i += 1 plotting.plot_rolling_returns( returns, factor_returns=benchmark_rets, live_start_date=live_start_date, cone_std=cone_std, ax=ax_rolling_returns) ax_rolling_returns.set_title( 'Cumulative returns') plotting.plot_rolling_returns( returns, factor_returns=benchmark_rets, live_start_date=live_start_date, cone_std=None, volatility_match=(benchmark_rets is not None), legend_loc=None, ax=ax_rolling_returns_vol_match) ax_rolling_returns_vol_match.set_title( 'Cumulative returns volatility matched to benchmark') plotting.plot_rolling_returns( returns, factor_returns=benchmark_rets, logy=True, live_start_date=live_start_date, cone_std=cone_std, ax=ax_rolling_returns_log) ax_rolling_returns_log.set_title( 'Cumulative returns on logarithmic scale') plotting.plot_returns( returns, live_start_date=live_start_date, ax=ax_returns, ) ax_returns.set_title( 'Returns') if benchmark_rets is not None: plotting.plot_rolling_beta( returns, benchmark_rets, ax=ax_rolling_beta) plotting.plot_rolling_volatility( returns, factor_returns=benchmark_rets, ax=ax_rolling_volatility) plotting.plot_rolling_sharpe( returns, ax=ax_rolling_sharpe) # Drawdowns plotting.plot_drawdown_periods( returns, top=5, ax=ax_drawdown) plotting.plot_drawdown_underwater( returns=returns, ax=ax_underwater) plotting.plot_monthly_returns_heatmap(returns, ax=ax_monthly_heatmap) plotting.plot_annual_returns(returns, ax=ax_annual_returns) plotting.plot_monthly_returns_dist(returns, ax=ax_monthly_dist) plotting.plot_return_quantiles( returns, live_start_date=live_start_date, ax=ax_return_quantiles) if bootstrap and (benchmark_rets is not None): ax_bootstrap = plt.subplot(gs[i, :]) plotting.plot_perf_stats(returns, benchmark_rets, ax=ax_bootstrap) elif bootstrap: raise ValueError('bootstrap requires passing of benchmark_rets.') for ax in fig.axes: plt.setp(ax.get_xticklabels(), visible=True) if return_fig: return fig
5,342,534
def vectorize_text(text_col: pd.Series, vec_type: str = 'count', **kwargs):
    """
    Vectorizes pre-processed text. Instantiates the vectorizer and
    fit_transform it to the data provided.

    :param text_col: Pandas series, containing preprocessed text.
    :param vec_type: string indicating what type of vectorization (count or tfidf currently).
    :param **kwargs: dict of keyworded arguments for sklearn vectorizer functions.
    :return: A tuple containing vectorized (doc-feature matrix that has d rows and f columns
        for count and tfidf vectorization) and vectorizer_obj (vectorization sklearn object
        representing trained vectorizer).
    """
    # Check if vectorization type is supported
    assert vec_type in ['count', 'tfidf']

    # Get raw values from pandas series
    text_raw = text_col.tolist()

    # Lets the vectorizer know the input has already been pre-tokenized
    # and is now delimited by whitespaces
    kwargs['analyzer'] = str.split

    # Apply proper vectorization
    if vec_type == 'count':
        count_vec = CountVectorizer(**kwargs)
        vectorized = count_vec.fit_transform(text_raw)
        vectorizer_obj = count_vec
    elif vec_type == 'tfidf':
        tfidf_vec = TfidfVectorizer(**kwargs)
        vectorized = tfidf_vec.fit_transform(text_raw)
        vectorizer_obj = tfidf_vec

    # Return vectorized object
    return vectorized, vectorizer_obj
5,342,535
def creation_LS(X, y, N):
    """Generates a random learning set of size N from the data in X
    (containing the input samples) and in y (containing the corresponding
    output values).

    Parameters
    ----------
    X: array containing the input samples
    y: array containing the corresponding output values

    Return
    ------
    X_random_rows : array of shape [N, (number of columns of X)]
    y_random_rows : array of shape [N]
    """
    number_of_rows = X.shape[0]
    random_indices = np.random.choice(number_of_rows, size=N, replace=False)
    X_random_rows = X[random_indices, :]
    y_random_rows = y[random_indices]
    return X_random_rows, y_random_rows
5,342,536
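A hypothetical usage of creation_LS above, drawing a 5-sample learning set from a small array.

import numpy as np

X = np.arange(20).reshape(10, 2)  # 10 samples, 2 features
y = np.arange(10)
X_ls, y_ls = creation_LS(X, y, 5)
print(X_ls.shape, y_ls.shape)  # (5, 2) (5,)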
def init_logger():
    """Send log output to the console.

    Params:
        asctime: time the log record was created
        levelname: log level
        name: logger name
        message: log message
    """
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO
    )
5,342,537
def print_summary(show="all", blocks=False, cid=True, blobs=True, size=True, typ=False, ch=False, ch_online=True, name=True, title=False, path=False, sanitize=False, start=1, end=0, channel=None, invalid=False, reverse=False, file=None, fdate=False, sep=";", server="http://localhost:5279"): """Print a summary of the items downloaded from the LBRY network. Parameters ---------- show: str, optional It defaults to `'all'`, in which case it shows all items. If it is `'incomplete'` it will show claims that are missing blobs. If it is `'full'` it will show claims that have all blobs. If it is `'media'` it will show claims that have the media file (mp4, mp3, mkv, etc.). Normally only items that have all blobs also have a media file; however, if the claim is currently being downloaded a partial media file may be present. If it is `'missing'` it will show claims that don't have the media file, whether the full blobs are present or not. blocks: bool, optional It defaults to `False`, in which case it won't print the `height` block of the claims. If it is `True` it will print this value, which gives some idea of when the claim was registered in the blockchain. cid: bool, optional It defaults to `True`. Show the `'claim_id'` of the claim. It is a 40 character alphanumeric string. blobs: bool, optional It defaults to `True`. Show the number of blobs in the file, and how many are complete. size: bool, optional It defaults to `True`. Show the length of the stream in minutes and seconds, like `14:12`, when possible (audio and video), and also the size in mebibytes (MB). typ: bool, optional It defaults to `False`. Show the type of claim (video, audio, document, etc.) ch: bool, optional It defaults to `False`. Show the name of the channel that published the claim. This is slow if `ch_online=True`. ch_online: bool, optional It defaults to `True`, in which case it searches for the channel name by doing a reverse search of the item online. This makes the search slow. By setting it to `False` it will consider the channel name stored in the input dictionary itself, which will be faster but it won't be the full name of the channel. If no channel is found offline, then it will set a default value `'_None_'` just so it can be printed with no error. This parameter only has effect if `ch=True`, or if `channel` is used, as it internally sets `ch=True`. name: bool, optional It defaults to `True`. Show the name of the claim. title: bool, optional It defaults to `False`. Show the title of the claim. path: bool, optional It defaults to `False`. Show the full path of the saved media file. sanitize: bool, optional It defaults to `False`, in which case it will not remove the emojis from the name of the claim and channel. If it is `True` it will remove these unicode characters. This option requires the `emoji` package to be installed. start: int, optional It defaults to 1. Show claims starting from this index in the list of items. end: int, optional It defaults to 0. Show claims until and including this index in the list of items. If it is 0, it is the same as the last index in the list. channel: str, optional It defaults to `None`. It must be a channel's name, in which case it shows only the claims published by this channel. Using this parameter sets `ch=True`. invalid: bool, optional It defaults to `False`, in which case it prints every single claim previously downloaded. If it is `True` it will only print those claims that are 'invalid', that is, those that cannot be resolved anymore from the online database. 
This probably means that the author decided to remove the claims at some point after they were downloaded originally. This can be verified with the blockchain explorer, by following the claim ID for an 'unspent' transaction. Using this parameter sets `ch_online=False` as the channel name of invalid claims cannot be resolved online, only from the offline database. reverse: bool, optional It defaults to `False`, in which case older items come first in the output list. If it is `True` newer claims are at the beginning of the list. file: str, optional It defaults to `None`. It must be a writable path to which the summary will be written. Otherwise the summary will be printed to the terminal. fdate: bool, optional It defaults to `False`. If it is `True` it will add the date to the name of the summary file. sep: str, optional It defaults to `;`. It is the separator character between the data fields in the printed summary. Since the claim name can have commas, a semicolon `;` is used by default. server: str, optional It defaults to `'http://localhost:5279'`. This is the address of the `lbrynet` daemon, which should be running in your computer before using any `lbrynet` command. Normally, there is no need to change this parameter from its default value. Returns ------- bool It returns `True` if it printed the summary successfully. If there is any error it will return `False`. """ if not funcs.server_exists(server=server): return False output = sort.sort_items_size(reverse=False, invalid=invalid, server=server) items = output["claims"] if not items or len(items) < 1: if file: print("No file written.") return False if invalid: ch_online = False print() status = prnt.print_items(items=items, show=show, blocks=blocks, cid=cid, blobs=blobs, size=size, typ=typ, ch=ch, ch_online=ch_online, name=name, title=title, path=path, sanitize=sanitize, start=start, end=end, channel=channel, reverse=reverse, file=file, fdate=fdate, sep=sep, server=server) return status
5,342,538
def nice_number_en(number, speech, denominators=range(1, 21)):
    """ English helper for nice_number

    This function formats a float to human understandable fractions. Like
    4.5 becomes "4 and a half" for speech and "4 1/2" for text

    Args:
        number (int or float): the float to format
        speech (bool): format for speech (True) or display (False)
        denominators (iter of ints): denominators to use, default [1 .. 20]
    Returns:
        (str): The formatted string.
    """
    result = convert_to_mixed_fraction(number, denominators)
    if not result:
        # Give up, just represent as a 3 decimal number
        return str(round(number, 3))

    whole, num, den = result

    if not speech:
        if num == 0:
            # TODO: Number grouping? E.g. "1,000,000"
            return str(whole)
        else:
            return '{} {}/{}'.format(whole, num, den)

    if num == 0:
        return str(whole)
    den_str = _FRACTION_STRING_EN[den]
    if whole == 0:
        if num == 1:
            return_string = 'a {}'.format(den_str)
        else:
            return_string = '{} {}'.format(num, den_str)
    elif num == 1:
        return_string = '{} and a {}'.format(whole, den_str)
    else:
        return_string = '{} and {} {}'.format(whole, num, den_str)
    if num > 1:
        return_string += 's'
    return return_string
5,342,539
def test_unexpected_response(requests_mock_get, invalid_response):
    """
    Check that the corresponding exception is raised if the response body is unexpected
    """
    _, response = requests_mock_get
    response.status_code = 200
    response.json = lambda: invalid_response

    with raises(TemperatureSourceException):
        NoaaTemperatureSource.get_current_temperature(1.0, 2.0)
5,342,540
def read_dataframe_by_substring(directory, substring, index_col=None, parse_dates=False, **kwargs):
    """Return a dataframe for the file containing substring.

    Parameters
    ----------
    directory : str
    substring : str
        identifier for output file, must be unique in directory
    index_col : str | int | None
        Index column name or index
    kwargs : kwargs
        Passed to underlying library for dataframe conversion.

    Returns
    -------
    pd.DataFrame
    """
    files = [x for x in os.listdir(directory) if substring in x]
    # Exclude any files that may have rolled, such as
    # Circuits-Losses-1-2.feather.1.gz
    regex = re.compile(r"\.\w+\.\d+(?:\.\w+)?$")
    files = [x for x in files if regex.search(x) is None]

    if not files:
        return None

    assert len(files) == 1, f"found multiple {substring} files in {directory}"
    filename = files[0]
    return read_dataframe(
        os.path.join(directory, filename),
        index_col=index_col,
        parse_dates=parse_dates,
        **kwargs
    )
5,342,541
def load_embeddings(path):
    """
    Load embeddings from file and put into dict.

    :param path: path to embeddings file
    :return: a map word->embedding
    """
    logging.info('Loading embeddings...')
    embeddings = dict()
    with open(path, 'r') as f:
        for line in f:
            line = line.split(' ')
            embeddings[line[0]] = np.array([float(a) for a in line[1:]])
    return embeddings
5,342,542
def helm_preserve(preserve):
    """Convert secret data to a "--set" string for Helm deployments.

    Args:
        preserve (Iterable): Set of secrets we wish to get data from to assign
            to the Helm Chart.

    Returns:
        str: String containing variables to be set with Helm release.
    """
    env_vars = []
    for item in preserve:
        if isinstance(item, tuple):
            item = HelmPreserve(*item)
        elif not isinstance(item, HelmPreserve):
            raise TypeError("Items in preserve array must be HelmPreserve named tuples")
        secret_data = secret_read(item.secret_name, item.secret_namespace)
        env_vars.append(HelmSet(item.values_path, secret_data[item.data_item]))

    # Environmental variables
    # TODO: This may well be its own subfunction
    env_vars_string = "".join(
        [
            " --set{} {}={}".format(
                "-string" if item.set_string else "", item.key, item.value
            )
            for item in env_vars
        ]
    )

    return env_vars_string
5,342,543
def format_component_descriptor(name, version):
    """
    Return a properly formatted component 'descriptor' in the format
    <name>-<version>
    """
    return '{0}-{1}'.format(name, version)
5,342,544
def dbconn():
    """ Initializing db connection """
    sqlite_db_file = '/tmp/test_qbo.db'
    return sqlite3.connect(sqlite_db_file,
                           detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)
5,342,545
def md5(fname):
    """
    Compute the md5 of a file in chunks.
    Avoid running out of memory when hashing large files.
    """
    hash_md5 = hashlib.md5()
    with open(fname, "rb") as f:
        for chunk in iter(lambda: f.read(4096), b""):
            hash_md5.update(chunk)
    return hash_md5.hexdigest()
5,342,546
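A usage sketch for the chunked md5 helper above, hashing a small temporary file (the path is created only for the example).

import os
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b"hello world")
    path = tmp.name

print(md5(path))  # 5eb63bbbe01eeed093cb22bb8f5acdc3
os.remove(path)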
def _create_comments_revisions(connection, obj_type):
    """Creates delete revisions for comments.

    Args:
        connection: An instance of SQLAlchemy connection.
        obj_type: String representation of object type.
    """
    result = _get_comments_ids_by_obj_type(connection, obj_type)
    if result:
        result = [row[0] for row in result]
        utils.add_to_objects_without_revisions_bulk(
            connection,
            result,
            "Comment",
            action="deleted",
        )
5,342,547
def get_r(x, y, x1, y1):
    """
    Get r vector following Xu et al. (2006) Eq. 4.2
    x, y = arrays; x1, y1 = single points; or vice-versa
    """
    return ((x-x1)**2 + (y-y1)**2)**0.5
5,342,548
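A short example of get_r above: distances from a set of points to the origin, using the array-versus-point broadcasting described in the docstring.

import numpy as np

x = np.array([0.0, 3.0, 6.0])
y = np.array([0.0, 4.0, 8.0])
print(get_r(x, y, 0.0, 0.0))  # [ 0.  5. 10.]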
def test_pylintrc_file_toml(testdir):
    """Verify that pyproject.toml can be used as a pylint rc file."""
    rcfile = testdir.makefile(
        '.toml',
        pylint="""
        [tool.pylint.FORMAT]
        max-line-length = "3"
        """
    )
    testdir.makepyfile('import sys')
    result = testdir.runpytest(
        '--pylint', '--pylint-rcfile={0}'.format(rcfile.strpath)
    )

    # Parsing changed from integer to string in pylint >=2.5. Once
    # support is dropped <2.5 this is removable
    if 'should be of type int' in result.stdout.str():
        rcfile = testdir.makefile(
            '.toml',
            pylint="""
            [tool.pylint.FORMAT]
            max-line-length = 3
            """
        )
        result = testdir.runpytest(
            '--pylint', '--pylint-rcfile={0}'.format(rcfile.strpath)
        )

    assert 'Line too long (10/3)' in result.stdout.str()
5,342,549
def replace_empty_bracket(tokens):
    """
    Remove empty bracket
    :param tokens: List of tokens
    :return: Fixed sequence
    """
    merged = "".join(tokens)
    find = re.search(r"\{\}", merged)
    while find:
        merged = re.sub(r"\{\}", "", merged)
        find = re.search(r"\{\}", merged)
    return list(merged)
5,342,550
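A small demonstration of replace_empty_bracket above; nested empty braces collapse away over repeated substitution passes.

tokens = list("a{}b{{}}c")
print("".join(replace_empty_bracket(tokens)))  # abc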
def presentation():
    """
    This route is the final project and will be a test of all previously learned skills.
    """
    return render_template("")
5,342,551
def extra_credit(grades, students, bonus):
    """
    Returns a copy of grades with extra credit assigned

    The dictionary returned adds a bonus to the grade of every student
    whose netid is in the list students.

    Parameter grades: The dictionary of student grades
    Precondition: grades has netids as keys, ints as values.

    Parameter netids: The list of students to give extra credit
    Precondition: netids is a list of valid (string) netids

    Parameter bonus: The extra credit bonus to award
    Precondition: bonus is an int
    """
    # DICTIONARY COMPREHENSION
    # return { k:(grades[k]+bonus if k in students else grades[k]) for k in grades }

    # ACCUMULATOR PATTERN
    result = {}
    for k in grades:
        if k in students:
            result[k] = grades[k]+bonus
        else:
            result[k] = grades[k]
    return result
5,342,552
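A quick usage example of extra_credit above; only the listed netids receive the bonus, and the original dict is left untouched.

grades = {"abc1": 80, "def2": 90, "ghi3": 70}
print(extra_credit(grades, ["abc1", "ghi3"], 5))
# {'abc1': 85, 'def2': 90, 'ghi3': 75}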
def get_geo_signal_combos(data_source):
    """
    Get list of geo type-signal type combinations that we expect to see.

    Cross references based on combinations reported available by COVIDcast metadata.
    """
    meta = covidcast.metadata()
    source_meta = meta[meta['data_source'] == data_source]
    # Need to convert np.records to tuples so they are hashable and can be used in sets and dicts.
    geo_signal_combos = list(map(tuple,
                                 source_meta[["geo_type", "signal"]].to_records(index=False)))
    print("Number of expected geo region-signal combinations:", len(geo_signal_combos))
    return geo_signal_combos
5,342,553
def absolute_(x, track_types=True, **kwargs):
    """Compute the absolute value of x.

    Parameters
    ----------
    x : :obj:`xarray.DataArray`
        Data cube containing the values to apply the operator to.
    track_types : :obj:`bool`
        Should the operator promote the value type of the output object, based
        on the value type of the input object?
    **kwargs:
        Ignored.

    Returns
    -------
    :obj:`xarray.DataArray`
        A data cube with the same shape as ``x`` containing the results of all
        evaluated expressions.

    Note
    -----
    When tracking value types, this operator uses the following type promotion
    manual, with the keys being the supported value types of ``x``, and the
    corresponding value being the promoted value type of the output.

    .. exec_code::
        :hide_code:

        from semantique.processor.types import TYPE_PROMOTION_MANUALS
        obj = TYPE_PROMOTION_MANUALS["absolute"]
        obj.pop("__preserve_labels__")
        print(obj)
    """
    if track_types:
        promoter = TypePromoter(x, function="absolute")
        promoter.check()
    f = lambda x: np.absolute(x)
    out = xr.apply_ufunc(f, x)
    if track_types:
        out = promoter.promote(out)
    return out
5,342,554
def upconv(path):
    """Check a 24bit FLAC file for upconversion"""
    if os.path.isfile(path):
        _upconvert_check_handler(path)
    elif os.path.isdir(path):
        for root, _, figles in os.walk(path):
            for f in figles:
                if f.lower().endswith(".flac"):
                    filepath = os.path.join(root, f)
                    click.secho(f"\nChecking {filepath}...", fg="cyan")
                    _upconvert_check_handler(filepath)
5,342,555
def any_input(sys_, t, input_signal=0, init_cond=None, *, plot=True):
    """
    Accept any input signal, then calculate the response of the system.

    :param sys_: the system
    :type sys_: TransferFunction | StateSpace
    :param t: time
    :type t: array_like
    :param input_signal: input signal accepted by the system
    :type input_signal: numbers.Real | np.ndarray
    :param init_cond: initial condition of the system
    :type init_cond: None | numbers.Real | np.ndarray
    :param plot: If plot is True, it will show the response curve.
    :type plot: bool
    :return: system output and time array
    :rtype: tuple[np.ndarray, np.ndarray]
    """
    if isinstance(sys_, TransferFunction):
        sys_ = tf2ss(sys_)

    u = _setup_input_signal(input_signal, t, sys_.inputs)
    y, t = _any_input(sys_, t, u, init_cond)

    if plot:
        plot_response_curve(y, t, "response", sys_.is_ctime)

    return y, t
5,342,556
def get_combinations(suite_dir, fields, subset, limit, filter_in, filter_out, include_facet): """ Describes the combinations of a suite, optionally limiting or filtering output based on the given parameters. Includes columns for the subsuite and facets when include_facet is True. Returns a tuple of (headers, rows) where both elements are lists of strings. """ configs = [(combine_path(suite_dir, item[0]), item[1]) for item in build_matrix(suite_dir, subset)] num_listed = 0 rows = [] facet_headers = set() dirs = {} max_dir_depth = 0 for _, fragment_paths in configs: if limit > 0 and num_listed >= limit: break if filter_in and not any([f in path for f in filter_in for path in fragment_paths]): continue if filter_out and any([f in path for f in filter_out for path in fragment_paths]): continue fragment_fields = [extract_info(path, fields) for path in fragment_paths] # merge fields from multiple fragments by joining their values with \n metadata = {} for fragment_meta in fragment_fields: for field, value in fragment_meta.items(): if value == '': continue if field in metadata: metadata[field] += '\n' + str(value) else: metadata[field] = str(value) if include_facet: # map final dir (facet) -> filename without the .yaml suffix for path in fragment_paths: facet_dir = os.path.dirname(path) facet = os.path.basename(facet_dir) metadata[facet] = os.path.basename(path)[:-5] facet_headers.add(facet) facet_dirs = facet_dir.split('/')[:-1] for i, dir_ in enumerate(facet_dirs): if i not in dirs: dirs[i] = set() dirs[i].add(dir_) metadata['_dir_' + str(i)] = os.path.basename(dir_) max_dir_depth = max(max_dir_depth, i) rows.append(metadata) num_listed += 1 subsuite_headers = [] if include_facet: first_subsuite_depth = max_dir_depth for i in range(max_dir_depth): if len(dirs[i]) > 1: first_subsuite_depth = i break subsuite_headers = ['subsuite depth ' + str(i) for i in range(0, max_dir_depth - first_subsuite_depth + 1)] for row in rows: for i in range(first_subsuite_depth, max_dir_depth + 1): row[subsuite_headers[i - first_subsuite_depth]] = \ row.get('_dir_' + str(i), '') headers = subsuite_headers + sorted(facet_headers) + fields return headers, sorted([[row.get(field, '') for field in headers] for row in rows])
5,342,557
def cli(ctx, dry_run, stack_resources, stack_exports):
    """Print stack status and resources.

    Also includes parameters, resources, outputs & exports."""
    # shortcut if we only print stack key (and names)
    if dry_run:
        for context in ctx.obj.runner.contexts:
            ctx.obj.ppt.secho(context.stack_key, bold=True)
        return

    options = StackStatusOptions(
        dry_run=dry_run,
        stack_resources=stack_resources,
        stack_exports=stack_exports,
    )
    command = StackStatusCommand(
        pretty_printer=ctx.obj.ppt,
        options=options
    )
    ctx.obj.runner.run(command)
5,342,558
def search(api: ApiClient, context: UserContext, search_string, as_csv):
    """
    Search for a user
    """
    if as_csv:
        fieldnames = ['id', 'title_before', 'first_name', 'last_name', 'title_after', 'avatar_url']
        csv_writer = csv.DictWriter(sys.stdout, fieldnames=fieldnames)
        csv_writer.writeheader()

    instances_ids = api.get_user(context.user_id)["privateData"]["instancesIds"]
    for instance_id in instances_ids:
        for user in api.search_users(instance_id, search_string):
            if as_csv:
                csv_writer.writerow(format_user_csv(user))
            else:
                click.echo("{} {}".format(user["fullName"], user["id"]))
5,342,559
def mnext_mbv2_cfg(pretrained=False, in_chans=3, drop_rate=0.2, drop_connect_rate=0.5,
                   bn_tf=False, bn_momentum=0.9, bn_eps=0.001, global_pool=False, **kwargs):
    """Creates a MNeXt Large model. Tensorflow compatible variant
    """
    from .mnext import mnext
    model = mnext(**kwargs)
    return model
5,342,560
def _embeddings_from_arguments(column, args, weight_collections, trainable, output_rank=2): """Returns embeddings for a column based on the computed arguments. Args: column: the column name. args: the _DeepEmbeddingLookupArguments for this column. weight_collections: collections to store weights in. trainable: whether these embeddings should be trainable. output_rank: the desired rank of the returned `Tensor`. Inner dimensions will be combined to produce the desired rank. Returns: the embeddings. Raises: ValueError: if not possible to create. """ # pylint: disable=protected-access input_tensor = layers._inner_flatten(args.input_tensor, output_rank) weight_tensor = None if args.weight_tensor is not None: weight_tensor = layers._inner_flatten(args.weight_tensor, output_rank) # pylint: enable=protected-access # This option is only enabled for scattered_embedding_column. if args.hash_key: embeddings = contrib_variables.model_variable( name="weights", shape=[args.vocab_size], dtype=dtypes.float32, initializer=args.initializer, trainable=(trainable and args.trainable), collections=weight_collections) return embedding_ops.scattered_embedding_lookup_sparse( embeddings, input_tensor, args.dimension, hash_key=args.hash_key, combiner=args.combiner, name="lookup") if args.shared_embedding_name is not None: shared_embedding_collection_name = ( "SHARED_EMBEDDING_COLLECTION_" + args.shared_embedding_name.upper()) graph = ops.get_default_graph() shared_embedding_collection = ( graph.get_collection_ref(shared_embedding_collection_name)) shape = [args.vocab_size, args.dimension] if shared_embedding_collection: if len(shared_embedding_collection) > 1: raise ValueError( "Collection %s can only contain one " "(partitioned) variable." % shared_embedding_collection_name) else: embeddings = shared_embedding_collection[0] if embeddings.get_shape() != shape: raise ValueError( "The embedding variable with name {} already " "exists, but its shape does not match required " "embedding shape here. Please make sure to use " "different shared_embedding_name for different " "shared embeddings.".format(args.shared_embedding_name)) else: embeddings = contrib_variables.model_variable( name=args.shared_embedding_name, shape=shape, dtype=dtypes.float32, initializer=args.initializer, trainable=(trainable and args.trainable), collections=weight_collections) graph.add_to_collection(shared_embedding_collection_name, embeddings) else: embeddings = contrib_variables.model_variable( name="weights", shape=[args.vocab_size, args.dimension], dtype=dtypes.float32, initializer=args.initializer, trainable=(trainable and args.trainable), collections=weight_collections) if _is_variable(embeddings): embeddings = [embeddings] else: embeddings = embeddings._get_variable_list() # pylint: disable=protected-access # pylint: disable=protected-access _maybe_restore_from_checkpoint(column._checkpoint_path(), embeddings) return embedding_ops.safe_embedding_lookup_sparse( embeddings, input_tensor, sparse_weights=weight_tensor, combiner=args.combiner, name=column.name + "weights", max_norm=args.max_norm)
5,342,561
def stuw_laagstedoorstroombreedte(damo_gdf=None, obj=None,
                                  damo_doorstroombreedte="DOORSTROOMBREEDTE",
                                  damo_kruinvorm="WS_KRUINVORM"):
    """
    If LAAGSTEDOORSTROOMHOOGTE is NULL and WS_KRUINVORM = 3 (rectangle),
    then LAAGSTEDOORSTROOMBREEDTE = DOORSTROOMBREEDTE.
    """
    return damo_gdf.apply(
        lambda x: _stuw_get_laagstedoorstroombreedte_rechthoek(x[damo_kruinvorm],
                                                               x[damo_doorstroombreedte]),
        axis=1)
5,342,562
def manage_categories():
    """
    Display all categories to manage categories page (admin only)
    """
    # Denied user access to manage_categories page
    if session["user"] != "admin":
        return redirect(url_for('error', code=403))

    # query for all categories from categories collection
    manage_categories = list(mongo.db.categories.find().sort(
        "category_name", 1))

    # get the categories that are in use for navigation menu
    nav_categories = mongo.db.recipes.distinct("category_name")

    # call the paginated function to display only the
    # specific number of categories per page
    paginated_categories = paginated(manage_categories)

    # get the page pagination
    pagination = get_pagination(manage_categories)

    # total number of categories found
    total = len(manage_categories)

    # set up the page_set object
    page_set = {
        "title": "Manage Categories",
        "type": "form"
    }

    return render_template("pages/manage_categories.html",
                           page_set=page_set,
                           nav_categories=nav_categories,
                           manage_categories=paginated_categories,
                           pagination=pagination,
                           total=total)
5,342,563
def callback(id):
    """ Get the specified record """
    # Check user permissions
    _common_logic.check_user_power()
    _positions_logic = positions_logic.PositionsLogic()
    # Read the record
    result = _positions_logic.get_model_for_cache(id)
    if result:
        # Return the result directly as JSON
        return web_helper.return_msg(0, '成功', result)
    else:
        return web_helper.return_msg(-1, "查询失败")
5,342,564
def setup_exps_rllib(flow_params, n_cpus, n_rollouts):
    """Return the relevant components of an RLlib experiment.

    Parameters
    ----------
    flow_params : dict
        flow-specific parameters (see flow/utils/registry.py)
    n_cpus : int
        number of CPUs to run the experiment over
    n_rollouts : int
        number of rollouts per training iteration

    Returns
    -------
    str
        name of the training algorithm
    str
        name of the gym environment to be trained
    dict
        training configuration parameters
    """
    horizon = flow_params['env'].horizon

    alg_run = "PPO"

    agent_cls = get_agent_class(alg_run)
    config = deepcopy(agent_cls._default_config)

    config["num_workers"] = n_cpus
    config["num_cpus_per_worker"] = 1
    config["use_pytorch"] = False
    config["num_gpus"] = 0
    config["train_batch_size"] = horizon * n_rollouts
    config["gamma"] = 0.999  # discount rate
    # config["model"].update({"fcnet_hiddens": [32, 32, 32]})
    config["use_gae"] = True
    config["lambda"] = 0.97
    config["kl_target"] = 0.02
    config["num_sgd_iter"] = 10
    config['clip_actions'] = True  # FIXME(ev) temporary ray bug
    config["horizon"] = horizon
    config["callbacks"] = MyCallbacks

    # save the flow params for replay
    flow_json = json.dumps(
        flow_params, cls=FlowParamsEncoder, sort_keys=True, indent=4)
    config['env_config']['flow_params'] = flow_json
    config['env_config']['run'] = alg_run

    create_env, gym_name = make_create_env(params=flow_params)

    # Register as rllib env
    register_env(gym_name, create_env)
    return alg_run, gym_name, config
5,342,565
def sort_cluster(x: list, t: np.ndarray) -> list:
    """
    Sort x according to t.

    :param x: list of items to reorder
    :param t: array of sort keys, one per item in x
    :return: the items of x reordered by ascending value of t
    """
    return [x[i] for i in np.argsort(t)]
5,342,566
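A minimal usage sketch for sort_cluster defined above; the labels and scores are made up for illustration.

import numpy as np

labels = ["b", "c", "a"]
scores = np.array([2.0, 3.0, 1.0])

# Items are reordered by ascending score: ['a', 'b', 'c']
print(sort_cluster(labels, scores))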
def virtualenv(ctx: DoctorContext): """Check that we're in the correct virtualenv.""" try: venv_path = pathlib.Path(os.environ['VIRTUAL_ENV']).resolve() except KeyError: ctx.error('VIRTUAL_ENV not set') return # When running in LUCI we might not have gone through the normal environment # setup process, so we need to skip the rest of this step. if 'LUCI_CONTEXT' in os.environ: return var = 'PW_ROOT' if '_PW_ACTUAL_ENVIRONMENT_ROOT' in os.environ: var = '_PW_ACTUAL_ENVIRONMENT_ROOT' root = pathlib.Path(os.environ[var]).resolve() if root not in venv_path.parents: ctx.error('VIRTUAL_ENV (%s) not inside %s (%s)', venv_path, var, root) ctx.error('\n'.join(os.environ.keys()))
5,342,567
def gettof(*args): """gettof(flags_t F) -> ushort""" return _idaapi.gettof(*args)
5,342,568
def test_parse_new_order(): """Test parsing raw new order data.""" args = { "limit_price": 1, "max_quantity": 2, "client_id": 3, "side": Side.Sell, "order_type": OrderType.PostOnly, } expected = bytes.fromhex( "00010000000100000001000000000000000200000000000000020000000300000000000000" ) # Raw hex from serum.js assert INSTRUCTIONS_LAYOUT.build(dict(instruction_type=InstructionType.NewOrder, args=args)) == expected assert_parsed_layout(InstructionType.NewOrder, args, expected)
5,342,569
def generate(temp):
    """
    Wrapper that checks generated names against the base street names to
    avoid a direct regurgitation of the input data.

    Returns a list.
    """
    is_in_dict = True
    while is_in_dict:
        result = textgen.generate(temperature=temp, return_as_list=True)
        # Join the generated tokens into a single candidate name
        # (renamed from ``str`` to avoid shadowing the built-in).
        name = ' '.join(result)
        is_in_dict = basenames.get(name, False)
    return result
5,342,570
def __create_pyramid_features(backbone_dict, ndim=2, feature_size=256,
                              include_final_layers=True,
                              lite=False,
                              upsample_type='upsamplelike',
                              interpolation='bilinear',
                              z_axis_convolutions=False):
    """Creates the FPN layers on top of the backbone features.

    Args:
        backbone_dict (dictionary): A dictionary of the backbone layers, with
            the names as keys, e.g. ``{'C0': C0, 'C1': C1, 'C2': C2, ...}``
        feature_size (int): The feature size to use for
            the resulting feature levels.
        include_final_layers (bool): Add two coarser pyramid levels
        ndim (int): The spatial dimensions of the input data.
            Must be either 2 or 3.
        lite (bool): Whether to use depthwise conv instead of regular conv for
            feature pyramid construction
        upsample_type (str): Choice of upsampling methods
            from ``['upsamplelike','upsampling2d','upsampling3d']``.
        interpolation (str): Choice of interpolation mode for upsampling
            layers from ``['bilinear', 'nearest']``.
        z_axis_convolutions (bool): Whether to use convolutions over the
            z-axis for 3D data (passed through to ``create_pyramid_level``).

    Returns:
        dict: The feature pyramid names and levels,
            e.g. ``{'P3': P3, 'P4': P4, ...}``
            Each backbone layer gets a pyramid level, and two additional
            levels are added, e.g. ``[C3, C4, C5]`` --> ``[P3, P4, P5, P6, P7]``

    Raises:
        ValueError: ``ndim`` is not 2 or 3
        ValueError: ``upsample_type`` not in
            ``['upsamplelike','upsampling2d', 'upsampling3d']``
    """
    # Check input to ndims
    acceptable_ndims = [2, 3]
    if ndim not in acceptable_ndims:
        raise ValueError('Only 2 and 3 dimensional networks are supported')

    # Check if inputs to ndim and lite are compatible
    if ndim == 3 and lite:
        raise ValueError('lite models are not compatible with 3 dimensional '
                         'networks')

    # Check input to interpolation
    acceptable_interpolation = {'bilinear', 'nearest'}
    if interpolation not in acceptable_interpolation:
        raise ValueError('Interpolation mode "{}" not supported. '
                         'Choose from {}.'.format(
                             interpolation, list(acceptable_interpolation)))

    # Check input to upsample_type
    acceptable_upsample = {'upsamplelike', 'upsampling2d', 'upsampling3d'}
    if upsample_type not in acceptable_upsample:
        raise ValueError('Upsample method "{}" not supported. '
                         'Choose from {}.'.format(
                             upsample_type, list(acceptable_upsample)))

    # Get names of the backbone levels and place in ascending order
    backbone_names = get_sorted_keys(backbone_dict)
    backbone_features = [backbone_dict[name] for name in backbone_names]

    pyramid_names = []
    pyramid_finals = []
    pyramid_upsamples = []

    # Reverse lists
    backbone_names.reverse()
    backbone_features.reverse()

    for i, N in enumerate(backbone_names):
        level = int(re.findall(r'\d+', N)[0])
        pyramid_names.append('P{}'.format(level))

        backbone_input = backbone_features[i]

        # Don't add for the bottom of the pyramid
        if i == 0:
            if len(backbone_features) > 1:
                upsamplelike_input = backbone_features[i + 1]
            else:
                upsamplelike_input = None
            addition_input = None

        # Don't upsample for the top of the pyramid
        elif i == len(backbone_names) - 1:
            upsamplelike_input = None
            addition_input = pyramid_upsamples[-1]

        # Otherwise, add and upsample
        else:
            upsamplelike_input = backbone_features[i + 1]
            addition_input = pyramid_upsamples[-1]

        pf, pu = create_pyramid_level(backbone_input,
                                      upsamplelike_input=upsamplelike_input,
                                      addition_input=addition_input,
                                      upsample_type=upsample_type,
                                      level=level,
                                      ndim=ndim,
                                      lite=lite,
                                      interpolation=interpolation,
                                      z_axis_convolutions=z_axis_convolutions)
        pyramid_finals.append(pf)
        pyramid_upsamples.append(pu)

    # Add the final two pyramid layers
    if include_final_layers:
        # "Second to last pyramid layer is obtained via a
        # 3x3 stride-2 conv on the coarsest backbone"
        N = backbone_names[0]
        F = backbone_features[0]
        level = int(re.findall(r'\d+', N)[0]) + 1
        P_minus_2_name = 'P{}'.format(level)

        if ndim == 2:
            P_minus_2 = Conv2D(feature_size, kernel_size=(3, 3),
                               strides=(2, 2), padding='same',
                               name=P_minus_2_name)(F)
        else:
            P_minus_2 = Conv3D(feature_size, kernel_size=(1, 3, 3),
                               strides=(1, 2, 2), padding='same',
                               name=P_minus_2_name)(F)

        pyramid_names.insert(0, P_minus_2_name)
        pyramid_finals.insert(0, P_minus_2)

        # "Last pyramid layer is computed by applying ReLU
        # followed by a 3x3 stride-2 conv on second to last layer"
        level = int(re.findall(r'\d+', N)[0]) + 2
        P_minus_1_name = 'P{}'.format(level)
        P_minus_1 = Activation('relu', name='{}_relu'.format(N))(P_minus_2)

        if ndim == 2:
            P_minus_1 = Conv2D(feature_size, kernel_size=(3, 3),
                               strides=(2, 2), padding='same',
                               name=P_minus_1_name)(P_minus_1)
        else:
            P_minus_1 = Conv3D(feature_size, kernel_size=(1, 3, 3),
                               strides=(1, 2, 2), padding='same',
                               name=P_minus_1_name)(P_minus_1)

        pyramid_names.insert(0, P_minus_1_name)
        pyramid_finals.insert(0, P_minus_1)

    pyramid_dict = dict(zip(pyramid_names, pyramid_finals))

    return pyramid_dict
5,342,571
def graphviz(self, filename=None, directory=None, isEdge=False,showLabel=True, **kwargs): """Return graphviz source for visualizing the lattice graph.""" return lattice(self, filename, directory, isEdge, showLabel, **kwargs)
5,342,572
def plotDecisionBoundary(theta, X, y, Lambda):
    """
    Plots the data points X and y into a new figure with the decision boundary
    defined by theta.

    PLOTDECISIONBOUNDARY(theta, X, y) plots the data points with + for the
    positive examples and o for the negative examples. X is assumed to be
    either
        1) an Mx3 matrix, where the first column is an all-ones column for
           the intercept.
        2) an MxN, N>3 matrix, where the first column is all-ones.
    """
    # Plot Data
    plt.figure()
    plotData(X[:, 1:], y)

    if X.shape[1] <= 3:
        # Only need 2 points to define a line, so choose two endpoints
        plot_x = np.array([min(X[:, 2]), max(X[:, 2])])

        # Calculate the decision boundary line
        plot_y = (-1. / theta[2]) * (theta[1] * plot_x + theta[0])

        # Plot, and adjust axes for better viewing
        plt.plot(plot_x, plot_y)
    else:
        xvals = np.linspace(-1, 1.5, 50)
        yvals = np.linspace(-1, 1.5, 50)
        zvals = np.zeros((len(xvals), len(yvals)))
        for i in range(len(xvals)):
            for j in range(len(yvals)):
                myfeaturesij = mapFeature(np.array([xvals[i]]), np.array([yvals[j]]))
                zvals[i][j] = np.dot(theta.flatten(), myfeaturesij.T)
        zvals = zvals.transpose()

        u, v = np.meshgrid(xvals, yvals)
        mycontour = plt.contour(xvals, yvals, zvals, [0])
        # Kind of a hacky way to display a text on top of the decision boundary
        myfmt = {0: 'Lambda = %d' % Lambda}
        plt.clabel(mycontour, inline=1, fontsize=15, fmt=myfmt)
        plt.title("Decision Boundary")
        # plt.show()
5,342,573
def test_get_tot_pop(): """Testing """ scenario_drivers = {'heating': ['population']} classobject1 = dw_stock.Dwelling( 2015, {'longitude': 10, 'latitude': 10}, 1000, ['heating'], scenario_drivers, population=2.2 ) classobject2 = dw_stock.Dwelling( 2015, {'longitude': 10, 'latitude': 10}, 1000, ['heating'], scenario_drivers ) dwellings = [classobject1, classobject1] dwellings2 = [classobject2, classobject2] dw_stock_object = dw_stock.DwellingStock(dwellings, ['heating']) dw_stock_object2 = dw_stock.DwellingStock(dwellings2, ['heating']) expected = 4.4 expected2 = None # call function out_value = dw_stock.get_tot_pop(dw_stock_object.dwellings) out_value2 = dw_stock.get_tot_pop(dw_stock_object2.dwellings) assert out_value == expected assert out_value2 == expected2
5,342,574
def get_rectangle(origin, end): """Return all points of rectangle contained by origin and end.""" size_x = abs(origin[0]-end[0])+1 size_y = abs(origin[1]-end[1])+1 rectangle = [] for x in range(size_x): for y in range(size_y): rectangle.append((origin[0]+x, origin[1]+y)) return rectangle
5,342,575
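A minimal usage sketch for get_rectangle defined above, assuming end lies up and to the right of origin as the implementation expects; the coordinates are made up for illustration.

# All integer points spanned by (0, 0) and (1, 2):
# [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]
print(get_rectangle((0, 0), (1, 2)))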
def corr_list(df, target, thresh=0.1, sort=True, fill=True):
    """
    List Most Correlated Features

    Returns a pandas Series with the features most correlated to a certain
    target variable. The function will return features with a correlation
    value bigger than some threshold, which can be adjusted.

    Parameters
    ----------
    df : pandas DataFrame
        `df` must contain only numerical values.
    target : str or int
        String or integer indicating the target variable.
    thresh : float, optional
        Float indicating the minimum absolute correlation between a feature
        and the target above which the feature will be present in the
        returned list. Default value is 0.1.
    sort : bool, optional
        Whether to sort the returned pandas Series. If True, it will be
        sorted descending. Default value is True.
    fill : bool, optional
        Whether to fill null values. If True, null values will be replaced
        with 0's. Default value is True.

    Returns
    -------
    pandas Series (or an empty list if no feature passes the threshold)
    """
    if fill:
        interest = df.corr().fillna(0)[target]
    else:
        interest = df.corr()[target]
    interest = interest[np.abs(interest) > thresh]
    if len(interest) > 0:
        if sort:
            return interest.sort_values(ascending=False)
        else:
            return interest
    else:
        return []
5,342,576
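A minimal usage sketch for corr_list defined above; the toy DataFrame and column names are made up for illustration.

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
df = pd.DataFrame({"a": rng.normal(size=100), "b": rng.normal(size=100)})
df["y"] = 2 * df["a"] + rng.normal(scale=0.1, size=100)

# Features whose absolute correlation with 'y' exceeds 0.5, sorted descending.
print(corr_list(df, target="y", thresh=0.5))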
def test_positive_change_license_status(assignment_type, license_handler, assignment_handler, create_environment_for_assignment_type): """Positive test: test all valid status changes (+ if status was set correct)""" object_type = _ASSIGNMENT_TYPE_TO_OBJECT_TYPE[assignment_type] with utu.UCSTestSchool() as schoolenv: object_name, _, license = create_environment_for_assignment_type(schoolenv, assignment_type) udm_license = license_handler.get_udm_license_by_code(license.license_code) # assign success = assignment_handler.assign_license(object_type=object_type, object_name=object_name, license=udm_license) assert success is True object_uuid = assignment_handler._get_object_uuid_by_name(object_type, object_name) [assignment] = assignment_handler.get_all_assignments_for_uuid( assignee_uuid=object_uuid, base=udm_license.dn) assert assignment.status == Status.ASSIGNED # change 1: ASSIGNED -> AVAILABLE success = assignment_handler.change_license_status(license_code=license.license_code, object_type=object_type, object_name=object_name, status=Status.AVAILABLE) assert success is True assignments = assignment_handler.get_all_assignments_for_uuid( assignee_uuid=object_uuid, base=udm_license.dn) assert len(assignments) == 0 # reassign success = assignment_handler.assign_license(object_type=object_type, object_name=object_name, license=udm_license) assert success is True [assignment] = assignment_handler.get_all_assignments_for_uuid( assignee_uuid=object_uuid, base=udm_license.dn) assert assignment.status == Status.ASSIGNED # change 2: ASSIGNED -> PROVISIONED success = assignment_handler.change_license_status(license_code=license.license_code, object_type=object_type, object_name=object_name, status=Status.PROVISIONED) assert success is True [assignment] = assignment_handler.get_all_assignments_for_uuid( assignee_uuid=object_uuid, base=udm_license.dn) assert assignment.status == Status.PROVISIONED
5,342,577
def compute_epsilon(steps): """Computes epsilon value for given hyperparameters.""" if FLAGS.noise_multiplier == 0.0: return float('inf') orders = [1 + x / 10. for x in range(1, 100)] + list(range(12, 64)) sampling_probability = FLAGS.batch_size / NUM_TRAIN_EXAMPLES rdp = compute_rdp(q=sampling_probability, noise_multiplier=FLAGS.noise_multiplier, steps=steps, orders=orders) # Delta is set to 1e-5 because MNIST has 60000 training points. return get_privacy_spent(orders, rdp, target_delta=1e-5)[0]
5,342,578
def get_native_includes(object):
    """
    After method association, check which native types an object uses and
    return a corresponding set of include files.

    This will also add the include needed for inheritance.
    """
    includes = set()
    for proc in object.procs:
        for argname, arg in proc.args.items():
            if arg.native:
                includes.add(arg.type.type)
            if arg.type.matrix and not opts.no_fmat:
                includes.add(matrix_classname)
            if arg.type.type == 'CHARACTER' and arg.intent != 'in':
                if opts.std_string:
                    # The use of angle brackets is handled specially
                    # in the output code
                    includes.add('<string>')
                else:
                    includes.add(string_classname)
        if proc.retval and proc.retval.type.dt and proc.retval.pointer:
            includes.add(proc.retval.type.type)
    # For inheritance:
    if object.extends:
        includes.add(object.extends)
    return includes
5,342,579
def libraries_data_path(): """ Path to Packages/User/Deviot/pio/libraries.json """ user_data = user_pio_path() return path.join(user_data, 'libraries.json')
5,342,580
def dice_coeff(input, target): """Dice coeff for batches""" if input.is_cuda: s = torch.FloatTensor(1).to(device_f).zero_() else: s = torch.FloatTensor(1).zero_() for i, c in enumerate(zip(input, target)): s = s + DiceCoeff().forward(c[0], c[1]) return s / (i + 1)
5,342,581
def group_error_rates(labels, predictions, groups): """Returns a list containing error rates for each protected group.""" errors = [] for jj in range(groups.shape[1]): if groups[:, jj].sum() == 0: # Group is empty? errors.append(0.0) else: signed_labels_jj = 2 * labels[groups[:, jj] == 1] - 1 predictions_jj = predictions[groups[:, jj] == 1] errors.append(np.mean(signed_labels_jj * predictions_jj <= 0)) return errors
5,342,582
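A minimal usage sketch for group_error_rates defined above; labels are binary, predictions are signed scores, and groups is a one-hot membership matrix, all made up for illustration.

import numpy as np

labels = np.array([1, 0, 1, 0])
predictions = np.array([0.5, -0.5, -0.5, 0.5])
groups = np.array([[1, 0],
                   [1, 0],
                   [0, 1],
                   [0, 1]])

# Group 0 is classified correctly, group 1 is not: [0.0, 1.0]
print(group_error_rates(labels, predictions, groups))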
def lnprior(theta): """ Parameters ---------- theta : np.ndarray Array of parameters. Returns ------- Value of log-prior. """ pass
5,342,583
def get_emails_by_user_names(user_names): """Get emails by user names.""" emails_service = emails_digest_service.DailyEmailsService() emails_service.open_emails_digest() user_emails_dict = dict.fromkeys(user_names) for user_name in user_names: user_emails_dict[user_name] = emails_service.get_email_by_user_name( user_name) return user_emails_dict
5,342,584
def get_match_results(depc, qaid_list, daid_list, score_list, config): """ converts table results into format for ipython notebook """ # qaid_list, daid_list = request.get_parent_rowids() # score_list = request.score_list # config = request.config unique_qaids, groupxs = ut.group_indices(qaid_list) # grouped_qaids_list = ut.apply_grouping(qaid_list, groupxs) grouped_daids = ut.apply_grouping(daid_list, groupxs) grouped_scores = ut.apply_grouping(score_list, groupxs) ibs = depc.controller unique_qnids = ibs.get_annot_nids(unique_qaids) # scores _iter = zip(unique_qaids, unique_qnids, grouped_daids, grouped_scores) for qaid, qnid, daids, scores in _iter: dnids = ibs.get_annot_nids(daids) # Remove distance to self annot_scores = np.array(scores) daid_list_ = np.array(daids) dnid_list_ = np.array(dnids) is_valid = daid_list_ != qaid daid_list_ = daid_list_.compress(is_valid) dnid_list_ = dnid_list_.compress(is_valid) annot_scores = annot_scores.compress(is_valid) # Hacked in version of creating an annot match object match_result = wbia.AnnotMatch() match_result.qaid = qaid match_result.qnid = qnid match_result.daid_list = daid_list_ match_result.dnid_list = dnid_list_ match_result._update_daid_index() match_result._update_unique_nid_index() grouped_annot_scores = vt.apply_grouping(annot_scores, match_result.name_groupxs) name_scores = np.array([np.sum(dists) for dists in grouped_annot_scores]) match_result.set_cannonical_name_score(annot_scores, name_scores) yield match_result
5,342,585
def inbound_and_outbound_node_sets(C, CT): """ Returns the set of nodes that can reach an event and can be reached by an event, and the difference between those sets (outbound / inbound). """ inbound = defaultdict(set) for node, event in zip(*np.nonzero(C)): inbound[event].add(node) outbound = defaultdict(set) for node, event in zip(*np.nonzero(CT)): outbound[event].add(node) difference = {} for event, in_nodes in inbound.items(): difference[event] = outbound[event] - in_nodes return inbound, outbound, difference
5,342,586
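A minimal usage sketch for inbound_and_outbound_node_sets defined above, assuming both C and CT are indexed as [node, event]; the toy matrices are made up for illustration.

import numpy as np

C = np.array([[1, 0],    # node 0 can reach event 0
              [1, 1]])   # node 1 can reach events 0 and 1
CT = np.array([[1, 1],   # node 0 can be reached by events 0 and 1
               [0, 1]])  # node 1 can be reached by event 1

inbound, outbound, difference = inbound_and_outbound_node_sets(C, CT)
# inbound:    {0: {0, 1}, 1: {1}}
# outbound:   {0: {0}, 1: {0, 1}}
# difference: {0: set(), 1: {0}}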
def policy(Q):
    """Hard max over prescriptions

    Params:
    ------
    * Q: dictionary of dictionaries
        Nested dictionary representing a table

    Returns:
    -------
    * policy: dictionary mapping each state to its highest-valued action
    """
    pol = {}
    for s in Q:
        pol[s] = max(Q[s].items(), key=lambda x: x[1])[0]
    return pol
5,342,587
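A minimal usage sketch for policy defined above; the Q-table is made up for illustration.

Q = {
    "s0": {"left": 0.1, "right": 0.7},
    "s1": {"left": 0.4, "right": 0.2},
}

# The greedy policy picks the highest-valued action per state:
# {'s0': 'right', 's1': 'left'}
print(policy(Q))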
def test_should_generate(fixture, color, result, expected): """Only return True if existing badge needs updating""" output = os.path.join(FIXTURES, "default-style", fixture) actual = badge_gen.should_generate_badge(output, color, result) assert actual is expected
5,342,588
def main(argv=sys.argv) -> None: # pragma: no cover """Run type coverage check.""" parser = argparse.ArgumentParser( usage=("python type_coverage.py coverage=80 file=typecov/linecount.txt \n") ) parser.add_argument( "coverage", type=float, metavar="<coverage>", help="Minimum required type coverage.", ) parser.add_argument( "file", type=str, metavar="<file>", help="File with line count type coverage report.", ) args = parser.parse_args() report = Path(args.file) min_coverage = args.coverage if not report.is_file(): sys.stdout.write(f"ERROR Line count report file not found on: {report}\n") sys.exit(1) with open(report) as f: coverage_summary = f.readline() if not coverage_summary: sys.stdout.write(f"ERROR Line count report file {report} is empty.\n") sys.exit(1) values = coverage_summary.split() coverage = int(values[0]) / int(values[1]) * 100 if coverage >= min_coverage: sys.stdout.write(f"Total coverage: {coverage}%\n") sys.exit(0) else: sys.stdout.write( f"FAIL Required type coverage of {min_coverage}% not reached. Total coverage: {coverage}%\n" ) sys.exit(1)
5,342,589
def fft(series):
    """
    FFT of a series.

    Parameters
    ----------
    series : pandas Series with a time index (signal values indexed by time)

    Returns
    -------
    frequencies : np.ndarray of frequencies [Hz]
    dft : np.ndarray with the magnitude of the discrete Fourier transform
    """
    signal = series.values
    time = series.index
    dt = np.mean(np.diff(time))

    # n = 11 * len(time)
    n = 50000
    frequencies = np.fft.rfftfreq(n=n, d=dt)  # [Hz]
    dft = np.abs(np.fft.rfft(signal, n=n))
    return frequencies, dft
5,342,590
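A minimal usage sketch for fft defined above; the 5 Hz test signal and sampling rate are made up for illustration.

import numpy as np
import pandas as pd

t = np.arange(0, 2, 0.01)                        # 2 s sampled at 100 Hz
series = pd.Series(np.sin(2 * np.pi * 5 * t), index=t)

frequencies, dft = fft(series)
print(frequencies[np.argmax(dft)])               # peak close to 5.0 Hz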
def local_variance(V, tsize=5):
    """
    Local non-linear variance calculation.

    Parameters
    ----------
    V : numpy.array, size=(m,n), dtype=float
        array with one velocity component, all algorithms are independent
        of their axis.

    Returns
    -------
    sig_V : numpy.array, size=(m,n), dtype=float
        statistical local variance, based on the procedure described in
        [1], [2]

    References
    ----------
    .. [1] Joughin, "Ice-sheet velocity mapping: a combined interferometric
           and speckle-tracking approach", Annals of Glaciology, vol.34
           pp.195-201.
    .. [2] Joughin et al. "Greenland ice mapping project 2 (GIMP-2) algorithm
           theoretical basis document", Making earth system data records for
           use in research environment (MEaSUREs) documentation.
    """
    V_class = local_mad_filter(V, tsize=tsize)
    V[V_class] = np.nan
    V_0 = local_infilling_filter(V, tsize=tsize)

    # running mean adjustment
    mean_kernel = np.ones((tsize, tsize), dtype=float) / (tsize**2)
    V = ndimage.convolve(V, mean_kernel)

    # plane fitting and variance of residual
    sig_V = local_nonlin_var_filter(V, tsize=tsize)
    return sig_V
5,342,591
def get_virtual_device_configuration(device): """Get the virtual device configuration for a PhysicalDevice. Returns the list of VirtualDeviceConfiguration objects previously configured by a call to `tf.config.experimental.set_virtual_device_configuration()`. For example: >>> physical_devices = tf.config.experimental.list_physical_devices('CPU') >>> assert len(physical_devices) == 1, "No CPUs found" >>> configs = tf.config.experimental.get_virtual_device_configuration( ... physical_devices[0]) >>> try: ... assert configs is None ... tf.config.experimental.set_virtual_device_configuration( ... physical_devices[0], ... [tf.config.experimental.VirtualDeviceConfiguration(), ... tf.config.experimental.VirtualDeviceConfiguration()]) ... configs = tf.config.experimental.get_virtual_device_configuration( ... physical_devices[0]) ... assert len(configs) == 2 ... except: ... # Cannot modify virtual devices once initialized. ... pass Args: device: PhysicalDevice to query Returns: List of `tf.config.experimental.VirtualDeviceConfiguration` objects or `None` if no virtual device configuration has been set for this physical device. """ return context.context().get_virtual_device_configuration(device)
5,342,592
def user_directory_path(instance, filename): """Sets path to user uploads to: MEDIA_ROOT/user_<id>/<filename>""" return f"user_{instance.user.id}/{filename}"
5,342,593
def process_command_line(): """Process the file on the command line when run as a script or entry point.""" args = parse_command_line() code_file = args.code_file[0] processed_code = strip_file_to_string(code_file, args.to_empty, args.strip_nl, args.no_ast, args.no_colon_move, args.no_equal_move, args.only_assigns_and_defs, args.only_test_for_changes) if args.inplace: args.outfile = [code_file] if not args.only_test_for_changes: if not args.outfile: print(processed_code, end="") else: with open(args.outfile[0], "w") as f: f.write(str(processed_code)) else: if processed_code: # The variable processed_code will be boolean in this case. print("True") exit_code = 0 else: print("False") exit_code = 1 sys.exit(exit_code)
5,342,594
def not_none_to_dict(args_dict, key, value):
    """ If the value is not None, put it into the dictionary. """
    if value is not None:
        args_dict[key] = value
5,342,595
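A minimal usage sketch for not_none_to_dict defined above; the keys and values are made up for illustration.

args = {}
not_none_to_dict(args, "timeout", 30)    # value kept
not_none_to_dict(args, "retries", None)  # value skipped
print(args)                              # {'timeout': 30}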
async def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback, ) -> None: """Load Tradfri switches based on a config entry.""" gateway_id = config_entry.data[CONF_GATEWAY_ID] tradfri_data = hass.data[DOMAIN][config_entry.entry_id] api = tradfri_data[KEY_API] devices = tradfri_data[DEVICES] async_add_entities( TradfriSwitch(dev, api, gateway_id) for dev in devices if dev.has_socket_control )
5,342,596
def setup(app): """Setup the Sphinx extension.""" # Register builder. app.add_builder(BeamerBuilder) # Add setting for allowframebreaks. app.add_config_value("beamer_allowframebreaks", True, "beamer") # Add setting for Beamer theme. app.add_config_value("beamer_theme", "Warsaw", "beamer") # Adjust titles upon doctree-resolved. app.connect("doctree-resolved", adjust_titles) return { "version": "1.0", "parallel_read_safe": True, "parallel_write_safe": True, }
5,342,597
def select_object_by_name_no_context(name): """ This is an attempt to deal with an incorrect context error """ obj = bpy.data.objects[name] for o in bpy.context.view_layer.objects: if (is_blender_28()): o.select_set(False) else: o.select = False if (is_blender_28()): obj.select_set(True) bpy.context.view_layer.objects.active = obj else: obj.select = True bpy.context.scene.objects.active = obj
5,342,598
def EndorseConnections(browser):
    """
    Endorse skills for the connections found. This only endorses the top
    three popular skills shown for each connection. If you want this feature
    expanded further, just post an enhancement request in the repository.

    browser: the selenium webdriver instance to use
    """
    print("Gathering your connections' URLs to endorse their skills.")
    profileURLS = []
    browser.get('https://www.linkedin.com/mynetwork/invite-connect/connections/')
    time.sleep(3)
    try:
        for counter in range(1, NUM_LAZY_LOAD_ON_MY_NETWORK_PAGE):
            ScrollToBottomAndWaitForLoad(browser)
        soup = BeautifulSoup(browser.page_source, "lxml")
        for a in soup.find_all('a', class_='mn-person-info__picture'):
            if VERBOSE:
                print(a['href'])
            profileURLS.append(a['href'])
        print("Endorsing your connections' skills.")
        for url in profileURLS:
            endorseConnection = True
            if RANDOMIZE_ENDORSING_CONNECTIONS:
                endorseConnection = random.choice([True, False])
            if endorseConnection:
                fullURL = 'https://www.linkedin.com' + url
                if VERBOSE:
                    print('Endorsing the connection ' + fullURL)
                browser.get(fullURL)
                time.sleep(3)
                for button in browser.find_elements_by_xpath('//button[@data-control-name="endorse"]'):
                    button.click()
    except:
        print('Exception occurred when endorsing your connections.')
        pass
    print('')
5,342,599