content
stringlengths
22
815k
id
int64
0
4.91M
def check_position(position):
    """Return True when *position* lies on the keypad, else False.

    The keypad spans x in (-1, 5) and y in (-4, 1) exclusive, with the
    two bottom-corner gaps (0, -3) and (4, -3) excluded.
    """
    if position in ((0, -3), (4, -3)):
        # The two off-keypad corner gaps.
        return False
    return (-1 < position[0] < 5) and (-4 < position[1] < 1)
31,200
def p_tables(p):
    """tables : schemaslash TABLE"""
    # NOTE: the docstring above is the PLY/yacc grammar production for this
    # rule -- it is consumed by the parser generator and must not be edited
    # as ordinary documentation.
    # Reduce action: the parsed schema object (p[1]) supplies the tables.
    p[0] = p[1].tables()
31,201
def mobilenet_wd4_cub(num_classes=200, **kwargs):
    """
    0.25 MobileNet-224 model for CUB-200-2011 from 'MobileNets: Efficient
    Convolutional Neural Networks for Mobile Vision Applications,'
    https://arxiv.org/abs/1704.04861.

    Parameters:
    ----------
    num_classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    net = get_mobilenet(
        num_classes=num_classes,
        width_scale=0.25,
        model_name="mobilenet_wd4_cub",
        **kwargs)
    return net
31,202
def base_plus_copy_indices(words, dynamic_vocabs, base_vocab, volatile=False):
    """Compute base + copy indices.

    Args:
        words (list[list[unicode]])
        dynamic_vocabs (list[HardCopyDynamicVocab])
        base_vocab (HardCopyVocab)
        volatile (bool)

    Returns:
        MultiVocabIndices
    """
    unk_token = base_vocab.UNK

    # Map each word to its copy token (or UNK) via its dynamic vocab.
    copy_seqs = []
    for seq, dyna_vocab in izip(words, dynamic_vocabs):
        lookup = dyna_vocab.word_to_copy_token
        copy_seqs.append([lookup.get(w, unk_token) for w in seq])

    # each SeqBatch.values has shape (batch_size, seq_length)
    base_indices = SequenceBatch.from_sequences(words, base_vocab, volatile=volatile)
    copy_indices = SequenceBatch.from_sequences(copy_seqs, base_vocab, volatile=volatile)
    assert_tensor_equal(base_indices.mask, copy_indices.mask)

    # has shape (batch_size, seq_length, 2)
    concat_values = torch.stack([base_indices.values, copy_indices.values], 2)
    return MultiVocabIndices(concat_values, base_indices.mask)
31,203
def font_encoding(psname):
    """Return the encoding name registered for the given psname."""
    encoding_name = LIBRARY.encoding(psname)
    return encoding_name
31,204
def shader_with_tex_offset(offset):
    """Build a vertex FileShader whose source samples a texture at *offset*."""
    source = shader_source_with_tex_offset(offset)
    return FileShader(source, ".vert")
31,205
def braycurtis(u, v):
    """
    d = braycurtis(u, v)

    Computes the Bray-Curtis distance between two n-vectors u and v,
    \\sum{|u_i-v_i|} / \\sum{|u_i+v_i|}.
    """
    u = np.asarray(u)
    v = np.asarray(v)
    numerator = abs(u - v).sum()
    denominator = abs(u + v).sum()
    return numerator / denominator
31,206
def test_outcomes_unwrap_returns_trio_value_over_qt_value():
    """Unwrapping an Outcomes prioritizes a Trio value over a Qt value."""
    outcomes = qtrio.Outcomes(qt=outcome.Value(2), trio=outcome.Value(3))
    unwrapped = outcomes.unwrap()
    # The Trio outcome (3) wins over the Qt outcome (2).
    assert unwrapped == 3
31,207
def _load_pyfunc(path):
    """
    Load PyFunc implementation. Called by ``pyfunc.load_pyfunc``.

    :param path: Local filesystem path to the MLflow Model with the
                 ``fastai`` flavor.
    """
    model = _load_model(path)
    return _FastaiModelWrapper(model)
31,208
def fit_pseudo_voigt(x,y,p0=None,fit_alpha=True,alpha_guess=0.5):
    """Fits the data with a pseudo-voigt peak.

    Parameters
    -----------
    x: np.ndarray
        Array with x values
    y: np.ndarray
        Array with y values
    p0: list (Optional)
        It contains a initial guess the for the pseudo-voigt variables, in
        the order: p0 = [x0,sigma,amplitude,constant,alpha]. If None, the
        code will create a guess.
    fit_alpha: boolean (Optional)
        Option to fit the alpha parameter.
    alpha_guess: float (Optional)
        If alpha is being fitted, then this will be the initial guess.
        Otherwise it will be the fixed parameter used. For lorenzian:
        alpha = 1, for gaussian: alpha = 0.

    Returns
    -----------
    popt: np.ndarray
        Array with the optimized pseudo-voigt parameters.
    """
    if p0 is None:
        # Heuristic starting point: peak centered at the y-maximum, width a
        # tenth of the x-span, amplitude scaled for a Gaussian-like area,
        # and the first y sample as the constant offset.
        width = (x.max()-x.min())/10.
        index = y == y.max()
        p0 = [x[index][0],width,y.max()*width*np.sqrt(np.pi/np.log(2)),y[0],alpha_guess]
    if fit_alpha is False:
        # Freeze alpha at alpha_guess: fit only the first four parameters
        # via a closure, then append the fixed alpha so the returned shape
        # matches the fit_alpha=True case.
        popt,pcov = curve_fit(lambda x,x0,sigma,amplitude,constant: pseudo_voigt(x,x0,sigma,amplitude,constant,alpha_guess), x,y,p0=p0[:-1])
        popt = np.append(popt,alpha_guess)
    else:
        popt,pcov = curve_fit(pseudo_voigt,x,y,p0=p0)
    return popt
31,209
def mkdir(path: str):
    """Create *path* (including any missing parents) if it does not exist.

    :param path: directory path to create.
    :return: None
    """
    # exist_ok makes repeated calls a harmless no-op.
    os.makedirs(path, exist_ok=True)
31,210
def get_ready_directories(directory):
    """Map build UUIDs to their downloadable log files.

    Walks *directory*; a build subdirectory is considered ready when
    ``check_info_files`` confirms it has non-empty 'buildinfo' and
    'inventory.yaml' files. Those two marker files are stripped from the
    returned lists.

    :param directory: root directory to walk.
    :return: dict mapping build uuid -> list of log file names.
    """
    log_files = {}
    for root, _, files in os.walk(directory):
        build_uuid = root.split('/')[-1]
        if check_info_files(root, files):
            files.remove("buildinfo")
            files.remove("inventory.yaml")
            log_files[build_uuid] = files
        else:
            # Lazy %-args defer formatting until INFO is actually emitted.
            # (Also fixes the "dowloaded" typo in the original message.)
            logging.info("Skipping build with uuid %s. Probably all files "
                         "are not downloaded yet.", build_uuid)
    return log_files
31,211
def asbytes(s: Literal["a"]):
    """
    usage.scipy: 2
    """
    # Auto-generated API-usage stub: records observed call signatures
    # (here: scipy calls this twice with the literal "a"); body is
    # intentionally empty.
    ...
31,212
def lint(paths, include, exclude, only_staged, ignore_untracked):
    """ Run code checks (pylint + mypy) """
    # Imported lazily so loading this CLI module stays cheap when the
    # command is not invoked.
    from peltak.core import log
    from custom_commands_logic import check

    # Echo the effective configuration before running the checks.
    log.info('<0><1>{}', '-' * 60)
    log.info('paths: {}', paths)
    log.info('include: {}', include)
    log.info('exclude: {}', exclude)
    log.info('only_staged: {}', only_staged)
    log.info('ignore_untracked: {}', ignore_untracked)
    log.info('<0><1>{}', '-' * 60)

    # NOTE: check() takes 'untracked' (include untracked files), the
    # inverse of the CLI's ignore_untracked flag.
    check(
        paths=paths,
        include=include,
        exclude=exclude,
        only_staged=only_staged,
        untracked=not ignore_untracked,
    )
31,213
def get_name_of_day(str_date):
    """Return the weekday name for an ISO-format date string.

    Looks the weekday index (0=Monday) up in the DAYS_NAME mapping.
    """
    weekday_index = datetime.fromisoformat(str_date).weekday()
    return DAYS_NAME.get(weekday_index)
31,214
def reset_variable_in_store(store_name, path):
    """
    Removes the node *path* from the HDF store, best-effort.

    :param store_name: filesystem path of the HDFStore file.
    :param path: key of the node to remove.
    :return: None
    """
    try:
        # pd.get_store() was removed from pandas; HDFStore is the
        # supported context-manager replacement with the same semantics.
        with pd.HDFStore(store_name) as store:
            store.remove(path)
    except Exception:
        # Deliberate best-effort reset: a missing store file or absent
        # key is not an error worth surfacing.
        pass
31,215
def download(client, activity, retryer, backup_dir, export_formats=None):
    """Exports a Garmin Connect activity to a given set of formats
    and saves the resulting file(s) to a given backup directory.
    In case a given format cannot be exported for the activity, the file
    name will be appended to the :attr:`not_found_file` in the backup
    directory (to prevent it from being retried on subsequent backup runs).

    :param client: A :class:`garminexport.garminclient.GarminClient`
      instance that is assumed to be connected.
    :type client: :class:`garminexport.garminclient.GarminClient`
    :param activity: An activity tuple `(id, starttime)`
    :type activity: tuple of `(int, datetime)`
    :param retryer: A :class:`garminexport.retryer.Retryer` instance that
      will handle failed download attempts.
    :type retryer: :class:`garminexport.retryer.Retryer`
    :param backup_dir: Backup directory path (assumed to exist already).
    :type backup_dir: str
    :keyword export_formats: Which format(s) to export to. Could be any
      of: 'json_summary', 'json_details', 'gpx', 'tcx', 'fit'.
    :type export_formats: list of str
    """
    # NOTE(review): export_formats defaults to None, but the membership
    # tests below would raise TypeError on None -- callers presumably
    # always pass a list; confirm before relying on the default.
    id = activity[0]  # shadows builtins.id; kept as-is

    # JSON summary/details are always available when requested.
    if 'json_summary' in export_formats:
        log.debug("getting json summary for %s", id)
        activity_summary = retryer.call(client.get_activity_summary, id)
        dest = os.path.join(
            backup_dir, export_filename(activity, 'json_summary'))
        with codecs.open(dest, encoding="utf-8", mode="w") as f:
            f.write(json.dumps(activity_summary, ensure_ascii=False, indent=4))

    if 'json_details' in export_formats:
        log.debug("getting json details for %s", id)
        activity_details = retryer.call(client.get_activity_details, id)
        dest = os.path.join(backup_dir, export_filename(activity, 'json_details'))
        with codecs.open(dest, encoding="utf-8", mode="w") as f:
            f.write(json.dumps(activity_details, ensure_ascii=False, indent=4))

    # gpx/tcx/fit can be legitimately missing for an activity; such
    # activities are appended to not_found_file so later runs skip them.
    not_found_path = os.path.join(backup_dir, not_found_file)
    with open(not_found_path, mode="a") as not_found:
        if 'gpx' in export_formats:
            log.debug("getting gpx for %s", id)
            activity_gpx = retryer.call(client.get_activity_gpx, id)
            dest = os.path.join(backup_dir, export_filename(activity, 'gpx'))
            if activity_gpx is None:
                not_found.write(os.path.basename(dest) + "\n")
            else:
                with codecs.open(dest, encoding="utf-8", mode="w") as f:
                    f.write(activity_gpx)

        if 'tcx' in export_formats:
            log.debug("getting tcx for %s", id)
            activity_tcx = retryer.call(client.get_activity_tcx, id)
            dest = os.path.join(backup_dir, export_filename(activity, 'tcx'))
            if activity_tcx is None:
                not_found.write(os.path.basename(dest) + "\n")
            else:
                with codecs.open(dest, encoding="utf-8", mode="w") as f:
                    f.write(activity_tcx)

        if 'fit' in export_formats:
            log.debug("getting fit for %s", id)
            activity_fit = retryer.call(client.get_activity_fit, id)
            dest = os.path.join(
                backup_dir, export_filename(activity, 'fit'))
            if activity_fit is None:
                not_found.write(os.path.basename(dest) + "\n")
            else:
                # FIT is binary; written without text encoding.
                with open(dest, mode="wb") as f:
                    f.write(activity_fit)
31,216
def plot_passive_daily_comparisons(df_list: list, stock: str):
    """**First dataframe must be the Portfolio with switching.**

    Plots the daily 'Net' value of the rebalanced portfolio
    (df_list[0]) against the stock's own price series (df_list[1]) on
    the dates the two frames share, using a bokeh line chart.

    :param df_list: [portfolio dataframe, stock dataframe], both with a
        'Date' column.
    :param stock: column name of the stock price in df_list[1].
    """
    temp_df1 = df_list[0].iloc[0:0]
    temp_df2 = df_list[1].iloc[0:0]
    temp_date_range = df_list[0]['Date'].tolist()
    for date in temp_date_range:
        df1 = df_list[0][df_list[0]['Date'] == date]
        df2 = df_list[1][df_list[1]['Date'] == date]
        if not (df1.empty or df2.empty):
            # BUG FIX: DataFrame.append returns a *new* frame; the result
            # was previously discarded, so both frames stayed empty and
            # nothing was plotted.
            temp_df1 = temp_df1.append(df1, ignore_index=True)
            temp_df2 = temp_df2.append(df2, ignore_index=True)
    p = figure(title="Daily price Comparison", x_axis_type='datetime',
               background_fill_color="#fafafa")
    p.add_tools(HoverTool(
        tooltips=[
            ('Date', '@x{%F}'),
            ('Price', '$@y{%0.2f}'),  # use @{ } for field names with spaces
        ],
        formatters={
            'x': 'datetime',  # use 'datetime' formatter for 'date' field,
            'y': 'printf'
        },
        mode='mouse'
    ))
    p.line(temp_df1['Date'].tolist(), temp_df1['Net'].values.tolist(),
           legend="Rebalanced stock portfolio", line_color="black")
    p.line(temp_df2['Date'].tolist(), temp_df2[stock].values.tolist(),
           legend=f"{stock} index")
    p.legend.location = "top_left"
    show(p)
31,217
def show(root=None, debug=False, parent=None):
    """Display Loader GUI

    Arguments:
        root: unused here; kept for call-site compatibility.
        debug (bool, optional): Run loader in debug-mode, defaults to False
        parent: optional Qt parent widget for the window.
    """
    # Tear down any previously shown window so only one instance exists;
    # the window may already be gone (RuntimeError) or never created
    # (AttributeError).
    try:
        module.window.close()
        del module.window
    except (RuntimeError, AttributeError):
        pass

    if debug is True:
        # Debug mode installs an in-memory host via the io module.
        io.install()

    with parentlib.application():
        window = Window(parent)
        window.setStyleSheet(style.load_stylesheet())
        window.show()
        window.refresh()
        # Keep a module-level reference so the window survives this scope
        # and can be closed on the next call.
        module.window = window
31,218
def test_fragment_two_aa_peptide_y_series():
    """Test y2 fragmentation of the 'KK' dipeptide (singly charged y ions)."""
    fragments = PeptideFragment0r('KK', charges=[1], ions=['y']).df
    assert isinstance(fragments, DataFrame)
    row = fragments.iloc[3]
    assert row['name'] == 'y2'
    assert row['hill'] == 'C(12)H(26)N(4)O(3)'
    assert row['charge'] == 1
    # BUG FIX: the original `assert pytest.approx(row['mz'], 274.2004907132)`
    # built an approx object (always truthy), so the assertion could never
    # fail. The comparison must be `value == pytest.approx(expected)`.
    assert row['mz'] == pytest.approx(274.2004907132)
31,219
def run(test, params, env):
    """
    'thin-provisioning' functions test using sg_utils:
    1) Create image using qemu-img
    2) Convert the image and check if the speed is much faster than standard time

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    # Conversion of a thin-provisioned image must finish well under this
    # many seconds of 'real' time.
    standard_time = 0.4
    qemu_img_binary = utils_misc.get_qemu_img_binary(params)
    base_dir = params.get("images_base_dir", data_dir.get_data_dir())
    if not qemu_img_binary:
        raise exceptions.TestError("Can't find the command qemu-img.")

    # Both commands come from the test params with %s placeholders for the
    # qemu-img binary and base directory.
    image_create_cmd = params["create_cmd"]
    image_create_cmd = image_create_cmd % (qemu_img_binary, base_dir)
    image_convert_cmd = params["convert_cmd"]
    image_convert_cmd = image_convert_cmd % (
        qemu_img_binary, base_dir, base_dir)
    process.system(image_create_cmd, shell=True)
    output = process.system_output(image_convert_cmd, shell=True)
    # Parse the 'real XmY.YYs' line emitted by `time`.
    # NOTE(review): on Python 3, system_output may return bytes while the
    # pattern is str -- confirm against the avocado version in use.
    realtime = re.search(r"real\s+\dm(.*)s", output)
    if realtime is None:
        raise exceptions.TestError(
            "Faild to get the realtime from {}".format(output))
    realtime = float(realtime.group(1))
    logging.info("real time is : {:f}".format(realtime))
    if realtime >= standard_time:
        err = "realtime({:f}) to convert the image is a " \
              "little longer than standardtime({:f})"
        raise exceptions.TestFail(err.format(realtime, standard_time))

    # Clean up both the source and converted images.
    delete_image = params["disk_name"]
    delete_image = os.path.join(base_dir, delete_image)
    delete_convert_image = params.get("convert_disk_name")
    delete_convert_image = os.path.join(base_dir, delete_convert_image)
    process.system_output("rm -rf {:s} {:s}".format(
        delete_image, delete_convert_image))
31,220
def k_fold_split(ratings, min_num_ratings=10, k=4):
    """
    Creates the k (training set, test_set) used for k_fold cross validation

    :param ratings: initial sparse matrix of shape (num_items, num_users)
    :param min_num_ratings: all users and items must have at least
        min_num_ratings per user and per item to be kept
    :param k: number of fold
    :return: a list fold of length k such that
        - fold[l][0] is a list of entries [i, j] of 'ratings' forming the
          l-th testing set
        - fold[l][1] is a list of entries [i, j] of 'ratings' forming the
          l-th training set
    """
    num_items_per_user = np.array((ratings != 0).sum(axis=0)).flatten()
    num_users_per_item = np.array((ratings != 0).sum(axis=1).T).flatten()

    # Fixed seed so the shuffle (and hence the folds) is reproducible.
    np.random.seed(988)

    # Keep only sufficiently active users and items.
    valid_users = np.where(num_items_per_user >= min_num_ratings)[0]
    valid_items = np.where(num_users_per_item >= min_num_ratings)[0]
    valid_ratings = ratings[valid_items, :][:, valid_users]

    rows, cols = valid_ratings.nonzero()
    entries = np.random.permutation(list(zip(rows, cols)))

    # Partition the shuffled entries into k equal chunks (remainder dropped).
    fold_size = int(len(entries) / k)
    splits = [entries[i * fold_size: (i + 1) * fold_size].tolist()
              for i in range(k)]

    # Fold l tests on chunk l and trains on the union of all other chunks.
    folds = []
    for test_idx in range(k):
        train = []
        for other in range(k):
            if other != test_idx:
                train = train + splits[other]
        folds.append([splits[test_idx], train])
    return folds
31,221
def merge_dict(a, b, path: list = None):
    """Recursively merge *b* into *a* (in place) and return *a*.

    Keys whose values are dicts on both sides are merged key-by-key; any
    other value in *b* overwrites the corresponding value in *a*.

    Args:
        a: destination dict (mutated).
        b: source dict (unchanged).
        path (list, optional): key path accumulated during recursion.
            (Default value = None)

    Returns:
        The mutated dict *a*.
    """
    # Fixes from review: the annotation said `str` but path is a list; the
    # stray "merges b into a" string statement is now part of the docstring;
    # the two identical overwrite branches are collapsed into one.
    if path is None:
        path = []
    for key in b:
        if key in a and isinstance(a[key], dict) and isinstance(b[key], dict):
            merge_dict(a[key], b[key], path + [str(key)])
        else:
            a[key] = b[key]
    return a
31,222
def validateSignedOfferData(adat, ser, sig, tdat, method="igo"):
    """
    Returns deserialized version of serialization ser which Offer if offer
    request is correctly formed. Otherwise raises ValidationError.

    adat is thing's holder/owner agent resource
    ser is json encoded unicode string of request
    sig is base64 encoded signature from request header "signer" tag
    tdat is thing data resource

    offer request fields
    {
        "uid": offeruniqueid,
        "thing": thingDID,
        "aspirant": AgentDID,
        "duration": timeinsecondsofferisopen,
    }
    """
    try:
        try:  # get signing key of request from thing resource
            (adid, index, akey) = extractDatSignerParts(tdat)
        except ValueError as ex:
            raise ValidationError("Missing or invalid signer")

        # get agent key at index from signer data. assumes that resource is valid
        try:
            averkey = adat["keys"][index]["key"]
        except (TypeError, KeyError, IndexError) as ex:
            raise ValidationError("Missing or invalid signer key")

        if len(averkey) != 44:  # invalid length for base64 encoded key
            raise ValidationError("Invalid signer key")

        # verify request using agent signer verify key
        # (typo fix: "signatrue" -> "signature")
        if not verify64u(sig, ser, averkey):
            raise ValidationError("Unverifiable signature")  # signature fails

        # now validate offer data
        try:
            dat = json.loads(ser, object_pairs_hook=ODict)
        except ValueError as ex:
            raise ValidationError("Invalid json")  # invalid json

        if not dat:  # offer request must not be empty
            raise ValidationError("Empty body")

        if not isinstance(dat, dict):  # must be dict subclass
            raise ValidationError("JSON not dict")

        requireds = ("uid", "thing", "aspirant", "duration")
        for field in requireds:
            if field not in dat:
                # (typo fix: "Missing missing" -> "Missing")
                raise ValidationError("Missing required field {}".format(field))

        if not dat["uid"]:  # uid must not be empty
            raise ValidationError("Empty uid")

        if dat["thing"] != tdat['did']:
            raise ValidationError("Not same thing")

        aspirant = dat["aspirant"]
        try:  # correct did format pre:method:keystr
            pre, meth, keystr = aspirant.split(":")
        except ValueError as ex:
            raise ValidationError("Invalid aspirant")

        if pre != "did" or meth != method:
            raise ValidationError("Invalid aspirant")  # did format bad

        try:
            duration = float(dat["duration"])
        except ValueError as ex:
            raise ValidationError("Invalid duration")

        if duration < PROPAGATION_DELAY * 2.0:
            raise ValidationError("Duration too short")

    except ValidationError:
        raise
    except Exception as ex:  # unknown problem
        raise ValidationError("Unexpected error")

    return dat
31,223
def _phrase_replacement(phrase):
    """Convert a (Related)SensitivePhrase row into a replacement spec dict."""
    return {
        'phrase': phrase.phrase,
        'replacement': phrase.replace_phrase,
        'start_boundary': phrase.check_for_word_boundary_start,
        'end_boundary': phrase.check_for_word_boundary_end,
    }


def get_relevant_phrases(obj=None):
    """
    Get all phrases to be searched for. This includes all SensitivePhrases,
    and any RelatedSensitivePhrases that refer to the given object.

    :param obj: A model instance to check for sensitive phrases made
        specifically for that instance.
    :return: a list of replacement spec dicts (longest phrases first), each
        with keys 'phrase', 'replacement', 'start_boundary', 'end_boundary'.
        Object-specific phrases precede the global ones.
    """
    # NOTE(review): obj=None would fail in get_for_model/obj.id below;
    # callers appear to always pass an instance -- confirm.
    replacements = []
    content_type = ContentType.objects.get_for_model(obj)

    # Object-specific phrases first, longest-phrase-first so longer matches
    # win during replacement.
    related_sensitive_phrases = RelatedSensitivePhrase.objects.filter(
        content_type__pk=content_type.id,
        object_id=obj.id
    ).extra(select={'length': 'Length(phrase)'}).order_by('-length', 'phrase')
    for phrase in related_sensitive_phrases:
        replacements.append(_phrase_replacement(phrase))

    # Then the global phrases, same ordering.
    sensitive_phrases = SensitivePhrase.objects.all() \
        .extra(select={'length': 'Length(phrase)'}).order_by('-length', 'phrase')
    for phrase in sensitive_phrases:
        replacements.append(_phrase_replacement(phrase))

    return replacements
31,224
def test_last_ordered_3pc_not_reset_if_less_than_new_view(txnPoolNodeSet,
                                                          looper,
                                                          sdk_pool_handle,
                                                          sdk_wallet_client):
    """
    Check that last_ordered_3pc is NOT reset after a view change when its
    viewNo is less than the new viewNo: a last_ordered_3pc recorded in a
    previous view (for example during catch-up) must survive the change.
    The assertions below verify (old_view_no, 100) is still in place after
    the view change completes.
    """
    old_view_no = checkViewNoForNodes(txnPoolNodeSet)
    for node in txnPoolNodeSet:
        node.master_replica.last_ordered_3pc = (old_view_no, 100)

    # Trigger a view change; the new viewNo is greater than old_view_no.
    ensure_view_change_complete(looper, txnPoolNodeSet, customTimeout=60)

    for node in txnPoolNodeSet:
        assert (old_view_no, 100) == node.master_replica.last_ordered_3pc

    # Make sure the pool is working
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 5)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
31,225
def write_reproducible_script(script_filename, databases_root_folder, table_name, analysis_id):
    """Generate a standalone .py script that reproduces a stored analysis.

    Reads the analysis, its neural networks and its leakage model from the
    SQLite database, then writes each script section in order to
    scripts/<script_filename>_<table_name>.py.
    """
    # Read information from database
    db_select = DBSelect(databases_root_folder + table_name)
    analysis = db_select.select_analysis(Analysis, analysis_id)
    neural_network_rows = db_select.select_all_from_analysis(NeuralNetwork, analysis_id)
    leakage_model = db_select.select_from_analysis(LeakageModel, analysis_id)

    # Create .py file in the scripts folder and write each section to it.
    script_path = f"scripts/{script_filename}_{table_name.replace('.sqlite', '')}.py"
    with open(script_path, "w+") as script_py_file:
        write_imports(script_py_file, analysis)
        write_minimum_settings(script_py_file, analysis, table_name)
        write_leakage_model(script_py_file, leakage_model)
        write_dataset_definitions(script_py_file, analysis)
        write_neural_networks(script_py_file, analysis, neural_network_rows)
        write_search_definitions(script_py_file, analysis)
        write_early_stopping_defitions(script_py_file, analysis)
        write_callbacks_defitions(script_py_file, analysis)
        write_run_method(script_py_file, analysis)
31,226
def _prepare_cabal_inputs(
        hs,
        cc,
        posix,
        dep_info,
        cc_info,
        direct_cc_info,
        component,
        package_id,
        tool_inputs,
        tool_input_manifests,
        cabal,
        setup,
        setup_deps,
        setup_dep_info,
        srcs,
        compiler_flags,
        flags,
        generate_haddock,
        cabal_wrapper,
        package_database,
        verbose,
        transitive_haddocks,
        dynamic_binary = None):
    """Compute Cabal wrapper, arguments, inputs."""
    with_profiling = is_profiling_enabled(hs)

    # Haskell library dependencies or indirect C library dependencies are
    # already covered by their corresponding package-db entries. We only need
    # to add libraries and headers for direct C library dependencies to the
    # command line.
    direct_libs = get_ghci_library_files(hs, cc.cc_libraries_info, cc.cc_libraries)

    # The regular Haskell rules perform mostly static linking, i.e. where
    # possible all C library dependencies are linked statically. Cabal has no
    # such mode, and since we have to provide dynamic C libraries for
    # compilation, they will also be used for linking. Hence, we need to add
    # RUNPATH flags for all dynamic C library dependencies. Cabal also produces
    # a dynamic and a static Haskell library in one go. The dynamic library
    # will link other Haskell libraries dynamically. For those we need to also
    # provide RUNPATH flags for dynamic Haskell libraries.
    (_, dynamic_libs) = get_library_files(
        hs,
        cc.cc_libraries_info,
        cc.transitive_libraries,
        dynamic = True,
    )

    # Executables build by Cabal will link Haskell libraries statically, so we
    # only need to include dynamic C libraries in the runfiles tree.
    (_, runfiles_libs) = get_library_files(
        hs,
        cc.cc_libraries_info,
        get_cc_libraries(cc.cc_libraries_info, cc.transitive_libraries),
        dynamic = True,
    )

    # Setup dependencies are loaded by runghc.
    setup_libs = get_ghci_library_files(hs, cc.cc_libraries_info, cc.setup_libraries)

    # The regular Haskell rules have separate actions for linking and
    # compilation to which we pass different sets of libraries as inputs. The
    # Cabal rules, in contrast, only have a single action for compilation and
    # linking, so we must provide both sets of libraries as inputs to the same
    # action.
    transitive_compile_libs = get_ghci_library_files(hs, cc.cc_libraries_info, cc.transitive_libraries)
    transitive_link_libs = _concat(get_library_files(hs, cc.cc_libraries_info, cc.transitive_libraries))

    env = dict(hs.env)
    env["PATH"] = join_path_list(hs, _binary_paths(tool_inputs) + posix.paths)
    if hs.toolchain.is_darwin:
        env["SDKROOT"] = "macosx"  # See haskell/private/actions/link.bzl
    if verbose:
        env["CABAL_VERBOSE"] = "True"

    args = hs.actions.args()
    package_databases = dep_info.package_databases
    transitive_headers = cc_info.compilation_context.headers
    direct_include_dirs = depset(transitive = [
        direct_cc_info.compilation_context.includes,
        direct_cc_info.compilation_context.quote_includes,
        direct_cc_info.compilation_context.system_includes,
    ])
    direct_lib_dirs = [file.dirname for file in direct_libs]

    # Positional arguments consumed by the cabal_wrapper script.
    args.add_all([component, package_id, generate_haddock, setup, cabal.dirname, package_database.dirname])

    # Setup-script dependencies, forwarded to GHC via --ghc-arg.
    # NOTE: the comprehension variable package_id shadows the function
    # parameter of the same name within this expression.
    args.add_joined([
        arg
        for package_id in setup_deps
        for arg in ["-package-id", package_id]
    ] + [
        arg
        for package_db in setup_dep_info.package_databases.to_list()
        for arg in ["-package-db", "./" + _dirname(package_db)]
    ], join_with = " ", format_each = "--ghc-arg=%s", omit_if_empty = False)

    args.add("--flags=" + " ".join(flags))
    args.add_all(compiler_flags, format_each = "--ghc-option=%s")
    if dynamic_binary:
        # RUNPATH entries so the produced binary finds its dynamic libraries
        # relative to its own location.
        args.add_all(
            [
                "--ghc-option=-optl-Wl,-rpath," + create_rpath_entry(
                    binary = dynamic_binary,
                    dependency = lib,
                    keep_filename = False,
                    prefix = relative_rpath_prefix(hs.toolchain.is_darwin),
                )
                for lib in dynamic_libs
            ],
            uniquify = True,
        )

    # Everything after "--" is passed to Cabal itself.
    args.add("--")
    args.add_all(package_databases, map_each = _dirname, format_each = "--package-db=%s")
    args.add_all(direct_include_dirs, format_each = "--extra-include-dirs=%s")
    args.add_all(direct_lib_dirs, format_each = "--extra-lib-dirs=%s", uniquify = True)
    if with_profiling:
        args.add("--enable-profiling")

    # Redundant with _binary_paths() above, but better be explicit when we can.
    args.add_all(tool_inputs, map_each = _cabal_tool_flag)

    inputs = depset(
        [setup, hs.tools.ghc, hs.tools.ghc_pkg, hs.tools.runghc],
        transitive = [
            depset(srcs),
            depset(cc.files),
            package_databases,
            setup_dep_info.package_databases,
            transitive_headers,
            depset(setup_libs),
            depset(transitive_compile_libs),
            depset(transitive_link_libs),
            depset(transitive_haddocks),
            setup_dep_info.interface_dirs,
            setup_dep_info.hs_libraries,
            dep_info.interface_dirs,
            dep_info.hs_libraries,
            tool_inputs,
        ],
    )
    input_manifests = tool_input_manifests + hs.toolchain.cc_wrapper.manifests

    return struct(
        cabal_wrapper = cabal_wrapper,
        args = args,
        inputs = inputs,
        input_manifests = input_manifests,
        env = env,
        runfiles = depset(direct = runfiles_libs),
    )
31,227
def split_kp(kp_joined, detach=False):
    """Split joined keypoints into driving-video and source-image sets.

    Along dim 1, index 0 belongs to the source image and the remaining
    indices to the driving video frames. With detach=True the returned
    tensors are detached from the autograd graph.
    """
    if detach:
        driving = {key: val[:, 1:].detach() for key, val in kp_joined.items()}
        source = {key: val[:, :1].detach() for key, val in kp_joined.items()}
    else:
        driving = {key: val[:, 1:] for key, val in kp_joined.items()}
        source = {key: val[:, :1] for key, val in kp_joined.items()}
    return {'kp_driving': driving, 'kp_source': source}
31,228
def train(model, train_primary, train_ss, ss_padding_index):
    """
    Runs through one epoch - all training examples.

    :param model: the initialized model to use for forward and backward pass
    :param train_primary: primary (amino acid seq) train data (all data for
        training) of shape (num_sentences, window_size)
    :param train_ss: secondary structure train data (all data for training)
        of shape (num_sentences, window_size)
    :param ss_padding_index: the padding index, the id of *PAD* token. This
        integer is used when masking padding labels.
    :return: None
    """
    num_examples = train_primary.shape[0]
    num_batches = (int)(np.ceil(num_examples / model.batch_size))
    # NOTE(review): np.asarray over array_split produces a ragged array when
    # num_examples is not divisible by num_batches -- newer numpy versions
    # reject this; confirm the pinned numpy version.
    primary_batch = np.asarray(np.array_split(train_primary, num_batches))
    ss_batch = np.asarray(np.array_split(train_ss, num_batches))

    for i in range(num_batches):
        curr_primary = primary_batch[i]
        curr_SS = ss_batch[i]
        # Teacher forcing: inputs are the sequence shifted right, labels
        # shifted left by one position.
        ss_batch_inputs = curr_SS[:, 0:-1]
        ss_batch_labels = curr_SS[:, 1:]
        # Zero-out loss contributions from padding positions.
        mask = np.where(ss_batch_labels == ss_padding_index, 0, 1)
        with tf.GradientTape() as tape:
            probs = model(curr_primary, ss_batch_inputs)
            loss = model.loss_function(probs, ss_batch_labels, mask)
        gradients = tape.gradient(loss, model.trainable_variables)
        model.optimizer.apply_gradients(
            zip(gradients, model.trainable_variables))
31,229
def low_shelve(signal, frequency, gain, order, shelve_type='I',
               sampling_rate=None):
    """
    Create and apply first or second order low shelve filter.

    Uses the implementation of [#]_.

    Parameters
    ----------
    signal : Signal, None
        The Signal to be filtered. Pass None to create the filter without
        applying it.
    frequency : number
        Characteristic frequency of the shelve in Hz
    gain : number
        Gain of the shelve in dB
    order : number
        The shelve order. Must be ``1`` or ``2``.
    shelve_type : str
        Defines the characteristic frequency. The default is ``'I'``

        ``'I'``
            defines the characteristic frequency 3 dB below the gain value
            if the gain is positive and 3 dB above the gain value otherwise
        ``'II'``
            defines the characteristic frequency at 3 dB if the gain is
            positive and at -3 dB if the gain is negative.
        ``'III'``
            defines the characteristic frequency at gain/2 dB
    sampling_rate : None, number
        The sampling rate in Hz. Only required if signal is ``None``. The
        default is ``None``.

    Returns
    -------
    signal : Signal
        The filtered signal. Only returned if ``sampling_rate = None``.
    filter : FilterIIR
        Filter object. Only returned if ``signal = None``.

    References
    ----------
    .. [#] https://github.com/spatialaudio/digital-signal-processing-lecture/\
blob/master/filter_design/audiofilter.py
    """
    # Delegate to the shared shelve implementation with kind='low'.
    return _shelve(
        signal, frequency, gain, order, shelve_type, sampling_rate, 'low')
31,230
def setDiskOffload(nodename, servername, enabled):
    """Enables or disables dynacache offload to disk on the given server.

    :param nodename: name of the node hosting the server.
    :param servername: name of the server to modify.
    :param enabled: 'true' or 'false' (string form, per wsadmin convention).
    :raises ValueError: if enabled is not 'true' or 'false'.
    """
    m = "setDiskOffload:"
    #sop(m,"Entry. nodename=%s servername=%s enabled=%s" % ( repr(nodename), repr(servername), repr(enabled) ))
    if enabled != 'true' and enabled != 'false':
        # BUG FIX: the original raised a bare string, which is a TypeError
        # on any modern Python; raise a proper exception instead.
        raise ValueError(m + " Error: enabled=%s. enabled must be 'true' or 'false'." % ( repr(enabled) ))
    server_id = getServerByNodeAndName(nodename, servername)
    #sop(m,"server_id=%s " % ( repr(server_id), ))
    dynacache = AdminConfig.list('DynamicCache', server_id)
    #sop(m,"dynacache=%s " % ( repr(dynacache), ))
    AdminConfig.modify(dynacache, [['enableDiskOffload', enabled]])
    #sop(m,"Exit.")
31,231
def test_mesh2d_merge_two_nodes(
    meshkernel_with_mesh2d: MeshKernel,
    first_node: int,
    second_node: int,
    num_faces: int,
):
    """Tests `mesh2d_merge_two_nodes` by checking if two selected nodes are
    properly merged

    6---7---8
    |   |   |
    3---4---5
    |   |   |
    0---1---2
    """
    kernel = meshkernel_with_mesh2d(2, 2)

    kernel.mesh2d_merge_two_nodes(first_node, second_node)
    merged_mesh = kernel.mesh2d_get()

    # One node fewer than the original nine; face count depends on which
    # pair was merged.
    assert merged_mesh.node_x.size == 8
    assert merged_mesh.face_x.size == num_faces
31,232
def get_breast_zone(mask: np.ndarray, convex_contour: bool = False) -> Union[np.ndarray, tuple]:
    """Locate the breast region as the largest white contour of *mask*.

    :param mask: binary mask searched for contours.
    :param convex_contour: if True, use the convex hull of the largest
        contour instead of the contour itself.
    :return: tuple of (filled mask of the largest contour, bounding
        rectangle (x, y, w, h) of that contour).
    """
    # Contours of the white regions of the mask.
    contours = get_contours(img=mask)

    # Pick the contour enclosing the largest area.
    largest_contour = max(contours, key=cv2.contourArea)

    if convex_contour:
        largest_contour = cv2.convexHull(largest_contour)

    # Draw the selected contour, filled, onto a blank mask.
    breast_zone = cv2.drawContours(
        image=np.zeros(mask.shape, np.uint8),
        contours=[largest_contour],
        contourIdx=-1,
        color=(255, 255, 255),
        thickness=-1,
    )

    # Axis-aligned bounding rectangle of the breast region.
    x, y, w, h = cv2.boundingRect(largest_contour)

    return breast_zone, (x, y, w, h)
31,233
def replace(temporaryans, enterword, answer):
    """Reveal every occurrence of the guessed character in the temporary answer.

    :param temporaryans: str, temporary answer ('-' for unguessed letters).
    :param enterword: str, the character that user guesses.
    :param answer: str, the answer for this hangman game.
    :return: str, the temporary answer after hyphens replacement.

    BUG FIX: the original referenced an unbound local when the guessed
    character did not occur in the answer at all; now the temporary answer
    is returned unchanged in that case.
    """
    while True:
        i = answer.find(enterword)
        if i < 0:
            # No (more) occurrences: return what has been revealed so far.
            return temporaryans
        # Reveal position i in the temporary answer.
        temporaryans = temporaryans[:i] + enterword + temporaryans[i + 1:]
        # Blank out the matched position so find() advances past it.
        answer = answer[:i] + '-' + answer[i + 1:]
31,234
def extract_timestamp(line):
    """Extract the timestamp (unixtime) field from a tab-separated log line.

    Field 7 (index 6) is returned as-is, in a form that gives the expected
    result in a comparison.
    """
    fields = line.split('\t')
    return fields[6]
31,235
def test_parse__param_field_type_field_or_none__param_section_with_optional():
    """Parse a simple docstring."""

    # Fixture: the docstring of f is the input under test; parse() should
    # map ":type foo: str or None" to the annotation "Optional[str]".
    def f(foo):
        """
        Docstring with line continuation.

        :param foo: descriptive test text
        :type foo: str or None
        """

    sections, errors = parse(f)
    assert len(sections) == 2
    assert sections[1].type == Section.Type.PARAMETERS
    assert_parameter_equal(
        sections[1].value[0],
        Parameter(
            SOME_NAME,
            annotation="Optional[str]",
            description=SOME_TEXT,
            kind=inspect.Parameter.POSITIONAL_OR_KEYWORD
        ),
    )
31,236
def da_scala_dar_resources_library(
        daml_root_dir,
        daml_dir_names,
        lf_versions,
        add_maven_tag = False,
        maven_name_prefix = "",
        exclusions = {},
        enable_scenarios = False,
        **kwargs):
    """
    Define a Scala library with dar files as resources.

    For every LF version, this compiles each daml directory to a dar,
    generates a Scala lookup object listing the dar paths, and bundles
    everything into a `da_scala_library` whose resources are the dars.
    Extra **kwargs are forwarded to every rule created here.
    """
    for lf_version in lf_versions:
        for daml_dir_name in daml_dir_names:
            # 1. Compile daml files
            daml_compile_name = "%s-tests-%s" % (daml_dir_name, lf_version)
            daml_compile_kwargs = {
                "project_name": "%s-tests" % daml_dir_name.replace("_", "-"),
                "srcs": native.glob(["%s/%s/*.daml" % (daml_root_dir, daml_dir_name)], exclude = exclusions.get(lf_version, [])),
                "target": lf_version,
                "enable_scenarios": enable_scenarios,
            }
            daml_compile_kwargs.update(kwargs)
            daml_compile(name = daml_compile_name, **daml_compile_kwargs)

        # 2. Generate lookup objects
        # The genrule emits a Scala source holding the dar paths plus one
        # case object per test directory (shell heredoc assembled below).
        genrule_name = "test-dar-lookup-%s" % lf_version
        genrule_command = """
cat > $@ <<EOF
package com.daml.ledger.test

import com.daml.lf.language.LanguageVersion

sealed trait TestDar {{ val path: String }}

object TestDar {{
  val lfVersion: LanguageVersion = LanguageVersion.v{lf_version}
  val paths: List[String] = List(
EOF
""".format(lf_version = mangle_for_java(lf_version)) + "\n".join(["""
echo "    \\"%s/%s-tests-%s.dar\\"," >> $@
""" % (native.package_name(), test_name, lf_version) for test_name in daml_dir_names]) + """
echo "  )\n}\n" >> $@
""" + "\n".join(["""
echo "case object %sTestDar extends TestDar { val path = \\"%s/%s-tests-%s.dar\\" }" >> $@
""" % (to_camel_case(test_name), native.package_name(), test_name, lf_version) for test_name in daml_dir_names])

        genrule_kwargs = {
            "outs": ["TestDar-%s.scala" % mangle_for_java(lf_version)],
            "cmd": genrule_command,
        }
        genrule_kwargs.update(kwargs)
        native.genrule(name = genrule_name, **genrule_kwargs)

        # 3. Build a Scala library with the above
        filegroup_name = "dar-files-%s" % lf_version
        filegroup_kwargs = {
            "srcs": ["%s-tests-%s.dar" % (dar_name, lf_version) for dar_name in daml_dir_names],
        }
        filegroup_kwargs.update(kwargs)
        native.filegroup(name = filegroup_name, **filegroup_kwargs)

        da_scala_library_name = "dar-files-%s-lib" % lf_version
        da_scala_library_kwargs = {
            "srcs": [":test-dar-lookup-%s" % lf_version],
            "generated_srcs": [":test-dar-files-%s.scala" % lf_version],  # required for scaladoc
            "resources": ["dar-files-%s" % lf_version],
            "deps": ["//daml-lf/language"],
        }
        if add_maven_tag:
            da_scala_library_kwargs.update({"tags": ["maven_coordinates=com.daml:%s-dar-files-%s-lib:__VERSION__" % (maven_name_prefix, lf_version)]})
        da_scala_library_kwargs.update(kwargs)
        da_scala_library(name = da_scala_library_name, **da_scala_library_kwargs)
31,237
def svn_stream_from_stringbuf(*args):
    """svn_stream_from_stringbuf(svn_stringbuf_t str, apr_pool_t pool) -> svn_stream_t"""
    # SWIG-generated thin wrapper: forwards straight to the C binding.
    return _core.svn_stream_from_stringbuf(*args)
31,238
def get_autoencoder_model(hidden_units, target_predictor_fn, activation,
                          add_noise=None, dropout=None):
    """Returns a function that creates a Autoencoder TensorFlow subgraph.

    Args:
      hidden_units: List of values of hidden units for layers.
      target_predictor_fn: Function that will predict target from input
                           features. This can be logistic regression,
                           linear regression or any other model,
                           that takes x, y and returns predictions and loss
                           tensors.
      activation: activation function used to map inner latent layer onto
                  reconstruction layer.
      add_noise: a function that adds noise to tensor_in,
             e.g. def add_noise(x):
                      return(x + np.random.normal(0, 0.1, (len(x), len(x[0]))))
      dropout: When not none, causes dropout regularization to be used,
               with the specified probability of removing a given coordinate.

    Returns:
      A function that creates the subgraph.
    """
    def dnn_autoencoder_estimator(x):
        """Autoencoder estimator with target predictor function on top."""
        # Build the encoder/decoder pair from the configured hidden layers.
        encoder, decoder = autoencoder_ops.dnn_autoencoder(
            x, hidden_units, activation, add_noise=add_noise, dropout=dropout)
        # NOTE(review): the predictor is called with (x, decoder) — the
        # reconstruction, not a separate y — confirm against callers.
        return encoder, decoder, target_predictor_fn(x, decoder)
    return dnn_autoencoder_estimator
31,239
def plot_histogram(ax, values, bins, colors='r', log=False, xminmax=None):
    """Draw a single bar histogram of *values* on *ax* and return the axes.

    :param ax: matplotlib axes to draw on.
    :param values: data to histogram.
    :param bins: bin specification forwarded to ``Axes.hist``.
    :param colors: bar color(s).
    :param log: use a logarithmic count axis when True.
    :param xminmax: optional (min, max) range of the histogram.
    """
    ax.hist(
        values,
        histtype="bar",
        bins=bins,
        color=colors,
        log=log,
        alpha=0.8,
        density=False,
        range=xminmax,
    )
    return ax
31,240
def build_model():
    """Build the model.

    A 30-feature input is mapped through a 5-unit tanh layer onto a single
    sigmoid output.

    Returns
    -------
    tensorflow.keras.Model
        The model.
    """
    # shape does not include the batch size.
    features = tf.keras.Input(shape=(30,), name='input_x')
    hidden = tf.keras.layers.Dense(5, activation=tf.keras.activations.tanh)(features)
    prediction = tf.keras.layers.Dense(
        1, activation=tf.keras.activations.sigmoid, name='output_layer'
    )(hidden)
    return tf.keras.Model(inputs=[features], outputs=[prediction])
31,241
def test_compute_difficulty_0_difficult(result_r):
    """Failing every hard question must add the matching 'difficulty' advice.

    GIVEN per-level score/total dicts where the hardest level scores 0
    WHEN Result._compute_difficulty is called
    THEN Result.advices gains the corresponding entry.
    """
    per_level_score = {1: 5, 2: 3, 3: 0}
    per_level_total = {1: 5, 2: 3, 3: 2}

    result_r._compute_difficulty(per_level_score, per_level_total)

    assert (
        result_r.advices["difficulty"]
        == "Vous maîtrisez bien ce sujet sujet mais les questions plus avancées vous échappent encore"
    )
31,242
def create_output_directory(output_dir_path: str) -> None:
    """
    Create the output directory if it doesn't already exist.

    :param output_dir_path: path of the directory to create; any missing
        parent directories are created as well.
    """
    if not os.path.isdir(output_dir_path):
        print("Creating output directory...")
        # makedirs (vs mkdir) also creates missing parents; exist_ok guards
        # against a race where the directory appears between check and create.
        os.makedirs(output_dir_path, exist_ok=True)
31,243
def evaluate_generator(generator, backbone_pool, lookup_table, CONFIG, device, val=True):
    """
    Evaluate the Kendall tau and hardware-constraint loss of the generator.

    Sweeps MAC targets from CONFIG.low_macs to CONFIG.high_macs (step 10),
    asks the generator for an architecture at each target, and compares the
    MACs of the generated architecture against the requested target.

    Returns a tuple (evaluate_metric, total_loss, tau) where evaluate_metric
    maps "gen_macs"/"true_macs" to parallel lists, total_loss is the summed
    hardware-constraint loss, and tau is their Kendall rank correlation.
    """
    total_loss = 0
    evaluate_metric = {"gen_macs": [], "true_macs": []}

    for mac in range(CONFIG.low_macs, CONFIG.high_macs, 10):
        # Target constraint as a (1, 1) float tensor on the eval device.
        hardware_constraint = torch.tensor(mac, dtype=torch.float32)
        hardware_constraint = hardware_constraint.view(-1, 1)
        hardware_constraint = hardware_constraint.to(device)

        backbone = backbone_pool.get_backbone(hardware_constraint.item())
        backbone = backbone.to(device)

        # Scale the raw MAC target into [0, 1] for the generator input.
        normalize_hardware_constraint = min_max_normalize(CONFIG.high_macs, CONFIG.low_macs, hardware_constraint)

        # Noise tensor is zeroed: evaluation is deterministic.
        noise = torch.randn(*backbone.shape)
        noise = noise.to(device)
        noise *= 0

        arch_param = generator(backbone, normalize_hardware_constraint, noise)
        arch_param = lookup_table.get_validation_arch_param(arch_param)

        layers_config = lookup_table.decode_arch_param(arch_param)
        print(layers_config)

        gen_mac = lookup_table.get_model_macs(arch_param)
        hc_loss = cal_hc_loss(gen_mac.cuda(), hardware_constraint.item(), CONFIG.alpha, CONFIG.loss_penalty)

        evaluate_metric["gen_macs"].append(gen_mac.item())
        evaluate_metric["true_macs"].append(mac)

        total_loss += hc_loss.item()

    # Rank correlation between requested and achieved MACs.
    tau, _ = stats.kendalltau(evaluate_metric["gen_macs"], evaluate_metric["true_macs"])

    return evaluate_metric, total_loss, tau
31,244
def load_data(config, var_mode):
    """Main data loading routine.

    Unpickles the pre-dumped variables for each dataset named in
    ``config.data_<mode>`` and concatenates them into one dict keyed by
    variable name. Raises RuntimeError when a dataset's "ready" marker
    file is missing (i.e. the dump step has not been run).
    """
    print("Loading {} data".format(var_mode))

    # use only the first two characters for shorter abbrv
    var_mode = var_mode[:2]

    # Now load data.
    var_name_list = [
        "xs", "ys", "Rs", "ts",
        "img1s", "cx1s", "cy1s", "f1s",
        "img2s", "cx2s", "cy2s", "f2s",
    ]

    data_folder = config.data_dump_prefix
    if config.use_lift:
        data_folder += "_lift"

    # Let's unpickle and save data
    data = {}
    # Multiple datasets are dot-separated in the config value.
    data_names = getattr(config, "data_" + var_mode)
    data_names = data_names.split(".")
    for data_name in data_names:
        cur_data_folder = "/".join([
            data_folder,
            data_name,
            "numkp-{}".format(config.obj_num_kp),
            "nn-{}".format(config.obj_num_nn),
        ])
        if not config.data_crop_center:
            cur_data_folder = os.path.join(cur_data_folder, "nocrop")
        suffix = "{}-{}".format(
            var_mode, getattr(config, "train_max_" + var_mode + "_sample")
        )
        cur_folder = os.path.join(cur_data_folder, suffix)
        # The dump step writes a "ready" marker once all pickles exist.
        ready_file = os.path.join(cur_folder, "ready")
        if not os.path.exists(ready_file):
            # data_gen_lock.unlock()
            raise RuntimeError("Data is not prepared!")

        for var_name in var_name_list:
            cur_var_name = var_name + "_" + var_mode
            in_file_name = os.path.join(cur_folder, cur_var_name) + ".pkl"
            with open(in_file_name, "rb") as ifp:
                # += concatenates across datasets (assumes list-like pickles).
                if var_name in data:
                    data[var_name] += pickle.load(ifp)
                else:
                    data[var_name] = pickle.load(ifp)

    return data
31,245
def getCameras():
    """Return the transform nodes of all cameras in the current Maya scene."""
    camera_shapes = cmds.ls(type='camera')
    # p=True walks up from each camera shape to its parent transform.
    return cmds.listRelatives(camera_shapes, p=True)
31,246
def convert_quotes(text):
    """
    Convert quotes in *text* into HTML curly quote entities.

    >>> print(convert_quotes('"Isn\\'t this fun?"'))
    &#8220;Isn&#8217;t this fun?&#8221;
    """
    # Entities used below: 8216/8217 = left/right single quote,
    # 8220/8221 = left/right double quote, 8211/8212 = en/em dash.
    punct_class = r"""[!"#\$\%'()*+,-.\/:;<=>?\@\[\\\]\^_`{|}~]"""

    # Special case if the very first character is a quote
    # followed by punctuation at a non-word-break. Close the quotes by brute
    # force:
    text = re.sub(r"""^'(?=%s\\B)""" % (punct_class,), '&#8217;', text)
    text = re.sub(r"""^"(?=%s\\B)""" % (punct_class,), '&#8221;', text)

    # Special case for double sets of quotes, e.g.:
    #   <p>He said, "'Quoted' words in a larger quote."</p>
    text = re.sub(r""""'(?=\w)""", '&#8220;&#8216;', text)
    text = re.sub(r"""'"(?=\w)""", '&#8216;&#8220;', text)

    # Special case for decade abbreviations (the '80s):
    text = re.sub(r"""\b'(?=\d{2}s)""", '&#8217;', text)

    close_class = r'[^\ \t\r\n\[\{\(\-]'
    dec_dashes = '&#8211;|&#8212;'

    # Get most opening single quotes:
    opening_single_quotes_regex = re.compile(r"""
            (
                \s          |   # a whitespace char, or
                &nbsp;      |   # a non-breaking space entity, or
                --          |   # dashes, or
                &[mn]dash;  |   # named dash entities
                %s          |   # or decimal entities
                &\#x201[34];    # or hex
            )
            '                   # the quote
            (?=\w)              # followed by a word character
            """ % (dec_dashes,), re.VERBOSE)
    text = opening_single_quotes_regex.sub(r'\1&#8216;', text)

    # Closing single quote not followed by whitespace, an 's', or a digit.
    closing_single_quotes_regex = re.compile(r"""
            (%s)
            '
            (?!\s | s\b | \d)
            """ % (close_class,), re.VERBOSE)
    text = closing_single_quotes_regex.sub(r'\1&#8217;', text)

    # Closing single quote followed by whitespace or a possessive 's'.
    closing_single_quotes_regex = re.compile(r"""
            (%s)
            '
            (\s | s\b)
            """ % (close_class,), re.VERBOSE)
    text = closing_single_quotes_regex.sub(r'\1&#8217;\2', text)

    # Any remaining single quotes should be opening ones:
    text = re.sub("'", '&#8216;', text)

    # Get most opening double quotes:
    opening_double_quotes_regex = re.compile(r"""
            (
                \s          |   # a whitespace char, or
                &nbsp;      |   # a non-breaking space entity, or
                --          |   # dashes, or
                &[mn]dash;  |   # named dash entities
                %s          |   # or decimal entities
                &\#x201[34];    # or hex
            )
            "                   # the quote
            (?=\w)              # followed by a word character
            """ % (dec_dashes,), re.VERBOSE)
    text = opening_double_quotes_regex.sub(r'\1&#8220;', text)

    # Double closing quotes:
    closing_double_quotes_regex = re.compile(r"""
            #(%s)?  # character that indicates the quote should be closing
            "
            (?=\s)
            """ % (close_class,), re.VERBOSE)
    text = closing_double_quotes_regex.sub('&#8221;', text)

    closing_double_quotes_regex = re.compile(r"""
            (%s)   # character that indicates the quote should be closing
            "
            """ % (close_class,), re.VERBOSE)
    text = closing_double_quotes_regex.sub(r'\1&#8221;', text)

    # Any remaining quotes should be opening ones.
    text = re.sub('"', '&#8220;', text)

    return text
31,247
def time_shift(signal, n_samples_shift, circular_shift=True, keepdims=False):
    """Shift a signal in the time domain by n samples. This function will
    perform a circular shift by default, inherently assuming that the signal is
    periodic. Use the option `circular_shift=False` to pad with nan values
    instead.

    Notes
    -----
    This function is primarily intended to be used when processing impulse
    responses.

    Parameters
    ----------
    signal : ndarray, float
        Signal to be shifted
    n_samples_shift : integer
        Number of samples by which the signal should be shifted. A negative
        number of samples will result in a left-shift, while a positive
        number of samples will result in a right shift of the signal.
    circular_shift : bool, True
        Perform a circular or non-circular shift. If a non-circular shift is
        performed, the data will be padded with nan values at the respective
        beginning or ending of the data, corresponding to the number of samples
        the data is shifted.
    keepdims : bool, False
        Do not squeeze the data before returning.

    Returns
    -------
    shifted_signal : ndarray, float
        Shifted input signal

    Raises
    ------
    ValueError
        If a non-circular shift exceeds the signal length, or the number of
        shift values matches neither 1 nor the number of channels.
    """
    # Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin int dtype is the intended behavior.
    n_samples_shift = np.asarray(n_samples_shift, dtype=int)

    if np.any(signal.shape[-1] < n_samples_shift):
        msg = "Shifting by more samples than length of the signal."
        if circular_shift:
            # Harmless for a circular shift (roll wraps), so only warn.
            warnings.warn(msg, UserWarning)
        else:
            raise ValueError(msg)

    # Flatten all leading dimensions into a channel axis for per-channel work.
    signal = np.atleast_2d(signal)
    n_samples = signal.shape[-1]
    signal_shape = signal.shape
    signal = np.reshape(signal, (-1, n_samples))
    n_channels = np.prod(signal.shape[:-1])

    # Accept a scalar shift (applied to all channels) or one shift per channel.
    if n_samples_shift.size == 1:
        n_samples_shift = np.broadcast_to(n_samples_shift, n_channels)
    elif n_samples_shift.size == n_channels:
        n_samples_shift = np.reshape(n_samples_shift, n_channels)
    else:
        raise ValueError("The number of shift samples has to match the number \
of signal channels.")

    shifted_signal = signal.copy()
    for channel in range(n_channels):
        shifted_signal[channel, :] = \
            np.roll(
                shifted_signal[channel, :],
                n_samples_shift[channel],
                axis=-1)

        if not circular_shift:
            if n_samples_shift[channel] < 0:
                # index is negative, so index will reference from the
                # end of the array
                shifted_signal[channel, n_samples_shift[channel]:] = np.nan
            else:
                # index is positive, so index will reference from the
                # start of the array
                shifted_signal[channel, :n_samples_shift[channel]] = np.nan

    shifted_signal = np.reshape(shifted_signal, signal_shape)
    if not keepdims:
        shifted_signal = np.squeeze(shifted_signal)

    return shifted_signal
31,248
def compare_shapes(
    proto, input_key_values, expected_outputs, use_cpu_only=False, pred=None
):
    """
    Run a Core ML prediction and assert output shapes match expectations.

    Inputs:
    - proto: MLModel proto.
    - input_key_values: str -> np.array or PIL.Image. Keys must match those
      in input_placeholders.
    - expected_outputs: dict[str, np.array].
    - use_cpu_only: True/False.
    - pred: Prediction to use, if it has already been computed.

    Note: this check is a no-op on non-macOS hosts, where Core ML prediction
    is unavailable.
    """
    if _IS_MACOS:
        if not pred:
            pred = run_core_ml_predict(proto, input_key_values, use_cpu_only)
        for o, expected in expected_outputs.items():
            msg = "Output: {}. expected shape {} != actual shape {}".format(
                o, expected.shape, pred[o].shape
            )
            # Core ML does not support scalar as output
            # remove this special case when support is added
            if expected.shape == () and pred[o].shape == (1,):
                continue
            assert pred[o].shape == expected.shape, msg
31,249
def get_avgerr(l1_cols_train, l2_cols_train, own_cols_xgb, own_cols_svm, own_cols_bay,
               own_cols_adab, own_cols_lass, df_train, df_test, experiment, fold_num=0):
    """
    Use mae as an evaluation metric and extract the appropiate columns to calculate the metric

    Parameters
    ----------
    l1_cols_train : list
        list with names for the Layer 1 training columns
    l2_cols_train : list
        list with names for the Layer 2 training columns
    own_cols_xgb : list
        list with names for the Layer 1 xgb columns
    own_cols_svm : list
        list with names for the Layer 1 svm columns
    own_cols_bay : list
        list with names for the Layer 1 brr columns
    own_cols_adab : list
        list with names for the Layer 1 adaboost columns
    own_cols_lass : list
        list with names for the Layer 1 lasso columns
    df_train : pd.DataFrame
        dataframe for training predictions
    df_test : pd.DataFrame
        dataframe for testing predictions
    experiment : str
        dataset name
    fold_num : int
        number for the fold

    Returns
    -------
    float
        best mae for Layer 1
    float
        best mae for Layer 2
    float
        best mae for Layer 3
    float
        best mae for all layers
    float
        mae for xgb
    float
        mae for svm
    float
        mae for brr
    float
        mae for adaboost
    float
        mae for lasso
    list
        selected predictions Layer 2
    list
        error for the selected predictions Layer 2
    float
        train mae for Layer 3
    """
    # Get the mae: per-column mean absolute error against the "time" target
    # on the training frame (one score per candidate column).
    l1_scores = [x / float(len(df_train["time"])) for x in
                 list(df_train[l1_cols_train].sub(df_train["time"].squeeze(), axis=0).apply(abs).apply(sum, axis="rows"))]
    l2_scores = [x / float(len(df_train["time"])) for x in
                 list(df_train[l2_cols_train].sub(df_train["time"].squeeze(), axis=0).apply(abs).apply(sum, axis="rows"))]
    own_scores_xgb = [x / float(len(df_train["time"])) for x in
                      list(df_train[own_cols_xgb].sub(df_train["time"].squeeze(), axis=0).apply(abs).apply(sum, axis="rows"))]
    own_scores_svm = [x / float(len(df_train["time"])) for x in
                      list(df_train[own_cols_svm].sub(df_train["time"].squeeze(), axis=0).apply(abs).apply(sum, axis="rows"))]
    own_scores_bay = [x / float(len(df_train["time"])) for x in
                      list(df_train[own_cols_bay].sub(df_train["time"].squeeze(), axis=0).apply(abs).apply(sum, axis="rows"))]
    own_scores_lass = [x / float(len(df_train["time"])) for x in
                       list(df_train[own_cols_lass].sub(df_train["time"].squeeze(), axis=0).apply(abs).apply(sum, axis="rows"))]
    own_scores_adab = [x / float(len(df_train["time"])) for x in
                       list(df_train[own_cols_adab].sub(df_train["time"].squeeze(), axis=0).apply(abs).apply(sum, axis="rows"))]
    # NOTE(review): own_scores_l2 is computed but never used afterwards.
    own_scores_l2 = [x / float(len(df_train["time"])) for x in
                     list(df_train[l2_cols_train].sub(df_train["time"].squeeze(), axis=0).apply(abs).apply(sum, axis="rows"))]

    # Pick the column with the lowest training MAE for each layer/model.
    selected_col_l1 = l1_cols_train[l1_scores.index(min(l1_scores))]
    selected_col_l2 = l2_cols_train[l2_scores.index(min(l2_scores))]

    # Set mae to 0.0 if not able to get column
    # NOTE(review): min()/list.index() raise ValueError (not KeyError), so
    # these handlers look unreachable as written — confirm intended type.
    try:
        selected_col_own_xgb = own_cols_xgb[own_scores_xgb.index(min(own_scores_xgb))]
    except KeyError:
        selected_col_own_xgb = 0.0
    try:
        selected_col_own_svm = own_cols_svm[own_scores_svm.index(min(own_scores_svm))]
    except KeyError:
        selected_col_own_svm = 0.0
    try:
        selected_col_own_bay = own_cols_bay[own_scores_bay.index(min(own_scores_bay))]
    except KeyError:
        selected_col_own_bay = 0.0
    try:
        selected_col_own_lass = own_cols_lass[own_scores_lass.index(min(own_scores_lass))]
    except KeyError:
        selected_col_own_lass = 0.0
    try:
        selected_col_own_adab = own_cols_adab[own_scores_adab.index(min(own_scores_adab))]
    except KeyError:
        selected_col_own_adab = 0.0

    # Remove problems with seemingly duplicate columns getting selected:
    # on KeyError, retry with the base name (duplicate columns carry a
    # ".N" suffix in the training frame).
    try:
        cor_l1 = sum(map(abs, df_test["time"] - df_test[selected_col_l1])) / len(df_test["time"])
    except KeyError:
        selected_col_l1 = selected_col_l1.split(".")[0]
        cor_l1 = sum(map(abs, df_test["time"] - df_test[selected_col_l1])) / len(df_test["time"])
    try:
        cor_l2 = sum(map(abs, df_test["time"] - df_test[selected_col_l2])) / len(df_test["time"])
    except KeyError:
        selected_col_l2 = selected_col_l2.split(".")[0]
        cor_l2 = sum(map(abs, df_test["time"] - df_test[selected_col_l2])) / len(df_test["time"])
    try:
        cor_own_xgb = sum(map(abs, df_test["time"] - df_test[selected_col_own_xgb])) / len(df_test["time"])
    except KeyError:
        selected_col_own_xgb = selected_col_own_xgb.split(".")[0]
        cor_own_xgb = sum(map(abs, df_test["time"] - df_test[selected_col_own_xgb])) / len(df_test["time"])
    try:
        cor_own_svm = sum(map(abs, df_test["time"] - df_test[selected_col_own_svm])) / len(df_test["time"])
    except KeyError:
        selected_col_own_svm = selected_col_own_svm.split(".")[0]
        cor_own_svm = sum(map(abs, df_test["time"] - df_test[selected_col_own_svm])) / len(df_test["time"])
    try:
        cor_own_bay = sum(map(abs, df_test["time"] - df_test[selected_col_own_bay])) / len(df_test["time"])
    except KeyError:
        selected_col_own_bay = selected_col_own_bay.split(".")[0]
        cor_own_bay = sum(map(abs, df_test["time"] - df_test[selected_col_own_bay])) / len(df_test["time"])
    try:
        cor_own_lass = sum(map(abs, df_test["time"] - df_test[selected_col_own_lass])) / len(df_test["time"])
    except KeyError:
        selected_col_own_lass = selected_col_own_lass.split(".")[0]
        cor_own_lass = sum(map(abs, df_test["time"] - df_test[selected_col_own_lass])) / len(df_test["time"])
    try:
        cor_own_adab = sum(map(abs, df_test["time"] - df_test[selected_col_own_adab])) / len(df_test["time"])
    except KeyError:
        selected_col_own_adab = selected_col_own_adab.split(".")[0]
        cor_own_adab = sum(map(abs, df_test["time"] - df_test[selected_col_own_adab])) / len(df_test["time"])
    cor_l3 = sum(map(abs, df_test["time"] - df_test["preds"])) / len(df_test["time"])

    # Variables holding all predictions across experiments
    # NOTE(review): all_preds_l1/l2/l3 are module-level accumulators.
    all_preds_l1.extend(zip(df_test["time"], df_test[selected_col_l1], [experiment] * len(df_test[selected_col_l1]), [len(df_train.index)] * len(df_test[selected_col_l1]), [fold_num] * len(df_test[selected_col_l1]), df_test[selected_col_own_xgb], df_test[selected_col_own_bay], df_test[selected_col_own_lass], df_test[selected_col_own_adab]))
    all_preds_l2.extend(zip(df_test["time"], df_test[selected_col_l2], [experiment] * len(df_test[selected_col_l2]), [len(df_train.index)] * len(df_test[selected_col_l2]), [fold_num] * len(df_test[selected_col_l2])))
    all_preds_l3.extend(zip(df_test["time"], df_test["preds"], [experiment] * len(df_test["preds"]), [len(df_train.index)] * len(df_test["preds"]), [fold_num] * len(df_test["preds"])))

    # Also get the mae for the training models
    train_cor_l1 = sum(map(abs, df_train["time"] - df_train[selected_col_l1])) / len(df_train["time"])
    train_cor_l2 = sum(map(abs, df_train["time"] - df_train[selected_col_l2])) / len(df_train["time"])
    train_cor_l3 = sum(map(abs, df_train["time"] - df_train["preds"])) / len(df_train["time"])

    print()
    print("Error l1: %s,%s" % (train_cor_l1, cor_l1))
    print("Error l2: %s,%s" % (train_cor_l2, cor_l2))
    print("Error l3: %s,%s" % (train_cor_l3, cor_l3))
    print(selected_col_l1, selected_col_l2, selected_col_own_xgb)
    print()
    print()
    print("-------------")

    # Try to select the best Layer, this becomes Layer 4: report the test
    # MAE of whichever layer had the lowest *training* MAE.
    cor_l4 = 0.0
    if (train_cor_l1 < train_cor_l2) and (train_cor_l1 < train_cor_l3):
        cor_l4 = cor_l1
    elif (train_cor_l2 < train_cor_l1) and (train_cor_l2 < train_cor_l3):
        cor_l4 = cor_l2
    else:
        cor_l4 = cor_l3

    return (cor_l1, cor_l2, cor_l3, cor_l4, cor_own_xgb, cor_own_svm, cor_own_bay, cor_own_adab, cor_own_lass, list(df_test[selected_col_l2]), list(df_test["time"] - df_test[selected_col_l2]), train_cor_l3)
31,250
def get_repository_metadata_by_changeset_revision( trans, id, changeset_revision ): """Get metadata for a specified repository change set from the database.""" # Make sure there are no duplicate records, and return the single unique record for the changeset_revision. Duplicate records were somehow # created in the past. The cause of this issue has been resolved, but we'll leave this method as is for a while longer to ensure all duplicate # records are removed. all_metadata_records = trans.sa_session.query( trans.model.RepositoryMetadata ) \ .filter( and_( trans.model.RepositoryMetadata.table.c.repository_id == trans.security.decode_id( id ), trans.model.RepositoryMetadata.table.c.changeset_revision == changeset_revision ) ) \ .order_by( trans.model.RepositoryMetadata.table.c.update_time.desc() ) \ .all() if len( all_metadata_records ) > 1: # Delete all recrds older than the last one updated. for repository_metadata in all_metadata_records[ 1: ]: trans.sa_session.delete( repository_metadata ) trans.sa_session.flush() return all_metadata_records[ 0 ] elif all_metadata_records: return all_metadata_records[ 0 ] return None
31,251
def generate_encounter_time(t_impact=0.495*u.Gyr, graph=False): """Generate fiducial model at t_impact after the impact""" # impact parameters M = 5e6*u.Msun rs = 10*u.pc # impact parameters Tenc = 0.01*u.Gyr dt = 0.05*u.Myr # potential parameters potential = 3 Vh = 225*u.km/u.s q = 1*u.Unit(1) rhalo = 0*u.pc par_pot = np.array([Vh.to(u.m/u.s).value, q.value, rhalo.to(u.m).value]) pkl = pickle.load(open('../data/fiducial_at_encounter.pkl', 'rb')) model = pkl['model'] xsub = pkl['xsub'] vsub = pkl['vsub'] # generate perturbed stream model potential_perturb = 2 par_perturb = np.array([M.to(u.kg).value, rs.to(u.m).value, 0, 0, 0]) #print(vsub.si, par_perturb) x1, x2, x3, v1, v2, v3 = interact.general_interact(par_perturb, xsub.to(u.m).value, vsub.to(u.m/u.s).value, Tenc.to(u.s).value, t_impact.to(u.s).value, dt.to(u.s).value, par_pot, potential, potential_perturb, model.x.to(u.m).value, model.y.to(u.m).value, model.z.to(u.m).value, model.v_x.to(u.m/u.s).value, model.v_y.to(u.m/u.s).value, model.v_z.to(u.m/u.s).value) stream = {} stream['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc) stream['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s) c = coord.Galactocentric(x=stream['x'][0], y=stream['x'][1], z=stream['x'][2], v_x=stream['v'][0], v_y=stream['v'][1], v_z=stream['v'][2], **gc_frame_dict) cg = c.transform_to(gc.GD1) wangle = 180*u.deg if graph: plt.close() plt.figure(figsize=(10,5)) plt.plot(cg.phi1.wrap_at(180*u.deg), cg.phi2, 'k.', ms=1) plt.xlim(-80,0) plt.ylim(-10,10) plt.tight_layout() return cg
31,252
def get_census_centroid(census_tract_id):
    """
    Return the (latitude, longitude) centroid of the requested census tract.

    Results are memoized in the module-level _cached_centroids dict so the
    tract database is only scanned once per tract id.

    :param census_tract_id: identifier of the census tract to look up.
    :return: (lat, lon) floats, or None when no matching tract is found.
    """
    global _cached_centroids
    if census_tract_id in _cached_centroids:
        return _cached_centroids[census_tract_id]

    for row in census_tracts_db.as_dictionary():
        if tract_id_equals(census_tract_id, row[census_tracts_db.ROW_GEOID]):
            centroid = (float(row[census_tracts_db.ROW_LATITUDE]),
                        float(row[census_tracts_db.ROW_LONGITUDE]))
            _cached_centroids[census_tract_id] = centroid
            return centroid
31,253
def abvcalc_main():
    """Entry point for abvcalc command line script.

    Reads original and final gravity from the command line and prints the
    resulting ABV and apparent attenuation percentages.
    """
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('og', type=float, help='Original Gravity')
    parser.add_argument('fg', type=float, help='Final Gravity')
    opts = parser.parse_args()

    print('{0:.02f}% ABV'.format(100. * abv_calc(opts.og, opts.fg)))
    print('{0:.0f}% Attenuation'.format(100.0 * attenuation(opts.og, opts.fg)))
31,254
def reptile_select_list(news_list_url, mainElem, linkElem, TimeElem, titleElem, context_config, class_list=[]):
    """Scrape article bodies and titles starting from a news-list page.

    Bundles the selector arguments and delegates to reptile_select_context
    in list mode (last positional argument True).
    """
    selector_args = [mainElem, linkElem, TimeElem, titleElem]
    reptile_select_context(news_list_url, selector_args, None, context_config, class_list, True)
31,255
def configure_context(args: Namespace, layout: Layout, stop_event: Event) -> Context:
    """Create the application context and wire up its collaborators.

    Builds the Context from the CLI args, attaches the layout, sensors and
    key listener, switches to the "normal" state and loads the saved config.
    """
    ctx = Context(args.file)
    ctx.layout = layout
    ctx.sensors = Sensors(ctx, stop_event)
    # The listener shares the sensors' lock so key handling and sensor
    # updates never interleave.
    ctx.listener = KeyListener(ctx.on_key, stop_event, ctx.sensors.get_lock())
    ctx.change_state("normal")
    ctx.load_config()
    return ctx
31,256
async def test_turn_off_image(opp):
    """Fetching an image must raise once the demo camera is turned off."""
    service_data = {ATTR_ENTITY_ID: ENTITY_CAMERA}
    await opp.services.async_call(
        CAMERA_DOMAIN, SERVICE_TURN_OFF, service_data, blocking=True
    )

    with pytest.raises(OpenPeerPowerError) as error:
        await async_get_image(opp, ENTITY_CAMERA)
    assert error.args[0] == "Camera is off"
31,257
def offsetEndpoint(points, distance, beginning=True):
    """ Pull back end point of way in order to create VISSIM intersection.
        Input: list of nodes, distance, beginning or end of link
        Output: transformed end point (numpy array)
    """
    # a = neighbor of the endpoint, b = the endpoint being pulled back.
    if beginning:
        a = np.array(points[1], dtype='float')
        b = np.array(points[0], dtype='float')
    else:
        a = np.array(points[-2], dtype='float')
        b = np.array(points[-1], dtype='float')
    # Compute the segment length once (previously recomputed three times).
    length = np.linalg.norm(b - a)
    if length < distance:
        # Clamp so the offset never crosses the neighboring node.
        distance = length * 0.99
    db = (b - a) / length * distance
    return b - db
31,258
def write_config(infile, data, ftype='yaml'):
    """
    Serialize a dictionary of parameters to a config file.

    Input - full path to config file
            Dictionary of parameters to write
    Output - None

    :param infile: path of the config file to (over)write.
    :param data: dict of parameters to serialize.
    :param ftype: 'json'/'pyini' for JSON output, 'yaml' for YAML output.
    :raises RuntimeError: when the file cannot be written or the format is
        not recognized.
    """
    infile = str(infile)
    if ftype in ['json', 'pyini']:
        try:
            # 'with' guarantees the handle is closed (previously leaked).
            with open(infile, 'w') as handle:
                json.dump(data, handle, sort_keys=True, indent=4)
        # Narrowed from a bare except: serialization bugs (e.g. TypeError on
        # unserializable data) now surface instead of being reported as a
        # misleading "not found".
        except OSError as exc:
            raise RuntimeError('{0} not found'.format(infile)) from exc
    elif ftype in ['yaml']:
        try:
            with open(infile, 'w') as handle:
                yaml.safe_dump(data, handle, default_flow_style=False)
        except OSError as exc:
            raise RuntimeError('{0} not found'.format(infile)) from exc
    else:
        # Fixed: message previously interpolated the path instead of the
        # unrecognized format name.
        raise RuntimeError('{0} format not recognized'.format(ftype))
31,259
def _remove_parenthesis(word): """ Examples -------- >>> _remove_parenthesis('(ROMS)') 'ROMS' """ try: return word[word.index("(") + 1 : word.rindex(")")] except ValueError: return word
31,260
def check_closed(f):
    """Decorator that checks if connection/cursor is closed.

    Raises ``exceptions.Error`` when the wrapped method is invoked on an
    object whose ``closed`` attribute is truthy; otherwise delegates to the
    wrapped method unchanged.
    """
    import functools

    # functools.wraps preserves the wrapped method's name/docstring, which
    # the bare wrapper previously clobbered (breaking introspection/help()).
    @functools.wraps(f)
    def g(self, *args, **kwargs):
        if self.closed:
            raise exceptions.Error(f"{self.__class__.__name__} already closed")
        return f(self, *args, **kwargs)

    return g
31,261
def get_box_filter(b: float, b_list: np.ndarray, width: float) -> np.ndarray:
    """
    Evaluate a box (rectangular) filter centered on ``b`` at each point of
    ``b_list``: 1.0 inside the window of the given width (inclusive at the
    edges), 0.0 outside.
    """
    distance = np.abs(b_list - b)
    # heaviside(x, 1) is 1 for x >= 0, so the window edges are included.
    return np.heaviside(width / 2 - distance, 1)
31,262
def search(ra=None, dec=None, radius=None, columns=None, offset=None, limit=None, orderby=None):
    """Build and return a CarpynchoQuery against the carpyncho database.

    All parameters are optional filters/modifiers forwarded verbatim to the
    query object (cone-search position/radius, column selection, paging and
    ordering).
    """
    return CarpynchoQuery(ra, dec, radius, columns, offset, limit, orderby)
31,263
def repetitions(seq: str) -> int:
    """
    [Easy] https://cses.fi/problemset/task/1069/
    [Solution] https://cses.fi/paste/659d805082c50ec1219667/

    Return the length of the longest run of a single repeated character in a
    DNA sequence (characters A, C, G, T). For the empty string the answer
    is 0.

    Constraints: 1 ≤ n ≤ 10^6

    Example
        Input: ATTCGGGA
        Output: 3
    """
    best = 0
    run_char = ''
    run_len = 0
    for ch in seq:
        if ch != run_char:
            # A new run starts: bank the previous run's length.
            best = max(best, run_len)
            run_char = ch
            run_len = 0
        run_len += 1
    # The final run is never banked inside the loop.
    return max(best, run_len)
31,264
def get_gmusicmanager( useMobileclient = False, verify = True, device_id = None ):
    """
    Returns a GmusicAPI_ manager used to perform operations on one's `Google Play Music`_ account.

    If the Musicmanager is instantiated but cannot find the device (hence properly authorize
    for operation), then the attribute ``error_device_ids`` is a non-empty :py:class:`set` of
    valid device IDs.

    :param bool useMobileClient: optional argument. If ``True``, use the
       :py:class:`MobileClient <gmusicapi.MobileClient>` manager, otherwise use the
       :py:class:`Musicmanager <gmusicapi.MusicManager>` manager. Default is ``False``.
    :param bool verify: optional argument, whether to verify SSL connections. Default is ``True``.
    :param str device_id: optional argument. If defined, then attempt to use this MAC ID
       to register the music manager.

    :raise ValueError: if cannot instantiate the Musicmanager.
    :raise AssertionError: if cannot get machine's MAC id.

    .. seealso:: :py:meth:`gmusicmanager <howdy.music.music.gmusicmanager>`.
    """
    #
    ## first copy this code from gmusic.mobileclient
    ## because base method to determine device id by gmusicapi fails when cannot be found
    def return_deviceid( replace_colons = True ):
        # Attempt 1: use the machine MAC via uuid.getnode(); a set
        # multicast bit ((mac >> 40) % 2) means the value is random/invalid.
        from uuid import getnode as getmac
        from gmusicapi.utils import utils
        try:
            mac_int = getmac( )
            if (mac_int >> 40) % 2:
                raise OSError("a valid MAC could not be determined."
                              " Provide an android_id (and be"
                              " sure to provide the same one on future runs).")
            device_id = utils.create_mac_string( mac_int )
            if replace_colons:
                return device_id.replace( ':', '' )
            else:
                return device_id
        except Exception:
            pass
        # Attempt 2 (fallback): derive a hardware address from the first
        # non-loopback network interface via netifaces; None when that
        # also fails.
        try:
            import netifaces
            valid_ifaces = list( filter(lambda iface: iface.lower( ) != 'lo', netifaces.interfaces( ) ) )
            if len( valid_ifaces ) == 0: return None
            valid_iface = max( valid_ifaces )
            iface_tuples = netifaces.ifaddresses( valid_iface )[ netifaces.AF_LINK ]
            if len( iface_tuples ) == 0: return None
            hwaddr = max( iface_tuples )[ 'addr' ].upper( )
            if replace_colons: return hwaddr.replace(':', '')
            else: return hwaddr
        except Exception:
            return None
    if not useMobileclient:
        # Musicmanager path: OAuth2 credentials + uploader registered by MAC.
        if device_id is None:
            device_id = return_deviceid( replace_colons = False )
        assert( device_id is not None ), "error, could not determine the local MAC id"
        mmg = gmusicapi.Musicmanager( debug_logging = False, verify_ssl = verify )
        credentials = core.oauthGetOauth2ClientGoogleCredentials( )
        if credentials is None:
            raise ValueError( "Error, do not have Google Music credentials." )
        mmg.login( oauth_credentials = credentials, uploader_id = device_id )
        mmg.error_device_ids = { }
    else:
        # Mobileclient path: colon-less device id; on an invalid id the
        # API's exception carries the set of valid ids, surfaced to callers
        # via mmg.error_device_ids.
        if device_id is None:
            device_id = return_deviceid( )
        assert( device_id is not None ), "error, could not determine the local MAC id"
        mmg = gmusicapi.Mobileclient( debug_logging = False, verify_ssl = verify )
        credentials = oauth_get_google_credentials( )
        if credentials is None:
            raise ValueError( "Error, do not have GMusicAPI Mobileclient credentials." )
        try:
            mmg.oauth_login( oauth_credentials = credentials, device_id = device_id )
            mmg.error_device_ids = { }
        except gmusicapi.exceptions.InvalidDeviceId as exc:
            # tack on some error messages
            mmg.error_device_ids = set( exc.valid_device_ids )
    return mmg
31,265
def measure(data, basis, gaussian=0, poisson=0):
    """Compute the measurements <x, phi_i> of ``data`` against each row of ``basis``.

    Args:
        data (numpy 1D array, length n): the initial, uncompressed signal.
        basis (numpy 2D array, shape (m, n)): the measurement basis; each of the
            m rows is one measurement vector phi_i.
        gaussian (float): if non-zero, standard deviation of additive Gaussian
            noise drawn independently for the copy of the signal seen by each
            measurement vector.
        poisson (int or float): if non-zero, replace the (absolute) signal with
            a Poisson draw before measuring (shot noise).

    Returns:
        A m-sized numpy 1D array of the m dot products."""
    # np.float_ was removed in NumPy 2.0; asarray(..., dtype=float) is equivalent.
    data = np.asarray(data, dtype=float)
    noisy = gaussian != 0 or poisson != 0
    if noisy:
        # One independent noisy copy of the signal per measurement vector.
        data = np.repeat([data], basis.shape[0], 0)
    if gaussian != 0:
        data += np.random.normal(scale=gaussian, size=data.shape)
    if poisson != 0:
        data = np.asarray(np.random.poisson(np.abs(data)), dtype=float)
    if noisy:
        # Row-wise dot products. Equivalent to np.diag(data @ basis.T) but avoids
        # materializing the full m x m product: O(m*n) instead of O(m^2 * n).
        return np.einsum('ij,ij->i', data, basis)
    return data.dot(basis.transpose())
31,266
def to_huang_ner(out_file, cd, n_workers = 47, max_line = 1000):
    """
    Dump crowd-annotated NER labels into tab-separated files under ``huang_ner/``.

    For every token of every sentence in ``cd`` one row is written: a running
    line number followed by one 0/1 column per worker (1 = the worker labeled
    the token as an entity, 0 = non-entity or no label). A new numbered output
    file (``00000.txt``, ``00001.txt``, ...) is started every 20 sentences.

    NOTE(review): the ``out_file`` and ``max_line`` parameters are never used —
    confirm whether file naming/rotation was meant to honor them.

    Parameters:
        cd: crowd dataset; must expose parallel ``sentences`` and ``crowdlabs``
            sequences, where each crowd label has a worker id ``wid`` and a
            per-token label array ``sen`` (9 marks a non-entity token).
        n_workers (int): number of worker columns to emit.
    """
    fn = 'huang_ner/'  # output directory (assumed to exist — TODO confirm)
    abs_cnt = 0
    # Open the first output file and write its worker-column header.
    f = open(fn + '' + str(abs_cnt).zfill(5) + '.txt', 'w')
    f.write('Line\t')
    for x in range(n_workers):
        f.write(str(x) + '\t')
    f.write('\n')
    res = []   # NOTE(review): unused
    wids = []  # NOTE(review): unused
    line = 0   # global (cross-file) running line counter
    for index, (sen, clabs) in enumerate(zip(cd.sentences, cd.crowdlabs)):
        #text_sen = get_word_list(sen, features) VERY SLOW
        text_sen = []
        #if index not in indices: continue
        n = len(sen)
        if n == 0: continue
        # Rotate to a fresh output file every 20 sentences.
        if (index+1) % 20 == 0:
            f.close()
            abs_cnt += 1
            f = open(fn + '' + str(abs_cnt).zfill(5) + '.txt', 'w')
            f.write('Line\t')
            for x in range(n_workers):
                f.write(str(x) + '\t')
            f.write('\n')
        # One output row per token: binary label per worker.
        for i in range(n):
            labs = ['0']* n_workers
            for cl in clabs:
                # 9 is non entity
                labs[cl.wid] = str(0 if cl.sen[i] == 9 else 1)
            line += 1
            f.write(str(line) + '\t')
            for j in range(n_workers):
                f.write(labs[j])
                if j < n_workers - 1:
                    f.write('\t')
            f.write('\n')
    f.close()
31,267
def inline(session, module):
    """ Run specific per-module inline tests """
    # Every module needs the base install requirements.
    session.install("-r", "requirements/install.txt")
    # The mathematics module additionally needs the test requirements.
    needs_test_deps = str(module).endswith('mathematics')
    if needs_test_deps:
        session.install("-r", "requirements/nox/tests.txt")
    session.run('python', '-m', module)
31,268
def getUserByMail(email):
    """Look up a ``User`` record by email address.

    Returns the matching ``User``, or ``None`` when the lookup fails
    (e.g. no user with that email exists).
    """
    try:
        return db_session.query(User).filter_by(email=email).one()
    except Exception:
        return None
31,269
def add_axes(
    # Ranges
    xrange=[0,1],
    yrange=[0,1],
    zrange=[0,1],
    # Titles
    xtitle = 'x',
    ytitle = 'y',
    ztitle = 'z',
    htitle = '',
    # Grids
    xyGrid=True,
    yzGrid=True,
    zxGrid=True,
    xyGrid2=False,
    yzGrid2=False,
    zxGrid2=False,
    xyGridTransparent=True,
    yzGridTransparent=True,
    zxGridTransparent=True,
    xyGrid2Transparent=True,
    yzGrid2Transparent=True,
    zxGrid2Transparent=True,
    # Other
    numberOfDivisions=10
):
    """
    Routine for adding custom x-, y- & z-axes to a figure.

    :param list xrange: Range of x-axis [min,max]
    :param list yrange: Range of y-axis [min,max]
    :param list zrange: Range of z-axis [min,max]
    :param string xtitle: Label of x-axis
    :param string ytitle: Label of y-axis
    :param string ztitle: Label of z-axis
    :param string htitle: Header title
    :param boolean xyGrid: Add primary grid on xy-plane
    :param boolean yzGrid: Add primary grid on yz-plane
    :param boolean zxGrid: Add primary grid on xz-plane
    :param boolean xyGrid2: Add secondary grid on xy-plane
    :param boolean yzGrid2: Add secondary grid on yz-plane
    :param boolean zxGrid2: Add secondary grid on xz-plane
    :param boolean xyGridTransparent: Transparency for xyGrid
    :param boolean yzGridTransparent: Transparency for yzGrid
    :param boolean zxGridTransparent: Transparency for zxGrid
    :param boolean xyGrid2Transparent: Transparency for xyGrid2
    :param boolean yzGrid2Transparent: Transparency for yzGrid2
    :param boolean zxGrid2Transparent: Transparency for zxGrid2
    :param int numberOfDivisions: Number of divisions for all axes
    """
    app = init_app()
    plot_window = VedoPlotWindow.instance().plot_window

    axes = v.Axes(
        numberOfDivisions=numberOfDivisions,
        xtitle=xtitle,
        ytitle=ytitle,
        ztitle=ztitle,
        htitle=htitle,
        xyGrid=xyGrid,
        yzGrid=yzGrid,
        zxGrid=zxGrid,
        # FIX: the secondary-grid flags were previously wired to the *primary*
        # grid arguments (xyGrid2=xyGrid, ...), silently ignoring the
        # xyGrid2/yzGrid2/zxGrid2 parameters.
        xyGrid2=xyGrid2,
        yzGrid2=yzGrid2,
        zxGrid2=zxGrid2,
        xyGridTransparent=xyGridTransparent,
        yzGridTransparent=yzGridTransparent,
        zxGridTransparent=zxGridTransparent,
        xyGrid2Transparent=xyGrid2Transparent,
        yzGrid2Transparent=yzGrid2Transparent,
        zxGrid2Transparent=zxGrid2Transparent,
        xrange=xrange,
        yrange=yrange,
        zrange=zrange
    )
    # Register the axes actor with the currently active figure.
    plot_window.dia_axes[plot_window.fig].append(axes)
31,270
def parse_headers(headers, data):
    """
    Given a header structure and some data, parse the data as headers.

    Each header entry is ``(key, (converter, _), _)``; the matching data value
    is run through ``converter`` and stored under ``key``.
    """
    parsed = {}
    for (key, (convert, _), _), raw_value in zip(headers, data):
        parsed[key] = convert(raw_value)
    return parsed
31,271
def release_gen_payload(runtime, is_name, is_namespace, organization, repository, event_id):
    """Generates two sets of input files for `oc` commands to mirror
content and update image streams.

    Files are generated for each arch defined in ocp-build-data for a version,
    as well as a final file for manifest-lists.

    One set of files are SRC=DEST mirroring definitions for 'oc image mirror'. They
    define what source images we will sync to which destination repos, and what
    the mirrored images will be labeled as.

    The other set of files are YAML image stream tags for 'oc
    apply'. Those are applied to an openshift cluster to define "release
    streams". When they are applied the release controller notices the
    update and begins generating a new payload with the images tagged in
    the image stream.

    For automation purposes this command generates a mirroring yaml files after
    the arch-specific files have been generated. The yaml files include names of
    generated content.

    You may provide the namespace and base name for the image streams, or defaults
    will be used. The generated files will append the -arch and -priv suffixes to
    the given name and namespace as needed.

    The ORGANIZATION and REPOSITORY options are combined into
    ORGANIZATION/REPOSITORY when preparing for mirroring.

    Generate files for mirroring from registry-proxy (OSBS storage) to our
    quay registry:

    \b
        $ doozer --group=openshift-4.2 release:gen-payload \\
            --is-name=4.2-art-latest

    Note that if you use -i to include specific images, you should also include
    openshift-enterprise-cli to satisfy any need for the 'cli' tag. The cli image
    is used automatically as a stand-in for images when an arch does not build
    that particular tag.

    ## Validation ##

    Additionally we want to check that the following conditions are true for each
    imagestream being updated:

    * For all architectures built, RHCOS builds must have matching versions of any
      unshipped RPM they include (per-entry os metadata - the set of RPMs may differ
      between arches, but versions should not).
    * Any RPMs present in images (including machine-os-content) from unshipped RPM
      builds included in one of our candidate tags must exactly version-match the
      latest RPM builds in those candidate tags (ONLY; we never flag what we don't
      directly ship.)

    These checks (and likely more in the future) should run and any failures should
    be listed in brief via a "release.openshift.io/inconsistency" annotation on the
    relevant image istag (these are publicly visible; ref. https://bit.ly/37cseC1)
    and in more detail in state.yaml. The release-controller, per ART-2195, will
    read and propagate/expose this annotation in its display of the release image.
    """
    # Load group config; non-release images are excluded from payload generation.
    runtime.initialize(clone_distgits=False, config_excludes='non_release')
    brew_session = runtime.build_retrying_koji_client()

    base_target = SyncTarget(  # where we will mirror and record the tags
        orgrepo=f"{organization}/{repository}",
        # Fall back to group-derived defaults when the caller did not specify
        # an image stream name/namespace explicitly.
        istream_name=is_name if is_name else default_is_base_name(runtime.get_minor_version()),
        istream_namespace=is_namespace if is_namespace else default_is_base_namespace()
    )

    gen = PayloadGenerator(runtime, brew_session, event_id, base_target)
    # Collect the latest builds plus the three categories of skipped images.
    latest_builds, invalid_name_items, images_missing_builds, mismatched_siblings = gen.load_latest_builds()
    gen.write_mirror_destinations(latest_builds, mismatched_siblings)

    # Report (but do not fail on) every image that was skipped, by reason.
    if invalid_name_items:
        yellow_print("Images skipped due to invalid naming:")
        for img in sorted(invalid_name_items):
            click.echo("   {}".format(img))
    if images_missing_builds:
        yellow_print("No builds found for:")
        for img in sorted(images_missing_builds):
            click.echo("   {}".format(img))
    if mismatched_siblings:
        yellow_print("Images skipped due to siblings mismatch:")
        for img in sorted(mismatched_siblings):
            click.echo("   {}".format(img))
31,272
def dataset_parser(value, A):
    """Parse an ImageNet record from a serialized string Tensor.

    Returns the first ``A.shape[0]`` elements of ``value`` together with the
    untouched ``value`` itself.
    """
    prefix_length = A.shape[0]
    return value[:prefix_length], value
31,273
def default_csv_file():
    """Return the default filename used for CSV output."""
    return 'data.csv'
31,274
def get_gc_alt(alt, unit='km'):
    """
    Return index of nearest altitude of GEOS-Chem box (global value).

    Parameters
    ----------
    alt : float
        Altitude value to locate on the GEOS-Chem vertical grid.
    unit : str
        Unit of ``alt``; either 'km' (default) or 'hPa'.

    Returns
    -------
    int
        Index of the grid level closest to ``alt``.
    """
    if unit == 'km':
        alt_c = gchemgrid('c_km_geos5_r')
    elif unit == 'hPa':
        alt_c = gchemgrid('c_hPa_geos5')
    else:
        err_str = 'No case setup for altitude unit ({})'.format(unit)
        # FIX: the error string was built but never shown — sys.exit() exited
        # silently (and with status 0). Passing the message prints it to
        # stderr and exits with a non-zero status.
        sys.exit(err_str)
    return find_nearest(alt_c, alt)
31,275
def download(object_client, project_id, datasets_path):
    """Download the contents of file from the object store.

    Parameters
    ----------
    object_client : faculty.clients.object.ObjectClient
    project_id : uuid.UUID
    datasets_path : str
        The path of the file in the object store to download

    Returns
    -------
    bytes
        The content of the file
    """
    # Stream the file in chunks and concatenate them into a single payload.
    return b"".join(download_stream(object_client, project_id, datasets_path))
31,276
def set_log_level(level):
    """
    Set the logging level for urbansim.

    Parameters
    ----------
    level : int
        A supported logging level. Use logging constants like logging.DEBUG.

    """
    urbansim_logger = logging.getLogger('urbansim')
    urbansim_logger.setLevel(level)
31,277
def upload_to_db(buffer: BytesIO):
    """Write the changes made in the xlsx file into the database.

    Scans every worksheet (skipping the header row); each row whose tenth
    cell equals 1 is marked OK via ``set_application_ok`` using the value of
    the row's first cell as the key.
    """
    workbook = load_workbook(filename=buffer)
    for sheet in workbook:
        for cells in sheet.iter_rows(min_row=2, max_row=sheet.max_row):
            approved = cells[9].value == 1
            if approved:
                set_application_ok(cells[0].value)
31,278
def get_assets_of_dataset(
    db: Session = Depends(deps.get_db),
    dataset_id: int = Path(..., example="12"),
    offset: int = 0,
    limit: int = settings.DEFAULT_LIMIT,
    keyword: str = Query(None),
    viz_client: VizClient = Depends(deps.get_viz_client),
    current_user: models.User = Depends(deps.get_current_active_user),
    current_workspace: models.Workspace = Depends(deps.get_current_workspace),
) -> Any:
    """
    Get asset list of specific dataset,
    pagination is supported by means of offset and limit

    Raises DatasetNotFound when the dataset id does not belong to the
    current user. ``keyword`` optionally filters the assets returned by
    the viz service.
    """
    # Ownership check: only datasets belonging to the current user are visible.
    dataset = crud.dataset.get_with_task(db, user_id=current_user.id, id=dataset_id)
    if not dataset:
        raise DatasetNotFound()

    # Delegate the actual asset listing to the viz service, scoped to this
    # user's repo (workspace hash) and the dataset's task branch.
    assets = viz_client.get_assets(
        user_id=current_user.id,
        repo_id=current_workspace.hash,  # type: ignore
        branch_id=dataset.task_hash,  # type: ignore
        keyword=keyword,
        limit=limit,
        offset=offset,
    )
    result = {
        "keywords": assets.keywords,
        "items": assets.items,
        "total": assets.total,
    }
    return {"result": result}
31,279
def check_rule(body, obj, obj_string, rule, only_body):
    """
    Compare the argument with a rule.

    The rule body is everything past the first two characters of ``rule``.
    When ``only_body`` is true only the bodies are compared; otherwise the
    object must also equal ``obj_string``.
    """
    body_matches = (body == rule[2:])
    if only_body:
        return body_matches
    return body_matches and (obj == obj_string)
31,280
def pdm_auto_arima(df, target_column, time_column, frequency_data, epochs_to_forecast = 12, d=1, D=0, seasonal=True, m =12, start_p = 2, start_q = 0, max_p=9, max_q=2, start_P = 0, start_Q = 0, max_P = 2, max_Q = 2, validate = False, epochs_to_test = 1):
    """
    This function finds the best order parameters for a SARIMAX model, then makes a forecast

    Parameters:
        - df_input (pandas.DataFrame): Input Time Series.
        - target_column (str): name of the column containing the target feature
        - time_column (str): name of the column containing the pandas Timestamps
        - frequency_data (str): string representing the time frequency of record, e.g. "h" (hours), "D" (days), "M" (months)
        - epochs_to_forecast (int): number of steps for predicting future data
        - epochs_to_test (int): number of steps corresponding to most recent records to test on
        - d, D, m, start_p, start_q, max_p, max_q, start_P, start_Q, max_P, max_Q (int): SARIMAX parameters to be set for reseach
        - seasonal (bool): seasonality flag
        - validate (bool): if True, epochs_to_test rows are used for validating, else forecast without evaluation

    Returns:
        - forecast_df (pandas.DataFrame): Output DataFrame with best forecast found
    """
    assert isinstance(target_column, str)
    assert isinstance(time_column, str)
    # Every column other than the timestamp and the target is treated as an
    # exogenous regressor.
    external_features = [col for col in df if col not in [time_column, target_column]]
    # NOTE(review): this warning fires whenever epochs_to_test == 0, even if
    # validate is already False — confirm the condition should also test validate.
    if epochs_to_test == 0:
        from warnings import warn
        warn("epochs_to_test=0 and validate=True is not correct, setting validate=False instead")
        validate = False
    # Resample to the requested frequency, back-filling gaps.
    if frequency_data is not None:
        df = df.set_index(time_column).asfreq(freq=frequency_data, method="bfill").reset_index()
    if len(external_features) > 0:
        #Scaling all exogenous features
        scaler = MinMaxScaler()
        scaled = scaler.fit_transform(df.set_index(time_column).drop([target_column], axis = 1).values)
    # Rows with NaN target (the future horizon) are excluded from training.
    train_df = df.dropna()
    train_df.set_index(time_column, inplace=True)
    # Timestamp index for the output frame (training span + forecast horizon).
    if frequency_data is not None:
        date = pd.date_range(start=df[time_column].min(), periods=len(train_df)+epochs_to_forecast, freq=frequency_data)
    else:
        date = pd.date_range(start=df[time_column].min(), end=df[time_column].max(), periods=len(df))

    ### Finding parameter using validation set
    if validate:
        # Hold out the last epochs_to_test rows, search orders on the rest,
        # report RMSE on the held-out slice, then refit the chosen order on
        # the full training data.
        train_df_validation = train_df[:-epochs_to_test]
        if len(external_features) > 0:
            exog_validation = scaled[:(len(train_df)-epochs_to_test)]
            model_validation = pmd_arima.auto_arima(train_df_validation[target_column],exogenous = exog_validation, max_order = 30, m=m, d=d,start_p=start_p, start_q=start_q,max_p=max_p, max_q=max_q, # basic polynomial
                          seasonal=seasonal,D=D, start_P=start_P, max_P = max_P,start_Q = start_Q, max_Q= max_Q, #seasonal polynomial
                          trace=False,error_action='ignore', suppress_warnings=True, stepwise=True)
            exog_validation_forecast = scaled[(len(train_df)-epochs_to_test):len(train_df)]
            forecast_validation, forecast_validation_ci = model_validation.predict(n_periods = epochs_to_test,exogenous= exog_validation_forecast, return_conf_int=True)
            validation_df = pd.DataFrame({target_column:train_df[target_column].values[(len(train_df)-epochs_to_test):len(train_df)],'Forecast':forecast_validation})
            rmse = np.sqrt(mean_squared_error(validation_df[target_column].values, validation_df.Forecast.values))
            print(f'RMSE: {rmse}')
            exog = scaled[:len(train_df)]
            # Refit the selected (p,d,q)(P,D,Q,m) order on the whole training set.
            model = pmd_arima.ARIMA( order = list(model_validation.get_params()['order']), seasonal_order = list(model_validation.get_params()['seasonal_order']), trace=False,error_action='ignore', suppress_warnings=True)
            model.fit(y = train_df[target_column],exogenous = exog)
            training_prediction = model.predict_in_sample(exogenous = exog_validation)
        else:
            model_validation = pmd_arima.auto_arima(train_df_validation[target_column], max_order = 30, m=m, d=d,start_p=start_p, start_q=start_q,max_p=max_p, max_q=max_q, # basic polynomial
                          seasonal=seasonal,D=D, start_P=start_P, max_P = max_P,start_Q = start_Q, max_Q= max_Q, #seasonal polynomial
                          trace=False,error_action='ignore', suppress_warnings=True, stepwise=True)
            forecast_validation, forecast_validation_ci = model_validation.predict(n_periods = epochs_to_test, return_conf_int=True)
            validation_df = pd.DataFrame({target_column:train_df[target_column].values[(len(train_df)-epochs_to_test):len(train_df)],'Forecast':forecast_validation})
            rmse = np.sqrt(mean_squared_error(validation_df[target_column].values, validation_df.Forecast.values))
            print(f'RMSE: {rmse}')
            #exog = scaled[:len(train_df)]
            model = pmd_arima.ARIMA( order = list(model_validation.get_params()['order']), seasonal_order = list(model_validation.get_params()['seasonal_order']), trace=False,error_action='ignore', suppress_warnings=True)
            model.fit(y = train_df[target_column])
            training_prediction = model.predict_in_sample()
    else:
        # No validation: search the order directly on the full training data.
        if len(external_features) > 0:
            #Select exogenous features for training
            exog = scaled[:len(train_df)]
            #Search for best model
            model = pmd_arima.auto_arima(train_df[target_column],exogenous = exog, max_order = 30, m=m, d=d,start_p=start_p, start_q=start_q,max_p=max_p, max_q=max_q, # basic polynomial
                          seasonal=seasonal,D=D, start_P=start_P, max_P = max_P,start_Q = start_Q, max_Q= max_Q, #seasonal polynomial
                          trace=False,error_action='ignore', suppress_warnings=True, stepwise=True)
            training_prediction = model.predict_in_sample(exogenous = exog) #Training set predictions
        else:
            #Search for best model
            model = pmd_arima.auto_arima(train_df[target_column], max_order = 30, m=m, d=d,start_p=start_p, start_q=start_q,max_p=max_p, max_q=max_q, # basic polynomial
                          seasonal=seasonal,D=D, start_P=start_P, max_P = max_P,start_Q = start_Q, max_Q= max_Q, #seasonal polynomial
                          trace=False,error_action='ignore', suppress_warnings=True, stepwise=True)
            training_prediction = model.predict_in_sample() #Training set predictions

    ### Forecasting
    if len(external_features) > 0:
        exog_forecast = scaled[len(train_df):len(train_df)+epochs_to_forecast] #Forecast
        # When no future exogenous rows exist, fall back to NaN placeholders.
        if len(exog_forecast)==0:
            exog_forecast = np.nan * np.ones((epochs_to_forecast,exog.shape[1]))
    if epochs_to_forecast > 0:
        if len(external_features) > 0:
            forecast, forecast_ci = model.predict(n_periods = len(exog_forecast),exogenous= exog_forecast, return_conf_int=True)
        else:
            forecast, forecast_ci = model.predict(n_periods = epochs_to_forecast, return_conf_int=True)

    #Building output dataset
    forecast_df=pd.DataFrame()
    forecast_df[target_column] = df[target_column].values[:len(train_df)+epochs_to_forecast]#df[target_column].values
    forecast_df['forecast'] = np.nan
    forecast_df['forecast_up'] = np.nan
    forecast_df['forecast_low'] = np.nan
    # Place validation predictions (if any) just before the forecast horizon,
    # and the out-of-sample forecast with its confidence band at the tail.
    if validate and epochs_to_forecast > 0:
        forecast_df['forecast'].iloc[-epochs_to_forecast-epochs_to_test:-epochs_to_forecast] = forecast_validation
        forecast_df['forecast_up'].iloc[-epochs_to_forecast-epochs_to_test:-epochs_to_forecast] = forecast_validation_ci[:,1]
        forecast_df['forecast_low'].iloc[-epochs_to_forecast-epochs_to_test:-epochs_to_forecast] = forecast_validation_ci[:,0]
    elif validate and epochs_to_forecast == 0:
        forecast_df['forecast'].iloc[-epochs_to_forecast-epochs_to_test:] = forecast_validation
        forecast_df['forecast_up'].iloc[-epochs_to_forecast-epochs_to_test:] = forecast_validation_ci[:,1]
        forecast_df['forecast_low'].iloc[-epochs_to_forecast-epochs_to_test:] = forecast_validation_ci[:,0]
    if epochs_to_forecast > 0:
        forecast_df['forecast'].iloc[-epochs_to_forecast:] = forecast
        forecast_df['forecast_up'].iloc[-epochs_to_forecast:] = forecast_ci[:,1]
        forecast_df['forecast_low'].iloc[-epochs_to_forecast:] = forecast_ci[:,0]
    forecast_df[time_column] = date
    return forecast_df
31,281
def get_lines(filename):
    """
    Returns a list of lines of a file.

    Parameters
        filename : str, name of control file
    """
    with open(filename, "r") as source:
        return source.readlines()
31,282
def shout(*text):
    """Echoes text back, but louder.

    text: the text to echo back

    Who shouts backwards anyway?"""
    message = ' '.join(text)
    print(message.upper())
31,283
def _check_socket_state(realsock, waitfor="rw", timeout=0.0): """ <Purpose> Checks if the given socket would block on a send() or recv(). In the case of a listening socket, read_will_block equates to accept_will_block. <Arguments> realsock: A real socket.socket() object to check for. waitfor: An optional specifier of what to wait for. "r" for read only, "w" for write only, and "rw" for read or write. E.g. if timeout is 10, and wait is "r", this will block for up to 10 seconds until read_will_block is false. If you specify "r", then write_will_block is always true, and if you specify "w" then read_will_block is always true. timeout: An optional timeout to wait for the socket to be read or write ready. <Returns> A tuple, (read_will_block, write_will_block). <Exceptions> As with select.select(). Probably best to wrap this with _is_recoverable_network_exception and _is_terminated_connection_exception. Throws an exception if waitfor is not in ["r","w","rw"] """ # Check that waitfor is valid if waitfor not in ["rw","r","w"]: raise Exception, "Illegal waitfor argument!" # Array to hold the socket sock_array = [realsock] # Generate the read/write arrays read_array = [] if "r" in waitfor: read_array = sock_array write_array = [] if "w" in waitfor: write_array = sock_array # Call select() (readable, writeable, exception) = select.select(read_array,write_array,sock_array,timeout) # If the socket is in the exception list, then assume its both read and writable if (realsock in exception): return (False, False) # Return normally then return (realsock not in readable, realsock not in writeable)
31,284
def str_to_pauli_term(pauli_str: str, qubit_labels=None):
    """
    Convert a string into a pyquil.paulis.PauliTerm.

    >>> str_to_pauli_term('XY', [])

    :param str pauli_str: The input string, made of of 'I', 'X', 'Y' or 'Z'
    :param set qubit_labels: The integer labels for the qubits in the string, given in reverse
                             order. If None, default to the range of the length of pauli_str.
    :return: the corresponding PauliTerm
    :rtype: pyquil.paulis.PauliTerm
    """
    if qubit_labels is None:
        # Default labels: len-1, len-2, ..., 0 (reverse order).
        labels_list = list(range(len(pauli_str) - 1, -1, -1))
    else:
        labels_list = sorted(qubit_labels, reverse=True)
    return PauliTerm.from_list(list(zip(pauli_str, labels_list)))
31,285
def _GetNextPartialIdentifierToken(start_token):
  """Returns the first token having identifier as substring after a token.

  Searches each token after the start to see if it contains an identifier.
  If found, token is returned. If no identifier is found returns None.
  Search is abandoned when a FLAG_ENDING_TYPE token is found.

  Args:
    start_token: The token to start searching after.

  Returns:
    The token found containing identifier, None otherwise.
  """
  current = start_token.next
  while current:
    # Stop the scan entirely once a flag-ending token is reached.
    if current.type in Type.FLAG_ENDING_TYPES:
      break
    is_comment = current.type == Type.COMMENT
    if is_comment and javascripttokenizer.JavaScriptTokenizer.IDENTIFIER.search(
        current.string) is not None:
      return current
    current = current.next
  return None
31,286
def _challenge_transaction(client_account):
    """
    Generate the challenge transaction for a client account.
    This is used in `GET <auth>`, as per SEP 10.
    Returns the base64/ASCII XDR encoding of that transaction, signed by
    the anchor's account seed.
    """
    builder = Builder.challenge_tx(
        server_secret=settings.STELLAR_ACCOUNT_SEED,
        client_account_id=client_account,
        # NOTE(review): 'archor_name' looks like a typo for 'anchor_name' —
        # confirm against the Builder.challenge_tx API signature.
        archor_name=ANCHOR_NAME,
        network=settings.STELLAR_NETWORK,
    )
    builder.sign(secret=settings.STELLAR_ACCOUNT_SEED)
    envelope_xdr = builder.gen_xdr()
    return envelope_xdr.decode("ascii")
31,287
def mapCtoD(sys_c, t=(0, 1), f0=0.):
    """Map a MIMO continuous-time to an equiv. SIMO discrete-time system.

    The criterion for equivalence is that the sampled pulse response of the CT
    system must be identical to the impulse response of the DT system. i.e. If
    ``yc`` is the output of the CT system with an input ``vc`` taken from a set
    of DACs fed with a single DT input ``v``, then ``y``, the output of the
    equivalent DT system with input ``v`` satisfies: ``y(n) = yc(n-)`` for
    integer ``n``. The DACs are characterized by rectangular impulse responses
    with edge times specified in the t list.

    **Input:**

    sys_c : object
           the LTI description of the CT system, which can be:

     * the ABCD matrix,
     * a list-like containing the A, B, C, D matrices,
     * a list of zpk tuples (internally converted to SS representation).
     * a list of LTI objects

    t : array_like
        The edge times of the DAC pulse used to make CT waveforms
        from DT inputs. Each row corresponds to one of the system
        inputs; [-1 -1] denotes a CT input. The default is [0 1],
        for all inputs except the first.

    f0 : float
         The (normalized) frequency at which the Gp filters' gains are
         to be set to unity. Default 0 (DC).

    **Output:**

    sys : tuple
         the LTI description for the DT equivalent, in A, B, C, D
         representation.

    Gp : list of lists
         the mixed CT/DT prefilters which form the samples
         fed to each state for the CT inputs.

    **Example:**

    Map the standard second order CT modulator shown below to its CT
    equivalent and verify that its NTF is :math:`(1-z^{-1})^2`.

    .. image:: ../doc/_static/mapCtoD.png
        :align: center
        :alt: mapCtoD block diagram

    It can be done as follows::

        from __future__ import print_function
        import numpy as np
        from scipy.signal import lti
        from deltasigma import *
        LFc = lti([[0, 0], [1, 0]], [[1, -1], [0, -1.5]], [[0, 1]], [[0, 0]])
        tdac = [0, 1]
        LF, Gp = mapCtoD(LFc, tdac)
        LF = lti(*LF)
        ABCD = np.vstack((
                np.hstack((LF.A, LF.B)),
                np.hstack((LF.C, LF.D))
               ))
        NTF, STF = calculateTF(ABCD)
        print("NTF:") # after rounding to a 1e-6 resolution
        print("Zeros:", np.real_if_close(np.round(NTF.zeros, 6)))
        print("Poles:", np.real_if_close(np.round(NTF.poles, 6)))

    Prints::

        Zeros: [ 1.  1.]
        Poles: [ 0.  0.]

    Equivalent to::

               (z -1)^2
        NTF = ----------
                 z^2

    .. seealso:: R. Schreier and B. Zhang, "Delta-sigma modulators employing \
    continuous-time circuitry," IEEE Transactions on Circuits and Systems I, \
    vol. 43, no. 4, pp. 324-332, April 1996.
    """
    # You need to have A, B, C, D specification of the system
    Ac, Bc, Cc, Dc = _getABCD(sys_c)
    ni = Bc.shape[1]
    # Sanitize t
    if hasattr(t, 'tolist'):
        t = t.tolist()
    if (type(t) == tuple or type(t) == list) and np.isscalar(t[0]):
        t = [t]  # we got a simple list, like the default value
    if not (type(t) == tuple or type(t) == list) and \
       not (type(t[0]) == tuple or type(t[0]) == list):
        raise ValueError("The t argument has an unrecognized shape")
    # back to business
    t = np.array(t)
    if t.shape == (1, 2) and ni > 1:
        # Default DAC timing: first input becomes CT ([-1,-1]), the rest [0,1].
        t = np.vstack((np.array([[-1, -1]]), np.dot(np.ones((ni - 1, 1)), t)))
    if t.shape != (ni, 2):
        raise ValueError('The t argument has the wrong dimensions.')
    # di[i] is True for discrete-time (DAC-fed) inputs, False for CT inputs.
    di = np.ones(ni).astype(bool)
    for i in range(ni):
        if t[i, 0] == -1 and t[i, 1] == -1:
            di[i] = False

    # c2d assumes t1=0, t2=1.
    # Also c2d often complains about poor scaling and can even produce
    # incorrect results.
    A, B, C, D, _ = cont2discrete((Ac, Bc, Cc, Dc), 1, method='zoh')
    Bc1 = Bc[:, ~di]

    # Examine the discrete-time inputs to see how big the
    # augmented matrices need to be.
    B1 = B[:, ~di]
    D1 = D[:, ~di]
    n = A.shape[0]
    t2 = np.ceil(t[di, 1]).astype(np.int_)
    esn = (t2 == t[di, 1]) and (D[0, di] != 0).T  # extra states needed?
    npp = n + np.max(t2 - 1 + 1*esn)

    # Augment A to npp x npp, B to np x 1, C to 1 x np.
    Ap = padb(padr(A, npp), npp)
    for i in range(n + 1, npp):
        Ap[i, i - 1] = 1
    Bp = np.zeros((npp, 1))
    if npp > n:
        Bp[n, 0] = 1
    Cp = padr(C, npp)
    Dp = np.zeros((1, 1))

    # Add in the contributions from each DAC
    for i in np.flatnonzero(di):
        t1 = t[i, 0]
        t2 = t[i, 1]
        B2 = B[:, i]
        D2 = D[:, i]
        if t1 == 0 and t2 == 1 and D2 == 0:
            # No fancy stuff necessary
            Bp = Bp + padb(B2, npp)
        else:
            n1 = np.floor(t1)
            n2 = np.ceil(t2) - n1 - 1
            t1 = t1 - n1
            t2 = t2 - n2 - n1
            if t2 == 1 and D2 != 0:
                n2 = n2 + 1
                extraStateNeeded = 1
            else:
                extraStateNeeded = 0
            nt = n + n1 + n2
            if n2 > 0:
                if t2 == 1:
                    Ap[:n, nt - n2:nt] = Ap[:n, nt - n2:nt] + np.tile(B2, (1, n2))
                else:
                    Ap[:n, nt - n2:nt - 1] = Ap[:n, nt - n2:nt - 1] + np.tile(B2, (1, n2 - 1))
                    Ap[:n, (nt-1)] = Ap[:n, (nt-1)] + _B2formula(Ac, 0, t2, B2)
            if n2 > 0:  # pulse extends to the next period
                Btmp = _B2formula(Ac, t1, 1, B2)
            else:  # pulse ends in this period
                Btmp = _B2formula(Ac, t1, t2, B2)
            if n1 > 0:
                Ap[:n, n + n1 - 1] = Ap[:n, n + n1 - 1] + Btmp
            else:
                Bp = Bp + padb(Btmp, npp)
            if n2 > 0:
                Cp = Cp + padr(np.hstack((np.zeros((D2.shape[0], n + n1)), D2*np.ones((1, n2)))), npp)
    sys = (Ap, Bp, Cp, Dp)
    # NOTE(review): when every input is discrete-time (np.any(~di) is False),
    # Gp is never assigned before the final return — confirm whether that
    # configuration is reachable for callers.
    if np.any(~di):
        # Compute the prefilters and add in the CT feed-ins.
        # Gp = inv(sI - Ac)*(zI - A)/z*Bc1
        n, m = Bc1.shape
        Gp = np.empty_like(np.zeros((n, m)), dtype=object)
        # !!Make this like stf: an array of zpk objects
        ztf = np.empty_like(Bc1, dtype=object)
        # Compute the z-domain portions of the filters
        ABc1 = np.dot(A, Bc1)
        for h in range(m):
            for i in range(n):
                if Bc1[i, h] == 0:
                    ztf[i, h] = (np.array([]),
                                 np.array([0.]),
                                 -ABc1[i, h])  # dt=1
                else:
                    ztf[i, h] = (np.atleast_1d(ABc1[i, h]/Bc1[i, h]),
                                 np.array([0.]),
                                 Bc1[i, h])  # dt = 1
        # Compute the s-domain portions of each of the filters
        stf = np.empty_like(np.zeros((n, n)), dtype=object)  # stf[out, in] = zpk
        for oi in range(n):
            for ii in range(n):
                # Doesn't do pole-zero cancellation
                stf[oi, ii] = ss2zpk(Ac, np.eye(n), np.eye(n)[oi, :],
                                     np.zeros((1, n)), input=ii)
                # scipy as of v 0.13 has no support for LTI MIMO systems
                # only 'MISO', therefore you can't write:
                # stf = ss2zpk(Ac, eye(n), eye(n), np.zeros(n, n)))
        for h in range(m):
            for i in range(n):
                # k = 1 unneded, see below
                for j in range(n):
                    # check the k values for a non-zero term
                    if stf[i, j][2] != 0 and ztf[j, h][2] != 0:
                        if Gp[i, h] is None:
                            Gp[i, h] = {}
                            Gp[i, h].update({'Hs':[list(stf[i, j])]})
                            Gp[i, h].update({'Hz':[list(ztf[j, h])]})
                        else:
                            Gp[i, h].update({'Hs':Gp[i, h]['Hs'] + [list(stf[i, j])]})
                            Gp[i, h].update({'Hz':Gp[i, h]['Hz'] + [list(ztf[j, h])]})
                        # the MATLAB-like cell code for the above statements would have
                        # been:
                        #Gp[i, h](k).Hs = stf[i, j]
                        #Gp[i, h](k).Hz = ztf[j, h]
                        #k = k + 1
        if f0 != 0:
            # Need to correct the gain terms calculated by c2d
            # B1 = gains of Gp @f0;
            for h in range(m):
                for i in range(n):
                    B1ih = np.real_if_close(evalMixedTF(Gp[i, h], f0))
                    # abs() used because ss() whines if B has complex entries...
                    # This is clearly incorrect.
                    # I've fudged the complex stuff by including a sign....
                    B1[i, h] = np.abs(B1ih) * np.sign(np.real(B1ih))
                    if np.abs(B1[i, h]) < 1e-09:
                        B1[i, h] = 1e-09  # This prevents NaN in "line 174" below
        # Adjust the gains of the pre-filters
        for h in range(m):
            for i in range(n):
                for j in range(max(len(Gp[i, h]['Hs']), len(Gp[i, h]['Hz']))):
                    # The next is "line 174"
                    Gp[i, h]['Hs'][j][2] = Gp[i, h]['Hs'][j][2]/B1[i, h]
        sys = (sys[0],  # Ap
               np.hstack((padb(B1, npp), sys[1])),  # new B
               sys[2],  # Cp
               np.hstack((D1, sys[3])))  # new D
    return sys, Gp
31,288
def main(
    loglevel="ERROR",
    keep_repos=False,
    cache_http=False,
    cache_ttl=7200,
    output_file=None,
):
    """Run the circuitpython.org/libraries updater.

    Walks every bundle repo, collecting: new/updated libraries for the past
    week, open issues and PRs, weekly contributors/reviewers and merged PR
    counts, and per-repo infrastructure validation errors. The collected data
    is assembled into a JSON report and logged.
    """
    logger.setLevel(loglevel)
    logger.info("Running circuitpython.org/libraries updater...")

    run_time = datetime.datetime.now()
    logger.info("Run Date: %s", run_time.strftime("%d %B %Y, %I:%M%p"))

    if output_file:
        # Mirror all logger output into the requested report file.
        file_handler = logging.FileHandler(output_file)
        logger.addHandler(file_handler)
        logger.info(" - Report output will be saved to: %s", output_file)

    if cache_http:
        cpy_vals.github.setup_cache(cache_ttl)

    repos = common_funcs.list_repos(
        include_repos=(
            "CircuitPython_Community_Bundle",
            "cookiecutter-adafruit-circuitpython",
        )
    )

    # Accumulators for the report sections.
    new_libs = {}
    updated_libs = {}
    open_issues_by_repo = {}
    open_prs_by_repo = {}
    contributors = set()
    reviewers = set()
    merged_pr_count_total = 0
    repos_by_error = {}

    # All LibraryValidator.validate_* methods, discovered by reflection.
    default_validators = [
        vals[1]
        for vals in inspect.getmembers(cpy_vals.LibraryValidator)
        if vals[0].startswith("validate")
    ]

    bundle_submodules = common_funcs.get_bundle_submodules()

    latest_pylint = ""
    pylint_info = pypi.get("/pypi/pylint/json")
    if pylint_info and pylint_info.ok:
        latest_pylint = pylint_info.json()["info"]["version"]

    validator = cpy_vals.LibraryValidator(
        default_validators,
        bundle_submodules,
        latest_pylint,
        keep_repos=keep_repos,
    )

    for repo in repos:
        if (
            repo["name"] in cpy_vals.BUNDLE_IGNORE_LIST
            or repo["name"] == "circuitpython"
        ):
            continue
        repo_name = repo["name"]

        # get a list of new & updated libraries for the last week
        check_releases = common_funcs.is_new_or_updated(repo)
        if check_releases == "new":
            new_libs[repo_name] = repo["html_url"]
        elif check_releases == "updated":
            updated_libs[repo_name] = repo["html_url"]

        # get a list of open issues and pull requests
        check_issues, check_prs = get_open_issues_and_prs(repo)
        if check_issues:
            open_issues_by_repo[repo_name] = check_issues
        if check_prs:
            open_prs_by_repo[repo_name] = check_prs

        # get the contributors and reviewers for the last week
        get_contribs, get_revs, get_merge_count = get_contributors(repo)
        if get_contribs:
            contributors.update(get_contribs)
        if get_revs:
            reviewers.update(get_revs)
        merged_pr_count_total += get_merge_count

        if repo_name in DO_NOT_VALIDATE:
            continue

        # run repo validators to check for infrastructure errors
        errors = []
        try:
            errors = validator.run_repo_validation(repo)
        except Exception as err:  # pylint: disable=broad-except
            # A validator crash is recorded as an output-handler error rather
            # than aborting the whole run.
            logging.exception("Unhandled exception %s", str(err))
            errors.extend([cpy_vals.ERROR_OUTPUT_HANDLER])
        for error in errors:
            if not isinstance(error, tuple):
                # check for an error occurring in the validator module
                if error == cpy_vals.ERROR_OUTPUT_HANDLER:
                    # print(errors, "repo output handler error:", validator.output_file_data)
                    logging.error(", ".join(validator.output_file_data))
                    validator.output_file_data.clear()
                if error not in repos_by_error:
                    repos_by_error[error] = []
                repos_by_error[error].append(repo["html_url"])
            else:
                # Tuple errors carry an age (in days) alongside the error key.
                if error[0] not in repos_by_error:
                    repos_by_error[error[0]] = []
                repos_by_error[error[0]].append(f"{repo['html_url']} ({error[1]} days)")

    # assemble the JSON data (all sections sorted case-insensitively)
    build_json = {
        "updated_at": run_time.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "contributors": sorted(contributors, key=str.lower),
        "reviewers": sorted(reviewers, key=str.lower),
        "merged_pr_count": str(merged_pr_count_total),
        "library_updates": {
            "new": {key: new_libs[key] for key in sorted(new_libs, key=str.lower)},
            "updated": {
                key: updated_libs[key] for key in sorted(updated_libs, key=str.lower)
            },
        },
        "open_issues": {
            key: open_issues_by_repo[key]
            for key in sorted(open_issues_by_repo, key=str.lower)
        },
        "pull_requests": {
            key: open_prs_by_repo[key]
            for key in sorted(open_prs_by_repo, key=str.lower)
        },
        "repo_infrastructure_errors": {
            key: repos_by_error[key] for key in sorted(repos_by_error, key=str.lower)
        },
    }
    logger.info("%s", json.dumps(build_json, indent=2))
31,289
def main(args):
    """Generate random (query, positive, negative) training triplets.

    Reads an image list from ``args.ori_lst`` where each line is
    ``<image_path> <label>``, then writes ``args.num_tri`` lines of
    ``query\\tpositive\\tnegative`` to ``args.tri_lst``.  The positive
    image shares the query's label; the negative image has a different
    label.

    Fix: the original used Python 2 ``xrange`` (a NameError on Python 3)
    and shuffled ``range`` objects in place (a TypeError on Python 3);
    both are replaced with Python 3 equivalents, logic unchanged.

    :param args: namespace with ``ori_lst`` (input list path),
        ``tri_lst`` (output triplet path) and ``num_tri`` (triplet count)
    """
    logging.info("Loading image lists ...")
    v_info = []
    img_lst = dict()
    with open(args.ori_lst, 'r') as orif:
        v_info = [line.split() for line in orif]
    # Group image paths by label for positive sampling.
    for impath, label in v_info:
        if label not in img_lst:
            img_lst[label] = []
        img_lst[label].append(impath)
    logging.info("Done!")

    num_img = len(v_info)
    # Mutable index lists: random.shuffle needs a list, not a range object.
    v_imgid = list(range(num_img))
    v_negid = list(range(num_img))
    qry_idx = num_img  # forces a shuffle on the first iteration
    neg_idx = -1
    logging.info("Generating triplets ...")
    with open(args.tri_lst, 'w') as trif:
        for i in range(args.num_tri):
            if i % 10000 == 0:
                logging.info("\tfinished %6.2f%% of %d" % (
                    100.0 * i / args.num_tri, args.num_tri))
            # Reshuffle the query order each time the list is exhausted.
            if qry_idx == num_img:
                random.shuffle(v_imgid)
                qry_idx = 0
            qry_imgid = v_imgid[qry_idx]
            qry, label = v_info[qry_imgid]
            # Up to 10 draws to find a positive distinct from the query.
            for j in range(10):
                pos = random.choice(img_lst[label])
                if pos != qry:
                    break
            # Walk the (shuffled) negative list until a different label
            # turns up.
            while True:
                neg_idx += 1
                if neg_idx == num_img:
                    random.shuffle(v_negid)
                    neg_idx = 0
                neg_imgid = v_negid[neg_idx]
                neg, neg_label = v_info[neg_imgid]
                if neg_label != label:
                    break
            trif.write("%s\t%s\t%s\n" % (qry, pos, neg))
            qry_idx += 1
        logging.info("\tfinished 100.00%% of %d" % (args.num_tri))
    logging.info("Done!")
31,290
def normalize_type(type: str) -> str:
    """Normalize DataTransfer's type strings.

    https://html.spec.whatwg.org/multipage/dnd.html#dom-datatransfer-getdata

    'text' -> 'text/plain'
    'url'  -> 'text/uri-list'

    Any other type string is returned unchanged.
    """
    aliases = {
        'text': 'text/plain',
        'url': 'text/uri-list',
    }
    return aliases.get(type, type)
31,291
def server_socket(addr, mirror_addr):
    """Instances the main server.

    Receives data from a client, forwards the data to a mirror server and
    replies to the client.  Loops accepting connections until a client
    sends "EOF".

    Fix: the original used Python 2 tuple-parameter unpacking and print
    statements, both of which are syntax errors on Python 3.  Callers
    still pass the same two (host, port) tuples.

    :param addr: (host, port) tuple to bind the listening socket to
    :param mirror_addr: (host, port) tuple of the mirror server
    :return: None

    NOTE(review): ``Socket`` is a project wrapper; this code compares
    ``recv`` output against str literals, so it presumably returns
    decoded text rather than bytes — confirm against Socket's API.
    """
    host, port = addr
    host_m, port_m = mirror_addr
    s = Socket.get_instance()
    s.bind((host, port))
    s.listen(2)
    print("Listening on %(host)s:%(port)s" % {"host": host, "port": port})
    while True:
        # accept connections from client
        conn, address = s.accept()
        print(address, "Now connected")
        content = conn.recv(64)
        # connect to mirror socket
        m = Socket.get_instance()
        m.connect((host_m, port_m))
        # m.send(len(content))  # send content length
        # m.recv(1)  # receive OK or ERROR
        m.sendall(content)  # send actual data
        print(m.recv(64))
        m.close()
        if content.strip() == "EOF":
            print("EOF")
            conn.send("Good bye!")
            conn.close()
            break
        # response to client
        print(content)
        conn.send("I'm server socket. Thank you for connecting.\n")
        conn.close()
        print("")
31,292
def _parity(N, j):
    """Private function to calculate the parity of the quantum system.

    NOTE(review): implicitly returns None when j < 0.5 — same as the
    original; callers apparently never pass such values.
    """
    if j == 0.5:
        # Spin-1/2 case: identity minus a scaled _lambda_f term.
        correction = np.sqrt((N - 1) * N * (N + 1) / 2) * _lambda_f(N)
        parity_op = np.identity(N) - correction
        return parity_op / N
    if j > 0.5:
        # Higher spins: diagonal matrix whose entries are sums of
        # Clebsch-Gordan-weighted terms.
        dim = np.int32(2 * j + 1)
        diag_mat = np.zeros((dim, dim))
        weights = np.ones(dim)
        for m in np.arange(-j, j + 1, 1):
            for ell in np.arange(0, dim, 1):
                weights[ell] = (2 * ell + 1) * qutip.clebsch(j, ell, j, m, 0, m)
            idx = np.int32(m + j)
            diag_mat[idx, idx] = np.sum(weights)
        return diag_mat / dim
31,293
def get_log(id):
    """Return the log for the given ansible play.

    This works on both live and finished plays.

    .. :quickref: Play; Returns the log for the given ansible play

    :param id: play id

    **Example Request**:

    .. sourcecode:: http

       GET /api/v2/plays/345835/log HTTP/1.1

    **Example Response**:

    .. sourcecode:: http

       HTTP/1.1 200 OK
       ... log file from the given play ...
    """
    # For security, send_from_directory avoids sending any files
    # outside of the specified directory.
    log_name = "%s.log" % str(id)
    return send_from_directory(get_log_dir_abs(), log_name)
31,294
def filterLinesByCommentStr(lines, comment_str='#'):
    """Filter all lines from a file.readlines output which begin with one
    of the symbols in comment_str.

    The list is modified in place (as before) and also returned.

    Fix: the original indexed ``line[0]`` unconditionally, so an empty
    string in ``lines`` raised IndexError; empty strings are now kept.

    :param lines: list of strings, e.g. from ``file.readlines()``
    :param comment_str: string of characters; a line is dropped when its
        first character is any one of them
    :return: the same list object with comment lines removed
    """
    # Slice-assign so callers holding a reference to the list still see
    # the filtered result (preserves the original in-place semantics).
    lines[:] = [
        line for line in lines
        if not line or line[0] not in comment_str
    ]
    return lines
31,295
def test_browserdriver_phantomjs():
    """PhantomJSDriver is registered as implementing BrowserDriver"""
    implements_interface = issubclass(PhantomJSDriver, BrowserDriver)
    assert implements_interface
31,296
def assemble_result_from_graph(type_spec, binding, output_map):
  """Assembles a result stamped into a `tf.Graph` given type signature/binding.

  This method does roughly the opposite of `capture_result_from_graph`, in
  that whereas `capture_result_from_graph` starts with a single structured
  object made up of tensors and computes its type and bindings, this method
  starts with the type/bindings and constructs a structured object made up
  of tensors.

  Args:
    type_spec: The type signature of the result to assemble, an instance of
      `types.Type` or something convertible to it.
    binding: The binding that relates the type signature to names of tensors
      in the graph, an instance of `pb.TensorFlow.Binding`.
    output_map: The mapping from tensor names that appear in the binding to
      actual stamped tensors (possibly renamed during import).

  Returns:
    The assembled result, a Python object that is composed of tensors,
    possibly nested within Python structures such as anonymous tuples.

  Raises:
    TypeError: If the argument or any of its parts are of an unexpected type.
    ValueError: If the arguments are invalid or inconsistent with each other,
      e.g., the type and binding don't match, or the tensor is not found in
      the map.
  """
  type_spec = computation_types.to_type(type_spec)
  py_typecheck.check_type(type_spec, computation_types.Type)
  py_typecheck.check_type(binding, pb.TensorFlow.Binding)
  py_typecheck.check_type(output_map, dict)
  # Every value in the output map must be an actual tensor.
  for k, v in output_map.items():
    py_typecheck.check_type(k, str)
    if not tf.is_tensor(v):
      raise TypeError(
          'Element with key {} in the output map is {}, not a tensor.'.format(
              k, py_typecheck.type_string(type(v))))

  # Dispatch on the kind of type; the binding's oneof must agree with it.
  binding_oneof = binding.WhichOneof('binding')
  if isinstance(type_spec, computation_types.TensorType):
    # Tensor result: look the single tensor up by its bound name.
    if binding_oneof != 'tensor':
      raise ValueError(
          'Expected a tensor binding, found {}.'.format(binding_oneof))
    elif binding.tensor.tensor_name not in output_map:
      raise ValueError('Tensor named {} not found in the output map.'.format(
          binding.tensor.tensor_name))
    else:
      return output_map[binding.tensor.tensor_name]
  elif isinstance(type_spec, computation_types.NamedTupleType):
    # Tuple result: recursively assemble each element, then wrap in the
    # appropriate container type.
    if binding_oneof != 'tuple':
      raise ValueError(
          'Expected a tuple binding, found {}.'.format(binding_oneof))
    else:
      type_elements = anonymous_tuple.to_elements(type_spec)
      if len(binding.tuple.element) != len(type_elements):
        raise ValueError(
            'Mismatching tuple sizes in type ({}) and binding ({}).'.format(
                len(type_elements), len(binding.tuple.element)))
      result_elements = []
      for (element_name, element_type), element_binding in zip(
          type_elements, binding.tuple.element):
        element_object = assemble_result_from_graph(element_type,
                                                    element_binding,
                                                    output_map)
        result_elements.append((element_name, element_object))
      if not isinstance(type_spec,
                        computation_types.NamedTupleTypeWithPyContainerType):
        # No Python container recorded: default to an anonymous tuple.
        return anonymous_tuple.AnonymousTuple(result_elements)
      container_type = computation_types.NamedTupleTypeWithPyContainerType.get_container_type(
          type_spec)
      # Named tuples and attrs classes take keyword arguments; any other
      # container is constructed from the (name, value) pairs directly.
      if (py_typecheck.is_named_tuple(container_type) or
          py_typecheck.is_attrs(container_type)):
        return container_type(**dict(result_elements))
      return container_type(result_elements)
  elif isinstance(type_spec, computation_types.SequenceType):
    # Sequence result: rebuild a tf.data.Dataset from its variant tensor.
    if binding_oneof != 'sequence':
      raise ValueError(
          'Expected a sequence binding, found {}.'.format(binding_oneof))
    else:
      sequence_oneof = binding.sequence.WhichOneof('binding')
      if sequence_oneof == 'variant_tensor_name':
        variant_tensor = output_map[binding.sequence.variant_tensor_name]
        return make_dataset_from_variant_tensor(variant_tensor,
                                                type_spec.element)
      else:
        raise ValueError(
            'Unsupported sequence binding \'{}\'.'.format(sequence_oneof))
  else:
    raise ValueError('Unsupported type \'{}\'.'.format(type_spec))
31,297
def do_quota_class_show(cs, args):
    """List the quotas for a quota class."""
    quota_class = cs.quota_classes.get(args.class_name)
    _quota_show(quota_class)
31,298
def test_right_shift_by_n(emulator, value1, value2, shift_amount): """Test for the left-shift-by-n utility""" # Arrange RAM[variables.tmp4] = 0x1 emulator.AC = -shift_amount & 0xFF RAM[0x42] = value1 emulator.Y = 0x00 emulator.X = 0x42 emulator.next_instruction = asm.symbol("right-shift-by-n") # Act emulator.run_for(_shift.cost_of_right_shift_by_n) # Assert assert emulator.AC == (value1 >> shift_amount) assert emulator.next_instruction & 0xFF == 0x1 # Arrange emulator.AC = RAM[variables.tmp5] RAM[0x42] = value2 emulator.next_instruction = asm.symbol("right-shift-by-n.second-time") # Act emulator.run_for(_shift.cost_of_right_shift_by_n__second_time) # Assert assert emulator.AC == (value2 >> shift_amount) assert emulator.next_instruction & 0xFF == 0x1
31,299