content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
import os


def get_dftd3_energy(ipt):
    """Compute Grimme's D3 dispersion correction to the energy.

    :param ipt: 3-tuple ``(fxyz, func, iabc)`` where ``fxyz`` is the path
        of the xyz geometry file, ``func`` is the DFT functional name and
        ``iabc`` (truthy) enables the three-body (-abc) term.
    :return: dispersion energy parsed from the ``dftd3`` output (float).
    :raises ValueError: if the pipeline produces no parseable number
        (e.g. ``dftd3`` is not installed or the file is missing).
    """
    fxyz, func, iabc = ipt
    sabc = ' -abc' if iabc else ''
    # NOTE(review): fxyz/func are interpolated into a shell pipeline
    # unescaped -- only safe for trusted file/functional names.
    cmd = "dftd3 %s -func %s -bj%s | grep Edisp | awk '{print $NF}'" % (fxyz, func, sabc)
    # float() replaces the previous eval(): the awk output is a single
    # number, and eval() on external program output is an arbitrary-code
    # execution risk.
    e = float(os.popen(cmd).read().strip())
    return e
9c0098fd619e202b867b956b56227124d10cae8a
28,000
import itertools


def get_state_vect_cols(prefix=''):
    """Build the six state-vector column names, optionally prefixed.

    :param prefix: Optional prefix placed in front of each component name
        (e.g. ``physics_pred`` or ``physics_err``); joined with ``_``.
    :type prefix: str
    :return: The 6 column names: r_x, r_y, r_z, v_x, v_y, v_z (prefixed).
    :rtype: [str]
    """
    lead = f'{prefix}_' if prefix else ''
    return [f'{lead}{vec}_{axis}'
            for vec, axis in itertools.product(('r', 'v'), ('x', 'y', 'z'))]
d61c5ebd2aad8c679dda50fa1e310ebf11480e01
28,001
import types


def set_df_index(typingctx, df_t, index_t=None):
    """Numba intrinsic: build a dataframe sharing data/columns with ``df_t``
    but carrying ``index_t`` as its index.

    Used in very limited cases like distributed ``to_csv()`` to create a
    new dataframe with an index.

    :param typingctx: numba typing context (implicit intrinsic argument).
    :param df_t: DataFrameType of the input dataframe.
    :param index_t: numba type of the new index value.
    :return: ``(signature, codegen)`` pair as required by numba intrinsics.
    """
    # TODO: make inplace when dfs are full objects

    def codegen(context, builder, signature, args):
        # LLVM-level lowering: copy every struct member of the incoming
        # dataframe and swap in the new index value.
        in_df_arg = args[0]
        index = args[1]
        in_df = cgutils.create_struct_proxy(
            signature.args[0])(context, builder, value=in_df_arg)
        # create dataframe struct and store values
        dataframe = cgutils.create_struct_proxy(
            signature.return_type)(context, builder)
        dataframe.data = in_df.data
        dataframe.index = index
        dataframe.columns = in_df.columns
        dataframe.unboxed = in_df.unboxed
        dataframe.parent = in_df.parent
        # increase refcount of stored values so the runtime does not free
        # buffers that are now shared by two dataframe structs
        if context.enable_nrt:
            context.nrt.incref(builder, index_t, index)
            # TODO: refcount
            context.nrt.incref(builder, types.Tuple(df_t.data), dataframe.data)
            context.nrt.incref(
                builder, types.UniTuple(string_type, len(df_t.columns)),
                dataframe.columns)
        return dataframe._getvalue()

    ret_typ = DataFrameType(df_t.data, index_t, df_t.columns)
    sig = signature(ret_typ, df_t, index_t)
    return sig, codegen
7bdbdee58bc2a8a71583d43d637a6fa9ab7e8224
28,002
import six
import shlex


def parse_options(options=None, api=False):
    """Normalize *options* into a dict of parsed command-line arguments.

    :param options: a raw command-line string, an argv-style list, an
        already-parsed dict, or None.
    :type options: str | list | dict | None
    :param api: when True and *options* is None, return an empty dict
        instead of parsing ``sys.argv``.
    :type api: boolean
    :return: parsed options
    :rtype: dict
    """
    if isinstance(options, six.string_types):
        # Raw command line: tokenize shell-style, then parse.
        options = vars(argument_parser.parse_args(shlex.split(options)))
    elif options is None:
        # No input: API callers get an empty dict, CLI reads sys.argv.
        options = {} if api else vars(argument_parser.parse_args())
    elif not isinstance(options, dict):
        # Anything else is assumed to be an argv-style token list.
        options = vars(argument_parser.parse_args(options))
    return options
f8a2b3671dab3ffc5f23bd937181324bc1c0d9c7
28,003
import random
import datetime


def data_for_column(column: dict, kwargs: dict, size: int) -> list:
    """Generates data for a schema column.

    :param dict column: Column definition (keys: ``type``, optional
        ``name``, ``percent_empty``, ``format``)
    :param dict kwargs: Faker keyword arguments passed to the provider
    :param int size: Number of rows
    :return: List of random data for a column
    :rtype: list
    :raises AttributeError: when ``column['type']`` is not a Faker provider
    """
    data = []
    data_type = column.get('type', 'empty')
    try:
        method = getattr(fake, data_type)
    except AttributeError:
        raise AttributeError(f"Exception at column {column.get('name', '')}, '{data_type}' is not a valid data type")
    percent_empty = column.get('percent_empty', 0)
    for _ in range(size):
        # Randomly blank out roughly percent_empty fraction of the rows.
        if random.random() <= percent_empty:
            data.append(None)
        else:
            datum = method(**kwargs)
            # Bug fix: the file previously did ``from datetime import
            # datetime`` and then checked ``datetime.date`` /
            # ``datetime.datetime`` on the *class*, which raised
            # AttributeError for every date-typed value.  Importing the
            # module makes the isinstance check work as intended.
            if isinstance(datum, (datetime.date, datetime.datetime)):
                datum = datum.strftime(column['format']) if 'format' in column else datum.isoformat()
            data.append(datum)
    return data
d2ba76d48d80cc256f1959d8fa617b81301119d0
28,004
def peak_bin(peaks, i):
    """Return the bin index of the *i*-th largest peak.

    ``peaks`` is a list of ``(index, value)`` tuples sorted in decreasing
    order by value, so entry ``i`` (when present) is the i-th largest peak.
    Returns ``np.nan`` when fewer than ``i + 1`` peaks exist.
    """
    if i >= len(peaks):
        return np.nan
    return peaks[i][0]
fc667fe04c856e3090ded9ca8eb0a45d51cda74a
28,005
def fetch_all(path, params=None, client=default_client):
    """Retrieve every entry of a model from the ``data`` API namespace.

    Args:
        path (str): The path for which we want to retrieve all entries.
            A filter can be embedded in the model name, e.g.
            "tasks?project_id=project-id".
        params: Optional query parameters forwarded to the GET request.
        client: API client to use; defaults to ``default_client``
            (bound once at import time).

    Returns:
        list: All entries stored in database for the given model.
    """
    # Thin wrapper: prefix the path with "data/" and delegate to get().
    return get(url_path_join("data", path), params=params, client=client)
d663414388b9b6e105fab42d8e4d9cde558322cf
28,006
import datetime
import calendar


def plotter(fdict):
    """Build the 'month warmer than month' comparison plot.

    Queries monthly average temperatures for a station, counts for every
    month pair (i, j) how many years month i was warmer than month j, and
    renders the counts as a 12x12 heatmap.

    Bug fix: the file previously imported the *class*
    (``from datetime import datetime``) while the body calls
    ``datetime.date.today()`` and ``datetime.datetime.now()``, which only
    exist on the module -- every call raised AttributeError.  The module is
    now imported instead.

    :param fdict: autoplot form dictionary.
    :return: ``(matplotlib figure, pandas DataFrame of month-pair counts)``
    :raises ValueError: when the query returns no rows.
    """
    pgconn = get_dbconn('coop')
    cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    ctx = get_autoplot_context(fdict, get_description())
    station = ctx['station']
    table = "alldata_%s" % (station[:2],)
    nt = NetworkTable("%sCLIMATE" % (station[:2],))
    # Exclude the current (partial) month via day < first-of-this-month.
    cursor.execute("""
    SELECT year, month, avg((high+low)/2.) from """+table+"""
    WHERE station = %s and day < %s and year > 1892 GROUP by year, month
    ORDER by year ASC
    """, (station, datetime.date.today().replace(day=1)))
    if cursor.rowcount == 0:
        raise ValueError("No results found for query")
    for rownum, row in enumerate(cursor):
        if rownum == 0:
            baseyear = row[0]
            # -99 marks "no data" cells in the year x month matrix.
            avgs = np.ones((datetime.datetime.now().year - baseyear + 1, 12)) * -99.
        avgs[row[0]-baseyear, row[1]-1] = row[2]

    matrix = np.zeros((12, 12))
    lastyear = np.zeros((12, 12))
    rows = []
    for i in range(12):
        for j in range(12):
            # How many years was month i warmer than month j (both present)
            t = np.where(np.logical_and(avgs[:, j] > -99,
                                        np.logical_and(avgs[:, i] > avgs[:, j],
                                                       avgs[:, i] > -99)), 1, 0)
            matrix[i, j] = np.sum(t)
            # Most recent year in which it happened (scan from the end).
            lastyear[i, j] = datetime.datetime.now().year - np.argmax(t[::-1])
            lyear = lastyear[i, j] if matrix[i, j] > 0 else None
            rows.append(dict(month1=(i+1), month2=(j+1),
                             years=matrix[i, j], lastyear=lyear))
    df = pd.DataFrame(rows)

    (fig, ax) = plt.subplots(1, 1, sharex=True, figsize=(8, 6))
    x, y = np.meshgrid(np.arange(-0.5, 12.5, 1), np.arange(-0.5, 12.5, 1))
    res = ax.pcolormesh(x, y, np.transpose(matrix))
    for i in range(12):
        for j in range(12):
            txt = ax.text(i, j, "%s" % (
                "%.0f" % (matrix[i, j],) if i != j else '-'),
                va='center', ha='center', color='white')
            txt.set_path_effects([PathEffects.withStroke(linewidth=2,
                                                         foreground="k")])
            # Annotate rare events (fewer than 10 years) with the last year.
            if matrix[i, j] > 0 and matrix[i, j] < 10:
                txt = ax.text(i, j-0.5, "%.0f" % (lastyear[i, j],),
                              fontsize=9, va='bottom', ha='center',
                              color='white')
                txt.set_path_effects([PathEffects.withStroke(linewidth=2,
                                                             foreground="k")])
    ax.set_xticks(range(0, 12))
    ax.set_xticklabels(calendar.month_abbr[1:])
    ax.set_yticks(range(0, 12))
    ax.set_yticklabels(calendar.month_abbr[1:])
    ax.set_xlim(-0.5, 11.5)
    ax.set_ylim(-0.5, 11.5)
    ax.set_title(("[%s] %s\nYears that Month was Warmer than other Month"
                  ) % (station, nt.sts[station]['name']))
    fig.colorbar(res)
    ax.set_xlabel("This Month was Warmer than...")
    ax.set_ylabel("...this month for same year")

    return fig, df
4b11cee286494963afb43cfc5b6ab7e56c281476
28,007
def link_library_dynamic(hs, dep_info, object_files, my_pkg_id):
    """Link a dynamic library for the package using given object files.

    Args:
      hs: Haskell context (provides actions, toolchain and tools).
      dep_info: dependency provider (package ids, caches, external and
        dynamic libraries).
      object_files: object files to link into the library.
      my_pkg_id: package id used to derive the library file name.

    Returns:
      File: Produced dynamic library.
    """
    # Library file name encodes the package name and GHC version.
    dynamic_library = hs.actions.declare_file(
        "lib{0}-ghc{1}.{2}".format(
            pkg_id.library_name(hs, my_pkg_id),
            hs.toolchain.version,
            _so_extension(hs),
        )
    )
    args = hs.actions.args()
    args.add(["-shared", "-dynamic"])
    # Work around macOS linker limits. This fix has landed in GHC HEAD, but is
    # not yet in a release; plus, we still want to support older versions of
    # GHC. For details, see: https://phabricator.haskell.org/D4714
    if hs.toolchain.is_darwin:
        args.add(["-optl-Wl,-dead_strip_dylibs"])
    for package in set.to_list(dep_info.package_ids):
        args.add(["-package-id", package])
    # XXX This should be really dep_info.direct_prebuilt_deps, but since we
    # cannot add prebuilt_dependencies to the "depends" field on package
    # registration (see a comment there), we have to pass all transitive
    # prebuilt_dependencies on linking like this.
    for package in set.to_list(dep_info.prebuilt_dependencies):
        args.add(["-package", package])
    for cache in set.to_list(dep_info.package_caches):
        args.add(["-package-db", cache.dirname])
    _add_external_libraries(args, dep_info.external_libraries.values())
    args.add([f.path for f in object_files])
    # Shared libraries the produced library must be able to locate at runtime.
    solibs = set.union(
        set.from_list(dep_info.external_libraries),
        dep_info.dynamic_libraries,
    )
    if hs.toolchain.is_darwin:
        # On macOS, link into a temp file first, then rewrite the install
        # names/paths into the final library location.
        dynamic_library_tmp = hs.actions.declare_file(dynamic_library.basename + ".temp")
        _fix_linker_paths(
            hs,
            dynamic_library_tmp,
            dynamic_library,
            dep_info.external_libraries
        )
        args.add(["-optl-Wl,-headerpad_max_install_names"])
    else:
        dynamic_library_tmp = dynamic_library
    for rpath in set.to_list(_infer_rpaths(dynamic_library, solibs)):
        args.add(["-optl-Wl,-rpath," + rpath])
    args.add(["-o", dynamic_library_tmp.path])
    hs.actions.run(
        inputs = depset(transitive = [
            depset(object_files),
            set.to_depset(dep_info.package_caches),
            set.to_depset(dep_info.dynamic_libraries),
            depset(dep_info.external_libraries.values()),
        ]),
        outputs = [dynamic_library_tmp],
        mnemonic = "HaskellLinkDynamicLibrary",
        executable = hs.tools.ghc,
        arguments = [args]
    )
    return dynamic_library
5171d75c71b52e2487ff1d349add86c042a84062
28,008
def save_mvgcca_latents_space(X, W, model, path, prefix, epochs):
    """Save the common latent space Z and all per-view latent spaces Z_m
    to a .mat file and return the common one.

    - X : [np.array(n x d1),...,np.array(n x dM)] multiview features;
      n instances, dm dimension of view m, M number of views
    - W : np.array(n x n) weighted adjacency matrix
    - model : trained MVGCCA model
    - path : str, output directory/prefix for the .mat file
    - epochs : which epoch is being saved
    - prefix : optional tag appended to the file name
    """
    tag = '' if prefix == '' else '_' + prefix
    latents = get_mvgcca_latents_space(X, W, model)
    # Keys t0, t1, ... index the common space followed by each view's space.
    mat_dict = {'t' + str(m): z for m, z in enumerate(latents)}
    sio.savemat(path + "latent_space_" + str(epochs + 1) + 'epochs' + tag + '.mat', mat_dict)
    return latents[0]
dc0fbb15dd73e44bf1b1b2c74b173cfb6b8cf1d8
28,009
def TDC_sampling(in_channels, mode='downsampling'):
    """
    wrapper_function: -> TIC_sampling
    [B, in_channels, T, F] => [B, in_channels, T, F//2 or F*2]
    in_channels: number of input channels
    """
    # Thin alias: the TDC sampling layer is implemented by TIC_sampling.
    # 'downsampling' halves the F axis; presumably the other mode doubles
    # it -- see TIC_sampling for the authoritative behavior.
    return TIC_sampling(in_channels, mode)
8458e9fe9bfd6bc92af2940b4c3ea5d2f09eb40a
28,010
def bmxbm(s, t, batch_first=True):
    """Batched matrix times batched matrix via einsum.

    With ``batch_first`` the batch axis is the leading dimension
    (``a,i,j x a,j,k -> a,i,k``); otherwise it is the trailing one
    (``i,j,a x j,k,a -> i,k,a``).
    """
    equation = "aij,ajk->aik" if batch_first else "ija,jka->ika"
    return tf.einsum(equation, s, t)
6ac60eb1ffeed2caad312fd4691d689e705986c0
28,011
import re


def get_all_semantic_case_ids():
    """Get iterator over test sorted IDs of all cases in the SBML semantic
    suite (directory entries whose names start with five digits)."""
    is_case = re.compile(r'\d{5}').match
    entries = (entry.name for entry in SBML_SEMANTIC_CASES_DIR.iterdir())
    return sorted(str(name) for name in entries if is_case(name))
d4a5cba008010f02398bb61c32f06450610de350
28,012
def generate_points(n=500, min_=0, max_=1):
    """
    Generate a list of n noisy samples of a linear function.

    Parameters
    ----------
    n : int
    min_ : float
    max_ : float

    Returns
    -------
    list
        List of length n with tuples (x, y) where x is evenly spaced in
        [min_, max_] and y = 2*x + 100 + noise, noise drawn uniformly
        from [0, 1).  (The previous docstring incorrectly claimed y was
        either 0 or 1.)

    Notes
    -----
    Reseeds the *global* NumPy RNG with seed 42, so output is
    deterministic but other users of ``np.random`` are affected.
    """
    assert max_ > min_

    def f(x):
        """Noise-free underlying linear model (hoisted out of the loop)."""
        return 2.0*x+100.0

    np.random.seed(seed=42)
    ret = []
    for x in np.linspace(min_, max_, n):
        noise = np.random.random()
        ret.append((x, f(x)+noise))
    return ret
fe2dbe0ed281716a465804d67014badab96fb414
28,013
def gce(nvf):
    """
    Generate the Vagrantfile snippet needed to launch the VNF on
    Google Compute Engine.

    :param nvf: the VNF record; its ``vim.name`` selects the Gce VIM whose
        credentials are rendered into the template.
    :return: vagrantfile code (str)
    """
    # NOTE(review): the rendered values are not quoted in the Ruby config
    # (e.g. ``google.google_project_id = {{google_project_id}}``) -- confirm
    # the caller supplies already-quoted strings or that Vagrant accepts
    # bare values here.
    element = Template(u'''\
       config.vm.box = "{{image}}"
       config.vm.provider :google do |google, override|
         google.google_project_id = {{google_project_id}}
         google.google_client_email = {{google_client_email}}
         google.google_json_key_location = {{google_json_key_location}}
         override.ssh.username = {{username}}
''')
    # Look up the GCE VIM credentials by name.
    vim = Gce.objects.get(name=nvf.vim.name)
    element = element.render(
        name=nvf.name,
        username=nvf.operator.name,
        image=nvf.vnf.image,
        google_project_id=vim.google_project_id,
        google_client_email=vim.google_client_email,
        google_json_key_location=vim.google_json_key_location,
    )
    return element
588c2472b2a957a1eda64bef526b6410103b72b2
28,014
import os


def save_pretrained_models(
    model: nn.Module,
    config: DictConfig,
    path: str,
) -> DictConfig:
    """
    Save the pretrained models and configs to local to make future loading
    not dependent on Internet access. By loading local checkpoints,
    Huggingface doesn't need to download pretrained checkpoints from
    Internet. It is called by setting "standalone=True" in
    "AutoMMPredictor.load()".

    Parameters
    ----------
    model
        One model.
    config
        A DictConfig object. The model config should be accessible by
        "config.model".
    path
        The path to save pretrained checkpoints.

    Returns
    -------
    The (possibly mutated) config with checkpoint names pointing at the
    local copies.
    """
    # Only HuggingFace-backed models (CLIP / HF text) have checkpoints
    # worth persisting locally.
    requires_saving = any([model_name.lower().startswith((CLIP, HF_TEXT)) for model_name in config.model.names])
    if not requires_saving:
        return config
    if len(config.model.names) == 1:
        # Single model: wrap it so the loop below is uniform.
        model = nn.ModuleList([model])
    else:  # assumes the fusion model has a model attribute, a nn.ModuleList
        model = model.model
    for per_model in model:
        if per_model.prefix.lower().startswith((CLIP, HF_TEXT)):
            per_model.model.save_pretrained(os.path.join(path, per_model.prefix))
            # Point the config at the local copy; "local://" is resolved
            # relative to the save path at load time.
            model_config = getattr(config.model, per_model.prefix)
            model_config.checkpoint_name = os.path.join('local://', per_model.prefix)
    return config
f33fb55d8a152581c906470156a0dc8c87dadef4
28,015
from datetime import datetime


def get_ethpm_birth_block(
    w3: Web3, from_block: int, to_block: int, target_timestamp: int
) -> int:
    """
    Returns the closest block found before the target_timestamp.

    Binary-searches block numbers in ``[from_block, to_block)`` by their
    mined timestamps.

    :param w3: connected Web3 instance used to fetch block headers.
    :param from_block: inclusive lower bound of the search range.
    :param to_block: exclusive upper bound of the search range.
    :param target_timestamp: UNIX timestamp to locate.
    :raises BlockNotFoundError: when no block with exactly the target
        timestamp lies in the range.
    """
    version_release_date = datetime.fromtimestamp(target_timestamp)
    while from_block < to_block:
        mid = BlockNumber((from_block + to_block) // 2)
        target = datetime.fromtimestamp(w3.eth.getBlock(mid)["timestamp"])
        if target > version_release_date:
            to_block = mid
        elif target < version_release_date:
            from_block = mid + 1
        else:
            # Exact timestamp hit: the previous block is the last one mined
            # strictly before the target.
            return mid - 1
    # NOTE(review): the search raises unless a block's timestamp matches
    # *exactly*; confirm whether returning the converged bound was intended
    # for timestamps that fall between blocks.
    raise BlockNotFoundError(
        f"Cannot find closest block to timestamp: {target_timestamp} "
        f"in range given {from_block} - {to_block}."
    )
c2448152cea2a3c9a9dd227a5126e2dd0767b773
28,016
def Line(p0, p1=None, c="r", alpha=1, lw=1, dotted=False, res=None): """ Build the line segment between points `p0` and `p1`. If `p0` is a list of points returns the line connecting them. A 2D set of coords can also be passed as p0=[x..], p1=[y..]. :param c: color name, number, or list of [R,G,B] colors. :type c: int, str, list :param float alpha: transparency in range [0,1]. :param lw: line width. :param bool dotted: draw a dotted line :param int res: number of intermediate points in the segment """ # detect if user is passing a 2D ist of points as p0=xlist, p1=ylist: if len(p0) > 3: if not utils.isSequence(p0[0]) and not utils.isSequence(p1[0]) and len(p0)==len(p1): # assume input is 2D xlist, ylist p0 = list(zip(p0, p1)) p1 = None # detect if user is passing a list of points: if utils.isSequence(p0[0]): ppoints = vtk.vtkPoints() # Generate the polyline dim = len((p0[0])) if dim == 2: for i, p in enumerate(p0): ppoints.InsertPoint(i, p[0], p[1], 0) else: ppoints.SetData(numpy_to_vtk(p0, deep=True)) lines = vtk.vtkCellArray() # Create the polyline. lines.InsertNextCell(len(p0)) for i in range(len(p0)): lines.InsertCellPoint(i) poly = vtk.vtkPolyData() poly.SetPoints(ppoints) poly.SetLines(lines) else: # or just 2 points to link lineSource = vtk.vtkLineSource() lineSource.SetPoint1(p0) lineSource.SetPoint2(p1) if res: lineSource.SetResolution(res) lineSource.Update() poly = lineSource.GetOutput() actor = Actor(poly, c, alpha) actor.GetProperty().SetLineWidth(lw) if dotted: actor.GetProperty().SetLineStipplePattern(0xF0F0) actor.GetProperty().SetLineStippleRepeatFactor(1) actor.base = np.array(p0) actor.top = np.array(p1) settings.collectable_actors.append(actor) return actor
1a56c260ad0d3478b51db03fa267898c637bf819
28,017
def date_dd(dataset, source):
    """Display 3 blocks: 1. image of the patent, 2. choice block, 3. text
    block for date. 2 is artifical and should be ignored.

    :param dataset: name of the annotation dataset to store answers into.
    :param source: directory of images to stream as tasks.
    :return: recipe dict consumed by the annotation tool (Prodigy-style --
        TODO confirm).
    """
    def get_stream():
        # Load the directory of images and add options to each task
        stream = Images(source)
        for eg in stream:
            # OPTIONS is a module-level list of choice options for block 2.
            eg["options"] = OPTIONS
            yield eg
    return {
        "dataset": dataset,
        "view_id": "blocks",
        "config": {
            "choice_style": "single",
            "blocks": [
                # Choice block with no extra text of its own.
                {"view_id": "choice", "text": None},
                # Free-text input for the publication year.
                {
                    "view_id": "text_input",
                    "field_rows": 1,
                    "field_label": "Publication year (yyyy)",
                },
            ],
        },
        "stream": get_stream(),
    }
fae232b97ab4d758aceea806ebc95816db3cb044
28,018
def sort(array=None):
    """Return a sorted copy of *array* using quicksort.

    :param array: sequence of comparable items; when None, a demo list
        ``[12, 4, 5, 6, 7, 3, 1, 15]`` is used.  (Previously the demo list
        was a mutable default argument -- an anti-pattern even though it was
        never mutated.)
    :return: new sorted list (the base case returns the <=1-element input
        unchanged).
    """
    if array is None:
        array = [12, 4, 5, 6, 7, 3, 1, 15]
    # Base case of the recursion: a 0/1-element array is already sorted.
    if len(array) <= 1:
        return array
    pivot = array[0]
    # Three-way partition keeps duplicates of the pivot out of the
    # recursive calls, so equal keys never recurse forever.
    less = [x for x in array if x < pivot]
    equal = [x for x in array if x == pivot]
    greater = [x for x in array if x > pivot]
    return sort(less) + equal + sort(greater)
bc31df069f8e985d620032b9053bd8f13880780f
28,019
def _derive_country_JP(place):
    """Derive Japanese place names.

    Builds alternative English spellings (prefecture/city/ward variants)
    from the place's ascii or native name, plus a Japanese-script variant
    for -shi city names.  The branches are mutually exclusive: the first
    matching suffix wins.
    """
    derived = []
    # Prefecture level: -fu (e.g. Osaka-fu) on the ascii name.
    if _JP_FU_SUFFIX.search(place.asciiname):
        bare = _JP_FU_SUFFIX.sub("", place.asciiname)
        derived += [bare, bare + " prefecture", bare + " pref"]
    # Prefecture level: -ken, which also gets -ken spelled-out variants.
    elif _JP_KEN_SUFFIX.search(place.asciiname):
        bare = _JP_KEN_SUFFIX.sub("", place.asciiname)
        derived += [bare, bare + " prefecture", bare + " pref", bare + "-ken", bare + " ken"]
    # City level (-shi) and ward level (-ku) match against the native name.
    elif _JP_SHI_SUFFIX.search(place.name):
        bare = _JP_SHI_SUFFIX.sub("", place.name)
        derived += [bare, bare + "-city", bare + " city"]
    elif _JP_KU_SUFFIX.search(place.name):
        bare = _JP_KU_SUFFIX.sub("", place.name)
        derived += [bare, bare + "-ku", bare + " ku", bare + " ward"]
    en_names = [DerivedName(text.lower(), "en") for text in derived]
    _LOGGER.debug("derive_country_JP: en_names: %r", en_names)
    # Japanese-script -shi suffix additionally yields a ja-language name.
    if _JA_JP_SHI_SUFFIX.search(place.name):
        bare = _JA_JP_SHI_SUFFIX.sub("", place.name)
        ja_names = [DerivedName(bare, "ja")]
    else:
        ja_names = []
    return en_names + ja_names
aea2aec9611457e63de335c3fe333be74c0efa6f
28,020
from typing import Optional


async def remove_completed_game(player_id: str, game_id: str) -> Optional[dict]:
    """
    Updates the player's completed games by removing a game from the list.

    :param player_id: the object id of the player
    :param game_id: the object id of the game
    :return: an awaitable resolving to the updated document, or None if one
        is not found
    """
    collection = await db.get_player_collection()
    # $pull removes the matching id from the completed_games array;
    # ReturnDocument.AFTER yields the post-update document (None when no
    # player matched the _id filter).
    updated_player = await collection.find_one_and_update(
        {"_id": PydanticObjectID(player_id)},
        {"$pull": {"completed_games": PydanticObjectID(game_id)}},
        return_document=ReturnDocument.AFTER,
    )
    return updated_player
2e5f4ec3af053d1f1685e6a576d8027db585bc87
28,021
def triangulate_dlt(Ps, ys):
    """Triangulate 3D position between two 2D correspondances using the
    direct linear transformation (DLT) method. If any 2D correspondance is
    missing (i.e. NaN), returns triangulated position as NaN value as well.

    TODO: Normalize input data (see HZ, p104. "4.4 Transformation invariance
    and normalization.", particulary 4.4.4): For each image,
    1. Translate all points such that collection's centroid is about the origin.
    2. Scale all points (Cartesian/non-homogeneous) so average distance is sqrt(2).

    Reference: Hartley and Zimmerman, p312; [OpenCV implementation]
    (https://github.com/opencv/opencv_contrib/blob/master/modules/sfm/src/triangulation.cpp)

    Parameters
    ----------
    Ps: ndarray, shape (C,3,4)
        Camera matrices
    ys: ndarray, shape (C,...,2)
        2D image correspondances

    Returns
    -------
    x: ndarray, shape (...,3)
    """
    Ps, ys = jnp.atleast_3d(Ps), jnp.atleast_3d(ys)
    num_cameras = len(Ps)
    batch_shape = ys.shape[1:-1]
    # Two linear constraints per camera on the homogeneous 3D point.
    A = jnp.empty((*batch_shape, 2*num_cameras, 4))
    # Eliminate homogeneous scale factor via cross product
    for c in range(num_cameras):
        P, y = Ps[c], ys[c]
        A = A.at[..., 2*c, :].set(y[..., [0]] * P[2] - P[0])
        A = A.at[..., 2*c+1, :].set(y[..., [1]] * P[2] - P[1])
    # Solution which minimizes algebraic corespondance error is
    # the right eigenvector associated with smallest eigenvalue.
    # Vh: ndarray, shape (N,4,4). Matrix of right eigenvectors
    _, _, Vh = jnp.linalg.svd(A)
    X = Vh[..., -1, :]  # 3D position in homogeneous coordinates, shape (N,4)
    # De-homogenize: divide through by the last coordinate.
    X = X[..., :-1] / X[..., [-1]]
    # Inputs with NaN entries produce NaN matrix blocks in A.
    # When SVD performed on these matrix blocks, returns matrix of -I
    # Resulting X vector = [0,0,0.], which is unidentifiable from a point
    # truly at the origin. Mask these points out and reset as NaN.
    # NBL: Appears unneeded with jax.numpy, but needed for regular numpy
    # isnan_mask = jnp.any(jnp.isnan(A), axis=(-1,-2)) # shape (...,)
    # X = X.at[isnan_mask].set(jnp.nan)
    return jnp.squeeze(X)
0e291bdcd3b95eb6dd29f3f5a305d0fe88c07072
28,022
def is_multioutput(y):
    """Whether the target ``y`` is multi-output: an array-like with a
    ``shape`` attribute that is 2-D and has more than one column."""
    if not hasattr(y, "shape"):
        return False
    return y.ndim == 2 and y.shape[1] > 1
bcdaa46c304fec50c173dffca5f1f1d5d8871a58
28,023
def read_all(db: Session):
    """
    Get all questions.

    (The previous docstring said "dimensions"; the query targets
    QuestionModel.)

    :param db: active SQLAlchemy session
    :return: List[QuestionModel] -- every row of the questions table
    """
    question = db.query(QuestionModel).all()
    return question
a854c4667dc30918cd1e9ec767d65fa8ad1fb5ca
28,024
def get_all_tenants(context):
    """Return a query over all tenants (projects) stored in the repository.

    Note: this returns the SQLAlchemy Query object itself -- iterable but
    not materialized into a list.

    :param context: context of the transaction
    """
    return context.session.query(db_models.AristaProvisionedProjects)
62d8fed653f5b8e380caa47f5f408ecab860a58b
28,025
import os


def load_sample_nni(series='short'):
    """Return a short-term (5min) or long-term (60min) sample NNI series
    from the pyhrv/files/ directory.

    Parameters
    ----------
    series : string, optional
        'long' selects the 60min series, 'short' the 5min series.

    Returns
    -------
    nni_series : array
        Sample NNI series.

    Raises
    ------
    ValueError
        If *series* is anything other than 'short' or 'long'.

    Note
    ----
    .. These sample series were extracted from the MIT-BIH NSRDB Database
       from physionet.org: https://physionet.org/physiobank/database/nsrdb/
    """
    sample_files = {
        'long': './files/SampleNNISeriesLong.npy',
        'short': './files/SampleNNISeriesShort.npy',
    }
    if series not in sample_files:
        raise ValueError("Unknown input value '%s'. Please select 'short' or 'long'." % series)
    base_dir = os.path.split(__file__)[0]
    return np.load(os.path.join(base_dir, sample_files[series]))
d3ffd2208321f432312f87922deb4aac22c87ce5
28,026
def total_sub_pixels_2d_from(mask_2d: np.ndarray, sub_size: int) -> int:
    """
    Count the sub-pixels contained in the unmasked pixels of a 2D mask.

    Each unmasked (`False`) pixel is subdivided into a ``sub_size`` x
    ``sub_size`` grid, so the total is the unmasked-pixel count times
    ``sub_size`` squared.

    Parameters
    ----------
    mask_2d : np.ndarray
        A 2D array of bools, where `False` values are unmasked and included
        when counting sub pixels.
    sub_size : int
        The size of the sub-grid that each pixel of the 2D mask array is
        divided into.

    Returns
    -------
    int
        The total number of sub pixels that are unmasked.

    Examples
    --------
    mask = np.array([[True, False, True],
                     [False, False, False],
                     [True, False, True]])
    total_sub_pixels = total_sub_pixels_from_mask(mask=mask, sub_size=2)
    """
    sub_pixels_per_pixel = sub_size * sub_size
    return total_pixels_2d_from(mask_2d) * sub_pixels_per_pixel
98461ffe073172db596570630ccfbd27384c7e3a
28,027
import logging
import sys


def connectOutputLogger(file=None, logger_name='output'):
    """
    Function that connects the output logger. This is always enabled and
    hardwired to generate logging.INFO level messages only.

    @params file : Output file to store these messages into.
    @default None : If file is None no output file generated.
    @params logger_name : Name of the logger to configure.
    @return : the configured logging.Logger instance.
    """
    # Create logger for logging messages
    logger = logging.getLogger(logger_name)
    # Messages are emitted verbatim: no level/date prefix.
    formatter = logging.Formatter('%(message)s')
    # Always mirror messages to stdout.
    std_output = logging.StreamHandler(sys.stdout)
    std_output.setFormatter(formatter)
    logger.addHandler(std_output)
    # Optionally also log to a file.  (Fixed: was ``file != None``;
    # identity comparison is the correct/idiomatic check for None.)
    if file is not None:
        file_output = logging.FileHandler(file)
        file_output.setFormatter(formatter)
        logger.addHandler(file_output)
    logger.setLevel(logging.INFO)
    # NOTE: calling this twice with the same logger_name stacks duplicate
    # handlers (messages printed twice); callers should connect once.
    return logger
ce3b0f2a9910afd1ea364177f746353559f63bd2
28,028
from simtk import unit as simtk_unit
import torch


def formaldehyde_conformer(formaldehyde) -> torch.Tensor:
    """Returns a conformer [A] of formaldehyde with an ordering which matches
    the ``formaldehyde`` fixture.

    :param formaldehyde: molecule fixture exposing ``generate_conformers``
        and ``conformers`` (presumably an OpenFF Molecule -- TODO confirm).
    :return: float32 tensor of atomic coordinates in angstrom.
    """
    # Generate a single conformer in-place on the fixture's molecule.
    formaldehyde.generate_conformers(n_conformers=1)
    # Strip the simtk units down to plain angstrom values before handing
    # the array to torch.
    conformer = formaldehyde.conformers[0].value_in_unit(simtk_unit.angstrom)
    return torch.from_numpy(conformer).type(torch.float)
f5a9a19f6dd8e26a121e496161fa6da7b8f63047
28,029
import warnings


def reduce_function(op_func, input_tensor, axis=None, keepdims=None,
                    name=None, reduction_indices=None):
    """
    Handler function for Tensorflow depreciation of keep_dims for tf 1.8
    and above, but tf 1.4 requires keep_dims.

    :param op_func: expects the function to handle eg: tf.reduce_sum.
    :param input_tensor: The tensor to reduce. Should have numeric type.
    :param axis: The dimensions to reduce. If None (the default), reduces
        all dimensions. Must be in the range
        [-rank(input_tensor), rank(input_tensor)).
    :param keepdims: If true, retains reduced dimensions with length 1.
        (Forwarded as the legacy ``keep_dims`` kwarg on tf < 1.8.)
    :param name: A name for the operation (optional).
    :param reduction_indices: The old (deprecated) name for axis.
    :return: outputs same value as op_func.
    """
    if LooseVersion(tf.__version__) < LooseVersion('1.8.0'):
        # Old tf: still spells the kwarg keep_dims; also warn that support
        # for these versions is being dropped.
        warning = "Running on tensorflow version " + \
                  LooseVersion(tf.__version__).vstring + \
                  ". Support for this version in CleverHans is deprecated " + \
                  "and may be removed on or after 2019-01-26"
        warnings.warn(warning)
        out = op_func(input_tensor, axis=axis, keep_dims=keepdims, name=name,
                      reduction_indices=reduction_indices)
    else:
        out = op_func(input_tensor, axis=axis, keepdims=keepdims, name=name,
                      reduction_indices=reduction_indices)
    return out
f6433479bcb01a8fc5dfc2c08dd70bf2fe500e94
28,030
from typing import Mapping


def filter_dict(function_or_value, dict_to_filter):
    """Recursively drop dictionary entries that match a value or predicate.

    When *function_or_value* is callable it acts as a keep-predicate;
    otherwise entries equal to it are removed.  Nested mappings are filtered
    recursively and dropped entirely when they end up empty (or falsy).

    Filter by value
    >>> filter_dict(123, {'a': 123, 'b': 1234})
    {'b': 1234}

    Embedded with extra by value
    >>> filter_dict(123, {'a': {'c': 123, 'd': 432}, 'b': 1234})
    {'a': {'d': 432}, 'b': 1234}

    Filter by callable
    >>> filter_dict(lambda x: x % 2 == 0, {'a': 532, 'b': 891})
    {'a': 532}

    Embedded mixed filter
    >>> filter_dict(
    ...     lambda x: bool(x), {'a': {'c': True, 'd': 0}, 'b': 'test', 'e': []}
    ... )
    {'a': {'c': True}, 'b': 'test'}
    """
    if callable(function_or_value):
        keep = function_or_value
    else:
        def keep(value):
            return value != function_or_value

    filtered = {}
    for key, value in dict_to_filter.items():
        if isinstance(value, Mapping):
            nested = filter_dict(keep, value)
            if nested:
                filtered[key] = nested
        elif keep(value):
            filtered[key] = value
    return filtered
6403f716c21a1cfef046174899183858837bb92e
28,031
import click
from typing import OrderedDict
import json
import sys


def _buy(config, client, machine_auth, resource, info_only=False, payment_method='offchain',
         header=(), method='GET', output_file=None, data=None, data_file=None, maxprice=10000):
    """Purchase a 402-enabled resource via CLI.

    This function attempts to purchase the requested resource using the
    `payment_method` and then write out its results to STDOUT. This allows a
    user to view results or pipe them into another command-line function.

    Args:
        config (two1.commands.config.Config): an object necessary for
            various user-specific actions, as well as for using the
            `capture_usage` function decorator.
        client (two1.server.rest_client.TwentyOneRestClient) an object for
            sending authenticated requests to the TwentyOne backend.
        machine_auth (two1.server.machine_auth_wallet.MachineAuthWallet): a
            wallet used for machine authentication.
        resource (str): a URI of the form scheme://host:port/path with
            `http` and `https` strictly enforced as required schemes.
        info_only (bool): if True, do not purchase the resource, and cause
            the function to write only the 402-related headers.
        payment_method (str): the payment method used for the purchase.
        header (tuple): list of HTTP headers to send with the request.
        method (str): the HTTP method/verb to make with the request.
        output_file (str): name of the file to redirect function output.
        data (str): serialized data to send with the request. The function
            will attempt to deserialize the data and determine its encoding
            type.
        data_file (str): name of the data file to send in HTTP body.
        maxprice (int): allowed maximum price (in satoshis) of the resource.

    Raises:
        click.ClickException: if some set of parameters or behavior cause
            the purchase to not complete successfully for any reason.
    """
    # Find the correct payment method
    if payment_method == 'offchain':
        requests = bitrequests.BitTransferRequests(machine_auth, config.username)
    elif payment_method == 'onchain':
        requests = bitrequests.OnChainRequests(machine_auth.wallet)
    elif payment_method == 'channel':
        requests = bitrequests.ChannelRequests(machine_auth.wallet)
    else:
        raise click.ClickException(uxstring.UxString.buy_bad_payment_method.format(payment_method))
    # Request user consent if they're creating a channel for the first time
    if payment_method == 'channel' and not requests._channelclient.list():
        confirmed = click.confirm(uxstring.UxString.buy_channel_warning.format(
            requests.DEFAULT_DEPOSIT_AMOUNT,
            statemachine.PaymentChannelStateMachine.PAYMENT_TX_MIN_OUTPUT_AMOUNT
        ), default=True)
        if not confirmed:
            raise click.ClickException(uxstring.UxString.buy_channel_aborted)
    # Validate/normalize the URI (enforces http/https schemes).
    resource = parse_resource(resource)
    # Retrieve 402-related header information, print it, then exit
    if info_only:
        response = requests.get_402_info(resource)
        return logger.info('\n'.join(['{}: {}'.format(key, val) for key, val in response.items()]))
    # Collect HTTP header parameters into a single dictionary
    headers = {key.strip(): value.strip() for key, value in (h.split(':') for h in header)}
    # Handle data if applicable: a body implies POST unless overridden.
    if data or data_file:
        method = 'POST' if method == 'GET' else method
        if data:
            data, headers['Content-Type'] = _parse_post_data(data)
    # Make the paid request for the resource
    try:
        response = requests.request(
            method.lower(), resource, max_price=maxprice, data=data or data_file, headers=headers
        )
    except bitrequests.ResourcePriceGreaterThanMaxPriceError as e:
        raise click.ClickException(uxstring.UxString.Error.resource_price_greater_than_max_price.format(e))
    except wallet_exceptions.DustLimitError as e:
        raise click.ClickException(e)
    # NOTE(review): this broad catch converts *any* failure into a generic
    # ClickException, hiding the original traceback.
    except Exception as e:
        raise click.ClickException(e)
    # Write response text to stdout or a filename if provided
    if not output_file:
        try:
            json_resp = response.json()
        except ValueError:
            # Not JSON: emit the raw bytes.
            logger.info(response.content, nl=False)
        else:
            if isinstance(json_resp, dict):
                # Sort keys for stable, diff-friendly output.
                ordered = OrderedDict(sorted(json_resp.items()))
                logger.info(json.dumps(ordered, indent=4), nl=False)
            else:
                logger.info(json.dumps(json_resp, indent=4), nl=False)
    else:
        with open(output_file, 'wb') as f:
            logger.info(response.content, file=f, nl=False)
    logger.info('', err=True)  # newline for pretty-printing errors to stdout
    # We will have paid iff response is a paid_response (regardless of
    # response.ok)
    if hasattr(response, 'amount_paid'):
        # Fetch and write out diagnostic payment information for balances
        if payment_method == 'offchain':
            twentyone_balance = client.get_earnings()["total_earnings"]
            logger.info(
                uxstring.UxString.buy_balances.format(response.amount_paid, '21.co', twentyone_balance), err=True)
        elif payment_method == 'onchain':
            onchain_balance = min(requests.wallet.confirmed_balance(),
                                  requests.wallet.unconfirmed_balance())
            logger.info(
                uxstring.UxString.buy_balances.format(response.amount_paid, 'blockchain', onchain_balance), err=True)
        elif payment_method == 'channel':
            channel_client = requests._channelclient
            channel_client.sync()
            channels_balance = sum(s.balance for s in (channel_client.status(url)
                                                       for url in channel_client.list())
                                   if s.state == channels.PaymentChannelState.READY)
            logger.info(
                uxstring.UxString.buy_balances.format(response.amount_paid, 'payment channels', channels_balance),
                err=True
            )
    # Non-2xx response: signal failure to the shell.
    if not response.ok:
        sys.exit(1)
52a139a4c0ac7cff9fb788b646d2e2744f3506dd
28,032
def resnet50(pretrained=False, **kwargs):
    """Constructs a ResNet-50 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        **kwargs: forwarded to the ``ResNet`` constructor (e.g. ``num_classes``)

    Returns:
        ResNet: a ResNet-50 (Bottleneck blocks, stage sizes [3, 4, 6, 3])
    """
    # NOTE(review): `pretrained` is accepted but never used in this copy --
    # no pre-trained weights are loaded. Confirm whether weight loading was
    # intentionally removed.
    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    return model
ba0f11d8645f3dcc5ccc48ec718de0c6ff624930
28,033
import torch
import math


def irfft(x, res):
    """
    Inverse real FFT of a compactly packed, real-valued spectrum.

    :param x: tensor of shape [..., m] with m <= res (packed real spectrum)
    :param res: output resolution; must be odd
    :return: tensor of shape [..., res]
    """
    assert res % 2 == 1
    *size, sm = x.shape
    x = x.reshape(-1, sm)
    # Zero-pad the packed spectrum symmetrically on both sides up to `res`.
    x = torch.cat([
        x.new_zeros(x.shape[0], (res - sm) // 2),
        x,
        x.new_zeros(x.shape[0], (res - sm) // 2),
    ], dim=-1)
    assert x.shape[1] == res
    l = res // 2
    # Re-pack into the (real, imag) last-dim layout expected by the legacy
    # torch.irfft API: DC term first, then the upper half as real parts
    # (scaled by 1/sqrt(2)) and the mirrored lower half as imaginary parts
    # (scaled by -1/sqrt(2)).
    x = torch.stack([
        torch.cat([
            x[:, l:l + 1],
            x[:, l + 1:].div(math.sqrt(2))
        ], dim=1),
        torch.cat([
            torch.zeros_like(x[:, :1]),
            x[:, :l].flip(-1).div(-math.sqrt(2)),
        ], dim=1),
    ], dim=-1)
    # NOTE(review): torch.irfft was removed in torch >= 1.8 in favour of
    # torch.fft.irfft -- this code requires a legacy torch version; confirm
    # the project's pinned dependency before upgrading.
    x = torch.irfft(x, 1) * res
    return x.reshape(*size, res)
8f383523bc0c4ed6895d8aad0aca2758401d2fe5
28,034
import torch


def calc_ranks(idx, label, pred_score):
    """Calculating triples score ranks.

    Args:
        idx (torch.Tensor): The id of the entity to be predicted, dim [batch_size].
        label (torch.Tensor): The id of existing triples, to calc filtered results.
        pred_score (torch.Tensor): The score of the triple predicted by the model,
            dim [batch_size, num_entities].

    Returns:
        ranks: The rank of the triple to be predicted, dim [batch_size].
    """
    # Fix: create the batch index on the same device as the scores; the
    # original torch.arange defaulted to CPU and failed for CUDA tensors.
    b_range = torch.arange(pred_score.size(0), device=pred_score.device)
    # Remember the target scores before masking so they survive filtering.
    target_pred = pred_score[b_range, idx]
    # Filtered setting: push every known-true triple to a very low score so
    # it cannot outrank the target (torch.where returns a new tensor, the
    # caller's tensor is left untouched).
    pred_score = torch.where(
        label.bool(), -torch.ones_like(pred_score) * 10000000, pred_score
    )
    pred_score[b_range, idx] = target_pred
    # Double argsort converts scores into 1-based ranks per row.
    ranks = (
        1
        + torch.argsort(
            torch.argsort(pred_score, dim=1, descending=True),
            dim=1,
            descending=False,
        )[b_range, idx]
    )
    return ranks
1f3d56c9a93afdd314c9a244319ef78668426481
28,035
def GBT(trainingData, testData):
    """
    Gradient Boosted Tree Regression Model.

    :param trainingData: training DataFrame
    :param testData: test DataFrame
    :return: (trained model, predictions DataFrame)
    """
    estimator = GBTRegressor(maxIter=100, maxDepth=6, seed=42)
    fitted_model = estimator.fit(trainingData)
    predictions = fitted_model.transform(testData)
    return fitted_model, predictions
4e17c7188ccdd2676463a705a4e3ab4ccbc5adeb
28,036
def fromcolumns(cols, header=None, missing=None):
    """View a sequence of columns as a table, e.g.::

        >>> import petl as etl
        >>> cols = [[0, 1, 2], ['a', 'b', 'c']]
        >>> tbl = etl.fromcolumns(cols)

    Rows are formed by zipping the columns together. When the columns are
    not all the same length, shorter ones are padded to the length of the
    longest column with `missing` (None by default), e.g.::

        >>> cols = [[0, 1, 2], ['a', 'b']]
        >>> tbl = etl.fromcolumns(cols, missing='NA')

    See also :func:`petl.io.json.fromdicts`.

    .. versionadded:: 1.1.0

    """

    view = ColumnsView(cols, header=header, missing=missing)
    return view
c033e0fbc11e18a73eb8216e4a3a2c79a0756bb8
28,037
import re


def function_sql(field, mysql_result_list):
    """
    Substitute MySQL query results into ``{__SQLn}`` placeholders.

    :param field: the field string defined in the yaml file; may contain
        placeholders of the form ``{__SQLn}`` where ``n`` indexes into
        ``mysql_result_list``
    :param mysql_result_list: list of MySQL query results
    :return: the field with every placeholder replaced (non-str results are
        stringified); returned unchanged when no placeholder is present
    """
    if "{__SQL" not in field:
        return field
    # Each captured group is the index between "{__SQL" and "}".
    for index in re.findall(r"\{__SQL(.+?)\}", field):
        value = mysql_result_list[int(index)]
        # str() covers both str and non-str results (the original used a
        # non-idiomatic type() comparison before converting).
        field = field.replace("{__SQL" + index + "}", str(value))
    return field
769881ae5e3a7caa036c977785827e219e5ab92b
28,038
import os


def get_data_path():
    """
    Return the location of the settings file for the data readers.
    """
    directory, _filename = os.path.split(__file__)
    return directory
aaae8a5c9b5b18c8b2d9513cb4b7be6e466bb443
28,039
def enable_dropout(model, rate=None, custom_objects=None):
    """
    Enables the dropout layers at inference time - used for Monte Carlo
    dropout based uncertainty computation.

    Note: the weights need to be reloaded after calling this function
    >>> model = enable_dropout(model)
    >>> model.load_weights('path to model weight')

    :param model: a tf.keras functional model
    :param rate: optional replacement dropout rate; must be in [0, 1).
        When None the layers keep their configured rates.
    :param custom_objects: optional dict forwarded to ``Model.from_config``
    :return: a new model rebuilt from the modified config
    """
    # Fix: the original default was a shared mutable `{}` argument; use the
    # None-sentinel idiom instead (behaviour for all callers is unchanged).
    if custom_objects is None:
        custom_objects = {}
    if rate is not None:
        assert rate >= 0 and rate < 1, 'dropout rate is out of range'

    model_config = model.get_config()
    for layer in model_config['layers']:
        if layer['class_name'] in ('SpatialDropout2D', 'Dropout'):
            # Force training=True on the layer call so dropout stays active
            # at inference time (functional-API config layout).
            layer['inbound_nodes'][0][0][-1]['training'] = True
            if rate is not None:
                layer['config']['rate'] = rate

    return tf.keras.models.Model.from_config(model_config,
                                             custom_objects=custom_objects)
2268c23bc5598fcf0befe76a15f0dbc444e28828
28,040
import ctypes


def get_max_torque_norm(p_state, idx_image=-1, idx_chain=-1):
    """Returns the current maximum norm of the torque acting on any spin."""
    state_ptr = ctypes.c_void_p(p_state)
    image_idx = ctypes.c_int(idx_image)
    chain_idx = ctypes.c_int(idx_chain)
    return float(_Get_MaxTorqueNorm(state_ptr, image_idx, chain_idx))
b6ae73ef269a192b5aafc96e939a0ab1f9a937be
28,041
def filter_by_zscore(data, features, remove_z):
    """Remove rows whose |z score| is >= remove_z in any feature column."""
    scores = zscore(data[features])
    # NaN/inf z-scores (e.g. constant columns) are treated as 0, i.e. kept.
    scores = np.nan_to_num(scores, posinf=0.0, neginf=0.0)
    within_bounds = (np.abs(scores) < remove_z).all(axis=1)
    return data[within_bounds]
bbaad3ee7879d64dafb2e45062c6cbe97ff457bc
28,042
def _GetProperty(obj, components):
    """Grabs a property from obj by walking the parsed path components."""
    if obj is None:
        return None
    if not components:
        return obj
    head, rest = components[0], components[1:]
    if isinstance(head, _Key) and isinstance(obj, dict):
        return _GetProperty(obj.get(head), rest)
    if isinstance(head, _Index) and isinstance(obj, list) and head < len(obj):
        return _GetProperty(obj[head], rest)
    if isinstance(head, _Slice) and isinstance(obj, list):
        return [_GetProperty(item, rest) for item in obj]
    return None
d887613e06078fcde887d51c8f83cc9ddc8f16f8
28,043
def make_similarity_function(similarity=None, distance=None, radius=None):
    """
    Build a boolean similarity predicate from a similarity or distance
    function, optionally thresholded by a radius.

    A given distance function is inverted, and when a radius is supplied the
    returned predicate tests whether the similarity/distance between two
    items is over/under it.

    Args:
        similarity (callable, optional): Similarity function.
        distance (callable, optional): Distance function.
        radius (number, optional): Radius.

    Returns:
        function: A similarity function with signature (A, B) -> bool

    Raises:
        TypeError: when neither similarity nor distance is given.
    """
    if similarity is None and distance is None:
        raise TypeError('fog.clustering: need at least a similarity or distance function.')

    if radius is None:
        # No threshold: pass the similarity through, or invert the distance.
        if similarity:
            return similarity
        return lambda A, B: not distance(A, B)

    # Thresholded: similarity must reach the radius, distance must stay within.
    if similarity:
        return lambda A, B: similarity(A, B) >= radius
    return lambda A, B: distance(A, B) <= radius
b8eeeeb466f21f2b3605941253f56392c3e41e88
28,044
from typing import Optional


def prepare_error_message(message: str, error_context: Optional[str] = None) -> str:
    """
    Return *message*, prefixed with ``error_context: `` when a context is given.
    """
    if error_context is None:
        return message
    return error_context + ": " + message
ea95d40797fcc431412990706d5c098a07986156
28,045
def _options_from_args(args):
    """Build a QRCodeOptions instance from the provided arguments.

    A pre-built instance may be passed under the 'options' key; otherwise
    the remaining arguments are used as keyword arguments.
    """
    options = args.get('options')
    if not options:
        # Convert the literal string "None" into Python None before building.
        kw = {key: (None if value == 'None' else value) for key, value in args.items()}
        return QRCodeOptions(**kw)
    if not isinstance(options, QRCodeOptions):
        raise TypeError('The options argument must be of type QRCodeOptions.')
    return options
ff895e537a0d2c00f42e10f827b8176865902774
28,046
def calc_q_rq_H(region, R_type):
    """Required heating capacity per unit floor area.

    Args:
        region (int): energy-efficiency region classification
        R_type (string): heating/cooling zone category
            ('主たる居室' or 'その他の居室')

    Returns:
        float: required heating capacity per unit floor area

    Raises:
        ValueError: when R_type is neither '主たる居室' nor 'その他の居室'
    """
    table_3 = get_table_3()
    column_by_room_type = {'主たる居室': 0, 'その他の居室': 1}
    if R_type not in column_by_room_type:
        raise ValueError(R_type)
    return table_3[region - 1][column_by_room_type[R_type]]
1b413f0d83d723e1ef01c558cbc54e8afddc65ac
28,047
def tuple_compare_lt(left, right):
    """Compare two 'TupleOf' instances by comparing their individual elements.

    Lexicographic less-than: the first unequal pair decides; if one sequence
    is a prefix of the other, the shorter one is smaller.
    """
    # zip stops at the shorter sequence, replacing the index-based
    # range(len(...)) loop.
    for left_item, right_item in zip(left, right):
        if left_item > right_item:
            return False
        if left_item < right_item:
            return True
    # All compared elements equal: the shorter sequence is the smaller one.
    return len(left) < len(right)
8f93d0c1336fd63d7c7f04cf54680de25acfdafb
28,048
def multilevel_roi_align(inputs, boxes, image_shape, crop_size: int = 7):
    """Perform a batch multilevel roi_align on the inputs

    Arguments:

    - *inputs*: A list of tensors of shape [batch_size, width, height, channel]
    representing the pyramid.
    - *boxes*: A tensor and shape [batch_size, num_boxes, (y1, x1, y2, x2)]
    - *image_shape*: A tuple with the height and the width of the original image
    input image
    - *crop_size*: output spatial size of each pooled tile (default 7)

    Returns:

    A tensor and shape [batch_size * num_boxes, crop_size, crop_size, channel]
    """
    # Assign each box to a pyramid level; `pos_per_level` records each box's
    # original position so the concatenated output can be restored to the
    # caller's box order at the end.
    boxes_per_level, box_indices_per_level, pos_per_level = match_boxes_to_their_pyramid_level(
        boxes, len(inputs))

    # Pool each level's boxes from that level's feature map.
    tensors_per_level = []
    for tensor, target_boxes, box_indices in zip(inputs, boxes_per_level,
                                                 box_indices_per_level):
        tensors_per_level.append(
            roi_align(tensor, target_boxes, box_indices, image_shape, crop_size))

    tensors = tf.concat(values=tensors_per_level, axis=0)
    original_pos = tf.concat(values=pos_per_level, axis=0)

    # Reorder the tensor per batch: invert the level-grouping permutation.
    indices_to_reorder_boxes = tf.math.invert_permutation(original_pos)
    tensors = tf.gather(tensors, indices_to_reorder_boxes)
    return tensors
3b150e6b6bcada3d3633f1edf61a99a566792849
28,049
def login(): """Login user""" # Instantiate login form form = LoginForm() username = form.username.data if form.validate_on_submit(): # Query database for username and validate form submission user = User.query.filter_by(username=username).first() # if user exists if user: # Compare hashed password from submission to database password if check_password_hash(user.password, form.password.data): login_user(user, remember=True) flash(f"Logged in successfully as {form.username.data}.", category="success") # Return user to the home page return redirect(url_for("index")) else: flash("Incorrect password..please try again", category="error") else: flash("Username does not exist", category="error") return render_template("login.html", form=form)
fa1e1814d71bcbf04fda08b282f3f1a58965dcfb
28,050
from datetime import datetime
import uuid


def serialize(obj):
    """JSON serializer for objects not serializable by default json code.

    Handles datetimes (ISO-8601 with 'T' separator) and UUIDs (string form),
    then falls back to the object's ``__dict__`` and finally to ``str(obj)``;
    never raises.
    """
    # Fix: `datetime` here is the class (imported via `from datetime import
    # datetime`), so the old `isinstance(obj, datetime.datetime)` raised
    # AttributeError on every call.
    if isinstance(obj, datetime):
        return obj.isoformat(sep='T')

    if isinstance(obj, uuid.UUID):
        return str(obj)

    try:
        return obj.__dict__
    except AttributeError:
        # Objects without __dict__ (ints, builtins, __slots__ types).
        return str(obj)
    except Exception as e:
        # Last-ditch best effort: serialization itself must never raise.
        strval = 'unknown obj'
        exceptval = 'unknown err'
        try:
            strval = str(obj)
            exceptval = repr(e)
        except Exception:
            pass
        return 'json fail {} {}'.format(exceptval, strval)
c20abaac68e8f8c8314a6dbbaee128b54110705c
28,051
def clean_names_AZ(col):
    """
    Strip a column down to upper-case letters and spaces.

    Any character that is not A-Z or a space is replaced with the empty
    string, then the outer whitespace is trimmed.

    Example
    --------
    >>> Input: "JOHN SMITH 2000"
    >>> Output: "JOHN SMITH"
    """
    letters_and_spaces = regexp_replace(col, "[^A-Z ]+", "")
    return trim(letters_and_spaces)
4db710ec573087df59109046ea2a965c7545f1a2
28,052
def check_continent_node_membership(continents, continent_node_id):
    """Check that a node continent is bound to the matching relation
    through 'label' membership, returning a list of error strings.
    """
    assert continent_node_id[0] == 'n', ("A node expected in "
                                         "check_continent_node_membership()")
    errors = []
    node_data = continents[continent_node_id]
    for rel_id, rel_data in continents.items():
        # Only relations are candidates.
        if rel_id[0] != 'r':
            continue
        if not is_the_same_continent(node_data, rel_data):
            continue
        # The relation must list the node among its labels.
        if continent_node_id in rel_data.get('labels', []):
            continue
        errors.append(f"Node {continent_node_id} represents the same "
                      f"continent as relation {rel_id} "
                      "but is not its label")
    return errors
7ef0895e26fdd495f54ac58ea35513178f00eb19
28,053
import string


def remove_punctuation(input_string):
    """
    Remove the punctuation of input.

    Parameters
    ----------
    input_string : string

    Returns
    -------
    output_string : string
        string without punctuation
    """
    # str.translate does the whole pass in C, replacing the original
    # character-by-character string concatenation (quadratic in the worst
    # case). Only ASCII punctuation (string.punctuation) is removed, as before.
    return input_string.translate(str.maketrans('', '', string.punctuation))
2bbd1dc90d37c1ad16698092b6269c0fe601d902
28,054
from typing import Any


def field_value_between(value: Any = None, field: str = None, lower: float = None,
                        upper: float = None) -> bool:
    """
    Validate value at the given field to be between the lower/upper boundaries.

    :param value: a dict (or list of dicts) to inspect; falsy values fail
    :param field: key whose value is checked in each dict
    :param lower: inclusive lower bound
    :param upper: inclusive upper bound
    :return: True iff every item carries the field and its float value lies
        within [lower, upper]
    """
    if not value:
        return False
    items = value if isinstance(value, list) else [value]
    for item in items:
        raw = item.get(field)
        if raw is None:
            return False
        # Convert once (the original called float() twice per item) and use
        # a chained comparison for the bounds check.
        current = float(raw)
        if not (lower <= current <= upper):
            return False
    return True
4ff2dfa814f0ddda7efca3ce19f137a0d86b9f40
28,055
import yaml


def j2_to_json(path_in, path_out, **kwargs):
    """Render a yaml.j2 chart to JSON.

    Args:
        path_in: the j2 template path
        path_out: the JSON path to write to
        kwargs: data to pass to the j2 template

    Returns:
        the file path and JSON string
    """
    # Pipeline: render the Jinja2 template, parse the YAML text, then write
    # the parsed data out as JSON.
    # NOTE(review): assumes `pipe` threads the rendered text through each
    # callable in order and that `write_json(filepath=...)` is curried to
    # return a one-argument writer -- confirm against the helper definitions.
    # NOTE(review): yaml.load without an explicit Loader is unsafe on
    # untrusted input and warns on modern PyYAML; templates are presumably
    # trusted here -- verify.
    return pipe(
        render_yaml(path_in, **kwargs),
        yaml.load,
        write_json(filepath=path_out)  # pylint: disable=no-value-for-parameter
    )
2cd41eb29e293e44772855f7d66e7425eedaec8d
28,056
def user_logged_out(connection,user):
    """
    update login status to false when user has logged out

    :param connection: open DB-API connection (used as a transaction context)
    :param user: value bound into the UPDATE statement's placeholder
    :return: the result of ``connection.execute`` (typically a cursor)
    """
    # `with connection` commits the UPDATE on success and rolls back on error.
    with connection:
        return connection.execute(UPDATE_USER_LOGIN_STATUS_TO_FALSE,(user,))
b355fa6e74180adb7504e60602cb164095e1898d
28,057
def findGrayscaleTilesInImage(img):
    """Find chessboard and convert into input tiles for CNN.

    Returns (tiles, corners), or (None, None) when no image is given or no
    chessboard could be located.
    """
    if img is None:
        return None, None

    # Grayscale float32 array of the input image.
    gray = np.asarray(img.convert("L"), dtype=np.float32)

    # Locate orthorectified chessboard corners via computer vision.
    corners = findChessboardCorners(gray)
    if corners is None:
        return None, None

    # Extract the grayscale tiles bounded by the detected corners.
    tiles = getChessTilesGray(gray, corners)
    return tiles, corners
d3431c519f53c0a56b144dde8196d58000f2f788
28,058
def run(df, docs, columns):
    """
    Cast each of the given columns of df to type int (in place).

    :param df: dataframe to transform
    :param docs: documentation objects notified before/after the change
    :param columns: column names to convert
    :return: the transformed dataframe
    """
    step_label = "t07 - Change type of {} to int".format(str(columns).replace("'", ""))
    for doc in docs:
        doc.start(step_label, df)

    for column in columns:
        df[column] = df[column].astype(int)

    for doc in docs:
        doc.end(df)
    return df
5d360a764ad30a80c39d58f9aeb520d7c57f7903
28,059
import requests


def get_articles():
    """Retrieve the articles list (via an API request).

    Returns:
        list: the validated article data.

    Raises:
        UnexpectedApiResponse: when the API does not answer with HTTP 200.
    """
    endpoint = "%s%s" % (
        settings.API_BASE_URL,
        reverse("api:articles-list")
    )
    headers = DEFAULT_REQUESTS_HEADERS
    # Fix: the `headers` local was previously assigned but then bypassed in
    # favour of the module constant; pass it through so the assignment is
    # actually used.
    r = requests.get(
        endpoint,
        headers=headers
    )
    if r.status_code != 200:
        raise UnexpectedApiResponse(
            endpoint=endpoint,
            method="GET",
            payload=None,
            response_status_code=r.status_code,
            response_content=r.content
        )
    raw_data = decode_requests_response(r.content)
    serializer = ArticleSerializer(data=raw_data, many=True)
    serializer.is_valid(raise_exception=True)
    return serializer.validated_data
fb2b59cc301890b8c6f4c6c115b6f08f4a4cbe72
28,060
import numpy


def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5,
             epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0,
             callback=None, preconditioner=None):
    """
    Unconstrained minimization of a function using the Newton-CG method.

    Parameters
    ----------
    f : callable ``f(x, *args)``
        Objective function to be minimized.
    x0 : ndarray
        Initial guess.
    fprime : callable ``f'(x, *args)``
        Gradient of f.
    fhess_p : callable ``fhess_p(x, p, *args)``, optional
        Function which computes the Hessian of f times an
        arbitrary vector, p.
    fhess : callable ``fhess(x, *args)``, optional
        Function to compute the Hessian matrix of f.
    args : tuple, optional
        Extra arguments passed to f, fprime, fhess_p, and fhess
        (the same set of extra arguments is supplied to all of
        these functions).
    epsilon : float or ndarray, optional
        If fhess is approximated, use this value for the step size.
    callback : callable, optional
        An optional user-supplied function which is called after
        each iteration. Called as callback(xk), where xk is the
        current parameter vector.
    avextol : float, optional
        Convergence is assumed when the average relative error in
        the minimizer falls below this amount.
    maxiter : int, optional
        Maximum number of iterations to perform.
    full_output : bool, optional
        If True, return the optional outputs.
    disp : bool, optional
        If True, print convergence message.
    retall : bool, optional
        If True, return a list of results at each iteration.
    preconditioner : numpy.ndarray, optional
        Used for preconditioning CG (PCG); a one-dimensional array holding
        the diagonal entries of the preconditioning matrix M. Defaults to
        the identity (an array of ones).

    Returns
    -------
    xopt : ndarray
        Parameters which minimize f, i.e. ``f(xopt) == fopt``.
    fopt : float
        Value of the function at xopt, i.e. ``fopt = f(xopt)``.
    fcalls : int
        Number of function calls made.
    gcalls : int
        Number of gradient calls made.
    hcalls : int
        Number of hessian calls made.
    warnflag : int
        Warnings generated by the algorithm.
        1 : Maximum number of iterations exceeded.
    allvecs : list
        The result at each iteration, if retall is True (see below).

    See also
    --------
    minimize: Interface to minimization algorithms for multivariate
        functions. See the 'Newton-CG' `method` in particular.

    Notes
    -----
    Only one of `fhess_p` or `fhess` need to be given.  If `fhess`
    is provided, then `fhess_p` will be ignored.  If neither `fhess`
    nor `fhess_p` is provided, then the hessian product will be
    approximated using finite differences on `fprime`. `fhess_p`
    must compute the hessian times an arbitrary vector. If it is not
    given, finite-differences on `fprime` are used to compute
    it.

    Newton-CG methods are also called truncated Newton methods. This
    function differs from scipy.optimize.fmin_tnc because

    1. scipy.optimize.fmin_ncg is written purely in python using numpy
        and scipy while scipy.optimize.fmin_tnc calls a C function.
    2. scipy.optimize.fmin_ncg is only for unconstrained minimization
        while scipy.optimize.fmin_tnc is for unconstrained minimization
        or box constrained minimization. (Box constraints give
        lower and upper bounds for each variable separately.)

    References
    ----------
    Wright & Nocedal, 'Numerical Optimization', 1999, pg. 140.

    """
    # Default preconditioner is the identity diagonal (plain CG behaviour).
    if preconditioner is None:
        preconditioner = numpy.ones(x0.shape[0])
    opts = {'xtol': avextol,
            'eps': epsilon,
            'maxiter': maxiter,
            'disp': disp,
            'return_all': retall,
            'preconditioner': preconditioner,}

    # Delegate to the preconditioned Newton-CG implementation.
    res = _minimize_newton_pcg(f, x0, args, fprime, fhess, fhess_p,
                               callback=callback, **opts)
    # res = _minimize_newtoncg(f, x0, args, fprime, fhess, fhess_p,callback=callback, **opts)

    if full_output:
        retlist = (res['x'], res['fun'], res['nfev'], res['njev'],
                   res['nhev'], res['status'])
        if retall:
            retlist += (res['allvecs'], )
        return retlist
    else:
        if retall:
            return res['x'], res['allvecs']
        else:
            return res['x']
28,061
def add_missing_flow_by_fields(flowby_partial_df, flowbyfields):
    """
    Add in missing fields to have a complete and ordered dataframe.

    :param flowby_partial_df: Either flowbyactivity or flowbysector df
    :param flowbyfields: Either flow_by_activity_fields, flow_by_sector_fields,
        or flow_by_sector_collapsed_fields -- a mapping of field name to a
        sequence whose first element is a dict with a 'dtype' entry
    :return: df with every declared field present, cast to its declared
        dtype, and columns ordered as in flowbyfields
    """
    # Single pass per field: create it (filled with None) if missing, then
    # cast to the declared dtype (the original made two separate passes).
    for field, specs in flowbyfields.items():
        if field not in flowby_partial_df.columns:
            flowby_partial_df[field] = None
        flowby_partial_df[field] = flowby_partial_df[field].astype(specs[0]['dtype'])
    # Resort so the column order matches the field-definition order; the
    # keys are materialized into a list for robust pandas indexing (the
    # original passed a dict_keys view directly).
    return flowby_partial_df[list(flowbyfields)]
49eb8810c7c2c4e852a40aa86e2d2d2a8506f253
28,062
from datetime import datetime


def calcular_diferencia_dias(fin_dia):
    """Return the whole-day difference between a 'YYYY-MM-DD' date and now."""
    now = datetime.now()
    end_date = datetime.strptime(str(fin_dia), '%Y-%m-%d')
    return abs(end_date - now).days
41b732f3bb09d2deca4be034273a5fed74971386
28,063
def matrix_base_mpl(matrix, positions, substitutions, conservation=None,
                    secondary_structure=None, wildtype_sequence=None,
                    min_value=None, max_value=None, ax=None,
                    colormap=plt.cm.RdBu_r,
                    colormap_conservation=plt.cm.Oranges,
                    na_color="#bbbbbb", title=None,
                    position_label_size=8, substitution_label_size=8,
                    show_colorbar=True, colorbar_indicate_bounds=False,
                    show_wt_char=True, label_filter=None,
                    secondary_structure_style=None):
    """
    Matplotlib-based mutation matrix plotting. This is the base plotting
    function, see plot_mutation_matrix() for more convenient access.

    Parameters
    ----------
    matrix : np.array(float)
        2D numpy array with values for individual single mutations
        (first axis: position, second axis: substitution)
    positions : list(int) or list(str)
        List of positions along x-axis of matrix
        (length has to agree with first dimension of matrix)
    substitutions : list(str)
        List of substitutions along y-axis of matrix
        (length has to agree with second dimension of matrix)
    conservation : list(float) or np.array(float), optional (default: None)
        Positional conservation along sequence. Values must range
        between 0 (not conserved) and 1 (fully conserved). If given,
        will plot conservation along bottom of mutation matrix.
    secondary_structure : str or list(str), optional (default: None)
        Secondary structure for each position along sequence. If given,
        will draw secondary structure cartoon on top of matrix.
    wildtype_sequence : str or list(str), optional (default: None)
        Sequence of wild-type symbols. If given, will indicate wild-type
        entries in matrix with a dot.
    min_value : float, optional (default: None)
        Threshold colormap at this minimum value. If None, defaults to
        minimum value in matrix; if max_value is also None, defaults to
        -max(abs(matrix))
    max_value : float, optional (default: None)
        Threshold colormap at this maximum value. If None, defaults to
        maximum value in matrix; if min_value is also None, defaults to
        max(abs(matrix))
    ax : Matplotlib axes object, optional (default: None)
        Draw mutation matrix on this axis. If None, new figure and axis
        will be created.
    colormap : matplotlib colormap object, optional (default: plt.cm.RdBu_r)
        Maps mutation effects to colors of matrix cells.
    colormap_conservation: matplotlib colormap object, optional
        (default: plt.cm.Oranges)
        Maps sequence conservation to colors of conservation vector plot.
    na_color : str, optional (default: "#bbbbbb")
        Color for missing values in matrix
    title : str, optional (default: None)
        If given, set title of plot to this value.
    position_label_size : int, optional (default: 8)
        Font size of x-axis labels.
    substitution_label_size : int, optional (default: 8)
        Font size of y-axis labels.
    show_colorbar : bool, optional (default: True)
        If True, show colorbar next to matrix.
    colorbar_indicate_bounds : bool, optional (default: False)
        If True, add greater-than/less-than signs to limits of colorbar
        to indicate that colors were thresholded at min_value/max_value
    show_wt_char : bool, optional (default: True)
        Display wild-type symbol in axis labels
    label_filter : function, optional (default: None)
        Function with one argument (integer) that determines if a certain
        position label will be printed (if label_filter(pos)==True) or not.
    secondary_structure_style : dict, optional (default: None)
        Pass on as **kwargs to
        evcouplings.visualize.pairs.secondary_structure_cartoon to determine
        appearance of secondary structure cartoon.

    Returns
    -------
    ax : Matplotlib axes object
        Axes on which mutation matrix was drawn
    """
    LINEWIDTH = 0.0
    LABEL_X_OFFSET = 0.55
    LABEL_Y_OFFSET = 0.45

    def _draw_rect(x_range, y_range, linewidth):
        # Outline rectangle around one of the plotted panels.
        r = plt.Rectangle(
            (min(x_range), min(y_range)),
            max(x_range) - min(x_range),
            max(y_range) - min(y_range),
            fc='None', linewidth=linewidth
        )
        ax.add_patch(r)

    matrix_width = matrix.shape[0]
    matrix_height = len(substitutions)

    # mask NaN entries in mutation matrix
    matrix_masked = np.ma.masked_where(np.isnan(matrix), matrix)

    # figure out maximum and minimum values for color map
    # (symmetric around zero when neither bound is given)
    if max_value is None and min_value is None:
        max_value = np.abs(matrix_masked).max()
        min_value = -max_value
    elif min_value is None:
        min_value = matrix_masked.min()
    elif max_value is None:
        max_value = matrix_masked.max()

    # set NaN color value in colormaps; deepcopy so the caller's shared
    # colormap objects are not mutated
    colormap = deepcopy(colormap)
    colormap.set_bad(na_color)
    colormap_conservation = deepcopy(colormap_conservation)
    colormap_conservation.set_bad(na_color)

    # determine size of plot (depends on how much tracks
    # with information we will add)
    num_rows = (
        len(substitutions) +
        (conservation is not None) +
        (secondary_structure is not None)
    )
    ratio = matrix_width / float(num_rows)

    # create axis, if not given
    if ax is None:
        fig = plt.figure(figsize=(ratio * 5, 5))
        ax = fig.gca()

    # make square-shaped matrix cells
    ax.set_aspect("equal", "box")

    # define matrix coordinates
    # always add +1 because coordinates are used by
    # pcolor(mesh) as beginning and start of rectangles
    x_range = np.array(range(matrix_width + 1))
    y_range = np.array(range(matrix_height + 1))
    y_range_avg = range(-2, 0)
    x_range_avg = range(matrix_width + 1, matrix_width + 3)
    y_range_cons = np.array(y_range_avg) - 1.5

    # coordinates for text labels (fixed axis)
    x_left_subs = min(x_range) - 1
    x_right_subs = max(x_range_avg) + 1

    if conservation is None:
        y_bottom_res = min(y_range_avg) - 0.5
    else:
        y_bottom_res = min(y_range_cons) - 0.5

    # coordinates for additional annotation
    y_ss = max(y_range) + 2

    # 1) main mutation matrix
    X, Y = np.meshgrid(x_range, y_range)
    cm = ax.pcolormesh(
        X, Y, matrix_masked.T, cmap=colormap, vmax=max_value, vmin=min_value
    )
    _draw_rect(x_range, y_range, LINEWIDTH)

    # 2) mean column effect (bottom "subplot")
    mean_pos = np.mean(matrix_masked, axis=1)[:, np.newaxis]
    X_pos, Y_pos = np.meshgrid(x_range, y_range_avg)
    ax.pcolormesh(
        X_pos, Y_pos, mean_pos.T, cmap=colormap, vmax=max_value, vmin=min_value
    )
    _draw_rect(x_range, y_range_avg, LINEWIDTH)

    # 3) amino acid average (right "subplot")
    mean_aa = np.mean(matrix_masked, axis=0)[:, np.newaxis]
    X_aa, Y_aa = np.meshgrid(x_range_avg, y_range)
    ax.pcolormesh(X_aa, Y_aa, mean_aa, cmap=colormap, vmax=max_value, vmin=min_value)
    _draw_rect(x_range_avg, y_range, LINEWIDTH)

    # mark wildtype residues
    if wildtype_sequence is not None:
        subs_list = list(substitutions)
        for i, wt in enumerate(wildtype_sequence):
            # skip unspecified entries
            if wt is not None and wt != "":
                marker = plt.Circle(
                    (x_range[i] + 0.5, y_range[subs_list.index(wt)] + 0.5),
                    0.1, fc='k', axes=ax
                )
                ax.add_patch(marker)

    # put labels along both axes of matrix

    # x-axis (positions)
    for i, pos in zip(x_range, positions):
        # filter labels, if selected
        if label_filter is not None and not label_filter(pos):
            continue

        # determine what position label should be
        if show_wt_char and wildtype_sequence is not None:
            wt_symbol = wildtype_sequence[i]
            if type(pos) is tuple and len(pos) == 2:
                # label will be in format segment AA pos, eg B_1 A 151
                label = "{} {} {}".format(pos[0], wt_symbol, pos[1])
            else:
                label = "{} {}".format(wt_symbol, pos)
        else:
            if type(pos) is tuple:
                label = " ".join(map(str, pos))
            else:
                label = str(pos)

        ax.text(
            i + LABEL_X_OFFSET, y_bottom_res, label,
            size=position_label_size,
            horizontalalignment='center',
            verticalalignment='top',
            rotation=90
        )

    # y-axis (substitutions)
    for j, subs in zip(y_range, substitutions):
        # put on lefthand side of matrix...
        ax.text(
            x_left_subs, j + LABEL_Y_OFFSET, subs,
            size=substitution_label_size,
            horizontalalignment='center',
            verticalalignment='center'
        )
        # ...and on right-hand side of matrix
        ax.text(
            x_right_subs, j + LABEL_Y_OFFSET, subs,
            size=substitution_label_size,
            horizontalalignment='center',
            verticalalignment='center'
        )

    # draw colorbar
    if show_colorbar:
        cb = plt.colorbar(
            cm, ticks=[min_value, max_value], shrink=0.3,
            pad=0.15 / ratio, aspect=8
        )
        if colorbar_indicate_bounds:
            # unicode <= / >= to signal thresholded color limits
            symbol_min, symbol_max = u"\u2264", u"\u2265"
        else:
            symbol_min, symbol_max = "", ""
        cb.ax.set_yticklabels(
            [
                "{symbol} {value:>+{width}.1f}".format(
                    symbol=s, value=v, width=0
                )
                for (v, s) in [(min_value, symbol_min), (max_value, symbol_max)]
            ]
        )
        cb.ax.xaxis.set_ticks_position("none")
        cb.ax.yaxis.set_ticks_position("none")
        cb.outline.set_linewidth(0)

    # plot secondary structure cartoon
    if secondary_structure is not None:
        # if no style given for secondary structure, set default
        if secondary_structure_style is None:
            secondary_structure_style = {
                "width": 0.8,
                "line_width": 2,
                "strand_width_factor": 0.5,
                "helix_turn_length": 2,
                "min_sse_length": 2,
            }

        start, end, sse = find_secondary_structure_segments(secondary_structure)
        secondary_structure_cartoon(
            sse, sequence_start=start, sequence_end=end, center=y_ss, ax=ax,
            **secondary_structure_style
        )

    # plot conservation
    if conservation is not None:
        conservation = np.array(conservation)[:, np.newaxis]
        cons_masked = np.ma.masked_where(np.isnan(conservation), conservation)
        X_cons, Y_cons = np.meshgrid(x_range, y_range_cons)
        ax.pcolormesh(
            X_cons, Y_cons, cons_masked.T, cmap=colormap_conservation,
            vmax=1, vmin=0
        )
        _draw_rect(x_range, y_range_cons, LINEWIDTH)

    # remove chart junk
    for line in ['top', 'bottom', 'right', 'left']:
        ax.spines[line].set_visible(False)

    ax.xaxis.set_ticks_position("none")
    ax.yaxis.set_ticks_position("none")
    plt.setp(ax.get_xticklabels(), visible=False)
    plt.setp(ax.get_yticklabels(), visible=False)

    if title is not None:
        ax.set_title(title)

    return ax
ef661fd556b3ba2e4c313e032e8ef3be532bb73d
28,064
def gaussian_laplace(input, sigma, output=None, mode="reflect", cval=0.0,
                     **kwargs):
    """Multi-dimensional Laplace filter using Gaussian second derivatives.

    Args:
        input (cupy.ndarray): The input array.
        sigma (scalar or sequence of scalar): Standard deviations for each
            axis of Gaussian kernel. A single value applies to all axes.
        output (cupy.ndarray, dtype or None): The array in which to place
            the output. Default is same dtype as the input.
        mode (str): How array borders are handled (``'reflect'``,
            ``'constant'``, ``'nearest'``, ``'mirror'``, ``'wrap'``).
            Default is ``'reflect'``.
        cval (scalar): Value to fill past edges of input if mode is
            ``'constant'``. Default is ``0.0``.
        kwargs (dict, optional): Extra keyword arguments forwarded to
            ``gaussian_filter()``.

    Returns:
        cupy.ndarray: The result of the filtering.

    .. seealso:: :func:`scipy.ndimage.gaussian_laplace`

    .. note::
        When the output data type is integral (or when no output is provided
        and input is integral) the results may not perfectly match SciPy
        due to floating-point rounding of intermediate results.
    """
    def _axis_second_derivative(input, axis, output, mode, cval):
        # Second-order Gaussian derivative along a single axis.
        orders = [0] * input.ndim
        orders[axis] = 2
        return gaussian_filter(input, sigma, orders, output, mode, cval,
                               **kwargs)

    return generic_laplace(input, _axis_second_derivative, output, mode, cval)
6b5f184b658dd446a4f3ec7de0ee126f33663b0c
28,065
def get_commits_after_forkpoint(main_dir, base_sha, head_sha, repo_name, log, secure_repo_name):
    """List commit hashes between base_sha and head_sha

    :param main_dir: model directory
    :param base_sha: base SHA i.e. point of the history when the branch started
                     diverging from the main branch.
    :param head_sha: head SHA i.e. current tip of the branch
    :param repo_name: Repository folder name on disk
    :param log: logger object
    :param secure_repo_name: repo name in secure form
    :return: tuple (repo_id or None, list of commit sha1 hashes)
    """
    forkdata_dataframe = read_attrs(main_dir + '/git/attr_fork_points.csv.zip', sep='\t')
    repo_id = None
    commits = []
    for repo, repoinfo in forkdata_dataframe.to_dict().items():
        log.debug('Repo info for {} is {}'.format(repo, repoinfo))
        # Not every row carries an 'id'; guard the access once so neither
        # branch below can raise KeyError (the old code crashed when a
        # fork-point row without 'id' had a mismatching fork point).
        repo_name_from_file = repoinfo['id'].split('/')[-1] if 'id' in repoinfo else None
        if repo_name_from_file is not None and repo_name_from_file in (repo_name, secure_repo_name):
            repo_id = repoinfo['id']
            git_dir = find_git_dir(main_dir, repoinfo['id'])
            # NOTE(review): base_sha/head_sha are interpolated into a shell
            # command; they are expected to always be git SHAs — confirm
            # callers never pass untrusted strings here.
            cmd = 'git -C "{}" rev-list "{}..{}"'.format(git_dir, base_sha, head_sha)
            commits = [c.strip() for c in delegator.run(cmd).out.splitlines()]
        elif 'fork_point' in repoinfo:
            if repoinfo['fork_point'] == base_sha:
                commits = repoinfo['commits_after_fork_point'].split(';')
            elif repo_name_from_file in (repo_name, secure_repo_name):
                # TODO: Sometimes the fork point SHA does not match with base SHA.
                # Workaround: Repo name is also very reliable way to ensure we
                # use the fork info from the correct line.
                # (The two previously duplicated branches for repo_name and
                # secure_repo_name are merged: the matched name is logged.)
                log.warning('Repository {} base SHA {} and fork point SHA '
                            '{} do not match.'.format(repo_name_from_file,
                                                      base_sha,
                                                      repoinfo['fork_point']))
                commits = repoinfo['commits_after_fork_point'].split(';')
            else:
                log.debug("Looking for {} or {}, found {} (main_dir {}, repoinfo {})".format(
                    repo_name, secure_repo_name, repo_name_from_file, main_dir, repoinfo))
    return repo_id, commits
6b8f8609adb80903215fc11f08380db3f8deffcd
28,066
def perimeter_mask(image, corner_fraction=0.035):
    """
    Create boolean mask for image with a perimeter marked as True.

    The perimeter is the same width as the corners created by corner_mask.

    Args:
        image : the image to work with
        corner_fraction: determines the width of the perimeter

    Returns:
        boolean 2D array with the perimeter marked True
    """
    v, h = image.shape
    # Clamp the border width to at least one pixel: a zero width made the
    # slices below degenerate (``[:, -0:]`` selects the whole axis) and
    # marked the entire image True.
    n = max(1, int(v * corner_fraction))
    m = max(1, int(h * corner_fraction))

    # np.bool was removed in NumPy 1.24; the builtin bool is the supported
    # spelling and produces the same dtype.
    the_mask = np.full_like(image, False, dtype=bool)
    the_mask[:, :m] = True
    the_mask[:, -m:] = True
    the_mask[:n, :] = True
    the_mask[-n:, :] = True
    return the_mask
afc755dccfffa9ff68e060a6af3da0d38d323178
28,067
def vgg13_bn(**kwargs):
    """VGG 13-layer model (configuration "B") with batch normalization"""
    # Build the configuration-"B" feature extractor with BN layers enabled,
    # then hand it to the VGG head; kwargs are forwarded untouched.
    features = make_layers(cfg['B'], batch_norm=True)
    return VGG(features, **kwargs)
1fa3ffdbb301b55a48fc1912baab84006705e15f
28,068
import re


def convert_version_to_tuple(version: str) -> "VersionTuple":
    """
    Convert version info from string representation to tuple representation.

    The tuple representation is convenient for direct comparison.

    Raises:
        ValueError: if ``version`` is not of the form ``"<major>.<minor>"``.
    """
    # The pattern uses no features beyond the stdlib, so `re` replaces the
    # third-party `regex` dependency; fullmatch and named groups behave
    # identically for this expression.
    m = re.fullmatch(r"(?P<major>\d+)\.(?P<minor>\d+)", version)
    if not m:
        raise ValueError(f"{version} is not a valid version")
    major, minor = m.group("major", "minor")
    # The annotation is a string literal so the module stays importable
    # when the VersionTuple alias (defined elsewhere) is out of scope.
    version_tuple = (int(major), int(minor))
    return version_tuple
6c197988ae2c98481f9b16f90f9ae3f7072ac7c8
28,069
from typing import Callable


def SU3GradientTF(
    f: Callable[[Tensor], Tensor],
    x: Tensor,
) -> tuple[Tensor, Tensor]:
    """Compute the gradient of ``f`` at ``x`` using TensorFlow GradientTape.

    ``y = f(x)`` must be a real scalar value.

    Returns:
      - (f(x), D), where D = T^a D^a = T^a ∂_a f(x)

    NOTE: Use real vector derivatives, e.g.
      D^a = ∂_a f(x) = ∂_t f(exp(T^a) x) |_{t=0}
    """
    # Differentiate with respect to the 8 su(3) tangent coordinates,
    # evaluated at the identity (all-zero vector).
    tangent = tf.zeros(8)
    with tf.GradientTape(watch_accessed_variables=False) as tape:
        tape.watch(tangent)
        value = f(tf.linalg.matmul(exp(su3fromvec(tangent)), x))
    grad = tape.gradient(value, tangent)
    return value, grad
93b029e0a2854e651d4c6ea5995f8d952f9a64e6
28,070
def create_app(config):
    """Flask application factory.

    Args:
        config: configuration for the factory.
            NOTE(review): currently unused — the app is created with
            defaults only. Confirm whether it should be applied, e.g.
            via ``app.config.from_object(config)``.

    Returns:
        Flask Application with BrazilDataCubeDB extension prepared.
    """
    app = Flask(__name__)

    # Registers the BrazilDataCubeDB extension on the app (side effect of
    # construction); the instance itself is not retained here.
    BrazilDataCubeDB(app)

    return app
d8ba6d7306508e4a55f9f3dbee5d17df16c56820
28,071
import string


def genpass_comprehension(length=8, chars=string.ascii_letters + string.digits):
    """Generate a password of ``length`` characters drawn from ``chars``.

    Implemented with a list comprehension (cf. the sibling variants).

    Fix: ``string.letters`` does not exist in Python 3 — the default
    argument raised AttributeError at import time; ``string.ascii_letters``
    is the portable spelling.

    NOTE(review): ``choice`` comes from ``random``, which is not suitable
    for security-sensitive passwords — ``secrets.choice`` would be, if
    that is the intended use.
    """
    return ''.join([choice(chars) for _ in range(length)])
d77b89e2872eef92390d08f555adbb52f9da1c34
28,072
import functools


def typed(*types):
    """Type annotation. The final type is the output type.

    ``typed(T1, ..., Tn, R)`` returns a decorator: each positional
    argument ``i`` of the wrapped function is converted through ``Ti``
    before the call, and the result is converted through ``R``.

    Generalized from the original fixed 0/1/2-argument decorators to any
    arity (the former ``NotImplementedError`` for more than two argument
    types is gone); existing uses behave identically.

    Raises:
        SyntaxError: if no types are given (kept for backward
            compatibility with the original error contract).
        TypeError: at call time, when the wrapped function receives the
            wrong number of positional arguments.
    """
    if len(types) < 1:
        raise SyntaxError('Too few arguments: typed{}'.format(types))
    result_type = types[-1]
    arg_types = types[:-1]

    def decorator(fun):
        @functools.wraps(fun)
        def typed_fun(*args):
            # Enforce the arity the type list promises, mirroring the
            # TypeError a fixed-signature wrapper would have raised.
            if len(args) != len(arg_types):
                raise TypeError(
                    '{}() takes {} positional arguments ({} given)'.format(
                        fun.__name__, len(arg_types), len(args)))
            converted = (t(a) for t, a in zip(arg_types, args))
            return result_type(fun(*converted))
        return typed_fun

    return decorator
90f100bebd5778d36eee1ad04b7c831b003ce604
28,073
from typing import Tuple


def insert_linebreaks(
        input_fragments: StyleAndTextTuples,
        max_line_width: int,
        truncate_long_lines: bool = True) -> Tuple[StyleAndTextTuples, int]:
    """Add line breaks at max_line_width if truncate_long_lines is True.

    Returns input_fragments with each character as it's own formatted text
    tuple, plus the resulting line count.

    NOTE(review): ``fragments[-1]`` below raises IndexError when
    ``input_fragments`` is empty — callers presumably never pass an empty
    list; confirm.
    """
    fragments: StyleAndTextTuples = []
    # NOTE(review): total_width is accumulated but never read; it looks
    # like leftover bookkeeping.
    total_width = 0
    line_width = 0
    line_height = 0
    new_break_inserted = False

    for item in input_fragments:
        # Check for non-printable fragment; doesn't affect the width.
        if '[ZeroWidthEscape]' in item[0]:
            fragments.append(item)
            continue

        new_item_style = item[0]

        # For each character in the fragment
        for character in item[1]:
            # Get the width respecting double width characters
            width = get_cwidth(character)
            # Increment counters
            total_width += width
            line_width += width
            # Save this character as it's own fragment
            if line_width <= max_line_width:
                # Skip the character only when it is a '\n' immediately
                # following a break this function just inserted.
                if not new_break_inserted or character != '\n':
                    fragments.append((new_item_style, character))
                    # Was a line break just inserted?
                    if character == '\n':
                        # Increase height
                        line_height += 1
                new_break_inserted = False
            # Reset width to zero even if we are beyond the max line width.
            if character == '\n':
                line_width = 0
            # Are we at the limit for this line?
            elif line_width == max_line_width:
                # Insert a new linebreak fragment
                fragments.append((new_item_style, '\n'))
                # Increase height
                line_height += 1
                # Set a flag for skipping the next character if it is also a
                # line break.
                new_break_inserted = True
                if not truncate_long_lines:
                    # Reset line width to zero
                    line_width = 0

    # Check if the string ends in a final line break
    last_fragment_style = fragments[-1][0]
    last_fragment_text = fragments[-1][1]
    if not last_fragment_text.endswith('\n'):
        # Add a line break if none exists
        fragments.append((last_fragment_style, '\n'))
        line_height += 1
    return fragments, line_height
ec9faf8ff80e3500487634b759a136dc2deca684
28,074
def score_reactant_combination(candidate_combination, scoring_fcn):
    """Generate a score for a combination of reactant candidates.

    Each candidate is a (compound_id, score) pair. Under the
    ``"similarity"`` criterion the combination score is the mean of the
    per-candidate scores; any other criterion yields 0.0.
    """
    # The compound IDs are always returned, regardless of the criterion.
    reactant_ids = [candidate[0] for candidate in candidate_combination]

    if scoring_fcn == "similarity":
        combination_score = np.mean(
            [candidate[1] for candidate in candidate_combination])
    else:
        combination_score = 0.0

    return reactant_ids, combination_score
715a21bf24af0a60ba3ea421b7bf8dcebcca17fc
28,075
import sys


def main(argv=None):
    """Provide the main entry point.

    Parses the command line, loads the charmcraft configuration, runs the
    selected command and maps every outcome to a process return code
    (0 on success/help, 1 on argument errors, interruption or internal
    errors, and the error's own retcode for CraftError).
    """
    help_builder.init("charmcraft", GENERAL_SUMMARY, COMMAND_GROUPS)
    emit.init(EmitterMode.NORMAL, "charmcraft", f"Starting charmcraft version {__version__}")

    if argv is None:
        argv = sys.argv

    extra_global_options = [
        GlobalArgument(
            "project_dir",
            "option",
            "-p",
            "--project-dir",
            "Specify the project's directory (defaults to current)",
        ),
    ]

    # process
    try:
        setup_parts()

        # load the dispatcher and put everything in motion
        dispatcher = Dispatcher(COMMAND_GROUPS, extra_global_options)
        global_args = dispatcher.pre_parse_args(argv[1:])
        loaded_config = config.load(global_args["project_dir"])
        command = dispatcher.load_command(loaded_config)
        if command.needs_config and not loaded_config.project.config_provided:
            raise ArgumentParsingError(
                "The specified command needs a valid 'charmcraft.yaml' configuration file (in "
                "the current directory or where specified with --project-dir option); see "
                "the reference: https://discourse.charmhub.io/t/charmcraft-configuration/4138"
            )
        retcode = dispatcher.run()
    except ArgumentParsingError as err:
        print(err, file=sys.stderr)  # to stderr, as argparse normally does
        emit.ended_ok()
        retcode = 1
    except ProvideHelpException as err:
        print(err, file=sys.stderr)  # to stderr, as argparse normally does
        emit.ended_ok()
        retcode = 0
    except CraftError as err:
        emit.error(err)
        retcode = err.retcode
    except KeyboardInterrupt as exc:
        error = CraftError("Interrupted.")
        error.__cause__ = exc
        emit.error(error)
        retcode = 1
    except Exception as err:
        # Last-resort guard: anything unexpected becomes an internal error
        # with the original exception chained as the cause.
        error = CraftError(f"charmcraft internal error: {err!r}")
        error.__cause__ = err
        emit.error(error)
        retcode = 1
    else:
        emit.ended_ok()
        # dispatcher.run() may return None to mean success.
        if retcode is None:
            retcode = 0

    return retcode
6adf05b9883b698a69c3845df2f58f48953e98e8
28,076
import logging
import os
import json


def xmind_testsuite_to_json_file(xmind_file):
    """Convert XMind file to a testsuite json file.

    :param xmind_file: path to the source XMind file
    :return: path of the ``*_testsuite.json`` file; an already-existing
        file is returned as-is without regenerating it
    """
    xmind_file = get_absolute_path(xmind_file)
    logging.info('Start converting XMind file(%s) to testsuites json file...', xmind_file)
    testsuites = get_xmind_testsuite_list(xmind_file)
    # os.path.splitext is robust to input names that do not end in
    # '.xmind'; the previous fixed slice ``xmind_file[:-6]`` silently
    # mangled such names.
    testsuite_json_file = os.path.splitext(xmind_file)[0] + '_testsuite.json'

    if os.path.exists(testsuite_json_file):
        logging.info('The testsuite json file already exists, return it directly: %s',
                     testsuite_json_file)
        return testsuite_json_file

    with open(testsuite_json_file, 'w', encoding='utf8') as f:
        f.write(json.dumps(testsuites, indent=4, separators=(',', ': ')))
    logging.info('Convert XMind file(%s) to a testsuite json file(%s) successfully!',
                 xmind_file, testsuite_json_file)
    return testsuite_json_file
d8c7c7aec99c76e7e2d1195dd4f557e68be3adfa
28,077
def get_named_entities(df):
    """
    Count the named entities that are neither A nor B.
    Hopefully this correlates with class "Neither".

    :param df: competition data with one extra field spacy_nlp_doc:
        precomputed nlp(text)
    :return: DataFrame with a single "named_ent" column of counts
    """
    named_df = pd.DataFrame(0, index=df.index, columns=["named_ent"])
    with timer('Extracting named entities'):
        # NOTE(review): positional-style df.loc[i, ...] assumes a default
        # RangeIndex — confirm against callers.
        for i in range(len(df)):
            doc = df.loc[i, "spacy_nlp_doc"]
            A = df.loc[i, "A"]
            B = df.loc[i, "B"]
            # Count PERSON entities that are not A or B.  spaCy's entities
            # are spans, not tokens, e.g. "Cheryl Cassidy" is one entity.
            # (The previously fetched A/B/Pronoun offsets were unused and
            # have been removed.)
            named_df.loc[i, "named_ent"] = sum(
                1 for ent in doc.ents
                if ent.label_ == "PERSON" and ent.text not in (A, B)
            )
    return named_df
65469fe65c8808943343d952fd82ebe62bb9df97
28,078
def normalize(vectors):
    """
    Normalize a vector or a set of row vectors.

    The length of the returned vectors will be unity.

    Parameters
    ----------
    vectors : np.ndarray
        A single 1D vector or a 2D array of row vectors, of any length
        except zero.
    """
    # axis=-1 collapses the last axis in both the 1D and 2D cases, and
    # keepdims keeps the divisor broadcastable against the input, so one
    # expression covers both shapes.
    lengths = np.linalg.norm(vectors, axis=-1, keepdims=True)
    return vectors / lengths
839104d17a3ccbfd1191474bf95076445b4b0464
28,079
def get_all_requests(current_user):
    """Gets all requests as a JSON response.

    ``current_user`` is accepted but not consulted here — presumably it is
    required by the route's auth decorator calling convention.
    """
    # request_model.requests is a mapping; only its values (the request
    # records) are serialized.
    return jsonify(list(request_model.requests.values()))
bcadfb936826b3a33f809cc95af1a991c5bf741e
28,080
def RunManifestExe(target, source, env):
    """Calls RunManifest for updating an executable (resource_num=1).

    resource_num=1 presumably selects the Win32 manifest resource ID
    conventionally used for executables (DLLs use 2) — confirm against
    RunManifest's contract.
    """
    return RunManifest(target, source, env, resource_num=1)
629ffccb7b163514bd91c790894bdfec3683110e
28,081
import torch


def dist_reduce_tensor(tensor, dst=0):
    """Reduce to specific rank.

    Sums ``tensor`` across all ranks onto rank ``dst`` (in place) and
    divides by the world size there, so rank ``dst`` ends up with the
    mean. Per torch.distributed reduce semantics, only rank ``dst`` holds
    a meaningful result afterwards. The input tensor is mutated.
    """
    world_size = get_world_size()
    # Single process: nothing to reduce.
    if world_size < 2:
        return tensor
    with torch.no_grad():
        dist.reduce(tensor, dst=dst)
        # Only the destination rank converts the sum into a mean.
        if get_rank() == dst:
            tensor.div_(world_size)
    return tensor
d64d153145bffaf454dd3f46154db156b600bac3
28,082
def upload_blob(bucket_name, source_file_name, destination_blob_name):
    """Uploads a file to the bucket.

    :param bucket_name: name of the GCS bucket.
    :param source_file_name: despite the name, this is passed to
        ``blob.upload_from_file``, which expects a *file object*.
        NOTE(review): if callers actually pass a path string, this should
        be ``upload_from_filename`` instead — confirm against call sites.
    :param destination_blob_name: target object name in the bucket.
    :return: the destination blob name.
    """
    storage_client = storage.Client()
    bucket = storage_client.get_bucket(bucket_name)
    blob = bucket.blob(destination_blob_name)

    blob.upload_from_file(source_file_name)

    print('File {} uploaded to {}.'.format(
        source_file_name,
        destination_blob_name))
    return destination_blob_name
b63d6bb0ede33d68d684b98968e3e94efbd0c5df
28,083
def get_lines(matrix, loc):
    """Returns lines that pass though `loc`. Matrix can be indices.

    Args:
      matrix: a N by N matrix representing the board
      loc: a tuple of loc coordinates

    Returns:
      Numerical values on the horizontal, vertical, and diagonal lines
      that pass through loc.

    Examples 1:
    >>> m = np.array([[0, 0, 1],
    >>>               [1, 2, 4],
    >>>               [6, 3, 2]])
    >>> get_lines(m, (0, 1))
    (array([0, 2, 3]), array([0, 0, 1]), array([0, 4]), array([0, 1]))

    Example 2:
    >>> m.shape
    (3, 3)
    >>> ind = np.indices(m.shape)
    >>> ind  # ind.shape = (2,3,3)
    array([[[0, 0, 0],
            [1, 1, 1],
            [2, 2, 2]],
           [[0, 1, 2],
            [0, 1, 2],
            [0, 1, 2]]])
    >>> ind2 = np.moveaxis(ind, 0, -1)
    >>> ind2.shape
    (3, 3, 2)
    >>> ind2
    array([[[0, 0],
            [0, 1],
            [0, 2]],
           [[1, 0],
            [1, 1],
            [1, 2]],
           [[2, 0],
            [2, 1],
            [2, 2]]])
    >>> get_lines(ind2, (0,1))
    (array([[0, 1],
            [1, 1],
            [2, 1]]),
     array([[0, 0],
            [0, 1],
            [0, 2]]),
     array([[0, 1],
            [1, 2]]),
     array([[0, 1],
            [1, 0]]))
    """
    i, j = loc
    # Flatten the first two axes so diagonals can be taken as strided
    # slices of the flat array (stride h+1 for '\', h-1 for '/').
    flat = matrix.reshape(-1, *matrix.shape[2:])

    w = matrix.shape[0]
    h = matrix.shape[1]

    def flat_pos(pos):
        """Returns the flattened index of element (i,j)."""
        return pos[0] * h + pos[1]

    pos = flat_pos((i, j))

    # index for flipping matrix across different axis
    ic = w - 1 - i
    jc = h - 1 - j

    # top left: where the '\' diagonal through (i, j) meets the board edge
    tl = (i - j, 0) if i > j else (0, j - i)
    tl = flat_pos(tl)

    # bottom left: where the '/' diagonal meets the board edge
    bl = (w - 1 - (ic - j), 0) if ic > j else (w - 1, j - ic)
    bl = flat_pos(bl)

    # top right: other end of the '/' diagonal
    tr = (i - jc, h - 1) if i > jc else (0, h - 1 - (jc - i))
    tr = flat_pos(tr)

    # bottom right: other end of the '\' diagonal
    br = (w - 1 - (ic - jc), h - 1) if ic > jc else (w - 1, h - 1 - (jc - ic))
    br = flat_pos(br)

    hor = matrix[:, j]
    ver = matrix[i, :]
    # Each diagonal is the concatenation of the slice up to (i, j) and the
    # slice from (i, j) to the far edge, so (i, j) appears exactly once.
    diag_right = np.concatenate([flat[tl:pos:h + 1], flat[pos:br + 1:h + 1]])
    diag_left = np.concatenate([flat[tr:pos:h - 1], flat[pos:bl + 1:h - 1]])

    return hor, ver, diag_right, diag_left
43909460e847d5dde88216cc37b902a56ba2d261
28,084
from bs4 import BeautifulSoup
from typing import Dict


def process_citations_in_paragraph(para_el: BeautifulSoup, sp: BeautifulSoup, bibs: Dict, bracket: bool) -> Dict:
    """
    Process all citations in paragraph and generate a dict for surface forms

    :param para_el: paragraph element whose ``<ref>`` tags are replaced
        in place with CITETOKEN placeholder strings
    :param sp: soup used as a factory for replacement strings
        (``sp.new_string``)
    :param bibs: bibliography dict keyed by normalized ref ids (BIBREF*)
    :param bracket: True when the document uses bracket-style citations
        (enables the [1]-[3] range-expansion logic)
    :return: dict mapping each CITETOKEN key to a
        (ref_id or None, surface form, coord) tuple
    """
    # CHECK if range between two surface forms is appropriate for bracket style expansion
    def _get_surface_range(start_surface, end_surface):
        span1_match = SINGLE_BRACKET_REGEX.match(start_surface)
        span2_match = SINGLE_BRACKET_REGEX.match(end_surface)
        if span1_match and span2_match:
            # get numbers corresponding to citations
            span1_num = int(span1_match.group(1))
            span2_num = int(span2_match.group(1))
            # expand if range is between 1 and 20
            if 1 < span2_num - span1_num < 20:
                return span1_num, span2_num
        return None

    # CREATE BIBREF range between two reference ids, e.g. BIBREF1-BIBREF4 -> BIBREF1 BIBREF2 BIBREF3 BIBREF4
    def _create_ref_id_range(start_ref_id, end_ref_id):
        start_ref_num = int(start_ref_id[6:])
        end_ref_num = int(end_ref_id[6:])
        return [f'BIBREF{curr_ref_num}' for curr_ref_num in range(start_ref_num, end_ref_num + 1)]

    # CREATE surface form range between two bracket strings, e.g. [1]-[4] -> [1] [2] [3] [4]
    def _create_surface_range(start_number, end_number):
        return [f'[{n}]' for n in range(start_number, end_number + 1)]

    # create citation dict with keywords
    cite_map = dict()
    tokgen = UniqTokenGenerator('CITETOKEN')
    for rtag in para_el.find_all('ref'):
        try:
            # Optional GROBID coordinates "page,left,top,width,height";
            # only the first ';'-separated box is kept.
            raw_coord = rtag.attrs.get("coords", None)
            coord = ''
            if raw_coord != None:
                raw_coord = raw_coord.split(';')[0]
                coord = list(map(float, raw_coord.split(',')))
                coord = {
                    'page': coord[0],
                    'left': coord[1],
                    'top': coord[2],
                    'width': coord[3],
                    'height': coord[4]
                }
            # get surface span, e.g. [3]
            surface_span = rtag.text.strip()

            # check if target is available (#b2 -> BID2)
            if rtag.get('target'):
                # normalize reference string
                rtag_ref_id = normalize_grobid_id(rtag.get('target'))

                # skip if rtag ref_id not in bibliography
                if rtag_ref_id not in bibs:
                    cite_key = tokgen.next()
                    rtag.replace_with(sp.new_string(f" {cite_key} "))
                    cite_map[cite_key] = (None, surface_span, coord)
                    continue

                # if bracket style, only keep if surface form is bracket
                if bracket:
                    # valid bracket span
                    if surface_span and (surface_span[0] == '[' or surface_span[-1] == ']' or surface_span[-1] == ','):
                        pass
                    # invalid, replace tag with surface form and continue to next ref tag
                    else:
                        rtag.replace_with(sp.new_string(f" {surface_span} "))
                        continue
                # not bracket, add cite span and move on
                else:
                    cite_key = tokgen.next()
                    rtag.replace_with(sp.new_string(f" {cite_key} "))
                    cite_map[cite_key] = (rtag_ref_id, surface_span, coord)
                    continue

                ### EXTRA PROCESSING FOR BRACKET STYLE CITATIONS; EXPAND RANGES ###
                # look backward for range marker, e.g. [1]-*[3]*
                backward_between_span = ""
                for sib in rtag.previous_siblings:
                    if sib.name == 'ref':
                        break
                    elif type(sib) == NavigableString:
                        backward_between_span += sib
                    else:
                        break

                # check if there's a backwards expansion, e.g. need to expand [1]-[3] -> [1] [2] [3]
                if is_expansion_string(backward_between_span):
                    # get surface number range
                    surface_num_range = _get_surface_range(
                        rtag.find_previous_sibling('ref').text.strip(),
                        surface_span
                    )
                    # if the surface number range is reasonable (range < 20, in order), EXPAND
                    if surface_num_range:
                        # delete previous ref tag and anything in between (i.e. delete "-" and extra spaces)
                        for sib in rtag.previous_siblings:
                            if sib.name == 'ref':
                                break
                            elif type(sib) == NavigableString:
                                sib.replace_with(sp.new_string(""))
                            else:
                                break

                        # get ref id of previous ref, e.g. [1] (#b0 -> BID0)
                        previous_rtag = rtag.find_previous_sibling('ref')
                        previous_rtag_ref_id = normalize_grobid_id(previous_rtag.get('target'))
                        previous_rtag.decompose()

                        # replace this ref tag with the full range expansion, e.g. [3] (#b2 -> BID1 BID2)
                        id_range = _create_ref_id_range(previous_rtag_ref_id, rtag_ref_id)
                        surface_range = _create_surface_range(surface_num_range[0], surface_num_range[1])
                        replace_string = ''
                        for range_ref_id, range_surface_form in zip(id_range, surface_range):
                            # only replace if ref id is in bibliography, else add none
                            if range_ref_id in bibs:
                                cite_key = tokgen.next()
                                cite_map[cite_key] = (range_ref_id, range_surface_form, coord)
                            else:
                                cite_key = tokgen.next()
                                cite_map[cite_key] = (None, range_surface_form, coord)
                            replace_string += cite_key + ' '
                        rtag.replace_with(sp.new_string(f" {replace_string} "))
                    # ELSE do not expand backwards and replace previous and current rtag with appropriate ref id
                    else:
                        # add mapping between ref id and surface form for previous ref tag
                        previous_rtag = rtag.find_previous_sibling('ref')
                        previous_rtag_ref_id = normalize_grobid_id(previous_rtag.get('target'))
                        previous_rtag_surface = previous_rtag.text.strip()
                        cite_key = tokgen.next()
                        previous_rtag.replace_with(sp.new_string(f" {cite_key} "))
                        cite_map[cite_key] = (previous_rtag_ref_id, previous_rtag_surface, coord)

                        # add mapping between ref id and surface form for current reftag
                        cite_key = tokgen.next()
                        rtag.replace_with(sp.new_string(f" {cite_key} "))
                        cite_map[cite_key] = (rtag_ref_id, surface_span, coord)
                else:
                    # look forward and see if expansion string, e.g. *[1]*-[3]
                    forward_between_span = ""
                    for sib in rtag.next_siblings:
                        if sib.name == 'ref':
                            break
                        elif type(sib) == NavigableString:
                            forward_between_span += sib
                        else:
                            break
                    # look forward for range marker (if is a range, continue -- range will be expanded
                    # when we get to the second value)
                    if is_expansion_string(forward_between_span):
                        continue
                    # else treat like normal reference
                    else:
                        cite_key = tokgen.next()
                        rtag.replace_with(sp.new_string(f" {cite_key} "))
                        cite_map[cite_key] = (rtag_ref_id, surface_span, coord)

            else:
                cite_key = tokgen.next()
                rtag.replace_with(sp.new_string(f" {cite_key} "))
                cite_map[cite_key] = (None, surface_span, coord)
        except Exception as exception:
            # NOTE(review): deliberately best-effort — any failure on a
            # single ref tag is printed and the tag is skipped.
            print(exception)
            continue
    return cite_map
28,085
def duration(start_time, end_time=None):
    """Get a timedelta between end_time and start_time, where end_time
    defaults to now().

    WARNING: mixing tz-aware and naive datetimes in start_time and
    end_time will cause an error.
    """
    if not start_time:
        return None
    # A falsy end_time falls back to the current time.
    return (end_time or current_time()) - start_time
89febebf342225525bf7543342b884f130e7b3f2
28,086
def get_commands(cfg, clargs, *, what, **kwargs):
    """
    Delegates the creation of commands lists to appropriate functions
    based on `what` parameter.

    Parameters
    ----------
    cfg: dict
        Configuration dictionary.
    clargs: Namespace
        Command line arguments.
    what: str
        Determines the returned value (see: Returns[out]).
    kwargs: dict
        MANDATORY: path_i
        Dictionary with additional information from previous step.

    Returns
    -------
    out: iter(tuple(str, tuple(str)))
        An iterator with the 1st element as a tag (the `what` parameter)
        and the 2nd element as the iterator of the actual commands.
    """
    handlers = {
        "video": get_commands_video_1,
        "image": get_commands_image_1,
        "check": get_commands_check,
    }

    # Restrict inputs by extension only when `what` names an extension
    # group; otherwise all paths pass through unchanged.
    paths = kwargs["path_i"]
    if what in cfg["extensions"]:
        allowed = cfg["extensions"][what]
        paths = (p for p in paths if osp.splitext(p)[1].lower() in allowed)

    # Pair each input path with its computed output path (lazily).
    pairs = ((p, get_path(cfg, clargs, p, **kwargs)) for p in paths)

    commands = chain.from_iterable(
        handlers[what](cfg, clargs, path_i_1=src, path_o_1=dst, **kwargs)
        for src, dst in pairs
    )
    return ((what, c) for c in commands)
360410064a24d547729722c4f5843d78af9444c8
28,087
def heappush(heap, item):
    """Append `item` to `heap` and restore the heap invariant in place.

    Unlike :func:`heapq.heappush`, the (mutated) list is also returned,
    which makes the doctests below possible.

    >>> heappush([4, 4, 8, 9, 4, 12, 9, 11, 13], 7)
    [4, 4, 8, 9, 4, 12, 9, 11, 13, 7]
    >>> heappush([4, 4, 8, 9, 4, 12, 9, 11, 13, 7], 10)
    [4, 4, 8, 9, 4, 12, 9, 11, 13, 7, 10]
    >>> heappush([4, 4, 8, 9, 4, 12, 9, 11, 13, 7, 10], 5)
    [4, 4, 5, 9, 4, 8, 9, 11, 13, 7, 10, 12]

    :param heap: list satisfying the heap invariant (mutated in place)
    :param item: value to insert
    :return: the same list with `item` inserted
    """
    heap.append(item)
    # bubble_up (defined elsewhere in this module) sifts the new tail
    # element toward the root until the invariant holds again.
    bubble_up(heap, len(heap) - 1)
    return heap
99e6814828e42da8a14f4d0873e62af920a800b8
28,088
def dx(scalar_field):
    """
    Computes first (forward-difference) derivative of a 1D scalar field.

    :param scalar_field: 1D array of samples
    :return: float64 array of length ``len(scalar_field) - 1`` holding the
        successive differences ``scalar_field[i+1] - scalar_field[i]``
    """
    # np.diff vectorizes the former explicit Python loop; astype keeps the
    # original float64 result dtype for integer inputs. As a side benefit,
    # an empty input now yields an empty result instead of the ValueError
    # the old ``np.zeros(size - 1)`` raised.
    return np.diff(scalar_field).astype(np.float64)
b0af862210a2a395dcdfdab2e921f2c305a536d2
28,089
import argparse


def makeParser():
    """
    Make a command-line argument parser.

    @return: An C{argparse.ArgumentParser} instance.
    """
    parser = argparse.ArgumentParser(
        description=(
            'Print a JSON object containing reference to read '
            'distances extracted from a SAM file.'))

    # One or more SAM files; argparse collects repeats into a list.
    parser.add_argument('--samFile', action='append', required=True,
                        help='The SAM file(s) to load. May be repeated.')

    parser.add_argument('--minMatchingReads', type=int,
                        help=('The minimum number of reads that must match a '
                              'reference for it '
                              'to be included.'))

    parser.add_argument('--scoreTag',
                        help=('The score tag to use for the alignment score. '
                              'If not given, '
                              '1 will be used to indicate that a read matched '
                              'a reference '
                              '(non-matches are not included). The default is '
                              'no score tag, '
                              'which is not that useful. A good choice is '
                              '"AS", for the '
                              'alignment score, but that has to be present in '
                              'the SAM file, '
                              'which means that the aligner (bowtie2, bwa, '
                              'etc. has to have '
                              'produced such a tag.'))

    parser.add_argument('--verbose', action='store_true',
                        help='Print extra information.')

    return parser
d1dd832d1533eb5a506eca19a35499362b03feb7
28,090
def get_genetic_profiles(study_id, profile_filter=None):
    """Return all the genetic profiles (data sets) for a given study.

    Genetic profiles are different types of data for a given study. For
    instance the study 'cellline_ccle_broad' has profiles such as
    'cellline_ccle_broad_mutations' for mutations,
    'cellline_ccle_broad_CNA' for copy number alterations, etc.

    Parameters
    ----------
    study_id : str
        The ID of the cBio study.
        Example: 'paad_icgc'
    profile_filter : Optional[str]
        A string used to filter the profiles to return, matched against
        the genetic alteration type. Will be one of:
        - MUTATION
        - MUTATION_EXTENDED
        - COPY_NUMBER_ALTERATION
        - MRNA_EXPRESSION
        - METHYLATION

    Returns
    -------
    genetic_profiles : list[str]
        A list of genetic profiles available for the given study.
    """
    df = send_request(cmd='getGeneticProfiles', cancer_study_id=study_id)
    filtered = _filter_data_frame(df, ['genetic_profile_id'],
                                  'genetic_alteration_type', profile_filter)
    return list(filtered['genetic_profile_id'].values())
b409a1511112cafab0330a23684b3e255fa0a60c
28,091
import string


def cipher(sentence, n_rotate):
    """
    Cipher string with Caesar algorithm.

    (Anything other than ASCII letters stays the same.)

    :param sentence: String containing sentence/sentences/word/words.
    :param n_rotate: number of positions to rotate letters; any integer is
        handled via modulo 26 (the old manual wraparound broke for large
        negative rotations).
    :return: string with ciphered words
    """
    shift = n_rotate % 26
    upper = string.ascii_uppercase
    lower = string.ascii_lowercase
    # str.translate performs the per-character mapping in one C-level
    # pass, replacing the original nested scans over both alphabets.
    table = str.maketrans(
        upper + lower,
        upper[shift:] + upper[:shift] + lower[shift:] + lower[:shift])
    return sentence.translate(table)
e0606949f254971431faf7899bd254f4792176d4
28,092
from typing import Union
from typing import List


def _assert_in_fc(
    r: RestClient, uuids: Union[str, List[str]], all_keys: bool = False
) -> StrDict:
    """Assert /api/files lists exactly the given uuids. Also return data."""
    if isinstance(uuids, str):
        uuids = [uuids]

    # Only pass the query dict when all keys were requested, matching the
    # two call shapes the endpoint is exercised with.
    params = {'all-keys': True} if all_keys else None
    data = (r.request_seq('GET', '/api/files', params)
            if params is not None
            else r.request_seq('GET', '/api/files'))

    assert '_links' in data
    assert 'self' in data['_links']
    assert 'files' in data

    files = data['files']
    assert len(files) == len(uuids)
    for record in files:
        assert record['uuid'] in uuids

    return data
908f12309a93abc472e05598abbb0e5ee29cc798
28,093
def power_law(uref, h, href, shear):
    """
    Extrapolate wind speed (or other) according to power law.

    NOTE: see https://en.wikipedia.org/wiki/Wind_profile_power_law

    :param uref: wind speed at reference height (same units as extrapolated
        wind speed, u)
    :param h: height of extrapolated wind speed (same units as href)
    :param href: reference height (same units as h)
    :param shear: shear exponent alpha (1/7 in neutral stability) (unitless)
    :return u: extrapolated wind speed (same units as uref)
    """
    # Convert every operand before dividing: the old code computed
    # ``h / href`` first, which failed when h/href were plain lists.
    # np.asarray avoids copying inputs that are already arrays.
    u = np.asarray(uref) * (np.asarray(h) / np.asarray(href)) ** np.asarray(shear)
    return u
cb5d002dfeed022af694060bfe9e516191835742
28,094
import os


def get_defaults():
    """
    Returns default frequencies to project intensities onto as well as
    default paths for locations of the spectroscopic data.

    Returns
    -------
    frequency_range : numpy.ndarray
        Frequencies over which to project the intensities
        (501 evenly spaced points on [850, 1850]).
    pure_data_path : str
        Directory location where pure-component spectra are stored.
    mixture_data_path : str
        Directory location where mixed-component spectra are stored.
    reaction_data_path : str
        Directory location where reaction spectra are stored.
        (Previously returned but undocumented.)
    """
    pure_data_path = os.path.join(data_path, 'pure_components/')
    mixture_data_path = os.path.join(data_path, 'mixed_components/')
    reaction_data_path = os.path.join(data_path, 'reaction/')
    frequency_range = np.linspace(850, 1850, num=501, endpoint=True)
    return frequency_range, pure_data_path, mixture_data_path, reaction_data_path
a38f8a1aa9f74cf5736c87e1ad6a6fbd556aa472
28,095
def binary_weight_convolution(inp, outmaps, kernel,
                              pad=None, stride=None, dilation=None, group=1,
                              w_init=None, wb_init=None, b_init=None,
                              base_axis=1, fix_parameters=False, rng=None,
                              with_bias=True):
    r"""Binary-weight convolution (XNOR-Net style): a multiplier-less
    convolution with one scale factor per output map.

    The inner product uses only the signs of the float weights:

    .. math::
        y_{n, a, b} = \frac{1}{\|\mathbf{w}_n\|_{\ell_1}}
            \sum_{m} \sum_{i} \sum_{j} sign(w_{n, m, i, j}) x_{m, a + i, b + j}

    so multiplication reduces to sign flips plus one scaling factor
    :math:`\alpha_n = 1 / \|\mathbf{w}_n\|_{\ell_1}` per output map, where
    :math:`n` runs over ``outmaps``.

    References:
        Rastegari, Mohammad, et al. "XNOR-Net: ImageNet Classification Using
        Binary Convolutional Neural Networks." arXiv preprint
        arXiv:1603.05279 (2016).

    .. note::

        1) To share weights between layers, share the float parameter ``W``,
           not the binarized ``Wb``.
        2) ``W`` and ``Wb`` are synchronized only when
           :func:`~nnabla._variable.Variable.forward` runs, not on
           ``backward`` — call ``forward`` once before reading parameters.
        3) ``Wb`` is stored as floating point; this layer is for simulation
           purposes only.

    Args:
        inp (~nnabla.Variable): N-D input array.
        outmaps (int): Number of convolution kernels (output channels).
        kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size,
            e.g. ``(3, 5)`` for a 3x5 kernel.
        pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions.
        stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions.
        dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions.
        group (int): Number of groups of channels.
        w_init (~nnabla.initializer.BaseInitializer): Initializer for ``W``.
        wb_init (~nnabla.initializer.BaseInitializer): Initializer for ``Wb``.
        b_init (~nnabla.initializer.BaseInitializer): Initializer for bias.
        base_axis (int): Dimensions up to `base_axis` are sample dimensions.
        fix_parameters (bool): Freeze weights and biases when ``True``.
        rng (numpy.random.RandomState): Random generator for initializers.
        with_bias (bool): Include the bias term.

    Returns:
        :class:`~nnabla.Variable`
    """
    maps_in = inp.shape[base_axis]
    k = tuple(kernel)
    # Glorot-uniform defaults for both the float and the binarized weights.
    if w_init is None:
        w_init = UniformInitializer(
            calc_uniform_lim_glorot(maps_in, outmaps, k), rng=rng)
    if wb_init is None:
        wb_init = UniformInitializer(
            calc_uniform_lim_glorot(maps_in, outmaps, k), rng=rng)
    if b_init is None:
        b_init = ConstantInitializer()
    trainable = not fix_parameters
    w_shape = (outmaps, maps_in) + k
    w = get_parameter_or_create("W", w_shape, w_init, trainable)
    wb = get_parameter_or_create("Wb", w_shape, wb_init, trainable)
    # The per-outmap scale alpha is recomputed from W on forward, never
    # learned directly.
    alpha = get_parameter_or_create(
        "alpha", (outmaps, ), ConstantInitializer(0), False)
    b = None
    if with_bias:
        b = get_parameter_or_create("b", (outmaps,), b_init, trainable)
    return F.binary_weight_convolution(inp, w, wb, alpha, b,
                                       base_axis, pad, stride, dilation, group)
3cae56fee85ba0c7679e9de7fd2743c9ce252d1a
28,096
def dataset2Xy(dataset):
    """Split a dataframe into features ``X``, one-hot labels ``y``, and the
    number of distinct labels.

    The last column of *dataset* is taken as the class column; every other
    column is treated as a feature.

    Args:
        dataset: A pandas dataframe whose final column holds the class
            labels and whose remaining columns are features.

    Returns:
        tuple (X, y, output_dim) where X is the feature matrix; y is the
        one-hot label matrix; and output_dim is the number of distinct
        labels in y.
    """
    label_col = dataset.columns[-1]
    n_classes = len(dataset[label_col].value_counts())
    features = dataset.drop(columns=[label_col]).to_numpy()
    labels = to_categorical(dataset[label_col].to_numpy(),
                            num_classes=n_classes)
    return features, labels, n_classes
ef07873db639a7a9c34b149959acd419d4a0b9d3
28,097
from typing import Callable
import sys


def sysexit(func: Callable) -> Callable:
    """Decorator: use the wrapped function's return value as the exit code.

    The decorated callable's return value is handed to ``sys.exit`` — an
    ``int`` becomes the process exit status; any other non-None value is
    printed to stderr with status 1 (standard ``sys.exit`` semantics).

    :param func: the callable whose return value should terminate the process
    :return: the wrapping callable (raises ``SystemExit`` when invoked)
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        sys.exit(func(*args, **kwargs))
    # Bug fix: the decorator previously fell off the end and returned None,
    # so `@sysexit` replaced the decorated function with None.
    return wrapper
826b9b265a3df5d79cc1fa61cdccefbd5eca87fa
28,098
def pairwise_list(a_list):
    """Group a flat list into consecutive non-overlapping pairs.

    "s -> (s0,s1), (s2,s3), ..." — e.g. ``[1, 2, 3, 4]`` becomes
    ``[[1, 2], [3, 4]]``.

    :param a_list: list with an even number of elements
    :return: list of two-element lists
    :raises Exception: if ``a_list`` has an odd number of elements
    """
    if len(a_list) % 2 != 0:
        raise Exception("pairwise_list error!")
    # Zip the even-indexed elements with the odd-indexed ones.
    return [[left, right] for left, right in zip(a_list[::2], a_list[1::2])]
5142fb2e00c931ab57fc9028eb9b6df5a98c0342
28,099