content: string (lengths 35 to 762k)
sha1: string (length 40)
id: int64 (0 to 3.66M)
import numpy as np
import torch

def sen_loss(outputs, all_seq, dim_used, dct_n):
    """
    :param outputs: N * (seq_len*dim_used_len)
    :param all_seq: N * seq_len * dim_full_len
    :param dim_used: indices of the used dimensions
    :param dct_n: number of DCT coefficients
    :return: mean absolute error between predicted and target exp-map sequences
    """
    n, seq_len, dim_full_len = all_seq.data.shape
    dim_used_len = len(dim_used)
    dim_used = np.array(dim_used)

    # data_utils and MY_DEVICE are assumed to be defined at module level
    _, idct_m = data_utils.get_dct_matrix(seq_len)
    idct_m = torch.from_numpy(idct_m).float().to(MY_DEVICE)
    outputs_t = outputs.view(-1, dct_n).transpose(0, 1)
    pred_expmap = torch.matmul(idct_m[:, :dct_n], outputs_t).transpose(0, 1).contiguous().view(-1, dim_used_len, seq_len).transpose(1, 2)
    targ_expmap = all_seq.clone()[:, :, dim_used]

    loss = torch.mean(torch.sum(torch.abs(pred_expmap - targ_expmap), dim=2).view(-1))
    return loss
e570a142fc7c709de599e9864ec0a074e28b81ac
3,636,000
def get_net_peer_count(endpoint=_default_endpoint, timeout=_default_timeout) -> str:
    """
    Get the peer count for the network

    Parameters
    ----------
    endpoint: :obj:`str`, optional
        Endpoint to send request to
    timeout: :obj:`int`, optional
        Timeout in seconds

    Returns
    -------
    str
        Number of peers represented as a hex string
    """
    method = "net_peerCount"
    try:
        return rpc_request(method, endpoint=endpoint, timeout=timeout)['result']
    except TypeError as e:
        raise InvalidRPCReplyError(method, endpoint) from e
cd13962e29c6944d5a0bebaa3c63a756d6caa187
3,636,001
def get_cursor_column_number(widget: QTextEdit) -> int:
    """Get the cursor column number from the QTextEdit widget

    Args:
        widget (QTextEdit): the text edit widget to inspect

    Returns:
        int: zero-based column of the cursor on its current line
    """
    assert isinstance(widget, QTextEdit)
    pos = widget.textCursor().position()
    text = widget.toPlainText()
    # QTextEdit.toPlainText() always uses '\n' as the line separator,
    # so splitting on os.linesep would break on Windows.
    return len(text[:pos].split('\n')[-1])
925949fd83b67d5c1f18111e3d9e4dd3d6fcb1c0
3,636,002
def delete_user(user_id: int):
    """Delete a user."""
    user = User.query.filter(
        User.user_id == user_id
    ).first()
    if not user:
        return dbu.inner_error("User does not exist or has already been deleted")
    db.session.delete(user)
    try:
        db.session.commit()
    except Exception as e:
        current_app.logger.warning(e)
        db.session.rollback()
        return dbu.inner_error("Delete failed")
    return jsonify({"data": {"user_id": user_id}, "code": 200, "msg": "User deleted successfully"})
0d2c391eb79ab30a3d80d7caa2c4618e2663a75f
3,636,003
def _combine_odds(odds):
    """Combine odds of different targets."""
    combined_odds = 1 / (1 / odds).sum(axis=1)
    return combined_odds
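A quick usage sketch, assuming the odds arrive as a 2-D NumPy array with one row per observation and one column per target; the combined odds are the reciprocal of the summed reciprocals:

import numpy as np

# Two observations, three targets each.
odds = np.array([
    [2.0, 4.0, 4.0],
    [3.0, 3.0, 3.0],
])
print(_combine_odds(odds))  # [1. 1.] -- 1/(1/2 + 1/4 + 1/4) = 1, 1/(3 * 1/3) = 1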
a504990dc2c5b00c1c55fe7598bebf2fdecf95b8
3,636,004
def decrypt(pp, skx, cty, max_innerprod=100):
    """
    Performs the decrypt algorithm for IPE on a secret key skx and ciphertext
    cty. The output is the inner product <x,y>, so long as it is in the range
    [0, max_innerprod].
    """
    (k1, k2) = skx
    (c1, c2) = cty
    d1 = pair(k1, c1)
    d2 = innerprod_pair(k2, c2)
    # check for unit element
    # gt = group.random(GT)
    # if(gt == gt * d2):
    #     print("Unit")
    #     return True
    # return False
    return solve_dlog_bsgs(d1, d2, max_innerprod + 1)
4f3e214300a21490a33c4e143ed8d79e30720fe6
3,636,005
def alpha_blend_colors(colors, additional_alpha=1.0):
    """
    Given a sequence of colors, return the alpha blended color. This assumes
    the last color is the one in front.
    """
    srcr, srcg, srcb, srca = COLOR_CONVERTER.to_rgba(colors[0])
    srca *= additional_alpha
    for color in colors[1:]:
        dstr, dstg, dstb, dsta = COLOR_CONVERTER.to_rgba(color)
        dsta *= additional_alpha
        # Alpha-composite the next color with the accumulated color.
        outa = srca + dsta * (1 - srca)
        outr = (srcr * srca + dstr * dsta * (1 - srca)) / outa
        outg = (srcg * srca + dstg * dsta * (1 - srca)) / outa
        outb = (srcb * srca + dstb * dsta * (1 - srca)) / outa
        srca, srcr, srcg, srcb = outa, outr, outg, outb
    return srcr, srcg, srcb, srca
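A minimal usage sketch. It assumes COLOR_CONVERTER is matplotlib's ColorConverter, which matches the to_rgba usage above but is an assumption about the surrounding module:

from matplotlib.colors import ColorConverter

COLOR_CONVERTER = ColorConverter()  # assumption: the module uses matplotlib's converter

blended = alpha_blend_colors(['blue', 'red'], additional_alpha=0.5)
print(tuple(round(c, 2) for c in blended))  # approximately (0.33, 0.0, 0.67, 0.75)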
cac568988c5794310a058ac98ff8ca572f18432e
3,636,006
import numpy as np

def ruled(nrb1, nrb2):
    """
    Construct a ruled surface/volume between two NURBS curves/surfaces.

    Parameters
    ----------
    nrb1, nrb2 : NURBS
    """
    assert nrb1.dim == nrb2.dim
    assert nrb1.dim <= 2
    assert nrb2.dim <= 2
    nrb1, nrb2 = compat(nrb1, nrb2)
    Cw = np.zeros(nrb1.shape + (2, 4), dtype='d')
    Cw[..., 0, :] = nrb1.control
    Cw[..., 1, :] = nrb2.control
    UVW = nrb1.knots + ([0, 0, 1, 1],)
    return NURBS(UVW, Cw)
c74ed6ff2485d115f3e387de96c779017e6ea62b
3,636,007
import time

async def db_head_state():
    """Status/health check."""
    sql = ("SELECT num, created_at, extract(epoch from created_at) ts "
           "FROM hive_blocks ORDER BY num DESC LIMIT 1")
    row = DB.query_row(sql)
    return dict(db_head_block=row['num'],
                db_head_time=str(row['created_at']),
                db_head_age=int(time.time() - row['ts']))
4ffbf84c2e28f91f9182e9b8a4d214ff953c30fd
3,636,008
def d_d_theta_inv(y, alpha):
    """
    xi'(y) = 1/theta''(xi(y)) = alpha / (1 - |y|)^2 > 0

    Nikolova et al. 2014, table 1, theta_2 and eq 5.
    """
    assert -1 < y < 1 and alpha > 0
    denom = 1 - abs(y)
    return alpha / (denom * denom)
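A quick numerical spot-check of the closed form at a few points inside (-1, 1):

for y, alpha in [(0.0, 1.0), (0.5, 2.0), (-0.9, 1.5)]:
    expected = alpha / (1 - abs(y)) ** 2
    assert abs(d_d_theta_inv(y, alpha) - expected) < 1e-12

print(d_d_theta_inv(0.5, 2.0))  # 2 / (1 - 0.5)^2 == 8.0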
8ed796a46021f64ca3e24972abcf70bd6f64976d
3,636,009
async def add_item(
    item: ItemEditableFields, db_session: Session = Depends(get_db_session)
):
    """This handler adds an item to the DB.

    Args:
        item: ItemEditableFields. The data of the item to be added to the DB.
        db_session: Session. The database session used to interact with the DB.

    Returns:
        Item. The item object that was inserted.

    Raises:
        HTTPException. Item already exists in the DB.
    """
    existing_item = item_crud.get_by_sku(db_session, item.sku)
    if existing_item:
        raise HTTPException(status_code=412, detail='Item already exists.')
    return item_crud.create(db_session, Item(**item.dict()))
487d26f4d7858a6f720e3f12cb5a2295e0eb6370
3,636,010
def center_crop(img, crop_height, crop_width):
    """
    Crop the central part of an image.

    Args:
        img (ndarray): image to be cropped.
        crop_height (int): height of the crop.
        crop_width (int): width of the crop.

    Return:
        (ndarray): the cropped image.
    """
    def get_center_crop_coords(height, width, crop_height, crop_width):
        y1 = (height - crop_height) // 2
        y2 = y1 + crop_height
        x1 = (width - crop_width) // 2
        x2 = x1 + crop_width
        return x1, y1, x2, y2

    height, width = img.shape[:2]
    x1, y1, x2, y2 = get_center_crop_coords(
        height, width, crop_height, crop_width)
    return img[y1:y2, x1:x2, ...]
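A short usage sketch with a synthetic NumPy image; the trailing ellipsis in the slice means the same call works for grayscale (2-D) and multi-channel (3-D) arrays:

import numpy as np

img = np.arange(6 * 8 * 3).reshape(6, 8, 3)  # H=6, W=8, RGB
crop = center_crop(img, 4, 4)
print(crop.shape)  # (4, 4, 3) -- the central 4x4 window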
6e5fafee8c34632b61d047e9eb66e9b08b5d0203
3,636,011
import logging

import numpy as np
from scipy import signal

def membOutDet(input_slc, cell_mask=10, outer_mask=30, det_cutoff=0.75):
    """ Detection of mYFP maxima in the line of interest.
    The algorithm goes from the outside to the inside of the cell and finds
    the first outer maxima of the membrane.

    "cell_mask" - option for hiding the inner cell region to ignore possible
    cytoplasmic fluorescence artefacts, number of pixels to be set to zero.

    "outer_mask" - option for hiding extracellular fluorescence artefacts,
    number of pixels to be set to zero.

    Works with the diameter slice only!
    Returns two indexes of membrane maxima.
    """
    slc = np.copy(input_slc)
    if (np.shape(slc)[0] % 2) != 0:  # parity check for correctly splitting the slice in two halves
        slc = slc[:-1]

    slc_left, slc_right = np.split(slc, 2)
    # slc_right = np.flip(slc_right)
    logging.info('Slice split!')

    slc_left[-cell_mask:] = 0   # mask cellular space
    slc_right[:cell_mask] = 0

    slc_left[:outer_mask] = 0   # mask extracellular space
    slc_right[-outer_mask:] = 0

    left_peak, _ = signal.find_peaks(slc_left,
                                     height=[slc_left.max() * det_cutoff,
                                             slc_left.max()],
                                     distance=10)
    right_peak, _ = signal.find_peaks(slc_right,
                                      height=[slc_right.max() * det_cutoff,
                                              slc_right.max()],
                                      distance=10)

    memb_peaks = []
    # The peak-value logging is done inside the try blocks so that a missing
    # peak raises IndexError where it can be handled instead of crashing.
    try:
        logging.info('Left peak val {:.2f}'.format(slc_left[left_peak[0]]))
        memb_peaks.append(left_peak[0])
    except IndexError:
        logging.error('LEFT membrane peak NOT DETECTED!')
        memb_peaks.append(0)

    try:
        logging.info('Right peak val {:.2f}'.format(slc_right[right_peak[0]]))
        memb_peaks.append(int(len(slc) / 2 + right_peak[0]))
    except IndexError:
        logging.error('RIGHT membrane peak NOT DETECTED!')
        memb_peaks.append(0)

    logging.info('L {}, R {}'.format(memb_peaks[0], memb_peaks[1]))

    output_slc = np.concatenate((slc_left, slc_right))

    return output_slc, memb_peaks
24077afc3321612c017475ae4f81a25e375369a4
3,636,012
import os

def cmk_arn_value(variable_name):
    """Retrieve target CMK ARN from environment variable."""
    arn = os.environ.get(variable_name, None)
    if arn is None:
        raise ValueError(
            'Environment variable "{}" must be set to a valid KMS CMK ARN for examples to run'.format(
                variable_name
            )
        )
    if arn.startswith("arn:") and ":alias/" not in arn:
        return arn
    raise ValueError("KMS CMK ARN provided for examples must be a key not an alias")
d235ad95c050bd68d8428d6c0d737ad394a5d1ea
3,636,013
def qaoa_ansatz(gammas, betas):
    """
    Function that returns a QAOA ansatz program for a list of angles betas
    and gammas. len(betas) == len(gammas) == P for a QAOA program of order P.

    :param list(float) gammas: Angles over which to parameterize the cost Hamiltonian.
    :param list(float) betas: Angles over which to parameterize the driver Hamiltonian.
    :return: The QAOA ansatz program.
    :rtype: Program
    """
    # h_cost and h_driver are module-level Hamiltonians; Program and
    # exponentiate_commuting_pauli_sum come from the surrounding module.
    return Program([exponentiate_commuting_pauli_sum(h_cost)(g) +
                    exponentiate_commuting_pauli_sum(h_driver)(b)
                    for g, b in zip(gammas, betas)])
c1b6d45430b9d2d09fe19ea2f505c7cd806c684a
3,636,014
import datetime

import psutil

def print_header(procs_status, num_procs):
    """Print system-related info, above the process list."""

    def get_dashes(perc):
        dashes = "|" * int((float(perc) / 10 * 4))
        empty_dashes = " " * (40 - len(dashes))
        return dashes, empty_dashes

    # cpu usage
    percs = psutil.cpu_percent(interval=0, percpu=True)
    for cpu_num, perc in enumerate(percs):
        dashes, empty_dashes = get_dashes(perc)
        line = " CPU%-2s [%s%s] %5s%%" % (cpu_num, dashes, empty_dashes, perc)
        printl(line, color=get_color(perc))

    # memory usage
    mem = psutil.virtual_memory()
    dashes, empty_dashes = get_dashes(mem.percent)
    line = " Mem   [%s%s] %5s%% %6s / %s" % (
        dashes, empty_dashes, mem.percent,
        bytes2human(mem.used), bytes2human(mem.total),
    )
    printl(line, color=get_color(mem.percent))

    # swap usage
    swap = psutil.swap_memory()
    dashes, empty_dashes = get_dashes(swap.percent)
    line = " Swap  [%s%s] %5s%% %6s / %s" % (
        dashes, empty_dashes, swap.percent,
        bytes2human(swap.used), bytes2human(swap.total),
    )
    printl(line, color=get_color(swap.percent))

    # processes number and status
    st = []
    for x, y in procs_status.items():
        if y:
            st.append("%s=%s" % (x, y))
    st.sort(key=lambda x: x[:3] in ('run', 'sle'), reverse=1)
    printl(" Processes: %s (%s)" % (num_procs, ', '.join(st)))

    # load average, uptime
    # NOTE: the original imported `from datetime import datetime`, which does
    # not provide the `datetime.datetime` attribute used below; `import
    # datetime` matches the usage.
    uptime = datetime.datetime.now() - \
        datetime.datetime.fromtimestamp(psutil.boot_time())
    av1, av2, av3 = psutil.getloadavg()
    line = " Load average: %.2f %.2f %.2f  Uptime: %s" \
        % (av1, av2, av3, str(uptime).split('.')[0])
    printl(line)
8986f5852e6922a5ff5e396d6b133eb560414883
3,636,015
def approximated_atmo_spectrum(energy):
    """Gives an approximated atmospheric neutrino spectrum.

    Can be used for comparing the expected true energy distribution to
    recorded energy proxy distributions. It is normalised such that the
    weight for an energy of 1 is equal to 1. (It is agnostic to energy
    units.)

    :param energy: True neutrino energy (in some consistent unit)
    :return: Spectrum weight for that energy
    """
    return energy**-3.7
86768b5ba0bd31ef19a89dfe27af6c793492daa7
3,636,016
import pandas as pd

def to_csv(logbook, filename, output=False):
    """
    Write a logbook to a CSV file.

    The output file is readable using an ordinary CSV reader, e.g.
    ``pandas.read_csv``. Alternatively you may read it back into a logbook
    format using ``gt.ops.from_csv``.
    """
    logs = []
    for unique_trip_id in logbook:
        log = logbook[unique_trip_id].assign(unique_trip_id=unique_trip_id)
        logs.append(log)

    if len(logs) == 0:
        # Column name matches the 'unique_trip_id' assigned above.
        df = pd.DataFrame(
            columns=[
                'trip_id', 'route_id', 'action', 'minimum_time',
                'maximum_time', 'stop_id', 'latest_information_time',
                'unique_trip_id'
            ]
        )
    else:
        df = pd.concat(logs)

    if output:
        return df
    else:
        return df.to_csv(filename, index=False)
4517d8812c795cf856575b7a93fe3be7a912ec8d
3,636,017
import collections.abc

def flatten_dict(d, parent_key="", sep="_"):
    """Flatten a dictionary."""
    items = []
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        # collections.MutableMapping was removed in Python 3.10;
        # collections.abc.MutableMapping is the supported name.
        if v and isinstance(v, collections.abc.MutableMapping):
            items.extend(flatten_dict(v, new_key, sep=sep).items())
        else:
            items.append((new_key, v))
    return dict(items)
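A quick usage sketch:

nested = {"a": 1, "b": {"c": 2, "d": {"e": 3}}}
print(flatten_dict(nested))
# {'a': 1, 'b_c': 2, 'b_d_e': 3}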
36c63188fc49950ac493c2fa7bf72369b465fe3c
3,636,018
def request_mxnet_inference(ip_address="127.0.0.1", port="80", connection=None, model="squeezenet"):
    """
    Send a request to the container to test inference on kitten.jpg

    :param ip_address: IP address of the host serving the model
    :param port: port the model server listens on
    :param connection: ec2_connection object to run the commands remotely over ssh
    :param model: name of the model to query
    :return: <bool> True/False based on result of inference
    """
    conn_run = connection.run if connection is not None else run

    # Check if image already exists
    run_out = conn_run("[ -f kitten.jpg ]", warn=True)
    if run_out.return_code != 0:
        conn_run("curl -O https://s3.amazonaws.com/model-server/inputs/kitten.jpg", hide=True)

    run_out = conn_run(f"curl -X POST http://{ip_address}:{port}/predictions/{model} -T kitten.jpg", warn=True)

    # run_out.return_code is not reliable, since sometimes a predict request
    # may succeed but the returned result is a 404. Hence the extra check.
    if run_out.return_code != 0 or "probability" not in run_out.stdout:
        return False

    return True
886f86b9456c49df42c636391a5305ca57c7e913
3,636,019
from itertools import chain

def flatten(iterable):
    """ Flattens an iterable, where strings and dicts are not considered iterable.

    :param iterable: The iterable to flatten.

    :returns: The iterable flattened as a flat list.

    >>> from dautils import collect
    >>> collect.flatten([[1, 2]])
    [1, 2]
    """
    # log_api and isiterable come from the surrounding module.
    logger = log_api.env_logger()
    logger.debug('Iterable {}'.format(iterable))
    assert isiterable(iterable), 'Not iterable {}'.format(iterable)
    flat = iterable

    if isiterable(iterable[0]):
        flat = [i for i in chain.from_iterable(iterable)]

    return flat
e395bdff6ad1756b500aaa8fcdbe4a882a73e383
3,636,020
def get_ts_pid(pidfile):
    """Read a pidfile, return a PID."""
    try:
        with open(pidfile) as f:
            pid = f.readline()
    except EnvironmentError:
        LOG.warning("Unable to read pidfile; process metrics will fail!")
        pid = None
    return pid
75cb541af8a4b8958b497ed6407497cef4baa614
3,636,021
def conjg(a):
    """
    Find the complex conjugate values of the input.

    Parameters
    ----------
    a : af.Array
        Multi dimensional arrayfire array.

    Returns
    -------
    out : af.Array
        array containing complex conjugate values from `a`.
    """
    return _arith_unary_func(a, backend.get().af_conjg)
75fa1293a00c83b90fa40085222f07f755451897
3,636,022
def obj_size_avg_residual(coeffs, avg_size, class_id):
    """
    :param coeffs: object sizes
    :param avg_size: dictionary that saves the mean size of each category
    :param class_id: nyu class id.
    :return: size residual ground truth normalized by the average size
    """
    size_residual = (coeffs - avg_size[class_id]) / avg_size[class_id]
    return size_residual
8d44d4ebf273baf460195ec7c6ade58c8d057025
3,636,023
import re

def search_content(obj):
    """Get the excerpt in which the searched term matches the content."""
    # StreamValue, ALLOWED_BLOCK_TYPES and return_all_content come from the
    # surrounding module.
    indexable_text = []
    if hasattr(obj, 'excerpt'):
        if obj.excerpt:
            indexable_text.append(obj.excerpt)

    if not hasattr(obj, 'content_editor'):
        return ' '.join(indexable_text)

    content = obj.content_editor
    if isinstance(content, str):
        indexable_text.append(return_all_content(content))
        return re.sub(r' +', ' ', ' '.join(indexable_text)).strip()

    if isinstance(content, StreamValue):
        indexable_text = []
        for block in content:
            text = ''
            if block.block_type in ALLOWED_BLOCK_TYPES:
                text = return_all_content(block.render())
            if text:
                indexable_text.append(' %s' % text)
        if indexable_text:
            return re.sub(r' +', ' ', ' '.join(indexable_text)).strip()

    return ''
e6bdc25d0ac15d76fd7ddac805c619957a31e6c4
3,636,024
import json

import requests

def get_total_num_activities():
    """Query the IATI registry and return a faceted list of activity counts
    and their frequencies. The total number of activities is then calculated
    as the sum of the product of a count and a frequency. E.g. if "30" is the
    count and the frequency is 2, then the total number of activities is 60.
    """
    activity_request = requests.get(ACTIVITY_URL)
    if activity_request.status_code == 200:
        activity_json = json.loads(activity_request.content.decode('utf-8'))
        activity_count = 0
        for key in activity_json["result"]["facets"]["extras_activity_count"]:
            activity_count += int(key) * activity_json["result"]["facets"]["extras_activity_count"][key]
        return activity_count
    else:
        raise CommandError('Unable to connect to IATI registry to query activities.')
5752a6d5fe5f2bd6125936b639de2ff5226b51ac
3,636,025
def _get_chromosome_dirs(input_directory):
    """Collect chromosome directories"""
    dirs = []
    for d in input_directory.iterdir():
        if not d.is_dir():
            continue
        # Skip output directories in case the user re-runs
        # without deleting previous output files.
        elif d.name == 'logs':
            continue
        elif d.name == 'p_distance_output':
            continue
        else:
            dirs.append(d)
    return dirs
5047c0c158f11794e312643dbf7d307b381ba59f
3,636,026
import warnings

def calculateCumTimeDiff(df):
    """
    Calculates the cumulative time difference between points for each track
    of 'df'.
    """
    warnings.warn("The calculateCumTimeDiff function is deprecated and "
                  "will be removed in version 2.0.0. "
                  "Use the calculate_cum_time_diff function instead.",
                  FutureWarning,
                  stacklevel=8)
    return calculate_cum_time_diff(df)
6c4b21f07a0d564e63a7b485a1f5d5e831d51058
3,636,027
import numpy as np
from scipy import optimize

def optimalPriceFast(z_list, V_list, U, s_radius, max_iter=1e4):
    """
    Returns tuple of optimal prices p^* chosen in hindsight, and the revenue
    achieved, if we know z_t (list of 1-D arrays), V_t (list of 2-D arrays),
    and U (2-D array). U is Orthogonal!
    Performs optimization over low-dimensional actions and is therefore fast.
    """
    TOL = 1e-10  # numerical error allowed.
    T = len(z_list)
    z = np.zeros(z_list[0].shape)
    V = np.zeros(V_list[0].shape)
    N = U.shape[0]
    for t in range(T):
        z += z_list[t]
        V += V_list[t]
    z = z / T
    z = z.reshape((z_list[0].shape[0], 1))
    V = V / T
    c = np.dot(U, z)
    B = np.dot(U, np.dot(V, U.transpose()))

    # ensure B is positive definite:
    eigvals = np.linalg.eigvals((B + B.transpose()) / 2.0)
    if (sum(~np.isreal(eigvals))) > 0 or (np.min(eigvals) < -TOL):
        print("Warning: B is not positive definite")

    cons = {'type': 'ineq',
            'fun': lambda x: s_radius - np.linalg.norm(x),
            'jac': lambda x: x / np.linalg.norm(x) if np.linalg.norm(x) > TOL else np.zeros(x.shape)}
    res = optimize.minimize(fun=hindsightLowDimObj, x0=np.zeros(U.shape[1]),
                            args=(z, V), jac=hindsightLowDimGrad,
                            method='SLSQP', constraints=cons,
                            options={'disp': True, 'maxiter': max_iter})
    # Note: Set options['disp'] = True to print output of optimization.
    x_star = res['x']
    p_star = np.dot(U, x_star)
    p_norm = np.linalg.norm(p_star)
    if p_norm > s_radius:
        print("Warning: p_star not in constraints")
        p_star = s_radius * p_star / p_norm
    R_star = hindsightObj(p_star, c, B) * T

    for i in range(1000):  # compare with random search to see if optimization worked
        p_rand = randomPricing(N, s_radius)
        R_rand = hindsightObj(p_rand, c, B) * T
        if (R_star - R_rand) / np.abs(R_star + TOL) > 0.001:
            raise ValueError("SLSQP optimization failed, R_star=" + str(R_star) +
                             ", R_rand=" + str(R_rand))
        if R_rand < R_star:
            p_star = p_rand
            R_star = R_rand

    return (p_star, R_star)
15fb22257e19b6abf46545dd05d23a6bc7a31da9
3,636,028
import pandas as pd

def extract_seqIdList_from_cluster(cluster_name, cluster_dic, extract_id, allSeqDf,
                                   idName, match_method="contains"):
    """
    For all sequences in a cluster, find the corresponding sequences in the
    dataframe. The extract_id function is used to extract an id that will be
    used to find the matching entry in the dataframe index.
    If the match_method option is set to "contains", the dataframe index will
    match the id if it contains the string. If the match_method option is set
    to "exact", the dataframe index will match the id if it matches exactly.
    Return a list of dataframe indices.
    """
    seq_list = []
    for seq in cluster_dic[cluster_name]:
        # The output of the clustering algorithm CD-HIT only prints the first
        # 19 characters (excluding the first > character) of the sequence id.
        # Therefore, we need to perform a search to map the printed
        # information to the unique id in the dataframe.

        # Extract the accession id which is **unique** for each sequence
        seqAccessionId = extract_id(seq[0])

        # Search for the accession id in the dataframe
        # Note: here we assume a multiindex dataframe, because the same
        # function will be used again later on the multiindex version of the
        # allSeqDf dataframe.
        if match_method == "contains":
            pattern = seqAccessionId
        elif match_method == "exact":
            pattern = r'^' + seqAccessionId + r'$'
        else:
            print("Error extract_sequences_from_cluster: invalid match_method option.")
        dfSearch = allSeqDf.index.get_level_values(idName).str.contains(pattern)
        dfId = None

        # Test if we get more than one match
        nMatches = pd.Series(dfSearch).sum()
        if nMatches == 1:
            # dfId = allSeqDf[dfSearch].index.tolist()[0]
            # Valid for multiindex dataframe
            dfId = allSeqDf.index.get_level_values(idName)[dfSearch].tolist()[0]
        elif nMatches > 1:
            print("Error extract_sequences_from_cluster: sequence id", seq[0],
                  "with accession id \"", seqAccessionId,
                  "\" has several matches in dataframe.")
            dfId = None
        else:
            print("Error extract_sequences_from_cluster: sequence id", seq[0],
                  "with accession id \"", seqAccessionId,
                  "\" not found in dataframe.")
            # print("dfId =", allSeqDf.index.get_level_values(idName)[dfSearch].tolist()[0])
            # print("dfSearch.sum() =", dfSearch.sum())
            dfId = None

        # seq_list is just the list of sequence ids
        seq_list.append(dfId)

    return seq_list
de22b77e897c9ca9d14f250e160d6e06790c9b22
3,636,029
def format_imports(import_statements):
    """
    -----
    examples:

    @need
    from fastest.constants import TestBodies
    @end

    @let
    import_input = TestBodies.TEST_STACK_IMPORTS_INPUT
    output = TestBodies.TEST_STACK_IMPORTS_OUTPUT
    @end

    1) format_imports(import_input) -> output
    -----
    :param import_statements: list
    :return: list
    """
    return [
        '{}\n'.format(import_statement.strip())
        for import_statement in import_statements
        if len(import_statement) > 0
    ]
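A quick usage sketch; each non-empty statement is stripped and newline-terminated:

statements = ["import os  ", "", "  from typing import List"]
print(format_imports(statements))
# ['import os\n', 'from typing import List\n']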
91514d19da4a4dab8c832e6fc2d3c6cbe7cca04a
3,636,030
import re

def parse_modmap(lines):
    """Parse a modmap file."""
    re_range = re.compile(r'KeyCodes range from (\d+) to')
    lower_bound = 8
    re_line = re.compile(r'^\s+(\d+)\s+0x[\dA-Fa-f]+\s+(.*)')
    re_remainder = re.compile(r'\((.+?)\)')
    ret = ModMapper()
    for line in lines.split('\n'):
        if not line:
            continue
        grps = re_range.search(line)
        if grps:
            lower_bound = int(grps.group(1))

        grps = re_line.search(line)
        if grps:
            code = int(grps.group(1)) - lower_bound
            strlst = []
            for grp in re_remainder.finditer(grps.group(2)):
                strlst.append(grp.group(1))

            # We'll pick the first one
            alias = strlst[0].upper()
            my_keyname = 'KEY_' + alias
            my_keyname = my_keyname.replace('XF86', '')
            ret.set_map(code, (my_keyname, alias))

    ret.done()
    return ret
23bfac476377bd549b15bf77fcc47716d61d6908
3,636,031
def read_evoked(fname, setno=None, baseline=None, kind='average', proj=True):
    """Read an evoked dataset

    Parameters
    ----------
    fname : string
        The file name.
    setno : int or str | list of int or str | None
        The index or list of indices of the evoked dataset to read.
        FIF files can contain multiple datasets. If None and there is
        only one dataset in the file, this dataset is loaded.
    baseline : None (default) or tuple of length 2
        The time interval to apply baseline correction. If None do not
        apply it. If baseline is (a, b) the interval is between "a (s)"
        and "b (s)". If a is None the beginning of the data is used and
        if b is None then b is set to the end of the interval. If
        baseline is equal to (None, None) all the time interval is used.
    kind : str
        Either 'average' or 'standard_error', the type of data to read.
    proj : bool
        If False, available projectors won't be applied to the data.

    Returns
    -------
    evoked : instance of Evoked or list of Evoked
        The evoked datasets.
    """
    evoked_node = _get_evoked_node(fname)
    if setno is None and len(evoked_node) > 1:
        fid, _, _ = fiff_open(fname)
        try:
            _, _, t = _get_entries(fid, evoked_node)
        except Exception:
            t = 'None found, must use integer'
        else:
            fid.close()
        raise ValueError('%d datasets present, setno parameter must be set. '
                         'Candidate setno names:\n%s' % (len(evoked_node), t))
    elif isinstance(setno, list):
        return [Evoked(fname, s, baseline=baseline, kind=kind, proj=proj)
                for s in setno]
    else:
        if setno is None:
            setno = 0
        return Evoked(fname, setno, baseline=baseline, kind=kind, proj=proj)
f985a6bb5fdfcec92a567470f78b1ed0b0014356
3,636,032
def construct_lexically_addressed_lambda_tree(lambda_tree, surrounding_scope=None):
    """
    Out of a "bare" LambdaTree constructs a LexicallyAddressedLambdaTree, i.e.
    a tree of lambdas where the nodes are decorated with a dict like so:
    {symbol: scopes_up_count}

    It does this by going down the tree, calling lexical_addressing_x at each
    node (and passing down the surrounding node's info).
    """
    if surrounding_scope is None:
        surrounding_scope = {}

    pmts(surrounding_scope, dict)
    pmts(lambda_tree, LambdaTree)

    lexical_addresses = lexical_addressing_x(surrounding_scope, lambda_tree.lambda_form)

    constructed_children = [
        construct_lexically_addressed_lambda_tree(child, lexical_addresses)
        for child in lambda_tree.children
    ]

    return LexicallyAddressedLambdaTree(
        lambda_tree.lambda_form,
        lexical_addresses,
        constructed_children,
    )
a838a15b287940b6a32acd0f15e38e146a1a871c
3,636,033
def integrate_vec(vec, time_dep=False, method='ss', **kwargs):
    """
    Integrate a (stationary or time-dependent) vector field (N-D Tensor) in tensorflow.

    Aside from directly using tensorflow's numerical integration odeint(), also
    implements "scaling and squaring", and quadrature. Note that the diff. equation
    given to odeint is the one used in quadrature.

    Parameters:
        vec: the Tensor field to integrate.
            If vol_size is the size of the intrinsic volume, and vol_ndim = len(vol_size),
            then vector shape (vec_shape) should be
            [vol_size, vol_ndim] (if stationary)
            [vol_size, vol_ndim, nb_time_steps] (if time dependent)
        time_dep: bool whether vector is time dependent
        method: 'scaling_and_squaring' or 'ss' or 'ode' or 'quadrature'

        if using 'scaling_and_squaring': currently only supports integrating to time point 1.
            nb_steps: int number of steps. Note that this means the vec field gets broken
            down to 2**nb_steps, so nb_steps of 0 means integral = vec.

        if using 'ode':
            out_time_pt (optional): a time point or list of time points at which to evaluate
                Default: 1
            init (optional): the initialization method.
                Currently only supporting 'zero'. Default: 'zero'
            ode_args (optional): dictionary of all other parameters for
                tf.contrib.integrate.odeint()

    Returns:
        int_vec: integral of vector field.
        Same shape as the input if method is 'scaling_and_squaring', 'ss', 'quadrature',
        or 'ode' with out_time_pt not a list. Will have shape [*vec_shape, len(out_time_pt)]
        if method is 'ode' with out_time_pt being a list.

    Todo:
        quadrature for more than just intrinsically out_time_pt = 1
    """

    if method not in ['ss', 'scaling_and_squaring', 'ode', 'quadrature']:
        raise ValueError("method has to be 'scaling_and_squaring' or 'ode'. found: %s" % method)

    if method in ['ss', 'scaling_and_squaring']:
        nb_steps = kwargs['nb_steps']
        assert nb_steps >= 0, 'nb_steps should be >= 0, found: %d' % nb_steps

        if time_dep:
            svec = K.permute_dimensions(vec, [-1, *range(0, vec.shape[-1] - 1)])
            assert 2**nb_steps == svec.shape[0], "2**nb_steps and vector shape don't match"

            svec = svec / (2**nb_steps)
            for _ in range(nb_steps):
                svec = svec[0::2] + tf.map_fn(transform, svec[1::2, :], svec[0::2, :])

            disp = svec[0, :]

        else:
            vec = vec / (2**nb_steps)
            for _ in range(nb_steps):
                vec += transform(vec, vec)
            disp = vec

    elif method == 'quadrature':
        # TODO: could output more than a single timepoint!
        nb_steps = kwargs['nb_steps']
        assert nb_steps >= 1, 'nb_steps should be >= 1, found: %d' % nb_steps

        vec = vec / nb_steps

        if time_dep:
            disp = vec[..., 0]
            for si in range(nb_steps - 1):
                disp += transform(vec[..., si + 1], disp)
        else:
            disp = vec
            for _ in range(nb_steps - 1):
                disp += transform(vec, disp)

    else:  # method == 'ode'
        assert not time_dep, "odeint not implemented with time-dependent vector field"
        fn = lambda disp, _: transform(vec, disp)

        # process time point.
        out_time_pt = kwargs['out_time_pt'] if 'out_time_pt' in kwargs.keys() else 1
        out_time_pt = tf.cast(K.flatten(out_time_pt), tf.float32)
        len_out_time_pt = out_time_pt.get_shape().as_list()[0]
        assert len_out_time_pt is not None, 'len_out_time_pt is None :('
        # initializing with something like tf.zeros(1) gives a control flow issue.
        z = out_time_pt[0:1] * 0.0
        K_out_time_pt = K.concatenate([z, out_time_pt], 0)

        # enable an integration function other than tf.contrib.integrate.odeint
        odeint_fn = tf.contrib.integrate.odeint
        if 'odeint_fn' in kwargs.keys() and kwargs['odeint_fn'] is not None:
            odeint_fn = kwargs['odeint_fn']

        # process initialization
        if 'init' not in kwargs.keys() or kwargs['init'] == 'zero':
            disp0 = vec * 0  # initial displacement is 0
        else:
            raise ValueError('non-zero init for ode method not implemented')

        # compute integration with odeint
        if 'ode_args' not in kwargs.keys():
            kwargs['ode_args'] = {}
        disp = odeint_fn(fn, disp0, K_out_time_pt, **kwargs['ode_args'])
        disp = K.permute_dimensions(disp[1:len_out_time_pt + 1, :], [*range(1, len(disp.shape)), 0])

        # return
        if len_out_time_pt == 1:
            disp = disp[..., 0]

    return disp
ae11bd391321edafce26b73e680f56e67a2f776c
3,636,034
def get_all_news(num_page, limit):
    """Get all news, paginated."""
    news = News.objects.paginate(page=num_page, per_page=limit)
    response_object = {
        'status': 'success',
        'data': news.items,
    }
    return jsonify(response_object), 200
a74e30958aedc8dfbc34174ab7e420a22a68c452
3,636,035
def correlations(X, y=None):
    """
    Given a pandas DataFrame, return the correlation matrix and a figure
    representing the correlations.

    :param X: [pandas DataFrame] predictor columns
    :param y: [pandas Series] target column
    :return: correlation matrix and figure representing the correlations
    """
    assert (isinstance(X, pd.DataFrame)) and (not X.empty), 'X should be a valid pandas DataFrame'
    numerical_cols = X.select_dtypes(include=[np.number]).columns
    if len(numerical_cols) == 0:
        return None, None

    df = X.copy()
    if y is not None:
        df[y.name] = y

    corr = df.corr()
    fig = sns.clustermap(corr, linewidths=.5, figsize=constants.FIGURE_SIZE)
    plt.suptitle('Raw Features Correlation', fontsize=20)
    return corr, fig
5a84ce44a99662cb4d5cbc72324170e9535aa633
3,636,036
def consensus():
    """
    Resolve the blockchain based on the consensus algorithm when multiple
    nodes are connected.
    """
    replaced = blockchain.resolve_conflicts()

    if replaced:
        response = {
            'message': 'Our chain was replaced',
            'new_chain': blockchain.chain
        }
    else:
        response = {
            'message': 'Our chain is authoritative',
            'chain': blockchain.chain
        }

    return jsonify(response), 200
1026ffe3da9d0306b55c1807a82576d456825b6f
3,636,037
import os as _os
import time as _time
import warnings as _warnings


def create_idletomography_report(results, filename, title="auto", ws=None,
                                 auto_open=False, link_to=None, brevity=0,
                                 advancedOptions=None, verbosity=1):
    """
    Creates an Idle Tomography report, summarizing the results of running
    idle tomography on a data set.

    Parameters
    ----------
    results : IdleTomographyResults
        An object which represents the set of results from an idle tomography
        run, typically obtained from running :func:`do_idle_tomography` OR a
        dictionary of such objects, representing multiple idle tomography runs
        to be compared (typically all with *different* data sets). The keys of
        this dictionary are used to label different data sets that are
        selectable in the report.

    filename : string, optional
        The output filename where the report file(s) will be saved. If None,
        then no output file is produced (but the returned Workspace still
        caches all intermediate results).

    title : string, optional
        The title of the report. "auto" causes a random title to be generated
        (which you may or may not like).

    ws : Workspace, optional
        The workspace used as a scratch space for performing the calculations
        and visualizations required for this report. If you're creating
        multiple reports with similar tables, plots, etc., it may boost
        performance to use a single Workspace for all the report generation.

    auto_open : bool, optional
        If True, automatically open the report in a web browser after it has
        been generated.

    link_to : list, optional
        If not None, a list of one or more items from the set
        {"tex", "pdf", "pkl"} indicating whether or not to create and include
        links to Latex, PDF, and Python pickle files, respectively. "tex"
        creates latex source files for tables; "pdf" renders PDFs of tables
        and plots; "pkl" creates Python versions of plots (pickled python
        data) and tables (pickled pandas DataFrames).

    advancedOptions : dict, optional
        A dictionary of advanced options for which the default values are
        usually fine. Here are the possible keys of `advancedOptions`:

        - connected : bool, optional
            Whether output HTML should assume an active internet connection.
            If True, then the resulting HTML file size will be reduced because
            it will link to web resources (e.g. CDN libraries) instead of
            embedding them.
        - cachefile : str, optional
            filename with cached workspace results
        - precision : int or dict, optional
            The amount of precision to display. A dictionary with keys
            "polar", "sci", and "normal" can separately specify the precision
            for complex angles, numbers in scientific notation, and everything
            else, respectively. If an integer is given, this same value is
            taken for all precision types. If None, then
            `{'normal': 6, 'polar': 3, 'sci': 0}` is used.
        - resizable : bool, optional
            Whether plots and tables are made with resize handles and can be
            resized within the report.
        - autosize : {'none', 'initial', 'continual'}
            Whether tables and plots should be resized, either initially --
            i.e. just upon first rendering (`"initial"`) -- or whenever the
            browser window is resized (`"continual"`).

    verbosity : int, optional
        How much detail to send to stdout.

    Returns
    -------
    Workspace
        The workspace object used to create the report
    """
    tStart = _time.time()
    printer = _VerbosityPrinter.build_printer(verbosity)  # , comm=comm)

    if advancedOptions is None:
        advancedOptions = {}
    precision = advancedOptions.get('precision', None)
    cachefile = advancedOptions.get('cachefile', None)
    connected = advancedOptions.get('connected', False)
    resizable = advancedOptions.get('resizable', True)
    autosize = advancedOptions.get('autosize', 'initial')
    mdl_sim = advancedOptions.get('simulator', None)  # a model

    if filename and filename.endswith(".pdf"):
        fmt = "latex"
    else:
        fmt = "html"

    printer.log('*** Creating workspace ***')
    if ws is None:
        ws = _ws.Workspace(cachefile)

    if title is None or title == "auto":
        if filename is not None:
            autoname = _autotitle.generate_name()
            title = "Idle Tomography Report for " + autoname
            _warnings.warn(("You should really specify `title=` when generating reports,"
                            " as this makes it much easier to identify them later on. "
                            "Since you didn't, pyGSTi has generated a random one"
                            " for you: '{}'.").format(autoname))
        else:
            title = "N/A"  # No title - but it doesn't matter since filename is None

    results_dict = results if isinstance(results, dict) else {"unique": results}

    renderMath = True

    qtys = {}  # stores strings to be inserted into report template

    def addqty(b, name, fn, *args, **kwargs):
        """Adds an item to the qtys dict within a timed block"""
        if b is None or brevity < b:
            with _timed_block(name, formatStr='{:45}', printer=printer, verbosity=2):
                qtys[name] = fn(*args, **kwargs)

    qtys['title'] = title
    qtys['date'] = _time.strftime("%B %d, %Y")

    pdfInfo = [('Author', 'pyGSTi'), ('Title', title),
               ('Keywords', 'GST'), ('pyGSTi Version', _version.__version__)]
    qtys['pdfinfo'] = _merge.to_pdfinfo(pdfInfo)

    # Generate Switchboard
    printer.log("*** Generating switchboard ***")

    # Create master switchboard
    switchBd, dataset_labels = _create_switchboard(ws, results_dict)

    if fmt == "latex" and (len(dataset_labels) > 1):
        raise ValueError("PDF reports can only show a *single* dataset,"
                         " estimate, and gauge optimization.")

    # Generate Tables
    printer.log("*** Generating tables ***")

    multidataset = bool(len(dataset_labels) > 1)
    #REM intErrView = [False,True,True]

    if fmt == "html":
        qtys['topSwitchboard'] = switchBd
        #REM qtys['intrinsicErrSwitchboard'] = switchBd.view(intErrView,"v1")

    results = switchBd.results
    #REM errortype = switchBd.errortype
    #REM errorop = switchBd.errorop
    A = None  # no brevity restriction: always display; for "Summary"- & "Help"-tab figs

    # Brevity key:
    # TODO - everything is always displayed for now
    addqty(A, 'intrinsicErrorsTable', ws.IdleTomographyIntrinsicErrorsTable, results)
    addqty(A, 'observedRatesTable', ws.IdleTomographyObservedRatesTable, results,
           20, mdl_sim)  # HARDCODED - show only top 20 rates  # errortype, errorop,

    # Generate plots
    printer.log("*** Generating plots ***")

    toggles = {}
    toggles['CompareDatasets'] = False  # not comparable by default
    if multidataset:
        # check if data sets are comparable (if they have the same sequences)
        comparable = True
        gstrCmpList = list(results_dict[dataset_labels[0]].dataset.keys())  # maybe use circuit_lists['final']??
        for dslbl in dataset_labels:
            if list(results_dict[dslbl].dataset.keys()) != gstrCmpList:
                _warnings.warn("Not all data sets are comparable - no comparisons will be made.")
                comparable = False
                break

        if comparable:
            # initialize a new "dataset comparison switchboard"
            dscmp_switchBd = ws.Switchboard(
                ["Dataset1", "Dataset2"],
                [dataset_labels, dataset_labels],
                ["buttons", "buttons"], [0, 1]
            )
            dscmp_switchBd.add("dscmp", (0, 1))
            dscmp_switchBd.add("dscmp_gss", (0,))
            dscmp_switchBd.add("refds", (0,))

            for d1, dslbl1 in enumerate(dataset_labels):
                dscmp_switchBd.dscmp_gss[d1] = results_dict[dslbl1].circuit_structs['final']
                dscmp_switchBd.refds[d1] = results_dict[dslbl1].dataset  # only used for # of spam labels below

            # dsComp = dict()
            all_dsComps = dict()
            indices = []
            for i in range(len(dataset_labels)):
                for j in range(len(dataset_labels)):
                    indices.append((i, j))

            #REMOVE (for using comm)
            #if comm is not None:
            #    _, indexDict, _ = _distribute_indices(indices, comm)
            #    rank = comm.Get_rank()
            #    for k, v in indexDict.items():
            #        if v == rank:
            #            d1, d2 = k
            #            dslbl1 = dataset_labels[d1]
            #            dslbl2 = dataset_labels[d2]
            #            ds1 = results_dict[dslbl1].dataset
            #            ds2 = results_dict[dslbl2].dataset
            #            dsComp[(d1, d2)] = _DataComparator(
            #                [ds1, ds2], DS_names=[dslbl1, dslbl2])
            #    dicts = comm.gather(dsComp, root=0)
            #    if rank == 0:
            #        for d in dicts:
            #            for k, v in d.items():
            #                d1, d2 = k
            #                dscmp_switchBd.dscmp[d1, d2] = v
            #                all_dsComps[(d1,d2)] = v
            #else:
            for d1, d2 in indices:
                dslbl1 = dataset_labels[d1]
                dslbl2 = dataset_labels[d2]
                ds1 = results_dict[dslbl1].dataset
                ds2 = results_dict[dslbl2].dataset
                all_dsComps[(d1, d2)] = _DataComparator([ds1, ds2], DS_names=[dslbl1, dslbl2])
                dscmp_switchBd.dscmp[d1, d2] = all_dsComps[(d1, d2)]

            qtys['dscmpSwitchboard'] = dscmp_switchBd
            addqty(4, 'dsComparisonSummary', ws.DatasetComparisonSummaryPlot,
                   dataset_labels, all_dsComps)
            #addqty('dsComparisonHistogram', ws.DatasetComparisonHistogramPlot, dscmp_switchBd.dscmp, display='pvalue')
            addqty(4, 'dsComparisonHistogram', ws.ColorBoxPlot, 'dscmp',
                   dscmp_switchBd.dscmp_gss, dscmp_switchBd.refds, None,
                   dscomparator=dscmp_switchBd.dscmp, typ="histogram")
            addqty(1, 'dsComparisonBoxPlot', ws.ColorBoxPlot, 'dscmp',
                   dscmp_switchBd.dscmp_gss, dscmp_switchBd.refds, None,
                   dscomparator=dscmp_switchBd.dscmp)
            toggles['CompareDatasets'] = True
        else:
            toggles['CompareDatasets'] = False  # not comparable!
    else:
        toggles['CompareDatasets'] = False

    if filename is not None:
        if True:  # comm is None or comm.Get_rank() == 0:
            # 3) populate template file => report file
            printer.log("*** Merging into template file ***")

            if fmt == "html":
                if filename.endswith(".html"):
                    _merge.merge_jinja_template(
                        qtys, filename, templateDir='~idletomography_html_report',
                        auto_open=auto_open, precision=precision,
                        link_to=link_to, connected=connected, toggles=toggles,
                        renderMath=renderMath, resizable=resizable,
                        autosize=autosize, verbosity=printer
                    )
                else:
                    _merge.merge_jinja_template_dir(
                        qtys, filename, templateDir='~idletomography_html_report',
                        auto_open=auto_open, precision=precision,
                        link_to=link_to, connected=connected, toggles=toggles,
                        renderMath=renderMath, resizable=resizable,
                        autosize=autosize, verbosity=printer
                    )

            elif fmt == "latex":
                raise NotImplementedError("No PDF version of this report is available yet.")
                templateFile = "idletomography_pdf_report.tex"
                base = _os.path.splitext(filename)[0]  # no extension
                _merge.merge_latex_template(qtys, templateFile, base + ".tex",
                                            toggles, precision, printer)

                # compile report latex file into PDF
                cmd = _ws.WorkspaceOutput.default_render_options.get('latex_cmd', None)
                flags = _ws.WorkspaceOutput.default_render_options.get('latex_flags', [])
                assert(cmd), "Cannot render PDF documents: no `latex_cmd` render option."
                printer.log("Latex file(s) successfully generated. Attempting to compile with %s..." % cmd)
                _merge.compile_latex_report(base, [cmd] + flags, printer, auto_open)
            else:
                raise ValueError("Unrecognized format: %s" % fmt)
    else:
        printer.log("*** NOT Merging into template file (filename is None) ***")

    printer.log("*** Report Generation Complete!  Total time %gs ***" % (_time.time() - tStart))
    return ws
b0e9bae7a4c890005bc50fc80c5febbeaac22059
3,636,038
from hashlib import sha256

def sha256_secrethash(secret: Secret) -> SecretHash:
    """Compute the secret hash using sha256."""
    return SecretHash(sha256(secret).digest())
9af5d9507575e5fe474eb5fe73d8060825d91add
3,636,039
import requests

def make_http_request(method, url, check_response=None, *args, **kwargs):
    """
    Make an HTTP request with the global session.

    :return: The response object.
    :raises: WebserviceException
    """
    session = get_requests_session()
    response = ClientBase._execute_func(
        lambda *args, **kwargs: session.request(method, *args, **kwargs),
        url, *args, **kwargs)

    try:
        if check_response is None:
            response.raise_for_status()
        elif not check_response(response):
            raise WebserviceException('Received bad response from service:\n'
                                      'Response Code: {}\n'
                                      'Headers: {}\n'
                                      'Content: {}'.format(response.status_code,
                                                           response.headers,
                                                           response.content),
                                      logger=module_logger)
    except requests.Timeout:
        raise WebserviceException('Error, request to {} timed out.'.format(url),
                                  logger=module_logger)
    except requests.exceptions.HTTPError:
        raise WebserviceException('Received bad response from service:\n'
                                  'Response Code: {}\n'
                                  'Headers: {}\n'
                                  'Content: {}'.format(response.status_code,
                                                       response.headers,
                                                       response.content),
                                  logger=module_logger)

    return response
6f88174e61a75e3452a4cf839bb956aba27411f9
3,636,040
def get_pages_matches_no_prep(title, edition, archive, filename, text, keysentences):
    """
    Get pages within a document that include one or more keywords.
    For each page that includes a specific keyword, add a tuple of form:
        (<TITLE>, <EDITION>, <ARCHIVE>, <FILENAME>, <TEXT>, <KEYWORD>)

    If a keyword occurs more than once on a page, there will be only one
    tuple for the page for that keyword.
    If more than one keyword occurs on a page, there will be one tuple per
    keyword.

    :return: list of tuples
    """
    matches = []
    for keysentence in keysentences:
        # sentence_match = get_sentences_list_matches(text, keysentence)
        sentence_match_idx = get_text_keyword_idx(text, keysentence)
        # The original checked the undefined name `sentence_match`,
        # which raises a NameError; check the computed indices instead.
        if sentence_match_idx:
            match = (title, edition, archive, filename, text, keysentence)
            matches.append(match)
    return matches
9a536f7532a77c39172a7deb32dd99d73f629f02
3,636,041
import mpmath

def sf(k, r, p):
    """
    Survival function of the negative binomial distribution.

    Parameters
    ----------
    r : int
        Number of failures until the experiment is stopped.
    p : float
        Probability of success.
    """
    with mpmath.extradps(5):
        k = mpmath.mpf(k)
        r = mpmath.mpf(r)
        p = mpmath.mpf(p)
        return mpmath.betainc(k + 1, r, 0, p, regularized=True)
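A minimal usage sketch. The regularized incomplete beta form above gives P(X > k); at p = 0.5 the parameterization happens to coincide with scipy's nbinom, which makes a convenient cross-check (an assumption worth verifying for other p):

import mpmath
from scipy import stats

mpmath.mp.dps = 25
print(sf(3, 5, 0.5))             # P(X > 3) for r=5, p=0.5, as an mpf
print(stats.nbinom.sf(3, 5, 0.5))  # should agree at p = 0.5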
d836e2cd762c5fa2be11d83aae31ba5d8589b3f0
3,636,042
def process_menu(menu: Menu, perms: PermWrapper) -> MenuGroup:
    """Enable a menu item if view permissions exist for the user."""
    for group in menu.groups:
        for item in group.items:
            # Parse the URL template tag to a permission string.
            app, scope = item.url.split(":")
            object_name = scope.replace("_list", "")
            view_perm = f"{app}.view_{scope}"
            # The original used the literal "add_object_name";
            # interpolate the computed object name instead.
            add_perm = f"{app}.add_{object_name}"
            if view_perm in perms:
                # If the view permission for each item exists, toggle
                # the `disabled` field, which will be used in the UI.
                item.disabled = False
            if add_perm in perms:
                if item.add_url is not None:
                    item.has_add = True
                if item.import_url is not None:
                    item.has_import = True
    return menu
748d9e1687c661a9b1a2dee32315eadaf54e4fac
3,636,043
def _grid_in_property(field_name, docstring, read_only=False, closed_only=False):
    """Create a GridIn property."""
    def getter(self):
        if closed_only and not self._closed:
            raise AttributeError("can only get %r on a closed file" % field_name)
        # Protect against PHP-237
        if field_name == 'length':
            return self._file.get(field_name, 0)
        return self._file.get(field_name, None)

    def setter(self, value):
        if self._closed:
            self._coll.files.update_one({"_id": self._file["_id"]},
                                        {"$set": {field_name: value}})
        self._file[field_name] = value

    if read_only:
        docstring += "\n\nThis attribute is read-only."
    elif closed_only:
        docstring = "%s\n\n%s" % (docstring, "This attribute is read-only and "
                                  "can only be read after :meth:`close` "
                                  "has been called.")

    if not read_only and not closed_only:
        return property(getter, setter, doc=docstring)
    return property(getter, doc=docstring)
891e3a828b496467c201ad14e0540abf235d7e64
3,636,044
import logging

def Run(benchmark_spec):
    """Runs memtier against memcached and gathers the results.

    Args:
        benchmark_spec: The benchmark specification. Contains all data that is
            required to run the benchmark.

    Returns:
        A list of sample.Sample instances.
    """
    client = benchmark_spec.vm_groups['client'][0]
    server = benchmark_spec.vm_groups['server'][0]
    server_ip = server.internal_ip
    metadata = {'memcached_version': memcached_server.GetVersion(server),
                'memcached_server_size': FLAGS.memcached_size_mb}

    logging.info('Start benchmarking memcached using memtier.')
    samples = memtier.Run(client, server_ip, memcached_server.MEMCACHED_PORT)
    for sample in samples:
        sample.metadata.update(metadata)

    return samples
87560ad915fe9626632c1ac24fdbdc461a5dec83
3,636,045
from pb_bss import distribution

def get_trainer_class_from_model(parameter):
    """
    >>> from IPython.lib.pretty import pprint
    >>> from pb_bss.distribution.cacgmm import (
    ...     ComplexAngularCentralGaussian,
    ... )
    >>> get_trainer_class_from_model(ComplexAngularCentralGaussian).__name__
    'ComplexAngularCentralGaussianTrainer'
    >>> get_trainer_class_from_model(ComplexAngularCentralGaussian()).__name__
    'ComplexAngularCentralGaussianTrainer'
    """
    if not hasattr(parameter, '__name__'):
        parameter = parameter.__class__
    name = parameter.__name__
    assert 'Trainer' not in name, name
    name = name + 'Trainer'
    return getattr(distribution, name)
713e5ea055149b851e8c734ebaabba3491364ccb
3,636,046
def is_fractional_it(input_str, short_scale=False):
    """
    This function takes the given text and checks if it is a fraction.
    Updated to Italian from the en version 18.8.9

    Args:
        input_str (str): the string to check if fractional
        short_scale (bool): use short scale if True, long scale if False

    Returns:
        (bool) or (float): False if not a fraction, otherwise the fraction
    """
    input_str = input_str.lower()
    if input_str.endswith('i', -1) and len(input_str) > 2:
        input_str = input_str[:-1] + "o"  # normalize plurals

    fracts_it = {"intero": 1, "mezza": 2, "mezzo": 2}

    if short_scale:
        for num in _SHORT_ORDINAL_STRING_IT:
            if num > 2:
                fracts_it[_SHORT_ORDINAL_STRING_IT[num]] = num
    else:
        for num in _LONG_ORDINAL_STRING_IT:
            if num > 2:
                fracts_it[_LONG_ORDINAL_STRING_IT[num]] = num

    if input_str in fracts_it:
        return 1.0 / fracts_it[input_str]
    return False
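A quick usage sketch, assuming the module's ordinal lookup tables (_SHORT_ORDINAL_STRING_IT / _LONG_ORDINAL_STRING_IT) are available; these inputs hit only the hard-coded entries:

print(is_fractional_it("mezzo"))   # 0.5
print(is_fractional_it("intero"))  # 1.0
print(is_fractional_it("casa"))    # False -- not a fraction word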
c62af11f56f81fe84af7723e181a494d64f07bae
3,636,047
def qchem2molgraph(logfile, return_qobj=False, return_none_on_err=False, **kwargs):
    """
    Convert a Q-Chem logfile to a MolGraph object. Return the QChem object in
    addition to the MolGraph if return_qobj is True. Catch QChemError if
    return_none_on_err is True and return None. Options in kwargs are passed
    to valid_job. If the job is not valid, also return None.
    """
    try:
        q = QChem(logfile=logfile)
    except QChemError as e:
        if return_none_on_err:
            print(e)
            return None
        else:
            raise

    if not valid_job(q, **kwargs):
        return None

    energy = q.get_energy() + q.get_zpe()  # With ZPE
    symbols, coords = q.get_geometry()
    mol = MolGraph(symbols=symbols, coords=coords, energy=energy)
    mol.infer_connections()

    if return_qobj:
        return mol, q
    else:
        return mol
4cbebcd36ae10e060e0c16fd00501a41e46b425c
3,636,048
def extract_signals(data, fs, segmentation_times):
    """
    Given a set of segmentation times, extract the corresponding signals from
    the raw trace.

    Args:
        data : Numpy
            The input seismic data containing both start and end times of the
            seismic data.
        fs : float
            The sampling frequency.
        segmentation_times : list
            A list containing the segmentation of the file.

    Returns:
        A list containing the extracted signals and a list of their durations
        in seconds.
    """
    signals = []
    durations = []
    for m in segmentation_times:
        segmented = data[int(m[0] * fs): int(m[1] * fs)]
        signals.append(segmented)
        durations.append(segmented.shape[0] / float(fs))
    return signals, durations
81ff3d0b343dbba218eb5d2d988b8ca20d1a7209
3,636,049
import glob

def get_dataset(
    file_pattern,
    n_classes,
    batch_size,
    volume_shape,
    plane,
    n_slices=24,
    block_shape=None,
    n_epochs=None,
    mapping=None,
    shuffle_buffer_size=None,
    num_parallel_calls=AUTOTUNE,
    mode="train",
):
    """Returns a tf.data.Dataset after preprocessing from tfrecords for
    training and validation.

    Parameters
    ----------
    file_pattern: glob pattern matching the tfrecord files to load
    n_classes: number of classes in the labels
    """
    files = glob.glob(file_pattern)
    if not files:
        raise ValueError("no files found for pattern '{}'".format(file_pattern))

    compressed = _is_gzipped(files[0])
    shuffle = bool(shuffle_buffer_size)

    ds = nobrainer.dataset.tfrecord_dataset(
        file_pattern=file_pattern,
        volume_shape=volume_shape,
        shuffle=shuffle,
        scalar_label=True,
        compressed=compressed,
        num_parallel_calls=num_parallel_calls,
    )

    def _ss(x, y):
        x, y = structural_slice(x, y, plane, n_slices)
        return (x, y)

    ds = ds.map(_ss, num_parallel_calls)
    ds = ds.prefetch(buffer_size=batch_size)

    if batch_size is not None:
        ds = ds.batch(batch_size=batch_size, drop_remainder=True)

    if mode == "train":
        if shuffle_buffer_size:
            ds = ds.shuffle(buffer_size=shuffle_buffer_size)
        # Repeat the dataset n_epochs times
        ds = ds.repeat(n_epochs)

    return ds
da09356f40ab1873efbc7fe03d135f3ed90e951b
3,636,050
import math

def cos_d(x: float) -> float:
    """
    This function takes its input in radians and returns the computed
    derivative of cos, which is -sin.
    """
    return -math.sin(x)
e8a0ba95d6a53d8c88bfa867dd423e23eb782911
3,636,051
def rhist(ax, data, **kwargs):
    """
    Create a hist plot with default style parameters to look like ggplot2.
    kwargs can be passed to change other parameters.
    """
    defaults = {'facecolor': '0.3',
                'edgecolor': '0.36',
                'linewidth': 1,
                'rwidth': 1}
    # dict.iteritems() is Python 2 only; items() works on both.
    for x, y in defaults.items():
        kwargs.setdefault(x, y)
    return ax.hist(data, **kwargs)
a0ddefcfd58d42ac6fc2cbaa76a98252debd6e43
3,636,052
def follow(request, username_to_follow):
    """View used to let the logged-in user follow another user."""
    try:
        # Only add the relation if the user is not already being followed.
        if not request.user.following.filter(username=username_to_follow).exists():
            user_to_follow = get_object_or_404(User, username=username_to_follow)
            request.user.following.add(user_to_follow)
            return JsonResponse({'message': 'success'})
        else:
            return JsonResponse({'message': 'user is already being followed'})
    except Exception:
        res = JsonResponse({'message': 'error'})
        res.status_code = 400
        return res
ad4b60813a35100378a114f67e916e9c8a665a5b
3,636,053
def Neq(left: Expr, right: Expr) -> BinaryExpr:
    """Inequality expression. Checks if left != right.

    Args:
        left: A value to check.
        right: The other value to check. Must evaluate to the same type as left.
    """
    return BinaryExpr(Op.neq, right.type_of(), TealType.uint64, left, right)
b0243c6ea70058bd072b5bf0e4be9e185faea0df
3,636,054
import numpy as np

def create_simplex_matrix(
        dimensions: int,
        distance: float) -> np.ndarray:
    """
    Create a centered, normalized N-dimensional simplex structure.
    The structure is described by N+1 N-dimensional points that all have the
    same distance between them.

    :param dimensions: The number of dimensions of the simplex
    :param distance: Distance between the points
    :return: [dimensions+1, dimensions] matrix of points, each row a point
    """
    # --------------------------------
    # argument checking
    if dimensions <= 0:
        raise ValueError("dimensions should be > 0")
    if distance <= 0:
        raise ValueError("distance should be > 0")

    # --------------------------------
    # An N-dimensional simplex requires N+1 points
    points = dimensions + 1

    # create identity matrix (N points)
    # (np.float is a removed alias of the builtin float)
    matrix = np.identity(dimensions, dtype=float)

    # We create the last point:
    # we need an (N+1)-th point with the same distance to all other points,
    # so we have to choose (x, x, ..., x).
    point = np.ones(shape=(1, dimensions), dtype=float) * \
        ((1. + np.sqrt(dimensions + 1.)) / dimensions)
    matrix = np.vstack([matrix, point])

    # center points to zero mean
    mean_m = np.mean(matrix, axis=0)
    matrix = matrix - mean_m

    # all points now have sqrt(2) distance between them;
    # points lie on the surface of an n-dimensional sphere,
    # so calculate the radius of that sphere
    radius = np.mean(np.linalg.norm(matrix, ord=2, axis=1))

    # sine of the angle between the origin (center),
    # a point A, and the midpoint intersection
    sin_theta = (np.sqrt(2) / 2.) / radius

    # go through the points and normalize to the requested distance
    for i in range(points):
        norm2 = np.linalg.norm(matrix[i], ord=2)
        matrix[i] = matrix[i] * ((distance / (sin_theta * 2.)) / norm2)

    return matrix
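A quick sanity check of the construction using scipy's pdist; all pairwise distances should come out equal to the requested value:

import numpy as np
from scipy.spatial.distance import pdist

m = create_simplex_matrix(dimensions=3, distance=2.0)
print(m.shape)                 # (4, 3) -- four vertices of a regular tetrahedron
print(np.round(pdist(m), 6))   # all six pairwise distances should equal 2.0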
c23f8ffc8d5578debc6b7ea92780aa3438a11d86
3,636,055
import numpy as np

def asymmetric_lorentz_gauss_sum(x, mu, fwhm_l, fwhm_g, alpha=1.0, beta=1.5):
    """Sum of an asymmetric Lorentzian and a Gaussian."""
    ygaus = np.array(gauss_one(x, fwhm_g, mu))
    ylorentz = np.array(asymmetric_lorentz(x, fwhm_l, mu,
                                           alpha=alpha, beta=beta))
    ydata = ylorentz + ygaus
    return ydata
163ea45934e199cf06ded309c6d51a0d4cb9ef46
3,636,056
def access(path, mode):
    """Use the real uid/gid to test for access to path.

    :type path: bytes | unicode
    :type mode: int
    :rtype: bool
    """
    return False
c0baab44d63bf354e4da9e5d0048d8b05c1f8040
3,636,057
import unicodedata

def _is_punctuation(char: str) -> bool:
    """Checks whether `char` is a punctuation character."""
    cp = ord(char)
    # We treat all non-letter/number ASCII as punctuation.
    # Characters such as "^", "$", and "`" are not in the Unicode
    # Punctuation class but we treat them as punctuation anyways, for
    # consistency.
    if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
            (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
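A few spot checks covering both branches (the ASCII ranges and the Unicode category fallback):

for ch in ["$", "`", ",", "a", " ", "\u00bf"]:
    print(repr(ch), _is_punctuation(ch))
# '$' True, '`' True, ',' True (ASCII ranges)
# 'a' False, ' ' False
# '¿' True (non-ASCII, Unicode category Po)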
be6f469f4e53ff911d97edfda3eeb98687db8d59
3,636,058
import ast

def get_imports(filename, source=None):
    """
    Returns a list of #ImportInfo tuples for all module imports in the
    specified Python source file or the *source* string. Note that
    `from X import Y` imports could also refer to a member of the module X
    named Y and not the module X.Y.
    """
    def _find_nodes(ast_node, predicate):
        result = []

        class Visitor(ast.NodeVisitor):
            def visit(self, node):
                if predicate(node):
                    result.append(node)
                self.generic_visit(node)

        Visitor().generic_visit(ast_node)
        return result

    if source is None:
        with open(filename, 'rb') as fp:
            source = fp.read()

    module = ast.parse(source, filename)
    result = []

    def _is_import_call(x):
        return isinstance(x, ast.Call) and isinstance(x.func, ast.Name) and \
            x.func.id == '__import__' and x.args and isinstance(x.args[0], ast.Str)

    for node in _find_nodes(module, _is_import_call):
        result.append(ImportInfo(node.args[0].s, filename, node.lineno, False))
    for node in _find_nodes(module, lambda x: isinstance(x, ast.Import)):
        for alias in node.names:
            result.append(ImportInfo(alias.name, filename, node.lineno, False))
    for node in _find_nodes(module, lambda x: isinstance(x, ast.ImportFrom)):
        parent_name = '.' * node.level + (node.module or '')
        result.append(ImportInfo(parent_name, filename, node.lineno, False))
        for alias in node.names:
            if alias.name == '*':
                continue
            import_name = parent_name
            if not import_name.endswith('.'):
                import_name += '.'
            import_name += alias.name
            result.append(ImportInfo(import_name, filename, node.lineno, True))

    result.sort(key=lambda x: x.lineno)
    return result
4c866ccef90d047bb1de1d2e2edba97ffcc88636
3,636,059
def follow_card(card, deck_size, shuffles, shuffler):
    """Follow the position of the card in a deck of deck_size during shuffles."""
    position = card
    for shuffle, parameter in shuffles:
        shuffling = shuffler(shuffle)
        position = shuffling(deck_size, position, parameter)
    return position
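A minimal sketch of a shuffler factory the function could be paired with (hypothetical names; the real dispatch lives elsewhere in the module). Each technique maps a single card position to its new position:

def example_shuffler(shuffle):
    """Map a shuffle name to a position-tracking function (illustrative only)."""
    def deal_into_new_stack(deck_size, position, _):
        return deck_size - 1 - position

    def cut(deck_size, position, n):
        return (position - n) % deck_size

    def deal_with_increment(deck_size, position, n):
        return (position * n) % deck_size

    return {"stack": deal_into_new_stack,
            "cut": cut,
            "increment": deal_with_increment}[shuffle]

# Track card 3 through a cut of 2, then a deal-into-new-stack, on a 10-card deck.
print(follow_card(3, 10, [("cut", 2), ("stack", None)], example_shuffler))  # 8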
10774bd899afde0d64cbf800bc3dad1d86543022
3,636,060
import tempfile

def get_secure_directory():
    """Get a temporary secure sub directory."""
    temp_dir = tempfile.mkdtemp(suffix='', prefix='')
    return temp_dir
08fb9587a2d17778e9a733312b08b504d6aff7bd
3,636,061
def area(shape):
    """Multimethod dispatch key"""
    return shape.get('type')
0561d97ad21afdda160bd3063948e884a5c02945
3,636,062
import numpy as np

def get_trie_properties(trie, offsets, values):
    """Obtain the length of every trigger in the trie."""
    anchor_length = np.zeros(len(values), dtype=np.int32)
    start, end = 0, 0
    for idx, key in enumerate(trie.iterkeys()):
        end = offsets[idx]
        anchor_length[start:end] = len(key)
        start = end
    return anchor_length
ade365befbc481efec77a9c7b1d2c2e667c108c9
3,636,063
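A hedged driver for get_trie_properties, assuming a trie that iterates keys in the same order the offsets array was built (the real trie type is not shown in the snippet):

import numpy as np

class FakeTrie:
    # Minimal stand-in exposing only the iterkeys() interface used above.
    def __init__(self, keys):
        self._keys = list(keys)
    def iterkeys(self):
        return iter(self._keys)

trie = FakeTrie(["ab", "abc", "b"])
offsets = np.array([2, 3, 6])  # exclusive end index of each key's matches in values
values = np.zeros(6)           # one entry per trigger match
print(get_trie_properties(trie, offsets, values))  # [2 2 3 1 1 1]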
import os def get_available_qmix_configs(configs_dir=None): """ Create a list of available qmix configurations Parameters ---------- configs_dir : string or None The parent directory containing the Qmix configurations. If ``None``, assume the default directory used by Qmix Elements, i.e., `C:/Users/Public/Documents/QmixElements/Projects/default_project/Configurations/`. Returns ------- list of strings Names of available Qmix configurations. Raises ------ ValueError If the configuration directory does not exist. """ if configs_dir is None: configs_dir = DEFAULT_CONFIGS_DIR def get_immediate_subdirectories(a_dir): return [name for name in os.listdir(a_dir) if os.path.isdir(os.path.join(a_dir, name))] if not os.path.exists(configs_dir): msg = 'The configuration directory does not exist: %s' % configs_dir raise ValueError(msg) return get_immediate_subdirectories(configs_dir)
5fea3eab95253a5e3eb2f234cfc98cf71207088b
3,636,064
import torch def besseli(X, order=0, Nk=64): """ Approximates the modified Bessel function of the first kind, of either order zero or one. OBS: Inputting float32 can lead to numerical issues. Args: X (torch.tensor): Input (N, 1). order (int, optional): 0 or 1, defaults to 0. Nk (int, optional): Terms in summation, higher number, better approximation. Defaults to 64. Returns: I (torch.tensor): Modified Bessel function of the first kind (N, 1). See also: https://mathworld.wolfram.com/ModifiedBesselFunctionoftheFirstKind.html """ device = X.device dtype = X.dtype if len(X.shape) == 1: X = X[:, None] N = X.shape[0] else: N = 1 # Compute factorial term X = X.repeat(1, Nk) K = torch.arange(0, Nk, dtype=dtype, device=device) K = K.repeat(N, 1) K_factorial = (K + 1).lgamma().exp() if order == 0: # ..0th order i = torch.sum((0.25 * X ** 2) ** K / (K_factorial ** 2), dim=1, dtype=torch.float64) else: # ..1st order i = torch.sum( 0.5 * X * ((0.25 * X ** 2) ** K / (K_factorial * torch.exp(torch.lgamma(K + 2)))), dim=1, dtype=torch.float64) return i
5233398b240244f13af595088077b8e43d2a4b2f
3,636,065
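A sanity check of besseli against tabulated values of the modified Bessel functions (I0(0.5) ~ 1.0635, I0(1) ~ 1.2661, I0(2) ~ 2.2796; I1 ~ 0.2579, 0.5652, 1.5906):

import torch

x = torch.tensor([0.5, 1.0, 2.0], dtype=torch.float64)
print(besseli(x, order=0))  # ~[1.0635, 1.2661, 2.2796]
print(besseli(x, order=1))  # ~[0.2579, 0.5652, 1.5906]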
import numpy as np def laplacian_total_variation_kernel(x, y, sigma=1.0, **kwargs): """Geodesic Laplacian kernel based on total variation distance.""" dist = np.abs(x - y).sum() / 2.0 return np.exp(-sigma * dist)
a0d123c28493af4aa1400958bab134230c5735c3
3,636,066
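A worked example for the kernel above: for the two distributions below the total variation distance is 0.5 * sum(|p - q|) = 0.1, so the kernel value is exp(-sigma * 0.1):

import numpy as np

p = np.array([0.5, 0.3, 0.2])
q = np.array([0.4, 0.4, 0.2])
print(laplacian_total_variation_kernel(p, q, sigma=1.0))  # exp(-0.1) ~ 0.9048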
import random def print_mimic(mimic_dict, word): """Given mimic dict and start word, prints 200 random word pairs (note: word is currently unused).""" line, text = '', '' # Iterate 200 times to pick up random keys and values for count in range(0, 200): key = random.choice(list(mimic_dict.keys())) val = mimic_dict.get(key) line += ('{} {} '.format(key, random.choice(val))) # print 70 columns per line if len(line) > 70: text += line + '\n' line = '' text += line # keep any final partial line print(text) return True
61dea92175feff7cb3e3744460ccf692cfc18ca7
3,636,067
from pathlib import Path def get_dataset( dataset_name: str, path: Path = default_dataset_path, regenerate: bool = False, ) -> TrainDatasets: """ Get a repository dataset. The datasets that can be obtained through this function have been used with different processing over time by several papers (e.g., [SFG17]_, [LCY+18]_, and [YRD15]_). Parameters ---------- dataset_name name of the dataset, for instance "m4_hourly" regenerate whether to regenerate the dataset even if a local file is present. If this flag is False and the file is present, the dataset will not be downloaded again. path where the dataset should be saved Returns ------- dataset obtained by either downloading or reloading from local file. """ dataset_path = materialize_dataset(dataset_name, path, regenerate) return load_datasets( metadata=dataset_path, train=dataset_path / "train", test=dataset_path / "test", )
92f5cf094d1eafb4439dbbd57f0ab7cfab84e4ef
3,636,068
def test_view(regression_id): """ Show a single regression test. :param regression_id: id of the regression test :type regression_id: int :return: Regression test :rtype: dict """ test = RegressionTest.query.filter(RegressionTest.id == regression_id).first() if test is None: g.log.error(f'requested regression test with id: {regression_id} not found!') abort(404) return { 'test': test }
5765fcca8ee06e64ecadd346ed228696b2ce9dcb
3,636,069
def data_context_path_computation_context_path_comp_serviceuuid_objective_function_post(uuid, tapi_path_computation_path_objective_function=None): # noqa: E501 """data_context_path_computation_context_path_comp_serviceuuid_objective_function_post creates tapi.path.computation.PathObjectiveFunction # noqa: E501 :param uuid: Id of path-comp-service :type uuid: str :param tapi_path_computation_path_objective_function: tapi.path.computation.PathObjectiveFunction to be added to list :type tapi_path_computation_path_objective_function: dict | bytes :rtype: None """ if connexion.request.is_json: tapi_path_computation_path_objective_function = TapiPathComputationPathObjectiveFunction.from_dict(connexion.request.get_json()) # noqa: E501 return 'do some magic!'
88d2e8e9442fd6b71ce0f8bb5a5463101e8ed65a
3,636,070
import numpy as np import arcpy def tbl_2_nparray(in_tbl, flds): """Form the TableToNumPyArray to account for nulls for various dtypes. This is essentially a shortcut to `arcpy.da.TableToNumPyArray` Requires -------- `in_tbl` : table, or featureclass table name `flds` : list of field names `skip_nulls` = False : set within function `null_value` : determined from the dtype of the array... otherwise you may as well do it manually Source ------ arraytools, apt.py module """ nulls = {'Double':np.nan, 'Integer':np.iinfo(np.int32).min, 'OID':np.iinfo(np.int32).min, 'String':"None"} fld_dict = {i.name: i.type for i in arcpy.ListFields(in_tbl)} null_dict = {f:nulls[fld_dict[f]] for f in flds} a = arcpy.da.TableToNumPyArray(in_table=in_tbl, field_names=flds, skip_nulls=False, null_value=null_dict) return a
1c18526f9dcd4a388b3df6fa8ba113fc18a9fb0a
3,636,071
from lxml import html def _get_paper_page(url: str) -> object: # pragma: no cover """ Get a paper page element from a provided URL Parameters ---------- url : str The paper URL Returns ------- Object An HTML element representing the paper given by the provided URL """ response = common_util.try_success(lambda: DefaultSession().get(url), 2) return html.fromstring(response.content)
2e112469b5928d3156d7786dc506eb3b2b8c6a74
3,636,072
def check_grid_side(ctx, param, value: int) -> int: """Validate a grid side length (callback-style signature; ctx and param are unused). :type value: int """ if value < 5: raise ValueError("all sides of grid must be at least 5") return value
9e1403ca90c8f0716e10248b418ca59fe501c0c4
3,636,073
def distributed_transformer_model(input_tensor, attention_mask=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, intermediate_act_fn=gelu, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, do_return_all_layers=False, gpu_nums=2): """Multi-headed, multi-layer Transformer from "Attention is All You Need". This is almost an exact implementation of the original Transformer encoder. See the original paper: https://arxiv.org/abs/1706.03762 Also see: https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py Args: input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size]. attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length, seq_length], with 1 for positions that can be attended to and 0 in positions that should not be. hidden_size: int. Hidden size of the Transformer. num_hidden_layers: int. Number of layers (blocks) in the Transformer. num_attention_heads: int. Number of attention heads in the Transformer. intermediate_size: int. The size of the "intermediate" (a.k.a., feed forward) layer. intermediate_act_fn: function. The non-linear activation function to apply to the output of the intermediate/feed-forward layer. hidden_dropout_prob: float. Dropout probability for the hidden layers. attention_probs_dropout_prob: float. Dropout probability of the attention probabilities. initializer_range: float. Range of the initializer (stddev of truncated normal). do_return_all_layers: Whether to also return all layers or just the final layer. gpu_nums: int. Number of GPUs to shard the layers across. Returns: float Tensor of shape [batch_size, seq_length, hidden_size], the final hidden layer of the Transformer. Raises: ValueError: A Tensor shape or parameter is invalid. """ if hidden_size % num_attention_heads != 0: raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (hidden_size, num_attention_heads)) attention_head_size = int(hidden_size / num_attention_heads) input_shape = bert_utils.get_shape_list(input_tensor, expected_rank=3) batch_size = input_shape[0] seq_length = input_shape[1] input_width = input_shape[2] # The Transformer performs sum residuals on all layers so the input needs # to be the same as the hidden size. if input_width != hidden_size: raise ValueError("The width of the input tensor (%d) != hidden size (%d)" % (input_width, hidden_size)) # We keep the representation as a 2D tensor to avoid re-shaping it back and # forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on # the GPU/CPU but may not be free on the TPU, so we want to minimize them to # help the optimizer. prev_output = bert_utils.reshape_to_matrix(input_tensor) all_layer_outputs = [] gpu_partition = int(num_hidden_layers/gpu_nums) gpu_id = -1 # gpu_id ranges from 0 to gpu_nums - 1 for layer_idx in range(num_hidden_layers): with tf.variable_scope("layer_%d" % layer_idx): layer_input = prev_output if np.mod(layer_idx, gpu_partition) == 0: gpu_id += 1 with tf.device('/gpu:{}'.format(gpu_id)): tf.logging.info(" apply transformer attention {}-th layer on device {} ".format(layer_idx, gpu_id)) print(" apply transformer attention {}-th layer on device {} ".format(layer_idx, gpu_id)) with tf.variable_scope("attention"): attention_heads = [] with tf.variable_scope("self"): attention_head = attention_layer( from_tensor=layer_input, to_tensor=layer_input, attention_mask=attention_mask, num_attention_heads=num_attention_heads, size_per_head=attention_head_size, attention_probs_dropout_prob=attention_probs_dropout_prob, initializer_range=initializer_range, do_return_2d_tensor=True, batch_size=batch_size, from_seq_length=seq_length, to_seq_length=seq_length) attention_heads.append(attention_head) attention_output = None if len(attention_heads) == 1: attention_output = attention_heads[0] else: # In the case where we have other sequences, we just concatenate # them to the self-attention head before the projection. attention_output = tf.concat(attention_heads, axis=-1) # Run a linear projection of `hidden_size` then add a residual # with `layer_input`. with tf.variable_scope("output"): attention_output = tf.layers.dense( attention_output, hidden_size, kernel_initializer=create_initializer(initializer_range)) attention_output = dropout(attention_output, hidden_dropout_prob) attention_output = layer_norm(attention_output + layer_input) # The activation is only applied to the "intermediate" hidden layer. with tf.variable_scope("intermediate"): intermediate_output = tf.layers.dense( attention_output, intermediate_size, activation=intermediate_act_fn, kernel_initializer=create_initializer(initializer_range)) # Down-project back to `hidden_size` then add the residual. with tf.variable_scope("output"): layer_output = tf.layers.dense( intermediate_output, hidden_size, kernel_initializer=create_initializer(initializer_range)) layer_output = dropout(layer_output, hidden_dropout_prob) layer_output = layer_norm(layer_output + attention_output) prev_output = layer_output all_layer_outputs.append(layer_output) if do_return_all_layers: final_outputs = [] for layer_output in all_layer_outputs: final_output = bert_utils.reshape_from_matrix(layer_output, input_shape) final_outputs.append(final_output) return final_outputs else: final_output = bert_utils.reshape_from_matrix(prev_output, input_shape) return final_output
d5210a2969e18766956ff3e052a79f6d9fa86d13
3,636,074
from collections import Counter, defaultdict import itertools as it import math import networkx def sgrank(doc, kp_count, window=1500, idf=None): """ Extracts keyphrases from a text using SGRank algorithm. Args: doc: a spacy.Doc object kp_count: number of keyphrases window: word co-occurrence window length idf: a dictionary (string, float) of inverse document frequencies Returns: list of keyphrases Raises: TypeError if idf is not dictionary or None """ if isinstance(idf, dict): idf = defaultdict(lambda: 1, idf) elif idf is not None: msg = "idf must be a dictionary, not {}".format(type(idf)) raise TypeError(msg) cutoff_factor = 3000 token_count = len(doc) top_n = max(int(token_count * 0.2), 100) min_freq = 1 if 1500 < token_count < 4000: min_freq = 2 elif token_count >= 4000: min_freq = 3 terms = [tok for toks in (ngrams(doc, n) for n in range(1,7)) for tok in toks] term_strs = {id(term): normalize(term) for term in terms} # Count terms and filter by the minimum term frequency counts = Counter(term_strs[id(term)] for term in terms) term_freqs = {term_str: freq for term_str, freq in counts.items() if freq >= min_freq} if idf: # For ngrams with n >= 2 we have idf = 1 modified_tfidf = {term_str: freq * idf[term_str] if ' ' not in term_str else freq for term_str, freq in term_freqs.items()} else: modified_tfidf = term_freqs # Take top_n values, but also those that have equal tfidf with the top_n:th value # This guarantees that the algorithm produces similar results with every run ordered_tfidfs = sorted(modified_tfidf.items(), key=lambda t: t[1], reverse=True) top_n = min(top_n, len(ordered_tfidfs)) top_n_value = ordered_tfidfs[top_n-1][1] top_terms = set(str for str, val in it.takewhile(lambda t: t[1] >= top_n_value, ordered_tfidfs)) terms = [term for term in terms if term_strs[id(term)] in top_terms] term_weights = {} # Calculate term weights for term in terms: term_str = term_strs[id(term)] term_len = math.sqrt(len(term)) term_freq = term_freqs[term_str] occ_factor = math.log(cutoff_factor / (term.start + 1)) # Sum the frequencies of all other terms that contain this term subsum_count = sum(term_freqs[other] for other in top_terms if other != term_str and term_str in other) freq_diff = term_freq - subsum_count if idf and term_len == 1: freq_diff *= idf[term_str] weight = freq_diff * occ_factor * term_len if term_str in term_weights: # log(1/x) is a decreasing function, so the first occurrence has largest weight if weight > term_weights[term_str]: term_weights[term_str] = weight else: term_weights[term_str] = weight # Use only positive-weighted terms terms = [term for term in terms if term_weights[term_strs[id(term)]] > 0] num_co_occurrences = defaultdict(lambda: defaultdict(int)) total_log_distance = defaultdict(lambda: defaultdict(float)) # Calculate term co-occurrences and co-occurrence distances within the co-occurrence window for t1, t2 in it.combinations(terms, 2): dist = abs(t1.start - t2.start) if dist <= window: t1_str = term_strs[id(t1)] t2_str = term_strs[id(t2)] if t1_str != t2_str: num_co_occurrences[t1_str][t2_str] += 1 total_log_distance[t1_str][t2_str] += math.log(window / max(1, dist)) # Weight the graph edges based on word co-occurrences edge_weights = defaultdict(lambda: defaultdict(float)) for t1, neighbors in total_log_distance.items(): for n in neighbors: edge_weights[t1][n] = (total_log_distance[t1][n] / num_co_occurrences[t1][n]) \ * term_weights[t1] * term_weights[n] # Normalize edge weights by sum of outgoing edge weights norm_edge_weights = [] for t1, neighbors in edge_weights.items(): weights_sum = sum(neighbors.values()) norm_edge_weights.extend((t1, n, weight / weights_sum) for n, weight in neighbors.items()) term_graph = networkx.Graph() term_graph.add_weighted_edges_from(norm_edge_weights) term_ranks = networkx.pagerank_scipy(term_graph) if 0 < kp_count < 1: kp_count = round(kp_count * len(term_ranks)) kp_count = int(kp_count) top_phrases = top_keys(kp_count, term_ranks) return top_phrases
4830e30b85bd3020b7bae8bcd14e3cdac5671648
3,636,075
from typing import List from typing import Tuple import numpy as np def save_quantitative_results(quantitative_results: List[Tuple[int, List[float]]], metric_names: List[str], output_file: str): """ Saves the quantitative results into an output file. The quantitative results are passed as a list of entries, where each entry consists of the entry id, as well as a list of metric values. Example quantitative results: [(0, [0.81, 0.78, 0.88]), (1, [0.62, 0.7, 0.5]), ...] A list of metric names in the same order they appear in the quantitative results is also to be provided. Args: quantitative_results (list[tuple[int, list[float]]]): the quantitative results metric_names (list[str]): the names of the metrics output_file (str): the output location """ metric_means = [] with open(output_file, 'w') as f: f.write('Overall results: AVERAGE +- STD DEVIATION\n') for idx, metric_name in enumerate(metric_names): raw_numbers = np.array([x[1][idx] for x in quantitative_results]) metric_avg = raw_numbers.mean() metric_std = raw_numbers.std() metric_means.append(metric_avg) f.write( f'{metric_name: <15} {metric_avg:.4f} +- {metric_std:.4f}\n') f.write('\n\nIndividual examples:\n') f.write(f'Metrics order {" - ".join(metric_names)}\n') for (idx, values) in quantitative_results: rounded_values = [str(round(i, 4)) for i in values] f.write(f'{idx:<3}: {", ".join(rounded_values)}\n') return metric_means
ee83d4c779b9376da84bf649f469ca316c351ceb
3,636,076
def interpret(parsed, source_url, base_href=None, item=None, use_rel_syndication=True, want_json=False, fetch_mf2_func=None): """Interpret a permalink of unknown type. Finds the first interesting h-* element, and delegates to :func:`interpret_entry` if it is an h-entry or :func:`interpret_event` for an h-event :param dict parsed: the result of parsing a mf2 document :param str source_url: the URL of the source document (used for authorship discovery) :param str base_href: (optional) the href value of the base tag :param dict item: (optional) the item to be parsed. If provided, this will be used instead of the first element on the page. :param boolean use_rel_syndication: (optional, default True) Whether to include rel=syndication in the list of syndication sources. Sometimes useful to set this to False when parsing h-feeds that erroneously include rel=syndication on each entry. :param boolean want_json: (optional, default False) If true, the result will be pure json with datetimes as strings instead of python objects :param callable fetch_mf2_func: (optional) function to fetch mf2 parsed output for a given URL. :return: a dict as described by interpret_entry or interpret_event, or None """ if not item: item = find_first_entry(parsed, ['h-entry', 'h-event']) if item: types = item.get('type', []) if 'h-event' in types: return interpret_event( parsed, source_url, base_href=base_href, hevent=item, use_rel_syndication=use_rel_syndication, want_json=want_json, fetch_mf2_func=fetch_mf2_func) elif 'h-entry' in types or 'h-cite' in types: return interpret_entry( parsed, source_url, base_href=base_href, hentry=item, use_rel_syndication=use_rel_syndication, want_json=want_json, fetch_mf2_func=fetch_mf2_func)
457b0f90e47ff9e37cda4c9359336ef8d1f24960
3,636,077
from typing import List from itertools import product import re def generate(*drf_globs: DRF_list) -> List[str]: """ Generates a list of valid requests from a DRF glob. :param drf_globs: A list of DRF globs. :return: A list of valid requests. """ results = [] def parse_globs(drf_globs): for drf_glob in drf_globs: # Parse nested lists and tuples if type(drf_glob) in [list, tuple]: parse_globs(drf_glob) continue # keep processing the remaining globs tokens = re.split(r'[{}]', drf_glob) iterations = [] for token in tokens: if '..' in token: first, second = token.split('..') iterations.append(token_range(first, second)) elif ',' in token: iterations.append(token.split(',')) else: if token != '': iterations.append([token]) for result in product(*iterations): results.append(''.join(map(str, result))) parse_globs(drf_globs) return results
96a8a036b4c659ed72b23c0c16c384b4ea8f7efd
3,636,078
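The comma branch of generate needs no external helpers, so a small demonstration (token_range, used only by the '..' branch, lives elsewhere):

print(generate('dev{A,B}:read'))         # ['devA:read', 'devB:read']
print(generate(['node{1,2}', 'node3']))  # ['node1', 'node2', 'node3']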
def min_rank(series, ascending=True): """ Equivalent to `series.rank(method='min', ascending=ascending)`. Args: series: column to rank. Kwargs: ascending (bool): whether to rank in ascending order (default is `True`). """ ranks = series.rank(method="min", ascending=ascending) return ranks
a772618570517a324a202d4803983240cb54396b
3,636,079
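min_rank gives tied values the smallest rank of their group, so a tie "uses up" the following ranks:

import pandas as pd

s = pd.Series([10, 20, 20, 30])
print(min_rank(s).tolist())                   # [1.0, 2.0, 2.0, 4.0]
print(min_rank(s, ascending=False).tolist())  # [4.0, 2.0, 2.0, 1.0]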
def glCurrentViewport(x=None, y=None, width=None, height=None): """ Returns a (x, y, width, height)-tuple with the current viewport bounds. If x, y, width and height are given, set the viewport bounds. """ # Why? To switch between the size of the onscreen canvas and the offscreen buffer. # The canvas could be 256x256 while an offscreen buffer could be 1024x1024. # Without switching the viewport, information from the buffer would be lost. if x is not None and y is not None and width is not None and height is not None: glViewport(x, y, width, height) glMatrixMode(GL_PROJECTION) glLoadIdentity() glOrtho(x, width, y, height, -1, 1) glMatrixMode(GL_MODELVIEW) xywh = (GLint*4)(); glGetIntegerv(GL_VIEWPORT, xywh) return tuple(xywh)
e532fbaa710ee203cb75b0951ff560c7536a0b4a
3,636,080
def algorithms(): """Get a list of the names of the available stemming algorithms. The only algorithm currently supported is the "english", or porter2, algorithm. """ return ['english']
d09bef4090fbca1729a25784d7befdb8a436bfa6
3,636,081
import yaml def load_yaml(filepath): """Import YAML config file.""" with open(filepath, "r") as stream: try: return yaml.safe_load(stream) except yaml.YAMLError as exc: print(exc)
e1ec81bf36d293788303e4b3379e45ecdfb38dc0
3,636,082
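A round-trip check for load_yaml using a throwaway file:

import os
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.yaml', delete=False) as f:
    f.write("name: demo\nthreshold: 0.5\n")
print(load_yaml(f.name))  # {'name': 'demo', 'threshold': 0.5}
os.unlink(f.name)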
import json import logging def parse_json(json_path): """ Parse a JSON file. Args: json_path: input json file path Returns: json_dict: parsed json dict result """ try: with open(json_path) as json_file: json_dict = json.load(json_file) except Exception: logging.error("json file load error !") else: return json_dict
4bb9b14d3a751451dd2a75da9b60a355934ffa65
3,636,083
def _get_ex_msg(obj): """ Get exception message """ return obj.value.message if hasattr(obj, 'value') else obj.message
be7be0657afab2fe1daba174c441d18f12e78355
3,636,084
from typing import Any def __getattr__(name: str) -> Any: """Lazily imports modules and items within them. Args: name (str): name of sourdough module or item. Raises: AttributeError: if there is no module or item matching 'name'. Returns: Any: a module or item stored within a module. """ return lazily_import(name = name, package = __name__, mapping = importables)
9ec17f2f2b629e1082402dd7baad8ff8558d8d0b
3,636,085
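Neither lazily_import nor importables is shown; a minimal sketch of what such a helper could look like, assuming importables maps attribute names to submodule names (both the signature and the mapping semantics are guesses):

import importlib

def lazily_import(name, package, mapping):
    # Assumed contract: mapping is {attribute name: relative submodule name}.
    try:
        module = importlib.import_module('.' + mapping[name], package=package)
    except KeyError:
        raise AttributeError(f"module {package!r} has no attribute {name!r}")
    return getattr(module, name, module)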
import json import os def check_availabel_resourses(target_fname): """ Check resource availability before launching the spider.""" rpt = [] all_right = True target_generator = parser_target_for_spider(target_fname) for at in target_generator: info = at[0] head_rpt_msg = 'Name node: ['+info[0]+']\nUrl: ['+info[1]+']\n' # Validate the keys params = json.loads(info[3]) available_keys = tools.get_app_cfg()['App']['Spider']['available_keys'] for at in params: if at not in available_keys: all_right = False rpt.append(head_rpt_msg+"\tError: found a disabled key in params.") # Validate the converter if 'to_text' in params: available_convertors = tools.get_app_cfg()['App']['Spider']['to_text_convertors'] if params['to_text'] not in available_convertors: all_right = False rpt.append(head_rpt_msg+"\tError: unregistered to-text-convertor - "+ params['to_text']+ ". It may be registered in"+ " the /app-cfgs/spider_cfg.yaml file.") else: if 'std_' not in params['to_text'] and 'custom_' not in params['to_text']: all_right = False rpt.append(head_rpt_msg+"\tError: bad to-text-convertor name - "+ params['to_text']+ ". Must begin with the std_ or custom_ prefix.") # Validate the default converter url = info[1] extenton = url.split('.')[-1] if not params: auto_detected_urls = tools.get_app_cfg()['App']['Spider']['auto_detected_urls'] if extenton not in auto_detected_urls: all_right = False rpt.append(head_rpt_msg+"\tError: url not recognized and no user settings were provided") # Check resource availability # A file on the local filesystem url_exist = os.path.exists(url) if not url_exist: all_right = False rpt.append(head_rpt_msg+"\tError: url not found on the local filesystem."+ " If this is a network address, set the parameters [external_url: yes get(or post) add params]") # Check network address availability if 'external_url' in params: all_right = False rpt.append(head_rpt_msg+"\tWarning: checking external addresses is not implemented") # Checks complete return all_right, rpt
bd2b8cb1f3eca420abe3045d6b2a72b34e2c3126
3,636,086
def _validate_isofactor(isofactor, signed): """Validate an (isovalue, factor) pair; returns an (ok, error_message) tuple.""" if isofactor[0] == 0.0: return (False, "Error: 'isovalue' cannot be zero") if isofactor[1] <= 1.0: return (False, "Error: 'factor' must be greater than one") if not signed and isofactor[0] < 0: return (False, "Error: Negative 'isovalue' in absolute " "thresholding mode") return (True, "")
7b4a4faf3671fdee364cdae41e178f8e6a0453b8
3,636,087
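Behaviour of the validator above on one accepted pair and the three rejection cases:

print(_validate_isofactor((0.5, 2.0), signed=False))   # (True, '')
print(_validate_isofactor((0.0, 2.0), signed=False))   # isovalue cannot be zero
print(_validate_isofactor((0.5, 1.0), signed=False))   # factor must be greater than one
print(_validate_isofactor((-0.5, 2.0), signed=False))  # negative isovalue in absolute mode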
import itertools def get_response_comments(request, comment_id, page, page_size, requested_fields=None): """ Return the list of comments for the given thread response. Arguments: request: The django request object used for build_absolute_uri and determining the requesting user. comment_id: The id of the comment/response to get child comments for. page: The page number (1-indexed) to retrieve page_size: The number of comments to retrieve per page requested_fields: Indicates which additional fields to return for each child comment. (i.e. ['profile_image']) Returns: A paginated result containing a list of comments """ try: cc_comment = Comment(id=comment_id).retrieve() cc_thread, context = _get_thread_and_context( request, cc_comment["thread_id"], retrieve_kwargs={ "with_responses": True, "recursive": True, } ) if cc_thread["thread_type"] == "question": thread_responses = itertools.chain(cc_thread["endorsed_responses"], cc_thread["non_endorsed_responses"]) else: thread_responses = cc_thread["children"] response_comments = [] for response in thread_responses: if response["id"] == comment_id: response_comments = response["children"] break response_skip = page_size * (page - 1) paged_response_comments = response_comments[response_skip:(response_skip + page_size)] if not paged_response_comments and page != 1: raise PageNotFoundError("Page not found (No results on this page).") results = _serialize_discussion_entities( request, context, paged_response_comments, requested_fields, DiscussionEntity.comment ) comments_count = len(response_comments) num_pages = (comments_count + page_size - 1) // page_size if comments_count else 1 paginator = DiscussionAPIPagination(request, page, num_pages, comments_count) return paginator.get_paginated_response(results) except CommentClientRequestError: raise CommentNotFoundError("Comment not found")
2ccc449858329bf9e3b5cbaa4477ca97c3ac50a0
3,636,088
from typing import Optional from typing import cast def GetParentStatementNode( node: AST.Node, ) -> Optional[AST.Node]: """\ Returns the statement that is the logical parent of this node. This code attempts to handle the complexities of embedded phrases (for example, a statement that is made up of other phrases) where this node may be nested multiple levels below what ultimately constitutes its parent. """ parent = node.Parent while parent is not None: if parent.Type is not None and parent.Type.Name.endswith("Statement"): break parent = parent.Parent return cast(Optional[AST.Node], parent)
1ff5ed85b9cc4b67a4a0cc0b2d2df43c8a3560bf
3,636,089
def count_possibilities(dic): """ Counts how many unique names can be created from the combinations of the lists contained in the passed dictionary. """ total = 1 for value in dic.values(): total *= len(value) return total
856eee9bac0ddf3dbc7b714bb26fe6d4f003ef95
3,636,090
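For example, two first names and three last names yield 2 * 3 = 6 unique combinations:

parts = {'first': ['Ann', 'Bob'], 'last': ['Lee', 'Kim', 'Ray']}
print(count_possibilities(parts))  # 6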
def user_update(): """Update the current user's profile.""" username = request.get_json()['username'] email = request.get_json()['email'] password = request.get_json()['password'] try: user_id = g.user.id currentuser = User.query.get(user_id) if password: currentuser.password = password currentuser.username = username currentuser.email = email db.session.add(currentuser) db.session.commit() msg = "success" res = { 'code': 200, 'msg': msg } return jsonify(res) except Exception: db.session.rollback() msg = "fail" res = { 'code': 400, 'msg': msg } return jsonify(res)
84026d9903d70d19b86746ab73f3e65c67d1168d
3,636,091
from typing import Iterable from typing import Tuple from typing import Set from pathlib import Path import h5py import ray from tqdm import tqdm def feature_matrix_hdf5(smis: Iterable[str], size: int, *, featurizer: Featurizer = Featurizer(), name: str = 'fps', path: str = '.') -> Tuple[str, Set[int]]: """Precalculate the feature matrix of smis with the given featurizer and store the matrix in an HDF5 file Parameters ---------- smis : Iterable[str] the inputs for which to generate the feature matrix size : int the length of the iterable featurizer : Featurizer, default=Featurizer() an object that encodes inputs from an identifier representation to a feature representation name : str (Default = 'fps') the name of the output HDF5 file path : str (Default = '.') the path under which the HDF5 file should be written Returns ------- fps_h5 : str the filename of an hdf5 file containing the feature matrix of the representations generated from the molecules in the input file. The row ordering corresponds to the ordering of smis invalid_idxs : Set[int] the set of indices in smis containing invalid inputs """ fps_h5 = str(Path(path)/f'{name}.h5') # fingerprint = featurizer.fingerprint # radius = featurizer.radius # length = featurizer.length ncpu = int(ray.cluster_resources()['CPU']) with h5py.File(fps_h5, 'w') as h5f: CHUNKSIZE = 512 fps_dset = h5f.create_dataset( 'fps', (size, len(featurizer)), chunks=(CHUNKSIZE, len(featurizer)), maxshape=(size, len(featurizer)), dtype='int8' ) batch_size = CHUNKSIZE * 2 * ncpu n_batches = size//batch_size + 1 invalid_idxs = set() i = 0 offset = 0 for smis_batch in tqdm(batches(smis, batch_size), total=n_batches, desc='Precalculating fps', unit='batch'): fps = feature_matrix(smis_batch, featurizer) for fp in tqdm(fps, total=batch_size, smoothing=0., leave=False): if fp is None: invalid_idxs.add(i+offset) offset += 1 continue # fp = next(fps) fps_dset[i] = fp i += 1 # original dataset size included potentially invalid smis valid_size = size - len(invalid_idxs) if valid_size != size: fps_dset.resize(valid_size, axis=0) return fps_h5, invalid_idxs
1318d703d27adbd9df800b7a1b97ecc6a67bf151
3,636,092
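batches and feature_matrix are external helpers; a plausible sketch of the chunking helper assumed by the loop above:

from itertools import islice
from typing import Iterable, Iterator, List, TypeVar

T = TypeVar('T')

def batches(it: Iterable[T], size: int) -> Iterator[List[T]]:
    # Yield consecutive chunks of up to `size` items (assumed helper).
    iterator = iter(it)
    while True:
        chunk = list(islice(iterator, size))
        if not chunk:
            return
        yield chunk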
def xpath_error(code, message=None, token=None, prefix='err'): """ Returns an XPath error instance related to a code. An XPath/XQuery/XSLT error code (ref: https://www.w3.org/2005/xqt-errors/) is an alphanumeric token starting with four uppercase letters and ending with four digits. :param code: the error code. :param message: an optional custom additional message. :param token: an optional token instance. :param prefix: the namespace prefix to apply to the error code, defaults to 'err'. """ if ':' not in code: pcode = '%s:%s' % (prefix, code) if prefix else code elif not prefix or not code.startswith(prefix + ':'): raise ElementPathValueError('%r is not an XPath error code' % code) else: pcode = code code = code[len(prefix) + 1:] # XPath 2.0 parser error (https://www.w3.org/TR/xpath20/#id-errors) if code == 'XPST0001': return ElementPathValueError(message or 'Parser not bound to a schema', pcode, token) elif code == 'XPST0003': return ElementPathValueError(message or 'Invalid XPath expression', pcode, token) elif code == 'XPDY0002': return MissingContextError(message or 'Dynamic context required for evaluate', pcode, token) elif code == 'XPTY0004': return ElementPathTypeError(message or 'Type is not appropriate for the context', pcode, token) elif code == 'XPST0005': return ElementPathValueError(message or 'A not empty sequence required', pcode, token) elif code == 'XPST0008': return ElementPathNameError(message or 'Name not found', pcode, token) elif code == 'XPST0010': return ElementPathNameError(message or 'Axis not found', pcode, token) elif code == 'XPST0017': return ElementPathTypeError(message or 'Wrong number of arguments', pcode, token) elif code == 'XPTY0018': return ElementPathTypeError(message or 'Step result contains both nodes and atomic values', pcode, token) elif code == 'XPTY0019': return ElementPathTypeError(message or 'Intermediate step contains an atomic value', pcode, token) elif code == 'XPTY0020': return ElementPathTypeError(message or 'Context item is not a node', pcode, token) elif code == 'XPDY0050': return ElementPathTypeError(message or 'Type does not match sequence type', pcode, token) elif code == 'XPST0051': return ElementPathNameError(message or 'Unknown atomic type', pcode, token) elif code == 'XPST0080': return ElementPathNameError(message or 'Target type cannot be xs:NOTATION or xs:anyAtomicType', pcode, token) elif code == 'XPST0081': return ElementPathNameError(message or 'Unknown namespace', pcode, token) # XPath data types and function errors elif code == 'FOER0000': return ElementPathError(message or 'Unidentified error', pcode, token) elif code == 'FOAR0001': return ElementPathValueError(message or 'Division by zero', pcode, token) elif code == 'FOAR0002': return ElementPathValueError(message or 'Numeric operation overflow/underflow', pcode, token) elif code == 'FOCA0001': return ElementPathValueError(message or 'Input value too large for decimal', pcode, token) elif code == 'FOCA0002': return ElementPathValueError(message or 'Invalid lexical value', pcode, token) elif code == 'FOCA0003': return ElementPathValueError(message or 'Input value too large for integer', pcode, token) elif code == 'FOCA0005': return ElementPathValueError(message or 'NaN supplied as float/double value', pcode, token) elif code == 'FOCA0006': return ElementPathValueError( message or 'String to be cast to decimal has too many digits of precision', pcode, token ) elif code == 'FOCH0001': return ElementPathValueError(message or 'Code point not valid', pcode, token) elif code == 'FOCH0002': return ElementPathLocaleError(message or 'Unsupported collation', pcode, token) elif code == 'FOCH0003': return ElementPathValueError(message or 'Unsupported normalization form', pcode, token) elif code == 'FOCH0004': return ElementPathValueError(message or 'Collation does not support collation units', pcode, token) elif code == 'FODC0001': return ElementPathValueError(message or 'No context document', pcode, token) elif code == 'FODC0002': return ElementPathValueError(message or 'Error retrieving resource', pcode, token) elif code == 'FODC0003': return ElementPathValueError(message or 'Function stability not defined', pcode, token) elif code == 'FODC0004': return ElementPathValueError(message or 'Invalid argument to fn:collection', pcode, token) elif code == 'FODC0005': return ElementPathValueError(message or 'Invalid argument to fn:doc or fn:doc-available', pcode, token) elif code == 'FODT0001': return ElementPathValueError(message or 'Overflow/underflow in date/time operation', pcode, token) elif code == 'FODT0002': return ElementPathValueError(message or 'Overflow/underflow in duration operation', pcode, token) elif code == 'FODT0003': return ElementPathValueError(message or 'Invalid timezone value', pcode, token) elif code == 'FONS0004': return ElementPathKeyError(message or 'No namespace found for prefix', pcode, token) elif code == 'FONS0005': return ElementPathValueError(message or 'Base-uri not defined in the static context', pcode, token) elif code == 'FORG0001': return ElementPathValueError(message or 'Invalid value for cast/constructor', pcode, token) elif code == 'FORG0002': return ElementPathValueError(message or 'Invalid argument to fn:resolve-uri()', pcode, token) elif code == 'FORG0003': return ElementPathValueError( message or 'fn:zero-or-one called with a sequence containing more than one item', pcode, token ) elif code == 'FORG0004': return ElementPathValueError( message or 'fn:one-or-more called with a sequence containing no items', pcode, token ) elif code == 'FORG0005': return ElementPathValueError( message or 'fn:exactly-one called with a sequence containing zero or more than one item', pcode, token ) elif code == 'FORG0006': return ElementPathTypeError(message or 'Invalid argument type', pcode, token) elif code == 'FORG0008': return ElementPathValueError( message or 'The two arguments to fn:dateTime have inconsistent timezones', pcode, token ) elif code == 'FORG0009': return ElementPathValueError( message or 'Error in resolving a relative URI against a base URI in fn:resolve-uri', pcode, token ) elif code == 'FORX0001': return ElementPathValueError(message or 'Invalid regular expression flags', pcode, token) elif code == 'FORX0002': return ElementPathValueError(message or 'Invalid regular expression', pcode, token) elif code == 'FORX0003': return ElementPathValueError(message or 'Regular expression matches zero-length string', pcode, token) elif code == 'FORX0004': return ElementPathValueError(message or 'Invalid replacement string', pcode, token) elif code == 'FOTY0012': return ElementPathValueError(message or 'Argument node does not have a typed value', pcode, token) else: raise ElementPathValueError(message or 'Unknown XPath error code %r.' % code, token=token)
db35731f49fc1a1f51c61f365b0aba7a6bfc099e
3,636,093
def _setPropertyValue(self, name, value, typeString = ''): """Set the typed value of a property by its name, creating a child element to hold the property if needed.""" method = getattr(self.__class__, "_setPropertyValue" + getTypeString(value)) return method(self, name, value, typeString)
915091618fded898ab690b0ce223bdb00aa4308b
3,636,094
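_setPropertyValue dispatches on a type name computed by the external getTypeString helper; the same getattr-dispatch pattern in a self-contained sketch:

class Store:
    # Dispatch on the value's type name, mirroring the pattern above.
    def set_value(self, name, value):
        method = getattr(self.__class__, '_set' + type(value).__name__.capitalize())
        return method(self, name, value)
    def _setInt(self, name, value):
        print('int property', name, '=', value)
    def _setStr(self, name, value):
        print('str property', name, '=', value)

Store().set_value('count', 3)     # int property count = 3
Store().set_value('label', 'hi')  # str property label = hi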
def build_toy_input_feature_values(features, use_rank_two=False, has_catset=False): """Create a set of input features values. These examples will fall respectively in the nodes 6, 5, 3, 2 of _build_toy_random_forest. Args: features: Dictionary of input feature tensors. If None, the features are indexed by name (used in tf2). use_rank_two: Should the feature be passed as one or two ranked tensors. has_catset: Add two categorical-set features to the dataspec. Returns: Dictionary of feature values. """ is_tf2 = features is None def shape(x): if use_rank_two: y = [[v] for v in x] else: y = x if is_tf2: return tf.constant(y) else: return y if is_tf2: class Identity: def __getitem__(self, key): return key features = Identity() feature_values = { features["a"]: shape([2, 2, 0, 0]), features["b"]: shape(["x", "z", "x", "z"]), features["c"]: shape([1, 2, 1, 2]), features["bool_feature"]: shape([1, 0, 1, 1]) } if has_catset: ragged_constant = tf.ragged.constant if is_tf2 else tf.ragged.constant_value feature_values[features["d"]] = ragged_constant( [["x"], ["y"], ["y", "z"], [""]], dtype=tf.string) feature_values[features["e"]] = ragged_constant( [[11, 12], [], [14, 15, 16], [-1]], dtype=tf.int32) return feature_values
3c18411f8d1934aed979cc43c7e6d674b88b078e
3,636,095
import argparse import logging def parse_arguments(): """Read arguments from a command line.""" parser = argparse.ArgumentParser(description="Arguments get parsed via --commands") parser.add_argument( "-v", metavar="verbosity", type=int, default=4, help="Verbosity of logging: 0 -critical, 1- error, 2 -warning, 3 -info, 4 -debug", ) args = parser.parse_args() verbose = {0: logging.CRITICAL, 1: logging.ERROR, 2: logging.WARNING, 3: logging.INFO, 4: logging.DEBUG} logging.basicConfig(format="%(message)s", level=verbose[args.v], filename="output/errors.log") return args
d5466e9379ba033f0920ee73c82f486611a62868
3,636,096
def light_head_preprocess_for_train(image, labels, bboxes, out_shape, data_format='NHWC', scope='light_head_preprocess_train'): """Preprocesses the given image for training. Note that the image is randomly expanded and randomly cropped before being resized to `out_shape`. Args: image: A `Tensor` representing an image of arbitrary size. labels: A `Tensor` of object class labels matching `bboxes`. bboxes: A `Tensor` of bounding boxes for the objects in `image`. out_shape: The (height, width) of the image after preprocessing. data_format: 'NHWC' or 'NCHW'. scope: Name scope for the preprocessing ops. Returns: A preprocessed (image, labels, bboxes) tuple. """ fast_mode = False with tf.name_scope(scope, 'light_head_preprocess_train', [image, labels, bboxes]): if image.get_shape().ndims != 3: raise ValueError('Input must be of size [height, width, C>0]') # Convert to float scaled [0, 1]. if image.dtype != tf.float32: image = tf.image.convert_image_dtype(image, dtype=tf.float32) tf_summary_image(image, bboxes, 'image_with_bboxes_0') # image, bboxes = control_flow_ops.cond(tf.random_uniform([1], minval=0., maxval=1., dtype=tf.float32)[0] < 0.5, lambda: (image, bboxes), lambda: tf_image.ssd_random_expand(image, bboxes, 2)) image, bboxes = control_flow_ops.cond(tf.random_uniform([1], minval=0., maxval=1., dtype=tf.float32)[0] < 0.3, lambda: (image, bboxes), lambda: tf_image.ssd_random_expand(image, bboxes, tf.random_uniform([1], minval=2, maxval=3, dtype=tf.int32)[0])) tf_summary_image(image, bboxes, 'image_on_canvas_1') # Distort image and bounding boxes. #print(image, labels, bboxes) random_sample_image, labels, bboxes = tf_image.ssd_random_sample_patch(image, labels, bboxes, ratio_list=[0.4, 0.6, 0.8, 1.]) tf_summary_image(random_sample_image, bboxes, 'image_shape_distorted_2') # Randomly flip the image horizontally. random_sample_flip_image, bboxes = tf_image.random_flip_left_right(random_sample_image, bboxes) random_sample_flip_resized_image = tf_image.resize_image(random_sample_flip_image, out_shape, method=tf.image.ResizeMethod.BILINEAR, align_corners=False) tf_summary_image(random_sample_flip_resized_image, bboxes, 'image_fliped_and_resized_3') # Randomly distort the colors. There are 4 ways to do it. dst_image = apply_with_random_selector( random_sample_flip_resized_image, lambda x, ordering: distort_color(x, ordering, fast_mode), num_cases=4) tf_summary_image(dst_image, bboxes, 'image_color_distorted_4') # Rescale to VGG input scale. image = dst_image * 2. image.set_shape([None, None, 3]) image = tf_image_whitened(image, [_R_MEAN/127.5, _G_MEAN/127.5, _B_MEAN/127.5]) # Image data format. if data_format == 'NCHW': image = tf.transpose(image, perm=(2, 0, 1)) return image, labels, bboxes
a2259af8dd79794b60021bcdab5c11e7ed6317d4
3,636,097
def get_dgs(align_dg_dict): """ Function that creates inverse dictionary of align_dg_dict align_dg_dict: dict. Dictionary of alignments and clustering DG assignments Returns dg_align_dict: dict, k=dg_id, v=[alignids] align_dg_dict comes from get_spectral(graph) or get_cliques(graph) """ dgs_list = set(align_dg_dict.values()) #list of all duplex groups dg_align_dict = {} for dg in dgs_list: dg_align_list =[x for (x,y) in align_dg_dict.items() if y == dg] dg_align_dict[dg] = dg_align_list return dg_align_dict #test case:
85bca47657c83d2b308d38f05d1c88d9a78fa448
3,636,098
def DateTime_GetBeginDST(*args, **kwargs): """DateTime_GetBeginDST(int year=Inv_Year, int country=Country_Default) -> DateTime""" return _misc_.DateTime_GetBeginDST(*args, **kwargs)
efb570d487dc68688572326d94a2bea57608ce46
3,636,099