Columns: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
def start_end_epoch(graph): """ Start and end epochs of the graph. :return: (start epoch, end epoch). """ start = 0 end = 0 for e in graph.edges_iter(): for _, p in graph[e[0]][e[1]].items(): end = max(end, p['etime_epoch_secs']) if start == 0: start = p['stime_epoch_secs'] else: start = min(start, p['stime_epoch_secs']) return (start, end)
724726ec83d3a98539eed859ec584c6f1adb8567
3,642,100
def distance_metric(seg_A, seg_B, dx): """ Measure the distance errors between the contours of two segmentations. The manual contours are drawn on 2D slices. We calculate contour to contour distance for each slice. """ table_md = [] table_hd = [] X, Y, Z = seg_A.shape for z in range(Z): # Binary mask at this slice slice_A = seg_A[:, :, z].astype(np.uint8) slice_B = seg_B[:, :, z].astype(np.uint8) # The distance is defined only when both contours exist on this slice if np.sum(slice_A) > 0 and np.sum(slice_B) > 0: # Find contours and retrieve all the points _, contours, _ = cv2.findContours(cv2.inRange(slice_A, 1, 1), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) pts_A = contours[0] for i in range(1, len(contours)): pts_A = np.vstack((pts_A, contours[i])) _, contours, _ = cv2.findContours(cv2.inRange(slice_B, 1, 1), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) pts_B = contours[0] for i in range(1, len(contours)): pts_B = np.vstack((pts_B, contours[i])) # Distance matrix between point sets M = np.zeros((len(pts_A), len(pts_B))) for i in range(len(pts_A)): for j in range(len(pts_B)): M[i, j] = np.linalg.norm(pts_A[i, 0] - pts_B[j, 0]) # Mean distance and hausdorff distance md = 0.5 * (np.mean(np.min(M, axis=0)) + np.mean(np.min(M, axis=1))) * dx hd = np.max([np.max(np.min(M, axis=0)), np.max(np.min(M, axis=1))]) * dx table_md += [md] table_hd += [hd] # Return the mean distance and Hausdorff distance across 2D slices mean_md = np.mean(table_md) if table_md else None mean_hd = np.mean(table_hd) if table_hd else None return mean_md, mean_hd
4ae5de6428914c8352ae1c6cdd9e94183d9ea3f8
3,642,101
def tomographic_redshift_bin(z_s, version=default_version): """DES analyses work in pre-defined tomographic redshift bins. This function returns the photometric redshift bin as a function of photometric redshift. Parameters ---------- z_s : numpy array Photometric redshifts. version : string Which catalog version to use. Returns ------- z_bin : numpy array The tomographic redshift bin corresponding to each photometric redshift. Returns -1 in case a redshift does not fall into any bin. """ if version == 'Y1': z_bins = [0.2, 0.43, 0.63, 0.9, 1.3] else: raise RuntimeError( "Unknown version of DES. Supported versions are {}.".format( known_versions)) z_bin = np.digitize(z_s, z_bins) - 1 z_bin = np.where((z_s < np.amin(z_bins)) | (z_s >= np.amax(z_bins)) | np.isnan(z_s), -1, z_bin) return z_bin
b4a21c111b8d5b5a34c018315f95cf18deb356af
3,642,102
def is_point_in_rect(point, rect): """Checks whether is coordinate point inside the rectangle or not. Rectangle is defined by bounding box. :type point: list :param point: testing coordinate point :type rect: list :param rect: bounding box :rtype: boolean :return: boolean check result """ x0, y0, x1, y1 = rect x, y = point if x0 <= x <= x1 and y0 <= y <= y1: return True return False
d0c7a64138899f4e50b42dc75ea6030616d4dfec
3,642,103
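A minimal usage sketch for the is_point_in_rect snippet above; the rectangle and test points are made-up illustrative values, not from the original source.

rect = [0, 0, 10, 5]                    # bounding box: x0, y0, x1, y1
print(is_point_in_rect([3, 2], rect))   # True  - the point lies inside the box
print(is_point_in_rect([11, 2], rect))  # False - x exceeds x1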
def chinese_theorem_inv(modulo_list): """ Returns (x, n1*...*nk) such as x mod mk = ak for all k, with modulo_list = [(a1, n1), ..., (ak, nk)] n1, ..., nk most be coprime 2 by 2. """ a, n = modulo_list[0] for a2, n2 in modulo_list[1:]: u, v = bezout(n, n2) a, n = a*v*n2+a2*u*n, n*n2 for (a1, n1) in modulo_list: assert a % n1 == a1 return ((n+a % n) % n, n)
3d1398901b75ca8b21fb97af0acdfbd65fec0a3e
3,642,104
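A hedged usage sketch for chinese_theorem_inv above. The snippet relies on a bezout helper that is not included in this record, so a standard extended-Euclidean version is supplied here as an assumption about its contract: it must return (u, v) with u*a + v*b == gcd(a, b).

def bezout(a, b):
    # Extended Euclidean algorithm: returns (u, v) with u*a + v*b == gcd(a, b).
    old_r, r = a, b
    old_u, u = 1, 0
    old_v, v = 0, 1
    while r != 0:
        q = old_r // r
        old_r, r = r, old_r - q * r
        old_u, u = u, old_u - q * u
        old_v, v = v, old_v - q * v
    return old_u, old_v

# Solve x = 2 (mod 3) and x = 3 (mod 5); the unique solution modulo 15 is 8.
print(chinese_theorem_inv([(2, 3), (3, 5)]))  # (8, 15)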
def compute_segment_cores(split_lines_of_utt): """ This function returns a list of pairs (start-index, end-index) representing the cores of segments (so if a pair is (s, e), then the core of a segment would span (s, s+1, ... e-1). The argument 'split_lines_of_utt' is list of lines from a ctm-edits file corresponding to a single utterance. By the 'core of a segment', we mean a sequence of ctm-edits lines including at least one 'cor' line and a contiguous sequence of other lines of the type 'cor', 'fix' and 'sil' that must be not tainted. The segment core excludes any tainted lines at the edge of a segment, which will be added later. We only initiate segments when it contains something correct and not realized as unk (i.e. ref==hyp); and we extend it with anything that is 'sil' or 'fix' or 'cor' that is not tainted. Contiguous regions of 'true' in the resulting boolean array will then become the cores of prototype segments, and we'll add any adjacent tainted words (or parts of them). """ num_lines = len(split_lines_of_utt) line_is_in_segment_core = [False] * num_lines # include only the correct lines for i in range(num_lines): if (split_lines_of_utt[i][7] == 'cor' and split_lines_of_utt[i][4] == split_lines_of_utt[i][6]): line_is_in_segment_core[i] = True # extend each proto-segment forwards as far as we can: for i in range(1, num_lines): if line_is_in_segment_core[i - 1] and not line_is_in_segment_core[i]: edit_type = split_lines_of_utt[i][7] if (not is_tainted(split_lines_of_utt[i]) and (edit_type == 'cor' or edit_type == 'sil' or edit_type == 'fix')): line_is_in_segment_core[i] = True # extend each proto-segment backwards as far as we can: for i in reversed(range(0, num_lines - 1)): if line_is_in_segment_core[i + 1] and not line_is_in_segment_core[i]: edit_type = split_lines_of_utt[i][7] if (not is_tainted(split_lines_of_utt[i]) and (edit_type == 'cor' or edit_type == 'sil' or edit_type == 'fix')): line_is_in_segment_core[i] = True # Get contiguous regions of line in the form of a list # of (start_index, end_index) segment_ranges = [] cur_segment_start = None for i in range(0, num_lines): if line_is_in_segment_core[i]: if cur_segment_start is None: cur_segment_start = i else: if cur_segment_start is not None: segment_ranges.append((cur_segment_start, i)) cur_segment_start = None if cur_segment_start is not None: segment_ranges.append((cur_segment_start, num_lines)) return segment_ranges
0d054d1f891127f0a27b20bfbd82ad6ce85dec39
3,642,105
import math def local_neighborhood_nodes_for_element(index, feature_radius_pixels): """ local_neighborhood_nodes_for_element returns the indices of nodes which are in the local neighborhood of an element. Note that the nodes and elements in a mesh have distinct coordinates: elements exist in the centroids of cubes formed by the mesh of nodes. :param index: the element for which we want the local neighborhood :param feature_radius_pixels: minimum feature radius, in pixels :return: the indices of the local neighborhood set """ # TODO: there might be an off-by-one error in here neighbors = set() x, y, z = elemental_index_to_nodal_index(index) # allow our first index to vary the entire range for i in range(math.ceil(x - feature_radius_pixels), math.floor(x + feature_radius_pixels) + 1): # how much variability is left for the second index given the first? leftover_y_radius = math.sqrt(feature_radius_pixels ** 2 - (x - i) ** 2) for j in range(math.ceil(y - leftover_y_radius), math.floor(y + leftover_y_radius) + 1): leftover_z_radius = math.sqrt(feature_radius_pixels ** 2 - (x - i) ** 2 - (y - j) ** 2) for k in range(math.ceil(z - leftover_z_radius), math.floor(z + leftover_z_radius) + 1): neighbors.add((i, j, k)) return neighbors
cf0f55b71af34cebce69cfc8fa6d201ff83c9808
3,642,106
def _classification(dataset='iris',k_range=[1,31],dist_metric='l1'): """ knn on classificaiton dataset Inputs: dataset: (str) name of dataset k: (list) k[0]:lower bound of number of nearest neighbours; k[1]:upper bound of number of nearest neighbours dist_metric: (str) 'l1' or 'l2' Outputs: validation accuracy """ print ('------Processing Dataset '+dataset+' ------') x_train, x_valid, x_test, y_train, y_valid, y_test = load_dataset(dataset) if y_train.dtype==np.dtype('bool'): y_train = _cast_TF(y_train) y_valid = _cast_TF(y_valid) y_test = _cast_TF(y_test) acc = [] predicted = _eval_knn(k_range,x_train,y_train,x_valid,y_valid,dist_metric,compute_loss=False) for k in range(k_range[0],k_range[1]): #print(k) curr_predict = predicted['k='+str(k)] #print(curr_predict) result = np.argmax(curr_predict,axis=1) #print(result) gt = np.where(y_valid==True,1,0) gt = np.argmax(gt,axis=1) unique, counts = np.unique(result-gt, return_counts=True) correct = dict(zip(unique, counts))[0] #print(correct) acc.append(correct/y_valid.shape[0]) return acc
ce5d0516cffcb545787abe15c46fe086ff8e4991
3,642,107
from os.path import expanduser def parse_pgpass(hostname='scidb2.nersc.gov', username='desidev_admin'): """Read a ``~/.pgpass`` file. Parameters ---------- hostname : :class:`str`, optional Database hostname. username : :class:`str`, optional Database username. Returns ------- :class:`str` A string suitable for creating a SQLAlchemy database engine, or None if no matching data was found. """ fmt = "postgresql://{3}:{4}@{0}:{1}/{2}" try: with open(expanduser('~/.pgpass')) as p: lines = p.readlines() except FileNotFoundError: return None data = dict() for l in lines: d = l.strip().split(':') if d[0] in data: data[d[0]][d[3]] = fmt.format(*d) else: data[d[0]] = {d[3]: fmt.format(*d)} if hostname not in data: return None try: pgpass = data[hostname][username] except KeyError: return None return pgpass
929b705fa8a753f773321e47c73d096ffb4bd171
3,642,108
def make_move(board, max_rows, max_cols, col, player): """Put player's piece in column COL of the board, if it is a valid move. Return a tuple of two values: 1. If the move is valid, make_move returns the index of the row the piece is placed in. Otherwise, it returns -1. 2. The updated board >>> rows, columns = 2, 2 >>> board = create_board(rows, columns) >>> row, board = make_move(board, rows, columns, 0, 'X') >>> row 1 >>> get_piece(board, 1, 0) 'X' >>> row, board = make_move(board, rows, columns, 0, 'O') >>> row 0 >>> row, board = make_move(board, rows, columns, 0, 'X') >>> row -1 >>> row, board = make_move(board, rows, columns, -4, '0') >>> row -1 """ if -1 < col and col < max_cols: return put_piece(board, max_rows, col, player) return (-1, board)
62ecffbabb83e0ee4119b8b8dbead6bdaeb24fb6
3,642,109
def convert_timestamp(ts): """Converts the timestamp to a format suitable for Billing. Examples of a good timestamp for startTime, endTime, and eventTime: '2016-05-20T00:00:00Z' Note the trailing 'Z'. Python does not add the 'Z' so we tack it on ourselves. """ return ts.isoformat() + 'Z'
6b8d19671cbeab69c398508fa942e36689802cdd
3,642,110
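A short usage sketch for convert_timestamp above, using a fixed illustrative datetime.

import datetime

ts = datetime.datetime(2016, 5, 20, 0, 0, 0)
print(convert_timestamp(ts))  # 2016-05-20T00:00:00Z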
def object_id(obj, clazz=None): """Turn a given object into an ID that can be stored in with the notification.""" clazz = clazz or type(obj) if isinstance(obj, clazz): obj = obj.id elif is_mapping(obj): obj = obj.get('id') return obj
617ae362af894c2f27cc6e032aad7f8df4c33a7c
3,642,111
def get_email_from_request(request): """ Get 'Authorization' from request header, and parse the email address using cpg-util """ auth_header = request.headers.get('Authorization') if auth_header is None: raise web.HTTPUnauthorized(reason='Missing authorization header') try: id_token = auth_header[7:] # Strip the 'bearer' / 'Bearer' prefix. return email_from_id_token(id_token) except ValueError as e: raise web.HTTPForbidden(reason='Invalid authorization header') from e
353604d8021948f4cb6ed80d4fb8a9000b8457ce
3,642,112
def play(url, offset, text, card_data, response_builder): """Function to play audio. Using the function to begin playing audio when: - Play Audio Intent is invoked. - Resuming audio when stopped / paused. - Next / Previous commands issues. https://developer.amazon.com/docs/custom-skills/audioplayer-interface-reference.html#play REPLACE_ALL: Immediately begin playback of the specified stream, and replace current and enqueued streams. """ # type: (str, int, str, Dict, ResponseFactory) -> Response logger.info("play : 52 v2") logger.info(url) logger.info(offset) logger.info(text) logger.info(card_data) if card_data: logger.info("play : 60") response_builder.set_card( StandardCard( title=card_data["title"], text=card_data["text"], image=Image( small_image_url=card_data["small_image_url"], large_image_url=card_data["large_image_url"]) ) ) # Using URL as token as they are all unique logger.info("play : 71") response_builder.add_directive( PlayDirective( play_behavior=PlayBehavior.REPLACE_ALL, audio_item=AudioItem( stream=Stream( token=url, url=url, offset_in_milliseconds=offset, expected_previous_token=None), metadata=add_screen_background(card_data) if card_data else None ) ) ).set_should_end_session(True) logger.info("play : 85") if text: logger.info("play : 87") response_builder.speak(text) logger.info("play : 90") return response_builder.response
1a7159adc481d86c35c9206cf8525940b6d1ece3
3,642,113
from typing import List from typing import Dict def all_flags_match_bombs(cells: List[List[Dict]]) -> bool: """ Checks whether all flags are placed correctly and there are no flags over regular cells (not bombs) :param cells: array of array of cells dicts :return: True if all flags are placed correctly """ for row in cells: for cell in row: if cell["mask"] == CellMask.FLAG and cell["value"] != "*": return False return True
67cc53d8b2ea3541112245192763a6c5f8593b86
3,642,114
def household_id_list(filelist, pidp): """ For a set of waves, obtain a list of household IDs belonging to the same individual. """ hidp_list = [] wave_list = [] wn = {1:'a', 2:'b', 3:'c', 4:'d', 5:'e', 6:'f', 7:'g'} c=1 for name in filelist: print("Loading wave %d data..." % c) df = pd.read_csv(name, sep='\t') if pidp in df['pidp'].values: kword = wn[c]+'_hidp' hidp = df.loc[df['pidp'] == pidp, kword].values hidp_list.append(hidp) wave_list.append(c) c+=1 print("\nIndividual %d present in waves {}".format(wave_list) % pidp) return hidp_list
8fd7b271034eb953c708ea38bd75ab4671f420cb
3,642,115
from typing import Tuple def biggest_labelizer_arbitrary(metrics: dict, choice: str, *args, **kwargs) -> Tuple[str, float]: """Given dict of metrics result, returns (key, metrics[key]) whose value is maximal.""" metric_values = list(metrics.values()) metric_keys = list(metrics.keys()) # print(items) big = metric_values[0] draws = [0] for idx, val in enumerate(metric_values[1:], start=1): if val > big: big = val draws = [idx] elif val == big: draws.append(idx) if len(draws) > 1 and choice in (metric_keys[idx] for idx in draws): return choice, big return metric_keys[draws[0]], big
99f4a0f5233f33d80a328cef4e43f339813371a1
3,642,116
from typing import Optional from typing import Dict from typing import Any def _seaborn_viz_histogram(data, x: str, contrast: Optional[str] = None, **kwargs): """Plot a single histogram. Args: data (DataFrame): The data x (str): The name of the column to plot. contrast (str, optional): The name of the categorical column to use for multiple contrasts. **kwargs: Keyword arguments passed to seaborn.distplot Raises: ValueError: Not a numeric column. Returns: Seaborn Axis Object """ if x not in data.select_dtypes("number").columns: raise ValueError("x must be numeric column") default_hist_kwargs: Dict[str, Any] = {} hist_kwargs = {**default_hist_kwargs, **(kwargs or {})} if contrast: data[contrast] = data[contrast].astype("category") ax = sns.histplot(x=x, hue=contrast, data=data, **hist_kwargs) else: ax = sns.histplot(data[x], **hist_kwargs) ax.set_title(f"Histogram of {x}") return ax
27aed8280c372273e02e7b49647b4e2285a81fa7
3,642,117
def str2bool( s ): """ Description: ---------- Converting an input string to a boolean Arguments: ---------- [NAME] [TYPE] [DESCRIPTION] (1) s dict, str The string (or dict of strings) to convert Returns: ---------- True/False depending on the given input string """ if isinstance( s, dict ): for key, _ in s.items(): s[ key ] = str2bool( s[ key ] ) else: return s.lower() in ( "yes", "true", "t", "1" )
cb68fe0382561d69fb332b75c99c01c5a338196f
3,642,118
def im_detect(net, im, boxes=None): """Detect object classes in an image given object proposals. Arguments: net (caffe.Net): Fast R-CNN network to use im (ndarray): color image to test (in BGR order) boxes (ndarray): R x 4 array of object proposals or None (for RPN) Returns: scores (ndarray): R x K array of object class scores (K includes background as object category 0) boxes (ndarray): R x (4*K) array of predicted bounding boxes """ blobs, im_scales = _get_blobs(im, boxes) # When mapping from image ROIs to feature map ROIs, there's some aliasing # (some distinct image ROIs get mapped to the same feature ROI). # Here, we identify duplicate feature ROIs, so we only compute features # on the unique subset. # if cfg.DEDUP_BOXES > 0 and not cfg.TEST.HAS_RPN: # v = np.array([1, 1e3, 1e6, 1e9, 1e12]) # hashes = np.round(blobs['rois'] * cfg.DEDUP_BOXES).dot(v) # _, index, inv_index = np.unique(hashes, return_index=True, # return_inverse=True) # blobs['rois'] = blobs['rois'][index, :] # boxes = boxes[index, :] if cfg.TEST.HAS_RPN: im_blob = blobs['data'] blobs['im_info'] = np.array( [[im_blob.shape[2], im_blob.shape[3], im_scales[0]]], dtype=np.float32) # reshape network inputs net.blobs['data'].reshape(*(blobs['data'].shape)) if cfg.TEST.HAS_RPN: net.blobs['im_info'].reshape(*(blobs['im_info'].shape)) else: net.blobs['rois'].reshape(*(blobs['rois'].shape)) # do forward forward_kwargs = {'data': blobs['data'].astype(np.float32, copy=False)} if cfg.TEST.HAS_RPN: forward_kwargs['im_info'] = blobs['im_info'].astype(np.float32, copy=False) else: forward_kwargs['rois'] = blobs['rois'].astype(np.float32, copy=False) blobs_out = net.forward(**forward_kwargs) if cfg.TEST.HAS_RPN: assert len(im_scales) == 1, "Only single-image batch implemented" rois = net.blobs['rois'].data.copy() # unscale back to raw image space boxes = rois[:, 1:5] / im_scales[0] if cfg.TEST.SVM: # use the raw scores before softmax under the assumption they # were trained as linear SVMs scores = net.blobs['cls_score'].data else: # use softmax estimated probabilities scores = blobs_out['cls_prob'] # if cfg.TEST.BBOX_REG: if False: # Apply bounding-box regression deltas box_deltas = blobs_out['bbox_pred'] pred_boxes = bbox_transform_inv(boxes, box_deltas) pred_boxes = clip_boxes(pred_boxes, im.shape) else: # Simply repeat the boxes, once for each class pred_boxes = np.tile(boxes, (1, scores.shape[1])) # if cfg.DEDUP_BOXES > 0 and not cfg.TEST.HAS_RPN: # # Map scores and predictions back to the original set of boxes # scores = scores[inv_index, :] # pred_boxes = pred_boxes[inv_index, :] fc7 = net.blobs['fc7'].data return net.blobs['cls_score'].data[:, :], scores, fc7, pred_boxes
f37cb46375f39b6baf839be5ec68388c79f47e16
3,642,119
def preCSVdatagen(xy_p, radius, nbin, PlainFirst): """Format the data before generating the csv input for ili'. Args: xy_p (str): path to the X and Y coordinates of ablation marks .npy file. radius (int): displayed radius of the marks in ili'. nbin (int): bin factor used to bin the image for ili'. PlainFirst (bool): intensity values of each datapoint are equal to 1. Used to visualize the ablation mark coordinates on the postMALDI brightfield in ili'. Returns: data (list): formatted data (2D). """ X, Y = np.load(xy_p) Xs = X /( nbin) # todo check relevance of Y <-> X Ys = Y /( nbin) Ys = Ys - np.min(Ys) Xs = Xs - np.min(Xs) Rs = np.ones(np.shape(Xs)) * radius data = [] data.append(list(np.append('Num', list(range(np.shape(Xs.ravel())[0]))))) data.append(list(np.append('X', Ys.ravel()))) data.append(list(np.append('Y', Xs.ravel()))) data.append(list(np.append('Z', np.zeros(np.shape(Xs.ravel()))))) data.append(list(np.append('R', Rs.ravel()))) if PlainFirst: data.append(list(np.append('Flat', np.ones(np.shape(Xs.ravel()))))) return data
2d19439eb82930e622b897a9ee1ddda364f0a04a
3,642,120
def dispatch_error_adaptor(func): """Construct a signature isomorphic to dispatch_error. The actual handler will receive only arguments explicitly declared, and a possible tg_format parameter. """ def adaptor(controller, tg_source, tg_errors, tg_exceptions, *args, **kw): tg_format = kw.pop('tg_format', None) args, kw = inject_args(func, {"tg_source": tg_source, "tg_errors": tg_errors, "tg_exceptions": tg_exceptions}, args, kw, 1) args, kw = adapt_call(func, args, kw, 1) if tg_format is not None: kw['tg_format'] = tg_format return func(controller, *args, **kw) return adaptor
67be23f01c11d668d86f5e2b1afcfb76db79ea6c
3,642,121
import re def address_split(address, env=None): """The address_split() function splits an address into its four components. Address strings are on the form detector-detectorID|device-deviceID, where the detectors must be in dir(xtc.DetInfo.Detector) and device must be in (xtc.DetInfo.Device). @param address Full data source address of the DAQ device @param env Optional env to dereference an alias into an address @return Four-tuple of detector name, detector ID, device, and device ID """ # pyana m = re.match( r"^(?P<det>\S+)\-(?P<det_id>\d+)\|(?P<dev>\S+)\-(?P<dev_id>\d+)$", address) if m is not None: return (m.group('det'), m.group('det_id'), m.group('dev'), m.group('dev_id')) # psana m = re.match( r"^(?P<det>\S+)\.(?P<det_id>\d+)\:(?P<dev>\S+)\.(?P<dev_id>\d+)$", address) if m is not None: return (m.group('det'), m.group('det_id'), m.group('dev'), m.group('dev_id')) # psana DetInfo string m = re.match( r"^DetInfo\((?P<det>\S+)\.(?P<det_id>\d+)\:(?P<dev>\S+)\.(?P<dev_id>\d+)\)$", address) if m is not None: return (m.group('det'), m.group('det_id'), m.group('dev'), m.group('dev_id')) if env is not None: # Try to see if this is a detector alias, and if so, dereference it. Code from psana's Detector/PyDetector.py amap = env.aliasMap() alias_src = amap.src(address) # string --> DAQ-style psana.Src # if it is an alias, look up the full name if amap.alias(alias_src) != '': # alias found address = str(alias_src) return address_split(address) return (None, None, None, None)
c5d362c7fc6121d64ec6a660bcdb7a9b4b532553
3,642,122
import time def solveGroth(A, n, init_val=None): """ ... Parameters ---------- A: np.matrix dfajdslkf n: int ddddddd init_val: float, optional dsfdsafdasfd Returns ------- list of float: float: float: float: """ eps=0.5 eta=0.05 threshold = 1.1 N = n Ap = A if init_val is not None: w=init_val else: w = np.ones(N) min_val = np.sum(np.abs(Ap)) curr_y = np.zeros(N) curr_alpha = np.zeros(N) avg_y_val = np.zeros(N) avg_X=np.zeros((N,N)) avg_alpha=np.zeros(N) T=4*N schedule_size = round(T/8) #We change eps in epochs print("iteration bound:",T) z = np.zeros(N) vals = 0 g = np.random.standard_normal(T) for i in range(T): if (i+1)%(T/2)==0: eps=0.01 if i%schedule_size ==0 and eps>0.01 : eps=eps/2 if i%schedule_size == 0 and i>T/2: eps /= 2 wtil=(1-eta)*w+eta*np.ones(N) w1 = np.array([1/np.sqrt(j) for j in wtil]) start_time = time.time() d = np.tile(w1, (N, 1)) M = np.multiply(Ap,d) d = np.tile(np.array([w1]).transpose(), (1,N)) M = np.multiply(M,d) start_time = time.time() eigval, eigvec = lasp.eigsh(M, k=1, which='LA', tol=0.00001) y = eigvec[:,0] y *= np.sqrt(N) y = np.multiply(y,w1) avg_y_val += y**2 val = np.matmul(np.transpose(y), np.matmul(Ap,y)) avg_alpha+=val*w if val < min_val: min_val = val curr_y = y curr_alpha = w vals += val print("iterate", i, "val = ", val, " minval=", min_val, " linf of curr y=", np.max(np.abs(y**2)) , " infinity norm avg X =", np.max((1.0/(i+1))*avg_y_val), "SDP sol val:", vals/(i+1), "eps,eta=", eps, " , ", eta) if checkCondition(y,threshold): print(y,"Current iterate Condition satisfied, i : ",i) print("min val = ", min_val) print("curr_y = ", curr_y) print("curr_alpha = ", curr_alpha) print("inf norm of curr_y=", max(abs(curr_y))) return [np.matmul(curr_y,curr_y.T),min_val, curr_alpha, avg_y_val] elif checkCondition((1.0/(i+1))*avg_y_val, threshold): avg_y_val=(1.0/(i+1))*avg_y_val avg_val = vals/(i+1) print(avg_y_val," Avg Condition satisfied, i : ",i) print("min val = ", min_val) print("curr val=", avg_val) print("curr_alpha = ", (1.0/i)*avg_alpha) print("inf norm of avg_y=", max(abs(avg_y_val))) return [(1.0/(i+1))*avg_X,min_val, curr_alpha, avg_y_val] if i < T/2: w = updateWeights_2(w,y,threshold, eps, N) else: w = updateWeights(w,y,threshold, 2*eps, N) u = y*g[i] z += u print("min val = ", min_val) print("sum of curr_alpha = ", sum(curr_alpha)) print("sum weights at end = ", sum(w)) print("inf norm of curr_y=", max(abs(curr_y))) return [np.matmul(curr_y, curr_y.T), min_val, curr_alpha, avg_y_val]
a8f4a3ea2274bd1a81565500683dea84378ccddc
3,642,123
def verify_any(func, *args, **kwargs): """ Assert that any of `func(*args, **kwargs)` are true. """ return _verify(func, 'any', *args, **kwargs)
618165e6a9f252ac2ddeffdb9defa34f2d281900
3,642,124
def can_create_election(user_id, user_info): """ for now, just let it be""" return True
06c8290b41b38a840b7826173fd65130d38260a7
3,642,125
import locale def get_system_language(): """ Get system language and locale """ try: default_locale = locale.getdefaultlocale() except ValueError: if IS_MAC: # Fix for "ValueError: unknown locale: UTF-8" on Mac. # The default English locale on Mac is set as "UTF-8" instead of "en-US.UTF-8" # see https://bugs.python.org/issue18378 return 'en_US', 'UTF-8' # re-throw any other issue raise system_lang = default_locale[0] system_locale = default_locale[1] return system_lang, system_locale
6ab566ca6274480c7af5dd65456e675ae614df7a
3,642,126
from .path import Path2D def circle_pattern(pattern_radius, circle_radius, count, center=[0.0, 0.0], angle=None, **kwargs): """ Create a Path2D representing a circle pattern. Parameters ------------ pattern_radius : float Radius of circle centers circle_radius : float The radius of each circle count : int Number of circles in the pattern center : (2,) float Center of pattern angle : float If defined pattern will span this angle If None, pattern will be evenly spaced Returns ------------- pattern : trimesh.path.Path2D Path containing circular pattern """ if angle is None: angles = np.linspace(0.0, np.pi * 2.0, count + 1)[:-1] elif isinstance(angle, float) or isinstance(angle, int): angles = np.linspace(0.0, angle, count) else: raise ValueError('angle must be float or int!') # centers of circles centers = np.column_stack(( np.cos(angles), np.sin(angles))) * pattern_radius vert = [] ents = [] for circle_center in centers: # (3,3) center points of arc three = arc.to_threepoint(angles=[0, np.pi], center=circle_center, radius=circle_radius) # add a single circle entity ents.append( Arc( points=np.arange(3) + len(vert), closed=True)) # keep flat array by extend instead of append vert.extend(three) # translate vertices to pattern center vert = np.array(vert) + center pattern = Path2D(entities=ents, vertices=vert, **kwargs) return pattern
b82d60c7a76f12349605191b16bf04d7899c3a3a
3,642,127
def boolean(input): """Convert the given input to a boolean value. Intelligently handles boolean and non-string values, returning as-is and passing to the bool builtin respectively. This process is case-insensitive. Acceptable values: True * yes * y * on * true * t * 1 False * no * n * off * false * f * 0 :param input: the value to convert to a boolean :type input: any :returns: converted boolean value :rtype: bool """ try: input = input.strip().lower() except AttributeError: return bool(input) if input in ('yes', 'y', 'on', 'true', 't', '1'): return True if input in ('no', 'n', 'off', 'false', 'f', '0'): return False raise ValueError("Unable to convert {0!r} to a boolean value.".format(input))
09c09206d5487bf02e3271403e2ba67358e1d148
3,642,128
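A brief usage sketch for the boolean converter above, showing the string path, the non-string path, and the failure path; the inputs are illustrative.

for value in ("Yes", " off ", "T", 1, 0, None):
    print(repr(value), "->", boolean(value))
# 'Yes' -> True, ' off ' -> False, 'T' -> True, 1 -> True, 0 -> False, None -> False

try:
    boolean("maybe")
except ValueError as exc:
    print(exc)  # Unable to convert 'maybe' to a boolean value.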
def find_horizontal_up_down_links(tc, u, out_up=None, out_down=None): """Find indices of nodes located in the horizontally upcurrent and downcurrent directions """ if out_up is None: out_up = np.zeros(u.shape[0], dtype=int) if out_down is None: out_down = np.zeros(u.shape[0], dtype=int) out_up[:] = tc.link_west[:] out_down[:] = tc.link_east[:] negative_u_index = np.where(u < 0)[0] out_up[negative_u_index] = tc.link_east[negative_u_index] out_down[negative_u_index] = tc.link_west[negative_u_index] return out_up, out_down
b61976a57d8dd850c26c7a9baff11483ccdb306f
3,642,129
def _compute_composite_beta(model, robo, j, i): """ Compute the composite beta wrench for link i. Args: model: An instance of DynModel robo: An instance of Robot j: link number i: antecedent value Returns: An instance of DynModel that contains all the new values. """ i_beta_i_c = Screw() # local variables j_s_i = robo.geos[j].tmat.s_i_wrt_j i_beta_i = model.composite_betas[i].val j_beta_j_c = model.composite_betas[j].val j_inertia_j_c = model.composite_inertias[j].val j_zeta_j = model.zetas[j].val # actual computation i_beta_i_c.val = i_beta_i + (j_s_i.transpose() * j_beta_j_c) - \ (j_s_i.transpose() * j_inertia_j_c * j_zeta_j) # store computed beta in model model.composite_betas[i] = i_beta_i_c return model
0fa80859787a4e523402d10237b63e33ca0082f4
3,642,130
def pos(x, y): """Returns floored and camera-offset x,y tuple. Setting out of bounds is possible, but getting is not; mod in callers for get_at. """ return (flr(xo + x), flr(yo + y))
1a17648c074157c6164856f44cfa309923ca2226
3,642,131
from typing import List from typing import Dict from typing import Tuple def _check_blockstream_for_transactions( accounts: List[BTCAddress], ) -> Dict[BTCAddress, Tuple[bool, FVal]]: """May raise connection errors or KeyError""" have_transactions = {} for account in accounts: url = f'https://blockstream.info/api/address/{account}' response_data = request_get_dict(url=url, handle_429=True, backoff_in_seconds=4) stats = response_data['chain_stats'] balance = satoshis_to_btc(int(stats['funded_txo_sum']) - int(stats['spent_txo_sum'])) have_txs = stats['tx_count'] != 0 have_transactions[account] = (have_txs, balance) return have_transactions
a17a9204dc0d5f11b8c0352d15c871141e7bb09b
3,642,132
from typing import Callable from typing import Tuple def metropolis_hastings( proposal: Proposal, state: State, step_size: float, ns: int, unif: float, inverse_transform: Callable ) -> Tuple[State, Info, np.ndarray, bool]: """Computes the Metropolis-Hastings accept-reject criterion given a proposal, a current state of the chain, a integration step-size, and a number of itnegration steps. We also provide a uniform random variable for determining the accept-reject criterion and the inverse transformation function for transforming parameters from an unconstrained space to a constrained space. Args: proposal: A proposal operator to advance the state of the Markov chain. state: An augmented state object with the updated position and momentum and values for the log-posterior and metric and their gradients. step_size: The integration step-size. num_steps: The number of integration steps. unif: Uniform random number for determining the accept-reject decision. inverse_transform: Inverse transformation to map samples back to the original space. Returns: state: An augmented state object with the updated position and momentum and values for the log-posterior and metric and their gradients. info: An information object with the updated number of fixed point iterations and boolean indicator for successful integration. q: The position variable in the constrained space. accept: Whether or not the proposal was accepted. """ ham = hamiltonian( state.momentum, state.log_posterior, state.logdet_metric, state.inv_metric) q, fldj = inverse_transform(state.position) ildj = -fldj new_state, prop_info = proposal.propose(state, step_size, ns) new_chol, new_logdet = new_state.sqrtm_metric, new_state.logdet_metric new_q, new_fldj = inverse_transform(new_state.position) new_ham = hamiltonian( new_state.momentum, new_state.log_posterior, new_state.logdet_metric, new_state.inv_metric) # Notice the relevant choice of sign when the Jacobian determinant of the # forward or inverse transform is used. # # Write this expression as, # (exp(-new_ham) / exp(new_fldj)) / (exp(-ham) * exp(ildj)) # # See the following resource for understanding the Metropolis-Hastings # correction with a Jacobian determinant correction [1]. # # [1] https://wiki.helsinki.fi/download/attachments/48865399/ch7-rev.pdf logu = np.log(unif) metropolis = logu < ham - new_ham - new_fldj - ildj + prop_info.logdet accept = np.logical_and(metropolis, prop_info.success) if accept: state = new_state q = new_q ildj = -new_fldj state.momentum *= -1.0 return state, prop_info, q, accept
b5390d8a420ebb3d62c700fe246127935b658b6c
3,642,133
import os def continuous_future(root_symbol_str, offset=0, roll="volume", adjustment="mul", bundle=None): """ Return a ContinuousFuture object for the specified root symbol in the specified bundle (or default bundle). Parameters ---------- root_symbol_str : str The root symbol for the future chain. offset : int, optional The distance from the primary contract. Default is 0. roll : str, optional How rolls are determined. Possible choices: 'volume', (roll when back contract volume exceeds front contract volume), or 'calendar' (roll on rollover date). Default is 'volume'. adjustment : str, optional Method for adjusting lookback prices between rolls. Options are 'mul', 'add' or None. 'mul' calculates the ratio of front and back contracts on the roll date ((back - front)/front) and multiplies front contract prices by (1 + ratio). 'add' calculates the difference between back and front contracts on the roll date (back - front) and adds the difference to front contract prices. None concatenates contracts without any adjustment. Default is 'mul'. bundle : str, optional the bundle code. If omitted, the default bundle will be used (and must be set). Returns ------- asset : zipline.assets.ContinuousFuture Examples -------- Get the continuous future object for ES and get the current chain as of 2020-09-18: >>> es = continuous_future("ES", roll="volume", bundle="es-1min") # doctest: +SKIP >>> data = get_data("2020-09-18 10:00:00", bundle="es-1min") # doctest: +SKIP >>> print(data.current_chain(es)) # doctest: +SKIP """ if not bundle: bundle = get_default_bundle() if not bundle: raise ValidationError("you must specify a bundle or set a default bundle") bundle = bundle["default_bundle"] load_extensions(code=bundle) bundle_data = bundles.load( bundle, os.environ, pd.Timestamp.utcnow(), ) asset_finder = asset_finder_cache.get(bundle, bundle_data.asset_finder) asset_finder_cache[bundle] = asset_finder continuous_future = asset_finder.create_continuous_future( root_symbol_str, offset, roll, adjustment, ) return continuous_future
e48241a8d098089f0f61c1e6587ccad0cf366f82
3,642,134
import datetime def Now(): """Returns a datetime.datetime instance representing the current time. This is just a wrapper to ease testing against the datetime module. Returns: An instance of datetime.datetime. """ return datetime.datetime.now()
9a0657011e10b47eb755a575216944a786218f2e
3,642,135
import collections import os def ndvi_list_hdf(hdf_dir, satellite=None): """ List all the available HDF files, grouped by tile Args: hdf_dir: directory containing one subdirectory per year which contains HDF files satellite: None to select both Terra and Aqua, 'mod13q1' for Terra, 'myd13q1' for Aqua Returns: dict: A dict (keyed by tilename) of list of (full filepath, timestamp_ms) tuples, sorted by timestamp_ms """ files = collections.defaultdict(lambda: []) for subdir in os.listdir(hdf_dir): subdir = os.path.join(hdf_dir, subdir) if not os.path.isdir(subdir): continue for hdf_file in os.listdir(subdir): if not hdf_file.endswith('.hdf'): continue try: full_fname = os.path.join(subdir, hdf_file) d = parse_ndvi_filename(hdf_file) if satellite is not None and satellite != d['satellite']: continue files[d['tile_name']].append((full_fname, d['timestamp_ms'])) except ValueError as e: print(e) for tile_name in files.keys(): files[tile_name] = sorted(files[tile_name], key=lambda t: t[1]) return files
068062bdef503b6652c62c142a1cf80d830fc8db
3,642,136
from typing import List import os import xarray as xr def read_levels(dir_path: str, progress_monitor: PyramidLevelCallback = None) -> List[xr.Dataset]: """ Read the levels of a multi-level pyramid with spatial resolution decreasing by a factor of two in both spatial dimensions. :param dir_path: The directory path. :param progress_monitor: An optional progress monitor. :return: A list of dataset instances representing the multi-level pyramid. """ file_paths = os.listdir(dir_path) level_paths = {} num_levels = -1 for filename in file_paths: file_path = os.path.join(dir_path, filename) basename, ext = os.path.splitext(filename) if basename.isdigit(): index = int(basename) num_levels = max(num_levels, index + 1) if os.path.isfile(file_path) and ext == ".link": level_paths[index] = (ext, file_path) elif os.path.isdir(file_path) and ext == ".zarr": level_paths[index] = (ext, file_path) if num_levels != len(level_paths): raise ValueError(f"Inconsistent pyramid directory:" f" expected {num_levels} but found {len(level_paths)} entries:" f" {dir_path}") levels = [] for index in range(num_levels): ext, file_path = level_paths[index] if ext == ".link": with open(file_path, "r") as fp: link_file_path = fp.read() dataset = xr.open_zarr(link_file_path) else: dataset = xr.open_zarr(file_path) if progress_monitor is not None: progress_monitor(dataset, index, num_levels) levels.append(dataset) return levels
5e8145c5dcca47f51e1f5190e42976b37ea433c6
3,642,137
def create_provisioned_product_name(account_name: str) -> str: """ Replaces all space characters in an Account Name with hyphens, also removes all trailing and leading whitespace """ return account_name.strip().replace(" ", "-")
743e7438f421d5d42c071d27d1b0fa2a816a9b4d
3,642,138
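A one-line usage sketch for create_provisioned_product_name above, with a made-up account name.

print(create_provisioned_product_name("  My Dev Account "))  # My-Dev-Account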
def case34(): """ Create the IEEE 34 bus from IEEE PES Test Feeders: "https://site.ieee.org/pes-testfeeders/resources/”. OUTPUT: **net** - The pandapower format network. """ net = pp.create_empty_network() # Linedata # CF-300 line_data = {'c_nf_per_km': 3.8250977, 'r_ohm_per_km': 0.69599766, 'x_ohm_per_km': 0.5177677, 'c0_nf_per_km': 1.86976748, 'r0_ohm_per_km': 1.08727498, 'x0_ohm_per_km': 1.47374703, 'max_i_ka': 0.23, 'type': 'ol'} pp.create_std_type(net, line_data, name='CF-300', element='line') # CF-301 line_data = {'c_nf_per_km': 3.66884364, 'r_ohm_per_km': 1.05015841, 'x_ohm_per_km': 0.52265586, 'c0_nf_per_km': 1.82231544, 'r0_ohm_per_km': 1.48350255, 'x0_ohm_per_km': 1.60203942, 'max_i_ka': 0.18, 'type': 'ol'} pp.create_std_type(net, line_data, name='CF-301', element='line') # CF-302 line_data = {'c_nf_per_km': 0.8751182, 'r_ohm_per_km': 0.5798427, 'x_ohm_per_km': 0.30768221, 'c0_nf_per_km': 0.8751182, 'r0_ohm_per_km': 0.5798427, 'x0_ohm_per_km': 0.30768221, 'max_i_ka': 0.14, 'type': 'ol'} pp.create_std_type(net, line_data, name='CF-302', element='line') # CF-303 line_data = {'c_nf_per_km': 0.8751182, 'r_ohm_per_km': 0.5798427, 'x_ohm_per_km': 0.30768221, 'c0_nf_per_km': 0.8751182, 'r0_ohm_per_km': 0.5798427, 'x0_ohm_per_km': 0.30768221, 'max_i_ka': 0.14, 'type': 'ol'} pp.create_std_type(net, line_data, name='CF-303', element='line') # CF-304 line_data = {'c_nf_per_km': 0.90382554, 'r_ohm_per_km': 0.39802955, 'x_ohm_per_km': 0.29436416, 'c0_nf_per_km': 0.90382554, 'r0_ohm_per_km': 0.39802955, 'x0_ohm_per_km': 0.29436416, 'max_i_ka': 0.18, 'type': 'ol'} pp.create_std_type(net, line_data, name='CF-304', element='line') # Busses # bus0 = pp.create_bus(net, name='Bus 0', vn_kv=69.0, type='n', zone='34_BUS') bus_800 = pp.create_bus(net, name='Bus 800', vn_kv=24.9, type='n', zone='34_BUS') bus_802 = pp.create_bus(net, name='Bus 802', vn_kv=24.9, type='n', zone='34_BUS') bus_806 = pp.create_bus(net, name='Bus 806', vn_kv=24.9, type='n', zone='34_BUS') bus_808 = pp.create_bus(net, name='Bus 808', vn_kv=24.9, type='n', zone='34_BUS') bus_810 = pp.create_bus(net, name='Bus 810', vn_kv=24.9, type='n', zone='34_BUS') bus_812 = pp.create_bus(net, name='Bus 812', vn_kv=24.9, type='n', zone='34_BUS') bus_814 = pp.create_bus(net, name='Bus 814', vn_kv=24.9, type='n', zone='34_BUS') bus_850 = pp.create_bus(net, name='Bus 850', vn_kv=24.9, type='n', zone='34_BUS') bus_816 = pp.create_bus(net, name='Bus 816', vn_kv=24.9, type='n', zone='34_BUS') bus_818 = pp.create_bus(net, name='Bus 818', vn_kv=24.9, type='n', zone='34_BUS') bus_820 = pp.create_bus(net, name='Bus 820', vn_kv=24.9, type='n', zone='34_BUS') bus_822 = pp.create_bus(net, name='Bus 822', vn_kv=24.9, type='n', zone='34_BUS') bus_824 = pp.create_bus(net, name='Bus 824', vn_kv=24.9, type='n', zone='34_BUS') bus_826 = pp.create_bus(net, name='Bus 826', vn_kv=24.9, type='n', zone='34_BUS') bus_828 = pp.create_bus(net, name='Bus 828', vn_kv=24.9, type='n', zone='34_BUS') bus_830 = pp.create_bus(net, name='Bus 830', vn_kv=24.9, type='n', zone='34_BUS') bus_854 = pp.create_bus(net, name='Bus 854', vn_kv=24.9, type='n', zone='34_BUS') bus_852 = pp.create_bus(net, name='Bus 852', vn_kv=24.9, type='n', zone='34_BUS') bus_832 = pp.create_bus(net, name='Bus 832', vn_kv=24.9, type='n', zone='34_BUS') bus_858 = pp.create_bus(net, name='Bus 858', vn_kv=24.9, type='n', zone='34_BUS') bus_834 = pp.create_bus(net, name='Bus 834', vn_kv=24.9, type='n', zone='34_BUS') bus_842 = pp.create_bus(net, name='Bus 842', vn_kv=24.9, type='n', zone='34_BUS') bus_844 = 
pp.create_bus(net, name='Bus 844', vn_kv=24.9, type='n', zone='34_BUS') bus_846 = pp.create_bus(net, name='Bus 846', vn_kv=24.9, type='n', zone='34_BUS') bus_848 = pp.create_bus(net, name='Bus 848', vn_kv=24.9, type='n', zone='34_BUS') bus_860 = pp.create_bus(net, name='Bus 860', vn_kv=24.9, type='n', zone='34_BUS') bus_836 = pp.create_bus(net, name='Bus 836', vn_kv=24.9, type='n', zone='34_BUS') bus_840 = pp.create_bus(net, name='Bus 840', vn_kv=24.9, type='n', zone='34_BUS') bus_862 = pp.create_bus(net, name='Bus 862', vn_kv=24.9, type='n', zone='34_BUS') bus_838 = pp.create_bus(net, name='Bus 838', vn_kv=24.9, type='n', zone='34_BUS') bus_864 = pp.create_bus(net, name='Bus 864', vn_kv=24.9, type='n', zone='34_BUS') bus_888 = pp.create_bus(net, name='Bus 888', vn_kv=4.16, type='n', zone='34_BUS') bus_890 = pp.create_bus(net, name='Bus 890', vn_kv=4.16, type='n', zone='34_BUS') bus_856 = pp.create_bus(net, name='Bus 856', vn_kv=24.9, type='n', zone='34_BUS') # Lines pp.create_line(net, bus_800, bus_802, length_km=0.786384, std_type='CF-300', name='Line 0') pp.create_line(net, bus_802, bus_806, length_km=0.527304, std_type='CF-300', name='Line 1') pp.create_line(net, bus_806, bus_808, length_km=9.823704, std_type='CF-300', name='Line 2') pp.create_line(net, bus_808, bus_810, length_km=1.769059, std_type='CF-303', name='Line 3') pp.create_line(net, bus_808, bus_812, length_km=11.43000, std_type='CF-300', name='Line 4') pp.create_line(net, bus_812, bus_814, length_km=9.061704, std_type='CF-300', name='Line 5') # pp.create_line(net, bus_814, bus_850, length_km=0.003048, std_type='CF-301', name='Line 6') pp.create_line(net, bus_816, bus_818, length_km=0.521208, std_type='CF-302', name='Line 7') pp.create_line(net, bus_816, bus_824, length_km=3.112008, std_type='CF-301', name='Line 8') pp.create_line(net, bus_818, bus_820, length_km=14.67612, std_type='CF-302', name='Line 9') pp.create_line(net, bus_820, bus_822, length_km=4.187952, std_type='CF-302', name='Line 10') pp.create_line(net, bus_824, bus_826, length_km=0.923544, std_type='CF-303', name='Line 11') pp.create_line(net, bus_824, bus_828, length_km=0.256032, std_type='CF-301', name='Line 12') pp.create_line(net, bus_828, bus_830, length_km=6.230112, std_type='CF-301', name='Line 13') pp.create_line(net, bus_830, bus_854, length_km=0.158496, std_type='CF-301', name='Line 14') pp.create_line(net, bus_832, bus_858, length_km=1.493520, std_type='CF-301', name='Line 15') pp.create_line(net, bus_834, bus_860, length_km=0.615696, std_type='CF-301', name='Line 16') pp.create_line(net, bus_834, bus_842, length_km=0.085344, std_type='CF-301', name='Line 17') pp.create_line(net, bus_836, bus_840, length_km=0.262128, std_type='CF-301', name='Line 18') pp.create_line(net, bus_836, bus_862, length_km=0.085344, std_type='CF-301', name='Line 19') pp.create_line(net, bus_842, bus_844, length_km=0.411480, std_type='CF-301', name='Line 20') pp.create_line(net, bus_844, bus_846, length_km=1.109472, std_type='CF-301', name='Line 21') pp.create_line(net, bus_846, bus_848, length_km=0.161544, std_type='CF-301', name='Line 22') pp.create_line(net, bus_850, bus_816, length_km=0.094488, std_type='CF-301', name='Line 23') # pp.create_line(net, bus_852, bus_832, length_km=0.003048, std_type='CF-301', name='Line 24') pp.create_line(net, bus_854, bus_856, length_km=7.110984, std_type='CF-303', name='Line 25') pp.create_line(net, bus_854, bus_852, length_km=11.22578, std_type='CF-301', name='Line 26') pp.create_line(net, bus_858, bus_864, length_km=0.493776, 
std_type='CF-302', name='Line 27') pp.create_line(net, bus_858, bus_834, length_km=1.776984, std_type='CF-301', name='Line 28') pp.create_line(net, bus_860, bus_836, length_km=0.816864, std_type='CF-301', name='Line 29') pp.create_line(net, bus_860, bus_838, length_km=1.481328, std_type='CF-304', name='Line 30') pp.create_line(net, bus_888, bus_890, length_km=3.218688, std_type='CF-300', name='Line 31') # Regulator 1 pp.create_transformer_from_parameters(net, bus_814, bus_850, sn_mva=2.5, vn_hv_kv=24.9, vn_lv_kv=24.9, vkr_percent=0.320088*2.5, vk_percent=0.357539*2.5, pfe_kw=0.0, i0_percent=0.0, shift_degree=0.0, tap_side='lv', tap_neutral=0, tap_max=16, tap_min=-16, tap_step_percent=0.625, tap_pos=0, tap_phase_shifter=False, name='Regulator 1') # Regulator 2 pp.create_transformer_from_parameters(net, bus_852, bus_832, sn_mva=2.5, vn_hv_kv=24.9, vn_lv_kv=24.9, vkr_percent=0.320088*2.5, vk_percent=0.357539*2.5, pfe_kw=0.0, i0_percent=0.0, shift_degree=0.0, tap_side='lv', tap_neutral=0, tap_max=16, tap_min=-16, tap_step_percent=0.625, tap_pos=0, tap_phase_shifter=False, name='Regulator 2') # # Substation # pp.create_transformer_from_parameters(net, bus0, bus_800, sn_mva=2.5, vn_hv_kv=69.0, # vn_lv_kv=24.9, vkr_percent=1.0, vk_percent=8.062257, # pfe_kw=0.0, i0_percent=0.0, shift_degree=0.0, # tap_side='lv', tap_neutral=0, tap_max=2, tap_min=-2, # tap_step_percent=2.5, tap_pos=0, tap_phase_shifter=False, # name='Substation') # Traformer pp.create_transformer_from_parameters(net, bus_832, bus_888, sn_mva=0.5, vn_hv_kv=24.9, vn_lv_kv=4.16, vkr_percent=1.9, vk_percent=4.5, pfe_kw=0.0, i0_percent=0.0, shift_degree=0.0, name='Transformer 1') # Loads pp.create_load(net, bus_806, p_mw=0.055, q_mvar=0.029, name='Load 806') pp.create_load(net, bus_810, p_mw=0.016, q_mvar=0.008, name='Load 810') pp.create_load(net, bus_820, p_mw=0.034, q_mvar=0.017, name='Load 820') pp.create_load(net, bus_822, p_mw=0.135, q_mvar=0.070, name='Load 822') pp.create_load(net, bus_824, p_mw=0.005, q_mvar=0.002, name='Load 824') pp.create_load(net, bus_826, p_mw=0.004, q_mvar=0.020, name='Load 826') pp.create_load(net, bus_828, p_mw=0.004, q_mvar=0.002, name='Load 828') pp.create_load(net, bus_830, p_mw=0.007, q_mvar=0.003, name='Load 830') pp.create_load(net, bus_856, p_mw=0.004, q_mvar=0.002, name='Load 856') pp.create_load(net, bus_858, p_mw=0.015, q_mvar=0.007, name='Load 858') pp.create_load(net, bus_864, p_mw=0.002, q_mvar=0.001, name='Load 864') pp.create_load(net, bus_834, p_mw=0.032, q_mvar=0.017, name='Load 834') pp.create_load(net, bus_860, p_mw=0.029, q_mvar=0.073, name='Load 860') pp.create_load(net, bus_836, p_mw=0.082, q_mvar=0.043, name='Load 836') pp.create_load(net, bus_840, p_mw=0.040, q_mvar=0.020, name='Load 840') pp.create_load(net, bus_838, p_mw=0.028, q_mvar=0.014, name='Load 838') pp.create_load(net, bus_844, p_mw=0.009, q_mvar=0.005, name='Load 844') pp.create_load(net, bus_846, p_mw=0.037, q_mvar=0.031, name='Load 846') pp.create_load(net, bus_848, p_mw=0.023, q_mvar=0.011, name='Load 848') pp.create_load(net, bus_860, p_mw=0.060, q_mvar=0.048, name='Load 860 spot') pp.create_load(net, bus_840, p_mw=0.027, q_mvar=0.021, name='Load 840 spot') pp.create_load(net, bus_844, p_mw=0.405, q_mvar=0.315, name='Load 844 spot') pp.create_load(net, bus_848, p_mw=0.060, q_mvar=0.048, name='Load 848 spot') pp.create_load(net, bus_890, p_mw=0.450, q_mvar=0.225, name='Load 890 spot') pp.create_load(net, bus_830, p_mw=0.045, q_mvar=0.020, name='Load 830 spot') # External grid pp.create_ext_grid(net, bus_800, 
vm_pu=1.0, va_degree=0.0, s_sc_max_mva=10.0, s_sc_min_mva=10.0, rx_max=1, rx_min=1, r0x0_max=1, x0x_max=1) # Distributed generators pp.create_sgen(net, bus_848, p_mw=0.66, q_mvar=0.500, name='DG 1', max_p_mw=0.66, min_p_mw=0, max_q_mvar=0.5, min_q_mvar=0) pp.create_sgen(net, bus_890, p_mw=0.50, q_mvar=0.375, name='DG 2', max_p_mw=0.50, min_p_mw=0, max_q_mvar=0.375, min_q_mvar=0) pp.create_sgen(net, bus_822, p_mw=0.1, type='PV', name='PV 1', max_p_mw=0.1, min_p_mw=0, max_q_mvar=0, min_q_mvar=0) pp.create_sgen(net, bus_856, p_mw=0.1, type='PV', name='PV 2', max_p_mw=0.1, min_p_mw=0, max_q_mvar=0, min_q_mvar=0) pp.create_sgen(net, bus_838, p_mw=0.1, type='PV', name='PV 3', max_p_mw=0.1, min_p_mw=0, max_q_mvar=0, min_q_mvar=0) pp.create_sgen(net, bus_822, p_mw=0.1, type='WP', name='WP 1', max_p_mw=0.1, min_p_mw=0, max_q_mvar=0, min_q_mvar=0) pp.create_sgen(net, bus_826, p_mw=0.1, type='WP', name='WP 2', max_p_mw=0.1, min_p_mw=0, max_q_mvar=0, min_q_mvar=0) pp.create_sgen(net, bus_838, p_mw=0.1, type='WP', name='WP 3', max_p_mw=0.1, min_p_mw=0, max_q_mvar=0, min_q_mvar=0) # Shunt capacity bank pp.create_shunt(net, bus_840, q_mvar=-0.12, name='SCB 1', step=4, max_step=4) pp.create_shunt(net, bus_864, q_mvar=-0.12, name='SCB 2', step=4, max_step=4) # storage pp.create_storage(net, bus_810, p_mw=0.5, max_e_mwh=2, sn_mva=1.0, soc_percent=50, min_e_mwh=0.2, name='Storage') return net
8e04a125df0e0a64008724d419bafe19481f5ac1
3,642,139
def _hack_namedtuple(cls): """Make class generated by namedtuple picklable.""" name = cls.__name__ fields = cls._fields def reduce(self): return (_restore, (name, fields, tuple(self))) cls.__reduce__ = reduce cls._is_namedtuple_ = True return cls
89468f0ffb5506ef0c9a33fec0d390576638e659
3,642,140
import os import logging import json def load_config(settings_file='./test_settings.py'): """ Loads the config files merging the defaults with the file defined in environ.PULLSBURY_SETTINGS if it exists. """ config = Config(os.getcwd()) if 'PULLSBURY_SETTINGS' in os.environ: config.from_envvar('PULLSBURY_SETTINGS') else: config.from_pyfile(settings_file) if config.get('LOGGING_CONFIG'): logging.config.fileConfig( config.get('LOGGING_CONFIG'), disable_existing_loggers=False) json_values = [ 'TEAMS', 'HAPPY_SLACK_EMOJIS', 'REPO_BLACKLIST', 'SLACK_CUSTOM_EMOJI_MAPPING' ] for value in json_values: config.update({ value: json.loads(config.get(value, '{}')) }) return config
c85060806c0045d0e4a9410de2583fad879e99f6
3,642,141
import tokenize def build_model(): """ Returns built and tuned model using pipeline Parameters: No arguments Returns: cv (estimator): tuned model """ pipeline = Pipeline([ ('Features', FeatureUnion([ ('text_pipeline', Pipeline([ ('vect', CountVectorizer(tokenizer=tokenize)), ('tfidf', TfidfTransformer()) ])), ('starting_verb', StartingVerbExtractor()) ])), ('clf', MultiOutputClassifier(DecisionTreeClassifier())) ]) # now we can perform another grid search on this new estimator to be sure we have the best parameters parameters = { #'Features__text_pipeline__vect__max_df': [0.5,1.0], 'Features__text_pipeline__tfidf__smooth_idf': (True, False) } cv = GridSearchCV(pipeline, param_grid=parameters) return cv
94bc0ad8a3eb48531cb6229972099369a9b9cf61
3,642,142
def INPUT_BTN(**attributes): """ Utility function to create a styled button """ return SPAN(INPUT(_class = "button-right", **attributes), _class = "button-left")
6c6610626367795518ca737b53950d3687ae4d91
3,642,143
def load_annotations(file_path): """Loads a file containing annotations for multiple documents. The file should contain lines with the following format: <DOCUMENT ID> <LINES> <SPAN START POSITIONS> <SPAN LENGTHS> <SEVERITY> Fields are separated by tabs; LINE, SPAN START POSITIONS and SPAN LENGTHS can have a list of values separated by white space. Args: file_path: path to the file. Returns: a dictionary mapping document id's to a list of annotations. """ annotations = defaultdict(list) with open(file_path, 'r', encoding='utf8') as f: for i, line in enumerate(f): line = line.strip() if not line: continue fields = line.split('\t') doc_id = fields[0] try: annotation = Annotation.from_fields(fields[1:]) except OverlappingSpans: msg = 'Overlapping spans when reading line %d of file %s ' msg %= (i, file_path) print(msg) continue annotations[doc_id].append(annotation) return annotations
0c674142ae0d99670e63959c3c00ed0ca2c8fac1
3,642,144
import re def search(request, template_name='blog/post_search.html'): """ Search for blog posts. This template will allow you to set up a simple search form that will try to return results based on given search strings. The queries will be put through a stop words filter to remove words like 'the', 'a', or 'have' to help improve the result set. Template: ``blog/post_search.html`` Context: object_list List of blog posts that match given search term(s). search_term Given search term. """ context = {} if request.GET: stop_word_list = re.compile(STOP_WORDS_RE, re.IGNORECASE) search_term = '%s' % request.GET['q'] cleaned_search_term = stop_word_list.sub('', search_term) cleaned_search_term = cleaned_search_term.strip() if len(cleaned_search_term) != 0: post_list = Post.objects.published().filter(Q(title__icontains=cleaned_search_term) | Q(body__icontains=cleaned_search_term) | Q(tags__icontains=cleaned_search_term) | Q(categories__title__icontains=cleaned_search_term)) context = {'object_list': post_list, 'search_term':search_term} else: message = 'Search term was too vague. Please try again.' context = {'message':message} return render(request, template_name, context)
fdb72279b6ed5fe5e87c888b7a10c8a3ed8f94d0
3,642,145
import math def orient_data (data, header, header_out=None, MLBG_rot90_flip=False, log=None, tel=None): """Function to remap [data] from the CD matrix defined in [header] to the CD matrix taken from [header_out]. If the latter is not provided the output orientation will be North up, East left. If [MLBG_rot90_flip] is switched on and the data is from MeerLICHT or BlackGEM, the data will be oriented within a few degrees from North up, East left while preserving the pixel values in the new, *remapped* reference, D and Scorr images. """ # rotation matrix: # R = [[dx * cos(theta), dy * -sin(theta)], # [dx * sin(theta), dy * cos(theta)]] # with theta=0: North aligned with positive y-axis # and East with the positive x-axis (RA increases to the East) # # N.B.: np.dot(R, [[x], [y]]) = np.dot([x,y], R.T) # # matrices below are defined using the (WCS) header keywords # CD?_?: # # [ CD1_1 CD2_1 ] # [ CD1_2 CD2_2 ] # # orient [data] with its orientation defined in [header] to the # orientation defined in [header_out]. If the latter is not # provided, the output orientation will be North up, East left. # check if input data is square; if it is not, the transformation # will not be done properly. assert data.shape[0] == data.shape[1] # define data CD matrix, assumed to be in [header] CD_data = read_CD_matrix (header, log=log) # determine output CD matrix, either from [header_out] or North # up, East left if header_out is not None: CD_out = read_CD_matrix (header_out, log=log) else: # define de CD matrix with North up and East left, using the # pixel scale from the input [header] pixscale = read_header(header, ['pixscale']) cdelt = pixscale/3600 CD_out = np.array([[-cdelt, 0], [0, cdelt]]) # check if values of CD_data and CD_out are similar CD_close = [math.isclose(CD_data[i,j], CD_out[i,j], rel_tol=1e-3) for i in range(2) for j in range(2)] #if log is not None: # log.info ('CD_close: {}'.format(CD_close)) if np.all(CD_close): #if log is not None: # log.info ('data CD matrix already similar to CD_out matrix; ' # 'no need to remap data') # if CD matrix values are all very similar, do not bother to # do the remapping data2return = data elif MLBG_rot90_flip and tel in ['ML1', 'BG2', 'BG3', 'BG4']: #if log is not None: # log.info ('for ML/BG: rotating data by exactly 90 degrees and for ' # 'ML also flip left/right') # rotate data by exactly 90 degrees counterclockwise (when # viewing data with y-axis increasing to the top!) and for ML1 # also flip in the East-West direction; for ML/BG this will # result in an image within a few degrees of the North up, # East left orientation while preserving the original pixel # values of the new, *remapped* reference, D and Scorr images. 
data2return = np.rot90(data, k=-1) if tel=='ML1': data2return = np.fliplr(data2return) # equivalent operation: data2return = np.flipud(np.rot90(data)) else: #if log is not None: # log.info ('remapping data from input CD matrix: {} to output CD ' # 'matrix: {}'.format(CD_data, CD_out)) # transformation matrix, which is the dot product of the # output CD matrix and the inverse of the data CD matrix CD_data_inv = np.linalg.inv(CD_data) CD_trans = np.dot(CD_out, CD_data_inv) # transpose and flip because [affine_transform] performs # np.dot(matrix, [[y],[x]]) rather than np.dot([x,y], matrix) matrix = np.flip(CD_trans.T) # offset, calculated from # # [xi - dxi, yo - dyo] = np.dot( [xo - dxo, yo - dyo], CD_trans ) # # where xi, yi are the input coordinates corresponding to the # output coordinates xo, yo in data and dxi/o, dyi/o are the # corresponding offsets from the point of # rotation/transformation, resulting in # # [xi, yi] = np.dot( [xo, yo], CD_trans ) + offset # with # offset = -np.dot( [dxo, dyo], CD_trans ) + [dxi, dyi] # setting [dx0, dy0] and [dxi, dyi] to the center center = (np.array(data.shape)-1)/2 offset = -np.dot(center, np.flip(CD_trans)) + center # infer transformed data data2return = ndimage.affine_transform(data, matrix, offset=offset, mode='nearest') return data2return
6ef27074692f46de56e5decd7d6b315e11c4d686
3,642,146
def _batchnorm_to_groupnorm(module: nn.modules.batchnorm._BatchNorm) -> nn.Module: """ Converts a BatchNorm ``module`` to GroupNorm module. This is a helper function. Args: module: BatchNorm module to be replaced Returns: GroupNorm module that can replace the BatchNorm module provided Notes: A default value of 32 is chosen for the number of groups based on the paper *Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour* https://arxiv.org/pdf/1706.02677.pdf """ return nn.GroupNorm(min(32, module.num_features), module.num_features, affine=True)
1b923a28a4727b72768acf0fc00d93d9012c5349
3,642,147
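A minimal usage sketch for the `_batchnorm_to_groupnorm` helper above (illustrative only, not part of the original record; assumes PyTorch is installed and the function is in scope):

import torch.nn as nn

# Replace a BatchNorm2d layer over 64 channels with an equivalent GroupNorm.
bn = nn.BatchNorm2d(64)
gn = _batchnorm_to_groupnorm(bn)
print(gn)  # GroupNorm(32, 64, eps=1e-05, affine=True)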
def _compute_bic( data: np.array, n_clusters: int ) -> BICResult: """Compute the BIC statistic. Parameters ---------- data: np.array The data to cluster. n_clusters: int Number of clusters to test. Returns ------- results: BICResult The results as a BICResult object. """ gm = GaussianMixture(n_clusters) gm.fit(data) return BICResult(gm.bic(data), n_clusters)
3eb1a759a60f834f4e4fb7e364c9bce4ffb61230
3,642,148
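A hedged usage sketch for `_compute_bic` above. `BICResult` is not defined in this record, so the sketch assumes it is a simple two-field container (a namedtuple of `bic` and `n_clusters`) living in the same scope as the function:

from collections import namedtuple

import numpy as np
from sklearn.mixture import GaussianMixture

# Assumed shape of the result container used by _compute_bic.
BICResult = namedtuple("BICResult", ["bic", "n_clusters"])

# Two well-separated blobs; BIC should favour n_clusters=2 over n_clusters=1.
rng = np.random.default_rng(0)
data = np.vstack([rng.normal(0, 1, (100, 2)), rng.normal(8, 1, (100, 2))])

for k in (1, 2, 3):
    print(_compute_bic(data, k))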
def release_branch_name(config): """ build expected release branch name from current config """ branch_name = "{0}{1}".format( config.gitflow_release_prefix(), config.package_version() ) return branch_name
0d97c515aca8412882c8b260405a63d20b4b0f63
3,642,149
def torch2numpy(data): """ Transfer data from the torch tensor (on CPU) to the numpy array (on CPU). """ return data.numpy()
c7ca4123743c4f054d809f0e307a4de079b0af10
3,642,150
def new_schema(name, public_name, is_active=True, **options): """ This function adds a schema in schema model and creates physical schema. """ try: schema = Schema(name=name, public_name=public_name, is_active=is_active) schema.save() except IntegrityError: raise Exception('Schema already exists.') create_schema(name, **options) return schema
efd6ed2737c6a25e8beeaef9f7fffebdb9592f10
3,642,151
def find_appropriate_timestep(simulation_factory, equilibrium_samples, M, midpoint_operator, temperature, timestep_range, DeltaF_neq_threshold=1.0, max_samples=10000, batch_size=1000, verbose=True ): """Perform binary search* over the timestep range, trying to find the maximum timestep that results in DeltaF_neq that doesn't exceed threshold or have gross instability problems. (*Not-quite-binary-search: instead of deterministic comparisons, it performs hypothesis tests at regular intervals.) Sketch ------ * Maintain an interval (min_timestep, max_timestep) * At each iteration: * timestep <- (min_timestep + max_timestep) / 2 * Only simulate long enough to be confident that DeltaF_neq(timestep) != threshold. * If we're confident DeltaF_neq(timestep) > threshold, reduce max_timestep to current timestep. * If we're confident DeltaF_neq(timestep) < threshold, increase min_timestep to current timestep Parameters ---------- simulation_factory: function accepts a timestep argument and returns a simulation equipped with an integrator with that timestep equilibrium_samples: list list of samples from the configuration distribution at equilibrium M: int protocol length midpoint_operator: function accepts a simulation as an argument, doesn't return anything temperature: unit'd quantity temperature used to resample velocities timestep_range: iterable (min_timestep, max_timestep) DeltaF_neq_threshold: double, default=1.0 maximum allowable DeltaF_neq max_samples: int number of samples verbose: boolean if True, print a bunch of stuff to the command prompt Returns ------- timestep: unit'd quantity Maximum timestep tested that doesn't exceed the DeltaF_neq_threshold """ max_iter = 10 alpha = 1.96 # for now hard-coded confidence level min_timestep, max_timestep = timestep_range[0], timestep_range[-1] for i in range(max_iter): timestep = (min_timestep + max_timestep) / 2 if verbose: print("Current feasible range: [{:.3f}fs, {:.3f}fs]".format( min_timestep.value_in_unit(unit.femtosecond), max_timestep.value_in_unit(unit.femtosecond) )) print("Testing: {:.3f}fs".format(timestep.value_in_unit(unit.femtosecond))) simulation = simulation_factory(timestep) simulation_crashed = False changed_timestep_range = False W_shads_F, W_shads_R, W_midpoints = [], [], [] def update_lists(W_shad_F, W_midpoint, W_shad_R): W_shads_F.append(W_shad_F) W_midpoints.append(W_midpoint) W_shads_R.append(W_shad_R) # collect up to max_samples protocol samples, making a decision about whether to proceed # every batch_size samples for _ in range(max_samples / batch_size): # collect another batch_size protocol samples for _ in range(batch_size): # draw equilibrium sample #x, v = equilibrium_sampler() #simulation.context.setPositions(x) #simulation.context.setVelocities(v) simulation.context.setPositions(equilibrium_samples[np.random.randint(len(equilibrium_samples))]) simulation.context.setVelocitiesToTemperature(temperature) # collect and store measurements # if the simulation crashes, set simulation_crashed flag try: update_lists(*apply_protocol(simulation, M, midpoint_operator)) except: simulation_crashed = True if verbose: print("A simulation crashed! 
Considering this timestep unstable...") # if we didn't crash, update estimate of DeltaF_neq upper and lower confidence bounds DeltaF_neq, sq_uncertainty = estimate_nonequilibrium_free_energy(np.array(W_shads_F)[:,-1], np.array(W_shads_R)[:,-1]) if np.isnan(DeltaF_neq + sq_uncertainty): if verbose: print("A simulation encountered NaNs!") simulation_crashed = True bound = alpha * np.sqrt(sq_uncertainty) DeltaF_neq_lcb, DeltaF_neq_ucb = DeltaF_neq - bound, DeltaF_neq + bound out_of_bounds = (DeltaF_neq_lcb > DeltaF_neq_threshold) or (DeltaF_neq_ucb < DeltaF_neq_threshold) if verbose and (out_of_bounds or simulation_crashed): print("After collecting {} protocol samples, DeltaF_neq is likely in the following interval: " "[{:.3f}, {:.3f}]".format(len(W_shads_F), DeltaF_neq_lcb, DeltaF_neq_ucb)) # if (DeltaF_neq_lcb > threshold) or (nans are encountered), then we're pretty sure this timestep is too big, # and we can move on to try a smaller one if simulation_crashed or (DeltaF_neq_lcb > DeltaF_neq_threshold): if verbose: print("This timestep is probably too big!\n") max_timestep = timestep changed_timestep_range = True break # else, if (DeltaF_neq_ucb < threshold), then we're pretty sure we can get # away with a larger timestep elif (DeltaF_neq_ucb < DeltaF_neq_threshold): if verbose: print("We can probably get away with a larger timestep!\n") min_timestep = timestep changed_timestep_range = True break # else, the threshold is within the upper and lower confidence bounds, and we keep going if (not changed_timestep_range): timestep = (min_timestep + max_timestep) / 2 if verbose: print("\nTerminating early: found the following timestep: ".format(timestep.value_in_unit(unit.femtosecond))) return timestep if verbose: timestep = (min_timestep + max_timestep) / 2 print("\nTerminating: found the following timestep: ".format(timestep.value_in_unit(unit.femtosecond))) return timestep
94000a07cfc8cf00e3440ff242c63da5c8be5d00
3,642,152
def critical_bands(): """ Compute the Critical bands as defined in the book: Psychoacoustics by Zwicker and Fastl. Table 6.1 p. 159 """ # center frequencies fc = [ 50, 150, 250, 350, 450, 570, 700, 840, 1000, 1170, 1370, 1600, 1850, 2150, 2500, 2900, 3400, 4000, 4800, 5800, 7000, 8500, 10500, 13500, ] # boundaries of the bands (e.g. the first band is from 0Hz to 100Hz # with center 50Hz, fb[0] to fb[1], center fc[0] fb = [ 0, 100, 200, 300, 400, 510, 630, 770, 920, 1080, 1270, 1480, 1720, 2000, 2320, 2700, 3150, 3700, 4400, 5300, 6400, 7700, 9500, 12000, 15500, ] # now just make pairs bands = [[fb[j], fb[j + 1]] for j in range(len(fb) - 1)] return np.array(bands), fc
6301a6ee86d0ea3fb588213aa8b9453b14fb7036
3,642,153
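A short usage sketch for `critical_bands` above (illustrative only): it inspects the shape of the returned band table and its first and last Bark bands.

bands, centers = critical_bands()
print(bands.shape)              # (24, 2)
print(bands[0], centers[0])     # [  0 100] 50
print(bands[-1], centers[-1])   # [12000 15500] 13500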
def repackage(r, amo_id, amo_file, target_version=None, sdk_dir=None): """Pull amo_id/amo_file.xpi, schedule xpi creation, return hashtag """ # validate entries # prepare data hashtag = get_random_string(10) sdk = SDK.objects.all()[0] # if (when?) choosing sdk_dir will be possible # sdk = SDK.objects.get(dir=sdk_dir) if sdk_dir else SDK.objects.all()[0] sdk_source_dir = sdk.get_source_dir() # extract packages tasks.repackage.delay( amo_id, amo_file, sdk_source_dir, hashtag, target_version) # call build xpi task # respond with a hashtag which will identify downloadable xpi # URL to check if XPI is ready: # /xpi/check_download/{hashtag}/ # URL to download: # /xpi/download/{hashtag}/{desired_filename}/ return HttpResponse('{"hashtag": "%s"}' % hashtag, mimetype='application/json')
9527f2fbe6077e25eee72a570f2e9702cbf3b510
3,642,154
def edges_to_adj_list(edges):
    """
    Transforms a set of edges into an adjacency list (represented as a dictionary)

    For UNDIRECTED graphs, i.e. if v2 in adj_list[v1], then v1 in adj_list[v2]

    INPUT:
        - edges : a set or list of edges

    OUTPUT:
        - adj_list: a dictionary with the vertices as keys, each with a set of
                adjacent vertices.
    """
    adj_list = {}  # store in dictionary
    for v1, v2 in edges:
        if v1 in adj_list:  # vertex already in it
            adj_list[v1].add(v2)
        else:
            adj_list[v1] = set([v2])
        if v2 in adj_list:  # vertex already in it
            adj_list[v2].add(v1)
        else:
            adj_list[v2] = set([v1])
    return adj_list
683f10e9a0a9b8a29d63b276b2e550ebe8287a05
3,642,155
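A small usage sketch for `edges_to_adj_list` above (illustrative only), showing that each undirected edge is registered on both endpoints:

edges = [(1, 2), (2, 3), (1, 3)]
adj = edges_to_adj_list(edges)
print(adj)  # {1: {2, 3}, 2: {1, 3}, 3: {1, 2}}  (set ordering may vary)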
from typing import Optional

def _get_lookups(
    name: str,
    project: interface.Project,
    base: Optional[str] = None) -> list[str]:
    """Returns the ordered lookup keys for a named project component.

    Args:
        name (str): name of the component to look up.
        project (interface.Project): project whose outline supplies the
            associated design and kind names.
        base (Optional[str]): fallback lookup appended last. Defaults to None.

    Returns:
        list[str]: `name`, followed by its design and kind (when present in
            the project outline) and the `base` fallback.

    """
    lookups = [name]
    if name in project.outline.designs:
        lookups.append(project.outline.designs[name])
    if name in project.outline.kinds:
        lookups.append(project.outline.kinds[name])
    if base is not None:
        lookups.append(base)
    return lookups
c8f700a19bbae0167c8474f033d625a763743db8
3,642,156
def unwrap(value): """ Unwraps the given Document or DocumentList as applicable. """ if isinstance(value, Document): return value.to_dict() elif isinstance(value, DocumentList): return value.to_list() else: return value
7e25c2935ff0a467e51097c4291e8d5f751c34db
3,642,157
def home_all(): """Home page view. On this page a summary campaign manager view will shown with all campaigns. """ context = dict( oauth_consumer_key=OAUTH_CONSUMER_KEY, oauth_secret=OAUTH_SECRET, all=True, map_provider=map_provider() ) # noinspection PyUnresolvedReferences return render_template('index.html', **context)
d987486f30cc5a8f6e697d9ccb92741b2d2067e4
3,642,158
import math

def _sqrt(x):
    """Square root that dispatches on the input type: numpy arrays use
    ``np.sqrt``, plain scalars use ``math.sqrt``, and any other object is
    expected to provide its own ``.sqrt()`` method."""
    isnumpy = isinstance(x, np.ndarray)
    isscalar = np.isscalar(x)
    return np.sqrt(x) if isnumpy else math.sqrt(x) if isscalar else x.sqrt()
16f566493deeaaf35841548e6db89408ca686bfe
3,642,159
def update_subnet(context, id, subnet): """Update values of a subnet. : param context: neutron api request context : param id: UUID representing the subnet to update. : param subnet: dictionary with keys indicating fields to update. valid keys are those that have a value of True for 'allow_put' as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. """ LOG.info("update_subnet %s for tenant %s" % (id, context.tenant_id)) with context.session.begin(): subnet_db = db_api.subnet_find(context, id=id, scope=db_api.ONE) if not subnet_db: raise exceptions.SubnetNotFound(id=id) s = subnet["subnet"] always_pop = ["_cidr", "cidr", "first_ip", "last_ip", "ip_version", "segment_id", "network_id"] admin_only = ["do_not_use", "created_at", "tenant_id", "next_auto_assign_ip", "enable_dhcp"] utils.filter_body(context, s, admin_only, always_pop) dns_ips = utils.pop_param(s, "dns_nameservers", []) host_routes = utils.pop_param(s, "host_routes", []) gateway_ip = utils.pop_param(s, "gateway_ip", None) allocation_pools = utils.pop_param(s, "allocation_pools", None) if not CONF.QUARK.allow_allocation_pool_update: if allocation_pools: raise exceptions.BadRequest( resource="subnets", msg="Allocation pools cannot be updated.") alloc_pools = allocation_pool.AllocationPools( subnet_db["cidr"], policies=models.IPPolicy.get_ip_policy_cidrs(subnet_db)) else: alloc_pools = allocation_pool.AllocationPools(subnet_db["cidr"], allocation_pools) if gateway_ip: alloc_pools.validate_gateway_excluded(gateway_ip) default_route = None for route in host_routes: netaddr_route = netaddr.IPNetwork(route["destination"]) if netaddr_route.value == routes.DEFAULT_ROUTE.value: default_route = route break if default_route is None: route_model = db_api.route_find( context, cidr=str(routes.DEFAULT_ROUTE), subnet_id=id, scope=db_api.ONE) if route_model: db_api.route_update(context, route_model, gateway=gateway_ip) else: db_api.route_create(context, cidr=str(routes.DEFAULT_ROUTE), gateway=gateway_ip, subnet_id=id) if dns_ips: subnet_db["dns_nameservers"] = [] for dns_ip in dns_ips: subnet_db["dns_nameservers"].append(db_api.dns_create( context, ip=netaddr.IPAddress(dns_ip))) if host_routes: subnet_db["routes"] = [] for route in host_routes: subnet_db["routes"].append(db_api.route_create( context, cidr=route["destination"], gateway=route["nexthop"])) if CONF.QUARK.allow_allocation_pool_update: if isinstance(allocation_pools, list): cidrs = alloc_pools.get_policy_cidrs() ip_policies.ensure_default_policy(cidrs, [subnet_db]) subnet_db["ip_policy"] = db_api.ip_policy_update( context, subnet_db["ip_policy"], exclude=cidrs) subnet = db_api.subnet_update(context, subnet_db, **s) return v._make_subnet_dict(subnet)
f1ac159f612d3b8a5459ee3b70c440cf7cf84cd5
3,642,160
def validate_params():
    """@rtype bool"""
    def validate_single_param(param_name, required_type):
        """@rtype bool"""
        inner_result = True
        if not rospy.has_param(param_name):
            rospy.logfatal('Parameter {} is not defined but needed'.format(param_name))
            inner_result = False
        else:
            if type(required_type) is list and len(required_type) > 0:
                # fail only when the value matches none of the accepted types
                if type(rospy.get_param(param_name)) not in required_type:
                    rospy.logfatal('Parameter {} is not any of type {}'.format(param_name, required_type))
                    inner_result = False
            else:
                if type(rospy.get_param(param_name)) is not required_type:
                    rospy.logfatal('Parameter {} is not of type {}'.format(param_name, required_type))
                    inner_result = False
        return inner_result
    # run every check (and log every failure) before combining the results
    result = True
    result = validate_single_param('~update_frequency', int) and result
    result = validate_single_param('~do_cpu', bool) and result
    result = validate_single_param('~do_memory', bool) and result
    result = validate_single_param('~do_network', bool) and result
    return result
8734d9db7e29b8b6c30dc8a3ae72b0cf18c85310
3,642,161
import os def run_test_general_base_retrieval_methods(query_dic, query_types, trec_cast_eval, similarity, string_params, searcher: SimpleSearcher, reranker, write_to_trec_eval, write_results_to_file, reranker_query_config, reranking_threshold, use_rrf): """ Run topics in trec_cast_eval query_dic is a dict with string keys and a QueryConfig object query_types is a list of strings that denote keys we want to use that are in query_dic trec_cast_eval - object of type ConvSearchEvaluationGeneral If write_to_trec_eval writes the results in trec eval format If write_results_to_file writes the results in tsv format (including the query and document's content for later use) """ metric_results = {} doc_results = {} for query_type in query_types: print(similarity + " " + query_type + " " + string_params) current_key = similarity + "_" + query_type + "_" + string_params metric_results[current_key], _, doc_results[current_key] = \ run_topics_general(trec_cast_eval=trec_cast_eval, query_config=query_dic[query_type], searcher=searcher, reranker=reranker, reranker_query_config=reranker_query_config, reranking_threshold=reranking_threshold, use_rrf=use_rrf) index_name = os.path.basename(os.path.normpath(searcher.index_dir)) run_file_name = index_name + "_" + current_key run_name = query_type if searcher.is_using_rm3(): run_file_name += "_rm3" if reranker: run_file_name += "_" + reranker.RERANKER_TYPE + "_" + str(reranking_threshold) run_name += "_" + reranker.RERANKER_TYPE + "_" + str(reranking_threshold) if write_to_trec_eval: write_trec_results(file_name=run_file_name, result=doc_results[current_key], run_name=run_name) if write_results_to_file: write_doc_results_to_file(file_name=run_file_name + ".tsv", result=doc_results[current_key]) return metric_results, doc_results
c0320231f65955b65f447bf0303f4443dcc5110d
3,642,162
def user_exists(username): """Return True if the username exists, or False if it doesn't.""" try: adobe_api.AdobeAPIObject(username) except adobe_api.AdobeAPINoUserException: return False return True
3767bec38c8058e7bd193e5532e4150ca501a96a
3,642,163
def bags_containing_bag(bag: str, rules: dict[str, list]) -> set[str]:
    """Returns the set of bags that have `bag` in their rules."""
    return {r_bag for r_bag, r_rule in rules.items() for _, r_color in r_rule if bag in r_color}
f9e67a4ade4dd9bdf25e05669741c71270007215
3,642,164
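A quick usage sketch for `bags_containing_bag` above; the containment rules below are made up purely for illustration (count, colour pairs in the style the comprehension expects):

rules = {
    "light red": [(1, "bright white"), (2, "muted yellow")],
    "dark orange": [(3, "bright white"), (4, "muted yellow")],
    "bright white": [(1, "shiny gold")],
    "faded blue": [],
}
print(bags_containing_bag("bright white", rules))  # {'light red', 'dark orange'}
print(bags_containing_bag("shiny gold", rules))    # {'bright white'}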
def default_mutable_arguments(): """Explore default mutable arguments, which are a dangerous game in themselves. Why do mutable default arguments suffer from this apparent problem? A function's default values are evaluated at the point of function definition in the defining scope. In particular, we can examine these bindings by printing append_twice.__defaults__ after append_twice has been defined. For this function, we have print(append_twice.__defaults__) # ([],) If a binding for `lst` is not supplied, then the `lst` name inside append_twice falls back to the array object that lives inside append_twice.__defaults__. In particular, if we update `lst` in place during one function call, we have changed the value of the default argument. That is, print(append_twice.__defaults__) # ([], ) append_twice(1) print(append_twice.__defaults__) # ([1, 1], ) append_twice(2) print(append_twice.__defaults__) # ([1, 1, 2, 2], ) In each case where a user-supplied binding for `lst is not given, we modify the single (mutable) default value, which leads to this crazy behavior. """ def append_twice(a, lst=[]): """Append a value to a list twice.""" lst.append(a) lst.append(a) return lst print(append_twice(1, lst=[4])) # => [4, 1, 1] print(append_twice(11, lst=[2, 3, 5, 7])) # => [2, 3, 5, 7, 11, 11] print(append_twice(1)) # => [1, 1] print(append_twice(2)) # => [1, 1, 2, 2] print(append_twice(3))
a58a8c2807e29af68d501aa5ad4b33ad1aa80252
3,642,165
def is_text_file(file_): """ detect if file is of type text :param file_: file to be tested :returns: `bool` of whether the file is text """ with open(file_, 'rb') as ff: data = ff.read(1024) return not is_binary_string(data)
d064b51ea239f34ed97d47416b1f411650ce8a1a
3,642,166
from typing import Union from datetime import datetime from typing import List import pytz def soft_update_datetime_field( model_inst: models.Model, field_name: str, warehouse_field_value: Union[datetime, None], ) -> List[str]: """ Uses Django ORM to update DateTime field of model instance if the field value is null and the warehouse data is non-null. """ model_name: str = model_inst.__class__.__name__ current_field_value: Union[datetime, None] = getattr(model_inst, field_name) # Skipping update if the field already has a value, provided by a previous cron run or administrator if current_field_value is not None: logger.info( f'Skipped update of {field_name} for {model_name} instance ({model_inst.id}); existing value was found') else: if warehouse_field_value: warehouse_field_value = warehouse_field_value.replace(tzinfo=pytz.UTC) setattr(model_inst, field_name, warehouse_field_value) logger.info(f'Updated {field_name} for {model_name} instance ({model_inst.id})') return [field_name] return []
33034a548ee572706cd1e6e696d5a9249ad0b528
3,642,167
import itertools def plot_confusion_matrix( y_true, y_pred, normalize=False, cmap=plt.cm.Blues, label_list = None, visible=True, savepath=None): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. """ cm = confusion_matrix(y_true, y_pred) acc = accuracy_score(y_true, y_pred) f1 = f1_score(y_true, y_pred, average="micro") title = f"Confusion Matrix, Acc: {acc:.2f}, F1: {f1:.2f}" if label_list == None: classes = range(0, max(y_true)) else: classes = label_list if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] print("Normalized confusion matrix") else: print('Confusion matrix, without normalization') plt.figure(figsize=(13,13)) plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) fmt = '.2f' if normalize else 'd' thresh = cm.max() / 2. for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, format(cm[i, j], fmt), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') if savepath is not None: plt.savefig(savepath) if visible: plt.show() return acc, f1
f15d2170ba0e869cb47e554ea374f93b05dbcab8
3,642,168
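A minimal, hedged usage sketch for `plot_confusion_matrix` above; the labels are synthetic and the figure is written to disk rather than shown:

import numpy as np

rng = np.random.default_rng(0)
y_true = rng.integers(0, 3, size=200)
# Mostly-correct predictions with some noise.
y_pred = np.where(rng.random(200) < 0.8, y_true, rng.integers(0, 3, size=200))

acc, f1 = plot_confusion_matrix(
    y_true, y_pred,
    normalize=True,
    label_list=["class 0", "class 1", "class 2"],
    visible=False,
    savepath="confusion_matrix.png",
)
print(acc, f1)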
def _test_pressure_reconstruction(self, g, recon_p, point_val, point_coo):
    """
    Testing pressure reconstruction. This function evaluates the
    reconstructed local pressure polynomial at the Lagrangian points and
    checks whether those values are equal to the point_val array.

    Parameters
    ----------
    g : PorePy object
        Grid.
    recon_p : NumPy nd-Array
        Reconstructed pressure polynomial.
    point_val : NumPy nd-Array
        Pressure values at the Lagrangian nodes.
    point_coo : NumPy array
        Coordinates at the Lagrangian nodes.

    Returns
    -------
    None.

    """

    def assert_reconp(eval_poly, point_val):
        np.testing.assert_allclose(
            eval_poly,
            point_val,
            rtol=1e-6,
            atol=1e-3,
            err_msg="Pressure reconstruction has failed"
        )

    eval_poly = utils.eval_P1(recon_p, point_coo)
    assert_reconp(eval_poly, point_val)

    return None
b70b202cc21ba632f18af2f5fcf72f7b6d509e91
3,642,169
def logout(): """Logout :return: Function used to log out the current user """ logout_user() return redirect(url_for('index'))
f5e2ef30b47c645ba5671395a115eb6d6c9425f1
3,642,170
import json import base64 import os import requests def get_authorization_url( app_id, redirect_uri, scope='all', state='', extra_data='', **params): """ Get the url to start the first leg of OAuth flow. Refer to `Authentication Docs <https://developers.kloudless.com/docs/latest/ authentication#oauth-2.0>`_ for more information. :param str app_id: Application ID :param str redirect_uri: Redirect URI to your application server :param str scope: A space-delimited string of scopes that indicate which services a user can connect, and which permissions to request :param str state: An arbitrary string which would be redirected back via ``redirect_uri`` as query parameter. Random url-safe Base64 string would be generated by default :param str extra_data: A URL-encoded JSON object containing data used to pre-fill default values for fields in the Kloudless authentication forms. For example, the domain of a WebDAV server :param params: Additional query parameters :returns: `tuple(url, state)`: Redirect the user to ``url`` to start authorization. Saved ``state`` in user's session for future validation :rtype: `tuple(str, str)` """ if extra_data and isinstance(extra_data, dict): extra_data = json.dumps(extra_data) if not state: state = base64.urlsafe_b64encode(os.urandom(12)).decode('utf8') params.update({ 'client_id': app_id, 'response_type': 'code', 'redirect_uri': redirect_uri, 'scope': scope, 'state': state, 'extra_data': extra_data, }) endpoint = construct_kloudless_endpoint('oauth', api_version=OAUTH_API_VERSION) url = requests.Request('GET', endpoint, params=params).prepare().url return url, state
27abfc9c9c14887f1a3a963f7eb07d92706e288a
3,642,171
def get_user_messages(user, index=0, number=0):
    """
    Return `number` messages of the given `user`, in reverse chronological
    order, starting from index `index`.
    """
    if not user or user.is_anonymous or index < 0 or number < 0:
        return tuple()
    # noinspection PyBroadException
    try:
        if index == 0 and number == 0:
            all_message = user.messages.all()
        else:
            all_message = user.messages.all()[index:index+number]
    except Exception as e:
        all_message = tuple()
    return all_message
bb0c499e5ca8ec650d2ebca12852d2345733e882
3,642,172
def third_party_apps_default_dc_modules_and_settings(klass): """ Decorator for DefaultDcSettingsSerializer class. Updates modules and settings fields defined in installed third party apps. """ logger.info('Loading third party apps DEFAULT DC modules and settings.') for third_party_app, app_dc_settings in get_third_party_apps_serializer_settings(): try: app_dc_settings.DEFAULT_DC_MODULES except AttributeError: logger.info('Skipping app: %s does not have any DEFAULT DC modules defined.', third_party_app) else: _update_serializer_modules(third_party_app, app_dc_settings.DEFAULT_DC_MODULES, klass, default_dc=True) try: app_dc_settings.DEFAULT_DC_SETTINGS except AttributeError: logger.info('Skipping app: %s does not have any DEFAULT DC settings defined.', third_party_app) else: _update_serializer_settings(third_party_app, app_dc_settings, klass, default_dc=True) return klass
59be03a271e60352b429d45ecff647100388f9ab
3,642,173
from typing import Union from pathlib import Path def split_lvis( n_experiences: int, train_transform=None, eval_transform=None, shuffle=True, root_path: Union[str, Path] = None, ): """ Creates the example Split LVIS benchmark. This is a toy benchmark created only to show how a detection benchmark can be created. It was not meant to be used for research purposes! :param n_experiences: The number of train experiences to create. :param train_transform: The train transformation. :param eval_transform: The eval transformation. :param shuffle: If True, the dataset will be split randomly :param root_path: The root path of the dataset. Defaults to None, which means that the default path will be used. :return: A :class:`DetectionScenario` instance. """ train_dataset = LvisDataset(root=root_path, train=True) val_dataset = LvisDataset(root=root_path, train=False) all_cat_ids = set(train_dataset.lvis_api.get_cat_ids()) all_cat_ids.union(val_dataset.lvis_api.get_cat_ids()) return split_detection_benchmark( n_experiences=n_experiences, train_dataset=train_dataset, test_dataset=val_dataset, n_classes=len(all_cat_ids), train_transform=train_transform, eval_transform=eval_transform, shuffle=shuffle, )
efece586ec6bfbc45911ed9f4f2ad5ead2cfd88b
3,642,174
def compute_log_ksi_normalized(log_edge_pot, #'(t-1,t)', log_node_pot, # '(t, label)', T, n_labels, log_alpha, log_beta, temp_array_1, temp_array_2): """ to obtain the two-slice posterior marginals p(y_t = i, y_t+1 = j| X_1:T) = normalized ksi_t,t+1(i,j) """ # in the following, will index log_ksi only with t, to stand for log_ksi[t,t+1]. including i,j: log_ksi[t,i,j] log_alpha = compute_log_alpha(log_edge_pot, log_node_pot, T, n_labels, log_alpha, temp_array_1, temp_array_2) log_beta = compute_log_beta(log_edge_pot, log_node_pot, T, n_labels, log_beta, temp_array_1, temp_array_2) log_ksi = np.empty((T-1, n_labels, n_labels)) for t in range(T-1): psi_had_beta = log_node_pot[t+1,:] + log_beta[t+1, :] # represents psi_t+1 \hadamard beta_t+1 in MLAPP eq 17.67 log_ksi[t,:,:] = log_edge_pot for c in range(n_labels): for d in range(n_labels): log_ksi[t,c,d] += log_alpha[t,d] + psi_had_beta[c] # normalize current ksi[t,:,:] over both dimensions. This is not required of ksi, strictly speaking, but the output of the function needs to be normalized, and it's cheaper to do it in-place on ksi than to create a fresh variable to hold the normalized values log_ksi[t,:,:] -= lse_numba_2d(log_ksi[t,:,:]) return log_ksi
e4e6ea464851ba64d640e14fd7c88e9c52f28f50
3,642,175
import argparse from pathlib import Path def parse_args(): """Parse input arguments.""" parser = argparse.ArgumentParser(description="Map gleason data to standard format.") parser.add_argument("-d", "--data_path", type=Path, help="Path to folder with the data.", required=True) parser.add_argument("-n", "--n_jobs", type=int, help="Number of jobs to run in parallel.", required=True) return parser.parse_args()
0a1a1cb404d74c48d550642e31399410d1bd13c3
3,642,176
from typing import Any
from typing import Type

def _deserialize_union(x: Any, field_type: Type) -> Any:
    """Deserialize values for Union typed fields

    Args:
        x (Any): value to be deserialized.
        field_type (Type): field type.

    Returns:
        [Any]: deserialized value.
    """
    for arg in field_type.__args__:
        # stop after first matching type in Union
        try:
            x = _deserialize(x, arg)
            break
        except ValueError:
            pass
    return x
01793983a0a82fc16c03adbe57f52de9be5c81ea
3,642,177
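To illustrate the try-each-member pattern used by `_deserialize_union` above, here is a hedged, standalone sketch with a stub `_deserialize` (the real one is defined elsewhere in the originating project; the stub only mimics its raise-ValueError-on-mismatch contract):

from typing import Any, Optional, Type, Union


def _deserialize(x: Any, target: Type) -> Any:
    # Stub for illustration: cast to the target type and signal a mismatch with
    # ValueError, which is what _deserialize_union relies on to skip members.
    if target is type(None):
        if x is None:
            return None
        raise ValueError(f"{x!r} is not None")
    try:
        return target(x)
    except (TypeError, ValueError):
        raise ValueError(f"cannot deserialize {x!r} as {target}")


print(_deserialize_union("42", Union[int, str]))   # 42 (int is tried first and matches)
print(_deserialize_union("abc", Union[int, str]))  # 'abc' (int fails, str matches)
print(_deserialize_union(None, Optional[int]))     # None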
def read_simplest_expandable(expparams, config): """ Read expandable parameters from config file of the type `param_1`. Parameters ---------- expparams : dict, dict.keys, set, or alike The parameter names that should be considered as expandable. Usually, this is a module subdictionary of `type_simplest_ep`. config : dict, dict.keys, set, or alike The user configuration file. Returns ------- set of str The parameters in `config` that comply with `expparams`. """ new = set() for param in config: try: name, idx = param.split("_") except ValueError: continue if idx.isdigit() and name in expparams: new.add(param) return new
4e2068e4a6cbca050da6a33a24b5fb0d2477e4e3
3,642,178
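A short usage sketch for `read_simplest_expandable` above; the parameter names are made up for illustration:

expparams = {"ligand", "protein"}
config = {
    "ligand_1": "LIG1.pdb",
    "ligand_2": "LIG2.pdb",
    "protein_1": "receptor.pdb",
    "ligandfile": "ignored: no underscore-number suffix",
    "other_1": "ignored: 'other' is not expandable",
}
print(read_simplest_expandable(expparams, config))
# {'ligand_1', 'ligand_2', 'protein_1'}  (set ordering may vary)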
from typing import Callable from typing import Iterable from typing import Any def rec_map_reduce_array_container( reduce_func: Callable[[Iterable[Any]], Any], map_func: Callable[[Any], Any], ary: ArrayOrContainerT) -> "DeviceArray": """Perform a map-reduce over array containers recursively. :param reduce_func: callable used to reduce over the components of *ary* (and those of its sub-containers) if *ary* is a :class:`~arraycontext.ArrayContainer`. Must be associative. :param map_func: callable used to map a single array of type :class:`arraycontext.ArrayContext.array_types`. Returns an array of the same type or a scalar. .. note:: The traversal order is unspecified. *reduce_func* must be associative in order to guarantee a sensible result. This is because *reduce_func* may be called on subsets of the component arrays, and then again (potentially multiple times) on the results. As an example, consider a container made up of two sub-containers, *subcontainer0* and *subcontainer1*, that each contain two component arrays, *array0* and *array1*. The same result must be computed whether traversing recursively:: reduce_func([ reduce_func([ map_func(subcontainer0.array0), map_func(subcontainer0.array1)]), reduce_func([ map_func(subcontainer1.array0), map_func(subcontainer1.array1)])]) reducing all of the arrays at once:: reduce_func([ map_func(subcontainer0.array0), map_func(subcontainer0.array1), map_func(subcontainer1.array0), map_func(subcontainer1.array1)]) or any other such traversal. """ def rec(_ary: ArrayOrContainerT) -> ArrayOrContainerT: try: iterable = serialize_container(_ary) except NotAnArrayContainerError: return map_func(_ary) else: return reduce_func([ rec(subary) for _, subary in iterable ]) return rec(ary)
885862371ece1e1f041a44693704300945d8d4a0
3,642,179
def load_data_test(test_path, diagnoses_list, baseline=True, multi_cohort=False): """ Load data not managed by split_manager. Args: test_path (str): path to the test TSV files / split directory / TSV file for multi-cohort diagnoses_list (List[str]): list of the diagnoses wanted in case of split_dir or multi-cohort baseline (bool): If True baseline sessions only used (split_dir handling only). multi_cohort (bool): If True considers multi-cohort setting. """ # TODO: computes baseline sessions on-the-fly to manager TSV file case if multi_cohort: if not test_path.endswith(".tsv"): raise ValueError( "If multi_cohort is given, the tsv_path argument should be a path to a TSV file." ) else: tsv_df = pd.read_csv(test_path, sep="\t") check_multi_cohort_tsv(tsv_df, "labels") test_df = pd.DataFrame() found_diagnoses = set() for idx in range(len(tsv_df)): cohort_name = tsv_df.loc[idx, "cohort"] cohort_path = tsv_df.loc[idx, "path"] cohort_diagnoses = ( tsv_df.loc[idx, "diagnoses"].replace(" ", "").split(",") ) if bool(set(cohort_diagnoses) & set(diagnoses_list)): target_diagnoses = list(set(cohort_diagnoses) & set(diagnoses_list)) cohort_test_df = load_data_test_single( cohort_path, target_diagnoses, baseline=baseline ) cohort_test_df["cohort"] = cohort_name test_df = pd.concat([test_df, cohort_test_df]) found_diagnoses = found_diagnoses | ( set(cohort_diagnoses) & set(diagnoses_list) ) if found_diagnoses != set(diagnoses_list): raise ValueError( f"The diagnoses found in the multi cohort dataset {found_diagnoses} " f"do not correspond to the diagnoses wanted {set(diagnoses_list)}." ) test_df.reset_index(inplace=True, drop=True) else: if test_path.endswith(".tsv"): tsv_df = pd.read_csv(test_path, sep="\t") multi_col = {"cohort", "path"} if multi_col.issubset(tsv_df.columns.values): raise ValueError( "To use multi-cohort framework, please add --multi_cohort flag." ) test_df = load_data_test_single(test_path, diagnoses_list, baseline=baseline) test_df["cohort"] = "single" return test_df
50b739425ca76a3d170688f19e230a0d84c21221
3,642,180
import json

def load_augmentations_config(
    placeholder_params: dict, path_to_config: str = "configs/augmentations.json"
) -> dict:
    """Load the json config with params of all transforms

    Args:
        placeholder_params (dict): dict with values of placeholders
        path_to_config (str): path to the json config file
    """
    with open(path_to_config, "r") as config_file:
        augmentations = json.load(config_file)
    for name, params in augmentations.items():
        # write the filled-in params back so the returned dict reflects them
        augmentations[name] = [
            fill_placeholders(param, placeholder_params) for param in params
        ]
    return augmentations
49f3170033411418e7e5468aecdcdc612a677e66
3,642,181
import numpy def simplify_mask(mask, r_ids, r_p_zip, replace=True): """Simplify the mask by replacing all `region_ids` with their `root_parent_id` The `region_ids` and `parent_ids` are paired from which a tree is inferred. The root of this tree is value `0`. `region_ids` that have a corresponding `parent_id` of 0 are penultimate roots. This method replaces each `region_id` with its penultimate `parent_id`. It *simplifies* the volume. :param mask: a 3D volume :type mask: `numpy.array` :param r_id: sequence of `region_id` :type r_id: iterable :param r_p_zip: sequence of 2-tuples with `region_id` and `parent_id` :type r_p_zip: iterable :param bool replace: if `True` then the returned `mask` will have values; `False` will leave the `mask` unchanged (useful for running tests to speed things up) :return: `simplified_mask`, `segment_colours`, `segment_ids` :rtype: tuple """ simplified_mask = numpy.ndarray(mask.shape, dtype=int) # @UnusedVariable simplified_mask = 0 # group regions_ids by parent_id root_parent_id_group = dict() for r in r_ids: p = get_root(r_p_zip, r) if p not in root_parent_id_group: root_parent_id_group[p] = [r] else: root_parent_id_group[p] += [r] if replace: # It is vastly faster to use multiple array-wide comparisons than to do # comparisons element-wise. Therefore, we generate a string to be executed #  that will do hundreds of array-wide comparisons at a time. # Each comparison is for all region_ids for a parent_id which will # then get assigned the parent_id. for parent_id, region_id_list in root_parent_id_group.items(): # check whether any element in the mask has a value == r0 OR r1 ... OR rN # e.g. (mask == r0) | (mask == r1) | ... | (mask == rN) comp = ' | '.join(['( mask == %s )' % r for r in region_id_list]) # set those that satisfy the above to have the parent_id # Because parent_ids are non-overlapping (i.e. no region_id has two parent_ids) # we can do successive summation instead of assignments. full_op = 'simplified_mask += (' + comp + ') * %s' % parent_id exec(full_op) else: simplified_mask = mask segment_ids = root_parent_id_group.keys() # segment_colors = [r_c_zip[s] for s in segment_ids] return simplified_mask, segment_ids
b8344a893319ad7a26f931b2edbc6ef452b82c24
3,642,182
def getStops(ll): """ getStops Returns a list of stops based off of a lat long pair :param: ll { lat : float, lng : float } :return: list """ if not ll: return None url = "%sstops?appID=%s&ll=%s,%s" % (BASE_URI, APP_ID, ll['lat'], ll['lng']) try: f = urlopen(url) except HTTPError: return None response = f.read() dom = parseString(response) stopElems = dom.getElementsByTagName("location") stops = [] for se in stopElems: locid = se.getAttribute("locid") desc = se.getAttribute("desc") direction = se.getAttribute("dir") stops.append("ID: %s, %s on %s" % (locid, direction, desc)) return stops
eedfc49a02ab6c2ccf45e241262236804007a156
3,642,183
import warnings import six def rws(log_joint, observed, latent, axis=None): """ Implements Reweighted Wake-sleep from (Bornschein, 2015). This works for both continuous and discrete latent `StochasticTensor` s. :param log_joint: A function that accepts a dictionary argument of ``(string, Tensor)`` pairs, which are mappings from all `StochasticTensor` names in the model to their observed values. The function should return a Tensor, representing the log joint likelihood of the model. :param observed: A dictionary of ``(string, Tensor)`` pairs. Mapping from names of observed `StochasticTensor` s to their values. :param latent: A dictionary of ``(string, (Tensor, Tensor))``) pairs. Mapping from names of latent `StochasticTensor` s to their samples and log probabilities. :param axis: The sample dimension(s) to reduce when computing the outer expectation in log likelihood and in the cost for adapting proposals. If `None`, no dimension is reduced. :return: A Tensor. The surrogate cost to minimize. :return: A Tensor. Estimated log likelihoods. """ warnings.warn("rws(): This function will be deprecated in the coming " "version (0.3.1). Variational utilities are moving to " "`zs.variational`. Features of the original rws() can be " "achieved by two new variational objectives. For learning " "model parameters, please use the importance weighted " "objective: `zs.variational.iw_objective()`. For adapting " "the proposal, the new rws gradient estimator can be " "accessed by first constructing the inclusive KL divergence " "objective using `zs.variational.klpq` and then calling " "its rws() method.", category=FutureWarning) latent_k, latent_v = map(list, zip(*six.iteritems(latent))) latent_outputs = dict(zip(latent_k, map(lambda x: x[0], latent_v))) latent_logpdfs = map(lambda x: x[1], latent_v) joint_obs = merge_dicts(observed, latent_outputs) log_joint_value = log_joint(joint_obs) entropy = -sum(latent_logpdfs) log_w = log_joint_value + entropy if axis is not None: log_w_max = tf.reduce_max(log_w, axis, keep_dims=True) w_u = tf.exp(log_w - log_w_max) w_tilde = tf.stop_gradient( w_u / tf.reduce_sum(w_u, axis, keep_dims=True)) log_likelihood = log_mean_exp(log_w, axis) fake_log_joint_cost = -tf.reduce_sum(w_tilde * log_joint_value, axis) fake_proposal_cost = tf.reduce_sum(w_tilde * entropy, axis) cost = fake_log_joint_cost + fake_proposal_cost else: cost = log_w log_likelihood = log_w return cost, log_likelihood
eb6278919dd484884b3110681680e67d3ee17d2f
3,642,184
def fit_svr(X, y, kernel: str = 'rbf') -> LinearSVR:
    """
    Fit support vector regression for the given input X and expected labels y.
    :param X: Feature data
    :param y: Labels that should be correctly computed
    :param kernel: type of kernel used by the SVR {‘linear’, ‘poly’, ‘rbf’, ‘sigmoid’, ‘precomputed’}, default=’rbf’
        (note: currently unused, since LinearSVR only supports a linear kernel)
    :return: SVR that is fitted to X and y
    """
    svr = LinearSVR()
    svr.fit(X=X, y=y)
    return svr
10a38fca990c4ab058d582fbe38bd05df7456660
3,642,185
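A minimal usage sketch for `fit_svr` above, with synthetic data (illustrative only):

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 3))
y = X @ np.array([1.5, -2.0, 0.5]) + 0.1 * rng.normal(size=200)

svr = fit_svr(X, y)
print(svr.coef_)  # approximately [1.5, -2.0, 0.5]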
import typing def process_get_namespaces_from_accounts( status: int, json: list, network_type: models.NetworkType, ) -> typing.Sequence[models.NamespaceInfo]: """ Process the "/account/namespaces" HTTP response. :param status: Status code for HTTP response. :param json: JSON data for response message. """ assert status == 200 return [models.NamespaceInfo.create_from_dto(i, network_type) for i in json]
748bdca72db0640e75f8a0c063f7968ea9583e94
3,642,186
from typing import Optional import os def _initialize_pydataverse(DATAVERSE_URL: Optional[str], API_TOKEN: Optional[str]): """Sets up a pyDataverse API for upload.""" # Get environment variables if DATAVERSE_URL is None: try: DATAVERSE_URL = os.environ["DATAVERSE_URL"] except KeyError: raise MissingURLException if API_TOKEN is None: try: API_TOKEN = os.environ["DATAVERSE_API_TOKEN"] except KeyError: raise MissingCredentialsException return NativeApi(DATAVERSE_URL, API_TOKEN), DataAccessApi(DATAVERSE_URL, API_TOKEN)
716ce3a3e589cc9323159e6285194ab74706dc92
3,642,187
def _hexsplit(string): """ Split a hex string into 8-bit/2-hex-character groupings separated by spaces""" return ' '.join([string[i:i+2] for i in range(0, len(string), 2)])
672e475edeaafaa08254845e620b0a771b294fa8
3,642,188
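For clarity, a one-line usage example of `_hexsplit` above:

print(_hexsplit("deadbeef"))    # de ad be ef
print(_hexsplit("0a1b2c3d4e"))  # 0a 1b 2c 3d 4e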
def get_analysis_id(analysis_id): """ Get the new analysis id :param analysis_id: analysis_index DataFrame :return: new analysis_id """ if analysis_id.size == 0: analysis_id = 0 else: analysis_id = np.nanmax(analysis_id.values) + 1 return int(analysis_id)
3318764daadca6c1e1921847f623fcac169e2cb5
3,642,189
from typing import Union def get_station_pqr(station_name: str, rcu_mode: Union[str, int], db): """ Get PQR coordinates for the relevant subset of antennas in a station. Args: station_name: Station name, e.g. 'DE603LBA' or 'DE603' rcu_mode: RCU mode (0 - 6, can be string) db: instance of LofarAntennaDatabase from lofarantpos Example: >>> from lofarantpos.db import LofarAntennaDatabase >>> db = LofarAntennaDatabase() >>> pqr = get_station_pqr("DE603", "outer", db) >>> pqr.shape (96, 3) >>> pqr[0, 0] 1.7434713 >>> pqr = get_station_pqr("LV614", "5", db) >>> pqr.shape (96, 3) """ full_station_name = get_full_station_name(station_name, rcu_mode) station_type = get_station_type(full_station_name) if 'LBA' in station_name or str(rcu_mode) in ('1', '2', '3', '4', 'inner', 'outer'): # Get the PQR positions for an individual station station_pqr = db.antenna_pqr(full_station_name) # Exception: for Dutch stations (sparse not yet accommodated) if (station_type == 'core' or station_type == 'remote') and int(rcu_mode) in (3, 4): station_pqr = station_pqr[0:48, :] elif (station_type == 'core' or station_type == 'remote') and int(rcu_mode) in (1, 2): station_pqr = station_pqr[48:, :] elif 'HBA' in station_name or str(rcu_mode) in ('5', '6', '7', '8'): selected_dipole_config = { 'intl': GENERIC_INT_201512, 'remote': GENERIC_REMOTE_201512, 'core': GENERIC_CORE_201512 } selected_dipoles = selected_dipole_config[station_type] + \ np.arange(len(selected_dipole_config[station_type])) * 16 station_pqr = db.hba_dipole_pqr(full_station_name)[selected_dipoles] else: raise RuntimeError("Station name did not contain LBA or HBA, could not load antenna positions") return station_pqr.astype('float32')
d796639866421876bc58a7621d37bbe7239da6df
3,642,190
from typing import List

def hello_world(cities: List[str] = ["Berlin", "Paris"]) -> bool:
    """
    Hello world function.

    Arguments:
    - cities: List of cities in which 'hello world' is posted.

    Return:
    - success: Whether or not function completed successfully.
    """
    try:
        [print("Hello {}!".format(c)) for c in cities]  # list comprehension used only for its printing side effect
        return True
    except KeyboardInterrupt:
        return False
    finally:
        pass
a24f0f47c9b44c97f46524d354fff0ed9a735fe3
3,642,191
import os import re def parse_dir(directory, default_settings, oldest_revision, newest_revision, rep): """Parses bench data from files like bench_r<revision>_<scalar>. (str, {str, str}, Number, Number) -> {int:[BenchDataPoints]}""" revision_data_points = {} # {revision : [BenchDataPoints]} file_list = os.listdir(directory) file_list.sort() for bench_file in file_list: file_name_match = re.match('bench_r(\d+)_(\S+)', bench_file) if (file_name_match is None): continue revision = int(file_name_match.group(1)) scalar_type = file_name_match.group(2) if (revision < oldest_revision or revision > newest_revision): continue file_handle = open(directory + '/' + bench_file, 'r') if (revision not in revision_data_points): revision_data_points[revision] = [] default_settings['scalar'] = scalar_type revision_data_points[revision].extend( bench_util.parse(default_settings, file_handle, rep)) file_handle.close() return revision_data_points
8a802eec0c528f67bffdfc239079eba3729eb2f3
3,642,192
import random def random_samples(traj_obs, expert, num_sample): """Randomly sample a subset of states to collect expert feedback. Args: traj_obs: observations from a list of trajectories. expert: an expert policy. num_sample: the number of samples to collect. Returns: new expert data. """ expert_data = [] for i in range(len(traj_obs)): obs = traj_obs[i] random.shuffle(obs) new_expert_data = [] chosen = np.random.choice(range(len(obs)), size=min(num_sample, len(obs)), replace=False) for ch in chosen: state = obs[ch].observation action_step = expert.action(obs[ch]) action = action_step.action new_expert_data.append((state, action)) expert_data.extend(new_expert_data) return expert_data
55aa4312c095ce97b8cf2840ff9ca61e393dff63
3,642,193
import os def makeFolder(path): """Build a folder. Args: path (str): Folder path. Returns: bool: Creation status. """ if(not os.path.isdir(path)): try: os.makedirs(path) except OSError as error: print("Directory %s can't be created (%s)" % (path, error)) return False else: return True else: return False
4bd1535fb3ffc69f5638b6cfbeaf90a1ccbdf2f9
3,642,194
def get_p2_vector(img): """ Returns a p2 vector. We calculate the p2 vector by taking the radial mean of the autocorrelation of the input image. """ radvars = [] dimX = img.shape[0] dimY = img.shape[1] fftimage = np.fft.fft2(img) final_image = np.fft.ifft2(fftimage*np.conj(fftimage)) finImg = np.abs(final_image)/(dimX*dimY) centrdImg = np.fft.fftshift(finImg) center = [int(dimX/2), int(dimY/2)] radvar, _ = radial_profile(centrdImg, center, (dimX, dimY)) radvars.append(radvar) p2_vec = np.array(radvars) return p2_vec[0]
7544751bf268d6eea432e21efe3d3a7703b16c1b
3,642,195
import os

def start_replica_cmd(builddir, replica_id):
    """
    Return a command that starts an skvbc replica when passed to
    subprocess.Popen.
    Note that each argument is an element of the returned list.
    """
    statusTimerMilli = "500"
    viewChangeTimeoutMilli = "10000"
    path = os.path.join(builddir, "tests", "simpleKVBC", "TesterReplica", "skvbc_replica")
    return [path,
            "-k", KEY_FILE_PREFIX,
            "-i", str(replica_id),
            "-s", statusTimerMilli,
            "-v", viewChangeTimeoutMilli,
            "-e", str(True)
            ]
5f30b328dd0e583581310afeb3de0af3bc79c17c
3,642,196
def multiplex(n, q, **kwargs): """ Convert one queue into several equivalent Queues >>> q1, q2, q3 = multiplex(3, in_q) """ out_queues = [Queue(**kwargs) for i in range(n)] def f(): while True: x = q.get() for out_q in out_queues: out_q.put(x) t = Thread(target=f) t.daemon = True t.start() return out_queues
4de6fa4fd495c2b320c4cdf28aa56df4411b7aa9
3,642,197
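A small usage sketch for `multiplex` above, showing one input queue fanned out to two consumers; assumes the function above (and its Thread/Queue imports) is in scope:

from queue import Queue

in_q = Queue()
q1, q2 = multiplex(2, in_q)

in_q.put("hello")
print(q1.get())  # hello
print(q2.get())  # hello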
def stack(arrays, axis=0): """ Join a sequence of arrays along a new axis. The `axis` parameter specifies the index of the new axis in the dimensions of the result. For example, if ``axis=0`` it will be the first dimension and if ``axis=-1`` it will be the last dimension. .. versionadded:: 1.10.0 Parameters ---------- arrays : sequence of array_like Each array must have the same shape. axis : int, optional The axis in the result array along which the input arrays are stacked. Returns ------- stacked : ndarray The stacked array has one more dimension than the input arrays. See Also -------- concatenate : Join a sequence of arrays along an existing axis. split : Split array into a list of multiple sub-arrays of equal size. Examples -------- >>> arrays = [np.random.randn(3, 4) for _ in range(10)] >>> np.stack(arrays, axis=0).shape (10, 3, 4) >>> np.stack(arrays, axis=1).shape (3, 10, 4) >>> np.stack(arrays, axis=2).shape (3, 4, 10) >>> a = np.array_create.array([1, 2, 3]) >>> b = np.array_create.array([2, 3, 4]) >>> np.stack((a, b)) array_create.array([[1, 2, 3], [2, 3, 4]]) >>> np.stack((a, b), axis=-1) array_create.array([[1, 2], [2, 3], [3, 4]]) """ arrays = [array_create.array(arr) for arr in arrays] if not arrays: raise ValueError('need at least one array to stack') shapes = set(arr.shape for arr in arrays) if len(shapes) != 1: raise ValueError('all input arrays must have the same shape') result_ndim = arrays[0].ndim + 1 if not -result_ndim <= axis < result_ndim: msg = 'axis {0} out of bounds [-{1}, {1})'.format(axis, result_ndim) raise IndexError(msg) if axis < 0: axis += result_ndim sl = (slice(None),) * axis + (None,) expanded_arrays = [arr[sl] for arr in arrays] return concatenate(expanded_arrays, axis=axis)
ba8a2b514c32a1dc7a15215e5e26a90f2ace9a26
3,642,198
import logging import sys def get_runs(runs, selected_runs, cmdline): """Selects which run(s) to execute based on parts of the command-line. Will return an iterable of run numbers. Might also fail loudly or exit after printing the original command-line. """ name_map = dict((r['id'], i) for i, r in enumerate(runs) if 'id' in r) run_list = [] def parse_run(s): try: r = int(s) except ValueError: logging.critical("Error: Unknown run %s", s) raise UsageError if r < 0 or r >= len(runs): logging.critical("Error: Expected 0 <= run <= %d, got %d", len(runs) - 1, r) sys.exit(1) return r if selected_runs is None: run_list = list(irange(len(runs))) else: for run_item in selected_runs.split(','): run_item = run_item.strip() if run_item in name_map: run_list.append(name_map[run_item]) continue sep = run_item.find('-') if sep == -1: run_list.append(parse_run(run_item)) else: if sep > 0: first = parse_run(run_item[:sep]) else: first = 0 if sep + 1 < len(run_item): last = parse_run(run_item[sep + 1:]) else: last = len(runs) - 1 if last < first: logging.critical("Error: Last run number should be " "greater than the first") sys.exit(1) run_list.extend(irange(first, last + 1)) # --cmdline without arguments: display the original command-line if cmdline == []: print("Original command-lines:") for run in run_list: print(' '.join(shell_escape(arg) for arg in runs[run]['argv'])) sys.exit(0) return run_list
fefeb7ec2e9c11767e2e15856321bf68aaad8a83
3,642,199