content
stringlengths
22
815k
id
int64
0
4.91M
def _is_disk_larger_than_max_size(device, node_uuid):
    """Check if total disk size exceeds 2TB msdos limit

    :param device: device path.
    :param node_uuid: node's uuid. Used for logging.
    :raises: InstanceDeployFailure, if any disk partitioning related
        commands fail.
    :returns: True if total disk size exceeds 2TB. Returns False otherwise.
    """
    try:
        size_bytes, _err = utils.execute('blockdev', '--getsize64', device,
                                         use_standard_locale=True,
                                         run_as_root=True)
    except (processutils.UnknownArgumentError,
            processutils.ProcessExecutionError,
            OSError) as exc:
        msg = (_('Failed to get size of disk %(disk)s for node %(node)s. '
                 'Error: %(error)s') %
               {'disk': device, 'node': node_uuid, 'error': exc})
        LOG.error(msg)
        raise exception.InstanceDeployFailure(msg)
    # blockdev reports bytes; convert to MiB before comparing to the limit.
    size_mb = int(size_bytes.strip()) // (1024 * 1024)
    return size_mb > MAX_DISK_SIZE_MB_SUPPORTED_BY_MBR
27,500
def getLines(filename):
    """Return list of lines from file"""
    # Iterating a file handle yields the same line list readlines() would,
    # with newline characters preserved.
    with open(filename, 'r', errors='ignore') as handle:
        return list(handle)
27,501
def bbox_next_frame_v3(F_first, F_pre, seg_pre, seg_first, F_tar, bbox_first, bbox_pre, temp, name):
    """
    METHOD: combining tracking & direct recognition, calculate bbox in
    target frame using both first frame and previous frame.

    F_* are feature tensors, seg_* are segmentation tensors, bbox_* are
    per-object bounding boxes; `temp` is a matching temperature and `name`
    is unused in the body (kept for interface compatibility).
    Returns (seg_pred, seg_post, bbox_tar).
    """
    F_first, F_pre, seg_pre, seg_first, F_tar = squeeze_all(F_first, F_pre, seg_pre, seg_first, F_tar)
    c, h, w = F_first.size()
    # Match target-frame features against both the previous and first frame.
    coords_pre_tar = match_ref_tar(F_pre, F_tar, seg_pre, temp)
    coords_first_tar = match_ref_tar(F_first, F_tar, seg_first, temp)
    coords_tar = {}
    for cnt, coord_first in coords_first_tar.items():
        coord_pre = coords_pre_tar[cnt]
        # fall-back schema: prefer previous-frame match, fall back to
        # first-frame match when the previous frame had no match.
        if(coord_pre is None):
            coord_tar_ = coord_first
        else:
            coord_tar_ = coord_pre
        coords_tar[cnt] = coord_tar_
    # NOTE(review): seg_pre_idx is computed but never used below — possibly
    # leftover from an earlier version; confirm before removing.
    _, seg_pre_idx = torch.max(seg_pre, dim=0)
    coords_tar = clean_coords(coords_tar, bbox_pre, threshold=4)
    bbox_tar = bbox_in_tar(coords_tar, bbox_first, h, w)
    # recognition (helper name keeps the project's original spelling)
    seg_pred = recoginition(F_first, F_tar, bbox_first, bbox_tar, seg_first, temp)
    seg_cleaned = clean_seg(seg_pred, bbox_tar, threshold=1)
    # move bbox w.r.t cleaned seg
    bbox_tar = shift_bbox(seg_cleaned, bbox_tar)
    seg_post = post_process_seg(seg_pred.unsqueeze(0))
    return seg_pred, seg_post, bbox_tar
27,502
async def test_duplicate_bridge_import(hass):
    """Test that creating a bridge entry with a duplicate host errors."""
    entry_mock_data = {
        CONF_HOST: "1.1.1.1",
        CONF_KEYFILE: "",
        CONF_CERTFILE: "",
        CONF_CA_CERTS: "",
    }
    # Pre-register an entry with the same host so the import collides.
    mock_entry = MockConfigEntry(domain=DOMAIN, data=entry_mock_data)
    mock_entry.add_to_hass(hass)
    with patch(
        "homeassistant.components.lutron_caseta.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        # Mock entry added, try initializing flow with duplicate host
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data=entry_mock_data,
        )
    # The flow must abort as already-configured and never set up the entry.
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == CasetaConfigFlow.ABORT_REASON_ALREADY_CONFIGURED
    assert len(mock_setup_entry.mock_calls) == 0
27,503
def hough_lines(img, rho=2, theta=np.pi / 180, threshold=20, min_line_len=5,
                max_line_gap=25, thickness=3):
    """Perform a Hough transform on img

    Args:
        img (numpy.ndarray): input image
        rho (float, optional): distance resolution in pixels of the Hough grid
        theta (float, optional): angular resolution in radians of the Hough grid
        threshold (float, optional): minimum number of votes
            (intersections in Hough grid cell)
        min_line_len (int, optional): minimum number of pixels making up a line
        max_line_gap (int, optional): maximum gap in pixels between
            connectable line segments
        thickness (int, optional): thickness of lines drawn on resulting image

    Returns:
        numpy.ndarray: result image
    """
    # Detect line segments via the probabilistic Hough transform.
    segments = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]),
                               minLineLength=min_line_len,
                               maxLineGap=max_line_gap)
    # Blank 3-channel canvas matching the input's spatial dimensions.
    canvas = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    # Extrapolate the raw segments, then render them onto the canvas.
    full_lines = extrapolate_lines(segments, canvas.shape)
    draw_lines(canvas, full_lines, thickness=thickness)
    return canvas
27,504
def multi_dev_init(params):
    """
    Function to be invoked when executing data loading pipeline on
    multiple machines.

    Parameters:
    -----------
    params : argparser object
        argparser object providing access to command line arguments.
        Must expose ``rank`` and ``world_size``.
    """
    # Init the gloo process group here.
    # BUG FIX: was `dist.init_prcess_group` (typo), which would raise
    # AttributeError the first time this function ran.
    dist.init_process_group("gloo", rank=params.rank, world_size=params.world_size)
    print('[Rank: ', params.rank, '] Done with process group initialization...')
    # Invoke the main function here.
    proc_exec(params.rank, params.world_size, params)
    print('[Rank: ', params.rank, '] Done with Distributed data processing pipeline processing.')
27,505
def verify_package_version(ctx, config, remote):
    """
    Ensures that the version of package installed is what was asked for
    in the config. For most cases this is for ceph, but we also install samba
    for example.
    """
    # Do not verify the version if the ceph-deploy task is being used to
    # install ceph. Verifying the ceph installed by ceph-deploy should work,
    # but the qa suites will need reorganized first to run ceph-deploy
    # before the install task.
    # see: http://tracker.ceph.com/issues/11248
    if config.get("extras"):
        log.info("Skipping version verification...")
        return True
    if 'repos' in config and config.get('repos'):
        log.info("Skipping version verification because we have custom repos...")
        return True
    builder = _get_builder_project(ctx, remote, config)
    expected_version = builder.version
    package = builder.project
    installed = packaging.get_package_version(remote, package)
    # A match means the expected version string appears in the installed one.
    if not (installed and expected_version in installed):
        raise RuntimeError(
            "{pkg} version {ver} was not installed, found {installed}.".format(
                ver=expected_version, installed=installed, pkg=package
            )
        )
    log.info(
        "The correct {pkg} version {ver} is installed.".format(
            ver=expected_version, pkg=package
        )
    )
27,506
def convert_file():
    """Convert the fixture SITL log file(s) and return the converted paths.

    NOTE(review): the previous docstring ("groceries cart object") was
    copy-pasted from an unrelated test and did not describe this function.

    Globs the fixture file under ``data/2017-07-31_072433.txt`` relative to
    this module, runs ``convert_sitl`` on each match, then globs and returns
    the resulting output files (via ``generate_output_path``).
    """
    local_path = os.path.join('data', '2017-07-31_072433.txt')
    fixture_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        local_path,
    )
    files = glob.glob(fixture_path)
    for file_ in files:
        convert_sitl(file_)
    # Converted files are discovered by globbing the derived output path.
    converted_path = generate_output_path(fixture_path)
    converted_files = glob.glob(converted_path)
    return converted_files
27,507
def decrypt(v1: int, v2: int):
    """Decrypt by XOR-ing the two integers and decoding the result.

    The XOR result is interpreted as a big-endian byte string and decoded
    with the default (UTF-8) codec.
    """
    xored = int(v1) ^ int(v2)
    # Round the bit length up to whole bytes.
    n_bytes = (xored.bit_length() + 7) // 8
    return xored.to_bytes(n_bytes, 'big').decode()
27,508
def enforce_mixture_consistency_time_domain(mixture_waveforms, separated_waveforms,
                                            mix_weights=None, mix_weights_type=''):
    """Projection implementing mixture consistency in time domain.

    This projection makes the sum across sources of separated_waveforms
    equal mixture_waveforms and minimizes the unweighted mean-squared
    error between the sum across sources of separated_waveforms and
    mixture_waveforms. See https://arxiv.org/abs/1811.08521 for the
    derivation.

    Args:
      mixture_waveforms: Tensor of mixture waveforms in waveform format.
      separated_waveforms: Tensor of separated waveforms in source image format.
      mix_weights: None or Tensor of weights used for mixture consistency,
        shape should broadcast with denoised_waveforms. Overrides
        mix_weights_type.
      mix_weights_type: Type of weights used for mixture consistency. Options:
        `` - No weighting.
        `magsq` - Mix weights are magnitude-squared of the separated signal.

    Returns:
      Projected separated_waveforms as a Tensor in source image format.
    """
    # Modify the source estimates such that they sum up to the mixture, where
    # the mixture is defined as the sum across sources of the true source
    # targets. Uses the least-squares solution under the constraint that the
    # resulting source estimates add up to the mixture.
    num_sources = tf.shape(separated_waveforms)[1]
    # Add a sources axis to mixture_spectrograms.
    mix = tf.expand_dims(mixture_waveforms, axis=1)
    # mix is now of shape: (batch_size, 1, num_mics, samples).
    mix_estimate = tf.reduce_sum(separated_waveforms, axis=1, keepdims=True)
    # mix_estimate is of shape: (batch_size, 1, num_mics, samples).
    if mix_weights is None:
        if mix_weights_type == 'magsq':
            # Per-source mean power, normalized across sources so the
            # weights sum to 1 along the sources axis.
            mix_weights = tf.reduce_mean(tf.square(separated_waveforms),
                                         axis=[2, 3], keepdims=True)
            mix_weights /= tf.reduce_sum(mix_weights, axis=1, keepdims=True)
        else:
            # Uniform weighting across sources.
            mix_weights = (1.0 / num_sources)
    mix_weights = tf.cast(mix_weights, mix.dtype)
    # Distribute the residual (mix - mix_estimate) across sources per weight.
    correction = mix_weights * (mix - mix_estimate)
    separated_waveforms = separated_waveforms + correction
    return separated_waveforms
27,509
def MatchNormsLoss(anchor_tensors, paired_tensors):
    """A norm on the difference between the norms of paired tensors.

    Gradients are only applied to the paired_tensor.

    Args:
      anchor_tensors: batch of embeddings deemed to have a "correct" norm.
      paired_tensors: batch of embeddings that will be pushed to the norm of
        anchor_tensors.

    Returns:
      A scalar loss
    """
    # stop_gradient keeps the anchor norms fixed; only paired_tensors learn.
    anchor_norms = tf.stop_gradient(tf.norm(anchor_tensors, axis=1))
    paired_norms = tf.norm(paired_tensors, axis=1)
    norm_gap = anchor_norms - paired_norms
    tf.summary.histogram('norms_difference', tf.nn.l2_loss(norm_gap))
    return tf.reduce_mean(tf.nn.l2_loss(norm_gap))
27,510
def notice_baseball():
    """Log that it is time for the kickabout.

    (Original Polish docstring: "Czas na kopaninkę".)

    :return: None
    """
    # The logged message is user-facing and intentionally kept in Polish.
    logging.info("Och, czas na kopaninkę!")
27,511
def get_ei(xx_tf, yn_tf, gp):
    """Compute an expected-improvement-style acquisition value at a point.

    :param xx_tf: A tensor giving the new point to evaluate at.
    :param yn_tf: A tensor giving all previously observed responses.
    :param gp: A gp used to predict. GP should be trained on the locations
        yn_tf was observed.
    """
    N, P = gp.index_points.numpy().shape
    k = gp.kernel
    # Cross-covariance between the candidate point and the training inputs.
    kxx = tf.reshape(k.apply(xx_tf, gp.index_points), [N, 1])
    K = tf.squeeze(gp.covariance())
    # Cholesky factor of the training covariance; used for both solves below.
    Kl = tf.squeeze(tf.linalg.cholesky(gp.covariance()))
    # alpha = K^{-1} y via two triangular solves (L^T \ (L \ y)).
    alpha = tf.linalg.solve(tf.cast(tf.transpose(Kl), tf.float64),
                            tf.linalg.solve(tf.cast(Kl, tf.float64), yn_tf))
    v = tf.linalg.solve(Kl, kxx)
    # GP posterior mean at xx_tf.
    zpred_mean = tf.squeeze(tf.matmul(tf.transpose(kxx), alpha))
    #TODO: Made a small change right here.
    kkxx = k.apply(xx_tf, xx_tf)
    # GP posterior variance at xx_tf.
    zpred_vars = tf.squeeze(kkxx - tf.matmul(tf.transpose(v), v))
    miny = tf.reduce_min(yn_tf)
    pdist = tfp.distributions.Normal(tf.squeeze(zpred_mean), tf.squeeze(tf.sqrt(zpred_vars)))
    #pdist = tfp.distributions.Normal(tf.squeeze(zpred_mean), tf.squeeze((zpred_vars)))
    # NOTE(review): textbook EI multiplies pdist.prob(miny) by the posterior
    # *stddev*, not the variance — the TODO above suggests this deviation is
    # deliberate; confirm before "fixing".
    ei = (miny - zpred_mean) * pdist.cdf(miny) + \
        zpred_vars * pdist.prob(miny)
    return(ei)
27,512
def authenticate_begin(username, **_):
    """
    Begin authentication procedure

    Variables:
    username     user name of the user you want to login with

    Arguments:
    None

    Data Block:
    None

    Result example:
    <WEBAUTHN_AUTHENTICATION_DATA>
    """
    user = STORAGE.user.get(username, as_obj=False)
    if not user:
        return make_api_response({'success': False}, err="Bad Request", status_code=400)

    # Drop any stale webauthn challenge state from a previous attempt.
    session.pop('state', None)

    # security_tokens presumably maps token names to websafe-encoded
    # attested credential blobs — confirm against the registration path.
    security_tokens = user.get('security_tokens', {}) or {}
    credentials = [AttestedCredentialData(websafe_decode(x)) for x in security_tokens.values()]
    # server.authenticate_begin returns the challenge payload plus opaque
    # state that must be echoed back at authenticate_complete time.
    auth_data, state = server.authenticate_begin(credentials)
    session['state'] = state

    # CBOR-encode the challenge and return it as a list of byte values.
    return make_api_response(list(cbor.encode(auth_data)))
27,513
def calculate_aspect_ratios(apps, schema_editor):
    """
    Assignes every projector one aspect ratio of the ones, that OS supported
    until this migration. If no matching ratio was found, the default of 16:9
    is assigned.
    """
    Projector = apps.get_model("core", "Projector")
    tolerance = 0.05
    known_ratios = {
        4 / 3: (4, 3),
        16 / 9: (16, 9),
        16 / 10: (16, 10),
        30 / 9: (30, 9),
    }
    for projector in Projector.objects.all():
        actual = projector.width / projector.height
        chosen = (16, 9)  # default, if no matching aspect ratio was found.
        # Take the first known ratio within the tolerance band.
        for value, pair in known_ratios.items():
            if abs(value - actual) <= tolerance:
                chosen = pair
                break
        projector.aspect_ratio_numerator = chosen[0]
        projector.aspect_ratio_denominator = chosen[1]
        projector.save(skip_autoupdate=True)
27,514
def tempSHT31():
    """Read temp and humidity from SHT31.

    Thin wrapper around the module-level ``sht31sensor`` driver; returns
    whatever ``get_temp_humi()`` returns — presumably a
    (temperature, humidity) pair; confirm with the driver's docs.
    """
    return sht31sensor.get_temp_humi()
27,515
def dog(argv, params):
    """Returns a slack attachment with a picture of a dog from thedogapi"""
    # Print prints logs to cloudwatch
    # Send response to response url
    api_url = 'https://api.thedogapi.com/v1/images/search?mime_types=jpg,png'
    image_url = requests.get(api_url).json()[0].get('url')
    attachment = {
        "author_name": '@{} /catops dog'.format(
            params.get('user_name', ['CatOps'])[0]),
        "fallback": "Woof woof.",
        "title": "Woof!",
        "text": "Evil doggo.",
        "image_url": image_url,
        "color": "#764FA5",
    }
    return {
        'statusCode': '200',
        "attachments": [attachment],
        'response_type': 'in_channel',
        'headers': {'Content-Type': 'application/json'},
    }
27,516
def get_public_suffix (domain):
    """
    get_public_suffix("www.example.com") -> "example.com"

    Calling this function with a DNS name will return the
    public suffix for that name. Note that if the input does not
    contain a valid TLD, e.g. "xxx.residential.fw" in which "fw" is not
    a valid TLD, the returned public suffix will be "fw", and TLD will
    be empty

    Note that for internationalized domains the list at
    http://publicsuffix.org uses decoded names, so it is
    up to the caller to decode any Punycode-encoded names.
    """
    # Module-level trie root and memoization cache.
    global Root, Domain_to_t2ld_cache
    try:
        # Cache hit: return the previously computed suffix.
        return Domain_to_t2ld_cache [domain]
    except KeyError:
        parts = domain.lower().lstrip('.').split('.')
        hits = [None] * len(parts)
        # _lookup_node fills `hits` by walking the suffix trie — its exact
        # marker semantics live elsewhere; 0 appears to mark a suffix boundary.
        _lookup_node (hits, 1, Root, parts)
        for i, what in enumerate(hits):
            if what is not None and what == 0:
                t2ld = '.'.join(parts[i:])
                Domain_to_t2ld_cache [domain] = t2ld
                return t2ld
    # NOTE(review): if no hit is found the function falls through and
    # implicitly returns None (uncached) — confirm callers expect that.
27,517
def interpol(data, x):
    """Resample `data` by integer factor `x` using linear interpolation."""
    from scipy.interpolate import interp1d
    n = len(data)
    # Original sample positions and the denser target positions; the target
    # grid has x*n - (x-1) points spanning the same range.
    src = np.linspace(0, n - 1, n)
    dst = np.linspace(0, n - 1, n * x - (x - 1))
    return interp1d(src, data)(dst)
27,518
def test_excess_verbosity(parser, verbosity):
    """Verbosity saturates / maxes out."""
    argv = [VERBOSITY_OPTNAME, str(verbosity)]
    with pytest.raises(SystemExit):
        parser.parse_args(argv)
27,519
def game_summary(game_score: int, game_max_score: int, secret_word: str, user_won: bool, wrong_guesses: List[str]):
    """Give a game summary to the user after the game.

    Blocks on ``input()`` until the user presses enter.

    Args:
        game_score (int): The score of the game.
        game_max_score (int): The maximum potential score for the game.
        secret_word (str): The secret word.
        user_won (bool): If the user won or not.
        wrong_guesses (List[str]): The wrong guesses made; only its length is
            used, to pick the hangman art frame.
    """
    input(f"""{hangman_art(len(wrong_guesses))}
You {"Won" if user_won else "Lost"}!
The word was: {secret_word}
Score: {game_score}/{game_max_score}
Press enter to continue""")
27,520
def test_server_json_pretty(monkeypatch):
    """Test whole server"""
    # A pretty-printed JSON body always opens with '{' followed by a newline.
    body = _test_server(monkeypatch, query_string='fmt=json_pretty', port=9004)
    assert body.startswith('{\n')
27,521
def convert_to_torch_tensors(X_train, y_train, X_test, y_test):
    """Quickly convert the train/test datasets to pytorch tensors.

    Features become LongTensors (integer token ids), labels FloatTensors.
    """
    as_long = torch.LongTensor
    as_float = torch.FloatTensor
    return as_long(X_train), as_float(y_train), as_long(X_test), as_float(y_test)
27,522
def api_auth(func):
    """
    If the user is not logged in, this decorator looks for basic
    HTTP auth data in the request header.
    """
    @wraps(func)
    def wrapper(request, *args, **kwargs):
        # Delegate credential checking; 404 (not 401) on failure hides the
        # endpoint from unauthenticated callers.
        if APIAuthentication(request).authenticate():
            return func(request, *args, **kwargs)
        raise Http404
    return wrapper
27,523
def object_miou(y_true, y_pred, num_classes=cfg.num_classes):
    """Measure the mean IoU of the objects in the image.

    :param y_true: ground-truth labels
    :param y_pred: predictions
    :param num_classes: number of classes
    :return: mean IoU over classes 1..num_classes-1 (background excluded)
    """
    confusion_matrix = get_confusion_matrix(y_true, y_pred, num_classes)
    # IoU = TP / (TP + FP + FN); the diagonal holds each class's TP
    # (equivalently, the intersection).
    intersection = tf.linalg.diag_part(confusion_matrix)
    # Row sums (axis=1) + column sums (axis=0) - TP gives the union.
    union = (tf.reduce_sum(confusion_matrix, axis=1)
             + tf.reduce_sum(confusion_matrix, axis=0)
             - intersection)
    # (Removed the no-op `intersection = intersection` / `union = union`
    # statements that were dead code.)
    iou = intersection / union  # per-class IoU
    # Replace NaN (classes absent from both tensors) with 0 to avoid
    # poisoning the mean.
    iou = tf.where(tf.math.is_nan(iou), tf.zeros_like(iou), iou)
    # Exclude index 0 (background) from the mean.
    miou = tf.reduce_mean(iou[1:])
    return miou
27,524
def post_discussion(title: str, content: str, path: str, top: bool, private: bool = False):
    """
    Create a discussion post.

    Params:
        title: discussion title (must be non-empty)
        content: body text
        path: board path the post belongs to
        top: pin the post (admins only)
        private: mark the post private

    Returns a response dict of the form:
    {
        "code": -1,              # 0 on success, -1 on failure
        "discussion_id": ...,    # present on success
        "message": "..."         # error message on failure
    }
    """
    import datetime
    if not session.get("uid"):
        return make_response(-1, message="请登录")
    user: User = User.by_id(int(session.get("uid")))
    # Only managers may pin; note the permission check runs unconditionally.
    if not permission_manager.has_permission(user.id, "discussion.manage") and top:
        return make_response(-1, message="只有管理员才能发置顶讨论")
    if not can_post_at(user, path):
        return make_response(-1, message="你无权在这里发帖")
    if not title:
        return make_response(-1, message="标题不得为空")
    discussion = Discussion()
    discussion.title = title
    discussion.content = content
    discussion.path = path
    discussion.time = datetime.datetime.now()
    discussion.top = top
    discussion.uid = user.id
    discussion.private = private
    db.session.add(discussion)
    db.session.commit()
    return make_response(0, discussion_id=discussion.id)
27,525
def logs_handler(request):
    """Return the log file on disk.

    Merges every ``genconf/state/*.json`` blob into a single pretty-printed
    ``complete.log`` and redirects the client to its download URL.

    :param request: a web request object.
    :type request: request | None
    """
    log.info("Request for logs endpoint made.")
    complete_log_path = 'genconf/state/complete.log'
    complete_log = []
    for json_file in glob.glob('genconf/state/*.json'):
        log.debug('Adding {} to complete log file.'.format(json_file))
        with open(json_file) as blob:
            complete_log.append(json.loads(blob.read()))
    with open(complete_log_path, 'w') as f:
        f.write(json.dumps(complete_log, indent=4, sort_keys=True))
    # BUG FIX: the redirect string had a spurious `.format(VERSION)` call on a
    # placeholder-free string — a confusing no-op; removed.
    return web.HTTPFound('/download/log/complete.log')
27,526
def _get_fields_usage_data(session):
    """
    Obtaining metrics of field usage in lingvodoc, the metrics are quantity of
    all/deleted dictionary perspectives using this field (also with URLs) and
    quantity of lexical entries in such dictionary perspectives

    Result:
        dict {
            (client_id, object_id): dict {
                'URLs': list['url_string', ...],
                'metrics': dict {
                    'dp': dict {
                        'sum': quantity of all parent dictionary perspectives,
                        'deleted': quantity of deleted parent dictionary perspectives
                    },
                    'le': dict {
                        'sum': quantity of lexical entries of all parent dictionary perspectives,
                        'deleted': quantity of lexical entries of deleted parent dictionary perspectives
                    }
                }
            }
        }
    """
    # Labeled columns so the subquery exposes stable names for later
    # string-based references.
    f_client_id = Field.client_id.label('field_client_id')
    f_object_id = Field.object_id.label('field_object_id')
    dp_client_id = DictionaryPerspective.client_id.label('dictionary_perspective_client_id')
    dp_object_id = DictionaryPerspective.object_id.label('dictionary_perspective_object_id')
    dp_marked_for_deletion = \
        DictionaryPerspective.marked_for_deletion.label('dictionary_perspective_marked_for_deletion')
    # Base query: one distinct (field, perspective) pair per row.
    subquery = session.query(f_client_id, f_object_id, dp_client_id, dp_object_id, dp_marked_for_deletion)
    subquery = subquery.select_from(Field).join(
        DictionaryPerspectiveToField,
        and_(DictionaryPerspectiveToField.field_client_id == Field.client_id,
             DictionaryPerspectiveToField.field_object_id == Field.object_id))
    subquery = subquery.filter(DictionaryPerspective.marked_for_deletion == False,
                               Field.marked_for_deletion == False)
    subquery = subquery.join(
        DictionaryPerspective,
        and_(DictionaryPerspectiveToField.parent_client_id == DictionaryPerspective.client_id,
             DictionaryPerspectiveToField.parent_object_id == DictionaryPerspective.object_id))
    subquery = subquery.distinct(Field.client_id, Field.object_id,
                                 DictionaryPerspective.client_id, DictionaryPerspective.object_id)
    subquery = subquery.order_by(Field.client_id, Field.object_id,
                                 DictionaryPerspective.client_id, DictionaryPerspective.object_id)
    log.info(subquery)
    fields_usage = dict()
    # Pass 1: collect perspective URLs per field.
    try:
        for data in subquery.all():
            field_id = (data.field_client_id, data.field_object_id)
            if not fields_usage.get(field_id, None):
                fields_usage[field_id] = {
                    'URLs': list(),
                    'metrics': {
                        'dp': {'sum': 0, 'deleted': 0},
                        'le': {'sum': 0, 'deleted': 0}
                    }
                }
            fields_usage[field_id]['URLs'].append(
                _dictionary_perspective_url(
                    data.dictionary_perspective_client_id,
                    data.dictionary_perspective_object_id
                )
            )
    except exc.SQLAlchemyError as ex:
        log.warning('Failed to obtain fields usage URLs at ' + __name__)
        log.warning(ex)
        raise
    # Pass 2: per-field perspective counts, grouped by deletion flag.
    subquery = subquery.subquery('subquery')
    query = session.query('subquery.field_client_id', 'subquery.field_object_id',
                          func.count('*'), 'subquery.dictionary_perspective_marked_for_deletion')
    query = query.select_from(subquery).group_by(
        'subquery.field_client_id', 'subquery.field_object_id',
        'subquery.dictionary_perspective_marked_for_deletion')
    query = query.order_by('subquery.field_client_id', 'subquery.field_object_id')
    log.info(query)
    try:
        for data in query.all():
            usage = fields_usage.get((data[0], data[1]), None)
            if usage:
                # data[3] is the deletion flag, data[2] the group count.
                if data[3]:
                    usage['metrics']['dp']['deleted'] += data[2]
                usage['metrics']['dp']['sum'] += data[2]
    except exc.SQLAlchemyError as ex:
        log.warning('Failed to obtain fields dictionary perspective metrics at ' + __name__)
        log.warning(ex)
        raise
    # Pass 3: per-field lexical-entry counts via join against the subquery.
    query = session.query('subquery.field_client_id', 'subquery.field_object_id',
                          func.count('*'), 'subquery.dictionary_perspective_marked_for_deletion')
    query = query.select_from(LexicalEntry)
    query = query.join(
        subquery,
        and_('subquery.dictionary_perspective_client_id = lexicalentry.parent_client_id',
             'subquery.dictionary_perspective_object_id = lexicalentry.parent_object_id'))
    query = query.filter('lexicalentry.marked_for_deletion = false')
    query = query.group_by('subquery.field_client_id', 'subquery.field_object_id',
                           'subquery.dictionary_perspective_marked_for_deletion')
    log.info(query)
    try:
        for data in query.all():
            usage = fields_usage.get((data[0], data[1]), None)
            if usage:
                if data[3]:
                    usage['metrics']['le']['deleted'] += data[2]
                usage['metrics']['le']['sum'] += data[2]
    except exc.SQLAlchemyError as ex:
        log.warning('Failed to obtain fields lexical entry metrics at ' + __name__)
        log.warning(ex)
        raise
    return fields_usage
27,527
def output_is_new(output):
    """Check if the output file is up to date.

    Returns:
      True if the given output file exists and is newer than any of
      *_defconfig, MAINTAINERS and Kconfig*. False otherwise.
    """
    try:
        # NOTE(review): uses ctime, not mtime — on Linux ctime also changes
        # on metadata updates; presumably acceptable here, but confirm.
        ctime = os.path.getctime(output)
    except OSError as exception:
        if exception.errno == errno.ENOENT:
            # return False on 'No such file or directory' error
            return False
        else:
            raise
    # Any defconfig newer than the output invalidates it.
    for (dirpath, dirnames, filenames) in os.walk(CONFIG_DIR):
        for filename in fnmatch.filter(filenames, '*_defconfig'):
            if fnmatch.fnmatch(filename, '.*'):
                continue
            filepath = os.path.join(dirpath, filename)
            if ctime < os.path.getctime(filepath):
                return False
    # Any Kconfig* file or MAINTAINERS newer than the output invalidates it.
    for (dirpath, dirnames, filenames) in os.walk('.'):
        for filename in filenames:
            # Precedence is `A or (B and C)`: skip backup files (*~), and
            # skip anything that is neither a Kconfig* file nor MAINTAINERS.
            if (fnmatch.fnmatch(filename, '*~') or
                    not fnmatch.fnmatch(filename, 'Kconfig*') and
                    not filename == 'MAINTAINERS'):
                continue
            filepath = os.path.join(dirpath, filename)
            if ctime < os.path.getctime(filepath):
                return False
    # Detect a board that has been removed since the current board database
    # was generated
    with open(output, encoding="utf-8") as f:
        for line in f:
            if line[0] == '#' or line == '\n':
                continue
            # Column 7 of a data row holds the defconfig base name.
            defconfig = line.split()[6] + '_defconfig'
            if not os.path.exists(os.path.join(CONFIG_DIR, defconfig)):
                return False
    return True
27,528
def test_merge_batch_grad_transforms_same_key_same_trafo():
    """Test merging multiple ``BatchGradTransforms`` with same key and same trafo."""
    def identity(t):
        return t

    hooks = [BatchGradTransformsHook({"x": identity}) for _ in range(2)]
    merged = Cockpit._merge_batch_grad_transform_hooks(hooks)

    # Identical (key, transform) pairs collapse to a single entry
    # holding the very same callable object.
    assert len(merged._transforms.keys()) == 1
    assert merged._transforms["x"] is identity
27,529
def main(selected_ssids, sample_interval, no_header, args=None):
    """
    Repeatedly check internet connection status (connected or disconnected)
    for given WiFi SSIDs. Output is writen as .csv to stdout.

    :param selected_ssids: SSIDs to cycle through each sampling round.
    :param sample_interval: minutes between sampling rounds.
    :param no_header: suppress the CSV header line when truthy.
    :param args: unused; kept for CLI-entry compatibility.
    """
    wireless_connections = [
        c for c in NetworkManager.Settings.Connections
        if '802-11-wireless' in c.GetSettings().keys()
    ]
    known_ssids = [
        c.GetSettings()['802-11-wireless']['ssid'] for c in wireless_connections
    ]
    # confirm selected ssids are available as network manager connections
    for ssid in selected_ssids:
        assert ssid in known_ssids, f"SSID '{ssid}' not found in network manager connections. Available SSIDs: {sorted(known_ssids)}"
    # get the network manager connection objects for the selected ssids
    connections = {
        ssid: connection
        for connection in wireless_connections
        for ssid in selected_ssids
        if connection.GetSettings()['802-11-wireless']['ssid'] == ssid
    }
    # get the wireless device (first wifi-capable device found)
    wireless_devs = [
        d for d in NetworkManager.NetworkManager.GetDevices()
        if d.DeviceType == NetworkManager.NM_DEVICE_TYPE_WIFI
    ]
    assert len(wireless_devs) > 0, "No wifi device found. Aborting"
    wireless_dev = wireless_devs[0]
    # save the current active connection, to restore once this script exits
    initial_connection = wireless_dev.ActiveConnection.Connection if wireless_dev.ActiveConnection else None

    def restore_initial_connection():
        # Reactivate whatever connection was active before the script started.
        if initial_connection:
            NetworkManager.NetworkManager.ActivateConnection(
                initial_connection, wireless_dev, "/")

    atexit.register(restore_initial_connection)
    # write the csv header
    if not no_header:
        print("timestamp,ssid,device_connected,ping_successful", flush=True)
    # begin logging loop.
    next_log_time = time.time()
    while True:
        # wait for the next logging iteration
        restore_initial_connection()  # leave initial connection active while waiting
        time.sleep(max(next_log_time - time.time(), 0))
        next_log_time += sample_interval * 60
        for ssid in selected_ssids:
            # activate the connection (disconnect first if already active)
            if wireless_dev.State == NetworkManager.NM_DEVICE_STATE_ACTIVATED:
                wireless_dev.Disconnect()
            NetworkManager.NetworkManager.ActivateConnection(
                connections[ssid], wireless_dev, "/")
            connected = wait_for_connection(wireless_dev)
            if connected:
                # now test internet (by pinging google)
                ping_successful = ping("www.google.com")
            else:
                ping_successful = False
            # write out result
            print(
                f"{time.time()},{ssid},{int(connected)},{int(ping_successful)}",
                flush=True)
    # NOTE(review): unreachable — the `while True` loop above never breaks,
    # so this return only matters if the loop is ever given an exit path.
    return 0
27,530
def test_get_funcs_invalid_syntax(tmp_path: Path):
    """
    Atom IDE flake8 plugin can call flake8 with AST with correct syntax
    but with path to code with invalid syntax.
    In that case, we should ignore the file and fallback to the passed AST.
    """
    bad_source = tmp_path / 'test.py'
    bad_source.write_text('1/')
    checker = Checker(tree=ast.parse(TEXT), filename=str(bad_source))
    assert list(checker.run()) == EXPECTED
27,531
def getNarrowBandULAMIMOChannel(azimuths_tx, azimuths_rx, p_gainsdB, number_Tx_antennas, number_Rx_antennas,
                                normalizedAntDistance=0.5, angleWithArrayNormal=0, pathPhases=None):
    """This .m file uses ULAs at both TX and RX.
    - assumes one beam per antenna element

    the first column will be the elevation angle, and the second column
    is the azimuth angle correspondingly. p_gain will be a matrix size of (L, 1)
    departure angle/arrival angle will be a matrix as size of (L, 2), where L
    is the number of paths.
    t1 will be a matrix of size (nt, nr), each element of index (i,j) will be
    the received power with the i-th precoder and the j-th combiner in the
    departing and arrival codebooks respectively

    :param departure_angles: ((elevation angle, azimuth angle),) (L, 2) where L is the number of paths
    :param arrival_angles: ((elevation angle, azimuth angle),) (L, 2) where L is the number of paths
    :param p_gaindB: path gain (L, 1) in dB where L is the number of paths
    :param number_Rx_antennas, number_Tx_antennas: number of antennas at Rx and Tx, respectively
    :param pathPhases: in degrees, same dimension as p_gaindB
    :return: channel matrix H of shape (number_Rx_antennas, number_Tx_antennas)
    """
    azimuths_tx = np.deg2rad(azimuths_tx)
    azimuths_rx = np.deg2rad(azimuths_rx)
    # nt = number_Rx_antennas * number_Tx_antennas #np.power(antenna_number, 2)
    m = np.shape(azimuths_tx)[0]  # number of rays
    H = np.matrix(np.zeros((number_Rx_antennas, number_Tx_antennas)))
    gain_dB = p_gainsdB
    # Convert dB power gains to linear amplitude gains.
    path_gain = np.power(10, gain_dB / 10)
    path_gain = np.sqrt(path_gain)
    # generate uniformly distributed random phase in radians
    if pathPhases is None:
        pathPhases = 2 * np.pi * np.random.rand(len(path_gain))
    else:
        # convert from degrees to radians
        pathPhases = np.deg2rad(pathPhases)
    # include phase information, converting gains in complex-values
    path_complexGains = path_gain * np.exp(-1j * pathPhases)
    # recall that in the narrowband case, the time-domain H is the same as the
    # frequency-domain H
    for i in range(m):
        # at and ar are row vectors (using Python's matrix)
        at = np.matrix(arrayFactorGivenAngleForULA(number_Tx_antennas, azimuths_tx[i],
                                                   normalizedAntDistance, angleWithArrayNormal))
        ar = np.matrix(arrayFactorGivenAngleForULA(number_Rx_antennas, azimuths_rx[i],
                                                   normalizedAntDistance, angleWithArrayNormal))
        # Accumulate each path's rank-1 contribution.
        H = H + path_complexGains[i] * ar.conj().T * at  # outer product of ar Hermitian and at
    # Normalization intentionally disabled (kept for reference):
    #factor = (np.linalg.norm(path_complexGains) / np.sum(path_complexGains)) * np.sqrt(
    #    number_Rx_antennas * number_Tx_antennas)  # scale channel matrix
    #H *= factor  # normalize for compatibility with Anum's Matlab code
    return H
27,532
def get_pybricks_reset_vector():
    """Gets the boot vector of the pybricks firmware.

    Reads the second 4-byte word (offset 4) of the dual-boot firmware image.
    """
    with open("_pybricks/firmware-dual-boot-base.bin", "rb") as firmware:
        firmware.seek(4)
        return firmware.read(4)
27,533
def nSideCurve(sides=6, radius=1.0):
    """
    nSideCurve( sides=6, radius=1.0 )

    Create n-sided curve

    Parameters:
        sides - number of sides (type=int)
        radius - radius (type=float)
    Returns:
        a list with lists of x,y,z coordinates for curve points,
        [[x,y,z],[x,y,z],...n] (type=list)
    """
    step = 2.0 / sides
    points = []
    for index in range(sides):
        # Angles sweep [0, 2*pi), starting at the top and going clockwise.
        angle = index * step * pi
        points.append([sin(angle) * radius, cos(angle) * radius, 0])
    return points
27,534
def _get_corr_mat(corr_transform, n_dim): """ Input check for the arguments passed to DirectionalSimulator""" if corr_transform is None: return np.eye(n_dim) if not isinstance(corr_transform, np.ndarray) or corr_transform.ndim < 2: err_msg = "corr_transform must be a 2-D numpy array" raise ValueError(err_msg) if corr_transform.shape[0] != n_dim: err_msg = "Inconsistent number of marginal distributions and " err_msg += "corr_transform shape" raise ValueError(err_msg) if corr_transform.shape[0] != corr_transform.shape[1]: err_msg = "corr_transform must be square" raise ValueError(err_msg) if not (corr_transform == corr_transform.T).all(): err_msg = "corr_transform must be symmetrical" raise ValueError(err_msg) return corr_transform
27,535
def get_flowline_routing(NHDPlus_paths=None, PlusFlow=None, mask=None, mask_crs=None, nhdplus_crs=4269): """Read a collection of NHDPlus version 2 PlusFlow (routing) tables from one or more drainage basins and consolidate into a single pandas DataFrame, returning the `FROMCOMID` and `TOCOMID` columns. Parameters ---------- NHDPlus_paths : sequence Sequence of paths to the top level folder for each drainage basin. For example: .. code-block:: python ['NHDPlus/NHDPlusGL/NHDPlus04', 'NHDPlus/NHDPlusMS/NHDPlus07'] by default None PlusFlow : string or sequence Single path to a PlusFlow table or sequence of PlusFlow table filepaths, by default None Returns ------- flowline_routing : DataFrame [description] Raises ------ ValueError [description] """ if NHDPlus_paths is not None: flowlines_files, pfvaa_files, pf_files, elevslope_files = \ get_nhdplus_v2_filepaths(NHDPlus_paths, raise_not_exist_error=False) pf = shp2df(pf_files) if mask is not None: if isinstance(mask, tuple): extent_poly_nhd_crs = box(*mask) filter = mask elif mask is not None: extent_poly_nhd_crs = read_polygon_feature(mask, feature_crs=mask_crs, dest_crs=nhdplus_crs) # ensure that filter bbox is in same crs as flowlines # get filters from shapefiles, shapley Polygons or GeoJSON polygons filter = get_bbox(extent_poly_nhd_crs, dest_crs=nhdplus_crs) else: filter = None flowlines = shp2df(flowlines_files, filter=filter) keep_comids = pf['FROMCOMID'].isin(flowlines['COMID']) | \ pf['TOCOMID'].isin(flowlines['COMID']) pf = pf.loc[keep_comids] elif PlusFlow is not None: pf = shp2df(PlusFlow) else: raise ValueError(("get_flowline_routing: Must provide one of more" " NHDPlus_path or PlusFlow table.")) pf = pf.loc[pf['FROMCOMID'] != 0] return pf[['FROMCOMID', 'TOCOMID']]
27,536
def df_wxyz( time_slot_sensor: Sensor, test_source_a: BeliefSource, test_source_b: BeliefSource ) -> Callable[[int, int, int, int, Optional[datetime]], BeliefsDataFrame]: """Convenient BeliefsDataFrame to run tests on. For a single sensor, it contains w events, for each of which x beliefs by y sources each (max 2), described by z probabilistic values (max 3). Note that the event resolution of the sensor is 15 minutes. """ sources = [test_source_a, test_source_b] # expand to increase max y cps = [0.1587, 0.5, 0.8413] # expand to increase max z def f(w: int, x: int, y: int, z: int, start: Optional[datetime] = None): if start is None: start = datetime(2000, 1, 3, 9, tzinfo=pytz.utc) # Build up a BeliefsDataFrame with various events, beliefs, sources and probabilistic accuracy (for a single sensor) beliefs = [ TimedBelief( source=sources[s], sensor=time_slot_sensor, value=1000 * e + 100 * b + 10 * s + p, belief_time=datetime(2000, 1, 1, tzinfo=pytz.utc) + timedelta(hours=b), event_start=start + timedelta(hours=e), cumulative_probability=cps[p], ) for e in range(w) # w events for b in range(x) # x beliefs for s in range(y) # y sources for p in range(z) # z cumulative probabilities ] return BeliefsDataFrame(sensor=time_slot_sensor, beliefs=beliefs) return f
27,537
def not_posted(child, conn) -> bool: """Check if a post has been already tooted.""" child_data = child["data"] child_id = child_data["id"] last_posts = fetch_last_posts(conn) return child_id not in last_posts
27,538
def prepend_path(path, paths): """Prepends a path to the list of paths making sure it remains unique""" if path in paths: paths.remove(path) paths.insert(0, path)
27,539
def test_having_multiple_conditions(): """ Test having clause :return: """ my_frame = query( "select min(temp) from forest_fires having min(temp) > 2 and " "max(dc) < 200 or max(dc) > 1000" ) pandas_frame = FOREST_FIRES.copy() pandas_frame["_col0"] = FOREST_FIRES["temp"] aggregated_df = pandas_frame.aggregate({"_col0": "min"}).to_frame().transpose() max_dc_df = ( FOREST_FIRES["DC"].aggregate({"DC": "max"}).to_frame().reset_index(drop=True) ) pandas_frame = aggregated_df[ (aggregated_df["_col0"] > 2) & (max_dc_df["DC"] < 200) | (max_dc_df["DC"] > 1000) ] tm.assert_frame_equal(pandas_frame, my_frame)
27,540
def _fill_area_map(area_map, record_dict): """ 填充三级区划(区级)地名,包括简称 :param area_map: AddrMap, dict :param record_dict: dict :return: area_map """ area_name = record_dict[3] pca_tuple = (record_dict[1], record_dict[2], record_dict[3]) area_map.append_relational_addr(area_name, pca_tuple, A) # 自治县区划简称 if area_name in short_area_names.keys(): area_map.append_relational_addr(short_area_names[area_name], pca_tuple, A) # 4字区划简称 elif len(area_name) > 3 and (area_name.endswith(u'新区') or area_name.endswith(u'城区') or area_name.endswith(u'林区')): area_map.append_relational_addr(area_name[:-2], pca_tuple, A) # 过滤的区划名称 elif area_name in filter_area_names: pass # 3字区划简称,'XX区'不简写 elif len(area_name) > 2 and (area_name.endswith(u'市') or area_name.endswith(u'县')): area_map.append_relational_addr(area_name[:-1], pca_tuple, A)
27,541
def test_parallel(): """Test the parallel activity.""" simulation_start = 0 env = simpy.Environment(initial_time=simulation_start) registry = {} reporting_activity = model.BasicActivity( env=env, name="Reporting activity", registry=registry, duration=0, ) sub_processes = [ model.BasicActivity( env=env, name="Basic activity1", registry=registry, duration=14, additional_logs=[reporting_activity], ), model.BasicActivity( env=env, name="Basic activity2", registry=registry, duration=5, additional_logs=[reporting_activity], ), model.BasicActivity( env=env, name="Basic activity3", registry=registry, duration=220, additional_logs=[reporting_activity], ), ] activity = model.ParallelActivity( env=env, name="Parallel process", registry=registry, sub_processes=sub_processes, ) model.register_processes([activity]) env.run() assert env.now == 220 assert_log(activity) assert_log(reporting_activity)
27,542
def parse_img_name(path): """parse image by frame name :param name [str] :output img_lists """ code = path.split('\\')[-1].split('.')[0] vid_id = path.split('\\')[-2] rcp_id = path.split('\\')[-3] seg_id = int(code[:4]) frm_id = int(code[4:]) return rcp_id, vid_id, seg_id, frm_id
27,543
def get_bspline_kernel(x, channels, transpose=False, dtype=tf.float32, order=4): """Creates a 5x5x5 b-spline kernel. Args: num_channels: The number of channels of the image to filter. dtype: The type of an element in the kernel. Returns: A tensor of shape `[5, 5, 5, num_channels, num_channels]`. """ mesh = x.mesh in_dim = x.shape[-1] num_channels = channels.size if order == 8: kernel = np.array(( 1., 8., 28., 56., 70., 56., 28., 8., 1.), dtype=dtype.as_numpy_dtype()) elif order == 6: kernel = np.array(( 1., 6., 15., 20., 15., 6., 1.), dtype=dtype.as_numpy_dtype()) elif order==2: kernel = np.array(( 1., 2., 1.), dtype=dtype.as_numpy_dtype()) else: kernel = np.array(( 1., 4., 6., 4., 1.), dtype=dtype.as_numpy_dtype()) size = len(kernel) kernel = np.einsum('ij,k->ijk', np.outer(kernel, kernel), kernel) kernel /= np.sum(kernel) kernel = kernel[:, :, :, np.newaxis, np.newaxis] kernel = tf.constant(kernel, dtype=dtype) * tf.eye(num_channels, dtype=dtype) fd_dim = mtf.Dimension("fd", size) fh_dim = mtf.Dimension("fh", size) fw_dim = mtf.Dimension("fw", size) if transpose: return mtf.import_tf_tensor(mesh, kernel, shape=[fd_dim, fh_dim, fw_dim, channels, in_dim]) else: return mtf.import_tf_tensor(mesh, kernel, shape=[fd_dim, fh_dim, fw_dim, in_dim, channels])
27,544
def gen_prot_dict(): """ :param input_list: :return: """ from .protocols import Stock_solution,MonoDispensing_type1,MonoDispensing_type2,MultiBase,SMTransfer,ReactionQC,QCSolubilise,DMATransfer,\ PostWorkupTransfer,Workup,PostWorkupQCAndTransfer,PostWorkupDMSOAddition,BaseT3PMulti, PoisedReactor input_list = [Stock_solution,MonoDispensing_type1,MonoDispensing_type2,MultiBase,SMTransfer,ReactionQC,QCSolubilise,DMATransfer,PostWorkupTransfer, Workup,PostWorkupQCAndTransfer,PostWorkupDMSOAddition,BaseT3PMulti, PoisedReactor] out_dict = {} for protocol in input_list: out_dict[str(protocol())] = protocol return out_dict
27,545
def f(x): """ 예측해야 하는 함수입니다. """ return np.matmul(x * np.absolute(np.sin(x)), np.array([[2], [1]]))
27,546
def create_frame_coords_list(coords_path): """ :param coords_path: [int] :type coords_path: list :return: int, [int] :rtype: tuple """ id_number = coords_path[0] fr_coordinates = [None]*int((len(coords_path) - 1) / 3) # excluding the index 0 (which is the id) the number of triples is the length of this array index = 0 for i in range(1, len(coords_path), 3): x = coords_path[i] y = coords_path[i + 1] frame_number = coords_path[i + 2] fr_coordinates[index] = FrameCoord(x, y, frame_number) index += 1 return id_number, fr_coordinates
27,547
def parse_args(): """ Parses command-line arguments and returns a run configuration """ runconfig = types.SimpleNamespace() runconfig.ssl = False runconfig.port = None runconfig.connection_string = None i = 1 try: while i < len(sys.argv): arg = sys.argv[i] if arg == '-s': if runconfig.ssl: raise ValueError runconfig.ssl = True runconfig.certificate = sys.argv[i + 1] runconfig.key = sys.argv[i + 2] runconfig.keypassword = sys.argv[i + 3] i += 4 elif arg == '-p': if runconfig.port is not None: raise ValueError runconfig.port = int(sys.argv[i + 1]) if runconfig.port <= 0 or runconfig.port > 65536: raise ValueError i += 2 elif arg == '-c': if runconfig.connection_string is not None: raise ValueError runconfig.connection_string = sys.argv[i + 1] i += 2 else: raise ValueError if runconfig.connection_string is None: raise ValueError except (IndexError, ValueError): print(USAGE) sys.exit(1) if runconfig.port is None: runconfig.port = 1995 return runconfig
27,548
def arcToolReport(function=None, arcToolMessageBool=False, arcProgressorBool=False): """This decorator function is designed to be used as a wrapper with other GIS functions to enable basic try and except reporting (if function fails it will report the name of the function that failed and its arguments. If a report boolean is true the function will report inputs and outputs of a function.-David Wasserman""" def arcToolReport_Decorator(function): def funcWrapper(*args, **kwargs): try: funcResult = function(*args, **kwargs) if arcToolMessageBool: arcpy.AddMessage("Function:{0}".format(str(function.__name__))) arcpy.AddMessage(" Input(s):{0}".format(str(args))) arcpy.AddMessage(" Ouput(s):{0}".format(str(funcResult))) if arcProgressorBool: arcpy.SetProgressorLabel("Function:{0}".format(str(function.__name__))) arcpy.SetProgressorLabel(" Input(s):{0}".format(str(args))) arcpy.SetProgressorLabel(" Ouput(s):{0}".format(str(funcResult))) return funcResult except Exception as e: arcpy.AddMessage( "{0} - function failed -|- Function arguments were:{1}.".format(str(function.__name__), str(args))) print( "{0} - function failed -|- Function arguments were:{1}.".format(str(function.__name__), str(args))) print(e.args[0]) return funcWrapper if not function: # User passed in a bool argument def waiting_for_function(function): return arcToolReport_Decorator(function) return waiting_for_function else: return arcToolReport_Decorator(function)
27,549
async def test_async_get_server_version(client_session, ws_client, url, version_data): """Test the get server version helper.""" ws_client.receive_json.return_value = version_data version_info = await async_get_server_version(url, client_session) assert client_session.ws_connect.called assert client_session.ws_connect.call_args == call(url) assert version_info.driver_version == version_data["driverVersion"] assert version_info.server_version == version_data["serverVersion"] assert version_info.min_schema_version == version_data["minSchemaVersion"] assert version_info.max_schema_version == version_data["maxSchemaVersion"] assert ws_client.close.called
27,550
def pes_events_scanner(pes_json_filepath): """Entrypoint to the library""" installed_pkgs = get_installed_pkgs() transaction_configuration = get_transaction_configuration() events = get_events(pes_json_filepath) arch = api.current_actor().configuration.architecture arch_events = filter_events_by_architecture(events, arch) add_output_pkgs_to_transaction_conf(transaction_configuration, arch_events) tasks = process_events(arch_events, installed_pkgs) filter_out_transaction_conf_pkgs(tasks, transaction_configuration) produce_messages(tasks)
27,551
def check_for_pattern(input_string): """ Check a string for a recurring pattern. If no pattern, return False. If pattern present, return smallest integer length of pattern. Warning: equal_divisions discards the remainder, so if it doesn't fit the pattern, you will get a false postive. The specific use is to check recurring decimal patterns, so it doesn't matter for that use. """ if len(input_string) < 2: return False length_of_division = 1 limit = len(input_string)//2 while length_of_division < limit + 1: divisions = equal_division(input_string, length_of_division) divisions = set(divisions) if len(divisions) == 1: return length_of_division else: length_of_division += 1 return False
27,552
def get_xml_namespace(file_name,pkg_type): """Get xml's namespace. Args: file_name: The path of xml file. Returns: xml_namespace: The namespace of xml. for example: xml file content: ... <config xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"> <interfaces xmlns="urn:ietf:params:xml:ns:yang:ietf-interfaces" xmlns:ni="urn:ietf:params:xml:ns:yang:ietf-network-instance"> ... </interfaces> </config> xml_namespace: 'urn:ietf:params:xml:ns:yang:ietf-interfaces' Raises: Exception: Capture execution exception. """ feature_namespaces = [] try: doc = parse(file_name) root = doc.documentElement if pkg_type in ['get','get-config']: if root.getElementsByTagNameNS("urn:ietf:params:xml:ns:netconf:base:1.0", "filter"): child_nodes = root.getElementsByTagNameNS("urn:ietf:params:xml:ns:netconf:base:1.0", "filter")[ 0].childNodes elif pkg_type == 'config': if root.getElementsByTagNameNS("urn:ietf:params:xml:ns:netconf:base:1.0", "config"): child_nodes = root.getElementsByTagNameNS("urn:ietf:params:xml:ns:netconf:base:1.0", "config")[ 0].childNodes else: child_nodes = root.childNodes logging.info("This is rpc-xml:" + file_name) for child_node in child_nodes: if child_node.nodeType == 1 and hasattr(child_node, 'namespaceURI'): feature_namespaces.append(child_node.namespaceURI) except ExpatError as expat_exception: xml_structure_except(expat_exception, file_name) except Exception as error_str: error_write(error_str) return feature_namespaces
27,553
def build_successors_table(tokens): """Return a dictionary: keys are words; values are lists of successors. >>> text = ['We', 'came', 'to', 'investigate', ',', 'catch', 'bad', 'guys', 'and', 'to', 'eat', 'pie', '.'] >>> table = build_successors_table(text) >>> sorted(table) [',', '.', 'We', 'and', 'bad', 'came', 'catch', 'eat', 'guys', 'investigate', 'pie', 'to'] >>> table['to'] ['investigate', 'eat'] >>> table['pie'] ['.'] >>> table['.'] ['We'] """ table = {} prev = '.' for word in tokens: if prev not in table: table[str(prev)] = [str(word)] else: # if already in table then add this word to the list of successors table[str(prev)] += [str(word)] prev = word return table
27,554
def _rolling_nanmin_1d(a, w=None): """ Compute the rolling min for 1-D while ignoring NaNs. This essentially replaces: `np.nanmin(rolling_window(T[..., start:stop], m), axis=T.ndim)` Parameters ---------- a : numpy.ndarray The input array w : numpy.ndarray, default None The rolling window size Returns ------- output : numpy.ndarray Rolling window nanmin. """ if w is None: w = a.shape[0] half_window_size = int(math.ceil((w - 1) / 2)) return minimum_filter1d(a, size=w)[ half_window_size : half_window_size + a.shape[0] - w + 1 ]
27,555
def get_model_init_fn(train_logdir, tf_initial_checkpoint, initialize_last_layer, last_layers, ignore_missing_vars=False): """Gets the function initializing model variables from a checkpoint. Args: train_logdir: Log directory for training. tf_initial_checkpoint: TensorFlow checkpoint for initialization. initialize_last_layer: Initialize last layer or not. last_layers: Last layers of the model. ignore_missing_vars: Ignore missing variables in the checkpoint. Returns: Initialization function. """ if tf_initial_checkpoint is None: tf.logging.info('Not initializing the model from a checkpoint.') return None if tf.train.latest_checkpoint(train_logdir): tf.logging.info('Ignoring initialization; other checkpoint exists') return None tf.logging.info('Initializing model from path: %s', tf_initial_checkpoint) # Variables that will not be restored. exclude_list = ['global_step'] if not initialize_last_layer: exclude_list.extend(last_layers) variables_to_restore = contrib_framework.get_variables_to_restore(exclude=exclude_list) if variables_to_restore: init_op, init_feed_dict = contrib_framework.assign_from_checkpoint( tf_initial_checkpoint, variables_to_restore, ignore_missing_vars=ignore_missing_vars) global_step = tf.train.get_or_create_global_step() def restore_fn(sess): sess.run(init_op, init_feed_dict) sess.run([global_step]) return restore_fn return None
27,556
def tokenize_protein(text): """ Tokenizes from a proteins string into a list of strings """ aa = ['A','C','D','E','F','G','H','I','K','L', 'M','N','P','Q','R','S','T','V','W','Y'] N = len(text) n = len(aa) i=0 seq = list() timeout = time.time()+5 for i in range(N): symbol = text[i] if (symbol in aa): seq.append(symbol) else: seq.append('X') if time.time() > timeout: break return seq
27,557
def read_xml_string() -> Callable[[int, int, str], str]: """Read an XML file to a string. Subsection string needs to include a prepending '-'.""" def _read_xml_string(number: int, year: int, subsection: str) -> str: xmlfile = f"tests/data/xmls/session-{number:03}-{year}{subsection}.xml" with open(xmlfile, "r", encoding="utf-8") as infile: lines = infile.readlines() return " ".join([line.strip() for line in lines]) return _read_xml_string
27,558
def main(): """ Entrypoint for events processor agent. :return: """ # operator should pass the name of the events channel that this events agent should subscribe to. # ch_name = os.environ.get('events_ch_name') idx = 0 while idx < 3: try: if ch_name: ch = EventsChannel(name=ch_name) else: ch = EventsChannel() logger.info("events processor made connection to rabbit, entering main loop") logger.info("events processor using abaco_conf_host_path={}".format(os.environ.get('abaco_conf_host_path'))) run(ch) except (rabbitpy.exceptions.ConnectionException, RuntimeError): # rabbit seems to take a few seconds to come up time.sleep(5) idx += 1 logger.critical("events agent could not connect to rabbitMQ. Shutting down!")
27,559
def compute_mean_std_data(filelist): """ Compute mean and standard deviation of a dataset. :param filelist: list of str :return: tuple of floats """ tensor_list = [] for file in filelist: img = Image.open(file) img_np = np.array(img).ravel() tensor_list.append(img_np.ravel()) pixels = np.concatenate(tensor_list, axis=0) return np.mean(pixels), np.std(pixels)
27,560
def delete(file, key): """ Delete a larry from a HDF5 archive. Parameters ---------- file : str or h5py.File Filename or h5py.File object of the archive. key : str Name of larry. Returns ------- out : None Nothing is returned, just None. See Also -------- la.save : Save larrys without a dictionary-like interface. la.load : Load larrys without a dictionary-like interface. la.IO : A dictionary-like interface to the archive. Examples -------- Create a larry: >>> x = la.larry([1, 2, 3]) Save the larry: >>> la.save('/tmp/x.hdf5', x, 'x') Now delete it: >>> la.delete('/tmp/x.hdf5', 'x') """ # Check input if type(key) != str: raise TypeError, 'key must be a string.' f, opened = _openfile(file) if key not in f: raise KeyError, "A larry named '%s' is not in archive." % key if not _is_archived_larry(f[key]): raise KeyError, 'key (%s) is not a larry.' % key # Delete del f[key] # Close if file is a filename if opened: f.close()
27,561
def ConstVal(val): """ Creates a LinComb representing a constant without creating a witness or instance variable Should be used carefully. Using LinCombs instead of integers where not needed will hurt performance """ if not isinstance(val, int): raise RuntimeError("Wrong type for ConstVal") return LinComb(val, backend.one() * val)
27,562
def filter_required_flat_tensor_spec(flat_tensor_spec): """Process a flat tensor spec structure and return only the required subset. Args: flat_tensor_spec: A flattened sequence (result of flatten_spec_structure) with the joined string paths as OrderedDict. Since we use OrderedDicts we can safely call flatten_spec_structure multiple times. Raises: ValueError: If the passed flat_tensor_spec is not a valid flat tensor_spec structure. Returns: filtered_flat_required_tensor_spec: The same flattened sequence but only the {key: tensor_spec} pairs for the non optional tensor_spec. """ if not is_flat_spec_or_tensors_structure(flat_tensor_spec): raise ValueError('Only flat tensor_spec structures are allowed.') filtered_flat_required_tensor_spec = TensorSpecStruct() for key, value in flat_tensor_spec.items(): if hasattr(value, 'is_optional') and value.is_optional: continue filtered_flat_required_tensor_spec[key] = value return filtered_flat_required_tensor_spec
27,563
def callback(photolog_id): """ twitter로부터 callback url이 요청되었을때 최종인증을 한 후 트위터로 해당 사진과 커멘트를 전송한다. """ Log.info("callback oauth_token:" + request.args['oauth_token']); Log.info("callback oauth_verifier:" + request.args['oauth_verifier']); # oauth에서 twiter로 부터 넘겨받은 인증토큰을 세션으로 부터 가져온다. OAUTH_TOKEN = session['OAUTH_TOKEN'] OAUTH_TOKEN_SECRET = session['OAUTH_TOKEN_SECRET'] oauth_verifier = request.args['oauth_verifier'] try: # 임시로 받은 인증토큰을 이용하여 twitter 객체를 만들고 인증토큰을 검증한다. twitter = Twython(current_app.config['TWIT_APP_KEY'], current_app.config['TWIT_APP_SECRET'], OAUTH_TOKEN, OAUTH_TOKEN_SECRET) final_step = twitter.get_authorized_tokens(oauth_verifier) # oauth_verifier를 통해 얻은 최종 인증토큰을 이용하여 twitter 객체를 새로 생성한다. twitter = Twython(current_app.config['TWIT_APP_KEY'], current_app.config['TWIT_APP_SECRET'], final_step['oauth_token'], final_step['oauth_token_secret']) session['TWITTER'] = twitter # 파라미터로 받은 photolog_id를 이용하여 해당 사진과 커멘트를 트위터로 전송한다. __send_twit(twitter, photolog_id) except TwythonError as e: Log.error("callback(): TwythonError , "+ str(e)) session['TWITTER_RESULT'] = str(e) return redirect(url_for('.show_all'))
27,564
def calc_adjusted_pvalues(adata, method='fdr_by'): """Calculates pvalues adjusted per sample with the given method. :param data: AnnData object annotated with model fit results. :param method: Name of pvalue adjustment method (from statsmodels.stats.multitest.multipletests). :return: AnnData object with adjusted pvalues. """ assert "X_pvalue" in adata.layers.keys(), ( 'No X_pvalue found in AnnData object, calculate pvalues first.') adata.layers["X_padj"] = (np.array([multiple_testing_nan(row, method=method) for row in adata.layers["X_pvalue"]])) return adata
27,565
def create_model(model_type='mobilenet'): """ Create a model. :param model_type: Must be one of 'alexnet', 'vgg16', 'resnet50' or 'mobilenet'. :return: Model. """ if model_type is 'alexnet': net = mdl.alexnet(input_shape, num_breeds, lr=0.001) elif model_type is 'vgg16': net = mdl.vgg16(input_shape, num_breeds, lr=0.0001) elif model_type is 'resnet50': net = mdl.resnet50(input_shape, num_breeds, lr=0.0002) # 0.01 elif model_type is 'mobilenet': net = mdl.mobilenet(input_shape, num_breeds, lr=0.0001) # 0.01 else: print("Model type is not supported.") return net
27,566
def generate_voter_groups(): """Generate all possible voter groups.""" party_permutations = list(permutations(PARTIES, len(PARTIES))) voter_groups = [VoterGroup(sequence) for sequence in party_permutations] return voter_groups
27,567
def add_image_fuzzy_pepper_noise(im, ration=0.1, rand_seed=None): """ generate and add a continues noise to an image :param ndarray im: np.array<height, width> input float image :param float ration: number means 0 = no noise :param rand_seed: random initialization :return ndarray: np.array<height, width> float image >>> img = np.zeros((5, 9), dtype=int) >>> img[1:4, 2:7] = 1 >>> img = add_image_fuzzy_pepper_noise(img, ration=0.5, rand_seed=0) >>> np.round(img, 2) array([[ 0.1 , 0.43, 0.21, 0.09, 0.15, 0.29, 0.12, 0. , 0. ], [ 0.23, 0. , 0.94, 0.86, 1. , 1. , 1. , 0. , 0. ], [ 0. , 0. , 1. , 1. , 1.08, 1. , 1. , 0.28, 0. ], [ 0. , 0.04, 1.17, 1.47, 1. , 1.09, 0.86, 0. , 0.24], [ 0.22, 0.23, 0. , 0.36, 0.28, 0.13, 0.4 , 0. , 0.33]]) """ logging.debug('... add smooth noise to a probability image') np.random.seed(rand_seed) rnd = 2 * (np.random.random(im.shape) - 0.5) rnd[abs(rnd) > ration] = 0 im_noise = np.abs(im - rnd) # plt.subplot(1,3,1), plt.imshow(im) # plt.subplot(1,3,2), plt.imshow(rnd) # plt.subplot(1,3,3), plt.imshow(im - rnd) # plt.show() return im_noise
27,568
def generator(fields, instance): """ Calculates the value needed for a unique ordered representation of the fields we are paginating. """ values = [] for field in fields: neg = field.startswith("-") # If the field we have to paginate by is the pk, get the pk field name. if field == 'pk': field = instance._meta.pk.name value = instance._meta.get_field(field.lstrip("-")).value_from_object(instance) if hasattr(value, "isoformat"): value = value.isoformat() value = unicode(value) if neg: # this creates the alphabetical mirror of a string, e.g. ab => zy, but for the full # range of unicode characters, e.g. first unicode char => last unicode char, etc value = u"".join([ unichr(0xffff - ord(x)) for x in value ]) values.append(value) values.append(unicode(instance.pk) if instance.pk else unicode(random.randint(0, 1000000000))) return NULL_CHARACTER.join(values)
27,569
def token_hash(token: Any, as_int: bool = True) -> Union[str, int]: """Hash of Token type Args: token (Token): Token to hash as_int (bool, optional): Encode hash as int Returns: Union[str, int]: Token hash """ return _hash((token.text, token.start, token.end, token.id), as_int=as_int)
27,570
def generate_totp_passcode(secret): """Generate TOTP passcode. :param bytes secret: A base32 encoded secret for TOTP authentication :returns: totp passcode as bytes """ if isinstance(secret, six.text_type): secret = secret.encode('utf-8') while len(secret) % 8 != 0: secret = secret + b'=' decoded = base64.b32decode(secret) totp = TOTP( decoded, 6, SHA1(), 30, backend=default_backend()) return totp.generate(timegm(datetime.utcnow().utctimetuple())).decode()
27,571
def all_ndcubes(request): """ All the above ndcube fixtures in order. """ return request.getfixturevalue(request.param)
27,572
def _create_root_content(): """ Make empty files and directories for msids, msids.pickle """ empty = set() if not os.path.exists(f"{ENG_ARCHIVE}/logs"): os.makedirs(f"{ENG_ARCHIVE}/logs") if not os.path.exists(f"{ENG_ARCHIVE}/archive"): os.makedirs(f"{ENG_ARCHIVE}/archive") if not os.path.exists(f"{ENG_ARCHIVE}/staging"): os.makedirs(f"{ENG_ARCHIVE}/staging") if not os.path.exists(f"{TELEMETRY_ARCHIVE}/msids.pickle"): with open(f"{TELEMETRY_ARCHIVE}/msids.pickle", 'wb') as f: pickle.dump(empty, f, protocol=0) if not os.path.exists(f"{ENG_ARCHIVE}/processed_files"): os.makedirs(f"{ENG_ARCHIVE}/processed_files")
27,573
def read_xmu(fpath: Path, scan: str='mu', ref: bool=True, tol: float=1e-4) -> Group: """Reads a generic XAFS file in plain format. Parameters ---------- fpath Path to file. scan Requested mu(E). Accepted values are transmission ('mu'), fluorescence ('fluo'), or None. The default is 'mu'. ref Indicates if the transmission reference ('mu_ref') should also be returned. The default is True. tol Tolerance in energy units to remove duplicate values. Returns ------- : Group containing the requested arrays. Notes ----- :func:`read_xmu` assumes the following column order in the file: 1. energy. 2. transmission/fluorescence mu(E). 3. transmission reference. See also -------- read_file : Reads a XAFS file based on specified columns. Examples -------- >>> from araucaria import Group >>> from araucaria.io import read_xmu >>> from araucaria.testdata import get_testpath >>> from araucaria.utils import check_objattrs >>> fpath = get_testpath('xmu_testfile.xmu') >>> # extracting mu and mu_ref scans >>> group_mu = read_xmu(fpath, scan='mu') >>> check_objattrs(group_mu, Group, attrlist=['mu', 'mu_ref']) [True, True] >>> # extracting only fluo scan >>> group_fluo = read_xmu(fpath, scan='fluo', ref=False) >>> check_objattrs(group_fluo, Group, attrlist=['fluo']) [True] >>> # extracting only mu_ref scan >>> group_ref = read_xmu(fpath, scan=None, ref=True) >>> check_objattrs(group_ref, Group, attrlist=['mu_ref']) [True] """ # default modes and channels scandict = ['mu', 'fluo', None] coldict = {'fluo':1, 'mu':1, 'mu_ref':2} # testing that scan exists in the current dictionary if scan not in scandict: warnings.warn("scan mode %s not recognized. Retrieving transmission measurement ('mu')." %scan) scan = 'mu' if scan is None: usecols = (0, coldict['mu_ref']) else: usecols = (0, coldict[scan], coldict['mu_ref']) group = read_file(fpath, usecols, scan, ref, tol) return (group)
27,574
def filter_order_by_oid(order, oid): """ :param order: :type order: :class:`tests.testapp.testapp.trading.models.Order` :param oid: Order ID :type oid: int """ return order.tid == oid
27,575
def start_recognition(rec_data, language): """start bidirectional streaming from microphone input to speech API""" client = speech.SpeechClient() if "language == 'kor'": config = speech.RecognitionConfig( encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16, sample_rate_hertz=SAMPLE_RATE, language_code="ko-KR" ) else: config = speech.RecognitionConfig( encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16, sample_rate_hertz=SAMPLE_RATE, language_code="en-US", #alternative_language_codes=["ko-KR"], ) streaming_config = speech.StreamingRecognitionConfig( config=config, interim_results=True ) mic_manager = ResumableMicrophoneStream(SAMPLE_RATE, CHUNK_SIZE) with mic_manager as stream: while not stream.closed: stream.audio_input = [] audio_generator = stream.generator() requests = ( speech.StreamingRecognizeRequest(audio_content=content) for content in audio_generator ) print("before response", language) responses = client.streaming_recognize(streaming_config, requests) # Now, put the transcription responses to use. get_speech_recognition(responses, stream, rec_data) if stream.result_end_time > 0: stream.final_request_end_time = stream.is_final_end_time stream.result_end_time = 0 stream.last_audio_input = [] stream.last_audio_input = stream.audio_input stream.audio_input = [] stream.restart_counter = stream.restart_counter + 1 if not stream.last_transcript_was_final: sys.stdout.write("\n") stream.new_stream = True
27,576
def process_pwdump_loot(loot_list=[], msf=None): """ Takes an array of loot records in loot_list, downloads the pwdump file and adds the users. """ from skaldship.passwords.utils import process_password_file, insert_or_update_acct db = current.globalenv['db'] #cache = current.globalenv['cache'] data = [] for loot_id in loot_list: loot = msf.loot_download(loot_id) if loot['ltype'] not in ['host.windows.pwdump', 'windows.hashes']: log("Loot is not a pwdump, it is a %s" % loot['ltype'], logging.ERROR) continue else: # process the pwdump file pw_data = loot['data'].split('\n') accounts = process_password_file( pw_data=pw_data, file_type='PWDUMP', source='Metasploit', ) # find the info/0 service id for the host host = get_host_record(loot['host']) query = (db.t_services.f_number == '0') & (db.t_services.f_proto == 'info') & (db.t_services.f_hosts_id == host.id) svc_id = db(query).select().first() if svc_id is None: # info/0 not found.. add it! svc_id = db.t_services.insert(f_proto="info", f_number="0", f_status="info", f_hosts_id=host.id) db.commit() # insert or update the account records resp_text = insert_or_update_acct(svc_id.id, accounts) log("Added pwdump records for host: %s" % host.f_ipaddr) data.append({loot['host']: resp_text}) return data
27,577
def has_poor_grammar(token_strings):
    """
    Return True when the token stream looks malformed: an odd number of
    double-quote tokens, a ')' with no pending '(', a nested '(', or a
    '(' that is never closed.
    """
    open_paren = False
    quotes = 0
    for tok in token_strings:
        if tok == '(':
            # A second '(' before the first one closes counts as bad.
            if open_paren:
                return True
            open_paren = True
        elif tok == ')':
            # ')' without a pending '(' is unbalanced.
            if not open_paren:
                return True
            open_paren = False
        elif tok == '"':
            quotes += 1
    return quotes % 2 == 1 or open_paren
27,578
def save_blend_scene(path: str):
    """Save the current Blender scene to a .blend file.

    :param path: destination file path for the saved .blend file.
    """
    bpy.ops.wm.save_as_mainfile(filepath=path)
27,579
def run(main, *, debug=False):
    """
    Execute *main* on a dedicated RunnerThread and block until it finishes.

    Since we're using an asyncio loop to run wait() in order to be
    compatible with async calls, each run happens on its own thread to
    allow nested calls to wait(). Re-raises any exception raised on the
    thread; otherwise returns its result.
    """
    runner = RunnerThread(main, debug=debug)
    runner.start()
    runner.join()
    if runner.exception:
        raise runner.exception
    return runner.result
27,580
def test_backref_thumbnail_div():
    """Test if the thumbnail div generates the correct string"""
    html_div = sg._thumbnail_div('fake_dir', 'test_file.py',
                                 'test formating',
                                 is_backref=True)
    # NOTE(review): the expected reST must match the generator's output
    # byte-for-byte, including internal indentation — confirm against
    # sphinx-gallery's _thumbnail_div before editing this literal.
    reference = """
.. raw:: html

    <div class="sphx-glr-thumbcontainer" tooltip="test formating">

.. only:: html

    .. figure:: /fake_dir/images/thumb/sphx_glr_test_file_thumb.png

        :ref:`sphx_glr_fake_dir_test_file.py`

.. raw:: html

    </div>

.. only:: not html

 * :ref:`sphx_glr_fake_dir_test_file.py`
"""
    assert html_div == reference
27,581
def csv_dataset_reader(path):
    """
    Read a headerless comma-separated file into a Pandas dataframe with
    columns named 'age', 'weight', and 'height'.

    :param path: Path to and name of the csv file to read.
    :return: A Pandas dataframe with columns ['age', 'weight', 'height'].
    """
    import pandas as pd

    # Supplying `names` lets read_csv assign the column labels in one
    # step instead of renaming them after the fact.
    data = pd.read_csv(path, sep=",", header=None,
                       names=['age', 'weight', 'height'])
    return data
27,582
def get_daily_blurb_info():
    """Scrape the site and parse the result into daily blurb info."""
    page_html, image_1day, image_1year = _scrape()
    return _parse(page_html, image_1day, image_1year)
27,583
def generate_random_ring_element(size, ring_size=(2 ** 64), **kwargs):
    """Sample a uniformly random long tensor from the signed ring of the
    given ring_size, wrapping CUDA results in a CUDALongTensor."""
    # TODO (brianknott): Check whether this RNG contains the full range we want.
    low = -(ring_size // 2)
    high = (ring_size - 1) // 2
    sample = torch.randint(low, high, size, dtype=torch.long, **kwargs)
    if sample.is_cuda:
        return CUDALongTensor(sample)
    return sample
27,584
def Move(args, callback):
  """Move all files matching a pattern to a directory.

  args[0] is the source pattern and args[1] the destination directory
  (must end with '/'); both must resolve to the same registered bucket.
  Prompts for confirmation before copying, optionally verifies each copy
  and optionally deletes the source (per options.options flags).
  Invokes callback() when done or aborted.
  """
  assert len(args) == 2
  pattern = args[0]
  dest = args[1]
  res_src = store_utils.ParseFullPath(pattern)
  res_dst = store_utils.ParseFullPath(dest)
  assert res_src is not None and res_dst is not None, 'Source or destination not part of a registered bucket'
  assert res_src[0] == res_dst[0], 'Moving between buckets not supported'
  bucket, pattern = res_src
  dest_dir = res_dst[1]
  src_prefix = store_utils.PrefixFromPattern(pattern)
  assert dest_dir.endswith('/'), 'Destination must be a directory (with trailing slash)'
  # Refuse overlapping prefixes: moving a tree into itself (or onto its
  # own parent) would clobber files mid-iteration.
  assert not src_prefix.startswith(dest_dir) and not dest_dir.startswith(src_prefix), \
      'Source and destination must not intersect'
  source_dir = os.path.dirname(src_prefix) + '/'
  store = ObjectStore.GetInstance(bucket)
  # Get list of files matching the pattern as well as any existing files in the destination directory.
  source_files = yield gen.Task(store_utils.ListRecursively, store, pattern)
  res = yield gen.Task(store_utils.ListRecursively, store, dest_dir)
  dest_files = set(res)
  if len(source_files) == 0:
    callback()
    return
  answer = raw_input("Move %d files from %s/%s to %s/%s? [y/N] " %
                     (len(source_files), bucket, source_dir, bucket, dest_dir)).strip()
  if answer != 'y':
    callback()
    return
  done = 0
  last_update = 0.0
  bytes_read = bytes_written = 0
  for src_name in source_files:
    # Print a throughput progress line at most every 10 seconds.
    delta = time.time() - last_update
    if (delta) > 10.0:
      print '%d/%d, read %.2f KB/s, wrote %.2f KB/s' % (done, len(source_files), bytes_read / delta / 1024, bytes_written / delta / 1024)
      last_update = time.time()
      bytes_read = bytes_written = 0
    done += 1
    # Destination name keeps the path relative to the source directory.
    dst_name = dest_dir + src_name[len(source_dir):]
    if dst_name in dest_files:
      # Resetting last_update forces a fresh progress line after the prompt.
      last_update = 0.0
      answer = raw_input('File exists: %s/%s. Overwrite, skip, or abort? [o/a/S] ' % (bucket, dst_name))
      if answer == 'a':
        callback()
        return
      elif answer != 'o':
        continue
    # Read source file.
    contents = yield gen.Task(store.Get, src_name)
    bytes_read += len(contents)
    # Write destination file.
    yield gen.Task(store.Put, dst_name, contents)
    bytes_written += len(contents)
    if options.options.verify:
      # Read dest file back.
      dst_contents = yield gen.Task(store.Get, dst_name)
      bytes_read += len(dst_contents)
      if dst_contents != contents:
        # Verification failed: remove the bad copy and keep the source.
        logging.warning('Verification failed for %s/%s, deleting destination' % (bucket, dst_name))
        yield gen.Task(store.Delete, dst_name)
        continue
    if options.options.delete_source:
      # Delete original file.
      yield gen.Task(store.Delete, src_name)
  callback()
27,585
def otsu_binarization(img):
    """
    Binarize an image with Otsu's automatic thresholding.

    :param img: input image
    :return: thresholded image
    """
    _, binarized = cv2.threshold(img, 0, 255,
                                 cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return binarized
27,586
def main(tablefile, args=None):
    """Maps the nodes for the source:alias tablefile.

    This takes the path to an tablefile (see table_utilities.main) and maps
    the nodes in it using the Redis DB. It then outputs a status files in
    the format (table_hash, n1, n2, edge_type, weight, edge_hash, line_hash,
    status, status_desc), where status is production if both nodes mapped
    and unmapped otherwise. It also outputs an edge file with all rows where
    status is production, in the format (edge_hash, n1, n2, edge_type,
    weight), and an edge2line file in the format (edge_hash, line_hash).

    Args:
        tablefile (str): path to an tablefile to be mapped
        args (Namespace): args as populated namespace or 'None' for defaults
    """
    if args is None:
        args = cf.config_args()
    # LINCS tables are imported directly rather than mapped through Redis.
    if 'lincs.level4' in tablefile or 'lincs.exp_meta' in tablefile:
        if os.path.isfile(tablefile.replace('conv', 'node')):
            iu.import_pnode(tablefile.replace('conv', 'node'), args)
        iu.import_edge(tablefile, args)
        return
    rdb = ru.get_database(args)
    # Derive all output file names from the input table file name.
    edge_file = tablefile.replace('table', 'edge')
    status_file = tablefile.replace('table', 'status')
    ue_file = tablefile.replace('table', 'unique.edge')
    ue2l_file = tablefile.replace('table', 'unique.edge2line')
    us_file = tablefile.replace('table', 'unique.status')
    src_data_dir = os.path.join(args.working_dir, args.data_path, cf.DEFAULT_MAP_PATH)
    species_file = os.path.join(src_data_dir, 'species', 'species.json')
    with open(species_file, 'r') as infile:
        species_dict = json.load(infile)
    supported_taxids = ['unknown'] + list(species_dict.values())
    with open(tablefile, 'r') as infile, \
        open(edge_file, 'w') as edge, \
        open(status_file, 'w') as e_stat:
        reader = csv.reader(infile, delimiter='\t')
        s_writer = csv.writer(e_stat, delimiter='\t', lineterminator='\n')
        e_writer = csv.writer(edge, delimiter='\t', lineterminator='\n')
        # First pass: collect gene ids per (hint, taxid) group so each
        # group can be converted with one batched Redis call.
        to_map = defaultdict(list)
        for line in reader:
            (n1, hint, ntype, taxid) = line[1:5]
            if ntype == 'gene' and taxid in supported_taxids:
                to_map[hint, taxid].append(n1)
            (n2, hint,
                ntype, taxid) = line[5:9]
            if ntype == 'gene' and taxid in supported_taxids:
                to_map[hint, taxid].append(n2)
        # Rewind the underlying file so the same csv reader yields the
        # rows again for the second pass.
        infile.seek(0)
        mapped = {k: {n: m for m, n in zip(ru.conv_gene(rdb, v, k[0], k[1]), v)}
                  for k, v in to_map.items()}
        # Second pass: emit one edge row and one status row per input line.
        for line in reader:
            (n1, hint, ntype, taxid) = line[1:5]
            if ntype == 'gene':
                if taxid not in supported_taxids:
                    n1_map = 'unmapped-unsupported-species'
                else:
                    n1_map = mapped[hint, taxid][n1]
            else:
                n1_map = n1
            (n2, hint, ntype, taxid) = line[5:9]
            if ntype == 'gene':
                if taxid not in supported_taxids:
                    n2_map = 'unmapped-unsupported-species'
                else:
                    n2_map = mapped[hint, taxid][n2]
            else:
                n2_map = n2
            chksum = line[0]  #line chksum
            et_map = line[9]
            weight = line[10]
            t_chksum = line[11]  #raw edge chksum
            # Edge hash identifies the mapped (n1, n2, edge_type) triple.
            hasher = hashlib.md5()
            hasher.update('\t'.join([n1_map, n2_map, et_map]).encode())
            e_chksum = hasher.hexdigest()
            if 'unmapped' in n1_map:
                status = 'unmapped'
                status_desc = n1_map
            elif 'unmapped' in n2_map:
                status = 'unmapped'
                status_desc = n2_map
            else:
                status = 'production'
                status_desc = 'mapped'
            e_writer.writerow([e_chksum, n1_map, n2_map, et_map, weight])
            s_writer.writerow([t_chksum, n1_map, n2_map, et_map, weight, e_chksum, \
                chksum, status, status_desc])
    # Deduplicate the edge and status files into the unique.* outputs.
    tu.csu(edge_file, ue_file)
    tu.csu(status_file, us_file)
    tu.csu(us_file, ue2l_file, [6, 7])
27,587
def rosstack_depends_1(s):
    """
    @param s: stack name
    @type  s: str
    @return: A list of the names of the stacks which s depends on directly
    @rtype: list
    """
    output = rosstackexec(['depends1', s])
    return output.split()
27,588
def load_db_dump(dump_file):
    """Load db dump on a remote environment."""
    require('environment')
    # Stage the dump under the remote home dir, named after the environment.
    remote_sql = os.path.join(env.home, '%(environment)s.sql' % env)
    put(dump_file, remote_sql, use_sudo=True)
    sudo('psql -d %s -f %s' % (env.db, remote_sql), user=env.project_user)
27,589
def _config_file_is_to_update():
    """
    Ask the user if the configuration file should be updated or not.

    :return: True if the user wants to update the configuration file and
        False otherwise.
    :rtype: bool
    """
    answer = yes_or_no_input("Do you want to save the account on the configuration file?")
    # Comparing directly avoids the redundant if/return-True/return-False
    # pattern of the original.
    return answer == USER_INPUT_YES
27,590
def _save_predictions(drug_disease_assocs, store, key_name):
    """
    Saves the predictions into an HDFStore using the key_name as key.
    """
    long_format = drug_disease_assocs.unstack().reset_index()
    predictions_data = long_format.rename(
        columns={"level_0": "trait", "perturbagen": "drug", 0: "score"}
    )

    for col in ("trait", "drug"):
        predictions_data[col] = predictions_data[col].astype("category")

    # Every (trait, drug) pair must have a score — no NaNs allowed.
    assert predictions_data.shape == predictions_data.dropna().shape

    print(f" shape: {predictions_data.shape}")
    display(predictions_data.describe())

    # save
    print(f" key: {key_name}")
    store.put(key_name, predictions_data, format="table")
27,591
def countbam(sortedbam, outdir):
    """calculates the raw counts from a BAM index

    parameters
    ----------
    sortedbam
        string, the name of the sorted bam file (expected to end in ".bam")
    outdir
        string, the path of the output directory (kept for interface
        compatibility; the counts file is written next to the BAM)

    returns
    ----------
    counts_file = file containing the counts
    """
    # Replace the ".bam" suffix with "count" -> "<name>.count".
    counts_file = f"{sortedbam[:-3]}count"
    cmd_count = f"samtools idxstats {sortedbam} > {counts_file}"
    try:
        # shell=True is required here for the output redirection.
        subprocess.check_output(cmd_count, shell=True)
    except subprocess.CalledProcessError:
        # Best-effort: report the failure but still return the (possibly
        # empty) counts file path, matching the original behaviour.
        print('Unable to calculate raw counts from BAM')
    return counts_file
27,592
def funcScrapeTableWunderground(html_tree, forecast_date_str): """ """ # This will get you the Wunderground table headers for future hour conditions columns = html_tree.xpath("//table[@id='hourly-forecast-table']/thead//button[@class='tablesaw-sortable-btn']") rows = html_tree.xpath("//table[@id='hourly-forecast-table']/tbody/tr") fill_cols = np.asarray([]) for column in columns: # print etree.tostring(column) col = column.xpath("text()")[0] fill_cols = np.append(fill_cols, col) # print(col) # Make a DataFrame to fill dayDf = DataFrame(columns = fill_cols)#.set_index(fill_cols[0]) # This will go through the rows of the table and grab actual values for row in rows: values = row.xpath("td") for i, value in enumerate(values): col = columns[i].xpath("text()")[0] val = value.xpath("ng-saw-cell-parser/div//span/text()") # print(val) if col == 'Time': timeVal = val # Initializing a single row. The goal is to make it look just like what dayDf looks like hourRow = pd.DataFrame([forecast_date_str + ' ' + (''.join(timeVal))], columns = [col])#.set_index elif col == 'Conditions': hourRow[col] = val[1] else: if col == 'Pressure': val = value.xpath("ng-saw-cell-parser//span/span/text()") val = [val[0] + ' ' + val[2][0:2]] if col in ['Precip', 'Amount']: # These are hiding behind hyperlinks. Need to be smart val = value.xpath("ng-saw-cell-parser/div//span/a/text()") try: hourRow[col] = val[0] except: hourRow[col] = np.nan dayDf = dayDf.append(hourRow) dayDf['Time'] = pd.to_datetime(dayDf['Time']) # print(columns[i].xpath("text()")[0]) # print value.xpath("ng-saw-cell-parser/div//span/text()") return dayDf
27,593
def as_finite_diff(derivative, points=1, x0=None, wrt=None):
    """
    Returns an approximation of a derivative of a function in
    the form of a finite difference formula. The expression is a
    weighted sum of the function at a number of discrete values of
    (one of) the independent variable(s).

    Parameters
    ==========
    derivative: a Derivative instance (needs to have an variables
        and expr attribute).
    points: sequence or coefficient, optional
        If sequence: discrete values (length >= order+1) of the
        independent variable used for generating the finite
        difference weights.
        If it is a coefficient, it will be used as the step-size
        for generating an equidistant sequence of length order+1
        centered around x0. default: 1 (step-size 1)
    x0: number or Symbol, optional
        the value of the independent variable (wrt) at which the
        derivative is to be approximated. default: same as wrt
    wrt: Symbol, optional
        "with respect to" the variable for which the (partial)
        derivative is to be approximated for. If not provided it
        is required that the Derivative is ordinary. default: None

    Examples
    ========
    >>> from sympy import symbols, Function, exp, sqrt, Symbol, as_finite_diff
    >>> x, h = symbols('x h')
    >>> f = Function('f')
    >>> as_finite_diff(f(x).diff(x))
    -f(x - 1/2) + f(x + 1/2)

    The default step size and number of points are 1 and ``order + 1``
    respectively. We can change the step size by passing a symbol
    as a parameter:

    >>> as_finite_diff(f(x).diff(x), h)
    -f(-h/2 + x)/h + f(h/2 + x)/h

    We can also specify the discretized values to be used in a sequence:

    >>> as_finite_diff(f(x).diff(x), [x, x+h, x+2*h])
    -3*f(x)/(2*h) + 2*f(h + x)/h - f(2*h + x)/(2*h)

    The algorithm is not restricted to use equidistant spacing, nor
    do we need to make the approximation around x0, but we can get
    an expression estimating the derivative at an offset:

    >>> e, sq2 = exp(1), sqrt(2)
    >>> xl = [x-h, x+h, x+e*h]
    >>> as_finite_diff(f(x).diff(x, 1), xl, x+h*sq2)
    2*h*((h + sqrt(2)*h)/(2*h) - (-sqrt(2)*h + h)/(2*h))*f(E*h + x)/\
((-h + E*h)*(h + E*h)) + (-(-sqrt(2)*h + h)/(2*h) - \
(-sqrt(2)*h + E*h)/(2*h))*f(-h + x)/(h + E*h) + \
(-(h + sqrt(2)*h)/(2*h) + (-sqrt(2)*h + E*h)/(2*h))*f(h + x)/(-h + E*h)

    Partial derivatives are also supported:

    >>> y = Symbol('y')
    >>> d2fdxdy=f(x,y).diff(x,y)
    >>> as_finite_diff(d2fdxdy, wrt=x)
    -f(x - 1/2, y) + f(x + 1/2, y)

    See also
    ========

    sympy.calculus.finite_diff.apply_finite_diff
    sympy.calculus.finite_diff.finite_diff_weights
    """
    if wrt is None:
        wrt = derivative.variables[0]
        # we need Derivative to be univariate to guess wrt
        if any(v != wrt for v in derivative.variables):
            raise ValueError('if the function is not univariate' +
                             ' then `wrt` must be given')

    # Order of the derivative w.r.t. wrt = how many times wrt appears.
    order = derivative.variables.count(wrt)

    if x0 is None:
        x0 = wrt

    if not iterable(points):
        # points is simply the step-size, let's make it a
        # equidistant sequence centered around x0
        if order % 2 == 0:
            # even order => odd number of points, grid point included
            points = [x0 + points*i for i in
                      range(-order//2, order//2 + 1)]
        else:
            # odd order => even number of points, half-way wrt grid point
            points = [x0 + points*i/S(2) for i in
                      range(-order, order + 1, 2)]

    # A finite-difference formula of order `order` needs at least
    # order+1 sample points.
    if len(points) < order+1:
        raise ValueError("Too few points for order %d" % order)
    return apply_finite_diff(order, points, [
        derivative.expr.subs({wrt: x}) for x in points], x0)
27,594
def is_prime(pp: int) -> bool: """ Returns True if pp is prime otherwise, returns False Note: not a very sophisticated check """ if pp == 2 or pp == 3: return True elif pp < 2 or not pp % 2: return False odd_n = range(3, int(sqrt(pp) + 1), 2) return not any(not pp % i for i in odd_n)
27,595
def createChromosome(totQty, menuData):
    """
    Creates the chromosome with Qty assigned to Each Dish such that
    sum of all Qty equals to the number of dishes to be ordered

    totQty = Number of Dishes to be Ordered
    returns chromosome of Dish objects (dish id + corresponding quantity)
    """
    qtySeq = randSeq2(len(menuData), totQty)
    # Pair each dish key with its generated quantity instead of tracking
    # a manual index counter.
    return [Dish(key, qty) for key, qty in zip(menuData, qtySeq)]
27,596
def add_residual(transformed_inputs, original_inputs, zero_pad=True):
    """Adds a skip branch to residual block to the output.

    Downsamples the skip branch to match the spatial size of the
    transformed tensor and reconciles channel counts either by zero-padding
    or by a 1x1 convolution, then sums the two branches.
    """
    in_shape = original_inputs.shape.as_list()
    out_shape = transformed_inputs.shape.as_list()
    delta = out_shape[3] - in_shape[3]
    stride = int(np.ceil(in_shape[1] / out_shape[1]))

    if stride > 1:
        # Spatially downsample the skip branch to match the main branch.
        original_inputs = tf.layers.average_pooling2d(
            original_inputs, pool_size=[stride] * 2, strides=stride,
            padding="same")

    if delta != 0:
        if zero_pad:
            # Pad channels with zeros at the beginning and end.
            if delta > 0:
                original_inputs = tf.pad(
                    original_inputs,
                    [[0, 0], [0, 0], [0, 0], [delta // 2, delta // 2]],
                    mode="CONSTANT", constant_values=0)
            else:
                transformed_inputs = tf.pad(
                    transformed_inputs,
                    [[0, 0], [0, 0], [0, 0], [-delta // 2, -delta // 2]],
                    mode="CONSTANT", constant_values=0)
        else:
            # 1x1 convolution projects the skip branch to the target depth.
            original_inputs = tf.layers.conv2d(
                original_inputs, filters=out_shape[3], kernel_size=(1, 1),
                strides=(1, 1), padding="same", activation=None,
                use_bias=False)

    net = original_inputs + transformed_inputs
    return net, original_inputs
27,597
def test_load_database_from_path(tmp_path):
    """Test that database is generated because it does not exist."""
    db_file = tmp_path / "test.db"
    metadata = load_database(path=db_file)
    assert isinstance(metadata, sqlalchemy.MetaData)
    assert metadata.bind is not None
27,598
def _find_bad_channels_in_epochs(epochs, picks, use_metrics, thresh, max_iter):
    """Implements the fourth step of the FASTER algorithm.

    This function attempts to automatically mark bad channels in each epochs by
    performing outlier detection.

    Additional Parameters
    ---------------------
    use_metrics : list of str
        List of metrics to use. Can be any combination of:
        'amplitude', 'variance', 'deviation', 'median_gradient'
        Defaults to all of them.
    thresh : float
        The threshold value, in standard deviations, to apply. A channel
        crossing this threshold value is marked as bad. Defaults to 3.
    max_iter : int
        The maximum number of iterations performed during outlier detection
        (defaults to 1, as in the original FASTER paper).

    Returns
    -------
    dict mapping metric name -> boolean (n_epochs, n_picks) mask of bad
    channel-in-epoch entries.
    """
    # Each metric reduces the (epochs, channels, times) data over the time
    # axis (axis=2) to one scalar per epoch/channel.
    metrics = {
        'amplitude': lambda x: np.ptp(x, axis=2),
        'deviation': lambda x: _deviation(x),
        'variance': lambda x: np.var(x, axis=2),
        'median_gradient': lambda x: np.median(np.abs(np.diff(x)), axis=2),
        'line_noise': lambda x: _freqs_power(x, epochs.info['sfreq'],
                                             [50, 60]),
    }

    if use_metrics is None:
        use_metrics = metrics.keys()

    info = pick_info(epochs.info, picks, copy=True)
    data = epochs.get_data()[:, picks]
    # One boolean (epoch x channel) mask per metric; filled in below.
    bads = dict((m, np.zeros((len(data), len(picks)), dtype=bool)) for
                m in metrics)
    # Outlier detection runs separately per channel type so that channel
    # types with different scales are not compared against each other.
    for ch_type, chs in _picks_by_type(info):
        ch_names = [info['ch_names'][k] for k in chs]
        chs = np.array(chs)
        for metric in use_metrics:
            logger.info('Bad channel-in-epoch detection on %s channels:'
                        % ch_type.upper())
            s_epochs = metrics[metric](data[:, chs])
            for i_epochs, epoch in enumerate(s_epochs):
                outliers = find_outliers(epoch, thresh, max_iter)
                if len(outliers) > 0:
                    bad_segment = [ch_names[k] for k in outliers]
                    logger.info('Epoch %d, Bad by %s:\n\t%s' % (
                        i_epochs, metric, bad_segment))
                    bads[metric][i_epochs, chs[outliers]] = True

    return bads
27,599