Columns: content (string, lengths 22 to 815k) · id (int64, 0 to 4.91M)
def get_systemd_services(service_names):
    """
    :param service_names: {'service_unit_id': 'service_display_name'}
        e.g., {'cloudify-rabbitmq.service': 'RabbitMQ'}
    """
    systemd_services = get_services(service_names)
    statuses = []
    services = {}
    for service in systemd_services:
        is_service_running = service['instances'] and (
            service['instances'][0]['state'] == 'running')
        status = NodeServiceStatus.ACTIVE if is_service_running \
            else NodeServiceStatus.INACTIVE
        services[service['display_name']] = {
            'status': status,
            'extra_info': {
                'systemd': service
            }
        }
        statuses.append(status)
    return services, statuses
11,000
def json_handler(obj):
    """serialize non-serializable data for json"""
    import datetime
    from werkzeug.local import LocalProxy
    # serialize date
    if isinstance(obj, (datetime.date, datetime.timedelta, datetime.datetime)):
        return unicode(obj)
    elif isinstance(obj, LocalProxy):
        return unicode(obj)
    else:
        raise TypeError, """Object of type %s with value of %s is not JSON serializable""" % \
            (type(obj), repr(obj))
11,001
def downgrade():
    """Reverts changes performed by upgrade()."""
    op.drop_refresh_updated_at_trigger("mod_release")
    op.drop_table("mod_release")
11,002
def _random_prefix(sentences):
    """
    prefix random generator
    input: list of input sentences
    output: random word
    """
    words = _word_dict(sentences)
    return choice(words)
11,003
def bootstrap_gitdir():
    """Installs `gitdir` and configures `git`. Requires the `python` setup."""
    subprocess.run(['git', 'config', '--global', 'merge.conflictstyle', 'diff3'], check=True)
    gitdir_gitdir = git_dir(existing_only=False) / 'github.com' / 'fenhl' / 'gitdir'
    if not gitdir_gitdir.exists():
        gitdir_gitdir.mkdir(parents=True)
    if not (gitdir_gitdir / 'master').exists():
        subprocess.run(['git', 'clone', 'https://github.com/fenhl/gitdir.git', 'master'], cwd=str(git_dir() / 'github.com' / 'fenhl' / 'gitdir'), check=True)
    if not py_dir().exists():
        try:
            py_dir().mkdir()
        except PermissionError:
            subprocess.run(['sudo', 'mkdir', str(py_dir())], check=True)
    if not (py_dir() / 'gitdir').exists():
        try:
            (py_dir() / 'gitdir').symlink_to(gitdir_gitdir / 'master' / 'gitdir')
        except PermissionError:
            subprocess.run(['sudo', 'ln', '-s', str(gitdir_gitdir / 'master' / 'gitdir'), str(py_dir() / 'gitdir')], check=True)
11,004
def test_build_request_no_server():
    """ Test Building of URL """
    # TODO: add put and delete request
    for method_type in ["get", "post"]:
        api_request = create_api_request(GET_ITEMS_ENDPOINT, server_description="", method_type=method_type)
        assert api_request.build_url() is None
11,005
def call(*args, **kwargs):
    """Thin wrapper over the `prelim_call` function.

    This function applies a deduplication procedure to preliminary calls.
    """
    for call in merge_adjacent(dedup(prelim_call(*args, **kwargs))):
        yield call
11,006
def user_logout(*args, **kwargs):  # pylint: disable=unused-argument
    """
    This endpoint is the landing page for the logged-in user
    """
    # Delete the Oauth2 token for this session
    log.info('Logging out User: %r' % (current_user,))
    delete_session_oauth2_token()
    logout_user()
    flash('You were successfully logged out.', 'warning')
    return flask.redirect(_url_for('backend.home'))
11,007
def create_ith_simcat(d=None): """Write 'simcat' and 'skipped_ids' tables for a given sample of sources Args: d: {'Samp': fits_table for the properties of sources in the brick 'brickwcs': WCS object for the brick 'metacat': fits_table with configuration params for the simulated sources } Returns: Nothing, saves the 'simcat' and 'skipped_ids' tables Adds 'simcat' table to dict 'd' """ assert(d is not None) log = logging.getLogger('decals_sim') #chunksuffix = '{:02d}'.format(ith_chunk) # Build and write out the simulated object catalog. #seed= d['seeds'][ith_chunk] #simcat = build_simcat(d['nobj'], d['brickname'], d['brickwcs'], d['metacat'], seed) simcat, skipped_ids = build_simcat(Samp=d['Samp'],brickwcs=d['brickwcs'],meta=d['metacat']) # Simcat simcat_dir = get_outdir_runbrick(d['decals_sim_dir'], d['brickname'],d['rowst'], do_skipids=d['do_skipids'],do_more=d['do_more']) if not os.path.exists(simcat_dir): os.makedirs(simcat_dir) #simcatfile = os.path.join(simcat_dir, 'simcat-{}-{}-row{}-{}.fits'.format(d['brickname'], d['objtype'],rowstart,rowend)) # chunksuffix)) simcatfile = os.path.join(simcat_dir, 'simcat'+get_fnsuffix(**d)) if os.path.isfile(simcatfile): os.remove(simcatfile) simcat.writeto(simcatfile) log.info('Wrote {}'.format(simcatfile)) # Skipped Ids if len(skipped_ids) > 0: skip_table= fits_table() skip_table.set('ids',skipped_ids) name= os.path.join(simcat_dir,'skippedids'+get_fnsuffix(**d)) if os.path.exists(name): os.remove(name) log.info('Removed %s' % name) skip_table.writeto(name) log.info('Wrote {}'.format(name)) # add to dict d['simcat']= simcat d['simcat_dir']= simcat_dir
11,008
def redraw_frame(image, names, aligned):
    """ Adds names and bounding boxes to the frame """
    i = 0
    unicode_font = ImageFont.truetype("DejaVuSansMono.ttf", size=17)
    img_pil = Image.fromarray(image)
    draw = ImageDraw.Draw(img_pil)
    for face in aligned:
        draw.rectangle((face[0], face[1], face[2], face[3]), outline=(0, 255, 0), width=2)
        if names is not None and len(names) > i:
            if names[i] == 'unknown':
                draw.text((face[0], face[1] - 30), "unknown", fill=(0, 0, 255), font=unicode_font)
                draw.rectangle((face[0], face[1], face[2], face[3]), outline=(0, 0, 255), width=2)
            else:
                draw.text((face[0], face[1] - 30), names[i], fill=(0, 255, 0), font=unicode_font)
        if names is None or len(names) <= i:
            draw.text((face[0], face[1] - 30), 'refreshing...', fill=(255, 0, 0), font=unicode_font)
        i += 1
    return np.array(img_pil)
11,009
def _tp_relfq_name(tp, tp_name=None, assumed_globals=None, update_assumed_globals=None, implicit_globals=None): # _type: (type, Optional[Union[Set[Union[type, types.ModuleType]], Mapping[Union[type, types.ModuleType], str]]], Optional[bool]) -> str """Provides the fully qualified name of a type relative to a set of modules and types that is assumed as globally available. If assumed_globals is None this always returns the fully qualified name. If update_assumed_globals is True, this will return the plain type name, but will add the type to assumed_globals (expected to be a set). This way a caller can query how to generate an appropriate import section. If update_assumed_globals is False, assumed_globals can alternatively be a mapping rather than a set. In that case the mapping is expected to be an alias table, mapping modules or types to their alias names desired for displaying. update_assumed_globals can be None (default). In that case this will return the plain type name if assumed_globals is None as well (default). This mode is there to have a less involved default behavior. """ if tp_name is None: tp_name = util.get_class_qualname(tp) if implicit_globals is None: implicit_globals = _implicit_globals else: implicit_globals = implicit_globals.copy() implicit_globals.update(_implicit_globals) if assumed_globals is None: if update_assumed_globals is None: return tp_name md = sys.modules[tp.__module__] if md in implicit_globals: return tp_name name = tp.__module__+'.'+tp_name pck = None if not (md.__package__ is None or md.__package__ == '' or name.startswith(md.__package__)): pck = md.__package__ return name if pck is None else pck+'.'+name if tp in assumed_globals: try: return assumed_globals[tp] except: return tp_name elif hasattr(tp, '__origin__') and tp.__origin__ in assumed_globals: try: return assumed_globals[tp.__origin__] except: return tp_name # For some reason Callable does not have __origin__, so we special-case # it here. Todo: Find a cleaner solution. elif is_Callable(tp) and typing.Callable in assumed_globals: try: return assumed_globals[typing.Callable] except: return tp_name elif update_assumed_globals == True: if not assumed_globals is None: if hasattr(tp, '__origin__') and not tp.__origin__ is None: toadd = tp.__origin__ elif is_Callable(tp): toadd = typing.Callable else: toadd = tp if not sys.modules[toadd.__module__] in implicit_globals: assumed_globals.add(toadd) return tp_name else: md = sys.modules[tp.__module__] if md in implicit_globals: return tp_name md_name = tp.__module__ if md in assumed_globals: try: md_name = assumed_globals[md] except: pass else: if not (md.__package__ is None or md.__package__ == '' or md_name.startswith(md.__package__)): md_name = md.__package__+'.'+tp.__module__ return md_name+'.'+tp_name
11,010
def is_dapr_actor(cls: Type[Actor]) -> bool:
    """Checks if class inherits :class:`Actor`.

    Args:
        cls (type): The Actor implementation.

    Returns:
        bool: True if cls inherits :class:`Actor`. Otherwise, False
    """
    return issubclass(cls, Actor)
11,011
def save_v1(filename, data, folder="", compressed=False):
    """
    Create a folder structure inside a zipfile
    Add .json and .npy and .npz files with the correct names
    And subfolders for more complicated objects with the same layout
    Each class should have a save and a load method
    which can be used for this purpose

    Parameters
    ----------
    filename : str
        Filename of the final zipfile
    data : SME_struct
        data to save
    folder : str, optional
        subfolder to save data to
    compressed : bool, optional
        whether to compress the output
    """
    # We use LZMA for compression, since that yields the
    # smallest filesize of the existing compression algorithms
    if not compressed:
        compression = ZIP_STORED
    else:
        compression = ZIP_LZMA
    with ZipFile(filename, "w", compression) as file:
        saves_v1(file, data, folder=folder)
11,012
def compute_summary(print_terminal=False, save=False): """ Computes benchmark summary. """ # TI ti_percentage = marche_df['occupazione_ti'].iloc[-1] # increment TI ti_rolling_7_mean = marche_df['occupazione_ti'].rolling(7).mean() ti_perc_increment = (ti_rolling_7_mean.iloc[-1] - ti_rolling_7_mean.iloc[-2])\ / ti_rolling_7_mean.iloc[-2] # new positives new_positives = marche_df['nuovi_positivi'].iloc[-1] # increment new positives positives_rolling_7_mean = marche_df['nuovi_positivi'].rolling(7).mean() positives_perc_increment = (positives_rolling_7_mean.iloc[-1] - positives_rolling_7_mean.iloc[-2])\ / positives_rolling_7_mean.iloc[-2] # weekly positives positives_rolling_7_sum = marche_df['nuovi_positivi'].rolling(7).sum() weekly_new_positives = positives_rolling_7_sum.iloc[-1] # weekly increment weekly_new_positives_increment = (positives_rolling_7_sum.iloc[-1] - positives_rolling_7_sum.iloc[-8])\ / positives_rolling_7_sum.iloc[-8] output_dict = {} output_dict.update({"lastUpdate": datetime.now().isoformat()}) region_dict = { "tiPercentage": f"{ti_percentage:.4f}", "tiIncrement": f"{ti_perc_increment:.4f}", "newPositives": f"{new_positives:.0f}", "newPositivesIncrement": f"{positives_perc_increment:.4f}", "weeklyPositives": f"{weekly_new_positives:.0f}", "weeklyPositivesIncrement": f"{weekly_new_positives_increment:.4f}" } output_dict.update({"marche": region_dict}) for province_code, province_data in marche_dict.items(): # new positives new_positives = province_data['nuovi_positivi'].iloc[-1] # increment new positives positives_rolling_7_mean = province_data['nuovi_positivi'].rolling(7).mean() positives_perc_increment = (positives_rolling_7_mean.iloc[-1] - positives_rolling_7_mean.iloc[-2])\ / positives_rolling_7_mean.iloc[-2] # weekly positives positives_rolling_7_sum = province_data['nuovi_positivi'].rolling(7).sum() weekly_new_positives = positives_rolling_7_sum.iloc[-1] # weekly increment weekly_new_positives_increment = (positives_rolling_7_sum.iloc[-1] - positives_rolling_7_sum.iloc[-8])\ / positives_rolling_7_sum.iloc[-8] province_name_clean = area_names_dict[province_code].lower().replace(' ', '') province_dict = { "newPositives": f"{new_positives:.0f}", "newPositivesIncrement": f"{positives_perc_increment:.4f}", "weeklyPositives": f"{weekly_new_positives:.0f}", "weeklyPositivesIncrement": f"{weekly_new_positives_increment:.4f}" } output_dict.update({f"{province_name_clean}": province_dict}) if save: with open('./charts/covid/marche_summary.json', 'w') as f: json.dump(output_dict, f) if print_terminal: json_output = json.dumps(output_dict) print(json_output)
11,013
def unpickle(file):
    """ unpickle the data """
    fo = open(file, 'rb')
    dict = cPickle.load(fo)
    fo.close()
    return dict
11,014
def delete_entry(cur, id):
    """Erase entry"""
    if (input('Are you sure [yN]? ').lower().strip() == 'y'):
        entries = cur.execute('SELECT * FROM mytodos')
        for entry in entries:
            if int(id) in entry:
                cur.execute(f"DELETE FROM mytodos WHERE ID={id}")
11,015
def _calc_norm_gen_prob(sent_1, sent_2, mle_lambda, topic):
    """
    Calculates and returns the length-normalized generative probability of
    sent_1 given sent_2.
    """
    sent_1_len = sum([count for count in sent_1.raw_counts.values()])
    return _calc_gen_prob(sent_1, sent_2, mle_lambda, topic) ** (1.0 / sent_1_len)
11,016
def isomorphic(l_op, r_op):
    """ Subject of definition, here it is equal operation.
        See limitations (vectorization.rst).
    """
    if l_op.getopnum() == r_op.getopnum():
        l_vecinfo = forwarded_vecinfo(l_op)
        r_vecinfo = forwarded_vecinfo(r_op)
        return l_vecinfo.bytesize == r_vecinfo.bytesize
    return False
11,017
def partition5(l, left, right):
    """
    Insertion Sort of list of at most 5 elements and return the position of the median.
    """
    j = left
    for i in xrange(left, right + 1):
        t = numpy.copy(l[i])
        for j in xrange(i, left - 1, -1):
            if l[j - 1][0] < t[0]:
                break
            l[j] = l[j - 1]
        l[j] = t
    return int(math.floor((left + right) / 2))
11,018
def max_validator(max_value):
    """Return validator function that ensures upper bound of a number.

    Result validation function will validate the internal value of resource
    instance field with the ``value <= max_value`` check.

    Args:
        max_value: maximum value for new validator
    """
    def validator(value):
        if value > max_value:
            raise ValidationError("{} is not <= {}".format(value, max_value))
    return validator
11,019
def main():
    """
    Function which contains main program's cycle.
    """
    path = input("Enter the file path: ")
    try:
        res = json_read(path)
    except:
        print("There is no such file! Exiting...")
        return
    obj_path = []
    while True:
        print("-" * 100)
        temp = choice(res, obj_path)
        if not temp:
            break
        res, obj_path = temp
11,020
def test_song_from_search_term():
    """
    Tests if Song.from_search_term() works correctly.
    """
    song = Song.from_search_term("Dirty Palm - Ropes")
    assert song.name == "Ropes"
    assert song.artists == ["Dirty Palm", "Chandler Jewels"]
    assert song.album_name == "Ropes"
    assert song.album_artist == "Dirty Palm"
    assert song.genres == ["gaming edm", "melbourne bounce international"]
    assert song.disc_number == 1
    assert song.duration == 188
    assert song.year == 2021
    assert song.date == "2021-10-28"
    assert song.track_number == 1
    assert song.tracks_count == 1
    assert song.isrc == "GB2LD2110301"
    assert song.song_id == "1t2qKa8K72IBC8yQlhD9bU"
    assert (
        song.cover_url
        == "https://i.scdn.co/image/ab67616d0000b273fe2cb38e4d2412dbb0e54332"
    )
    assert song.explicit == False
    assert song.download_url == None
11,021
def standalone_job_op(name, image, command, gpus=0, cpu_limit=0, memory_limit=0, env=[], tensorboard=False, tensorboard_image=None, data=[], sync_source=None, annotations=[], metrics=['Train-accuracy:PERCENTAGE'], arena_image='cheyang/arena_launcher:v0.5', timeout_hours=240): """This function submits a standalone training Job Args: name: the name of standalone_job_op image: the docker image name of training job mount: specify the datasource to mount to the job, like <name_of_datasource>:<mount_point_on_job> command: the command to run """ if not name: raise ValueError("name must be specified") if not image: raise ValueError("image must be specified") if not command: raise ValueError("command must be specified") options = [] if sync_source: if not sync_source.startswith("http"): raise ValueError("sync_source must be an http git url") options.append('--sync-source') options.append(str(sync_source)) for e in env: options.append('--env') options.append(str(e)) for d in data: options.append('--data') options.append(str(d)) for m in metrics: options.append('--metric') options.append(str(m)) if tensorboard_image: options.append('--tensorboard-image') options.append(str(tensorboard_image)) op = dsl.ContainerOp( name=name, image=arena_image, command=['python','arena_launcher.py'], arguments=[ "--name", name, "--tensorboard", str(tensorboard), "--image", str(image), "--gpus", str(gpus), "--cpu", str(cpu_limit), "--step-name", '{{pod.name}}', "--workflow-name", '{{workflow.name}}', "--memory", str(memory_limit), "--timeout-hours", str(timeout_hours), ] + options + [ "job", "--", str(command)], file_outputs={'train': '/output.txt', 'workflow':'/workflow-name.txt', 'step':'/step-name.txt', 'name':'/name.txt'} ) op.set_image_pull_policy('Always') return op
11,022
def main(dataset, hb_thr): """The main processing function. Keyword arguments: >>> dataset: The loaded trajectory data generated by TCA. >>> hb_thr: The hard braking threshold, float. RETURN: Time of detected hard braking, Locations of involved vehicles, the deceleration value, and the merged event index """ # Read the whole file using useful columns. df = pd.read_csv(dataset, usecols=['Vehicle_ID', 'transtime', 'X', 'Y', 'Speed', 'Heading', 'Avg_Acceleration']) df = df.sort_values(by=['transtime', 'Vehicle_ID']) print("Before drop duplicates, data size is:", len(df)) df = df.drop_duplicates(subset=['X', 'Y', 'Speed', 'Heading', 'transtime'], keep="first") print("After drop duplicates, data size is:", len(df)) df.index = pd.RangeIndex(start=0, stop=len(df), step=1) df['HB'] = df['Avg_Acceleration'] # Extracting the hard braking events by pre-set threshold df['HB'] = df['HB'].apply(lambda x: 1 if(x<-hb_thr and x>-200) else np.nan) df = df.dropna(subset=['HB']) df = df.drop(columns=['Speed', 'Heading']) df = df.sort_values(by=['Vehicle_ID', 'transtime']) df.index = pd.RangeIndex(start=0, stop=len(df), step=1) df.to_csv("HB_Online_"+traj_file[:-4]+".csv")
11,023
def load_randompdata(dataset_str, iter): """Load data.""" names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph'] objects = [] for i in range(len(names)): with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f: if sys.version_info > (3, 0): objects.append(pkl.load(f, encoding='latin1')) else: objects.append(pkl.load(f)) x, y, tx, ty, allx, ally, graph = tuple(objects) test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str)) test_idx_range = np.sort(test_idx_reorder) if dataset_str == 'citeseer': # Fix citeseer dataset (there are some isolated nodes in the graph) # Find isolated nodes, add them as zero-vecs into the right position test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1) tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1])) tx_extended[test_idx_range-min(test_idx_range), :] = tx tx = tx_extended ty_extended = np.zeros((len(test_idx_range_full), y.shape[1])) ty_extended[test_idx_range-min(test_idx_range), :] = ty ty = ty_extended NL = 2312 NC = 6 elif dataset_str == 'cora': NL = 1708 NC = 7 else: NL = 18717 NC = 3 features = sp.vstack((allx, tx)).tolil() features[test_idx_reorder, :] = features[test_idx_range, :] adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph)) labels = np.vstack((ally, ty)) labels[test_idx_reorder, :] = labels[test_idx_range, :] #fixed 500 for validation read from file, choose random 20 per class from the others for train ''' idx_test = test_idx_range.tolist() idx_train = range(len(y)) idx_val = range(len(y), len(y)+500) ''' idx_val=[int(item) for item in open("source/"+dataset_str+"/val_idx"+str(iter)+".txt").readlines()] idx_test = test_idx_range.tolist() idx_traincand = list(set(range(0,NL))-set(idx_val)) #train candiate, not test not valid nontestlabels = labels[idx_traincand] gtlabels = np.argmax(nontestlabels,axis=1) idx_train = [] for i in range(NC): nodeidx = np.where(gtlabels==i) ridx = random.sample(range(0,nodeidx[0].shape[0]),20) idx_train+=list(np.asarray(idx_traincand)[list(nodeidx[0][ridx])]) train_mask = sample_mask(idx_train, labels.shape[0]) val_mask = sample_mask(idx_val, labels.shape[0]) test_mask = sample_mask(idx_test, labels.shape[0]) y_train = np.zeros(labels.shape) y_val = np.zeros(labels.shape) y_test = np.zeros(labels.shape) y_train[train_mask, :] = labels[train_mask, :] y_val[val_mask, :] = labels[val_mask, :] y_test[test_mask, :] = labels[test_mask, :] return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask
11,024
def page_to_reload(): """ Returns page that is refreshed every argument of content attribute in meta http-equiv="refresh". """ val = knob_thread.val year = int(val * 138./256 + 1880) return ( """<!DOCTYPE html> <html> <head><meta http-equiv="refresh" content=".2"> <style> h1 {{color:white; font-family: Arial; font-size: 9em}} </style> </head> <body bgcolor="{color}0000"> <h1>YEAR {year}</h1><br /> <h1>ANOMALY {anomaly}&#176; </h1> </body> </html> """ ).format(color=('%x' % val), year=year, anomaly=year_to_anomaly[year])
11,025
def svn_log_changed_path2_create(*args):
    """svn_log_changed_path2_create(apr_pool_t pool) -> svn_log_changed_path2_t"""
    return _core.svn_log_changed_path2_create(*args)
11,026
def plot_timeseries(model, radius, lon, save=False, tag=''): """ Plot the solar wind model timeseries at model radius and longitude closest to those specified. :param model: An instance of the HUXt class with a completed solution. :param radius: Radius to find the closest model radius to. :param lon: Longitude to find the closest model longitude to. :param save: Boolean to determine if the figure is saved. :param tag: String to append to the filename if saving the figure. :return: fig: Figure handle :return: ax: Axes handle """ if (radius < model.r.min()) | (radius > (model.r.max())): print("Error, specified radius outside of model radial grid") if model.lon.size != 1: if (lon < model.lon.min()) | (lon > (model.lon.max())): print("Error, input lon outside range of model longitudes. Defaulting to closest longitude") id_lon = np.argmin(np.abs(model.lon - lon)) lon = model.lon[id_lon] fig, ax = plt.subplots(figsize=(14, 7)) # Get plotting data id_r = np.argmin(np.abs(model.r - radius)) r_out = model.r[id_r].value if model.lon.size == 1: id_lon = 0 lon_out = model.lon.value else: id_lon = np.argmin(np.abs(model.lon - lon)) lon_out = model.lon[id_lon].value t_day = model.time_out.to(u.day) ax.plot(t_day, model.v_grid[:, id_r, id_lon], 'k-') ylab = 'Solar Wind Speed (km/s)' ymin = 200 ymax = 1000 ax.set_ylim(ymin, ymax) ax.set_ylabel(ylab) ax.set_xlim(t_day.value.min(), t_day.value.max()) ax.set_xlabel('Time (days)') fig.subplots_adjust(left=0.1, bottom=0.1, right=0.95, top=0.95) # Add label radius_label = " Radius: {:3.2f}".format(r_out) + "$R_{sun}$ " lon_label = " Longitude: {:3.2f}".format(lon_out) + "$^\circ$" label = "HUXt" + radius_label + lon_label ax.set_title(label, fontsize=20) #ax.legend(loc=1) if save: cr_num = np.int32(model.cr_num.value) r_tag = np.int32(r_out) lon_tag = np.int32(lon_out) template_string = "HUXt1D_CR{:03d}_{}_time_series_radius_{:03d}_lon_{:03d}.png" filename = template_string.format(cr_num, tag, r_tag, lon_tag) filepath = os.path.join(model._figure_dir_, filename) fig.savefig(filepath) return fig, ax
11,027
def missing_values_rf_rebase(repo, master, branch, rebase_branch): """ >>> import sys >>> sys.path.append('/home/joncrall/code/ibeis/_scripts') >>> from setup_special_sklearn import * >>> dpath = ut.truepath('~/code/scikit-learn') >>> master = 'master' >>> repo = ut.Repo(dpath=dpath) >>> branch = 'missing_values_rf' >>> rebase_branch = 'dev_rebase_' + branch """ assert branch == 'missing_values_rf' reset_dev_branch(repo, branch, rebase_branch) # Custom rebase script out = repo.issue('git rebase ' + master, error='return') if out: fpaths = repo._parse_merge_conflict_fpaths(out) fpath = fpaths[0] assert len(fpaths) == 1 and fpath.endswith('sklearn/utils/validation.py') repo.resolve_conflicts(fpath, 'theirs', force=True) ut.sedfile(fpath, 'accept_sparse=None', 'accept_sparse=False', force=True) repo.issue('git add ' + fpath) # Step 2 out = repo.issue('git rebase --continue', error='return') # Regex to help solve conflicts for fpath in repo._parse_merge_conflict_fpaths(out): # repo.resolve_conflicts(fpath, 'ours', force=True) repo.resolve_conflicts(fpath, 'theirs', force=True) # repo.issue('git checkout --theirs ' + fpath) repo.issue('git add ' + fpath) out = repo.issue('git rebase --continue', error='return') assert out is None # mixins.remove(branch) # mixins.append(rebase_branch) # out = repo.issue('git rebase --abort') # Fix the patch # apply it # repo.issue('git am < fix_empty_poster.patch')
11,028
def convert_csv_to_excel(csv_path):
    """
    This function converts a csv file, given by its file path, to an excel file
    in the same directory with the same name.
    :param csv_path: string file path of CSV file to convert
    :return: string file path of converted Excel file.
    """
    (file_path, file_extension) = os.path.splitext(csv_path)  # split the csv pathname to remove the extension
    wb = xl.Workbook()  # create the excel workbook
    ws = wb.active  # use the active sheet by default
    logging.info("converting file to xlsx: '{}'".format(csv_path))
    with open(csv_path, newline='') as csv_file:  # append each row of the csv to the excel worksheet
        rd = csv.reader(csv_file, delimiter=",", quotechar='"')
        for row in rd:
            ws.append(row)
    output_path = os.path.join(file_path + '.xlsx')  # output file path should be the same as the csv file
    logging.info("saving to file: '{}'".format(output_path))
    wb.save(output_path)  # save the converted file
    return output_path
11,029
def check_from_dict(method):
    """A wrapper that wrap a parameter checker to the original function(crop operation)."""
    @wraps(method)
    def new_method(self, *args, **kwargs):
        word_dict, = (list(args) + [None])[:1]
        if "word_dict" in kwargs:
            word_dict = kwargs.get("word_dict")
        assert isinstance(word_dict, dict), "word_dict needs to be a list of word,id pairs"
        for word, word_id in word_dict.items():
            assert isinstance(word, str), "each word in word_dict needs to be type str"
            assert isinstance(word_id, int) and word_id >= 0, "each word id needs to be positive integer"
        kwargs["word_dict"] = word_dict
        return method(self, **kwargs)
    return new_method
11,030
def iter_model_rows(model, column, include_root=False):
    """Iterate over all row indices in a model"""
    indices = [QtCore.QModelIndex()]  # start iteration at root
    for index in indices:
        # Add children to the iterations
        child_rows = model.rowCount(index)
        for child_row in range(child_rows):
            child_index = model.index(child_row, column, index)
            indices.append(child_index)
        if not include_root and not index.isValid():
            continue
        yield index
11,031
def env_observation_space_info(instance_id):
    """
    Get information (name and dimensions/bounds) of the env's
    observation_space

    Parameters:
        - instance_id: a short identifier (such as '3c657dbc')
          for the environment instance
    Returns:
        - info: a dict containing 'name' (such as 'Discrete'), and
          additional dimensional info (such as 'n') which varies from
          space to space
    """
    info = envs.get_observation_space_info(instance_id)
    return jsonify(info=info)
11,032
def build_target_from_transitions( dynamics_function: TargetDynamics, initial_state: State, final_states: Set[State], ) -> Target: """ Initialize a service from transitions, initial state and final states. The set of states and the set of actions are parsed from the transition function. This will guarantee that all the states are reachable. :param dynamics_function: the transition function :param initial_state: the initial state :param final_states: the final states :return: the service """ states = set() actions = set() transition_function: TransitionFunction = {} policy: Dict[State, Dict[Action, Prob]] = {} reward: Dict[State, Dict[Action, Reward]] = {} for start_state, transitions_by_action in dynamics_function.items(): states.add(start_state) transition_function[start_state] = {} policy[start_state] = {} reward[start_state] = {} for action, (next_state, prob, reward_value) in transitions_by_action.items(): actions.add(action) states.add(next_state) transition_function[start_state][action] = next_state policy[start_state][action] = prob reward[start_state][action] = reward_value unreachable_final_states = final_states.difference(states) assert ( len(unreachable_final_states) == 0 ), f"the following final states are not in the transition function: {unreachable_final_states}" assert initial_state in states, "initial state not in the set of states" return Target( states, actions, final_states, initial_state, transition_function, policy, reward, )
11,033
def citation(dll_version: Optional[str] = None) -> dict:
    """
    Return a citation for the software.
    """
    executed = datetime.now().strftime("%B %d, %Y")
    bmds_version = __version__
    url = "https://pypi.org/project/bmds/"
    if not dll_version:
        # assume we're using the latest version
        dll_version = get_latest_dll_version()
    return dict(
        paper=(
            "Pham LL, Watford S, Friedman KP, Wignall J, Shapiro AJ. Python BMDS: A Python "
            "interface library and web application for the canonical EPA dose-response modeling "
            "software. Reprod Toxicol. 2019;90:102-108. doi:10.1016/j.reprotox.2019.07.013."
        ),
        software=(
            f"Python BMDS. (Version {bmds_version}; Model Library Version {dll_version}) "
            f"[Python package]. Available from {url}. Executed on {executed}."
        ),
    )
11,034
def comment_pr_(ci_data, github_token):
    """Write either a staticman comment or non-staticman comment to github.
    """
    return sequence(
        (comment_staticman(github_token) if is_staticman(ci_data) else comment_general),
        post(github_token, ci_data),
        lambda x: dict(status_code=x.status_code, json=x.json()),
    )(ci_data)
11,035
def response_loss_model(h, p, d_z, d_x, d_y, samples=1, use_upper_bound=False, gradient_samples=0): """ Create a Keras model that computes the loss of a response model on data. Parameters ---------- h : (tensor, tensor) -> Layer Method for building a model of y given p and x p : (tensor, tensor) -> Layer Method for building a model of p given z and x d_z : int The number of dimensions in z d_x : int Tbe number of dimensions in x d_y : int The number of dimensions in y samples: int The number of samples to use use_upper_bound : bool Whether to use an upper bound to the true loss (equivalent to adding a regularization penalty on the variance of h) gradient_samples : int The number of separate additional samples to use when calculating the gradient. This can only be nonzero if user_upper_bound is False, in which case the gradient of the returned loss will be an unbiased estimate of the gradient of the true loss. Returns ------- A Keras model that takes as inputs z, x, and y and generates a single output containing the loss. """ assert not(use_upper_bound and gradient_samples) # sample: (() -> Layer, int) -> Layer def sample(f, n): assert n > 0 if n == 1: return f() else: return L.average([f() for _ in range(n)]) z, x, y = [L.Input((d,)) for d in [d_z, d_x, d_y]] if gradient_samples: # we want to separately sample the gradient; we use stop_gradient to treat the sampled model as constant # the overall computation ensures that we have an interpretable loss (y-h̅(p,x))², # but also that the gradient is -2(y-h̅(p,x))∇h̅(p,x) with *different* samples used for each average diff = L.subtract([y, sample(lambda: h(p(z, x), x), samples)]) grad = sample(lambda: h(p(z, x), x), gradient_samples) def make_expr(grad, diff): return K.stop_gradient(diff) * (K.stop_gradient(diff + 2 * grad) - 2 * grad) expr = L.Lambda(lambda args: make_expr(*args))([grad, diff]) elif use_upper_bound: expr = sample(lambda: L.Lambda(K.square)(L.subtract([y, h(p(z, x), x)])), samples) else: expr = L.Lambda(K.square)(L.subtract([y, sample(lambda: h(p(z, x), x), samples)])) return Model([z, x, y], [expr])
11,036
def get_hourly_load(session, endpoint_id, start_date, end_date):
    """
    :param session: session for the database
    :param endpoint_id: id for the endpoint
    :param start_date: datetime object
    :param end_date: datetime object and: end_date >= start_date
    :return:
    """
    numdays = (end_date - start_date).days + 1
    # list of hours: 0:00 - 23:00
    hours = ['0{}:00'.format(h) for h in range(0, 10)] + ['{}:00'.format(h) for h in range(10, 24)]
    heatmap_data = numpy.zeros((len(hours), numdays))
    start_datetime = to_utc_datetime(
        datetime.datetime.combine(start_date, datetime.time(0, 0, 0, 0))
    )
    end_datetime = to_utc_datetime(datetime.datetime.combine(end_date, datetime.time(23, 59, 59)))
    for time, count in get_num_requests(session, endpoint_id, start_datetime, end_datetime):
        parsed_time = datetime.datetime.strptime(time, '%Y-%m-%d %H:%M:%S')
        day_index = (parsed_time - start_datetime).days
        hour_index = int(to_local_datetime(parsed_time).strftime('%H'))
        heatmap_data[hour_index][day_index] = count
    return {
        'days': [
            (start_date + datetime.timedelta(days=i)).strftime('%Y-%m-%d') for i in range(numdays)
        ],
        "data": heatmap_data.tolist(),
    }
11,037
def startend(start=None, end=None):
    """Return TMIN, TAVG, TMAX."""
    # Select statement
    sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]

    if not end:
        # Calculate TMIN, TAVG, TMAX for dates greater than start
        results = session.query(*sel).\
            filter(Measurement.date >= start).all()
        print(results)  # This is a list of tuples
        # Convert list of tuples into normal list
        temps = list(np.ravel(results))
        return jsonify(temps)

    # Calculate TMIN, TAVG, TMAX with start and stop
    results = session.query(*sel).\
        filter(Measurement.date >= start).\
        filter(Measurement.date <= end).all()
    print(results)  # This is a list of tuples
    # Convert list of tuples into normal list
    temps = list(np.ravel(results))
    print(temps)  # This is a normal list
    return jsonify(temps)
11,038
def configure_camera(config): """ Configures the camera. :param config: dictionary containing BARD configuration parameters optional parameters in camera. source (default 0), window size (default delegates to cv2.CAP_PROP_FRAME_WIDTH), calibration directory and roi (region of interest) """ # Specify some reasonable defaults. Webcams are typically 640x480. video_source = 0 dims = None mtx33d = np.array([[1000.0, 0.0, 320.0], [0.0, 1000.0, 240.0], [0.0, 0.0, 1.0]]) dist5d = np.array([0.0, 0.0, 0.0, 0.0, 0.0]) roi = None if config is None: return video_source, mtx33d, dist5d, dims, roi camera_config = config.get('camera', None) if camera_config is not None: video_source = camera_config.get('source', 0) calib_dir = camera_config.get('calibration directory', None) calib_prefix = camera_config.get('calibration prefix', 'calib') if calib_dir is not None: calib_param = MonoCalibrationParams() calib_param.load_data(calib_dir, calib_prefix, halt_on_ioerror = False) mtx33d = calib_param.camera_matrix dist5d = calib_param.dist_coeffs dims = camera_config.get('window size', None) if dims is None: print("WARNING: window size was not specified! " "This probably breaks the calibrated overlay!") else: # JSON file contains list, OpenCV requires tuple. if len(dims) != 2: raise ValueError("Invalid window size given, window size", " should be list of length 2") dims = (dims[0], dims[1]) roi = camera_config.get('roi', None) if roi is not None: if len(roi) != 4: raise ValueError("Invalid roi set. Region of interest should", " be a list of length 4. [x_start, y_start, x_end, y_end]") return video_source, mtx33d, dist5d, dims, roi
11,039
def munsell_value_Moon1943(Y: FloatingOrArrayLike) -> FloatingOrNDArray: """ Return the *Munsell* value :math:`V` of given *luminance* :math:`Y` using *Moon and Spencer (1943)* method. Parameters ---------- Y *luminance* :math:`Y`. Returns ------- :class:`np.floating` or :class:`numpy.ndarray` *Munsell* value :math:`V`. Notes ----- +------------+-----------------------+---------------+ | **Domain** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``Y`` | [0, 100] | [0, 1] | +------------+-----------------------+---------------+ +------------+-----------------------+---------------+ | **Range** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``V`` | [0, 10] | [0, 1] | +------------+-----------------------+---------------+ References ---------- :cite:`Wikipedia2007c` Examples -------- >>> munsell_value_Moon1943(12.23634268) # doctest: +ELLIPSIS 4.0688120... """ Y = to_domain_100(Y) V = 1.4 * spow(Y, 0.426) return as_float(from_range_10(V))
11,040
def make_heatmap_and_hist(mag, hist, show_plot): """Handles creating and then labeling the one magnitude heatmap and corresponding histogram. Parameters ---------- mag: int The magnitude that should be plotted int he heatmpa. fig: figure object The figure object the pie chart should be graphed to. ax: axis object The axis object corresponding to the figure. show_plot: bool A boolean representing whether to the plot should be saved to a file of simply displayed on the screen. """ fig = plt.figure(mag, figsize=(15, 11.25)) gs = gridspec.GridSpec(9, 3) mag_map = OneMagHeatMap('/grp/hst/wfc3t/sasp/code/master_table.csv', mag) plt.subplot(gs[:, :-1]) ax_2 = plt.gca() mag_map.plot_heatmap(fig, ax_2, False) plt.title('Map of Dragon\'s Breath for Magnitude {} stars'.format(mag), fontsize='xx-large') plt.subplot(gs[1:-1, -1]) hist.plot_hist(mag) fig.tight_layout() if show_plot: plt.show() else: plt.savefig('db_map_mag_{}.jpg'.format(mag))
11,041
def log_errors(func):
    """
    A wrapper to print exceptions raised from functions that are called by callers
    that silently swallow exceptions, like render callbacks.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            # Exceptions from calls like this aren't well-defined, so just log the
            # error and don't reraise it.
            traceback.print_exc()
    return wrapper
11,042
def get_urls(
    url_queue: Queue,
    images: List[Tuple[str, Image]],
    ImageClass: type,
) -> None:
    """Processes URL sources from a/some separate thread(s)"""
    source = url_queue.get()
    while not interrupted.is_set() and source:
        log(f"Getting image from {source!r}", logger, verbose=True)
        try:
            images.append((basename(source), Image(ImageClass.from_url(source))))
        # Also handles `ConnectionTimeout`
        except requests.exceptions.ConnectionError:
            log(f"Unable to get {source!r}", logger, _logging.ERROR)
        except URLNotFoundError as e:
            log(str(e), logger, _logging.ERROR)
        except PIL.UnidentifiedImageError as e:
            log(str(e), logger, _logging.ERROR)
        except Exception:
            log_exception(f"Getting {source!r} failed", logger, direct=True)
        else:
            log(f"Done getting {source!r}", logger, verbose=True)
        source = url_queue.get()
11,043
def count_total_words(sentence_list):
    """
    Counts the words in a list of sentences.

    :param sentence_list: a list of sentences, each consisting of a list of words
    :return: the number of words in the sentences
    """
    return sum(
        [count_words_per_sentence(sentence) for sentence in sentence_list]
    )
11,044
def grib_index_select_double(iid, key, val):
    """
    @brief Select the message subset with key==value. The value is a double.

    The key must have been created with integer type or have integer as native type
    if the type was not explicitly defined in the index creation.

    \b Examples: \ref index.py "index.py"

    @param indexid   id of an index created from a file. The index must have been created with the key in argument.
    @param key       key to be selected
    @param value     value of the key to select
    @exception GribInternalError
    """
    GRIB_CHECK(_internal.grib_c_index_select_real8(iid, key, val))
11,045
def translate_dbpedia_url(url): """ Convert an object that's defined by a DBPedia URL to a ConceptNet URI. We do this by finding the part of the URL that names the object, and using that as surface text for ConceptNet. This is, in some ways, abusing a naming convention in the Semantic Web. The URL of an object doesn't have to mean anything at all. The human-readable name is supposed to be a string, specified by the "name" relation. The problem here is that the "name" relation is not unique in either direction. A URL can have many names, and the same name can refer to many URLs, and some of these names are rarely used or are the result of parsing glitches. The URL itself is a stable thing that we can build a ConceptNet URI from, on the other hand. """ if '__' in url or 'dbpedia.org' not in url: return None parsed = parse_url(url) domain = parsed.netloc if '.' not in domain: return None if domain == 'dbpedia.org': # Handle old DBPedia URLs that had no language code domain = 'en.dbpedia.org' domain_parts = domain.split('.', 1) if domain_parts[1] == 'dbpedia.org': lang = domain_parts[0] if lang in LCODE_ALIASES: lang = LCODE_ALIASES[lang] if lang not in ALL_LANGUAGES: return None text = resource_name(url).replace('_', ' ') uri = topic_to_concept(lang, text) if uri in CONCEPT_BLACKLIST: return None else: return uri else: return None
11,046
def setSecurityPolicy(aSecurityPolicy):
    """Set the system default security policy.

    This method should only be caused by system startup code. It should
    never, for example, be called during a web request.
    """
    last = _ImplPython._defaultPolicy
    _ImplPython._defaultPolicy = aSecurityPolicy
    return last
11,047
def map_sentence2ints(sentence):
    """Map a sentence to an array of integer word indices."""
    word_list = re.findall(r"[\w']+|[.,!?;]", sentence)
    int_list = [const.INPUTVOCABULARY.index(word) for word in word_list]
    return np.array(int_list).astype(np.int32)
11,048
def get_api_key(
    api_key_header: str = Security(
        APIKeyHeader(name=settings.API_KEY_HEADER, auto_error=False)
    )
) -> str:
    """
    This function checks the header and its value for correct authentication;
    if not, a 403 error is returned:

    - api_key_header = Security api header

    https://github.com/tiangolo/fastapi/issues/142
    """
    if api_key_header == settings.API_KEY:
        return api_key_header
11,049
def add_pred_to_test(test_df, pred_np, demo_col_list, days):
    """
    derived from Tensorflow
    INPUT:
        - df (pandas DataFrame)
        - group (string)
    OUTPUT:
        - show_group_stats_viz
    """
    test_df = test_df.copy()
    for c in demo_col_list:
        test_df[c] = test_df[c].astype(str)
    test_df['score'] = pred_np
    test_df['label_value'] = test_df['time_in_hospital'].apply(lambda x: 1 if x >= days else 0)
    return test_df
11,050
def test_index(mock_app):
    """Test the CLI that creates indexes in the database"""
    runner = mock_app.test_cli_runner()
    assert runner

    # Test the command that updates the indexes without arguments
    result = runner.invoke(cli, ['index'])
    # It should print confirm message
    assert 'This will delete and rebuild all indexes(if not --update). Are you sure? [y/N]' in result.output

    # Provide confirm command to CLI (say yes)
    result = runner.invoke(cli, ['index'], input='y')
    assert result.exit_code == 0
    assert 'INFO creating indexes' in result.output

    # Provide confirm command and update option
    result = runner.invoke(cli, ['index', '--update'], input='y')
    assert result.exit_code == 0
    assert 'INFO All indexes in place' in result.output
11,051
def getCriticality(cvss):
    """ color convention for the cells of the PDF """
    if cvss == 0.0:
        return ("none", "#00ff00", (0, 255, 0))
    if cvss < 3.1:
        return ("low", "#ffff00", (255, 255, 0))
    if cvss < 6.1:
        return ("medium", "#ffc800", (255, 200, 0))
    if cvss < 9.1:
        return ("high", "#ff6400", (255, 100, 0))
    return ("critical", "#cc0000", (200, 0, 0))
11,052
def combine_groups(groups: List[np.ndarray], num_features: int) -> np.ndarray:
    """
    Combines the given groups back into a 2d measurement matrix.

    Args:
        groups: A list of 1d, flattened groups
        num_features: The number of features in each measurement (D)
    Returns:
        A [K, D] array containing the recovered measurements.
    """
    flattened = np.concatenate(groups)  # [K * D]
    return flattened.reshape(num_features, -1).T
11,053
def skippable(*prompts, argument=None):
    """
    Decorator to allow a method on the :obj:`CustomCommand` to be skipped.

    Parameters:
    ----------
    prompts: :obj:iter
        A series of prompts to display to the user when the method is being
        skipped.
    argument: :obj:`str`
        By default, the management command argument to indicate that the method
        should be skipped will be `skip_<func_name>`. If the argument should be
        different, it can be explicitly provided here.
    """
    def decorator(func):
        @functools.wraps(func)
        def inner(instance, *args, **kwargs):
            parameter = argument or "skip_%s" % func.__name__
            if parameter in kwargs and kwargs[parameter] is True:
                instance.prompt(*prompts, style_func=instance.style.HTTP_NOT_MODIFIED)
                return False
            else:
                return func(instance, *args, **kwargs)
        return inner
    return decorator
11,054
def test():
    """ unit tests """
    import unittest
    tests = unittest.TestLoader().discover('tests')
    unittest.TextTestRunner(verbosity=2).run(tests)
11,055
def collate(
    samples,
    pad_idx,
    eos_idx,
    left_pad_source=True,
    left_pad_target=False,
    input_feeding=True,
):
    """
    Differences from fairseq.data.language_pair_dataset.collate:
    1. The key for prev_output_tokens is no longer target but prev_output_tokens
       (because prev_output_tokens is customized).
    2. positions are added (by default positions start at 1; MASS keeps the
       positions from the original sentence).
    TODO:
    1. Ordering problem for the new keys:
       Strategy 0, rewrite everything: https://coding.jd.com/alphaw/fairseq_ext/blob/a336c4529822271417fff86a06dcd9f2b0945592/src/data/mask_language_pair_dataset.py
       Strategy 1, also sort once when inheriting, provided the sort result is
       deterministic. (Currently the adopted strategy; the code still looks redundant.)
       Strategy 2, add a more_keys parameter to collate, or re-order everything
       under net_input.
       (TODO: use strategy 1 for now)
    """
    if len(samples) == 0:
        return {}

    def merge(key, left_pad, move_eos_to_beginning=False, pad_to_length=None):
        return data_utils.collate_tokens(
            [s[key] for s in samples],
            pad_idx,
            eos_idx,
            left_pad,
            move_eos_to_beginning,
            pad_to_length=pad_to_length,
        )

    batch = _collate(samples, pad_idx, eos_idx,
                     left_pad_source=left_pad_source,
                     left_pad_target=left_pad_target,
                     input_feeding=input_feeding)
    # patch
    src_lengths = torch.LongTensor([
        s['source'].ne(pad_idx).long().sum() for s in samples
    ])
    src_lengths, sort_order = src_lengths.sort(descending=True)
    prev_output_positions = merge('prev_output_positions',
                                  left_pad=left_pad_target).index_select(0, sort_order)  # changed
    batch['net_input']['prev_output_positions'] = prev_output_positions  # changed
    return batch
11,056
def _cache_name(address):
    """Generates the key name of an object's cache entry"""
    addr_hash = hashlib.md5(address).hexdigest()
    return "unsub-{hash}".format(hash=addr_hash)
11,057
def format_currency(
    value: Decimal,
    currency: str | None = None,
    show_if_zero: bool = False,
    invert: bool = False,
) -> str:
    """Format a value using the derived precision for a specified currency."""
    if not value and not show_if_zero:
        return ""
    if value == ZERO:
        return g.ledger.format_decimal(ZERO, currency)
    if invert:
        value = -value
    return g.ledger.format_decimal(value, currency)
11,058
def guess_ghostscript() -> str:
    """Guess the path to ghostscript.

    Only guesses well on Windows.
    Should prevent people from needing to add ghostscript to PATH.
    """
    if os.name != 'nt':
        return 'gs'  # I'm not sure where to look on non-Windows OSes so just guess 'gs'.

    def sort_by_version(v: Path) -> Union[version.Version, version.LegacyVersion]:
        return version.parse(v.name[2:])  # When this is an inline lambda mypy and pylint fuss.

    locations = 'C:\\Program Files\\gs', 'C:\\Program Files (x86)\\gs'
    files = 'gswin64c.exe', 'gswin32c.exe', 'gs.exe'
    for location in locations:
        path = Path(location)
        if path.exists():
            versions = [v for v in path.iterdir() if v.is_dir() and v.name.startswith('gs')]
            versions.sort(key=sort_by_version, reverse=True)
            for v in versions:
                for file in files:
                    exe = v / 'bin' / file
                    if exe.exists():
                        return str(exe)
    return 'gswin64c'
11,059
def ExportFakeTF1ImageModule(*, input_image_height: int, input_image_width: int, output_feature_dim: int, export_path: str): """Makes a TF-hub image feature module for use in unit tests. The resulting module has the signature of a image model, but contains a minimal set of trainable variables and its initialization loads nothing from disk. Args: input_image_height: Height of the module's input images. input_image_width: Width of module's input images. output_feature_dim: Dimension of the output feature vectors. export_path: Path where exported module will be written. """ def ModuleFn(training): """Builds the graph and signature for the stub TF-hub module.""" image_data = tf.placeholder( shape=[None, input_image_height, input_image_width, 3], dtype=tf.float32) # Linearly project image_data to shape [1, output_feature_dim] features. encoder_output = tf.compat.v1.layers.dense( tf.reshape(image_data, [-1, input_image_height * input_image_width * 3]), output_feature_dim) # Add a non-trainable 'count' variable that can be updated through an # UPDATE_OP. This is analogous to a batch-norm moving average that should be # updated during fine-tuning. v = tf.get_variable('count', initializer=0, dtype=tf.int32, trainable=False) if training: update_op = v.assign_add(1).op tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_op) hub.add_signature( 'default', inputs={'images': image_data}, outputs=encoder_output) spec = hub.create_module_spec( ModuleFn, tags_and_args=[({'train'}, dict(training=True)), (set(), dict(training=False))]) with tf.compat.v1.Graph().as_default(): module = hub.Module(spec, trainable=True) with tf.compat.v1.Session() as session: session.run(tf.compat.v1.global_variables_initializer()) module.export(export_path, session)
11,060
def supplemental_div(content):
    """
    Standardize supplemental content listings
    Might not be possible if genus and tree content diverge
    """
    return {'c': content}
11,061
def viewTypes():
    """View types of item when sent through slash command"""
    user_id, user_name, channel_id = getUserData(request.form)
    checkUser(user_id)
    itemType = request.form.get('text')
    try:
        text = viewTypesItems(itemType)
    except ItemNotInPantry:
        reply = "Sorry! But either the spelling is wrong or the item is currently unavailable.\nPlease view items in the pantry to check."
        client.chat_postMessage(channel=channel_id, blocks=itemExceptionBlock(reply))
        return Response(), 200
    client.chat_postMessage(channel=channel_id, blocks=viewTypesItemBlock(text))
    return Response(), 200
11,062
def load_and_preprocess(): """ Load the data (either train.csv or test.csv) and pre-process it with some simple transformations. Return in the correct form for usage in scikit-learn. Arguments --------- filestr: string string pointing to csv file to load into pandas Returns ------- X_train: numpy.array array containing features of training set X_test: numpy.array array containing features of test set y: numpy.array array containing labels for training set test_ID: numpy.array IDs for test set, for submission """ train = pd.read_csv("data/train.csv") test = pd.read_csv("data/test.csv") data = pd.concat((train.loc[:,'MSSubClass':'SaleCondition'],\ test.loc[:,'MSSubClass':'SaleCondition'])) #first extract the target variable, and log-transform because the prices are very skewed y_train = np.log1p(train['SalePrice'].values) #one hot encoding for categorical variables data = pd.get_dummies(data) #first find which numerical features are significantly skewed and transform them to log(1 + x) numerical = data.dtypes[data.dtypes!='object'].index skewed = data[numerical].apply(lambda u: skew(u.dropna())) skewed = skewed[skewed > 0.75].index data[skewed] = np.log1p(data[skewed]) #if numerical values are missing, replace with median from that column data = data.fillna(data.mean()) X_train = data[:train.shape[0]].as_matrix() X_test = data[train.shape[0]:].as_matrix() return X_train,X_test,y_train,test.Id
11,063
def _dates2absolute(dates, units): """ Absolute dates from datetime object Parameters ---------- dates : datetime instance or array_like of datetime instances Instances of pyjams.datetime class units : str 'day as %Y%m%d.%f', 'month as %Y%m.%f', or 'year as %Y.%f' Returns ------- longdouble or array_like of longdouble absolute dates Examples -------- >>> dt = [datetime(1990, 1, 1), datetime(1991, 1, 1)] >>> dec = _dates2absolute(dt, 'day as %Y%m%d.%f') >>> print(np.around(dec, 1)) [19900101.0, 19910101.0] """ mdates = input2array(dates, default=datetime(1990, 1, 1)) # wrapper might be slow out = [ _date2absolute(dd, units) for dd in mdates ] out = array2input(out, dates) return out
11,064
def Mix_GetNumMusicDecoders():
    """Retrieves the number of available music decoders.

    The returned value can differ between runs of a program due to changes in
    the availability of the shared libraries required for supporting different
    formats.

    Returns:
        int: The number of available music decoders.
    """
    return _funcs["Mix_GetNumMusicDecoders"]()
11,065
def check_ft_grid(fv, diff):
    """Grid check for fft optimisation"""
    if np.log2(np.shape(fv)[0]) == int(np.log2(np.shape(fv)[0])):
        nt = np.shape(fv)[0]
    else:
        print("fix the grid for optimization \
               of the fft's, grid:" + str(np.shape(fv)[0]))
        sys.exit(1)
    lvio = []
    for i in range(len(fv)-1):
        lvio.append(fv[i+1] - fv[i])
    grid_error = np.abs(np.asanyarray(lvio)[:]) - np.abs(diff)
    if not(np.allclose(grid_error, 0, rtol=0, atol=1e-12)):
        print(np.max(grid_error))
        sys.exit("your grid is not uniform")
    assert len(np.unique(fv)) == len(fv)
    return 0
11,066
def _concat_applicative(
    current: KindN[
        _ApplicativeKind, _FirstType, _SecondType, _ThirdType,
    ],
    acc: KindN[
        _ApplicativeKind, _UpdatedType, _SecondType, _ThirdType,
    ],
    function: KindN[
        _ApplicativeKind,
        Callable[[_FirstType], Callable[[_UpdatedType], _UpdatedType]],
        _SecondType,
        _ThirdType,
    ],
) -> KindN[_ApplicativeKind, _UpdatedType, _SecondType, _ThirdType]:
    """Concats two applicatives using a curried-like function."""
    return acc.apply(current.apply(function))
11,067
def goto_x(new_x):
    """
    Move tool to the new_x position at speed_mm_s at high speed.
    Update curpos.x with new position.
    If a failure is detected, sleep so the operator can examine the situation.
    Since the loss of expected responses to commands indicates that the program
    does not know the exact position of the device, the caller should
    immediately abort on a failure.

    Call this function like this:
        assert goto_x(new_x_value), "Useful message indicating where failure occurred"

    :param new_x: new X position of tool
    :return: True -> success, False -> failure
    """
    assert isinstance(new_x, float)
    if VERIFY_NEGATIVE_VALUES:
        assert is_x_valid(new_x)
    global curpos
    output_and_log("G00 X{0:3.3f}".format(new_x))
    responded = read_port_await_str("ok")
    if not responded:
        print "goto_x() RESPONSE STRING({0}) NOT RECEIVED".format("ok")
        time.sleep(SLEEP_BEFORE_ESTOP)
    else:
        curpos.x = new_x
    return responded
11,068
def server_rename(adapter_id, server_id):
    """Renames a server using a certain adapter, if that adapter supports renaming."""
    adapter = get_adapter(adapter_id)

    if not adapter:
        return output.failure("That adapter doesn't (yet) exist. Please check the adapter name and try again.", 501)

    if not adapter.can_rename():
        return output.failure("This adapter doesn't support renaming servers.", 501)

    if not adapter.do_verify(request.headers):
        return output.failure("Credential verification failed. Please check your credentials and try again.", 401)

    result = adapter.do_server_rename(request.headers, server_id, request.json)

    if isinstance(result, dict) and 'error' in result:
        return output.failure(result['error'], result['status'])

    return ""
11,069
def add_months(img, year, **kwargs):
    """Add months"""
    img_draw = ImageDraw.Draw(img)
    for month_index in range(1, 12 + 1):
        month_object = MonthObject(
            draw=img_draw,
            year=year,
            month=month_index,
            x=MONTHS_LEFT + MONTH_HORIZONTAL_STEP * ((month_index - 1) % MONTHS_IN_ROW),
            y=MONTHS_TOP + MONTH_VERTICAL_STEP * ((month_index - 1) // MONTHS_IN_ROW),
            **kwargs
        )
        month_object.render()
11,070
def _to_absolute_uri(uri):
    """
    Converts the input URI into an absolute URI, relative to the current working directory.
    :param uri: A URI, absolute or relative.
    :return: An absolute URI.
    """
    if ":" in uri:  # Already absolute. Is either a drive letter ("C:/") or already fully specified URI ("http://").
        return pathlib.Path(uri).as_uri()  # Pathlib can take care of both these cases.
    return pathlib.Path(os.path.abspath(uri)).as_uri()
11,071
def index():
    """ Root URL response, load UI """
    return app.send_static_file("index.html")
11,072
def plot_separacion2D(x, y, grado, mu, de, w, b):
    """
    Plots the first two dimensions (positions 1 and 2) of two-dimensional data
    extended with a polynomial classifier, as well as the separation given by
    theta_phi.
    """
    if grado < 2:
        raise ValueError('Esta funcion es para graficar separaciones con polinomios mayores a 1')
    x1_min, x1_max = np.min(x[:, 0]), np.max(x[:, 0])
    x2_min, x2_max = np.min(x[:, 1]), np.max(x[:, 1])
    delta1, delta2 = (x1_max - x1_min) * 0.1, (x2_max - x2_min) * 0.1
    spanX1 = np.linspace(x1_min - delta1, x1_max + delta1, 600)
    spanX2 = np.linspace(x2_min - delta2, x2_max + delta2, 600)
    X1, X2 = np.meshgrid(spanX1, spanX2)
    X = normaliza(map_poly(grado, np.c_[X1.ravel(), X2.ravel()]), mu, de)
    Z = predictor(X, w, b)
    Z = Z.reshape(X1.shape[0], X1.shape[1])
    # plt.contour(X1, X2, Z, linewidths=0.2, colors='k')
    plt.contourf(X1, X2, Z, 1, cmap=plt.cm.binary_r)
    plt.plot(x[y > 0.5, 0], x[y > 0.5, 1], 'sr', label='clase positiva')
    plt.plot(x[y < 0.5, 0], x[y < 0.5, 1], 'oy', label='clase negativa')
    plt.axis([spanX1[0], spanX1[-1], spanX2[0], spanX2[-1]])
11,073
def restore_capitalization(word, example):
    """
    Make the capitalization of the ``word`` be the same as in ``example``:

        >>> restore_capitalization('bye', 'Hello')
        'Bye'
        >>> restore_capitalization('half-an-hour', 'Minute')
        'Half-An-Hour'
        >>> restore_capitalization('usa', 'IEEE')
        'USA'
        >>> restore_capitalization('pre-world', 'anti-World')
        'pre-World'
        >>> restore_capitalization('123-do', 'anti-IEEE')
        '123-DO'
        >>> restore_capitalization('123--do', 'anti--IEEE')
        '123--DO'

    If the alignment fails, the remainder is lower-cased:

        >>> restore_capitalization('foo-BAR-BAZ', 'Baz-Baz')
        'Foo-Bar-baz'
        >>> restore_capitalization('foo', 'foo-bar')
        'foo'

    .. note:

        Currently this function doesn't handle uppercase letters in
        the middle of the token (e.g. McDonald).

    """
    if '-' in example:
        results = []
        word_parts = word.split('-')
        example_parts = example.split('-')

        for i, part in enumerate(word_parts):
            if len(example_parts) > i:
                results.append(_make_the_same_case(part, example_parts[i]))
            else:
                results.append(part.lower())

        return '-'.join(results)

    return _make_the_same_case(word, example)
11,074
def showWelcomeAnimation(): """Shows welcome screen animation of flappy bird""" messagex = int((SCREENWIDTH - IMAGES['message'].get_width()) / 2) messagey = int(SCREENHEIGHT * 0.12) basex = 0 # amount by which base can maximum shift to left baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width() while True: for event in pygame.event.get(): if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE): pygame.quit() sys.exit() # any key if event.type == KEYDOWN: # make first flap sound and return values for mainGame # SOUNDS['wing'].play() return basex = -((-basex + 4) % baseShift) # draw sprites SCREEN.blit(IMAGES['background'], (0, 0)) SCREEN.blit(IMAGES['message'], (messagex, messagey)) SCREEN.blit(IMAGES['base'], (basex, BASEY)) pygame.display.update() FPSCLOCK.tick(FPS)
11,075
def test_loss_at_machine_precision_interval_is_zero(): """The loss of an interval smaller than _dx_eps should be set to zero.""" def f(x): return 1 if x == 0 else 0 def goal(l): return learner.loss() < 0.01 or learner.npoints >= 1000 learner = Learner1D(f, bounds=(-1, 1)) simple(learner, goal=goal) # this means loss < 0.01 was reached assert learner.npoints != 1000
11,076
def params_document_to_uuid(params_document): """Generate a UUID5 based on a pipeline components document""" return identifiers.typeduuid.catalog_uuid(params_document)
11,077
def test_parseerror_initial_attrs(arg_names, args, exp_line, exp_column): """Test initial attributes of ParseError.""" msg = args[0] posargs, kwargs = func_args(args, arg_names) # Execute the code to be tested exc = ParseError(*posargs, **kwargs) assert isinstance(exc, Error) assert len(exc.args) == 1 assert exc.args[0] == msg assert exc.line == exp_line assert exc.column == exp_column
11,078
def modify_account() -> typing.RouteReturn: """IntraRez account modification page.""" form = forms.AccountModificationForm() if form.validate_on_submit(): rezident = flask.g.rezident rezident.nom = form.nom.data.title() rezident.prenom = form.prenom.data.title() rezident.promo = form.promo.data rezident.email = form.email.data db.session.commit() utils.log_action( f"Modified account {rezident} ({rezident.prenom} {rezident.nom} " f"{rezident.promo}, {rezident.email})" ) flask.flash(_("Compte modifié avec succès !"), "success") return utils.redirect_to_next() return flask.render_template("profile/modify_account.html", title=_("Mettre à jour mon compte"), form=form)
11,079
def fizzbuzz(end=100):
    """Generate a FizzBuzz game sequence.

    FizzBuzz is a children's game where players take turns counting.
    The rules are as follows::

        1. Whenever the count is divisible by 3, the number is replaced with
           "Fizz"

        2. Whenever the count is divisible by 5, the number is replaced with
           "Buzz"

        3. Whenever the count is divisible by both 3 and 5, the number is
           replaced with "FizzBuzz"

    Parameters
    ----------
    end : int
        The FizzBuzz sequence is generated up to and including this number.

    Returns
    -------
    sequence : list of str
        The FizzBuzz sequence.

    Examples
    --------
    >>> fizzbuzz(3)
    ['1', '2', 'Fizz']
    >>> fizzbuzz(5)
    ['1', '2', 'Fizz', '4', 'Buzz']

    References
    ----------
    https://blog.codinghorror.com/why-cant-programmers-program/

    """
    sequence = []
    for i in range(1, end + 1):
        if i % (3 * 5) == 0:
            sequence.append('FizzBuzz')
        elif i % 3 == 0:
            sequence.append('Fizz')
        elif i % 5 == 0:
            sequence.append('Buzz')
        else:
            sequence.append(str(i))
    return sequence
11,080
def image_generator(mode='train'): """ mode : train, val, test """ X = [] y_age = [] y_gender = [] while True: for idx, row in df[df['set'] == mode].iterrows(): # images image_path = os.path.join(images_path, row['image_names']) image_array = _imread(image_path) image_array = _imresize(image_array, image_shape[:2]) image_array = preprocessing_img(image_array) X.append(image_array) # labels y_gender.append(row['gender_classes']) y_age.append(row['age_cat']) if len(X) == batch_size: # images X = np.asarray(X) # labels y_age = to_categorical(y_age, num_classes=num_cat_age) # y_gender = np.array(y_gender).reshape(-1, 1) # y_gender = to_categorical(y_gender, num_classes=2) # Y = np.hstack((y_age, y_gender)) yield X, y_age X = [] y_age = [] y_gender = []
11,081
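# Usage sketch for image_generator (illustrative only): assumes the
# module-level df, images_path, image_shape, batch_size and num_cat_age
# globals used above are configured.
train_gen = image_generator('train')
X_batch, y_age_batch = next(train_gen)  # one batch of images and one-hot age labels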
def generate_lane_struct(): """ Generate the datatype for the lanes dataset :return: The datatype for the lanes dataset and the fill values for the lanes dataset """ lane_top_list = [] for item in [list1 for list1 in lane_struct if list1.__class__.__name__ == "LaneTop"]: lane_top_list.append((item.name, item.type)) lane_list = [] for item in [list1 for list1 in lane_struct if list1.__class__.__name__ == "LaneSObject"]: lane_list.append((item.name, item.type)) lane_top_list.append((str_lan_obj, lane_list, 4)) d_lane = np.dtype(lane_top_list) lane_fill = np.zeros((len(lane_top_list), ), dtype=d_lane) for item in [list1 for list1 in lane_struct if list1.__class__.__name__ == "LaneTop"]: lane_fill[item.name] = item.fill_value for item in [list1 for list1 in lane_struct if list1.__class__.__name__ == "LaneSObject"]: lane_fill[str_lan_obj][item.name] = item.fill_value return d_lane, lane_fill
11,082
def clean_maya_environment(): """Contextmanager to reset necessary environment values for a clean run, then restore overwritten values. """ with temp_app_dir(): script_path = os.environ.get('MAYA_SCRIPT_PATH', '') module_path = os.environ.get('MAYA_MODULE_PATH', '') try: os.environ['MAYA_SCRIPT_PATH'] = '' os.environ['MAYA_MODULE_PATH'] = get_module_path() yield finally: os.environ['MAYA_SCRIPT_PATH'] = script_path os.environ['MAYA_MODULE_PATH'] = module_path
11,083
def notification_list(next_id=None): # noqa: E501 """notification_list Get all your certificate update notifications # noqa: E501 :param next_id: :type next_id: int :rtype: NotificationList """ return 'do some magic!'
11,084
def _delete_dest_path_if_stale(master_path, dest_path): """Delete dest_path if it does not point to cached image. :param master_path: path to an image in master cache :param dest_path: hard link to an image :returns: True if dest_path points to master_path, False if dest_path was stale and was deleted or it didn't exist """ dest_path_exists = os.path.exists(dest_path) if not dest_path_exists: # Image not cached, re-download return False master_path_exists = os.path.exists(master_path) if (not master_path_exists or os.stat(master_path).st_ino != os.stat(dest_path).st_ino): # Image exists in cache, but dest_path out of date os.unlink(dest_path) return False return True
11,085
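# Usage sketch for _delete_dest_path_if_stale (paths are illustrative):
if not _delete_dest_path_if_stale("/var/cache/images/master.img",
                                  "/var/lib/instances/uuid/disk"):
    pass  # dest link was stale or missing: re-download / re-link the image here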
def SendPost(user, password, xdvbf, cookie, session, url=URL.form):
    """
    Send the request based on the information obtained earlier.

    :param user: student ID
    :param password: password
    :param xdvbf: captcha text
    :param cookie: cookie obtained from the previous visit
    :param session: the single global session
    :param url: which resource to send the request to
    :return: response
    """
    form_data = {
        "timestamp": helper.time_stamp,
        "jwb": helper.jwb,
        "id": user,
        "pwd": password,
        "xdvfb": xdvbf
    }
    response = session.post(url, form_data, headers=helper.header,
                            cookies=requests.utils.dict_from_cookiejar(cookie))
    response.encoding = response.apparent_encoding
    return response
11,086
def css_flat(name, values=None):
    """Return every value of the property, in order.

    left -> [u'auto', u'<dimension>', u'<number>', u'<length>', u'.em', u'.ex',
            u'.vw', u'.vh', u'.vmin', u'.vmax', u'.ch', u'.rem', u'.px', u'.cm',
            u'.mm', u'.in', u'.pt', u'.pc', u'<percentage>', u'.%']
    """
    cur = CSS_DICT.get(name) or CSS_DICT.get(name[1:-1])

    if values is None:
        values = []
    if cur is None:
        return values
    for value in cur['values']:
        values.append(value)
        if value.startswith('<') and value.endswith('>'):
            values = css_flat(value, values)
    return values
11,087
def disconnect(sid): """ The function is called when a client disconnects """ print('Client disconnected')
11,088
def prepare_mqtt(MQTT_SERVER, MQTT_PORT=1883): """ Initializes MQTT client and connects to a server """ client = mqtt.Client() client.on_connect = on_connect client.on_message = on_message client.connect(MQTT_SERVER, MQTT_PORT, 60) return client
11,089
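# Usage sketch for prepare_mqtt (broker hostname is illustrative; the
# on_connect/on_message callbacks are assumed to be defined in this module):
client = prepare_mqtt("test.mosquitto.org")
client.loop_forever()  # blocks and dispatches MQTT callbacks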
def load_inputs(mod, switch_data, inputs_dir): """ Import data to support unit commitment. The following files are expected in the input directory. All files and fields are optional. If you only want to override default values for certain columns in a row, insert a dot . into the other columns. generation_projects_info.tab GENERATION_PROJECT, gen_min_load_fraction, gen_startup_fuel, gen_startup_om Note: If you need to specify minimum loading fraction or startup costs for a non-fuel based generator, you must put a dot . in the gen_startup_fuel column to avoid an error. gen_timepoint_commit_bounds.tab GENERATION_PROJECT, TIMEPOINT, gen_min_commit_fraction_TP, gen_max_commit_fraction_TP, gen_min_load_fraction_TP """ switch_data.load_aug( optional=True, filename=os.path.join(inputs_dir, 'generation_projects_info.tab'), auto_select=True, param=(mod.gen_min_load_fraction, mod.gen_startup_fuel, mod.gen_startup_om, mod.gen_min_uptime, mod.gen_min_downtime)) switch_data.load_aug( optional=True, filename=os.path.join(inputs_dir, 'gen_timepoint_commit_bounds.tab'), auto_select=True, param=(mod.gen_min_commit_fraction, mod.gen_max_commit_fraction, mod.gen_min_load_fraction_TP))
11,090
def process_message_impl(message): """Called on a background thread once for each message. It is the responsibility of the caller to gracefully handle exceptions""" log.debug(f"Processing: {message}") data_map = json.loads(message.data)['payload'] log.notify(f"pubsub: processing {json.dumps(data_map)}") assert "action" in data_map.keys() action = data_map["action"] if action == "send_messages": assert "ids" in data_map.keys() assert "messages" in data_map.keys() log.audit(f"rapidpro: send_messages {json.dumps(data_map)}") # { # "action" : "send_messages" # "ids" : [ "nook-uuid-23dsa" ], # "messages" : [ "🐱" ] # } # TODO: Handle lookup failures mappings = phone_number_uuid_table.uuid_to_data_batch(data_map["ids"]) # HACK: Filter out urns that don't start with "tel:+" as # RapidPro sometimes crashes on sending messages to them # These are working phone numbers though, and we can receive # messages from them, so the issue has been raised with RapidPro dirty_urns = list(mappings.values()) urns = [] for urn in dirty_urns: # if not urn.find("tel:+") >= 0: # print (f"WARNING: SKIPPING SEND TO bad {urn}") # continue urns.append(urn) # Break into groups of 100 urn_groups = [] group_start = 0 group_end = 100 while group_end < len(urns): urn_groups.append(urns[group_start:group_end]) group_start = group_end group_end += 100 urn_groups.append(urns[group_start:]) # Assert that groups contain all of the original urns assert set(urns) == set(itertools.chain.from_iterable(urn_groups)) group_num = 0 while len(urn_groups) > 0: group_num += 1 urns = urn_groups.pop(0) for text in data_map["messages"]: retry_count = 0 while True: log.debug(f"sending group {group_num}: {len(urns)} sms") try: with rapidpro_lock: rapidpro_client.send_message_to_urns(text, urns, interrupt=True) log.debug(f"sent {len(urns)} sms") # in addition to notifying about the send_message command # notify for each URN so we can get a view of how many people are being messaged # send successful - exit loop break except HTTPError as e: retry_exception = e # fall through to retry except TembaRateExceededError as e: retry_exception = e # fall through to retry except TembaBadRequestError as e: # recast underlying exception so that the underlying details can be logged raise Exception(f"Exception sending sms: {e.errors}") from e last_failure_tokens.append(utcnow()) # expire any tokens that are more than 5 minutes old expired_tokens = [] now = utcnow() for token in last_failure_tokens: if (now - token).total_seconds() > (5 * 60): expired_tokens.append(token) for token in expired_tokens: log.warning(f"Removing failure token: {token.isoformat()}") last_failure_tokens.remove(token) # Do not retry large batch send-multis # or there are more than 10 exceptions in 5 min ... prefer to crash and cause a page if len(urns) <= 15 and retry_count < len(retry_wait_times) and len(last_failure_tokens) < 10: wait_time_sec = retry_wait_times[retry_count] log.warning(f"Send failed: {retry_exception}") log.warning(f" will retry send after {wait_time_sec} seconds") time.sleep(wait_time_sec) retry_count += 1 continue log.warning(f"Failing after {retry_count} retries, failure_tokens: {last_failure_tokens}") raise retry_exception log.debug(f"Acking message") message.ack() log.info(f"Done send_messages") return raise Exception(f"Unknown action: {action}")
11,091
def game():
    """ Main program function. """
    # set up logger
    logger.setLevel(logging.INFO)

    # Initialize Pygame and set up the window
    pygame.init()

    size = [SCREEN_WIDTH, SCREEN_HEIGHT]
    screen = pygame.display.set_mode(size)

    pygame.display.set_caption("SeatSmart")

    # Create our objects and set the data
    done = False
    clock = pygame.time.Clock()

    # Create an instance of the Game class
    game = Game()

    # display
    console.print(Display.blocks(KEYS_DICT))

    # Main game loop
    with console.status("[green]Running simulation ...") as status:
        while not done:
            # Process events (keystrokes, mouse clicks, etc)
            done = game.process_events()

            # Update object positions, check for collisions
            game.run_logic()

            # Draw the current frame
            game.display_frame(screen)

            # Pause for the next frame
            clock.tick(60)

    # Close window and exit
    pygame.quit()
11,092
def record_iterator_class(record_type): """ Gets the record iterator for a given type A way to abstract the construction of a record iterator class. :param record_type: the type of file as string :return: the appropriate record iterator class """ if record_type == 'bib': return BibtexRecordIterator elif record_type == 'froac' or record_type == 'xml': return FroacRecordIterator elif record_type == 'isi': return IsiRecordIterator else: raise ValueError("This type {} has not been implemented yet".format( record_type ))
11,093
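# Usage sketch for record_iterator_class (illustrative only):
iterator_cls = record_iterator_class('bib')    # -> BibtexRecordIterator
iterator_cls = record_iterator_class('froac')  # -> FroacRecordIterator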
def start_service(service: Type[LabDataService], port: int) -> None: """ Start a service in a ThreadedServer. """ threaded_server = rpyc.ThreadedServer( service=service, port=port, protocol_config={"allow_public_attrs": True, "allow_pickle": True}, ) threaded_server.start()
11,094
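# Usage sketch for start_service (MyService is a hypothetical LabDataService
# subclass defined elsewhere; the port number is illustrative). The call
# blocks until the server is stopped:
start_service(MyService, port=18861)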
def sort_servers_closest(servers: Sequence[str]) -> Dict[str, float]:
    """Sorts a list of servers by http round-trip time

    Params:
        servers: sequence of http server urls
    Returns:
        dict mapping url to rtt in seconds, ordered by ascending rtt,
        excluding failed servers (possibly empty)
    """
    if not {urlparse(url).scheme for url in servers}.issubset({"http", "https"}):
        raise TransportError("Invalid server urls")

    get_rtt_jobs = set(
        gevent.spawn(lambda url: (url, get_http_rtt(url)), server_url) for server_url in servers
    )
    # these tasks should never raise, returns None on errors
    gevent.joinall(get_rtt_jobs, raise_error=False)  # block and wait tasks
    sorted_servers: Dict[str, float] = dict(
        sorted((job.value for job in get_rtt_jobs if job.value[1] is not None), key=itemgetter(1))
    )
    log.debug("Matrix homeserver RTTs", rtts=sorted_servers)
    return sorted_servers
11,095
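# Usage sketch for sort_servers_closest (homeserver URLs and timings are
# illustrative):
rtts = sort_servers_closest(["https://matrix.org", "https://matrix.example.com"])
# e.g. {"https://matrix.org": 0.087, "https://matrix.example.com": 0.215}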
def TestMotion():
    """Exercise the UUV motion simulator controller with a simple scenario."""
    #clear registries
    storing.Store.Clear()
    #deeding.Deed.Clear()
    doing.Doer.Clear()

    store = storing.Store(name = 'Test')

    #CreateActions(store)
    print("\nTesting Motion Sim Controller")
    simulator = SimulatorMotionUuv(name = 'simulatorMotionTest', store = store,
                                   group = 'simulator.motion.test',
                                   speed = 'state.speed', speedRate = 'state.speedRate',
                                   depth = 'state.depth', depthRate = 'state.depthRate',
                                   pitch = 'state.pitch', pitchRate = 'state.pitchRate',
                                   altitude = 'state.altitude',
                                   heading = 'state.heading', headingRate = 'state.headingRate',
                                   position = 'state.position',
                                   rpm = 'goal.rpm', stern = 'goal.stern', rudder = 'goal.rudder',
                                   current = 'scenario.current', bottom = 'scenario.bottom',
                                   prevPosition = 'state.position',
                                   parms = dict(rpmLimit = 1500.0, sternLimit = 20.0,
                                                rudderLimit = 20.0,
                                                gs = 0.0025, gpr = -0.5, ghr = -0.5))

    store.expose()

    rpm = store.fetch('goal.rpm').update(value = 500.0)
    stern = store.fetch('goal.stern').update(value = 0.0)
    rudder = store.fetch('goal.rudder').update(value = 0.0)
    current = store.fetch('scenario.current').update(north = 0.0, east = 0.0)
    bottom = store.fetch('scenario.bottom').update(value = 50.0)
    prevPosition = store.fetch('scenario.startposition').update(north = 0.0, east = 0.0)

    simulator.restart()
    simulator._expose()
    store.advanceStamp(0.125)
    simulator.update()
    simulator._expose()
11,096
def palgo( dumbalgo: type[DumbAlgo], space: Space, fixed_suggestion_value: Any ) -> SpaceTransformAlgoWrapper[DumbAlgo]: """Set up a SpaceTransformAlgoWrapper with dumb configuration.""" return create_algo(algo_type=dumbalgo, space=space, value=fixed_suggestion_value)
11,097
def english_to_french(english_text):
    """ Translate English text to French using the language translator service. """
    translation = language_translator.translate(text=english_text, model_id = "en-fr").get_result()
    french_text = translation['translations'][0]['translation']
    return french_text
11,098
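# Usage sketch for english_to_french (assumes language_translator was
# initialised elsewhere with valid Watson Language Translator credentials):
print(english_to_french("Hello, how are you?"))  # e.g. "Bonjour, comment allez-vous ?"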
def noise_get_turbulence(
    n: tcod.noise.Noise,
    f: Sequence[float],
    oc: float,
    typ: int = NOISE_DEFAULT,
) -> float:
    """Return the turbulence noise sampled from the ``f`` coordinate.

    Args:
        n (Noise): A Noise instance.
        f (Sequence[float]): The point to sample the noise from.
        oc (float): The level of detail (number of octaves).  Should be more than 1.
        typ (int): The noise algorithm to use.

    Returns:
        float: The sampled noise value.
    """
    return float(
        lib.TCOD_noise_get_turbulence_ex(
            n.noise_c, ffi.new("float[4]", f), oc, typ
        )
    )
11,099
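# Usage sketch for noise_get_turbulence (requires python-tcod; the sample
# point and octave count are illustrative):
import tcod.noise

noise = tcod.noise.Noise(dimensions=2)
value = noise_get_turbulence(noise, [0.3, 0.7, 0.0, 0.0], oc=4.0)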