content
stringlengths
22
815k
id
int64
0
4.91M
def datetime_to_hours(dt):
    """Convert a ``datetime.timedelta`` to a number of hours.

    Parameters
    ----------
    dt : datetime.timedelta
        Duration to convert.

    Returns
    -------
    float
        The total duration in hours, including fractional hours.
    """
    # total_seconds() folds in days, seconds AND microseconds; the previous
    # `dt.days * 24 + dt.seconds / 3600` silently dropped microseconds.
    return dt.total_seconds() / 3600
30,700
def create_data_element(ds: "Dataset") -> "DataElement":
    """Return a ``gdcm.DataElement`` for the *Pixel Data*.

    Parameters
    ----------
    ds : dataset.Dataset
        The :class:`~pydicom.dataset.Dataset` containing the *Pixel Data*.

    Returns
    -------
    gdcm.DataElement
        The converted *Pixel Data* element.
    """
    tsyntax = ds.file_meta.TransferSyntaxUID
    # (7FE0,0010) is the standard DICOM tag for Pixel Data.
    data_element = gdcm.DataElement(gdcm.Tag(0x7fe0, 0x0010))
    if tsyntax.is_compressed:
        # Compressed pixel data is encapsulated: one fragment per frame for
        # multi-frame datasets, otherwise a single defragmented blob.
        if getattr(ds, 'NumberOfFrames', 1) > 1:
            pixel_data_sequence = (
                pydicom.encaps.decode_data_sequence(ds.PixelData)
            )
        else:
            pixel_data_sequence = [
                pydicom.encaps.defragment_data(ds.PixelData)
            ]
        fragments = gdcm.SequenceOfFragments.New()
        for pixel_data in pixel_data_sequence:
            fragment = gdcm.Fragment()
            fragment.SetByteStringValue(pixel_data)
            fragments.AddFragment(fragment)
        # __ref__() hands gdcm a reference to the smart-pointer payload.
        data_element.SetValue(fragments.__ref__())
    else:
        # Uncompressed transfer syntax: raw bytes can be set directly.
        data_element.SetByteStringValue(ds.PixelData)
    return data_element
30,701
def thermo_paths(spc_dct, spc_locs_dct, spc_mods, run_prefix):
    """ Set up the path for saving the pf input and output.
        Placed in a MESSPF, NASA dirs high in run filesys.

        Builds, per species -> per conformer-locator -> per species model,
        a pair of paths: (MESS/PF partition-function dir, THERM/NASA dir).
        Extra keys 'mod_total' (per conformer) and 'spc_total' (per species)
        hold aggregate paths.
    """
    thm_path_dct = {}
    for spc_name in spc_locs_dct:
        spc_thm_path_dct = {}
        spc_info = sinfo.from_dct(spc_dct[spc_name])
        spc_formula = automol.inchi.formula_string(spc_info[0])
        # Directory prefix derived from the species formula and InChIKey.
        thm_prefix = [spc_formula, automol.inchi.inchi_key(spc_info[0])]
        spc_locs_lst = spc_locs_dct[spc_name]
        for sidx, spc_locs in enumerate(spc_locs_lst, start=1):
            spc_mod_thm_path_dct = {}
            for midx, mod in enumerate(spc_mods):
                # Unique index per (conformer, model) pair.
                # NOTE(review): sidx*10 + midx assumes < 10 models per
                # species, otherwise indices collide — confirm upstream.
                idx = sidx * 10 + midx
                spc_mod_thm_path_dct[mod] = (
                    job_path(
                        run_prefix, 'MESS', 'PF',
                        thm_prefix, locs_idx=idx),
                    job_path(
                        run_prefix, 'THERM', 'NASA',
                        thm_prefix, locs_idx=idx)
                )
            # Aggregate over all models for this conformer locator.
            spc_mod_thm_path_dct['mod_total'] = (
                job_path(
                    run_prefix, 'MESS', 'PF',
                    thm_prefix, locs_idx=sidx),
                job_path(
                    run_prefix, 'THERM', 'NASA',
                    thm_prefix, locs_idx=sidx)
            )
            spc_thm_path_dct[tuple(spc_locs)] = spc_mod_thm_path_dct
        # Aggregate over all conformer locators for this species.
        spc_thm_path_dct['spc_total'] = (
            job_path(
                run_prefix, 'MESS', 'PF',
                thm_prefix, locs_idx=0),
            job_path(
                run_prefix, 'THERM', 'NASA',
                thm_prefix, locs_idx=0)
        )
        thm_path_dct[spc_name] = spc_thm_path_dct
    return thm_path_dct
30,702
def create_da_model_std(filename, eta_rho=10, xi_rho=10, s_rho=1, reftime=default_epoch, clobber=False, cdl=None, title="My Model STD"): """ Create an time varying model standard deviation file Parameters ---------- filename : string name and path of file to create eta_rho: int, optional number of rows in the eta direction xi_rho: int, optional number of columns in the xi direction s_rho: int, optional number of s-levels reftime: datetime, optional date of epoch for time origin in netcdf clobber: bool, optional If True, clobber any existing files and recreate. If False, use the existing file definition cdl: string, optional, Use the specified CDL file as the definition for the new netCDF file. title: string, optional netcdf attribute title Returns ------- nc, netCDF4 object """ # Generate the Structure dims, vars, attr = cdl_parser( _cdl_dir + "s4dvar_std_m.cdl" if cdl is None else cdl) # Fill in the appropriate dimension values dims = _set_grid_dimensions(dims, eta_rho, xi_rho, s_rho) vars = _set_time_ref(vars, "ocean_time", reftime) # Create the file _nc = ncgen(filename, dims=dims, vars=vars, attr=attr, clobber=clobber, title=title) # Return the new file return _nc
30,703
def thermoEntry(request, section, subsection, index):
    """
    A view for showing an entry in a thermodynamics database.

    `index` selects the entry: a positive index looks the entry up
    directly; 0 redirects to the lowest-indexed entry and -1 to the
    highest-indexed one.
    """
    from rmgpy.chemkin import writeThermoEntry
    # Load the thermo database if necessary
    loadDatabase('thermo', section)
    # Determine the entry we wish to view
    try:
        database = getThermoDatabase(section, subsection)
    except ValueError:
        raise Http404
    index = int(index)
    if index != 0 and index != -1:
        # Linear scan; the for/else raises 404 when no entry matches.
        for entry in database.entries.values():
            if entry.index == index:
                break
        else:
            raise Http404
    else:
        # 0 / -1 are sentinels: redirect to the first / last real entry.
        if index == 0:
            index = min(entry.index for entry in database.entries.values() if entry.index > 0)
        else:
            index = max(entry.index for entry in database.entries.values() if entry.index > 0)
        return HttpResponseRedirect(reverse(thermoEntry,
                                            kwargs={'section': section,
                                                    'subsection': subsection,
                                                    'index': index,
                                                    }))
    # Get the structure of the item we are viewing
    structure = getStructureInfo(entry.item)
    # Prepare the thermo data for passing to the template
    # This includes all string formatting, since we can't do that in the template
    if isinstance(entry.data, str):
        # The entry is a link to another entry; resolve its index.
        thermo = ['Link', database.entries[entry.data].index]
    else:
        thermo = entry.data
    # Get the thermo data for the molecule
    nasa_string = None
    if isinstance(entry.item, Molecule):
        species = Species(molecule=[entry.item])
        species.generateResonanceIsomers()
        ThermoDatabase().findCp0andCpInf(species, thermo)
        nasa_string = ''
        try:
            if isinstance(thermo, NASA):
                nasa = thermo
            else:
                nasa = thermo.toNASA(Tmin=100.0, Tmax=5000.0, Tint=1000.0)
            species.thermo = nasa
            nasa_string = writeThermoEntry(species)
        # NOTE(review): bare except silently swallows conversion failures,
        # leaving nasa_string as '' — consider narrowing to Exception and
        # logging.
        except:
            pass
    referenceType = ''
    reference = entry.reference
    return render_to_response('thermoEntry.html',
                              {'section': section,
                               'subsection': subsection,
                               'databaseName': database.name,
                               'entry': entry,
                               'structure': structure,
                               'reference': reference,
                               'referenceType': referenceType,
                               'thermo': thermo,
                               'nasa_string': nasa_string},
                              context_instance=RequestContext(request))
30,704
def get_clean_iris():
    """Return the cleaned iris dataset as a DataFrame.

    If a cleaned copy exists in data/processed/, load and return it.
    Otherwise fetch the raw data, normalise the column names (lower-case,
    strip './()& ' characters and the 'cm' unit suffix) and persist the
    result for next time.
    """
    processed_path = f"{path_}/data/processed/iris.csv"
    if os.path.exists(processed_path):
        print("Retrieving clean data from backup...")
        return pd.read_csv(processed_path)
    df = get_raw_iris()
    df.columns = [
        ''.join(ch for ch in col.lower() if ch not in './()& ').replace('cm', '')
        for col in df.columns
    ]
    print("Persisting cleaned iris data...")
    # index=False: writing the implicit RangeIndex would re-surface as an
    # 'Unnamed: 0' column on the next read_csv, so the first and subsequent
    # calls would return DataFrames with different schemas.
    df.to_csv(processed_path, index=False)
    return df
30,705
def __string(string, name="", internal=False):
    """
    Internal helper performing basic string validation.

    Reports a missing value when *string* is ``None`` and an empty value
    when it is ``""``; any other value passes silently.
    """
    checks = (
        (string is None, "The %s is missing." % name),
        (string == "", "The %s must not be empty." % name),
    )
    for failed, message in checks:
        if failed:
            __ex(message, internal)
30,706
def read_template(filename):
    """Read a file and wrap its contents in a ``Template``.

    Arguments:
        filename {str} -- path to a UTF-8 encoded template file

    Returns:
        Template -- template built from the file's full contents
    """
    with io.open(filename, encoding='utf-8') as template_file:
        return Template(template_file.read())
30,707
def S(a):
    """Return the 3x3 skew-symmetric (cross-product) matrix of *a*.

    ``S(a) @ b`` equals ``a x b`` for any 3-vector ``b``.
    """
    assert a.shape == (3,), "Input vector is not a numpy array of size (3,)"
    ax, ay, az = a
    return np.asarray([
        [0.0, -az,  ay],
        [az,  0.0, -ax],
        [-ay,  ax, 0.0],
    ])
30,708
def bulk_get_subscriber_user_ids(
    stream_dicts: Collection[Mapping[str, Any]],
    user_profile: UserProfile,
    subscribed_stream_ids: Set[int],
) -> Dict[int, List[int]]:
    """sub_dict maps stream_id => whether the user is subscribed to that stream."""
    # Keep only the streams whose subscriber list this user is allowed
    # to see; access failures are simply skipped.
    target_stream_dicts = []
    for stream_dict in stream_dicts:
        stream_id = stream_dict["id"]
        is_subscribed = stream_id in subscribed_stream_ids
        try:
            validate_user_access_to_subscribers_helper(
                user_profile,
                stream_dict,
                lambda user_profile: is_subscribed,
            )
        except JsonableError:
            continue
        target_stream_dicts.append(stream_dict)

    recip_to_stream_id = {stream["recipient_id"]: stream["id"] for stream in target_stream_dicts}
    recipient_ids = sorted(stream["recipient_id"] for stream in target_stream_dicts)

    # Every requested stream gets an entry, even inaccessible/empty ones.
    result: Dict[int, List[int]] = {stream["id"]: [] for stream in stream_dicts}
    if not recipient_ids:
        return result

    """
    The raw SQL below leads to more than a 2x speedup when tested with
    20k+ total subscribers.  (For large realms with lots of default
    streams, this function deals with LOTS of data, so it is important
    to optimize.)
    """

    query = SQL(
        """
        SELECT
            zerver_subscription.recipient_id,
            zerver_subscription.user_profile_id
        FROM
            zerver_subscription
        WHERE
            zerver_subscription.recipient_id in %(recipient_ids)s AND
            zerver_subscription.active AND
            zerver_subscription.is_user_active
        ORDER BY
            zerver_subscription.recipient_id,
            zerver_subscription.user_profile_id
        """
    )
    cursor = connection.cursor()
    cursor.execute(query, {"recipient_ids": tuple(recipient_ids)})
    rows = cursor.fetchall()
    cursor.close()

    """
    Using groupby/itemgetter here is important for performance, at scale.
    It makes it so that all interpreter overhead is just O(N) in nature.
    """
    # Rows arrive sorted by recipient_id (see ORDER BY), which is what
    # makes the single-pass groupby correct.
    for recip_id, recip_rows in itertools.groupby(rows, itemgetter(0)):
        user_profile_ids = [r[1] for r in recip_rows]
        stream_id = recip_to_stream_id[recip_id]
        result[stream_id] = list(user_profile_ids)

    return result
30,709
def adjust_learning_rate(optimizer, step):
    """Anneal the learning rate at fixed global steps.

    Overwrites every param group's ``lr`` with 5e-4 at step 500k, 3e-4 at
    step 1M and 1e-4 at step 2M; any other step leaves the optimizer
    untouched.  (The previous docstring claimed a decay-by-10-every-30-
    epochs schedule, which did not match the code.)

    :param optimizer: optimizer whose ``param_groups`` are updated in place
    :param step: current global training step
    :return: the (possibly updated) optimizer
    """
    schedule = {500000: 0.0005, 1000000: 0.0003, 2000000: 0.0001}
    new_lr = schedule.get(step)
    if new_lr is not None:
        for param_group in optimizer.param_groups:
            param_group['lr'] = new_lr
    return optimizer
30,710
def get_menu_permissions(obj):
    """Return the menus a user is allowed to see.

    :param obj: id of the user (taken from ``request.user``)
    :return: queryset of ``Menu`` rows granted either directly to the user
        or to the user's groups, resolved via table joins
    """
    menu_obj = Menu.objects  # menu manager
    umids = [x.id for x in UserMenu.objects.get(user=obj).menu.all()]
    isgroups = [x.id for x in User.objects.get(id=obj).groups.all()]  # groups the user belongs to
    if isgroups:
        # Menus granted to the user's groups.
        # NOTE(review): the trailing [0] keeps only the first group that has
        # any menus and raises IndexError when no group has menus — confirm
        # this is the intended behavior.
        gmids = [[m.id for m in x.menu.all()] for x in GroupMenu.objects.filter(group__in=isgroups) if x.menu.all()][0]
        menus = menu_obj.filter(Q(id__in=gmids) | Q(id__in=umids))
    else:
        menus = menu_obj.filter(id__in=umids)  # menus granted directly to the user
    return menus
30,711
def process_package(
    package_path: str,
    include_patterns: typing.Optional[typing.List[str]] = None,
    exclude_patterns: typing.Optional[typing.List[str]] = None,
) -> SchemaMap:
    """
    Recursively process a package source folder and return all json schemas
    from the top level functions it can find.

    You can use optional include/exclude patterns to filter the functions you
    want to process. These patterns are also applied to the file names that
    are processed, with the exception of __init__.py, which is always
    processed.

    :param package_path: The path to the your python package
    :param include_patterns: A list of wildcard patterns to match the function
        names you want to include
    :param exclude_patterns: A list of wildcard patterns to match the function
        names you want to exclude
    :return: A dictionary containing your function names and their json schemas
    """
    schemas = {}
    for package_chain, package_file_path in package_iterator(
        package_path, include_patterns, exclude_patterns
    ):
        file_schemas = process_file(package_file_path, include_patterns, exclude_patterns)
        # Qualify each function name with its dotted package chain.
        for func_name, func_schema in file_schemas.items():
            schemas[f"{package_chain}.{func_name}"] = func_schema
    return schemas
30,712
def plotMaximumElementSize( GraphInstance ):
    """
    At the beginning the function cleans the GraphInstance from the previous
    plot. Then it adjusts the following lines with corresponding labels and
    colors on the plot:
        MaxElementSize - Lamda
        MaxElementSize - Lamda_Eff
        MaxElementSize - ElementSize
    :param GraphInstance: an instance of GraphObject class
    :return: None (the GraphInstance is mutated in place)
    """
    GraphInstance.cleanGraph( )
    GraphInstance.cleanLabels( 3 )
    GraphInstance.Graph.yaxis.axis_label = "Wave length / Max. Element Size in m"
    GraphInstance.Graph.xaxis.axis_label = "Frequency in Hz"

    # ...................... LamdaH graph .......................
    # 'Bending Wave Length'
    GraphInstance.GraphData[ 0 ].data \
        = dict( XData = GraphInstance.getRange( ),
                YData = GraphInstance.Containers[ "MaxElementSize" ][ "Lamda" ] )
    GraphInstance.defineLine( 0, 'Bending Wave Length', GREEN, 'dotted' )

    # ..................... LamdaH_Effective graph ...........................
    # 'Effective Bending Wave Length'
    GraphInstance.GraphData[ 1 ].data \
        = dict( XData = GraphInstance.getRange( ),
                YData = GraphInstance.Containers[ "MaxElementSize" ][ "Lamda_Eff" ] )
    GraphInstance.defineLine( 1, 'Effective Bending Wave Length', GREEN, 'solid' )

    # ........................ ElementSize graph ...............................
    # 'Maximum Element Size \n(Quadratic Shape Functions)'
    GraphInstance.GraphData[ 2 ].data \
        = dict( XData = GraphInstance.getRange( ),
                YData = GraphInstance.Containers[ "MaxElementSize" ][ "ElementSize" ] )
    GraphInstance.defineLine( 2, 'Max. Elem. Size: quadr. shape fct.', GRAY, 'solid' )
30,713
def find_file(path, include_str='t1', exclude_str='lesion'):
    """Find files in *path* whose names contain ``include_str`` and do not
    contain ``exclude_str``.

    Parameters
    ----------
    path : str
        Path to the directory where the files are stored.
    include_str : str or None
        Substring which must be included in the name of the file
        (``None`` disables this filter).
    exclude_str : str or None
        Substring which may not be included in the name of the file
        (``None`` disables this filter).

    Returns
    -------
    files : list
        List of filenames matching the given criteria.
    """
    files = os.listdir(path)
    if include_str is not None:
        files = [n_file for n_file in files if include_str in n_file]
    if exclude_str is not None:
        files = [n_file for n_file in files if exclude_str not in n_file]
    return files
30,714
def init_loopback_devices(loopdevice_numbers):
    """Create and initialize loopback devices.

    For each N in [0, loopdevice_numbers) that does not yet exist, creates
    the /dev/loopN block device node and assigns it to root:disk.
    Requires root privileges; mutates the host's /dev tree.
    """
    for i in six.moves.range(0, loopdevice_numbers):
        if not os.path.exists('/dev/loop%s' % i):
            # 'b', '7', N: block device, loop-driver major number 7, minor N.
            subproc.check_call(['mknod', '-m660', '/dev/loop%s' % i, 'b', '7',
                                str(i)])
            subproc.check_call(['chown', 'root.disk', '/dev/loop%s' % i])
30,715
def receiver(ip, port, target_loc=None):
    """ receive a file with multi-thread

    :param ip: the listening IP
    :param port: the listening PORT
    :param target_loc: location of file
    :return: None

    The first accepted connection is the control channel; it sends a
    header "file_name,total,...\n".  Each later connection delivers one
    numbered chunk, handled by the module-level `receive` worker, which
    updates the shared sets `file_received` / `file_receiving` and
    signals `evnt`.  Chunks are reassembled in order into target_loc.
    """
    sct = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sct.bind((ip, port))
    sct.listen(5)
    initialed = False
    thd_list = []
    total = None
    while True:
        conn, addr = sct.accept()
        if not initialed:
            # Control channel: read until the newline that terminates the
            # "file_name,total,..." header.
            ctrl_conn = conn
            buf = []
            end = -1
            while end == -1:
                bys = ctrl_conn.recv(4096)
                end = bys.find(b'\n')
                buf.append(bys)
            file_name, total, _ = b"".join(buf).decode("utf8").split(sep=',')
            total_thd = total = int(total)
            # Chunks are staged in a throwaway directory named after the file.
            temp_dir = pathlib.Path(tempfile.mkdtemp(), file_name)
            os.mkdir(str(temp_dir))
            file_need = set(str(x) for x in range(0, total))
            initialed = True
            ctrl_conn.send(b"\n")  # ack the header
            # print(file_need,total_thd)
            # sys.exit(0)
        else:
            # Data connection: spawn a worker thread per chunk.
            thd = threading.Thread(group=None, target=receive,
                                   args=(conn, temp_dir))
            thd.start()
            thd_list.append(thd)
            if len(thd_list) >= total_thd:
                # All expected workers launched: wait for completion and
                # request retransmission of any missing chunks.
                while True:
                    if len(file_received) == total:
                        break
                    evnt.wait()
                    file_remain = file_need - file_received - file_receiving
                    # print(total_thd, ":", file_received, ",", file_receiving,
                    #       ",", file_remain)
                    # time.sleep(180)
                    if file_remain:
                        # Ask the sender to resend the missing chunk ids.
                        total_thd = len(file_remain)
                        file_remain = ",".join(file_remain)
                        file_remain += "\n"
                        ctrl_conn.send(file_remain.encode())
                        break
                    else:
                        # NOTE(review): removing from thd_list while
                        # iterating it skips elements — confirm intended.
                        for i in thd_list:
                            i.join()
                            thd_list.remove(i)
                if len(file_received) == total:
                    break
    # Signal completion, then stitch the numbered chunks back together.
    ctrl_conn.send(b"\n")
    ctrl_conn.close()
    sct.close()
    with open(pathlib.Path(target_loc, file_name), 'wb') as f:
        for i in range(0, total):
            with open(pathlib.Path(temp_dir, str(i)), 'rb') as j:
                f.write(j.read())
    shutil.rmtree(str(temp_dir))
30,716
def parse_datasets(dataset_option, database):
    """Parse dataset names from the command line.

    Valid forms of input:
        - None (returns None, meaning "use every dataset in the database")
        - Comma-delimited list of names
        - File of names (one per line)

    Every parsed name is validated against the datasets present in
    *database*; a ValueError listing any unknown names is raised.
    """
    # `is None` (identity), not `== None` (equality) — the Pythonic idiom.
    if dataset_option is None:
        print(("No dataset names specified, so filtering process will use all "
               "datasets present in the database."))
        return None
    elif os.path.isfile(dataset_option):
        print("Parsing datasets from file %s..." % (dataset_option))
        datasets = []
        with open(dataset_option) as f:
            for line in f:
                datasets.append(line.strip())
    else:
        datasets = dataset_option.split(",")

    # Now validate the datasets
    with sqlite3.connect(database) as conn:
        cursor = conn.cursor()
        valid_datasets = qutils.fetch_all_datasets(cursor)
        invalid_datasets = [dset for dset in datasets
                            if dset not in valid_datasets]
        if len(invalid_datasets) > 0:
            raise ValueError(("Problem parsing datasets. The following names are "
                              "not in the database: '%s'. \nValid dataset names: '%s'")
                             % (", ".join(invalid_datasets),
                                ", ".join(valid_datasets)))
        else:
            print("Parsed the following dataset names successfully: %s" % \
                  (", ".join(datasets)))
    return datasets
30,717
def remove_suffix(input_string, suffix):
    """Backport of ``str.removesuffix``; earlier Pythons lack it (see docs)."""
    strip_it = bool(suffix) and input_string.endswith(suffix)
    return input_string[: -len(suffix)] if strip_it else input_string
30,718
def ais_TranslatePointToBound(*args):
    """Translate a point along a direction onto a bounding box (SWIG wrapper).

    Thin pass-through to the native ``_AIS.ais_TranslatePointToBound``.

    :param aPoint:
    :type aPoint: gp_Pnt
    :param aDir:
    :type aDir: gp_Dir
    :param aBndBox:
    :type aBndBox: Bnd_Box &
    :rtype: gp_Pnt
    """
    return _AIS.ais_TranslatePointToBound(*args)
30,719
def _shaded_interval_example_1(price_by_date): """# Line with datetime x axis""" # Plot the data ch = chartify.Chart(blank_labels=True, x_axis_type='datetime') ch.set_title("Area with second_y_column") ch.set_subtitle( "Use alone or combined with line graphs to represent confidence." ) ch.plot.area( data_frame=price_by_date, x_column='date', y_column='lower_ci', second_y_column='upper_ci') # Reset to ensure same color of line & shaded interval ch.style.color_palette.reset_palette_order() ch.plot.line( data_frame=price_by_date, x_column='date', y_column='mean') ch.show(_OUTPUT_FORMAT)
30,720
def set_todays_alarms():
    """This function will set alarms due today

    Function takes from a list which stores alarms that were set for a day
    other than the day they were set. It checks if the alarm is to be set on
    the current day. If it is it schedules it in the scheduler for the day,
    otherwise it does nothing.

    Relies on module-level state: ``events``, ``full_date``, ``events_list``,
    the scheduler ``s`` and the Flask ``request``.
    """
    for event in events:
        # event["date"] is an ISO-style string: [0:4]=year, [5:7]=month,
        # [8:10]=day.
        if full_date.year == int(event["date"][0:4]) and full_date.month == int(event["date"][5:7]) and full_date.day == int(event["date"][8:10]):
            # NOTE(review): alarm_content() and request.args are evaluated
            # NOW (at scheduling time), not when the alarm fires — confirm
            # this is intended.
            events_list.append({"title": event["event_title"], "content": s.enter(int(event["delay"]), 1, alarm_runner, (alarm_content(), request.args.get("two")))})
            logging.info('Alarm set for today in ' + str(event["delay"]))
        else:
            continue
30,721
def unet_deepflash2(pretrained=None, **kwargs):
    """ U-Net model optimized for deepflash2

    pretrained (str): specifies the dataset for pretrained weights
    """
    # Pure delegation to the private builder.
    return _unet_deepflash2(pretrained=pretrained, **kwargs)
30,722
def get_bga_game_list():
    """Gets a geeklist containing all games currently on Board Game Arena."""
    url = "https://www.boardgamegeek.com/xmlapi2/geeklist/252354"
    return requests.get(url).text
30,723
def artifact(name: str, path: str):
    """Decorate a step to create a KFP HTML artifact.

    Apply this decorator to a step to create a Kubeflow Pipelines
    artifact (https://www.kubeflow.org/docs/pipelines/sdk/output-viewer/).
    In case the path does not point to a valid file, the step will fail
    with an error.

    To generate more than one artifact per step, apply the same decorator
    multiple times, as shown in the example below.

    ```python
    @artifact(name="artifact1", path="./figure.html")
    @artifact(name="artifact2", path="./plot.html")
    @step(name="artifact-generator")
    def foo():
        # ...
        # save something to plot.html and figure.html
        # ...
    ```

    **Note**: Currently the only supported format is HTML.

    Args:
        name: Artifact name
        path: Absolute path to an HTML file
    """
    def _decorator(step: Step):
        # Only objects produced by @step carry the artifacts list.
        if not isinstance(step, Step):
            raise ValueError("You should decorate functions that are decorated"
                             " with the @step decorator!")
        step.artifacts.append(Artifact(name, path))
        return step

    return _decorator
30,724
async def test_duplicate_device_tracker_removal(hass, mqtt_mock, caplog):
    """Test for a non duplicate component.

    Discovers a device_tracker, removes it via an empty payload, then sends
    a second empty payload and asserts the duplicate-discovery warning is
    NOT logged again (removal cleared the discovery registry).
    """
    # Initial discovery of the component.
    async_fire_mqtt_message(
        hass,
        "homeassistant/device_tracker/bla/config",
        '{ "name": "Beer", "state_topic": "test-topic" }',
    )
    await hass.async_block_till_done()
    # Empty payload removes the discovered component; the first removal
    # still logs the "already been discovered" message.
    async_fire_mqtt_message(hass, "homeassistant/device_tracker/bla/config", "")
    await hass.async_block_till_done()
    assert "Component has already been discovered: device_tracker bla" in caplog.text
    caplog.clear()
    # A second empty payload must not re-trigger the duplicate warning.
    async_fire_mqtt_message(hass, "homeassistant/device_tracker/bla/config", "")
    await hass.async_block_till_done()
    assert (
        "Component has already been discovered: device_tracker bla" not in caplog.text
    )
30,725
def build_status(code: int) -> str:
    """
    Builds a string with HTTP status code and reason for given code.

    :param code: integer HTTP code
    :return: string with code and reason
    """
    status = http.HTTPStatus(code)
    # Title-case each word of the enum name, keeping the acronym "OK" intact.
    words = [w if w == "OK" else w.capitalize() for w in status.name.split("_")]
    return f"{code} {' '.join(words)}"
30,726
def pretty_picks_players(picks):
    """Formats a table of players picked for the gameweek, with live score
    information.

    Fielded players are listed first, then a separator row, then the bench
    (non-fielded picks). Returns the populated PrettyTable.
    """
    fields = ["Team", "Position", "Player", "Gameweek score",
              "Chance of playing next game", "Player news",
              "Sub position", "Id"]
    table = PrettyTable(field_names=fields)
    # Summary header: gameweek points, averages, rank arrows and ranks.
    table.title = "GW points: " + str(picks.score) \
        + " - Average GW points: " + str(picks.event.average_entry_score) \
        + " - Overall arrow: " + picks.entry.overall_arrow["unicode"] \
        + " - GW rank: " + str(picks.entry.summary_event_rank) \
        + " - Overall rank: " + str(picks.entry.summary_overall_rank)
    # Starting eleven.
    for p in picks.picks:
        if picks.player_fielded[p.pick_position]:
            table.add_row([p.team_name, p.position, p.displayname,
                           p.gw_points, p.chance_of_playing_next_round,
                           p.news, p.pick_position, p.id_])
    # Visual separator between starters and bench.
    table.add_row(["===", "===", "=======", "==", "", "", "==", "==="])
    # Bench players.
    for p in picks.picks:
        if not picks.player_fielded[p.pick_position]:
            table.add_row([p.team_name, p.position, p.displayname,
                           p.gw_points, p.chance_of_playing_next_round,
                           p.news, p.pick_position, p.id_])
    table.align = "l"
    table.align["Gameweek score"] = "r"
    table.align["Sub position"] = "r"
    table.align["Chance of playing next game"] = "c"
    return table
30,727
def open_path(request):
    """
    handles paths authors/

    POST: registers a new (inactive) author account; returns the new id or
    400 when the username is taken.
    GET: returns the serialized list of all authors.
    NOTE(review): any other method falls through and implicitly returns
    None — confirm whether a 405 response is wanted.
    """
    if(request.method == "POST"):
        json_data = request.data
        # New accounts start inactive until approved.
        new_author = Author(is_active=False)
        # Creating new user login information
        if "password" in json_data:
            password = json_data["password"]
            json_data.pop("password")
            new_author.set_password(password)
        new_author.username = json_data["username"]
        for k, v in json_data.items():
            setattr(new_author, k, v)
        new_author.host = HOST_URL
        url = new_author.host + "author/" + str(new_author.id)
        new_author.url = url
        # Try creating user,
        # if duplicate user, return Bad Request
        try:
            new_author.save()
        except IntegrityError:
            return HttpResponseBadRequest("username taken")
        return HttpResponse(str(new_author.id), status=status.HTTP_200_OK)
    if(request.method == "GET"):
        data = Author.objects.all()
        ser = AuthorSerializer(data, many=True)
        return JsonResponse(ser.data, safe=False)
30,728
def encode_task(task):
    """ Encodes a syllogistic task.

    Thin delegation to :class:`SyllogisticTaskEncoder`.

    Parameters
    ----------
    task : list(list(str))
        List representation of the syllogism (e.g., [['All', 'A', 'B'],
        ['Some', 'B', 'C']]).

    Returns
    -------
    str
        Syllogistic task encoding (e.g., 'AI1').
    """
    return SyllogisticTaskEncoder.encode_task(task)
30,729
def STOSD(cpu_context: ProcessorContext, instruction: Instruction):
    """Store value in EAX in the address pointed to by EDI"""
    value = cpu_context.registers.eax
    addr = cpu_context.registers.edi
    logger.debug("Storing 0x%X into 0x%X", value, addr)
    # 4-byte (doubleword) store, honoring the context's byte order.
    cpu_context.memory.write(addr, value.to_bytes(4, cpu_context.byteorder))
    # The direction flag (DF) decides whether EDI walks down or up by the
    # operand size, matching x86 string-instruction semantics.
    if cpu_context.registers.df:
        cpu_context.registers.edi -= 4
    else:
        cpu_context.registers.edi += 4
30,730
def fuser(name):
    """
    A context manager that facilitates switching between
    backend fusers.

    Valid names:
    * ``fuser0`` - enables only legacy fuser
    * ``fuser1`` - enables only NNC
    * ``fuser2`` - enables only nvFuser
    """
    # Snapshot current fuser flags so they can be restored on exit.
    old_cpu_fuse = torch._C._jit_can_fuse_on_cpu()
    old_gpu_fuse = torch._C._jit_can_fuse_on_gpu()
    old_texpr_fuser_state = torch._C._jit_texpr_fuser_enabled()
    old_nvfuser_state = torch._C._jit_nvfuser_enabled()
    if name == 'fuser0':  # legacy fuser
        torch._C._jit_override_can_fuse_on_cpu(True)
        torch._C._jit_override_can_fuse_on_gpu(True)
        torch._C._jit_set_texpr_fuser_enabled(False)
        torch._C._jit_set_nvfuser_enabled(False)
    elif name == 'fuser1':  # NNC
        # NNC additionally requires the profiling executor/mode; only this
        # branch saves those flags, and only this branch restores them below.
        old_profiling_executor = torch._C._jit_set_profiling_executor(True)
        old_profiling_mode = torch._C._jit_set_profiling_mode(True)
        torch._C._jit_override_can_fuse_on_cpu(False)
        torch._C._jit_override_can_fuse_on_gpu(True)
        torch._C._jit_set_texpr_fuser_enabled(True)
        torch._C._jit_set_nvfuser_enabled(False)
    elif name == 'fuser2':  # nvFuser
        torch._C._jit_override_can_fuse_on_cpu(False)
        torch._C._jit_override_can_fuse_on_gpu(False)
        torch._C._jit_set_texpr_fuser_enabled(False)
        torch._C._jit_set_nvfuser_enabled(True)
    else:
        raise Exception("unrecognized fuser option")
    try:
        yield
    finally:
        if name == 'fuser1':  # NNC
            torch._C._jit_set_profiling_executor(old_profiling_executor)
            torch._C._jit_set_profiling_mode(old_profiling_mode)
        # recover the previous values
        torch._C._jit_override_can_fuse_on_cpu(old_cpu_fuse)
        torch._C._jit_override_can_fuse_on_gpu(old_gpu_fuse)
        torch._C._jit_set_texpr_fuser_enabled(old_texpr_fuser_state)
        torch._C._jit_set_nvfuser_enabled(old_nvfuser_state)
30,731
def resize_small(image, resolution):
    """Shrink an image so its smaller side equals *resolution*.

    Preserves the aspect ratio and uses antialiased resizing.

    Args:
        image: image tensor of shape (H, W, ...).
        resolution: target size for the smaller spatial side.

    Returns:
        The resized image tensor.
    """
    h, w = image.shape[0], image.shape[1]
    ratio = resolution / min(h, w)
    # tf.round has no dtype argument; the original `tf.round(x, tf.int32)`
    # passed a DType as the `name` parameter. Cast explicitly instead so
    # tf.image.resize receives integer target sizes.
    h = tf.cast(tf.round(h * ratio), tf.int32)
    w = tf.cast(tf.round(w * ratio), tf.int32)
    return tf.image.resize(image, [h, w], antialias=True)
30,732
def test_01():
    """ Particle on surface.

    Verifies that the fast and slow focal-scan PSF implementations agree
    on the same radial/axial grid.
    """
    psf = MicroscopePSF()
    # Radial positions 0..1 and axial positions -1..1 (inclusive grids).
    rv = numpy.arange(0.0, 1.01, 0.1)
    zv = numpy.arange(-1.0, 1.01, 0.2)
    fast_rz = psf.gLZRFocalScan(rv, zv)
    slow_rz = psf.gLZRFocalScanSlow(rv, zv)
    assert numpy.allclose(fast_rz, slow_rz)
30,733
def retry(func_name, max_retry, *args):
    """Retry a function while its output is falsy (or the string 'False').

    :param func_name: function to retry
    :type func_name: Callable
    :param max_retry: maximum number of attempts
    :type max_retry: Integer
    :param args: arguments passed to the function
    :type args: args
    :return: the function's output as soon as it is truthy; otherwise the
        last output after max_retry attempts (possibly None)
    :rtype: Boolean (True/False) or None Type(None)
    """
    output = None
    for attempt in range(max_retry):
        output = func_name(*args)
        # The literal string 'False' is treated as a failure too — some
        # callees report failure as text rather than a boolean.
        if output and output != 'False':
            return output
        # Back off before the next attempt, but not after the final one
        # (the old version slept 5s even when no retry would follow).
        if attempt < max_retry - 1:
            time.sleep(5)
    return output
30,734
def test_has_view_change_quorum_number_must_contain_primary_propagate_primary(tconf, tdir):
    """
    Checks method _hasViewChangeQuorum of SimpleSelector
    It must have f+1 ViewChangeDone and contain a VCD from the next Primary
    in the case of PrimaryPropagation

    Check it for a case of view change (view_change_in_progress = True,
    propagate_primary = True)
    """
    # Minimal ledger summary embedded in every ViewChangeDone message:
    ledgerInfo = (
        # ledger id, ledger length, merkle root
        (0, 10, '7toTJZHzaxQ7cGZv18MR4PMBfuUecdEQ1JRqJVeJBvmd'),
        (1, 5, 'Hs9n4M3CrmrkWGVviGq48vSbMpCrk6WgSBZ7sZAWbJy3')
    )
    node = FakeNode(str(tdir), tconf)
    node.view_changer.view_change_in_progress = True
    node.view_changer.propagate_primary = True
    assert not node.view_changer._hasViewChangeQuorum
    # Accessing _view_change_done directly to avoid influence of methods
    node.view_changer._view_change_done = {}

    def declare(replica_name):
        # Simulate receipt of a ViewChangeDone naming 'Node2' as primary.
        node.view_changer._view_change_done[replica_name] = ('Node2', ledgerInfo)

    # Declare the Primary first and check that f+1 are required
    declare('Node1')
    assert not node.view_changer.has_view_change_from_primary
    assert not node.view_changer._hasViewChangeQuorum
    declare('Node3')
    assert not node.view_changer.has_view_change_from_primary
    # f+1 VCDs reached, but still missing one from the expected primary.
    assert node.view_changer._hasViewChangeQuorum
    declare('Node2')
    assert node.view_changer.has_view_change_from_primary
    assert node.view_changer._hasViewChangeQuorum
30,735
def configure(
    *,
    log_callback: Callable[[str], None] = default_log_fn,
    connection_pool_max_size: int = DEFAULT_CONNECTION_POOL_MAX_SIZE,
    max_connection_pool_count: int = DEFAULT_MAX_CONNECTION_POOL_COUNT,
    # https://docs.microsoft.com/en-us/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs#about-block-blobs
    # the chunk size determines the maximum size of an individual blob
    azure_write_chunk_size: int = DEFAULT_AZURE_WRITE_CHUNK_SIZE,
    google_write_chunk_size: int = DEFAULT_GOOGLE_WRITE_CHUNK_SIZE,
    retry_log_threshold: int = DEFAULT_RETRY_LOG_THRESHOLD,
    retry_common_log_threshold: int = DEFAULT_RETRY_COMMON_LOG_THRESHOLD,
    retry_limit: Optional[int] = None,
    connect_timeout: Optional[int] = DEFAULT_CONNECT_TIMEOUT,
    read_timeout: Optional[int] = DEFAULT_READ_TIMEOUT,
    output_az_paths: bool = False,
    use_azure_storage_account_key_fallback: bool = True,
    get_http_pool: Optional[Callable[[], urllib3.PoolManager]] = None,
    use_streaming_read: bool = False,
    default_buffer_size: int = DEFAULT_BUFFER_SIZE,
) -> None:
    """Replace the module-wide context with one built from these settings.

    log_callback: a log callback function `log(msg: string)` to use instead
        of printing to stdout
    connection_pool_max_size: the max size for each per-host connection pool
    max_connection_pool_count: the maximum count of per-host connection pools
    azure_write_chunk_size: the size of blocks to write to Azure Storage
        blobs, can be set to a maximum of 100MB
    google_write_chunk_size: the size of blocks to write to Google Cloud
        Storage blobs in bytes, this only determines the unit of request
        retries
    retry_log_threshold: set a retry count threshold above which to log
        failures to the log callback function
    connect_timeout: the maximum amount of time (in seconds) to wait for a
        connection attempt to a server to succeed, set to None to wait forever
    read_timeout: the maximum amount of time (in seconds) to wait between
        consecutive read operations for a response from the server, set to
        None to wait forever
    output_az_paths: output `az://` paths instead of using the `https://`
        for azure
    use_azure_storage_account_key_fallback: fallback to storage account keys
        for azure containers, having this enabled (the default) requires
        listing your subscriptions and may run into 429 errors if you hit
        the low azure quotas for subscription listing
    get_http_pool: a function that returns a `urllib3.PoolManager` to be
        used for requests
    """
    global _context
    # Rebuilding (rather than mutating) the context keeps the swap atomic.
    _context = create_context(
        log_callback=log_callback,
        connection_pool_max_size=connection_pool_max_size,
        max_connection_pool_count=max_connection_pool_count,
        azure_write_chunk_size=azure_write_chunk_size,
        retry_log_threshold=retry_log_threshold,
        retry_common_log_threshold=retry_common_log_threshold,
        retry_limit=retry_limit,
        google_write_chunk_size=google_write_chunk_size,
        connect_timeout=connect_timeout,
        read_timeout=read_timeout,
        output_az_paths=output_az_paths,
        use_azure_storage_account_key_fallback=use_azure_storage_account_key_fallback,
        get_http_pool=get_http_pool,
        use_streaming_read=use_streaming_read,
        default_buffer_size=default_buffer_size,
    )
30,736
def encode_randomness(randomness: hints.Buffer) -> str:
    """
    Encode the given buffer to a :class:`~str` using Base32 encoding.

    The given :class:`~bytes` are expected to represent the last 10 bytes of a
    ULID, which are cryptographically secure random values.

    The 10 bytes form an 80-bit value that maps exactly onto 16 Base32
    characters of 5 bits each, so no padding is required.

    :param randomness: Bytes to encode
    :type randomness: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
    :return: Value encoded as a Base32 string
    :rtype: :class:`~str`
    :raises ValueError: when the randomness is not 10 bytes
    """
    length = len(randomness)
    if length != 10:
        raise ValueError('Expects 10 bytes for randomness; got {}'.format(length))

    # Fold the 10 bytes into one 80-bit integer and peel off 5 bits per
    # output character, most-significant group first.
    bits = int.from_bytes(bytes(randomness), byteorder='big')
    return ''.join(ENCODING[(bits >> shift) & 0x1F] for shift in range(75, -1, -5))
30,737
def obtain_parameter_values(flow):
    """
    Extracts all parameter settings from the model inside a flow in OpenML format.

    Parameters
    ----------
    flow : OpenMLFlow
        openml flow object (containing flow ids, i.e., it has to be downloaded
        from the server)

    Returns
    -------
    list
        A list of dicts, where each dict has the following names:
        - oml:name (str): The OpenML parameter name
        - oml:value (mixed): A representation of the parameter value
        - oml:component (int): flow id to which the parameter belongs
    """
    # Raises if the flow (or a subflow) lacks a server-assigned id.
    openml.flows.functions._check_flow_for_server_id(flow)

    def get_flow_dict(_flow):
        # Recursively build a map of flow name -> flow id for this flow and
        # all of its nested subflows.
        flow_map = {_flow.name: _flow.flow_id}
        for subflow in _flow.components:
            flow_map.update(get_flow_dict(_flow.components[subflow]))
        return flow_map

    def extract_parameters(_flow, _flow_dict, component_model,
                           _main_call=False, main_id=None):
        def is_subcomponent_specification(values):
            # checks whether the current value can be a specification of
            # subcomponents, as for example the value for steps parameter
            # (in Pipeline) or transformers parameter (in
            # ColumnTransformer). These are always lists/tuples of lists/
            # tuples, size bigger than 2 and an OpenMLFlow item involved.
            if not isinstance(values, (tuple, list)):
                return False
            for item in values:
                if not isinstance(item, (tuple, list)):
                    return False
                if len(item) < 2:
                    return False
                if not isinstance(item[1], openml.flows.OpenMLFlow):
                    return False
            return True

        # _flow is openml flow object, _param dict maps from flow name to flow
        # id for the main call, the param dict can be overridden (useful for
        # unit tests / sentinels) this way, for flows without subflows we do
        # not have to rely on _flow_dict
        exp_parameters = set(_flow.parameters)
        exp_components = set(_flow.components)
        # Only top-level sklearn params (no '__'-nested ones) are compared.
        model_parameters = set([mp for mp in component_model.get_params()
                                if '__' not in mp])
        # Symmetric difference must be empty: flow and model must agree
        # exactly on the set of parameter/component names.
        if len((exp_parameters | exp_components) ^ model_parameters) != 0:
            flow_params = sorted(exp_parameters | exp_components)
            model_params = sorted(model_parameters)
            raise ValueError('Parameters of the model do not match the '
                             'parameters expected by the '
                             'flow:\nexpected flow parameters: '
                             '%s\nmodel parameters: %s' % (flow_params,
                                                           model_params))

        _params = []
        for _param_name in _flow.parameters:
            _current = OrderedDict()
            _current['oml:name'] = _param_name

            current_param_values = openml.flows.sklearn_to_flow(
                component_model.get_params()[_param_name])

            # Try to filter out components (a.k.a. subflows) which are
            # handled further down in the code (by recursively calling
            # this function)!
            if isinstance(current_param_values, openml.flows.OpenMLFlow):
                continue

            if is_subcomponent_specification(current_param_values):
                # complex parameter value, with subcomponents
                parsed_values = list()
                for subcomponent in current_param_values:
                    # scikit-learn stores usually tuples in the form
                    # (name (str), subcomponent (mixed), argument
                    # (mixed)). OpenML replaces the subcomponent by an
                    # OpenMLFlow object.
                    if len(subcomponent) < 2 or len(subcomponent) > 3:
                        raise ValueError('Component reference should be '
                                         'size {2,3}. ')
                    subcomponent_identifier = subcomponent[0]
                    subcomponent_flow = subcomponent[1]
                    if not isinstance(subcomponent_identifier,
                                      six.string_types):
                        raise TypeError('Subcomponent identifier should be '
                                        'string')
                    if not isinstance(subcomponent_flow,
                                      openml.flows.OpenMLFlow):
                        raise TypeError('Subcomponent flow should be string')

                    current = {
                        "oml-python:serialized_object": "component_reference",
                        "value": {
                            "key": subcomponent_identifier,
                            "step_name": subcomponent_identifier
                        }
                    }
                    if len(subcomponent) == 3:
                        if not isinstance(subcomponent[2], list):
                            raise TypeError('Subcomponent argument should be'
                                            'list')
                        current['value']['argument_1'] = subcomponent[2]
                    parsed_values.append(current)
                parsed_values = json.dumps(parsed_values)
            else:
                # vanilla parameter value
                parsed_values = json.dumps(current_param_values)

            _current['oml:value'] = parsed_values
            # The main (outermost) call attributes parameters to main_id;
            # recursive calls look the owning flow id up in _flow_dict.
            if _main_call:
                _current['oml:component'] = main_id
            else:
                _current['oml:component'] = _flow_dict[_flow.name]
            _params.append(_current)

        # Recurse into each subcomponent and collect its parameters too.
        for _identifier in _flow.components:
            subcomponent_model = component_model.get_params()[_identifier]
            _params.extend(extract_parameters(_flow.components[_identifier],
                                              _flow_dict, subcomponent_model))
        return _params

    flow_dict = get_flow_dict(flow)
    parameters = extract_parameters(flow, flow_dict, flow.model, True,
                                    flow.flow_id)
    return parameters
30,738
def logging_sync_ocns(cookie, in_from_or_zero, in_to_or_zero):
    """Build the XML request body for the UCSC ``LoggingSyncOcns`` API call.

    Auto-generated UCSC XML API Method.
    """
    request = ExternalMethod("LoggingSyncOcns")
    request.cookie = cookie
    # The UCSC XML layer expects string-typed values.
    request.in_from_or_zero = str(in_from_or_zero)
    request.in_to_or_zero = str(in_to_or_zero)
    return request.to_xml(option=WriteXmlOption.DIRTY)
30,739
def test_stations_by_distance():
    """Test stations by distance function"""
    station_list = floodsystem.stationdata.build_station_list()
    ranked = floodsystem.geo.stations_by_distance(station_list, (0, 0))

    # every entry must be a (MonitoringStation, float) pair
    for station, distance in ranked:
        assert type(station) == floodsystem.station.MonitoringStation
        assert type(distance) == float

    # distances must be non-decreasing
    for (_, nearer), (_, farther) in zip(ranked, ranked[1:]):
        assert nearer <= farther
30,740
def archive(in_file):
    """
    Evaluate the result for the given input parameters, and archive the results.

    :param in_file: path to the input file describing one evaluation
        (parsed by ``process_input`` into iteration, iteration id, result).
    """
    (iteration, it_id, result) = process_input(in_file)
    archive_output(iteration, it_id, result)
30,741
def story_role(name, rawtext, text, lineno, inliner, options=None, content=None):
    """Link to a JIRA issue of type "story".

    Thin wrapper that delegates to ``role_base`` with ``role_type='story'``.

    Returns 2 part tuple containing list of nodes to insert into the document
    and a list of system messages. Both are allowed to be empty.

    :param name: The role name used in the document.
    :param rawtext: The entire markup snippet, with role.
    :param text: The text marked with the role.
    :param lineno: The line number where rawtext appears in the input.
    :param inliner: The inliner instance that called us.
    :param options: Directive options for customization.
    :param content: The directive content for customization.
    """
    return role_base(name, rawtext, text, lineno, inliner, options=options,
                     content=content, role_type='story')
30,742
def benedict_bornder_constants(g, critical=False):
    """Return the (g, h) pair for a Benedict-Bordner g-h filter.

    The Benedict-Bordner filter minimizes transient errors for a given g.
    Only h is actually derived; g is passed through unchanged. The standard
    formula allows ringing; passing ``critical=True`` "nearly" critically
    damps the filter, trading some performance for reduced ringing.

    Parameters
    ----------
    g : float
        scaling factor g for the filter
    critical : boolean, default False
        Attempts to critically damp the filter.

    Returns
    -------
    g : float
        scaling factor g (same as the g that was passed in)
    h : float
        scaling factor h that minimizes the transient errors

    Examples
    --------
    .. code-block:: Python

        from filterpy.gh import GHFilter, benedict_bornder_constants
        g, h = benedict_bornder_constants(.855)
        f = GHFilter(0, 0, 1, g, h)

    References
    ----------
    Brookner, "Tracking and Kalman Filters Made Easy". John Wiley and
    Sons, 1998.
    """
    g_sqr = g**2
    if critical:
        # nearly critically damped variant
        h = 0.8 * (2. - g_sqr - 2*(1-g_sqr)**.5) / g_sqr
    else:
        # standard Benedict-Bordner relation
        h = g_sqr / (2.-g)
    return (g, h)
30,743
def enable(configuration_file, section="ispyb"):
    """Enable access to features that are currently under development.

    Reads MySQL credentials from ``section`` of ``configuration_file``, opens
    a direct database connection, and monkey-patches the ispyb model classes
    with the private accessor functions defined in this module.

    :param configuration_file: path to an ini-style credentials file.
    :param section: section name within the file (default "ispyb").
    :raises RuntimeError: if the configuration file cannot be read.
    """
    global _db, _db_cc, _db_config

    if _db_config:
        if _db_config == configuration_file:
            # This database connection is already set up.
            return
        # NOTE(review): Logger.warn is deprecated in favour of Logger.warning.
        logging.getLogger("ispyb").warn(
            "__future__ configuration file change requested"
        )
        disable()

    logging.getLogger("ispyb").info(
        "NOTICE: This code uses __future__ functionality in the ISPyB API. "
        "This enables unsupported and potentially unstable code, which may "
        "change from version to version without warnings. Here be dragons."
    )

    cfgparser = configparser.RawConfigParser()
    if not cfgparser.read(configuration_file):
        raise RuntimeError(
            "Could not read from configuration file %s" % configuration_file
        )
    cfgsection = dict(cfgparser.items(section))
    host = cfgsection.get("host")
    port = cfgsection.get("port", 3306)
    # Accept both long and short key spellings for each credential.
    database = cfgsection.get("database", cfgsection.get("db"))
    username = cfgsection.get("username", cfgsection.get("user"))
    password = cfgsection.get("password", cfgsection.get("pw"))

    # Open a direct MySQL connection
    _db = mysql.connector.connect(
        host=host,
        port=port,
        user=username,
        password=password,
        database=database,
        use_pure=True,
    )
    _db_config = configuration_file
    _db.autocommit = True

    class DictionaryCursorContextManager:
        """This class creates dictionary cursors for mysql.connector connections.

        By using a context manager it is ensured that cursors are closed
        immediately after use. Cursors created with this context manager
        return results as a dictionary and offer a .run() function, which is
        an alias to .execute that accepts query parameters as function
        parameters rather than a list.
        """

        # NOTE: methods use 'cm' instead of the conventional 'self'.
        def __enter__(cm):
            """Enter context. Ensure the database is alive and return a
            cursor with an extra .run() function."""
            _db.ping(reconnect=True)
            cm.cursor = _db.cursor(dictionary=True)

            def flat_execute(stmt, *parameters):
                """Pass all given function parameters as a list to the
                existing .execute() function."""
                return cm.cursor.execute(stmt, parameters)

            setattr(cm.cursor, "run", flat_execute)
            return cm.cursor

        def __exit__(cm, *args):
            """Leave context. Close cursor. Destroy reference."""
            cm.cursor.close()
            cm.cursor = None

    _db_cc = DictionaryCursorContextManager

    # From here on: graft the private accessor functions onto the public
    # ispyb model classes (deferred imports avoid circular dependencies).
    import ispyb.model.datacollection

    ispyb.model.datacollection.DataCollection.integrations = (
        _get_linked_autoprocintegration_for_dc
    )
    ispyb.model.datacollection.DataCollection.screenings = _get_linked_screenings_for_dc
    ispyb.model.datacollection.DataCollection.pdb = _get_linked_pdb_for_dc

    import ispyb.model.processingprogram

    ispyb.model.processingprogram.ProcessingProgram.reload = _get_autoprocprogram

    import ispyb.model.screening

    ispyb.model.screening.Screening.outputs = _get_linked_outputs_for_screening
    ispyb.model.screening.Screening.reload = _get_screening
    ispyb.model.screening.ScreeningOutput.lattices = (
        _get_linked_lattices_for_screening_output
    )
    ispyb.model.screening.ScreeningOutput.strategies = (
        _get_linked_strategies_for_screening_output
    )
    ispyb.model.screening.ScreeningOutput.reload = _get_screening_output
    ispyb.model.screening.ScreeningOutputLattice.reload = _get_screening_output_lattice
    ispyb.model.screening.ScreeningStrategy.wedges = (
        _get_linked_wedges_for_screening_strategy
    )
    ispyb.model.screening.ScreeningStrategy.reload = _get_screening_strategy
    ispyb.model.screening.ScreeningStrategyWedge.sub_wedges = (
        _get_linked_sub_wedges_for_screening_strategy_wedge
    )
    ispyb.model.screening.ScreeningStrategyWedge.reload = _get_screening_strategy_wedge
    ispyb.model.screening.ScreeningStrategySubWedge.reload = (
        _get_screening_strategy_sub_wedge
    )

    import ispyb.model.image_quality_indicators

    ispyb.model.image_quality_indicators.ImageQualityIndicators.reload = (
        _get_image_quality_indicators
    )
    ispyb.model.image_quality_indicators.ImageQualityIndicatorsList.reload = (
        _get_image_quality_indicators_for_dcid
    )
    ispyb.model.datacollection.DataCollection.image_quality = (
        _get_linked_image_quality_indicators_for_data_collection
    )

    import ispyb.model.detector

    ispyb.model.detector.Detector.reload = _get_detector

    import ispyb.model.sample

    ispyb.model.sample.Sample.reload = _get_sample
    ispyb.model.datacollection.DataCollection.sample = _get_linked_sample_for_dcid

    import ispyb.model.samplegroup

    ispyb.model.samplegroup.SampleGroup.reload = _get_sample_group
    ispyb.model.datacollection.DataCollection.sample_groups = (
        _get_linked_sample_groups_for_dcid
    )
30,744
def median(f, x, y, a, b):
    """
    Return the median value of the (2*a+1) x (2*b+1) neighborhood of ``f``
    centered at ``(x, y)``.

    :param f: 2-D array-like image.
    :param x: row index of the window center.
    :param y: column index of the window center.
    :param a: half-height of the window (rows).
    :param b: half-width of the window (columns).
    :return: median of the window as a float.

    NOTE(review): assumes the window lies fully inside ``f``; a negative
    ``x - a`` or ``y - b`` silently wraps via Python slicing — confirm callers
    guard against border pixels.
    """
    # Create the sub 2d array
    sub_f = np.asarray(f)[x - a:x + a + 1, y - b:y + b + 1]

    # np.median sorts internally; the explicit np.sort in the previous
    # version was redundant work.
    return np.median(sub_f)
30,745
def build_relevant_api_reference_files(
    docstring: str, api_doc_id: str, api_doc_path: str
) -> Set[str]:
    """Builds importable link snippets according to the contents of a docstring's
     `# Documentation` block.

    This method will create files if they do not exist, and will append links
    to the files that already do exist.

    Args:
        docstring: the docstring that contains the `# Documentation` block
            listing urls to be cross-linked.
        api_doc_id: A string representation of the API doc that will have the
            link applied to it.
        api_doc_path: a Docusaurus compliant path to the API document.

    Returns:
        A set containing the file paths that were created or appended to.
    """
    output_paths = set()
    document_paths = get_document_paths(docstring)
    for relevant_path in document_paths:
        links_path = Path(f"..{relevant_path}__api_links.mdx")
        output_paths.add(links_path)
        # Mode "a" creates the file when it is missing, so the previous
        # exists()-then-write-or-append branching was redundant (and racy).
        with open(links_path, "a") as f:
            f.write(f"- [{api_doc_id}]({api_doc_path})\n")
    return output_paths
30,746
def main(args):
    """Load a model with given transforming arguments and transform individual cases.

    Expects ``args`` to provide: data, meta, output, tensorboard, scaler,
    and size (SOM grid edge length).
    """
    cases = io_functions.load_case_collection(args.data, args.meta)
    # cases = cases.sample(1, groups=["CLL", "normal"])
    selected_markers = cases.selected_markers
    marker_name_only = False
    if args.tensorboard:
        tensorboard_dir = args.output / "tensorboard"
    else:
        tensorboard_dir = None
    # scaler = "RefitMinMaxScaler"
    scaler = args.scaler
    # Training parameters for the model can be respecified, the only difference
    # between transform and normal training, is that after a transformation is
    # completed, the original weights will be restored to the model.
    model = casesom.CaseSom(
        tubes=selected_markers,
        tensorboard_dir=tensorboard_dir,
        modelargs={
            "marker_name_only": marker_name_only,
            "max_epochs": 5,
            "batch_size": 50000,
            # radius shrinks linearly from half the grid size down to 1
            "initial_radius": int(args.size / 2),
            "end_radius": 1,
            "radius_cooling": "linear",
            # "marker_images": sommodels.fcssom.MARKER_IMAGES_NAME_ONLY,
            "map_type": "toroid",
            "dims": (args.size, args.size, -1),
            "scaler": scaler,
        }
    )
    transform_cases(cases, model, args.output)
30,747
def find_zip_entry(zFile, override_file):
    """
    Implement ZipFile.getinfo() as case insensitive for systems with a case
    insensitive file system so that looking up overrides will work the same
    as it does in the Sublime core.
    """
    try:
        return zFile.getinfo(override_file)
    except KeyError:
        # Only fall back to a linear scan when _wrap() treats the platform
        # as case-insensitive; otherwise the lookup genuinely failed.
        if _wrap("ABC") != _wrap("abc"):
            raise

        target = _wrap(override_file)
        for candidate in zFile.infolist():
            if _wrap(candidate.filename) == target:
                return candidate

        raise
30,748
def get_mode(elements):
    """Return the smallest of the element(s) that occur most frequently.

    Unlike the previous implementation, this does not mutate (sort) the
    caller's list.

    :param elements: iterable of comparable, hashable elements.
    :return: the smallest element among those with the highest frequency.
    :raises ValueError: if ``elements`` is empty (same as before, via max()).
    """
    from collections import Counter

    counts = Counter(elements)
    # max() raises ValueError on an empty input, matching prior behavior.
    max_count = max(counts.values())
    # Ties are broken by returning the smallest qualifying element.
    return min(key for key, count in counts.items() if count == max_count)
30,749
def holtWintersAberration(requestContext, seriesList, delta=3):
    """
    Performs a Holt-Winters forecast using the series as input data and plots
    the positive or negative deviation of the series data from the forecast.
    """
    results = []
    for series in seriesList:
        # confidenceBands[0] is the lower band, [1] the upper band.
        confidenceBands = holtWintersConfidenceBands(requestContext, [series], delta)
        lowerBand = confidenceBands[0]
        upperBand = confidenceBands[1]
        aberration = list()
        # NOTE: 'actual' from enumerate is unused; values are read via series[i].
        for i, actual in enumerate(series):
            if series[i] is None:
                # no data point -> no aberration
                aberration.append(0)
            elif upperBand[i] is not None and series[i] > upperBand[i]:
                # positive deviation above the upper confidence band
                aberration.append(series[i] - upperBand[i])
            elif lowerBand[i] is not None and series[i] < lowerBand[i]:
                # negative deviation below the lower confidence band
                aberration.append(series[i] - lowerBand[i])
            else:
                # within the bands -> zero
                aberration.append(0)

        newName = "holtWintersAberration(%s)" % series.name
        results.append(TimeSeries(newName, series.start, series.end,
                                  series.step, aberration))
    return results
30,750
def get_in_addition_from_start_to_end_item(li, start, end):
    """Return the elements of ``li`` from ``start`` through ``end`` (inclusive).

    NOTE(review): the function name suggests "everything EXCEPT start..end",
    but the implementation returns the inclusive slice start..end — confirm
    which contract callers actually rely on before renaming or changing.

    :param li: source list
    :param start: start index (inclusive)
    :param end: end index (inclusive)
    :return: ``li[start:end + 1]``
    """
    return li[start:end + 1]
30,751
def remove_special_message(section_content):
    """
    Remove special message - "medicinal product no longer authorised"
    e.g.
        'me di cin al p ro du ct n o lo ng er a ut ho ris ed'
        'me dic ina l p rod uc t n o l on ge r a uth ori se d'

    The watermark appears in several differently-spaced variants depending on
    how the PDF text was extracted, so each known variant is stripped in turn.

    :param section_content: content of a section
    :return: content of a section without special message
    """
    # every known spacing variant of the watermark, in replacement order
    watermarks = (
        'me di cin al p ro du ct n o lo ng er a ut ho ris ed',
        'me dic ina l p ro du ct no lo ng er au th or ise d',
        'me dic ina l p rod uc t n o l on ge r a uth ori se d',
        'me dic ina l p ro du ct no lo ng er au tho ris ed',
        'me dic ina l p ro du ct no lo ng er a ut ho ris ed',
        'me dic ina l p rod uc t n o l on ge r a uth ori sed',
        'm ed ici na l p ro du ct no lo ng er a ut ho ris ed',
        'm ed ici na l p ro du ct no lo ng er au th or ise d',
        'med icin al pro du ct no lo ng er au tho ris ed',
    )

    for watermark in watermarks:
        section_content = section_content.replace(watermark, '')

    # collapse any runs of spaces left behind by the removals
    return re.sub(' +', ' ', section_content)
30,752
def simulate_cash_flow_values(cash_flow_data, number_of_simulations=1):
    """Simulate cash flow values from their mean and standard deviation.

    The function returns a list of numpy arrays with cash flow values.

    Example:
    Input:
        cash_flow_data: [[100, 20], [-500, 10]]
        number_of_simulations: 3
    Output:
        [array([113.36222158, 77.39297513, 77.15350701]),
        array([-506.58408186, -503.27855081, -500.37690891])]"""
    # No data or a non-positive simulation count yields nothing to simulate.
    if not cash_flow_data or number_of_simulations <= 0:
        return []
    return [
        get_random_numbers(mean, standard_deviation, number_of_simulations)
        for mean, standard_deviation in cash_flow_data
    ]
30,753
def sim_v1(sim_params, prep_result, progress=None, pipeline=None):
    """
    Map the simulation over the peptides in prep_result.

    This is actually performed twice in order to get a train and (different!)
    test set
        The "train" set includes decoys, the test set does not;
        furthermore the the error modes and radiometry noise is different in
        each set.

    :param sim_params: simulation parameters; random_seed is filled in from
        the clock when absent.
    :param prep_result: prepared peptides (with and without decoys).
    :param progress: optional progress callback forwarded to _run_sim.
    :param pipeline: optional pipeline used only for phase reporting.
    :return: a SimV1Result bundling the train and test matrices.
    """
    if sim_params.random_seed is None:
        sim_params.random_seed = int(time.time())

    np.random.seed(sim_params.random_seed)

    # CREATE a *training-set* for all peptides (real and decoy)
    if pipeline:
        pipeline.set_phase(0, 2)

    # Sanity check that all the peps are accounted for
    pep_seqs_with_decoys = prep_result.pepseqs__with_decoys()
    n_peps = pep_seqs_with_decoys.pep_i.nunique()
    assert n_peps == prep_result.n_peps

    (
        train_dytmat,
        train_radmat,
        train_pep_recalls,
        train_flus,
        train_flu_remainders,
        train_true_pep_iz,
    ) = _run_sim(
        sim_params,
        pep_seqs_with_decoys,
        name="train",
        n_peps=n_peps,
        n_samples=sim_params.n_samples_train,
        progress=progress,
    )

    if sim_params.is_survey:
        # Survey mode needs no test set at all.
        test_dyemat = None
        test_radmat = None
        test_recalls = None
        test_flus = None
        test_flu_remainders = None
        test_true_pep_iz = None
    else:
        # CREATE a *test-set* for real-only peptides
        if pipeline:
            pipeline.set_phase(1, 2)

        (
            test_dyemat,
            test_radmat,
            test_recalls,
            test_flus,
            test_flu_remainders,
            test_true_pep_iz,
        ) = _run_sim(
            sim_params,
            prep_result.pepseqs__no_decoys(),
            name="test",
            n_peps=n_peps,
            n_samples=sim_params.n_samples_test,
            progress=progress,
        )

        # CHECK that the train and test are not identical in SOME non_zero_row
        # If they are, there was some sort of RNG seed errors which might happen
        # for example if sub-processes failed to re-init their RNG seeds.
        # Test this by looking at pep_i==1
        non_zero_rows = np.any(train_radmat[1] > 0, axis=(1, 2))
        non_zero_row_args = np.argwhere(non_zero_rows)[0:100]
        train_rows = train_radmat[1, non_zero_row_args].reshape(
            (
                non_zero_row_args.shape[0],
                non_zero_row_args.shape[1]
                * train_radmat.shape[2]
                * train_radmat.shape[3],
            )
        )
        test_rows = test_radmat[1, non_zero_row_args].reshape(
            (
                non_zero_row_args.shape[0],
                non_zero_row_args.shape[1]
                * test_radmat.shape[2]
                * test_radmat.shape[3],
            )
        )

        if train_rows.shape[0] > 0 and not sim_params.allow_train_test_to_be_identical:
            # Any nonzero diagonal distance proves the two sets differ.
            any_differences = np.any(np.diagonal(cdist(train_rows, test_rows)) != 0.0)
            check.affirm(any_differences, "Train and test sets are identical")

    # NOTE(review): the four .reshape() calls below discard their return
    # value — ndarray.reshape is NOT in-place, so these statements have no
    # effect. Confirm whether the flattening was intended; the code below
    # still indexes the un-flattened arrays.
    if train_dytmat is not None:
        train_dytmat.reshape(
            (train_dytmat.shape[0] * train_dytmat.shape[1], *train_dytmat.shape[2:])
        )
    if train_radmat is not None:
        train_radmat.reshape(
            (train_radmat.shape[0] * train_radmat.shape[1], *train_radmat.shape[2:])
        )
    if test_dyemat is not None:
        test_dyemat.reshape(
            (test_dyemat.shape[0] * test_dyemat.shape[1], *test_dyemat.shape[2:])
        )
    if test_radmat is not None:
        test_radmat.reshape(
            (test_radmat.shape[0] * test_radmat.shape[1], *test_radmat.shape[2:])
        )

    # REMOVE all-zero rows (EXCEPT THE FIRST which is the nul row)
    assert np.all(train_dytmat[0, :, :] == 0)
    some_non_zero_row_args = np.argwhere(
        ~np.all(train_dytmat[:, :, :] == 0, axis=(1, 2))
    ).flatten()
    some_non_zero_row_args = np.concatenate(([0], some_non_zero_row_args))

    # TASK: Plucking out the non-zero rows doesn't work well
    # with Arrtay results -- I need to rethink that.
    # For now, I'm converting this back to np.ndarray
    train_dytmat = train_dytmat[some_non_zero_row_args]
    train_radmat = train_radmat[some_non_zero_row_args]
    train_true_pep_iz = train_true_pep_iz[some_non_zero_row_args]

    if test_dyemat is not None:
        assert np.all(test_dyemat[0, :, :] == 0)
        some_non_zero_row_args = np.argwhere(
            ~np.all(test_dyemat[:, :, :] == 0, axis=(1, 2))
        ).flatten()
        # DO not add a nul row into the test data
        # some_non_zero_row_args = np.concatenate(([0], some_non_zero_row_args))
        test_dyemat = test_dyemat[some_non_zero_row_args]
        test_radmat = test_radmat[some_non_zero_row_args]
        test_true_pep_iz = test_true_pep_iz[some_non_zero_row_args]

    return SimV1Result(
        params=sim_params,
        train_dytmat=train_dytmat,
        train_radmat=train_radmat,
        train_pep_recalls=train_pep_recalls,
        train_flus=train_flus,
        train_flu_remainders=train_flu_remainders,
        train_true_pep_iz=train_true_pep_iz,
        test_dyemat=test_dyemat,
        test_radmat=test_radmat,
        test_recalls=test_recalls,
        test_flus=test_flus,
        test_true_pep_iz=test_true_pep_iz,
        test_flu_remainders=test_flu_remainders,
    )
30,754
def test_normalize_acl_without_acl_attribute():
    """ test for resource without __acl__ attribute

    A plain string has no __acl__, so normalize_acl must fall back to an
    empty ACL list.
    """
    from fastapi_permissions import normalize_acl

    assert normalize_acl("without __acl__") == []
30,755
def apply_function_elementwise_series(ser, func):
    """Apply ``func`` to every element of a Series.

    Args:
        ser (pd.Series): input series.
        func (function): unary function applied per element.

    Returns:
        pd.Series: a new series holding ``func`` applied to each value
        (name and index are preserved by ``Series.map``).

    Examples:
        >>> df = pd.DataFrame(np.array(range(12)).reshape(4, 3), columns=list('abc'))
        >>> ser = df['b']
        >>> f = lambda x: '%.1f' % x
        >>> apply_function_elementwise_series(ser, f)
        0     1.0
        1     4.0
        2     7.0
        3    10.0
        Name: b, dtype: object
    """
    mapped = ser.map(func)
    return mapped
30,756
def is_the_bbc_html(raw_html, is_lists_enabled):
    """
    Creates a concatenate string of the article, with or without li elements
    included from bbc.co.uk.

    :param raw_html: resp.content from response.get().
    :param is_lists_enabled: Boolean to include <Li> elements.
    :return: List where List[0] is a concatenated String of the article.
    """
    article = [""]
    parsed_html = BeautifulSoup(raw_html.decode('utf-8', 'ignore'), 'html.parser')
    # Locate the story body once instead of re-parsing for each element type.
    story_body = parsed_html.find("div", {"class": "story-body__inner"})
    for paragraph in story_body.findAll('p'):
        article[0] += paragraph.text
    if is_lists_enabled:
        # BUG FIX: the previous code searched for the nonexistent tag "ls",
        # so list items were never captured; "li" is the HTML list-item tag.
        for item in story_body.findAll('li'):
            article[0] += item.text
    return article
30,757
def create_empty_module(module_name, origin=None):
    """Creates a blank module.

    A spec with no loader produces an empty module object that is not
    registered in sys.modules.

    Args:
        module_name: The name to be given to the module.
        origin: The origin of the module. Defaults to None.

    Returns:
        A blank module.
    """
    blank_spec = spec_from_loader(module_name, loader=None, origin=origin)
    return module_from_spec(blank_spec)
30,758
def _warp_3d_cupy(image, vector_field, mode, block_size: int = 8):
    """
    Warp a 3-D image by a (coarser) 3-D vector field using CUDA textures.

    Parameters
    ----------
    image : 3-D array, sampled through a linear-interpolating texture.
    vector_field : 4-D array (z, y, x, channels); padded below to 4 channels
        as required by CUDA float4 textures. Sampled with normalised
        coordinates, so it may be coarser than ``image``.
    mode : texture address mode used for the image (out-of-range handling).
    block_size : CUDA thread-block edge length (block is block_size^3).

    Returns
    -------
    Warped image with the same shape and dtype as ``image``.
    """
    xp = Backend.get_xp_module()

    source = r"""
                extern "C"{
                __global__ void warp_3d(float* warped_image,
                                        cudaTextureObject_t input_image,
                                        cudaTextureObject_t vector_field,
                                        int width, int height, int depth)
                {
                    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
                    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
                    unsigned int z = blockIdx.z * blockDim.z + threadIdx.z;

                    if (x < width && y < height && z < depth)
                    {
                        // coordinates in coord-normalised vector_field texture:
                        float u = float(x)/width;
                        float v = float(y)/height;
                        float w = float(z)/depth;

                        //printf("(%f,%f,%f)\n", u, v, w);

                        // Obtain linearly interpolated vector at (u,v,w):
                        float4 vector = tex3D<float4>(vector_field, u, v, w);

                        //printf("(%f,%f,%f,%f)\n", vector.x, vector.y, vector.z, vector.w);

                        // Obtain the shifted coordinates of the source voxel,
                        // flip axis order to match numpy order:
                        float sx = 0.5f + float(x) - vector.z;
                        float sy = 0.5f + float(y) - vector.y;
                        float sz = 0.5f + float(z) - vector.x;

                        // Sample source image for voxel value:
                        float value = tex3D<float>(input_image, sx, sy, sz);

                        //printf("(%f, %f, %f)=%f\n", sx, sy, sz, value);

                        // Store interpolated value:
                        warped_image[z*width*height + y*width + x] = value;

                        //TODO: supersampling would help in regions for which warping misses voxels in the source image,
                        //better: adaptive supersampling would automatically use the vector field
                        // divergence to determine where to super sample and by how much.
                    }
                }
                }
                """

    if image.ndim != 3 or vector_field.ndim != 4:
        raise ValueError("image or vector field has wrong number of dimensions!")

    # set up textures:
    input_image_tex, input_image_cudarr = create_cuda_texture(
        image,
        num_channels=1,
        normalised_coords=False,
        sampling_mode="linear",
        address_mode=mode,
    )

    # Pad the channel axis from 3 to 4: CUDA textures only support
    # 1-, 2- or 4-channel formats.
    vector_field = cupy.pad(
        vector_field, pad_width=((0, 0),) * 3 + ((0, 1),), mode="constant"
    )
    vector_field_tex, vector_field_cudarr = create_cuda_texture(
        vector_field,
        num_channels=4,
        normalised_coords=True,
        sampling_mode="linear",
        address_mode="clamp",
    )

    # Set up resulting image:
    warped_image = xp.empty(shape=image.shape, dtype=image.dtype)

    # get the kernel, which copies from texture memory
    warp_3d_kernel = cupy.RawKernel(source, "warp_3d")

    # launch kernel: one thread per output voxel, rounded up per axis
    depth, height, width = image.shape
    grid_x = (width + block_size - 1) // block_size
    grid_y = (height + block_size - 1) // block_size
    grid_z = (depth + block_size - 1) // block_size
    warp_3d_kernel(
        (grid_x, grid_y, grid_z),
        (block_size,) * 3,
        (warped_image, input_image_tex, vector_field_tex, width, height, depth),
    )

    # Explicitly drop texture objects and their backing CUDA arrays.
    del input_image_tex, input_image_cudarr, vector_field_tex, vector_field_cudarr

    return warped_image
30,759
def BitWidth(n: int):
    """Return the minimum number of bits needed to represent integer ``n``.

    Zero needs no bits; positive values use their plain bit length; negative
    values are measured in two's complement WITHOUT the sign bit, i.e. the
    bit length of ``n + 1``.
    """
    if n == 0:
        return 0
    return n.bit_length() if n > 0 else (n + 1).bit_length()
30,760
def read_many_nam_cube(netcdf_file_names, PREDICTOR_NAMES):
    """Reads storm-centered images from many NetCDF files.

    The first file seeds the result dict; the predictor matrices of every
    subsequent file are concatenated onto it along axis 0.

    :param netcdf_file_names: 1-D list of paths to input files.
    :return: image_dict: See doc for `read_image_file`.
    """
    keys_to_concat = [PREDICTOR_MATRIX_KEY]
    image_dict = None

    for file_name in netcdf_file_names:
        current_dict = read_nam_maps(file_name, PREDICTOR_NAMES)
        if image_dict is None:
            image_dict = copy.deepcopy(current_dict)
        else:
            for key in keys_to_concat:
                image_dict[key] = numpy.concatenate(
                    (image_dict[key], current_dict[key]), axis=0
                )

    return image_dict
30,761
def logkv(key, val):
    """
    Log a value of some diagnostic
    Call this once for each diagnostic quantity, each iteration
    If called many times, last value will be used.

    :param key: (Any) save to log this key
    :param val: (Any) save to log this value
    """
    # Delegates to the process-wide current logger instance.
    Logger.CURRENT.logkv(key, val)
30,762
def test_xapi_connect_singleton(mocker, fake_ansible_module, XenAPI, xenserver):
    """Tests if XAPI.connect() returns singleton."""
    mocker.patch('XenAPI.Session')

    first_session = xenserver.XAPI.connect(fake_ansible_module)
    second_session = xenserver.XAPI.connect(fake_ansible_module)

    # Only one underlying session may ever be created, and both calls
    # must hand back the same object.
    XenAPI.Session.assert_called_once()
    assert first_session == second_session
30,763
def rouge_l_summary_level(evaluated_sentences, reference_sentences):
    """
    Computes ROUGE-L (summary level) of two text collections of sentences.
    http://research.microsoft.com/en-us/um/people/cyl/download/papers/
    rouge-working-note-v1.3.1.pdf

    Calculated according to:
    R_lcs = SUM(1, u)[LCS<union>(r_i,C)]/m
    P_lcs = SUM(1, u)[LCS<union>(r_i,C)]/n
    F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)

    where:
    SUM(i,u) = SUM from i through u
    u = number of sentences in reference summary
    C = Candidate summary made up of v sentences
    m = number of words in reference summary
    n = number of words in candidate summary

    :param evaluated_sentences: The sentences that have been picked by the
        summarizer
    :param reference_sentences: The sentences from the referene set
    :returns float: F_lcs
    :raises ValueError: raises exception if a param has len <= 0
    """
    if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
        raise ValueError("Collections must contain at least 1 sentence.")

    # m / n: total word counts of reference and candidate summaries
    m = len(_split_into_words(reference_sentences))
    n = len(_split_into_words(evaluated_sentences))

    # accumulate union-LCS against every reference sentence
    union_lcs_total = sum(
        _union_lcs(evaluated_sentences, reference_sentence)
        for reference_sentence in reference_sentences
    )
    return _f_lcs(union_lcs_total, m, n)
30,764
def test_id_not_found_delete(flask_client):
    """Tests DELETE with an ID not found"""
    # Plain string: the previous f-string had no placeholders (ruff F541).
    response = flask_client.delete("/msg-scheduler?id=-10")
    # NOTE(review): the API apparently responds 200 even for a missing id —
    # confirm this is the intended contract rather than 404.
    assert response.status_code == 200
30,765
def construct_classifier(cfg, module_names, in_features, slot_machine=False,
                         k=8, greedy_selection=True):
    """
    Builds a sequential stack of fully-connected layers from a configuration.

    :param cfg: (List) The configuration of the model; entries are either
        'D' (dropout), 'relu' (activation), or an int (layer width)
    :param module_names: (List) The names of the layers
    :param in_features: (int) The number of input features to the first
        fully-connected layer
    :param slot_machine: (bool) build slot-machine Linear layers instead of
        standard weight-update layers
    :param k: (int) the number of options per weight if model is a slot machine
    :param greedy_selection: (bool) use greedy selection if model is a
        slot machine
    :return: model: a sequential module of fully-connected layers
    """
    model = nn.Sequential()
    for idx, entry in enumerate(cfg):
        layer_name = module_names[idx]
        if entry == 'D':
            model.add_module(layer_name, nn.Dropout(p=0.5))
        elif entry == "relu":
            model.add_module(layer_name, nn.ReLU(inplace=True))
        elif slot_machine:
            # Project-specific Linear with k weight options per connection.
            model.add_module(layer_name,
                             Linear(in_features, entry, k, greedy_selection))
            in_features = entry
        else:
            model.add_module(layer_name,
                             nn.Linear(in_features, entry, bias=False))
            in_features = entry
    return model
30,766
def size_adjustment(imgs, shape):
    """
    Crop/pad a batch of 240x320 images to 256x256.

    Args:
        imgs: Numpy array with shape (data, width, height, channel)
              = (*, 240, 320, 3).
        shape: 256 or None.
            256: imgs_adj.shape = (*, 256, 256, 3)
            None: No modification of imgs.
    Returns:
        imgs_adj: Numpy array with shape
                  (data, modified width, modified height, channel)
    """
    if shape is None:
        imgs_adj = imgs
    elif shape == 256:
        # Width: drop columns 0..31 and 287..318 (320 -> 256 along axis 2).
        # NOTE(review): the kept range is asymmetric (column 319 survives) —
        # presumably intentional; confirm against the original dataset layout.
        dropped_cols = list(range(32)) + list(range(287, 319))
        imgs_adj = np.delete(imgs, obj=dropped_cols, axis=2)
        # Height: pad 8 zero rows on each side (240 -> 256 along axis 1).
        batch, _, width, channels = imgs_adj.shape
        pad = np.zeros(shape=(batch, 8, width, channels), dtype=np.uint8)
        imgs_adj = np.concatenate([pad, imgs_adj, pad], axis=1)
    return imgs_adj
30,767
def to_full_model_name(root_key: str) -> str:
    """
    Find model name from the root_key in the file.

    Args:
        root_key: root key such as 'system-security-plan' from a top level
            OSCAL model.
    """
    # Reject anything that is not a known top-level model alias.
    if root_key not in const.MODEL_TYPE_LIST:
        raise TrestleError(f'{root_key} is not a top level model name.')

    module_name = const.MODEL_TYPE_TO_MODEL_MODULE[root_key]
    class_name = utils.alias_to_classname(root_key, utils.AliasMode.JSON)
    return '{}.{}'.format(module_name, class_name)
30,768
def is_unique(s: str) -> bool:
    """Return True if every character of *s* occurs at most once.

    Short-circuits on the first repeated character.

    Time: O(n)
    Space: O(n)
    """
    # A set is the idiomatic structure here; the original used a dict whose
    # values were never read.
    seen: set = set()
    for char in s:
        if char in seen:
            return False
        seen.add(char)
    return True
30,769
def _title(soup):
    """
    Accepts a BeautifulSoup object for the APOD HTML page and returns the
    APOD image title.  Highly idiosyncratic with adaptations for different
    HTML structures that appear over time.
    """
    LOG.debug('getting the title')
    try:
        # Later APOD entries: title is the first <b> inside the second <center>.
        second_center = soup.find_all('center')[1]
        title_tag = second_center.find_all('b')[0]
        return title_tag.text.strip(' ')
    except Exception:
        # Early APOD entries: derive the title from the page <title> text.
        return soup.title.text.split(' - ')[-1].strip()
30,770
def is_available() -> bool:
    """Return ``True`` if the handler has its dependencies met."""
    # HAVE_RLE is a module-level flag — presumably set at import time when the
    # optional RLE dependency imports successfully; confirm at the file top.
    return HAVE_RLE
30,771
def test_repr(bond):
    """
    Test :meth:`.Bond.__repr__`.

    Round-trips the bond through ``repr``/``eval`` and checks the clone is
    equivalent to the original.

    Parameters
    ----------
    bond : :class:`.Bond`
        The bond whose representation is tested.

    Returns
    -------
    None : :class:`NoneType`
    """
    # Evaluate the repr in a namespace holding every stk public name.
    clone = eval(repr(bond), dict(stk.__dict__))

    is_equivalent_atom(clone.get_atom1(), bond.get_atom1())
    is_equivalent_atom(clone.get_atom2(), bond.get_atom2())
    assert clone.get_order() == bond.get_order()
    assert clone.get_periodicity() == bond.get_periodicity()
30,772
def model_with_buckets(encoder_inputs,
                       decoder_inputs,
                       targets,
                       weights,
                       buckets,
                       seq2seq,
                       softmax_loss_function=None,
                       per_example_loss=False,
                       name=None):
  """Create a sequence-to-sequence model with support for bucketing.

  The seq2seq argument is a function that defines a sequence-to-sequence model,
  e.g., seq2seq = lambda x, y: basic_rnn_seq2seq(
      x, y, core_rnn_cell.GRUCell(24))

  Args:
    encoder_inputs: A list of Tensors to feed the encoder; first seq2seq input.
    decoder_inputs: A list of Tensors to feed the decoder; second seq2seq
      input.
    targets: A list of 1D batch-sized int32 Tensors (desired output sequence).
    weights: List of 1D batch-sized float-Tensors to weight the targets.
    buckets: A list of pairs of (input size, output size) for each bucket.
    seq2seq: A sequence-to-sequence model function; it takes 2 input that
      agree with encoder_inputs and decoder_inputs, and returns a pair
      consisting of outputs and states (as, e.g., basic_rnn_seq2seq).
    softmax_loss_function: Function (labels-batch, inputs-batch) -> loss-batch
      to be used instead of the standard softmax (the default if this is None).
    per_example_loss: Boolean. If set, the returned loss will be a batch-sized
      tensor of losses for each sequence in the batch. If unset, it will be
      a scalar with the averaged loss from all examples.
    name: Optional name for this operation, defaults to "model_with_buckets".

  Returns:
    A tuple of the form (outputs, losses), where:
      outputs: The outputs for each bucket. Its j'th element consists of a list
        of 2D Tensors. The shape of output tensors can be either
        [batch_size x output_size] or [batch_size x num_decoder_symbols]
        depending on the seq2seq model used.
      losses: List of scalar Tensors, representing losses for each bucket, or,
        if per_example_loss is set, a list of 1D batch-sized float Tensors.

  Raises:
    ValueError: If length of encoder_inputs, targets, or weights is smaller
      than the largest (last) bucket.
  """
  if len(encoder_inputs) < buckets[-1][0]:
    raise ValueError("Length of encoder_inputs (%d) must be at least that of la"
                     "st bucket (%d)." % (len(encoder_inputs), buckets[-1][0]))
  # Fixed: the two messages below were split as "...of last" + "bucket...",
  # which rendered as "of lastbucket"; a space has been added.
  if len(targets) < buckets[-1][1]:
    raise ValueError("Length of targets (%d) must be at least that of last "
                     "bucket (%d)." % (len(targets), buckets[-1][1]))
  if len(weights) < buckets[-1][1]:
    raise ValueError("Length of weights (%d) must be at least that of last "
                     "bucket (%d)." % (len(weights), buckets[-1][1]))

  all_inputs = encoder_inputs + decoder_inputs + targets + weights
  losses = []
  outputs = []
  with ops.name_scope(name, "model_with_buckets", all_inputs):
    for j, bucket in enumerate(buckets):
      # Reuse variables across buckets so every bucket shares one model.
      with variable_scope.variable_scope(
          variable_scope.get_variable_scope(), reuse=True if j > 0 else None):
        bucket_outputs, _ = seq2seq(encoder_inputs[:bucket[0]],
                                    decoder_inputs[:bucket[1]])
        outputs.append(bucket_outputs)
        if per_example_loss:
          losses.append(
              sequence_loss_by_example(
                  outputs[-1],
                  targets[:bucket[1]],
                  weights[:bucket[1]],
                  softmax_loss_function=softmax_loss_function))
        else:
          losses.append(
              sequence_loss(
                  outputs[-1],
                  targets[:bucket[1]],
                  weights[:bucket[1]],
                  softmax_loss_function=softmax_loss_function))

  return outputs, losses
30,773
def sqlite_cast(vtype, v):
    """
    Return *v* cast to *vtype*, for use in database.

    SQLite does not perform any type check or conversion so this function
    should be used anytime a data comes from outside to be put in database.
    This function also handles CoiotDatetime objects and accepts "now" as an
    argument for them (the date will then be the calling date of this
    function).

    Raises:
        TypeError: when no conversion from type(v) to vtype is known.
    """
    # Identity / None pass straight through.
    if vtype is type(v) or v is None:
        return v
    if vtype is bool:
        if type(v) is int:
            return bool(v)
        elif type(v) is str and v.lower() in ('true', 'false'):
            return v.lower() == 'true'
    elif vtype is int:
        if type(v) in (bool, str):
            return int(v)
    elif vtype is str:
        return str(v)
    elif vtype is CoiotDatetime:
        if type(v) in (float, int):
            return CoiotDatetime.fromepoch(v)
        elif v.lower() == 'now':
            return CoiotDatetime.now()
    # Fixed: the original concatenated the template with "+" BEFORE calling
    # .format(), so the first "{}" placeholder was never substituted.
    raise TypeError("argument of type {} cannot be casted to {}".format(
        type(v), vtype))
30,774
def test_get_atom_infos(case_data):
    """
    Test :meth:`.ConstructedMolecule.get_atom_infos`.

    Parameters
    ----------
    case_data : :class:`.CaseData`
        A test case. Holds the constructed molecule to test and the
        correct number of new atoms.

    Returns
    -------
    None : :class:`NoneType`
    """
    molecule = case_data.constructed_molecule
    expected_new_atoms = case_data.num_new_atoms
    _test_get_atom_infos(
        constructed_molecule=molecule,
        num_new_atoms=expected_new_atoms,
    )
30,775
def test_user_remove_from_team_command(mocker, grafana_client):
    """
    Given:
    - All relevant arguments for the command that is executed

    When:
    - user-remove-from-team command is executed

    Then:
    - The http request is called with the right arguments
    """
    mocked_request = mocker.patch.object(grafana_client, '_http_request')

    user_remove_from_team_command(grafana_client, {'user_id': "3", 'team_id': "17"})

    mocked_request.assert_called_with('DELETE', 'api/teams/17/members/3', headers=None)
30,776
def test_references_with_specific_segments(parser, description):
    """
    Parse example variants with specifications to a specific annotated
    segment of a reference sequence that be given in parentheses directly
    after the reference sequence.
    """
    # Successful parsing IS the assertion: the parser fixture raises on
    # invalid descriptions.
    parser(description)
30,777
def command_runner(
    command,  # type: Union[str, List[str]]
    valid_exit_codes=None,  # type: Optional[List[int]]
    timeout=3600,  # type: Optional[int]
    shell=False,  # type: bool
    encoding=None,  # type: Optional[str]
    stdout=None,  # type: Optional[Union[int, str]]
    stderr=None,  # type: Optional[Union[int, str]]
    windows_no_window=False,  # type: bool
    live_output=False,  # type: bool
    method="monitor",  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Tuple[Optional[int], str]
    """
    Unix & Windows compatible subprocess wrapper that handles output encoding and timeouts
    Newer Python check_output already handles encoding and timeouts, but this one is retro-compatible
    It is still recommended to set cp437 for windows and utf-8 for unix
    Also allows a list of various valid exit codes (ie no error when exit code = arbitrary int)

    command should be a list of strings, eg ['ping', '127.0.0.1', '-c 2']
    command can also be a single string, ex 'ping 127.0.0.1 -c 2' if shell=True or if os is Windows

    Accepts all of subprocess.popen arguments

    Whenever we can, we need to avoid shell=True in order to preserve better security
    Avoiding shell=True involves passing absolute paths to executables since we don't have shell PATH environment

    When no stdout option is given, we'll get output into the returned (exit_code, output) tuple
    When stdout = filename or stderr = filename, we'll write output to the given file

    live_output will poll the process for output and show it on screen (output may be non reliable, don't use it if
    your program depends on the commands' stdout output)

    windows_no_window will disable visible window (MS Windows platform only)

    Returns a tuple (exit_code, output)
    """
    # Choose default encoding when none set
    # cp437 encoding assures we catch most special characters from cmd.exe
    if not encoding:
        encoding = "cp437" if os.name == "nt" else "utf-8"

    # Fix when unix command was given as single string
    # This is more secure than setting shell=True
    if os.name == "posix" and shell is False and isinstance(command, str):
        command = shlex.split(command)

    # Set default values for kwargs
    errors = kwargs.pop(
        "errors", "backslashreplace"
    )  # Don't let encoding issues make you mad
    universal_newlines = kwargs.pop("universal_newlines", False)
    creationflags = kwargs.pop("creationflags", 0)
    # subprocess.CREATE_NO_WINDOW was added in Python 3.7 for Windows OS only
    if (
        windows_no_window
        and sys.version_info[0] >= 3
        and sys.version_info[1] >= 7
        and os.name == "nt"
    ):
        # Disable the following pylint error since the code also runs on nt platform, but
        # triggers an error on Unix
        # pylint: disable=E1101
        creationflags = creationflags | subprocess.CREATE_NO_WINDOW
    close_fds = kwargs.pop("close_fds", "posix" in sys.builtin_module_names)

    # Default buffer size. line buffer (1) is deprecated in Python 3.7+
    bufsize = kwargs.pop("bufsize", 16384)

    # Decide whether we write to output variable only (stdout=None), to output variable and stdout (stdout=PIPE)
    # or to output variable and to file (stdout='path/to/file')
    stdout_to_file = False
    if stdout is None:
        _stdout = PIPE
    elif isinstance(stdout, str):
        # We will send anything to file
        _stdout = open(stdout, "wb")
        stdout_to_file = True
    elif stdout is False:
        _stdout = subprocess.DEVNULL
    else:
        # We will send anything to given stdout pipe
        _stdout = stdout

    # The only situation where we don't add stderr to stdout is if a specific target file was given
    stderr_to_file = False
    if isinstance(stderr, str):
        _stderr = open(stderr, "wb")
        stderr_to_file = True
    elif stderr is False:
        _stderr = subprocess.DEVNULL
    else:
        _stderr = subprocess.STDOUT

    def to_encoding(
        process_output,  # type: Union[str, bytes]
        encoding,  # type: str
        errors,  # type: str
    ):
        # type: (...) -> str
        """
        Convert bytes output to string and handles conversion errors
        """
        # Compatibility for earlier Python versions where Popen has no 'encoding' nor 'errors' arguments
        if isinstance(process_output, bytes):
            try:
                process_output = process_output.decode(encoding, errors=errors)
            except TypeError:
                try:
                    # handle TypeError: don't know how to handle UnicodeDecodeError in error callback
                    process_output = process_output.decode(encoding, errors="ignore")
                except (ValueError, TypeError):
                    # What happens when str cannot be concatenated
                    logger.debug("Output cannot be captured {}".format(process_output))
        return process_output

    def _read_pipe(
        stream,  # type: io.StringIO
        output_queue,  # type: queue.Queue
    ):
        # type: (...) -> None
        """
        will read from subprocess.PIPE
        Must be threaded since readline() might be blocking on Windows GUI apps
        Partly based on https://stackoverflow.com/a/4896288/2635443
        """
        # WARNING: Depending on the stream type (binary or text), the sentinel character
        # needs to be of the same type, or the iterator won't have an end
        # We also need to check that stream has readline, in case we're writing to files instead of PIPE
        if hasattr(stream, "readline"):
            sentinel_char = "" if hasattr(stream, "encoding") else b""
            for line in iter(stream.readline, sentinel_char):
                output_queue.put(line)
            output_queue.put(None)
            stream.close()

    def _poll_process(
        process,  # type: Union[subprocess.Popen[str], subprocess.Popen]
        timeout,  # type: int
        encoding,  # type: str
        errors,  # type: str
    ):
        # type: (...) -> Tuple[Optional[int], str]
        """
        Process stdout/stderr output polling is only used in live output mode
        since it takes more resources than using communicate()

        Reads from process output pipe until:
        - Timeout is reached, in which case we'll terminate the process
        - Process ends by itself

        Returns an encoded string of the pipe output
        """
        begin_time = datetime.now()
        output = ""
        output_queue = queue.Queue()

        def __check_timeout(
            begin_time,  # type: datetime.timestamp
            timeout,  # type: int
        ):
            # type: (...) -> None
            """
            Simple subfunction to check whether timeout is reached
            Since we check this alot, we put it into a function
            """
            if timeout and (datetime.now() - begin_time).total_seconds() > timeout:
                kill_childs_mod(process.pid, itself=True, soft_kill=False)
                raise TimeoutExpired(process, timeout, output)

        try:
            # Reader runs in its own thread because readline() can block.
            read_thread = threading.Thread(
                target=_read_pipe, args=(process.stdout, output_queue)
            )
            read_thread.daemon = True  # thread dies with the program
            read_thread.start()

            while True:
                try:
                    line = output_queue.get(timeout=MIN_RESOLUTION)
                except queue.Empty:
                    __check_timeout(begin_time, timeout)
                else:
                    if line is None:
                        break
                    else:
                        line = to_encoding(line, encoding, errors)
                        if live_output:
                            sys.stdout.write(line)
                        output += line
                    __check_timeout(begin_time, timeout)

            # Make sure we wait for the process to terminate, even after
            # output_queue has finished sending data, so we catch the exit code
            while process.poll() is None:
                __check_timeout(begin_time, timeout)
            # Additional timeout check to make sure we don't return an exit code from processes
            # that were killed because of timeout
            __check_timeout(begin_time, timeout)
            exit_code = process.poll()
            return exit_code, output
        except KeyboardInterrupt:
            raise KbdInterruptGetOutput(output)

    def _timeout_check_thread(
        process,  # type: Union[subprocess.Popen[str], subprocess.Popen]
        timeout,  # type: int
        timeout_queue,  # type: queue.Queue
    ):
        # type: (...) -> None
        """
        Since elder python versions don't have timeout, we need to manually check the timeout for a process
        """
        begin_time = datetime.now()
        while True:
            if timeout and (datetime.now() - begin_time).total_seconds() > timeout:
                kill_childs_mod(process.pid, itself=True, soft_kill=False)
                timeout_queue.put(True)
                break
            if process.poll() is not None:
                break
            sleep(MIN_RESOLUTION)

    def _monitor_process(
        process,  # type: Union[subprocess.Popen[str], subprocess.Popen]
        timeout,  # type: int
        encoding,  # type: str
        errors,  # type: str
    ):
        # type: (...) -> Tuple[Optional[int], str]
        """
        Create a thread in order to enforce timeout
        Get stdout output and return it
        """
        # Shared mutable objects have proven to have race conditions with PyPy 3.7 (mutable object
        # is changed in thread, but outer monitor function has still old mutable object state)
        # Strangely, this happened only sometimes on github actions/ubuntu 20.04.3 & pypy 3.7
        # Let's create a queue to get the timeout thread response on a deterministic way
        timeout_queue = queue.Queue()
        is_timeout = False

        thread = threading.Thread(
            target=_timeout_check_thread,
            args=(process, timeout, timeout_queue),
        )
        thread.setDaemon(True)
        thread.start()

        process_output = None
        stdout = None

        try:
            # Don't use process.wait() since it may deadlock on old Python versions
            # Also it won't allow communicate() to get incomplete output on timeouts
            while process.poll() is None:
                sleep(MIN_RESOLUTION)
                try:
                    is_timeout = timeout_queue.get_nowait()
                except queue.Empty:
                    pass
                else:
                    break
                # We still need to use process.communicate() in this loop so we don't get stuck
                # with poll() is not None even after process is finished
                if _stdout is not False:
                    try:
                        stdout, _ = process.communicate()
                    # ValueError is raised on closed IO file
                    except (TimeoutExpired, ValueError):
                        pass
            exit_code = process.poll()

            if _stdout is not False:
                try:
                    stdout, _ = process.communicate()
                except (TimeoutExpired, ValueError):
                    pass
            process_output = to_encoding(stdout, encoding, errors)

            # On PyPy 3.7 only, we can have a race condition where we try to read the queue before
            # the thread could write to it, failing to register a timeout.
            # This workaround prevents reading the queue while the thread is still alive
            while thread.is_alive():
                sleep(MIN_RESOLUTION)

            try:
                is_timeout = timeout_queue.get_nowait()
            except queue.Empty:
                pass

            if is_timeout:
                raise TimeoutExpired(process, timeout, process_output)
            return exit_code, process_output
        except KeyboardInterrupt:
            raise KbdInterruptGetOutput(process_output)

    try:
        # Finally, we won't use encoding & errors arguments for Popen
        # since it would defeat the idea of binary pipe reading in live mode
        # Python >= 3.3 has SubProcessError(TimeoutExpired) class
        # Python >= 3.6 has encoding & error arguments
        # universal_newlines=True makes netstat command fail under windows
        # timeout does not work under Python 2.7 with subprocess32 < 3.5
        # decoder may be cp437 or unicode_escape for dos commands or utf-8 for powershell
        # Disabling pylint error for the same reason as above
        # pylint: disable=E1123
        if sys.version_info >= (3, 6):
            process = subprocess.Popen(
                command,
                stdout=_stdout,
                stderr=_stderr,
                shell=shell,
                universal_newlines=universal_newlines,
                encoding=encoding,
                errors=errors,
                creationflags=creationflags,
                bufsize=bufsize,  # 1 = line buffered
                close_fds=close_fds,
                **kwargs
            )
        else:
            process = subprocess.Popen(
                command,
                stdout=_stdout,
                stderr=_stderr,
                shell=shell,
                universal_newlines=universal_newlines,
                creationflags=creationflags,
                bufsize=bufsize,
                close_fds=close_fds,
                **kwargs
            )

        try:
            if method == "poller" or live_output and _stdout is not False:
                exit_code, output = _poll_process(process, timeout, encoding, errors)
            else:
                exit_code, output = _monitor_process(process, timeout, encoding, errors)
        except KbdInterruptGetOutput as exc:
            exit_code = -252
            output = "KeyboardInterrupted. Partial output\n{}".format(exc.output)
            try:
                kill_childs_mod(process.pid, itself=True, soft_kill=False)
            except AttributeError:
                pass
            if stdout_to_file:
                _stdout.write(output.encode(encoding, errors=errors))

        logger.debug(
            'Command "{}" returned with exit code "{}". Command output was:'.format(
                command, exit_code
            )
        )
    except subprocess.CalledProcessError as exc:
        exit_code = exc.returncode
        try:
            output = exc.output
        except AttributeError:
            output = "command_runner: Could not obtain output from command."
        # NOTE(review): conditional-expression precedence makes this
        # `(exit_code in valid_exit_codes) if valid_exit_codes is not None else [0]`,
        # and `[0]` is always truthy — so when valid_exit_codes is None this
        # branch is taken unconditionally. Probably intended
        # `exit_code in (valid_exit_codes if valid_exit_codes is not None else [0])`;
        # confirm before changing.
        if exit_code in valid_exit_codes if valid_exit_codes is not None else [0]:
            logger.debug(
                'Command "{}" returned with exit code "{}". Command output was:'.format(
                    command, exit_code
                )
            )
        # NOTE(review): the error lines below run regardless of the check
        # above (they are not in an else) — verify this is intentional.
        logger.error(
            'Command "{}" failed with exit code "{}". Command output was:'.format(
                command, exc.returncode
            )
        )
        logger.error(output)
    except FileNotFoundError as exc:
        logger.error('Command "{}" failed, file not found: {}'.format(command, exc))
        exit_code, output = -253, exc.__str__()
    # On python 2.7, OSError is also raised when file is not found (no FileNotFoundError)
    # pylint: disable=W0705 (duplicate-except)
    except (OSError, IOError) as exc:
        logger.error('Command "{}" failed because of OS: {}'.format(command, exc))
        exit_code, output = -253, exc.__str__()
    except TimeoutExpired as exc:
        message = 'Timeout {} seconds expired for command "{}" execution. Original output was: {}'.format(
            timeout, command, exc.output
        )
        logger.error(message)
        if stdout_to_file:
            _stdout.write(message.encode(encoding, errors=errors))
        exit_code, output = (
            -254,
            'Timeout of {} seconds expired for command "{}" execution. Original output was: {}'.format(
                timeout, command, exc.output
            ),
        )
    # We need to be able to catch a broad exception
    # pylint: disable=W0703
    except Exception as exc:
        logger.error(
            'Command "{}" failed for unknown reasons: {}'.format(command, exc),
            exc_info=True,
        )
        logger.debug("Error:", exc_info=True)
        exit_code, output = -255, exc.__str__()
    finally:
        if stdout_to_file:
            _stdout.close()
        if stderr_to_file:
            _stderr.close()

    logger.debug(output)
    return exit_code, output
30,778
def read_table(name):
    """Mock of IkatsApi.table.read method: look the table up by name."""
    table = TABLES[name]
    return table
30,779
def cosh(x, out=None):
    """
    Hyperbolic cosine with quantity support.

    Raises a ValueError if input cannot be rescaled to a dimensionless
    quantity.
    """
    # Plain arrays/scalars take the numpy fast path unchanged.
    if not isinstance(x, Quantity):
        return np.cosh(x, out)
    magnitude = x.rescale(dimensionless).magnitude
    return Quantity(np.cosh(magnitude, out), dimensionless, copy=False)
30,780
def dates_from_360cal(time):
    """Convert numpy.datetime64 values in 360 calendar format.

    This is because 360 calendar cftime objects are problematic, so we
    will use datetime module to re-create all dates using the available
    data.

    Parameters
    ----------
    time: single or numpy.ndarray of cftime._cftime.Datetime360Day
        (anything exposing .year/.month/.day attributes works)

    Returns
    -------
    DatetimeIndex object.
    """  # noqa
    # Build datetimes directly from the attributes instead of the original
    # format-string + strptime round-trip; behavior is identical, including
    # raising ValueError for day numbers a real calendar rejects (e.g. Feb 30).
    dates = [datetime.datetime(d.year, d.month, d.day) for d in time]
    return pd.to_datetime(dates)
30,781
def db_queue(**data):
    """Add a record to queue table.

    Arguments:
        **data: The queue record data; must contain a 'request' key.

    Returns:
        (dict): The inserted queue record.
    """
    assert 'request' in data.keys()
    record = Queue(**data)
    db.session.add(record)
    db.session.commit()
    return dict(record)
30,782
def loadMnistData(trainOrTestData='test'):
    """Loads MNIST data from sklearn or web.

    :param str trainOrTestData: Must be 'train' or 'test' and specifies which \
        part of the MNIST dataset to load.
    :return: images, targets
    """
    mnist = loadMNIST()
    # The canonical MNIST split: first 60000 rows train, the rest test.
    if trainOrTestData == 'train':
        rows = slice(None, 60000)
    elif trainOrTestData == 'test':
        rows = slice(60000, None)
    else:
        raise ValueError("trainOrTestData must be 'train' or 'test'.")
    X = mnist.data[rows, :].astype(np.uint8)
    y = mnist.target[rows].astype(np.uint8)
    return X, y
30,783
def times_once() -> _Timing:
    """
    Expect the request a single time

    :return: Timing object
    """
    single_call = 1
    return _Timing(single_call)
30,784
def test_run_job_batch_as_admin_with_job_reqs(ee2_port, ws_controller, mongo_client):
    """
    A test of the run_job method focusing on job requirements and minimizing all other
    inputs. Since the batch endpoint uses the same code path as the single job endpoint
    for processing job requirements, we only have a single test that mixes job
    requirements from the input, catalog, and deploy configuration, as opposed to the
    multiple tests for single jobs.
    """
    _set_up_workspace_objects(ws_controller, TOKEN_NO_ADMIN, "foo")  # ws 1
    # need to get the mock objects first so spec_set can do its magic before we mock out
    # the classes in the context manager
    sub, schedd, txn = _get_htc_mocks()
    # seriously black you're killing me here. This is readable?
    with patch("htcondor.Submit", spec_set=True, autospec=True) as sub_init, patch(
        "htcondor.Schedd", spec_set=True, autospec=True
    ) as schedd_init, patch(
        CAT_LIST_CLIENT_GROUPS, spec_set=True, autospec=True
    ) as list_cgroups, patch(
        CAT_GET_MODULE_VERSION, spec_set=True, autospec=True
    ) as get_mod_ver:
        # set up the rest of the mocks
        _finish_htc_mocks(sub_init, schedd_init, sub, schedd, txn)
        # Scheduler ids returned for the two child jobs, in order.
        sub.queue.side_effect = [123, 456]
        # Catalog responses: job 1 gets a client group, job 2 gets disk/memory
        # requirements (memory is then overridden by the input below).
        list_cgroups.side_effect = [
            [{"client_groups": ['{"client_group":"bigmem"}']}],
            [{"client_groups": ['{"request_disk":8,"request_memory":5}']}],
        ]
        get_mod_ver.side_effect = [
            {"git_commit_hash": "somehash"},
            {"git_commit_hash": "somehash2"},
        ]

        # run the method
        job1_params = {"method": _MOD}
        # Job 2 supplies explicit job_requirements that must win over the
        # catalog values above.
        job2_params = {
            "method": "mod2.meth2",
            "job_requirements": {
                "request_memory": 42,
                "client_group": "extreme",
                "client_group_regex": 0,
                "bill_to_user": "forrest_gump",
                "ignore_concurrency_limits": "true",
                "scheduler_requirements": {"foo": "bar", "baz": "bat"},
                "debug_mode": True,
            },
        }
        job_batch_wsid = 1
        job_batch_params = {"wsid": job_batch_wsid, "as_admin": "foo"}
        ee2 = ee2client(f"http://localhost:{ee2_port}", token=TOKEN_WRITE_ADMIN)
        ret = ee2.run_job_batch([job1_params, job2_params], job_batch_params)
        # May need to increase sleep if thread takes too long
        time.sleep(0.1)

        batch_id = ret["batch_id"]
        job_id_1, job_id_2 = ret["child_job_ids"]

        # check that mocks were called correctly
        # Since these are class methods, the first argument is self, which we ignore
        get_mod_ver.assert_has_calls(
            [
                call(ANY, {"module_name": "mod", "version": "release"}),
                call(ANY, {"module_name": "mod2", "version": "release"}),
            ]
        )
        list_cgroups.assert_has_calls(
            [
                call(ANY, {"module_name": "mod", "function_name": "meth"}),
                call(ANY, {"module_name": "mod2", "function_name": "meth2"}),
            ]
        )

        # Verify the two child job records persisted to mongo.
        job1 = _get_mongo_job(mongo_client, job_id_1)
        job2 = _get_mongo_job(mongo_client, job_id_2)

        expected_job1 = {
            "_id": ObjectId(job_id_1),
            "user": USER_WRITE_ADMIN,
            "authstrat": "kbaseworkspace",
            "status": "queued",
            "wsid": job_batch_wsid,
            "batch_id": batch_id,
            "job_input": {
                "wsid": job_batch_wsid,
                "method": _MOD,
                "service_ver": "somehash",
                "source_ws_objects": [],
                "requirements": {
                    "clientgroup": "bigmem",
                    "cpu": 4,
                    "memory": 2000,
                    "disk": 100,
                },
                "narrative_cell_info": {},
            },
            "child_jobs": [],
            "batch_job": False,
            "scheduler_id": "123",
            "scheduler_type": "condor",
            "retry_ids": [],
            "retry_saved_toggle": False,
        }
        assert job1 == expected_job1

        expected_job2 = {
            "_id": ObjectId(job_id_2),
            "user": USER_WRITE_ADMIN,
            "authstrat": "kbaseworkspace",
            "status": "queued",
            "wsid": job_batch_wsid,
            "batch_id": batch_id,
            "job_input": {
                "wsid": job_batch_wsid,
                "method": "mod2.meth2",
                "service_ver": "somehash2",
                "source_ws_objects": [],
                "requirements": {
                    "clientgroup": "extreme",
                    "cpu": 32,
                    "memory": 42,
                    "disk": 8,
                },
                "narrative_cell_info": {},
            },
            "child_jobs": [],
            "batch_job": False,
            "scheduler_id": "456",
            "scheduler_type": "condor",
            "retry_ids": [],
            "retry_saved_toggle": False,
        }
        assert job2 == expected_job2

        # The parent (batch) job links the two children and never queues.
        parent_job = _get_mongo_job(mongo_client, batch_id, has_queued=False)
        expected_parent_job = {
            "_id": ObjectId(batch_id),
            "user": USER_WRITE_ADMIN,
            "authstrat": "kbaseworkspace",
            "wsid": job_batch_wsid,
            "status": "created",
            "job_input": {
                "method": "batch",
                "service_ver": "batch",
                "app_id": "batch",
                "source_ws_objects": [],
                "narrative_cell_info": {},
            },
            "child_jobs": [job_id_1, job_id_2],
            "batch_job": True,
            "retry_ids": [],
            "retry_saved_toggle": False,
        }
        assert parent_job == expected_parent_job

        # Finally, check the HTCondor submissions built for each child.
        expected_sub_1 = _get_condor_sub_for_rj_param_set(
            job_id_1,
            USER_WRITE_ADMIN,
            TOKEN_WRITE_ADMIN,
            clientgroup="bigmem",
            cpu=4,
            mem=2000,
            disk=100,
            parent_job_id=batch_id,
            app_id=None,
            app_module=None,
            wsid=job_batch_wsid,
        )
        expected_sub_1.update(
            {"+KB_SOURCE_WS_OBJECTS": "", "+KB_WSID": f'"{job_batch_wsid}"'}
        )
        expected_sub_2 = _get_condor_sub_for_rj_param_set(
            job_id_2,
            USER_WRITE_ADMIN,
            TOKEN_WRITE_ADMIN,
            clientgroup="extreme",
            cpu=32,
            mem=42,
            disk=8,
            parent_job_id=batch_id,
            app_id=None,
            app_module=None,
            wsid=job_batch_wsid,
        )
        expected_sub_2.update(
            {
                "+KB_SOURCE_WS_OBJECTS": "",
                "+KB_WSID": f'"{job_batch_wsid}"',
                "+AccountingGroup": '"forrest_gump"',
                "+KB_MODULE_NAME": '"mod2"',
                "+KB_FUNCTION_NAME": '"meth2"',
                "requirements": '(CLIENTGROUP == "extreme") && (baz == "bat") && (foo == "bar")',
                "environment": expected_sub_2["environment"].replace(
                    "DEBUG_MODE=False", "DEBUG_MODE=True"
                ),
            }
        )
        # ignore_concurrency_limits removes the limit key entirely.
        del expected_sub_2["Concurrency_Limits"]

        _check_batch_htc_calls(
            sub_init, schedd_init, sub, schedd, txn, expected_sub_1, expected_sub_2
        )
30,785
def linear_growth(mesh, pos, coefficient):
    """Applies a homothety to a dictionary of coordinates.

    Parameters
    ----------
    mesh : Topomesh
        Not used in this algorithm
    pos : dict(int -> iterable)
        Dictionary (pid -> ndarray) of the tissue vertices
    coefficient : float or ndarray
        Scaling coefficient for the homothety

    Returns
    -------
    dict(int -> ndarray)
        dictionary (pid -> new position) of the vertices
    """
    utilities.check_pos(pos)
    scaling = np.array(coefficient)
    # Fixed Python 2 remnants: dict.iteritems() and the `<>` operator are
    # SyntaxErrors on Python 3; use .items() and an explicit None check.
    res = {pid: scaling * vec for pid, vec in pos.items()}
    assert all(new_pos is not None for new_pos in res.values())
    return res
30,786
def _setup_logging(verbose=False):
    """Initialize logging and set level based on verbose.

    :type verbose: bool
    :arg verbose: When True, set log level to DEBUG.
    """
    if verbose:
        log_level = logging.DEBUG
    else:
        log_level = logging.WARN
    logging.basicConfig(
        format='%(asctime)s [%(levelname)s] %(message)s',
        level=log_level,
    )
    logging.debug('Verbose output enabled.')
30,787
def einstein_t(tini, tfin, npoint, HT_lim=3000,dul=False,model=1):
    """
    Computes the *Einstein temperature*

    Args:
        tini: minimum temperature (K) of the fitting interval
        tfin: maximum temperature
        npoint: number of points in the T range
        HT_lim: high temperature limit where Cv approaches the Dulong-Petit
                value
        dul: if True, return the fitted Einstein temperature(s) (ein_fit)
        model: if model=1 a single Einstein oscillator is considered (default),
               if model > 1, 2 Einstein oscillators are considered
    """
    # Remember and clear any fixed Kp so the EoS fit below is unconstrained,
    # then restore it at the end.
    flag_int=False
    if f_fix.flag:
        kp_original=f_fix.value
        flag_int=True
        reset_fix()
    v0, k_gpa, kp=eos_temp(298.15,prt=False, update=True)
    set_fix(kp)
    print("Kp fixed to %4.2f" % kp)
    vol=new_volume(298.15,0.0001)
    ent, cve=entropy_v(298.15,vol[0])
    # Dulong-Petit limit: 3 N k_B per formula unit.
    dp_limit=apfu*3*avo*kb
    # Empirical estimate of the Einstein temperature from molar entropy.
    emp=10636/(ent/apfu+6.44)
    t_range=np.linspace(tini, tfin, npoint)
    cv_list=np.array([])
    for ti in t_range:
        enti, cvi=entropy_v(ti, vol, plot=False, prt=False)
        cv_list=np.append(cv_list, cvi)
    reset_fix()
    if flag_int:
        set_fix(kp_original)
    # Anchor the fit at HT_lim with the Dulong-Petit value, weighted heavily
    # (small sigma) so the fit honors the high-T limit.
    t_range=np.append(t_range,HT_lim)
    cv_list=np.append(cv_list, dp_limit)
    sigma=np.ones(len(t_range))
    sigma[len(sigma)-1]=0.1
    if model==1:
        ein_fit, ein_cov=curve_fit(einstein_fun, t_range, cv_list, p0=emp, \
                  sigma=sigma, xtol=1e-15, ftol=1e-15)
    else:
        ein_fit, ein_cov=curve_fit(einstein_2_fun, t_range, cv_list, \
                  sigma=sigma,p0=[emp,emp], xtol=1e-15, ftol=1e-15)
    t_range_new=np.linspace(tini,HT_lim,50)
    plt.figure()
    if model==1:
        plt.plot(t_range_new, einstein_fun(t_range_new, ein_fit[0]), "k-", \
                 t_range, cv_list, "k*")
    else:
        plt.plot(t_range_new, einstein_2_fun(t_range_new, ein_fit[0],ein_fit[1]), "k-", \
                 t_range, cv_list, "k*")
    plt.xlabel("Temperature (K)")
    plt.ylabel("Cv (J/mol K)")
    plt.show()
    print("\nEinstein temperature")
    print("empirical estimation (from molar entropy): %6.2f K" % emp)
    if model==1:
        print("result from fit: %6.2f K" % ein_fit[0])
    else:
        print("result from fit: %6.2f, %6.2f K" % (ein_fit[0],ein_fit[1]))
    print("Dulong-Petit limit for Cv (T = %5.2f K): %6.2f J/mol K" % \
          (HT_lim, dp_limit))
    # Tabulate "real" (model-derived) vs fitted Cv over the fitting interval.
    t_table=np.linspace(tini,tfin,10)
    cv_real=np.array([])
    cv_ein=np.array([])
    for ti in t_table:
        enti, cri=entropy_v(ti, vol, plot=False, prt=False)
        if model==1:
            ce=einstein_fun(ti,ein_fit[0])
        else:
            ce=einstein_2_fun(ti,ein_fit[0],ein_fit[1])
        cv_real=np.append(cv_real, cri)
        cv_ein=np.append(cv_ein, ce)
    serie=(t_table,cv_real,cv_ein)
    pd.set_option('colheader_justify', 'center')
    df=pd.DataFrame(serie, index=['T (K)','Cv "real"','Cv "fit"'])
    df=df.T
    df2=df.round(2)
    print("")
    print(df2.to_string(index=False))
    if model==1:
        print("\nFit at T = %6.2f K: Cv = %6.2f J/mol K" % \
              (HT_lim, einstein_fun(HT_lim, ein_fit[0])))
    else:
        print("\nFit at T = %6.2f K: Cv = %6.2f J/mol K" % \
              (HT_lim, einstein_2_fun(HT_lim, ein_fit[0], ein_fit[1])))
    if dul:
        return ein_fit
30,788
def normalize_uri(path_uri: "str | pathlib.Path") -> str:
    """Convert any path to a ``file://`` URI. If already a URI, return it.

    Args:
        path_uri: A filesystem path (``str`` or ``pathlib.Path``) or an
            already-formed URI string.  (The previous annotation said
            ``str``, but the code explicitly handles ``pathlib.Path``.)

    Returns:
        ``path_uri`` unchanged when it is a URI string, otherwise the
        resolved absolute path converted to a URI.
    """
    # A Path object is never a URL, so only probe strings with is_url();
    # the isinstance check also keeps Path out of is_url's input.
    if not isinstance(path_uri, pathlib.Path) and is_url(path_uri):
        return path_uri
    return pathlib.Path(path_uri).resolve().as_uri()
30,789
def RETune(ont: Ontology, training: [Annotation]):
    """ Tune the relation extraction class over a range of various values and return the correct
    parameters

    Params:
        ont (RelationExtractor/Ontology) - The ontology of information needed to form the base
        training ([Datapoint]) - A collection of data points to be able to perform cross validation

    Returns:
        scores - A data structure that holds all of the metric scores for the extractor against the structures then against the alphas
        structures - The network sizes and shapes
        alphas - The neural network
    """
    logging.getLogger().setLevel(logging.ERROR)  # Ensure that logging output is captured

    # The structures to validate
    structures = [(3,1), (4,2), (6,3), (8,4), (12,6), (20,10), (50,20)]
    alphas = logspace(-16,1,20)

    scores = []
    for layers in structures:
        layer_scores = []
        for alpha in alphas:

            # Worker run in a separate process: trains an extractor on one
            # K-fold split and pushes its score dict onto the queue.
            def run(queue, tr, val):
                # Resolve fold indices into the actual annotation objects
                tr, val = [training[i] for i in tr], [training[i] for i in val]

                # Create a new extractor model
                ext = RelationExtractor(ontology=ont, hidden_layers=layers, alpha=alpha)

                # Generate the training and validation documents
                Xtr, Xtv = Document(), Document()
                Xtr.datapoints(tr)
                Xtv.datapoints(val)

                # Fit, predict and score
                ext.fit(Xtr)
                ext.predict(Xtv)

                results = score(ont, [Xtv])
                queue.put(results[0])

            # One process per fold of a 5-fold shuffled split
            queue = Queue()
            processors = [Process(target=run, args=(queue, tr, val))
                          for tr, val in KFold(n_splits=5, shuffle=True).split(training)]
            [p.start() for p in processors]
            [p.join() for p in processors]
            # NOTE(review): joining before draining the queue can deadlock if
            # a child blocks on a full queue -- confirm payloads stay small.
            alpha_scores = [queue.get() for _ in range(5)]

            # Average precision/recall/f1 across the 5 folds
            compressed = {"precision":[],"recall":[],"f1":[]}
            for r in alpha_scores:
                for k, v in r.items():
                    compressed[k].append(v)
            for k, v in compressed.items():
                compressed[k] = sum(v)/len(v)

            layer_scores.append(compressed)
        scores.append(layer_scores)

    return scores, structures, alphas
30,790
def create_instance(test_id, config, args):
    """Factory hook invoked by the TestExecutor class.

    @test_id - test index number
    @config - test parameters from, config
    @args - command line args
    """
    instance = TestNodeConnectivity(test_id, config, args)
    return instance
30,791
def to_me() -> Rule:
    """Build a rule matching only events addressed to the bot.

    The check delegates to ``event.is_tome()`` through
    :class:`ToMeRule`.  Takes no parameters.
    """
    checker = ToMeRule()
    return Rule(checker)
30,792
def download_process_hook(event: dict):
    """
    Allows to handle processes of downloading episode's file.
    It is called by `youtube_dl.YoutubeDL`
    """
    # Prefer the exact byte total; fall back to the estimate, then 0.
    total = event.get("total_bytes") or event.get("total_bytes_estimate", 0)
    # If no downloaded counter is present yet, report the total as done.
    processed = event.get("downloaded_bytes", total)
    episode_process_hook(
        status=EpisodeStatus.DL_EPISODE_DOWNLOADING,
        filename=event["filename"],
        total_bytes=total,
        processed_bytes=processed,
    )
30,793
def test_divisor_count1():
    """
    Testing for a large number against an O(sqrt(n)) reference count.

    The previous reference scanned all `num` candidates (O(n), ~12.3M
    iterations); counting divisor pairs (i, num // i) up to sqrt(num)
    yields the same count far faster.
    """
    num = 12345637
    cnt = 0
    i = 1
    while i * i <= num:
        if num % i == 0:
            # i and num//i are distinct unless num is a perfect square
            cnt += 1 if i * i == num else 2
        i += 1
    assert divisor_count(num) == cnt
30,794
def CanonicalPrint(root, stream=sys.stdout, exclusive=False, inclusivePrefixes=None):
    """
    Given a Node instance assumed to be the root of an XML DOM or Domlette
    tree, this function serializes the document to the given stream or
    stdout, using c14n serialization, according to
    http://www.w3.org/TR/xml-c14n (the default) or
    http://www.w3.org/TR/xml-exc-c14n/
    This function does nothing if root is not a Node.

    exclusive - if true, apply exclusive c14n according to
                http://www.w3.org/TR/xml-exc-c14n/
    inclusivePrefixes - if exclusive is True, use this as a list of namespaces
                        representing the "InclusiveNamespacesPrefixList" list in exclusive c14n

    Please import this from Ft.Xml.Domlette rather than directly from Ft.Xml.Lib.
    """
    from Ft.Xml.Domlette import SeekNss
    # Guard: silently ignore anything that is not a DOM/Domlette node
    if not hasattr(root, "nodeType"):
        return
    added_attributes = {}  #All the contents should be XML NS attrs
    nshints = {}
    if not exclusive:
        #Roll in ancestral xml:* attributes
        parent_xml_attrs = root.xpath(u'ancestor::*/@xml:*')
        for attr in parent_xml_attrs:
            aname = (attr.namespaceURI, attr.nodeName)
            if (aname not in added_attributes and aname not in root.attributes):
                added_attributes[attr.nodeName] = attr.value
    nsnodes = root.xpath('namespace::*')
    inclusivePrefixes = inclusivePrefixes or []
    # Per exclusive c14n, '#default' in the prefix list stands for the
    # default (empty-prefix) namespace
    if u'#default' in inclusivePrefixes:
        inclusivePrefixes.remove(u'#default')
        inclusivePrefixes.append(u'')
    decls_to_remove = []
    if exclusive:
        # Prefixes actually used by this element and its attributes
        used_prefixes = [node.prefix for node in root.xpath('self::*|@*')]
        declared_prefixes = []
        # Collect xmlns declarations present on the element; mark the
        # unused ones for removal (exclusive c14n drops them)
        for ans, anodename in root.attributes:
            if ans == XMLNS_NAMESPACE:
                attr = root.attributes[ans, anodename]
                prefix = attr.localName
                declared_prefixes.append(prefix)
                #print attr.prefix, attr.localName, attr.nodeName
                if attr.localName not in used_prefixes:
                    decls_to_remove.append(prefix)
        #for prefix in used_prefixes:
        #    if prefix not in declared_prefixes:
        #        nshints[ns.nodeName] = ns.value
    #Roll in ancestral NS nodes
    for ns in nsnodes:
        prefix = ns.nodeName
        if (ns.value != XML_NAMESPACE
                and (XMLNS_NAMESPACE, ns.nodeName) not in root.attributes
                and (not exclusive or ns.localName in inclusivePrefixes)):
            #added_attributes[(XMLNS_NAMESPACE, ns.nodeName)] = ns.value
            nshints[prefix] = ns.value
        elif (exclusive and prefix in used_prefixes
                and prefix not in declared_prefixes):
            # Used but undeclared prefix: surface it as a hint
            nshints[prefix] = ns.value
    # Delegate actual serialization to the c14n-aware print visitor
    visitor = PrintVisitor(stream, 'UTF-8', nshints, False, 0, True,
                           added_attributes, decls_to_remove)
    visitor.visit(root)
    return
30,795
def unsaturated_atom_keys(xgr):
    """ keys of unsaturated (radical or pi-bonded) atoms """
    # Atoms with a truthy (nonzero) unsaturated valence are unsaturated.
    vlc_dct = atom_unsaturated_valences(xgr, bond_order=False)
    return frozenset(dict_.keys_by_value(vlc_dct, bool))
30,796
def recognize_keyword() -> None:
    """
    Listens for the keyword, to activate the assistant.

    Steps:
    1. Listens for audio from the microphone
    2. Recognizes the audio using `gTTS`
    3. Checks if the keyword (as in `settings.KEYWORD`) is in the audio data
       (if True, break loop)
    """
    global keyword_detected
    global new_process

    audio = listen()
    new_process = True

    log.debug("Recognizing keyword...")
    try:
        rec_input = recognizer.recognize_google(audio, language=settings.LANGUAGE)

        if settings.KEYWORD in rec_input.lower():
            log.debug("Keyword detected!")
            # stop listening
            keyword_detected = True
        else:
            log.debug("Keyword not detected in '{0}'".format(rec_input))
    except sr.UnknownValueError:
        log.debug("Speech engine could not resolve audio")
    except sr.RequestError:
        log.error("An error ocurred with the Google services, try again")
    except Exception:
        # Previously a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; log.exception records the traceback in one call.
        log.exception("A unknown error ocurred...")
30,797
def clip_data(input_file, latlim, lonlim):
    """
    Clip the data to the defined extend of the user (latlim, lonlim)

    Keyword Arguments:
    input_file -- path to a GeoTIFF, or an already-open gdal dataset
    latlim -- [ymin, ymax]
    lonlim -- [xmin, xmax]

    Returns:
    (data, Geo_out) -- the clipped array and its new GeoTransform tuple
    """
    # Accept either a '*.tif' path or a dataset object; non-strings raise
    # AttributeError on .split and are used directly.
    try:
        if input_file.split('.')[-1] == 'tif':
            dest_in = gdal.Open(input_file)
        else:
            dest_in = input_file
    except Exception:  # was a bare except; narrowed, same fallback
        dest_in = input_file

    # Open Array
    data_in = dest_in.GetRasterBand(1).ReadAsArray()

    # Define the array that must remain
    Geo_in = dest_in.GetGeoTransform()
    Geo_in = list(Geo_in)
    Start_x = np.max([int(np.floor(((lonlim[0]) - Geo_in[0])/ Geo_in[1])),0])
    End_x = np.min([int(np.ceil(((lonlim[1]) - Geo_in[0])/ Geo_in[1])),
                    int(dest_in.RasterXSize)])
    # Note: Geo_in[5] (pixel height) is negative for north-up rasters
    Start_y = np.max([int(np.floor((Geo_in[3] - latlim[1])/ -Geo_in[5])),0])
    End_y = np.min([int(np.ceil(((latlim[0]) - Geo_in[3])/Geo_in[5])),
                    int(dest_in.RasterYSize)])

    # Create new GeoTransform anchored at the clip window origin
    Geo_in[0] = Geo_in[0] + Start_x * Geo_in[1]
    Geo_in[3] = Geo_in[3] + Start_y * Geo_in[5]
    Geo_out = tuple(Geo_in)

    # Slice the window directly; the old np.zeros pre-allocation was dead
    # code, immediately overwritten by this assignment.
    data = data_in[Start_y:End_y, Start_x:End_x]

    dest_in = None

    return (data, Geo_out)
30,798
async def get_task_authors(url, request_data, session, resp, phid):
    """
    :Summary: Get the Phabricator tasks that the user authored.
    :param url: URL to be fetched (single-element sequence).
    :param request_data: Phabricator token and JSON Request Payload
        (single-element sequence; mutated in place to set the paging cursor).
    :param session: ClientSession to perform the API request.
    :param resp: Global response array to which the response from the API has
        to be appended.
    :param phid: Phabricator ID of the user (single-element sequence used as
        an out-parameter; set to the author PHID, or True when none found).
    :return: None
    """
    # Skip entirely when no author constraint was supplied
    if request_data[0]['constraints[authorPHIDs][0]'] != '':
        # `page` forces the first iteration; `after` holds the pagination
        # cursor (False means no further page).
        page, after = True, False
        while page or after is not False:
            page = False
            after = False
            async with session.post(url[0], data=request_data[0]) as response:
                data = await response.read()
                data = loads(data.decode('utf-8'))['result']
                resp.extend(data['data'])
                # Record the author PHID from the first result, once
                if phid[0] is False:
                    if len(data['data']) > 0:
                        phid[0] = data['data'][0]['fields']['authorPHID']
                    else:
                        phid[0] = True
                # Advance the cursor for the next request, if any
                if data['cursor']['after']:
                    after = data['cursor']['after']
                    request_data[0]['after'] = after
30,799