Dataset columns: content (string, lengths 22 to 815k characters), id (int64, values 0 to 4.91M).
def friend_invitation_by_email_verify_for_api( # friendInvitationByEmailVerify voter_device_id, invitation_secret_key, web_app_root_url=''): """ :param voter_device_id: :param invitation_secret_key: :param web_app_root_url: :return: """ status = "" success = False # If a voter_device_id is passed in that isn't valid, we want to throw an error device_id_results = is_voter_device_id_valid(voter_device_id) if not device_id_results['success']: status += device_id_results['status'] json_data = { 'status': status, 'success': False, 'voter_device_id': voter_device_id, 'voter_has_data_to_preserve': False, 'invitation_found': False, 'attempted_to_approve_own_invitation': False, 'invitation_secret_key': invitation_secret_key, 'invitation_secret_key_belongs_to_this_voter': False, } return json_data if not positive_value_exists(invitation_secret_key): status += "VOTER_EMAIL_ADDRESS_VERIFY_MISSING_SECRET_KEY " error_results = { 'status': status, 'success': True, 'voter_device_id': voter_device_id, 'voter_has_data_to_preserve': False, 'invitation_found': False, 'attempted_to_approve_own_invitation': False, 'invitation_secret_key': invitation_secret_key, 'invitation_secret_key_belongs_to_this_voter': False, } return error_results voter_manager = VoterManager() voter_results = voter_manager.retrieve_voter_from_voter_device_id(voter_device_id) voter_id = voter_results['voter_id'] if not positive_value_exists(voter_id): status += "VOTER_NOT_FOUND_FROM_VOTER_DEVICE_ID " error_results = { 'status': status, 'success': False, 'voter_device_id': voter_device_id, 'voter_has_data_to_preserve': False, 'invitation_found': False, 'attempted_to_approve_own_invitation': False, 'invitation_secret_key': invitation_secret_key, 'invitation_secret_key_belongs_to_this_voter': False, } return error_results voter = voter_results['voter'] voter_we_vote_id = voter.we_vote_id voter_has_data_to_preserve = voter.has_data_to_preserve() friend_manager = FriendManager() friend_invitation_results = friend_manager.retrieve_friend_invitation_from_secret_key( invitation_secret_key, for_accepting_friendship=True, read_only=False) if not friend_invitation_results['friend_invitation_found']: status += "INVITATION_NOT_FOUND_FROM_SECRET_KEY " error_results = { 'status': status, 'success': True, 'voter_device_id': voter_device_id, 'voter_has_data_to_preserve': voter_has_data_to_preserve, 'invitation_found': False, 'attempted_to_approve_own_invitation': False, 'invitation_secret_key': invitation_secret_key, 'invitation_secret_key_belongs_to_this_voter': False, } return error_results # Now that we have the friend_invitation data, look more closely at it invitation_found = True voter_we_vote_id_accepting_invitation = "" email_manager = EmailManager() if friend_invitation_results['friend_invitation_voter_link_found']: friend_invitation_voter_link = friend_invitation_results['friend_invitation_voter_link'] if friend_invitation_voter_link.sender_voter_we_vote_id == voter_we_vote_id: status += "SENDER_AND_RECIPIENT_ARE_IDENTICAL_FAILED " error_results = { 'status': status, 'success': True, 'voter_device_id': voter_device_id, 'voter_has_data_to_preserve': voter_has_data_to_preserve, 'invitation_found': True, 'attempted_to_approve_own_invitation': True, 'invitation_secret_key': invitation_secret_key, 'invitation_secret_key_belongs_to_this_voter': True, } return error_results voter_we_vote_id_accepting_invitation = friend_invitation_voter_link.recipient_voter_we_vote_id # Now we want to make sure we have a current_friend entry 
recipient_organization_we_vote_id = '' voter_results = voter_manager.retrieve_voter_by_we_vote_id( friend_invitation_voter_link.recipient_voter_we_vote_id) if voter_results['voter_found']: recipient_organization_we_vote_id = voter_results['voter'].linked_organization_we_vote_id friend_results = friend_manager.create_or_update_current_friend( friend_invitation_voter_link.sender_voter_we_vote_id, friend_invitation_voter_link.recipient_voter_we_vote_id, voter.linked_organization_we_vote_id, recipient_organization_we_vote_id ) friend_manager.update_suggested_friends_starting_with_one_voter( friend_invitation_voter_link.sender_voter_we_vote_id) friend_manager.update_suggested_friends_starting_with_one_voter( friend_invitation_voter_link.recipient_voter_we_vote_id) accepting_voter_we_vote_id = voter_we_vote_id_accepting_invitation original_sender_we_vote_id = friend_invitation_voter_link.sender_voter_we_vote_id results = friend_accepted_invitation_send(accepting_voter_we_vote_id, original_sender_we_vote_id, web_app_root_url=web_app_root_url) # Update the PositionNetworkCount entries for both friends add_position_network_count_entries_for_one_friend( 0, accepting_voter_we_vote_id, voter_we_vote_id=original_sender_we_vote_id) add_position_network_count_entries_for_one_friend( 0, original_sender_we_vote_id, voter_we_vote_id=accepting_voter_we_vote_id) # Now that a CurrentFriend entry exists, update the FriendInvitation... if friend_results['success']: try: friend_invitation_voter_link.invitation_status = ACCEPTED friend_invitation_voter_link.save() except Exception as e: success = False status += 'FAILED_TO_UPDATE_INVITATION_STATUS1 ' + str(e) + ' ' else: success = False status += "friend_invitation_voter_link_found CREATE_OR_UPDATE_CURRENT_FRIEND_FAILED " # We don't need to do anything with the email because this was an invitation to a known voter elif friend_invitation_results['friend_invitation_email_link_found']: friend_invitation_email_link = friend_invitation_results['friend_invitation_email_link'] if friend_invitation_email_link.sender_voter_we_vote_id == voter_we_vote_id: status += "SENDER_AND_RECIPIENT_ARE_IDENTICAL_FAILED " error_results = { 'status': status, 'success': False, 'voter_device_id': voter_device_id, 'voter_has_data_to_preserve': voter_has_data_to_preserve, 'invitation_found': True, 'attempted_to_approve_own_invitation': True, 'invitation_secret_key': invitation_secret_key, 'invitation_secret_key_belongs_to_this_voter': False, } return error_results this_voter_has_first_or_last_name_saved = voter_manager.this_voter_has_first_or_last_name_saved(voter) if positive_value_exists(friend_invitation_email_link.recipient_first_name) or \ positive_value_exists(friend_invitation_email_link.recipient_last_name): we_have_first_or_last_name_from_friend_invitation_email_link = True else: we_have_first_or_last_name_from_friend_invitation_email_link = False # Check to see if the email used has been claimed by a voter account yet temp_voter_we_vote_id = "" update_voter_name = False email_results = email_manager.retrieve_primary_email_with_ownership_verified( temp_voter_we_vote_id, friend_invitation_email_link.recipient_voter_email) if email_results['email_address_object_found']: # The email belongs to this or another voter email_address_object = email_results['email_address_object'] voter_we_vote_id_accepting_invitation = email_address_object.voter_we_vote_id # We might need to heal the data in the voter record if voter_we_vote_id_accepting_invitation != voter_we_vote_id: email_owner_results = 
voter_manager.retrieve_voter_by_we_vote_id(email_address_object.voter_we_vote_id) if email_owner_results['voter_found']: email_owner_voter = email_owner_results['voter'] voter_manager.update_voter_email_ownership_verified(email_owner_voter, email_address_object) else: # If we are here, then the email_address_object doesn't belong to another voter and can be # claimed by this current voter. voter_manager.update_voter_email_ownership_verified(voter, email_address_object) if we_have_first_or_last_name_from_friend_invitation_email_link and \ not this_voter_has_first_or_last_name_saved: # The current voter does not have first or last name, and we have incoming names to apply update_voter_name = True else: voter_we_vote_id_accepting_invitation = voter_we_vote_id # If we are here, we know the email is unclaimed. We can assign it to the current voter. # Is there an email address entry for this voter/email? email_we_vote_id = '' email_results = email_manager.retrieve_email_address_object( friend_invitation_email_link.recipient_voter_email, email_we_vote_id, voter_we_vote_id) if email_results['email_address_object_found']: email_address_object = email_results['email_address_object'] try: email_address_object.email_ownership_is_verified = True email_address_object.secret_key = generate_random_string(12) # Reset the secret_key email_address_object.save() voter_manager.update_voter_email_ownership_verified(voter, email_address_object) if we_have_first_or_last_name_from_friend_invitation_email_link and \ not this_voter_has_first_or_last_name_saved: # The current voter does not have first or last name, and we have incoming names to apply update_voter_name = True except Exception as e: success = False status += 'FAILED_TO_UPDATE_UNVERIFIED_EMAIL ' + str(e) + ' ' else: email_ownership_is_verified = True email_create_results = email_manager.create_email_address_for_voter( friend_invitation_email_link.recipient_voter_email, voter, email_ownership_is_verified) if email_create_results['email_address_object_saved']: email_address_object = email_create_results['email_address_object'] voter_manager.update_voter_email_ownership_verified(voter, email_address_object) if we_have_first_or_last_name_from_friend_invitation_email_link and \ not this_voter_has_first_or_last_name_saved: # The current voter does not have first or last name, and we have incoming names to apply update_voter_name = True # The current voter does not have first or last name, and we have incoming names that can be used if update_voter_name: results = voter_manager.update_voter_name_by_object( voter, friend_invitation_email_link.recipient_first_name, friend_invitation_email_link.recipient_last_name) if results['voter_updated']: voter = results['voter'] # Now that we know who owns the recipient_email_address, update invitation status sender_organization_we_vote_id = '' voter_results = voter_manager.retrieve_voter_by_we_vote_id( friend_invitation_email_link.sender_voter_we_vote_id) if voter_results['voter_found']: sender_organization_we_vote_id = voter_results['voter'].linked_organization_we_vote_id friend_results = friend_manager.create_or_update_current_friend( friend_invitation_email_link.sender_voter_we_vote_id, voter_we_vote_id_accepting_invitation, sender_organization_we_vote_id, voter.linked_organization_we_vote_id ) friend_manager.update_suggested_friends_starting_with_one_voter( friend_invitation_email_link.sender_voter_we_vote_id) friend_manager.update_suggested_friends_starting_with_one_voter(voter_we_vote_id_accepting_invitation) 
accepting_voter_we_vote_id = voter_we_vote_id_accepting_invitation original_sender_we_vote_id = friend_invitation_email_link.sender_voter_we_vote_id friend_accepted_invitation_send(accepting_voter_we_vote_id, original_sender_we_vote_id, web_app_root_url=web_app_root_url) # Update the PositionNetworkCount entries for both friends add_position_network_count_entries_for_one_friend( 0, accepting_voter_we_vote_id, voter_we_vote_id=original_sender_we_vote_id) add_position_network_count_entries_for_one_friend( 0, original_sender_we_vote_id, voter_we_vote_id=accepting_voter_we_vote_id) if friend_results['success']: try: friend_invitation_email_link.invitation_status = ACCEPTED friend_invitation_email_link.save() success = True status += ' friend_invitation_email_link_found FRIENDSHIP_CREATED ' except Exception as e: success = False status += 'FAILED_TO_UPDATE_INVITATION_STATUS2 ' + str(e) + ' ' else: success = False status += "friend_invitation_email_link_found CREATE_OR_UPDATE_CURRENT_FRIEND_FAILED " # And finally, create an organization for this brand new signed-in voter so they can create public opinions organization_name = voter.get_full_name() organization_website = "" organization_twitter_handle = "" organization_twitter_id = "" organization_email = "" organization_facebook = "" organization_image = voter.voter_photo_url() organization_type = INDIVIDUAL organization_manager = OrganizationManager() create_results = organization_manager.create_organization( organization_name, organization_website, organization_twitter_handle, organization_email, organization_facebook, organization_image, organization_twitter_id, organization_type) if create_results['organization_created']: # Add value to twitter_owner_voter.linked_organization_we_vote_id when done. organization = create_results['organization'] try: voter.linked_organization_we_vote_id = organization.we_vote_id voter.save() status += "VOTER_AND_ORGANIZATION_CREATED_FROM_FRIEND_INVITATION " except Exception as e: status += "UNABLE_CREATE_AND_LINK_VOTER_FROM_FRIEND_INVITATION " + str(e) + ' ' invitation_secret_key_belongs_to_this_voter = \ voter_we_vote_id == voter_we_vote_id_accepting_invitation json_data = { 'status': status, 'success': success, 'voter_device_id': voter_device_id, 'voter_has_data_to_preserve': voter_has_data_to_preserve, 'invitation_found': invitation_found, 'attempted_to_approve_own_invitation': False, 'invitation_secret_key': invitation_secret_key, 'invitation_secret_key_belongs_to_this_voter': invitation_secret_key_belongs_to_this_voter, } return json_data
12,400
def preprocess_rinex(rinex_file, target_directory=None): """Read a RINEX Navigation Message file and reformat the data. Read a file with name "BRDC00IGS_R_yyyyddd0000_01D_MN.rnx" that was downloaded from https://cddis.nasa.gov/archive/gnss/data/daily/yyyy/brdc/ where yyyy is the year and ddd is the day of the year. The file must be unpacked first such that it is a plain text file in RINEX 3 format. It is expected to contain data for GPS (G), Galileo (E), and BeiDou (C) in this order. If target_directory=None, then return a 2D navigation data NumPy array with 21 rows for GPS, Galileo, and BeiDou, respectively, else attempt to write the three arrays in the '.npy' format to the directory specified in target_directory: yyyy_ddd_G.npy for GPS, yyyy_ddd_E.npy for Galileo, and yyyy_ddd_C.npy for BeiDou. Units are either seconds, meters, or radians. Typical call: preprocess_rinex("BRDC00IGS_R_20203410000_01D_MN.rnx") Inputs: rinex_file - Path to unpacked RINEX Navigation Message file ending with "BRDC00IGS_R_yyyyddd0000_01D_MN.rnx" target_directory - Directory to store the navigation data matrices or None if they shall be returned, default=None Outputs: eph_G - GPS navigation data matrix, 2D NumPy array with 21 rows eph_E - Galileo navigation data matrix, 2D NumPy array with 21 rows eph_D - BeiDou navigation data matrix, 2D NumPy array with 21 rows Author: Jonas Beuchert """ with open(rinex_file, "r") as fide: # Skip header line = fide.readline() while not line == "" and not line.find("END OF HEADER") > -1: line = fide.readline() if line == "": raise Exception( "Invalid RINEX navigation data file." ) # Expected maximum number of columns for a single GNSS (Galileo) max_col = 20000 # Set aside memory for the input svprn = np.zeros(max_col) toe = np.zeros(max_col) af2 = np.zeros(max_col) af1 = np.zeros(max_col) af0 = np.zeros(max_col) deltan = np.zeros(max_col) M0 = np.zeros(max_col) ecc = np.zeros(max_col) roota = np.zeros(max_col) toe = np.zeros(max_col) cic = np.zeros(max_col) crc = np.zeros(max_col) cis = np.zeros(max_col) crs = np.zeros(max_col) cuc = np.zeros(max_col) cus = np.zeros(max_col) Omega0 = np.zeros(max_col) omega = np.zeros(max_col) i0 = np.zeros(max_col) Omegadot = np.zeros(max_col) idot = np.zeros(max_col) # Create list for returned matrices eph = [] # Loop over all three GNSS (expected order: GPS, Galileo, BeiDou) line = fide.readline() gnss_list = ["G", "E", "C"] while not line == "" and len(gnss_list) > 0: gnss = gnss_list.pop(0) # Loop until next desired GNSS is found while not line == "" and line[0] != gnss: line = fide.readline() if line == "": raise Exception( "RINEX navigation data file does not contain data for " + "all desired GNSS or they are not in the desired order " + "(G - E - C)." 
) # reset index i = 0 # Loop over all entries for this GNSS while line[0] == gnss: try: # Read one entry svprn[i] = int(line[1:3]) af0[i] = float(line[23:42]) af1[i] = float(line[42:61]) af2[i] = float(line[61:80]) line = fide.readline() crs[i] = float(line[23:42]) deltan[i] = float(line[42:61]) M0[i] = float(line[61:80]) line = fide.readline() cuc[i] = float(line[4:23]) ecc[i] = float(line[23:42]) cus[i] = float(line[42:61]) roota[i] = float(line[61:80]) line = fide.readline() toe[i] = float(line[4:23]) cic[i] = float(line[23:42]) Omega0[i] = float(line[42:61]) cis[i] = float(line[61:80]) line = fide.readline() i0[i] = float(line[4:23]) crc[i] = float(line[23:42]) omega[i] = float(line[42:61]) Omegadot[i] = float(line[61:80]) line = fide.readline() idot[i] = float(line[4:23]) line = fide.readline() line = fide.readline() except: Exception( "Found corrupted entry for GNSS {}.".format(gnss) ) # Read first line of next entry line = fide.readline() i += 1 # Reformat data into array with 21 rows eph.append(np.array( [ svprn[:i], af2[:i], M0[:i], roota[:i], deltan[:i], ecc[:i], omega[:i], cuc[:i], cus[:i], crc[:i], crs[:i], i0[:i], idot[:i], cic[:i], cis[:i], Omega0[:i], Omegadot[:i], toe[:i], af0[:i], af1[:i], toe[:i] ] )) if len(gnss_list) > 0: raise Exception( "RINEX navigation data file does not contain data for " + "all desired GNSS or they are not in the desired order " + "(G - E - C)." ) if target_directory is None: return tuple(eph) # Extract year and day of year yyyy = rinex_file[-22:-18] ddd = rinex_file[-18:-15] # Save three .npy files for gnss, eph_gnss in zip(["G", "E", "C"], eph): np.save(os.path.join(target_directory, yyyy + "_" + ddd + "_" + gnss), eph_gnss)
12,401
def _load_image(fnames, dim=None, device=None, label=False):
    """Load a N-D image from disk"""
    dat, affine = _map_image(fnames, dim)
    if label:
        dtype = dat.dtype
        if isinstance(dtype, (list, tuple)):
            dtype = dtype[0]
        dtype = dtypes.as_torch(dtype, upcast=True)
        dat0 = dat.data(device=device, dtype=dtype)[0]  # assume single channel
        if label is True:
            label = dat0.unique(sorted=True)
            label = label[label != 0].tolist()
        dat = torch.zeros([len(label), *dat0.shape], device=device)
        for i, l in enumerate(label):
            dat[i] = dat0 == l
    else:
        dat = dat.fdata(device=device, rand=True)
    affine = affine.to(dat.device, dat.dtype)
    return dat, affine
12,402
def GetCurrentVersion(paths, platform):
    """Find the current component version by iterating gsbucket root folder.

    Args:
        paths: ([str]) a list of folder paths strings.
        platform: (str) the platform for which the component is being built

    Returns:
        str: current component version.
        str: gs path for current component version.
    """
    current_version = LooseVersion('0.0.0.0')
    current_version_path = None
    for version_path in paths:
        if version_path[-1] != '/':
            logger.fatal("version_path (%s) needs to end with '/'.", version_path)
            continue
        version = os.path.basename(version_path[:-1])
        if len(ParseVersion(version)) < 3:
            # Path does not contain a component version.
            continue
        v = LooseVersion(version)
        if v > current_version:
            # Skip the version if the path for the target platform does not exist.
            ctx = gs.GSContext()
            src = os.path.join(version_path, platform, COMPONENT_ZIP)
            if not ctx.Exists(src):
                continue
            current_version = v
            current_version_path = version_path
    return str(current_version), current_version_path
12,403
def dtpool(name: str) -> Tuple[int, str, bool]:
    """
    Return the data about a kernel pool variable.

    https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dtpool_c.html

    :param name: Name of the variable whose value is to be returned.
    :return:
        Number of values returned for name,
        Type of the variable "C", "N", or "X".
    """
    name = stypes.string_to_char_p(name)
    found = ctypes.c_int()
    n = ctypes.c_int()
    typeout = ctypes.c_char()
    libspice.dtpool_c(name, ctypes.byref(found), ctypes.byref(n), ctypes.byref(typeout))
    return n.value, stypes.to_python_string(typeout.value), bool(found.value)
12,404
def write_output(df, filename):
    """
    Writes the given data frame to the given file.

    :param pd.DataFrame df
    :param str filename
    """
    logging.info('Writing output')
    feather.write_dataframe(df, filename)
12,405
def add_similar_tracks(position_or_range=":", howmany=5, relative_positions=True):
    """
    Adds up to the value of howmany tracks similar to each track on the
    current playlist.

    parameters:
    ===========
    # position_or_range:
      The position of the track to add similar tracks to.
      Can also be a range string as "START:STOP" with the STOP position not
      included; acceptable ranges include empty START and/or empty STOP values
      for the first and last values in the playlist. The character "c" can be
      used to indicate the current playing position.
      Example:
        ":" for all tracks
        ":-3" for all but the last three tracks
        "c:" for the current playing song and the rest following it
    # howmany:
      Maximum number of added similar tracks per existing track
    # relative_positions:
      Whether to add similar tracks after their respective original track
      (True) or at the end of the playlist (False)

    Returns the number of added tracks
    """
    # Handle the range case
    if type(position_or_range) == str and ":" in position_or_range:
        (start, stop) = position_or_range.split(":")
        if start == "":
            start = 0
        if stop == "":
            stop = get_playlist_length()
        if start == "c":
            start = _mpd_current_playlist_position()
        if stop == "c":
            stop = _mpd_current_playlist_position()
        (start, stop) = (int(start), int(stop))
        if stop < 0:
            stop = get_playlist_length() + stop
        added = 0
        for i in xrange(start, stop):
            if relative_positions:
                added += add_similar_tracks(i + added, howmany, True)
            else:
                added += add_similar_tracks(i, howmany, False)
        return added

    # Handle the single position case
    added = 0
    relative_buffer = {}
    normal_buffer = []
    if position_or_range == "c":
        position = int(_mpd_current_playlist_position())
    else:
        position = int(position_or_range)
    # Get similar tracks' URIs
    for track in _get_similar_tracks(_mpd_get_playlist(position)[0]):
        if added >= howmany:
            break
        # look up track
        uris = _mpd_lookup_track(track)
        if not uris:
            continue
        # check to see if it's already added
        uri = uris[0]
        if _is_track_added(uri):
            continue
        # add it to the buffer
        if relative_positions:
            relative_buffer[position + added + 1] = uri
        else:
            normal_buffer += [uri]
        added += 1
    if added < howmany:
        print added
        artist = _mpd_get_playlist(position)[0]["artist"]
        artists = [artist]
        artists.extend(_get_similar_artists(artist))
        songs = []
        for a in artists:
            uris = _mpd_lookup_artist_tracks(a)
            songs.extend(uris)
        random.shuffle(songs)
        for song in songs:
            if added >= howmany:
                break
            # check to see if it's already added
            if _is_track_added(song):
                continue
            # add it to the buffer
            if relative_positions:
                relative_buffer[position + added + 1] = song
            else:
                normal_buffer += [song]
            added += 1
        print added
    # add tracks from buffer
    _mpd_client.command_list_ok_begin()
    if relative_positions:
        keys = relative_buffer.keys()
        keys.sort()
        for key in keys:
            _mpd_add_track(relative_buffer[key], key)
    else:
        for uri in normal_buffer:
            _mpd_add_track(uri)
    _mpd_client.command_list_end()
    return added
12,406
def get_hot_article_tags():
    """
    Get the hot article tags.

    :return: all tags flagged as hot
    """
    return Tag.objects.filter(is_hot=True)
12,407
def patch_user_interface_null() -> MockedUserInterfaceNull:
    """Patch player interface with no players."""
    return patch("steam.api.interface", return_value=MockedUserInterfaceNull())
12,408
def setup_logging(loglevel):
    """Setup basic logging

    Args:
        loglevel (int): minimum loglevel for emitting messages
    """
    logformat = "[%(asctime)s] %(levelname)s:%(name)s:%(message)s"
    logging.basicConfig(level=loglevel,
                        format=logformat,
                        datefmt="%Y-%m-%d %H:%M:%S",
                        handlers=[_handler])
12,409
def confirm(new_command, side_effect, settings):
    """Returns `True` when running of new command confirmed."""
    if not settings.require_confirmation:
        logs.show_command(new_command, side_effect, settings)
        return True

    logs.confirm_command(new_command, side_effect, settings)
    try:
        sys.stdin.read(1)
        return True
    except KeyboardInterrupt:
        logs.failed('Aborted', settings)
        return False
12,410
def setup_log(level):
    """
    Note that the log level may be changed by the cfg file
    """
    logging.basicConfig(level=level,
                        format='%(asctime)s %(levelname)s: %(message)s',
                        datefmt='%Y/%m/%d %H:%M:%S')
    logger.debug("DEBUG")
12,411
def first_localmax_index(data):
    """Return index of the first local maximum.

    If there is no local maximum (e.g. if all the values are zero),
    it will simply return zero.
    """
    localmax_indexes = signal.argrelextrema(data, numpy.greater, mode='wrap')
    if localmax_indexes[0].size > 0:
        return localmax_indexes[0][0]
    else:
        return 0
12,412
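A quick usage sketch for the first_localmax_index snippet above, assuming numpy and scipy.signal are importable under the names the function uses; the sample array is made up for illustration.

import numpy
from scipy import signal

data = numpy.array([0.0, 1.0, 0.5, 2.0, 0.2])
# With mode='wrap' the array is treated as circular; the first strict
# local maximum is at index 1 (1.0 > 0.0 and 1.0 > 0.5).
print(first_localmax_index(data))  # -> 1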
def _rds_clone_(dataset, name=''):
    """Clone dataset
    >>> dataset = ...
    >>> cloned  = dataset.clone('new_name')
    """
    name = name if name else dsID()
    return ROOT.RooDataSet(dataset, name)
12,413
def getFirstDateOfQuarter(date):
    """
    Return: {Date} The start date of the quarter for the given date.
    """
    # Bug if first date of the quarter is used, so add 1 if that's the case.
    if date.day == 1:
        date = date + timedelta(days=1)
    quarter_start = pd.to_datetime(date - pd.tseries.offsets.QuarterBegin(startingMonth=1)).date()
    return quarter_start
12,414
def abbink_onset_detector(signal=None, rest=None, sampling_rate=1000., size=None, alarm_size=None, threshold=None, transition_threshold=None): """Determine onsets of EMG pulses. Follows the approach by Abbink et al.. [Abb98]_. Parameters ---------- signal : array Input filtered EMG signal. rest : array, list, dict One of the following 3 options: * N-dimensional array with filtered samples corresponding to a rest period; * 2D array or list with the beginning and end indices of a segment of the signal corresponding to a rest period; * Dictionary with {'mean': mean value, 'std_dev': standard variation}. sampling_rate : int, float, optional Sampling frequency (Hz). size : int Detection window size (seconds). alarm_size : int Number of amplitudes searched in the calculation of the transition index. threshold : int, float Detection threshold. transition_threshold: int, float Threshold used in the calculation of the transition index. Returns ------- onsets : array Indices of EMG pulse onsets. processed : array Processed EMG signal. References ---------- .. [Abb98] Abbink JH, van der Bilt A, van der Glas HW, "Detection of onset and termination of muscle activity in surface electromyograms", Journal of Oral Rehabilitation, vol. 25, pp. 365–369, 1998 """ # check inputs if signal is None: raise TypeError("Please specify an input signal.") if rest is None: raise TypeError("Please specidy rest parameters.") if size is None: raise TypeError("Please specify the detection window size.") if alarm_size is None: raise TypeError("Please specify the number of amplitudes searched in " "the calculation of the transition index.") if threshold is None: raise TypeError("Please specify the detection threshold.") if transition_threshold is None: raise TypeError("Please specify the second threshold.") # gather statistics on rest signal if isinstance(rest, np.ndarray) or isinstance(rest, list): # if the input parameter is a numpy array or a list if len(rest) >= 2: # first ensure numpy rest = np.array(rest) if len(rest) == 2: # the rest signal is a segment of the signal rest_signal = signal[rest[0]:rest[1]] else: # the rest signal is provided as is rest_signal = rest rest_zero_mean = rest_signal - np.mean(rest_signal) statistics = st.signal_stats(signal=rest_zero_mean) mean_rest = statistics['mean'] std_dev_rest = statistics['std_dev'] else: raise TypeError("Please specify the rest analysis.") elif isinstance(rest, dict): # if the input is a dictionary mean_rest = rest['mean'] std_dev_rest = rest['std_dev'] else: raise TypeError("Please specify the rest analysis.") # subtract baseline offset signal_zero_mean = signal - np.mean(signal) # full-wave rectification fwlo = np.abs(signal_zero_mean) # moving average mvgav = np.convolve(fwlo, np.ones((size,)) / size, mode='valid') # calculate the test function tf = (1 / std_dev_rest) * (mvgav - mean_rest) # additional filter filtered_tf, _, _ = st.filter_signal(signal=tf, ftype='butter', band='lowpass', order=10, frequency=30, sampling_rate=sampling_rate) # convert from numpy array to list to use list comprehensions filtered_tf = filtered_tf.tolist() onset_time_list = [] offset_time_list = [] alarm_time = 0 onset = False alarm = False for k in range(0, len(tf)): if onset is True: # an onset was previously detected and we are looking for the offset time, applying the same criteria if alarm is False: if filtered_tf[k] < threshold: # the first index of the sliding window is used as an estimate for the onset time (simple post-processor) alarm_time = k alarm = True else: # if 
alarm_time > alarm_window_size and len(emg_conditioned_list) == (alarm_time + alarm_window_size + 1): if alarm_time > alarm_size and k == (alarm_time + alarm_size + 1): transition_indices = [] for j in range(alarm_size, alarm_time): low_list = [filtered_tf[j-alarm_size+a] for a in range(1, alarm_size+1)] low = sum(i < transition_threshold for i in low_list) high_list = [filtered_tf[j+b] for b in range(1, alarm_size+1)] high = sum(i > transition_threshold for i in high_list) transition_indices.append(low + high) offset_time_list = np.where(transition_indices == np.amin(transition_indices))[0].tolist() onset = False alarm = False else: # we only look for another onset if a previous offset was detected if alarm is False: if filtered_tf[k] >= threshold: # the first index of the sliding window is used as an estimate for the onset time (simple post-processor) alarm_time = k alarm = True else: # if alarm_time > alarm_window_size and len(emg_conditioned_list) == (alarm_time + alarm_window_size + 1): if alarm_time > alarm_size and k == (alarm_time + alarm_size + 1): transition_indices = [] for j in range(alarm_size, alarm_time): low_list = [filtered_tf[j-alarm_size+a] for a in range(1, alarm_size+1)] low = sum(i < transition_threshold for i in low_list) high_list = [filtered_tf[j+b] for b in range(1, alarm_size+1)] high = sum(i > transition_threshold for i in high_list) transition_indices.append(low + high) onset_time_list = np.where(transition_indices == np.amax(transition_indices))[0].tolist() onset = True alarm = False onsets = np.union1d(onset_time_list, offset_time_list) # adjust indices because of moving average onsets += int(size / 2) return utils.ReturnTuple((onsets, filtered_tf), ('onsets', 'processed'))
12,415
def reparse(metadata):
    """Some things need to be parsed again after the build environment has been
    created and activated."""
    metadata.final = False
    sys.path.insert(0, metadata.config.build_prefix)
    sys.path.insert(0, metadata.config.host_prefix)
    py_ver = '.'.join(metadata.config.variant['python'].split('.')[:2])
    sys.path.insert(0, utils.get_site_packages(metadata.config.host_prefix, py_ver))
    metadata.parse_until_resolved()
    metadata = finalize_metadata(metadata)
    return metadata
12,416
def _process_image(filename, coder):
    """Process a single image file.

    Args:
        filename: string, path to an image file e.g., '/path/to/example.JPG'.
        coder: instance of ImageCoder to provide TensorFlow image coding utils.
    Returns:
        image_buffer: string, JPEG encoding of RGB image.
        height: integer, image height in pixels.
        width: integer, image width in pixels.
    """
    # Read the image file.
    with tf.gfile.FastGFile(filename, 'rb') as f:
        image_data = f.read()

    # Convert any PNG to JPEG's for consistency.
    if _is_png(filename):
        print('Converting PNG to JPEG for %s' % filename)
        image_data = coder.png_to_jpeg(image_data)

    # Decode the RGB JPEG.
    image = coder.resample_jpeg(image_data)

    return image, RESIZE_HEIGHT, RESIZE_WIDTH
12,417
def test_get_end_coords():
    """
    Tests the get_end_coords function

    :return: Asserts true if test cases pass, false if otherwise
    """
    start = Coordinates(4, 5)
    end = Coordinates(9, 16)
    length = 7
    out_ln = 3
    in_ln = 2
    angle = (math.pi/2)
    speed_limit = 10

    test_coords = get_end_coords(start, end, length, out_ln, in_ln, angle, speed_limit, 'Test')

    assert test_coords.get_x() == end.get_x()
    assert test_coords.get_y() == end.get_y()
    assert test_coords.get_x() != start.get_x()
    assert test_coords.get_y() != start.get_y()
12,418
def fork_processes_with_watchdog( num_processes, is_shutdown_callback, child_pids=None, stoploss_ratio=STOPLOSS_RATIO, timeout_period=TIMEOUT_PERIOD, sleep_period=SLEEP_PERIOD, grace_period=GRACE_PERIOD, max_rss=MAX_RSS, statsd=None, app_name='default'): """Starts multiple worker processes. If ``num_processes`` is None or <= 0, we detect the number of cores available on this machine and fork that number of child processes. If ``num_processes`` is given and > 0, we fork that specific number of sub-processes. Since we use processes and not threads, there is no shared memory between any server code. Note that multiple processes are not compatible with the autoreload module (or the debug=True option to `tornado.web.Application`). When using multiple processes, no IOLoops can be created or referenced until after the call to ``fork_processes``. In each child process, ``fork_processes`` returns its *task id*, a number between 0 and ``num_processes``. Processes that exit abnormally (due to a signal or non-zero exit status) are restarted with the same id (up to ``max_restarts`` times). In the parent process, ``fork_processes`` returns None if all child processes have exited normally, but will otherwise only exit by throwing an exception. """ global _task_id assert _task_id is None assert multiprocessing is not None assert psutil is not None if num_processes is None or num_processes <= 0: num_processes = cpu_count() if ioloop.IOLoop.initialized(): raise RuntimeError("Cannot run in multiple processes: IOLoop instance " "has already been initialized. You cannot call " "IOLoop.instance() before calling start_processes()") logging.info("Starting %d processes", num_processes) children = {} pipes = {} last_checkin = {} processes = {} healthy = set() def send_stat(metric, val): if statsd: statsd.incr(metric,val,use_prefix=False) def start_child(i): parent_conn, child_conn = multiprocessing.Pipe(duplex=False) pid = os.fork() if pid == 0: # child process _reseed_random() global _task_id _task_id = i return i, child_conn else: # NOTE [adam Nov/11/12]: bit of a hack... 
lists behave as # pass-by-reference, so this lets me minimize restructuring # of this module and still get a list of child pids into # the application object if child_pids is not None: child_pids.append(pid) children[pid] = i last_checkin[pid] = time.time() + grace_period pipes[pid] = (parent_conn, child_conn) processes[pid] = psutil.Process(pid) logging.info("Started new child process with number %d pid %d", i, pid) return None, None def cleanup_pid(pid, remove_from_child_pids=True): if remove_from_child_pids and pid in child_pids: child_pids.remove(pid) if pid in pipes: parent_pipe, child_pipe = pipes[pid] try: parent_pipe.close() except Exception: logging.exception("Problem while closing pipe for "\ "pid %d", pid) try: child_pipe.close() except Exception: logging.exception("Problem while closing pipe for "\ "pid %d", pid) pipes.pop(pid, None) last_checkin.pop(pid, None) children.pop(pid, None) processes.pop(pid, None) if pid in healthy: healthy.remove(pid) def stoplossed(): if float(len(healthy)) / num_processes < stoploss_ratio: return True return False for i in range(num_processes): _id, child_conn = start_child(i) if _id is not None: return _id, child_conn # for keeping track of which processes we've gracefully killed # due to memory issues sent_term = set() while children: try: # Try to respawn any children that don't exist yet # this can happen during bad site issues where a process # dies because it can't connect to mongo, etc and then gets # reaped by our upstart checker. This will put us in a period # of permanent stoploss, so to avoid that we respawn here # Only execute if we're not shutting down. if not is_shutdown_callback(): alive_children = set(children.values()) children_to_spawn = set(range(num_processes)) - alive_children for child_number in children_to_spawn: _id, child_conn = start_child(child_number) if _id is not None: return _id, child_conn else: # there's a race where we can spawn children after the tornado # app has received the shutdown signal. This will ensure we will # eventually shutdown. Should happen very rarely for pid, child_number in children.iteritems(): try: os.kill(pid, signal.SIGTERM) if pid in healthy: healthy.remove(pid) send_stat('infra.frontend.%s.shutdown_term' % app_name,1) except OSError: logging.exception( "Failed to terminate pid %d process number %d "\ "while shutting down", pid, child_number) # for all the processes we've sent SIGTERM, make sure they are # not running anymore - if they are escalate to SIGKILL. 
# Shouldn't need to check for stoploss, since len(sent_term) # must be < what would put us over the stoploss # We need to do this first so that we don't mistakenly think that # upstart is trying to shut us down since that also sends a SIGTERM # to the child processes for i, (pid, child_number) in enumerate(sent_term): if pid not in processes: continue if processes[pid].is_running(): logging.info("Trying to kill pid %d process number %d", pid, child_number) try: os.kill(pid, signal.SIGKILL) send_stat('infra.frontend.%s.kill' % app_name,1) except OSError: logging.exception("Failed to kill pid %d process number %d", pid, child_number) logging.info("Trying to wait on pid %d process number %d", pid, child_number) try: os.waitpid(pid, os.WNOHANG) except OSError: logging.exception("Failed to wait on pid %d process number %d", pid, child_number) cleanup_pid(pid) if not is_shutdown_callback(): _id, child_conn = start_child(child_number) if _id is not None: return _id, child_conn # reset this sent_term = set() # Detect if we're being shut down by upstart, in which case # we no longer want to respawn child processes that have died # also detect if the child has exited on its own (died on startup) to_cleanup = set() for pid, _ in children.iteritems(): try: _resp_pid, status = os.waitpid(pid, os.WNOHANG) except OSError as e: if e.errno == errno.EINTR: continue if e.errno != errno.ECHILD: raise # Still alive if status == 0: continue to_cleanup.add(pid) logging.info("Detected that pid %d died", pid) for pid in to_cleanup: cleanup_pid(pid, remove_from_child_pids=False) # If we have child processes, see if they're up and running to_reap = set() exceed_mem = set() for pid, child_number in children.iteritems(): parent_pipe, child_pipe = pipes[pid] got_msg = False # get the last message that was sent try: while parent_pipe.poll(): status = parent_pipe.recv() got_msg = True except Exception: logging.exception("Problem while polling pipe for "\ "pid %d process number %d", pid, child_number) if got_msg: if not status: logging.info( "Empty status message from pid %d process number %d", pid, child_number) if 'checkin_time' not in status: logging.info( "Check in time not reported from pid %d process number %d", pid, child_number) last_checkin[pid] = time.time() healthy.add(pid) else: last_checkin[pid] = status['checkin_time'] healthy.add(pid) if time.time() - last_checkin[pid] > timeout_period: to_reap.add((pid, child_number)) logging.info( "Scheduling pid %d process number %d to be reaped "\ 'for failing to check in after %d seconds', pid, child_number, timeout_period) try: # only terminate if it's after grace period if processes[pid].create_time() + grace_period < time.time(): rss, _ = processes[pid].memory_info()[:2] if rss > max_rss: exceed_mem.add((pid, child_number)) logging.info( "Scheduling pid %d process number %d to be reaped "\ 'for exceeding MAX_RSS (%dMB/%dMB)', pid, child_number, rss / (1024 * 1024), max_rss / (1024 * 1024)) except Exception: logging.exception( "Unable to get RSS from pid %d process number %d", pid, child_number) # Reap the child processes that are stuck for i, (pid, child_number) in enumerate(to_reap): if stoplossed(): logging.info( "Not enough tornadoes healthy, stoploss initiated.") send_stat('infra.frontend.%s.stoplossed' % app_name,1) break logging.info("Trying to gracefully terminate pid "\ "%d process number %d", pid, child_number) try: os.kill(pid, signal.SIGTERM) sent_term.add((pid, child_number)) if pid in healthy: healthy.remove(pid) send_stat('infra.frontend.%s.term' % 
app_name,1) except OSError: logging.exception("Failed to terminate pid %d process number %d", pid, child_number) # if its timed out, we've already termed it exceed_mem -= to_reap for i, (pid, child_number) in enumerate(exceed_mem): if stoplossed(): logging.info( "Not enough tornadoes healthy, stoploss initiated.") send_stat('infra.frontend.%s.stoplossed_mem' % app_name,1) break logging.info("Trying to gracefully terminate pid %d process number %d", pid, child_number) try: os.kill(pid, signal.SIGTERM) sent_term.add((pid, child_number)) if pid in healthy: healthy.remove(pid) send_stat('infra.frontend.%s.term_mem' % app_name,1) except OSError: logging.exception("Failed to terminate pid %d process number %d", pid, child_number) time.sleep(sleep_period) except Exception: logging.exception("Unhandled error in watchdog loop") send_stat('infra.frontend.%s.unhandled_exception' % app_name,1) return None, None
12,419
def load_fonts(folder="fonts/latin"):
    """Load all fonts in the fonts directories
    """
    fonts = []
    if folder is not None:
        if os.path.isdir(folder):
            # the folder exists whether it is relative or absolute path
            for font in os.listdir(folder):
                if font.split(".")[-1].lower() in ["ttf", "otf"]:
                    fonts.append(os.path.join(folder, font))
            return fonts
        elif os.path.isdir(os.path.join(os.path.dirname(__file__), folder)):
            # we are working with base folder of this library
            for font in os.listdir(os.path.join(os.path.dirname(__file__), folder)):
                if font.split(".")[-1].lower() in ["ttf", "otf"]:
                    fonts.append(os.path.join(os.path.dirname(__file__), folder, font))
            return fonts
    raise Exception("No font folder specified/found!")
12,420
def main() -> None:
    """
    Calculate and output the solutions based on the real puzzle input.
    """
    data = aocd.get_data(year=2020, day=22)
    decks = read_decks(data)
    result = play_game(decks)
    print(f"Part 1: {sum(score(deck) for deck in result)}")
    decks2, _ = play_recursive_game(decks)
    print(f"Part 2: {sum(score(deck) for deck in decks2)}")
12,421
def float_feature(value):
    """Wrapper for inserting float features into Example proto."""
    if not isinstance(value, list):
        value = [value]
    return tf.train.Feature(float_list=tf.train.FloatList(value=value))
12,422
def my_charts(request):
    """ define personal graphics page behavior """
    data = [0, 0, 0, 0]

    if request.method == 'POST':
        month = request.POST.get('month')
        if month is not None:
            current_user_id = request.user.id_user
            if month == 'all':
                all_classifications = list(ClinicalState_28d.objects.all()) + \
                    list(ClinicalState_29d_2m.objects.all()) + \
                    list(ClinicalState_2m_3y.objects.all()) + \
                    list(ClinicalState_3y_10y.objects.all()) + \
                    list(ClinicalState_10yMore.objects.all())
            else:
                all_classifications = list(ClinicalState_28d.objects.filter(date__month=month)) + \
                    list(ClinicalState_29d_2m.objects.filter(date__month=month)) + \
                    list(ClinicalState_2m_3y.objects.filter(date__month=month)) + \
                    list(ClinicalState_3y_10y.objects.filter(date__month=month)) + \
                    list(ClinicalState_10yMore.objects.filter(date__month=month))

            for classification in all_classifications:
                if classification.classifier_id == current_user_id:
                    patient_classification = classification.patient.classification
                    if patient_classification == 1:
                        data[0] += 1
                    elif patient_classification == 2:
                        data[1] += 1
                    elif patient_classification == 3:
                        data[2] += 1
                    elif patient_classification == 4:
                        data[3] += 1

    return render(request, 'users/myCharts.html', {'data': data})
12,423
def sort_test():
    """Sort variations"""
    pos = [1, 10, 20]
    stop = [2, 11, 21]
    ref = ['A', 'C', 'T']
    alt = ['AA', 'CAT', 'G']
    p = [0.1, 0.5, 0.9]
    l = vr.VariantList(pos, stop, ref, alt, p)

    pos = [5, 15]
    stop = [6, 16]
    ref = ['T', 'G']
    alt = ['A', 'GAT']
    p = [0.1, 0.5]
    l.add(pos, stop, ref, alt, p)

    l.sort()

    assert l.variants['pos'][1] == 5
    assert l.variants['alt'][2] == 'CAT'
12,424
def matrix2yzy_extrinsic(rotation_matrices: np.ndarray) -> np.ndarray:
    """
    Ry(k3) @ Rz(k2) @ Ry(k1) = [[c1c2c3-s1s3, -s2c3, s1c2c3+c1c3],
                                [c1s2,        c2,    s1s2],
                                [-c1c2s3,     s2s3,  -s1c2s3+c1c3]]
    """
    rotation_matrices = rotation_matrices.reshape((-1, 3, 3))
    angles_radians = np.zeros((rotation_matrices.shape[0], 3))

    # Angle 2 can be taken directly from matrices
    angles_radians[:, 1] = np.arccos(rotation_matrices[:, 1, 1])

    # Gimbal lock case (s2 = 0)
    tolerance = 1e-4
    # Find indices where this is the case
    gimbal_idx = np.abs(rotation_matrices[:, 1, 0]) < tolerance
    # Calculate angle 1 and set angle 3 = 0 for those indices
    r31 = rotation_matrices[gimbal_idx, 2, 0]
    r33 = rotation_matrices[gimbal_idx, 2, 2]
    angles_radians[gimbal_idx, 0] = np.arctan2(-r31, r33)
    angles_radians[gimbal_idx, 2] = 0

    # Normal case (s2 > 0)
    idx = np.invert(gimbal_idx)
    r23 = rotation_matrices[idx, 1, 2]
    r21 = rotation_matrices[idx, 1, 0]
    r32 = rotation_matrices[idx, 2, 1]
    r12 = rotation_matrices[idx, 0, 1]
    angles_radians[idx, 0] = np.arctan2(r23, r21)
    angles_radians[idx, 2] = np.arctan2(r32, -r12)

    # convert to degrees
    euler_angles = np.rad2deg(angles_radians)

    return euler_angles
12,425
def smallest_subarray_with_given_sum(arr, s):
    """Find the length of the smallest subarray whose sum is >= s.

    Time: O(n)
    Space: O(1)

    >>> smallest_subarray_with_given_sum([2, 1, 5, 2, 3, 2], 7)
    2
    >>> smallest_subarray_with_given_sum([2, 1, 5, 2, 8], 7)
    1
    >>> smallest_subarray_with_given_sum([3, 4, 1, 1, 6], 8)
    3
    """
    win_sum = 0
    win_start = 0
    min_len = 0

    for win_end in range(len(arr)):
        win_sum += arr[win_end]
        while win_sum >= s:
            cur_len = win_end - win_start + 1
            if min_len == 0 or cur_len < min_len:
                min_len = cur_len
            win_sum -= arr[win_start]
            win_start += 1

    return min_len
12,426
def create_lit_model(
    model: str,
    input_types: "OrderedDict[str, lit_types.LitType]",  # noqa: F821
    output_types: "OrderedDict[str, lit_types.LitType]",  # noqa: F821
    attribution_method: str = "sampled_shapley",
) -> lit_model.Model:
    """Creates a LIT Model object.

    Args:
        model:
            Required. A string reference to a local TensorFlow saved model directory.
            The model must have at most one input and one output tensor.
        input_types:
            Required. An OrderedDict of string names matching the features of the model
            as the key, and the associated LitType of the feature.
        output_types:
            Required. An OrderedDict of string names matching the labels of the model
            as the key, and the associated LitType of the label.
        attribution_method:
            Optional. A string to choose what attribution configuration to set up the
            explainer with. Valid options are 'sampled_shapley' or 'integrated_gradients'.

    Returns:
        A LIT Model object that has the same functionality as the model provided.
    """
    return _TensorFlowLitModel(model, input_types, output_types, attribution_method)
12,427
def test_load_first(install_mockery, mock_fetch, mock_archive, mock_packages):
    """Test with and without the --first option"""
    install('mpileaks')

    # Only one version of mpileaks will work
    diff('mpileaks', 'mpileaks')

    # 2 specs are required for a diff
    with pytest.raises(spack.main.SpackCommandError):
        diff('mpileaks')
    with pytest.raises(spack.main.SpackCommandError):
        diff('mpileaks', 'mpileaks', 'mpileaks')

    # Ensure they are the same
    assert "No differences" in diff('mpileaks', 'mpileaks')
    output = diff('--json', 'mpileaks', 'mpileaks')
    result = sjson.load(output)

    assert len(result['a_not_b']) == 0
    assert len(result['b_not_a']) == 0

    assert 'mpileaks' in result['a_name']
    assert 'mpileaks' in result['b_name']
    assert "intersect" in result and len(result['intersect']) > 50

    # After we install another version, it should ask us to disambiguate
    install('mpileaks+debug')

    # There are two versions of mpileaks
    with pytest.raises(spack.main.SpackCommandError):
        diff('mpileaks', 'mpileaks+debug')

    # But if we tell it to use the first, it won't try to disambiguate
    assert "variant" in diff('--first', 'mpileaks', 'mpileaks+debug')

    # This matches them exactly
    output = diff("--json", "mpileaks@2.3/ysubb76", "mpileaks@2.3/ft5qff3")
    result = sjson.load(output)

    assert len(result['a_not_b']) == 1
    assert len(result['b_not_a']) == 1
    assert result['a_not_b'][0] == ['variant_value', 'mpileaks debug False']
    assert result['b_not_a'][0] == ['variant_value', 'mpileaks debug True']
12,428
def show_lightning_round_zero_correct(database_connection: mysql.connector.connect
                                      ) -> List[Dict]:
    """Return list of shows in which a panelist answers zero Lightning
    Fill-in-the-Blank round questions correct"""
    cursor = database_connection.cursor(dictionary=True)
    query = ("SELECT s.showid, s.showdate, p.panelistid, p.panelist, "
             "pm.panelistlrndstart, pm.panelistlrndcorrect, "
             "pm.panelistscore, pm.showpnlrank "
             "FROM ww_showpnlmap pm "
             "JOIN ww_shows s ON s.showid = pm.showid "
             "JOIN ww_panelists p ON p.panelistid = pm.panelistid "
             "WHERE s.bestof = 0 AND s.repeatshowid IS null "
             "AND pm.panelistlrndcorrect = 0 "
             "ORDER BY s.showdate ASC;")
    cursor.execute(query)
    result = cursor.fetchall()
    cursor.close()

    if not result:
        return None

    shows = []
    for row in result:
        show = OrderedDict()
        show["id"] = row["showid"]
        show["date"] = row["showdate"].isoformat()

        panelist = OrderedDict()
        panelist["id"] = row["panelistid"]
        panelist["name"] = row["panelist"]
        panelist["start"] = row["panelistlrndstart"]
        panelist["correct"] = row["panelistlrndcorrect"]
        panelist["score"] = row["panelistscore"]
        panelist["rank"] = row["showpnlrank"]
        show["panelist"] = panelist

        shows.append(show)

    return shows
12,429
def get_webpage(page_url):
    """Get the OOTS index webpage and return the content."""
    result = requests.get(page_url)
    if result.status_code == 200:
        return result.text
    else:
        _logger.error(
            colored(
                "Unable to read the OOTS data, please check your connection.",
                "red",
                attrs=["bold"],
            )
        )
        _logger.error(colored(f"URL : {page_url}", "red"))
        quit(1)
12,430
def generate_config(context):
    """ Entry point for the deployment resources. """

    properties = context.properties
    name = properties.get('name', context.env['name'])

    bastion_props = {
        'zone': properties['zone'],
        'network': properties['network'],
        'machineType': properties['machineType'],
        'diskImage': IMAGE
    }

    bastion = {'name': name, 'type': 'instance.py', 'properties': bastion_props}

    optional_props = ['diskSizeGb', 'metadata', 'tags']
    for prop in optional_props:
        set_optional_property(bastion_props, properties, prop)

    if properties.get('disableSudo'):
        disable_sudo(bastion_props)

    firewall_settings = properties.get('createFirewallRules')
    if firewall_settings:
        extra_resources, extra_outputs = create_firewall_rules(
            bastion,
            firewall_settings
        )
    else:
        extra_resources = []
        extra_outputs = []

    outputs = [
        {
            'name': 'name',
            'value': name
        },
        {
            'name': 'selfLink',
            'value': '$(ref.{}.selfLink)'.format(name)
        },
        {
            'name': 'internalIp',
            'value': '$(ref.{}.internalIp)'.format(name)
        },
        {
            'name': 'externalIp',
            'value': '$(ref.{}.externalIp)'.format(name)
        }
    ]

    return {
        'resources': [bastion] + extra_resources,
        'outputs': outputs + extra_outputs
    }
12,431
def dump_args(func):
    """Decorator to print function call details - parameters names and effective values.
    """
    def wrapper(*args, **kwargs):
        func_args = inspect.signature(func).bind(*args, **kwargs).arguments
        func_args_str = ', '.join('{} = {!r}'.format(*item) for item in func_args.items())
        print(f'{func.__module__}.{func.__qualname__} ( {func_args_str} )')
        return func(*args, **kwargs)
    return wrapper
12,432
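A minimal usage sketch for the dump_args decorator above, assuming inspect is imported as in the original module; the decorated add function and its arguments are hypothetical.

@dump_args
def add(x, y):
    # Toy function used only to demonstrate the decorator.
    return x + y

add(3, y=4)
# Prints something like: __main__.add ( x = 3, y = 4 )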
def bigaussian(
    n_particles: int,
    mean: Tuple[float, float, float, float, float],
    geometric_emittance_h: float,
    geometric_emittance_v: float,
    sigma_p: float,
) -> np.array:
    """Generate a bigaussian distributed distribution.

    Args:
        n_particles: Number of particles.
        mean: Distribution centers.
        geometric_emittance: Geometric emittance.
        sigma_p: Absolute momentum spread.

    Returns:
        Array of position and angle phase space coordinates of the distribution.
    """
    cov = np.diag(
        (
            geometric_emittance_h,
            geometric_emittance_h,
            geometric_emittance_v,
            geometric_emittance_v,
            sigma_p ** 2,
        )
    )
    return np.random.multivariate_normal(mean, cov, n_particles).T
12,433
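A hedged usage sketch for the bigaussian function above, assuming numpy is imported as np; the emittance and momentum-spread values are made up and carry no physical significance.

import numpy as np

coords = bigaussian(
    n_particles=1000,
    mean=(0.0, 0.0, 0.0, 0.0, 0.0),
    geometric_emittance_h=1e-6,
    geometric_emittance_v=2e-6,
    sigma_p=1e-3,
)
# One row per phase-space coordinate, one column per particle.
print(coords.shape)  # (5, 1000)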
def test_affected_repr():
    """Test repr() for the Affected class."""
    affected = Affected.from_dict(
        {'name': 'my-package', 'version': [], 'fixedin': []},
        ecosystem='python'
    )
    assert repr(affected) == '<Affected(name=my-package)>'
12,434
def test_parser():
    """
    Tests PasswordHashParser implementation.
    """
    PWD_LENGTH = 64
    password = misc_utils.generate_random_string(PWD_LENGTH)
    wrong_password = misc_utils.generate_random_string(PWD_LENGTH)
    assert password != wrong_password

    # Test pbkdf2_sha256(md5(password)).
    md5_salt = auth_utils.generate_salt()
    pbkdf2_sha256_salt = auth_utils.generate_salt()
    pbkdf2_sha256_rounds = 100000
    password_hash = auth_utils.hash_password(password, md5_salt, None, 'md5')
    password_hash = auth_utils.hash_password(password_hash, pbkdf2_sha256_salt,
                                             pbkdf2_sha256_rounds, 'pbkdf2_sha256')
    hash_string = "$md5|pbkdf2_sha256$|{}${}|{}${}".format(
        pbkdf2_sha256_rounds, md5_salt, pbkdf2_sha256_salt, password_hash)

    parser = auth_utils.PasswordHashParser()
    parser.parse(hash_string)
    assert parser.verify_password(password) == True
    assert parser.verify_password(wrong_password) == False
    assert str(parser) == hash_string
12,435
def horizontal_tile(silhouette, reps=2):
    """Places two silhouettes side-by-side with an empty line in the middle."""
    silhouette = np.append(silhouette, np.zeros((silhouette.shape[0], 1)), axis=1)
    return np.tile(silhouette, (1, reps))[:, :]
12,436
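A short usage sketch for the horizontal_tile function above, assuming numpy is imported as np; the 2x2 silhouette is a made-up placeholder.

import numpy as np

silhouette = np.ones((2, 2))
tiled = horizontal_tile(silhouette, reps=2)
# Each copy contributes its 2 data columns plus the appended empty column,
# so two copies give a 2 x 6 array.
print(tiled.shape)  # (2, 6)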
def grads_norm(parameters):
    """get grad norms of the parameters, useful for model inspection"""
    t = [p.grad for p in parameters if p.grad is not None]
    return many_l2_norm(*t)
12,437
def django_admin_add_object(request, context):
    """show add object"""
    if request and request.user.is_staff and (context.get('object', None) or context.get('model', None)):
        object_class = context.get('model', None)
        if not object_class:
            object_class = context['object'].__class__
        view_name = 'admin:{0}_{1}_add'.format(get_model_app(object_class), get_model_name(object_class))
        try:
            return make_link(
                reverse(view_name),
                _('Add {0}').format(get_model_label(object_class)),
                'table',
                classes=['icon', 'alert_on_click']
            )
        except NoReverseMatch:
            pass
12,438
def test_bellmanford_sp(graph_sp):
    """Test that the Bellman-Ford algorithm returns the shortest path."""
    assert graph_sp.bellmanford('A', 'E') == 20
12,439
def get_config():
    """Returns an instance of the configured config class.

    :return: Project's defined Adyen configuration.
    :rtype: :class:`AbstractAdyenConfig`

    By default, this function will return an instance of
    :class:`adyen.settings_config.FromSettingsConfig`. If
    :data:`ADYEN_CONFIG_CLASS` is defined, it will try to load this class and
    return an instance of this class instead.

    .. note::

        This function expects :data:`ADYEN_CONFIG_CLASS` to be a string that
        represent the python import path of the Adyen config class, such as
        ``adyen.settings_config.FromSettingsConfig``.
    """
    try:
        config_class_string = settings.ADYEN_CONFIG_CLASS
    except AttributeError:
        config_class_string = 'adyen.settings_config.FromSettingsConfig'
    return import_string(config_class_string)()
12,440
def aperiodic(amp, samples):
    """an aperiodic oscillating signal

    Parameters
    ----------
    amp : float
        values range over +-amp
    samples : int
        number of samples to generate

    Returns
    -------
    ndarray
    """
    periods = np.abs(sine(samples, samples, 1)) + samples / 10
    seq = [amp * math.sin(i * 2 * math.pi / periods[i]) for i in range(samples)]
    return np.array(seq)
12,441
def kriging_interpolate():
    """
    Use Gaussian Process Regression provided by Scikit-learn.
    """
    pass
12,442
def removeHs(ctab):
    """
    Removes any hydrogens from the graph of a molecule. CTAB is urlsafe_base64 encoded string
    containing single molfile or concatenation of multiple molfiles.
    cURL examples:

        curl -X GET ${BEAKER_ROOT_URL}removeHs/$(cat removeHs.mol | base64 -w 0 | tr "+/" "-_")
        curl -X GET ${BEAKER_ROOT_URL}removeHs/$(cat removeHs.mol | base64 -w 0 | tr "+/" "-_")?implicitOnly=1
    """
    data = base64.urlsafe_b64decode(ctab)
    return removeHsView(data, request.params)
12,443
def judge_key(key: str, up: any):
    """Check whether ``key`` exists anywhere in a nested dict/list structure."""
    if dict == type(up):
        if key in up:
            return True
        else:
            for dict_key, dict_value in up.items():
                if dict == type(dict_value) or list == type(dict_value):
                    result = judge_key(key, dict_value)
                    if result:
                        return result
            return False
    elif list == type(up):
        for dict_value in up:
            if dict == type(dict_value) or list == type(dict_value):
                result = judge_key(key, dict_value)
                if result:
                    return result
        return False
    else:
        return False
12,444
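A small usage sketch for the judge_key function above; the nested structure is invented purely to show the recursive lookup.

nested = {"a": 1, "b": [{"c": 2}, {"d": {"e": 3}}]}

print(judge_key("e", nested))  # True: found inside a dict nested in a list
print(judge_key("x", nested))  # False: key not present at any level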
def apply_changes(patch_obj_dic, file_dic):
    """
    If all checks are passed, write the changes to the patch file.
    Note that the original file is overwritten
    :return:
    """
    success = False
    error_title = None
    error_msg = None

    # Checks that mutually exclusive options have not been set together. If they have,
    # alert the user, and abort before writing to file(s)
    for (fn, patch_obj_list) in iterDic(patch_obj_dic):
        mut_exl_dic = {}
        for obj in patch_obj_list:
            if obj.group and 'yes' in obj.status:
                if obj.group not in mut_exl_dic:
                    mut_exl_dic[obj.group] = []
                    mut_exl_dic[obj.group].append(obj.name)
                else:
                    mut_exl_dic[obj.group].append(obj.name)

        for (group, names) in iterDic(mut_exl_dic):
            if len(names) > 1:
                name_str = '\n'
                for name in names:
                    name_str += ' ' + name + '\n'
                error_title = 'Mutually Exlusive Options Detected!'
                error_msg = 'The following options cannot be enabled together: \n' + name_str + \
                            fn + ' was not written.'
                success = False
                return success, error_title, error_msg

    # If checks passed, prepare and then write data to file(s)
    for (fn, patch_obj_list) in iterDic(patch_obj_dic):
        for obj in patch_obj_list:
            file_dic = prep_for_writing(fn, obj, file_dic)
        r_p_f_success, error_title, error_msg = write_patch_files(fn, file_dic)
        if not r_p_f_success:
            success = False
            return success, error_title, error_msg

    success = True
    return success, error_title, error_msg
12,445
def test_filter_translate(tr_arg, tr_src, tr_dest): """Tests for translate argument. Translate characters from one to another """ args = parser.parse_args(["-tr", *tr_arg]) filters = renamer.initfilters(args) dest = renamer.get_renames(tr_src, filters, args.extension, args.raw) assert dest == tr_dest
12,446
def construct_1D_scan_fast(gate, swing, n_pt, t_step, biasT_corr, pulse_lib,
                           digitizer, channels, dig_samplerate, dig_vmax=2.0, iq_mode=None,
                           acquisition_delay_ns=None, enabled_markers=[], channel_map=None,
                           pulse_gates={}, line_margin=0):
    """
    1D fast scan parameter constructor.

    Args:
        gate (str) : gate/gates that you want to sweep.
        swing (double) : swing to apply on the AWG gates. [mV]
        n_pt (int) : number of points to measure (current firmware limits to 1000)
        t_step (double) : time in ns to measure per point. [ns]
        biasT_corr (bool) : correct for biasT by taking data in different order.
        pulse_lib : pulse library object, needed to make the sweep.
        digitizer : digitizer object
        channels : digitizer channels to read
        dig_samplerate : digitizer sample rate [Sa/s]
        iq_mode (str or dict): when the digitizer is in MODE.IQ_DEMODULATION this parameter
            specifies how the complex I/Q value should be plotted: 'I', 'Q', 'abs', 'angle',
            'angle_deg'. A string applies to all channels. A dict can be used to specify the
            selection per channel, e.g. {1:'abs', 2:'angle'}.
            Note: channel_map is a more generic replacement for iq_mode.
        acquisition_delay_ns (float):
            Time in ns between AWG output change and digitizer acquisition start.
            This also increases the gap between acquisitions.
        enabled_markers (List[str]): marker channels to enable during scan
        channel_map (Dict[str, Tuple(int, Callable[[np.ndarray], np.ndarray])]):
            defines new list of derived channels to display. Dictionary entries name: (channel_number, func).
            E.g. {'ch1-I':(1, np.real), 'ch1-Q':(1, np.imag), 'ch3-Amp':(3, np.abs), 'ch3-Phase':(3, np.angle)}
            The default channel_map is:
                {'ch1':(1, np.real), 'ch2':(2, np.real), 'ch3':(3, np.real), 'ch4':(4, np.real)}
        pulse_gates (Dict[str, float]):
            Gates to pulse during scan with pulse voltage in mV.
            E.g. {'vP1': 10.0, 'vB2': -29.1}
        line_margin (int): number of points to add to sweep 1 to mask transition effects due to voltage step.
            The points are added to begin and end for symmetry (bias-T).

    Returns:
        Parameter (QCODES multiparameter) : parameter that can be used as input in a conventional scan function.
    """
    vp = swing/2

    # set up sweep voltages (get the right order, to compensate for the biasT).
    voltages_sp = np.linspace(-vp, vp, n_pt)
    if biasT_corr:
        m = (n_pt+1)//2
        voltages = np.zeros(n_pt)
        voltages[::2] = voltages_sp[:m]
        voltages[1::2] = voltages_sp[m:][::-1]
    else:
        voltages = voltages_sp

    return dummy_digitzer_scan_parameter(digitizer, None, pulse_lib, t_step, (n_pt, ), (gate, ),
                                         (tuple(voltages_sp), ), biasT_corr, 500e6)
12,447
def main(): """ Running the word guessing game """ ans = random_word() run_game(ans, N_TURNS)
12,448
async def test_fan_direction(hass, hk_driver, events): """Test fan with direction.""" entity_id = "fan.demo" hass.states.async_set( entity_id, STATE_ON, {ATTR_SUPPORTED_FEATURES: SUPPORT_DIRECTION, ATTR_DIRECTION: DIRECTION_FORWARD}, ) await hass.async_block_till_done() acc = Fan(hass, hk_driver, "Fan", entity_id, 1, None) hk_driver.add_accessory(acc) assert acc.char_direction.value == 0 await acc.run_handler() await hass.async_block_till_done() assert acc.char_direction.value == 0 hass.states.async_set(entity_id, STATE_ON, {ATTR_DIRECTION: DIRECTION_REVERSE}) await hass.async_block_till_done() assert acc.char_direction.value == 1 # Set from HomeKit call_set_direction = async_mock_service(hass, DOMAIN, "set_direction") char_direction_iid = acc.char_direction.to_HAP()[HAP_REPR_IID] hk_driver.set_characteristics( { HAP_REPR_CHARS: [ { HAP_REPR_AID: acc.aid, HAP_REPR_IID: char_direction_iid, HAP_REPR_VALUE: 0, }, ] }, "mock_addr", ) await hass.async_block_till_done() assert call_set_direction[0] assert call_set_direction[0].data[ATTR_ENTITY_ID] == entity_id assert call_set_direction[0].data[ATTR_DIRECTION] == DIRECTION_FORWARD assert len(events) == 1 assert events[-1].data[ATTR_VALUE] == DIRECTION_FORWARD hk_driver.set_characteristics( { HAP_REPR_CHARS: [ { HAP_REPR_AID: acc.aid, HAP_REPR_IID: char_direction_iid, HAP_REPR_VALUE: 1, }, ] }, "mock_addr", ) await hass.async_add_executor_job(acc.char_direction.client_update_value, 1) await hass.async_block_till_done() assert call_set_direction[1] assert call_set_direction[1].data[ATTR_ENTITY_ID] == entity_id assert call_set_direction[1].data[ATTR_DIRECTION] == DIRECTION_REVERSE assert len(events) == 2 assert events[-1].data[ATTR_VALUE] == DIRECTION_REVERSE
12,449
def assemble_f_local(ck, f_func, p1, p2, p3):
    """
    Assemble the local contribution to the f_load_lv for the element

    Parameters
    ----------
    ck : np.array
        basis function coef. matrix.
    f_func : function
        load function.
    p1 : np.array
        first vertex of the triangle element.
    p2 : np.array
        second vertex of the triangle element.
    p3 : np.array
        third vertex of the triangle element.

    Returns
    -------
    np.array
        local contribution to f_load_lv.
    """
    f_local = np.zeros(6, dtype=float)
    for ki in range(6):
        i, di = inv_index_map(ki)

        def f_phi(x, y):
            return f_func(x, y)[:, di] * phi(x, y, ck, i)

        f_local[ki] = quadrature2D(p1, p2, p3, 4, f_phi)
    return f_local
12,450
def test_from_percolator(perc_weights): """ Test the from_percolator() function. Verify that a model is correctly loaded from the Percolator weights output. """ path = os.path.join(perc_weights) loaded = xenith.from_percolator(path) # Correct answers for feat_mean and feat_stdev features = ['score', 'eval', 'evala', 'evalb', 'ionmatch', 'conionmatch', 'ionmatcha', 'conionmatcha', 'ionmatchb', 'conionmatchb', 'ppscorediff', 'mass', 'ppm', 'lensum', 'intraprotein', 'lenrat', 'charge_1', 'charge_2', 'charge_3', 'charge_4', 'charge_5', 'charge_6', 'charge_7', 'charge_8'] correct_feat = pd.Series([0]*24, index=features) assert loaded.source == "percolator" assert loaded.num_features == 24 assert loaded.pretrained == True assert loaded.feat_mean.equals(correct_feat) assert loaded.feat_stdev.equals(correct_feat) with pytest.raises(FileNotFoundError): xenith.from_percolator("blah")
12,451
def take_turn(num_rolls, opponent_score, dice=six_sided):
    """Simulate a turn rolling NUM_ROLLS dice, which may be 0 (Free Bacon).
    Return the points scored for the turn by the current player.
    Also implements the Hogtimus Prime rule.

    num_rolls:       The number of dice rolls that will be made.
    opponent_score:  The total score of the opponent.
    dice:            A function that simulates a single dice roll outcome.
    """
    # Leave these assert statements here; they help check for errors.
    assert type(num_rolls) == int, 'num_rolls must be an integer.'
    assert num_rolls >= 0, 'Cannot roll a negative number of dice in take_turn.'
    assert num_rolls <= 10, 'Cannot roll more than 10 dice.'
    assert opponent_score < 100, 'The game should be over.'
    # BEGIN PROBLEM 2
    if num_rolls == 0:
        roll_score = free_bacon(opponent_score)
    else:
        roll_score = roll_dice(num_rolls, dice)
    if is_prime(roll_score):
        return next_prime(roll_score)
    return roll_score
    # END PROBLEM 2
12,452
def __sort_vertices(points): """Return vertices that are sorted by average center of all points.""" points = list(set(points)) if len(points) < 3: return None start_point = __find_average_center(points) start_vector = Vector3D.by_points(start_point, points[0]) return sorted(points, key=lambda point: GeometryUtils.angle_between( start_vector, Vector3D.by_points(start_point, point)))
12,453
def is_arnold_usd_available(): """ Returns whether or not Arnold USD libraries and schemas are available in current session :return: bool """ raise NotImplementedError('is_arnold_usd_available function is not implemented!')
12,454
def create_feature( tokens: List[str], label_ids: List[int], words_map: List[Tuple[int, int, bool]], max_seq_length: int, tokenizer: PreTrainedTokenizer, cls_token_at_end=False, cls_token="[CLS]", cls_token_segment_id=1, sep_token="[SEP]", sep_token_extra=False, pad_on_left=False, pad_token=0, pad_token_segment_id=0, pad_token_label_id=-100, sequence_a_segment_id=0, mask_padding_with_zero=True, words_map_pad=(-1, -1, True) ) -> Tuple[InputFeatures, List[Tuple[int, int, str]]]: """ `cls_token_at_end` define the location of the CLS token: - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP] - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS] `cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet) """ # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa. special_tokens_count = tokenizer.num_special_tokens_to_add() # if len(tokens) > max_seq_length - special_tokens_count: # tokens = tokens[: (max_seq_length - special_tokens_count)] # label_ids = label_ids[: (max_seq_length - special_tokens_count)] assert (len(tokens) <= max_seq_length - special_tokens_count) # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. tokens += [sep_token] label_ids += [pad_token_label_id] words_map += [words_map_pad] if sep_token_extra: # roberta uses an extra separator b/w pairs of sentences tokens += [sep_token] label_ids += [pad_token_label_id] words_map += [words_map_pad] segment_ids = [sequence_a_segment_id] * len(tokens) if cls_token_at_end: tokens += [cls_token] label_ids += [pad_token_label_id] segment_ids += [cls_token_segment_id] words_map += [words_map_pad] else: tokens = [cls_token] + tokens label_ids = [pad_token_label_id] + label_ids segment_ids = [cls_token_segment_id] + segment_ids words_map = [words_map_pad] + words_map input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) # Zero-pad up to the sequence length. 
padding_length = max_seq_length - len(input_ids) if pad_on_left: input_ids = ([pad_token] * padding_length) + input_ids input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids label_ids = ([pad_token_label_id] * padding_length) + label_ids words_map = ([words_map_pad] * padding_length) + words_map else: input_ids += [pad_token] * padding_length input_mask += [0 if mask_padding_with_zero else 1] * padding_length segment_ids += [pad_token_segment_id] * padding_length label_ids += [pad_token_label_id] * padding_length words_map += [words_map_pad] * padding_length assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length assert len(label_ids) == max_seq_length assert len(words_map) == max_seq_length if "token_type_ids" not in tokenizer.model_input_names: segment_ids = None return InputFeatures( input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids, ), words_map
12,455
def unified_genotyper(align_bams, items, ref_file, assoc_files, region=None, out_file=None): """Perform SNP genotyping on the given alignment file. """ if out_file is None: out_file = "%s-variants.vcf" % os.path.splitext(align_bams[0])[0] if not file_exists(out_file): broad_runner, params = \ _shared_gatk_call_prep(align_bams, ref_file, items[0]["config"], assoc_files["dbsnp"], region, out_file) if (not isinstance(region, (list, tuple)) and not all(has_aligned_reads(x, region) for x in align_bams)): vcfutils.write_empty_vcf(out_file) else: with file_transaction(out_file) as tx_out_file: params += ["-T", "UnifiedGenotyper", "-o", tx_out_file, "--genotype_likelihoods_model", "BOTH"] broad_runner.run_gatk(params) return out_file
12,456
def test_add_finished_hashes(local_config): """Tests adding hashes that are finished and getting them with get_previous_hashes().""" manager = StateManager(local_config) manager.set_checkpoint('DEFGHIJK', 'default', 'DONE') manager.set_checkpoint('ABCDEFGH', 'default', 'DONE') manager.set_checkpoint('MNOPQRST', 'default', 'DONE') manager.set_checkpoint('ABCDEFGH', 'another', 'DONE') return_list = manager.get_previous_hashes('default') assert return_list == ['ABCDEFGH', 'DEFGHIJK', 'MNOPQRST'] return_list = manager.get_previous_hashes('another') assert return_list == ['ABCDEFGH']
12,457
def read_and_compress_table(parser, table, debug): """Read data from a FITS file and save it into a FITS binary table The data are read from the FITS file specified in the "table" section of the "parser" object (an instance of ConfigurationParser). If "debug" is true, save additional information (useful for debugging) in the table.""" input_file_name = os.path.normpath(parser.get(table, 'file')) cur_hdu = None with pyfits.open(input_file_name) as input_file: hdu, column = get_hdu_and_column_from_schema(parser, table, input_file) compression = parser.get(table, 'compression') log.info('compressing file %s (HDU %s, column %s) ' 'into table "%s", ' 'compression is "%s"', input_file_name, str(hdu), str(column), table, compression) samples_format = input_file[hdu].columns.formats[column] samples = ppc.to_native_endianness(input_file[hdu].data.field(column)) if parser.has_option(table, 'datatype'): samples = np.array(samples, dtype=parser.get(table, 'datatype')) cur_hdu, num_of_bytes, elapsed_time = compress_to_FITS_table(parser, table, samples_format, samples, debug) cur_hdu.name = table cur_hdu.header['PCNUMSA'] = (len(samples), 'Number of uncompressed samples') cur_hdu.header['PCCOMPR'] = (compression, 'Polycomp compression algorithm') cur_hdu.header['PCSRCTP'] = (str(samples.dtype), 'Original NumPy type of the data') cur_hdu.header['PCUNCSZ'] = (samples.itemsize * samples.size, 'Size of the uncompressed data [bytes]') cur_hdu.header['PCCOMSZ'] = (num_of_bytes, 'Size of the compressed data [bytes]') cur_hdu.header['PCTIME'] = (elapsed_time, 'Time used for compression [s]') cr = float(cur_hdu.header['PCUNCSZ']) / float(cur_hdu.header['PCCOMSZ']) cur_hdu.header['PCCR'] = (cr, 'Compression ratio') log.info('table "%s" compressed, %s compressed to %s (cr: %.4f)', table, humanize_size(cur_hdu.header['PCUNCSZ']), humanize_size(cur_hdu.header['PCCOMSZ']), cr) return cur_hdu
12,458
def calculate_log_likelihood_and_derivative_at_parameter_point_with_mRNA(protein_at_observations,model_parameters,mean_protein,measurement_variance,mRNA_parameters): """ Calculates the log of the likelihood, and the derivative of the negative log likelihood wrt each parameter, of our data given the paramters, using the Kalman filter. It uses the predicted_observation_distributions, predicted_observation_mean_derivatives, and predicted_observation_variance_derivatives from the kalman_filter function. It returns the log likelihood as in the calculate_log_likelihood_at_parameter_point function, and also returns an array of the derivative wrt each parameter. Parameters ---------- protein_at_observations : numpy array. Observed protein. The dimension is n x 2, where n is the number of observation time points. The first column is the time, and the second column is the observed protein copy number at that time. model_parameters : numpy array. An array containing the moderowl parameters in the following order: repression_threshold, hill_coefficient, mRNA_degradation_rate, protein_degradation_rate, basal_transcription_rate, translation_rate, transcription_delay. mean_protein : float. The mean protein value, used to set prior bounds for the repression threshold measurement_variance : float. The variance in our measurement. This is given by Sigma_e in Calderazzo et. al. (2018). mRNA_parameters : numpy array. two element array, mean and standard deviation of the mRNA distribution Returns ------- log_likelihood : float. The log of the likelihood of the data. log_likelihood_derivative : numpy array. The derivative of the log likelihood of the data, wrt each model parameter """ from scipy.stats import norm, gamma, uniform number_of_parameters = model_parameters.shape[0] if ((uniform(50,2*mean_protein-50).pdf(model_parameters[0]) == 0) or (uniform(2,6-2).pdf(model_parameters[1]) == 0) or (uniform(np.log(2)/150,np.log(2)/10 - np.log(2)/150).pdf(model_parameters[2]) == 0) or (uniform(np.log(2)/150,np.log(2)/10 - np.log(2)/150).pdf(model_parameters[3]) == 0) or (uniform(0.01,120-0.01).pdf(model_parameters[4]) == 0) or (uniform(0.01,40-0.01).pdf(model_parameters[5]) == 0) or (uniform(1,40-1).pdf(model_parameters[6]) == 0) ): return -np.inf, np.zeros(number_of_parameters) state_space_mean, _, _, _, predicted_observation_distributions, predicted_observation_mean_derivatives, predicted_observation_variance_derivatives = kalman_filter(protein_at_observations, model_parameters, measurement_variance, derivative=True) mean_mRNA = np.mean(state_space_mean[:,1]) # calculate log likelihood as before if protein_at_observations.reshape(-1,2).shape[0] == 1: number_of_observations = 1 observations = [protein_at_observations[1]] else: number_of_observations = protein_at_observations.shape[0] observations = protein_at_observations[:,1] mean = predicted_observation_distributions[:,1] sd = np.sqrt(predicted_observation_distributions[:,2]) # add mRNA penalty log_likelihood = np.sum(norm.logpdf(observations,mean,sd)) + norm.logpdf(mean_mRNA, mRNA_parameters[0], mRNA_parameters[1]) # now for the computation of the derivative of the negative log likelihood. 
    # An expression of this can be found at equation (28) in Mbalawata, Särkkä, Haario (2013)
    observation_transform = np.array([[0.0, 1.0]])
    helper_inverse = 1.0/predicted_observation_distributions[:, 2]

    log_likelihood_derivative = np.zeros(number_of_parameters)
    for parameter_index in range(number_of_parameters):
        for time_index in range(number_of_observations):
            log_likelihood_derivative[parameter_index] -= 0.5*(
                helper_inverse[time_index]*np.trace(observation_transform.dot(
                    predicted_observation_variance_derivatives[time_index, parameter_index].dot(
                        np.transpose(observation_transform)))) -
                helper_inverse[time_index]*np.transpose(observation_transform.dot(
                    predicted_observation_mean_derivatives[time_index, parameter_index]))[0]*
                (observations[time_index] - mean[time_index]) -
                np.power(helper_inverse[time_index], 2)*np.power(observations[time_index] - mean[time_index], 2)*
                observation_transform.dot(
                    predicted_observation_variance_derivatives[time_index, parameter_index].dot(
                        np.transpose(observation_transform))) -
                helper_inverse[time_index]*(observations[time_index] - mean[time_index])*
                observation_transform.dot(predicted_observation_mean_derivatives[time_index, parameter_index])[0])

    return log_likelihood, log_likelihood_derivative
12,459
def test_wheel_compiles_pyc( script: PipTestEnvironment, shared_data: TestData, tmpdir: Path ) -> None: """ Test installing from wheel with --compile on """ shutil.copy(shared_data.packages / "simple.dist-0.1-py2.py3-none-any.whl", tmpdir) script.pip( "install", "--compile", "simple.dist==0.1", "--no-index", "--find-links", tmpdir, ) # There are many locations for the __init__.pyc file so attempt to find # any of them exists = [ os.path.exists(script.site_packages_path / "simpledist/__init__.pyc"), *glob.glob(script.site_packages_path / "simpledist/__pycache__/__init__*.pyc"), ] assert any(exists)
12,460
def SEORedirectMiddleware(get_response):
    """
    Intercepts 404 errors and checks the database for any defined redirects
    that match the current request path.
    """
    def middleware(request):
        response = get_response(request)
        if response.status_code != 404:
            return response
        try:
            r = Redirect.objects.get(url=normalize_url(request.path))
        except Redirect.DoesNotExist:
            return response

        to = r.target_url
        kwargs = dict(permanent=r.is_permanent)
        if r.with_query_string:
            to = modify_url_query_string(to, replace=request.GET.dict())
        return redirect(to, **kwargs)

    return middleware
12,461
def b2s(src): """ Convert from bytes to string :param src: bytes :return: string """ return src.decode(encoding=UTF_ENCODING)
12,462
def get_finetune_lfo_type(header: bytes) -> AutomationLfoType: """Return finetune LFO type.""" assert isinstance(value := _unpack(header, "FINETUNE_LFO_TYPE"), int), type(value) return AutomationLfoType(value)
12,463
def main(): """Main module execution code path""" AzureRMManagedDiskFacts()
12,464
def locate(client: Client, structure: Structure) -> str: """Locates the respective structure.""" return client.run('locate', structure)
12,465
def teraflops_for_accelerator(accel): """ Stores the number of TFLOPs available to a few accelerators, including driver handicaps. Args: accel (str): A string descriptor of which accelerator to use. Must be either "3090" or "V100". Returns: accel_flops (int): an integer of how many TFLOPs are in the accelerator. """ accel_flops = {"3090": 71, "V100": 125} return accel_flops[accel]
12,466
def reduced_supercell_vectors(ab, n): """ Returns all possible reduced in-plane lattice vectors and transition matrices for the given starting unit cell lattice vectors(ab) and the supercell size n Args: ab: a, b lattice vectors n: (int) supercell size """ uv_list = [] tm_list = [] for r_tm in get_trans_matrices(n): uv = get_uv(ab, r_tm) uv_r, tm0 = get_reduced_uv(uv, r_tm) uv_list.append(uv_r) tm_list.append(tm0) return uv_list, tm_list
12,467
def jsonify(*args, **kwargs): """Creates a `Response` with the JSON representation of the given arguments with an`application/json` mimetype. The arguments to this function are the same as to the `dict` constructor. Example usage: from cocopot import jsonify @app.route('/_get_current_user') def get_current_user(): return jsonify(username=g.user.username, email=g.user.email, id=g.user.id) This will send a JSON response like this to the browser: { "username": "admin", "email": "admin@localhost", "id": 42 } """ indent = None separators = (',', ':') rv = Response(json.dumps(dict(*args, **kwargs), indent=indent, separators=separators), content_type='application/json') return rv
12,468
def logged_in_profile(client): """Add a Profile and logged-in User""" user = UserFactory.create(username="george") client.force_login(user) return user.profile
12,469
def log(type, message, *args, **kwargs): """Log into archer logger. Only set up a default log handler if the end-user application didn't set anything up. """ if not logging.root.handlers and base_logger.level == logging.NOTSET: base_logger.setLevel(logging.INFO) handler = logging.StreamHandler() base_logger.addHandler(handler) getattr(base_logger, type)(message.rstrip(), *args, **kwargs)
12,470
def plot_fini(): """ Plot the data, opens web browser with plot :return: None """ doc_layout.children.append(plot) doc_layout.children.append(plot_mahr) show(doc_layout)
12,471
def get_storage_backend(): """ Return the singleton instance of the storage backend in use. """ global _STORAGE_BACKEND if _STORAGE_BACKEND is None: module, klass = ClassLoader.split(str(config.STORAGE_BACKEND_CLASS)) cl = ClassLoader(module, klass, config.STORAGE_BACKEND_ARGS) _STORAGE_BACKEND = cl.get_instance() return _STORAGE_BACKEND
12,472
def _pr_compile(regex, cleanup=None): """Prepare a 2-tuple of compiled regex and callable.""" return (_re_compile(regex), cleanup)
12,473
def hydrate_detectify(event): """Hydrator for detectify events, enriching them with owner, a fingerprint and metadata. Args: event (comet_core.app.EventContainer): the incoming event to hydrate """ msg = event.message event.set_owner(get_owner_email_from_domain(msg['domain'])) event.set_fingerprint('detectify_' + msg['payload']['signature']) # arbitrary metadata event.set_metadata({ 'issue_type': msg['payload']['title'], 'source_readable': 'Web Security Scanner', 'resource': msg['domain'], 'resource_readable': msg['domain'], 'issue_type_readable': msg['payload']['title'] })
12,474
def config(): """ Configuration via config.json (introduced in Anki 2.1) """ try: getConfig = mw.addonManager.getConfig except AttributeError: return LEGACY_CONFIG return getConfig(__name__)
12,475
def test_provenance(): """ test provenance features """ ## create a new project project = create_project() ## create a data entity try: filename = utils.make_bogus_data_file() data_entity = create_data_entity(project['id']) data_entity = syn.uploadFile(data_entity, filename) finally: os.remove(filename) ## create a code entity, source of the data above code = """ ## Chris's fabulous random data generator ############################################################ import random random.seed(12345) data = [random.gauss(mu=0.0, sigma=1.0) for i in range(100)] """ try: try: f = tempfile.NamedTemporaryFile(suffix=".py", delete=False) f.write(code) f.write("\n") finally: f.close() CODE_JSON['parentId']= project['id'] code_entity = syn.createEntity(CODE_JSON) code_entity = syn.uploadFile(code_entity, f.name) finally: os.remove(f.name) ## create a new activity asserting that the code entity was used to ## make the data entity activity = Activity(name='random.gauss', description='Generate some random numbers') activity.used(code_entity, wasExecuted=True) activity.used({'name':'Superhack', 'url':'https://github.com/joe_coder/Superhack'}, wasExecuted=True) activity = syn.setProvenance(data_entity, activity) ## retrieve the saved provenance record retrieved_activity = syn.getProvenance(data_entity) ## does it match what we expect? assert retrieved_activity == activity ## test update new_description = 'Generate random numbers like a gangsta' retrieved_activity['description'] = new_description updated_activity = syn.updateActivity(retrieved_activity) ## did the update stick? assert updated_activity['name'] == retrieved_activity['name'] assert updated_activity['description'] == new_description ## test delete syn.deleteProvenance(data_entity) try: ## provenance should be gone now ## decided this should throw exception with a 404 deleted_activity = syn.getProvenance(data_entity['id']) except Exception as ex: assert ex.response.status_code == 404 else: assert False, 'Should throw 404 exception'
12,476
def ping(request): """ This view returns a dummy json. It is meant to be used to check whether the server is alive or not """ return Json(None)
12,477
def copy_ffn(model): """Copy feed forward network model. Args: model: A previously created ffn model Returns: A copy of the model """ import numpy as np import copy #init model as list holding data for each layer start with input layer newmodel = [] newmodel.append({ "layer":0, "n": copy.copy(model[0]['n']), "activation": copy.copy(model[0]["activation"]), "lreg": copy.copy(model[0]["lreg"]), "regval": copy.copy(model[0]["regval"]), "desc": copy.copy(model[0]["desc"]) }) # init weights and biases for hidden layers and declare activation function for layer in range(1, len(model)): newmodel.append({ "layer":layer, "n": copy.copy(model[layer]['n']), "activation": copy.copy(model[layer]["activation"]), "lreg": copy.copy(model[layer]["lreg"]), "regval": copy.copy(model[layer]["regval"]), "desc": copy.copy(model[layer]["desc"]), "weight": np.copy(model[layer]["weight"]), "bias": np.copy(model[layer]["bias"]), "weightdot": np.copy(model[layer]["weightdot"]), "biasdot": np.copy(model[layer]["biasdot"]) }) return newmodel
12,478
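# For reference, the layer-by-layer copy performed by copy_ffn above can usually be
# obtained with copy.deepcopy, assuming every stored value (scalars, strings, NumPy
# arrays) is deep-copyable; this is a sketch of an equivalent call, not the author's API.
import copy


def copy_ffn_via_deepcopy(model):
    """Sketch: whole-model copy equivalent to copy_ffn under the assumption above."""
    return copy.deepcopy(model)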
def nlp_progress() -> TaskDB:
    """Parse the whole NLP Progress repository.

    Checks out the NLP Progress git repository and parses all the English
    markdown files in it.

    Returns:
        TaskDB: Populated task database.
    """
    tdb = TaskDB()
    with tempfile.TemporaryDirectory() as tmpdir:
        repo_path = os.path.join(tmpdir, "nlp-progress")
        cp = subprocess.run(
            ["git", "clone", NLP_PROGRESS_REPO, repo_path], capture_output=True
        )
        if cp.returncode != 0:
            logger.error("stdout: %s", cp.stdout)
            logger.error("stderr: %s", cp.stderr)
            raise DataError("Could not clone the NLP Progress repository.")
        filenames = glob.glob(os.path.join(repo_path, "english", "*.md"))
        for filename in filenames:
            file_tdb = parse_file(filename)
            for task in file_tdb.tasks.values():
                tdb.add_task(task)
    return tdb
12,479
def is_big(label: str) -> bool: """Returns whether or not a cave is large based on its label""" return label.isupper()
12,480
def fast_mult_polynoms(a, b): """Fast multiply of two polynoms in GF(2^8) using the log table NB. This is NOT constant-time and leaks secret values in timing differences. DO NOT USE THIS CODE TO IMPLEMENT SECURE APPLICATIONS """ if a == 0 or b == 0: return 0 return POWER_X1_TABLE[(LOG_X1_TABLE[a] + LOG_X1_TABLE[b]) % 255]
12,481
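# A table-free reference multiply for cross-checking fast_mult_polynoms above.
# It assumes the AES reduction polynomial x^8 + x^4 + x^3 + x + 1 (0x11b); if the
# log/power tables were built over a different polynomial, adjust 0x1B accordingly.
def gf256_mult_bitwise(a, b):
    """Shift-and-reduce multiplication in GF(2^8) modulo 0x11b."""
    result = 0
    for _ in range(8):
        if b & 1:
            result ^= a
        carry = a & 0x80
        a = (a << 1) & 0xFF
        if carry:
            a ^= 0x1B  # reduce by x^8 + x^4 + x^3 + x + 1
        b >>= 1
    return result

# gf256_mult_bitwise(0x57, 0x83) == 0xC1, the worked example from FIPS-197.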
def get_page_for_group(user_groups, slug): """ Returns a page associated with user_groups given a slug. """ try: page = get_pages_for_group(user_groups).get( slug = slug) except Page.DoesNotExist: page = None return page
12,482
def create(ctx, asset_a, asset_b, share_asset, taker_fee, withdrawal_fee, account): """ Create a new Liquidity Pool. ASSET_A, ASSET_B: These are the assets to be traded in the pool. Can be symbols or ids. Note that ASSET_A should be the one that has the lowest-numbered asset id. SHARE_ASSET: This is the asset that represents a share in the pool for pool stakers. This asset must be a UIA and have zero supply in order to bind it to the pool at creation, and you must be the owner of this asset. TAKER_FEE: This is the fee percent taken by the pool on exchanges made via the pool. Expressed as a percent from 0.00 to 100.00, where "1%" is expressed as 1.00. WITHDRAWAL_FEE: This is the fee percent taken by the pool on withdrawal of stake from the pool. Expressed as a percent from 0.00 to 100.00, where "1%" is expressed as 1.00. """ ctx.blockchain.blocking = True tx = ctx.blockchain.create_liquidity_pool( asset_a, asset_b, share_asset, taker_fee, withdrawal_fee, account=account, ) tx.pop("trx", None) print_tx(tx) results = tx.get("operation_results", {}) print("Results: ", results)
12,483
def read_gps(gps_path):
    """Read GPS feed in CSV.

    Expects GPS structured as:

        vehicle_id: str
            Internal system identification of the vehicle. Should be unique
            per vehicle, and is used for tracking the vehicle as it proceeds
            through the system.
        route_id: str
            The route_id from the GTFS feed that this selector refers to
        datetime: datetime
            Moment at which the vehicle's position was measured
        latitude: float
            Degrees North, in the WGS-84 coordinate system.
        longitude: float
            Degrees East, in the WGS-84 coordinate system.

    Parameters
    ----------
    gps_path : str
        Path to the CSV file containing the GPS feed.

    Returns
    -------
    pm.MoveDataFrame
        GPS data as a MoveDataFrame
    """
    return pm.MoveDataFrame(
        data=pd.read_csv(gps_path),
        latitude="latitude",
        longitude="longitude",
        datetime="datetime",
        traj_id="vehicle_id",
    )
12,484
def _ureduce(a, func, **kwargs): """ Internal Function. Call `func` with `a` as first argument swapping the axes to use extended axis on functions that don't support it natively. Returns result and a.shape with axis dims set to 1. Parameters ---------- a : array_like Input tensor or object that can be converted to a tensor. func : callable Reduction function capable of receiving a single axis argument. It is called with `a` as first argument followed by `kwargs`. kwargs : keyword arguments additional keyword arguments to pass to `func`. Returns ------- result : tuple Result of func(a, **kwargs) and a.shape with axis dims set to 1 which can be used to reshape the result to the same shape a ufunc with keepdims=True would produce. """ axis = kwargs.get('axis', None) if axis is not None: keepdim = list(a.shape) nd = a.ndim axis = normalize_axis_tuple(axis, nd) for ax in axis: keepdim[ax] = 1 if len(axis) == 1: kwargs['axis'] = axis[0] else: keep = set(range(nd)) - set(axis) nkeep = len(keep) # swap axis that should not be reduced to front for i, s in enumerate(sorted(keep)): a = a.swapaxes(i, s) # merge reduced axis a = a.reshape(a.shape[:nkeep] + (-1,)) kwargs['axis'] = -1 keepdim = tuple(keepdim) else: keepdim = (1,) * a.ndim r = func(a, **kwargs) return r, keepdim
12,485
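# A hypothetical call pattern for _ureduce above, mirroring how a reduction over
# multiple axes (e.g. a median) typically uses it; the final reshape is the caller's
# responsibility and is an assumption here, not part of the original helper.
import numpy as np

a = np.arange(24.0).reshape(2, 3, 4)
r, keepdim = _ureduce(a, func=np.median, axis=(0, 2))
result_keepdims = r.reshape(keepdim)  # same shape as np.median(a, axis=(0, 2), keepdims=True)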
def compute_autocorrelation_local(x, Fs, N, H, norm_sum=True): """Compute local autocorrelation [FMP, Section 6.2.3] Notebook: C6/C6S2_TempogramAutocorrelation.ipynb Args: x: Input signal Fs: Sampling rate N: Window length H: Hop size norm_sum: Normalizes by the number of summands in local autocorrelation Returns: A: Time-lag representation T_coef: Time axis (seconds) F_coef_lag: Lag axis """ L = len(x) L_left = round(N / 2) L_right = L_left x_pad = np.concatenate((np.zeros(L_left), x, np.zeros(L_right))) L_pad = len(x_pad) M = int(np.floor(L_pad - N) / H) + 1 A = np.zeros((N, M)) win = np.ones(N) if norm_sum is True: lag_summand_num = np.arange(N, 0, -1) for n in range(M): t_0 = n * H t_1 = t_0 + N x_local = win * x_pad[t_0:t_1] r_xx = np.correlate(x_local, x_local, mode='full') r_xx = r_xx[N-1:] if norm_sum is True: r_xx = r_xx / lag_summand_num A[:, n] = r_xx Fs_A = Fs / H T_coef = np.arange(A.shape[1]) / Fs_A F_coef_lag = np.arange(N) / Fs return A, T_coef, F_coef_lag
12,486
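# A small smoke test for compute_autocorrelation_local above; the signal, sampling
# rate and window settings are illustrative only.
Fs = 100
t = np.arange(0, 10, 1 / Fs)
x = np.sin(2 * np.pi * 2 * t)  # 2 Hz test tone
A, T_coef, F_coef_lag = compute_autocorrelation_local(x, Fs=Fs, N=200, H=50)
# A has shape (N, M): one local autocorrelation column per analysis frame.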
def euclid_dist(vector_p1, vector_p2):
    """Calculate the element-wise Euclidean distance between two arrays of 2D points."""
    distances = vector_p1 - vector_p2
    return cp.hypot(distances[:, :, 0], distances[:, :, 1])
12,487
def _name_cleaner(agent_name): """Renames agent_name to prettier string for plots.""" rename_dict = {'correct_ts': 'Correct TS', 'kl_ucb': 'KL UCB', 'misspecified_ts': 'Misspecified TS', 'ucb1': 'UCB1', 'ucb-best': 'UCB-best', 'nonstationary_ts': 'Nonstationary TS', 'stationary_ts': 'Stationary TS', 'greedy': 'greedy', 'ts': 'TS', 'action_0': 'Action 0', 'action_1': 'Action 1', 'action_2': 'Action 2', 'bootstrap': 'bootstrap TS', 'laplace': 'Laplace TS', 'thoughtful': 'Thoughtful TS', 'gibbs': 'Gibbs TS'} if agent_name in rename_dict: return rename_dict[agent_name] else: return agent_name
12,488
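# The trailing if/else in _name_cleaner above is equivalent to a dict lookup with a
# default; a sketch of the same pattern written that way (rename table abbreviated
# for illustration):
def name_cleaner_via_get(agent_name):
    rename_dict = {'correct_ts': 'Correct TS', 'kl_ucb': 'KL UCB', 'ts': 'TS'}
    return rename_dict.get(agent_name, agent_name)  # falls back to the raw name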
def add_training_args(parser): """Training arguments.""" group = parser.add_argument_group('train', 'training configurations') group.add_argument('--experiment-name', type=str, default="gpt-345M", help="The experiment name for summary and checkpoint") group.add_argument('--batch-size', type=int, default=4, help='Data Loader batch size') group.add_argument('--gradient-accumulation-steps', type=int, default=1, help='Data Loader batch size') group.add_argument('--weight-decay', type=float, default=0.01, help='weight decay coefficient for L2 regularization') group.add_argument('--checkpoint-activations', action='store_true', help='checkpoint activation to allow for training ' 'with larger models and sequences') group.add_argument('--checkpoint-num-layers', type=int, default=1, help='chunk size (number of layers) for checkpointing') group.add_argument('--deepspeed-activation-checkpointing', action='store_true', help='uses activation checkpointing from deepspeed') group.add_argument('--epochs', type=int, default=None, help='Number of finetunning epochs. Zero results in evaluation only.') group.add_argument('--clip-grad', type=float, default=1.0, help='gradient clipping') group.add_argument('--train-iters', type=int, default=0, help='total number of iterations to train over all training runs') group.add_argument('--label-smoothing', type=float, default=0.0) group.add_argument('--log-interval', type=int, default=100, help='report interval') group.add_argument('--summary-dir', type=str, default="", help="The directory to store the summary") group.add_argument('--seed', type=int, default=1234, help='random seed') # Batch producer arguments group.add_argument('--reset-position-ids', action='store_true', help='Reset posistion ids after end-of-document token.') group.add_argument('--reset-attention-mask', action='store_true', help='Reset self attention maske after ' 'end-of-document token.') # Learning rate. group.add_argument('--lr-decay-iters', type=int, default=None, help='number of iterations to decay LR over,' ' If None defaults to `--train-iters`*`--epochs`') group.add_argument('--lr-decay-style', type=str, default='linear', choices=['constant', 'linear', 'cosine', 'exponential'], help='learning rate decay function') group.add_argument('--lr-decay-ratio', type=float, default=0.1) group.add_argument('--lr', type=float, default=1.0e-4, help='initial learning rate') group.add_argument('--warmup', type=float, default=0.01, help='percentage of data to warmup on (.01 = 1% of all ' 'training iters). 
Default 0.01') group.add_argument('--switch-linear', action='store_true', help="Switch to linear decay for cosine decay") # model checkpointing group.add_argument('--save', type=str, default=None, help='Output directory to save checkpoints to.') group.add_argument('--new-save-directory', action='store_true') group.add_argument('--save-epoch', type=int, default=1, help='number of epochs between saves') group.add_argument('--save-interval', type=int, default=5000, help='number of iterations between saves') group.add_argument('--no-save-optim', action='store_true', help='Do not save current optimizer.') group.add_argument('--no-save-rng', action='store_true', help='Do not save current rng state.') group.add_argument('--load', type=str, default=None, help='Path to a directory containing a model checkpoint.') group.add_argument('--no-load-optim', action='store_true', help='Do not load optimizer when loading checkpoint.') group.add_argument('--no-load-rng', action='store_true', help='Do not load rng state when loading checkpoint.') group.add_argument('--no-load-lr-scheduler', action='store_true', help='Do not load lr scheduler when loading checkpoint.') group.add_argument('--no-deepspeed-load', action='store_true', help='Not use deepspeed when loading checkpoint') group.add_argument('--finetune', action='store_true', help='Load model for finetuning. Do not load optimizer ' 'or rng state from checkpoint and set iteration to 0. ' 'Assumed when loading a release checkpoint.') group.add_argument('--resume-dataloader', action='store_true', help='Resume the dataloader when resuming training. ' 'Does not apply to tfrecords dataloader, try resuming' 'with a different seed in this case.') # distributed training args group.add_argument('--distributed-backend', default='nccl', help='which backend to use for distributed training. 
One of [gloo, nccl]', choices=['nccl', 'gloo']) group.add_argument('--DDP-impl', default='torch', choices=['local', 'torch', 'none'], help='which DistributedDataParallel implementation to use.') group.add_argument('--local_rank', type=int, default=None, help='local rank passed from distributed launcher') # BlockLM training args group.add_argument('--block-lm', action='store_true', help="whether use the BlockLM pre-training") group.add_argument('--masked-lm', action='store_true', help='whether to use the mlm objective') group.add_argument('--bert-prob', type=float, default=0.5) group.add_argument('--gpt-infill-prob', type=float, default=0.5) group.add_argument('--gpt-min-ratio', type=float, default=0.5) group.add_argument('--gap-sentence-prob', type=float, default=0.0) group.add_argument('--gap-sentence-ratio', type=float, default=0.15) group.add_argument('--avg-block-length', type=int, default=3) group.add_argument('--short-seq-prob', type=float, default=0.0) group.add_argument('--single-span-prob', type=float, default=0.0) group.add_argument('--task-mask', action='store_true', help="Use different mask for generation and blank filling") group.add_argument('--no-shuffle-block', action='store_true', help="not shuffle the blocks when filling the blank") group.add_argument('--no-block-position', action='store_true', help='Use (rough) absolute positions instead of block positions') group.add_argument('--sentinel-token', action='store_true', help="Use sentinel (mask) tokens to replace 2d position encoding") group.add_argument('--block-mask-prob', type=float, default=0.0) group.add_argument('--context-mask-ratio', type=float, default=0.0) group.add_argument('--random-position', action='store_true', help="Use random start position to cover all the position embeddings") return parser
12,489
def _multi_class_confusion_matrix_plot( thresholds: Optional[List[float]] = None, num_thresholds: Optional[int] = None, name: Text = MULTI_CLASS_CONFUSION_MATRIX_PLOT_NAME, eval_config: Optional[config.EvalConfig] = None, model_name: Text = '', output_name: Text = '', ) -> metric_types.MetricComputations: """Returns computations for multi-class confusion matrix plot.""" if num_thresholds is None and thresholds is None: thresholds = [0.0] key = metric_types.PlotKey( name=name, model_name=model_name, output_name=output_name) # Make sure matrices are calculated. matrices_computations = ( multi_class_confusion_matrix_metrics.multi_class_confusion_matrices( thresholds=thresholds, num_thresholds=num_thresholds, eval_config=eval_config, model_name=model_name, output_name=output_name)) matrices_key = matrices_computations[-1].keys[-1] def result( metrics: Dict[metric_types.MetricKey, multi_class_confusion_matrix_metrics.Matrices] ) -> Dict[metric_types.PlotKey, metrics_for_slice_pb2.MultiClassConfusionMatrixAtThresholds]: return { key: metrics[matrices_key].to_proto() .multi_class_confusion_matrix_at_thresholds } derived_computation = metric_types.DerivedMetricComputation( keys=[key], result=result) computations = matrices_computations computations.append(derived_computation) return computations
12,490
def innerL(i, os):
    """
    Run one inner-loop step of the SMO algorithm for the alpha at index i.

    Parameters
    ----------
    i : int
        Index of the first alpha to optimize.
    os : OptStruct
        Shared optimization state (data, labels, alphas, error cache, b).

    Returns
    -------
    int
        1 if a pair of alphas was updated, 0 otherwise.
    """
    ei = cal_ek(os, i)
    if (os.labels[i] * ei < -os.tol and os.alphas[i] < os.C) or (
        os.labels[i] * ei > os.tol and os.alphas[i] > 0
    ):
        j, ej = select_j(i, os, ei)
        alphaIold = os.alphas[i].copy()
        alphaJold = os.alphas[j].copy()
        # feasible bounds for alpha_j
        if os.labels[i] != os.labels[j]:
            L = max(0, os.alphas[j] - os.alphas[i])
            H = min(os.C, os.C + os.alphas[j] - os.alphas[i])
        else:
            L = max(0, os.alphas[j] + os.alphas[i] - os.C)
            H = min(os.C, os.alphas[j] + os.alphas[i])
        if L == H:
            print("L==H")
            return 0
        # eta = 2*K12 - K11 - K22 (linear kernel)
        eta = (
            2.0 * os.X[i, :] * os.X[j, :].T
            - os.X[i, :] * os.X[i, :].T
            - os.X[j, :] * os.X[j, :].T
        )
        if eta >= 0:
            print("eta>=0")
            return 0
        os.alphas[j] -= os.labels[j] * (ei - ej) / eta
        os.alphas[j] = clip_alpha(os.alphas[j], H, L)
        update_ek(os, j)
        if abs(os.alphas[j] - alphaJold) < 0.00001:
            print("j is not moving enough")
            return 0
        os.alphas[i] += os.labels[j] * os.labels[i] * (alphaJold - alphaIold)
        update_ek(os, i)
        # threshold updates (linear kernel: K(x_a, x_b) = x_a . x_b)
        b1 = (
            os.b
            - ei
            - os.labels[i] * (os.alphas[i] - alphaIold) * os.X[i, :] * os.X[i, :].T
            - os.labels[j] * (os.alphas[j] - alphaJold) * os.X[i, :] * os.X[j, :].T
        )
        b2 = (
            os.b
            - ej
            - os.labels[i] * (os.alphas[i] - alphaIold) * os.X[i, :] * os.X[j, :].T
            - os.labels[j] * (os.alphas[j] - alphaJold) * os.X[j, :] * os.X[j, :].T
        )
        if 0 < os.alphas[i] < os.C:
            os.b = b1
        elif 0 < os.alphas[j] < os.C:
            os.b = b2
        else:
            os.b = (b1 + b2) / 2.0
        return 1
    else:
        return 0
12,491
def verify_manifest_by_spath(extrepo, session_path):
    """This function will load the given manifest and then verify the checksum
    of every file specified within. If the manifest file has an md5 checksum in
    its filename, the manifest file itself will be verified against that
    checksum."""
    print("Verifying manifest", session_path)
    session_name, manifest_path = session_path.split("/", 1)
    try:
        manifest_contents = load_blob(extrepo.get_blob_by_path(session_name, manifest_path))
    except Exception:
        raise IntegrityError("Couldn't load manifest with session path %s from repository" % session_path)
    expected_manifest_md5 = find_checksum(manifest_path)
    verify_manifest(extrepo, manifest_contents, expected_manifest_md5)
12,492
def create_client(ctx: click.Context, opts: ProxyContext) -> CumulocityClient: """Create Cumulocity client and prompt for missing credentials if necessary. Args: ctx (click.Context): Click context opts (ProxyContext): Proxy options Returns: CumulocityClient: Configured Cumulocity client """ if not opts.disable_prompts and not opts.host: opts.host = click.prompt( text="Enter the Cumulocity Host/URL", ) client = CumulocityClient( hostname=opts.host, tenant=opts.tenant, user=opts.user, password=opts.password, tfacode=opts.tfa_code, token=opts.token, ignore_ssl_validate=opts.ignore_ssl_validate, ) if not client.url: opts.show_error( "No Cumulocity host was provided. The host can be set via" "environment variables, arguments or the env-file" ) ctx.exit(ExitCodes.NO_SESSION) logging.info("Checking tenant id") client.validate_tenant_id() # Retry logging so the user can be prompted for # their credentials/TFA code etc. without having to run c8ylp again retries = 3 success = False while retries: try: if client.token: client.validate_credentials() else: client.login() if opts.env_file and opts.store_token: store_credentials(opts, client) success = True break except CumulocityMissingTFAToken as ex: client.tfacode = click.prompt( text="Enter your Cumulocity TFA-Token", hide_input=False ) except Exception as ex: logging.info("unknown exception: %s", ex) if not opts.disable_prompts: if not client.user: client.user = click.prompt( text="Enter your Cumulocity Username", ) if not client.password: client.password = click.prompt( text="Enter your Cumulocity Password [input hidden]", hide_input=True, ) retries -= 1 if not success: logging.info("Could not create client") ctx.exit(ExitCodes.NO_SESSION) return client
12,493
def rename_worksheet(sheet, wrk_sheet, sheet_name): """ REF: https://github.com/burnash/gspread I reuse the gspread function to make this API request. """ body = { "requests": [ { "updateSheetProperties": { "properties": {"sheetId": wrk_sheet.id, "title": SHEET_TAG}, "fields": "title", } } ] } sheet.batch_update(body)
12,494
def get_float(data, index): """get a float value from data array""" return struct.unpack('f', "".join(map(chr, data[4*index:(index+1)*4])))[0]
12,495
def gelu(tensor): """ Gaussian Error Linear Unit - https://arxiv.org/abs/1606.08415 """ return 0.5 * tensor * (1 + tf.tanh(tf.sqrt(2 / np.pi) * (tensor + 0.044715 * tf.pow(tensor, 3))))
12,496
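# For reference, the exact GELU uses the Gaussian CDF instead of the tanh
# approximation in gelu above; a NumPy/SciPy sketch, not part of the original module.
import numpy as np
from scipy.special import erf


def gelu_exact(x):
    """Exact GELU: x * Phi(x), where Phi is the standard normal CDF."""
    x = np.asarray(x, dtype=float)
    return 0.5 * x * (1.0 + erf(x / np.sqrt(2.0)))

# gelu_exact(1.0) is about 0.8413; the tanh form above gives about 0.8412.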
def make_links_in_program(): """ Make the talk titles in the program link to description pages, as far as we can, anyway. The rest should be done by hand by making use of the descriptions.index.md. Beware, this is ugly, and makes all kinds of assumptions about how the program table is formatted, and it needs manual corrections, and it does not work after it has applied the changes. We should probably just throw it away. """ # Build reverse index rindex = {} fname = os.path.join(THIS_DIR, 'content', 'pages', '2017', 'descriptions', 'index.md') with open(fname, 'rb') as f: for line in f.read().decode().splitlines(): if line.strip(): id, _, title = line.partition('-') rindex[title.strip().lower()] = 'descriptions/' + id.strip() + '.html' default_link = 'descriptions/oops.html' # Add links fname = os.path.join(THIS_DIR, 'content', 'pages', '2017', 'program.md') text = open(fname, 'rb').read().decode() lines = text.splitlines() for i in range(len(lines)-1): line = lines[i] if line.lstrip().startswith("<td>") and not line.rstrip().endswith(">"): if '&nbsp' not in lines[i+1]: title = line.lstrip()[4:] id = rindex.get(title.strip().lower(), default_link) lines[i] = " <td><a href='%s'>%s</a>" % (id, title) if line.lstrip().startswith("<td>") and line.rstrip().endswith("</td>"): if '<br>' in line and '&nbsp' not in line: title, _, rest = line.lstrip()[4:].partition('<br>') id = rindex.get(title.strip().lower(), default_link) lines[i] = " <td><a href='%s'>%s</a><br>%s" % (id, title, rest) with open(fname, 'wb') as f: text = '\n'.join(lines) f.write(text.encode())
12,497
def _uniqueElements(an_iterable): """ :param iterable an_iterable: :param int idx: :return list: has only one occurrence of each element """ used = [] unique = [x for x in an_iterable if x not in used and (used.append(x) or True)] return unique
12,498
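# When the elements are hashable, the same order-preserving deduplication as
# _uniqueElements above is usually written with dict.fromkeys, which avoids the
# O(n^2) membership scans; note the list-based version also handles unhashable
# items, which this sketch does not.
def unique_elements_hashable(an_iterable):
    """Order-preserving dedup for hashable items (dicts keep insertion order in Python 3.7+)."""
    return list(dict.fromkeys(an_iterable))

# unique_elements_hashable([3, 1, 3, 2, 1]) == [3, 1, 2]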
def guestbook_key(guestbook_name=None): """Constructs a Datastore key for a Guestbook entity with name.""" return ndb.Key('Guestbook', guestbook_name or 'default_guestbook')
12,499