content
stringlengths
22
815k
id
int64
0
4.91M
def translate_boarding_cards(boarding_cards):
    """Turn an unordered list of BoardingCards into trip directions.

    The cards are chained start-to-end into a single ordered journey and
    each stage is rendered through its ``human_readable_message``.

    :param boarding_cards: list of :class:`BoardingCard` objects.
    :return: list of human readable strings describing the whole trip.
    """
    # Index every card by where it begins and where it ends.
    by_start = {card.start_key: card for card in boarding_cards}
    by_end = {card.end_key: card for card in boarding_cards}

    # The trip starts at the only location that is never an end point and
    # finishes at the only location that is never a start point.
    start_candidates = [key for key in by_start if key not in by_end]
    end_candidates = [key for key in by_end if key not in by_start]

    # Validate that the guess is unambiguous.
    if len(start_candidates) > 1:
        raise ValueError(u'More than 1 starting point in the trip!')
    if not start_candidates:
        raise ValueError(u'No starting point in the trip!')
    if len(end_candidates) > 1:
        raise ValueError(u'More than 1 ending point in the trip!')
    if not end_candidates:
        raise ValueError(u'No ending point in the trip!')

    final_key = end_candidates[0]

    # Follow the chain card by card until the final destination is reached.
    ordered_cards = [by_start[start_candidates[0]]]
    while ordered_cards[-1].end_key != final_key:
        ordered_cards.append(by_start[ordered_cards[-1].end_key])

    directions = [card.human_readable_message for card in ordered_cards]
    if TRIP_FINISH_MESSAGE:
        directions.append(TRIP_FINISH_MESSAGE)
    return directions
28,100
def detect_side(start: dict, point: dict, degrees):
    """Return a human-readable turn instruction for the robot.

    Compares the latitude/longitude of ``start`` and ``point`` to decide
    whether the robot should rotate left or right; a quarter turn (90
    degrees) is added when the target lies at a lower latitude.

    :param start: current position, a dict with ``lat`` and ``lng`` keys.
    :param point: target position, same shape as ``start``.
    :param degrees: base rotation angle.
    :return: instruction string, or ``None`` when no rule matches (equal
        longitudes with ``degrees`` not in (0, 180)).
    """
    # Target ahead (higher latitude): turn by the base angle.
    if start['lat'] < point['lat'] and start['lng'] < point['lng']:
        return f'{degrees} degrees right'
    elif start['lat'] < point['lat'] and start['lng'] > point['lng']:
        return f'{degrees} degrees left'
    # Target behind (lower latitude): add a quarter turn.
    elif start['lat'] > point['lat'] and start['lng'] < point['lng']:
        return f'{degrees + 90} degrees right'
    elif start['lat'] > point['lat'] and start['lng'] > point['lng']:
        return f'{degrees + 90} degrees left'
    # Straight ahead or straight back.
    elif degrees == 0:
        return f'{0} degrees'  # fixed typo: was "degress"
    elif degrees == 180:
        return f'{180} degrees right'
    # Same latitude: a purely sideways move.
    elif start['lat'] == point['lat'] and start['lng'] < point['lng']:
        return f'{degrees} degrees right'  # fixed typo: was "degress right"
    elif start['lat'] == point['lat'] and start['lng'] > point['lng']:
        return f'{degrees} degrees left'  # fixed typo: was "degress left"
28,101
def ScanSlnFile(filename):
    """Scan a Visual Studio .sln and extract the project dependencies.

    :param filename: path to the .sln file.
    :return: dict mapping project GUID -> Project, or the error code 1
        when the file cannot be opened (historical convention kept for
        existing callers).
    """
    try:
        sln = open(filename, "r")
    except IOError:
        sys.stderr.write("Unable to open " + filename + " for reading.\n")
        return 1
    projects = {}
    project = None
    # `with` guarantees the handle is closed even if parsing raises
    # (the original leaked the handle on any mid-parse exception).
    with sln:
        while True:
            line = sln.readline().strip()
            if not line:
                break
            if line.startswith('Project("{'):
                # Project definition line looks like
                # Project("$TypeGuid") = "$ProjectName", "$ProjectPath", "$ProjectGuid"$
                items = line.split('"')
                project = Project()
                project.name = items[3]
                project.path = items[5]
                project.guid = items[7]
                project.type = items[1]
                projects[items[7]] = project
            # Start of a dependency group.
            if line == "ProjectSection(ProjectDependencies) = postProject":
                line = sln.readline().strip()
                # Each dependency line starts with a GUID; keep only the
                # GUID-length prefix, until the group is closed.
                while line and line != "EndProjectSection":
                    project.deps.append(line[:len(project.guid)])
                    line = sln.readline().strip()
    # We are done parsing.
    return projects
28,102
def iterate_log_lines(file_path: pathlib.Path, n: int = 0, **kwargs):
    """Yield stripped lines from a log file, following it forever.

    Wraps GNU ``tail -f`` (via the ``sh`` library), first replaying the
    last ``n`` lines and then streaming new ones as they are appended.

    dev note: One of the best features of this function is that we can use
    efficient unix style operations. Because we know we are inside of a
    unix container there should be no problem relying on GNU tail directly.

    :param file_path: path of the log file to follow.
    :param n: number of existing lines to replay before live-tailing.
    :param kwargs: accepted but unused here.
    """
    abs_path = file_path.absolute()

    def get_tail_iter(replay=0):
        # -n <replay> replays that many trailing lines; -f keeps following.
        return sh.tail("-n", replay, "-f", str(abs_path), _iter=True)

    tail_itr = get_tail_iter(replay=n)
    while True:
        try:
            for line in tail_itr:
                yield line.strip()
        except KeyboardInterrupt as err:
            # User abort must propagate, never be retried.
            raise err
        except Exception as err:
            # Any other tail failure (e.g. rotation): log and restart the
            # tail without replaying old lines.
            log.error(err)
            log.warning("continuing tail of file")
            tail_itr = get_tail_iter(replay=0)
28,103
def moving_sum(x, start_idx: int, end_idx: int):
    """Windowed moving sum along the first dimension.

    From MONOTONIC CHUNKWISE ATTENTION, https://arxiv.org/pdf/1712.05382.pdf
    Equation (18):

        MovingSum(x, start_idx, end_idx)_n =
            Sigma_{m=n-(start_idx-1)}^{n+end_idx-1} x_m

    for n in {1, ..., N}, computed independently per batch column.

    :param x: tensor of shape (src_len, batch_size)
    :param start_idx: how far the window extends backwards (inclusive)
    :param end_idx: how far the window extends forwards (inclusive)
    :return: tensor of shape (src_len, batch_size)

    Example (src_len=5, batch_size=3)::

        x = [[0, 5, 10], [1, 6, 11], [2, 7, 12], [3, 8, 13], [4, 9, 14]]
        MovingSum(x, 3, 1) = [[0, 5, 10], [1, 11, 21], [3, 18, 33],
                              [6, 21, 36], [9, 24, 39]]
        MovingSum(x, 1, 3) = [[3, 18, 33], [6, 21, 36], [9, 24, 39],
                              [7, 17, 27], [4, 9, 14]]
    """
    assert start_idx > 0 and end_idx > 0
    assert len(x.size()) == 2
    src_len, batch_size = x.size()

    # conv1d layout: (batch, channels=1, length).
    signal = x.t().unsqueeze(1)
    window = start_idx + end_idx - 1
    # An all-ones kernel turns convolution into a windowed sum.
    kernel = signal.new_ones([1, 1, window])
    padded = torch.nn.functional.conv1d(signal, kernel, padding=window)
    # Trim the over-padding so the window is anchored as in Eq. (18).
    result = padded.squeeze(1).t()[end_idx:-start_idx]

    assert src_len == result.size(0)
    assert batch_size == result.size(1)
    return result
28,104
def _wait_for_event(event_name, redis_address, extra_buffer=0):
    """Block until an event has been broadcast.

    This is used to synchronize drivers for the multi-node tests.

    Args:
        event_name: The name of the event to wait for.
        redis_address: The address of the Redis server to use for
            synchronization, as a "host:port" string.
        extra_buffer: An amount of time in seconds to wait after the event.

    Returns:
        The data that was passed into the corresponding _broadcast_event
        call.

    Raises:
        Exception: if the same event name appears twice in the event list.
    """
    redis_host, redis_port = redis_address.split(":")
    redis_client = redis.StrictRedis(host=redis_host, port=int(redis_port))
    # Poll the shared event list until the requested event shows up.
    while True:
        event_infos = redis_client.lrange(EVENT_KEY, 0, -1)
        events = {}
        for event_info in event_infos:
            name, data = json.loads(event_info)
            if name in events:
                raise Exception("The same event {} was broadcast twice."
                                .format(name))
            events[name] = data
        if event_name in events:
            # Potentially sleep a little longer and then return the event
            # data.
            time.sleep(extra_buffer)
            return events[event_name]
        # Back off briefly between polls.
        time.sleep(0.1)
28,105
def _stack_exists(stack_name):
    """Return True when the CloudFormation stack ``stack_name`` exists."""
    cf = boto3.client('cloudformation')
    try:
        cf.describe_stacks(StackName=stack_name)
    except botocore.exceptions.ClientError as ex:
        # ValidationError is how CloudFormation reports "no such stack";
        # anything else is a genuine failure and is re-raised.
        if ex.response['Error']['Code'] != 'ValidationError':
            raise
        return False
    return True
28,106
def mode():
    """Compute mode.

    Formatting multiple modes is ambiguous in the context of pcalc, so
    more than one mode triggers an error instead of a guess.
    """
    counts = Counter(_values())
    top_two = counts.most_common(2)
    # Two distinct elements sharing the highest count means >= 2 modes.
    if len(counts) > 1 and top_two[0][-1] == top_two[1][-1]:
        raise click.ClickException("Multiple mode's - unsure how to format.")
    _echo(top_two[0][0])
28,107
def make_sequence_output(detections, classes):
    """Build the result payload for an entire sequence.

    :param detections: list of lists of detections, one entry per image in
        the sequence
    :param classes: class names in the order used by the label probabilities
    :return: dict with ``detections`` and ``classes`` keys
    """
    return {'detections': detections, 'classes': classes}
28,108
def json_to_dataframe(json, subset=0):
    """Build a DataFrame from a JSON-like record or list of records.

    :param json: a dict (single record) or a list of dicts.
    :param subset: when > 0, return only the first ``subset`` rows.
    :return: pandas.DataFrame
    :raises KeyError: when pandas cannot assemble the records.
    """
    # Normalise a single record into a one-element list so pandas always
    # receives a list of rows.
    if not isinstance(json, list):
        json = [json]
    try:
        df = pd.DataFrame(json, index=range(len(json)))
    except KeyError as identifier:
        print("There was an error")
        # Re-raise instead of swallowing: the original printed a message
        # and then crashed with an unrelated NameError on the undefined
        # `df` below.
        raise identifier
    if subset == 0:
        return df
    return df.head(subset)
28,109
def pow(a, b, num_threads=None, direction='left'):
    """Raise to power multithreaded.

    NOTE: intentionally shadows the builtin ``pow`` inside this module to
    mirror the numpy-style operator API.

    Args:
        a (np.ndarray or scalar): base operand
        b (np.ndarray or scalar): exponent operand
        num_threads: Number of threads to be used, overrides threads as
            set by mtalg.set_num_threads()
        direction: 'left' or 'right' to decide if a or b is modified
    """
    # Delegates to the shared dispatch helper with the in-place power op.
    __multithreaded_opr_direction(a, b, _pow_inplace, num_threads, direction=direction)
28,110
def dms_to_angle(dms):
    """
    Get the angle from a tuple of numbers or strings giving its sexagesimal
    representation in degrees.

    @param dms: (degrees, minutes, seconds)
    """
    degrees_token = dms[0]
    # A leading '-' applies to the whole angle, not just the degrees part.
    negative = degrees_token.startswith('-')
    if negative:
        degrees_token = degrees_token[1:]
    deg = int(degrees_token)
    minutes = int(dms[1])
    seconds = float(dms[2])
    if not 0 <= minutes < 60:
        raise VdtAngleError("not a valid value for minutes: " + str(minutes))
    if not 0 <= seconds < 60:
        raise VdtAngleError("not a valid value for seconds: " + str(seconds))
    sign = -1 if negative else 1
    return sign * VAngle((deg, minutes, seconds), unit=u.deg)
28,111
def fix(text):
    """Repairs encoding problems.

    Currently a pass-through: encoding repair happens elsewhere, so the
    ftfy call stays disabled.
    """
    # NOTE(Jonas): This seems to be fixed on the PHP side for now.
    # import ftfy
    # return ftfy.fix_text(text)
    return text
28,112
def generate_labeled_regions(shape, n_regions, rand_gen=None, labels=None,
                             affine=np.eye(4), dtype=int):
    """Generate a 3D volume with labeled regions.

    Parameters
    ----------
    shape: tuple
        shape of returned array

    n_regions: int
        number of regions to generate. By default (if "labels" is None),
        add a background with value zero.

    labels: iterable
        labels to use for each zone. If provided, n_regions is unused.

    rand_gen: numpy.random.RandomState
        random generator to use for generation.

    affine: numpy.ndarray
        affine of returned image

    dtype: type or numpy dtype
        dtype of the output data array.

    Returns
    -------
    regions: nibabel.Nifti1Image
        data has shape "shape", containing region labels.
    """
    n_voxels = shape[0] * shape[1] * shape[2]
    if labels is None:
        labels = range(0, n_regions + 1)
        n_regions += 1
    else:
        n_regions = len(labels)

    regions = generate_regions_ts(n_voxels, n_regions, rand_gen=rand_gen)
    # replace weights with labels
    for n, row in zip(labels, regions):
        row[row > 0] = n
    data = np.zeros(shape, dtype=dtype)
    # np.int / np.bool were removed in NumPy 1.24; the builtins are the
    # exact equivalents (np.int was an alias of int), so behavior is
    # unchanged on older NumPy and fixed on newer NumPy.
    data[np.ones(shape, dtype=bool)] = regions.sum(axis=0).T
    return nibabel.Nifti1Image(data, affine)
28,113
def get_source_item_ids(portal, q=None):
    """ Get ids of hosted feature services that have an associated scene
    service. Can pass in portal search function query (q). Returns ids
    only for valid source items.

    :param portal: portal client exposing ``search`` and ``item``.
    :param q: optional search query forwarded to ``portal.search``.
    :return: list of unique feature-service item ids.
    """
    source_item_ids = []
    scene_item_ids = get_scene_service_item_ids(portal)
    items = portal.search(q=q)
    for item in items:
        if item['type'] == 'Feature Service':
            if '/Hosted/' in item['url']:
                if 'Hosted Service' in item['typeKeywords']:
                    # if the service has been published the item
                    # will have 'Hosted Service' in typeKeywords

                    # Check if the feature service has an associated
                    # scene service by comparing service names (the
                    # second-to-last segment of the service URL).
                    feat_service_name = item['url'].split('/')[-2]
                    for scene_id in scene_item_ids:
                        scene_service_name = portal.item(scene_id)['url'].split('/')[-2]
                        if feat_service_name == scene_service_name:
                            # Guard against duplicates when several scene
                            # services share one feature service name.
                            if item['id'] not in source_item_ids:
                                source_item_ids.append(item['id'])
    return source_item_ids
28,114
def rasterize_poly(poly_xy, shape):
    """
    Rasterize a polygon into a boolean mask.

    Args:
        poly_xy: [(x1, y1), (x2, y2), ...] — the last vertex is dropped
            (the polygon is expected to be closed).
        shape: (rows, cols) of the output mask.

    Returns a bool array containing True for pixels inside the polygon.
    """
    # PIL wants *EXACTLY* a list of tuples (NOT a numpy array).
    vertices = [tuple(pt) for pt in poly_xy[:-1]]
    # PIL sizes are (width, height) while numpy shapes are (rows, cols).
    canvas = Image.new('L', (shape[1], shape[0]), 0)
    ImageDraw.Draw(canvas).polygon(vertices, outline=0, fill=1)
    return np.array(canvas) == 1
28,115
def from_url_representation(url_rep: str) -> str:
    """Reconvert url representation of path to actual path.

    ``__`` encodes a path separator and ``-_-`` encodes an underscore.
    """
    with_separators = url_rep.replace("__", "/")
    return with_separators.replace("-_-", "_")
28,116
async def test_async_start_from_history_and_switch_to_watching_state_changes_multiple(
    hass,
    recorder_mock,
):
    """Test we startup from history and switch to watching state changes."""
    hass.config.set_time_zone("UTC")
    utcnow = dt_util.utcnow()
    start_time = utcnow.replace(hour=0, minute=0, second=0, microsecond=0)

    # Start     t0        t1        t2       Startup                End
    # |--20min--|--20min--|--10min--|--10min--|---------30min---------|---15min--|---15min--|
    # |---on----|---on----|---on----|---on----|----------on-----------|---off----|----on----|

    def _fake_states(*args, **kwargs):
        # Recorder stub: the binary sensor has been "on" since midnight.
        return {
            "binary_sensor.state": [
                ha.State(
                    "binary_sensor.state",
                    "on",
                    last_changed=start_time,
                    last_updated=start_time,
                ),
            ]
        }

    with patch(
        "homeassistant.components.recorder.history.state_changes_during_period",
        _fake_states,
    ):
        with freeze_time(start_time):
            # Four history_stats sensors over the same 2 h window, in all
            # three modes: time (x2), count, ratio.
            await async_setup_component(
                hass,
                "sensor",
                {
                    "sensor": [
                        {
                            "platform": "history_stats",
                            "entity_id": "binary_sensor.state",
                            "name": "sensor1",
                            "state": "on",
                            "start": "{{ utcnow().replace(hour=0, minute=0, second=0) }}",
                            "duration": {"hours": 2},
                            "type": "time",
                        },
                        {
                            "platform": "history_stats",
                            "entity_id": "binary_sensor.state",
                            "name": "sensor2",
                            "state": "on",
                            "start": "{{ utcnow().replace(hour=0, minute=0, second=0) }}",
                            "duration": {"hours": 2},
                            "type": "time",
                        },
                        {
                            "platform": "history_stats",
                            "entity_id": "binary_sensor.state",
                            "name": "sensor3",
                            "state": "on",
                            "start": "{{ utcnow().replace(hour=0, minute=0, second=0) }}",
                            "duration": {"hours": 2},
                            "type": "count",
                        },
                        {
                            "platform": "history_stats",
                            "entity_id": "binary_sensor.state",
                            "name": "sensor4",
                            "state": "on",
                            "start": "{{ utcnow().replace(hour=0, minute=0, second=0) }}",
                            "duration": {"hours": 2},
                            "type": "ratio",
                        },
                    ]
                },
            )
            await hass.async_block_till_done()

            for i in range(1, 5):
                await async_update_entity(hass, f"sensor.sensor{i}")
            await hass.async_block_till_done()

    # At startup (t=0) no time has accumulated yet.
    assert hass.states.get("sensor.sensor1").state == "0.0"
    assert hass.states.get("sensor.sensor2").state == "0.0"
    assert hass.states.get("sensor.sensor3").state == "0"
    assert hass.states.get("sensor.sensor4").state == "0.0"

    one_hour_in = start_time + timedelta(minutes=60)
    with freeze_time(one_hour_in):
        async_fire_time_changed(hass, one_hour_in)
        await hass.async_block_till_done()

    # One hour continuously "on": 1.0 h tracked, 50% of the 2 h window.
    assert hass.states.get("sensor.sensor1").state == "1.0"
    assert hass.states.get("sensor.sensor2").state == "1.0"
    assert hass.states.get("sensor.sensor3").state == "0"
    assert hass.states.get("sensor.sensor4").state == "50.0"

    turn_off_time = start_time + timedelta(minutes=90)
    with freeze_time(turn_off_time):
        hass.states.async_set("binary_sensor.state", "off")
        await hass.async_block_till_done()
        async_fire_time_changed(hass, turn_off_time)
        await hass.async_block_till_done()

    # 1.5 h accumulated before turning off; ratio 1.5/2 = 75%.
    assert hass.states.get("sensor.sensor1").state == "1.5"
    assert hass.states.get("sensor.sensor2").state == "1.5"
    assert hass.states.get("sensor.sensor3").state == "0"
    assert hass.states.get("sensor.sensor4").state == "75.0"

    turn_back_on_time = start_time + timedelta(minutes=105)
    with freeze_time(turn_back_on_time):
        async_fire_time_changed(hass, turn_back_on_time)
        await hass.async_block_till_done()

    # Still off: a pure time tick changes nothing.
    assert hass.states.get("sensor.sensor1").state == "1.5"
    assert hass.states.get("sensor.sensor2").state == "1.5"
    assert hass.states.get("sensor.sensor3").state == "0"
    assert hass.states.get("sensor.sensor4").state == "75.0"

    with freeze_time(turn_back_on_time):
        hass.states.async_set("binary_sensor.state", "on")
        await hass.async_block_till_done()

    # The count sensor registers the off -> on transition.
    assert hass.states.get("sensor.sensor1").state == "1.5"
    assert hass.states.get("sensor.sensor2").state == "1.5"
    assert hass.states.get("sensor.sensor3").state == "1"
    assert hass.states.get("sensor.sensor4").state == "75.0"

    end_time = start_time + timedelta(minutes=120)
    with freeze_time(end_time):
        async_fire_time_changed(hass, end_time)
        await hass.async_block_till_done()

    # Final 15 min "on" adds 0.25 h: 1.75 h total, 87.5% of the window.
    assert hass.states.get("sensor.sensor1").state == "1.75"
    assert hass.states.get("sensor.sensor2").state == "1.75"
    assert hass.states.get("sensor.sensor3").state == "1"
    assert hass.states.get("sensor.sensor4").state == "87.5"
28,117
def runningmean(data, nav):
    """
    Compute the running mean of a 1-dimensional array.

    Args:
        data: Input data of shape (N, )
        nav: Number of points over which the data will be averaged

    Returns:
        Array of shape (N-(nav-1), )
    """
    # A uniform kernel convolved in 'valid' mode yields the moving average
    # without edge effects.
    window = np.ones(nav) / nav
    return np.convolve(data, window, mode='valid')
28,118
def retreive_dataset(filename, url):
    """
    Download datasets, like the EADL or EPDL on the IAEA website, by
    blatantly spoofing the User-Agent.

    Spoofing based on this reference:
        http://stackoverflow.com/a/802246/2465202
    Downloading based on this one:
        http://stackoverflow.com/a/22721/2465202

    :param filename: local path to write the downloaded bytes to.
    :param url: remote URL to fetch.
    """
    opener = build_opener()
    opener.addheaders = [('User-Agent', 'Mozilla/5.0')]
    remote_fid = opener.open(url)
    try:
        # The HTTP response body is bytes: write in binary mode
        # (text-mode 'w' raises TypeError on Python 3).
        with open(filename, 'wb') as local_fid:
            local_fid.write(remote_fid.read())
    finally:
        # Release the connection even if the local write fails.
        remote_fid.close()
28,119
def test_space(gym_space, expected_size, expected_min, expected_max): """Test that an action or observation space is the correct size and bounds. Parameters ---------- gym_space : gym.spaces.Box gym space object to be tested expected_size : int expected size expected_min : float or array_like expected minimum value(s) expected_max : float or array_like expected maximum value(s) Returns ------- bool True if the test passed, False otherwise """ return gym_space.shape[0] == expected_size \ and all(gym_space.high == expected_max) \ and all(gym_space.low == expected_min)
28,120
def multiindex_strategy( pandera_dtype: Optional[DataType] = None, strategy: Optional[SearchStrategy] = None, *, indexes: Optional[List] = None, size: Optional[int] = None, ): """Strategy to generate a pandas MultiIndex object. :param pandera_dtype: :class:`pandera.dtypes.DataType` instance. :param strategy: an optional hypothesis strategy. If specified, the pandas dtype strategy will be chained onto this strategy. :param indexes: a list of :class:`~pandera.schema_components.Index` objects. :param size: number of elements in the Series. :returns: ``hypothesis`` strategy. """ # pylint: disable=unnecessary-lambda if strategy: raise BaseStrategyOnlyError( "The dataframe strategy is a base strategy. You cannot specify " "the strategy argument to chain it to a parent strategy." ) indexes = [] if indexes is None else indexes index_dtypes = { index.name if index.name is not None else i: str(index.dtype) for i, index in enumerate(indexes) } nullable_index = { index.name if index.name is not None else i: index.nullable for i, index in enumerate(indexes) } strategy = pdst.data_frames( [index.strategy_component() for index in indexes], index=pdst.range_indexes( min_size=0 if size is None else size, max_size=size ), ).map(lambda x: x.astype(index_dtypes)) # this is a hack to convert np.str_ data values into native python str. for name, dtype in index_dtypes.items(): if dtype in {"object", "str"} or dtype.startswith("string"): # pylint: disable=cell-var-from-loop,undefined-loop-variable strategy = strategy.map( lambda df: df.assign(**{name: df[name].map(str)}) ) if any(nullable_index.values()): strategy = null_dataframe_masks(strategy, nullable_index) return strategy.map(pd.MultiIndex.from_frame)
28,121
def pitch_from_centers(X, Y):
    """Spot pitch in X and Y direction estimated from spot centers (X, Y).

    :param X: 2D array of spot center x-coordinates.
    :param Y: 2D array of spot center y-coordinates (same shape as X).
    :return: (pitch_x, pitch_y) tuple.
    """
    assert X.shape == Y.shape
    assert X.size > 1
    nrows, ncols = X.shape
    # With a single row (or single column) only one direction is
    # measurable; reuse that pitch for both axes.
    if ncols > 1 and nrows == 1:
        step = np.mean(np.diff(X, axis=1))
        return step, step
    if nrows > 1 and ncols == 1:
        step = np.mean(np.diff(Y, axis=0))
        return step, step
    # Both dimensions have more than one spot: measure each independently.
    return np.mean(np.diff(X, axis=1)), np.mean(np.diff(Y, axis=0))
28,122
def cleanup(signal_received, frame):
    """Signal handler: clear all pins, then exit the process cleanly.

    Args:
        signal_received: signal number (unused).
        frame: current stack frame (unused).
    """
    clear_all_pins()
    sys.exit(0)
28,123
def upload(files, to, config, delete_on_success, print_file_id, force_file,
           forward, directories, large_files, caption, no_thumbnail):
    """Upload one or more files to Telegram using your personal account.

    The maximum file size is 1.5 GiB and by default they will be saved in
    your saved messages.
    """
    client = Client(config or default_config())
    client.start()
    # DIRECTORY_MODES / LARGE_FILE_MODES map the chosen mode name to a
    # transform applied over the file list.
    files = DIRECTORY_MODES[directories](files)
    if directories == 'fail':
        # Validate now — materialize the lazy transform so problems surface
        # before any upload starts.
        files = list(files)
    files = LARGE_FILE_MODES[large_files](files)
    if large_files == 'fail':
        # Validate now
        files = list(files)
    client.send_files(to, files, delete_on_success, print_file_id,
                      force_file, forward, caption, no_thumbnail)
28,124
def scilab_console():
    """
    This requires that the optional Scilab program be installed and in
    your PATH, but no optional Sage packages need to be installed.

    EXAMPLES:
        sage: from sage.interfaces.scilab import scilab_console # optional - scilab
        sage: scilab_console()                    # optional - scilab; not tested
                ___________________________________________
                             scilab-5.0.3

                         Consortium Scilab (DIGITEO)
                       Copyright (c) 1989-2008 (INRIA)
                       Copyright (c) 1989-2007 (ENPC)
                ___________________________________________

        Startup execution:
          loading initial environment

        -->2+3
         ans  =

           5.

        -->quit

    Typing quit exits the Scilab console and returns you to Sage.
    Scilab, like Sage, remembers its history from one session to another.
    """
    # Blocks until the user quits the interactive Scilab session.
    os.system('scilab -nogui')
28,125
def _create_lists(config, results, current, stack, inside_cartesian=None): """ An ugly recursive method to transform config dict into a tree of AbstractNestedList. """ # Have we done it already? try: return results[current] except KeyError: pass # Check recursion depth and detect loops if current in stack: raise ConfigurationError('Rule {!r} is recursive: {!r}'.format(stack[0], stack)) if len(stack) > 99: raise ConfigurationError('Rule {!r} is too deep'.format(stack[0])) # Track recursion depth stack.append(current) try: # Check what kind of list we have listdef = config[current] list_type = listdef[_CONF.FIELD.TYPE] # 1. List of words if list_type == _CONF.TYPE.WORDS: results[current] = WordList(listdef['words']) # List of phrases elif list_type == _CONF.TYPE.PHRASES: results[current] = PhraseList(listdef['phrases']) # 2. Simple list of lists elif list_type == _CONF.TYPE.NESTED: results[current] = NestedList([_create_lists(config, results, x, stack, inside_cartesian=inside_cartesian) for x in listdef[_CONF.FIELD.LISTS]]) # 3. Cartesian list of lists elif list_type == _CONF.TYPE.CARTESIAN: if inside_cartesian is not None: raise ConfigurationError("Cartesian list {!r} contains another Cartesian list " "{!r}. Nested Cartesian lists are not allowed." .format(inside_cartesian, current)) results[current] = CartesianList([_create_lists(config, results, x, stack, inside_cartesian=current) for x in listdef[_CONF.FIELD.LISTS]]) # 4. Scalar elif list_type == _CONF.TYPE.CONST: results[current] = Scalar(listdef[_CONF.FIELD.VALUE]) # Unknown type else: raise InitializationError("Unknown list type: {!r}".format(list_type)) # Return the result return results[current] finally: stack.pop()
28,126
def encode(value):
    """
    Encode strings in UTF-8.

    :param value: string, or iterable of strings, to be encoded in UTF-8
    :return: the UTF-8 encoded bytes
    """
    # Bug fix: the old code wrapped the bytes in str(), which on Python 3
    # returns the literal "b'...'" representation instead of the encoded
    # value (on Python 2 str() over bytes was a no-op, so this is
    # behavior-preserving there).
    return u''.join(value).encode('utf-8')
28,127
def get_season(months, str_='{}'):
    """
    Creates a season string.

    Parameters:
    - months (list of int)
    - str_ (str, optional): Formatter string, should contain exactly one
      {} at the position where the season substring is included.

    Returns:
        str
    """
    if months is None:
        return ''
    selected = set(months)
    # Known season groupings, checked in order; the first one that fully
    # covers the given months wins.
    named_seasons = (
        ({1, 2, 12}, 'DJF'),
        ({3, 4, 5}, 'MAM'),
        ({6, 7, 8}, 'JJA'),
        ({9, 10, 11}, 'SON'),
        ({11, 12, 1, 2, 3}, 'NDJFM'),
        ({5, 6, 7, 8, 9}, 'MJJAS'),
    )
    for season_months, label in named_seasons:
        if selected.issubset(season_months):
            return str_.format(label)
    # No named season matches: fall back to a dash-joined month list.
    return str_.format('-'.join(map(str, months)))
28,128
def data_availability(tags):
    """
    Derive availability from the validation tags.

    Args:
        tags (pandas.DataFrame): errors tagged as true (see function
            data_validation)

    Returns:
        pandas.Series: availability — True where no error tag is set in
        the row
    """
    has_any_error = tags.any(axis=1)
    return ~has_any_error
28,129
def name(ctx: EndpointContext) -> None:
    """Handles changing the name of the client.

    Names longer than 31 characters are truncated to the first 31. The
    client is informed of the success of the operation.

    Args:
        ctx: The request's context.

    Raises:
        HTTPException: when the "name" parameter is not a string.
    """
    raw_name = ctx.params["name"]
    if not isinstance(raw_name, str):
        raise HTTPException.unsupported_media_type(True)
    # HTML-escape first, then cap at 31 characters.
    ctx.session["nickname"] = escape(raw_name)[:31]
    ctx.json_ok()
28,130
async def fetch_image_by_id(
    image_uid: str
):
    """
    API request to return a single image by uid.
    """
    # The path parameter arrives as a string; the lookup expects an int.
    return utils_com.get_com_image_by_uid(int(image_uid))
28,131
def last(user, channel, text):
    """Show the last lines from the log.

    Args:
        user: recipient the lines are sent to.
        channel: channel the command came from (unused here).
        text: raw command text; an optional second word is the requested
            line count, capped at the configured "max_lines".
    """
    max_lines = lala.config.get_int("max_lines")
    s_text = text.split()
    try:
        # Second word of the command may carry a line count.
        lines = min(max_lines, int(s_text[1]))
    except IndexError:
        # No explicit count given: fall back to the configured maximum.
        # NOTE(review): a non-numeric count raises ValueError here —
        # presumably handled upstream; confirm.
        lines = max_lines
    logfile = lala.config.get("log_file")
    with codecs.open(logfile, "r", "utf-8") as _file:
        _lines = _file.readlines()
        # Never request more lines than the file actually has.
        lines = min(lines, len(_lines))
        msg(user, _lines[-lines:], log=False)
28,132
def get_spring_break(soup_lst, year):
    """
    Purpose:
      * returns a set of the weekdays (as datetimes) during spring break
      * only relevant for spring semesters
    """
    break_days = set()
    # The entry right before the "Spring Break begins..." marker holds the
    # date (e.g. "March 8") of the Friday before the break.
    for idx, entry in enumerate(soup_lst):
        if entry == "Spring Break begins after last class.":
            pre_friday = datetime.strptime(
                soup_lst[idx - 1] + " " + year, "%B %d %Y")
            break
    # Walk forward from the next day, collecting Mon-Thu (skipping the
    # weekend) until the break's Friday is reached, then include it too.
    day = pre_friday + timedelta(1)
    while day.weekday() != 4:
        if day.weekday() not in (5, 6):
            break_days.add(day)
        day += timedelta(1)
    break_days.add(day)
    return break_days
28,133
def GetProQ3Option(query_para):
    """Return the proq3opt in list.

    :param query_para: dict with the boolean switches 'isDeepLearning',
        'isRepack', 'isKeepFiles', the string 'method_quality' and an
        optional integer 'targetlength'.
    :return: list of command-line option tokens for proq3.
    """
    # Map each boolean switch to the "yes"/"no" string proq3 expects.
    flags = {
        key: "yes" if query_para[key] else "no"
        for key in ('isDeepLearning', 'isRepack', 'isKeepFiles')
    }
    proq3opt = [
        "-r", flags['isRepack'],
        "-deep", flags['isDeepLearning'],
        "-k", flags['isKeepFiles'],
        "-quality", query_para['method_quality'],
        # always output PDB file (with proq3 written at the B-factor column)
        "-output_pdbs", "yes",
    ]
    if 'targetlength' in query_para:
        proq3opt += ["-t", str(query_para['targetlength'])]
    return proq3opt
28,134
def test_migration_slug_generator(unit, resource): """ Test that the migration which adds slug fields also generates values for those. """ # set initial fake values for slugs Unit.objects.filter(id=unit.id).update(slug="xxxfakeunit") Resource.objects.filter(id=resource.id).update(slug="xxxfakeresource") # execute the migration's slug generator forwards(apps, None, force=True) # check that the fake values are long gone unit.refresh_from_db() resource.refresh_from_db() assert unit.slug == "testiyksikko" assert resource.slug == "testiyksikon-testiresurssi"
28,135
def is_empty_config(host):
    """
    Check if any services should be configured to run on the given host.
    A host without an AS assignment needs no service configuration.
    """
    assigned_as = host.AS
    return assigned_as is None
28,136
def total_value(metric):
    """Given a time series of values, sum the values.

    :param metric: iterable of numeric values.
    :return: the total (0 for an empty series).
    """
    # Builtin sum replaces the manual accumulator loop.
    return sum(metric)
28,137
def unpackJSON(target_naming_scheme, chemdf_dict):
    """
    most granular data for each row of the final CSV is the well
    information. Each well will need all associated information of
    chemicals, run, etc. Unpack those values first and then copy the
    generated array to each of the individual wells

    developed enough now that it should be broken up into smaller pieces!

    Parameters
    ----------
    target_naming_scheme : target folder for storing the run and
        associated data.

    chemdf_dict : dict of pandas.DataFrames assembled from all lab
        inventories; reads in all of the chemical inventories which
        describe the chemical content from each lab used across the
        dataset construction. (Unused in this function's body — carried
        for interface compatibility.)

    Return
    ------
    concat_df_raw : pd.DataFrame, all of the raw values from the
        processed JSON files

    Notes: unlike previous version, no additional calculations are
    performed, just parsing the files
    """
    concat_df = pd.DataFrame()
    concat_df_raw = pd.DataFrame()
    # Collect the experiment JSON files in deterministic (sorted) order.
    json_list = []
    for my_exp_json in sorted(os.listdir(target_naming_scheme)):
        if my_exp_json.endswith(".json"):
            json_list.append(my_exp_json)
    for my_exp_json in tqdm(json_list):
        modlog.info('(3/4) Unpacking %s' %my_exp_json)
        concat_df = pd.DataFrame()
        #appends each run to the original dataframe
        json_fname = (os.path.join(target_naming_scheme, my_exp_json))
        # NOTE(review): the file handle from open() is never closed —
        # consider a `with` block.
        experiment_dict = json.load(open(json_fname, 'r'))
        modlog.info('Parsing %s to 2d dataframe' %json_fname)
        tray_df = parser.tray_parser(experiment_dict) #generates the tray level dataframe
        concat_df = pd.concat([concat_df,tray_df], ignore_index=True, sort=True)
        #generates a well level unique ID and aligns
        runID_df=pd.DataFrame(data=[concat_df['_raw_jobserial'] + '_' + concat_df['_raw_vialsite']]).transpose()
        runID_df.columns=['runid_vial']
        #combines all operations into a final dataframe for the entire tray level view with all information
        concat_df = pd.concat([concat_df, runID_df], sort=True, axis=1)
        #Combines the most recent dataframe with the final dataframe which is targeted for export
        concat_df_raw = pd.concat([concat_df_raw,concat_df], sort=True)
    return(concat_df_raw)
28,138
def mtrace(m):
    """Takes module name, adds imported modules recursively to mtable.

    Scans the module's source line by line for ``import`` and
    ``from ... import`` statements, skipping one-block docstrings and
    comments. Results accumulate in the global ``mtable`` dict
    (module name -> list of imported module names).

    :param m: module name to trace.
    """
    if m in mtable:
        # Already traced (Python-3 fix: dict.has_key no longer exists).
        return
    mtable[m] = []
    mf = get_module_file(m)
    if mf is None:
        return
    # `with` closes the file; the original leaked the handle. The py2-only
    # `string` module functions are replaced by the equivalent str methods.
    with open(mf) as f:
        l = f.readline()
        while l:
            # Skipping doc strings at beginning of file: a line holding a
            # single `"""` opens one; consume until the closing `"""`.
            idx = l.find('"""')
            if idx != -1 and idx == l.rfind('"""'):
                l = f.readline()
                while l and l.find('"""') == -1:
                    l = f.readline()
                # If there are no more lines, exit
                if not l:
                    break
            # Sanitize line and split into tokens
            token = l.strip().replace(',', ' ').split()
            tl = len(token)
            i = 0
            # Iterate over tokens
            while i < tl:
                # Skip to next line if we hit a comment
                if token[i][0] == '#':  # comment
                    break
                # If line is `from <mod> import *`
                if (token[i] == 'from' or token[i] == '"from') \
                        and i + 2 < tl and token[i + 2] == 'import':
                    module = token[i + 1]
                    # Add mod to mtable and recurse
                    if module not in mtable[m]:
                        mtable[m].append(module)
                        mtrace(module)
                    # Move past 'from <mod>'
                    i = i + 2
                # If line is `import <mod>[,<mod>]*`
                elif token[i] == 'import':
                    # Iterate over further tokens
                    for j in token[i + 1:]:
                        # Exit if we hit a comment
                        if j[0] == '#':  # comment
                            break
                        module = j
                        # Defensive: commas were replaced by spaces above,
                        # but keep the historical strip for safety.
                        if module[-1] == ',':
                            module = module[:-1]
                        # Add mod to mtable and recurse
                        if module not in mtable[m]:
                            mtable[m].append(module)
                            mtrace(module)
                        # Move to next mod
                        i = i + 1
                # Go to next token
                i = i + 1
            # Go to next line
            l = f.readline()
28,139
def set_thread_exception_handler():
    """
    taken from http://bugs.python.org/issue1230540

    Workaround for sys.excepthook thread bug
    From
    http://spyced.blogspot.com/2007/06/workaround-for-sysexcepthook-bug.html

    (https://sourceforge.net/tracker/?func=detail&atid=105470&aid=1230540&group_id=5470).
    Call once from __main__ before creating any threads.
    If using psyco, call psyco.cannotcompile(threading.Thread.run)
    since this replaces a new-style class method.
    """
    # Keep a reference to the original initializer so every Thread still
    # runs its normal setup before `run` is wrapped.
    init_old = threading.Thread.__init__

    def init(self, *args, **kwargs):
        init_old(self, *args, **kwargs)
        run_old = self.run

        # Wrap the thread body so uncaught exceptions reach sys.excepthook
        # (dispatched on the main thread); user aborts still propagate.
        def run_with_except_hook(*args, **kw):
            try:
                run_old(*args, **kw)
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                # Import deferred to avoid loading the GUI at module import.
                from pychron.core.ui.gui import invoke_in_main_thread
                invoke_in_main_thread(sys.excepthook, *sys.exc_info())

        self.run = run_with_except_hook

    # Monkeypatch: every Thread created after this call gets the wrapper.
    threading.Thread.__init__ = init
28,140
def create_vpn_int_feature_template(types, name, if_name, description, ip_addr_name, color):
    """Create a vManage "vpn-vedge-interface" feature template via the REST API.

    Usage:
        sdwancli template feature create banner -t '["vedge-cloud", "vedge-1000"]' -n VE-banner-2

    :param types: list of device types the template applies to
    :param name: template name
    :param if_name: interface name
    :param description: interface description
    :param ip_addr_name: device-specific variable name for the interface IP.
        When truthy, a full template (variable IP + tunnel-interface settings)
        is built; otherwise a minimal name/description-only template.
    :param color: TLOC color for the tunnel interface (used only when
        ``ip_addr_name`` is truthy)

    Side effects: POSTs to vManage, prints the JSON response on success,
    or prints an error and exits the process on failure.
    """
    # `vmanage` and `authentication` are module-level; headers carry the session auth.
    headers = authentication(vmanage)
    base_url = "https://" + f'{vmanage["host"]}:{vmanage["port"]}/dataservice'
    api = "/template/feature"
    url = base_url + api
    if ip_addr_name:
        # Full interface template: IP bound to a device-specific variable,
        # plus tunnel-interface color/restrict and allowed services.
        payload = {
            "deviceType": types,
            "templateType": "vpn-vedge-interface",
            "templateMinVersion": "15.0.0",
            "templateDefinition": {
                "if-name": {
                    "vipObjectType": "object",
                    "vipType": "constant",
                    "vipValue": if_name,
                    "vipVariableName": "vpn_if_name"
                },
                "description": {
                    "vipObjectType": "object",
                    "vipType": "constant",
                    "vipValue": description,
                    "vipVariableName": "vpn_if_description"
                },
                "ip": {
                    "address": {
                        # "variableName" => value supplied per-device at attach time
                        "vipObjectType": "object",
                        "vipType": "variableName",
                        "vipValue": "",
                        "vipVariableName": ip_addr_name
                    }
                },
                "shutdown": {
                    "vipObjectType": "object",
                    "vipType": "constant",
                    "vipValue": "false",
                    "vipVariableName": "vpn_if_shutdown"
                },
                "tunnel-interface": {
                    "color": {
                        "value": {
                            "vipObjectType": "object",
                            "vipType": "constant",
                            "vipValue": color,
                            "vipVariableName": "vpn_if_tunnel_color_value"
                        },
                        "restrict": {
                            "vipObjectType": "node-only",
                            "vipType": "ignore",
                            "vipValue": "false",
                            "vipVariableName": "vpn_if_tunnel_color_restrict"
                        }
                    },
                    # Services permitted through the tunnel interface.
                    "allow-service": {
                        "sshd": {
                            "vipObjectType": "object",
                            "vipType": "constant",
                            "vipValue": "true",
                            "vipVariableName": "vpn_if_tunnel_sshd"
                        },
                        "all": {
                            "vipObjectType": "object",
                            "vipType": "constant",
                            "vipValue": "true",
                            "vipVariableName": "vpn_if_tunnel_all"
                        },
                        "netconf": {
                            "vipObjectType": "object",
                            "vipType": "constant",
                            "vipValue": "true",
                            "vipVariableName": "vpn_if_tunnel_netconf"
                        }
                    }
                }
            },
            "factoryDefault": "false",
            "templateName": name,
            "templateDescription": "VPN Interface Ethernet feature template"
        }
    else:
        # Minimal template: only interface name and description.
        payload = {
            "deviceType": types,
            "templateType": "vpn-vedge-interface",
            "templateMinVersion": "15.0.0",
            "templateDefinition": {
                "if-name": {
                    "vipObjectType": "object",
                    "vipType": "constant",
                    "vipValue": if_name,
                    "vipVariableName": "vpn_if_name"
                },
                "description": {
                    "vipObjectType": "object",
                    "vipType": "constant",
                    "vipValue": description,
                    "vipVariableName": "vpn_if_description"
                }
            },
            "factoryDefault": "false",
            "templateName": name,
            "templateDescription": "VPN Interface Ethernet feature template"
        }
    # verify=False: vManage typically runs with a self-signed certificate.
    response = requests.post(
        url=url, data=json.dumps(payload), headers=headers, verify=False)
    if response.status_code == 200:
        print(json.dumps(response.json(), indent=4))
    else:
        print("Failed to create the template ")
        exit()
28,141
def generate_fixture_tests(metafunc: Any,
                           base_fixture_path: str,
                           filter_fn: Callable[..., Any] = identity,
                           preprocess_fn: Callable[..., Any] = identity) -> None:
    """
    Helper function for use with `pytest_generate_tests` which will use the
    pytest caching facilities to reduce the load time for fixture tests.

    - `metafunc` is the parameter from `pytest_generate_tests`
    - `base_fixture_path` is the base path that fixture files will be collected from.
    - `filter_fn` handles ignoring or marking the various fixtures.  See
      `filter_fixtures`.
    - `preprocess_fn` handles any preprocessing that should be done on the raw
      fixtures (such as expanding the statetest fixtures to be multiple tests for
      each fork.
    """
    # Only parametrize test functions that actually take `fixture_data`.
    if 'fixture_data' not in metafunc.fixturenames:
        return

    found = find_fixtures(base_fixture_path)
    if not found:
        raise AssertionError(
            f"Suspiciously found zero fixtures: {base_fixture_path}"
        )

    selected = filter_fn(preprocess_fn(found))
    metafunc.parametrize('fixture_data', selected, ids=idfn)
28,142
def validate_email_add(email_str):
    """Validates the email string.

    Extracts the bare address from ``email_str`` via ``extract_email_id`` and
    matches it (lower-cased) against an RFC-5322-style local@domain pattern.

    :param email_str: raw email string, possibly with a display name
    :return: the ``re.Match`` object (truthy) when valid, ``None`` otherwise
    """
    email = extract_email_id(email_str)
    import re
    # Raw-string literals: the original non-raw pattern relied on invalid
    # escape sequences ("\."), which modern Python flags as SyntaxWarning.
    return re.match(
        r"[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*"
        r"@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?",
        email.lower())
28,143
def get_seed_nodes_json(json_node: dict, seed_nodes_control) -> dict:
    """
    We need to seed some json sections for extract_fields. This seeds those
    nodes as needed.

    :param json_node: source JSON object to pull values from
    :param seed_nodes_control: a list of ``{output_key: source_key}`` mappings,
        or a single such mapping
    :return: dict of ``output_key -> json_node[source_key]`` for every source
        key present in ``json_node``; empty dict for unsupported input types
    """
    seed_json_output = {}
    if isinstance(seed_nodes_control, dict):
        # A bare mapping is treated as a one-element list. (The original code
        # iterated the dict directly, yielding its keys and then crashing on
        # `str.items()`.)
        nodes = [seed_nodes_control]
    elif isinstance(seed_nodes_control, list):
        nodes = seed_nodes_control
    else:
        nodes = []
    for node in nodes:
        for key, value in node.items():
            if value in json_node:
                seed_json_output[key] = json_node[value]
    return seed_json_output
28,144
def load_object(import_path):
    """
    Loads an object from an 'import_path', like in MIDDLEWARE_CLASSES and the
    likes.

    Import paths should be: "mypackage.mymodule.MyObject". It then imports the
    module up until the last dot and tries to get the attribute after that dot
    from the imported module.

    If the import path does not contain any dots, a TypeError is raised.
    If the module cannot be imported, an ImportError is raised.
    If the attribute does not exist in the module, a AttributeError is raised.
    """
    # Non-strings (e.g. an already-imported object) pass through unchanged.
    # `str` replaces the Python 2-only `basestring`, which raises NameError
    # on Python 3.
    if not isinstance(import_path, str):
        return import_path
    if '.' not in import_path:
        raise TypeError(
            "'import_path' argument to 'django_load.core.load_object' " +\
            "must contain at least one dot.")
    module_name, object_name = import_path.rsplit('.', 1)
    module = import_module(module_name)
    return getattr(module, object_name)
28,145
def _inufft(kspace, trajectory, sensitivities=None, image_shape=None,
            tol=1e-5, max_iter=10, return_cg_state=False, multicoil=None,
            combine_coils=True):
    """MR image reconstruction using iterative inverse NUFFT.

    Solves the normal equations (A^H A) x = A^H b with conjugate gradients,
    where A is the NUFFT operator for the given `trajectory`.

    For the parameters, see `tfmr.reconstruct`.
    """
    kspace = tf.convert_to_tensor(kspace)
    trajectory = tf.convert_to_tensor(trajectory)
    if sensitivities is not None:
        sensitivities = tf.convert_to_tensor(sensitivities)

    # Infer rank from number of dimensions in trajectory.
    rank = trajectory.shape[-1]
    if rank > 3:
        raise ValueError(
            f"Can only reconstruct images up to rank 3, but `trajectory` implies "
            f"rank {rank}.")

    # Check inputs and set defaults.
    if image_shape is None: # `image_shape` is required.
        raise ValueError("Argument `image_shape` must be provided for NUFFT.")
    image_shape = tf.TensorShape(image_shape)
    image_shape.assert_has_rank(rank)

    if multicoil is None:
        # `multicoil` defaults to True if sensitivities were passed; False
        # otherwise.
        multicoil = sensitivities is not None

    # Leading dims of `kspace` (everything but the samples axis) are batch dims.
    batch_shape = tf.shape(kspace)[:-1]

    # Set up system operator and right hand side.
    linop_nufft = linalg_ops.LinearOperatorNUFFT(image_shape, trajectory)
    # A^H A is self-adjoint and positive (semi-)definite, as CG requires.
    operator = tf.linalg.LinearOperatorComposition(
        [linop_nufft.H, linop_nufft],
        is_self_adjoint=True, is_positive_definite=True)

    # Compute right hand side.
    rhs = tf.linalg.matvec(linop_nufft.H, kspace)

    # Solve linear system using conjugate gradient iteration.
    result = linalg_ops.conjugate_gradient(operator, rhs,
                                           x=None,
                                           tol=tol,
                                           max_iter=max_iter)

    # Restore image shape.
    image = tf.reshape(result.x, tf.concat([batch_shape, image_shape], 0))

    # Do coil combination.
    if multicoil and combine_coils:
        # Coil axis sits immediately before the `rank` spatial axes.
        image = coil_ops.combine_coils(image,
                                       maps=sensitivities,
                                       coil_axis=-rank-1)

    return (image, result) if return_cg_state else image
28,146
def adjust_cart(request, item_id):
    """Adjust the quantity of the specified product to the specified amount.

    A positive quantity updates the session cart entry; zero or a negative
    quantity removes the item. Responds 404 for an unparseable quantity.
    """
    album = get_object_or_404(Album, pk=item_id)

    # Returns 404 if an invalid quantity is entered.
    # TypeError covers a missing "quantity" field (int(None)); ValueError a
    # non-numeric one — narrower than the original blanket `except Exception`.
    try:
        quantity = int(request.POST.get("quantity"))
    except (TypeError, ValueError):
        return HttpResponse(status=404)

    cart = request.session.get("cart", {})

    # Updates quantity of existing cart item or removes it if quantity <= 0
    if quantity > 0:
        cart[item_id] = quantity
        messages.success(request, f"Updated {album.name} quantity to {cart[item_id]}")
    else:
        # pop with a default: the original raised KeyError when the item was
        # not in the cart (e.g. a stale form submission).
        if cart.pop(item_id, None) is not None:
            messages.success(request, f"Removed {album.name} from your cart.")

    request.session["cart"] = cart
    return redirect(reverse("view_cart"))
28,147
def calculate_delta_v(scouseobject, momone, momnine):
    """
    Calculate the difference between the moment one and the velocity of the
    channel containing the peak flux

    Parameters
    ----------
    scouseobject : instance of the scousepy class
        (unused here; kept for API compatibility with the other map helpers)
    momone : ndarray
        moment one (intensity-weighted average velocity) map
    momnine : ndarray
        map containing the velocities of channels containing the peak flux at
        each location

    Returns
    -------
    ndarray
        absolute velocity difference map
    """
    # The original pre-allocated a NaN-filled array and then immediately
    # rebound the name — dead code removed.
    return np.abs(momone.value - momnine.value)
28,148
def _validate_image(values):
    """
    Validates the incoming data and raises a Invalid exception
    if anything is out of order.

    :param values: Mapping of image metadata to check
    :return: the (unmodified) ``values`` mapping when valid
    """
    status = values.get('status', None)
    if not status:
        raise exception.Invalid("Image status is required.")
    if status not in STATUSES:
        raise exception.Invalid("Invalid image status '%s' for image." % status)
    return values
28,149
def revcmp(x, y):
    """Does the reverse of cmp(): Return negative if y<x,
    zero if y==x, positive if y>x"""
    # cmp() was removed in Python 3; (a > b) - (a < b) is the documented
    # equivalent, applied here with the arguments reversed.
    return (y > x) - (y < x)
28,150
def linear_regression(data: pd.DataFrame):
    """
    Fit an ordinary-least-squares line to the closing prices.

    https://www.statsmodels.org/

    :param data: dataset; must contain a ``Close`` column
    :return: tuple of (fitted y values, slope k, intercept b, k converted to
        degrees)
    """
    y_arr = data.Close.values
    # x is simply the bar index 0..n-1.
    x_arr = np.arange(0, len(y_arr))
    # add_constant prepends the intercept column for OLS.
    b_arr = sm.add_constant(x_arr)
    model = regression.linear_model.OLS(y_arr, b_arr).fit()
    b, k = model.params  # y = kx + b : params[1] = k
    y_fit = x_arr * k + b
    # NOTE(review): np.rad2deg(k) treats the raw slope as an angle in radians;
    # the slope-to-angle conversion is usually np.rad2deg(np.arctan(k)) —
    # confirm the intended semantics before relying on the fourth value.
    return y_fit, k, b, np.rad2deg(k)
28,151
def get_expiry():
    """
    Returns the membership IDs of memberships expiring within 'time_frame'
    amount of MONTHS

    Query parameter ``time_frame`` must be an integer number of months;
    responds with a 400-style JSON payload otherwise.
    """
    time_frame = request.args.get('time_frame')
    try:
        time_frame = int(time_frame)
    except ValueError as e:
        print(e)
        return jsonify({
            'code': 400,
            'error': 'Not valid monthly time frame, should only be int'
        })
    expiring_members = []
    session = Session()
    now = datetime.date.today()
    # Date `time_frame` months in the PAST.
    relativeMonths = now - relativedelta(months=time_frame)
    # NOTE(review): filtering on endDate > (today - time_frame months) selects
    # everything that has not yet expired more than time_frame months ago; an
    # "expiring within N months" window would normally be
    # endDate < today + N months — confirm which is intended.
    memberShooterTable = session.query(Member, Shooter) \
        .join(Shooter) \
        .filter(and_(Member.endDate > relativeMonths, Member.status != Status.EXPIRED))
    print("Memberships expiring with " + str(time_frame) + " months")
    for row in memberShooterTable:
        print(row)
        print(row.Member.email)
        print(row.Shooter.name)
        returnMember = {'name': row.Shooter.name,
                        'mid': row.Member.mid,
                        'email': row.Member.email,
                        'endDate': row.Member.endDate}
        expiring_members.append(returnMember)
    return jsonify({
        'code': 200,
        'table': 'Expiring Members',
        'entries': expiring_members
    })
28,152
def get_functions(input_file):
    """Alias for :func:`load_data` below; kept for backward compatibility."""
    data = load_data(input_file)
    return data
28,153
def is_elem_ref(elem_ref):
    """
    Returns True if the elem_ref is an element reference: a 3-tuple whose
    first item is ElemRefObj or ElemRefArr.

    :param elem_ref: candidate value
    :return: bool — the original returned the falsy input itself (e.g. None)
        instead of False, contradicting its own contract
    """
    return bool(
        elem_ref
        and isinstance(elem_ref, tuple)
        and len(elem_ref) == 3
        and elem_ref[0] in (ElemRefObj, ElemRefArr)
    )
28,154
# API-usage stub: records an observed call signature (the Literal types are the
# concrete argument values seen in the analysed corpus); the ellipsis body is
# intentional — this is not an implementation.
def read_csv(filepath_or_buffer: Literal["test.csv"], encoding: Literal["utf8"]):
    """
    usage.modin: 1
    """
    ...
28,155
def listen(target, identifier, fn, *args, **kw):
    """Register a listener function for the given target.

    e.g.::

        from sqlalchemy import event
        from sqlalchemy.schema import UniqueConstraint

        def unique_constraint_name(const, table):
            const.name = "uq_%s_%s" % (
                table.name,
                list(const.columns)[0].name
            )
        event.listen(
                UniqueConstraint,
                "after_parent_attach",
                unique_constraint_name)

    A given function can also be invoked for only the first invocation
    of the event using the ``once`` argument::

        def on_config():
            do_config()

        event.listen(Mapper, "before_configure", on_config, once=True)

    .. versionadded:: 0.9.4 Added ``once=True`` to :func:`.event.listen`
       and :func:`.event.listens_for`.

    .. note::

        The :func:`.listen` function cannot be called at the same time
        that the target event is being run.   This has implications
        for thread safety, and also means an event cannot be added
        from inside the listener function for itself.  The list of
        events to be run are present inside of a mutable collection
        that can't be changed during iteration.

        Event registration and removal is not intended to be a "high
        velocity" operation; it is a configurational operation.  For
        systems that need to quickly associate and deassociate with
        events at high scale, use a mutable structure that is handled
        from inside of a single listener.

        .. versionchanged:: 1.0.0 - a ``collections.deque()``
           object is now used as the container for the list of events, which
           explicitly disallows collection mutation while the collection is being
           iterated.

    .. seealso::

        :func:`.listens_for`

        :func:`.remove`

    """
    # Resolve (target, identifier, fn) to a canonical registration key and
    # register it; extra args (e.g. once=True) are forwarded to the listen impl.
    _event_key(target, identifier, fn).listen(*args, **kw)
28,156
def analyse_subcommand(
        analyser: Analyser,
        param: Subcommand
) -> Tuple[str, SubcommandResult]:
    """
    分析 Subcommand 部分

    Args:
        analyser: 使用的分析器
        param: 目标Subcommand

    Returns:
        the subcommand's dest name and its parsed result (value/args/options).
    """
    # A subcommand may require a specific preceding sentence chain.
    if param.requires:
        if analyser.sentences != param.requires:
            raise ParamsUnmatched(f"{param.name}'s required is not '{' '.join(analyser.sentences)}'")
        analyser.sentences = []
    if param.is_compact:
        name, _ = analyser.next_data()
        if name.startswith(param.name):
            # Remove exactly the leading `param.name` prefix. The original used
            # `name.lstrip(param.name)`, which strips a *character set*, not a
            # prefix — e.g. "test" would also eat a following "t" or "e".
            analyser.reduce_data(name[len(param.name):], replace=True)
        else:
            raise ParamsUnmatched(f"{name} dose not matched with {param.name}")
    else:
        name, _ = analyser.next_data(param.separators)
        if name != param.name:  # 先匹配选项名称
            raise ParamsUnmatched(f"{name} dose not matched with {param.name}")

    name = param.dest
    res: SubcommandResult = {"value": None, "args": {}, 'options': {}}
    # A zero-length sub-part range means a bare subcommand with no payload.
    if param.sub_part_len.stop == 0:
        res['value'] = Ellipsis
        return name, res

    args = False
    subcommand = res['options']
    need_args = param.nargs > 0
    for _ in param.sub_part_len:
        sub_param = analyse_params(analyser, param.sub_params)  # type: ignore
        if sub_param and isinstance(sub_param, List):
            # Try each candidate option; rewind the analyser between attempts.
            for p in sub_param:
                _current_index = analyser.current_index
                _content_index = analyser.content_index
                try:
                    subcommand.setdefault(*analyse_option(analyser, p))
                    break
                except Exception as e:
                    exc = e
                    analyser.current_index = _current_index
                    analyser.content_index = _content_index
                    continue
            else:
                # Every candidate failed — re-raise the last failure.
                raise exc  # type: ignore  # noqa
        elif not args:
            res['args'] = analyse_args(analyser, param.args, param.nargs)
            args = True
    if need_args and not args:
        raise ArgumentMissing(config.lang.subcommand_args_missing.format(name=name))
    return name, res
28,157
def fetch_pg_types(columns_info, trans_obj): """ This method is used to fetch the pg types, which is required to map the data type comes as a result of the query. Args: columns_info: """ # get the default connection as current connection attached to trans id # holds the cursor which has query result so we cannot use that connection # to execute another query otherwise we'll lose query result. manager = get_driver(PG_DEFAULT_DRIVER).connection_manager(trans_obj.sid) default_conn = manager.connection(did=trans_obj.did) # Connect to the Server if not connected. res = [] if not default_conn.connected(): status, msg = default_conn.connect() if not status: return status, msg oids = [columns_info[col]['type_code'] for col in columns_info] if oids: status, res = default_conn.execute_dict( u"""SELECT oid, format_type(oid,null) as typname FROM pg_type WHERE oid IN %s ORDER BY oid; """, [tuple(oids)]) if not status: return False, res return status, res['rows'] else: return True, []
28,158
def open_popup(text) -> bool:
    """
    Opens popup when it's text is updated.

    :param text: the popup text; ``None`` means "no update".
    :return: True when a text value is present, False otherwise.
    """
    # Collapses the original if/else into the equivalent boolean expression.
    return text is not None
28,159
def se_beta_formatter(value: str) -> str:
    """
    SE Beta formatter.

    This formats SE beta values. A valid SE beta value is a non-negative
    float.

    @param value: raw string to validate
    @return: the value normalised through ``float`` and back to ``str``
    @raise ValueError: when the value is not parseable or is negative
    """
    try:
        se_beta = float(value)
        if se_beta >= 0:
            result = str(se_beta)
        else:
            # The original message was copy-pasted from a "position" field
            # formatter; it now names the actual field.
            raise ValueError(f'se_beta expected non-negative float "{value}"')
    except ValueError as value_error:
        raise ValueError(
            f'se_beta could not be parsed as float "{value}" details : {value_error}',
        ) from value_error
    return result
28,160
def replace_missing_data(
    data: pd.DataFrame,
    target_col: str,
    source_col: str,
    dropna: Optional[bool] = False,
    inplace: Optional[bool] = False,
) -> Optional[pd.DataFrame]:
    """Replace missing data in one column by data from another column.

    Parameters
    ----------
    data : :class:`~pandas.DataFrame`
        input data with values to replace
    target_col : str
        target column, i.e., column in which missing values should be replaced
    source_col : str
        source column, i.e., column values used to replace missing values in
        ``target_col``
    dropna : bool, optional
        whether to drop rows with missing values in ``target_col`` or not.
        Default: ``False``
    inplace : bool, optional
        whether to perform the operation inplace or not. Default: ``False``

    Returns
    -------
    :class:`~pandas.DataFrame` or ``None``
        dataframe with replaced missing values or ``None`` if ``inplace`` is
        ``True``

    """
    _assert_is_dtype(data, pd.DataFrame)

    # Work on a copy unless the caller explicitly asked for in-place edits.
    if not inplace:
        data = data.copy()

    data[target_col].fillna(data[source_col], inplace=True)
    if dropna:
        data.dropna(subset=[target_col], inplace=True)

    return None if inplace else data
28,161
def jsonpath_parse(data, jsonpath, match_all=False):
    """Parse value in the data for the given ``jsonpath``.

    Retrieve the nested entry corresponding to ``data[jsonpath]``. For example,
    a ``jsonpath`` of ".foo.bar.baz" means that the data section should
    conform to:

    .. code-block:: yaml

        ---
        foo:
            bar:
                baz: <data_to_be_extracted_here>

    :param data: The `data` section of a document.
    :param jsonpath: A multi-part key that references a nested path in
        ``data``.
    :param match_all: Whether to return all matches or just the first one.
    :returns: Entry that corresponds to ``data[jsonpath]`` if present,
        else None.
    """
    expression = _jsonpath_parse(_normalize_jsonpath(jsonpath))
    found = expression.find(data)
    if not found:
        # No matches — fall through to None, mirroring the documented contract.
        return None
    values = [match.value for match in found]
    return values if match_all else values[0]
28,162
def test_bug_size():
    """Two series of length 1500 should not trigger a size error.

    The warping paths matrix is of size 1501**2 = 2_253_001.
    If using 64bit values: 1501**2*64/(8*1024*1024) = 17.2MiB.
    """
    with util_numpy.test_uses_numpy() as np:
        s1 = np.random.rand(1500)
        s2 = np.random.rand(1500)
        # Fast (compiled) and pure-Python implementations must agree on the
        # DTW distance for the same inputs.
        d1, _ = dtw.warping_paths_fast(s1, s2)
        d2, _ = dtw.warping_paths(s1, s2)
        assert d1 == pytest.approx(d2)
28,163
def extract_urlparam(name, urlparam):
    """
    Attempts to extract a url parameter embedded in another URL parameter.
    Returns the value of ``name`` or None when absent.
    """
    if urlparam is None:
        return None
    marker = name + '='
    if marker not in urlparam:
        return None
    # Cut everything before the first occurrence, drop every marker, then take
    # the value up to the next '&'.
    tail = urlparam[urlparam.index(marker):].replace(marker, '')
    fields = tail.split('&')
    return fields[0] if fields else None
28,164
def get_nlb_data(elb_data, region, load_balancer_name, ssl_hc_path):
    """ Render a dictionary which contains Network Load Balancer attributes

    Maps a Classic Load Balancer description (`elb_data`, as returned by the
    ELB describe APIs) onto the spec pieces needed to create an equivalent
    NLB: top-level load balancer spec, listeners, target groups (+attributes)
    and registered instance ids.
    """
    if debug:
        logger.debug("Building the Network Load Balancer data structure")
    # this is used for building the load balancer spec
    nlb_data = {'VpcId': elb_data['LoadBalancerDescriptions'][0]['VPCId'],
                'Region': region,
                'Nlb_name': elb_data['LoadBalancerDescriptions'][0]['LoadBalancerName'],
                'Subnets': elb_data['LoadBalancerDescriptions'][0]['Subnets'],
                'Security_groups': elb_data['LoadBalancerDescriptions'][0]['SecurityGroups'],
                'Scheme': elb_data['LoadBalancerDescriptions'][0]['Scheme'],
                'Tags': elb_data['TagDescriptions'][0]['Tags'],
                'listeners': [],
                'Type': 'network',
                'target_group_attributes': [],
                'target_group_arns': []}
    # this is used for building the listeners specs
    for elb_listener in elb_data['LoadBalancerDescriptions'][0]['ListenerDescriptions']:
        listener = {'Protocol': elb_listener['Listener']['Protocol'],
                    'Port': elb_listener['Listener']['LoadBalancerPort'],
                    'TargetGroup_Port': elb_listener['Listener']['InstancePort'],
                    'TargetGroup_Protocol': elb_listener['Listener']['InstanceProtocol']}
        # Carry the CLB connection-draining timeout over as the target group's
        # deregistration delay.
        targetgroup_attribute = {
            'dereg_timeout_seconds_delay': str(elb_data['LoadBalancerAttributes']['ConnectionDraining']['Timeout']),
            'TargetGroup_Port': elb_listener['Listener']['InstancePort']
        }
        nlb_data['listeners'].append(listener)
        nlb_data['target_group_attributes'].append(targetgroup_attribute)
    # this is used for building the target groups
    nlb_data['target_groups'] = []
    target_group = {}
    # Get health check target (format "PROTOCOL:port[/path]")
    hc_target = elb_data['LoadBalancerDescriptions'][0]['HealthCheck']['Target']
    # Set health check interval — NLB only supports 10 or 30 seconds, so the
    # CLB interval is snapped to the nearest supported value.
    if elb_data['LoadBalancerDescriptions'][0]['HealthCheck']['Interval'] < 15:
        print("The minimal supported health check interval is 10. Setting it to 10 seconds")
        target_group['HealthCheckIntervalSeconds'] = 10
    else:
        print("The health check internal is set to 30 seconds")
        target_group['HealthCheckIntervalSeconds'] = 30
    # Set healthy and unhealthy threshold to the same value which is the
    # healthy threshold of Classic Load Balancer
    target_group['HealthyThresholdCount'] = elb_data['LoadBalancerDescriptions'][0]['HealthCheck'][
        'HealthyThreshold']
    target_group['UnhealthyThresholdCount'] = elb_data['LoadBalancerDescriptions'][0]['HealthCheck'][
        'HealthyThreshold']
    # Set VPC ID
    target_group['VpcId'] = elb_data['LoadBalancerDescriptions'][0]['VPCId']
    # Set health check protocol
    target_group['HealthCheckProtocol'] = hc_target.split(':')[0]
    # If health check protocol is TCP
    if hc_target.split(':')[0] == "TCP":
        target_group['HealthCheckPort'] = hc_target.split(':')[1]
    # If health check protocol is SSL, map it to HTTPS with the provided path
    elif hc_target.split(':')[0] == "SSL":
        target_group['HealthCheckProtocol'] = "HTTPS"
        target_group['HealthCheckPort'] = hc_target.split(':')[1]
        target_group['HealthCheckPath'] = ssl_hc_path
    else:
        # HTTP/HTTPS: split "port/path" into its components.
        target_group['HealthCheckPort'] = hc_target.split(':')[1].split('/')[0]
        target_group['HealthCheckPath'] = '/' + hc_target.split('/', 1)[1]
    for listener in nlb_data['listeners']:
        target_group['Protocol'] = listener['TargetGroup_Protocol']
        target_group['Port'] = listener['TargetGroup_Port']
        # target group name comes from the first 18 character of the Classic Load Balancer name, \
        # "-nlb-tg-" and target group port.
        target_group['Name'] = load_balancer_name[: 18] + "-nlb-tg-" + \
            str(listener['TargetGroup_Port'])
        # Only append unique Target Group (copy — `target_group` is reused)
        if target_group not in nlb_data['target_groups']:
            nlb_data['target_groups'].append(target_group.copy())
    # Get registered backend instances
    nlb_data['instanceIds'] = []
    for instance in elb_data['LoadBalancerDescriptions'][0]['Instances']:
        nlb_data['instanceIds'].append(instance['InstanceId'])
    if debug:
        logger.debug("nlb_data:")
        logger.debug(nlb_data)
    return nlb_data
28,165
def get_objects_for_group(group, perms, klass=None, any_perm=False, accept_global_perms=True):
    """
    Returns a queryset of objects for which there can be calculated a path....

    Not implemented yet; calling this always raises NotImplementedError.
    """
    raise NotImplementedError
28,166
def _get_required_var(key: str, data: Dict[str, Any]) -> str: """Get a value from a dict coerced to str. raise RequiredVariableNotPresentException if it does not exist""" value = data.get(key) if value is None: raise RequiredVariableNotPresentException(f"Missing required var {key}") return str(value)
28,167
def phones():
    """Return a list of phones used in the main dict.

    Each entry is a ``(symbol, properties)`` tuple parsed from one line of
    the phones stream.
    """
    decoded = (raw.decode("utf-8").strip().split() for raw in phones_stream())
    return [(fields[0], fields[1:]) for fields in decoded]
28,168
def the_state_and_the_temperature_of_the_tpm_hw_can_be_monitored(devices):
    """the state and the temperature of the TPM_HW can be monitored..

    BDD step: samples the tile FPGA temperature/time repeatedly, logs the
    series, then puts the telescope back in standby.
    """
    if devices.mccs_tile_0001.simulationMode == 1:
        logger.info('MCCS tile 0001 is in simulation mode')
    devices.print_device_states()
    device = devices.mccs_tile_0001
    measurement_cadence = 1.0  # seconds to wait between measurements
    num_measurements_required = 20  # Code will loop until this many measurements are taken
    temperature = []
    mccs_time = []
    while len(temperature) < num_measurements_required:
        temperature.append(device.fpga1_temperature)
        mccs_time.append(device.fpga1_time)
        time.sleep(measurement_cadence)
    # Variation asserts kept disabled: simulated hardware may legitimately
    # report constant values.
    # num_secs = measurement_cadence * num_measurements_required
    # assert (len(set(temperature))!=1), f"No variation seen in the temperature values of {device} over {num_secs} seconds"
    # assert (len(set(mccs_time))!=1), f"No variation seen in the time values of {device} over {num_secs} seconds"
    logger.info(temperature)
    logger.info(mccs_time)
    # Restore standby, waiting (up to 5) for all devices to report State OFF.
    with atomic(devices.all_device_names, 'State', 'OFF', 5):
        devices.tmc_central_node.StandByTelescope()
28,169
def load_specific_forecast(city, provider, date, forecasts): """reads in the city, provider, date and forecast_path and returns the data queried from the forecast path :param city: city for which the weather forecast is for :type string :param provider: provider for which the weather forecast is for :type string :param date: date for which the weather forecast is for, e.g. '2015-06-29' :type datetime :param forecasts: dataframe containing all forecasts :type pandas dataframe :return: dataFrame containing relevant dwd data """ # get rows with the correct city, provider and date data_city = forecasts[forecasts['city']==city] data_provider = data_city[data_city['Provider']==provider] if provider != 'openweathermap': # cut the time data_provider.loc[:, 'Date'] = data_provider.loc[:, 'Date'].map(cut_time, na_action='ignore') data_provider.loc[:, 'ref_date'] = data_provider.loc[:,'ref_date'].map(cut_time, na_action='ignore') else: data_provider.loc[:, 'ref_date'] = data_provider.loc[:,'ref_date'].map(cut_time,na_action='ignore') data_provider.loc[:, 'Date'] = data_provider.loc[:,'pred_offset'].map(cut_time, na_action='ignore') data_provider.loc[:, 'pred_offset'] = (data_provider.loc[:,'Date'] - data_provider['ref_date']).\ map(lambda delta: delta/np.timedelta64(1, 'D'), na_action='ignore') return data_provider[data_provider['Date'] == date]
28,170
def eeg_to_montage(eeg):
    """Returns an instance of montage from an eeg file

    Builds a custom MNE Montage from the first three `loc` coordinates of
    every channel; returns None when no usable positions are stored.
    """
    from numpy import array, isnan
    from mne.channels import Montage
    pos = array([eeg.info['chs'][i]['loc'][:3] for i in range(eeg.info['nchan'])])
    # NOTE(review): `.all()` only rejects montages where *every* coordinate is
    # NaN; a partially-NaN montage still passes — confirm `.any()` was not
    # intended.
    if not isnan(pos).all():
        selection = [i for i in range(eeg.info['nchan'])]
        montage = Montage(pos, eeg.info['ch_names'], selection=selection, kind='custom')
        return montage
    else:
        return None
28,171
def test_ass_style_list_modifying_style_emits_modification_event_in_parent() -> None:
    """Test that modifying an style emits a modification event
    in the context of its parent list.
    """
    subscriber = Mock()
    style = AssStyle(name="dummy style")
    styles = AssStyleList()
    styles.append(style)
    # Subscribe only after the append so the mutation below is the sole event.
    styles.items_modified.subscribe(subscriber)
    style.scale(3)
    subscriber.assert_called_once()
28,172
def mul_inv2(x: int, k: int) -> int:
    """ Computes x*2^{-1} in (Z/3^kZ)*."""
    modulus = 3 ** k
    return (x * inv2(k)) % modulus
28,173
def Be(Subject = P.CA(), Contract=FALSE):
    """Synonym for Agree("be")."""
    # NOTE(review): the default Subject is built once, when this def executes
    # (P.CA() is called at import time) and shared across calls — confirm that
    # sharing is intended.
    return Agree("be", Subject, Contract)
28,174
def line_search_armijo(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=0.99):
    """
    Armijo linesearch function that works with matrices

    find an approximate minimum of f(xk+alpha*pk) that satifies the
    armijo conditions.

    Parameters
    ----------
    f : function
        loss function
    xk : np.ndarray
        initial position
    pk : np.ndarray
        descent direction
    gfk : np.ndarray
        gradient of f at xk
    old_fval : float
        loss value at xk
    args : tuple, optional
        arguments given to f
    c1 : float, optional
        c1 const in armijo rule (>0)
    alpha0 : float, optional
        initial step (>0)

    Returns
    -------
    alpha : float
        step that satisfy armijo conditions
    fc : int
        nb of function call
    fa : float
        loss value at step alpha

    """
    xk = np.atleast_1d(xk)
    # Mutable cell so the closure can count function evaluations.
    fc = [0]

    def phi(alpha1):
        # 1-D restriction of f along the search direction.
        fc[0] += 1
        return f(xk + alpha1 * pk, *args)

    if old_fval is None:
        phi0 = phi(0.)
    else:
        phi0 = old_fval

    # Directional derivative of f at xk along pk.
    derphi0 = np.sum(gfk.T * pk)
    alpha, phi1 = scalar_search_armijo(phi, phi0, derphi0, c1=c1, alpha0=alpha0)
    return alpha, fc[0], phi1
28,175
def lprob2sigma(lprob):
    """ translates a log_e(probability) to units of Gaussian sigmas """
    if lprob > -36.:
        # Direct inversion of the Gaussian tail via the percent-point function.
        sigma = norm.ppf(1. - 0.5 * exp(1. * lprob))
    else:
        # Asymptotic tail expansion: ppf underflows for such tiny probabilities.
        sigma = sqrt(log(2. / pi) - 2. * log(8.2) - 2. * lprob)
    return float(sigma)
28,176
def convert_from_fortran_bool(stringbool):
    """
    Converts a string in this case ('T', 'F', or 't', 'f') to True or False

    :param stringbool: a string ('t', 'f', 'F', 'T')

    :return: boolean  (either True or False)
    """
    # Already a boolean — nothing to convert.
    if isinstance(stringbool, bool):
        return stringbool
    if isinstance(stringbool, str):
        if stringbool in ('True', 't', 'T'):
            return True
        if stringbool in ('False', 'f', 'F'):
            return False
        raise ValueError(f"Could not convert: '{stringbool}' to boolean, "
                         "which is not 'True', 'False', 't', 'T', 'F' or 'f'")
    raise TypeError(f"Could not convert: '{stringbool}' to boolean, "
                    'only accepts str or boolean')
28,177
def generate(host, port, user, password, dbname, prompt, verbose):
    """
    Generate a YAML spec that represents the role attributes, memberships,
    object ownerships, and privileges for all roles in a database.

    Note that roles and memberships are database cluster-wide settings, i.e.
    they are the same across multiple databases within a given Postgres
    instance. Object ownerships and privileges are specific to each individual
    database within a Postgres instance.

    Inputs:

        host - str; the database server host

        port - str; the database server port

        user - str; the database user name

        password - str; the database user's password

        dbname - str; the database to connect to and configure

        prompt - bool; whether to prompt for a password

        verbose - bool; whether to show all queries that are executed and all
            debug log messages during execution
    """
    if verbose:
        # Crank the root logger down to DEBUG so every query is shown.
        logging.getLogger('').setLevel(logging.DEBUG)

    if prompt:
        password = getpass.getpass()

    output_spec(create_spec(host, port, user, password, dbname, verbose))
28,178
def gaussian_device(n_subsystems):
    """Number of qubits or modes.

    Fixture-style factory: wraps the requested wire count in a DummyDevice.
    """
    return DummyDevice(wires=n_subsystems)
28,179
def create_playlist(current_user, user_id):
    """
    Creates a playlist.

    :param current_user: the authenticated user (supplied by the auth
        decorator; not used directly here).
    :param user_id: the ID of the user.
    :return: 200, playlist created successfully.
    """
    # Verifies the user exists; .one() raises NoResultFound otherwise.
    user = session.query(User).filter_by(id=user_id).one()
    data = request.get_json()
    new_playlist = Playlist(name=data['name'],
                            description=data['description'],
                            user_id=user_id)
    db.session.add(new_playlist)
    db.session.commit()
    # The original message was a broken template ('... successfully %name')
    # that was never interpolated; report the actual playlist name.
    return jsonify({
        'message': f"playlist {data['name']} created successfully"
    })
28,180
def gram_matrix(y):
    """
    Compute batched Gram matrices of feature maps.

    Input shape: b,c,h,w
    Output shape: b,c,c
    """
    b, ch, h, w = y.size()
    # Flatten each channel's spatial plane to a row vector.
    flattened = y.view(b, ch, h * w)
    # Channel-by-channel inner products, normalised by the feature count.
    return flattened.bmm(flattened.transpose(1, 2)) / (ch * h * w)
28,181
def process_pair_v2(data, global_labels): """ :param path: graph pair data. :return data: Dictionary with data, also containing processed DGL graphs. """ # print('Using v2 process_pair') edges_1 = data["graph_1"] #diff from v1 edges_2 = data["graph_2"] #diff from v1 edges_1 = np.array(edges_1, dtype=np.int64); edges_2 = np.array(edges_2, dtype=np.int64); G_1 = dgl.DGLGraph((edges_1[:,0], edges_1[:,1])); G_2 = dgl.DGLGraph((edges_2[:,0], edges_2[:,1])); G_1.add_edges(G_1.nodes(), G_1.nodes()) #diff from v1 G_2.add_edges(G_2.nodes(), G_2.nodes()) #diff from v1 edges_1 = torch.from_numpy(edges_1.T).type(torch.long) edges_2 = torch.from_numpy(edges_2.T).type(torch.long) data["edge_index_1"] = edges_1 data["edge_index_2"] = edges_2 features_1, features_2 = [], [] for n in data["labels_1"]: features_1.append([1.0 if global_labels[n] == i else 0.0 for i in global_labels.values()]) for n in data["labels_2"]: features_2.append([1.0 if global_labels[n] == i else 0.0 for i in global_labels.values()]) G_1.ndata['features'] = torch.FloatTensor(np.array(features_1)); G_2.ndata['features'] = torch.FloatTensor(np.array(features_2)); G_1.ndata['type'] = np.array(data["labels_1"]); G_2.ndata['type'] = np.array(data["labels_2"]); data['G_1'] = G_1; data['G_2'] = G_2; norm_ged = data["ged"]/(0.5*(len(data["labels_1"])+len(data["labels_2"]))) data["target"] = torch.from_numpy(np.exp(-norm_ged).reshape(1, 1)).view(-1).float() return data
28,182
def calculate_line_changes(diff: Diff) -> Tuple[int, int]:
    """Return a two-tuple (additions, deletions) of a diff.

    Counts the unified-diff lines beginning with "+ " / "- " (note: the
    marker character followed by a space).
    """
    added = 0
    removed = 0
    joined = "\n".join(diff.raw_unified_diff())
    for raw_line in joined.splitlines():
        if raw_line.startswith("+ "):
            added += 1
        elif raw_line.startswith("- "):
            removed += 1
    return added, removed
28,183
def get_synonyms(token):
    """Get synonyms of a word using WordNet.

    args:
        token: string
    returns:
        synonyms: list containing synonyms as strings, or None when WordNet
            has no synsets for the token.
    """
    # Look synsets up once instead of twice.
    synsets = wordnet.synsets(token)
    if len(synsets) == 0:
        return None
    synonyms = []
    for synset in synsets:
        for lemma in synset.lemmas():
            synonyms.append(lemma.name())
    synonyms = _remove_repeated_elements(synonyms)
    # The token itself is not guaranteed to appear among the lemma names
    # (e.g. for inflected forms WordNet returns base-form lemmas), so an
    # unconditional remove() could raise ValueError.
    if token in synonyms:
        synonyms.remove(token)
    return synonyms
28,184
def concat_experiments_on_channel(experiments, channel_name):
    """Combines channel values from experiments into one dataframe.

    This function helps to compare channel values from a list of experiments
    by combining them in a dataframe. E.g: Say we want to extract the
    `log_loss` channel values for a list of experiments. The resulting
    dataframe will have ['id','x_log_loss','y_log_loss'] columns.

    Args:
        experiments(list): list of `neptune.experiments.Experiment` objects.
        channel_name(str): name of the channel for which we want to extract
            values.

    Returns:
        `pandas.DataFrame`: Dataframe of ['id','x_CHANNEL_NAME','y_CHANNEL_NAME']
        values concatenated from a list of experiments.

    Raises:
        ValueError: If none of the experiments contains the requested channel.

    Examples:
        Instantiate a session::

            from neptune.sessions import Session
            session = Session()

        Fetch a project and a list of experiments::

            project = session.get_projects('neptune-ai')['neptune-ai/Salt-Detection']
            experiments = project.get_experiments(state=['aborted'], owner=['neyo'], min_running_time=100000)

        Construct a channel value dataframe::

            from neptunecontrib.api.utils import concat_experiments_on_channel
            compare_df = concat_experiments_on_channel(experiments,'unet_0 epoch_val iout loss')

    Note:
        If an experiment in the list of experiments does not contain the
        channel with a specified channel_name it will be omitted.
    """
    combined_df = []
    for experiment in experiments:
        # Experiments lacking the requested channel are silently skipped,
        # as documented in the Note above.
        if channel_name in experiment.get_channels():
            channel_df = experiment.get_numeric_channels_values(channel_name)
            channel_df['id'] = experiment.id
            combined_df.append(channel_df)
    # pd.concat([]) would raise an opaque "No objects to concatenate";
    # fail with a message that names the missing channel instead.
    if not combined_df:
        raise ValueError(
            "None of the given experiments contains channel '{}'".format(channel_name))
    return pd.concat(combined_df, axis=0)
28,185
def sghmc_naive_mh_noresample_uni(u_hat_func, du_hat_func, epsilon, nt, m, M,
                                  V, theta_init, r_init, formula):
    """
    Naive Stochastic Gradient Hamiltonian Monte Carlo with a
    Metropolis-Hastings correction, for unidimensional targets,
    without momentum resampling between iterations.
    """
    noise_var = 1/2*epsilon*V  # the B term of naive SGHMC
    thetas = [theta_init]
    momenta = [r_init]
    for t in range(nt - 1):
        step = max(epsilon, formula(t))
        cur_theta, cur_r = thetas[-1], momenta[-1]
        # m leapfrog-style updates with injected gradient noise.
        for _ in range(m):
            cur_theta = cur_theta + step*1/M*cur_r
            cur_r = (cur_r - step*du_hat_func(cur_theta)
                     + np.random.normal(0, np.sqrt(2*noise_var*step)))
        # Metropolis-Hastings accept/reject on the proposed state.
        accept_draw = np.random.uniform()
        hamiltonian_prop = u_hat_func(cur_theta) + 1/2*cur_r**2*1/M
        hamiltonian_cur = u_hat_func(thetas[-1]) + 1/2*momenta[-1]**2*1/M
        ratio = np.exp(hamiltonian_cur - hamiltonian_prop)
        if accept_draw < min(1, ratio):
            thetas.append(cur_theta)
            momenta.append(cur_r)
    return [thetas, momenta]
28,186
def detect_voices(aud, sr=44100):
    """
    Detect the presence and absence of voices in an array of audio.

    Args:
        aud: 1-D float array of audio samples, assumed scaled to [-1.0, 1.0]
            (they are multiplied by the int16 max below) — TODO confirm.
        sr: sample rate of `aud`. NOTE(review): currently unused — the VAD is
            always called with the module-level SAMPLING_RATE constant;
            confirm whether callers expect `sr` to be honored.

    Returns:
        List of booleans, one per SMOOTHING_WSIZE-sample window, True where
        the VAD reports speech (presumably a webrtcvad-style detector, given
        the is_speech/sample_rate API — verify).
    """
    # Convert float samples to 16-bit PCM bytes, the format VAD.is_speech
    # consumes.
    pcm_16 = np.round(
        (np.iinfo(np.int16).max * aud)).astype(np.int16).tobytes()
    # Slide a SMOOTHING_WSIZE-sample window over the audio. The byte offset
    # is 2 * sample index because every int16 sample occupies 2 bytes.
    voices = [
        VAD.is_speech(pcm_16[2 * ix:2 * (ix + SMOOTHING_WSIZE)],
                      sample_rate=SAMPLING_RATE)
        for ix in range(0, len(aud), SMOOTHING_WSIZE)
    ]
    return voices
28,187
def turn_on(hass, entity_id=None):
    """Turn on the specified automation, or all automations when no
    entity_id is given."""
    service_data = {}
    if entity_id:
        service_data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_TURN_ON, service_data)
28,188
def process_to_binary_otsu_image(img_path, inverse=False, max_threshold=255):
    """
    Purpose:
        Process an image to binary colours using binary otsu thresholding.
    Args:
        img_path - path to the image to process
        inverse - if true an inverted binary thresholding will be applied
                  (optional).
        max_threshold - the max value to be given if a pixels value is more
                        than the threshold value (optional).
    Returns:
        binary_image_tuple[0] - optimal threshold value found by otsu
                                threshold.
        binary_image_tuple[1] - binary image.
    """
    grayscale = convert_bgr_to_gray(cv2.imread(img_path))
    # Select the plain or inverted Otsu variant, then apply it.
    threshold_fn = threshold_binary_inv_otsu if inverse else threshold_binary_otsu
    return threshold_fn(grayscale, max_threshold)
28,189
def _fold_in_str(rng, data): """Folds a string into a jax.random.PRNGKey using its SHA-1 hash.""" m = hashlib.sha1() m.update(data.encode('utf-8')) d = m.digest() hash_int = int.from_bytes(d[:4], byteorder='big', signed=True) return random.fold_in(rng, hash_int)
28,190
def create_task_dialog(request):
    """Serve the generic data dialog configured for creating a Task."""
    # Delegate to the shared dialog builder with Task-specific settings.
    return data_dialog(request, entity_type='Task', mode='create')
28,191
def deroulementRandom(b):
    """Play the Tic-Tac-Toe game randomly.

    Prints the board at every step, recursing with a random move until the
    game is over, then prints the outcome.
    """
    print("----------")
    print(b)
    if not b.is_game_over():
        RandomMove(b)
        deroulementRandom(b)
        return
    # Game finished: report the winner (1 = X, -1 = O, anything else = draw).
    outcome = getresult(b)
    if outcome == 1:
        print("Victory of X")
    elif outcome == -1:
        print("Victory of O")
    else:
        print("Draw")
28,192
def raw_rearrange(da, pattern, **kwargs):
    """Crudely wrap `einops.rearrange <https://einops.rocks/api/rearrange/>`_.

    Wrapper around einops.rearrange with a very similar syntax.
    Spaces, parenthesis ``()`` and `->` are not allowed in dimension names.

    Parameters
    ----------
    da : xarray.DataArray
        Input array
    pattern : string
        Pattern string. Same syntax as patterns in einops with two caveats:

        * Unless splitting or stacking, you must use the actual dimension names.
        * When splitting or stacking you can use `(dim1 dim2)=dim`. This is
          `necessary` for the left hand side as it identifies the dimension to
          split, and optional on the right hand side, if omitted the stacked
          dimension will be given a default name.

    kwargs : dict, optional
        Passed to :func:`xarray_einstats.einops.rearrange`

    Returns
    -------
    xarray.DataArray

    See Also
    --------
    xarray_einstats.einops.rearrange: More flexible and powerful wrapper over
        einops.rearrange. It is also more verbose.
    """
    # Without "->" the whole pattern describes the output; the input side is
    # then inferred by the underlying rearrange (in_dims=None).
    in_dims = None
    if "->" in pattern:
        lhs, rhs = pattern.split("->")
        in_dims = translate_pattern(lhs)
    else:
        rhs = pattern
    return rearrange(da, out_dims=translate_pattern(rhs), in_dims=in_dims, **kwargs)
28,193
def get_users() -> Tuple[int, ...]:
    """Return the ids of all users that have a search entry in the db.

    Scans keys matching ``DB_SEARCH_PREFIX*`` and extracts the numeric user
    id that follows the prefix.
    """
    db = get_database_connection()
    user_searches = db.keys(pattern=f'{DB_SEARCH_PREFIX}*')
    # NOTE: str.lstrip(DB_SEARCH_PREFIX) would strip any leading characters
    # that appear anywhere in the prefix *as a character set*, not the prefix
    # itself — ids starting with such characters would be corrupted. Slice
    # the exact prefix length off instead.
    prefix_len = len(DB_SEARCH_PREFIX)
    user_ids = [
        int(user_search.decode('utf-8')[prefix_len:])
        for user_search in user_searches
    ]
    return tuple(user_ids)
28,194
def rotate_files(fname, cnt=0, max_cnt=5):
    """Function:  rotate_files

    Description:  Move a set of files up a sequence of backup files (e.g.
        file.0, file.1, file.2, etc).  It is a recursive function as it will
        find the largest sequence file or opening in the sequence and then
        rename the files appropriately, deepest sequence number first.

    Arguments:
        (input) fname -> File name.
        (input) cnt -> Current sequence count.
        (input) max_cnt -> Largest sequence of files to use.
    """
    src = fname + "." + str(cnt)
    if cnt < max_cnt and os.path.isfile(src):
        # Shift the higher-numbered files out of the way first.
        rotate_files(fname, cnt + 1, max_cnt)

    # Rename file to +1. Guarded so a missing fname.cnt (e.g. the very
    # first rotation when no ".0" file exists yet) does not raise
    # FileNotFoundError.
    if os.path.isfile(src):
        os.rename(src, fname + "." + str(cnt + 1))
28,195
async def login(
    email: str,
    password: str,
    session: Optional[ClientSession] = None,
    *,
    conf_update_interval: Optional[timedelta] = None,
    device_set_debounce: Optional[timedelta] = None,
):
    """Login using email and password.

    Args:
        email: account email address.
        password: account password.
        session: optional aiohttp ClientSession to reuse; when omitted a
            temporary session is created just for the login request.
        conf_update_interval: forwarded to the Client constructor.
        device_set_debounce: forwarded to the Client constructor.

    Returns:
        A Client built from the login response. NOTE(review): when no session
        was supplied, Client receives session=None (the temporary session is
        closed when the `async with` exits) — presumably Client creates its
        own session in that case; confirm.
    """
    if session:
        response = await _do_login(session, email, password, headers=_headers(""))
    else:
        # NOTE(review): this branch omits headers=_headers("") — confirm
        # whether _do_login applies an equivalent default.
        async with ClientSession() as _session:
            response = await _do_login(_session, email, password)
    return Client(
        response.get("userunits", '0'),  # falls back to '0' if the key is absent
        session,
        conf_update_interval=conf_update_interval,
        device_set_debounce=device_set_debounce,
    )
28,196
def test_field_mapper_operate_on_values(sdc_builder, sdc_executor):
    """
    Test the Field Mapper processor, by value.  Rounds double fields up to the
    nearest integer (ceiling).  The pipeline that will be constructed is:

        dev_raw_data_source (JSON data) >> field_mapper (ceiling) >> trash
    """
    raw_data = """{
      "someData": {
        "value1": 19.2,
        "value2": -16.5,
        "value3": 1987.44,
        "subData": {
          "value4": 0.45
        }
      },
      "moreData": {
        "value5": 19884.5,
        "value6": -0.25
      }
    }"""

    builder = sdc_builder.get_pipeline_builder()

    source = builder.add_stage('Dev Raw Data Source')
    source.set_attributes(data_format='JSON', raw_data=raw_data)

    # Apply math:ceil to every field whose type is DOUBLE.
    field_mapper = builder.add_stage('Field Mapper', type='processor')
    field_mapper.set_attributes(operate_on='FIELD_VALUES',
                                conditional_expression='${f:type() == \'DOUBLE\'}',
                                mapping_expression='${math:ceil(f:value())}',
                                maintain_original_paths=False)

    trash = builder.add_stage('Trash')

    source >> field_mapper >> trash
    pipeline = builder.build('Field mapper - The Daves I Know')
    sdc_executor.add_pipeline(pipeline)

    snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
    sdc_executor.stop_pipeline(pipeline)
    sdc_executor.remove_pipeline(pipeline)

    output_record = snapshot[field_mapper.instance_name].output[0]
    expected = {
        '/someData/value1': 20,
        '/someData/value2': -16,
        '/someData/value3': 1988,
        '/someData/subData/value4': 1,
        '/moreData/value5': 19885,
        '/moreData/value6': 0,
    }
    for path, value in expected.items():
        assert output_record.get_field_data(path) == value
28,197
def rcomp_prediction(system, rcomp, predargs, init_cond):
    """
    Make a prediction with the given system

    Parameters:
        system (str): Name of the system to predict
        rcomp (ResComp): Trained reservoir computer
        predargs (variable length arguments): Passed directly into
            rcomp.predict
        init_cond (dict): Keyword args passed rcomp.predict

    Returns:
        pre (ndarray): Reservoir computer prediction
    """
    # The soft-robot system unpacks predargs as separate positional
    # arguments; every other system passes them through as one argument.
    if system == "softrobot":
        return rcomp.predict(*predargs, **init_cond)
    return rcomp.predict(predargs, **init_cond)
28,198
def signal_handler(signum, frame):
    """Handle explicit interruptions (e.g. Ctrl-C): clean up and exit.

    Args:
        signum: signal number delivered by the OS (was named `signal`,
            which shadowed the stdlib module name).
        frame: current stack frame at delivery time (unused).
    """
    # logging.exception() is only meaningful inside an `except` block; calling
    # it here logged a bogus "NoneType: None" traceback. Plain error() keeps
    # the message without the fake traceback.
    logging.error('Received Ctrl-C, terminating...')
    if sock:
        kill_socket()
    sys.exit(1)
28,199