code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def find_classes(self, name=".*", no_external=False):
    """Yield every `ClassAnalysis` whose class name matches a regex.

    :param name: regular expression matched against the class name
        (default ".*", i.e. every class)
    :param no_external: if True, skip classes backed by an ExternalClass
        (default False)
    :rtype: generator of `ClassAnalysis`
    """
    for class_name, analysis in self.classes.items():
        # Only consult the VM class when external filtering is requested.
        if no_external and isinstance(analysis.get_vm_class(), ExternalClass):
            continue
        if re.match(name, class_name):
            yield analysis
Find classes by name, using regular expression This method will return all ClassAnalysis Object that match the name of the class. :param name: regular expression for class name (default ".*") :param no_external: Remove external classes from the output (default False) :rtype: generator of `ClassAnalysis`
def _ss_matrices(self,beta): """ Creates the state space matrices required Parameters ---------- beta : np.array Contains untransformed starting values for latent variables Returns ---------- T, Z, R, Q : np.array State space matrices used in KFS algorithm """ T = np.identity(self.state_no) Z = self.X R = np.identity(self.state_no) Q = np.identity(self.state_no) for i in range(0,self.state_no): Q[i][i] = self.latent_variables.z_list[i].prior.transform(beta[i]) return T, Z, R, Q
Creates the state space matrices required Parameters ---------- beta : np.array Contains untransformed starting values for latent variables Returns ---------- T, Z, R, Q : np.array State space matrices used in KFS algorithm
def linearRegressionAnalysis(series):
    """Return (factor, offset) of the least-squares linear fit of *series*.

    ``None`` entries are ignored.  Returns ``None`` when the fit is
    degenerate (zero denominator).
    """
    n = safeLen(series)
    # Accumulate all four sums in a single pass over the series.
    sum_i = sum_v = sum_ii = sum_iv = 0
    for i, v in enumerate(series):
        if v is None:
            continue
        sum_i += i
        sum_v += v
        sum_ii += i * i
        sum_iv += i * v
    denominator = float(n * sum_ii - sum_i * sum_i)
    if denominator == 0:
        return None
    factor = (n * sum_iv - sum_i * sum_v) / denominator / series.step
    offset = (sum_ii * sum_v - sum_iv * sum_i) / denominator - factor * series.start
    return factor, offset
Returns factor and offset of linear regression function by least squares method.
def select_elements(self, json_string, expr):
    """Return list of elements from _json_string_, matching
    [ http://jsonselect.org/ | JSONSelect] expression.

    *DEPRECATED* JSON Select query language is outdated and not supported
    any more. Use other keywords of this library to query JSON.

    *Args:*\n
    _json_string_ - JSON string;\n
    _expr_ - JSONSelect expression;

    *Returns:*\n
    List of found elements or ``None`` if no elements were found
    """
    parsed = self.string_to_json(json_string)
    # parsing jsonselect
    found = list(jsonselect.match(sel=expr, obj=parsed))
    return found or None
Return list of elements from _json_string_, matching [ http://jsonselect.org/ | JSONSelect] expression. *DEPRECATED* JSON Select query language is outdated and not supported any more. Use other keywords of this library to query JSON. *Args:*\n _json_string_ - JSON string;\n _expr_ - JSONSelect expression; *Returns:*\n List of found elements or ``None`` if no elements were found *Example:*\n | *Settings* | *Value* | | Library | JsonValidator | | Library | OperatingSystem | | *Test Cases* | *Action* | *Argument* | *Argument* | | Select json elements | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json | | | ${json_elements}= | Select elements | ${json_example} | .author:contains("Evelyn Waugh")~.price | =>\n | 12.99
def to_dict(dictish):
    """Coerce a dictionary-like object into a proper ``dict``.

    Tries the Python-2 style ``iterkeys`` first, then ``keys``.
    Raises ``ValueError`` when the object exposes neither.
    """
    for key_attr in ('iterkeys', 'keys'):
        if hasattr(dictish, key_attr):
            return {k: dictish[k] for k in getattr(dictish, key_attr)()}
    raise ValueError(dictish)
Given something that closely resembles a dictionary, we attempt to coerce it into a propery dictionary.
def depth_soil_density(self, value=None):
    """Corresponds to IDD Field `depth_soil_density`.

    Args:
        value (float): value for IDD Field `depth_soil_density`
            Unit: kg/m3
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is None:
        # Missing value: store as-is without validation.
        self._depth_soil_density = None
        return
    try:
        converted = float(value)
    except ValueError:
        raise ValueError(
            'value {} need to be of type float '
            'for field `depth_soil_density`'.format(value))
    self._depth_soil_density = converted
Corresponds to IDD Field `depth_soil_density` Args: value (float): value for IDD Field `depth_soil_density` Unit: kg/m3 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
def multi_char_literal(chars):
    """Emulate character integer literals in C.

    Given a string "abc", return the value of the C single-quoted
    literal 'abc': each character is packed as one big-endian byte.
    """
    result = 0
    for char in chars:
        result = (result << 8) | ord(char)
    return result
Emulates character integer literals in C. Given a string "abc", returns the value of the C single-quoted literal 'abc'.
def processIqRegistry(self, entity):
    """Dispatch a received iq stanza to its registered callback.

    :type entity: IqProtocolEntity
    :return: True when the entity is a tracked iq (callback fired if one
        was registered for its type), False otherwise.
    """
    if entity.getTag() != "iq":
        return False
    iq_id = entity.getId()
    if iq_id not in self.iqRegistry:
        return False
    # One-shot registration: remove the entry before dispatching.
    original_iq, on_success, on_error = self.iqRegistry.pop(iq_id)
    entity_type = entity.getType()
    if entity_type == IqProtocolEntity.TYPE_RESULT and on_success:
        on_success(entity, original_iq)
    elif entity_type == IqProtocolEntity.TYPE_ERROR and on_error:
        on_error(entity, original_iq)
    return True
:type entity: IqProtocolEntity
def get_response(cmd, conn):
    """Read and parse the spamd reply to command *cmd* from connection *conn*.

    Returns a dict with keys ``code``, ``message``, ``isspam``, ``score``,
    ``basescore``, ``report``, ``symbols`` and ``headers`` (plus
    ``didset``/``didremove`` when *cmd* is ``'TELL'``).

    Raises ``SpamCResponseError`` when the first line of the reply does not
    match the expected spamd status-line format.
    """
    # Buffered file object over the connection's socket.
    # NOTE(review): mode 'rb' yields bytes on Python 3, while the parsing
    # below splits on the str '\r\n' -- presumably this targets Python 2 or
    # the socket wrapper decodes; confirm before porting.
    resp = conn.socket().makefile('rb', -1)
    # Defaults for every field a caller might look up.
    resp_dict = dict(
        code=0,
        message='',
        isspam=False,
        score=0.0,
        basescore=0.0,
        report=[],
        symbols=[],
        headers={},
    )
    if cmd == 'TELL':
        # TELL replies additionally report what was set/removed.
        resp_dict['didset'] = False
        resp_dict['didremove'] = False
    data = resp.read()
    lines = data.split('\r\n')
    for index, line in enumerate(lines):
        if index == 0:
            # First line is the protocol status line (code + message).
            match = RESPONSE_RE.match(line)
            if not match:
                raise SpamCResponseError(
                    'spamd unrecognized response: %s' % data)
            resp_dict.update(match.groupdict())
            resp_dict['code'] = int(resp_dict['code'])
        else:
            if not line.strip():
                continue
            # Spam verdict line: extracts isspam / score / basescore.
            match = SPAM_RE.match(line)
            if match:
                tmp = match.groupdict()
                resp_dict['score'] = float(tmp['score'])
                resp_dict['basescore'] = float(tmp['basescore'])
                resp_dict['isspam'] = tmp['isspam'] in ['True', 'Yes']
            if not match:
                if cmd == 'SYMBOLS':
                    # Rule names hit by the message, one entry per match.
                    match = PART_RE.findall(line)
                    for part in match:
                        resp_dict['symbols'].append(part)
            if not match and cmd != 'PROCESS':
                # Per-rule report entries: (sign, score, name, description).
                match = RULE_RE.findall(line)
                if match:
                    resp_dict['report'] = []
                    for part in match:
                        score = part[0] + part[1]
                        score = score.strip()
                        resp_dict['report'].append(
                            dict(score=score,
                                 name=part[2],
                                 description=SPACE_RE.sub(" ", part[3])))
            if line.startswith('DidSet:'):
                resp_dict['didset'] = True
            if line.startswith('DidRemove:'):
                resp_dict['didremove'] = True
    if cmd == 'PROCESS':
        # PROCESS returns the rewritten message after the reply headers;
        # lines[4:] presumably skips status line + headers -- confirm
        # against the spamd protocol if this offset ever misbehaves.
        resp_dict['message'] = ''.join(lines[4:]) + '\r\n'
    if cmd == 'HEADERS':
        parser = Parser()
        headers = parser.parsestr('\r\n'.join(lines[4:]), headersonly=True)
        for key in headers.keys():
            resp_dict['headers'][key] = headers[key]
    return resp_dict
Return a response
def parker_weighting(ray_trafo, q=0.25):
    """Create parker weighting for a `RayTransform`.

    Parker weighting is a weighting function that ensures that oversampled
    fan/cone beam data are weighted such that each line has unit weight. It is
    useful in analytic reconstruction methods such as FBP to give a more
    accurate result and can improve convergence rates for iterative methods.

    See the article `Parker weights revisited`_ for more information.

    Parameters
    ----------
    ray_trafo : `RayTransform`
        The ray transform for which to compute the weights.
    q : float, optional
        Parameter controlling the speed of the roll-off at the edges of the
        weighting. 1.0 gives the classical Parker weighting, while smaller
        values in general lead to lower noise but stronger discretization
        artifacts.

    Returns
    -------
    parker_weighting : ``ray_trafo.range`` element

    See Also
    --------
    fbp_op : Filtered back-projection operator from `RayTransform`
    tam_danielson_window : Indicator function for helical data
    odl.tomo.geometry.conebeam.FanBeamGeometry : Use case in 2d
    odl.tomo.geometry.conebeam.ConeFlatGeometry : Use case in 3d (for pitch 0)

    References
    ----------
    .. _Parker weights revisited:
       https://www.ncbi.nlm.nih.gov/pubmed/11929021
    """
    # Note: Parameter names taken from WES2002

    # Extract parameters
    src_radius = ray_trafo.geometry.src_radius
    det_radius = ray_trafo.geometry.det_radius
    ndim = ray_trafo.geometry.ndim
    angles = ray_trafo.range.meshgrid[0]
    min_rot_angle = ray_trafo.geometry.motion_partition.min_pt
    alen = ray_trafo.geometry.motion_params.length

    # Parker weightings are not defined for helical geometries
    if ray_trafo.geometry.ndim != 2:
        pitch = ray_trafo.geometry.pitch
        if pitch != 0:
            raise ValueError('Parker weighting window is only defined with '
                             '`pitch==0`')

    # Find distance from projection of rotation axis for each pixel
    if ndim == 2:
        dx = ray_trafo.range.meshgrid[1]
    elif ndim == 3:
        # Find projection of axis on detector
        rot_dir = _rotation_direction_in_detector(ray_trafo.geometry)
        # If axis is aligned to a coordinate axis, save some memory and time by
        # using broadcasting
        if rot_dir[0] == 0:
            dx = rot_dir[1] * ray_trafo.range.meshgrid[2]
        elif rot_dir[1] == 0:
            dx = rot_dir[0] * ray_trafo.range.meshgrid[1]
        else:
            dx = (rot_dir[0] * ray_trafo.range.meshgrid[1] +
                  rot_dir[1] * ray_trafo.range.meshgrid[2])

    # Compute parameters.  `max_fan_angle` is the full fan opening angle as
    # seen from the source, `epsilon` is the oversampling margin beyond the
    # short-scan range pi + fan angle.
    dx_abs_max = np.max(np.abs(dx))
    max_fan_angle = 2 * np.arctan2(dx_abs_max, src_radius + det_radius)
    delta = max_fan_angle / 2
    epsilon = alen - np.pi - max_fan_angle

    if epsilon < 0:
        raise Exception('data not sufficiently sampled for parker weighting')

    # Define utility functions

    def S(betap):
        # Smooth step: 0 below -1/2, sine transition on [-1/2, 1/2],
        # 1 at and above 1/2.
        return (0.5 * (1.0 + np.sin(np.pi * betap)) * (np.abs(betap) < 0.5) +
                (betap >= 0.5))

    def b(alpha):
        # Width of the roll-off region for fan angle `alpha`, scaled by q.
        return q * (2 * delta - 2 * alpha + epsilon)

    # Create weighting function
    beta = np.asarray(angles - min_rot_angle,
                      dtype=ray_trafo.range.dtype)  # rotation angle
    alpha = np.asarray(np.arctan2(dx, src_radius + det_radius),
                       dtype=ray_trafo.range.dtype)

    # Compute sum in place to save memory
    S_sum = S(beta / b(alpha) - 0.5)
    S_sum += S((beta - 2 * delta + 2 * alpha - epsilon) / b(alpha) + 0.5)
    S_sum -= S((beta - np.pi + 2 * alpha) / b(-alpha) - 0.5)
    S_sum -= S((beta - np.pi - 2 * delta - epsilon) / b(-alpha) + 0.5)

    scale = 0.5 * alen / np.pi

    return ray_trafo.range.element(
        np.broadcast_to(S_sum * scale, ray_trafo.range.shape))
Create parker weighting for a `RayTransform`. Parker weighting is a weighting function that ensures that oversampled fan/cone beam data are weighted such that each line has unit weight. It is useful in analytic reconstruction methods such as FBP to give a more accurate result and can improve convergence rates for iterative methods. See the article `Parker weights revisited`_ for more information. Parameters ---------- ray_trafo : `RayTransform` The ray transform for which to compute the weights. q : float, optional Parameter controlling the speed of the roll-off at the edges of the weighting. 1.0 gives the classical Parker weighting, while smaller values in general lead to lower noise but stronger discretization artifacts. Returns ------- parker_weighting : ``ray_trafo.range`` element See Also -------- fbp_op : Filtered back-projection operator from `RayTransform` tam_danielson_window : Indicator function for helical data odl.tomo.geometry.conebeam.FanBeamGeometry : Use case in 2d odl.tomo.geometry.conebeam.ConeFlatGeometry : Use case in 3d (for pitch 0) References ---------- .. _Parker weights revisited: https://www.ncbi.nlm.nih.gov/pubmed/11929021
def _parse_error(self, error): """ Parses a single GLSL error and extracts the linenr and description Other GLIR implementations may omit this. """ error = str(error) # Nvidia # 0(7): error C1008: undefined variable "MV" m = re.match(r'(\d+)\((\d+)\)\s*:\s(.*)', error) if m: return int(m.group(2)), m.group(3) # ATI / Intel # ERROR: 0:131: '{' : syntax error parse error m = re.match(r'ERROR:\s(\d+):(\d+):\s(.*)', error) if m: return int(m.group(2)), m.group(3) # Nouveau # 0:28(16): error: syntax error, unexpected ')', expecting '(' m = re.match(r'(\d+):(\d+)\((\d+)\):\s(.*)', error) if m: return int(m.group(2)), m.group(4) # Other ... return None, error
Parses a single GLSL error and extracts the linenr and description Other GLIR implementations may omit this.
def main() -> None:
    """Main function of this script.

    Writes the application's help summary plus the help text of every
    command, sub-command and help topic to the file named in ``sys.argv[1]``.
    Requires ``self`` in the script globals (cmd2 ``locals_in_py`` mode).
    """
    # Make sure we have access to self
    if 'self' not in globals():
        print("Run 'set locals_in_py true' and then rerun this script")
        return

    # Make sure the user passed in an output file
    if len(sys.argv) != 2:
        print("Usage: {} <output_file>".format(os.path.basename(sys.argv[0])))
        return

    # Open the output file
    outfile_path = os.path.expanduser(sys.argv[1])
    try:
        outfile = open(outfile_path, 'w')
    except OSError as e:
        print("Error opening {} because: {}".format(outfile_path, e))
        return

    # `with` guarantees the handle is closed even if a write below raises;
    # the original open()/close() pair leaked it on any error in between.
    with outfile:
        # Write the help summary
        header = '{0}\nSUMMARY\n{0}\n'.format(ASTERISKS)
        outfile.write(header)
        result = app('help -v')
        outfile.write(result.stdout)

        # Get a list of all commands and help topics and then filter out duplicates
        all_commands = set(self.get_all_commands())
        all_topics = set(self.get_help_topics())
        to_save = sorted(all_commands | all_topics)

        for item in to_save:
            is_command = item in all_commands
            add_help_to_file(item, outfile, is_command)

            if is_command:
                # Add any sub-commands
                for subcmd in get_sub_commands(getattr(self.cmd_func(item), 'argparser', None)):
                    full_cmd = '{} {}'.format(item, subcmd)
                    add_help_to_file(full_cmd, outfile, is_command)

    print("Output written to {}".format(outfile_path))
Main function of this script
def off(self, evnt, func):
    '''
    Remove a previously registered event handler function.

    Example:

        base.off( 'foo', onFooFunc )
    '''
    handlers = self._syn_funcs.get(evnt)
    if handlers is not None:
        try:
            handlers.remove(func)
        except ValueError:
            # func was never registered for this event; nothing to do.
            pass
Remove a previously registered event handler function. Example: base.off( 'foo', onFooFunc )
def models(cls, api_version=DEFAULT_API_VERSION):
    """Module depends on the API version:

    * 2017-07-01: :mod:`v2017_07_01.models<azure.mgmt.containerservice.v2017_07_01.models>`
    * 2018-03-31: :mod:`v2018_03_31.models<azure.mgmt.containerservice.v2018_03_31.models>`
    * 2018-08-01-preview: :mod:`v2018_08_01_preview.models<azure.mgmt.containerservice.v2018_08_01_preview.models>`
    * 2018-09-30-preview: :mod:`v2018_09_30_preview.models<azure.mgmt.containerservice.v2018_09_30_preview.models>`
    * 2019-02-01: :mod:`v2019_02_01.models<azure.mgmt.containerservice.v2019_02_01.models>`
    """
    # Imports stay lazy so only the requested API version's module loads.
    if api_version == '2019-02-01':
        from .v2019_02_01 import models
    elif api_version == '2018-09-30-preview':
        from .v2018_09_30_preview import models
    elif api_version == '2018-08-01-preview':
        from .v2018_08_01_preview import models
    elif api_version == '2018-03-31':
        from .v2018_03_31 import models
    elif api_version == '2017-07-01':
        from .v2017_07_01 import models
    else:
        raise NotImplementedError("APIVersion {} is not available".format(api_version))
    return models
Module depends on the API version: * 2017-07-01: :mod:`v2017_07_01.models<azure.mgmt.containerservice.v2017_07_01.models>` * 2018-03-31: :mod:`v2018_03_31.models<azure.mgmt.containerservice.v2018_03_31.models>` * 2018-08-01-preview: :mod:`v2018_08_01_preview.models<azure.mgmt.containerservice.v2018_08_01_preview.models>` * 2018-09-30-preview: :mod:`v2018_09_30_preview.models<azure.mgmt.containerservice.v2018_09_30_preview.models>` * 2019-02-01: :mod:`v2019_02_01.models<azure.mgmt.containerservice.v2019_02_01.models>`
def read_all(self):
    """Return every byte remaining in the packet.

    Invalidates the read position, so any subsequent read() will error.
    """
    remaining = self._data[self._position:]
    # Poison the cursor so later reads fail instead of re-reading data.
    self._position = None
    return remaining
Read all remaining data in the packet. (Subsequent read() will return errors.)
def compute_route_stats_base(
    trip_stats_subset: DataFrame,
    headway_start_time: str = "07:00:00",
    headway_end_time: str = "19:00:00",
    *,
    split_directions: bool = False,
) -> DataFrame:
    """
    Compute stats for the given subset of trips stats.

    Parameters
    ----------
    trip_stats_subset : DataFrame
        Subset of the output of :func:`.trips.compute_trip_stats`
    split_directions : boolean
        If ``True``, then separate the stats by trip direction (0 or 1);
        otherwise aggregate trips visiting from both directions
    headway_start_time : string
        HH:MM:SS time string indicating the start time for computing
        headway stats
    headway_end_time : string
        HH:MM:SS time string indicating the end time for computing
        headway stats

    Returns
    -------
    DataFrame
        Columns are

        - ``'route_id'``
        - ``'route_short_name'``
        - ``'route_type'``
        - ``'direction_id'``
        - ``'num_trips'``: number of trips on the route in the subset
        - ``'num_trip_starts'``: number of trips on the route with
          nonnull start times
        - ``'num_trip_ends'``: number of trips on the route with nonnull
          end times that end before 23:59:59
        - ``'is_loop'``: 1 if at least one of the trips on the route has
          its ``is_loop`` field equal to 1; 0 otherwise
        - ``'is_bidirectional'``: 1 if the route has trips in both
          directions; 0 otherwise
        - ``'start_time'``: start time of the earliest trip on the route
        - ``'end_time'``: end time of latest trip on the route
        - ``'max_headway'``: maximum of the durations (in minutes)
          between trip starts on the route between
          ``headway_start_time`` and ``headway_end_time`` on the given
          dates
        - ``'min_headway'``: minimum of the durations (in minutes)
          mentioned above
        - ``'mean_headway'``: mean of the durations (in minutes)
          mentioned above
        - ``'peak_num_trips'``: maximum number of simultaneous trips in
          service (for the given direction, or for both directions when
          ``split_directions==False``)
        - ``'peak_start_time'``: start time of first longest period
          during which the peak number of trips occurs
        - ``'peak_end_time'``: end time of first longest period during
          which the peak number of trips occurs
        - ``'service_duration'``: total of the duration of each trip on
          the route in the given subset of trips; measured in hours
        - ``'service_distance'``: total of the distance traveled by each
          trip on the route in the given subset of trips; measured in
          whatever distance units are present in ``trip_stats_subset``;
          contains all ``np.nan`` entries if ``feed.shapes is None``
        - ``'service_speed'``: service_distance/service_duration;
          measured in distance units per hour
        - ``'mean_trip_distance'``: service_distance/num_trips
        - ``'mean_trip_duration'``: service_duration/num_trips

        If not ``split_directions``, then remove the direction_id column
        and compute each route's stats, except for headways, using its
        trips running in both directions.  In this case, (1) compute max
        headway by taking the max of the max headways in both
        directions; (2) compute mean headway by taking the weighted mean
        of the mean headways in both directions.

        If ``trip_stats_subset`` is empty, return an empty DataFrame.

        Raise a ValueError if ``split_directions`` and no non-NaN
        direction ID values present
    """
    if trip_stats_subset.empty:
        return pd.DataFrame()

    # Convert trip start and end times to seconds to ease calculations below
    f = trip_stats_subset.copy()
    f[["start_time", "end_time"]] = f[["start_time", "end_time"]].applymap(
        hp.timestr_to_seconds
    )

    headway_start = hp.timestr_to_seconds(headway_start_time)
    headway_end = hp.timestr_to_seconds(headway_end_time)

    def compute_route_stats_split_directions(group):
        # Take this group of all trips stats for a single route
        # and compute route-level stats.
        d = OrderedDict()
        d["route_short_name"] = group["route_short_name"].iat[0]
        d["route_type"] = group["route_type"].iat[0]
        d["num_trips"] = group.shape[0]
        d["num_trip_starts"] = group["start_time"].count()
        # Trips ending at or after midnight (>= 24h in seconds) are excluded.
        d["num_trip_ends"] = group.loc[
            group["end_time"] < 24 * 3600, "end_time"
        ].count()
        d["is_loop"] = int(group["is_loop"].any())
        d["start_time"] = group["start_time"].min()
        d["end_time"] = group["end_time"].max()

        # Compute max and mean headway
        stimes = group["start_time"].values
        # Only start times inside the headway window contribute.
        stimes = sorted(
            [
                stime
                for stime in stimes
                if headway_start <= stime <= headway_end
            ]
        )
        headways = np.diff(stimes)
        if headways.size:
            d["max_headway"] = np.max(headways) / 60  # minutes
            d["min_headway"] = np.min(headways) / 60  # minutes
            d["mean_headway"] = np.mean(headways) / 60  # minutes
        else:
            d["max_headway"] = np.nan
            d["min_headway"] = np.nan
            d["mean_headway"] = np.nan

        # Compute peak num trips
        active_trips = hp.get_active_trips_df(
            group[["start_time", "end_time"]]
        )
        times, counts = active_trips.index.values, active_trips.values
        start, end = hp.get_peak_indices(times, counts)
        d["peak_num_trips"] = counts[start]
        d["peak_start_time"] = times[start]
        d["peak_end_time"] = times[end]

        d["service_distance"] = group["distance"].sum()
        d["service_duration"] = group["duration"].sum()

        return pd.Series(d)

    def compute_route_stats(group):
        # Same as above but aggregates both directions; headways are still
        # computed per direction and then pooled.
        d = OrderedDict()
        d["route_short_name"] = group["route_short_name"].iat[0]
        d["route_type"] = group["route_type"].iat[0]
        d["num_trips"] = group.shape[0]
        d["num_trip_starts"] = group["start_time"].count()
        d["num_trip_ends"] = group.loc[
            group["end_time"] < 24 * 3600, "end_time"
        ].count()
        d["is_loop"] = int(group["is_loop"].any())
        d["is_bidirectional"] = int(group["direction_id"].unique().size > 1)
        d["start_time"] = group["start_time"].min()
        d["end_time"] = group["end_time"].max()

        # Compute headway stats
        headways = np.array([])
        for direction in [0, 1]:
            stimes = group[group["direction_id"] == direction][
                "start_time"
            ].values
            stimes = sorted(
                [
                    stime
                    for stime in stimes
                    if headway_start <= stime <= headway_end
                ]
            )
            headways = np.concatenate([headways, np.diff(stimes)])
        if headways.size:
            d["max_headway"] = np.max(headways) / 60  # minutes
            d["min_headway"] = np.min(headways) / 60  # minutes
            d["mean_headway"] = np.mean(headways) / 60  # minutes
        else:
            d["max_headway"] = np.nan
            d["min_headway"] = np.nan
            d["mean_headway"] = np.nan

        # Compute peak num trips
        active_trips = hp.get_active_trips_df(
            group[["start_time", "end_time"]]
        )
        times, counts = active_trips.index.values, active_trips.values
        start, end = hp.get_peak_indices(times, counts)
        d["peak_num_trips"] = counts[start]
        d["peak_start_time"] = times[start]
        d["peak_end_time"] = times[end]

        d["service_distance"] = group["distance"].sum()
        d["service_duration"] = group["duration"].sum()

        return pd.Series(d)

    if split_directions:
        # Drop rows with missing direction and coerce the rest to int so
        # groupby keys are clean.
        f = f.loc[lambda x: x.direction_id.notnull()].assign(
            direction_id=lambda x: x.direction_id.astype(int)
        )
        if f.empty:
            raise ValueError(
                "At least one trip stats direction ID value "
                "must be non-NaN."
            )

        g = (
            f.groupby(["route_id", "direction_id"])
            .apply(compute_route_stats_split_directions)
            .reset_index()
        )

        # Add the is_bidirectional column
        def is_bidirectional(group):
            d = {}
            d["is_bidirectional"] = int(
                group["direction_id"].unique().size > 1
            )
            return pd.Series(d)

        gg = g.groupby("route_id").apply(is_bidirectional).reset_index()
        g = g.merge(gg)
    else:
        g = f.groupby("route_id").apply(compute_route_stats).reset_index()

    # Compute a few more stats
    g["service_speed"] = g["service_distance"] / g["service_duration"]
    g["mean_trip_distance"] = g["service_distance"] / g["num_trips"]
    g["mean_trip_duration"] = g["service_duration"] / g["num_trips"]

    # Convert route times to time strings
    g[["start_time", "end_time", "peak_start_time", "peak_end_time"]] = g[
        ["start_time", "end_time", "peak_start_time", "peak_end_time"]
    ].applymap(lambda x: hp.timestr_to_seconds(x, inverse=True))

    return g
Compute stats for the given subset of trips stats. Parameters ---------- trip_stats_subset : DataFrame Subset of the output of :func:`.trips.compute_trip_stats` split_directions : boolean If ``True``, then separate the stats by trip direction (0 or 1); otherwise aggregate trips visiting from both directions headway_start_time : string HH:MM:SS time string indicating the start time for computing headway stats headway_end_time : string HH:MM:SS time string indicating the end time for computing headway stats Returns ------- DataFrame Columns are - ``'route_id'`` - ``'route_short_name'`` - ``'route_type'`` - ``'direction_id'`` - ``'num_trips'``: number of trips on the route in the subset - ``'num_trip_starts'``: number of trips on the route with nonnull start times - ``'num_trip_ends'``: number of trips on the route with nonnull end times that end before 23:59:59 - ``'is_loop'``: 1 if at least one of the trips on the route has its ``is_loop`` field equal to 1; 0 otherwise - ``'is_bidirectional'``: 1 if the route has trips in both directions; 0 otherwise - ``'start_time'``: start time of the earliest trip on the route - ``'end_time'``: end time of latest trip on the route - ``'max_headway'``: maximum of the durations (in minutes) between trip starts on the route between ``headway_start_time`` and ``headway_end_time`` on the given dates - ``'min_headway'``: minimum of the durations (in minutes) mentioned above - ``'mean_headway'``: mean of the durations (in minutes) mentioned above - ``'peak_num_trips'``: maximum number of simultaneous trips in service (for the given direction, or for both directions when ``split_directions==False``) - ``'peak_start_time'``: start time of first longest period during which the peak number of trips occurs - ``'peak_end_time'``: end time of first longest period during which the peak number of trips occurs - ``'service_duration'``: total of the duration of each trip on the route in the given subset of trips; measured in hours - 
``'service_distance'``: total of the distance traveled by each trip on the route in the given subset of trips; measured in whatever distance units are present in ``trip_stats_subset``; contains all ``np.nan`` entries if ``feed.shapes is None`` - ``'service_speed'``: service_distance/service_duration; measured in distance units per hour - ``'mean_trip_distance'``: service_distance/num_trips - ``'mean_trip_duration'``: service_duration/num_trips If not ``split_directions``, then remove the direction_id column and compute each route's stats, except for headways, using its trips running in both directions. In this case, (1) compute max headway by taking the max of the max headways in both directions; (2) compute mean headway by taking the weighted mean of the mean headways in both directions. If ``trip_stats_subset`` is empty, return an empty DataFrame. Raise a ValueError if ``split_directions`` and no non-NaN direction ID values present
def decode_escapes(s):
    '''Unescape libconfig string literals.

    Each escape sequence matched by ESCAPE_SEQUENCE_RE is decoded with
    Python's unicode-escape codec.
    '''
    def _decode(match):
        return codecs.decode(match.group(0), 'unicode-escape')

    return ESCAPE_SEQUENCE_RE.sub(_decode, s)
Unescape libconfig string literals
def calculate_check_interval(
    self, max_interval, ewma_factor, max_days=None, max_updates=None,
    ewma=0, ewma_ts=None, add_partial=None
):
    '''Calculate interval for checks as average time (ewma) between updates
    for specified period.

    Returns the exponentially weighted moving average of the gaps (in
    seconds) between post modification times, capped at
    ``timedelta(max_interval).total_seconds()``.
    NOTE(review): ``timedelta(max_interval)`` interprets max_interval as
    *days* (first positional argument) -- confirm the unit is intentional.
    '''
    if not add_partial:
        # Posts ordered oldest-first, optionally restricted to those newer
        # than the timestamp the previous ewma was computed at.
        posts_base = self.posts.only('date_modified').order_by('date_modified')
        if ewma_ts:
            posts_base = posts_base.filter(date_modified__gt=ewma_ts)
        posts = posts_base
        if max_days:
            posts = posts.filter(date_modified__gt=timezone.now() - timedelta(max_days))
        if max_updates and max_updates > 0:
            posts = posts[:max_updates]
            if len(posts) < max_updates:
                # Too few posts after filtering: fall back to the base
                # queryset so the sample still holds max_updates entries.
                posts = posts_base[:max_updates]
        timestamps = posts.values_list('date_modified', flat=True)
    else:
        timestamps = list()
    if add_partial:
        if not ewma_ts:
            # No reference timestamp yet: use the newest stored post.
            try:
                ewma_ts = self.posts.only('date_modified')\
                    .order_by('-date_modified')[0].date_modified
            except (ObjectDoesNotExist, IndexError):
                return 0  # no previous timestamp available
        timestamps.append(add_partial)
        if (add_partial - ewma_ts).total_seconds() < ewma:
            # It doesn't make sense to lower interval due to frequent check attempts.
            return ewma
    # Fold each consecutive gap between updates into the moving average.
    for ts in timestamps:
        if ewma_ts is None:
            # first post
            ewma_ts = ts
            continue
        ewma_ts, interval = ts, (ts - ewma_ts).total_seconds()
        ewma = ewma_factor * interval + (1 - ewma_factor) * ewma
    return min(timedelta(max_interval).total_seconds(), ewma)
Calculate interval for checks as average time (ewma) between updates for specified period.
def create(self, name, *args, **kwargs):
    """Create an image member, translating permission errors.

    Wraps the default ``create`` so that a 403 response from the API is
    re-raised as ``exc.UnsharableImage``; every other exception propagates
    unchanged.
    """
    try:
        return super(ImageMemberManager, self).create(name, *args, **kwargs)
    except Exception as e:
        # Not every exception carries an http_status attribute; the
        # original unconditional `e.http_status` access raised
        # AttributeError for such exceptions and masked the real error.
        # Only translate genuine 403s; the bare `raise` preserves the
        # original traceback for everything else.
        if getattr(e, "http_status", None) == 403:
            raise exc.UnsharableImage("You cannot share a public image.")
        raise
Need to wrap the default call to handle exceptions.
def create_thing(self, lid):
    """Create a new Thing with a local id (lid).

    Returns a [Thing](Thing.m.html#IoticAgent.IOT.Thing.Thing) object if
    successful or if the Thing already exists

    Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
    containing the error if the infrastructure detects a problem

    Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
    if there is a communications problem between you and the infrastructure

    `lid` (required) (string) local identifier of your Thing.  The local id
    is your name or nickname for the thing.  It's "local" in that it's only
    available to you on this container, not searchable and not visible to
    others.
    """
    # Fire the asynchronous create and block until completion; per the
    # docstring, failures surface as IOTException/LinkException from the
    # wait helper.
    evt = self.create_thing_async(lid)
    self._wait_and_except_if_failed(evt)
    try:
        # __new_things is used as a context manager guarding a cache of
        # freshly created things -- presumably populated by the async
        # completion path; confirm against the agent internals.
        with self.__new_things:
            return self.__new_things.pop(lid)
    except KeyError as ex:
        # NOTE(review): six-style raise_from raises by itself and returns
        # None, which would make the outer `raise` operate on None --
        # confirm which raise_from implementation is imported here.
        raise raise_from(IOTClientError('Thing %s not in cache (post-create)' % lid), ex)
Create a new Thing with a local id (lid). Returns a [Thing](Thing.m.html#IoticAgent.IOT.Thing.Thing) object if successful or if the Thing already exists Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException) containing the error if the infrastructure detects a problem Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException) if there is a communications problem between you and the infrastructure `lid` (required) (string) local identifier of your Thing. The local id is your name or nickname for the thing. It's "local" in that it's only available to you on this container, not searchable and not visible to others.
def from_text(text):
    """Convert the text format message into a message object.

    @param text: The text format message.
    @type text: string
    @raises UnknownHeaderField:
    @raises dns.exception.SyntaxError:
    @rtype: dns.message.Message object"""

    # 'text' can also be a file, but we don't publish that fact
    # since it's an implementation detail.  The official file
    # interface is from_file().
    message = Message()
    _TextReader(text, message).read()
    return message
Convert the text format message into a message object. @param text: The text format message. @type text: string @raises UnknownHeaderField: @raises dns.exception.SyntaxError: @rtype: dns.message.Message object
def check_bom(file):
    """Determine the file codec from its BOM record.

    If the file starts with a BOM record encoded with UTF-8 or
    UTF-16 (BE/LE) then the corresponding canonical encoding name is
    returned, otherwise None is returned.  In both cases the file's
    current position is set to the first byte after the BOM.

    The file must be open in binary mode and positioned at offset 0.
    """
    # Read the longest possible BOM (UTF-8's is 3 bytes; UTF-16's are 2).
    lead = file.read(3)
    if len(lead) == 3 and lead == codecs.BOM_UTF8:
        # UTF-8: position is already just past the BOM; use canonical name.
        return codecs.lookup('utf-8').name
    if len(lead) >= 2 and lead[:2] == codecs.BOM_UTF16_BE:
        # UTF-16 BOM is only 2 bytes: back up over the extra byte we read.
        if len(lead) == 3:
            file.seek(-1, os.SEEK_CUR)
        return codecs.lookup('utf-16-be').name
    if len(lead) >= 2 and lead[:2] == codecs.BOM_UTF16_LE:
        if len(lead) == 3:
            file.seek(-1, os.SEEK_CUR)
        return codecs.lookup('utf-16-le').name
    # No BOM: rewind everything that was read.
    file.seek(-len(lead), os.SEEK_CUR)
    return None
Determines the file codec from its BOM record. If file starts with BOM record encoded with UTF-8 or UTF-16(BE/LE) then corresponding encoding name is returned, otherwise None is returned. In both cases file current position is set to after-BOM bytes. The file must be open in binary mode and positioned at offset 0.
def valid():
    '''
    List valid entries in mine configuration.

    CLI Example:

    .. code-block:: bash

        salt '*' mine.valid
    '''
    mine_config = __salt__['config.merge']('mine_functions', {})
    # Nothing configured -> nothing to report.
    if not mine_config:
        return

    data = {}
    for func in mine_config:
        entry = mine_config[func]
        if entry and isinstance(entry, dict):
            # Dict form may carry an explicit mine_function override.
            mine_func = entry.pop('mine_function', func)
            if not _mine_function_available(mine_func):
                continue
            data[func] = {mine_func: entry}
        elif entry and isinstance(entry, list):
            mine_func = func
            # List form may embed the override in its first element.
            if isinstance(entry[0], dict) and 'mine_function' in entry[0]:
                mine_func = entry[0]['mine_function']
                entry.pop(0)
            if not _mine_function_available(mine_func):
                continue
            data[func] = {mine_func: entry}
        else:
            if not _mine_function_available(func):
                continue
            data[func] = entry
    return data
List valid entries in mine configuration. CLI Example: .. code-block:: bash salt '*' mine.valid
def commits(self):
    """
    Return the current DataFrame joined with the commits DataFrame.

    Only the last commit in a reference (aka the current state) is
    returned.

    >>> commits_df = refs_df.commits

    If you want all commits from the references, use the
    `all_reference_commits` method, but take into account that getting
    all the commits will lead to a lot of repeated tree entries and
    blobs, thus making your query very slow.

    >>> commits_df = refs_df.all_reference_commits

    :rtype: CommitsDataFrame
    """
    joined = self._engine_dataframe.getCommits()
    return CommitsDataFrame(joined, self._session, self._implicits)
Returns the current DataFrame joined with the commits DataFrame. It just returns the last commit in a reference (aka the current state). >>> commits_df = refs_df.commits If you want all commits from the references, use the `all_reference_commits` method, but take into account that getting all the commits will lead to a lot of repeated tree entries and blobs, thus making your query very slow. >>> commits_df = refs_df.all_reference_commits :rtype: CommitsDataFrame
def make_matepairs(fastafile):
    """Write a ``.mates`` file pairing adjacent records of `fastafile`.

    Assumes the mates are adjacent sequence records.  Returns the path
    to the mates file; an existing mates file is reused rather than
    regenerated.
    """
    assert op.exists(fastafile)

    matefile = fastafile.rsplit(".", 1)[0] + ".mates"
    if op.exists(matefile):
        logging.debug("matepairs file `{0}` found".format(matefile))
        return matefile

    logging.debug("parsing matepairs from `{0}`".format(fastafile))
    # Use a context manager so the output file is closed even if parsing
    # fails partway through (the original leaked the handle on error).
    with open(matefile, "w") as matefw:
        it = SeqIO.parse(fastafile, "fasta")
        # zip(it, it) pairs consecutive records: (rec0, rec1), (rec2, rec3), ...
        for fwd, rev in zip(it, it):
            print("{0}\t{1}".format(fwd.id, rev.id), file=matefw)

    return matefile
Assumes the mates are adjacent sequence records
def elapsed_time(self):
    """Return the elapsed time since ``self.start_time`` as min:sec.ms."""
    delta = (datetime.datetime.now() - self.start_time)
    minutes = delta.seconds % 3600 // 60
    seconds = delta.seconds % 60
    millis = int(delta.microseconds / 1000)
    return '{:02}:{:02}.{:03}'.format(minutes, seconds, millis)
Return elapsed time formatted as min:sec.ms, with the milliseconds taken from the timedelta's microseconds component.
def read(self, size):
    """Read bytes from the stream and block until sample rate is achieved.

    Args:
      size: number of bytes to read from the stream.
    """
    now = time.time()
    # Sleep off whatever time remains before the next permitted read so
    # the effective throughput matches the configured sample rate.
    missing_dt = self._sleep_until - now
    if missing_dt > 0:
        time.sleep(missing_dt)
    self._sleep_until = time.time() + self._sleep_time(size)
    # NOTE(review): for a wave source readframes() counts frames, not
    # bytes -- presumably callers account for that; confirm.
    data = (self._wavep.readframes(size)
            if self._wavep
            else self._fp.read(size))

    # When reach end of audio stream, pad remainder with silence (zeros).
    # NOTE(review): only a fully-empty read is padded; a short final
    # chunk is returned as-is -- confirm that is intended.
    if not data:
        return b'\x00' * size
    return data
Read bytes from the stream and block until sample rate is achieved. Args: size: number of bytes to read from the stream.
def url_replace_param(url, name, value):
    """
    Replace a GET parameter in an URL
    """
    parts = urlparse(force_str(url))
    params = parse_qs(parts.query)
    params[name] = value
    new_query = urlencode(params, doseq=True)
    rebuilt = urlunparse([
        parts.scheme,
        parts.netloc,
        parts.path,
        parts.params,
        new_query,
        parts.fragment,
    ])
    return force_text(rebuilt)
Replace a GET parameter in an URL
def run_per_switch_cmds(self, switch_cmds):
    """Apply each switch's own command list to that switch.

    Takes a switch-IP -> command-list mapping and runs only the set of
    commands specified for a switch on that switch.  Used for
    applying/removing ACLs to/from interfaces, as this config varies
    from switch to switch.
    """
    for switch_ip in switch_cmds:
        # NOTE(review): an unknown IP yields switch=None here --
        # presumably run_openstack_sg_cmds tolerates that; confirm.
        target = self._switches.get(switch_ip)
        self.run_openstack_sg_cmds(switch_cmds[switch_ip], target)
Applies cmds to appropriate switches This takes in a switch->cmds mapping and runs only the set of cmds specified for a switch on that switch. This helper is used for applying/removing ACLs to/from interfaces as this config will vary from switch to switch.
def _send_size(self):
    """Report the current terminal size to the server."""
    rows, cols = _get_size(sys.stdout.fileno())
    payload = {'cmd': 'size', 'data': [rows, cols]}
    self._send_packet(payload)
Report terminal size to server.
def getReffs(self, textId, level=1, subreference=None):
    """Retrieve the references below a textual node.

    :param textId: CtsTextMetadata Identifier
    :type textId: str
    :param level: Depth for retrieval
    :type level: int
    :param subreference: CapitainsCtsPassage Reference
    :type subreference: str
    :return: List of references
    :rtype: [str]
    """
    text = CtsText(urn=textId, retriever=self.endpoint)
    return text.getReffs(level, subreference)
Retrieve the siblings of a textual node :param textId: CtsTextMetadata Identifier :type textId: str :param level: Depth for retrieval :type level: int :param subreference: CapitainsCtsPassage Reference :type subreference: str :return: List of references :rtype: [str]
def integer_ceil(a, b):
    '''Return the ceil integer of a div b.'''
    # Negating twice turns Python's floor division into ceiling division.
    return -(-a // b)
Return the ceil integer of a div b.
def _help_basic(self):
    """ Help for Workbench Basics """
    parts = [
        '%sWorkbench: Getting started...' % (color.Yellow),
        '\n%sStore a sample into Workbench:' % (color.Green),
        '\n\t%s$ workbench.store_sample(raw_bytes, filename, type_tag)' % (color.LightBlue),
        '\n\n%sNotice store_sample returns an md5 of the sample...' % (color.Yellow),
        '\n%sRun workers on the sample (view, meta, whatever...):' % (color.Green),
        "\n\t%s$ workbench.work_request('view', md5)%s" % (color.LightBlue, color.Normal),
    ]
    return ''.join(parts)
Help for Workbench Basics
def _republish_dropped_message(self, reason):
    """Republish the original message that was received when it is being
    dropped by the consumer. This is for internal use and should not be
    extended or used directly.

    :param str reason: The reason the message was dropped

    """
    self.logger.debug('Republishing due to ProcessingException')
    # Guard against properties being None before copying: the original
    # ``dict(props) or {}`` raised TypeError when props was None.
    properties = dict(self._message.properties or {})
    if not properties.get('headers'):
        properties['headers'] = {}

    # Annotate the message so the drop can be traced downstream.
    headers = properties['headers']
    headers['X-Dropped-By'] = self.name
    headers['X-Dropped-Reason'] = reason
    headers['X-Dropped-Timestamp'] = \
        datetime.datetime.utcnow().isoformat()
    headers['X-Original-Exchange'] = self._message.exchange

    self._message.channel.basic_publish(
        self._drop_exchange, self._message.routing_key, self._message.body,
        pika.BasicProperties(**properties))
Republish the original message that was received it is being dropped by the consumer. This for internal use and should not be extended or used directly. :param str reason: The reason the message was dropped
def convert_episode_to_batch_major(episode):
    """Converts an episode to have the batch dimension in the major (first)
    dimension.

    Each value is copied and its leading (time) and batch axes swapped.
    """
    return {key: np.array(value).copy().swapaxes(0, 1)
            for key, value in episode.items()}
Converts an episode to have the batch dimension in the major (first) dimension.
def rotate_crop(centerij, sz, angle, img=None, mode='constant', **kwargs):
    """Rotate around ``centerij`` and crop to size ``sz``.

    If no img is given, return the crop function instead.

    :param centerij: (i, j) crop center
    :param sz: output crop size
    :param angle: rotation angle
    :param img: [h, w, d] image, or None to get a crop function back
    :param mode: padding option
    :return: cropped image or function
    """
    from skimage import transform

    # Crop a square generous enough to survive the rotation
    # (half-size = ceil(sqrt(sum(sz^2)))), rotate, then crop the center.
    sz = np.array(sz)
    crop_half = int(np.ceil(np.sqrt(np.square(sz).sum())))
    if centerij[0] >= crop_half or centerij[1] >= crop_half:
        raise NotImplementedError
    slicei = slice(centerij[0] - crop_half, centerij[0] + crop_half)
    slicej = slice(centerij[1] - crop_half, centerij[1] + crop_half)

    def _rotate_cropcenter(im):
        generous = im[slicei, slicej]
        rotated = transform.rotate(generous, angle, resize=False,
                                   preserve_range=True, mode=mode, **kwargs)
        return cropcenter(sz, rotated)

    if img is not None:
        return _rotate_cropcenter(img)
    return _rotate_cropcenter
rotate and crop if no img, then return crop function :param centerij: :param sz: :param angle: :param img: [h,w,d] :param mode: padding option :return: cropped image or function
def lookup_string(self, keysym):
    """Return a string corresponding to KEYSYM, or None if no
    reasonable translation is found.
    """
    translated = self.keysym_translations.get(keysym)
    if translated is not None:
        return translated
    # Fall back to the generic Xlib keysym table.
    import Xlib.XK
    return Xlib.XK.keysym_to_string(keysym)
Return a string corresponding to KEYSYM, or None if no reasonable translation is found.
def set_executing(on: bool):
    """
    Toggle whether or not the current thread is executing a step file.
    This only applies when the current thread is a CauldronThread; it
    has no effect when run on a Main thread.

    :param on:
        Whether or not the thread should be annotated as executing a
        step file.
    """
    current = threading.current_thread()
    if not isinstance(current, threads.CauldronThread):
        return
    current.is_executing = on
Toggle whether or not the current thread is executing a step file. This will only apply when the current thread is a CauldronThread. This function has no effect when run on a Main thread. :param on: Whether or not the thread should be annotated as executing a step file.
def create(cls, *args, **kwargs) -> 'Entity':
    """Create a new record in the repository.

    Also performs unique validations before creating the entity.

    :param args: positional arguments for the entity
    :param kwargs: keyword arguments for the entity
    """
    logger.debug(
        f'Creating new `{cls.__name__}` object using data {kwargs}')

    model_class = repo_factory.get_model(cls)
    repo = repo_factory.get_repository(cls)

    try:
        # Building the entity runs field validation; errors raise here.
        entity = cls(*args, **kwargs)

        # Enforce uniqueness constraints before persisting.
        entity._validate_unique()

        # Perform Pre-Save Actions
        entity.pre_save()

        # Persist through the repository and capture the stored model.
        stored = repo.create(model_class.from_entity(entity))

        # Copy auto-generated field values back onto the entity.
        for name, field in entity.meta_.declared_fields.items():
            if isinstance(field, Auto):
                if isinstance(stored, dict):
                    generated = stored[name]
                else:
                    generated = getattr(stored, name)
                setattr(entity, name, generated)

        # Mark the entity as persisted.
        entity.state_.mark_saved()

        # Perform Post-Save Actions
        entity.post_save()

        return entity
    except ValidationError:
        # FIXME Log Exception
        raise
Create a new record in the repository. Also performs unique validations before creating the entity :param args: positional arguments for the entity :param kwargs: keyword arguments for the entity
def get_config(self, budget):
    """
    Function to sample a new configuration

    This function is called inside Hyperband to query a new configuration

    Parameters:
    -----------
    budget: float
        the budget for which this configuration is scheduled

    returns: config
        should return a valid configuration
    """
    self.logger.debug('start sampling a new configuration.')

    sample = None
    info_dict = {}

    # If no model is available, sample from prior
    # also mix in a fraction of random configs
    if len(self.kde_models.keys()) == 0 or np.random.rand() < self.random_fraction:
        sample = self.configspace.sample_configuration()
        info_dict['model_based_pick'] = False

    best = np.inf
    best_vector = None

    if sample is None:
        try:
            # sample from largest budget
            budget = max(self.kde_models.keys())

            # l = good-config density, g = bad-config density; minimizing
            # g/l maximizes the TPE acquisition (expected improvement proxy).
            l = self.kde_models[budget]['good'].pdf
            g = self.kde_models[budget]['bad' ].pdf

            minimize_me = lambda x: max(1e-32, g(x))/max(l(x),1e-32)

            kde_good = self.kde_models[budget]['good']
            kde_bad = self.kde_models[budget]['bad']

            for i in range(self.num_samples):
                # Perturb a random datum from the good KDE to get a candidate.
                idx = np.random.randint(0, len(kde_good.data))
                datum = kde_good.data[idx]
                vector = []

                for m,bw,t in zip(datum, kde_good.bw, self.vartypes):

                    bw = max(bw, self.min_bandwidth)
                    if t == 0:
                        # t == 0 marks a continuous parameter; widen bandwidth.
                        bw = self.bw_factor*bw
                        try:
                            vector.append(sps.truncnorm.rvs(-m/bw,(1-m)/bw, loc=m, scale=bw))
                        except:
                            self.logger.warning("Truncated Normal failed for:\ndatum=%s\nbandwidth=%s\nfor entry with value %s"%(datum, kde_good.bw, m))
                            self.logger.warning("data in the KDE:\n%s"%kde_good.data)
                    else:
                        # Categorical: keep the value with prob (1-bw), else resample.
                        if np.random.rand() < (1-bw):
                            vector.append(int(m))
                        else:
                            vector.append(np.random.randint(t))
                val = minimize_me(vector)

                if not np.isfinite(val):
                    self.logger.warning('sampled vector: %s has EI value %s'%(vector, val))
                    self.logger.warning("data in the KDEs:\n%s\n%s"%(kde_good.data, kde_bad.data))
                    self.logger.warning("bandwidth of the KDEs:\n%s\n%s"%(kde_good.bw, kde_bad.bw))
                    self.logger.warning("l(x) = %s"%(l(vector)))
                    self.logger.warning("g(x) = %s"%(g(vector)))

                    # right now, this happens because a KDE does not contain all values for a categorical parameter
                    # this cannot be fixed with the statsmodels KDE, so for now, we are just going to evaluate this one
                    # if the good_kde has a finite value, i.e. there is no config with that value in the bad kde, so it shouldn't be terrible.
                    if np.isfinite(l(vector)):
                        best_vector = vector
                        break

                if val < best:
                    best = val
                    best_vector = vector

            if best_vector is None:
                self.logger.debug("Sampling based optimization with %i samples failed -> using random configuration"%self.num_samples)
                sample = self.configspace.sample_configuration().get_dictionary()
                info_dict['model_based_pick'] = False
            else:
                self.logger.debug('best_vector: {}, {}, {}, {}'.format(best_vector, best, l(best_vector), g(best_vector)))
                # Round categorical entries of the vector back to integers.
                for i, hp_value in enumerate(best_vector):
                    if isinstance(
                        self.configspace.get_hyperparameter(
                            self.configspace.get_hyperparameter_by_idx(i)
                        ),
                        ConfigSpace.hyperparameters.CategoricalHyperparameter
                    ):
                        best_vector[i] = int(np.rint(best_vector[i]))
                sample = ConfigSpace.Configuration(self.configspace, vector=best_vector).get_dictionary()

                try:
                    sample = ConfigSpace.util.deactivate_inactive_hyperparameters(
                        configuration_space=self.configspace,
                        configuration=sample
                    )
                    info_dict['model_based_pick'] = True

                except Exception as e:
                    self.logger.warning(("="*50 + "\n")*3 +\
                            "Error converting configuration:\n%s"%sample+\
                            "\n here is a traceback:" +\
                            traceback.format_exc())
                    raise(e)

        except:
            # Any failure in the model-based path falls back to random sampling.
            self.logger.warning("Sampling based optimization with %i samples failed\n %s \nUsing random configuration"%(self.num_samples, traceback.format_exc()))
            sample = self.configspace.sample_configuration()
            info_dict['model_based_pick'] = False

    try:
        # Final normalization: drop inactive hyperparameters from the sample.
        sample = ConfigSpace.util.deactivate_inactive_hyperparameters(
            configuration_space=self.configspace,
            configuration=sample.get_dictionary()
        ).get_dictionary()
    except Exception as e:
        self.logger.warning("Error (%s) converting configuration: %s -> "
                            "using random configuration!",
                            e,
                            sample)
        sample = self.configspace.sample_configuration().get_dictionary()
    self.logger.debug('done sampling a new configuration.')
    return sample, info_dict
Function to sample a new configuration This function is called inside Hyperband to query a new configuration Parameters: ----------- budget: float the budget for which this configuration is scheduled returns: config should return a valid configuration
def find_program(basename):
    """
    Find program in PATH and return absolute path

    Try adding .exe or .bat to basename on Windows platforms
    (return None if not found)
    """
    candidates = [basename]
    if os.name == 'nt':
        # Windows: try common executable extensions before the bare name.
        win_ext = ('.exe', '.bat', '.cmd')
        if not basename.endswith(win_ext):
            candidates = [basename + ext for ext in win_ext] + [basename]
    for candidate in candidates:
        path = is_program_installed(candidate)
        if path:
            return path
Find program in PATH and return absolute path Try adding .exe or .bat to basename on Windows platforms (return None if not found)
def _make_subvolume(self, **args):
    """Creates a subvolume, adds it to this class and returns it."""
    from imagemounter.volume import Volume

    # vstype is deliberately not passed down; the subvolume decides for itself.
    subvolume = Volume(disk=self.disk, parent=self.parent,
                      volume_detector=self.volume_detector, **args)
    self.volumes.append(subvolume)
    return subvolume
Creates a subvolume, adds it to this class and returns it.
def resolve(cls, all_known_repos, name):
    """We require the list of all remote repo paths to be passed in
    to this because otherwise we would need to import the spec assembler
    in this module, which would give us circular imports."""
    short_match = None
    for repo in all_known_repos:
        # An exact remote path wins immediately.
        if repo.remote_path == name:
            return repo
        if name == repo.short_name:
            if short_match is not None:
                raise RuntimeError('Short repo name {} is ambiguous. It matches both {} and {}'.format(name, short_match.remote_path, repo.remote_path))
            short_match = repo
    if short_match is None:
        raise RuntimeError('Short repo name {} does not match any known repos'.format(name))
    return short_match
We require the list of all remote repo paths to be passed in to this because otherwise we would need to import the spec assembler in this module, which would give us circular imports.
def finish_displayhook(self):
    """Finish up all displayhook activities."""
    stream = io.stdout
    stream.write(self.shell.separate_out2)
    stream.flush()
Finish up all displayhook activities.
def get_cache_mode(service, pool_name):
    """
    Find the current caching mode of the pool_name given.
    :param service: six.string_types. The Ceph user name to run the command under
    :param pool_name: six.string_types
    :return: int or None
    """
    validator(value=service, valid_type=six.string_types)
    validator(value=pool_name, valid_type=six.string_types)
    out = check_output(['ceph', '--id', service,
                        'osd', 'dump', '--format=json'])
    if six.PY3:
        out = out.decode('UTF-8')
    # json.loads raises ValueError on malformed output; let it propagate.
    osd_json = json.loads(out)
    for pool in osd_json['pools']:
        if pool['pool_name'] == pool_name:
            return pool['cache_mode']
    return None
Find the current caching mode of the pool_name given. :param service: six.string_types. The Ceph user name to run the command under :param pool_name: six.string_types :return: int or None
def mnist_tutorial_cw(train_start=0, train_end=60000, test_start=0,
                      test_end=10000, viz_enabled=VIZ_ENABLED,
                      nb_epochs=NB_EPOCHS, batch_size=BATCH_SIZE,
                      source_samples=SOURCE_SAMPLES,
                      learning_rate=LEARNING_RATE,
                      attack_iterations=ATTACK_ITERATIONS,
                      model_path=MODEL_PATH,
                      targeted=TARGETED):
    """
    MNIST tutorial for Carlini and Wagner's attack
    :param train_start: index of first training set example
    :param train_end: index of last training set example
    :param test_start: index of first test set example
    :param test_end: index of last test set example
    :param viz_enabled: (boolean) activate plots of adversarial examples
    :param nb_epochs: number of epochs to train model
    :param batch_size: size of training batches
    :param source_samples: number of test inputs to attack
    :param learning_rate: learning rate for training
    :param model_path: path to the model file
    :param targeted: should we run a targeted attack? or untargeted?
    :return: an AccuracyReport object
    """
    # Object used to keep track of (and return) key accuracies
    report = AccuracyReport()

    # Set TF random seed to improve reproducibility
    tf.set_random_seed(1234)

    # Create TF session
    sess = tf.Session()
    print("Created TensorFlow session.")

    set_log_level(logging.DEBUG)

    # Get MNIST test data
    mnist = MNIST(train_start=train_start, train_end=train_end,
                  test_start=test_start, test_end=test_end)
    x_train, y_train = mnist.get_set('train')
    x_test, y_test = mnist.get_set('test')

    # Obtain Image Parameters
    img_rows, img_cols, nchannels = x_train.shape[1:4]
    nb_classes = y_train.shape[1]

    # Define input TF placeholder
    x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols,
                                          nchannels))
    y = tf.placeholder(tf.float32, shape=(None, nb_classes))
    nb_filters = 64

    # Define TF model graph
    model = ModelBasicCNN('model1', nb_classes, nb_filters)
    preds = model.get_logits(x)
    loss = CrossEntropy(model, smoothing=0.1)
    print("Defined TensorFlow model graph.")

    ###########################################################################
    # Training the model using TensorFlow
    ###########################################################################

    # Train an MNIST model
    train_params = {
        'nb_epochs': nb_epochs,
        'batch_size': batch_size,
        'learning_rate': learning_rate,
        'filename': os.path.split(model_path)[-1]
    }

    rng = np.random.RandomState([2017, 8, 30])
    # check if we've trained before, and if we have, use that pre-trained model
    if os.path.exists(model_path + ".meta"):
        tf_model_load(sess, model_path)
    else:
        train(sess, loss, x_train, y_train, args=train_params, rng=rng)
        saver = tf.train.Saver()
        saver.save(sess, model_path)

    # Evaluate the accuracy of the MNIST model on legitimate test examples
    eval_params = {'batch_size': batch_size}
    accuracy = model_eval(sess, x, y, preds, x_test, y_test, args=eval_params)
    assert x_test.shape[0] == test_end - test_start, x_test.shape
    print('Test accuracy on legitimate test examples: {0}'.format(accuracy))
    report.clean_train_clean_eval = accuracy

    ###########################################################################
    # Craft adversarial examples using Carlini and Wagner's approach
    ###########################################################################
    # A targeted attack crafts one adversarial input per wrong class.
    nb_adv_per_sample = str(nb_classes - 1) if targeted else '1'
    print('Crafting ' + str(source_samples) + ' * ' + nb_adv_per_sample +
          ' adversarial examples')
    print("This could take some time ...")

    # Instantiate a CW attack object
    cw = CarliniWagnerL2(model, sess=sess)

    if viz_enabled:
        # One representative test index per class for the visual grid.
        assert source_samples == nb_classes
        idxs = [np.where(np.argmax(y_test, axis=1) == i)[0][0]
                for i in range(nb_classes)]
    if targeted:
        if viz_enabled:
            # Initialize our array for grid visualization
            grid_shape = (nb_classes, nb_classes, img_rows, img_cols,
                          nchannels)
            grid_viz_data = np.zeros(grid_shape, dtype='f')

            adv_inputs = np.array(
                [[instance] * nb_classes for instance in x_test[idxs]],
                dtype=np.float32)
        else:
            adv_inputs = np.array(
                [[instance] * nb_classes for instance in
                 x_test[:source_samples]], dtype=np.float32)

        one_hot = np.zeros((nb_classes, nb_classes))
        one_hot[np.arange(nb_classes), np.arange(nb_classes)] = 1

        adv_inputs = adv_inputs.reshape(
            (source_samples * nb_classes, img_rows, img_cols, nchannels))
        adv_ys = np.array([one_hot] * source_samples,
                          dtype=np.float32).reshape((source_samples *
                                                     nb_classes, nb_classes))
        yname = "y_target"
    else:
        if viz_enabled:
            # Initialize our array for grid visualization
            grid_shape = (nb_classes, 2, img_rows, img_cols, nchannels)
            grid_viz_data = np.zeros(grid_shape, dtype='f')

            adv_inputs = x_test[idxs]
        else:
            adv_inputs = x_test[:source_samples]

        adv_ys = None
        yname = "y"

    if targeted:
        cw_params_batch_size = source_samples * nb_classes
    else:
        cw_params_batch_size = source_samples
    cw_params = {'binary_search_steps': 1,
                 yname: adv_ys,
                 'max_iterations': attack_iterations,
                 'learning_rate': CW_LEARNING_RATE,
                 'batch_size': cw_params_batch_size,
                 'initial_const': 10}

    adv = cw.generate_np(adv_inputs, **cw_params)

    eval_params = {'batch_size': np.minimum(nb_classes, source_samples)}
    if targeted:
        adv_accuracy = model_eval(
            sess, x, y, preds, adv, adv_ys, args=eval_params)
    else:
        if viz_enabled:
            err = model_eval(sess, x, y, preds, adv, y_test[idxs],
                             args=eval_params)
            adv_accuracy = 1 - err
        else:
            err = model_eval(sess, x, y, preds, adv, y_test[:source_samples],
                             args=eval_params)
            adv_accuracy = 1 - err

    if viz_enabled:
        for j in range(nb_classes):
            if targeted:
                for i in range(nb_classes):
                    grid_viz_data[i, j] = adv[i * nb_classes + j]
            else:
                grid_viz_data[j, 0] = adv_inputs[j]
                grid_viz_data[j, 1] = adv[j]

        print(grid_viz_data.shape)

    print('--------------------------------------')

    # Compute the number of adversarial examples that were successfully found
    print('Avg. rate of successful adv. examples {0:.4f}'.format(adv_accuracy))
    report.clean_train_adv_eval = 1. - adv_accuracy

    # Compute the average distortion introduced by the algorithm
    percent_perturbed = np.mean(np.sum((adv - adv_inputs)**2,
                                       axis=(1, 2, 3))**.5)
    print('Avg. L_2 norm of perturbations {0:.4f}'.format(percent_perturbed))

    # Close TF session
    sess.close()

    # Finally, block & display a grid of all the adversarial examples
    if viz_enabled:
        _ = grid_visual(grid_viz_data)

    return report
MNIST tutorial for Carlini and Wagner's attack :param train_start: index of first training set example :param train_end: index of last training set example :param test_start: index of first test set example :param test_end: index of last test set example :param viz_enabled: (boolean) activate plots of adversarial examples :param nb_epochs: number of epochs to train model :param batch_size: size of training batches :param nb_classes: number of output classes :param source_samples: number of test inputs to attack :param learning_rate: learning rate for training :param model_path: path to the model file :param targeted: should we run a targeted attack? or untargeted? :return: an AccuracyReport object
def create(gandi, resource, flags, algorithm, public_key):
    """Create DNSSEC key."""
    # Thin pass-through to the dnssec API on the supplied client.
    return gandi.dnssec.create(resource, flags, algorithm, public_key)
Create DNSSEC key.
def _get_front_idxs_from_id(fronts, id): """ Return a list of tuples of the form (frequency_idx, sample_idx), corresponding to all the indexes of the given front. """ if id == -1: # This is the only special case. # -1 is the index of the catch-all final column offset front. freq_idxs = np.arange(fronts.shape[0], dtype=np.int64) sample_idxs = np.ones(len(freq_idxs), dtype=np.int64) * (fronts.shape[1] - 1) else: freq_idxs, sample_idxs = np.where(fronts == id) return [(f, i) for f, i in zip(freq_idxs, sample_idxs)]
Return a list of tuples of the form (frequency_idx, sample_idx), corresponding to all the indexes of the given front.
def main(self):
    """Run the application.

    Sets up the signal handlers and the source and destination
    crashstorage systems at the threaded task manager.  That starts a
    flock of threads that are ready to shepherd crashes from the source
    to the destination.
    """
    self._setup_task_manager()
    self._setup_source_and_destination()
    # blocking_start only returns once the task manager is told to quit.
    self.task_manager.blocking_start(waiting_func=self.waiting_func)
    self.close()
    self.config.logger.info('done.')
this main routine sets up the signal handlers, the source and destination crashstorage systems at the threaded task manager. That starts a flock of threads that are ready to shepherd crashes from the source to the destination.
def p_bound_segments(self, p):
    """bound_segments : bound_segment FORWARD_SLASH bound_segments
                      | bound_segment"""
    # The docstring above is the PLY grammar rule.  p[1] holds the first
    # bound_segment; p[3], when present, holds the remaining segments.
    segments = p[1]
    if len(p) > 2:
        segments.extend(p[3])
    p[0] = segments
bound_segments : bound_segment FORWARD_SLASH bound_segments | bound_segment
def complete_token_filtered(aliases, prefix, expanded):
    """Find all starting matches in dictionary *aliases* that start with
    *prefix*, but filter out any matches already in *expanded*."""
    # The previous implementation ignored *expanded* entirely; honor the
    # documented contract by excluding already-expanded commands.
    return [cmd for cmd in aliases
            if cmd.startswith(prefix) and cmd not in expanded]
Find all starting matches in dictionary *aliases* that start with *prefix*, but filter out any matches already in *expanded*
def patch(self, request):
    """
    Update the status of a video.
    """
    required = ('edx_video_id', 'status')
    missing = [attr for attr in required if attr not in request.data]
    if missing:
        return Response(
            status=status.HTTP_400_BAD_REQUEST,
            data={'message': u'"{missing}" params must be specified.'.format(missing=' and '.join(missing))}
        )

    edx_video_id = request.data['edx_video_id']
    video_status = request.data['status']

    if video_status not in VALID_VIDEO_STATUSES:
        return Response(
            status=status.HTTP_400_BAD_REQUEST,
            data={'message': u'"{status}" is not a valid Video status.'.format(status=video_status)}
        )

    try:
        video = Video.objects.get(edx_video_id=edx_video_id)
    except Video.DoesNotExist:
        return Response(
            status=status.HTTP_400_BAD_REQUEST,
            data={
                'message': u'Video is not found for specified edx_video_id: {edx_video_id}'.format(
                    edx_video_id=edx_video_id
                )
            }
        )

    video.status = video_status
    video.save()
    return Response(status=status.HTTP_200_OK, data={})
Update the status of a video.
def _validate_edata(self, edata): """Validate edata argument of raise_exception_if method.""" # pylint: disable=R0916 if edata is None: return True if not (isinstance(edata, dict) or _isiterable(edata)): return False edata = [edata] if isinstance(edata, dict) else edata for edict in edata: if (not isinstance(edict, dict)) or ( isinstance(edict, dict) and ( ("field" not in edict) or ("field" in edict and (not isinstance(edict["field"], str))) or ("value" not in edict) ) ): return False return True
Validate edata argument of raise_exception_if method.
def major(self, major: int) -> None:
    """Set the major version number.

    :param major: Major version number property. Must be a non-negative
        integer.
    """
    # Raises if the value is negative before it is stored.
    self.filter_negatives(major)
    self._major = major
param major Major version number property. Must be a non-negative integer.
def configure_extensions(app):
    """Configure application extensions (db, assets, login manager,
    cache, migrations, debug toolbar) on the given Flask app.
    """
    db.init_app(app)
    app.wsgi_app = ProxyFix(app.wsgi_app)

    assets.init_app(app)
    for asset in bundles:
        # Python 3 compatibility: dict.iteritems() no longer exists;
        # .items() behaves identically here on both Python 2 and 3.
        for (name, bundle) in asset.items():
            assets.register(name, bundle)

    login_manager.login_view = 'frontend.login'
    login_manager.login_message_category = 'info'

    @login_manager.user_loader
    def load_user(id):
        return User.query.get(int(id))

    login_manager.init_app(app)

    cache.init_app(app)
    migrate.init_app(app, db)
    toolbar.init_app(app)
Configure application extensions
def _update_marshallers(self):
    """ Update the full marshaller list and other data structures.

    Makes a full list of both builtin and user marshallers and
    rebuilds internal data structures used for looking up which
    marshaller to use for reading/writing Python objects to/from
    file.

    Also checks for whether the required modules are present or not,
    loading the required modules (if not doing lazy loading), and
    whether the required modules are imported already or not.
    """
    # Combine all sets of marshallers, in priority order.
    self._marshallers = []
    for v in self._priority:
        if v == 'builtin':
            self._marshallers.extend(self._builtin_marshallers)
        elif v == 'plugin':
            self._marshallers.extend(self._plugin_marshallers)
        elif v == 'user':
            self._marshallers.extend(self._user_marshallers)
        else:
            raise ValueError('priority attribute has an illegal '
                             'element value.')

    # Determine whether the required modules are present, do module
    # loading, and determine whether the required modules are
    # imported.  Both flag lists are parallel to self._marshallers.
    self._has_required_modules = len(self._marshallers) * [False]
    self._imported_required_modules = \
        len(self._marshallers) * [False]
    for i, m in enumerate(self._marshallers):
        # Check if the required modules are here (importable without
        # actually importing them, via pkgutil).
        try:
            for name in m.required_parent_modules:
                if name not in sys.modules \
                        and pkgutil.find_loader(name) is None:
                    raise ImportError('module not present')
        except ImportError:
            self._has_required_modules[i] = False
        except:
            # Deliberate re-raise: only ImportError means "absent".
            raise
        else:
            self._has_required_modules[i] = True

        # Modules obviously can't be fully loaded if not all are
        # present.
        if not self._has_required_modules[i]:
            self._imported_required_modules[i] = False
            continue

        # Check if all modules are loaded or not, and load them if
        # doing lazy loading.
        try:
            for name in m.required_modules:
                if name not in sys.modules:
                    raise ImportError('module not loaded yet.')
        except ImportError:
            if self._lazy_loading:
                self._imported_required_modules[i] = False
            else:
                # Eager loading: import now; on failure mark the
                # marshaller unusable on both flags.
                success = self._import_marshaller_modules(m)
                self._has_required_modules[i] = success
                self._imported_required_modules[i] = success
        except:
            raise
        else:
            self._imported_required_modules[i] = True

    # Construct the dictionary to look up the appropriate marshaller
    # by type, the equivalent one to read data types given type
    # strings needs to be created from it (basically, we have to
    # make the key be the python_type_string from it), and the
    # equivalent one to read data types given MATLAB class strings
    # needs to be created from it (basically, we have to make the
    # key be the matlab_class from it).
    #
    # Marshallers earlier in the list have priority (means that the
    # builtins have the highest). Since the types can be specified
    # as strings as well, duplicates will be checked for by running
    # each type through str if it isn't str.
    types_as_str = set()
    self._types = dict()
    self._type_strings = dict()
    self._matlab_classes = dict()
    for i, m in enumerate(self._marshallers):
        # types (first marshaller claiming a type wins).
        for tp in m.types:
            if isinstance(tp, str):
                tp_as_str = tp
            else:
                tp_as_str = tp.__module__ + '.' + tp.__name__
            if tp_as_str not in types_as_str:
                self._types[tp_as_str] = i
                types_as_str.add(tp_as_str)
        # type strings
        for type_string in m.python_type_strings:
            if type_string not in self._type_strings:
                self._type_strings[type_string] = i
        # matlab classes.
        for matlab_class in m.matlab_classes:
            if matlab_class not in self._matlab_classes:
                self._matlab_classes[matlab_class] = i
Update the full marshaller list and other data structures. Makes a full list of both builtin and user marshallers and rebuilds internal data structures used for looking up which marshaller to use for reading/writing Python objects to/from file. Also checks for whether the required modules are present or not, loading the required modules (if not doing lazy loading), and whether the required modules are imported already or not.
def get_json(self, layer, where="1 = 1", fields=None, count_only=False, srid='4326'):
    """ Gets the JSON file from ArcGIS

    :param layer: layer identifier passed to ``_build_query_request``
    :param where: SQL-style filter clause (default matches all features)
    :param fields: iterable of field names to request; ``None`` (the
        default) sends an empty ``outFields``, exactly as the old
        ``fields=[]`` default did
    :param count_only: if True, ask the server for the feature count only
    :param srid: spatial reference id for returned geometry
    :return: the decoded JSON response (dict)
    """
    # ``fields`` previously defaulted to a mutable list ([]); use None
    # to avoid the shared-mutable-default pitfall.
    params = {
        'where': where,
        'outFields': ", ".join(fields or []),
        'returnGeometry': True,
        'outSR': srid,
        'f': "pjson",
        'orderByFields': self.object_id_field,
        'returnCountOnly': count_only,
    }
    if self.token:
        params['token'] = self.token
    if self.geom_type:
        params['geometryType'] = self.geom_type
    response = requests.get(self._build_query_request(layer), params=params)
    return response.json()
Gets the JSON file from ArcGIS
def tag_arxiv(line):
    """Tag arxiv report numbers

    We handle arXiv in 2 ways:
    * starting with arXiv:1022.1111
    * this format exactly 9999.9999

    We also format the output to the standard arxiv notation:
    * arXiv:2007.12.1111
    * arXiv:2007.12.1111v2
    """
    def _wrap(match):
        parts = match.groupdict()
        suffix = match.group('suffix')
        parts['suffix'] = ' ' + suffix if suffix else ''
        return (u'<cds.REPORTNUMBER>arXiv:%(year)s'
                u'%(month)s.%(num)s%(suffix)s'
                u'</cds.REPORTNUMBER>' % parts)

    # Apply the patterns in the same order as before: 5-digit variants
    # first so the shorter patterns cannot clobber them.
    for pattern in (re_arxiv_5digits, re_arxiv,
                    re_new_arxiv_5digits, re_new_arxiv):
        line = pattern.sub(_wrap, line)
    return line
Tag arxiv report numbers We handle arXiv in 2 ways: * starting with arXiv:1022.1111 * this format exactly 9999.9999 We also format the output to the standard arxiv notation: * arXiv:2007.12.1111 * arXiv:2007.12.1111v2
def get_datasets_in_nodes():
    """
    Get the node associated with each dataset. Some datasets will have
    an ambiguous node since they exist in more than one node; the last
    node checked (CWIC -> EE -> HDDS -> LPCS) wins in the mapping, and
    the overlap lists at the bottom identify the ambiguous ones.

    Writes the dataset -> node mapping to ``usgs/data/datasets.json``.
    """
    data_dir = os.path.join(scriptdir, "..", "usgs", "data")

    def _names(node):
        # BUG FIX: materialize as a list.  The previous map() iterators
        # were exhausted by the first dict comprehension (Python 3),
        # leaving both the later updates and the overlap lists empty.
        return [d["datasetName"] for d in api.datasets(None, node)['data']]

    cwic = _names(CWIC_LSI_EXPLORER_CATALOG_NODE)
    ee = _names(EARTH_EXPLORER_CATALOG_NODE)
    hdds = _names(HDDS_EXPLORER_CATALOG_NODE)
    lpcs = _names(LPCS_EXPLORER_CATALOG_NODE)

    # Create mapping from dataset to node; later updates win on collision.
    datasets = {}
    datasets.update({ds: "CWIC" for ds in cwic})
    datasets.update({ds: "EE" for ds in ee})
    datasets.update({ds: "HDDS" for ds in hdds})
    datasets.update({ds: "LPCS" for ds in lpcs})

    datasets_path = os.path.join(data_dir, "datasets.json")
    with open(datasets_path, "w") as f:
        f.write(json.dumps(datasets))

    # Find the datasets with ambiguous nodes
    cwic_ee = [ds for ds in cwic if ds in ee]
    cwic_hdds = [ds for ds in cwic if ds in hdds]
    cwic_lpcs = [ds for ds in cwic if ds in lpcs]
    ee_hdds = [ds for ds in ee if ds in hdds]
    ee_lpcs = [ds for ds in ee if ds in lpcs]
    hdds_lpcs = [ds for ds in hdds if ds in lpcs]
Get the node associated with each dataset. Some datasets will have an ambiguous node since they exist in more than one node.
def _analyze(self): """ Apply the filter to the log file """ for parsed_line in self.parsed_lines: if 'ip' in parsed_line: if parsed_line['ip'] in self.filter['ips']: self.noisy_logs.append(parsed_line) else: self.quiet_logs.append(parsed_line) else: self.quiet_logs.append(parsed_line)
Apply the filter to the log file
def contains_pt(self, pt):
    """Containment test: inside the second object but not the first."""
    first, second = self.objects
    # Short-circuit: the first object is only queried when the point is
    # inside the second one (preserves the original evaluation order).
    inside_second = second.contains_pt(pt)
    return inside_second and np.logical_not(first.contains_pt(pt))
Containment test.
def setattrs_from_paxos(self, paxos):
    """
    Registers changes of attribute value on Paxos instance.

    Copies each attribute named in ``self.paxos_variables`` from *paxos*
    onto ``self`` whenever the value differs (missing local attributes
    compare as ``None``), logging each change in verbose mode, and fires
    a single aggregated ``AttributesChanged`` event if anything changed.
    """
    changes = {}
    for name in self.paxos_variables:
        paxos_value = getattr(paxos, name)
        # Default None: an attribute not yet set locally counts as changed.
        if paxos_value != getattr(self, name, None):
            self.print_if_verbose("{} {}: {}".format(self.network_uid, name, paxos_value))
            changes[name] = paxos_value
            setattr(self, name, paxos_value)
    if changes:
        # One event for the whole batch rather than one per attribute.
        self.__trigger_event__(
            event_class=self.AttributesChanged,
            changes=changes
        )
Registers changes of attribute value on Paxos instance.
def _iter_step_func_decorators(self): """Find functions with step decorator in parsed file.""" for node in self.py_tree.find_all('def'): for decorator in node.decorators: if decorator.name.value == 'step': yield node, decorator break
Find functions with step decorator in parsed file.
def decode_bytes(byt, enc='utf-8'):
    """Best-effort conversion of *byt* to ``str``.

    Args:
        byt - bytes or string to convert.
        enc - encoding to use for decoding the byte string.

    Returns the decoded string; a diagnostic message when decoding
    fails; or *byt* unchanged when it is already a string.
    """
    try:
        return byt.decode(enc)
    except UnicodeDecodeError as err:
        return "Unable to decode message:\n{}\n{}".format(str(byt), err)
    except (AttributeError, UnicodeEncodeError):
        # str has no .decode in Python 3 (AttributeError); the
        # UnicodeEncodeError covers Python 2 unicode input.
        return byt
Given a string or bytes input, return a string. Args: byt - bytes or string enc - encoding to use for decoding the byte string.
def tab_complete(self):
    """Complete the current text when exactly one option remains.

    Appends the platform path separator to the completed option and
    hides the completer popup; does nothing for zero or many options.
    """
    options = self._complete_options()
    if len(options) != 1:
        return
    self.set_current_text(options[0] + os.sep)
    self.hide_completer()
If there is a single option available one tab completes the option.
def _advapi32_verify(certificate_or_public_key, signature, data, hash_algorithm, rsa_pss_padding=False):
    """
    Verifies an RSA, DSA or ECDSA signature via CryptoAPI

    :param certificate_or_public_key:
        A Certificate or PublicKey instance to verify the signature with

    :param signature:
        A byte string of the signature to verify

    :param data:
        A byte string of the data the signature is for

    :param hash_algorithm:
        A unicode string of "md5", "sha1", "sha256", "sha384", "sha512" or "raw"

    :param rsa_pss_padding:
        If PSS padding should be used for RSA keys

    :raises:
        oscrypto.errors.SignatureError - when the signature is determined to be invalid
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the OS crypto library
    """
    algo = certificate_or_public_key.algorithm

    # RSA-PSS: CryptoAPI cannot verify PSS directly here, so decrypt with
    # the raw public key and check the PSS padding manually.
    if algo == 'rsa' and rsa_pss_padding:
        hash_length = {
            'sha1': 20,
            'sha224': 28,
            'sha256': 32,
            'sha384': 48,
            'sha512': 64
        }.get(hash_algorithm, 0)
        decrypted_signature = raw_rsa_public_crypt(certificate_or_public_key, signature)
        key_size = certificate_or_public_key.bit_size
        if not verify_pss_padding(hash_algorithm, hash_length, key_size, data, decrypted_signature):
            raise SignatureError('Signature is invalid')
        return

    # "raw": the caller already hashed the data; strip PKCS#1 v1.5
    # padding and compare in constant time.
    if algo == 'rsa' and hash_algorithm == 'raw':
        padded_plaintext = raw_rsa_public_crypt(certificate_or_public_key, signature)
        try:
            plaintext = remove_pkcs1v15_signature_padding(certificate_or_public_key.byte_size, padded_plaintext)
            if not constant_compare(plaintext, data):
                raise ValueError()
        except (ValueError):
            raise SignatureError('Signature is invalid')
        return

    hash_handle = None

    try:
        alg_id = {
            'md5': Advapi32Const.CALG_MD5,
            'sha1': Advapi32Const.CALG_SHA1,
            'sha256': Advapi32Const.CALG_SHA_256,
            'sha384': Advapi32Const.CALG_SHA_384,
            'sha512': Advapi32Const.CALG_SHA_512,
        }[hash_algorithm]
        hash_handle_pointer = new(advapi32, 'HCRYPTHASH *')
        res = advapi32.CryptCreateHash(
            certificate_or_public_key.context_handle,
            alg_id,
            null(),
            0,
            hash_handle_pointer
        )
        handle_error(res)
        hash_handle = unwrap(hash_handle_pointer)

        res = advapi32.CryptHashData(hash_handle, data, len(data), 0)
        handle_error(res)

        if algo == 'dsa':
            # Windows doesn't use the ASN.1 Sequence for DSA signatures,
            # so we have to convert it here for the verification to work
            try:
                signature = algos.DSASignature.load(signature).to_p1363()
                # Switch the two integers so that the reversal later will
                # result in the correct order
                half_len = len(signature) // 2
                signature = signature[half_len:] + signature[:half_len]
            except (ValueError, OverflowError, TypeError):
                raise SignatureError('Signature is invalid')

        # The CryptoAPI expects signatures to be in little endian byte order,
        # which is the opposite of other systems, so we must reverse it
        reversed_signature = signature[::-1]

        res = advapi32.CryptVerifySignatureW(
            hash_handle,
            reversed_signature,
            len(signature),
            certificate_or_public_key.key_handle,
            null(),
            0
        )
        handle_error(res)

    finally:
        # Always release the CryptoAPI hash handle, even on error.
        if hash_handle:
            advapi32.CryptDestroyHash(hash_handle)
Verifies an RSA, DSA or ECDSA signature via CryptoAPI :param certificate_or_public_key: A Certificate or PublicKey instance to verify the signature with :param signature: A byte string of the signature to verify :param data: A byte string of the data the signature is for :param hash_algorithm: A unicode string of "md5", "sha1", "sha256", "sha384", "sha512" or "raw" :param rsa_pss_padding: If PSS padding should be used for RSA keys :raises: oscrypto.errors.SignatureError - when the signature is determined to be invalid ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library
def figure(path, display=False, close=True):
    """Context manager that saves the current matplotlib figure to *path*.

    Usage::

        with litus.figure("some_test.png") as f:
            plt.plot(x, np.cos(x))      # drawn into this figure

    Contexts can be nested or interleaved; re-entering a context plots
    to that figure again, and the yielded object is the figure itself
    (e.g. ``f.set_tight_layout(True)``).

    For interactive consoles, pass ``display=True`` to show the figure,
    and ``close=False`` to keep it open after the block so it can be
    activated again later.
    """
    return Figure(path, display=display, close=close)
Can be used with the **with** statement:: import litus import numpy as np import matplotlib.pylab as plt x = np.arange(0,10,0.1) with litus.figure("some_test.png") as f: plt.plot(x,np.cos(x)) # plots to a first plot with litus.figure("some_other_test.png"): plt.plot(-1*np.array(x)) # plots to a second plot plt.plot(x,np.sin(x)) # plots to the first plot again f.set_tight_layout(True) # using the figure object Or if they are to be used in an interactive console:: import litus import numpy as np import matplotlib.pylab as plt x = np.arange(0,10,0.1) with litus.figure("some_test.png",display=True): plt.plot(x,np.cos(x)) # plots to a first plot with litus.figure("some_other_test.png",close=False): plt.plot(-1*np.array(x)) # plots to a second plot plt.plot(x,np.sin(x)) # plots to the first plot again Both of these figures will be displayed, but the second one will remain open and can be activated again.
def secure(func_or_obj, check_permissions_for_obj=None):
    """
    This method secures a method or class depending on invocation.

    To decorate a method use one argument:
        @secure(<check_permissions_method>)

    To secure a class, invoke with two arguments:
        secure(<obj instance>, <check_permissions_method>)
    """
    # One-argument decorator form: the argument itself is the checker.
    if _allowed_check_permissions_types(func_or_obj):
        return _secure_method(func_or_obj)

    # Two-argument form: the second argument must be a valid checker.
    if not _allowed_check_permissions_types(check_permissions_for_obj):
        raise TypeError("When securing an object, secure() requires the "
                        "second argument to be method")
    return _SecuredAttribute(func_or_obj, check_permissions_for_obj)
This method secures a method or class depending on invocation. To decorate a method use one argument: @secure(<check_permissions_method>) To secure a class, invoke with two arguments: secure(<obj instance>, <check_permissions_method>)
def general_settings():
    """Apply this project's default matplotlib rc settings (small fonts,
    sans-serif math, no TeX rendering)."""
    import matplotlib as mpl
    import matplotlib.pyplot as plt

    mpl.rcParams.update({
        'font.size': 7.0,
        'axes.labelsize': 7.0,
        'xtick.labelsize': 7.0,
        'ytick.labelsize': 7.0,
        'lines.linewidth': 1.5,
        'lines.markeredgewidth': 3.0,
        'lines.markersize': 3.0,
        'mathtext.default': 'regular',
        'text.usetex': False,
    })
    # LaTeX preamble (used only if usetex is later enabled): force a
    # sans-serif default family.
    mpl.rc(
        'text.latex',
        preamble=(
            r'\usepackage{sfmath} \renewcommand{\rmfamily}{\sffamily}'
            r'\renewcommand\familydefault{\sfdefault} '
        ),
    )
general settings
def _commit_change(alias_table, export_path=None, post_commit=True):
    """ Record changes to the alias table.
    Also write new alias config hash and collided alias, if any.

    Args:
        alias_table: The alias table to commit.
        export_path: The path to export the aliases to. Default: GLOBAL_ALIAS_PATH.
        post_commit: True if we want to perform some extra actions after writing alias to file.
    """
    # 'w+' so the freshly-written file can be re-read below for hashing.
    with open(export_path or GLOBAL_ALIAS_PATH, 'w+') as alias_config_file:
        alias_table.write(alias_config_file)
        if post_commit:
            # Rewind and hash exactly the bytes that were written.
            alias_config_file.seek(0)
            alias_config_hash = hashlib.sha1(alias_config_file.read().encode('utf-8')).hexdigest()
            AliasManager.write_alias_config_hash(alias_config_hash)
            collided_alias = AliasManager.build_collision_table(alias_table.sections())
            AliasManager.write_collided_alias(collided_alias)
            build_tab_completion_table(alias_table)
Record changes to the alias table. Also write new alias config hash and collided alias, if any. Args: alias_table: The alias table to commit. export_path: The path to export the aliases to. Default: GLOBAL_ALIAS_PATH. post_commit: True if we want to perform some extra actions after writing alias to file.
def create_token(self, user_id, permission_obj):
    """ Add a permission to a Portals user and return the JSON reply.

    'permission_obj' param should be a string.
    e.g. '[{"access":"d_u_list","oid":{"id":"1576946496","type":"Domain"}}]'
    http://docs.exosite.com/portals/#add-user-permission

    On a non-200 response, prints a diagnostic and raises via
    ``Response.raise_for_status()``.
    """
    headers = {
        'User-Agent': self.user_agent(),
        'Content-Type': self.content_type()
    }
    # Merge in the instance-wide default headers (auth tokens, etc.).
    headers.update(self.headers())
    url = self.portals_url()+'/users/{0}/permissions'.format(user_id)
    # print("URL: {0}".format(url))
    r = requests.post(
        url,
        data=permission_obj,
        headers=headers,
        auth=self.auth())
    if HTTP_STATUS.OK == r.status_code:
        return r.json()
    else:
        print("create_token: Something went wrong: <{0}>: {1}".format(
            r.status_code, r.reason))
        r.raise_for_status()
'permission_obj' param should be a string. e.g. '[{"access":"d_u_list","oid":{"id":"1576946496","type":"Domain"}}]' http://docs.exosite.com/portals/#add-user-permission
def has_pfn(self, url, site=None):
    """
    Wrapper of the pegasus hasPFN function, that allows it to be
    called outside of specific pegasus functions.

    Builds a ``dax.PFN`` for *url* (optionally scoped to *site*) and
    delegates the membership test to ``hasPFN``.
    """
    return self.hasPFN(dax.PFN(url, site))
Wrapper of the pegasus hasPFN function, that allows it to be called outside of specific pegasus functions.
def simple_value(self, elt, ps, mixed=False):
    '''Get the value of the simple content of this element.

    Parameters:
        elt -- the DOM element being parsed
        ps -- the ParsedSoap object.
        mixed -- ignore element content, optional text node
    '''
    if not _valid_encoding(elt):
        raise EvaluateException('Invalid encoding', ps.Backtrace(elt))
    c = _children(elt)
    # NOTE(review): both checks below are assumed to be guarded by
    # ``mixed is False`` (matches upstream ZSI) — the flattened source
    # leaves the nesting ambiguous; confirm against the original file.
    if mixed is False:
        if len(c) == 0:
            raise EvaluateException('Value missing', ps.Backtrace(elt))
        for c_elt in c:
            if c_elt.nodeType == _Node.ELEMENT_NODE:
                raise EvaluateException('Sub-elements in value',
                                        ps.Backtrace(c_elt))
    # It *seems* to be consensus that ignoring comments and
    # concatenating the text nodes is the right thing to do.
    return ''.join([E.nodeValue for E in c
                    if E.nodeType in [_Node.TEXT_NODE,
                                      _Node.CDATA_SECTION_NODE]])
Get the value of the simple content of this element. Parameters: elt -- the DOM element being parsed ps -- the ParsedSoap object. mixed -- ignore element content, optional text node
def add_one(self, url: str, url_properties: Optional[URLProperties]=None,
            url_data: Optional[URLData]=None):
    '''Add a single URL to the table.

    Args:
        url: The URL to be added
        url_properties: Additional values to be saved
        url_data: Additional data to be saved
    '''
    # Delegate to the batch path with a one-element batch.
    record = AddURLInfo(url, url_properties, url_data)
    self.add_many([record])
Add a single URL to the table. Args: url: The URL to be added url_properties: Additional values to be saved url_data: Additional data to be saved
def plot_prob_profit_trade(round_trips, ax=None):
    """
    Plots a probability distribution for the event of making
    a profitable trade.

    Parameters
    ----------
    round_trips : pd.DataFrame
        DataFrame with one row per round trip trade.
        - See full explanation in round_trips.extract_round_trips
    ax : matplotlib.Axes, optional
        Axes upon which to plot.

    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """
    probs = np.linspace(0, 1., 500)
    # Beta posterior over win probability: alpha = #wins, beta = #losses.
    round_trips['profitable'] = round_trips.pnl > 0
    belief = sp.stats.beta(round_trips.profitable.sum(),
                           (~round_trips.profitable).sum())
    density = belief.pdf(probs)

    if ax is None:
        ax = plt.subplot()
    ax.plot(probs, density)
    # Mark the central 95% credible interval.
    ax.axvline(belief.ppf(.025), color='0.5')
    ax.axvline(belief.ppf(.975), color='0.5')
    ax.set_xlabel('Probability of making a profitable decision')
    ax.set_ylabel('Belief')
    # Zoom the x-axis to where essentially all the mass lies.
    ax.set_xlim(belief.ppf(.001), belief.ppf(.999))
    ax.set_ylim((0, density.max() + 1.))
    return ax
Plots a probability distribution for the event of making a profitable trade. Parameters ---------- round_trips : pd.DataFrame DataFrame with one row per round trip trade. - See full explanation in round_trips.extract_round_trips ax : matplotlib.Axes, optional Axes upon which to plot. Returns ------- ax : matplotlib.Axes The axes that were plotted on.
def is_prev_free(self):
    """
    Concretized PREV_INUSE check for this chunk.

    Loads the size field, masks out the P flag, and reports the previous
    chunk as free when the flag is clear.  A symbolic flag with multiple
    solutions triggers a warning and the minimum solution is assumed
    (i.e. treated as free).

    :returns: True if the previous chunk is free; False otherwise
    """
    size_field = self.state.memory.load(
        self.base + self._chunk_size_t_size, self._chunk_size_t_size)
    flag = size_field & CHUNK_P_MASK

    def sym_flag_handler(flag):
        l.warning("A chunk's P flag is symbolic; assuming it is not set")
        return self.state.solver.min_int(flag)

    flag = concretize(flag, self.state.solver, sym_flag_handler)
    return not flag
Returns a concrete state of the flag indicating whether the previous chunk is free or not. Issues a warning if that flag is symbolic and has multiple solutions, and then assumes that the previous chunk is free. :returns: True if the previous chunk is free; False otherwise
def transform(self, data, test=False):
    '''Transform image data to latent space.

    Parameters
    ----------
    data : array-like shape (n_images, image_width, image_height, n_colors)
        Input numpy array of images, or a ``Variable`` wrapping one
        (the wrapped array is normalized in place, as before).
    test [optional] : bool
        Controls the test boolean for batch normalization.

    Returns
    -------
    latent_vec : array-like shape (n_images, latent_width)
    '''
    def _normalized(arr):
        # Promote a single image to a 1-image batch, require 4d, and
        # reorder NHWC -> NCHW when the channel axis arrives last.
        if len(arr.shape) < 4:
            arr = arr[np.newaxis]
        if len(arr.shape) != 4:
            raise TypeError("Invalid dimensions for image data. Dim = %s. "
                            "Must be 4d array." % str(arr.shape))
        if arr.shape[1] != self.color_channels:
            if arr.shape[-1] != self.color_channels:
                # BUG FIX: this error previously formatted ``data.shape``
                # even for Variable input, which is wrong (the array lives
                # in ``data.data``); always report the array's shape.
                raise TypeError("Invalid dimensions for image data. Dim = %s"
                                % str(arr.shape))
            arr = arr.transpose(0, 3, 1, 2)
        return arr

    if not type(data) == Variable:
        data = Variable(_normalized(data))
    else:
        # Keep the caller's Variable object; normalize its payload in place
        # (preserves the original in-place mutation behavior).
        data.data = _normalized(data.data)

    # Actual transformation.
    if self.flag_gpu:
        data.to_gpu()
    z = self._encode(data, test=test)[0]
    z.to_cpu()
    return z.data
Transform image data to latent space. Parameters ---------- data : array-like shape (n_images, image_width, image_height, n_colors) Input numpy array of images. test [optional] : bool Controls the test boolean for batch normalization. Returns ------- latent_vec : array-like shape (n_images, latent_width)
def sort_values(
    self,
    by,
    axis=0,
    ascending=True,
    inplace=False,
    kind="quicksort",
    na_position="last",
):
    """Sorts by a column/row or list of columns/rows.

    Args:
        by: A list of labels for the axis to sort over.
        axis: The axis to sort (0: sort rows by column values,
            1: sort columns by row values).
        ascending: Sort in ascending or descending order.
        inplace: If true, do the operation inplace.
        kind: How to sort.
        na_position: Where to put np.nan values.

    Returns:
        A sorted DataFrame.
    """
    axis = self._get_axis_number(axis)
    if not is_list_like(by):
        by = [by]
    # Currently, sort_values will just reindex based on the sorted values.
    # TODO create a more efficient way to sort
    if axis == 0:
        # Materialize only the 'by' columns as a pandas frame, sort
        # there, and reindex this frame to the resulting row order.
        broadcast_value_dict = {col: self[col] for col in by}
        broadcast_values = pandas.DataFrame(broadcast_value_dict, index=self.index)
        new_index = broadcast_values.sort_values(
            by=by,
            axis=axis,
            ascending=ascending,
            kind=kind,
            na_position=na_position,
        ).index
        # NOTE(review): with inplace=True this only skips the copy in
        # reindex — the result is still *returned*, not assigned back;
        # confirm that is the intended contract.
        return self.reindex(index=new_index, copy=not inplace)
    else:
        # Row-wise: extract each 'by' row as pandas, sort, and reorder
        # columns to match the sorted result.
        broadcast_value_list = [
            self[row :: len(self.index)]._to_pandas() for row in by
        ]
        index_builder = list(zip(broadcast_value_list, by))
        broadcast_values = pandas.concat(
            [row for row, idx in index_builder], copy=False
        )
        broadcast_values.columns = self.columns
        new_columns = broadcast_values.sort_values(
            by=by,
            axis=axis,
            ascending=ascending,
            kind=kind,
            na_position=na_position,
        ).columns
        return self.reindex(columns=new_columns, copy=not inplace)
Sorts by a column/row or list of columns/rows. Args: by: A list of labels for the axis to sort over. axis: The axis to sort. ascending: Sort in ascending or descending order. inplace: If true, do the operation inplace. kind: How to sort. na_position: Where to put np.nan values. Returns: A sorted DataFrame.
def parse(self, args):
    """
    Normalize *args* into a list of argument strings.

    :param args: ``None`` (fall back to ``self._default_args``), a
        shell-style string (split with ``shlex``), or an already-split
        list of strings.
    :return: formatted arguments as a list of strings
    """
    if args is None:
        args = self._default_args
    # A string (including a string default) gets shell-style splitting.
    if isinstance(args, six.string_types):
        return shlex.split(args)
    return args
:param args: arguments :type args: None or string or list of string :return: formatted arguments if specified else ``self.default_args`` :rtype: list of string
def escape(self, text):
    """Replace characters with their character references.

    Named entity references are used where available; other non-ASCII
    characters fall back to numerical character references, so the
    result is guaranteed to be pure ASCII (returned encoded).
    """
    as_text = compat.text_type(text)
    escaped = self.__escapable.sub(self.__escape, as_text)
    return escaped.encode('ascii')
Replace characters with their character references. Replace characters by their named entity references. Non-ASCII characters, if they do not have a named entity reference, are replaced by numerical character references. The return value is guaranteed to be ASCII.
def close_session(self):
    """ Close tensorflow session. Exposes for memory management.

    Closes the session inside this model's graph context and drops the
    reference so the session (and its GPU/host memory) can be reclaimed.
    """
    with self._graph.as_default():
        self._sess.close()
        self._sess = None
Close tensorflow session. Exposes for memory management.
def targetwords(self, index, targetwords, alignment):
    """Return the aligned targetwords for a specified index in the
    source words (alignment maps a source index to target positions)."""
    aligned_positions = alignment[index]
    return [targetwords[pos] for pos in aligned_positions]
Return the aligned targetwords for a specified index in the source words
def deploy(self):
    '''
    Creates a link at the original path of this target.

    Ensures the containing directory exists, then links the vaulted
    copy (``self.vault_path``) back to ``self.real_path``.
    '''
    # NOTE(review): ``self.path`` is presumably the parent directory of
    # ``self.real_path`` — confirm against the class definition.
    if not os.path.exists(self.path):
        makedirs(self.path)
    link(self.vault_path, self.real_path)
Creates a link at the original path of this target
def intersection_box(box1, box2):
    """
    Finds an intersection box that is common to both given boxes.

    :param box1: Box object 1
    :param box2: Box object 2
    :return: a Box covering the overlapping region.  When the boxes do
        not overlap, the returned Box has zero width and/or height —
        note a Box is always returned, never None.
    """
    b1_x2, b1_y2 = box1.bottom_right()
    b2_x2, b2_y2 = box2.bottom_right()
    # Overlap's top-left is the max of the two top-lefts ...
    x, y = max(box1.x, box2.x), max(box1.y, box2.y)
    # ... and its bottom-right the min of the two bottom-rights.
    x2, y2 = min(b1_x2, b2_x2), min(b1_y2, b2_y2)
    # Clamp to zero so disjoint boxes yield an empty (zero-area) box.
    w, h = max(0, x2-x), max(0, y2-y)
    return Box(x, y, w, h)
Finds an intersection box that is common to both given boxes. :param box1: Box object 1 :param box2: Box object 2 :return: a Box covering the overlapping region; when the boxes do not intersect, a Box with zero width and/or height is returned
def ensure_parent_dir_exists(file_path):
    """Create the parent directory of *file_path* if it is missing.

    A bare filename (no directory component) is a no-op.
    """
    parent_dir = os.path.dirname(file_path)
    if not parent_dir:
        return
    os.makedirs(parent_dir, exist_ok=True)
Ensures that the parent directory exists
def ping_entry(self, entry):
    """
    Ping an entry to a directory.

    Tries the richer XML-RPC ``weblogUpdates.extendedPing`` first and
    falls back to the basic ``weblogUpdates.ping`` on any failure;
    returns the server's reply, or an error dict when both calls fail.
    """
    entry_url = '%s%s' % (self.ressources.site_url,
                          entry.get_absolute_url())
    categories = '|'.join([c.title for c in entry.categories.all()])

    try:
        reply = self.server.weblogUpdates.extendedPing(
            self.ressources.current_site.name,
            self.ressources.blog_url, entry_url,
            self.ressources.blog_feed, categories)
    except Exception:
        try:
            # Older directories only implement the basic ping form.
            reply = self.server.weblogUpdates.ping(
                self.ressources.current_site.name,
                self.ressources.blog_url, entry_url, categories)
        except Exception:
            reply = {'message': '%s is an invalid directory.' % self.server_name,
                     'flerror': True}
    return reply
Ping an entry to a directory.
def main() -> None:
    """
    Command-line handler for the ``pause_process_by_disk_space`` tool.
    Use the ``--help`` option for help.

    Polls free disk space on a path every ``--check_every`` seconds and
    SIGSTOPs / SIGCONTs the target process as free space crosses the
    configured low/high watermarks.  Linux only (shells out to
    ``kill -STOP`` / ``kill -CONT``).  Exits 0 when the target process
    terminates.
    """
    parser = ArgumentParser(
        description="Pauses and resumes a process by disk space; LINUX ONLY."
    )
    parser.add_argument(
        "process_id", type=int,
        help="Process ID."
    )
    parser.add_argument(
        "--path", required=True,
        help="Path to check free space for (e.g. '/')"
    )
    parser.add_argument(
        "--pause_when_free_below", type=str, required=True,
        help="Pause process when free disk space below this value (in bytes "
             "or as e.g. '50G')"
    )
    parser.add_argument(
        "--resume_when_free_above", type=str, required=True,
        help="Resume process when free disk space above this value (in bytes "
             "or as e.g. '70G')"
    )
    parser.add_argument(
        "--check_every", type=int, required=True,
        help="Check every n seconds (where this is n)"
    )
    parser.add_argument(
        "--verbose", action="store_true",
        help="Verbose output"
    )
    args = parser.parse_args()
    main_only_quicksetup_rootlogger(
        level=logging.DEBUG if args.verbose else logging.INFO)

    # Human-readable sizes ('50G') are converted to bytes up front.
    minimum = human2bytes(args.pause_when_free_below)
    maximum = human2bytes(args.resume_when_free_above)
    path = args.path
    process_id = args.process_id
    period = args.check_every
    pause_args = ["kill", "-STOP", str(process_id)]
    resume_args = ["kill", "-CONT", str(process_id)]
    assert minimum < maximum, "Minimum must be less than maximum"
    log.info(
        "Starting: controlling process {proc}; "
        "checking disk space every {period} s; "
        "will pause when free space on {path} is less than {minimum} and "
        "resume when free space is at least {maximum}; "
        "pause command will be {pause}; "
        "resume command will be {resume}.".format(
            proc=process_id,
            period=period,
            path=path,
            minimum=sizeof_fmt(minimum),
            maximum=sizeof_fmt(maximum),
            pause=pause_args,
            resume=resume_args,
        ))
    log.debug("Presuming that the process is RUNNING to begin with.")
    # NOTE(review): the {}-style lazy log calls below assume a
    # brace-style logging adapter (e.g. cardinal_pythonlib's) — confirm;
    # the stdlib logger would not format these.
    paused = False
    while True:
        if not is_running(process_id):
            log.info("Process {} is no longer running", process_id)
            sys.exit(0)
        space = shutil.disk_usage(path).free
        log.debug("Disk space on {} is {}", path, sizeof_fmt(space))
        # Hysteresis: pause below the low watermark, resume only once
        # space recovers past the (higher) high watermark.
        if space < minimum and not paused:
            log.info("Disk space down to {}: pausing process {}",
                     sizeof_fmt(space), process_id)
            subprocess.check_call(pause_args)
            paused = True
        elif space >= maximum and paused:
            log.info("Disk space up to {}: resuming process {}",
                     sizeof_fmt(space), process_id)
            subprocess.check_call(resume_args)
            paused = False
        log.debug("Sleeping for {} seconds...", period)
        sleep(period)
Command-line handler for the ``pause_process_by_disk_space`` tool. Use the ``--help`` option for help.
def get_auth_token(self, user):
    """
    Returns the user's authentication token.

    The token serializes the user id together with a hash of the
    (already-hashed) stored password, so changing the password
    invalidates previously issued tokens.
    """
    data = [str(user.id), self.security.hashing_context.hash(encode_string(user._password))]
    return self.security.remember_token_serializer.dumps(data)
Returns the user's authentication token.
def register_actions(self, shortcut_manager):
    """Register callback methods for triggered actions

    :param rafcon.gui.shortcut_manager.ShortcutManager shortcut_manager: Shortcut Manager Object holding mappings
        between shortcuts and actions.
    """
    # (shortcut action, callback) pairs, registered in the order listed.
    # Most shortcuts are forwarded to the corresponding menu-bar handler via
    # call_action_callback; a few use dedicated callback methods directly.
    registrations = [
        ('save', partial(self.call_action_callback, "on_save_activate")),
        ('save_as', partial(self.call_action_callback, "on_save_as_activate")),
        ('save_as_copy', partial(self.call_action_callback, "on_save_as_copy_activate")),
        ('save_state_as', partial(self.call_action_callback, "on_save_selected_state_as_activate")),
        ('substitute_state', partial(self.call_action_callback, "on_substitute_selected_state_activate")),
        ('substitute_library_with_template',
         partial(self.call_action_callback, "on_substitute_library_with_template_activate")),
        ('open', partial(self.call_action_callback, "on_open_activate")),
        ('open_library_state_separately', self.on_open_library_state_separately_activate),
        ('new', partial(self.call_action_callback, "on_new_activate")),
        ('quit', partial(self.call_action_callback, "on_quit_activate")),
        ('is_start_state', partial(self.call_action_callback, "on_toggle_is_start_state_active")),
        ('transition_from_closest_sibling_state',
         partial(self.call_action_callback, "on_add_transitions_from_closest_sibling_state_active")),
        ('transition_to_closest_sibling_state',
         partial(self.call_action_callback, "on_add_transitions_to_closest_sibling_state_active")),
        ('transition_to_parent_state',
         partial(self.call_action_callback, "on_add_transitions_to_parent_state_active")),
        ('group', partial(self.call_action_callback, "on_group_states_activate")),
        ('ungroup', partial(self.call_action_callback, "on_ungroup_state_activate")),
        ('start', partial(self.call_action_callback, "on_start_activate")),
        ('start_from_selected', partial(self.call_action_callback, "on_start_from_selected_state_activate")),
        ('run_to_selected', partial(self.call_action_callback, "on_run_to_selected_state_activate")),
        ('stop', partial(self.call_action_callback, "on_stop_activate")),
        ('pause', partial(self.call_action_callback, "on_pause_activate")),
        ('step_mode', partial(self.call_action_callback, "on_step_mode_activate")),
        ('step', partial(self.call_action_callback, "on_step_into_activate")),
        ('backward_step', partial(self.call_action_callback, "on_backward_step_activate")),
        ('reload', partial(self.call_action_callback, "on_refresh_all_activate")),
        ('show_data_flows', self.show_data_flows_toggled_shortcut),
        ('show_data_values', self.show_data_values_toggled_shortcut),
        ('data_flow_mode', self.data_flow_mode_toggled_shortcut),
        ('show_aborted_preempted', self.show_aborted_preempted),
        ('fullscreen', self.on_toggle_full_screen_mode),
    ]
    for shortcut_action, callback in registrations:
        self.add_callback_to_shortcut_manager(shortcut_action, callback)
Register callback methods for triggered actions :param rafcon.gui.shortcut_manager.ShortcutManager shortcut_manager: Shortcut Manager Object holding mappings between shortcuts and actions.
def decode(self, data, password):
    """Decode an existing GNTP Registration message.

    :param string data: Message to decode
    """
    self.raw = gntp.shim.u(data)
    # Sections of a GNTP message are separated by blank lines (CRLF CRLF).
    sections = self.raw.split('\r\n\r\n')
    self.info = self._parse_info(self.raw)
    self._validate_password(password)
    # The first section holds the message headers.
    self.headers = self._parse_dict(sections[0])
    for section in sections[1:]:
        if not section.strip():
            continue
        notice = self._parse_dict(section)
        if notice.get('Notification-Name', False):
            self.notifications.append(notice)
        elif notice.get('Identifier', False):
            # Binary resource section (e.g. an icon), keyed by identifier.
            notice['Data'] = self._decode_binary(section, notice)
            self.resources[notice.get('Identifier')] = notice
Decode existing GNTP Registration message :param string data: Message to decode
def _get_audio_object_type(self, r): """Raises BitReaderError""" audioObjectType = r.bits(5) if audioObjectType == 31: audioObjectTypeExt = r.bits(6) audioObjectType = 32 + audioObjectTypeExt return audioObjectType
Raises BitReaderError
def create(self, customer_name, street, city, region, postal_code,
           iso_country, friendly_name=values.unset,
           emergency_enabled=values.unset, auto_correct_address=values.unset):
    """
    Create a new AddressInstance

    :param unicode customer_name: The name to associate with the new address
    :param unicode street: The number and street address of the new address
    :param unicode city: The city of the new address
    :param unicode region: The state or region of the new address
    :param unicode postal_code: The postal code of the new address
    :param unicode iso_country: The ISO country code of the new address
    :param unicode friendly_name: A string to describe the new resource
    :param bool emergency_enabled: Whether to enable emergency calling on the new address
    :param bool auto_correct_address: Whether we should automatically correct the address

    :returns: Newly created AddressInstance
    :rtype: twilio.rest.api.v2010.account.address.AddressInstance
    """
    # values.of() drops any unset optional parameters from the form body.
    form_data = values.of({
        'CustomerName': customer_name,
        'Street': street,
        'City': city,
        'Region': region,
        'PostalCode': postal_code,
        'IsoCountry': iso_country,
        'FriendlyName': friendly_name,
        'EmergencyEnabled': emergency_enabled,
        'AutoCorrectAddress': auto_correct_address,
    })
    payload = self._version.create('POST', self._uri, data=form_data)
    return AddressInstance(
        self._version,
        payload,
        account_sid=self._solution['account_sid'],
    )
Create a new AddressInstance :param unicode customer_name: The name to associate with the new address :param unicode street: The number and street address of the new address :param unicode city: The city of the new address :param unicode region: The state or region of the new address :param unicode postal_code: The postal code of the new address :param unicode iso_country: The ISO country code of the new address :param unicode friendly_name: A string to describe the new resource :param bool emergency_enabled: Whether to enable emergency calling on the new address :param bool auto_correct_address: Whether we should automatically correct the address :returns: Newly created AddressInstance :rtype: twilio.rest.api.v2010.account.address.AddressInstance
def get_all_network_interfaces(self, filters=None):
    """
    Retrieve all of the Elastic Network Interfaces (ENIs)
    associated with your account.

    :type filters: dict
    :param filters: Optional filters that can be used to limit
                    the results returned.  Filters are provided
                    in the form of a dictionary consisting of
                    filter names as the key and filter values
                    as the value.  The set of allowable filter
                    names/values is dependent on the request
                    being performed.  Check the EC2 API guide
                    for details.

    :rtype: list
    :return: A list of :class:`boto.ec2.networkinterface.NetworkInterface`
    """
    query_params = {}
    if filters:
        query_params = {}
        self.build_filter_params(query_params, filters)
    markers = [('item', NetworkInterface)]
    return self.get_list('DescribeNetworkInterfaces', query_params,
                         markers, verb='POST')
Retrieve all of the Elastic Network Interfaces (ENI's) associated with your account. :type filters: dict :param filters: Optional filters that can be used to limit the results returned. Filters are provided in the form of a dictionary consisting of filter names as the key and filter values as the value. The set of allowable filter names/values is dependent on the request being performed. Check the EC2 API guide for details. :rtype: list :return: A list of :class:`boto.ec2.networkinterface.NetworkInterface`
def iter(self, columnnames, order='', sort=True):
    """Return a tableiter object.

    :class:`tableiter` lets one iterate over a table by returning in each
    iteration step a reference table containing equal values for the given
    columns.
    By default a sort is done on the given columns to get the correct
    iteration order.

    `order`
      | 'ascending'  is iterate in ascending order (is the default).
      | 'descending' is iterate in descending order.

    `sort=False`
      do not sort (because table is already in correct order).

    For example, iterate by time through a measurementset table::

      t = table('3c343.MS')
      for ts in t.iter('TIME'):
          print ts.nrows()

    """
    # Imported locally to avoid a circular import at module load time.
    from .tableiter import tableiter as _tableiter
    return _tableiter(self, columnnames, order, sort)
Return a tableiter object. :class:`tableiter` lets one iterate over a table by returning in each iteration step a reference table containing equal values for the given columns. By default a sort is done on the given columns to get the correct iteration order. `order` | 'ascending' is iterate in ascending order (is the default). | 'descending' is iterate in descending order. `sort=False` do not sort (because table is already in correct order). For example, iterate by time through a measurementset table:: t = table('3c343.MS') for ts in t.iter('TIME'): print ts.nrows()
def list_records_for_build_config_set(id, page_size=200, page_index=0, sort="", q=""):
    """
    Get a list of BuildRecords for the given BuildConfigSetRecord
    """
    response = list_records_for_build_config_set_raw(id, page_size, page_index, sort, q)
    if not response:
        # Implicitly signal "no records" the same way the raw call did.
        return None
    return utils.format_json_list(response)
Get a list of BuildRecords for the given BuildConfigSetRecord
def install_agent(agent_key, agent_version=1):
    '''
    Function downloads Server Density installation agent, and installs sd-agent
    with agent_key. Optionally the agent_version would select the series to
    use (defaults on the v1 one).

    CLI Example:

    .. code-block:: bash

        salt '*' serverdensity_device.install_agent c2bbdd6689ff46282bdaa07555641498
        salt '*' serverdensity_device.install_agent c2bbdd6689ff46282bdaa07555641498 2
    '''
    work_dir = os.path.join(__opts__['cachedir'], 'tmp')
    if not os.path.isdir(work_dir):
        os.mkdir(work_dir)
    # Reserve a unique script path; close the handle immediately so the
    # shell commands below can write to and execute the file.
    install_file = tempfile.NamedTemporaryFile(dir=work_dir,
                                               suffix='.sh',
                                               delete=False)
    install_filename = install_file.name
    install_file.close()

    # The v1 and v2 agent series use different download URLs and different
    # Server Density authentication fields.
    account_field = 'account_url'
    url = 'https://www.serverdensity.com/downloads/agent-install.sh'
    if agent_version == 2:
        account_field = 'account_name'
        url = 'https://archive.serverdensity.com/agent-install.sh'

    account = get_sd_auth(account_field)

    # Download the installer script, make it executable, then run it.
    __salt__['cmd.run'](
        cmd='curl -L {0} -o {1}'.format(url, install_filename),
        cwd=work_dir
    )
    __salt__['cmd.run'](cmd='chmod +x {0}'.format(install_filename), cwd=work_dir)

    # BUG FIX: the command template did not reference the downloaded script
    # (the '{filename}' placeholder was missing even though
    # filename=install_filename was passed to .format()), so the installer
    # was never actually executed.
    return __salt__['cmd.run'](
        cmd='{filename} -a {account} -k {agent_key}'.format(
            filename=install_filename, account=account, agent_key=agent_key),
        cwd=work_dir
    )
Function downloads Server Density installation agent, and installs sd-agent with agent_key. Optionally the agent_version would select the series to use (defaults on the v1 one). CLI Example: .. code-block:: bash salt '*' serverdensity_device.install_agent c2bbdd6689ff46282bdaa07555641498 salt '*' serverdensity_device.install_agent c2bbdd6689ff46282bdaa07555641498 2
def police_priority_map_conform_map_pri7_conform(self, **kwargs):
    """Auto Generated Code

    Build the NETCONF payload setting map-pri7-conform on a
    police-priority-map and dispatch it via the callback.
    """
    config = ET.Element("config")
    priority_map = ET.SubElement(
        config, "police-priority-map",
        xmlns="urn:brocade.com:mgmt:brocade-policer")
    map_name = ET.SubElement(priority_map, "name")
    map_name.text = kwargs.pop('name')
    conform_node = ET.SubElement(priority_map, "conform")
    pri7_node = ET.SubElement(conform_node, "map-pri7-conform")
    pri7_node.text = kwargs.pop('map_pri7_conform')

    # Caller may supply an alternative dispatch callback.
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def human2bytes(s):
    """
    Convert a human-readable size string to a number of bytes.

    The string is a number followed by a single-letter binary suffix
    ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'); each step up is a
    factor of 1024. Fractional values such as '1.5G' are accepted
    (a generalization over the previous integer-only behaviour).

    :param s: size string, e.g. '100B', '1M', '1.5G'
    :return: size in bytes, as an int
    :raises ValueError: if the suffix or the numeric part is invalid
        (previously a bare AssertionError, which would be stripped
        under ``python -O``)

    >>> human2bytes('1M')
    1048576
    >>> human2bytes('1G')
    1073741824
    """
    symbols = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    letter = s[-1:].strip().upper()
    if letter not in symbols:
        raise ValueError("Bad size suffix in %r" % (s,))
    try:
        num = float(s[:-1])
    except ValueError:
        raise ValueError("Bad numeric part in %r" % (s,))
    # 'B' is index 0 -> multiplier 1; each later symbol is a further
    # factor of 1024 (1 << 10).
    return int(num * (1 << symbols.index(letter) * 10))
>>> human2bytes('1M') 1048576 >>> human2bytes('1G') 1073741824
def run(app=None, server='wsgiref', host='127.0.0.1', port=8080,
        interval=1, reloader=False, quiet=False, plugins=None,
        debug=None, **kargs):
    """ Start a server instance. This method blocks until the server terminates.

        :param app: WSGI application or target string supported by
               :func:`load_app`. (default: :func:`default_app`)
        :param server: Server adapter to use. See :data:`server_names` keys
               for valid names or pass a :class:`ServerAdapter` subclass.
               (default: `wsgiref`)
        :param host: Server address to bind to. Pass ``0.0.0.0`` to listen on
               all interfaces including the external one. (default: 127.0.0.1)
        :param port: Server port to bind to. Values below 1024 require root
               privileges. (default: 8080)
        :param reloader: Start auto-reloading server? (default: False)
        :param interval: Auto-reloader interval in seconds (default: 1)
        :param quiet: Suppress output to stdout and stderr? (default: False)
        :param options: Options passed to the server adapter.
     """
    if NORUN: return
    if reloader and not os.environ.get('BOTTLE_CHILD'):
        # Auto-reloader parent process: repeatedly spawn a child copy of
        # this script (marked via BOTTLE_CHILD) and restart it whenever the
        # child exits with status 3 (the "please reload" signal below).
        import subprocess
        lockfile = None
        try:
            fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
            os.close(fd)  # We only need this file to exist. We never write to it
            while os.path.exists(lockfile):
                args = [sys.executable] + sys.argv
                environ = os.environ.copy()
                environ['BOTTLE_CHILD'] = 'true'
                environ['BOTTLE_LOCKFILE'] = lockfile
                p = subprocess.Popen(args, env=environ)
                while p.poll() is None:  # Busy wait...
                    # Touching the lockfile tells the child's
                    # FileCheckerThread that the parent is still alive.
                    os.utime(lockfile, None)  # I am alive!
                    time.sleep(interval)
                if p.poll() != 3:
                    # Child terminated for a reason other than "reload":
                    # propagate its exit status and stop respawning.
                    if os.path.exists(lockfile): os.unlink(lockfile)
                    sys.exit(p.poll())
        except KeyboardInterrupt:
            pass
        finally:
            if os.path.exists(lockfile):
                os.unlink(lockfile)
        return
    try:
        if debug is not None: _debug(debug)
        app = app or default_app()
        # An app given as a string is resolved via load_app (module:target).
        if isinstance(app, basestring): app = load_app(app)
        if not callable(app):
            raise ValueError("Application is not callable: %r" % app)
        for plugin in plugins or []:
            if isinstance(plugin, basestring): plugin = load(plugin)
            app.install(plugin)
        # Resolve the server argument: adapter name -> registered adapter,
        # import string -> loaded object, class -> instantiated adapter.
        if server in server_names:
            server = server_names.get(server)
        if isinstance(server, basestring):
            server = load(server)
        if isinstance(server, type):
            server = server(host=host, port=port, **kargs)
        if not isinstance(server, ServerAdapter):
            raise ValueError("Unknown or unsupported server: %r" % server)
        server.quiet = server.quiet or quiet
        if not server.quiet:
            _stderr("Bottle v%s server starting up (using %s)...\n" % (__version__, repr(server)))
            _stderr("Listening on http://%s:%d/\n" % (server.host, server.port))
            _stderr("Hit Ctrl-C to quit.\n\n")
        if reloader:
            # Child process: watch the lockfile/module files and exit with
            # status 3 when a reload is needed (see parent loop above).
            lockfile = os.environ.get('BOTTLE_LOCKFILE')
            bgcheck = FileCheckerThread(lockfile, interval)
            with bgcheck:
                server.run(app)
            if bgcheck.status == 'reload':
                sys.exit(3)
        else:
            server.run(app)
    except KeyboardInterrupt:
        pass
    except (SystemExit, MemoryError):
        raise
    except:
        if not reloader: raise
        if not getattr(server, 'quiet', quiet): print_exc()
        # In reloader mode, crash-loop protection: wait, then ask the
        # parent to restart us via exit status 3.
        time.sleep(interval)
        sys.exit(3)
Start a server instance. This method blocks until the server terminates. :param app: WSGI application or target string supported by :func:`load_app`. (default: :func:`default_app`) :param server: Server adapter to use. See :data:`server_names` keys for valid names or pass a :class:`ServerAdapter` subclass. (default: `wsgiref`) :param host: Server address to bind to. Pass ``0.0.0.0`` to listen on all interfaces including the external one. (default: 127.0.0.1) :param port: Server port to bind to. Values below 1024 require root privileges. (default: 8080) :param reloader: Start auto-reloading server? (default: False) :param interval: Auto-reloader interval in seconds (default: 1) :param quiet: Suppress output to stdout and stderr? (default: False) :param options: Options passed to the server adapter.
def turn_physical_on(self, ro=None, vo=None):
    """
    NAME:

       turn_physical_on

    PURPOSE:

       turn on automatic returning of outputs in physical units

    INPUT:

       ro= reference distance (kpc)

       vo= reference velocity (km/s)

    OUTPUT:

       (none)

    HISTORY:

       2016-01-19 - Written - Bovy (UofT)

    """
    # Mark both scales as explicitly set; only overwrite the stored
    # reference values when new ones are supplied.
    self._roSet = True
    self._voSet = True
    if ro is not None:
        self._ro = ro
    if vo is not None:
        self._vo = vo
    return None
NAME: turn_physical_on PURPOSE: turn on automatic returning of outputs in physical units INPUT: ro= reference distance (kpc) vo= reference velocity (km/s) OUTPUT: (none) HISTORY: 2016-01-19 - Written - Bovy (UofT)