code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
system = self.system pos = [] names = [] pairs = '' size = jac.size diag = jac[0:size[0] ** 2:size[0] + 1] for idx in range(size[0]): if abs(diag[idx]) <= 1e-8: pos.append(idx) for idx in pos: names.append(system.varname.__dict__[name][idx]) if len(names) > 0: for i, j in zip(pos, names): pairs += '{0}: {1}\n'.format(i, j) logger.debug('Jacobian diagonal check:') logger.debug(pairs)
def check_diag(self, jac, name)
Check matrix ``jac`` for diagonal elements that equals 0
3.840814
3.618305
1.061495
_, _, tb = sys.exc_info() while tb.tb_next: tb = tb.tb_next f = tb.tb_frame lineno = tb.tb_lineno co = f.f_code filefullpath = co.co_filename filename = os.path.basename(filefullpath) name = co.co_name linecache.checkcache(filefullpath) line = linecache.getline(filefullpath, lineno, f.f_globals) if line: line = line.strip() else: line = None return filefullpath, filename, lineno, name, line
def get_exception_source()
Returns full file path, file name, line number, function name, and line contents causing the last exception.
1.798808
1.702239
1.05673
# Function taken from Python 3.6 QueueHandler # The format operation gets traceback text into record.exc_text # (if there's exception data), and also puts the message into # record.message. We can then use this to replace the original # msg + args, as these might be unpickleable. We also zap the # exc_info attribute, as it's no longer needed and, if not None, # will typically not be pickleable. self.format(record) record.msg = record.message record.args = None record.exc_info = None return record
def prepare(self, record)
Prepares a record for queuing. The object returned by this method is enqueued. The base implementation formats the record to merge the message and arguments, and removes unpickleable items from the record in-place. You might want to override this method if you want to convert the record to a dict or JSON string, or send a modified copy of the record while leaving the original intact.
3.805721
4.265472
0.892216
try: self._callback(self.prepare(record)) except Exception: self.handleError(record)
def emit(self, record)
Send a LogRecord to the callback function, after preparing it for serialization.
4.681175
4.057303
1.153765
if status_code is not None: self._service_status = status_code self._utilization.update_status(status_code) # Check whether IDLE status should be delayed if self._service_status == CommonService.SERVICE_STATUS_IDLE: if self._status_idle_since is None: self._status_idle_since = time.time() return elif self._status_idle_since + 0.5 > time.time(): return else: self._status_idle_since = None new_status = self._service_status != self._service_status_announced if ( ( new_status or self._status_last_broadcast + self._status_interval <= time.time() ) and self._transport and self._transport.is_connected() ): self._service_status_announced = self._service_status self._transport.broadcast_status(self.get_status()) self._status_last_broadcast = time.time()
def update_status(self, status_code=None)
Update the service status kept inside the frontend (_service_status). The status is broadcast over the network immediately. If the status changes to IDLE then this message is delayed. The IDLE status is only broadcast if it is held for over 0.5 seconds. When the status does not change it is still broadcast every _status_interval seconds. :param status_code: Either an integer describing the service status (see workflows.services.common_service), or None if the status is unchanged.
3.103184
2.600452
1.193325
self.log.debug("Entered main loop") while not self.shutdown: # If no service is running slow down the main loop if not self._pipe_service: time.sleep(0.3) self.update_status() # While a service is running, check for incoming messages from that service if self._pipe_service and self._pipe_service.poll(1): try: message = self._pipe_service.recv() if isinstance(message, dict) and "band" in message: # only dictionaries with 'band' entry are valid messages try: handler = getattr(self, "parse_band_" + message["band"]) except AttributeError: handler = None self.log.warning("Unknown band %s", str(message["band"])) if handler: # try: handler(message) # except Exception: # print('Uh oh. What to do.') else: self.log.warning("Invalid message received %s", str(message)) except EOFError: # Service has gone away error_message = False if self._service_status == CommonService.SERVICE_STATUS_END: self.log.info("Service terminated") elif self._service_status == CommonService.SERVICE_STATUS_ERROR: error_message = "Service terminated with error code" elif self._service_status in ( CommonService.SERVICE_STATUS_NONE, CommonService.SERVICE_STATUS_NEW, CommonService.SERVICE_STATUS_STARTING, ): error_message = ( "Service may have died unexpectedly in " + "initialization (last known status: %s)" % CommonService.human_readable_state.get( self._service_status, self._service_status ) ) else: error_message = ( "Service may have died unexpectedly" " (last known status: %s)" % CommonService.human_readable_state.get( self._service_status, self._service_status ) ) if error_message: self.log.error(error_message) self._terminate_service() if self.restart_service: self.exponential_backoff() else: self.shutdown = True if error_message: raise workflows.Error(error_message) with self.__lock: if ( self._service is None and self.restart_service and self._service_factory ): self.update_status(status_code=CommonService.SERVICE_STATUS_NEW) self.switch_service() # Check that the transport is alive if not 
self._transport.is_connected(): self._terminate_service() raise workflows.Error("Lost transport layer connection") self.log.debug("Left main loop") self.update_status(status_code=CommonService.SERVICE_STATUS_TEARDOWN) self._terminate_service() self.log.debug("Terminating.")
def run(self)
The main loop of the frontend. Here incoming messages from the service are processed and forwarded to the corresponding callback methods.
3.421334
3.318307
1.031048
if self._pipe_commands: self._pipe_commands.send(command) else: if self.shutdown: # Stop delivering messages in shutdown. self.log.info( "During shutdown no command queue pipe found for command\n%s", str(command), ) else: self.log.error( "No command queue pipe found for command\n%s", str(command) )
def send_command(self, command)
Send command to service via the command queue.
5.774755
5.108583
1.130402
if not isinstance(message, dict): return relevant = False if "host" in message: # Filter by host if message["host"] != self.__hostid: return relevant = True if "service" in message: # Filter by service if message["service"] != self._service_class_name: return relevant = True if not relevant: # Ignore message unless at least one filter matches return if message.get("command"): self.log.info( "Received command '%s' via transport layer", message["command"] ) if message["command"] == "shutdown": self.shutdown = True else: self.log.warning("Received invalid transport command message")
def process_transport_command(self, header, message)
Parse a command coming in through the transport command subscription
3.682559
3.569442
1.03169
if "payload" in message and hasattr(message["payload"], "name"): record = message["payload"] for k in dir(record): if k.startswith("workflows_exc_"): setattr(record, k[14:], getattr(record, k)) delattr(record, k) for k, v in self.get_status().items(): setattr(record, "workflows_" + k, v) logging.getLogger(record.name).handle(record) else: self.log.warning( "Received broken record on log band\n" + "Message: %s\nRecord: %s", str(message), str( hasattr(message.get("payload"), "__dict__") and message["payload"].__dict__ ), )
def parse_band_log(self, message)
Process incoming logging messages from the service.
3.953272
3.730031
1.05985
self.log.debug("Service requests termination") self._terminate_service() if not self.restart_service: self.shutdown = True
def parse_band_request_termination(self, message)
Service declares it should be terminated.
9.052119
6.887255
1.314329
if message.get("name"): self._service_name = message["name"] else: self.log.warning( "Received broken record on set_name band\nMessage: %s", str(message) )
def parse_band_set_name(self, message)
Process incoming message indicating service name change.
8.013097
6.413738
1.249364
self.log.debug("Status update: " + str(message)) self.update_status(status_code=message["statuscode"])
def parse_band_status_update(self, message)
Process incoming status updates from the service.
5.797485
5.016924
1.155585
return { "host": self.__hostid, "status": self._service_status_announced, "statustext": CommonService.human_readable_state.get( self._service_status_announced ), "service": self._service_name, "serviceclass": self._service_class_name, "utilization": self._utilization.report(), "workflows": workflows.version(), }
def get_status(self)
Returns a dictionary containing all relevant status information to be broadcast across the network.
7.544106
6.873206
1.097611
last_service_switch = self._service_starttime if not last_service_switch: return time_since_last_switch = time.time() - last_service_switch if not self._service_rapidstarts: self._service_rapidstarts = 0 minimum_wait = 0.1 * (2 ** self._service_rapidstarts) minimum_wait = min(5, minimum_wait) if time_since_last_switch > 10: self._service_rapidstarts = 0 return self._service_rapidstarts += 1 self.log.debug("Slowing down service starts (%.1f seconds)", minimum_wait) time.sleep(minimum_wait)
def exponential_backoff(self)
A function that keeps waiting longer and longer the more rapidly it is called. It can be used to increasingly slow down service starts when they keep failing.
3.793165
3.375204
1.123833
if new_service: self._service_factory = new_service with self.__lock: # Terminate existing service if necessary if self._service is not None: self._terminate_service() # Find service class if necessary if isinstance(self._service_factory, basestring): self._service_factory = workflows.services.lookup(self._service_factory) if not self._service_factory: return False # Set up new service object service_instance = self._service_factory( environment=self._service_environment ) # Set up pipes and connect service object svc_commands, self._pipe_commands = multiprocessing.Pipe(False) self._pipe_service, svc_tofrontend = multiprocessing.Pipe(False) service_instance.connect(commands=svc_commands, frontend=svc_tofrontend) # Set up transport layer for new service service_instance.transport = self._transport_factory() # Start new service in a separate process self._service = multiprocessing.Process( target=service_instance.start, args=(), kwargs={"verbose_log": self._verbose_service}, ) self._service_name = service_instance.get_name() self._service_class_name = service_instance.__class__.__name__ self._service.daemon = True self._service.name = "workflows-service" self._service.start() self._service_starttime = time.time() # Starting the process copies all file descriptors. # At this point (and no sooner!) the passed pipe objects must be closed # in this process here. svc_commands.close() svc_tofrontend.close() self.log.info("Started service: %s", self._service_name) return True
def switch_service(self, new_service=None)
Start a new service in a subprocess. :param new_service: Either a service name or a service class. If not set, start up a new instance of the previous class :return: True on success, False on failure.
3.968127
3.952949
1.00384
with self.__lock: if self._service: self._service.terminate() if self._pipe_commands: self._pipe_commands.close() if self._pipe_service: self._pipe_service.close() self._pipe_commands = None self._pipe_service = None self._service_class_name = None self._service_name = None if self._service_status != CommonService.SERVICE_STATUS_TEARDOWN: self.update_status(status_code=CommonService.SERVICE_STATUS_END) if self._service: self._service.join() # must wait for process to be actually destroyed self._service = None
def _terminate_service(self)
Force termination of running service. Disconnect queues, end queue feeder threads. Wait for service process to clear, drop all references.
3.714316
3.462612
1.072692
if self.sparselib == 'umfpack': return umfpack.symbolic(A) elif self.sparselib == 'klu': return klu.symbolic(A)
def symbolic(self, A)
Return the symbolic factorization of sparse matrix ``A`` Parameters ---------- sparselib Library name in ``umfpack`` and ``klu`` A Sparse matrix Returns symbolic factorization -------
4.01716
2.44325
1.644187
if self.sparselib == 'umfpack': return umfpack.numeric(A, F) elif self.sparselib == 'klu': return klu.numeric(A, F)
def numeric(self, A, F)
Return the numeric factorization of sparse matrix ``A`` using symbolic factorization ``F`` Parameters ---------- A Sparse matrix F Symbolic factorization Returns ------- N Numeric factorization of ``A``
4.109475
4.670392
0.879899
if self.sparselib == 'umfpack': umfpack.solve(A, N, b) elif self.sparselib == 'klu': klu.solve(A, F, N, b)
def solve(self, A, F, N, b)
Solve linear system ``Ax = b`` using numeric factorization ``N`` and symbolic factorization ``F``. Store the solution in ``b``. Parameters ---------- A Sparse matrix F Symbolic factorization N Numeric factorization b RHS of the equation Returns ------- None
4.652089
4.839779
0.961219
if self.sparselib == 'umfpack': return umfpack.linsolve(A, b) elif self.sparselib == 'klu': return klu.linsolve(A, b)
def linsolve(self, A, b)
Solve linear equation set ``Ax = b`` and store the solutions in ``b``. Parameters ---------- A Sparse matrix b RHS of the equation Returns ------- None
3.531707
4.383871
0.805614
# Bus Pi if not self.n: return m = self.system.dae.m xy_idx = range(m, self.n + m) self.system.varname.append( listname='unamey', xy_idx=xy_idx, var_name='P', element_name=self.name) self.system.varname.append( listname='fnamey', xy_idx=xy_idx, var_name='P', element_name=self.name) # Bus Qi xy_idx = range(m + self.n, m + 2 * self.n) self.system.varname.append( listname='unamey', xy_idx=xy_idx, var_name='Q', element_name=self.name) self.system.varname.append( listname='fnamey', xy_idx=xy_idx, var_name='Q', element_name=self.name)
def _varname_inj(self)
Customize varname for bus injections
2.769399
2.610382
1.060917
if not self.system.pflow.config.flatstart: dae.y[self.a] = self.angle + 1e-10 * uniform(self.n) dae.y[self.v] = self.voltage else: dae.y[self.a] = matrix(0.0, (self.n, 1), 'd') + 1e-10 * uniform(self.n) dae.y[self.v] = matrix(1.0, (self.n, 1), 'd')
def init0(self, dae)
Set bus Va and Vm initial values
3.822448
3.505894
1.090292
if (not self.islanded_buses) and (not self.island_sets): return a, v = list(), list() # for islanded areas without a slack bus # TODO: fix for islanded sets without sw # for island in self.island_sets: # nosw = 1 # for item in self.system.SW.bus: # if self.uid[item] in island: # nosw = 0 # break # if nosw: # self.islanded_buses += island # self.island_sets.remove(island) a = self.islanded_buses v = [self.n + item for item in a] dae.g[a] = 0 dae.g[v] = 0
def gisland(self, dae)
Reset g(x) for islanded buses and areas
5.491147
4.363315
1.258481
if self.system.Bus.islanded_buses: a = self.system.Bus.islanded_buses v = [self.system.Bus.n + item for item in a] dae.set_jac(Gy, 1e-6, a, a) dae.set_jac(Gy, 1e-6, v, v)
def gyisland(self, dae)
Reset gy(x) for islanded buses and areas
5.918261
4.416303
1.340094
if not (self.islanded_buses and self.island_sets): return a, v = list(), list() # for islanded areas without a slack bus for island in self.island_sets: nosw = 1 for item in self.system.SW.bus: if self.uid[item] in island: nosw = 0 break if nosw: self.islanded_buses += island self.island_sets.remove(island) a = self.islanded_buses v = [self.n + item for item in a] dae.g[a] = 0 dae.g[v] = 0
def gisland(self, dae)
Reset g(x) for islanded buses and areas
6.00981
4.653182
1.291548
if not hasattr(get_known_transports, "cache"): setattr( get_known_transports, "cache", { e.name: e.load() for e in pkg_resources.iter_entry_points("workflows.transport") }, ) return get_known_transports.cache.copy()
def get_known_transports()
Return a dictionary of all known transport mechanisms.
3.032811
2.752984
1.101645
ws = [0] * self.n for i in range(self.n): q = ceil(t / self.dt[i]) q_prev = 0 if q == 0 else q - 1 r = t % self.dt[i] r = 0 if abs(r) < 1e-6 else r if r == 0: ws[i] = self.speed[i][q] else: t1 = self.time[i][q_prev] s1 = self.speed[i][q_prev] s2 = self.speed[i][q] ws[i] = s1 + (t - t1) * (s2 - s1) / self.dt[i] return matrix(ws)
def windspeed(self, t)
Return the wind speed list at time `t`
2.754999
2.677233
1.029047
self.vf0 = vf self.system.dae.y[self.vf] = matrix(vf)
def set_vf0(self, vf)
set value for self.vf0 and dae.y[self.vf]
12.209021
4.794827
2.54629
self.devman.sort_device() self.call.setup() self.model_setup() self.xy_addr0() self.dae.setup() self.to_sysbase() return self
def setup(self)
Set up the power system object by executing the following workflow: * Sort the loaded models to meet the initialization sequence * Create call strings for routines * Call the ``setup`` function of the loaded models * Assign addresses for the loaded models * Call ``dae.setup`` to assign memory for the numerical dae structure * Convert model parameters to the system base Returns ------- PowerSystem The instance of the PowerSystem
26.870178
12.049016
2.230072
if self.config.base: for item in self.devman.devices: self.__dict__[item].data_to_sys_base()
def to_sysbase(self)
Convert model parameters to system base. This function calls the ``data_to_sys_base`` function of the loaded models. Returns ------- None
14.215375
13.433491
1.058204
if self.config.base: for item in self.devman.devices: self.__dict__[item].data_to_elem_base()
def to_elembase(self)
Convert parameters back to element base. This function calls the ```data_to_elem_base``` function. Returns ------- None
16.015846
12.715166
1.259586
if not hasattr(self, name): self.__dict__[name] = Group(self, name) self.loaded_groups.append(name)
def group_add(self, name='Ungrouped')
Dynamically add a group instance to the system if not exist. Parameters ---------- name : str, optional ('Ungrouped' as default) Name of the group Returns ------- None
3.788669
4.381001
0.864795
# non-JIT models for file, pair in non_jits.items(): for cls, name in pair.items(): themodel = importlib.import_module('andes.models.' + file) theclass = getattr(themodel, cls) self.__dict__[name] = theclass(self, name) group = self.__dict__[name]._group self.group_add(group) self.__dict__[group].register_model(name) self.devman.register_device(name) # import JIT models for file, pair in jits.items(): for cls, name in pair.items(): self.__dict__[name] = JIT(self, file, cls, name)
def model_import(self)
Import and instantiate the non-JIT models and the JIT models. Models defined in ``jits`` and ``non_jits`` in ``models/__init__.py`` will be imported and instantiated accordingly. Returns ------- None
4.556047
4.114913
1.107204
for r in routines.__all__: file = importlib.import_module('.' + r.lower(), 'andes.routines') self.__dict__[r.lower()] = getattr(file, r)(self)
def routine_import(self)
Dynamically import routines as defined in ``routines/__init__.py``. The command-line argument ``--routine`` is defined in ``__cli__`` in each routine file. A routine instance will be stored in the system instance with the name being all lower case. For example, a routine for power flow study should be defined in ``routines/pflow.py`` where ``__cli__ = 'pflow'``. The class name for the routine should be ``Pflow``. The routine instance will be saved to ``PowerSystem.pflow``. Returns ------- None
5.07816
5.432455
0.934782
for device in self.devman.devices: if self.__dict__[device].n: try: self.__dict__[device].setup() except Exception as e: raise e
def model_setup(self)
Call the ``setup`` function of the loaded models. This function is to be called after parsing all the data files during the system set up. Returns ------- None
6.645863
7.352543
0.903886
for device, pflow in zip(self.devman.devices, self.call.pflow): if pflow: self.__dict__[device]._addr() self.__dict__[device]._intf_network() self.__dict__[device]._intf_ctrl() self.varname.resize() for device, pflow in zip(self.devman.devices, self.call.pflow): if pflow: self.__dict__[device]._varname()
def xy_addr0(self)
Assign indicies and variable names for variables used in power flow For each loaded model with the ``pflow`` flag as ``True``, the following functions are called sequentially: * ``_addr()`` * ``_intf_network()`` * ``_intf_ctrl()`` After resizing the ``varname`` instance, variable names from models are stored by calling ``_varname()`` Returns ------- None
5.765868
3.191754
1.806489
stagens = [] for device, stagen in zip(self.devman.devices, self.call.stagen): if stagen: stagens.append(device) for gen in idx: for stagen in stagens: if gen in self.__dict__[stagen].uid.keys(): self.__dict__[stagen].disable_gen(gen)
def rmgen(self, idx)
Remove the static generators if their dynamic models exist Parameters ---------- idx : list A list of static generator idx Returns ------- None
6.252291
6.254881
0.999586
ret = [] for model in self.__dict__['Event'].all_models: if self.__dict__[model].is_time(sim_time): ret.append(model) if self.Breaker.is_time(sim_time): ret.append('Breaker') return ret
def check_event(self, sim_time)
Check for event occurrance for``Event`` group models at ``sim_time`` Parameters ---------- sim_time : float The current simulation time Returns ------- list A list of model names who report (an) event(s) at ``sim_time``
5.194508
4.49989
1.154363
times = [] times.extend(self.Breaker.get_times()) for model in self.__dict__['Event'].all_models: times.extend(self.__dict__[model].get_times()) if times: times = sorted(list(set(times))) return times
def get_event_times(self)
Return event times of Fault, Breaker and other timed events Returns ------- list A sorted list of event times
5.840324
5.747899
1.01608
if conf_path is None: return conf = configparser.ConfigParser() conf.read(conf_path) self.config.load_config(conf) for r in routines.__all__: self.__dict__[r.lower()].config.load_config(conf) logger.debug('Loaded config file from {}.'.format(conf_path))
def load_config(self, conf_path)
Load config from an ``andes.conf`` file. This function creates a ``configparser.ConfigParser`` object to read the specified conf file and calls the ``load_config`` function of the config instances of the system and the routines. Parameters ---------- conf_path : None or str Path to the Andes config file. If ``None``, the function body will not run. Returns ------- None
4.210881
3.538634
1.189974
if os.path.isfile(file_path): logger.debug('File {} alreay exist. Overwrite? [y/N]'.format(file_path)) choice = input('File {} alreay exist. Overwrite? [y/N]'.format(file_path)).lower() if len(choice) == 0 or choice[0] != 'y': logger.info('File not overwritten.') return conf = self.config.dump_conf() for r in routines.__all__: conf = self.__dict__[r.lower()].config.dump_conf(conf) with open(file_path, 'w') as f: conf.write(f) logger.info('Config written to {}'.format(file_path))
def dump_config(self, file_path)
Dump system and routine configurations to an rc-formatted file. Parameters ---------- file_path : str path to the configuration file. The user will be prompted if the file already exists. Returns ------- None
3.102417
3.195565
0.970851
if not hasattr(self, 'Line'): logger.error('<Line> device not found.') return self.Line.connectivity(self.Bus) if show_info is True: if len(self.Bus.islanded_buses) == 0 and len( self.Bus.island_sets) == 0: logger.debug('System is interconnected.') else: logger.info( 'System contains {:d} islands and {:d} islanded buses.'. format( len(self.Bus.island_sets), len(self.Bus.islanded_buses))) nosw_island = [] # no slack bus island msw_island = [] # multiple slack bus island for idx, island in enumerate(self.Bus.island_sets): nosw = 1 for item in self.SW.bus: if self.Bus.uid[item] in island: nosw -= 1 if nosw == 1: nosw_island.append(idx) elif nosw < 0: msw_island.append(idx) if nosw_island: logger.warning( 'Slack bus is not defined for {:g} island(s).'.format( len(nosw_island))) if msw_island: logger.warning( 'Multiple slack buses are defined for {:g} island(s).'. format(len(nosw_island))) if (not nosw_island) and (not msw_island): logger.debug( 'Each island has a slack bus correctly defined.')
def check_islands(self, show_info=False)
Check the connectivity for the ac system Parameters ---------- show_info : bool Show information when the system has islands. To be used when initializing power flow. Returns ------- None
3.203117
3.055882
1.048181
if self.pflow.solved is False: logger.error('Power flow not solved when getting bus data.') return tuple([False] * 8) idx = self.Bus.idx names = self.Bus.name Vm = [self.dae.y[x] for x in self.Bus.v] if self.pflow.config.usedegree: Va = [self.dae.y[x] * rad2deg for x in self.Bus.a] else: Va = [self.dae.y[x] for x in self.Bus.a] Pg = [self.Bus.Pg[x] for x in range(self.Bus.n)] Qg = [self.Bus.Qg[x] for x in range(self.Bus.n)] Pl = [self.Bus.Pl[x] for x in range(self.Bus.n)] Ql = [self.Bus.Ql[x] for x in range(self.Bus.n)] if sort_names: ret = (list(x) for x in zip(*sorted( zip(idx, names, Vm, Va, Pg, Qg, Pl, Ql), key=itemgetter(0)))) else: ret = idx, names, Vm, Va, Pg, Qg, Pl, Ql return ret
def get_busdata(self, sort_names=False)
get ac bus data from solved power flow
2.576409
2.402849
1.072231
if not self.Node.n: return if not self.pflow.solved: logger.error('Power flow not solved when getting bus data.') return tuple([False] * 7) idx = self.Node.idx names = self.Node.name V = [self.dae.y[x] for x in self.Node.v] if sort_names: ret = (list(x) for x in zip(*sorted(zip(idx, names, V), key=itemgetter(0)))) else: ret = idx, names, V return ret
def get_nodedata(self, sort_names=False)
get dc node data from solved power flow
5.939427
4.916299
1.208109
if not self.pflow.solved: logger.error('Power flow not solved when getting line data.') return tuple([False] * 7) idx = self.Line.idx fr = self.Line.bus1 to = self.Line.bus2 Sloss = self.Line.S1 + self.Line.S2 Pfr = list(self.Line.S1.real()) Qfr = list(self.Line.S1.imag()) Pto = list(self.Line.S2.real()) Qto = list(self.Line.S2.imag()) Ploss = list(Sloss.real()) Qloss = list(Sloss.imag()) if sort_names: ret = (list(x) for x in zip(*sorted( zip(idx, fr, to, Pfr, Qfr, Pto, Qto, Ploss, Qloss), key=itemgetter(0)))) else: ret = idx, fr, to, Pfr, Qfr, Pto, Qto, Ploss, Qloss return ret
def get_linedata(self, sort_names=False)
get line data from solved power flow
2.961116
2.690799
1.10046
assert isinstance(model, str) if model not in self.all_models: self.all_models.append(model)
def register_model(self, model)
Register ``model`` to this group :param model: model name :return: None
3.642045
3.442065
1.058099
if idx is None: idx = model + '_' + str(len(self._idx_model)) self._idx_model[idx] = model self._idx.append(idx) return idx
def register_element(self, model, idx)
Register element with index ``idx`` to ``model`` :param model: model name :param idx: element idx :return: final element idx
4.149626
4.24026
0.978625
ret = [] scalar = False # TODO: ensure idx is unique in this Group if isinstance(idx, (int, float, str)): scalar = True idx = [idx] models = [self._idx_model[i] for i in idx] for i, m in zip(idx, models): ret.append(self.system.__dict__[m].get_field(field, idx=i)) if scalar is True: return ret[0] else: return ret
def get_field(self, field, idx)
Return the field ``field`` of elements ``idx`` in the group :param field: field name :param idx: element idx :return: values of the requested field
4.674031
4.724199
0.989381
if isinstance(idx, (int, float, str)): idx = [idx] if isinstance(value, (int, float)): value = [value] models = [self._idx_model[i] for i in idx] for i, m, v in zip(idx, models, value): assert hasattr(self.system.__dict__[m], field) uid = self.system.__dict__[m].get_uid(idx) self.system.__dict__[m].__dict__[field][uid] = v
def set_field(self, field, idx, value)
Set the field ``field`` of elements ``idx`` to ``value``. This function does not if the field is valid for all models. :param field: field name :param idx: element idx :param value: value of fields to set :return: None
3.495697
3.293758
1.06131
# Enumerate all known services known_services = workflows.services.get_known_services() # Set up parser parser = OptionParser( usage=program_name + " [options]" if program_name else None, version=version ) parser.add_option("-?", action="help", help=SUPPRESS_HELP) parser.add_option( "-s", "--service", dest="service", metavar="SVC", default=None, help="Name of the service to start. Known services: " + ", ".join(known_services), ) parser.add_option( "-t", "--transport", dest="transport", metavar="TRN", default="StompTransport", help="Transport mechanism. Known mechanisms: " + ", ".join(workflows.transport.get_known_transports()) + " (default: %default)", ) workflows.transport.add_command_line_options(parser) # Call on_parser_preparation hook parser = self.on_parser_preparation(parser) or parser # Parse command line options (options, args) = parser.parse_args(cmdline_args) # Call on_parsing hook (options, args) = self.on_parsing(options, args) or (options, args) # Create Transport factory transport_factory = workflows.transport.lookup(options.transport) # Call on_transport_factory_preparation hook transport_factory = ( self.on_transport_factory_preparation(transport_factory) or transport_factory ) # Set up on_transport_preparation hook to affect newly created transport objects true_transport_factory_call = transport_factory.__call__ def on_transport_preparation_hook(): transport_object = true_transport_factory_call() return self.on_transport_preparation(transport_object) or transport_object transport_factory.__call__ = on_transport_preparation_hook # When service name is specified, check if service exists or can be derived if options.service and options.service not in known_services: matching = [s for s in known_services if s.startswith(options.service)] if not matching: matching = [ s for s in known_services if s.lower().startswith(options.service.lower()) ] if matching and len(matching) == 1: options.service = matching[0] kwargs.update({"service": options.service, 
"transport": transport_factory}) # Call before_frontend_construction hook kwargs = self.before_frontend_construction(kwargs) or kwargs # Create Frontend object frontend = workflows.frontend.Frontend(**kwargs) # Call on_frontend_preparation hook frontend = self.on_frontend_preparation(frontend) or frontend # Start Frontend try: frontend.run() except KeyboardInterrupt: print("\nShutdown via Ctrl+C")
def run( self, cmdline_args=None, program_name="start_service", version=workflows.version(), **kwargs )
Example command line interface to start services. :param cmdline_args: List of command line arguments to pass to parser :param program_name: Name of the command line tool to display in help :param version: Version number to print when run with '--version'
2.494121
2.510787
0.993362
sort_direction = request.GET.get("dir") field_name = (request.GET.get("sort") or "") if sort_direction else "" sort_sign = "-" if sort_direction == "desc" else "" result_field = "{sign}{field}".format(sign=sort_sign, field=field_name) return result_field
def get_sort_field(request)
Retrieve field used for sorting a queryset :param request: HTTP request :return: the sorted field name, prefixed with "-" if ordering is descending
3.294733
3.169323
1.03957
def anchor(parser, token):
    """Parse a tag of the form '{% anchor field title %}'.

    Title may be a "string", _("trans string"), or variable; when omitted
    it defaults to the capitalized field name.
    """
    bits = list(token.split_contents())
    if len(bits) < 2:
        raise template.TemplateSyntaxError("anchor tag takes at least 1 argument.")
    title_is_var = False
    try:
        raw = bits[2]
        quote = raw[0]
        if quote in ('"', "'"):
            # Quoted literal: quotes must match.
            if quote != raw[-1]:
                raise template.TemplateSyntaxError(
                    'anchor tag title must be a "string", _("trans string"), or variable'
                )
            title = raw[1:-1]
        elif raw.startswith('_("') or raw.startswith("_('"):
            # Translated literal: strip the _(" ... ") wrapper and translate.
            title = _(raw[3:-2])
        else:
            # Anything else is treated as a template variable name.
            title = raw
            title_is_var = True
    except IndexError:
        # No title given: derive one from the field name.
        title = bits[1].capitalize()
    return SortAnchorNode(bits[1].strip(), title.strip(), title_is_var)
3.488387
2.746691
1.270033
def _fetch_http(self, url, params, force_get=False):
    """Standard HTTP request handler for this class with gzip and cookie
    support. This was separated out of :py:func:`MediaWiki.call` to make
    :py:func:`MediaWiki.normalize_api_url` useful.

    .. note:: This function should not be used. Use
       :py:func:`MediaWiki.call` instead.

    :param url: URL to send POST request to
    :param params: dictionary of query string parameters
    :param force_get: force a GET request instead of POST
    """
    # The API is always asked for JSON output.
    # NOTE: this mutates the caller-supplied params dict.
    params['format'] = 'json'
    if sys.version_info[0] == 3:
        # NOTE(review): presumably `urllib` is aliased to urllib.parse at
        # module level on Python 3 -- confirm against the file's imports.
        fixed = urllib.urlencode(tuple(params.items()))
    # urllib.urlencode (in Python 2) expects str objects, not unicode
    elif sys.version_info[0] == 2:
        fixed = urllib.urlencode(
            tuple((to_bytes(k), to_bytes(v)) for k, v in
                  params.items())).encode('utf-8')
    if force_get:
        # GET: put the encoded parameters in the query string.
        request = urllib2.Request(url + '?' + fixed)
    else:
        if sys.version_info[0] == 3:
            # Request bodies must be bytes on Python 3.
            fixed = bytearray(fixed, 'utf-8')
        request = urllib2.Request(url, fixed)
    if self._http_user is not None:
        # Preemptive HTTP Basic authentication.
        auth_str = '%s:%s' % (self._http_user, self._http_password)
        if sys.version_info[0] == 3:
            auth_str = bytearray(auth_str, 'utf-8')
        # NOTE(review): base64.encodestring is deprecated (removed in
        # Python 3.9; encodebytes is the replacement) -- kept as-is here.
        base64string = base64.encodestring(auth_str).replace('\n', '')
        request.add_header("Authorization", "Basic %s" % base64string)
    request.add_header('Accept-encoding', 'gzip')
    response = self._opener.open(request)
    if isinstance(self._cj, cookielib.FileCookieJar):
        # Persist cookies between sessions when a file-backed jar is used.
        self._cj.save()
    if response.headers.get('Content-Encoding') == 'gzip':
        # Transparently decompress gzip-encoded responses.
        compressed = StringIO(response.read())
        gzipper = gzip.GzipFile(fileobj=compressed)
        data = gzipper.read()
    else:
        data = response.read()
    if sys.version_info[0] == 3:
        # Decode using the charset advertised by the server, if any.
        encoding = response.info().get_content_charset() or "utf-8"
        data = data.decode(encoding)
    return data
2.075505
2.095695
0.990366
def call(self, params):
    """Make an API call to the wiki.

    *params* is a dictionary of query string arguments. For example, to
    get basic information about the wiki, run:

    >>> wiki.call({'action': 'query', 'meta': 'siteinfo'})

    :param params: dictionary of query string parameters
    :returns: dictionary containing API response
    """
    raw = self._fetch_http(self._api_url, params)
    return json.loads(raw)
9.788351
12.694012
0.7711
def normalize_api_url(self):
    """Check that the API URL used to initialize this object actually
    returns JSON; if it doesn't, make an educated guess and try to find
    the correct URL.

    :returns: a valid API URL or ``None``
    """
    def probe(candidate_url):
        # Fetch candidate_url and return the parsed JSON, or None if the
        # response is not valid JSON.
        raw = self._fetch_http(candidate_url,
                               {'action': 'query', 'meta': 'siteinfo'})
        try:
            return json.loads(raw)
        except ValueError:
            return None

    if probe(self._api_url):
        return self._api_url
    # The API endpoint usually lives next to index.php; try swapping it.
    if 'index.php' in self._api_url:
        guess = self._api_url.split('index.php')[0] + 'api.php'
        if probe(guess):
            self._api_url = guess
            return self._api_url
    return None
2.724831
2.658645
1.024895
def login(self, user, passwd):
    """Log into the wiki with username *user* and password *passwd*.

    :param user: username
    :param passwd: password
    :returns: ``True`` on successful login, otherwise ``False``
    """
    def attempt(token=None):
        payload = {'action': 'login', 'lgname': user, 'lgpassword': passwd}
        if token:
            payload['lgtoken'] = token
        outcome = self.call(payload)['login']
        if outcome['result'] == 'Success':
            # Force re-detection of apihighlimits on the next limits() call.
            self._high_limits = None
            return True
        if outcome['result'] == 'NeedToken' and not token:
            # Older login flow: retry exactly once with the issued token.
            return attempt(outcome['token'])
        return False

    return attempt()
3.435565
2.975946
1.154445
def limits(self, low, high):
    """Convenience function for determining appropriate limits in the API.

    If the (usually logged-in) client has the ``apihighlimits`` right,
    return *high*; otherwise return *low*. The rights query is cached on
    the instance.

    :param low: value to return if client does not have ``apihighlimits``
    :param high: value to return if client has ``apihighlimits``
    :returns: *low* or *high*
    """
    if self._high_limits is None:
        # One-off rights lookup; cache the boolean for subsequent calls.
        info = self.call({'action': 'query', 'meta': 'userinfo',
                          'uiprop': 'rights'})
        rights = info['query']['userinfo']['rights']
        self._high_limits = 'apihighlimits' in rights
    return high if self._high_limits else low
5.216236
3.690267
1.413512
def namespaces(self, psuedo=True):
    """Fetch (and cache) this wiki's namespaces as a dictionary of
    namespace IDs corresponding to namespace names.

    Negative IDs ("Special:" and "Media:") are psuedo-namespaces — they
    have no content associated with them — and are included only when
    *psuedo* is ``True``.

    :param psuedo: boolean to determine inclusion of psuedo-namespaces
    :returns: dictionary of namespace IDs and names
    """
    if self._namespaces is None:
        reply = self.call({'action': 'query', 'meta': 'siteinfo',
                           'siprop': 'namespaces'})
        self._namespaces = {}
        self._psuedo_namespaces = {}
        for nsid, info in reply['query']['namespaces'].items():
            # Negative IDs mark psuedo-namespaces.
            bucket = (self._namespaces if int(nsid) >= 0
                      else self._psuedo_namespaces)
            bucket[int(nsid)] = info['*']
    if not psuedo:
        return self._namespaces
    combined = {}
    combined.update(self._namespaces)
    combined.update(self._psuedo_namespaces)
    return combined
2.066674
1.895953
1.090045
def readLocationElement(self, locationElement):
    """Format 0 location reader.

    Read a <location> element and return a dict mapping axis names to
    either a float or an (x, y) tuple for anisotropic locations.

    Fixes over the previous version:
    - a missing 'xvalue' attribute used to raise an uncaught TypeError
      (float(None)); it is now caught alongside ValueError.
    - the warning for a bad xvalue used a '%3.3f' format on a non-float,
      which itself failed; it now uses '%r'.
    """
    if self._strictAxisNames and not self.documentObject.axes:
        raise DesignSpaceDocumentError("No axes defined")
    loc = {}
    for dimensionElement in locationElement.findall(".dimension"):
        dimName = dimensionElement.attrib.get("name")
        if self._strictAxisNames and dimName not in self.axisDefaults:
            # In case the document contains axis definitions,
            # skip locations that use an undefined axis.
            self.log.warning("Location with undefined axis: \"%s\".", dimName)
            continue
        xValue = yValue = None
        xRaw = dimensionElement.attrib.get('xvalue')
        try:
            xValue = float(xRaw)
        except (TypeError, ValueError):
            # TypeError: 'xvalue' attribute missing (None);
            # ValueError: attribute present but not a number.
            # Keep the raw value, matching the lenient legacy behaviour.
            xValue = xRaw
            self.log.warning(
                "readLocation: could not parse xvalue %r for axis %r",
                xRaw, dimName)
        yValue = dimensionElement.attrib.get('yvalue')
        if yValue is not None:
            try:
                yValue = float(yValue)
            except ValueError:
                # NOTE(review): legacy behaviour kept the unparsed string
                # in the tuple; preserved here.
                pass
        if yValue is not None:
            loc[dimName] = (xValue, yValue)
        else:
            loc[dimName] = xValue
    return loc
3.903938
3.885111
1.004846
def build(
        documentPath,
        outputUFOFormatVersion=3,
        roundGeometry=True,
        verbose=True,           # not supported
        logPath=None,           # not supported
        progressFunc=None,      # not supported
        processRules=True,
        logger=None,
        useVarlib=False,
        ):
    """Simple builder for UFO designspaces.

    ``documentPath`` may point either at a single .designspace file or at
    a directory; in the latter case every *.designspace file in that
    directory is processed.

    Returns a list with the result of ``generateUFO`` for each
    successfully processed document. Documents that fail keep the build
    going (best-effort); the failure is logged when a logger is given.

    Fixes over the previous version: the bare ``except:`` is narrowed to
    ``except Exception`` (so KeyboardInterrupt/SystemExit propagate), and
    the dead ``reader = None`` statement was removed.
    """
    import os, glob
    if os.path.isdir(documentPath):
        # process all *.designspace documents in this folder
        todo = glob.glob(os.path.join(documentPath, "*.designspace"))
    else:
        # process the single designspace document at documentPath
        todo = [documentPath]
    results = []
    for path in todo:
        document = DesignSpaceProcessor(ufoVersion=outputUFOFormatVersion)
        document.useVarlib = useVarlib
        document.roundGeometry = roundGeometry
        document.read(path)
        try:
            results.append(document.generateUFO(processRules=processRules))
        except Exception:
            # Best-effort: continue with the remaining documents, but
            # never swallow the error silently when a logger is available.
            if logger:
                logger.exception("ufoProcessor error")
    return results
3.66211
3.676933
0.995969
def getInfoMutator(self):
    """Return (and cache) the mutator for font.info values."""
    if self._infoMutator:
        # Already built for this document.
        return self._infoMutator
    masters = []
    for descriptor in self.sources:
        if descriptor.layerName is not None:
            # Only foreground sources contribute to info.
            continue
        masterFont = self.fonts[descriptor.name]
        if masterFont is None:
            continue
        location = Location(descriptor.location)
        if hasattr(masterFont.info, "toMathInfo"):
            mathInfo = masterFont.info.toMathInfo()
        else:
            mathInfo = self.mathInfoClass(masterFont.info)
        masters.append((location, mathInfo))
    _, self._infoMutator = self.getVariationModel(
        masters, axes=self.serializedAxes, bias=self.newDefaultLocation())
    return self._infoMutator
5.428369
5.360778
1.012608
def getKerningMutator(self, pairs=None):
    """Return a kerning mutator, collect the sources, build mathGlyphs.

    If no pairs are given: calculate the whole table.
    If pairs are given then query the sources for a value and make a
    mutator only with those values.
    """
    if self._kerningMutator and pairs == self._kerningMutatorPairs:
        # Cached mutator is still valid for this exact pair selection.
        return self._kerningMutator
    kerningItems = []
    if pairs is None:
        # Full-table mode: use each source's complete kerning.
        for sourceDescriptor in self.sources:
            if sourceDescriptor.layerName is not None:
                continue
            if not sourceDescriptor.muteKerning:
                loc = Location(sourceDescriptor.location)
                sourceFont = self.fonts[sourceDescriptor.name]
                if sourceFont is None:
                    continue
                # this makes assumptions about the groups of all sources being the same.
                kerningItems.append((loc, self.mathKerningClass(sourceFont.kerning, sourceFont.groups)))
    else:
        # Sparse mode: only the requested pairs contribute.
        self._kerningMutatorPairs = pairs
        for sourceDescriptor in self.sources:
            # XXX check sourceDescriptor layerName, only foreground should contribute
            if sourceDescriptor.layerName is not None:
                continue
            if not os.path.exists(sourceDescriptor.path):
                continue
            if not sourceDescriptor.muteKerning:
                sourceFont = self.fonts[sourceDescriptor.name]
                if sourceFont is None:
                    continue
                loc = Location(sourceDescriptor.location)
                # XXX can we get the kern value from the fontparts kerning object?
                kerningItem = self.mathKerningClass(sourceFont.kerning, sourceFont.groups)
                sparseKerning = {}
                for pair in pairs:
                    v = kerningItem.get(pair)
                    if v is not None:
                        sparseKerning[pair] = v
                kerningItems.append((loc, self.mathKerningClass(sparseKerning)))
    bias, self._kerningMutator = self.getVariationModel(kerningItems, axes=self.serializedAxes, bias=self.newDefaultLocation())
    return self._kerningMutator
3.962876
3.858355
1.02709
def collectMastersForGlyph(self, glyphName, decomposeComponents=False):
    """Return a glyph mutator.defaultLoc

    decomposeComponents = True causes the source glyphs to be decomposed
    first before building the mutator. That gives you instances that do
    not depend on a complete font. If you're calculating previews for
    instance.

    XXX check glyphs in layers
    """
    items = []
    empties = []
    foundEmpty = False
    # NOTE(review): foundEmpty is never reset inside the loop, so once one
    # source glyph is empty every later source is also flagged as empty —
    # confirm this is intentional.
    for sourceDescriptor in self.sources:
        if not os.path.exists(sourceDescriptor.path):
            #kthxbai
            p = "\tMissing UFO at %s" % sourceDescriptor.path
            if p not in self.problems:
                self.problems.append(p)
            continue
        if glyphName in sourceDescriptor.mutedGlyphNames:
            # Glyph is explicitly muted for this source.
            continue
        thisIsDefault = self.default == sourceDescriptor
        ignoreMaster, filteredLocation = self.filterThisLocation(sourceDescriptor.location, self.mutedAxisNames)
        if ignoreMaster:
            continue
        f = self.fonts.get(sourceDescriptor.name)
        if f is None:
            continue
        loc = Location(sourceDescriptor.location)
        sourceLayer = f
        if not glyphName in f:
            # log this>
            continue
        layerName = getDefaultLayerName(f)
        sourceGlyphObject = None
        # handle source layers
        if sourceDescriptor.layerName is not None:
            # start looking for a layer
            # Do not bother for mutatorMath designspaces
            layerName = sourceDescriptor.layerName
            sourceLayer = getLayer(f, sourceDescriptor.layerName)
            if sourceLayer is None:
                continue
            if glyphName not in sourceLayer:
                # start looking for a glyph
                # this might be a support in a sparse layer
                # so we're skipping!
                continue
        # still have to check if the sourcelayer glyph is empty
        if not glyphName in sourceLayer:
            continue
        else:
            sourceGlyphObject = sourceLayer[glyphName]
            if checkGlyphIsEmpty(sourceGlyphObject, allowWhiteSpace=True):
                foundEmpty = True
                #sourceGlyphObject = None
                #continue
        if decomposeComponents:
            # what about decomposing glyphs in a partial font?
            temp = self.glyphClass()
            p = temp.getPointPen()
            dpp = DecomposePointPen(sourceLayer, p)
            sourceGlyphObject.drawPoints(dpp)
            temp.width = sourceGlyphObject.width
            temp.name = sourceGlyphObject.name
            processThis = temp
        else:
            processThis = sourceGlyphObject
        sourceInfo = dict(source=f.path, glyphName=glyphName,
                layerName=layerName,
                location=filteredLocation,  # sourceDescriptor.location,
                sourceName=sourceDescriptor.name,
                )
        if hasattr(processThis, "toMathGlyph"):
            processThis = processThis.toMathGlyph()
        else:
            processThis = self.mathGlyphClass(processThis)
        items.append((loc, processThis, sourceInfo))
        empties.append((thisIsDefault, foundEmpty))
    # check the empties:
    # if the default glyph is empty, then all must be empty
    # if the default glyph is not empty then none can be empty
    checkedItems = []
    emptiesAllowed = False
    # first check if the default is empty.
    # remember that the sources can be in any order
    for i, p in enumerate(empties):
        isDefault, isEmpty = p
        if isDefault and isEmpty:
            emptiesAllowed = True
            # now we know what to look for
    if not emptiesAllowed:
        for i, p in enumerate(empties):
            isDefault, isEmpty = p
            if not isEmpty:
                checkedItems.append(items[i])
    else:
        for i, p in enumerate(empties):
            isDefault, isEmpty = p
            if isEmpty:
                checkedItems.append(items[i])
    return checkedItems
4.964937
4.8449
1.024776
def _instantiateFont(self, path):
    """Return an instance of the configured font class, wired up with all
    the custom part classes when the font class supports them.
    """
    customParts = dict(
        layerClass=self.layerClass,
        libClass=self.libClass,
        kerningClass=self.kerningClass,
        groupsClass=self.groupsClass,
        infoClass=self.infoClass,
        featuresClass=self.featuresClass,
        glyphClass=self.glyphClass,
        glyphContourClass=self.glyphContourClass,
        glyphPointClass=self.glyphPointClass,
        glyphComponentClass=self.glyphComponentClass,
        glyphAnchorClass=self.glyphAnchorClass,
    )
    try:
        return self.fontClass(path, **customParts)
    except TypeError:
        # The configured fontClass does not accept the additional
        # part classes: fall back to the plain constructor.
        return self.fontClass(path)
2.966582
2.765291
1.072792
def checkGlyphIsEmpty(glyph, allowWhiteSpace=True):
    """Establish whether *glyph* is completely empty by drawing it with an
    EmptyPen.

    Additionally, the glyph's unicode is checked against known unicode
    whitespace characters, which have a valid reason to be empty; with
    allowWhiteSpace=True those are not reported as empty.
    """
    whiteSpaceCodepoints = frozenset([
        0x9, 0xa, 0xb, 0xc, 0xd,         # tab, LF, VT, FF, CR
        0x20, 0x85, 0xa0,                # space, next line, no-break space
        0x1680, 0x180e,                  # ogham space mark, mongolian vowel separator
        0x2000, 0x2001,                  # en quad, em quad
        0x2003, 0x2004, 0x2005, 0x2006,  # en / three-per-em / four-per-em / six-per-em space
        0x2007, 0x2008, 0x2009, 0x200a,  # figure / punctuation / thin / hair space
        0x2028, 0x2029,                  # line separator, paragraph separator
        0x202f, 0x205f, 0x3000,          # narrow no-break / medium math / ideographic space
    ])
    pen = EmptyPen()
    glyph.drawPoints(pen)
    if not pen.isEmpty():
        return False
    if allowWhiteSpace and glyph.unicode in whiteSpaceCodepoints:
        # Empty on purpose: this is a whitespace glyph.
        return False
    return True
1.872823
1.791643
1.04531
def configure_and_build(self, show_progress=True, optimized=True, skip_configuration=False):
    """Configure and build the ns-3 code.

    Args:
        show_progress (bool): whether or not to display a progress bar
            during compilation.
        optimized (bool): whether to use an optimized build. If False,
            use a standard ./waf configure.
        skip_configuration (bool): whether to skip the configuration
            step, and only perform compilation.
    """
    if not skip_configuration:
        configure = ['python', 'waf', 'configure', '--enable-examples',
                     '--disable-gtk', '--disable-python']
        if optimized:
            configure += ['--build-profile=optimized',
                          '--out=build/optimized']
        # Check whether path points to a valid installation
        subprocess.call(configure, cwd=self.path,
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    # Build ns-3
    build_process = subprocess.Popen(['python', 'waf', 'build'],
                                     cwd=self.path,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
    if not show_progress:
        # Wait for the build to finish anyway
        build_process.communicate()
        return

    # Drive a progress bar from the parsed build output.
    line_iterator = self.get_build_output(build_process)
    pbar = None
    try:
        initial, total = next(line_iterator)
        pbar = tqdm(line_iterator, initial=initial, total=total,
                    unit='file', desc='Building ns-3', smoothing=0)
        for current, total in pbar:
            pbar.n = current
    except StopIteration:
        if pbar is not None:
            pbar.n = pbar.total
3.719076
3.673629
1.012371
def get_build_output(self, process):
    """Parse the waf build output and yield progress information.

    Yields [current, total] task counts for each line of the form
    "[current/total] ...".

    Args:
        process: the subprocess instance to listen to.

    Raises:
        Exception: if the build terminates with a non-zero return code.
    """
    progress = re.compile(r'\[\s*(\d+?)/(\d+)\].*')
    while True:
        line = process.stdout.readline()
        if line == b'' and process.poll() is not None:
            # Build finished: fail loudly on a non-zero exit code.
            if process.returncode > 0:
                raise Exception("Compilation ended with an error"
                                ".\nSTDERR\n%s\nSTDOUT\n%s" %
                                (process.stderr.read(),
                                 process.stdout.read()))
            return
        if line:
            match = progress.search(line.strip().decode('utf-8'))
            if match is not None:
                yield [int(match.group(1)), int(match.group(2))]
3.821849
3.651512
1.046648
def get_available_parameters(self):
    """Return a sorted list of the parameters made available by the script.

    At the moment, we rely on regex to extract the list of available
    parameters from the --PrintHelp / --PrintGlobals output. This will
    break if the format of the output changes, but it is the best option
    currently available.
    """
    help_output = subprocess.check_output(
        [self.script_executable, '--PrintHelp'],
        env=self.environment, cwd=self.path).decode('utf-8')
    # Isolate the section listing program-specific parameters.
    program_sections = re.findall('.*Program\s(?:Options|Arguments):'
                                  '(.*)General\sArguments.*',
                                  help_output, re.DOTALL)
    globals_output = subprocess.check_output(
        [self.script_executable, '--PrintGlobals'],
        env=self.environment, cwd=self.path).decode('utf-8')
    # ns-3 framework globals we never want to expose as script parameters.
    excluded = ['RngRun', 'RngSeed', 'SchedulerType',
                'SimulatorImplementationType', 'ChecksumEnabled']
    params = []
    if len(program_sections):
        params += re.findall('.*--(.*?)[?::|=].*', program_sections[0],
                             re.MULTILINE)
    if len(globals_output):
        params += [name for name in
                   re.findall('.*--(.*?)[?::|=].*', globals_output,
                              re.MULTILINE)
                   if name not in excluded]
    return sorted(params)
5.68862
5.543368
1.026203
def run_simulations(self, parameter_list, data_folder):
    """Run several simulations using a certain combination of parameters.

    Yields results as simulations are completed.

    Args:
        parameter_list (list): list of parameter combinations to simulate.
        data_folder (str): folder in which to save subfolders containing
            simulation output.
    """
    # NOTE(review): idx is unused; enumerate kept for byte-identical code.
    for idx, parameter in enumerate(parameter_list):

        current_result = {
            'params': {},
            'meta': {}
        }
        current_result['params'].update(parameter)

        # Build the command line: one --name=value flag per parameter.
        command = [self.script_executable] + ['--%s=%s' % (param, value)
                                              for param, value in
                                              parameter.items()]

        # Run from dedicated temporary folder
        current_result['meta']['id'] = str(uuid.uuid4())
        temp_dir = os.path.join(data_folder, current_result['meta']['id'])
        os.makedirs(temp_dir)

        start = time.time()  # Time execution
        stdout_file_path = os.path.join(temp_dir, 'stdout')
        stderr_file_path = os.path.join(temp_dir, 'stderr')
        with open(stdout_file_path, 'w') as stdout_file, open(
                stderr_file_path, 'w') as stderr_file:
            return_code = subprocess.call(command, cwd=temp_dir,
                                          env=self.environment,
                                          stdout=stdout_file,
                                          stderr=stderr_file)
        end = time.time()  # Time execution

        if return_code > 0:
            # Reconstruct a copy-pasteable waf command for reproduction.
            complete_command = [self.script]
            complete_command.extend(command[1:])
            complete_command = "python waf --run \"%s\"" % (
                ' '.join(complete_command))
            with open(stdout_file_path, 'r') as stdout_file, open(
                    stderr_file_path, 'r') as stderr_file:
                raise Exception(('Simulation exited with an error.\n'
                                 'Params: %s\n'
                                 '\nStderr: %s\n'
                                 'Stdout: %s\n'
                                 'Use this command to reproduce:\n'
                                 '%s' % (parameter, stderr_file.read(),
                                         stdout_file.read(),
                                         complete_command)))

        current_result['meta']['elapsed_time'] = end-start

        yield current_result
2.548421
2.537476
1.004314
def list_param_combinations(param_ranges):
    """Create a list of all parameter combinations from a dictionary
    specifying desired parameter values as lists.

    Scalar values are treated as single-element lists. The caller's
    dictionary is NOT modified (the previous implementation wrote the
    converted lists back into param_ranges, an unexpected side effect).

    Example:
        >>> param_ranges = {'a': [1], 'b': [2, 3]}
        >>> list_param_combinations(param_ranges)
        [{'a': 1, 'b': 2}, {'a': 1, 'b': 3}]

    Additionally, this function is robust in case values are not lists:
        >>> param_ranges = {'a': 1, 'b': [2, 3]}
        >>> list_param_combinations(param_ranges)
        [{'a': 1, 'b': 2}, {'a': 1, 'b': 3}]
    """
    # Normalize to lists in a copy so product() works on every value.
    normalized = {key: value if isinstance(value, list) else [value]
                  for key, value in param_ranges.items()}
    return [dict(zip(normalized, combo))
            for combo in product(*normalized.values())]
3.370612
4.042333
0.833828
def get_command_from_result(script, result, debug=False):
    """Return the waf command that is needed to reproduce a certain result.

    Args:
        script (str): name of the ns-3 script.
        result (dict): result dictionary with a 'params' mapping of
            parameter: value pairs.
        debug (bool): whether the command should include the gdb
            debugging template.
    """
    flags = " ".join('--%s=%s' % (name, value)
                     for name, value in result['params'].items())
    if debug:
        return ('python waf --run ' + script +
                ' --command-template="gdb --args %s ' + flags + '"')
    return 'python waf --run "' + script + ' ' + flags + '"'
3.62111
3.405118
1.063431
def automatic_parser(result, dtypes={}, converters={}):
    """Try and automatically convert strings formatted as tables into
    nested list structures.

    Under the hood, this function essentially applies the genfromtxt
    function to all files in the output.

    Args:
        result (dict): the result to parse; files are read from
            result['output'].
        dtypes (dict): dtype specification per filename (see the numpy
            genfromtxt documentation). Missing filenames fall back to
            None (automatic detection).
        converters (dict): converters specification per filename.

    Fix over the previous version: the mutable default arguments are no
    longer written into (``dtypes[filename] = None`` grew the shared
    default dict across calls); lookups now use ``.get``.
    """
    # NOTE(review): this changes numpy error handling process-wide;
    # kept for behavioural compatibility.
    np.seterr(all='raise')
    parsed = {}
    for filename, contents in result['output'].items():
        dtype = dtypes.get(filename)
        converter = converters.get(filename)
        with warnings.catch_warnings():
            # genfromtxt can warn about automatic dtype detection.
            warnings.simplefilter("ignore")
            parsed[filename] = np.genfromtxt(io.StringIO(contents),
                                             dtype=dtype,
                                             converters=converter
                                             ).tolist()
    return parsed
2.351082
2.523713
0.931596
def new(cls, ns_path, script, campaign_dir, runner_type='Auto',
        overwrite=False, optimized=True, check_repo=True):
    """Create a new campaign from an ns-3 installation and a campaign
    directory.

    If a campaign database already exists in campaign_dir and matches the
    requested script (and overwrite is False), it is reused. Otherwise a
    SimulationRunner is created for ns_path/script and a fresh database is
    installed in campaign_dir.

    When check_repo is True, the git commit of the ns-3 repository is
    recorded in the database to enforce reproducibility.

    Args:
        ns_path (str): path to the ns-3 installation.
        script (str): ns-3 script that will be executed.
        campaign_dir (str): directory holding the campaign database.
        runner_type (str): SimulationRunner implementation to use
            (SimulationRunner, ParallelRunner, GridRunner, or Auto).
        overwrite (bool): whether to overwrite an existing campaign_dir.
        optimized (bool): whether to use an optimized ns-3 build.
        check_repo (bool): whether to record/verify the git commit.
    """
    # Work with absolute paths throughout.
    ns_path = os.path.abspath(ns_path)
    campaign_dir = os.path.abspath(campaign_dir)

    # Reuse a matching existing campaign when possible.
    if Path(campaign_dir).exists() and not overwrite:
        existing = CampaignManager.load(campaign_dir, ns_path,
                                        runner_type=runner_type,
                                        optimized=optimized,
                                        check_repo=check_repo)
        if existing.db.get_script() == script:
            return existing
        del existing

    # Initialize runner and query the script's supported parameters.
    runner = CampaignManager.create_runner(ns_path, script,
                                           runner_type=runner_type,
                                           optimized=optimized)
    params = runner.get_available_parameters()

    # Record the current commit so results stay tied to a code version.
    commit = ""
    if check_repo:
        from git import Repo, exc
        commit = Repo(ns_path).head.commit.hexsha

    db = DatabaseManager.new(script=script,
                             params=params,
                             commit=commit,
                             campaign_dir=campaign_dir,
                             overwrite=overwrite)
    return cls(db, runner, check_repo)
3.357688
3.279117
1.023961
def load(cls, campaign_dir, ns_path=None, runner_type='Auto',
         optimized=True, check_repo=True):
    """Load an existing simulation campaign.

    Specifying an ns-3 installation is not compulsory: existing results
    will be available, but running additional simulations requires a
    runner, which is only created when ns_path is given.

    Args:
        campaign_dir (str): directory holding the campaign database.
        ns_path (str): path to the ns-3 installation, or None.
        runner_type (str): SimulationRunner implementation to use.
        optimized (bool): whether to use an optimized ns-3 build.
        check_repo (bool): whether to verify the git commit.
    """
    # Work with absolute paths throughout.
    if ns_path is not None:
        ns_path = os.path.abspath(ns_path)
    campaign_dir = os.path.abspath(campaign_dir)

    # Read the existing configuration into a DatabaseManager.
    db = DatabaseManager.load(campaign_dir)
    script = db.get_script()

    runner = None
    if ns_path is not None:
        runner = CampaignManager.create_runner(ns_path, script,
                                               runner_type, optimized)
    return cls(db, runner, check_repo)
3.670508
3.481995
1.054139
def create_runner(ns_path, script, runner_type='Auto', optimized=True):
    """Create a SimulationRunner of the desired type and return it.

    Args:
        ns_path (str): path to the ns-3 installation to employ.
        script (str): ns-3 script that will be executed.
        runner_type (str): SimulationRunner implementation to use:
            SimulationRunner (sequential), ParallelRunner (parallel,
            local), GridRunner (DRMAA-compatible scheduler). If Auto,
            pick the best available (GridRunner when DRMAA is available,
            ParallelRunner otherwise).
        optimized (bool): whether to use an optimized ns-3 build.

    Raises:
        ValueError: if runner_type does not name a known runner class.

    Fix over the previous version: the class was looked up with
    ``locals().get(runner_type, globals().get(runner_type))``, but a
    function's locals() never contains these classes, so that branch was
    dead code; an unknown runner_type also failed with a cryptic
    "'NoneType' object is not callable". The lookup now goes straight to
    the module globals and fails with a clear error.
    """
    # Resolve 'Auto' to the best available implementation.
    if runner_type == 'Auto':
        runner_type = 'GridRunner' if DRMAA_AVAILABLE else 'ParallelRunner'
    runner_class = globals().get(runner_type)
    if runner_class is None:
        raise ValueError("Unknown runner_type: %s" % runner_type)
    return runner_class(ns_path, script, optimized=optimized)
6.390762
5.783201
1.105056
def run_simulations(self, param_list, show_progress=True):
    """Run several simulations specified by a list of parameter
    combinations.

    Note: this function does not verify whether we already have the
    required simulations in the database - it just runs all the parameter
    combinations that are specified in the list.

    Args:
        param_list (list): list of parameter combinations to execute.
            Items of this list are dictionaries, with one key for each
            parameter, and a value specifying the parameter value (which
            can be either a string or a number).
        show_progress (bool): whether or not to show a progress bar with
            percentage and expected remaining time.
    """
    # Make sure we have a runner to run simulations with.
    # This can happen in case the simulation campaign is loaded and not
    # created from scratch.
    if self.runner is None:
        raise Exception("No runner was ever specified"
                        " for this CampaignManager.")

    # Return if the list is empty
    if param_list == []:
        return

    # Check all parameter combinations fully specify the desired simulation
    desired_params = self.db.get_params()
    for p in param_list:
        # Besides the parameters that were actually passed, we add the ones
        # that are always available in every script
        passed = list(p.keys())
        available = ['RngRun'] + desired_params
        if set(passed) != set(available):
            raise ValueError("Specified parameter combination does not "
                             "match the supported parameters:\n"
                             "Passed: %s\nSupported: %s"
                             % (sorted(passed), sorted(available)))

    # Check that the current repo commit corresponds to the one specified
    # in the campaign
    if self.check_repo:
        self.check_repo_ok()

    # Build ns-3 before running any simulations
    # At this point, we can assume the project was already configured
    self.runner.configure_and_build(skip_configuration=True)

    # Shuffle simulations
    # This mixes up long and short simulations, and gives better time
    # estimates.
    # NOTE: this shuffles the caller's list in place.
    shuffle(param_list)

    # Offload simulation execution to self.runner
    # Note that this only creates a generator for the results, no
    # computation is performed on this line.
    results = self.runner.run_simulations(param_list,
                                          self.db.get_data_dir())

    # Wrap the result generator in the progress bar generator.
    if show_progress:
        result_generator = tqdm(results, total=len(param_list),
                                unit='simulation',
                                desc='Running simulations')
    else:
        result_generator = results

    # Insert result object in db. Using the generator here ensures we
    # save results as they are finalized by the SimulationRunner, and
    # that they are kept even if execution is terminated abruptly by
    # crashes or by a KeyboardInterrupt.
    for result in result_generator:
        self.db.insert_result(result)
7.219229
7.1615
1.008061
def get_missing_simulations(self, param_list, runs=None):
    """Return a list of the simulations among the required ones that are
    not available in the database.

    Args:
        param_list (list): a list of dictionaries containing all the
            parameters combinations.
        runs (int): how many repetitions are wanted for each parameter
            combination, or None if the dictionaries in param_list
            already feature the desired RngRun value.
    """
    missing = []
    if runs is None:
        # Combinations already carry RngRun: keep those with no result.
        for combination in param_list:
            if not self.db.get_results(combination):
                missing.append(combination)
        return missing

    next_runs = self.db.get_next_rngruns()
    available_params = [r['params'] for r in self.db.get_results()]
    for combination in param_list:
        # Count how many results we already have for this combination
        # (comparing everything except the RngRun value).
        needed = runs
        for p in available_params:
            if combination == {k: p[k] for k in p.keys() if k != "RngRun"}:
                needed -= 1
        for _ in range(needed):
            # Copy the dictionary so that each repetition gets its own
            # distinct RngRun value.
            with_run = deepcopy(combination)
            with_run['RngRun'] = next(next_runs)
            missing.append(with_run)
    return missing
4.215636
3.824648
1.102228
def run_missing_simulations(self, param_list, runs=None):
    """Run the simulations from the parameter list that are not yet
    available in the database, ensuring at least *runs* replications for
    each parameter combination.

    Args:
        param_list (list, dict): either a list of parameter combinations
            or a dictionary to be expanded into a list through the
            list_param_combinations function.
        runs (int): the number of runs to perform for each parameter
            combination. Only allowed if param_list doesn't already
            feature an 'RngRun' key.
    """
    if isinstance(param_list, dict):
        # Expand a {param: values} specification into explicit combinations.
        param_list = list_param_combinations(param_list)
    missing = self.get_missing_simulations(param_list, runs)
    self.run_simulations(missing)
4.640747
3.988115
1.163644
def get_results_as_numpy_array(self, parameter_space,
                               result_parsing_function, runs):
    """
    Return the results relative to the desired parameter space in the form
    of a numpy array.

    Args:
        parameter_space (dict): dictionary containing
            parameter/list-of-values pairs.
        result_parsing_function (function): user-defined function, taking a
            result dictionary as argument, that can be used to parse the
            result files and return a list of values.
        runs (int): number of runs to gather for each parameter combination.
    """
    # Build the nested-list representation of the space, then let numpy
    # turn it into a single array.
    complete_results = self.db.get_complete_results()
    nested_results = self.get_space(complete_results, {}, parameter_space,
                                    runs, result_parsing_function)
    return np.array(nested_results)
8.065258
11.241019
0.717485
def save_to_mat_file(self, parameter_space, result_parsing_function,
                     filename, runs):
    """
    Save the results relative to the desired parameter space to a .mat
    file.

    Args:
        parameter_space (dict): dictionary containing
            parameter/list-of-values pairs.
        result_parsing_function (function): user-defined function, taking a
            result dictionary as argument, that can be used to parse the
            result files and return a list of values.
        filename (path): name of output .mat file.
        runs (int): number of runs to gather for each parameter combination.
    """
    # Make sure all values are lists
    for key in parameter_space:
        if not isinstance(parameter_space[key], list):
            parameter_space[key] = [parameter_space[key]]

    # Add a dimension label for each non-singular dimension
    # NOTE(review): the numpy array saved below has one axis per parameter
    # (including single-valued ones), while dimension_labels skips
    # parameters with a single value — confirm consumers of the .mat file
    # expect this asymmetry.
    dimension_labels = [{key: str(parameter_space[key])} for key in
                        parameter_space.keys()
                        if len(parameter_space[key]) > 1] + [
                            {'runs': range(runs)}]

    # Save both the results array and the dimension labels in the file
    return savemat(
        filename,
        {'results': self.get_results_as_numpy_array(parameter_space,
                                                    result_parsing_function,
                                                    runs=runs),
         'dimension_labels': dimension_labels})
3.640743
3.784297
0.962066
def save_to_npy_file(self, parameter_space, result_parsing_function,
                     filename, runs):
    """
    Save the results relative to the desired parameter space to a .npy
    (numpy array) file.

    Args:
        parameter_space (dict): dictionary containing
            parameter/list-of-values pairs.
        result_parsing_function (function): user-defined function, taking a
            result dictionary as argument, used to parse result files.
        filename (path): name of the output .npy file.
        runs (int): number of runs to gather for each parameter combination.
    """
    results_array = self.get_results_as_numpy_array(
        parameter_space, result_parsing_function, runs=runs)
    np.save(filename, results_array)
2.791902
2.952937
0.945466
def save_to_folders(self, parameter_space, folder_name, runs):
    """
    Export the results relative to the desired parameter space as a
    directory tree rooted at folder_name.

    Args:
        parameter_space (dict): dictionary containing
            parameter/list-of-values pairs.
        folder_name (str): root directory of the exported tree.
        runs (int): number of runs to export for each parameter combination.
    """
    all_results = self.db.get_results()
    self.space_to_folders(all_results, {}, parameter_space, runs,
                          folder_name)
10.009119
9.943695
1.006579
def space_to_folders(self, current_result_list, current_query, param_space,
                     runs, current_directory):
    """
    Convert a parameter space specification to a directory tree with a
    nested structure (one folder level per parameter, one run=N folder per
    repetition, result files copied at the leaves).

    Args:
        current_result_list (list): results still compatible with the query
            accumulated so far.
        current_query (dict): parameter values fixed by enclosing recursion
            levels.
        param_space (dict): parameter dimensions left to expand.
        runs (int): number of runs to export per parameter combination.
        current_directory (str): directory under which folders are created.
    """
    # Base case: we iterate over the runs and copy files in the final
    # directory.
    if not param_space:
        for run, r in enumerate(current_result_list[:runs]):
            files = self.db.get_result_files(r)
            new_dir = os.path.join(current_directory, "run=%s" % run)
            os.makedirs(new_dir, exist_ok=True)
            for filename, filepath in files.items():
                shutil.copyfile(filepath, os.path.join(new_dir, filename))
        return

    [key, value] = list(param_space.items())[0]

    # Iterate over dictionary values
    for v in value:
        next_query = deepcopy(current_query)
        temp_query = deepcopy(current_query)
        # For each list, recur 'fixing' that dimension.
        next_query[key] = v  # Update query
        # Create folder; '/' in values is replaced so it cannot create
        # spurious nested directories.
        folder_name = ("%s=%s" % (key, v)).replace('/', '_')
        new_dir = os.path.join(current_directory, folder_name)
        os.makedirs(new_dir, exist_ok=True)
        next_param_space = deepcopy(param_space)
        del(next_param_space[key])
        temp_query[key] = v
        # Narrow the candidate list before recursing so deeper levels only
        # scan results that can still match.
        # NOTE(review): next_query and temp_query are built identically at
        # this point — one of the two copies looks redundant; confirm
        # before simplifying.
        temp_result_list = [r for r in current_result_list
                            if self.satisfies_query(r, temp_query)]

        self.space_to_folders(temp_result_list, next_query,
                              next_param_space, runs, new_dir)
2.878436
2.946863
0.97678
def get_results_as_xarray(self, parameter_space, result_parsing_function,
                          output_labels, runs):
    """
    Return the results relative to the desired parameter space in the form
    of an xarray data structure.

    Args:
        parameter_space (dict): The space of parameters to export.
        result_parsing_function (function): user-defined function, taking a
            result dictionary as argument, that can be used to parse the
            result files and return a list of values.
        output_labels (list): a list of labels to apply to the results
            dimensions, output by the result_parsing_function.
        runs (int): the number of runs to export for each parameter
            combination.
    """
    np_array = np.array(
        self.get_space(
            self.db.get_complete_results(),
            {},
            collections.OrderedDict([(k, v) for k, v in
                                     parameter_space.items()]),
            runs,
            result_parsing_function))

    # Build the coordinate dictionary for the xarray: a copy of the
    # parameter space, plus a 'runs' dimension and, optionally, a
    # 'metrics' dimension for the parsed outputs.
    # NOTE(review): a previous comment claimed only "variable" parameters
    # were kept here, but every parameter is copied — confirm intent.
    clean_parameter_space = collections.OrderedDict(
        [(k, v) for k, v in parameter_space.items()])

    clean_parameter_space['runs'] = range(runs)

    if isinstance(output_labels, list):
        clean_parameter_space['metrics'] = output_labels

    xr_array = xr.DataArray(np_array, coords=clean_parameter_space,
                            dims=list(clean_parameter_space.keys()))

    return xr_array
3.61887
3.989249
0.907156
def get_space(self, current_result_list, current_query, param_space, runs,
              result_parsing_function):
    """
    Convert a parameter space specification to a nested array structure
    representing the space.

    In other words, if the parameter space is::

        param_space = {
            'a': [1, 2],
            'b': [3, 4]
        }

    the function will return a structure like the following::

        [
            [
                {'a': 1, 'b': 3},
                {'a': 1, 'b': 4}
            ],
            [
                {'a': 2, 'b': 3},
                {'a': 2, 'b': 4}
            ]
        ]

    where the first dimension represents a, and the second dimension
    represents b. This nested-array structure can then be easily converted
    to a numpy array via np.array().

    Args:
        current_result_list (list): results still compatible with the
            query accumulated so far.
        current_query (dict): the query to apply to the structure.
        param_space (dict): representation of the parameter space.
        runs (int): the number of runs to query for each parameter
            combination.
        result_parsing_function (function): user-defined function to call
            on results, typically used to parse data and output metrics.
            When None, results are returned as file dictionaries.
    """
    # Default parsing: return the result files as a dictionary.
    if result_parsing_function is None:
        result_parsing_function = CampaignManager.files_in_dictionary

    # Note that this function operates recursively.

    # Base case: no dimensions left to expand — parse up to `runs` results
    # matching the accumulated query.
    if not param_space:
        results = [r for r in current_result_list
                   if self.satisfies_query(r, current_query)]
        parsed = []
        for r in results[:runs]:
            parsed.append(result_parsing_function(r))
        return parsed

    space = []

    [key, value] = list(param_space.items())[0]

    # Iterate over dictionary values
    for v in value:
        next_query = deepcopy(current_query)
        temp_query = deepcopy(current_query)
        # For each list, recur 'fixing' that dimension.
        next_query[key] = v
        next_param_space = deepcopy(param_space)
        del(next_param_space[key])
        temp_query[key] = v
        # Narrow the candidate result list before recursing.
        # NOTE(review): next_query and temp_query are built identically
        # here — one of the two copies looks redundant; confirm.
        temp_result_list = [r for r in current_result_list
                            if self.satisfies_query(r, temp_query)]

        space.append(self.get_space(temp_result_list, next_query,
                                    next_param_space, runs,
                                    result_parsing_function))

    return space
3.257262
3.384278
0.962469
def check_repo_ok(self):
    """
    Verify that the ns-3 repository is in a reproducible state: its HEAD
    commit must match the one saved in the campaign database, and the
    working tree must be clean (no modified or untracked files).
    """
    from git import Repo, exc

    # Without a runner there is no ns-3 path to check.
    if self.runner is None:
        return

    repo_path = self.runner.path
    try:
        repo = Repo(repo_path)
    except exc.InvalidGitRepositoryError:
        raise Exception("No git repository detected.\nIn order to "
                        "use SEM and its reproducibility enforcing "
                        "features, please create a git repository at "
                        "the root of your ns-3 project.")

    head_commit = repo.head.commit.hexsha
    saved_commit = self.db.get_commit()

    if repo.is_dirty(untracked_files=True):
        raise Exception("ns-3 repository is not clean")

    if head_commit != saved_commit:
        raise Exception("ns-3 repository is on a different commit "
                        "from the one specified in the campaign")
5.342927
4.613842
1.158021
def run(ns_3_path, results_dir, script, no_optimization, parameters,
        max_processes):
    """
    Run multiple simulations.

    Creates (or opens) a campaign for the given ns-3 script, collects
    parameter values either interactively or from a parameter file, and
    runs the requested number of repetitions for each combination.
    """
    # Cap the number of parallel simulation processes
    sem.parallelrunner.MAX_PARALLEL_PROCESSES = max_processes

    # Create a campaign
    campaign = sem.CampaignManager.new(ns_3_path, script, results_dir,
                                       overwrite=False,
                                       optimized=not no_optimization)

    # Print campaign info
    click.echo(campaign)

    # Run the simulations
    [params, defaults] = zip(*get_params_and_defaults(
        campaign.db.get_params(), campaign.db))

    # Check whether we need to read parameters from the command line
    if not parameters:
        # Substitute non-None defaults with their string representation
        # This will be then converted back to a Python data structure in
        # query_parameters
        string_defaults = list()
        for idx, d in enumerate(defaults):
            if d is not None:
                string_defaults.append(str(d))
            else:
                string_defaults.append(d)
        script_params = query_parameters(params, defaults=string_defaults)
    else:
        # Parameters come from a file instead of interactive prompts
        script_params = import_parameters_from_file(parameters)

    # Finally, run the simulations
    campaign.run_missing_simulations(script_params,
                                     runs=click.prompt("Total runs",
                                                       type=int))
5.799188
5.80454
0.999078
def view(results_dir, result_id, hide_simulation_output, parameters,
         no_pager):
    """
    View results of simulations.

    Results are pretty-printed, either for a single result id or for all
    results matching an interactively-queried (or file-provided) parameter
    specification.
    """
    campaign = sem.CampaignManager.load(results_dir)

    # Pick the most appropriate function based on the level of detail we
    # want: complete results include the simulation output.
    if hide_simulation_output:
        get_results_function = campaign.db.get_results
    else:
        get_results_function = campaign.db.get_complete_results

    # If a result id was specified, just query for that result
    if result_id:
        output = '\n\n\n'.join([pprint.pformat(item) for item in
                                get_results_function(result_id=result_id)])
    else:
        [params, defaults] = zip(*get_params_and_defaults(
            campaign.db.get_params(), campaign.db))

        if not parameters:
            # Convert to string so query_parameters can literal_eval them
            string_defaults = list()
            for idx, d in enumerate(defaults):
                string_defaults.append(str(d))
            script_params = query_parameters(params, string_defaults)
        else:
            script_params = import_parameters_from_file(parameters)

        # Perform the search
        output = '\n\n\n'.join([pprint.pformat(item) for item in
                                get_results_function(script_params)])

    # Print the results, paging them unless explicitly disabled
    if no_pager:
        click.echo(output)
    else:
        click.echo_via_pager(output)
3.709768
3.768704
0.984362
def command(results_dir, result_id):
    """
    Print the command that needs to be used to reproduce a result, both in
    normal and debug form.
    """
    campaign = sem.CampaignManager.load(results_dir)
    result = campaign.db.get_results(result_id=result_id)[0]
    script = campaign.db.get_script()

    click.echo("Simulation command:")
    click.echo(sem.utils.get_command_from_result(script, result))
    click.echo("Debug command:")
    click.echo(sem.utils.get_command_from_result(script, result,
                                                 debug=True))
4.265355
4.029961
1.058411
def export(results_dir, filename, do_not_try_parsing, parameters):
    """
    Export results to file. An extension in filename is required to deduce
    the file type. If no extension is specified, a directory tree export
    will be used.

    Note that this command automatically tries to parse the simulation
    output.

    Supported extensions:

    .mat (Matlab file), .npy (Numpy file), no extension (Directory tree)
    """
    # Get the extension
    _, extension = os.path.splitext(filename)

    campaign = sem.CampaignManager.load(results_dir)

    [params, defaults] = zip(*get_params_and_defaults(
        campaign.db.get_params(), campaign.db))

    # Decide whether the automatic parser is applied to each result
    if do_not_try_parsing:
        parsing_function = None
    else:
        parsing_function = sem.utils.automatic_parser

    if not parameters:
        # Convert to string so query_parameters can literal_eval them back
        string_defaults = list()
        for idx, d in enumerate(defaults):
            string_defaults.append(str(d))
        parameter_query = query_parameters(params, string_defaults)
    else:
        # Import specified parameter file
        parameter_query = import_parameters_from_file(parameters)

    # Dispatch on the requested output format
    if extension == ".mat":
        campaign.save_to_mat_file(parameter_query, parsing_function,
                                  filename,
                                  runs=click.prompt("Runs", type=int))
    elif extension == ".npy":
        campaign.save_to_npy_file(parameter_query, parsing_function,
                                  filename,
                                  runs=click.prompt("Runs", type=int))
    elif extension == "":
        campaign.save_to_folders(parameter_query, filename,
                                 runs=click.prompt("Runs", type=int))
    else:
        # Unrecognized format
        raise ValueError("Format not recognized")
3.942804
4.050431
0.973428
def merge(move, output_dir, sources):
    """
    Merge multiple results folders into one, by copying the results over
    to a new folder. For a faster operation (which on the other hand
    destroys the campaign data if interrupted), the move option can be
    used to directly move results to the new folder.

    Args:
        move (bool): move results instead of copying; when True, the
            source campaign directories are deleted afterwards.
        output_dir (str): path of the merged campaign directory to create.
        sources (list): paths of the campaign directories to merge.
    """
    # Get paths for all campaign JSONs (each campaign directory contains
    # a JSON database named after the directory itself)
    jsons = []
    for s in sources:
        filename = "%s.json" % os.path.split(s)[1]
        jsons += [os.path.join(s, filename)]

    # Check that the configuration for all campaigns is the same.
    # (Bug fix: the original loop reused the variable name `j` for both
    # the JSON path and the config entry, shadowing the path.)
    reference_config = TinyDB(jsons[0]).table('config')
    for json_path in jsons[1:]:
        other_config = TinyDB(json_path).table('config')
        for ref_entry, other_entry in zip(reference_config.all(),
                                          other_config.all()):
            assert ref_entry == other_entry

    # Create folders for new results directory
    filename = "%s.json" % os.path.split(output_dir)[1]
    output_json = os.path.join(output_dir, filename)
    output_data = os.path.join(output_dir, 'data')
    os.makedirs(output_data)

    # Create new database
    db = TinyDB(output_json)
    db.table('config').insert_multiple(reference_config.all())

    # Import results from all databases to the new JSON file
    for s in sources:
        filename = "%s.json" % os.path.split(s)[1]
        current_db = TinyDB(os.path.join(s, filename))
        db.table('results').insert_multiple(
            current_db.table('results').all())

    # Copy or move results to new data folder
    for s in sources:
        for r in glob.glob(os.path.join(s, 'data/*')):
            basename = os.path.basename(r)
            if move:
                shutil.move(r, os.path.join(output_data, basename))
            else:
                shutil.copytree(r, os.path.join(output_data, basename))

    if move:
        # Remove the now-emptied source campaign directories entirely.
        # (Bug fix: the previous implementation passed a glob pattern
        # 'data/*' and a plain .json file to shutil.rmtree, which does not
        # expand globs and only accepts directories — both calls raised
        # before the final cleanup could run.)
        for s in sources:
            shutil.rmtree(s)
2.408557
2.316008
1.03996
def get_params_and_defaults(param_list, db):
    """
    Deduce [parameter, default] pairs from simulations available in the db.

    Args:
        param_list (list): List of parameters to query for. Note that the
            current implementation derives the pairs entirely from the
            database and does not filter by this list.
        db (DatabaseManager): Database where to query for defaults.
    """
    all_values = db.get_all_values_of_all_params()
    return [[param, default] for param, default in all_values.items()]
7.938762
8.747237
0.907574
def query_parameters(param_list, defaults=None):
    """
    Ask the user for parameters. If available, propose some defaults.

    Args:
        param_list (list): List of parameters to ask the user for values.
        defaults (list): A list of proposed defaults, of the same length as
            param_list. A value of None in one element of the list (or
            passing None for the whole list) means that no default will be
            proposed for the corresponding parameter(s).
    """
    # Bug fix: the signature advertises defaults as optional, but the
    # original implementation crashed with TypeError (zip over None) when
    # defaults was omitted. Treat a missing list as "no defaults at all".
    if defaults is None:
        defaults = [None] * len(param_list)

    script_params = collections.OrderedDict([k, []] for k in param_list)

    for param, default in zip(list(script_params.keys()), defaults):
        # click converts and echoes the proposed default; the answer is
        # interpreted as a Python literal (numbers, strings, lists, ...).
        user_input = click.prompt("%s" % param, default=default)
        script_params[param] = ast.literal_eval(user_input)

    return script_params
3.534588
3.75628
0.940981
def import_parameters_from_file(parameters_file):
    """
    Import a parameter dictionary from file.

    Values in parameters_file are expected to be defined as follows::

        param1: value1
        param2: [value2, value3]

    where each value is a valid Python literal.
    """
    with open(parameters_file, 'r') as f:
        contents = f.read()

    params = {}
    # Each 'key: value' line becomes one dictionary entry; values are
    # interpreted as Python literals.
    for key, raw_value in re.findall('(.*): (.*)', contents):
        params[key] = ast.literal_eval(raw_value)
    return params
2.610924
2.919969
0.894162
def run_simulations(self, parameter_list, data_folder):
    """
    Run multiple simulations in parallel, yielding each result as soon as
    it completes (order is not guaranteed).

    Args:
        parameter_list (list): list of parameter combinations to simulate.
        data_folder (str): folder in which to create output folders.
    """
    # launch_simulation reads the folder from the instance, so store it
    # before fanning out to the worker processes.
    self.data_folder = data_folder
    with Pool(processes=MAX_PARALLEL_PROCESSES) as pool:
        yield from pool.imap_unordered(self.launch_simulation,
                                       parameter_list)
3.617771
4.108502
0.880557
def launch_simulation(self, parameter):
    """
    Launch a single simulation, using SimulationRunner's facilities.

    This is the unit of work that ParallelRunner's run_simulations maps
    over the parameter list.

    Args:
        parameter (dict): the parameter combination to simulate.
    """
    results = SimulationRunner.run_simulations(self, [parameter],
                                               self.data_folder)
    # The parent generator yields exactly one result for a one-element list.
    return next(results)
15.568825
14.573655
1.068286
def stringify(self, obj, beautify=False, raise_exception=False):
    """Alias of helper.string.serialization.json.stringify."""
    serializer = self.helper.string.serialization.json
    return serializer.stringify(obj=obj, beautify=beautify,
                                raise_exception=raise_exception)
5.293786
2.308929
2.292745
def parse(self, text, encoding='utf8', raise_exception=False):
    """Alias of helper.string.serialization.json.parse."""
    serializer = self.helper.string.serialization.json
    return serializer.parse(text=text, encoding=encoding,
                            raise_exception=raise_exception)
6.635807
2.506233
2.647721
def new(cls, script, commit, params, campaign_dir, overwrite=False):
    """
    Initialize a new class instance with a set configuration and filename.
    The created database has the same name of the campaign directory.

    Args:
        script (str): the ns-3 name of the script that will be used in
            this campaign;
        commit (str): the commit of the ns-3 installation that is used to
            run the simulations.
        params (list): a list of the parameters that can be used on the
            script.
        campaign_dir (str): The path of the file where to save the DB.
        overwrite (bool): Whether or not existing directories should be
            overwritten.

    Raises:
        ValueError: if the path is relative, or if overwriting would
            delete user files.
        FileExistsError: if the directory exists and overwrite is False.
    """
    # We only accept absolute paths
    if not Path(campaign_dir).is_absolute():
        raise ValueError("Path is not absolute")

    # Make sure the directory does not exist already
    if Path(campaign_dir).exists() and not overwrite:
        raise FileExistsError("The specified directory already exists")
    elif Path(campaign_dir).exists() and overwrite:
        # Verify we are not deleting files belonging to the user: only the
        # database file, the data folder and hidden files may be present.
        campaign_dir_name = os.path.basename(campaign_dir)
        folder_contents = set(os.listdir(campaign_dir))
        allowed_files = set(
            ['data', '%s.json' % campaign_dir_name] +
            # Allow hidden files (like .DS_STORE in macos)
            [os.path.basename(os.path.normpath(f)) for f in
             glob.glob(os.path.join(campaign_dir, ".*"))])
        if(not folder_contents.issubset(allowed_files)):
            raise ValueError("The specified directory cannot be overwritten"
                             " because it contains user files.")
        # This operation destroys data.
        shutil.rmtree(campaign_dir)

    # Create the directory and database file in it
    # The indent and separators ensure the database is human readable.
    os.makedirs(campaign_dir)
    tinydb = TinyDB(os.path.join(campaign_dir,
                                 "%s.json" % os.path.basename(campaign_dir)))

    # Save the configuration in the database.
    # Parameters are sorted so the stored config is deterministic.
    config = {
        'script': script,
        'commit': commit,
        'params': sorted(params)
    }
    tinydb.table('config').insert(config)

    return cls(tinydb, campaign_dir)
3.564069
3.508443
1.015855
def load(cls, campaign_dir):
    """
    Initialize from an existing database. It is assumed that the database
    json file has the same name as its containing folder.

    Args:
        campaign_dir (str): The path to the campaign directory.

    Raises:
        ValueError: if the path is relative, the directory does not exist,
            or the campaign database is corrupt (in which case the
            database file is removed).
    """
    # We only accept absolute paths
    if not Path(campaign_dir).is_absolute():
        raise ValueError("Path is not absolute")

    # Verify directory exists
    if not Path(campaign_dir).exists():
        raise ValueError("Directory does not exist")

    # Extract filename from campaign dir
    filename = "%s.json" % os.path.split(campaign_dir)[1]
    filepath = os.path.join(campaign_dir, filename)

    try:
        # Read TinyDB instance from file
        tinydb = TinyDB(filepath)

        # Make sure the configuration is a valid dictionary
        assert set(
            tinydb.table('config').all()[0].keys()) == set(['script',
                                                            'params',
                                                            'commit'])
    except Exception as e:
        # Bug fix: the original bare 'except:' also caught
        # KeyboardInterrupt and SystemExit, deleting the user's database
        # file on a Ctrl-C during load. Only genuine errors should trigger
        # the cleanup of the (possibly empty) file created by TinyDB.
        os.remove(filepath)
        raise ValueError("Specified campaign directory seems corrupt") from e

    return cls(tinydb, campaign_dir)
4.535041
4.586905
0.988693