code string | signature string | docstring string | loss_without_docstring float64 | loss_with_docstring float64 | factor float64 |
|---|---|---|---|---|---|
metadata = Metadata(source=self.actor_urn).__dict__
metadata['source_connector'] = 'irc'
metadata['source_channel'] = e.target
metadata['source_user'] = e.source
metadata['source_username'] = e.source.split('!')[0]
metadata['user_id'] = metadata['source_user']
metadata['display_name'] = metadata['source_username']
return metadata | def set_metadata(self, e) | This function sets the metadata that is common between pub and priv | 4.38287 | 4.338463 | 1.010236 |
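A minimal sketch of the metadata this produces, assuming an irc-library event whose `source` is a `nick!user@host` prefix (the event values here are illustrative):

```python
# Illustrative event standing in for the irc library's event object.
from types import SimpleNamespace

e = SimpleNamespace(target='#legobot', source='alice!alice@example.org')
# lego.set_metadata(e) would then return, roughly:
expected = {
    'source_connector': 'irc',
    'source_channel': '#legobot',
    'source_user': 'alice!alice@example.org',
    'source_username': 'alice',  # portion of the prefix before '!'
    'user_id': 'alice!alice@example.org',
    'display_name': 'alice',
    # plus whatever fields Metadata(source=...).__dict__ contributes
}
```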
text = e.arguments[0]
metadata = self.set_metadata(e)
metadata['is_private_message'] = False
message = Message(text=text, metadata=metadata).__dict__
self.baseplate.tell(message) | def on_pubmsg(self, c, e) | This function runs when the bot receives a public message. | 8.100368 | 7.455624 | 1.086478 |
text = e.arguments[0]
logger.debug('{0!s}'.format(e.source))
metadata = self.set_metadata(e)
metadata['is_private_message'] = True
message = Message(text=text, metadata=metadata).__dict__
self.baseplate.tell(message) | def on_privmsg(self, c, e) | This function runs when the bot receives a private message (query). | 6.953763 | 6.693814 | 1.038834 |
self.backoff = 1 # Assume we had a good connection. Reset backoff.
if self.nickserv:
if Utilities.isNotEmpty(self.nickserv_pass):
self.identify(c, e, self.nickserv_pass)
time.sleep(3) # Make sure Nickserv really sees us
else:
logger.error('If nickserv is enabled, you must supply'
' a password')
if self.nickserv is False and self.nickserv_pass is not None:
logger.warning('It appears you provided a nickserv password but '
'did not enable nickserv authentication')
for channel in self.my_channels:
logger.debug('Attempting to join {0!s}'.format(channel))
c.join(channel) | def on_welcome(self, c, e) | This function runs when the bot successfully connects to the IRC server | 5.329283 | 5.26925 | 1.011393 |
self._connect()
super(irc.bot.SingleServerIRCBot, self).start() | def run(self) | Run the bot in a thread.
Implementing the IRC listener as a thread allows it to
listen without blocking IRCLego's ability to listen
as a pykka actor.
:return: None | 11.171928 | 9.669081 | 1.155428 |
'''
Attempts to send a message to the specified destination in IRC
Extends Legobot.Lego.handle()
Args:
message (Legobot.Message): message w/ metadata to send.
'''
logger.debug(message)
if Utilities.isNotEmpty(message['metadata']['opts']):
target = message['metadata']['opts']['target']
for split_line in Utilities.tokenize(message['text']):
for truncated_line in Utilities.truncate(split_line):
self.botThread.connection.privmsg(target, truncated_line)
# Delay to prevent floods
time.sleep(0.25) | def handle(self, message) | Attempts to send a message to the specified destination in IRC
Extends Legobot.Lego.handle()
Args:
message (Legobot.Message): message w/ metadata to send. | 9.021684 | 4.219717 | 2.137983 |
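A hedged sketch of the flood control used by `handle()`, assuming `Utilities.tokenize` splits on newlines and `Utilities.truncate` wraps over-long lines (both are assumptions; the real logic lives in `Legobot.Utilities`):

```python
import time

def send_lines(connection, target, text, width=400, delay=0.25):
    # Split the message into lines, wrap each line, and pace the sends.
    for line in text.splitlines():                 # stand-in for Utilities.tokenize
        for start in range(0, len(line), width):   # stand-in for Utilities.truncate
            connection.privmsg(target, line[start:start + width])
            time.sleep(delay)                      # pause between sends to avoid floods
```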
if self.sparselib not in self.sparselib_alt:
logger.warning("Invalid sparse library <{}>".format(self.sparselib))
self.sparselib = 'umfpack'
if self.sparselib == 'klu' and not KLU:
logger.info('Optional package "cvxoptklu" for KLU speed-up is not installed; '
'falling back to umfpack')
self.sparselib = 'umfpack'
return True | def check(self) | Check config data consistency
Returns
-------
bool
    True if the config data are consistent | 7.657856 | 7.68382 | 0.996621 |
if self.time != actual_time:
self.time = actual_time
else:
return
for i in range(self.n):
if self.tf[i] == self.time:
logger.info(
' <Fault> Applying fault on Bus <{}> at t={}.'.format(
self.bus[i], self.tf[i]))
self.u[i] = 1
self.active += 1
self.angle0 = self.system.dae.y[self.system.Bus.a]
self.volt0 = self.system.dae.y[self.system.Bus.n:]
self.system.dae.factorize = True
elif self.tc[i] == self.time:
logger.info(
' <Fault> Clearing fault on Bus <{}> at t={}.'.format(
self.bus[i], self.tc[i]))
self.u[i] = 0
self.active -= 1
self.system.dae.y[self.system.Bus.n:] = self.volt0
# self.system.dae.y[self.a] = self.anglepre
self.system.dae.factorize = True | def apply(self, actual_time) | Check time and apply faults | 3.435989 | 3.230618 | 1.06357 |
logmessage = {
"time": (time.time() % 1000) * 1000,
"header": "",
"message": message,
}
if header:
logmessage["header"] = (
json.dumps(header, indent=2) + "\n" + "----------------" + "\n"
)
if isinstance(message, dict):
logmessage["message"] = (
json.dumps(message, indent=2) + "\n" + "----------------" + "\n"
)
print("=== Consume ====\n{header}{message}".format(**logmessage))
self.log.info("Received message @{time}".format(**logmessage))
self.log.debug(
"Received message @{time}\n{header}{message}".format(**logmessage)
)
time.sleep(0.1) | def consume_message(self, header, message) | Consume a message | 3.013888 | 2.981956 | 1.010708 |
if self.log_file is not None and message['should_log']:
message_copy = Message(message['text'],
Metadata(None).__dict__,
message['should_log']).__dict__
with open(self.log_file, mode='w') as f:
f.write(json.dumps(message_copy))
logger.info(message['metadata']['source'])
if self.listening_for(message):
self_thread = self.HandlerThread(self.handle, message)
self_thread.start()
self.cleanup()
for child in self.children:
child.tell(message) | def on_receive(self, message) | Handle being informed of a message.
This function is called whenever a Lego receives a message, as
specified in the pykka documentation.
Legos should not override this function.
:param message:
:return: | 5.865258 | 6.393807 | 0.917334 |
with self.lock:  # context manager releases the lock even if pruning raises
logger.debug('Acquired lock in cleanup for ' + str(self))
self.children = [child for child in self.children if child.is_alive()] | def cleanup(self) | Clean up finished children.
:return: None | 4.426433 | 4.153313 | 1.06576 |
try:
baseplate = kwargs['baseplate']
except KeyError:
if self.baseplate is None:
baseplate = self.actor_ref
else:
baseplate = self.baseplate
try:
lock = kwargs['lock']
except KeyError:
lock = self.lock
child = child_type.start(baseplate, lock, *args, **kwargs)
self.children.append(child) | def add_child(self, child_type, *args, **kwargs) | Initialize and keep track of a child.
:param child_type: a class inheriting from Lego to initialize \
an instance of
:param args: arguments for initializing the child
:param kwargs: keyword arguments for initializing the child
:return: | 3.172402 | 3.373202 | 0.940472 |
metadata = Metadata(source=self.actor_urn,
dest=message['metadata']['source']).__dict__
metadata['opts'] = opts
message = Message(text=text, metadata=metadata,
should_log=message['should_log']).__dict__
dest_actor = ActorRegistry.get_by_urn(message['metadata']['dest'])
if dest_actor is not None:
dest_actor.tell(message)
else:
raise("Tried to send message to nonexistent actor") | def reply(self, message, text, opts=None) | Reply to the sender of the provided message with a message \
containing the provided text.
:param message: the message to reply to
:param text: the text to reply with
:param opts: A dictionary of additional values to add to metadata
:return: None | 5.199278 | 4.983151 | 1.043372 |
if not opts:
opts = {}
opts['attachment'] = attachment
opts['fallback'] = text
text += '\n {}'.format(attachment)
self.reply(message, text, opts) | def reply_attachment(self, message, text, attachment, opts=None) | Convenience method for formatting reply as attachment (if available)
and passing it on to the reply method. Individual connectors can then
deal with the attachment or simply pass it on as a regular message
:param message: the message to reply to
:param text: the text to reply with
:param attachment: the attachment link
:param opts: A dictionary of additional values to add to metadata
:return: None | 4.176181 | 4.355512 | 0.958827 |
try:
source = message['metadata']['source_channel']
thread = message['metadata'].get('thread_ts')
opts = {'target': source, 'thread': thread}
except LookupError:
source = None
opts = None
logger.error("Could not identify source from message:{}\n"
.format(str(message)))
return opts | def build_reply_opts(self, message) | Convenience method for constructing default options for a
reply message.
:param message: the message to reply to
:return: opts | 5.938725 | 6.539226 | 0.908169 |
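A short example of the opts dict this derives from an incoming message (field names follow the metadata set by the connectors above):

```python
message = {'metadata': {'source_channel': '#legobot', 'thread_ts': None}}
# build_reply_opts(message) would return:
opts = {'target': '#legobot', 'thread': None}
```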
device = data[0]
action = data[1]
if data[2] == '*':
data[2] = '.*'
regex = re.compile(data[2])
prop = data[3]
value = float(data[4])
if action == 'MUL':
for item in range(system.__dict__[device].n):
if regex.search(system.__dict__[device].name[item]):
system.__dict__[device].__dict__[prop][item] *= value
elif action == 'REP':
for item in range(system.__dict__[device].n):
if regex.search(system.__dict__[device].name[item]):
system.__dict__[device].__dict__[prop][item] = value
elif action == 'DIV':
if not value:
return
for item in range(system.__dict__[device].n):
if regex.search(system.__dict__[device].name[item]):
system.__dict__[device].__dict__[prop][item] /= value
elif action == 'SUM':
for item in range(system.__dict__[device].n):
if regex.search(system.__dict__[device].name[item]):
system.__dict__[device].__dict__[prop][item] += value
elif action == 'SUB':
for item in range(system.__dict__[device].n):
if regex.search(system.__dict__[device].name[item]):
system.__dict__[device].__dict__[prop][item] -= value
elif action == 'POW':
for item in range(system.__dict__[device].n):
if regex.search(system.__dict__[device].name[item]):
system.__dict__[device].__dict__[prop][item] **= value
else:
logger.error('ALTER action <{}> is not defined'.format(action)) | def alter(data, system) | Alter data in dm format devices | 1.665662 | 1.642413 | 1.014155 |
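A hypothetical ALTER record, showing the field order `alter()` expects (device, action, name regex, property, value; the model and property names are illustrative):

```python
# Multiply Vn of every Bus whose name matches "Bus.*" by 1.05.
data = ['Bus', 'MUL', 'Bus.*', 'Vn', '1.05']
# alter(data, system) then does, in effect:
#   for each Bus element whose name matches re.compile('Bus.*'):
#       system.Bus.Vn[item] *= 1.05
```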
retval = True
fid = open(file, 'r')
sep = re.compile(r'\s*,\s*')
comment = re.compile(r'^#\s*')
equal = re.compile(r'\s*=\s*')
math = re.compile(r'[*/+-]')
double = re.compile(r'[+-]? *(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][+-]?\d+)?')
# parse data
while True:
line = fid.readline()
if not line:
break
line = line.replace('\n', '')
line = line.strip()
if not line:
continue
if comment.search(line):
continue
# span multiple line
while line.endswith(',') or line.endswith(';'):
newline = fid.readline()
line = line.replace('\n', '')
newline = newline.strip()
if not newline:
break  # EOF reached while a record continuation was still open
if comment.search(newline):
continue
line += ' ' + newline
data = sep.split(line)
device = data.pop(0)
device = device.strip()
if device == 'ALTER':
alter(data, system)
continue
if device == 'INCLUDE':
logger.debug('Parsing include file <{}>'.format(data[0]))
newpath = data[0]
if not os.path.isfile(newpath):
newpath = os.path.join(system.files.path, data[0])
if not os.path.isfile(newpath):
raise FileNotFoundError(
'Unable to locate file in {}'.format(newpath))
read(newpath, system, header=False) # recursive call
logger.debug('Parsing of include file <{}> completed.'.format(
data[0]))
continue
kwargs = {}
for item in data:
pair = equal.split(item)
key = pair[0].strip()
value = pair[1].strip()
if value.startswith('"'):
value = value[1:-1]
elif value.startswith('['):
array = value[1:-1].split(';')
if math.search(value): # execute simple operations
value = list(map(lambda x: eval(x), array))
else:
value = list(map(lambda x: float(x), array))
elif double.search(value):
if math.search(value): # execute simple operations
value = eval(value)
else:
value = float(value)
elif value == 'True':
value = True
elif value == 'False':
value = False
else:
value = int(value)
kwargs[key] = value
index = kwargs.pop('idx', None)
namex = kwargs.pop('name', None)
try:
system.__dict__[device].elem_add(idx=index, name=namex, **kwargs)
except KeyError:
logger.error(
'Error adding device {:s} to powersystem object.'.format(
device))
logger.debug(
'Make sure you have added the jit models in __init__.py'
)
fid.close()
return retval | def read(file, system, header=True) | Read a dm format file and elem_add to system | 2.758105 | 2.699312 | 1.021781 |
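A minimal dm-format snippet of the kind `read()` parses (model and field names are illustrative). Records continue across lines ending with `,`, `#` starts a comment, quoted values are strings, and `[a; b; c]` is an array:

```python
sample = '''
# comment line
Bus, idx = 1, name = "Bus 1", Vn = 110
Bus, idx = 2, name = "Bus 2", Vn = 110,
     area = 1
PQ, bus = 2, p = [0.1; 0.2; 0.3]
'''
# read(path, system) splits each record on commas, parses key = value pairs,
# and calls system.<device>.elem_add(idx=..., name=..., **kwargs).
```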
# TODO: Check for bugs!!!
out = list()
out.append('# DOME format version 1.0')
ppl = 7 # parameter per line
retval = True
dev_list = sorted(system.devman.devices)
for dev in dev_list:
model = system.__dict__[dev]
if not model.n:
continue
out.append('')
header = dev + ', '
space = ' ' * (len(dev) + 2)
keys = list(model._data.keys())
keys.extend(['name', 'idx'])
keys = sorted(keys)
# remove non-existent keys
keys = [key for key in keys if key in model.__dict__]
nline = int(ceil(len(keys) / ppl))
nelement = model.n
vals = [''] * len(keys)
# for each element, read values
for elem in range(nelement):
for idx, key in enumerate(keys):
if model._flags['sysbase'] and key in model._store.keys():
val = model._store[key][elem]
else:
val = model.__dict__[key][elem]
if isinstance(val, float):
val = round(val, 5)
elif isinstance(val, str):
val = '"{}"'.format(val)
elif isinstance(val, list):
val = list(val)
val = '; '.join(str(i) for i in val)
val = '[{}]'.format(val)
elif val is None:
val = 0
vals[idx] = val
pair = []
for key, val in zip(keys, vals):
pair.append('{} = {}'.format(key, val))
for line in range(nline):
string = ', '.join(pair[ppl * line:ppl * (line + 1)])
if line == 0: # append header or space
string = header + string
else:
string = space + string
if not line == nline - 1: # add comma except for last line
string += ','
out.append(string)
fid = open(file, 'w')
for line in out:
fid.write(line + '\n')
fid.close()
return retval | def write(file, system) | Write data in system to a dm file | 3.481373 | 3.437024 | 1.012903 |
if "add_argument" in dir(parser):
return cls.add_command_line_options_argparse(parser)
else:
return cls.add_command_line_options_optparse(parser) | def add_command_line_options(cls, parser) | function to inject command line parameters | 2.90146 | 3.044183 | 0.953116 |
import argparse
class SetParameter(argparse.Action):
def __call__(self, parser, namespace, value, option_string=None):
cls.config[option_string] = value
if option_string == "--stomp-conf":
cls.load_configuration_file(value)
argparser.add_argument(
"--stomp-host",
metavar="HOST",
default=cls.defaults.get("--stomp-host"),
help="Stomp broker address, default '%(default)s'",
type=str,
action=SetParameter,
)
argparser.add_argument(
"--stomp-port",
metavar="PORT",
default=cls.defaults.get("--stomp-port"),
help="Stomp broker port, default '%(default)s'",
type=int,
action=SetParameter,
)
argparser.add_argument(
"--stomp-user",
metavar="USER",
default=cls.defaults.get("--stomp-user"),
help="Stomp user, default '%(default)s'",
type=str,
action=SetParameter,
)
argparser.add_argument(
"--stomp-pass",
metavar="PASS",
default=cls.defaults.get("--stomp-pass"),
help="Stomp password",
type=str,
action=SetParameter,
)
argparser.add_argument(
"--stomp-prfx",
metavar="PRE",
default=cls.defaults.get("--stomp-prfx"),
help="Stomp namespace prefix, default '%(default)s'",
type=str,
action=SetParameter,
)
argparser.add_argument(
"--stomp-conf",
metavar="CNF",
default=cls.defaults.get("--stomp-conf"),
help="Stomp configuration file containing connection information, disables default values",
type=str,
action=SetParameter,
) | def add_command_line_options_argparse(cls, argparser) | function to inject command line parameters into
a Python ArgumentParser. | 1.821711 | 1.835526 | 0.992473 |
def set_parameter(option, opt, value, parser):
cls.config[opt] = value
if opt == "--stomp-conf":
cls.load_configuration_file(value)
optparser.add_option(
"--stomp-host",
metavar="HOST",
default=cls.defaults.get("--stomp-host"),
help="Stomp broker address, default '%default'",
type="string",
nargs=1,
action="callback",
callback=set_parameter,
)
optparser.add_option(
"--stomp-port",
metavar="PORT",
default=cls.defaults.get("--stomp-port"),
help="Stomp broker port, default '%default'",
type="int",
nargs=1,
action="callback",
callback=set_parameter,
)
optparser.add_option(
"--stomp-user",
metavar="USER",
default=cls.defaults.get("--stomp-user"),
help="Stomp user, default '%default'",
type="string",
nargs=1,
action="callback",
callback=set_parameter,
)
optparser.add_option(
"--stomp-pass",
metavar="PASS",
default=cls.defaults.get("--stomp-pass"),
help="Stomp password",
type="string",
nargs=1,
action="callback",
callback=set_parameter,
)
optparser.add_option(
"--stomp-prfx",
metavar="PRE",
default=cls.defaults.get("--stomp-prfx"),
help="Stomp namespace prefix, default '%default'",
type="string",
nargs=1,
action="callback",
callback=set_parameter,
)
optparser.add_option(
"--stomp-conf",
metavar="CNF",
default=cls.defaults.get("--stomp-conf"),
help="Stomp configuration file containing connection information, disables default values",
type="string",
nargs=1,
action="callback",
callback=set_parameter,
) | def add_command_line_options_optparse(cls, optparser) | function to inject command line parameters into
a Python OptionParser. | 1.71697 | 1.718973 | 0.998834 |
self._connected = self._connected and self._conn.is_connected()
return self._connected | def is_connected(self) | Return connection status | 5.956959 | 4.844272 | 1.229691 |
if self._connected:
self._connected = False
self._conn.disconnect() | def disconnect(self) | Gracefully close connection to stomp server. | 5.468668 | 3.675328 | 1.48794 |
self._broadcast(
"transient.status",
json.dumps(status),
headers={"expires": str(int((15 + time.time()) * 1000))},
) | def broadcast_status(self, status) | Broadcast transient status information to all listeners | 7.209915 | 6.489562 | 1.111002 |
headers = {}
if kwargs.get("exclusive"):
headers["activemq.exclusive"] = "true"
if kwargs.get("ignore_namespace"):
destination = "/queue/" + channel
else:
destination = "/queue/" + self._namespace + channel
if kwargs.get("priority"):
headers["activemq.priority"] = kwargs["priority"]
if kwargs.get("retroactive"):
headers["activemq.retroactive"] = "true"
if kwargs.get("selector"):
headers["selector"] = kwargs["selector"]
if kwargs.get("transformation"):
if kwargs["transformation"] == True:
headers["transformation"] = "jms-object-json"
else:
headers["transformation"] = kwargs["transformation"]
if kwargs.get("acknowledgement"):
ack = "client-individual"
else:
ack = "auto"
self._conn.subscribe(destination, sub_id, headers=headers, ack=ack) | def _subscribe(self, sub_id, channel, callback, **kwargs) | Listen to a queue, notify via callback function.
:param sub_id: ID for this subscription in the transport layer
:param channel: Queue name to subscribe to
:param callback: Function to be called when messages are received
:param **kwargs: Further parameters for the transport layer. For example
acknowledgement: If true receipt of each message needs to be
acknowledged.
exclusive: Attempt to become exclusive subscriber to the queue.
ignore_namespace: Do not apply namespace to the destination name
priority: Consumer priority, messages are sent to higher
priority consumers whenever possible.
selector: Only receive messages filtered by a selector. See
https://activemq.apache.org/activemq-message-properties.html
for potential filter criteria. Uses SQL 92 syntax.
transformation: Transform messages into different format. If set
to True, will use 'jms-object-json' formatting. | 2.782439 | 2.1371 | 1.301969 |
headers = {}
if kwargs.get("ignore_namespace"):
destination = "/topic/" + channel
else:
destination = "/topic/" + self._namespace + channel
if kwargs.get("retroactive"):
headers["activemq.retroactive"] = "true"
if kwargs.get("transformation"):
if kwargs["transformation"] == True:
headers["transformation"] = "jms-object-json"
else:
headers["transformation"] = kwargs["transformation"]
self._conn.subscribe(destination, sub_id, headers=headers) | def _subscribe_broadcast(self, sub_id, channel, callback, **kwargs) | Listen to a broadcast topic, notify via callback function.
:param sub_id: ID for this subscription in the transport layer
:param channel: Topic name to subscribe to
:param callback: Function to be called when messages are received
:param **kwargs: Further parameters for the transport layer. For example
ignore_namespace: Do not apply namespace to the destination name
retroactive: Ask broker to send old messages if possible
transformation: Transform messages into different format. If set
to True, will use 'jms-object-json' formatting. | 3.476195 | 2.399097 | 1.44896 |
if not headers:
headers = {}
if "persistent" not in headers:
headers["persistent"] = "true"
if delay:
# The 'delay' mechanism is only supported when
# schedulerSupport is enabled on the broker.
headers["AMQ_SCHEDULED_DELAY"] = int(1000 * delay)
if expiration:
headers["expires"] = int((time.time() + expiration) * 1000)
if kwargs.get("ignore_namespace"):
destination = "/queue/" + destination
else:
destination = "/queue/" + self._namespace + destination
try:
self._conn.send(destination, message, headers=headers, **kwargs)
except stomp.exception.NotConnectedException:
self._connected = False
raise workflows.Disconnected("No connection to stomp host") | def _send(
self, destination, message, headers=None, delay=None, expiration=None, **kwargs
) | Send a message to a queue.
:param destination: Queue name to send to
:param message: A string to be sent
:param **kwargs: Further parameters for the transport layer. For example
delay: Delay transport of message by this many seconds
expiration: Optional expiration time, relative to sending time
headers: Optional dictionary of header entries
ignore_namespace: Do not apply namespace to the destination name
persistent: Whether to mark messages as persistent, to be kept
between broker restarts. Default is 'true'.
transaction: Transaction ID if message should be part of a
transaction | 4.251057 | 3.815988 | 1.114012 |
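A hedged usage sketch of `_send` (the transport instance, destination name, and payload below are illustrative):

```python
# Assumed: `transport` is a connected instance of this stomp transport class.
transport._send(
    'processing',        # resolved to /queue/<namespace>processing
    '{"task": 42}',
    delay=5,             # adds AMQ_SCHEDULED_DELAY: 5000 (milliseconds)
    expiration=600,      # adds expires: (now + 600 s) in ms since the epoch
)
# With no explicit headers, persistent: 'true' is added automatically.
```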
self._conn.ack(message_id, subscription_id, **kwargs) | def _ack(self, message_id, subscription_id, **kwargs) | Acknowledge receipt of a message. This only makes sense when the
'acknowledgement' flag was set for the relevant subscription.
:param message_id: ID of the message to be acknowledged
:param subscription_id: ID of the relevant subscription
:param **kwargs: Further parameters for the transport layer. For example
transaction: Transaction ID if acknowledgement should be part of
a transaction | 4.284975 | 5.499672 | 0.779133 |
self._conn.nack(message_id, subscription_id, **kwargs) | def _nack(self, message_id, subscription_id, **kwargs) | Reject receipt of a message. This only makes sense when the
'acknowledgement' flag was set for the relevant subscription.
:param message_id: ID of the message to be rejected
:param subscription_id: ID of the relevant subscription
:param **kwargs: Further parameters for the transport layer. For example
transaction: Transaction ID if rejection should be part of a
transaction | 3.835764 | 4.838106 | 0.792824 |
if dev_name not in self.devices:
self.devices.append(dev_name)
group_name = self.system.__dict__[dev_name]._group
if group_name not in self.group.keys():
self.group[group_name] = {} | def register_device(self, dev_name) | register a device to the device list | 3.181674 | 3.130376 | 1.016387 |
if dev_name not in self.devices:
logger.error(
'Device {} missing. Call register_device before registering elements'.
format(dev_name))
return
group_name = self.system.__dict__[dev_name]._group
if idx is None: # "if not idx" will fail for idx==0.0
idx = dev_name + '_' + str(len(self.group[group_name].keys()))
self.group[group_name][idx] = dev_name
return idx | def register_element(self, dev_name, idx=None) | Register a device element to the group list
Parameters
----------
dev_name : str
model name
idx : str
element idx
Returns
-------
str
assigned idx | 5.052778 | 4.990388 | 1.012502 |
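A short sketch of the registration flow (the `devman` instance and model name are illustrative):

```python
# Assumed: `devman` is an instance of this device manager with a populated system.
devman.register_device('PQ')                 # adds 'PQ' to the device list
idx0 = devman.register_element('PQ')         # auto-assigned idx, e.g. 'PQ_0'
idx1 = devman.register_element('PQ', idx=7)  # explicit idx is kept as-is
```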
self.devices.sort()
# idx: the indices of order-sensitive models
# names: an ordered list of order-sensitive models
idx = []
names = []
for dev in order:
# if ``dev`` in ``order`` is a model file name:
# initialize the models in alphabet order
if dev in all_models:
all_dev = list(sorted(all_models[dev].keys()))
for item in all_dev:
if item in self.devices:
idx.append(self.devices.index(item))
names.append(item)
# if ``dev`` presents as a model name
elif dev in self.devices:
idx.append(self.devices.index(dev))
names.append(dev)
idx = sorted(idx)
for i, name in zip(idx, names):
self.devices[i] = name | def sort_device(self) | Sort device to follow the order of initialization
:return: None | 4.648829 | 4.493871 | 1.034482 |
system = self.system
config = self.config
if not system.dae.n:
freq = 1.0
elif system.dae.n == 1:
B = matrix(system.dae.Gx)
self.solver.linsolve(system.dae.Gy, B)
As = system.dae.Fx - system.dae.Fy * B
freq = abs(As[0, 0])
else:
freq = 20.0
if freq > system.freq:
freq = float(system.freq)
tspan = abs(config.tf - config.t0)
tcycle = 1 / freq
config.deltatmax = min(5 * tcycle, tspan / 100.0)
config.deltat = min(tcycle, tspan / 100.0)
config.deltatmin = min(tcycle / 64, config.deltatmax / 20)
if config.fixt:
if config.tstep <= 0:
logger.warning('Fixed time step is negative or zero')
logger.warning('Switching to automatic time step')
config.fixt = False
else:
config.deltat = config.tstep
if config.tstep < config.deltatmin:
logger.warning(
'Fixed time step is below the estimated minimum')
self.h = config.deltat | def _calc_time_step_first(self) | Compute the first time step and save to ``self.h``
Returns
-------
None | 4.307785 | 4.381142 | 0.983256 |
system = self.system
config = self.config
convergence = self.convergence
niter = self.niter
t = self.t
if t == 0:
self._calc_time_step_first()
return
if convergence:
if niter >= 15:
config.deltat = max(config.deltat * 0.5, config.deltatmin)
elif niter <= 6:
config.deltat = min(config.deltat * 1.1, config.deltatmax)
else:
config.deltat = max(config.deltat * 0.95, config.deltatmin)
# adjust fixed time step if niter is high
if config.fixt:
config.deltat = min(config.tstep, config.deltat)
else:
config.deltat *= 0.9
if config.deltat < config.deltatmin:
config.deltat = 0
if system.Fault.is_time(t) or system.Breaker.is_time(t):
config.deltat = min(config.deltat, 0.002778)
elif system.check_event(t):
config.deltat = min(config.deltat, 0.002778)
if config.method == 'fwdeuler':
config.deltat = min(config.deltat, config.tstep)
# last step size
if self.t + config.deltat > config.tf:
config.deltat = config.tf - self.t
# reduce time step for fixed_times events
for fixed_t in self.fixed_times:
if (fixed_t > self.t) and (fixed_t <= self.t + config.deltat):
config.deltat = fixed_t - self.t
self.switch = True
break
self.h = config.deltat | def calc_time_step(self) | Set the time step during time domain simulations
Parameters
----------
convergence: bool
truth value of the convergence of the last step
niter: int
current iteration count
t: float
current simulation time
Returns
-------
float
computed time step size | 3.086737 | 3.042029 | 1.014697 |
system = self.system
config = self.config
dae = self.system.dae
if system.pflow.solved is False:
return
t, s = elapsed()
# Assign indices for post-powerflow device variables
system.xy_addr1()
# Assign variable names for bus injections and line flows if enabled
system.varname.resize_for_flows()
system.varname.bus_line_names()
# Reshape dae to retain power flow solutions
system.dae.init1()
# Initialize post-powerflow device variables
for device, init1 in zip(system.devman.devices, system.call.init1):
if init1:
system.__dict__[device].init1(system.dae)
# compute line and area flow
if config.compute_flows:
dae.init_fg()
self.compute_flows() # TODO: move to PowerSystem
t, s = elapsed(t)
if system.dae.n:
logger.debug('Dynamic models initialized in {:s}.'.format(s))
else:
logger.debug('No dynamic model loaded.')
# system.dae flags initialize
system.dae.factorize = True
system.dae.mu = 1.0
system.dae.kg = 0.0 | def init(self) | Initialize time domain simulation
Returns
-------
None | 10.273256 | 10.78076 | 0.952925 |
ret = False
system = self.system
config = self.config
dae = self.system.dae
# maxit = config.maxit
# tol = config.tol
if system.pflow.solved is False:
logger.warning('Power flow not solved. Simulation cannot continue.')
return ret
t0, _ = elapsed()
t1 = t0
self.streaming_init()
logger.info('')
logger.info('-> Time Domain Simulation: {} method, t={} s'
.format(self.config.method, self.config.tf))
self.load_pert()
self.run_step0()
config.qrtstart = time()
while self.t < config.tf:
self.check_fixed_times()
self.calc_time_step()
if self.callpert is not None:
self.callpert(self.t, self.system)
if self.h == 0:
break
# progress time and set time in dae
self.t += self.h
dae.t = self.t
# backup actual variables
self.x0 = matrix(dae.x)
self.y0 = matrix(dae.y)
self.f0 = matrix(dae.f)
# apply fixed_time interventions and perturbations
self.event_actions()
# reset flags used in each step
self.err = 1
self.niter = 0
self.convergence = False
self.implicit_step()
if self.convergence is False:
try:
self.restore_values()
continue
except ValueError:
self.t = config.tf
ret = False
break
self.step += 1
self.compute_flows()
system.varout.store(self.t, self.step)
self.streaming_step()
# plot variables and display iteration status
perc = max(min((self.t - config.t0) / (config.tf - config.t0) * 100, 100), 0)
# show iteration info every 30 seconds or every 20%
t2, _ = elapsed(t1)
if t2 - t1 >= 30:
t1 = t2
logger.info(' ({:.0f}%) time = {:.4f}s, step = {}, niter = {}'
.format(100 * self.t / config.tf, self.t, self.step,
self.niter))
if perc > self.next_pc or self.t == config.tf:
self.next_pc += 20
logger.info(' ({:.0f}%) time = {:.4f}s, step = {}, niter = {}'
.format(100 * self.t / config.tf, self.t, self.step, self.niter))
# compute max rotor angle difference
# diff_max = anglediff()
# quasi-real-time check and wait
rt_end = config.qrtstart + (self.t - config.t0) * config.kqrt
if config.qrt:
# the ending time has passed
if time() - rt_end > 0:
# simulation is too slow
if time() - rt_end > config.kqrt:
logger.debug('Simulation over-run at t={:4.4g} s.'.format(self.t))
# wait to finish
else:
self.headroom += (rt_end - time())
while time() - rt_end < 0:
sleep(1e-5)
if config.qrt:
logger.debug('RT headroom time: {} s.'.format(str(self.headroom)))
if self.t != config.tf:
logger.error('Reached minimum time step. Convergence is not likely.')
ret = False
else:
ret = True
if system.config.dime_enable:
system.streaming.finalize()
_, s = elapsed(t0)
if ret is True:
logger.info(' Time domain simulation finished in {:s}.'.format(s))
else:
logger.info(' Time domain simulation failed in {:s}.'.format(s))
self.success = ret
self.dump_results(success=self.success)
return ret | def run(self) | Run time domain simulation
Returns
-------
bool
Success flag | 4.947262 | 4.887475 | 1.012233 |
if self.convergence is True:
return
dae = self.system.dae
system = self.system
inc_g = self.inc[dae.n:dae.m + dae.n]
max_g_err_sign = 1 if abs(max(inc_g)) > abs(min(inc_g)) else -1
if max_g_err_sign == 1:
max_g_err_idx = list(inc_g).index(max(inc_g))
else:
max_g_err_idx = list(inc_g).index(min(inc_g))
logger.debug(
'Maximum mismatch = {:.4g} at equation <{}>'.format(
max(abs(inc_g)), system.varname.unamey[max_g_err_idx]))
logger.debug(
'Reducing time step h={:.4g}s for t={:.4g}'.format(self.h, self.t))
# restore initial variable data
dae.x = matrix(self.x0)
dae.y = matrix(self.y0)
dae.f = matrix(self.f0) | def restore_values(self) | Restore x, y, and f values if not converged
Returns
-------
None | 4.269498 | 4.131431 | 1.033419 |
config = self.config
system = self.system
dae = self.system.dae
# constant short names
In = spdiag([1] * dae.n)
h = self.h
while self.err > config.tol and self.niter < config.maxit:
if self.t - self.t_jac >= 5:
dae.rebuild = True
self.t_jac = self.t
elif self.niter > 4:
dae.rebuild = True
elif dae.factorize:
dae.rebuild = True
# rebuild Jacobian
if dae.rebuild:
exec(system.call.int)
dae.rebuild = False
else:
exec(system.call.int_fg)
# complete Jacobian matrix dae.Ac
if config.method == 'euler':
dae.Ac = sparse(
[[In - h * dae.Fx, dae.Gx], [-h * dae.Fy, dae.Gy]],
'd')
dae.q = dae.x - self.x0 - h * dae.f
elif config.method == 'trapezoidal':
dae.Ac = sparse([[In - h * 0.5 * dae.Fx, dae.Gx],
[-h * 0.5 * dae.Fy, dae.Gy]], 'd')
dae.q = dae.x - self.x0 - h * 0.5 * (dae.f + self.f0)
# windup limiters
dae.reset_Ac()
if dae.factorize:
self.F = self.solver.symbolic(dae.Ac)
dae.factorize = False
self.inc = -matrix([dae.q, dae.g])
try:
N = self.solver.numeric(dae.Ac, self.F)
self.solver.solve(dae.Ac, self.F, N, self.inc)
except ArithmeticError:
logger.error('Singular matrix')
dae.check_diag(dae.Gy, 'unamey')
dae.check_diag(dae.Fx, 'unamex')
# force quit
self.niter = config.maxit + 1
break
except ValueError:
logger.warning('Unexpected symbolic factorization')
dae.factorize = True
continue
else:
inc_x = self.inc[:dae.n]
inc_y = self.inc[dae.n:dae.m + dae.n]
dae.x += inc_x
dae.y += inc_y
self.err = max(abs(self.inc))
if np.isnan(self.inc).any():
logger.error('Iteration error: NaN detected.')
self.niter = config.maxit + 1
break
self.niter += 1
if self.niter <= config.maxit:
self.convergence = True | def implicit_step(self) | Integrate one step using trapezoidal method. Sets convergence and niter flags.
Returns
-------
None | 4.090491 | 4.113559 | 0.994392 |
system = self.system
dae = system.dae
if self.switch:
system.Breaker.apply(self.t)
for item in system.check_event(self.t):
system.__dict__[item].apply(self.t)
dae.rebuild = True
self.switch = False | def event_actions(self) | Take actions for timed events
Returns
-------
None | 9.512513 | 9.188216 | 1.035295 |
system = self.system
if system.files.pert:
try:
sys.path.append(system.files.path)
module = importlib.import_module(system.files.pert[:-3])
self.callpert = getattr(module, 'pert')
except ImportError:
logger.warning('Pert file is discarded due to import errors.')
self.callpert = None | def load_pert(self) | Load perturbation files to ``self.callpert``
Returns
-------
None | 4.753564 | 4.911672 | 0.96781 |
dae = self.system.dae
system = self.system
self.inc = zeros(dae.m + dae.n, 1)
system.varout.store(self.t, self.step)
self.streaming_step() | def run_step0(self) | For the 0th step, store the data and stream data
Returns
-------
None | 11.318023 | 11.548275 | 0.980062 |
system = self.system
if system.config.dime_enable:
system.streaming.sync_and_handle()
system.streaming.vars_to_modules()
system.streaming.vars_to_pmu() | def streaming_step(self) | Sync, handle and streaming for each integration step
Returns
-------
None | 14.714673 | 13.827291 | 1.064176 |
system = self.system
config = self.config
if system.config.dime_enable:
config.compute_flows = True
system.streaming.send_init(recepient='all')
logger.info('Waiting for modules to send init info...')
sleep(0.5)
system.streaming.sync_and_handle() | def streaming_init(self) | Send out initialization variables and process init from modules
Returns
-------
None | 12.631897 | 13.274682 | 0.951578 |
system = self.system
config = self.config
dae = system.dae
if config.compute_flows:
# compute and append series injections on buses
exec(system.call.bus_injection)
bus_inj = dae.g[:2 * system.Bus.n]
exec(system.call.seriesflow)
system.Area.seriesflow(system.dae)
system.Area.interchange_varout()
dae.y = matrix([
dae.y, bus_inj, system.Line._line_flows, system.Area.inter_varout
]) | def compute_flows(self) | If enabled, compute the line flows after each step
Returns
-------
None | 13.800054 | 13.491249 | 1.022889 |
system = self.system
t, _ = elapsed()
if success and (not system.files.no_output):
# system.varout.dump()
system.varout.dump_np_vars()
_, s = elapsed(t)
logger.info('Simulation data dumped in {:s}.'.format(s)) | def dump_results(self, success) | Dump simulation results to ``dat`` and ``lst`` files
Returns
-------
None | 12.927141 | 13.909804 | 0.929355 |
try:
fid = open(file, 'r')
raw_file = fid.readlines()
except IOError:
print('* IOError while reading input card file.')
return
ret_dict = dict()
ret_dict['outfile'] = file.split('.')[0].lower() + '.py'
key, val = None, None
for idx, line in enumerate(raw_file):
line = line.strip()
if not line:
continue
if line.startswith('#'):
continue
elif '#' in line:
line = line.split('#')[0]
if '=' in line: # defining a field
key, val = line.split('=')
key, val = key.strip(), val.strip()
val = [] if val == '' else val
ret_dict.update({key: val})
if val:
val = val.split(';')
else:
val.extend(line.split(';'))
if val:
val = de_blank(val)
ret_dict[key] = val
ret_dict_ord = dict(ret_dict)
for key, val in ret_dict.items():
if not val:
continue
if type(val) == list:
if ':' in val[0]:
new_val = {} # return in a dictionary
new_val_ord = [
] # return in an ordered list with the dict keys at 0
for item in val:
try:
m, n = item.split(':')
except ValueError:
print('* Error: check line <{}>'.format(item))
return
m, n = m.strip(), n.strip()
if ',' in n:
n = n.split(',')
n = de_blank(n)
n = [to_number(i) for i in n]
else:
n = to_number(n)
new_val.update({m.strip(): n})
new_val_ord.append([m.strip(), n])
ret_dict[key] = new_val
ret_dict_ord[key] = new_val_ord
ret_dict['name'] = ret_dict['name'][0]
ret_dict['doc_string'] = ret_dict['doc_string'][0]
ret_dict['group'] = ret_dict['group'][0]
ret_dict['service_keys'] = list(ret_dict['service_eq'].keys())
ret_dict['consts'] = list(ret_dict['data'].keys()) + list(
ret_dict['service_eq'].keys())
ret_dict['init1_eq'] = ret_dict_ord['init1_eq']
ret_dict['service_eq'] = ret_dict_ord['service_eq']
ret_dict['ctrl'] = ret_dict_ord['ctrl']
copy_algebs = []
copy_states = []
for item in ret_dict['ctrl']:
key, val = item
if val[3] == 'y':
copy_algebs.append(key)
elif val[3] == 'x':
copy_states.append(key)
elif val[3] == 'c':
ret_dict['consts'].append(key)
ret_dict['copy_algebs'] = copy_algebs
ret_dict['copy_states'] = copy_states
return run(system, **ret_dict) | def read(file, system) | Parse an ANDES card file into internal variables | 2.782822 | 2.739717 | 1.015733 |
ret = list(val)
if isinstance(val, list):
# strip items and drop blanks; a comprehension avoids the index shift
# caused by removing from the list while still indexing into it
ret = [item.strip() for item in val if item.strip() != '']
return ret | def de_blank(val) | Remove blank elements in `val` and return `ret` | 2.746714 | 2.368095 | 1.159883 |
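For example, blanks are dropped and the remaining items stripped:

```python
de_blank(['GENROU ', '', ' 1'])  # -> ['GENROU', '1']
```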
if not sym_const:
sym_const = []
if not sym_states:
sym_states = []
if not sym_algebs:
sym_algebs = []
expr_str = []
if type(expr) in (int, float):
return expr
if expr.is_Atom:
if expr in sym_const:
expr_str = 'self.{}'.format(expr)
elif expr in sym_states:
expr_str = 'dae.x[self.{}]'.format(expr)
elif expr in sym_algebs:
expr_str = 'dae.y[self.{}]'.format(expr)
elif expr.is_Number:
if expr.is_Integer:
expr_str = str(int(expr))
else:
expr_str = str(float(expr))
# if expr.is_negative:
# expr_str = '{}'.format(expr)
# else:
# expr_str = str(expr)
else:
raise AttributeError('Unknown free symbol <{}>'.format(expr))
else:
nargs = len(expr.args)
arg_str = []
for arg in expr.args:
arg_str.append(stringfy(arg, sym_const, sym_states, sym_algebs))
if expr.is_Add:
expr_str = ''
for idx, item in enumerate(arg_str):
if idx == 0:
if len(item) > 1 and item[1] == ' ':
item = item[0] + item[2:]
if idx > 0:
if item[0] == '-':
item = ' ' + item
else:
item = ' + ' + item
expr_str += item
elif expr.is_Mul:
if nargs == 2 and expr.args[0].is_Integer: # number * matrix
if expr.args[0].is_positive:
expr_str = '{}*{}'.format(*arg_str)
elif expr.args[0] == Integer('-1'):
expr_str = '- {}'.format(arg_str[1])
else: # negative but not -1
expr_str = '{}*{}'.format(*arg_str)
else: # matrix dot multiplication
if expr.args[0] == Integer('-1'):
# bring '-' out of mul()
expr_str = ', '.join(arg_str[1:])
expr_str = '- mul(' + expr_str + ')'
else:
expr_str = ', '.join(arg_str)
expr_str = 'mul(' + expr_str + ')'
elif expr.is_Function:
expr_str = ', '.join(arg_str)
expr_str = str(expr.func) + '(' + expr_str + ')'
elif expr.is_Pow:
if arg_str[1] == '-1':
expr_str = 'div(1, {})'.format(arg_str[0])
else:
expr_str = '({})**{}'.format(*arg_str)
elif expr.is_Div:
expr_str = ', '.join(arg_str)
expr_str = 'div(' + expr_str + ')'
else:
raise NotImplementedError
return expr_str | def stringfy(expr, sym_const=None, sym_states=None, sym_algebs=None) | Convert the right-hand-side of an equation into CVXOPT matrix operations | 2.117692 | 2.114697 | 1.001416 |
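A hedged example of converting a sympy product into the CVXOPT-style string, with `x` registered as a state and `y` as an algebraic variable:

```python
from sympy import symbols

x, y = symbols('x y')
stringfy(x * y, sym_states=[x], sym_algebs=[y])
# -> 'mul(dae.x[self.x], dae.y[self.y])'
```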
dyr = {}
data = []
end = 0
retval = True
sep = ','
fid = open(file, 'r')
for line in fid.readlines():
if line.find('/') >= 0:
line = line.split('/')[0]
end = 1
if line.find(',') >= 0: # mixed comma and space splitter not allowed
line = [to_number(item.strip()) for item in line.split(sep)]
else:
line = [to_number(item.strip()) for item in line.split()]
if not line:
end = 0
continue
data.extend(line)
if end == 1:
field = data[1]
if field not in dyr.keys():
dyr[field] = []
dyr[field].append(data)
end = 0
data = []
fid.close()
# elem_add device elements to system
supported = [
'GENROU',
'GENCLS',
'ESST3A',
'ESDC2A',
'SEXS',
'EXST1',
'ST2CUT',
'IEEEST',
'TGOV1',
]
used = list(supported)
for model in supported:
if model not in dyr.keys():
used.remove(model)
continue
for data in dyr[model]:
add_dyn(system, model, data)
needed = list(dyr.keys())
for i in supported:
if i in needed:
needed.remove(i)
logger.warning('Models currently unsupported: {}'.format(
', '.join(needed)))
return retval | def readadd(file, system) | read DYR file | 4.763397 | 4.581137 | 1.039785 |
recipe = recipe.copy()
for k in list(recipe):
if k not in ("start", "error") and int(k) and k != int(k):
recipe[int(k)] = recipe[k]
del recipe[k]
for k in list(recipe):
if "output" in recipe[k] and not isinstance(
recipe[k]["output"], (list, dict)
):
recipe[k]["output"] = [recipe[k]["output"]]
# dicts should be normalized, too
if "start" in recipe:
recipe["start"] = [tuple(x) for x in recipe["start"]]
return recipe | def _sanitize(recipe) | Clean up a recipe that may have been stored as serialized json string.
Convert any numerical pointers that are stored as strings to integers. | 3.556959 | 3.468394 | 1.025535 |
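An example of the normalization, assuming `_sanitize` is called as shown (input shaped like a JSON round-trip):

```python
raw = {'1': {'queue': 'a', 'output': 2}, '2': {'queue': 'b'},
       'start': [[1, 'payload']]}
_sanitize(raw)
# -> {1: {'queue': 'a', 'output': [2]}, 2: {'queue': 'b'},
#     'start': [(1, 'payload')]}
```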
if not self.recipe:
raise workflows.Error("Invalid recipe: No recipe defined")
# Without a 'start' node nothing would happen
if "start" not in self.recipe:
raise workflows.Error('Invalid recipe: "start" node missing')
if not self.recipe["start"]:
raise workflows.Error('Invalid recipe: "start" node empty')
if not all(
isinstance(x, (list, tuple)) and len(x) == 2 for x in self.recipe["start"]
):
raise workflows.Error('Invalid recipe: "start" node invalid')
if any(x[0] == "start" for x in self.recipe["start"]):
raise workflows.Error('Invalid recipe: "start" node points to itself')
# Check that 'error' node points to regular nodes only
if "error" in self.recipe and isinstance(
self.recipe["error"], (list, tuple, basestring)
):
if "start" in self.recipe["error"]:
raise workflows.Error(
'Invalid recipe: "error" node points to "start" node'
)
if "error" in self.recipe["error"]:
raise workflows.Error('Invalid recipe: "error" node points to itself')
# All other nodes must be numeric
nodes = list(
filter(
lambda x: not isinstance(x, int) and x not in ("start", "error"),
self.recipe,
)
)
if nodes:
raise workflows.Error('Invalid recipe: Node "%s" is not numeric' % nodes[0])
# Detect cycles
touched_nodes = set(["start", "error"])
def flatten_links(struct):
if struct is None:
return []
if isinstance(struct, int):
return [struct]
if isinstance(struct, list):
if not all(isinstance(x, int) for x in struct):
raise workflows.Error(
"Invalid recipe: Invalid link in recipe (%s)" % str(struct)
)
return struct
if isinstance(struct, dict):
joined_list = []
for sub_list in struct.values():
joined_list += flatten_links(sub_list)
return joined_list
raise workflows.Error(
"Invalid recipe: Invalid link in recipe (%s)" % str(struct)
)
def find_cycles(path):
if path[-1] not in self.recipe:
raise workflows.Error(
'Invalid recipe: Node "%s" is referenced via "%s" but missing'
% (str(path[-1]), str(path[:-1]))
)
touched_nodes.add(path[-1])
node = self.recipe[path[-1]]
for outgoing in ("output", "error"):
if outgoing in node:
references = flatten_links(node[outgoing])
for n in references:
if n in path:
raise workflows.Error(
"Invalid recipe: Recipe contains cycle (%s -> %s)"
% (str(path), str(n))
)
find_cycles(path + [n])
for link in self.recipe["start"]:
find_cycles(["start", link[0]])
if "error" in self.recipe:
if isinstance(self.recipe["error"], (list, tuple)):
for link in self.recipe["error"]:
find_cycles(["error", link])
else:
find_cycles(["error", self.recipe["error"]])
# Test recipe for unreferenced nodes
for node in self.recipe:
if node not in touched_nodes:
raise workflows.Error(
'Invalid recipe: Recipe contains unreferenced node "%s"' % str(node)
) | def validate(self) | Check whether the encoded recipe is valid. It must describe a directed
acyclical graph, all connections must be defined, etc. | 2.27704 | 2.212823 | 1.02902 |
class SafeString(object):
def __init__(self, s):
self.string = s
def __repr__(self):
return "{" + self.string + "}"
def __str__(self):
return "{" + self.string + "}"
def __getitem__(self, item):
return SafeString(self.string + "[" + item + "]")
class SafeDict(dict):
def __missing__(self, key):
return SafeString(key)
# By default the python formatter class is used to resolve {item} references
formatter = string.Formatter()
# Special format strings "{$REPLACE:(...)}" use this data structure
# formatter to return the referenced data structure rather than a formatted
# string.
ds_formatter = string.Formatter()
def ds_format_field(value, spec):
ds_format_field.last = value
return ""
ds_formatter.format_field = ds_format_field
params = SafeDict(parameters)
def _recursive_apply(item):
if isinstance(item, basestring):
if item.startswith("{$REPLACE") and item.endswith("}"):
try:
ds_formatter.vformat("{" + item[10:-1] + "}", (), parameters)
except KeyError:
return None
return copy.deepcopy(ds_formatter.format_field.last)
else:
return formatter.vformat(item, (), params)
if isinstance(item, dict):
return {
_recursive_apply(key): _recursive_apply(value)
for key, value in item.items()
}
if isinstance(item, tuple):
return tuple(_recursive_apply(list(item)))
if isinstance(item, list):
return [_recursive_apply(x) for x in item]
return item
self.recipe = _recursive_apply(self.recipe) | def apply_parameters(self, parameters) | Recursively apply dictionary entries in 'parameters' to {item}s in recipe
structure, leaving undefined {item}s as they are. A special case is a
{$REPLACE:item}, which replaces the string with a copy of the referenced
parameter item.
Examples:
parameters = { 'x':'5' }
apply_parameters( { '{x}': '{y}' }, parameters )
=> { '5': '{y}' }
parameters = { 'y':'5' }
apply_parameters( { '{x}': '{y}' }, parameters )
=> { '{x}': '5' }
parameters = { 'x':'3', 'y':'5' }
apply_parameters( { '{x}': '{y}' }, parameters )
=> { '3': '5' }
parameters = { 'l': [ 1, 2 ] }
apply_parameters( { 'x': '{$REPLACE:l}' }, parameters )
=> { 'x': [ 1, 2 ] } | 3.5983 | 3.30428 | 1.088982 |
# Merging empty values returns a copy of the original
if not other:
return Recipe(self.recipe)
# When a string is passed, merge with a constructed recipe object
if isinstance(other, basestring):
return self.merge(Recipe(other))
# Merging empty recipes returns a copy of the original
if not other.recipe:
return Recipe(self.recipe)
# If own recipe empty, use other recipe
if not self.recipe:
return Recipe(other.recipe)
# Assuming both recipes are valid
self.validate()
other.validate()
# Start from current recipe
new_recipe = self.recipe
# Find the maximum index of the current recipe
max_index = max(1, *filter(lambda x: isinstance(x, int), self.recipe.keys()))
next_index = max_index + 1
# Set up a translation table for indices and copy all entries
translation = {}
for key, value in other.recipe.items():
if isinstance(key, int):
if key not in translation:
translation[key] = next_index
next_index = next_index + 1
new_recipe[translation[key]] = value
# Rewrite all copied entries to point to new keys
def translate(x):
if isinstance(x, list):
return list(map(translate, x))
elif isinstance(x, tuple):
return tuple(map(translate, x))
elif isinstance(x, dict):
return {k: translate(v) for k, v in x.items()}
else:
return translation[x]
for idx in translation.values():
if "output" in new_recipe[idx]:
new_recipe[idx]["output"] = translate(new_recipe[idx]["output"])
if "error" in new_recipe[idx]:
new_recipe[idx]["error"] = translate(new_recipe[idx]["error"])
# Join 'start' nodes
for (idx, param) in other.recipe["start"]:
new_recipe["start"].append((translate(idx), param))
# Join 'error' nodes
if "error" in other.recipe:
if "error" not in new_recipe:
new_recipe["error"] = translate(other.recipe["error"])
else:
if isinstance(new_recipe["error"], (list, tuple)):
new_recipe["error"] = list(new_recipe["error"])
else:
new_recipe["error"] = list([new_recipe["error"]])
if isinstance(other.recipe["error"], (list, tuple)):
new_recipe["error"].extend(translate(other.recipe["error"]))
else:
new_recipe["error"].append(translate(other.recipe["error"]))
# # Minimize DAG
# queuehash, topichash = {}, {}
# for k, v in new_recipe.items():
# if isinstance(v, dict):
# if 'queue' in v:
# queuehash[v['queue']] = queuehash.get(v['queue'], [])
# queuehash[v['queue']].append(k)
# if 'topic' in v:
# topichash[v['topic']] = topichash.get(v['topic'], [])
# topichash[v['topic']].append(k)
#
# print queuehash
# print topichash
return Recipe(new_recipe) | def merge(self, other) | Merge two recipes together, returning a single recipe containing all
nodes.
Note: This does NOT yet return a minimal recipe.
:param other: A Recipe object that should be merged with the current
Recipe object.
:return: A new Recipe object containing information from both recipes. | 2.461349 | 2.427866 | 1.013791 |
value = value.strip() if value else ""
if value and len(value) > 1 and (value[0] == value[-1] == '"'):
value = value[1:-1]
if not value:
value = None
return value | def strip_spaces_and_quotes(value) | Remove invalid whitespace and/or single pair of dquotes and return None
for empty strings.
Used to prepare cookie values, path, and domain attributes in a way which
tolerates simple formatting mistakes and standards variations. | 2.666646 | 2.851832 | 0.935064 |
if data is None:
return None
# We'll soon need to unquote to recover our UTF-8 data.
# In Python 2, unquote crashes on chars beyond ASCII. So encode functions
# had better not include anything beyond ASCII in data.
# In Python 3, unquote crashes on bytes objects, requiring conversion to
# str objects (unicode) using decode().
# But in Python 2, the same decode causes unquote to butcher the data.
# So in that case, just leave the bytes.
if isinstance(data, bytes):
if sys.version_info > (3, 0, 0): # pragma: no cover
data = data.decode('ascii')
# Recover URL encoded data
unquoted = unquote(data)
# Without this step, Python 2 may have good URL decoded *bytes*,
# which will therefore not normalize as unicode and not compare to
# the original.
if isinstance(unquoted, bytes):
unquoted = unquoted.decode('utf-8')
return unquoted | def parse_string(data, unquote=default_unquote) | Decode URL-encoded strings to UTF-8 containing the escaped chars. | 8.585878 | 8.160737 | 1.052096 |
# Do the regex magic; also enforces 2 or 4 digit years
match = Definitions.DATE_RE.match(value) if value else None
if not match:
return None
# We're going to extract and prepare captured data in 'data'.
data = {}
captured = match.groupdict()
fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
# If we matched on the RFC 1123 family format
if captured['year']:
for field in fields:
data[field] = captured[field]
# If we matched on the asctime format, use year2 etc.
else:
for field in fields:
data[field] = captured[field + "2"]
year = data['year']
# Interpret lame 2-digit years - base the cutoff on UNIX epoch, in case
# someone sets a '70' cookie meaning 'distant past'. This won't break for
# 58 years and people who use 2-digit years are asking for it anyway.
if len(year) == 2:
if int(year) < 70:
year = "20" + year
else:
year = "19" + year
year = int(year)
# Clamp to [1900, 9999]: strftime has min 1900, datetime has max 9999
data['year'] = max(1900, min(year, 9999))
# Other things which are numbers should convert to integer
for field in ['day', 'hour', 'minute', 'second']:
if data[field] is None:
data[field] = 0
data[field] = int(data[field])
# Look up the number datetime needs for the named month
data['month'] = Definitions.month_numbers[data['month'].lower()]
return datetime.datetime(**data) | def parse_date(value) | Parse an RFC 1123 or asctime-like format date string to produce
a Python datetime object (without a timezone). | 5.319153 | 5.231407 | 1.016773 |
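An example with the canonical RFC date forms, assuming `Definitions.DATE_RE` covers both families as the code indicates:

```python
parse_date('Sun, 06 Nov 1994 08:49:37 GMT')
# -> datetime.datetime(1994, 11, 6, 8, 49, 37)
parse_date('Sun Nov  6 08:49:37 1994')
# -> datetime.datetime(1994, 11, 6, 8, 49, 37)
```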
"Process a cookie value"
if value is None:
return None
value = strip_spaces_and_quotes(value)
value = parse_string(value, unquote=unquote)
if not allow_spaces:
assert ' ' not in value
return value | def parse_value(value, allow_spaces=True, unquote=default_unquote) | Process a cookie value | 4.866242 | 4.23081 | 1.150192 |
"Validate a cookie name string"
if isinstance(name, bytes):
name = name.decode('ascii')
if not Definitions.COOKIE_NAME_RE.match(name):
return False
# This module doesn't support $identifiers, which are part of an obsolete
# and highly complex standard which is never used.
if name[0] == "$":
return False
return True | def valid_name(name) | Validate a cookie name string | 9.664455 | 8.538815 | 1.131826 |
if value is None:
return False
# Put the value through a round trip with the given quote and unquote
# functions, so we will know whether data will get lost or not in the event
# that we don't complain.
encoded = encode_cookie_value(value, quote=quote)
decoded = parse_string(encoded, unquote=unquote)
# If the original string made the round trip, this is a valid value for the
# given quote and unquote functions. Since the round trip can generate
# different unicode forms, normalize before comparing, so we can ignore
# trivial inequalities.
decoded_normalized = (normalize("NFKD", decoded)
if not isinstance(decoded, bytes) else decoded)
value_normalized = (normalize("NFKD", value)
if not isinstance(value, bytes) else value)
if decoded_normalized == value_normalized:
return True
return False | def valid_value(value, quote=default_cookie_quote, unquote=default_unquote) | Validate a cookie value string.
This is generic across quote/unquote functions because it directly verifies
the encoding round-trip using the specified quote/unquote functions.
So if you use different quote/unquote functions, use something like this
as a replacement for valid_value::
my_valid_value = lambda value: valid_value(value, quote=my_quote,
unquote=my_unquote) | 5.22255 | 5.242933 | 0.996112 |
"Validate an expires datetime object"
# We want something that acts like a datetime. In particular,
# strings indicate a failure to parse down to an object and ints are
# nonstandard and ambiguous at best.
if not hasattr(date, 'tzinfo'):
return False
# Relevant RFCs define UTC as 'close enough' to GMT, and the maximum
# difference between UTC and GMT is often stated to be less than a second.
if date.tzinfo is None or abs(_total_seconds(date.utcoffset())) < 1.1:
return True
return False | def valid_date(date) | Validate an expires datetime object | 14.614756 | 13.250894 | 1.102926 |
"Validate a cookie domain ASCII string"
# Using encoding on domain would confuse browsers into not sending cookies.
# Generate UnicodeDecodeError up front if it can't store as ASCII.
domain.encode('ascii')
# Domains starting with periods are not RFC-valid, but this is very common
# in existing cookies, so they should still parse with DOMAIN_AV.
if Definitions.DOMAIN_RE.match(domain):
return True
return False | def valid_domain(domain) | Validate a cookie domain ASCII string | 30.648153 | 25.991793 | 1.179147 |
"Validate a cookie path ASCII string"
# Generate UnicodeDecodeError if path can't store as ASCII.
value.encode("ascii")
# Cookies without leading slash will likely be ignored, raise ASAP.
if not (value and value[0] == "/"):
return False
if not Definitions.PATH_RE.match(value):
return False
return True | def valid_path(value) | Validate a cookie path ASCII string | 17.154129 | 13.844929 | 1.239019 |
"Validate a cookie Max-Age"
if isinstance(number, basestring):
try:
number = long(number)
except (ValueError, TypeError):
return False
if number >= 0 and number % 1 == 0:
return True
return False | def valid_max_age(number) | Validate a cookie Max-Age | 3.748948 | 3.262971 | 1.148937 |
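A few examples; Max-Age must be a non-negative integer, and strings are coerced:

```python
valid_max_age('3600')  # True
valid_max_age(0)       # True
valid_max_age(-1)      # False
valid_max_age('soon')  # False
```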
if data is None:
return None
# encode() to ASCII bytes so quote won't crash on non-ASCII.
# but doing that to bytes objects is nonsense.
# On Python 2 encode crashes if s is bytes containing non-ASCII.
# On Python 3 encode crashes on all byte objects.
if not isinstance(data, bytes):
data = data.encode("utf-8")
# URL encode data so it is safe for cookie value
quoted = quote(data)
# Don't force to bytes, so that downstream can use proper string API rather
# than crippled bytes, and to encourage encoding to be done just once.
return quoted | def encode_cookie_value(data, quote=default_cookie_quote) | URL-encode strings to make them safe for a cookie value.
By default this uses urllib quoting, as used in many other cookie
implementations and in other Python code, instead of an ad hoc escaping
mechanism which includes backslashes (these also being illegal chars in RFC
6265). | 12.592697 | 12.715297 | 0.990358 |
if not date:
return None
assert valid_date(date)
# Avoid %a and %b, which can change with locale, breaking compliance
weekday = Definitions.weekday_abbr_list[date.weekday()]
month = Definitions.month_abbr_list[date.month - 1]
return date.strftime("{day}, %d {month} %Y %H:%M:%S GMT"
).format(day=weekday, month=month) | def render_date(date) | Render a date (e.g. an Expires value) per RFCs 6265/2616/1123.
Don't give this localized (timezone-aware) datetimes. If you use them,
convert them to GMT before passing them to this. There are too many
conversion corner cases to handle this universally. | 6.018055 | 5.90372 | 1.019367 |
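A self-contained sketch of the locale-independent rendering used above; the abbreviation lists stand in for Definitions.weekday_abbr_list and Definitions.month_abbr_list (assumed to hold the usual English abbreviations):

```python
from datetime import datetime, timezone

WEEKDAYS = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
MONTHS = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
          'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']

def render_date_sketch(date):
    # Avoid %a/%b so the output never varies with the process locale.
    weekday = WEEKDAYS[date.weekday()]
    month = MONTHS[date.month - 1]
    return date.strftime("{day}, %d {month} %Y %H:%M:%S GMT"
                         ).format(day=weekday, month=month)

print(render_date_sketch(datetime(2030, 1, 15, 12, 0, 0, tzinfo=timezone.utc)))
# Tue, 15 Jan 2030 12:00:00 GMT
```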
cookies_dict = {}
for line in Definitions.EOL.split(header_data.strip()):
matches = list(Definitions.COOKIE_RE.finditer(line))
for match in matches:
invalid = match.group('invalid')
if invalid:
if not ignore_bad_cookies:
raise InvalidCookieError(data=invalid)
_report_invalid_cookie(invalid)
continue
name = match.group('name')
values = cookies_dict.get(name)
value = match.group('value').strip('"')
if values:
values.append(value)
else:
cookies_dict[name] = [value]
if not matches:
if not ignore_bad_cookies:
raise InvalidCookieError(data=line)
_report_invalid_cookie(line)
return cookies_dict | def _parse_request(header_data, ignore_bad_cookies=False) | Turn one or more lines of 'Cookie:' header data into a dict mapping
cookie names to cookie values (raw strings). | 2.741246 | 2.591922 | 1.057611 |
cookie_dict = {}
# Basic validation, extract name/value/attrs-chunk
match = Definitions.SET_COOKIE_HEADER_RE.match(line)
if not match:
if not ignore_bad_cookies:
raise InvalidCookieError(data=line)
_report_invalid_cookie(line)
return None
cookie_dict.update({
'name': match.group('name'),
'value': match.group('value')})
# Extract individual attrs from the attrs chunk
for match in Definitions.ATTR_RE.finditer(match.group('attrs')):
captured = dict((k, v) for (k, v) in match.groupdict().items() if v)
unrecognized = captured.get('unrecognized', None)
if unrecognized:
if not ignore_bad_attributes:
raise InvalidCookieAttributeError(None, unrecognized,
"unrecognized")
_report_unknown_attribute(unrecognized)
continue
# for unary flags
for key in ('secure', 'httponly'):
if captured.get(key):
captured[key] = True
# ignore subcomponents of expires - they're still there to avoid doing
# two passes
timekeys = ('weekday', 'month', 'day', 'hour', 'minute', 'second',
'year')
if 'year' in captured:
for key in timekeys:
del captured[key]
elif 'year2' in captured:
for key in timekeys:
del captured[key + "2"]
cookie_dict.update(captured)
return cookie_dict | def parse_one_response(line, ignore_bad_cookies=False,
ignore_bad_attributes=True) | Turn one 'Set-Cookie:' line into a dict mapping attribute names to
attribute values (raw strings). | 4.319137 | 4.273627 | 1.010649 |
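A hypothetical usage example, assuming this module is importable as `cookies`; the output shape is illustrative of the dict described above:

```python
from cookies import parse_one_response  # assumed import path

result = parse_one_response("foo=bar; Path=/; Secure")
# Illustrative result: name/value plus one key per recognized attribute,
# with unary flags such as Secure mapped to True:
# {'name': 'foo', 'value': 'bar', 'path': '/', 'secure': True}
print(result)
```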
cookie_dicts = []
for line in Definitions.EOL.split(header_data.strip()):
if not line:
break
cookie_dict = parse_one_response(
line, ignore_bad_cookies=ignore_bad_cookies,
ignore_bad_attributes=ignore_bad_attributes)
if not cookie_dict:
continue
cookie_dicts.append(cookie_dict)
if not cookie_dicts:
if not ignore_bad_cookies:
raise InvalidCookieError(data=header_data)
_report_invalid_cookie(header_data)
return cookie_dicts | def _parse_response(header_data, ignore_bad_cookies=False,
ignore_bad_attributes=True) | Turn one or more lines of 'Set-Cookie:' header data into a list of dicts
mapping attribute names to attribute values (as plain strings). | 2.888716 | 2.824254 | 1.022824 |
name = cookie_dict.get('name', None)
if not name:
raise InvalidCookieError("Cookie must have name")
raw_value = cookie_dict.get('value', '')
# Absence or failure of the value parser here is fatal; errors in the
# supplied name and value should be caught by Cookie.__init__.
value = cls.attribute_parsers['value'](raw_value)
cookie = cls(name, value)
# Parse values from serialized formats into objects
parsed = {}
for key, value in cookie_dict.items():
# Don't want to pass name/value to _set_attributes
if key in ('name', 'value'):
continue
parser = cls.attribute_parsers.get(key)
if not parser:
# Don't let totally unknown attributes pass silently
if not ignore_bad_attributes:
raise InvalidCookieAttributeError(
key, value, "unknown cookie attribute '%s'" % key)
_report_unknown_attribute(key)
continue
try:
parsed_value = parser(value)
except Exception as e:
reason = "did not parse with %r: %r" % (parser, e)
if not ignore_bad_attributes:
raise InvalidCookieAttributeError(
key, value, reason)
_report_invalid_attribute(key, value, reason)
parsed_value = ''
parsed[key] = parsed_value
# Set the parsed objects (does object validation automatically)
cookie._set_attributes(parsed, ignore_bad_attributes)
return cookie | def from_dict(cls, cookie_dict, ignore_bad_attributes=True) | Construct an instance from a dict of strings to parse.
The main difference between this and Cookie(name, value, **kwargs) is
that the values in the argument to this method are parsed.
If ignore_bad_attributes=True (default), values which did not parse
are set to '' in order to avoid passing bad data. | 4.084059 | 4.034149 | 1.012372 |
"Construct a Cookie object from a line of Set-Cookie header data."
cookie_dict = parse_one_response(
line, ignore_bad_cookies=ignore_bad_cookies,
ignore_bad_attributes=ignore_bad_attributes)
if not cookie_dict:
return None
return cls.from_dict(
cookie_dict, ignore_bad_attributes=ignore_bad_attributes) | def from_string(cls, line, ignore_bad_cookies=False,
ignore_bad_attributes=True) | Construct a Cookie object from a line of Set-Cookie header data. | 3.084215 | 2.532865 | 1.217678 |
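Hypothetical usage, assuming the class is importable as `cookies.Cookie`:

```python
from cookies import Cookie  # assumed import path

cookie = Cookie.from_string("session=abc123; Path=/; HttpOnly")
print(cookie.name, cookie.value)  # session abc123
```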
validator = self.attribute_validators.get(name, None)
if validator:
return bool(validator(value))
return True | def validate(self, name, value) | Validate a cookie attribute with an appropriate validator.
The value comes in already parsed (for example, an expires value
should be a datetime). Called automatically when an attribute
value is set. | 3.949909 | 3.887052 | 1.016171 |
dictionary = {}
# Only look for attributes registered in attribute_names.
for python_attr_name, cookie_attr_name in self.attribute_names.items():
value = getattr(self, python_attr_name)
renderer = self.attribute_renderers.get(python_attr_name, None)
if renderer:
value = renderer(value)
# If renderer returns None, or it's just natively none, then the
# value is suppressed entirely - does not appear in any rendering.
if not value:
continue
dictionary[cookie_attr_name] = value
return dictionary | def attributes(self) | Export this cookie's attributes as a dict of encoded values.
This is an important part of the code for rendering attributes, e.g.
render_response(). | 5.416534 | 4.75943 | 1.138063 |
# Use whatever renderers are defined for name and value.
name, value = self.name, self.value
renderer = self.attribute_renderers.get('name', None)
if renderer:
name = renderer(name)
renderer = self.attribute_renderers.get('value', None)
if renderer:
value = renderer(value)
return ''.join((name, "=", value)) | def render_request(self) | Render as a string formatted for HTTP request headers
(simple 'Cookie: ' style). | 3.521604 | 3.330788 | 1.057288 |
# Use whatever renderers are defined for name and value.
# (.attributes() is responsible for all other rendering.)
name, value = self.name, self.value
renderer = self.attribute_renderers.get('name', None)
if renderer:
name = renderer(name)
renderer = self.attribute_renderers.get('value', None)
if renderer:
value = renderer(value)
return '; '.join(
['{0}={1}'.format(name, value)] +
[key if isinstance(val, bool) else '='.join((key, val))
for key, val in self.attributes().items()]
) | def render_response(self) | Render as a string formatted for HTTP response headers
(detailed 'Set-Cookie: ' style). | 3.902342 | 3.767014 | 1.035925 |
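A hedged round-trip sketch, again assuming the module is importable as `cookies`:

```python
from cookies import Cookie  # assumed import path

cookie = Cookie.from_string("session=abc123; Path=/; Secure")
print(cookie.render_request())   # session=abc123
print(cookie.render_response())  # session=abc123; Path=/; Secure (attribute order may vary)
```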
# Only the first one is accessible through the main interface,
# others accessible through get_all (all_cookies).
for cookie in args:
self.all_cookies.append(cookie)
if cookie.name in self:
continue
self[cookie.name] = cookie
for key, value in kwargs.items():
cookie = self.cookie_class(key, value)
self.all_cookies.append(cookie)
if key in self:
continue
self[key] = cookie | def add(self, *args, **kwargs) | Add Cookie objects by their names, or create new ones under
specified names.
Any unnamed arguments are interpreted as existing cookies, and
are added under the value in their .name attribute. With keyword
arguments, the key is interpreted as the cookie name and the
value as the UNENCODED value stored in the cookie. | 4.343318 | 4.168478 | 1.041943 |
cookies_dict = _parse_request(
header_data, ignore_bad_cookies=ignore_bad_cookies)
cookie_objects = []
for name, values in cookies_dict.items():
for value in values:
# Use from_dict to check name and parse value
cookie_dict = {'name': name, 'value': value}
try:
cookie = self.cookie_class.from_dict(cookie_dict)
except InvalidCookieError:
if not ignore_bad_cookies:
raise
else:
cookie_objects.append(cookie)
try:
self.add(*cookie_objects)
except InvalidCookieError:
if not ignore_bad_cookies:
raise
_report_invalid_cookie(header_data)
return self | def parse_request(self, header_data, ignore_bad_cookies=False) | Parse 'Cookie' header data into Cookie objects, and add them to
this Cookies object.
:arg header_data: string containing only 'Cookie:' request headers or
header values (as in CGI/WSGI HTTP_COOKIE); if more than one, they must
be separated by CRLF (\\r\\n).
:arg ignore_bad_cookies: if set, will log each syntactically invalid
cookie (at the granularity of semicolon-delimited blocks) rather than
raising an exception at the first bad cookie.
:returns: a Cookies instance containing Cookie objects parsed from
header_data.
.. note::
If you want to parse 'Set-Cookie:' response headers, please use
parse_response instead. parse_request will happily turn 'expires=frob'
into a separate cookie without complaining, according to the grammar. | 2.851218 | 2.934524 | 0.971612 |
cookie_dicts = _parse_response(
header_data,
ignore_bad_cookies=ignore_bad_cookies,
ignore_bad_attributes=ignore_bad_attributes)
cookie_objects = []
for cookie_dict in cookie_dicts:
cookie = self.cookie_class.from_dict(cookie_dict)
cookie_objects.append(cookie)
self.add(*cookie_objects)
return self | def parse_response(self, header_data, ignore_bad_cookies=False,
ignore_bad_attributes=True) | Parse 'Set-Cookie' header data into Cookie objects, and add them to
this Cookies object.
:arg header_data: string containing only 'Set-Cookie:' request headers
or their corresponding header values; if more than one, they must be
separated by CRLF (\\r\\n).
:arg ignore_bad_cookies: if set, will log each syntactically invalid
cookie rather than raising an exception at the first bad cookie. (This
includes cookies which have noncompliant characters in the attribute
section).
:arg ignore_bad_attributes: defaults to True, which means to log but
not raise an error when a particular attribute is unrecognized. (This
does not necessarily mean that the attribute is invalid, although that
would often be the case.) if unset, then an error will be raised at the
first semicolon-delimited block which has an unknown attribute.
:returns: a Cookies instance containing Cookie objects parsed from
header_data, each with recognized attributes populated.
.. note::
If you want to parse 'Cookie:' headers (i.e., data like what's sent
with an HTTP request, which has only name=value pairs and no
attributes), then please use parse_request instead. Such lines often
contain multiple name=value pairs, and parse_response will throw away
the pairs after the first one, which will probably generate errors or
confusing behavior. (Since there's no perfect way to automatically
determine which kind of parsing to do, you have to tell it manually by
choosing correctly between parse_request and parse_response.) | 2.340232 | 2.518244 | 0.929311 |
"Construct a Cookies object from request header data."
cookies = cls()
cookies.parse_request(
header_data, ignore_bad_cookies=ignore_bad_cookies)
return cookies | def from_request(cls, header_data, ignore_bad_cookies=False) | Construct a Cookies object from request header data. | 5.173487 | 4.072612 | 1.270312 |
"Construct a Cookies object from response header data."
cookies = cls()
cookies.parse_response(
header_data,
ignore_bad_cookies=ignore_bad_cookies,
ignore_bad_attributes=ignore_bad_attributes)
return cookies | def from_response(cls, header_data, ignore_bad_cookies=False,
ignore_bad_attributes=True) | Construct a Cookies object from response header data. | 3.310038 | 2.937891 | 1.126671 |
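Hypothetical usage of the two constructors, assuming the class is importable as `cookies.Cookies`:

```python
from cookies import Cookies  # assumed import path

# 'Cookie:' request data: plain name=value pairs
jar = Cookies.from_request("a=b; c=d")
print(jar['a'].value)  # b

# 'Set-Cookie:' response data: one cookie plus attributes
jar = Cookies.from_response("token=xyz; Path=/; HttpOnly")
print(jar['token'].value)  # xyz
```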
rendered = [cookie.render_request() for cookie in self.values()]
return "; ".join(sorted(rendered) if sort else rendered) | def render_request(self, sort=True) | Render the dict's Cookie objects into a string formatted for HTTP
request headers (simple 'Cookie: ' style). | 4.840243 | 3.503281 | 1.381631 |
rendered = [cookie.render_response() for cookie in self.values()]
return rendered if not sort else sorted(rendered) | def render_response(self, sort=True) | Render the dict's Cookie objects into list of strings formatted for
HTTP response headers (detailed 'Set-Cookie: ' style). | 7.87619 | 5.12169 | 1.537811 |
return matrix(np.logical_or(a, b).astype('float'), a.size) | def aorb(a, b) | Return a matrix of the elementwise logical OR of a and b | 11.308671 | 7.500558 | 1.507711 |
return matrix(np.logical_and(a, b).astype('float'), a.size) | def aandb(a, b) | Return a matrix of the elementwise logical AND of a and b | 9.042003 | 7.620662 | 1.186511 |
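A small sketch of these elementwise logic helpers, assuming cvxopt and numpy are installed (mirroring the expressions above):

```python
import numpy as np
from cvxopt import matrix

a = matrix([1.0, 0.0, 1.0])
b = matrix([0.0, 0.0, 1.0])

# Elementwise OR/AND, cast back to float cvxopt matrices
print(list(matrix(np.logical_or(a, b).astype('float'), a.size)))   # [1.0, 0.0, 1.0]
print(list(matrix(np.logical_and(a, b).astype('float'), a.size)))  # [0.0, 0.0, 1.0]
```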
return matrix(list(map(lambda x: 1 if x == 0 else x, a)), a.size) | def not0(a) | Return a with each zero entry replaced by 1 (a[i] if a[i] != 0 else 1) | 5.68018 | 6.057231 | 0.937752 |
ty = type(m)
if ty == matrix:
m = list(m)
m = sorted(m, reverse=reverse)
if ty == matrix:
m = matrix(m)
return m | def sort(m, reverse=False) | Return sorted m (default: ascending order) | 3.247565 | 3.028918 | 1.072186 |
return sorted(range(len(m)), key=lambda k: m[k], reverse=reverse) | def sort_idx(m, reverse=False) | Return the indices of m in sorted order (default: ascending order) | 2.38443 | 2.136802 | 1.115887 |
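A plain-Python illustration of sort_idx, which returns positions rather than values:

```python
m = [3.0, 1.0, 2.0]
idx = sorted(range(len(m)), key=lambda k: m[k])
print(idx)                  # [1, 2, 0]
print([m[k] for k in idx])  # [1.0, 2.0, 3.0]
```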
mm = np.array(m)
idx_tuple = np.where(mm == val)
idx = idx_tuple[0].tolist()
return idx | def index(m, val) | Return the indices of all occurrences of ``val`` in ``m`` | 4.610271 | 4.120003 | 1.118997 |
ret = s
# try converting to float
try:
ret = float(s)
except ValueError:
ret = ret.strip('\'').strip()
# try converting to an integer (uid)
try:
ret = int(s)
except ValueError:
pass
# try converting to boolean
if ret == 'True':
ret = True
elif ret == 'False':
ret = False
elif ret == 'None':
ret = None
return ret | def to_number(s) | Convert a string to a number.
If not successful, return the string without blanks | 2.70912 | 2.812185 | 0.963351 |
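A self-contained restatement of the conversion ladder for illustration (the helper name is hypothetical):

```python
def to_number_sketch(s):
    ret = s
    try:
        ret = float(s)           # float first
    except ValueError:
        ret = ret.strip("'").strip()
    try:
        ret = int(s)             # an exact integer string wins over float
    except ValueError:
        pass
    if ret == 'True':
        ret = True
    elif ret == 'False':
        ret = False
    elif ret == 'None':
        ret = None
    return ret

print(to_number_sketch('2'))       # 2
print(to_number_sketch('2.5'))     # 2.5
print(to_number_sketch('True'))    # True
print(to_number_sketch("'bus1'"))  # bus1
```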
if len(a) != len(b):
raise ValueError('Arguments a and b do not have the same length')
idx = 0
ret = matrix(0, (len(a), 1), 'd')
for m, n in zip(a, b):
try:
ret[idx] = m / n
except ZeroDivisionError:
ret[idx] = 1
finally:
idx += 1
return ret | def sdiv(a, b) | Elementwise safe division: entries where b == 0 yield 1 | 2.890373 | 2.744827 | 1.053026 |
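An elementwise illustration, assuming cvxopt is installed; wherever the divisor entry is zero, the quotient is defined as 1 (the helper name is hypothetical):

```python
from cvxopt import matrix

def sdiv_sketch(a, b):
    ret = matrix(0, (len(a), 1), 'd')
    for i, (m, n) in enumerate(zip(a, b)):
        # define x / 0 as 1, matching the ZeroDivisionError branch above
        ret[i] = m / n if n != 0 else 1
    return ret

print(list(sdiv_sketch(matrix([4.0, 0.0, 3.0]), matrix([2.0, 0.0, 0.0]))))
# [2.0, 1.0, 1.0]
```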
if conf_path is None:
# test ./andes.conf
if os.path.isfile('andes.conf'):
conf_path = 'andes.conf'
# test ~/.andes/andes.conf
home_dir = os.path.expanduser('~')
if os.path.isfile(os.path.join(home_dir, '.andes', 'andes.conf')):
conf_path = os.path.join(home_dir, '.andes', 'andes.conf')
if conf_path is not None:
logger.debug('Found config file at {}.'.format(conf_path))
return conf_path | def get_config_load_path(conf_path=None) | Return config file load path
Priority:
1. conf_path
2. current directory
3. home directory
Parameters
----------
conf_path : str, optional
    explicitly given path to the config file
Returns
-------
str or None : path to the located config file, or None if not found | 2.019469 | 2.116213 | 0.954284 |
PATH = ''
if platform.system() in ('Linux', 'Darwin'):
PATH = tempfile.mkdtemp(prefix='andes-')
elif platform.system() == 'Windows':
APPDATA = os.getenv('APPDATA')
PATH = os.path.join(APPDATA, 'andes')
if not os.path.exists(PATH):
os.makedirs(PATH)
return PATH | def get_log_dir() | Get a directory for logging
On Linux or macOS, a fresh temporary directory prefixed with 'andes-'
(typically under /tmp) is created. On Windows, '%APPDATA%/andes' is used.
Returns
-------
str
Path to the logging directory | 2.685714 | 2.49573 | 1.076124 |
max_cache = int(self.system.tds.config.max_cache)
if len(self.vars) >= max_cache > 0:
self.dump()
self.vars = list()
self.t = list()
self.k = list()
logger.debug(
'varout cache cleared at simulation t = {:g}.'.format(
self.system.dae.t))
self._mode = 'a'
var_data = matrix([self.system.dae.x, self.system.dae.y])
# ===== This code block is deprecated =====
self.t.append(t)
self.k.append(step)
self.vars.append(var_data)
# =========================================
# clear data cache if written to disk
if self.np_nrows >= max_cache > 0:
self.dump_np_vars()
self.np_vars = np.zeros(self._np_block_shape)
self.np_nrows = 0
self.np_t = np.zeros((self._np_block_rows,))
self.np_k = np.zeros((self._np_block_rows,))
logger.debug(
'np_vars cache cleared at simulation t = {:g}.'.format(
self.system.dae.t))
self._mode = 'a'
# initialize before first-time adding data
if self.np_nrows == 0:
self.np_ncols = len(var_data)
self._np_block_shape = (self._np_block_rows, self.np_ncols)
self.np_vars = np.zeros(self._np_block_shape)
self.np_t = np.zeros((self._np_block_rows,))
self.np_k = np.zeros((self._np_block_rows,))
# adding data to the matrix
# self.np_vars[self.np_nrows, 0] = t
self.np_t[self.np_nrows] = t
self.np_k[self.np_nrows] = step
self.np_vars[self.np_nrows, :] = np.array(var_data).reshape((-1))
self.np_nrows += 1
# check if matrix extension is needed
if self.np_nrows >= self.np_vars.shape[0]:
self.np_vars = np.concatenate([self.np_vars, np.zeros(self._np_block_shape)], axis=0)
self.np_t = np.concatenate([self.np_t, np.zeros((self._np_block_rows,))], axis=0)
self.np_k = np.concatenate([self.np_k, np.zeros((self._np_block_rows,))], axis=0)
# remove the post-computed variables from the variable list
if self.system.tds.config.compute_flows:
self.system.dae.y = self.system.dae.y[:self.system.dae.m] | def store(self, t, step) | Record the state/algeb values at time t to self.vars | 2.743546 | 2.696254 | 1.01754 |
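A minimal sketch of the grow-by-block buffering used in store(): the data matrix is extended by a fixed-size zero block whenever it fills up, amortizing the cost of np.concatenate over many appended rows (block size and column count are illustrative):

```python
import numpy as np

BLOCK_ROWS = 4   # stands in for self._np_block_rows
ncols = 3

buf = np.zeros((BLOCK_ROWS, ncols))
nrows = 0
for step in range(10):
    if nrows >= buf.shape[0]:
        # extend by one zero block instead of growing row by row
        buf = np.concatenate([buf, np.zeros((BLOCK_ROWS, ncols))], axis=0)
    buf[nrows, :] = step
    nrows += 1

print(buf.shape, nrows)  # (12, 3) 10 -- only the first nrows rows are valid
```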
return np.array([list(item) for item in self.vars]) | def show(self) | The representation of a Varout object
:return: the full result matrix (for use with PyCharm viewer)
:rtype: np.array | 7.161923 | 6.234095 | 1.148831 |
logger.warning('This function is deprecated and replaced by `concat_t_vars_np`.')
out = np.array([])
if len(self.t) == 0:
return out
out = np.ndarray(shape=(0, self.vars[0].size[0] + 1))
for t, var in zip(self.t, self.vars):
line = [[t]]
line[0].extend(list(var))
out = np.append(out, line, axis=0)
return out | def concat_t_vars(self) | Concatenate ``self.t`` with ``self.vars`` and output a single matrix
for data dump
:return matrix: concatenated matrix with ``self.t`` as the 0-th column | 3.795711 | 3.594791 | 1.055892 |
selected_np_vars = self.np_vars
if vars_idx is not None:
selected_np_vars = self.np_vars[:, vars_idx]
return np.concatenate([self.np_t[:self.np_nrows].reshape((-1, 1)),
selected_np_vars[:self.np_nrows, :]], axis=1) | def concat_t_vars_np(self, vars_idx=None) | Concatenate `self.np_t` with `self.np_vars` and return a single matrix.
The first column corresponds to time, and the rest of the matrix is the variables.
Returns
-------
np.array : concatenated matrix | 3.441251 | 3.114156 | 1.105035 |
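A numpy sketch of the concatenation: the first np_nrows entries of the time vector become column 0, followed by the (optionally selected) variable columns:

```python
import numpy as np

np_t = np.array([0.0, 0.1, 0.2, 0.0])   # last slot is unused buffer space
np_vars = np.arange(12.0).reshape(4, 3)
np_nrows = 3

out = np.concatenate([np_t[:np_nrows].reshape((-1, 1)),
                      np_vars[:np_nrows, :]], axis=1)
print(out.shape)  # (3, 4): time column plus three variable columns
```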
assert isinstance(xidx, int)
if isinstance(yidx, int):
yidx = [yidx]
t_vars = self.concat_t_vars()
xdata = t_vars[:, xidx]
ydata = t_vars[:, yidx]
return xdata.tolist(), ydata.transpose().tolist() | def get_xy(self, yidx, xidx=0) | Return stored data for the given indices for plot
:param yidx: the indices of the y-axis variables (1-indexed)
:param xidx: the index of the x-axis variable
:return: a tuple of (x data as a list, y data as a list of lists) | 4.127554 | 4.12378 | 1.000915 |
ret = False
if self.system.files.no_output is True:
logger.debug('no_output is True, thus no TDS dump saved')
return True
if self.write_lst() and self.write_np_dat(store_format=store_format, delimiter=delimiter):
ret = True
return ret | def dump_np_vars(self, store_format='csv', delimiter=',') | Dump the TDS simulation data to files by calling subroutines `write_lst` and
`write_np_dat`.
Parameters
-----------
store_format : str
dump format in `('csv', 'txt', 'hdf5')`
delimiter : str
delimiter for the `csv` and `txt` format
Returns
-------
bool: success flag | 9.888088 | 6.694562 | 1.477033 |
logger.warning('This function is deprecated and replaced by `dump_np_vars`.')
ret = False
if self.system.files.no_output:
# return ``True`` because it did not fail
return True
if self.write_lst() and self.write_dat():
ret = True
return ret | def dump(self) | Dump the TDS results to the output `dat` file
:return: success flag | 14.897378 | 12.167934 | 1.224314 |
ret = False
system = self.system
# compute the total number of columns, excluding time
if not system.Recorder.n:
n_vars = system.dae.m + system.dae.n
# post-computed power flows include:
# bus - (Pi, Qi)
# line - (Pij, Pji, Qij, Qji, Iij_Real, Iij_Imag, Iji_real, Iji_Imag)
if system.tds.config.compute_flows:
n_vars += 2 * system.Bus.n + 8 * system.Line.n + 2 * system.Area.n_combination
idx = list(range(n_vars))
else:
n_vars = len(system.Recorder.varout_idx)
idx = system.Recorder.varout_idx
# prepare data
t_vars_concatenated = self.concat_t_vars_np(vars_idx=idx)
try:
os.makedirs(os.path.abspath(os.path.dirname(system.files.dat)), exist_ok=True)
with open(system.files.dat, self._mode) as f:
if store_format in ('csv', 'txt'):
np.savetxt(f, t_vars_concatenated, fmt=fmt, delimiter=delimiter)
elif store_format == 'hdf5':
# hdf5 output is not yet implemented; nothing is written
pass
ret = True
logger.info('TDS data dumped to <{}>'.format(system.files.dat))
except IOError:
logger.error('I/O Error while writing the dat file.')
return ret | def write_np_dat(self, store_format='csv', delimiter=',', fmt='%.12g') | Write TDS data stored in `self.np_vars` to the output file
Parameters
----------
store_format : str
dump format in ('csv', 'txt', 'hdf5')
delimiter : str
delimiter for the `csv` and `txt` format
fmt : str
output formatting template
Returns
-------
bool : success flag | 4.936812 | 4.93706 | 0.99995 |
logger.warning('This function is deprecated and replaced by `write_np_dat`.')
ret = False
system = self.system
# compute the total number of columns, excluding time
if not system.Recorder.n:
n_vars = system.dae.m + system.dae.n
# post-computed power flows include:
# bus - (Pi, Qi)
# line - (Pij, Pji, Qij, Qji, Iij_Real, Iij_Imag, Iji_real, Iji_Imag)
if system.tds.config.compute_flows:
n_vars += 2 * system.Bus.n + 8 * system.Line.n + 2 * system.Area.n_combination
idx = list(range(n_vars))
else:
n_vars = len(system.Recorder.varout_idx)
idx = system.Recorder.varout_idx
template = ['{:<8g}'] + ['{:0.10f}'] * n_vars
template = ' '.join(template)
# format the output in a string
out = ''
for t, line in zip(self.t, self.vars):
values = [t] + list(line[idx])
out += template.format(*values) + '\n'
try:
os.makedirs(os.path.abspath(os.path.dirname(system.files.dat)), exist_ok=True)
with open(system.files.dat, self._mode) as f:
f.write(out)
ret = True
except IOError:
logger.error('I/O Error while writing the dat file.')
return ret | def write_dat(self) | Write ``system.Varout.vars`` to a ``.dat`` file
:return: success flag | 5.168615 | 4.920438 | 1.050438 |