Dataset schema: _id (string, 2-7 chars), title (string, 1-88 chars), partition (string, 3 classes), text (string, 75-19.8k chars), language (string, 1 class), meta_information (dict)
q17600
RtmBot.get_userid_from_botid
train
def get_userid_from_botid(self, botid):
    '''Perform a lookup of bots.info to resolve a botid to a userid

    Args:
        botid (string): Slack botid to look up.

    Returns:
        string: userid value
    '''
    botinfo = self.slack_client.api_call('bots.info', bot=botid)
    if botinfo['ok'] is True:
        return botinfo['bot'].get('user_id')
    else:
        return botid
python
{ "resource": "" }
q17601
RtmBot._parse_metadata
train
def _parse_metadata(self, message):
    '''Parse incoming messages to build metadata dict

    Lots of 'if' statements. It sucks, I know.

    Args:
        message (dict): JSON dump of message sent from Slack

    Returns:
        Legobot.Metadata
    '''
    # Try to handle all the fields of events we care about.
    metadata = Metadata(source=self.actor_urn).__dict__
    metadata['thread_ts'] = message.get('thread_ts')
    if 'presence' in message:
        metadata['presence'] = message['presence']
    if 'text' in message:
        metadata['text'] = message['text']
    elif 'previous_message' in message:
        # Try to handle slack links
        if 'text' in message['previous_message']:
            metadata['text'] = message['previous_message']['text']
        else:
            metadata['text'] = None
    else:
        metadata['text'] = None
    if 'user' in message:
        metadata['source_user'] = message['user']
    elif 'bot_id' in message:
        metadata['source_user'] = self.get_userid_from_botid(
            message['bot_id'])
    elif 'message' in message and 'user' in message['message']:
        metadata['source_user'] = message['message']['user']
    else:
        metadata['source_user'] = None
    metadata['user_id'] = metadata['source_user']
    metadata['display_name'] = self.get_username(metadata['source_user'])
    if 'channel' in message:
        metadata['source_channel'] = message['channel']
        # Slack starts DM channel IDs with "D"
        if message['channel'].startswith('D'):
            metadata['is_private_message'] = True
        else:
            metadata['is_private_message'] = False
    metadata['source_connector'] = 'slack'
    return metadata
python
{ "resource": "" }
q17602
Slack.build_attachment
train
def build_attachment(self, text, target, attachment, thread):
    '''Builds a slack attachment.

    Args:
        text (string): fallback/body text of the attachment.
        target (string): channel to send the attachment to.
        attachment (string): URL of the image to attach.
        thread (string): thread timestamp, if replying in a thread.

    Returns:
        attachment (dict): attachment data.
    '''
    attachment = {
        'as_user': True,
        'text': text,
        'channel': target,
        'attachments': [
            {
                'fallback': text,
                'image_url': attachment
            }
        ]
    }
    if thread:
        attachment['thread_ts'] = thread
    return attachment
python
{ "resource": "" }
q17603
guess
train
def guess(system):
    """
    Input format guess function. First guess by extension,
    then test by lines.
    """
    files = system.files
    maybe = []
    if files.input_format:
        maybe.append(files.input_format)
    # first, guess by extension
    for key, val in input_formats.items():
        if type(val) == list:
            for item in val:
                if files.ext.strip('.').lower() == item:
                    maybe.append(key)
        else:
            if files.ext.strip('.').lower() == val:
                maybe.append(key)
    # second, guess by lines
    true_format = ''
    fid = open(files.case, 'r')
    for item in maybe:
        try:
            parser = importlib.import_module('.' + item, __name__)
            testlines = getattr(parser, 'testlines')
            if testlines(fid):
                true_format = item
                break
        except ImportError:
            logger.debug('Parser for {:s} format is not found. '
                         'Format guess will continue.'.format(item))
    fid.close()
    if true_format:
        logger.debug('Input format guessed as {:s}.'.format(true_format))
    else:
        logger.error('Unable to determine case format.')
    files.input_format = true_format
    # guess addfile format
    if files.addfile:
        _, add_ext = os.path.splitext(files.addfile)
        for key, val in input_formats.items():
            if type(val) == list:
                if add_ext[1:] in val:
                    files.add_format = key
            else:
                if add_ext[1:] == val:
                    files.add_format = key
    return true_format
python
{ "resource": "" }
q17604
parse
train
def parse(system):
    """
    Parse the input file with the format given in ``system.files.input_format``
    """
    t, _ = elapsed()
    input_format = system.files.input_format
    add_format = system.files.add_format
    # exit when no input format is given
    if not input_format:
        logger.error('No input format found. '
                     'Specify or guess a format before parsing.')
        return False
    # exit if the format parser could not be imported
    try:
        parser = importlib.import_module('.' + input_format, __name__)
        dmparser = importlib.import_module('.' + 'dome', __name__)
        if add_format:
            addparser = importlib.import_module('.' + add_format, __name__)
    except ImportError:
        logger.error('Parser for {:s} format not found. '
                     'Program will exit.'.format(input_format))
        return False
    # try parsing the base case file
    logger.info('Parsing input file <{:s}>'.format(system.files.fullname))
    if not parser.read(system.files.case, system):
        logger.error('Error parsing case file {:s} with {:s} format '
                     'parser.'.format(system.files.fullname, input_format))
        return False
    # try parsing the addfile
    if system.files.addfile:
        if not system.files.add_format:
            logger.error('Unknown addfile format.')
            return False
        logger.info('Parsing additional file {:s}.'.format(
            system.files.addfile))
        if not addparser.readadd(system.files.addfile, system):
            logger.error('Error parsing addfile {:s} with {:s} format '
                         'parser.'.format(system.files.addfile, add_format))
            return False
    # try parsing the dynfile with the dm filter
    if system.files.dynfile:
        logger.info('Parsing input file {:s}.'.format(system.files.dynfile))
        if not dmparser.read(system.files.dynfile, system):
            logger.error('Error parsing dynfile {:s} with dm format '
                         'parser.'.format(system.files.dynfile))
            return False
    _, s = elapsed(t)
    logger.debug('Case file {:s} parsed in {:s}.'.format(
        system.files.fullname, s))
    return True
python
{ "resource": "" }
q17605
EIG.calc_state_matrix
train
def calc_state_matrix(self):
    """
    Return the state matrix and store it to ``self.As``

    Returns
    -------
    matrix
        state matrix
    """
    system = self.system
    Gyx = matrix(system.dae.Gx)
    self.solver.linsolve(system.dae.Gy, Gyx)
    self.As = matrix(system.dae.Fx - system.dae.Fy * Gyx)
    # ------------------------------------------------------
    # TODO: use scipy eigs
    # self.As = sparse(self.As)
    # I = np.array(self.As.I).reshape((-1,))
    # J = np.array(self.As.J).reshape((-1,))
    # V = np.array(self.As.V).reshape((-1,))
    # self.As = csr_matrix((V, (I, J)), shape=self.As.size)
    # ------------------------------------------------------
    return self.As
python
{ "resource": "" }
q17606
EIG.calc_eigvals
train
def calc_eigvals(self):
    """
    Solve for the eigenvalues of the state matrix ``self.As``

    Returns
    -------
    array
        eigenvalues of ``self.As``
    """
    self.eigs = numpy.linalg.eigvals(self.As)
    # TODO: use scipy.sparse.linalg.eigs(self.As)
    return self.eigs
python
{ "resource": "" }
q17607
EIG.calc_part_factor
train
def calc_part_factor(self):
    """
    Compute the participation factors of states in eigenvalues

    Returns
    -------
    tuple
        (eigenvalues, participation factor matrix)
    """
    mu, N = numpy.linalg.eig(self.As)
    # TODO: use scipy.sparse.linalg.eigs(self.As)
    N = matrix(N)
    n = len(mu)
    idx = range(n)
    W = matrix(spmatrix(1.0, idx, idx, (n, n), N.typecode))
    gesv(N, W)
    partfact = mul(abs(W.T), abs(N))
    b = matrix(1.0, (1, n))
    WN = b * partfact
    partfact = partfact.T
    for item in idx:
        mu_real = mu[item].real
        mu_imag = mu[item].imag
        mu[item] = complex(round(mu_real, 4), round(mu_imag, 4))
        partfact[item, :] /= WN[item]
    # participation factor
    self.mu = matrix(mu)
    self.part_fact = matrix(partfact)
    return self.mu, self.part_fact
python
{ "resource": "" }
q17608
Breaker.get_times
train
def get_times(self):
    """Return all the action times and (times - 1e-6) in a list"""
    if not self.n:
        return []
    self.times = list(mul(self.u1, self.t1)) + \
        list(mul(self.u2, self.t2)) + \
        list(mul(self.u3, self.t3)) + \
        list(mul(self.u4, self.t4))
    self.times = matrix(list(set(self.times)))
    self.times = list(self.times) + list(self.times - 1e-6)
    return self.times
python
{ "resource": "" }
q17609
Heartbeat.send
train
def send(self, ws, seq):
    """
    Sends a heartbeat message to Discord

    Args:
        ws: Websocket connection to Discord
        seq: Sequence number of the heartbeat
    """
    payload = {'op': 1, 'd': seq}
    payload = json.dumps(payload)
    logger.debug("Sending heartbeat with payload {}".format(payload))
    ws.send(payload)
    return
python
{ "resource": "" }
q17610
DiscoBot.create_message
train
def create_message(self, channel_id, text):
    """
    Sends a message to a Discord channel or user via the REST API

    Args:
        channel_id (string): ID of the destination Discord channel
        text (string): Content of the message
    """
    baseurl = self.rest_baseurl + \
        '/channels/{}/messages'.format(channel_id)
    requests.post(baseurl,
                  headers=self.headers,
                  data=json.dumps({'content': text}))
python
{ "resource": "" }
q17611
DiscoBot.identify
train
def identify(self, token):
    """
    Identifies to the websocket endpoint

    Args:
        token (string): Discord bot token
    """
    payload = {
        'op': 2,
        'd': {
            'token': self.token,
            'properties': {
                '$os': sys.platform,
                '$browser': 'legobot',
                '$device': 'legobot'
            },
            'compress': False,
            'large_threshold': 250
        }
    }
    payload['d']['synced_guilds'] = []
    logger.info("Identifying with the following message: "
                "{}".format(payload))
    self.ws.send(json.dumps(payload))
    return
python
{ "resource": "" }
q17612
DiscoBot.on_hello
train
def on_hello(self, message):
    """
    Runs on a hello event from the websocket connection

    Args:
        message (dict): Full message from the Discord websocket connection
    """
    logger.info("Got a hello")
    self.identify(self.token)
    self.heartbeat_thread = Heartbeat(self.ws,
                                      message['d']['heartbeat_interval'])
    self.heartbeat_thread.start()
    return
python
{ "resource": "" }
q17613
DiscoBot.on_heartbeat
train
def on_heartbeat(self, message):
    """
    Runs on a heartbeat event from the websocket connection

    Args:
        message (dict): Full message from the Discord websocket connection
    """
    logger.info("Got a heartbeat")
    logger.info("Heartbeat message: {}".format(message))
    self.heartbeat_thread.update_sequence(message['d'])
    return
python
{ "resource": "" }
q17614
DiscoBot.on_message
train
def on_message(self, message):
    """
    Runs on a message create event from the websocket connection

    Args:
        message (dict): Full message from the Discord websocket connection
    """
    if 'content' in message['d']:
        metadata = self._parse_metadata(message)
        message = Message(text=message['d']['content'],
                          metadata=metadata).__dict__
        logger.debug(message)
        self.baseplate.tell(message)
python
{ "resource": "" }
q17615
DiscoBot._parse_metadata
train
def _parse_metadata(self, message):
    """
    Sets metadata in a Legobot message

    Args:
        message (dict): Full message from the Discord websocket connection

    Returns:
        Legobot.Metadata
    """
    metadata = Metadata(source=self.actor_urn).__dict__
    if 'author' in message['d']:
        metadata['source_user'] = message['d']['author']['username']
    else:
        metadata['source_user'] = None
    if 'channel_id' in message['d']:
        metadata['source_channel'] = message['d']['channel_id']
    else:
        metadata['source_channel'] = None
    metadata['user_id'] = metadata['source_user']
    metadata['display_name'] = metadata['source_user']
    metadata['source_connector'] = 'discord'
    return metadata
python
{ "resource": "" }
q17616
DiscoBot.handle
train
def handle(self, message):
    """
    Dispatches messages to the appropriate handler based on opcode

    Args:
        message (dict): Full message from the Discord websocket connection
    """
    opcode = message['op']
    if opcode == 10:
        self.on_hello(message)
    elif opcode == 11:
        self.on_heartbeat(message)
    elif opcode == 0:
        self.on_message(message)
    else:
        logger.debug("Not a message we handle: OPCODE {}".format(opcode))
    return
python
{ "resource": "" }
q17617
DAE.resize
train
def resize(self):
    """Resize the dae arrays and extend them for init1 variables"""
    yext = self.m - len(self.y)
    xext = self.n - len(self.x)
    if yext > 0:
        yzeros = zeros(yext, 1)
        yones = ones(yext, 1)
        self.y = matrix([self.y, yzeros], (self.m, 1), 'd')
        self.g = matrix([self.g, yzeros], (self.m, 1), 'd')
        self.uy = matrix([self.uy, yones], (self.m, 1), 'd')
        self.zymin = matrix([self.zymin, yones], (self.m, 1), 'd')
        self.zymax = matrix([self.zymax, yones], (self.m, 1), 'd')
    if xext > 0:
        xzeros = zeros(xext, 1)
        xones = ones(xext, 1)
        self.x = matrix([self.x, xzeros], (self.n, 1), 'd')
        self.f = matrix([self.f, xzeros], (self.n, 1), 'd')
        self.ux = matrix([self.ux, xones], (self.n, 1), 'd')
        self.zxmin = matrix([self.zxmin, xones], (self.n, 1), 'd')
        self.zxmax = matrix([self.zxmax, xones], (self.n, 1), 'd')
python
{ "resource": "" }
q17618
DAE.hard_limit
train
def hard_limit(self, yidx, ymin, ymax, min_set=None, max_set=None):
    """Set hard limits for algebraic variables and reset the equation
    mismatches

    :param yidx: algebraic variable indices
    :param ymin: lower limit to check for
    :param ymax: upper limit to check for
    :param min_set: optional lower limit to set (``ymin`` as default)
    :param max_set: optional upper limit to set (``ymax`` as default)

    :type yidx: list, matrix
    :type ymin: matrix, int, float, list
    :type ymax: matrix, int, float, list
    :type min_set: matrix
    :type max_set: matrix

    :return: None
    """
    yidx = matrix(yidx)
    yval = self.y[yidx]
    ny = len(yidx)
    if isinstance(ymin, (int, float, list)):
        ymin = matrix(ymin, (ny, 1), 'd')
    if isinstance(ymax, (int, float, list)):
        ymax = matrix(ymax, (ny, 1), 'd')
    if not min_set:
        min_set = ymin
    elif isinstance(min_set, (int, float, list)):
        min_set = matrix(min_set, (ny, 1), 'd')
    if not max_set:
        max_set = ymax
    elif isinstance(max_set, (int, float, list)):
        max_set = matrix(max_set, (ny, 1), 'd')
    above = ageb(yval, ymax)
    below = aleb(yval, ymin)
    above_idx = index(above, 1.0)
    below_idx = index(below, 1.0)
    above_yidx = yidx[above_idx]
    below_yidx = yidx[below_idx]
    idx = list(above_idx) + list(below_idx)
    if len(above_yidx) > 0:
        self.y[above_yidx] = max_set[above_idx]
        self.zymax[above_yidx] = 0
    if len(below_yidx) > 0:
        self.y[below_yidx] = min_set[below_idx]
        self.zymin[below_yidx] = 0
    if len(idx):
        self.g[yidx[idx]] = 0
        self.ac_reset = True
python
{ "resource": "" }
q17619
DAE.hard_limit_remote
train
def hard_limit_remote(self, yidx, ridx, rtype='y', rmin=None, rmax=None,
                      min_yset=0, max_yset=0):
    """Limit the output of yidx if the remote y is not within the limits.

    This function needs to be modernized.
    """
    ny = len(yidx)
    assert ny == len(ridx), \
        "Length of output vars and remote vars does not match"
    assert rtype in ('x', 'y'), \
        "rtype must be either y (algeb) or x (state)"
    if isinstance(min_yset, (int, float)):
        min_yset = matrix(min_yset, (ny, 1), 'd')
    if isinstance(max_yset, (int, float)):
        max_yset = matrix(max_yset, (ny, 1), 'd')
    above_idx, below_idx = list(), list()
    yidx = matrix(yidx)
    if rmax:
        # find the over-limit remote idx
        above = ageb(self.__dict__[rtype][ridx], rmax)
        above_idx = index(above, 1.0)
        # reset the y values based on the remote limit violations
        self.y[yidx[above_idx]] = max_yset[above_idx]
        self.zymax[yidx[above_idx]] = 0
    if rmin:
        below = aleb(self.__dict__[rtype][ridx], rmin)
        below_idx = index(below, 1.0)
        self.y[yidx[below_idx]] = min_yset[below_idx]
        self.zymin[yidx[below_idx]] = 0
    idx = above_idx + below_idx
    self.g[yidx[idx]] = 0
    if len(idx) > 0:
        self.factorize = True
python
{ "resource": "" }
q17620
DAE.anti_windup
train
def anti_windup(self, xidx, xmin, xmax):
    """
    Anti-windup limiter for state variables.

    Resets the limited variables and differential equations.

    :param xidx: state variable indices
    :param xmin: lower limit
    :param xmax: upper limit
    :type xidx: matrix, list
    :type xmin: matrix, float, int, list
    :type xmax: matrix, float, int, list
    """
    xidx = matrix(xidx)
    xval = self.x[xidx]
    fval = self.f[xidx]
    if isinstance(xmin, (float, int, list)):
        xmin = matrix(xmin, xidx.size, 'd')
    if isinstance(xmax, (float, int, list)):
        xmax = matrix(xmax, xidx.size, 'd')
    x_above = ageb(xval, xmax)
    f_above = ageb(fval, 0.0)
    x_below = aleb(xval, xmin)
    f_below = aleb(fval, 0.0)
    above = aandb(x_above, f_above)
    above_idx = index(above, 1.0)
    if len(above_idx) > 0:
        above_xidx = xidx[above_idx]
        self.x[above_xidx] = xmax[above_idx]
        self.zxmax[above_xidx] = 0
    below = aandb(x_below, f_below)
    below_idx = index(below, 1.0)
    if len(below_idx) > 0:
        below_xidx = xidx[below_idx]
        self.x[below_xidx] = xmin[below_idx]
        self.zxmin[below_xidx] = 0
    idx = list(above_idx) + list(below_idx)
    if len(idx) > 0:
        self.f[xidx[idx]] = 0
        self.ac_reset = True
python
{ "resource": "" }
q17621
DAE.reset_Ac
train
def reset_Ac(self):
    """
    Reset the ``dae.Ac`` sparse matrix for equations disabled by the
    hard_limit and anti_windup limiters.

    :return: None
    """
    if self.ac_reset is False:
        return
    mn = self.m + self.n
    x = index(aandb(self.zxmin, self.zxmax), 0.)
    y = [i + self.n for i in index(aandb(self.zymin, self.zymax), 0.)]
    xy = list(x) + y
    eye = spdiag([1.0] * mn)
    H = spmatrix(1.0, xy, xy, (mn, mn), 'd')
    # Modifying ``eye`` in place is more efficient than ``eye = eye - H``:
    # CVXOPT can modify it directly because all the accessed elements exist.
    for idx in xy:
        eye[idx, idx] = 0
    if len(xy) > 0:
        self.Ac = eye * (self.Ac * eye) - H
        self.q[x] = 0
    self.ac_reset = False
    self.factorize = True
python
{ "resource": "" }
q17622
DAE.get_size
train
def get_size(self, m):
    """
    Return the 2-D size of a Jacobian matrix as a tuple
    """
    nrow, ncol = 0, 0
    if m[0] == 'F':
        nrow = self.n
    elif m[0] == 'G':
        nrow = self.m
    if m[1] == 'x':
        ncol = self.n
    elif m[1] == 'y':
        ncol = self.m
    return nrow, ncol
python
{ "resource": "" }
q17623
DAE.temp_to_spmatrix
train
def temp_to_spmatrix(self, ty):
    """
    Convert Jacobian tuples to matrices

    :param ty: name of the matrices to convert in ``('jac0', 'jac')``
    :return: None
    """
    assert ty in ('jac0', 'jac')
    jac0s = ['Fx0', 'Fy0', 'Gx0', 'Gy0']
    jacs = ['Fx', 'Fy', 'Gx', 'Gy']
    if ty == 'jac0':
        todo = jac0s
    elif ty == 'jac':
        todo = jacs
    for m in todo:
        self.__dict__[m] = spmatrix(self._temp[m]['V'],
                                    self._temp[m]['I'],
                                    self._temp[m]['J'],
                                    self.get_size(m), 'd')
        if ty == 'jac':
            self.__dict__[m] += self.__dict__[m + '0']
    self.apply_set(ty)
python
{ "resource": "" }
q17624
DAE.apply_set
train
def apply_set(self, ty):
    """
    Apply Jacobian set values to matrices

    :param ty: Jacobian type in ``('jac0', 'jac')``
    :return: None
    """
    assert ty in ('jac0', 'jac')
    if ty == 'jac0':
        todo = ['Fx0', 'Fy0', 'Gx0', 'Gy0']
    else:
        todo = ['Fx', 'Fy', 'Gx', 'Gy']
    for m in todo:
        for idx in range(len(self._set[m]['I'])):
            i = self._set[m]['I'][idx]
            j = self._set[m]['J'][idx]
            v = self._set[m]['V'][idx]
            self.__dict__[m][i, j] = v
python
{ "resource": "" }
q17625
DAE.show
train
def show(self, eq, value=None):
    """Show an equation or variable array along with the names"""
    if eq in ['f', 'x']:
        key = 'unamex'
    elif eq in ['g', 'y']:
        key = 'unamey'
    if value:
        value = list(value)
    else:
        value = list(self.__dict__[eq])
    out = ''
    for name, val, idx in zip(self.system.varname.__dict__[key],
                              value, range(len(value))):
        out += '{:20s} [{:>12.4f}] {:g}\n'.format(name, val, idx)
    return out
python
{ "resource": "" }
q17626
DAE.find_val
train
def find_val(self, eq, val):
    """Return the name of the equation having the given value"""
    if eq not in ('f', 'g', 'q'):
        return
    elif eq in ('f', 'q'):
        key = 'unamex'
    elif eq == 'g':
        key = 'unamey'
    idx = 0
    for m, n in zip(self.system.varname.__dict__[key], self.__dict__[eq]):
        if n == val:
            return m, idx
        idx += 1
    return
python
{ "resource": "" }
q17627
DAE.reset_small
train
def reset_small(self, eq):
    """Reset numbers smaller than 1e-12 in the f and g equations"""
    assert eq in ('f', 'g')
    for idx, var in enumerate(self.__dict__[eq]):
        if abs(var) <= 1e-12:
            self.__dict__[eq][idx] = 0
python
{ "resource": "" }
q17628
DAE.check_diag
train
def check_diag(self, jac, name):
    """
    Check matrix ``jac`` for diagonal elements that equal zero
    """
    system = self.system
    pos = []
    names = []
    pairs = ''
    size = jac.size
    diag = jac[0:size[0] ** 2:size[0] + 1]
    for idx in range(size[0]):
        if abs(diag[idx]) <= 1e-8:
            pos.append(idx)
    for idx in pos:
        names.append(system.varname.__dict__[name][idx])
    if len(names) > 0:
        for i, j in zip(pos, names):
            pairs += '{0}: {1}\n'.format(i, j)
        logger.debug('Jacobian diagonal check:')
        logger.debug(pairs)
python
{ "resource": "" }
q17629
get_exception_source
train
def get_exception_source():
    """Return the full file path, file name, line number, function name,
    and line contents causing the last exception."""
    _, _, tb = sys.exc_info()
    while tb.tb_next:
        tb = tb.tb_next
    f = tb.tb_frame
    lineno = tb.tb_lineno
    co = f.f_code
    filefullpath = co.co_filename
    filename = os.path.basename(filefullpath)
    name = co.co_name
    linecache.checkcache(filefullpath)
    line = linecache.getline(filefullpath, lineno, f.f_globals)
    if line:
        line = line.strip()
    else:
        line = None
    return filefullpath, filename, lineno, name, line
python
{ "resource": "" }
q17630
CallbackHandler.prepare
train
def prepare(self, record):  # Function taken from the Python 3.6 QueueHandler
    """
    Prepares a record for queuing. The object returned by this method is
    enqueued.

    The base implementation formats the record to merge the message and
    arguments, and removes unpickleable items from the record in-place.

    You might want to override this method if you want to convert the
    record to a dict or JSON string, or send a modified copy of the
    record while leaving the original intact.
    """
    # The format operation gets traceback text into record.exc_text
    # (if there's exception data), and also puts the message into
    # record.message. We can then use this to replace the original
    # msg + args, as these might be unpickleable. We also zap the
    # exc_info attribute, as it's no longer needed and, if not None,
    # will typically not be pickleable.
    self.format(record)
    record.msg = record.message
    record.args = None
    record.exc_info = None
    return record
python
{ "resource": "" }
q17631
CallbackHandler.emit
train
def emit(self, record):
    """Send a LogRecord to the callback function, after preparing it
    for serialization."""
    try:
        self._callback(self.prepare(record))
    except Exception:
        self.handleError(record)
python
{ "resource": "" }
q17632
Frontend.run
train
def run(self):
    """The main loop of the frontend. Here incoming messages from the
    service are processed and forwarded to the corresponding callback
    methods."""
    self.log.debug("Entered main loop")
    while not self.shutdown:
        # If no service is running, slow down the main loop
        if not self._pipe_service:
            time.sleep(0.3)
        self.update_status()
        # While a service is running, check for incoming messages from
        # that service
        if self._pipe_service and self._pipe_service.poll(1):
            try:
                message = self._pipe_service.recv()
                if isinstance(message, dict) and "band" in message:
                    # only dictionaries with a 'band' entry are valid messages
                    try:
                        handler = getattr(self, "parse_band_" + message["band"])
                    except AttributeError:
                        handler = None
                        self.log.warning("Unknown band %s", str(message["band"]))
                    if handler:
                        handler(message)
                else:
                    self.log.warning("Invalid message received %s", str(message))
            except EOFError:
                # Service has gone away
                error_message = False
                if self._service_status == CommonService.SERVICE_STATUS_END:
                    self.log.info("Service terminated")
                elif self._service_status == CommonService.SERVICE_STATUS_ERROR:
                    error_message = "Service terminated with error code"
                elif self._service_status in (
                    CommonService.SERVICE_STATUS_NONE,
                    CommonService.SERVICE_STATUS_NEW,
                    CommonService.SERVICE_STATUS_STARTING,
                ):
                    error_message = (
                        "Service may have died unexpectedly in "
                        + "initialization (last known status: %s)"
                        % CommonService.human_readable_state.get(
                            self._service_status, self._service_status
                        )
                    )
                else:
                    error_message = (
                        "Service may have died unexpectedly"
                        " (last known status: %s)"
                        % CommonService.human_readable_state.get(
                            self._service_status, self._service_status
                        )
                    )
                if error_message:
                    self.log.error(error_message)
                self._terminate_service()
                if self.restart_service:
                    self.exponential_backoff()
                else:
                    self.shutdown = True
                    if error_message:
                        raise workflows.Error(error_message)
        with self.__lock:
            if (
                self._service is None
                and self.restart_service
                and self._service_factory
            ):
                self.update_status(status_code=CommonService.SERVICE_STATUS_NEW)
                self.switch_service()
        # Check that the transport is alive
        if not self._transport.is_connected():
            self._terminate_service()
            raise workflows.Error("Lost transport layer connection")
    self.log.debug("Left main loop")
    self.update_status(status_code=CommonService.SERVICE_STATUS_TEARDOWN)
    self._terminate_service()
    self.log.debug("Terminating.")
python
{ "resource": "" }
q17633
Frontend.send_command
train
def send_command(self, command):
    """Send a command to the service via the command queue."""
    if self._pipe_commands:
        self._pipe_commands.send(command)
    else:
        if self.shutdown:
            # Stop delivering messages in shutdown.
            self.log.info(
                "During shutdown no command queue pipe found for command\n%s",
                str(command),
            )
        else:
            self.log.error(
                "No command queue pipe found for command\n%s", str(command)
            )
python
{ "resource": "" }
q17634
Frontend.process_transport_command
train
def process_transport_command(self, header, message):
    """Parse a command coming in through the transport command
    subscription."""
    if not isinstance(message, dict):
        return
    relevant = False
    if "host" in message:
        # Filter by host
        if message["host"] != self.__hostid:
            return
        relevant = True
    if "service" in message:
        # Filter by service
        if message["service"] != self._service_class_name:
            return
        relevant = True
    if not relevant:
        # Ignore message unless at least one filter matches
        return
    if message.get("command"):
        self.log.info(
            "Received command '%s' via transport layer", message["command"]
        )
        if message["command"] == "shutdown":
            self.shutdown = True
    else:
        self.log.warning("Received invalid transport command message")
python
{ "resource": "" }
q17635
Frontend.parse_band_log
train
def parse_band_log(self, message):
    """Process incoming logging messages from the service."""
    if "payload" in message and hasattr(message["payload"], "name"):
        record = message["payload"]
        for k in dir(record):
            if k.startswith("workflows_exc_"):
                setattr(record, k[14:], getattr(record, k))
                delattr(record, k)
        for k, v in self.get_status().items():
            setattr(record, "workflows_" + k, v)
        logging.getLogger(record.name).handle(record)
    else:
        self.log.warning(
            "Received broken record on log band\n"
            + "Message: %s\nRecord: %s",
            str(message),
            str(
                hasattr(message.get("payload"), "__dict__")
                and message["payload"].__dict__
            ),
        )
python
{ "resource": "" }
q17636
Frontend.parse_band_request_termination
train
def parse_band_request_termination(self, message):
    """Service declares that it should be terminated."""
    self.log.debug("Service requests termination")
    self._terminate_service()
    if not self.restart_service:
        self.shutdown = True
python
{ "resource": "" }
q17637
Frontend.parse_band_set_name
train
def parse_band_set_name(self, message):
    """Process an incoming message indicating a service name change."""
    if message.get("name"):
        self._service_name = message["name"]
    else:
        self.log.warning(
            "Received broken record on set_name band\nMessage: %s", str(message)
        )
python
{ "resource": "" }
q17638
Frontend.parse_band_status_update
train
def parse_band_status_update(self, message):
    """Process incoming status updates from the service."""
    self.log.debug("Status update: " + str(message))
    self.update_status(status_code=message["statuscode"])
python
{ "resource": "" }
q17639
Frontend.get_status
train
def get_status(self):
    """Return a dictionary containing all relevant status information
    to be broadcast across the network."""
    return {
        "host": self.__hostid,
        "status": self._service_status_announced,
        "statustext": CommonService.human_readable_state.get(
            self._service_status_announced
        ),
        "service": self._service_name,
        "serviceclass": self._service_class_name,
        "utilization": self._utilization.report(),
        "workflows": workflows.version(),
    }
python
{ "resource": "" }
q17640
Frontend.exponential_backoff
train
def exponential_backoff(self):
    """A function that keeps waiting longer and longer the more rapidly
    it is called. It can be used to increasingly slow down service
    starts when they keep failing."""
    last_service_switch = self._service_starttime
    if not last_service_switch:
        return
    time_since_last_switch = time.time() - last_service_switch
    if not self._service_rapidstarts:
        self._service_rapidstarts = 0
    minimum_wait = 0.1 * (2 ** self._service_rapidstarts)
    minimum_wait = min(5, minimum_wait)
    if time_since_last_switch > 10:
        self._service_rapidstarts = 0
        return
    self._service_rapidstarts += 1
    self.log.debug("Slowing down service starts (%.1f seconds)", minimum_wait)
    time.sleep(minimum_wait)
python
{ "resource": "" }
q17641
Frontend._terminate_service
train
def _terminate_service(self):
    """Force termination of a running service.
    Disconnect queues, end queue feeder threads.
    Wait for the service process to clear, then drop all references."""
    with self.__lock:
        if self._service:
            self._service.terminate()
        if self._pipe_commands:
            self._pipe_commands.close()
        if self._pipe_service:
            self._pipe_service.close()
        self._pipe_commands = None
        self._pipe_service = None
        self._service_class_name = None
        self._service_name = None
        if self._service_status != CommonService.SERVICE_STATUS_TEARDOWN:
            self.update_status(status_code=CommonService.SERVICE_STATUS_END)
        if self._service:
            self._service.join()  # must wait for process to be actually destroyed
        self._service = None
python
{ "resource": "" }
q17642
Solver.symbolic
train
def symbolic(self, A):
    """
    Return the symbolic factorization of sparse matrix ``A``

    Parameters
    ----------
    A
        Sparse matrix to factorize

    Returns
    -------
    Symbolic factorization of ``A``, computed with the library selected
    in ``self.sparselib`` (``umfpack`` or ``klu``)
    """
    if self.sparselib == 'umfpack':
        return umfpack.symbolic(A)
    elif self.sparselib == 'klu':
        return klu.symbolic(A)
python
{ "resource": "" }
q17643
Solver.numeric
train
def numeric(self, A, F):
    """
    Return the numeric factorization of sparse matrix ``A`` using
    symbolic factorization ``F``

    Parameters
    ----------
    A
        Sparse matrix
    F
        Symbolic factorization

    Returns
    -------
    N
        Numeric factorization of ``A``
    """
    if self.sparselib == 'umfpack':
        return umfpack.numeric(A, F)
    elif self.sparselib == 'klu':
        return klu.numeric(A, F)
python
{ "resource": "" }
q17644
Solver.solve
train
def solve(self, A, F, N, b):
    """
    Solve the linear system ``Ax = b`` using numeric factorization ``N``
    and symbolic factorization ``F``. Store the solution in ``b``.

    Parameters
    ----------
    A
        Sparse matrix
    F
        Symbolic factorization
    N
        Numeric factorization
    b
        RHS of the equation

    Returns
    -------
    None
    """
    if self.sparselib == 'umfpack':
        umfpack.solve(A, N, b)
    elif self.sparselib == 'klu':
        klu.solve(A, F, N, b)
python
{ "resource": "" }
q17645
Solver.linsolve
train
def linsolve(self, A, b):
    """
    Solve the linear equation set ``Ax = b`` and store the solutions
    in ``b``.

    Parameters
    ----------
    A
        Sparse matrix
    b
        RHS of the equation

    Returns
    -------
    None
    """
    if self.sparselib == 'umfpack':
        return umfpack.linsolve(A, b)
    elif self.sparselib == 'klu':
        return klu.linsolve(A, b)
python
{ "resource": "" }
q17646
Bus._varname_inj
train
def _varname_inj(self):
    """Customize varname for bus injections"""
    if not self.n:
        return
    # Bus Pi
    m = self.system.dae.m
    xy_idx = range(m, self.n + m)
    self.system.varname.append(
        listname='unamey',
        xy_idx=xy_idx,
        var_name='P',
        element_name=self.name)
    self.system.varname.append(
        listname='fnamey',
        xy_idx=xy_idx,
        var_name='P',
        element_name=self.name)
    # Bus Qi
    xy_idx = range(m + self.n, m + 2 * self.n)
    self.system.varname.append(
        listname='unamey',
        xy_idx=xy_idx,
        var_name='Q',
        element_name=self.name)
    self.system.varname.append(
        listname='fnamey',
        xy_idx=xy_idx,
        var_name='Q',
        element_name=self.name)
python
{ "resource": "" }
q17647
Bus.init0
train
def init0(self, dae):
    """Set bus Va and Vm initial values"""
    if not self.system.pflow.config.flatstart:
        dae.y[self.a] = self.angle + 1e-10 * uniform(self.n)
        dae.y[self.v] = self.voltage
    else:
        dae.y[self.a] = matrix(0.0, (self.n, 1), 'd') + \
            1e-10 * uniform(self.n)
        dae.y[self.v] = matrix(1.0, (self.n, 1), 'd')
python
{ "resource": "" }
q17648
get_known_transports
train
def get_known_transports():
    """Return a dictionary of all known transport mechanisms."""
    if not hasattr(get_known_transports, "cache"):
        setattr(
            get_known_transports,
            "cache",
            {
                e.name: e.load()
                for e in pkg_resources.iter_entry_points("workflows.transport")
            },
        )
    return get_known_transports.cache.copy()
python
{ "resource": "" }
q17649
WindBase.windspeed
train
def windspeed(self, t):
    """Return the wind speed list at time ``t``"""
    ws = [0] * self.n
    for i in range(self.n):
        q = ceil(t / self.dt[i])
        q_prev = 0 if q == 0 else q - 1
        r = t % self.dt[i]
        r = 0 if abs(r) < 1e-6 else r
        if r == 0:
            ws[i] = self.speed[i][q]
        else:
            t1 = self.time[i][q_prev]
            s1 = self.speed[i][q_prev]
            s2 = self.speed[i][q]
            ws[i] = s1 + (t - t1) * (s2 - s1) / self.dt[i]
    return matrix(ws)
python
{ "resource": "" }
q17650
PowerSystem.to_sysbase
train
def to_sysbase(self):
    """
    Convert model parameters to the system base.

    This function calls the ``data_to_sys_base`` function of the
    loaded models.

    Returns
    -------
    None
    """
    if self.config.base:
        for item in self.devman.devices:
            self.__dict__[item].data_to_sys_base()
python
{ "resource": "" }
q17651
PowerSystem.to_elembase
train
def to_elembase(self):
    """
    Convert parameters back to the element base.

    This function calls the ``data_to_elem_base`` function of the
    loaded models.

    Returns
    -------
    None
    """
    if self.config.base:
        for item in self.devman.devices:
            self.__dict__[item].data_to_elem_base()
python
{ "resource": "" }
q17652
PowerSystem.group_add
train
def group_add(self, name='Ungrouped'):
    """
    Dynamically add a group instance to the system if it does not exist.

    Parameters
    ----------
    name : str, optional ('Ungrouped' as default)
        Name of the group

    Returns
    -------
    None
    """
    if not hasattr(self, name):
        self.__dict__[name] = Group(self, name)
        self.loaded_groups.append(name)
python
{ "resource": "" }
q17653
PowerSystem.model_import
train
def model_import(self):
    """
    Import and instantiate the non-JIT models and the JIT models.

    Models defined in ``jits`` and ``non_jits`` in ``models/__init__.py``
    will be imported and instantiated accordingly.

    Returns
    -------
    None
    """
    # non-JIT models
    for file, pair in non_jits.items():
        for cls, name in pair.items():
            themodel = importlib.import_module('andes.models.' + file)
            theclass = getattr(themodel, cls)
            self.__dict__[name] = theclass(self, name)
            group = self.__dict__[name]._group
            self.group_add(group)
            self.__dict__[group].register_model(name)
            self.devman.register_device(name)
    # import JIT models
    for file, pair in jits.items():
        for cls, name in pair.items():
            self.__dict__[name] = JIT(self, file, cls, name)
python
{ "resource": "" }
q17654
PowerSystem.model_setup
train
def model_setup(self):
    """
    Call the ``setup`` function of the loaded models.

    This function is to be called after parsing all the data files
    during the system setup.

    Returns
    -------
    None
    """
    for device in self.devman.devices:
        if self.__dict__[device].n:
            self.__dict__[device].setup()
python
{ "resource": "" }
q17655
PowerSystem.xy_addr0
train
def xy_addr0(self):
    """
    Assign indices and variable names for variables used in power flow.

    For each loaded model with the ``pflow`` flag set to ``True``, the
    following functions are called sequentially:

    * ``_addr()``
    * ``_intf_network()``
    * ``_intf_ctrl()``

    After resizing the ``varname`` instance, variable names from models
    are stored by calling ``_varname()``.

    Returns
    -------
    None
    """
    for device, pflow in zip(self.devman.devices, self.call.pflow):
        if pflow:
            self.__dict__[device]._addr()
            self.__dict__[device]._intf_network()
            self.__dict__[device]._intf_ctrl()
    self.varname.resize()
    for device, pflow in zip(self.devman.devices, self.call.pflow):
        if pflow:
            self.__dict__[device]._varname()
python
{ "resource": "" }
q17656
PowerSystem.rmgen
train
def rmgen(self, idx):
    """
    Remove the static generators if their dynamic models exist.

    Parameters
    ----------
    idx : list
        A list of static generator idx

    Returns
    -------
    None
    """
    stagens = []
    for device, stagen in zip(self.devman.devices, self.call.stagen):
        if stagen:
            stagens.append(device)
    for gen in idx:
        for stagen in stagens:
            if gen in self.__dict__[stagen].uid.keys():
                self.__dict__[stagen].disable_gen(gen)
python
{ "resource": "" }
q17657
PowerSystem.check_event
train
def check_event(self, sim_time):
    """
    Check for event occurrence in ``Event`` group models at ``sim_time``.

    Parameters
    ----------
    sim_time : float
        The current simulation time

    Returns
    -------
    list
        A list of model names that report an event at ``sim_time``
    """
    ret = []
    for model in self.__dict__['Event'].all_models:
        if self.__dict__[model].is_time(sim_time):
            ret.append(model)
    if self.Breaker.is_time(sim_time):
        ret.append('Breaker')
    return ret
python
{ "resource": "" }
q17658
PowerSystem.get_event_times
train
def get_event_times(self):
    """
    Return the event times of Fault, Breaker and other timed events.

    Returns
    -------
    list
        A sorted list of event times
    """
    times = []
    times.extend(self.Breaker.get_times())
    for model in self.__dict__['Event'].all_models:
        times.extend(self.__dict__[model].get_times())
    if times:
        times = sorted(list(set(times)))
    return times
python
{ "resource": "" }
q17659
PowerSystem.load_config
train
def load_config(self, conf_path):
    """
    Load config from an ``andes.conf`` file.

    This function creates a ``configparser.ConfigParser`` object to read
    the specified conf file and calls the ``load_config`` function of
    the config instances of the system and the routines.

    Parameters
    ----------
    conf_path : None or str
        Path to the Andes config file. If ``None``, the function body
        will not run.

    Returns
    -------
    None
    """
    if conf_path is None:
        return
    conf = configparser.ConfigParser()
    conf.read(conf_path)
    self.config.load_config(conf)
    for r in routines.__all__:
        self.__dict__[r.lower()].config.load_config(conf)
    logger.debug('Loaded config file from {}.'.format(conf_path))
python
{ "resource": "" }
q17660
PowerSystem.dump_config
train
def dump_config(self, file_path):
    """
    Dump system and routine configurations to an rc-formatted file.

    Parameters
    ----------
    file_path : str
        path to the configuration file. The user will be prompted if the
        file already exists.

    Returns
    -------
    None
    """
    if os.path.isfile(file_path):
        logger.debug('File {} already exists. Overwrite? [y/N]'.format(
            file_path))
        choice = input('File {} already exists. Overwrite? [y/N]'.format(
            file_path)).lower()
        if len(choice) == 0 or choice[0] != 'y':
            logger.info('File not overwritten.')
            return
    conf = self.config.dump_conf()
    for r in routines.__all__:
        conf = self.__dict__[r.lower()].config.dump_conf(conf)
    with open(file_path, 'w') as f:
        conf.write(f)
    logger.info('Config written to {}'.format(file_path))
python
{ "resource": "" }
q17661
PowerSystem.check_islands
train
def check_islands(self, show_info=False):
    """
    Check the connectivity of the ac system.

    Parameters
    ----------
    show_info : bool
        Show information when the system has islands. To be used when
        initializing power flow.

    Returns
    -------
    None
    """
    if not hasattr(self, 'Line'):
        logger.error('<Line> device not found.')
        return
    self.Line.connectivity(self.Bus)
    if show_info is True:
        if len(self.Bus.islanded_buses) == 0 and \
                len(self.Bus.island_sets) == 0:
            logger.debug('System is interconnected.')
        else:
            logger.info(
                'System contains {:d} islands and {:d} islanded buses.'
                .format(len(self.Bus.island_sets),
                        len(self.Bus.islanded_buses)))
        nosw_island = []  # islands with no slack bus
        msw_island = []   # islands with multiple slack buses
        for idx, island in enumerate(self.Bus.island_sets):
            nosw = 1
            for item in self.SW.bus:
                if self.Bus.uid[item] in island:
                    nosw -= 1
            if nosw == 1:
                nosw_island.append(idx)
            elif nosw < 0:
                msw_island.append(idx)
        if nosw_island:
            logger.warning(
                'Slack bus is not defined for {:g} island(s).'.format(
                    len(nosw_island)))
        if msw_island:
            logger.warning(
                'Multiple slack buses are defined for {:g} island(s).'
                .format(len(msw_island)))
        if (not nosw_island) and (not msw_island):
            logger.debug('Each island has a slack bus correctly defined.')
python
{ "resource": "" }
q17662
PowerSystem.get_busdata
train
def get_busdata(self, sort_names=False):
    """
    Get ac bus data from the solved power flow.
    """
    if self.pflow.solved is False:
        logger.error('Power flow not solved when getting bus data.')
        return tuple([False] * 8)
    idx = self.Bus.idx
    names = self.Bus.name
    Vm = [self.dae.y[x] for x in self.Bus.v]
    if self.pflow.config.usedegree:
        Va = [self.dae.y[x] * rad2deg for x in self.Bus.a]
    else:
        Va = [self.dae.y[x] for x in self.Bus.a]
    Pg = [self.Bus.Pg[x] for x in range(self.Bus.n)]
    Qg = [self.Bus.Qg[x] for x in range(self.Bus.n)]
    Pl = [self.Bus.Pl[x] for x in range(self.Bus.n)]
    Ql = [self.Bus.Ql[x] for x in range(self.Bus.n)]
    if sort_names:
        ret = (list(x) for x in zip(*sorted(
            zip(idx, names, Vm, Va, Pg, Qg, Pl, Ql), key=itemgetter(0))))
    else:
        ret = idx, names, Vm, Va, Pg, Qg, Pl, Ql
    return ret
python
{ "resource": "" }
q17663
PowerSystem.get_nodedata
train
def get_nodedata(self, sort_names=False):
    """
    Get dc node data from the solved power flow.
    """
    if not self.Node.n:
        return
    if not self.pflow.solved:
        logger.error('Power flow not solved when getting node data.')
        return tuple([False] * 3)  # one False per returned field
    idx = self.Node.idx
    names = self.Node.name
    V = [self.dae.y[x] for x in self.Node.v]
    if sort_names:
        ret = (list(x) for x in zip(*sorted(zip(idx, names, V),
                                            key=itemgetter(0))))
    else:
        ret = idx, names, V
    return ret
python
{ "resource": "" }
q17664
PowerSystem.get_linedata
train
def get_linedata(self, sort_names=False):
    """
    Get line data from the solved power flow.
    """
    if not self.pflow.solved:
        logger.error('Power flow not solved when getting line data.')
        return tuple([False] * 9)  # one False per returned field
    idx = self.Line.idx
    fr = self.Line.bus1
    to = self.Line.bus2
    Sloss = self.Line.S1 + self.Line.S2
    Pfr = list(self.Line.S1.real())
    Qfr = list(self.Line.S1.imag())
    Pto = list(self.Line.S2.real())
    Qto = list(self.Line.S2.imag())
    Ploss = list(Sloss.real())
    Qloss = list(Sloss.imag())
    if sort_names:
        ret = (list(x) for x in zip(*sorted(
            zip(idx, fr, to, Pfr, Qfr, Pto, Qto, Ploss, Qloss),
            key=itemgetter(0))))
    else:
        ret = idx, fr, to, Pfr, Qfr, Pto, Qto, Ploss, Qloss
    return ret
python
{ "resource": "" }
q17665
Group.register_model
train
def register_model(self, model):
    """
    Register ``model`` to this group

    :param model: model name
    :return: None
    """
    assert isinstance(model, str)
    if model not in self.all_models:
        self.all_models.append(model)
python
{ "resource": "" }
q17666
Group.register_element
train
def register_element(self, model, idx):
    """
    Register an element with index ``idx`` to ``model``

    :param model: model name
    :param idx: element idx
    :return: final element idx
    """
    if idx is None:
        idx = model + '_' + str(len(self._idx_model))
    self._idx_model[idx] = model
    self._idx.append(idx)
    return idx
python
{ "resource": "" }
q17667
Group.get_field
train
def get_field(self, field, idx):
    """
    Return the field ``field`` of elements ``idx`` in the group

    :param field: field name
    :param idx: element idx
    :return: values of the requested field
    """
    ret = []
    scalar = False
    # TODO: ensure idx is unique in this Group
    if isinstance(idx, (int, float, str)):
        scalar = True
        idx = [idx]
    models = [self._idx_model[i] for i in idx]
    for i, m in zip(idx, models):
        ret.append(self.system.__dict__[m].get_field(field, idx=i))
    if scalar is True:
        return ret[0]
    else:
        return ret
python
{ "resource": "" }
q17668
Group.set_field
train
def set_field(self, field, idx, value):
    """
    Set the field ``field`` of elements ``idx`` to ``value``.

    This function does not check if the field is valid for all models.

    :param field: field name
    :param idx: element idx
    :param value: value of fields to set
    :return: None
    """
    if isinstance(idx, (int, float, str)):
        idx = [idx]
    if isinstance(value, (int, float)):
        value = [value]
    models = [self._idx_model[i] for i in idx]
    for i, m, v in zip(idx, models, value):
        assert hasattr(self.system.__dict__[m], field)
        uid = self.system.__dict__[m].get_uid(i)
        self.system.__dict__[m].__dict__[field][uid] = v
python
{ "resource": "" }
q17669
get_sort_field
train
def get_sort_field(request):
    """
    Retrieve the field used for sorting a queryset

    :param request: HTTP request
    :return: the sort field name, prefixed with "-" if ordering is
        descending
    """
    sort_direction = request.GET.get("dir")
    field_name = (request.GET.get("sort") or "") if sort_direction else ""
    sort_sign = "-" if sort_direction == "desc" else ""
    result_field = "{sign}{field}".format(sign=sort_sign, field=field_name)
    return result_field
python
{ "resource": "" }
q17670
MediaWiki.normalize_api_url
train
def normalize_api_url(self):
    """
    Checks that the API URL used to initialize this object actually
    returns JSON. If it doesn't, make some educated guesses and try to
    find the correct URL.

    :returns: a valid API URL or ``None``
    """
    def tester(self, api_url):
        """
        Attempts to fetch general information about the MediaWiki
        instance in order to test whether *api_url* will return JSON.
        """
        data = self._fetch_http(api_url, {'action': 'query',
                                          'meta': 'siteinfo'})
        try:
            data_json = json.loads(data)
            return (data, data_json)
        except ValueError:
            return (data, None)

    data, data_json = tester(self, self._api_url)
    if data_json:
        return self._api_url
    else:
        # if there's an index.php in the URL, we might find the API
        if 'index.php' in self._api_url:
            test_api_url = self._api_url.split('index.php')[0] + 'api.php'
            test_data, test_data_json = tester(self, test_api_url)
            if test_data_json:
                self._api_url = test_api_url
                return self._api_url
        return None
python
{ "resource": "" }
q17671
build
train
def build(
        documentPath,
        outputUFOFormatVersion=3,
        roundGeometry=True,
        verbose=True,       # not supported
        logPath=None,       # not supported
        progressFunc=None,  # not supported
        processRules=True,
        logger=None,
        useVarlib=False,
        ):
    """ Simple builder for UFO designspaces. """
    import os
    import glob
    if os.path.isdir(documentPath):
        # process all *.designspace documents in this folder
        todo = glob.glob(os.path.join(documentPath, "*.designspace"))
    else:
        # process the single designspace document
        todo = [documentPath]
    results = []
    for path in todo:
        document = DesignSpaceProcessor(ufoVersion=outputUFOFormatVersion)
        document.useVarlib = useVarlib
        document.roundGeometry = roundGeometry
        document.read(path)
        try:
            r = document.generateUFO(processRules=processRules)
            results.append(r)
        except Exception:
            if logger:
                logger.exception("ufoProcessor error")
    return results
python
{ "resource": "" }
q17672
DesignSpaceProcessor.getInfoMutator
train
def getInfoMutator(self):
    """ Returns an info mutator """
    if self._infoMutator:
        return self._infoMutator
    infoItems = []
    for sourceDescriptor in self.sources:
        if sourceDescriptor.layerName is not None:
            continue
        loc = Location(sourceDescriptor.location)
        sourceFont = self.fonts[sourceDescriptor.name]
        if sourceFont is None:
            continue
        if hasattr(sourceFont.info, "toMathInfo"):
            infoItems.append((loc, sourceFont.info.toMathInfo()))
        else:
            infoItems.append((loc, self.mathInfoClass(sourceFont.info)))
    bias, self._infoMutator = self.getVariationModel(
        infoItems,
        axes=self.serializedAxes,
        bias=self.newDefaultLocation())
    return self._infoMutator
python
{ "resource": "" }
q17673
DesignSpaceProcessor.collectMastersForGlyph
train
def collectMastersForGlyph(self, glyphName, decomposeComponents=False):
    """ Collect (location, masterGlyph, sourceInfo) items for glyphName.

    decomposeComponents = True causes the source glyphs to be decomposed
    first before building the mutator. That gives you instances that do
    not depend on a complete font, e.g. when calculating previews.

    XXX check glyphs in layers
    """
    items = []
    empties = []
    foundEmpty = False
    for sourceDescriptor in self.sources:
        if not os.path.exists(sourceDescriptor.path):
            p = "\tMissing UFO at %s" % sourceDescriptor.path
            if p not in self.problems:
                self.problems.append(p)
            continue
        if glyphName in sourceDescriptor.mutedGlyphNames:
            continue
        thisIsDefault = self.default == sourceDescriptor
        ignoreMaster, filteredLocation = self.filterThisLocation(
            sourceDescriptor.location, self.mutedAxisNames)
        if ignoreMaster:
            continue
        f = self.fonts.get(sourceDescriptor.name)
        if f is None:
            continue
        loc = Location(sourceDescriptor.location)
        sourceLayer = f
        if glyphName not in f:
            # TODO: log this
            continue
        layerName = getDefaultLayerName(f)
        sourceGlyphObject = None
        # handle source layers
        if sourceDescriptor.layerName is not None:
            # start looking for a layer
            # do not bother for mutatorMath designspaces
            layerName = sourceDescriptor.layerName
            sourceLayer = getLayer(f, sourceDescriptor.layerName)
            if sourceLayer is None:
                continue
            if glyphName not in sourceLayer:
                # start looking for a glyph:
                # this might be a support in a sparse layer,
                # so we're skipping!
                continue
        # still have to check if the source layer glyph is empty
        if glyphName not in sourceLayer:
            continue
        else:
            sourceGlyphObject = sourceLayer[glyphName]
            if checkGlyphIsEmpty(sourceGlyphObject, allowWhiteSpace=True):
                foundEmpty = True
        if decomposeComponents:
            # what about decomposing glyphs in a partial font?
            temp = self.glyphClass()
            p = temp.getPointPen()
            dpp = DecomposePointPen(sourceLayer, p)
            sourceGlyphObject.drawPoints(dpp)
            temp.width = sourceGlyphObject.width
            temp.name = sourceGlyphObject.name
            processThis = temp
        else:
            processThis = sourceGlyphObject
        sourceInfo = dict(
            source=f.path,
            glyphName=glyphName,
            layerName=layerName,
            location=filteredLocation,  # sourceDescriptor.location
            sourceName=sourceDescriptor.name,
        )
        if hasattr(processThis, "toMathGlyph"):
            processThis = processThis.toMathGlyph()
        else:
            processThis = self.mathGlyphClass(processThis)
        items.append((loc, processThis, sourceInfo))
        empties.append((thisIsDefault, foundEmpty))
    # check the empties:
    # if the default glyph is empty, then all must be empty
    # if the default glyph is not empty then none can be empty
    checkedItems = []
    emptiesAllowed = False
    # first check if the default is empty.
    # remember that the sources can be in any order
    for i, p in enumerate(empties):
        isDefault, isEmpty = p
        if isDefault and isEmpty:
            emptiesAllowed = True
    # now we know what to look for
    if not emptiesAllowed:
        for i, p in enumerate(empties):
            isDefault, isEmpty = p
            if not isEmpty:
                checkedItems.append(items[i])
    else:
        for i, p in enumerate(empties):
            isDefault, isEmpty = p
            if isEmpty:
                checkedItems.append(items[i])
    return checkedItems
python
{ "resource": "" }
q17674
checkGlyphIsEmpty
train
def checkGlyphIsEmpty(glyph, allowWhiteSpace=True):
    """
    Establish whether the glyph is completely empty by drawing the glyph
    with an EmptyPen. Additionally, the unicode of the glyph is checked
    against a list of known unicode whitespace characters. This makes it
    possible to filter out glyphs that have a valid reason to be empty
    and those that can be ignored.
    """
    whiteSpace = [
        0x9,     # horizontal tab
        0xa,     # line feed
        0xb,     # vertical tab
        0xc,     # form feed
        0xd,     # carriage return
        0x20,    # space
        0x85,    # next line
        0xa0,    # no-break space
        0x1680,  # ogham space mark
        0x180e,  # mongolian vowel separator
        0x2000,  # en quad
        0x2001,  # em quad
        0x2003,  # em space
        0x2004,  # three-per-em space
        0x2005,  # four-per-em space
        0x2006,  # six-per-em space
        0x2007,  # figure space
        0x2008,  # punctuation space
        0x2009,  # thin space
        0x200a,  # hair space
        0x2028,  # line separator
        0x2029,  # paragraph separator
        0x202f,  # narrow no-break space
        0x205f,  # medium mathematical space
        0x3000,  # ideographic space
    ]
    emptyPen = EmptyPen()
    glyph.drawPoints(emptyPen)
    if emptyPen.isEmpty():
        # we're empty; are we allowed to be?
        if glyph.unicode in whiteSpace and allowWhiteSpace:
            return False
        return True
    return False
python
{ "resource": "" }
q17675
SimulationRunner.configure_and_build
train
def configure_and_build(self, show_progress=True, optimized=True,
                        skip_configuration=False):
    """
    Configure and build the ns-3 code.

    Args:
        show_progress (bool): whether or not to display a progress bar
            during compilation.
        optimized (bool): whether to use an optimized build. If False,
            use a standard ./waf configure.
        skip_configuration (bool): whether to skip the configuration
            step and only perform compilation.
    """
    # Only configure if necessary
    if not skip_configuration:
        configuration_command = ['python', 'waf', 'configure',
                                 '--enable-examples', '--disable-gtk',
                                 '--disable-python']
        if optimized:
            configuration_command += ['--build-profile=optimized',
                                      '--out=build/optimized']
        # Check whether the path points to a valid installation
        subprocess.call(configuration_command, cwd=self.path,
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # Build ns-3
    build_process = subprocess.Popen(['python', 'waf', 'build'],
                                     cwd=self.path,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
    # Show a progress bar
    if show_progress:
        line_iterator = self.get_build_output(build_process)
        pbar = None
        try:
            [initial, total] = next(line_iterator)
            pbar = tqdm(line_iterator, initial=initial, total=total,
                        unit='file', desc='Building ns-3', smoothing=0)
            for current, total in pbar:
                pbar.n = current
        except StopIteration:
            if pbar is not None:
                pbar.n = pbar.total
    else:
        # Wait for the build to finish anyway
        build_process.communicate()
python
{ "resource": "" }
q17676
SimulationRunner.get_build_output
train
def get_build_output(self, process):
    """
    Parse the output of the ns-3 build process to extract the
    information that is needed to draw the progress bar.

    Args:
        process: the subprocess instance to listen to.
    """
    while True:
        output = process.stdout.readline()
        if output == b'' and process.poll() is not None:
            if process.returncode > 0:
                raise Exception("Compilation ended with an error"
                                ".\nSTDERR\n%s\nSTDOUT\n%s"
                                % (process.stderr.read(),
                                   process.stdout.read()))
            return
        if output:
            # Parse the output to get current and total tasks.
            # This assumes the progress displayed by waf is in the form
            # [current/total]
            matches = re.search(r'\[\s*(\d+?)/(\d+)\].*',
                                output.strip().decode('utf-8'))
            if matches is not None:
                yield [int(matches.group(1)), int(matches.group(2))]
python
{ "resource": "" }
q17677
SimulationRunner.run_simulations
train
def run_simulations(self, parameter_list, data_folder):
    """
    Run several simulations using a certain combination of parameters.

    Yields results as simulations are completed.

    Args:
        parameter_list (list): list of parameter combinations to
            simulate.
        data_folder (str): folder in which to save subfolders containing
            simulation output.
    """
    for idx, parameter in enumerate(parameter_list):
        current_result = {
            'params': {},
            'meta': {}
        }
        current_result['params'].update(parameter)
        command = [self.script_executable] + \
            ['--%s=%s' % (param, value)
             for param, value in parameter.items()]
        # Run from a dedicated temporary folder
        current_result['meta']['id'] = str(uuid.uuid4())
        temp_dir = os.path.join(data_folder, current_result['meta']['id'])
        os.makedirs(temp_dir)
        start = time.time()  # Time execution
        stdout_file_path = os.path.join(temp_dir, 'stdout')
        stderr_file_path = os.path.join(temp_dir, 'stderr')
        with open(stdout_file_path, 'w') as stdout_file, \
                open(stderr_file_path, 'w') as stderr_file:
            return_code = subprocess.call(command, cwd=temp_dir,
                                          env=self.environment,
                                          stdout=stdout_file,
                                          stderr=stderr_file)
        end = time.time()  # Time execution
        if return_code > 0:
            complete_command = [self.script]
            complete_command.extend(command[1:])
            complete_command = "python waf --run \"%s\"" % (
                ' '.join(complete_command))
            with open(stdout_file_path, 'r') as stdout_file, \
                    open(stderr_file_path, 'r') as stderr_file:
                raise Exception(('Simulation exited with an error.\n'
                                 'Params: %s\n'
                                 '\nStderr: %s\n'
                                 'Stdout: %s\n'
                                 'Use this command to reproduce:\n'
                                 '%s' % (parameter, stderr_file.read(),
                                         stdout_file.read(),
                                         complete_command)))
        current_result['meta']['elapsed_time'] = end - start
        yield current_result
python
{ "resource": "" }
q17678
list_param_combinations
train
def list_param_combinations(param_ranges):
    """
    Create a list of all parameter combinations from a dictionary
    specifying desired parameter values as lists.

    Example:
        >>> param_ranges = {'a': [1], 'b': [2, 3]}
        >>> list_param_combinations(param_ranges)
        [{'a': 1, 'b': 2}, {'a': 1, 'b': 3}]

    Additionally, this function is robust in case values are not lists:
        >>> param_ranges = {'a': 1, 'b': [2, 3]}
        >>> list_param_combinations(param_ranges)
        [{'a': 1, 'b': 2}, {'a': 1, 'b': 3}]
    """
    # Convert non-list values to single-element lists.
    # This is required to make sure product works.
    for key in param_ranges:
        if not isinstance(param_ranges[key], list):
            param_ranges[key] = [param_ranges[key]]
    return [dict(zip(param_ranges, v))
            for v in product(*param_ranges.values())]
python
{ "resource": "" }
q17679
get_command_from_result
train
def get_command_from_result(script, result, debug=False):
    """
    Return the command that is needed to obtain a certain result.

    Args:
        script (str): the ns-3 script that was run.
        result (dict): the result, containing a 'params' dictionary of
            parameter: value pairs.
        debug (bool): whether the command should include the debugging
            template.
    """
    if not debug:
        command = "python waf --run \"" + script + " " + " ".join(
            ['--%s=%s' % (param, value)
             for param, value in result['params'].items()]) + "\""
    else:
        command = "python waf --run " + script + \
            " --command-template=\"" + "gdb --args %s " + \
            " ".join(['--%s=%s' % (param, value)
                      for param, value in result['params'].items()]) + "\""
    return command
python
{ "resource": "" }
q17680
automatic_parser
train
def automatic_parser(result, dtypes={}, converters={}):
    """
    Try to automatically convert strings formatted as tables into nested
    list structures.

    Under the hood, this function essentially applies the genfromtxt
    function to all files in the output, and passes it the additional
    kwargs.

    Args:
        result (dict): the result to parse.
        dtypes (dict): a dictionary containing the dtype specification
            to perform parsing for each available filename. See the
            numpy genfromtxt documentation for more details on how to
            format these.
        converters (dict): a dictionary of converter functions to pass
            to genfromtxt for each available filename.
    """
    np.seterr(all='raise')
    parsed = {}
    for filename, contents in result['output'].items():
        if dtypes.get(filename) is None:
            dtypes[filename] = None
        if converters.get(filename) is None:
            converters[filename] = None
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            parsed[filename] = np.genfromtxt(
                io.StringIO(contents),
                dtype=dtypes[filename],
                converters=converters[filename]).tolist()
    return parsed
python
{ "resource": "" }
q17681
CampaignManager.new
train
def new(cls, ns_path, script, campaign_dir, runner_type='Auto',
        overwrite=False, optimized=True, check_repo=True):
    """
    Create a new campaign from an ns-3 installation and a campaign
    directory.

    This method will create a DatabaseManager, which will install a
    database in the specified campaign_dir. If a campaign database is
    already available in the specified campaign_dir and it was created
    for the same script, that campaign is loaded and used instead. If the
    overwrite argument is set to True, the specified directory is wiped
    and a new campaign is created in its place.

    Furthermore, this method will initialize a SimulationRunner, of type
    specified by the runner_type parameter, which will be locked on the
    ns-3 installation at ns_path and set up to run the desired script.

    Finally, note that creation of a campaign requires a git repository
    to be initialized at the specified ns_path. This will allow SEM to
    save the commit at which the simulations are run, enforce
    reproducibility and avoid mixing results coming from different
    versions of ns-3 and its libraries.

    Args:
        ns_path (str): path to the ns-3 installation to employ in this
            campaign.
        script (str): ns-3 script that will be executed to run
            simulations.
        campaign_dir (str): path to the directory in which to save the
            simulation campaign database.
        runner_type (str): implementation of the SimulationRunner to
            use. Value can be: SimulationRunner (for running sequential
            simulations locally), ParallelRunner (for running parallel
            simulations locally), GridRunner (for running simulations
            using a DRMAA-compatible parallel task scheduler). Use Auto
            to automatically pick the best runner.
        overwrite (bool): whether to overwrite already existing
            campaign_dir folders. This deletes the directory if and only
            if it only contains files that were detected to be created
            by sem.
        optimized (bool): whether to configure the runner to employ an
            optimized ns-3 build.
        check_repo (bool): whether to record the current git commit of
            the ns-3 repository and verify it before running simulations.
    """
    # Convert paths to be absolute
    ns_path = os.path.abspath(ns_path)
    campaign_dir = os.path.abspath(campaign_dir)

    # Verify if the specified campaign is already available
    if Path(campaign_dir).exists() and not overwrite:
        # Try loading
        manager = CampaignManager.load(campaign_dir, ns_path,
                                       runner_type=runner_type,
                                       optimized=optimized,
                                       check_repo=check_repo)

        if manager.db.get_script() == script:
            return manager
        else:
            del manager

    # Initialize runner
    runner = CampaignManager.create_runner(ns_path, script,
                                           runner_type=runner_type,
                                           optimized=optimized)

    # Get list of parameters to save in the DB
    params = runner.get_available_parameters()

    # Get current commit
    commit = ""
    if check_repo:
        from git import Repo
        commit = Repo(ns_path).head.commit.hexsha

    # Create a database manager from the configuration
    db = DatabaseManager.new(script=script,
                             params=params,
                             commit=commit,
                             campaign_dir=campaign_dir,
                             overwrite=overwrite)

    return cls(db, runner, check_repo)
python
{ "resource": "" }
q17682
CampaignManager.load
train
def load(cls, campaign_dir, ns_path=None, runner_type='Auto',
         optimized=True, check_repo=True):
    """
    Load an existing simulation campaign.

    Note that specifying an ns-3 installation is not compulsory when
    using this method: existing results will be available, but an
    ns_path (or a manually assigned SimulationRunner) is required in
    order to run additional simulations.

    Args:
        campaign_dir (str): path to the directory in which to save the
            simulation campaign database.
        ns_path (str): path to the ns-3 installation to employ in this
            campaign.
        runner_type (str): implementation of the SimulationRunner to
            use. Value can be: SimulationRunner (for running sequential
            simulations locally), ParallelRunner (for running parallel
            simulations locally), GridRunner (for running simulations
            using a DRMAA-compatible parallel task scheduler).
        optimized (bool): whether to configure the runner to employ an
            optimized ns-3 build.
        check_repo (bool): whether to verify the current git commit of
            the ns-3 repository before running simulations.
    """
    # Convert paths to be absolute
    if ns_path is not None:
        ns_path = os.path.abspath(ns_path)
    campaign_dir = os.path.abspath(campaign_dir)

    # Read the existing configuration into the new DatabaseManager
    db = DatabaseManager.load(campaign_dir)
    script = db.get_script()

    runner = None
    if ns_path is not None:
        runner = CampaignManager.create_runner(ns_path, script,
                                               runner_type, optimized)

    return cls(db, runner, check_repo)
python
{ "resource": "" }
q17683
CampaignManager.create_runner
train
def create_runner(ns_path, script, runner_type='Auto', optimized=True):
    """
    Create a SimulationRunner from a string containing the desired
    class implementation, and return it.

    Args:
        ns_path (str): path to the ns-3 installation to employ in this
            SimulationRunner.
        script (str): ns-3 script that will be executed to run
            simulations.
        runner_type (str): implementation of the SimulationRunner to
            use. Value can be: SimulationRunner (for running sequential
            simulations locally), ParallelRunner (for running parallel
            simulations locally), GridRunner (for running simulations
            using a DRMAA-compatible parallel task scheduler). If Auto,
            automatically pick the best available runner (GridRunner if
            DRMAA is available, ParallelRunner otherwise).
        optimized (bool): whether to configure the runner to employ an
            optimized ns-3 build.
    """
    if runner_type == 'Auto' and DRMAA_AVAILABLE:
        runner_type = 'GridRunner'
    elif runner_type == 'Auto':
        runner_type = 'ParallelRunner'
    # The runner classes are defined at module level, so we can resolve
    # the desired class object from its name through globals().
    runner_class = globals().get(runner_type)
    if runner_class is None:
        raise ValueError("Unknown runner_type: %s" % runner_type)
    return runner_class(ns_path, script, optimized=optimized)
python
{ "resource": "" }
q17684
CampaignManager.run_simulations
train
def run_simulations(self, param_list, show_progress=True):
    """
    Run several simulations specified by a list of parameter combinations.

    Note: this function does not verify whether we already have the
    required simulations in the database - it just runs all the parameter
    combinations that are specified in the list.

    Args:
        param_list (list): list of parameter combinations to execute.
            Items of this list are dictionaries, with one key for each
            parameter, and a value specifying the parameter value (which
            can be either a string or a number).
        show_progress (bool): whether or not to show a progress bar with
            percentage and expected remaining time.
    """
    # Make sure we have a runner to run simulations with.
    # This can happen in case the simulation campaign is loaded and not
    # created from scratch.
    if self.runner is None:
        raise Exception("No runner was ever specified"
                        " for this CampaignManager.")

    # Return if the list is empty
    if not param_list:
        return

    # Check all parameter combinations fully specify the desired simulation
    desired_params = self.db.get_params()
    for p in param_list:
        # Besides the parameters that were actually passed, we add the ones
        # that are always available in every script
        passed = list(p.keys())
        available = ['RngRun'] + desired_params
        if set(passed) != set(available):
            raise ValueError("Specified parameter combination does not "
                             "match the supported parameters:\n"
                             "Passed: %s\nSupported: %s"
                             % (sorted(passed), sorted(available)))

    # Check that the current repo commit corresponds to the one specified
    # in the campaign
    if self.check_repo:
        self.check_repo_ok()

    # Build ns-3 before running any simulations
    # At this point, we can assume the project was already configured
    self.runner.configure_and_build(skip_configuration=True)

    # Shuffle simulations (on a copy, to avoid mutating the caller's list)
    # This mixes up long and short simulations, and gives better time
    # estimates.
    param_list = list(param_list)
    shuffle(param_list)

    # Offload simulation execution to self.runner
    # Note that this only creates a generator for the results, no
    # computation is performed on this line.
    results = self.runner.run_simulations(param_list,
                                          self.db.get_data_dir())

    # Wrap the result generator in the progress bar generator.
    if show_progress:
        result_generator = tqdm(results, total=len(param_list),
                                unit='simulation',
                                desc='Running simulations')
    else:
        result_generator = results

    # Insert result object in db. Using the generator here ensures we
    # save results as they are finalized by the SimulationRunner, and
    # that they are kept even if execution is terminated abruptly by
    # crashes or by a KeyboardInterrupt.
    for result in result_generator:
        self.db.insert_result(result)
python
{ "resource": "" }
q17685
CampaignManager.get_missing_simulations
train
def get_missing_simulations(self, param_list, runs=None):
    """
    Return a list of the simulations among the required ones that are not
    available in the database.

    Args:
        param_list (list): a list of dictionaries containing all the
            parameters combinations.
        runs (int): an integer representing how many repetitions are
            wanted for each parameter combination, None if the
            dictionaries in param_list already feature the desired
            RngRun value.
    """
    params_to_simulate = []

    if runs is not None:
        # Get next available runs from the database
        next_runs = self.db.get_next_rngruns()
        available_params = [r['params'] for r in self.db.get_results()]

        for param_comb in param_list:
            # Count how many results for this parameter combination are
            # already available in the database, ignoring the RngRun
            # value
            needed_runs = runs
            for p in available_params:
                if param_comb == {k: p[k] for k in p.keys()
                                  if k != "RngRun"}:
                    needed_runs -= 1

            new_param_combs = []
            for needed_run in range(needed_runs):
                # Here it's important that we make copies of the
                # dictionaries, so that if we modify one we don't modify
                # the others. This is necessary because after this step,
                # typically, we will add the RngRun key which must be
                # different for each copy.
                new_param = deepcopy(param_comb)
                new_param['RngRun'] = next(next_runs)
                new_param_combs += [new_param]

            params_to_simulate += new_param_combs
    else:
        for param_comb in param_list:
            if not self.db.get_results(param_comb):
                params_to_simulate += [param_comb]

    return params_to_simulate
python
{ "resource": "" }
q17686
CampaignManager.run_missing_simulations
train
def run_missing_simulations(self, param_list, runs=None): """ Run the simulations from the parameter list that are not yet available in the database. This function also makes sure that we have at least runs replications for each parameter combination. Additionally, param_list can either be a list containing the desired parameter combinations or a dictionary containing multiple values for each parameter, to be expanded into a list. Args: param_list (list, dict): either a list of parameter combinations or a dictionary to be expanded into a list through the list_param_combinations function. runs (int): the number of runs to perform for each parameter combination. This parameter is only allowed if the param_list specification doesn't feature an 'RngRun' key already. """ # If we are passed a dictionary, we need to expand this if isinstance(param_list, dict): param_list = list_param_combinations(param_list) # If we are passed a list already, just run the missing simulations self.run_simulations( self.get_missing_simulations(param_list, runs))
python
{ "resource": "" }
q17687
CampaignManager.get_results_as_numpy_array
train
def get_results_as_numpy_array(self, parameter_space, result_parsing_function, runs): """ Return the results relative to the desired parameter space in the form of a numpy array. Args: parameter_space (dict): dictionary containing parameter/list-of-values pairs. result_parsing_function (function): user-defined function, taking a result dictionary as argument, that can be used to parse the result files and return a list of values. runs (int): number of runs to gather for each parameter combination. """ return np.array(self.get_space(self.db.get_complete_results(), {}, parameter_space, runs, result_parsing_function))
python
{ "resource": "" }
q17688
CampaignManager.save_to_mat_file
train
def save_to_mat_file(self, parameter_space, result_parsing_function,
                     filename, runs):
    """
    Save the results relative to the desired parameter space to a .mat
    file.

    Args:
        parameter_space (dict): dictionary containing
            parameter/list-of-values pairs.
        result_parsing_function (function): user-defined function, taking
            a result dictionary as argument, that can be used to parse
            the result files and return a list of values.
        filename (path): name of output .mat file.
        runs (int): number of runs to gather for each parameter
            combination.
    """
    # Make sure all values are lists
    for key in parameter_space:
        if not isinstance(parameter_space[key], list):
            parameter_space[key] = [parameter_space[key]]

    # Add a dimension label for each non-singular dimension
    dimension_labels = [{key: str(parameter_space[key])} for key in
                        parameter_space.keys() if
                        len(parameter_space[key]) > 1] + [
                            {'runs': range(runs)}]

    # Save the results array together with its dimension labels
    return savemat(
        filename,
        {'results': self.get_results_as_numpy_array(parameter_space,
                                                    result_parsing_function,
                                                    runs=runs),
         'dimension_labels': dimension_labels})
python
{ "resource": "" }
q17689
CampaignManager.save_to_npy_file
train
def save_to_npy_file(self, parameter_space, result_parsing_function,
                     filename, runs):
    """
    Save the results relative to the desired parameter space to a .npy
    (numpy array) file.

    Args:
        parameter_space (dict): dictionary containing
            parameter/list-of-values pairs.
        result_parsing_function (function): user-defined function, taking
            a result dictionary as argument, that can be used to parse
            the result files and return a list of values.
        filename (path): name of the output .npy file.
        runs (int): number of runs to gather for each parameter
            combination.
    """
    np.save(filename, self.get_results_as_numpy_array(
        parameter_space, result_parsing_function, runs=runs))
python
{ "resource": "" }
q17690
CampaignManager.save_to_folders
train
def save_to_folders(self, parameter_space, folder_name, runs): """ Save results to a folder structure. """ self.space_to_folders(self.db.get_results(), {}, parameter_space, runs, folder_name)
python
{ "resource": "" }
q17691
CampaignManager.space_to_folders
train
def space_to_folders(self, current_result_list, current_query,
                     param_space, runs, current_directory):
    """
    Convert a parameter space specification to a directory tree with a
    nested structure.
    """
    # Base case: we iterate over the runs and copy files in the final
    # directory.
    if not param_space:
        for run, r in enumerate(current_result_list[:runs]):
            files = self.db.get_result_files(r)
            new_dir = os.path.join(current_directory, "run=%s" % run)
            os.makedirs(new_dir, exist_ok=True)
            for filename, filepath in files.items():
                shutil.copyfile(filepath, os.path.join(new_dir, filename))
        return

    [key, value] = list(param_space.items())[0]

    # Iterate over the values of the current parameter
    for v in value:
        # For each value, recur 'fixing' that dimension in the query
        next_query = deepcopy(current_query)
        next_query[key] = v

        # Create a folder for this parameter value
        folder_name = ("%s=%s" % (key, v)).replace('/', '_')
        new_dir = os.path.join(current_directory, folder_name)
        os.makedirs(new_dir, exist_ok=True)

        next_param_space = deepcopy(param_space)
        del next_param_space[key]

        next_result_list = [r for r in current_result_list
                            if self.satisfies_query(r, next_query)]

        self.space_to_folders(next_result_list, next_query,
                              next_param_space, runs, new_dir)
python
{ "resource": "" }
q17692
CampaignManager.get_results_as_xarray
train
def get_results_as_xarray(self, parameter_space,
                          result_parsing_function, output_labels, runs):
    """
    Return the results relative to the desired parameter space in the
    form of an xarray data structure.

    Args:
        parameter_space (dict): The space of parameters to export.
        result_parsing_function (function): user-defined function, taking
            a result dictionary as argument, that can be used to parse
            the result files and return a list of values.
        output_labels (list): a list of labels to apply to the results
            dimensions, output by the result_parsing_function.
        runs (int): the number of runs to export for each parameter
            combination.
    """
    np_array = np.array(
        self.get_space(
            self.db.get_complete_results(), {},
            collections.OrderedDict(parameter_space), runs,
            result_parsing_function))

    # Extend the parameter space with the runs dimension and, if multiple
    # output labels are available, with the metrics dimension
    clean_parameter_space = collections.OrderedDict(parameter_space)
    clean_parameter_space['runs'] = range(runs)

    if isinstance(output_labels, list):
        clean_parameter_space['metrics'] = output_labels

    xr_array = xr.DataArray(np_array, coords=clean_parameter_space,
                            dims=list(clean_parameter_space.keys()))

    return xr_array
python
{ "resource": "" }
q17693
run
train
def run(ns_3_path, results_dir, script, no_optimization, parameters,
        max_processes):
    """
    Run multiple simulations.
    """
    sem.parallelrunner.MAX_PARALLEL_PROCESSES = max_processes

    # Create a campaign
    campaign = sem.CampaignManager.new(ns_3_path, script, results_dir,
                                       overwrite=False,
                                       optimized=not no_optimization)

    # Print campaign info
    click.echo(campaign)

    # Retrieve the script's parameters and their default values
    [params, defaults] = zip(*get_params_and_defaults(campaign.db.get_params(),
                                                      campaign.db))

    # Check whether we need to read parameters from the command line
    if not parameters:
        # Substitute non-None defaults with their string representation
        # This will be then converted back to a Python data structure in
        # query_parameters
        string_defaults = []
        for d in defaults:
            if d is not None:
                string_defaults.append(str(d))
            else:
                string_defaults.append(d)
        script_params = query_parameters(params, defaults=string_defaults)
    else:
        script_params = import_parameters_from_file(parameters)

    # Finally, run the simulations
    campaign.run_missing_simulations(script_params,
                                     runs=click.prompt("Total runs",
                                                       type=int))
python
{ "resource": "" }
q17694
view
train
def view(results_dir, result_id, hide_simulation_output, parameters,
         no_pager):
    """
    View results of simulations.
    """
    campaign = sem.CampaignManager.load(results_dir)

    # Pick the most appropriate function based on the level of detail we want
    if hide_simulation_output:
        get_results_function = campaign.db.get_results
    else:
        get_results_function = campaign.db.get_complete_results

    # If a result id was specified, just query for that result
    if result_id:
        output = '\n\n\n'.join([pprint.pformat(item) for item in
                                get_results_function(result_id=result_id)])
    else:
        [params, defaults] = zip(*get_params_and_defaults(
            campaign.db.get_params(), campaign.db))
        if not parameters:
            # Convert the defaults to their string representation
            string_defaults = [str(d) for d in defaults]
            script_params = query_parameters(params, string_defaults)
        else:
            script_params = import_parameters_from_file(parameters)

        # Perform the search
        output = '\n\n\n'.join([pprint.pformat(item) for item in
                                get_results_function(script_params)])

    # Print the results
    if no_pager:
        click.echo(output)
    else:
        click.echo_via_pager(output)
python
{ "resource": "" }
q17695
command
train
def command(results_dir, result_id): """ Print the command that needs to be used to reproduce a result. """ campaign = sem.CampaignManager.load(results_dir) result = campaign.db.get_results(result_id=result_id)[0] click.echo("Simulation command:") click.echo(sem.utils.get_command_from_result(campaign.db.get_script(), result)) click.echo("Debug command:") click.echo(sem.utils.get_command_from_result(campaign.db.get_script(), result, debug=True))
python
{ "resource": "" }
q17696
export
train
def export(results_dir, filename, do_not_try_parsing, parameters):
    """
    Export results to file.

    The extension of filename determines the export format. If no
    extension is specified, a directory tree export is used. Note that
    this command automatically tries to parse the simulation output.

    Supported extensions: .mat (Matlab file), .npy (Numpy file), no
    extension (Directory tree)
    """
    # Get the extension
    _, extension = os.path.splitext(filename)

    campaign = sem.CampaignManager.load(results_dir)

    [params, defaults] = zip(*get_params_and_defaults(campaign.db.get_params(),
                                                      campaign.db))

    if do_not_try_parsing:
        parsing_function = None
    else:
        parsing_function = sem.utils.automatic_parser

    if not parameters:
        # Convert the defaults to their string representation
        string_defaults = [str(d) for d in defaults]
        parameter_query = query_parameters(params, string_defaults)
    else:
        # Import specified parameter file
        parameter_query = import_parameters_from_file(parameters)

    if extension == ".mat":
        campaign.save_to_mat_file(parameter_query, parsing_function,
                                  filename, runs=click.prompt("Runs",
                                                              type=int))
    elif extension == ".npy":
        campaign.save_to_npy_file(parameter_query, parsing_function,
                                  filename, runs=click.prompt("Runs",
                                                              type=int))
    elif extension == "":
        campaign.save_to_folders(parameter_query, filename,
                                 runs=click.prompt("Runs", type=int))
    else:  # Unrecognized format
        raise ValueError("Format not recognized")
python
{ "resource": "" }
q17697
merge
train
def merge(move, output_dir, sources):
    """
    Merge multiple results folders into one, by copying the results over
    to a new folder.

    For a faster operation (which on the other hand destroys the campaign
    data if interrupted), the move option can be used to directly move
    results to the new folder.
    """
    # Get paths for all campaign JSONs
    jsons = []
    for s in sources:
        filename = "%s.json" % os.path.split(s)[1]
        jsons += [os.path.join(s, filename)]

    # Check that the configuration for all campaigns is the same
    reference_config = TinyDB(jsons[0]).table('config')
    for other_json in jsons[1:]:
        other_config = TinyDB(other_json).table('config')
        for reference_doc, other_doc in zip(reference_config.all(),
                                            other_config.all()):
            assert reference_doc == other_doc

    # Create folders for the new results directory
    filename = "%s.json" % os.path.split(output_dir)[1]
    output_json = os.path.join(output_dir, filename)
    output_data = os.path.join(output_dir, 'data')
    os.makedirs(output_data)

    # Create the new database
    db = TinyDB(output_json)
    db.table('config').insert_multiple(reference_config.all())

    # Import results from all databases into the new JSON file
    for s in sources:
        filename = "%s.json" % os.path.split(s)[1]
        current_db = TinyDB(os.path.join(s, filename))
        db.table('results').insert_multiple(current_db.table('results').all())

    # Copy or move results to the new data folder
    for s in sources:
        for r in glob.glob(os.path.join(s, 'data/*')):
            basename = os.path.basename(r)
            if move:
                shutil.move(r, os.path.join(output_data, basename))
            else:
                shutil.copytree(r, os.path.join(output_data, basename))

    if move:
        # Remove the source campaigns entirely. Note that rmtree neither
        # expands globs nor removes single files, so we delete each
        # campaign directory as a whole.
        for s in sources:
            shutil.rmtree(s)
python
{ "resource": "" }
q17698
query_parameters
train
def query_parameters(param_list, defaults=None):
    """
    Ask the user for parameters. If available, propose some defaults.

    Args:
        param_list (list): List of parameters to ask the user for values.
        defaults (list): A list of proposed defaults. It must be a list
            of the same length as param_list. A value of None in one
            element of the list means that no default will be proposed
            for the corresponding parameter.
    """
    # Without defaults, propose nothing for any parameter (zip would
    # otherwise fail on a None argument)
    if defaults is None:
        defaults = [None] * len(param_list)
    script_params = collections.OrderedDict([k, []] for k in param_list)

    for param, default in zip(list(script_params.keys()), defaults):
        user_input = click.prompt("%s" % param, default=default)
        script_params[param] = ast.literal_eval(user_input)

    return script_params
python
{ "resource": "" }
q17699
import_parameters_from_file
train
def import_parameters_from_file(parameters_file): """ Try importing a parameter dictionary from file. We expect values in parameters_file to be defined as follows: param1: value1 param2: [value2, value3] """ params = {} with open(parameters_file, 'r') as f: matches = re.findall('(.*): (.*)', f.read()) for m in matches: params[m[0]] = ast.literal_eval(m[1]) return params
python
{ "resource": "" }