code                      string
signature                 string
docstring                 string
loss_without_docstring    float64
loss_with_docstring       float64
factor                    float64
import urllib.request

xmlfile = urllib.request.urlopen('http://' + self._host + '/DbXmlInfo.xml')
xml_db = xmlfile.read()
xmlfile.close()
_LOGGER.info("Loaded xml db")
parser = LutronXmlDbParser(lutron=self, xml_db_str=xml_db)
assert(parser.parse())  # throw our own exception
self._areas = parser.areas
self._name = parser.project_name
_LOGGER.info('Found Lutron project: %s, %d areas' % (
    self._name, len(self.areas)))
return True
def load_xml_db(self)
Load the Lutron database from the server.
5.698812
4.959686
1.149027
ev = threading.Event()
first = False
with self.__lock:
    if len(self.__events) == 0:
        first = True
    self.__events.append(ev)
if first:
    action()
return ev
def request(self, action)
Request an action to be performed, in case one is not already pending. Only the first requester triggers the action; every caller receives an event to wait on.
4.160969
3.83634
1.084619
for handler, context in self._subscribers:
    handler(self, context, event, params)
def _dispatch_event(self, event: LutronEvent, params: Dict)
Dispatches the specified event to all the subscribers.
14.654867
7.370922
1.9882
self._subscribers.append((handler, context))
def subscribe(self, handler: LutronEventHandler, context)
Subscribes to events from this entity.

handler: A callable object that takes the following arguments (in order):
    obj: the LutronEntity object that generated the event
    context: user-supplied (to subscribe()) context object
    event: the LutronEvent that was generated
    params: a dict of event-specific parameters
context: User-supplied, opaque object that will be passed to handler.
14.637045
12.615953
1.160201
_LOGGER.debug("handle_update %d -- %s" % (self._integration_id, args)) state = int(args[0]) if state != Output._ACTION_ZONE_LEVEL: return False level = float(args[1]) _LOGGER.debug("Updating %d(%s): s=%d l=%f" % ( self._integration_id, self._name, state, level)) self._level = level self._query_waiters.notify() self._dispatch_event(Output.Event.LEVEL_CHANGED, {'level': self._level}) return True
def handle_update(self, args)
Handles an event update for this object, e.g. dimmer level change.
6.076525
5.536792
1.097481
self._lutron.send(Lutron.OP_QUERY, Output._CMD_TYPE, self._integration_id, Output._ACTION_ZONE_LEVEL)
def __do_query_level(self)
Helper to perform the actual query of the current dimmer level of the output. For pure on/off loads the result is either 0.0 or 100.0.
44.463638
29.445341
1.51004
ev = self._query_waiters.request(self.__do_query_level)
ev.wait(1.0)
return self._level
def level(self)
Returns the current output level by querying the remote controller.
19.375813
14.136463
1.370627
if self._level == new_level:
    return
self._lutron.send(Lutron.OP_EXECUTE, Output._CMD_TYPE, self._integration_id,
                  Output._ACTION_ZONE_LEVEL, "%.2f" % new_level)
self._level = new_level
def level(self, new_level)
Sets the new output level.
11.254675
10.325789
1.089958
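A minimal usage sketch for the Output level API above, assuming `output` is an Output object obtained from a connected Lutron controller (names are illustrative): the getter round-trips a query to the controller (waiting up to one second), the setter sends an execute command.

def on_level_changed(obj, context, event, params):
    # Fires whenever the dimmer reports a new level (LEVEL_CHANGED).
    print('%s is now at %.1f%%' % (obj.name, params['level']))

output.subscribe(on_level_changed, context=None)
print(output.level)   # queries the controller, waits up to 1s
output.level = 75.0   # sends an execute command to set the level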
_LOGGER.debug('Keypad: "%s" Handling "%s" Action: %s Params: %s' % (
    self._keypad.name, self.name, action, params))
return False
def handle_update(self, action, params)
Handle the specified action on this component.
11.491642
10.373573
1.10778
self._lutron.send(Lutron.OP_EXECUTE, Keypad._CMD_TYPE, self._keypad.id, self.component_number, Button._ACTION_PRESS)
def press(self)
Triggers a simulated button press on the Keypad.
29.755526
21.08433
1.411263
_LOGGER.debug('Keypad: "%s" %s Action: %s Params: %s' % (
    self._keypad.name, self, action, params))
ev_map = {
    Button._ACTION_PRESS: Button.Event.PRESSED,
    Button._ACTION_RELEASE: Button.Event.RELEASED,
}
if action not in ev_map:
    # use %s for the keypad name (a string, not an int)
    _LOGGER.debug("Unknown action %d for button %d in keypad %s" % (
        action, self.number, self._keypad.name))
    return False
self._dispatch_event(ev_map[action], {})
return True
def handle_update(self, action, params)
Handle the specified action on this component.
4.489134
4.341913
1.033907
self._lutron.send(Lutron.OP_QUERY, Keypad._CMD_TYPE, self._keypad.id, self.component_number, Led._ACTION_LED_STATE)
def __do_query_state(self)
Helper to perform the actual query for the current LED state.
32.077454
24.687185
1.299356
ev = self._query_waiters.request(self.__do_query_state)
ev.wait(1.0)
return self._state
def state(self)
Returns the current LED state by querying the remote controller.
18.187151
13.286356
1.368859
self._lutron.send(Lutron.OP_EXECUTE, Keypad._CMD_TYPE, self._keypad.id,
                  self.component_number, Led._ACTION_LED_STATE,
                  int(new_state))
self._state = new_state
def state(self, new_state: bool)
Sets the new LED state. new_state: bool
16.27458
13.013476
1.250594
_LOGGER.debug('Keypad: "%s" %s Action: %s Params: %s' % (
    self._keypad.name, self, action, params))
if action != Led._ACTION_LED_STATE:
    # use %s for the keypad name (a string, not an int)
    _LOGGER.debug("Unknown action %d for led %d in keypad %s" % (
        action, self.number, self._keypad.name))
    return False
elif len(params) < 1:
    _LOGGER.debug("Unknown params %s (action %d on led %d in keypad %s)" % (
        params, action, self.number, self._keypad.name))
    return False
self._state = bool(params[0])
self._query_waiters.notify()
self._dispatch_event(Led.Event.STATE_CHANGED, {'state': self._state})
return True
def handle_update(self, action, params)
Handle the specified action on this component.
4.238521
4.18215
1.013479
self._buttons.append(button)
self._components[button.component_number] = button
def add_button(self, button)
Adds a button that's part of this keypad. We'll use this to dispatch button events.
7.932051
6.9647
1.138893
self._leds.append(led)
self._components[led.component_number] = led
def add_led(self, led)
Add an LED that's part of this keypad.
7.077155
5.851945
1.209368
component = int(args[0])
action = int(args[1])
params = [int(x) for x in args[2:]]
_LOGGER.debug("Updating %d(%s): c=%d a=%d params=%s" % (
    self._integration_id, self._name, component, action, params))
if component in self._components:
    return self._components[component].handle_update(action, params)
return False
def handle_update(self, args)
The callback invoked by the main event loop if there's an event from this keypad.
3.613041
3.448542
1.047701
start = ''
if isinstance(pattern, six.string_types):
    start = pattern[:2]
    pattern = InputStream(pattern)

if not start:
    start = pattern.readline()[:2]
    pattern.seek(0)

parseErrListener = STIXPatternErrorListener()

lexer = STIXPatternLexer(pattern)
# it always adds a console listener by default... remove it.
lexer.removeErrorListeners()

stream = CommonTokenStream(lexer)

parser = STIXPatternParser(stream)
parser.buildParseTrees = False
# it always adds a console listener by default... remove it.
parser.removeErrorListeners()
parser.addErrorListener(parseErrListener)

# To improve error messages, replace "<INVALID>" in the literal
# names with symbolic names. This is a hack, but seemed like
# the simplest workaround.
for i, lit_name in enumerate(parser.literalNames):
    if lit_name == u"<INVALID>":
        parser.literalNames[i] = parser.symbolicNames[i]

parser.pattern()

# replace with easier-to-understand error message
if not (start[0] == '[' or start == '(['):
    parseErrListener.err_strings[0] = "FAIL: Error found at line 1:0. " \
                                      "input is missing square brackets"

return parseErrListener.err_strings
def run_validator(pattern)
Validates a pattern against the STIX Pattern grammar. Error messages are returned in a list. The test passes if the returned list is empty.
4.813983
4.613799
1.043388
errs = run_validator(user_input)
passed = len(errs) == 0

if print_errs:
    for err in errs:
        print(err)

if ret_errs:
    return passed, errs

return passed
def validate(user_input, ret_errs=False, print_errs=False)
Wrapper for run_validator function that returns True if the user_input contains a valid STIX pattern or False otherwise. The error messages may also be returned or printed based upon the ret_errs and print_errs arg values.
2.818723
2.887794
0.976082
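A quick usage sketch for `validate` above; the import path is an assumption and the pattern literal is just an illustrative example.

from stix2patterns.validator import validate  # assumed import path

# Valid STIX patterns are wrapped in square brackets.
ok, errs = validate("[ipv4-addr:value = '198.51.100.1']", ret_errs=True)
if not ok:
    for err in errs:
        print(err)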
parser = argparse.ArgumentParser(description='Validate STIX Patterns.')
parser.add_argument('-f', '--file',
                    help="Specify this arg to read patterns from a file.",
                    type=argparse.FileType("r"))
args = parser.parse_args()

pass_count = fail_count = 0

# I tried using a generator (where each iteration would run raw_input()),
# but raw_input()'s behavior seems to change when called from within a
# generator: I only get one line, then the generator completes! I don't
# know why behavior changes...
import functools
if args.file:
    nextpattern = args.file.readline
else:
    nextpattern = functools.partial(six.moves.input,
                                    "Enter a pattern to validate: ")

try:
    while True:
        pattern = nextpattern()
        if not pattern:
            break
        tests_passed, err_strings = validate(pattern, True)
        if tests_passed:
            print("\nPASS: %s" % pattern)
            pass_count += 1
        else:
            for err in err_strings:
                print(err, '\n')
            fail_count += 1
except (EOFError, KeyboardInterrupt):
    pass
finally:
    if args.file:
        args.file.close()

print("\nPASSED:", pass_count, " patterns")
print("FAILED:", fail_count, " patterns")
def main()
Continues to validate patterns until it encounters EOF within a pattern file or Ctrl-C is pressed by the user.
4.236356
4.023017
1.05303
token_text = string_literal_token.getText()
return token_text[1:-1].replace(u"\\'", u"'").replace(u"\\\\", u"\\")
def _string_literal_to_string(string_literal_token)
Converts the StringLiteral token to a plain string: gets the text content, removes the quote characters, and unescapes it.

:param string_literal_token: The string literal token
:return: The plain string
4.001961
4.248976
0.941865
if 'html' in request.GET:
    # Output HTML
    content = self.render_html(*args, **kwargs)
    return HttpResponse(content)
else:
    # Output PDF
    content = self.render_pdf(*args, **kwargs)
    response = HttpResponse(content, content_type='application/pdf')
    if (not self.inline or 'download' in request.GET) and 'inline' not in request.GET:
        response['Content-Disposition'] = 'attachment; filename=%s' % self.get_filename()
    response['Content-Length'] = len(content)
    return response
def get(self, request, *args, **kwargs)
Return an HttpResponse of either a PDF file or HTML. :rtype: HttpResponse
2.634764
2.430835
1.083893
html = self.render_html(*args, **kwargs)

options = self.get_pdfkit_options()
if 'debug' in self.request.GET and settings.DEBUG:
    options['debug-javascript'] = 1

kwargs = {}
wkhtmltopdf_bin = os.environ.get('WKHTMLTOPDF_BIN')
if wkhtmltopdf_bin:
    kwargs['configuration'] = pdfkit.configuration(wkhtmltopdf=wkhtmltopdf_bin)

pdf = pdfkit.from_string(html, False, options, **kwargs)
return pdf
def render_pdf(self, *args, **kwargs)
Render the PDF and return it as bytes. :rtype: bytes
2.64617
2.702674
0.979093
if self.filename is None:
    name = splitext(basename(self.template_name))[0]
    return '{}.pdf'.format(name)
return self.filename
def get_filename(self)
Return ``self.filename`` if set otherwise return the template basename with a ``.pdf`` extension. :rtype: str
4.846247
3.62485
1.336951
static_url = '%s://%s%s' % (self.request.scheme, self.request.get_host(), settings.STATIC_URL)
media_url = '%s://%s%s' % (self.request.scheme, self.request.get_host(), settings.MEDIA_URL)

with override_settings(STATIC_URL=static_url, MEDIA_URL=media_url):
    template = loader.get_template(self.template_name)
    context = self.get_context_data(*args, **kwargs)
    html = template.render(context)
return html
def render_html(self, *args, **kwargs)
Renders the template. :rtype: str
1.737579
1.735687
1.00109
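A minimal sketch of how the view methods above compose in a Django project; the base class name `PDFView`, the subclass, and the template name are assumptions for illustration.

# Hypothetical subclass of the PDF view class the methods above belong to.
class InvoicePDFView(PDFView):
    template_name = 'invoice.html'   # rendered by render_html()
    filename = None                  # get_filename() falls back to 'invoice.pdf'
    inline = False                   # served as an attachment by default

# urls.py: GET /invoice/ returns the PDF; /invoice/?html=1 returns the HTML.
# path('invoice/', InvoicePDFView.as_view())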
inspector = stix2patterns.inspector.InspectionListener()
self.walk(inspector)
return inspector.pattern_data()
def inspect(self)
Inspect a pattern. This gives information regarding the sorts of operations, content, etc in use in the pattern. :return: Pattern information
28.422827
22.655315
1.254577
antlr4.ParseTreeWalker.DEFAULT.walk(listener, self.__parse_tree)
def walk(self, listener)
Walk the parse tree, using the given listener. The listener should be a stix2patterns.grammars.STIXPatternListener.STIXPatternListener (or subclass) instance.
12.328958
8.707715
1.415866
in_ = antlr4.InputStream(pattern_str)
lexer = STIXPatternLexer(in_)
lexer.removeErrorListeners()  # remove the default "console" listener
token_stream = antlr4.CommonTokenStream(lexer)

parser = STIXPatternParser(token_stream)
parser.removeErrorListeners()  # remove the default "console" listener
error_listener = ParserErrorListener()
parser.addErrorListener(error_listener)

# I found no public API for this...
# The default error handler tries to keep parsing, and I don't
# think that's appropriate here. (These error handlers are only for
# handling the built-in RecognitionException errors.)
parser._errHandler = antlr4.BailErrorStrategy()

# To improve error messages, replace "<INVALID>" in the literal
# names with symbolic names. This is a hack, but seemed like
# the simplest workaround.
for i, lit_name in enumerate(parser.literalNames):
    if lit_name == u"<INVALID>":
        parser.literalNames[i] = parser.symbolicNames[i]

# parser.setTrace(True)

try:
    tree = parser.pattern()
    # print(tree.toStringTree(recog=parser))
    return tree
except antlr4.error.Errors.ParseCancellationException as e:
    # The cancellation exception wraps the real RecognitionException
    # which caused the parser to bail.
    real_exc = e.args[0]

    # I want to bail when the first error is hit. But I also want
    # a decent error message. When an error is encountered in
    # Parser.match(), the BailErrorStrategy produces the
    # ParseCancellationException. It is not a subclass of
    # RecognitionException, so none of the 'except' clauses which would
    # normally report an error are invoked.
    #
    # Error message creation is buried in the ErrorStrategy, and I can
    # (ab)use the API to get a message: register an error listener with
    # the parser, force an error report, then get the message out of the
    # listener. Error listener registration is above; now we force its
    # invocation. Wish this could be cleaner...
    parser._errHandler.reportError(parser, real_exc)

    # should probably chain exceptions if we can...
    # Should I report the cancellation or recognition exception as the
    # cause...?
    six.raise_from(ParseException(error_listener.error_message), real_exc)
def __do_parse(self, pattern_str)
Parses the given pattern and returns the antlr parse tree. :param pattern_str: The STIX pattern :return: The parse tree :raises ParseException: If there is a parse error
6.017105
5.891433
1.021331
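A short sketch tying together the parser, `walk`, and `inspect` above, assuming the enclosing class is `stix2patterns.pattern.Pattern` (the pattern literal is illustrative).

from stix2patterns.pattern import Pattern  # assumed import path

# Parsing happens up front (via __do_parse); a malformed pattern
# raises ParseException.
p = Pattern("[file:name = 'evil.exe']")
print(p.inspect())   # walks the parse tree with an InspectionListener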
streams = self._payload.get('streams', None)
return streams[0] if streams is not None and len(streams) >= 1 else ''
def stdout(self)
The job stdout :return: string (empty if no stdout is available)
6.287479
6.297014
0.998486
streams = self._payload.get('streams', None)
return streams[1] if streams is not None and len(streams) >= 2 else ''
def stderr(self)
The job stderr :return: string (empty if no stderr is available)
6.037528
6.309165
0.956946
r = self._client._redis
flag = '{}:flag'.format(self._queue)
return bool(r.exists(flag))
def exists(self)
Returns true if the job is still running or zero-os still knows about this job ID.

After a job is finished, it remains on zero-os for a max of 5 min; during that window you can still read the job result. Once the 5 min are gone, the job result is no longer fetchable.

:return: bool
12.3728
9.863323
1.254425
r = self._client._redis
flag = '{}:flag'.format(self._queue)
if bool(r.exists(flag)):
    return r.ttl(flag) is None
return False
def running(self)
Returns true if the job is still in a running state :return: bool
9.191339
7.766341
1.183484
if callback is None:
    callback = Response.__default

if not callable(callback):
    raise Exception('callback must be callable')

queue = 'stream:%s' % self.id
r = self._client._redis

# we can terminate quickly by checking if the process is not running
# and it has no queued output.
# if not self.running and r.llen(queue) == 0:
#     return

while True:
    data = r.blpop(queue, 10)
    if data is None:
        if not self.running:
            break
        continue
    _, body = data
    payload = json.loads(body.decode())
    message = payload['message']
    line = message['message']
    meta = message['meta']
    callback(meta >> 16, line, meta & 0xff)

    if meta & 0x6 != 0:
        break
def stream(self, callback=None)
Runtime copy of job messages. This requires the `stream` flag to be set to True, otherwise it will not be able to copy any output and will block until the process exits.

:note: This function will block until it reaches end of stream or the process is no longer running.

:param callback: callback method that will get called for each received message. The callback accepts 3 arguments:
    - level int: the log message level, refer to the docs for available levels and their meanings
    - message str: the actual output message
    - flags int: flags associated with this message
        - 0x2 means EOF with success exit status
        - 0x4 means EOF with error

    For example (eof = flag & 0x6), eof will be true for the last message you will ever receive on this callback.

    Note: if callback is None, a default callback will be used that prints output on stdout/stderr based on level.
:return: None
5.272793
4.73462
1.113668
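A minimal sketch of consuming a streamed job via `stream` above; the command is illustrative, and the stdout/stderr level numbers are an assumption to check against the level docs.

import sys

def printer(level, message, flags):
    # Assumption: level 1 is stdout, level 2 is stderr; see the level docs.
    out = sys.stdout if level == 1 else sys.stderr
    out.write(message + '\n')
    if flags & 0x6:   # EOF (0x2 success, 0x4 error)
        out.write('-- end of stream --\n')

job = client.system('ping -c 3 127.0.0.1', stream=True)
job.stream(printer)   # blocks until EOF or the process stops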
if timeout is None:
    timeout = self._client.timeout
r = self._client._redis
start = time.time()
maxwait = timeout
while maxwait > 0:
    if not self.exists:
        raise JobNotFoundError(self.id)
    v = r.brpoplpush(self._queue, self._queue, min(maxwait, 10))
    if v is not None:
        payload = json.loads(v.decode())
        result = Return(payload)  # don't shadow the redis client `r`
        logger.debug('%s << %s, stdout="%s", stderr="%s", data="%s"',
                     self._id, result.state, result.stdout, result.stderr,
                     result.data[:1000])
        return result
    logger.debug('%s still waiting (%ss)', self._id, int(time.time() - start))
    maxwait -= 10
raise TimeoutError()
def get(self, timeout=None)
Waits for a job to finish (max of given timeout seconds) and returns the job results. Once a job exits, get() will keep returning the same result until zero-os no longer remembers the job (self.exists == False).

:note: the timeout here is a client-side timeout; it's different from the timeout given to the job on start (like in the system method), which will cause the job to be killed if it exceeds that timeout.

:param timeout: max time to wait for the job to finish in seconds
:return: Return object
3.979356
3.717478
1.070445
result = super().get(timeout)
if result.state != 'SUCCESS':
    raise ResultError(result.data, result.code)
if result.level != 20:
    raise ResultError('not a json response: %d' % result.level, 406)
return json.loads(result.data)
def get(self, timeout=None)
Get response as json; will fail if the job doesn't return a valid json response

:param timeout: client side timeout in seconds
:return: the parsed json data
4.932268
4.827251
1.021755
args = {'id': id}
self._job_chk.check(args)
return self._client.json('job.list', args)
def list(self, id=None)
List all running jobs :param id: optional ID for the job to list
10.806888
14.447017
0.748036
args = {
    'id': id,
    'signal': int(signal),
}
self._kill_chk.check(args)
return self._client.json('job.kill', args)
def kill(self, id, signal=signal.SIGTERM)
Kill a job with the given id

:WARNING: beware of what you kill; if you kill redis, for example, core0 or coreX won't be reachable

:param id: job id to kill
6.549953
9.396444
0.697067
args = {'pid': id}
self._process_chk.check(args)
return self._client.json('process.list', args)
def list(self, id=None)
List all running processes :param id: optional PID for the process to list
12.473987
15.091139
0.826577
args = {
    'file': file,
    'mode': mode,
    'perm': perm,
}
return self._client.json('filesystem.open', args)
def open(self, file, mode='r', perm=0o0644)
Opens a file on the node

:param file: file path to open
:param mode: open mode
    'r' read only
    'w' write only (truncate)
    '+' read/write
    'x' create if not exist
    'a' append
:param perm: file permission in octal form
:return: a file descriptor
4.36319
5.306589
0.822221
args = {
    'path': path,
    'destination': destination,
}
return self._client.json('filesystem.move', args)
def move(self, path, destination)
Move a path to destination :param path: source :param destination: destination :return:
5.019463
5.848768
0.858209
args = {
    'path': path,
    'mode': mode,
    'recursive': recursive,
}
return self._client.json('filesystem.chmod', args)
def chmod(self, path, mode, recursive=False)
Change file/dir permission :param path: path of file/dir to change :param mode: octet mode :param recursive: apply chmod recursively :return:
3.833834
4.7491
0.807276
args = {
    'path': path,
    'user': user,
    'group': group,
    'recursive': recursive,
}
return self._client.json('filesystem.chown', args)
def chown(self, path, user, group, recursive=False)
Change file/dir owner :param path: path of file/dir :param user: user name :param group: group name :param recursive: apply chown recursively :return:
3.170034
3.712585
0.853862
args = {
    'fd': fd,
}
data = self._client.json('filesystem.read', args)
return base64.decodebytes(data.encode())
def read(self, fd)
Read a block from the given file descriptor :param fd: file descriptor :return: bytes
7.675279
7.414684
1.035146
args = {
    'fd': fd,
    'block': base64.encodebytes(bytes).decode(),
}
return self._client.json('filesystem.write', args)
def write(self, fd, bytes)
Write a block of bytes to an open file descriptor (that is open with one of the writing modes)

:param fd: file descriptor
:param bytes: bytes block to write
:return:

:note: don't overwhelm the node with large byte chunks; for large file uploads, check the upload method.
6.373783
6.818238
0.934814
fd = self.open(remote, 'w')
while True:
    chunk = reader.read(512 * 1024)
    if chunk == b'':
        break
    self.write(fd, chunk)
self.close(fd)
def upload(self, remote, reader)
Uploads a file :param remote: remote file name :param reader: an object that implements the read(size) method (typically a file descriptor) :return:
2.582725
2.7535
0.937979
fd = self.open(remote)
while True:
    chunk = self.read(fd)
    if chunk == b'':
        break
    writer.write(chunk)
self.close(fd)
def download(self, remote, writer)
Downloads a file

:param remote: remote file name
:param writer: an object that implements the write(bytes) interface (typically a file descriptor)
:return:
3.1654
3.075526
1.029222
with open(local, 'rb') as file:
    self.upload(remote, file)
def upload_file(self, remote, local)
Uploads a file :param remote: remote file name :param local: local file name :return:
2.917224
3.600613
0.810202
with open(local, 'wb') as file:
    self.download(remote, file)
def download_file(self, remote, local)
Downloads a file :param remote: remote file name :param local: local file name :return:
3.115021
3.737552
0.833439
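A short sketch of the filesystem API above; `client.filesystem` as the accessor and the paths are assumptions for illustration.

import io

fs = client.filesystem  # assumed accessor for the filesystem API above

# Round-trip a small payload through the node.
fs.upload('/tmp/hello.txt', io.BytesIO(b'hello zero-os\n'))

buf = io.BytesIO()
fs.download('/tmp/hello.txt', buf)
assert buf.getvalue() == b'hello zero-os\n'

# Or work with local files directly.
fs.download_file('/tmp/hello.txt', '/tmp/hello-copy.txt')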
# NotImplemented is a value, not an exception; raise NotImplementedError.
raise NotImplementedError()
def raw(self, command, arguments, queue=None, max_time=None, stream=False, tags=None, id=None)
Implements the low level command call, this needs to build the command structure and push it on the correct queue.

:param command: Command name to execute supported by the node (ex: core.system, info.cpu, etc...) check documentation for list of built in commands
:param arguments: A dict of required command arguments depends on the command name.
:param queue: command queue (commands on the same queue are executed sequentially)
:param max_time: kill job server side if it exceeded this amount of seconds
:param stream: If True, process stdout and stderr are pushed to a special queue (stream:<id>) so client can stream output
:param tags: job tags
:param id: job id. Generated if not supplied
:return: Response object
296.815277
1,016.903137
0.291882
response = self.raw(command, arguments, tags=tags, id=id)

result = response.get()

if result.state != 'SUCCESS':
    raise ResultError(msg='%s' % result.data, code=result.code)

return result
def sync(self, command, arguments, tags=None, id=None)
Same as self.raw except it does a response.get(), waiting for the command execution to finish, and reads the result

:param command: Command name to execute supported by the node (ex: core.system, info.cpu, etc...) check documentation for list of built in commands
:param arguments: A dict of required command arguments depends on the command name.
:param tags: job tags
:param id: job id. Generated if not supplied
:return: Result object
5.023679
4.201269
1.195753
result = self.sync(command, arguments, tags=tags, id=id)

if result.level != 20:
    raise RuntimeError('invalid result level, expecting json(20) got (%d)' % result.level)

return json.loads(result.data)
def json(self, command, arguments, tags=None, id=None)
Same as self.sync except it assumes the returned result is json and loads the payload of the return object. If the returned data is not of level (20), an error is raised.

:return: Data
5.805466
3.945819
1.471296
parts = shlex.split(command)
if len(parts) == 0:
    raise ValueError('invalid command')

args = {
    'name': parts[0],
    'args': parts[1:],
    'dir': dir,
    'stdin': stdin,
    'env': env,
}

self._system_chk.check(args)
response = self.raw(command='core.system', arguments=args, queue=queue,
                    max_time=max_time, stream=stream, tags=tags, id=id)

return response
def system(self, command, dir='', stdin='', env=None, queue=None, max_time=None, stream=False, tags=None, id=None)
Execute a command

:param command: command to execute (with its arguments) ex: `ls -l /root`
:param dir: CWD of command
:param stdin: Stdin data to feed to the command stdin
:param env: dict with ENV variables that will be exported to the command
:param id: job id. Auto generated if not defined.
:return:
3.127024
3.331385
0.938656
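A minimal sketch of running a command with `system` above and waiting for its result; the command itself is just an example.

job = client.system('ls -l /root')
result = job.get()      # waits (client-side) for the job to finish
print(result.state)     # e.g. 'SUCCESS'
print(result.stdout)

# Or raise on failure and parse structured output in one call:
# data = client.json('info.cpu', {})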
args = {
    'script': script,
    'stdin': stdin,
}
self._bash_chk.check(args)
response = self.raw(command='bash', arguments=args, queue=queue,
                    max_time=max_time, stream=stream, tags=tags, id=id)

return response
def bash(self, script, stdin='', queue=None, max_time=None, stream=False, tags=None, id=None)
Execute a bash script, or run a process inside a bash shell.

:param script: Script to execute (can be a multiline script)
:param stdin: Stdin data to feed to the script
:param id: job id. Auto generated if not defined.
:return:
3.75779
4.493694
0.836236
return self.raw('core.subscribe', {'id': job}, stream=True, id=id)
def subscribe(self, job, id=None)
Subscribes to job logs. It returns the subscribe Response object, which you will need to call .stream() on to read the output stream of this job.

Calling subscribe multiple times will cause different subscriptions on the same job; each subscription will have a copy of this job's streams.

Note: killing the subscription job will not affect this job; it will also not cause an unsubscribe from this stream. The subscriptions die automatically once this job exits.

example:
    job = client.system('long running job')

    subscription = client.subscribe(job.id)
    subscription.stream()  # this will print directly on stdout/stderr, check stream docs for more details.

hint: you can give an optional id to the subscriber (otherwise a guid will be generated for you). You probably want to use this in case your job watcher died, so you can hook on the stream of the current subscriber instead of creating a new one.

example:
    job = client.system('long running job')

    subscription = client.subscribe(job.id, 'my-job-subscriber')
    subscription.stream()

    # process dies for any reason
    # on next start you can simply do

    subscription = client.response_for('my-job-subscriber')
    subscription.stream()

:param job: the job ID to subscribe to
:param id: the subscriber ID (optional)
:return: the subscribe Job object
14.754541
21.204437
0.695823
args = {
    'container': self._container,
    'command': {
        'command': command,
        'arguments': arguments,
        'queue': queue,
        'max_time': max_time,
        'stream': stream,
        'tags': tags,
        'id': id,
    },
}

# check input
self._raw_chk.check(args)

response = self._client.raw('corex.dispatch', args)
result = response.get()

if result.state != 'SUCCESS':
    raise RuntimeError('failed to dispatch command to container: %s' % result.data)

cmd_id = json.loads(result.data)
return self._client.response_for(cmd_id)
def raw(self, command, arguments, queue=None, max_time=None, stream=False, tags=None, id=None)
Implements the low level command call, this needs to build the command structure and push it on the correct queue.

:param command: Command name to execute supported by the node (ex: core.system, info.cpu, etc...) check documentation for list of built in commands
:param arguments: A dict of required command arguments depends on the command name.
:param queue: command queue (commands on the same queue are executed sequentially)
:param max_time: kill job server side if it exceeded this amount of seconds
:param stream: If True, process stdout and stderr are pushed to a special queue (stream:<id>) so client can stream output
:param tags: job tags
:param id: job id. Generated if not supplied
:return: Response object
4.081676
4.103859
0.994594
if nics == self.DefaultNetworking:
    nics = [{'type': 'default'}]
elif nics is None:
    nics = []

args = {
    'root': root_url,
    'mount': mount,
    'host_network': host_network,
    'nics': nics,
    'port': port,
    'hostname': hostname,
    'privileged': privileged,
    'storage': storage,
    'name': name,
    'identity': identity,
    'env': env,
    'cgroups': cgroups,
}

# validate input
self._create_chk.check(args)

response = self._client.raw('corex.create', args, tags=tags)

return JSONResponse(response)
def create(self, root_url, mount=None, host_network=False, nics=DefaultNetworking, port=None, hostname=None, privileged=False, storage=None, name=None, tags=None, identity=None, env=None, cgroups=None)
Create a new container with the given root flist, mount points and zerotier id, and connected to the given bridges

:param root_url: The root filesystem flist
:param mount: a dict with {host_source: container_target} mount points, where the host_source directory must exist. host_source can be a url to a flist to mount.
:param host_network: Specify if the container should share the same network stack as the host. If True, container creation ignores the zerotier, bridge and ports arguments below, without giving errors if provided.
:param nics: Configure the attached nics to the container. Each nic object is a dict of the format
    {
        'type': nic_type  # one of default, bridge, zerotier, macvlan, passthrough, vlan, or vxlan (note: vlan and vxlan are only supported by ovs)
        'id': id  # depends on the type:
                  #   bridge: bridge name, zerotier: network id, macvlan: the parent link name,
                  #   passthrough: the link name, vlan: the vlan tag, vxlan: the vxlan id
        'name': name of the nic inside the container (ignored in zerotier type)
        'hwaddr': Mac address of nic.
        'config': {  # config is only honored for bridge, vlan, and vxlan types
            'dhcp': bool,
            'cidr': static_ip,  # ip/mask
            'gateway': gateway,
            'dns': [dns]
        }
    }
:param port: A dict of host_port: container_port pairs (only if default networking is enabled). Example: `port={8080: 80, 7000: 7000}`
:param hostname: Specific hostname you want to give to the container. If None it will automatically be set to core-x, x being the ID of the container
:param privileged: If true, container runs in privileged mode.
:param storage: A url to the ardb storage to use to mount the root flist (or any other mount that requires g8fs). If not provided, the default one from core0 configuration will be used.
:param name: Optional name for the container
:param identity: Container zerotier identity, only used if at least one of the nics is of type zerotier
:param env: a dict with the environment variables needed to be set for the container
:param cgroups: custom list of cgroups to apply to this container on creation, formatted as [(subsystem, name), ...]. Please refer to the cgroup api for more details.
2.749108
2.669461
1.029836
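A small sketch of creating a container with the API above and talking to it; `client.container` as the accessor and the flist URL are assumptions for illustration.

# Create a container on the default network from a root flist
# (the URL here is just an example).
resp = client.container.create(
    'https://hub.grid.tf/tf-bootable/ubuntu:18.04.flist',
    hostname='worker-1',
)
container_id = resp.get()   # JSONResponse: yields the new container id

# Bind a client to the container and run a command inside it.
cc = client.container.client(container_id)
print(cc.system('hostname').get().stdout)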
tags = list(map(str, tags))
return self._client.json('corex.find', {'tags': tags})
def find(self, *tags)
Find containers that match a set of tags :param tags: :return:
9.573701
11.928205
0.80261
self._client_chk.check(container)
args = {
    'container': int(container),
}
response = self._client.raw('corex.terminate', args)
result = response.get()
if result.state != 'SUCCESS':
    raise RuntimeError('failed to terminate container: %s' % result.data)
def terminate(self, container)
Terminate a container given its id :param container: container id :return:
7.231514
7.314553
0.988647
args = {
    'container': container,
    'nic': nic,
}
self._nic_add.check(args)
return self._client.json('corex.nic-add', args)
def nic_add(self, container, nic)
Hot plug a nic into a container

:param container: container ID
:param nic: nic object of the format
    {
        'type': nic_type  # one of default, bridge, zerotier, macvlan, passthrough, vlan, or vxlan (note: vlan and vxlan are only supported by ovs)
        'id': id  # depends on the type:
                  #   bridge: bridge name, zerotier: network id, macvlan: the parent link name,
                  #   passthrough: the link name, vlan: the vlan tag, vxlan: the vxlan id
        'name': name of the nic inside the container (ignored in zerotier type)
        'hwaddr': Mac address of nic.
        'config': {  # config is only honored for bridge, vlan, and vxlan types
            'dhcp': bool,
            'cidr': static_ip,  # ip/mask
            'gateway': gateway,
            'dns': [dns]
        }
    }
:return:
7.237005
8.574743
0.843991
args = {
    'container': container,
    'index': index,
}
self._nic_remove.check(args)
return self._client.json('corex.nic-remove', args)
def nic_remove(self, container, index)
Hot unplug of a nic from a container

Note: removing a nic doesn't remove the nic from the container info object; instead it sets its state to `destroyed`.

:param container: container ID
:param index: index of the nic as returned in the container object info (as shown by container.list())
:return:
7.29161
10.371313
0.703056
self._client_chk.check(container)
return ContainerClient(self._client, int(container))
def client(self, container)
Return a client instance that is bound to that container.

:param container: container id
:return: Client object bound to the specified container id
14.825606
14.235945
1.041421
args = {
    'container': container,
    'url': url,
}
return JSONResponse(self._client.raw('corex.backup', args))
def backup(self, container, url)
Backup a container to the given restic url. All restic urls are supported.

:param container: container ID
:param url: url to the restic repo, e.g. (file:///path/to/restic/?password=<password>)
:return: Json response to the backup job (do .get() to get the snapshot ID)
8.31091
12.339745
0.673507
args = {
    'url': url,
}
return JSONResponse(self._client.raw('corex.restore', args, tags=tags))
def restore(self, url, tags=None)
Full restore of a container backup. This restore method will recreate an exact copy of the backed-up container (including the same network setup and other configurations as defined by the `create` method).

To just restore the container data and use a new configuration, use the create method instead with the `root_url` set to `restic:<url>`.

:param url: Snapshot url; the snapshot ID is passed as a url fragment, e.g. `file:///path/to/restic/repo?password=<password>#<snapshot-id>`
:param tags: this will always override the original container tags (even if not set)
:return:
9.691413
16.036591
0.604331
args = {
    'name': name,
    'hwaddr': hwaddr,
    'network': {
        'mode': network,
        'nat': nat,
        'settings': settings,
    },
}
self._bridge_create_chk.check(args)
return self._client.json('bridge.create', args)
def create(self, name, hwaddr=None, network=None, nat=False, settings={})
Create a bridge with the given name, hwaddr and networking setup

:param name: name of the bridge (must be unique), 15 characters or less, and not equal to "default".
:param hwaddr: MAC address of the bridge. If None, one will be created for you
:param network: Networking mode; options are none, static, and dnsmasq
:param nat: If true, SNAT will be enabled on this bridge (IF and ONLY IF an IP is set on the bridge via the settings, i.e. the cidr attribute of either static or dnsmasq modes; otherwise the flag will be ignored)
:param settings: Networking settings, depending on the selected mode:
    none: no settings, bridge won't get any ip settings
    static: settings={'cidr': 'ip/net'}; bridge will get assigned the given IP address
    dnsmasq: settings={'cidr': 'ip/net', 'start': 'ip', 'end': 'ip'}; bridge will get assigned the ip in cidr, and each running container attached to this bridge will get an IP from the start/end range. The netmask of the range is the netmask part of the provided cidr. If nat is true, SNAT rules will be automatically added in the firewall.
3.836052
3.69746
1.037483
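A short sketch of creating and wiring a bridge with the API above; `client.bridge` as the accessor and the addresses are assumptions for illustration.

# dnsmasq mode: the bridge gets 10.1.0.1/24 and hands out leases
# from .2 to .100 (addresses are illustrative).
client.bridge.create(
    'br-demo',
    network='dnsmasq',
    nat=True,
    settings={'cidr': '10.1.0.1/24', 'start': '10.1.0.2', 'end': '10.1.0.100'},
)

client.bridge.nic_add('br-demo', 'eth1')   # attach a host nic
print(client.bridge.nic_list('br-demo'))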
args = {
    'name': bridge,
}
self._bridge_chk.check(args)
return self._client.json('bridge.delete', args)
def delete(self, bridge)
Delete a bridge by name :param bridge: bridge name :return:
8.833516
8.984772
0.983165
args = {
    'name': bridge,
    'nic': nic,
}
self._nic_add_chk.check(args)
return self._client.json('bridge.nic-add', args)
def nic_add(self, bridge, nic)
Attach a nic to a bridge :param bridge: bridge name :param nic: nic name
6.957832
8.213158
0.847157
args = {
    'nic': nic,
}
self._nic_remove_chk.check(args)
return self._client.json('bridge.nic-remove', args)
def nic_remove(self, nic)
Detach a nic from a bridge :param nic: nic name to detach
9.685401
9.773051
0.991031
args = {
    'name': bridge,
}
self._bridge_chk.check(args)
return self._client.json('bridge.nic-list', args)
def nic_list(self, bridge)
List nics attached to bridge :param bridge: bridge name
9.773413
11.805942
0.827838
response = self._client.raw('disk.list', {})

result = response.get()

if result.state != 'SUCCESS':
    raise RuntimeError('failed to list disks: %s' % result.stderr)

if result.level != 20:  # 20 is JSON output.
    raise RuntimeError('invalid response type from disk.list command')

data = result.data.strip()
if data:
    return json.loads(data)
else:
    return {}
def list(self)
List available block devices
6.023846
5.964472
1.009955
args = {
    'disk': disk,
    'table_type': table_type,
}
self._mktable_chk.check(args)

response = self._client.raw('disk.mktable', args)

result = response.get()

if result.state != 'SUCCESS':
    raise RuntimeError('failed to create table: %s' % result.stderr)
def mktable(self, disk, table_type='gpt')
Make partition table on block device. :param disk: device path (/dev/sda, /dev/disk/by-id/ata-Samsung..., etc...) :param table_type: Partition table type as accepted by parted
4.911983
5.98347
0.820926
args = { "disk": disk, "part": part, } self._getpart_chk.check(args) response = self._client.raw('disk.getinfo', args) result = response.get() if result.state != 'SUCCESS': raise RuntimeError('failed to get info: %s' % result.data) if result.level != 20: # 20 is JSON output. raise RuntimeError('invalid response type from disk.getinfo command') data = result.data.strip() if data: return json.loads(data) else: return {}
def getinfo(self, disk, part='')
Get more info about a disk or a disk partition :param disk: (/dev/sda, /dev/sdb, etc..) :param part: (/dev/sda1, /dev/sdb2, etc...) :return: a dict with {"blocksize", "start", "size", and "free" sections}
5.10569
4.96228
1.0289
args = {
    'disk': disk,
    'start': start,
    'end': end,
    'part_type': part_type,
}
self._mkpart_chk.check(args)

response = self._client.raw('disk.mkpart', args)

result = response.get()

if result.state != 'SUCCESS':
    raise RuntimeError('failed to create partition: %s' % result.stderr)
def mkpart(self, disk, start, end, part_type='primary')
Make partition on disk :param disk: device path (/dev/sda, /dev/sdb, etc...) :param start: partition start as accepted by parted mkpart :param end: partition end as accepted by parted mkpart :param part_type: partition type as accepted by parted mkpart
3.857938
4.428038
0.871252
args = {
    'disk': disk,
    'number': number,
}
self._rmpart_chk.check(args)

response = self._client.raw('disk.rmpart', args)

result = response.get()

if result.state != 'SUCCESS':
    raise RuntimeError('failed to remove partition: %s' % result.stderr)
def rmpart(self, disk, number)
Remove a partition from disk :param disk: device path (/dev/sda, /dev/sdb, etc...) :param number: Partition number (starting from 1)
4.716237
5.20568
0.905979
if len(options) == 0:
    options = ['']

args = {
    'options': ','.join(options),
    'source': source,
    'target': target,
}
self._mount_chk.check(args)

response = self._client.raw('disk.mount', args)

result = response.get()

if result.state != 'SUCCESS':
    raise RuntimeError('failed to mount partition: %s' % result.stderr)
def mount(self, source, target, options=[])
Mount a partition on target :param source: Full partition path like /dev/sda1 :param target: Mount point :param options: Optional mount options
4.422297
4.345629
1.017643
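A sketch of the partitioning workflow above, from empty disk to mounted filesystem; `client.disk` as the accessor and the device path are illustrative.

disk = '/dev/sdb'   # illustrative device path -- double-check before running

client.disk.mktable(disk, table_type='gpt')
client.disk.mkpart(disk, start='1MiB', end='100%')   # parted-style bounds
print(client.disk.getinfo(disk, part=disk + '1'))

# A filesystem still has to be created on the partition before mounting,
# e.g. client.bash('mkfs.ext4 /dev/sdb1').get()
client.disk.mount(disk + '1', '/mnt/data')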
args = {
    'source': source,
}
self._umount_chk.check(args)

response = self._client.raw('disk.umount', args)

result = response.get()

if result.state != 'SUCCESS':
    raise RuntimeError('failed to umount partition: %s' % result.stderr)
def umount(self, source)
Unmount a partition :param source: Full partition path like /dev/sda1
6.569007
6.171078
1.064483
args = { "disk": disk, "spindown": spindown } self._spindown_chk.check(args) response = self._client.raw('disk.spindown', args) result = response.get() if result.state != 'SUCCESS': raise RuntimeError("Failed to spindown disk {} to {}.".format(disk, spindown))
def spindown(self, disk, spindown=1)
Spindown a disk :param disk str: Full path to a disk like /dev/sda :param spindown int: spindown value should be in [1, 240]
4.525581
4.753863
0.95198
args = {
    'disk': disk,
}
self._seektime_chk.check(args)
return self._client.json("disk.seektime", args)
def seektime(self, disk)
Gives the seek latency on a disk, which is a very good indication of the `type` of the disk; it's a very good way to verify whether the underlying disk type is SSD or HDD.

:param disk: disk path or name (/dev/sda, or sda)
:return: a dict as follows {'device': '<device-path>', 'elapsed': <seek-time in us>, 'type': '<SSD or HDD>'}
7.25403
9.216775
0.787046
args = {
    'label': label,
    'metadata': metadata_profile,
    'data': data_profile,
    'devices': devices,
    'overwrite': overwrite,
}
self._create_chk.check(args)
self._client.sync('btrfs.create', args)
def create(self, label, devices, metadata_profile="", data_profile="", overwrite=False)
Create a btrfs filesystem with the given label, devices, and profiles

:param label: name/label
:param devices: array of devices (/dev/sda1, etc...)
:param metadata_profile: raid0, raid1, raid5, raid6, raid10, dup or single
:param data_profile: same as metadata_profile
:param overwrite: force creation of the filesystem; overwrite any existing filesystem
4.655731
5.078773
0.916704
if len(device) == 0:
    return

args = {
    'mountpoint': mountpoint,
    'devices': device,
}
self._device_chk.check(args)
self._client.sync('btrfs.device_add', args)
def device_add(self, mountpoint, *device)
Add one or more devices to the btrfs filesystem mounted under `mountpoint`

:param mountpoint: mount point of the btrfs system
:param device: one or more devices to add
:return:
4.91326
5.369685
0.915
args = {
    'path': path,
}
self._subvol_chk.check(args)
self._client.sync('btrfs.subvol_create', args)
def subvol_create(self, path)
Create a btrfs subvolume in the specified path :param path: path to create
9.475289
10.421208
0.909231
args = {
    'path': path,
}
self._subvol_chk.check(args)
self._client.sync('btrfs.subvol_delete', args)
def subvol_delete(self, path)
Delete a btrfs subvolume in the specified path :param path: path to delete
10.386509
10.784765
0.963072
args = {
    'path': path,
    'limit': limit,
}
self._subvol_quota_chk.check(args)
self._client.sync('btrfs.subvol_quota', args)
def subvol_quota(self, path, limit)
Apply a quota to a btrfs subvolume in the specified path :param path: path to apply the quota for (it has to be the path of the subvol) :param limit: the limit to apply
6.637408
7.04095
0.942686
args = { "source": source, "destination": destination, "read_only": read_only, } self._subvol_snapshot_chk.check(args) self._client.sync('btrfs.subvol_snapshot', args)
def subvol_snapshot(self, source, destination, read_only=False)
Take a snapshot :param source: source path of subvol :param destination: destination path of snapshot :param read_only: Set read-only on the snapshot :return:
4.741118
5.707178
0.830729
args = {'network': network}
self._network_chk.check(args)

response = self._client.raw('zerotier.join', args)

result = response.get()

if result.state != 'SUCCESS':
    # use % formatting; the logging-style trailing argument was a bug
    raise RuntimeError('failed to join zerotier network: %s' % result.stderr)
def join(self, network)
Join a zerotier network :param network: network id to join :return:
6.876952
6.180672
1.112654
if nics is None:
    nics = []

args = {
    'name': name,
    'media': media,
    'cpu': cpu,
    'flist': flist,
    'memory': memory,
    'nics': nics,
    'port': port,
    'mount': mount,
    'tags': tags,
    'config': config,
}
self._create_chk.check(args)

if media is None and flist is None:
    raise ValueError('need at least one boot media via media or an flist')

return self._client.json('kvm.create', args, tags=tags)
def create(self, name, media=None, flist=None, cpu=2, memory=512, nics=None, port=None, mount=None, tags=None, config=None)
:param name: Name of the kvm domain
:param media: (optional) array of media objects to attach to the machine, where the first object is the boot device. Each media object is a dict of {url, type} where type can be one of 'disk' or 'cdrom', or empty (defaults to disk).
    example: [{'url': 'nbd+unix:///test?socket=/tmp/ndb.socket'}, {'type': 'cdrom', 'url': '/somefile.iso'}]
    zdb example: [{'url': 'zdb://host:port?size=10G&blocksize=4096'}, {'url': 'zdb+unix:///path/to/unix.socket?size=5G'}]
:param flist: (optional) VM flist. A special bootable flist which has a correct boot.yaml file
    example: http://hub.gig.tech/azmy/ubuntu-zesty.flist
:param cpu: number of vcpu cores
:param memory: memory in MiB
:param nics: Configure the attached nics to the container. Each nic object is a dict of the format
    {
        'type': nic_type  # default, bridge, vlan, or vxlan (note: vlan and vxlan are only supported by ovs)
        'id': id  # depends on the type: bridge name (bridge type), zerotier network id (zerotier type), the vlan tag or the vxlan id
    }
:param port: Configure port forwards to the vm; this only works if the default network nic is added. A dict of {host-port: guest-port} pairs. Example: `port={8080: 80, 7000: 7000}`
:param mount: A list of host shared folders in the format {'source': '/host/path', 'target': '/guest/path', 'readonly': True|False}
:param tags: A list of user defined tags (strings)
:param config: a map with the config file path as a key and content as a value. This only works when creating a VM from an flist. The config files are written to the machine before booting.
    Example: config = {'/root/.ssh/authorized_keys': '<PUBLIC KEYS>'}
    If the machine is not booted from an flist, the config is discarded.
:note: At least one media or an flist must be provided.
:return: uuid of the virtual machine
3.045988
3.135068
0.971586
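A small sketch of booting and managing a VM with the kvm API above; `client.kvm` as the accessor is an assumption, and the flist URL is the example from the docs.

uuid = client.kvm.create(
    'demo-vm',
    flist='http://hub.gig.tech/azmy/ubuntu-zesty.flist',
    cpu=2,
    memory=1024,
    nics=[{'type': 'default'}],
    port={2222: 22},    # forward host port 2222 to guest ssh
)

print(client.kvm.info(uuid))
client.kvm.shutdown(uuid)   # graceful; use destroy(uuid) to force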
if nics is None:
    nics = []

args = {
    'nics': nics,
    'port': port,
    'uuid': uuid,
}
self._migrate_network_chk.check(args)
self._client.sync('kvm.prepare_migration_target', args, tags=tags)
def prepare_migration_target(self, uuid, nics=None, port=None, tags=None)
Prepare this node to receive a migrated kvm domain.

:param uuid: uuid of the machine to be migrated on the old node
:param port: A dict of host_port: container_port pairs. Example: `port={8080: 80, 7000: 7000}`. Only supported if the default network is used
:param nics: Configure the attached nics to the container. Each nic object is a dict of the format
    {
        'type': nic_type  # default, bridge, vlan, or vxlan (note: vlan and vxlan are only supported by ovs)
        'id': id  # depends on the type: bridge name (bridge type), zerotier network id (zerotier type), the vlan tag or the vxlan id
    }
:return:
5.083141
5.655341
0.898821
args = {
    'uuid': uuid,
}
self._domain_action_chk.check(args)
self._client.sync('kvm.destroy', args)
def destroy(self, uuid)
Destroy a kvm domain by uuid :param uuid: uuid of the kvm container (same as the one used in create) :return:
13.603602
12.232207
1.112113
args = {
    'uuid': uuid,
}
self._domain_action_chk.check(args)
self._client.sync('kvm.shutdown', args)
def shutdown(self, uuid)
Shutdown a kvm domain by uuid :param uuid: uuid of the kvm container (same as the one used in create) :return:
13.226214
11.97208
1.104755
args = {
    'uuid': uuid,
}
self._domain_action_chk.check(args)
self._client.sync('kvm.reboot', args)
def reboot(self, uuid)
Reboot a kvm domain by uuid :param uuid: uuid of the kvm container (same as the one used in create) :return:
12.34401
11.829324
1.043509
args = {
    'uuid': uuid,
}
self._domain_action_chk.check(args)
self._client.sync('kvm.reset', args)
def reset(self, uuid)
Reset (force reboot) a kvm domain by uuid :param uuid: uuid of the kvm container (same as the one used in create) :return:
13.626036
11.693228
1.165293
args = {
    'uuid': uuid,
}
self._domain_action_chk.check(args)
self._client.sync('kvm.pause', args)
def pause(self, uuid)
Pause a kvm domain by uuid :param uuid: uuid of the kvm container (same as the one used in create) :return:
12.835424
11.80778
1.087031
args = {
    'uuid': uuid,
}
self._domain_action_chk.check(args)
self._client.sync('kvm.resume', args)
def resume(self, uuid)
Resume a kvm domain by uuid :param uuid: uuid of the kvm container (same as the one used in create) :return:
12.787426
11.402493
1.121459
args = {
    'uuid': uuid,
}
self._domain_action_chk.check(args)
return self._client.json('kvm.info', args)
def info(self, uuid)
Get info about a kvm domain by uuid :param uuid: uuid of the kvm container (same as the one used in create) :return:
11.521432
9.920917
1.161327
args = {
    'uuid': uuid,
}
self._domain_action_chk.check(args)
return self._client.json('kvm.infops', args)
def infops(self, uuid)
Get info per second about a kvm domain by uuid :param uuid: uuid of the kvm container (same as the one used in create) :return:
10.680518
9.428082
1.132841
args = {
    'uuid': uuid,
    'media': media,
}
self._man_disk_action_chk.check(args)
self._client.sync('kvm.attach_disk', args)
def attach_disk(self, uuid, media)
Attach a disk to a machine

:param uuid: uuid of the kvm container (same as the one used in create)
:param media: the media object to attach to the machine. A media object is a dict of {url, type} where type can be one of 'disk' or 'cdrom', or empty (defaults to disk)
    examples: {'url': 'nbd+unix:///test?socket=/tmp/ndb.socket'}, {'type': 'cdrom', 'url': '/somefile.iso'}
:return:
9.482069
9.566052
0.991221
args = {
    'uuid': uuid,
    'type': type,
    'id': id,
    'hwaddr': hwaddr,
}
self._man_nic_action_chk.check(args)
return self._client.json('kvm.add_nic', args)
def add_nic(self, uuid, type, id=None, hwaddr=None)
Add a nic to a machine

:param uuid: uuid of the kvm container (same as the one used in create)
:param type: nic_type # default, bridge, vlan, or vxlan (note: vlan and vxlan are only supported by ovs)
:param id: depends on the type: bridge name (bridge type), zerotier network id (zerotier type), the vlan tag or the vxlan id
:param hwaddr: the hardware address of the nic
:return:
4.893603
5.263988
0.929638
args = {
    'uuid': uuid,
    'media': media,
    'totalbytessecset': totalbytessecset,
    'totalbytessec': totalbytessec,
    'readbytessecset': readbytessecset,
    'readbytessec': readbytessec,
    'writebytessecset': writebytessecset,
    'writebytessec': writebytessec,
    'totaliopssecset': totaliopssecset,
    'totaliopssec': totaliopssec,
    'readiopssecset': readiopssecset,
    'readiopssec': readiopssec,
    'writeiopssecset': writeiopssecset,
    'writeiopssec': writeiopssec,
    'totalbytessecmaxset': totalbytessecmaxset,
    'totalbytessecmax': totalbytessecmax,
    'readbytessecmaxset': readbytessecmaxset,
    'readbytessecmax': readbytessecmax,
    'writebytessecmaxset': writebytessecmaxset,
    'writebytessecmax': writebytessecmax,
    'totaliopssecmaxset': totaliopssecmaxset,
    'totaliopssecmax': totaliopssecmax,
    'readiopssecmaxset': readiopssecmaxset,
    'readiopssecmax': readiopssecmax,
    'writeiopssecmaxset': writeiopssecmaxset,
    'writeiopssecmax': writeiopssecmax,
    'totalbytessecmaxlengthset': totalbytessecmaxlengthset,
    'totalbytessecmaxlength': totalbytessecmaxlength,
    'readbytessecmaxlengthset': readbytessecmaxlengthset,
    'readbytessecmaxlength': readbytessecmaxlength,
    'writebytessecmaxlengthset': writebytessecmaxlengthset,
    'writebytessecmaxlength': writebytessecmaxlength,
    'totaliopssecmaxlengthset': totaliopssecmaxlengthset,
    'totaliopssecmaxlength': totaliopssecmaxlength,
    'readiopssecmaxlengthset': readiopssecmaxlengthset,
    'readiopssecmaxlength': readiopssecmaxlength,
    'writeiopssecmaxlengthset': writeiopssecmaxlengthset,
    'writeiopssecmaxlength': writeiopssecmaxlength,
    'sizeiopssecset': sizeiopssecset,
    'sizeiopssec': sizeiopssec,
    'groupnameset': groupnameset,
    'groupname': groupname,
}
self._limit_disk_io_action_chk.check(args)
self._client.sync('kvm.limit_disk_io', args)
def limit_disk_io(self, uuid, media, totalbytessecset=False, totalbytessec=0, readbytessecset=False, readbytessec=0, writebytessecset=False, writebytessec=0, totaliopssecset=False, totaliopssec=0, readiopssecset=False, readiopssec=0, writeiopssecset=False, writeiopssec=0, totalbytessecmaxset=False, totalbytessecmax=0, readbytessecmaxset=False, readbytessecmax=0, writebytessecmaxset=False, writebytessecmax=0, totaliopssecmaxset=False, totaliopssecmax=0, readiopssecmaxset=False, readiopssecmax=0, writeiopssecmaxset=False, writeiopssecmax=0, totalbytessecmaxlengthset=False, totalbytessecmaxlength=0, readbytessecmaxlengthset=False, readbytessecmaxlength=0, writebytessecmaxlengthset=False, writebytessecmaxlength=0, totaliopssecmaxlengthset=False, totaliopssecmaxlength=0, readiopssecmaxlengthset=False, readiopssecmaxlength=0, writeiopssecmaxlengthset=False, writeiopssecmaxlength=0, sizeiopssecset=False, sizeiopssec=0, groupnameset=False, groupname='')
Limit the disk IO of a machine's media :param uuid: uuid of the kvm container (same as the one used in create) :param media: the media to limit the diskio on :return:
1.207019
1.233895
0.978218
args = {
    'uuid': uuid,
    'desturi': desturi,
}
self._migrate_action_chk.check(args)
self._client.sync('kvm.migrate', args)
def migrate(self, uuid, desturi)
Migrate a vm to another node :param uuid: uuid of the kvm container (same as the one used in create) :param desturi: the uri of the destination node :return:
7.731825
7.474326
1.034451
args = {'uuid': uuid}
self._get_chk.check(args)
return self._client.json('kvm.get', args)
def get(self, uuid)
Get machine info :param uuid str: domain uuid :return: machine info
11.272447
17.993689
0.626467