code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
def async_event_handler(self, event: dict) -> None:
    """Receive event from websocket and identifies where the event belong.

    Example event:
    {
        "t": "event",
        "e": "changed",
        "r": "sensors",
        "id": "12",
        "state": { "buttonevent": 2002 }
    }
    """
    if event['e'] == 'added':
        # New device reported by the gateway; create the matching wrapper
        # and register it under its gateway id.
        if event['r'] == 'lights' and event['id'] not in self.lights:
            device_type = 'light'
            device = self.lights[event['id']] = DeconzLight(
                event['id'], event['light'], self.async_put_state)
        elif event['r'] == 'sensors' and event['id'] not in self.sensors:
            if supported_sensor(event['sensor']):
                device_type = 'sensor'
                device = self.sensors[event['id']] = create_sensor(
                    event['id'], event['sensor'], self.async_put_state)
            else:
                _LOGGER.warning('Unsupported sensor %s', event)
                return
        else:
            _LOGGER.debug('Unsupported event %s', event)
            return
        if self.async_add_device_callback:
            self.async_add_device_callback(device_type, device)
    elif event['e'] == 'changed':
        if event['r'] == 'groups' and event['id'] in self.groups:
            self.groups[event['id']].async_update(event)
        elif event['r'] == 'lights' and event['id'] in self.lights:
            self.lights[event['id']].async_update(event)
            # A light change can affect the color its groups report.
            self.update_group_color([event['id']])
        elif event['r'] == 'sensors' and event['id'] in self.sensors:
            self.sensors[event['id']].async_update(event)
        else:
            _LOGGER.debug('Unsupported event %s', event)
    elif event['e'] == 'deleted':
        # Removal is only logged; device wrappers are kept in the registries.
        _LOGGER.debug('Removed event %s', event)
    else:
        _LOGGER.debug('Unsupported event %s', event)
2.009011
1.92419
1.044082
def update_group_color(self, lights: list) -> None:
    """Update group colors based on light states.

    deCONZ group updates don't contain any information about the current
    state of the lights in the group. This method copies the color state
    of the first reachable light into each group that contains one of the
    given light ids. When more than one id is passed (load_parameters),
    the group's own light list is searched instead.
    """
    changed = set(lights)
    for group in self.groups.values():
        # Ignore groups that share no lights with the changed set.
        if not changed & set(group.lights):
            continue
        # More than one light id means load_parameters called this method;
        # in that case take the first available light of the whole group.
        candidates = lights if len(lights) == 1 else group.lights
        for light_id in candidates:
            if self.lights[light_id].reachable:
                group.update_color_state(self.lights[light_id])
                break
6.518144
6.32384
1.030726
def submit_export(cls, file, volume, location, properties=None,
                  overwrite=False, copy_only=False, api=None):
    """Submit new export job.

    :param file: File to be exported.
    :param volume: Volume identifier.
    :param location: Volume location.
    :param properties: Properties dictionary.
    :param overwrite: If true it will overwrite file if exists.
    :param copy_only: If true files are kept on SevenBridges bucket.
    :param api: Api instance.
    :return: Export object.
    """
    payload = {}
    if properties:
        payload['properties'] = properties
    payload['source'] = {'file': Transform.to_file(file)}
    payload['destination'] = {
        'volume': Transform.to_volume(volume),
        'location': location,
    }
    payload['overwrite'] = overwrite

    extra = {'resource': cls.__name__, 'query': payload}
    logger.info('Submitting export', extra=extra)

    api = api if api else cls._API
    if copy_only:
        response = api.post(
            cls._URL['query'], data=payload, params={'copy_only': True})
    else:
        response = api.post(cls._URL['query'], data=payload)
    return Export(api=api, **response.json())
3.102233
3.174548
0.977221
def query(cls, volume=None, state=None, offset=None, limit=None, api=None):
    """Query (List) exports.

    :param volume: Optional volume identifier.
    :param state: Optional import state.
    :param api: Api instance.
    :return: Collection object.
    """
    resolved_api = api or cls._API
    volume_id = Transform.to_volume(volume) if volume else volume
    return super(Export, cls)._query(
        url=cls._URL['query'],
        volume=volume_id,
        state=state,
        offset=offset,
        limit=limit,
        fields='_all',
        api=resolved_api,
    )
4.5258
4.793386
0.944176
def bulk_get(cls, exports, api=None):
    """Retrieve exports in bulk.

    :param exports: Exports to be retrieved.
    :param api: Api instance.
    :return: list of ExportBulkRecord objects.
    """
    api = api if api else cls._API
    payload = {
        'export_ids': [Transform.to_export(item) for item in exports]
    }
    response = api.post(url=cls._URL['bulk_get'], data=payload)
    return ExportBulkRecord.parse_records(response=response, api=api)
4.296341
4.185654
1.026444
def bulk_submit(cls, exports, copy_only=False, api=None):
    """Create exports in bulk.

    :param exports: Exports to be submitted in bulk.
    :param copy_only: If true files are kept on SevenBridges bucket.
    :param api: Api instance.
    :return: list of ExportBulkRecord objects.
    """
    if not exports:
        raise SbgError('Exports are required')
    api = api or cls._API

    def _to_item(export):
        # Translate one export descriptor into the API payload shape.
        return {
            'source': {
                'file': Transform.to_file(export.get('file')),
            },
            'destination': {
                'volume': Transform.to_volume(export.get('volume')),
                'location': Transform.to_location(export.get('location')),
            },
            'properties': export.get('properties', {}),
            'overwrite': export.get('overwrite', False),
        }

    data = {'items': [_to_item(export) for export in exports]}
    response = api.post(
        url=cls._URL['bulk_create'],
        params={'copy_only': copy_only},
        data=data,
    )
    return ExportBulkRecord.parse_records(response=response, api=api)
2.87931
2.774199
1.037889
def query(cls, offset=None, limit=None, api=None):
    """Query (List) divisions.

    :param offset: Pagination offset.
    :param limit: Pagination limit.
    :param api: Api instance.
    :return: Collection object.
    """
    resolved_api = api if api else cls._API
    return super(Division, cls)._query(
        url=cls._URL['query'],
        offset=offset,
        limit=limit,
        fields='_all',
        api=resolved_api,
    )
5.893515
5.954001
0.989841
async def async_get_api_key(session, host, port,
                            username=None, password=None, **kwargs):
    """Get a new API key for devicetype."""
    url = 'http://{}:{}/api'.format(host, str(port))
    # Only authenticate when full credentials were supplied.
    auth = None
    if username and password:
        auth = aiohttp.BasicAuth(username, password=password)
    payload = b'{"devicetype": "pydeconz"}'
    response = await async_request(session.post, url, auth=auth, data=payload)
    api_key = response[0]['success']['username']
    _LOGGER.info("API key: %s", api_key)
    return api_key
3.091157
2.85472
1.082823
async def async_delete_api_key(session, host, port, api_key):
    """Delete API key from deCONZ."""
    # The key being removed doubles as the key used for authentication.
    url = 'http://{}:{}/api/{}/config/whitelist/{}'.format(
        host, str(port), api_key, api_key)
    response = await async_request(session.delete, url)
    _LOGGER.info(response)
3.89669
2.892036
1.347386
async def async_delete_all_keys(session, host, port, api_key, api_keys=None):
    """Delete all API keys except for the ones provided to the method.

    :param session: aiohttp-style session used for the requests.
    :param api_key: key used for the request; always preserved.
    :param api_keys: optional iterable of additional keys to preserve.

    Fix: the original used a mutable default argument (``api_keys=[]``)
    and appended ``api_key`` to it, so preserved keys from one call leaked
    into every later call sharing the default list.
    """
    keep = set(api_keys or ())
    keep.add(api_key)
    url = 'http://{}:{}/api/{}/config'.format(host, str(port), api_key)
    response = await async_request(session.get, url)
    for key in response['whitelist'].keys():
        if key not in keep:
            await async_delete_api_key(session, host, port, key)
3.292288
2.959486
1.112452
async def async_get_bridgeid(session, host, port, api_key, **kwargs):
    """Get bridge id for bridge."""
    config_url = 'http://{}:{}/api/{}/config'.format(host, str(port), api_key)
    config = await async_request(session.get, config_url)
    bridgeid = config['bridgeid']
    _LOGGER.info("Bridge id: %s", bridgeid)
    return bridgeid
3.363316
3.092388
1.087611
async def async_discovery(session):
    """Find bridges allowing gateway discovery."""
    response = await async_request(session.get, URL_DISCOVER)
    if not response:
        _LOGGER.info("No discoverable bridges available.")
        return []
    bridges = [
        {
            'bridgeid': bridge['id'],
            'host': bridge['internalipaddress'],
            'port': bridge['internalport'],
        }
        for bridge in response
    ]
    _LOGGER.info("Discovered the following bridges: %s.", bridges)
    return bridges
3.31099
2.959182
1.118887
async def async_request(session, url, **kwargs):
    """Do a web request and manage response."""
    _LOGGER.debug("Sending %s to %s", kwargs, url)
    try:
        res = await session(url, **kwargs)
        if res.content_type != 'application/json':
            raise ResponseError(
                "Invalid content type: {}".format(res.content_type))
        response = await res.json()
    except aiohttp.client_exceptions.ClientError as err:
        # Suppress the aiohttp context; the wrapped message is enough.
        raise RequestError(
            "Error requesting data from {}: {}".format(url, err)
        ) from None
    _LOGGER.debug("HTTP request response: %s", response)
    _raise_on_error(response)
    return response
2.76434
2.621907
1.054324
async def running(self):
    """Start websocket connection."""
    url = 'http://{}:{}'.format(self.host, self.port)
    try:
        async with self.session.ws_connect(url) as ws:
            self.state = STATE_RUNNING
            async for msg in ws:
                # stop() flips the state; leave the receive loop promptly.
                if self.state == STATE_STOPPED:
                    break
                elif msg.type == aiohttp.WSMsgType.TEXT:
                    self._data = json.loads(msg.data)
                    self.async_session_handler_callback('data')
                    _LOGGER.debug('Websocket data: %s', msg.data)
                elif msg.type == aiohttp.WSMsgType.CLOSED:
                    break
                elif msg.type == aiohttp.WSMsgType.ERROR:
                    break
    except aiohttp.ClientConnectorError:
        # Connection refused/unreachable; schedule a reconnect unless stopped.
        if self.state != STATE_STOPPED:
            self.retry()
    except Exception as err:
        _LOGGER.error('Unexpected error %s', err)
        if self.state != STATE_STOPPED:
            self.retry()
    else:
        # Clean close of the websocket also triggers a reconnect.
        if self.state != STATE_STOPPED:
            self.retry()
2.277552
2.176316
1.046517
def retry(self):
    """Retry to connect to deCONZ."""
    self.state = STATE_STARTING
    _LOGGER.debug('Reconnecting to deCONZ in %i.', RETRY_TIMER)
    # Schedule a delayed reconnect attempt on the event loop.
    self.loop.call_later(RETRY_TIMER, self.start)
7.824955
5.025208
1.55714
def start(self):
    """Start websocket connection."""
    if self.state == STATE_RUNNING:
        return
    connection = self.loop.create_connection(
        lambda: self, self.host, self.port)
    task = self.loop.create_task(connection)
    task.add_done_callback(self.init_done)
    self.state = STATE_STARTING
3.088888
2.794487
1.105351
def init_done(self, fut):
    """Server ready.

    If we get OSError during init the device is not available.
    """
    try:
        error = fut.exception()
        if error:
            # Re-raise inside the try so OSError lands in our handler.
            fut.result()
    except OSError as err:
        _LOGGER.debug('Got exception %s', err)
        self.retry()
5.550491
5.365185
1.034539
def stop(self):
    """Close websocket connection."""
    self.state = STATE_STOPPED
    transport = self.transport
    if transport:
        transport.close()
6.378397
4.579591
1.392787
def connection_made(self, transport):
    """Do the websocket handshake.

    According to https://tools.ietf.org/html/rfc6455
    """
    nonce = base64encode(os.urandom(16)).decode('utf-8').strip()
    self.transport = transport
    # Assemble the HTTP upgrade request; header bytes must match the
    # original handshake exactly.
    handshake = ''.join([
        'GET / HTTP/1.1\r\n',
        'Host: ' + self.host + ':' + str(self.port) + '\r\n',
        'User-Agent: Python/3.5 websockets/3.4\r\n',
        'Upgrade: Websocket\r\n',
        'Connection: Upgrade\r\n',
        'Sec-WebSocket-Key: ' + nonce + '\r\n',
        'Sec-WebSocket-Version: 13\r\n',
        '\r\n',
    ])
    _LOGGER.debug('Websocket handshake: %s', handshake)
    self.transport.write(handshake.encode())
2.273944
2.178768
1.043683
def data_received(self, data):
    """Data received over websocket.

    First received data will always be handshake accepting connection.
    We need to check how big the header is so we can send event data
    as a proper json object.
    """
    if self.state == STATE_STARTING:
        self.state = STATE_RUNNING
        _LOGGER.debug('Websocket handshake: %s', data.decode())
        return
    _LOGGER.debug('Websocket data: %s', data)
    # A single TCP chunk may carry several websocket frames; consume
    # them one at a time until the buffer is exhausted.
    while len(data) > 0:
        payload, extra_data = self.get_payload(data)
        self._data = payload
        # NOTE(review): the dispatch below is commented out in the
        # original — payloads are stored but apparently never signalled.
        ### self.async_session_handler_callback('data')###
        #self.async_callback(payload)
        data = extra_data
6.061214
5.76108
1.052097
def connection_lost(self, exc):
    """Happen when device closes connection or stop() has been called."""
    if self.state != STATE_RUNNING:
        return
    _LOGGER.warning('Lost connection to deCONZ')
    self.retry()
10.3822
8.94929
1.160114
def get_payload(self, data):
    """Parse length of payload and return it.

    Frames follow RFC 6455 section 5.2: byte 1 carries the payload
    length, where the special values 126 and 127 signal an extended
    16-bit respectively 64-bit length in the following bytes.

    :param data: raw websocket frame bytes (may contain several frames).
    :return: tuple of (decoded JSON payload, remaining bytes).
    """
    start = 2
    length = ord(data[1:2])
    if length == 126:
        # Extended payload length in the next 2 bytes (16-bit).
        start = 4
        length, = unpack(">H", data[2:4])
    elif length == 127:
        # Extended payload length in the next 8 bytes (64-bit).
        # Fix: the original read only 4 bytes (">I" over data[2:6]) and
        # started the payload at offset 8, mis-framing any message whose
        # length field used the 64-bit form.
        start = 10
        length, = unpack(">Q", data[2:10])
    end = start + length
    payload = json.loads(data[start:end].decode())
    extra_data = data[end:]
    return payload, extra_data
3.091065
2.862376
1.079895
def find_old_vidyo_rooms(max_room_event_age):
    """Finds all Vidyo rooms that are:

    - linked to no events
    - linked only to events whose start date precedes today - max_room_event_age days
    """
    # Subquery: ids of vidyo rooms attached to at least one event that
    # ended within the last ``max_room_event_age`` days.
    recently_used = (db.session.query(VCRoom.id)
                     .filter(VCRoom.type == 'vidyo',
                             Event.end_dt > (now_utc() - timedelta(days=max_room_event_age)))
                     .join(VCRoom.events)
                     .join(VCRoomEventAssociation.event)
                     .group_by(VCRoom.id))
    # non-deleted rooms with no recent associations
    return VCRoom.find_all(VCRoom.status != VCRoomStatus.deleted,
                           ~VCRoom.id.in_(recently_used))
4.243343
4.240516
1.000667
def notify_owner(plugin, vc_room):
    """Notifies about the deletion of a Vidyo room from the Vidyo server."""
    owner = vc_room.vidyo_extension.owned_by_user
    tpl = get_plugin_template_module('emails/remote_deleted.html',
                                     plugin=plugin,
                                     vc_room=vc_room,
                                     event=None,
                                     vc_room_event=None,
                                     user=owner)
    _send('delete', owner, plugin, None, vc_room, tpl)
8.065481
7.044084
1.145001
def rooms(status=None):
    """Lists all Vidyo rooms"""
    room_query = VCRoom.find(type='vidyo')
    table_data = [['ID', 'Name', 'Status', 'Vidyo ID', 'Extension']]
    if status:
        # Narrow the listing to rooms in the requested status.
        room_query = room_query.filter(VCRoom.status == VCRoomStatus.get(status))
    for room in room_query:
        table_data.append([unicode(room.id), room.name, room.status.name,
                           unicode(room.data['vidyo_id']),
                           unicode(room.vidyo_extension.extension)])
    table = AsciiTable(table_data)
    # Right-align the numeric columns (id, vidyo id, extension).
    for col in (0, 3, 4):
        table.justify_columns[col] = 'right'
    print table.table
3.584767
3.334688
1.074993
def config_vars(profiles, advance_access):
    """Utility method to fetch config vars using ini section profile

    :param profiles: profile name.
    :param advance_access: advance_access flag.
    :return: tuple (url, token, proxies, advance_access) from the first
        profile that loads, or (None, None, None, None) if none does.
    """
    for profile in profiles:
        try:
            config = Config(profile, advance_access=advance_access)
            return (config.api_endpoint, config.auth_token,
                    config.proxies, config.advance_access)
        except Exception:
            # Best effort: skip profiles that fail to load.
            continue
    return None, None, None, None
3.167827
3.462857
0.914801
def send(self, request, **kwargs):
    """Send prepared request

    :param request: Prepared request to be sent
    :param kwargs: request keyword arguments
    :return: Request response
    """
    url_length = len(request.url)
    if url_length > self.MAX_URL_LENGTH:
        raise URITooLong(
            message=('Request url too large, '
                     'likely too many query parameters provided.')
        )
    return super(RequestSession, self).send(request, **kwargs)
6.136736
6.288358
0.975889
def render_engine_or_search_template(template_name, **context):
    """Renders a template from the engine plugin or the search plugin

    If the template is available in the engine plugin, it's taken from
    there, otherwise the template from this plugin is used.

    :param template_name: name of the template
    :param context: the variables that should be available in the
                    context of the template.
    """
    from indico_search.plugin import SearchPlugin
    assert current_plugin == SearchPlugin.instance
    engine_name = SearchPlugin.instance.engine_plugin.name
    candidates = ('{}:{}'.format(engine_name, template_name), template_name)
    return render_plugin_template(candidates, **context)
6.411901
6.327328
1.013366
def iter_user_identities(user):
    """Iterates over all existing user identities that can be used with Vidyo"""
    from indico_vc_vidyo.plugin import VidyoPlugin
    providers = authenticators_re.split(VidyoPlugin.settings.get('authenticators'))
    seen = set()
    for provider in providers:
        for _, identifier in user.iter_identifiers(check_providers=True,
                                                   providers={provider}):
            # Yield each identifier only once across all providers.
            if identifier not in seen:
                seen.add(identifier)
                yield identifier
7.217531
6.907281
1.044916
def get_user_from_identifier(settings, identifier):
    """Get an actual User object from an identifier"""
    providers = list(auth.strip() for auth in settings.get('authenticators').split(','))
    # Fast path: an Identity row already exists for this identifier.
    identities = Identity.find_all(Identity.provider.in_(providers),
                                   Identity.identifier == identifier)
    if identities:
        # Prefer the identity whose provider comes first in the configured order.
        return sorted(identities, key=lambda x: providers.index(x.provider))[0].user
    # Otherwise query each provider and try matching a user by email.
    for provider in providers:
        try:
            identity_info = multipass.get_identity(provider, identifier)
        except IdentityRetrievalFailed:
            continue
        if identity_info is None:
            continue
        # Only trust emails from providers explicitly marked as trusted.
        if not identity_info.provider.settings.get('trusted_email'):
            continue
        emails = {email.lower() for email in identity_info.data.getlist('email') if email}
        if not emails:
            continue
        user = User.find_first(~User.is_deleted, User.all_emails.in_(list(emails)))
        if user:
            return user
3.784386
3.700202
1.022751
def iter_extensions(prefix, event_id):
    """Return extension (prefix + event_id) with an optional suffix which is
    incremented step by step in case of collision
    """
    base = '{}{}'.format(prefix, event_id)
    yield base
    counter = 1
    while True:
        yield '{}{}'.format(base, counter)
        counter += 1
2.589732
2.456139
1.054391
def update_room_from_obj(settings, vc_room, room_obj):
    """Updates a VCRoom DB object using a SOAP room object returned by the API"""
    vc_room.name = room_obj.name
    # Re-resolve the owner only when it changed on the Vidyo side; fall
    # back to the system user when the identifier matches nobody.
    if room_obj.ownerName != vc_room.data['owner_identity']:
        owner = get_user_from_identifier(settings, room_obj.ownerName) or User.get_system_user()
        vc_room.vidyo_extension.owned_by_user = owner
    vc_room.data.update({
        'description': room_obj.description,
        'vidyo_id': unicode(room_obj.roomID),
        'url': room_obj.RoomMode.roomURL,
        'owner_identity': room_obj.ownerName,
        'room_pin': room_obj.RoomMode.roomPIN if room_obj.RoomMode.hasPIN else "",
        'moderation_pin': room_obj.RoomMode.moderatorPIN if room_obj.RoomMode.hasModeratorPIN else "",
    })
    vc_room.vidyo_extension.extension = int(room_obj.extension)
3.848726
3.737112
1.029866
def get(cls, api=None, **kwargs):
    """Get api links.

    :param api: Api instance.
    :return: Endpoints object.
    """
    api = api or cls._API
    logger.info('Getting resources',
                extra={'resource': cls.__name__, 'query': {}})
    endpoints = api.get(url=cls._URL['get']).json()
    return Endpoints(api=api, **endpoints)
6.317633
5.678765
1.112501
def parse_chain(*args):
    """Creates a new parser that executes the passed parsers (args) with the
    previous results and yields a tuple of the results.

    >>> list(parse_chain(lambda: (None, 1), lambda one: (None, 2)))
    [None, None, (1, 2)]

    @param args: parsers
    @returns: parser
    """
    collected = []
    for parser in args:
        # Feed everything parsed so far into the next parser. A None
        # element means "need more data" and is passed straight through.
        for element in parser(*collected):  # pylint:disable=star-args
            if element is None:
                yield None
            else:
                collected.append(element)
                break
    yield tuple(collected)
4.259937
4.395608
0.969135
def pack_ip(ipstr):
    """Converts an ip address given in dotted notation to a four byte
    string in network byte order.

    >>> len(pack_ip("127.0.0.1"))
    4
    >>> pack_ip("foo")
    Traceback (most recent call last):
    ...
    ValueError: given ip address has an invalid number of dots

    @type ipstr: str
    @rtype: bytes
    @raises ValueError: for badly formatted ip addresses
    """
    if not isinstance(ipstr, basestring):
        raise ValueError("given ip address is not a string")
    octets = ipstr.split('.')
    if len(octets) != 4:
        raise ValueError("given ip address has an invalid number of dots")
    numbers = [int(octet) for octet in octets]  # raises ValueError
    return int_seq_to_bytes(numbers)
3.117455
2.579593
1.208506
def unpack_ip(fourbytes):
    """Converts an ip address given in a four byte string in network byte
    order to a string in dotted notation.

    >>> unpack_ip(b"dead")
    '100.101.97.100'
    >>> unpack_ip(b"alive")
    Traceback (most recent call last):
    ...
    ValueError: given buffer is not exactly four bytes long

    @type fourbytes: bytes
    @rtype: str
    @raises ValueError: for bad input
    """
    if not isinstance(fourbytes, bytes):
        raise ValueError("given buffer is not a string")
    if len(fourbytes) != 4:
        raise ValueError("given buffer is not exactly four bytes long")
    return ".".join(str(part) for part in bytes_to_int_seq(fourbytes))
2.976521
2.403917
1.238196
def pack_mac(macstr):
    """Converts a mac address given in colon delimited notation to a six
    byte string in network byte order.

    >>> pack_mac("30:31:32:33:34:35") == b'012345'
    True
    >>> pack_mac("bad")
    Traceback (most recent call last):
    ...
    ValueError: given mac addresses has an invalid number of colons

    @type macstr: str
    @rtype: bytes
    @raises ValueError: for badly formatted mac addresses
    """
    if not isinstance(macstr, basestring):
        raise ValueError("given mac addresses is not a string")
    groups = macstr.split(":")
    if len(groups) != 6:
        raise ValueError("given mac addresses has an invalid number of colons")
    numbers = [int(group, 16) for group in groups]  # raises ValueError
    return int_seq_to_bytes(numbers)
3.142271
2.456207
1.279319
def unpack_mac(sixbytes):
    """Converts a mac address given in a six byte string in network byte
    order to a string in colon delimited notation.

    >>> unpack_mac(b"012345")
    '30:31:32:33:34:35'
    >>> unpack_mac(b"bad")
    Traceback (most recent call last):
    ...
    ValueError: given buffer is not exactly six bytes long

    @type sixbytes: bytes
    @rtype: str
    @raises ValueError: for bad input
    """
    if not isinstance(sixbytes, bytes):
        raise ValueError("given buffer is not a string")
    if len(sixbytes) != 6:
        raise ValueError("given buffer is not exactly six bytes long")
    return ":".join("%2.2x" % part for part in bytes_to_int_seq(sixbytes))
3.71285
3.069366
1.209647
def add(self, data):
    """Append ``data`` to the buffer, enforcing the size limit.

    >>> ob = OutBuffer().add(OutBuffer.sizelimit * b"x")
    >>> ob.add(b"y")  # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    OmapiSizeLimitError: ...

    @type data: bytes
    @returns: self
    @raises OmapiSizeLimitError:
    """
    if len(data) + len(self) > self.sizelimit:
        raise OmapiSizeLimitError()
    self.buff.write(data)
    return self
6.632347
2.815151
2.355947
def add_net32string(self, string):
    """Append a string prefixed with its 32-bit big-endian length.

    >>> r = b'\\x00\\x00\\x00\\x01x'
    >>> OutBuffer().add_net32string(b"x").getvalue() == r
    True

    @type string: bytes
    @param string: maximum length must fit in a 32bit integer
    @returns: self
    @raises OmapiSizeLimitError:
    """
    if len(string) >= (1 << 32):
        raise ValueError("string too long")
    self.add_net32int(len(string))
    return self.add(string)
3.879846
4.215254
0.92043
def add_net16string(self, string):
    """Append a string prefixed with its 16-bit big-endian length.

    >>> OutBuffer().add_net16string(b"x").getvalue() == b'\\x00\\x01x'
    True

    @type string: bytes
    @param string: maximum length must fit in a 16bit integer
    @returns: self
    @raises OmapiSizeLimitError:
    """
    if len(string) >= (1 << 16):
        raise ValueError("string too long")
    self.add_net16int(len(string))
    return self.add(string)
3.995985
3.790365
1.054248
def add_bindict(self, items):
    """Append a key/value dictionary terminated by an empty key.

    >>> r = b'\\x00\\x03foo\\x00\\x00\\x00\\x03bar\\x00\\x00'
    >>> OutBuffer().add_bindict({b"foo": b"bar"}).getvalue() == r
    True

    @type items: [(bytes, bytes)] or {bytes: bytes}
    @returns: self
    @raises OmapiSizeLimitError:
    """
    pairs = items if isinstance(items, list) else items.items()
    for key, value in pairs:
        self.add_net16string(key)
        self.add_net32string(value)
    # An empty 16-bit key terminates the dictionary on the wire.
    return self.add(b"\x00\x00")
4.167023
3.907739
1.066351
def consume(self, length):
    """Drop the first ``length`` bytes from the buffer.

    >>> OutBuffer().add(b"spam").consume(2).getvalue() == b"am"
    True

    @type length: int
    @returns: self
    """
    remainder = self.getvalue()[length:]
    self.buff = io.BytesIO(remainder)
    return self
8.113247
9.714465
0.835172
def validate(self):
    """Checks whether this OmapiStartupMessage matches the implementation.

    @raises OmapiError:
    """
    if self.protocol_version != self.implemented_protocol_version:
        raise OmapiError("protocol mismatch")
    if self.header_size != self.implemented_header_size:
        raise OmapiError("header size mismatch")
4.735562
2.742932
1.72646
def serialize(self, outbuffer):
    """Serialize this OmapiStartupMessage to the given outbuffer.

    @type outbuffer: OutBuffer
    """
    # Wire format: protocol version followed by header size, both 32-bit.
    for value in (self.protocol_version, self.header_size):
        outbuffer.add_net32int(value)
4.955482
6.442458
0.769191
def sign(self, message):
    """Compute the HMAC-MD5 signature of ``message`` with this key.

    >>> authlen = OmapiHMACMD5Authenticator.authlen
    >>> len(OmapiHMACMD5Authenticator(b"foo", 16*b"x").sign(b"baz")) == authlen
    True

    @type message: bytes
    @rtype: bytes
    @returns: a signature of length self.authlen
    """
    mac = hmac.new(self.key, message, digestmod=hashlib.md5)
    return mac.digest()
4.04949
4.658982
0.869179
def as_string(self, forsigning=False):
    """Serialize this message into a byte string.

    >>> len(OmapiMessage().as_string(True)) >= 24
    True

    @type forsigning: bool
    @rtype: bytes
    @raises OmapiSizeLimitError:
    """
    buff = OutBuffer()
    self.serialize(buff, forsigning)
    return buff.getvalue()
5.887165
9.341771
0.630198
def sign(self, authenticator):
    """Sign this OMAPI message.

    @type authenticator: OmapiAuthenticatorBase
    """
    self.authid = authenticator.authid
    # Serialize once with a zero-filled signature of the right length
    # (the signed form must already contain the signature field), then
    # replace it with the real signature.
    self.signature = b"\0" * authenticator.authlen
    self.signature = authenticator.sign(self.as_string(forsigning=True))
    assert len(self.signature) == authenticator.authlen
5.729838
5.404449
1.060208
def verify(self, authenticators):
    """Verify this OMAPI message.

    >>> a1 = OmapiHMACMD5Authenticator(b"egg", b"spam")
    >>> a1.authid = 5
    >>> m = OmapiMessage.open(b"host")
    >>> m.verify({a1.authid: a1})
    False
    >>> m.sign(a1)
    >>> m.verify({a1.authid: a1})
    True

    @type authenticators: {int: OmapiAuthenticatorBase}
    @rtype: bool
    """
    try:
        expected = authenticators[self.authid]. \
            sign(self.as_string(forsigning=True))
    except KeyError:
        # Unknown authid: cannot verify.
        return False
    return expected == self.signature
8.172126
7.085567
1.153348
def open(cls, typename):
    """Create an OMAPI open message with given typename.

    @type typename: bytes
    @rtype: OmapiMessage
    """
    message = [(b"type", typename)]
    return cls(opcode=OMAPI_OP_OPEN, message=message, tid=-1)
24.612831
13.400709
1.836681
def parse_net16string(self):
    """Parse a string prefixed with its 16-bit length.

    >>> next(InBuffer(b"\\0\\x03eggs").parse_net16string()) == b'egg'
    True
    """
    chained = parse_chain(self.parse_net16int, self.parse_fixedbuffer)
    # Keep only the buffer, dropping the already-consumed length.
    return parse_map(operator.itemgetter(1), chained)
13.228507
12.922791
1.023657
def parse_net32string(self):
    """Parse a string prefixed with its 32-bit length.

    >>> next(InBuffer(b"\\0\\0\\0\\x03eggs").parse_net32string()) == b'egg'
    True
    """
    chained = parse_chain(self.parse_net32int, self.parse_fixedbuffer)
    # Keep only the buffer, dropping the already-consumed length.
    return parse_map(operator.itemgetter(1), chained)
13.117565
12.356104
1.061626
def parse_bindict(self):
    """Parse an OMAPI key/value dictionary terminated by an empty key.

    >>> d = b"\\0\\x01a\\0\\0\\0\\x01b\\0\\0spam"
    >>> next(InBuffer(d).parse_bindict()) == [(b'a', b'b')]
    True
    """
    entries = []
    try:
        while True:
            for key in self.parse_net16string():
                if key is None:
                    # Not enough data yet; signal "more needed".
                    yield None
                elif not key:
                    # Empty key marks the end of the dictionary.
                    raise StopIteration()
                else:
                    for value in self.parse_net32string():
                        if value is None:
                            yield None
                        else:
                            entries.append((key, value))
                            break
                    break
    # Abusing StopIteration here, since nothing should be throwing
    # it at us.
    except StopIteration:
        yield entries
4.713017
4.408197
1.069148
def parse_startup_message(self):
    """results in an OmapiStartupMessage

    >>> d = b"\\0\\0\\0\\x64\\0\\0\\0\\x18"
    >>> next(InBuffer(d).parse_startup_message()).validate()
    """
    # Two consecutive 32-bit integers: protocol version, header size.
    chained = parse_chain(self.parse_net32int,
                          lambda _: self.parse_net32int())
    return parse_map(lambda args: OmapiStartupMessage(*args), chained)
26.106514
14.439355
1.808011
def parse_message(self):
    """results in an OmapiMessage"""
    # Wire field order: authid, authlen, opcode, handle, tid, rid,
    # message, object, signature (whose size is the parsed authlen).
    parser = parse_chain(self.parse_net32int,  # authid
                         lambda *_: self.parse_net32int(),  # authlen
                         lambda *_: self.parse_net32int(),  # opcode
                         lambda *_: self.parse_net32int(),  # handle
                         lambda *_: self.parse_net32int(),  # tid
                         lambda *_: self.parse_net32int(),  # rid
                         lambda *_: self.parse_bindict(),  # message
                         lambda *_: self.parse_bindict(),  # object
                         lambda *args: self.parse_fixedbuffer(args[1]))  # signature
    return parse_map(lambda args:  # skip authlen in args:
                     OmapiMessage(*(args[0:1] + args[2:])),
                     parser)
5.230266
4.472938
1.169313
def fill_inbuffer(self):
    """Read bytes from the connection and hand them to the protocol.

    @raises OmapiError:
    @raises socket.error:
    """
    if not self.connection:
        raise OmapiError("not connected")
    try:
        data = self.connection.recv(2048)
    except socket.error:
        self.close()
        raise
    if not data:
        # Empty read means the remote end closed the socket.
        self.close()
        raise OmapiError("connection closed")
    try:
        self.protocol.data_received(data)
    except OmapiSizeLimitError:
        # Buffer overflow: tear the connection down before re-raising.
        self.close()
        raise
2.675797
2.014746
1.328106
def write(self, data):
    """Send all of data to the connection.

    @type data: bytes
    @raises socket.error:
    """
    try:
        self.connection.sendall(data)
    except socket.error:
        # Close first so the caller observes a clean state, then re-raise.
        self.close()
        raise
2.789766
2.847735
0.979644
def send_message(self, message, sign=True):
    """Send the given message to the connection.

    @type message: OmapiMessage
    @param sign: whether the message needs to be signed
    @raises OmapiError:
    @raises socket.error:
    """
    if sign:
        # Sign with the default authenticator before serializing.
        message.sign(self.authenticators[self.defauth])
    logger.debug("sending %s", LazyStr(message.dump_oneline))
    self.transport.write(message.as_string())
7.308049
8.407146
0.869266
def receive_message(self):
    """Read the next message from the connection.

    @rtype: OmapiMessage
    @raises OmapiError:
    @raises socket.error:
    """
    # Block until the protocol has decoded at least one full message.
    while not self.recv_message_queue:
        self.transport.fill_inbuffer()
    message = self.recv_message_queue.pop(0)
    assert message is not None
    if message.verify(self.protocol.authenticators):
        return message
    self.close()
    raise OmapiError("bad omapi message signature")
5.72751
4.362914
1.312772
def receive_response(self, message, insecure=False):
    """Read the response for the given message.

    @type message: OmapiMessage
    @type insecure: bool
    @param insecure: avoid an OmapiError about a wrong authenticator
    @rtype: OmapiMessage
    @raises OmapiError:
    @raises socket.error:
    """
    response = self.receive_message()
    if not response.is_response(message):
        raise OmapiError("received message is not the desired response")
    # signature already verified
    if insecure or response.authid == self.protocol.defauth:
        return response
    raise OmapiError("received message is signed with wrong authenticator")
6.740886
4.94544
1.363051
def send_message(self, message, sign=True):
    """Sends the given message to the connection.

    @type message: OmapiMessage
    @type sign: bool
    @param sign: whether the message needs to be signed
    @raises OmapiError:
    @raises socket.error:
    """
    # Fails with OmapiError when the client is not connected.
    self.check_connected()
    self.protocol.send_message(message, sign)
5.114345
5.455906
0.937396
def lookup_ip_host(self, mac):
    """Lookup a host object with given mac address and return its ip.

    @type mac: str
    @raises ValueError:
    @raises OmapiError:
    @raises OmapiErrorNotFound: if no lease object with the given mac
        could be found
    @raises OmapiErrorAttributeNotFound: if lease could be found, but
        objects lacks a ip
    @raises socket.error:
    """
    record = self.lookup_by_host(mac=mac)
    if "ip-address" not in record:
        raise OmapiErrorAttributeNotFound()
    return record["ip-address"]
8.173469
4.521623
1.807641
def lookup_ip(self, mac):
    """Look for a lease object with given mac address and return the
    assigned ip address.

    @type mac: str
    @rtype: str or None
    @raises ValueError:
    @raises OmapiError:
    @raises OmapiErrorNotFound: if no lease object with the given mac
        could be found
    @raises OmapiErrorAttributeNotFound: if lease could be found, but
        objects lacks a ip
    @raises socket.error:
    """
    record = self.lookup_by_lease(mac=mac)
    if "ip-address" not in record:
        raise OmapiErrorAttributeNotFound()
    return record["ip-address"]
9.948444
4.806276
2.069886
def lookup_mac(self, ip):
    """Look up a lease object with given ip address and return the
    associated mac address.

    @type ip: str
    @rtype: str or None
    @raises ValueError:
    @raises OmapiError:
    @raises OmapiErrorNotFound: if no lease object with the given ip
        could be found
    @raises OmapiErrorAttributeNotFound: if lease could be found, but
        objects lacks a mac
    @raises socket.error:
    """
    record = self.lookup_by_lease(ip=ip)
    if "hardware-address" not in record:
        raise OmapiErrorAttributeNotFound()
    return record["hardware-address"]
10.703936
5.518032
1.93981
def lookup_host(self, name):
    """Look for a host object with given name and return the name, mac,
    and ip address.

    @type name: str
    @rtype: dict or None
    @raises ValueError:
    @raises OmapiError:
    @raises OmapiErrorNotFound: if no host object with the given name
        could be found
    @raises OmapiErrorAttributeNotFound: if lease could be found, but
        objects lacks ip, mac or name
    @raises socket.error:
    """
    record = self.lookup_by_host(name=name)
    try:
        return dict(ip=record["ip-address"],
                    mac=record["hardware-address"],
                    hostname=record["name"].decode('utf-8'))
    except KeyError:
        raise OmapiErrorAttributeNotFound()
6.563434
4.08755
1.605714
def lookup_host_host(self, mac):
    """Look for a host object with given mac address and return the name,
    mac, and ip address.

    @type mac: str
    @rtype: dict or None
    @raises ValueError:
    @raises OmapiError:
    @raises OmapiErrorNotFound: if no host object with the given mac
        address could be found
    @raises OmapiErrorAttributeNotFound: if lease could be found, but
        objects lacks ip, mac or name
    @raises socket.error:
    """
    record = self.lookup_by_host(mac=mac)
    try:
        return dict(ip=record["ip-address"],
                    mac=record["hardware-address"],
                    name=record["name"].decode('utf-8'))
    except KeyError:
        raise OmapiErrorAttributeNotFound()
5.963147
3.881809
1.536177
def lookup_hostname(self, ip):
    """Look up a lease object with given ip address and return the
    associated client hostname.

    @type ip: str
    @rtype: str or None
    @raises ValueError:
    @raises OmapiError:
    @raises OmapiErrorNotFound: if no lease object with the given ip
        address could be found
    @raises OmapiErrorAttributeNotFound: if lease could be found, but
        objects lacks a hostname
    @raises socket.error:
    """
    record = self.lookup_by_lease(ip=ip)
    try:
        hostname = record["client-hostname"]
    except KeyError:
        raise OmapiErrorAttributeNotFound()
    return hostname.decode('utf-8')
8.681581
4.189649
2.07215
ltype_utf = ltype.encode("utf-8") assert ltype_utf in [b"host", b"lease"] msg = OmapiMessage.open(ltype_utf) for k in kwargs: if k == "raw": continue _k = k.replace("_", "-") if _k in ["ip", "ip-address"]: msg.obj.append((b"ip-address", pack_ip(kwargs[k]))) elif _k in ["mac", "hardware-address"]: msg.obj.append((b"hardware-address", pack_mac(kwargs[k]))) msg.obj.append((b"hardware-type", struct.pack("!I", 1))) elif _k == "name": msg.obj.append((b"name", kwargs[k].encode('utf-8'))) else: msg.obj.append((str(k).encode(), kwargs[k].encode('utf-8'))) response = self.query_server(msg) if response.opcode != OMAPI_OP_UPDATE: raise OmapiErrorNotFound() if "raw" in kwargs and kwargs["raw"]: return dict(response.obj) res = dict() for k, v in dict(response.obj).items(): _k = k.decode('utf-8') try: if _k == "ip-address": v = unpack_ip(v) elif _k in ["hardware-address"]: v = unpack_mac(v) elif _k in ["starts", "ends", "tstp", "tsfp", "atsfp", "cltt", "subnet", "pool", "state", "hardware-type"]: v = struct.unpack(">I", v)[0] elif _k in ["flags"]: v = struct.unpack(">I", v)[0] except struct.error: pass res[_k] = v return res
def __lookup(self, ltype, **kwargs)
Generic Lookup function @type ltype: str @type rvalues: list @type ip: str @type mac: str @type name: str @rtype: dict or str (if len(rvalues) == 1) or None @raises ValueError: @raises OmapiError: @raises OmapiErrorNotFound: if no host object with the given name could be found or the object lacks an ip address or mac @raises socket.error:
2.83199
2.716934
1.042348
msg = OmapiMessage.open(b"host") msg.message.append((b"create", struct.pack("!I", 1))) msg.message.append((b"exclusive", struct.pack("!I", 1))) msg.obj.append((b"hardware-address", pack_mac(mac))) msg.obj.append((b"hardware-type", struct.pack("!I", 1))) msg.obj.append((b"ip-address", pack_ip(ip))) response = self.query_server(msg) if response.opcode != OMAPI_OP_UPDATE: raise OmapiError("add failed")
def add_host(self, ip, mac)
Create a host object with given ip address and and mac address. @type ip: str @type mac: str @raises ValueError: @raises OmapiError: @raises socket.error:
3.8632
3.570996
1.081827
raise OmapiError("add failed")
def add_host_supersede_name(self, ip, mac, name): # pylint:disable=E0213 msg = OmapiMessage.open(b"host") msg.message.append((b"create", struct.pack("!I", 1))) msg.message.append((b"exclusive", struct.pack("!I", 1))) msg.obj.append((b"hardware-address", pack_mac(mac))) msg.obj.append((b"hardware-type", struct.pack("!I", 1))) msg.obj.append((b"ip-address", pack_ip(ip))) msg.obj.append((b"name", name.encode('utf-8'))) msg.obj.append((b"statements", 'supersede host-name "{0}";'.format(name).encode('utf-8'))) response = self.query_server(msg) if response.opcode != OMAPI_OP_UPDATE
Add a host with a fixed-address and override its hostname with the given name. @type self: Omapi @type ip: str @type mac: str @type name: str @raises ValueError: @raises OmapiError: @raises socket.error:
44.115528
20.111576
2.193539
stmts.append('supersede host-name "{0}";\n '.format(hostname)) if router: stmts.append('supersede routers {0};\n '.format(router)) if domain: stmts.append('supersede domain-name "{0}";'.format(domain)) if stmts: encoded_stmts = "".join(stmts).encode("utf-8") msg.obj.append((b"statements", encoded_stmts)) response = self.query_server(msg) if response.opcode != OMAPI_OP_UPDATE: raise OmapiError("add failed")
def add_host_supersede(self, ip, mac, name, hostname=None, router=None, domain=None): # pylint:disable=too-many-arguments stmts = [] msg = OmapiMessage.open(b"host") msg.message.append((b"create", struct.pack("!I", 1))) msg.obj.append((b"name", name)) msg.obj.append((b"hardware-address", pack_mac(mac))) msg.obj.append((b"hardware-type", struct.pack("!I", 1))) msg.obj.append((b"ip-address", pack_ip(ip))) if hostname
Create a host object with given ip, mac, name, hostname, router and domain. hostname, router and domain are optional arguments. @type ip: str @type mac: str @type name: str @type hostname: str @type router: str @type domain: str @raises OmapiError: @raises socket.error:
3.264006
3.434105
0.950468
msg = OmapiMessage.open(b"host") msg.obj.append((b"hardware-address", pack_mac(mac))) msg.obj.append((b"hardware-type", struct.pack("!I", 1))) response = self.query_server(msg) if response.opcode != OMAPI_OP_UPDATE: raise OmapiErrorNotFound() if response.handle == 0: raise OmapiError("received invalid handle from server") response = self.query_server(OmapiMessage.delete(response.handle)) if response.opcode != OMAPI_OP_STATUS: raise OmapiError("delete failed")
def del_host(self, mac)
Delete a host object with with given mac address. @type mac: str @raises ValueError: @raises OmapiError: @raises OmapiErrorNotFound: if no lease object with the given mac address could be found @raises socket.error:
4.327386
3.641381
1.188391
msg = OmapiMessage.open(b"group") msg.message.append(("create", struct.pack("!I", 1))) msg.obj.append(("name", groupname)) msg.obj.append(("statements", statements)) response = self.query_server(msg) if response.opcode != OMAPI_OP_UPDATE: raise OmapiError("add group failed")
def add_group(self, groupname, statements)
Adds a group @type groupname: bytes @type statements: str
6.185861
5.958635
1.038134
msg = OmapiMessage.open(b"host") msg.message.append(("create", struct.pack("!I", 1))) msg.message.append(("exclusive", struct.pack("!I", 1))) msg.obj.append(("hardware-address", pack_mac(mac))) msg.obj.append(("hardware-type", struct.pack("!I", 1))) msg.obj.append(("ip-address", pack_ip(ip))) msg.obj.append(("group", groupname)) response = self.query_server(msg) if response.opcode != OMAPI_OP_UPDATE: raise OmapiError("add failed")
def add_host_with_group(self, ip, mac, groupname)
Adds a host with given ip and mac in a group named groupname @type ip: str @type mac: str @type groupname: str
3.864981
4.03215
0.958541
m1 = OmapiMessage.open(b"host") m1.update_object(dict(name=name)) r1 = self.query_server(m1) if r1.opcode != OMAPI_OP_UPDATE: raise OmapiError("opening host %s failed" % name) m2 = OmapiMessage.update(r1.handle) m2.update_object(dict(group=group)) r2 = self.query_server(m2) if r2.opcode != OMAPI_OP_UPDATE: raise OmapiError("changing group of host %s to %s failed" % (name, group))
def change_group(self, name, group)
Change the group of a host given the name of the host. @type name: str @type group: str
3.352106
3.162732
1.059877
from sevenbridges.models.link import Link from sevenbridges.meta.collection import Collection api = kwargs.pop('api', cls._API) url = kwargs.pop('url') extra = {'resource': cls.__name__, 'query': kwargs} logger.info('Querying {} resource'.format(cls), extra=extra) response = api.get(url=url, params=kwargs) data = response.json() total = response.headers['x-total-matching-query'] items = [cls(api=api, **item) for item in data['items']] links = [Link(**link) for link in data['links']] href = data['href'] return Collection( resource=cls, href=href, total=total, items=items, links=links, api=api )
def _query(cls, **kwargs)
Generic query implementation that is used by the resources.
3.177733
3.097051
1.026051
id = Transform.to_resource(id) api = api if api else cls._API if 'get' in cls._URL: extra = {'resource': cls.__name__, 'query': {'id': id}} logger.info('Fetching {} resource'.format(cls), extra=extra) resource = api.get(url=cls._URL['get'].format(id=id)).json() return cls(api=api, **resource) else: raise SbgError('Unable to fetch resource!')
def get(cls, id, api=None)
Fetches the resource from the server. :param id: Resource identifier :param api: sevenbridges Api instance. :return: Resource object.
5.2391
4.955226
1.057288
if 'delete' in self._URL: extra = {'resource': self.__class__.__name__, 'query': { 'id': self.id}} logger.info("Deleting {} resource.".format(self), extra=extra) self._api.delete(url=self._URL['delete'].format(id=self.id)) else: raise SbgError('Resource can not be deleted!')
def delete(self)
Deletes the resource on the server.
5.928202
6.235806
0.950671
try: if hasattr(self, 'href'): data = self._api.get(self.href, append_base=False).json() resource = self.__class__(api=self._api, **data) elif hasattr(self, 'id') and hasattr(self, '_URL') and \ 'get' in self._URL: data = self._api.get( self._URL['get'].format(id=self.id)).json() resource = self.__class__(api=self._api, **data) else: raise SbgError('Resource can not be refreshed!') query = {'id': self.id} if hasattr(self, 'id') else {} extra = {'resource': self.__class__.__name__, 'query': query} logger.info('Reloading {} resource.'.format(self), extra=extra) except Exception: raise SbgError('Resource can not be refreshed!') self._data = resource._data self._dirty = resource._dirty self._old = copy.deepcopy(self._data.data) return self
def reload(self)
Refreshes the resource with the data from the server.
3.250774
3.090307
1.051926
if isinstance(record, list): return [cls._convert(r) for r in record] else: return [cls._convert(record)]
def convert(cls, record)
Converts a single dictionary or list of dictionaries into converted list of dictionaries.
2.867174
2.411356
1.18903
if isinstance(record, list): return [cls._convert(r) for r in record] else: return cls._convert(record)
def _convert_internal(cls, record)
Converts a single dictionary into converted dictionary or list of dictionaries into converted list of dictionaries. Used while passing dictionaries to another converter.
2.670045
2.30317
1.159291
if not record: return {} converted_dict = {} for field in cls.conversion: key = field[0] if len(field) >= 2 and field[1]: converted_key = field[1] else: converted_key = key if len(field) >= 3 and field[2]: conversion_method = field[2] else: conversion_method = cls.default_conversion_method if len(field) >= 4: converter = field[3] else: converter = None try: value = conversion_method(record[key]) except KeyError: continue if converter: value = converter._convert_internal(value) if converted_key is APPEND: if isinstance(value, list): for v in value: converted_dict.update(v) else: converted_dict.update(value) else: converted_dict[converted_key] = value return converted_dict
def _convert(cls, record)
Core method of the converter. Converts a single dictionary into another dictionary.
2.258859
2.208062
1.023005
while response.status_code == 429: headers = response.headers remaining_time = headers.get('X-RateLimit-Reset') sleep = int(remaining_time) - int(time.time()) logger.warning('Rate limit reached! Waiting for [%s]s', sleep) time.sleep(sleep + 5) response = api.session.send(response.request) return response
def rate_limit_sleeper(api, response)
Pauses the execution if rate limit is breached. :param api: Api instance. :param response: requests.Response object
2.983446
3.041789
0.980819
while response.status_code == 503: logger.info('Service unavailable: Response=[%s]', six.text_type(response.__dict__)) response_body = response.json() if 'code' in response_body: if response_body['code'] == 0: logger.warning('API Maintenance in progress!' ' Waiting for [%s]s', sleep) time.sleep(sleep) response = api.session.send(response.request) else: return response else: return response return response
def maintenance_sleeper(api, response, sleep=300)
Pauses the execution if sevenbridges api is under maintenance. :param api: Api instance. :param response: requests.Response object. :param sleep: Time to sleep in between the requests.
3.609326
3.718039
0.970761
while response.status_code >= 500: logger.warning('Caught [%s] status code! Waiting for [%s]s', response.status_code, sleep) time.sleep(sleep) response = api.session.send(response.request) return response
def general_error_sleeper(api, response, sleep=300)
Pauses the execution if response status code is > 500. :param api: Api instance. :param response: requests.Response object :param sleep: Time to sleep in between the requests.
3.42443
3.325069
1.029882
service = {'type': VolumeType.GOOGLE, 'bucket': bucket, 'credentials': {'client_email': client_email, 'private_key': private_key } } if prefix: service['prefix'] = prefix if properties: service['properties'] = properties data = {'name': name, 'service': service, 'access_mode': access_mode } if description: data['description'] = description api = api or cls._API extra = { 'resource': cls.__name__, 'query': data } logger.info('Creating google volume', extra=extra) response = api.post(url=cls._URL['query'], data=data).json() return Volume(api=api, **response)
def create_google_volume(cls, name, bucket, client_email, private_key, access_mode, description=None, prefix=None, properties=None, api=None)
Create s3 volume. :param name: Volume name. :param bucket: Referenced bucket. :param client_email: Google client email. :param private_key: Google client private key. :param access_mode: Access Mode. :param description: Volume description. :param prefix: Volume prefix. :param properties: Volume properties. :param api: Api instance. :return: Volume object.
2.89767
3.148921
0.920211
service = { 'type': VolumeType.OSS, 'bucket': bucket, 'endpoint': endpoint, 'credentials': { 'access_key_id': access_key_id, 'secret_access_key': secret_access_key } } if prefix: service['prefix'] = prefix if properties: service['properties'] = properties data = { 'name': name, 'service': service, 'access_mode': access_mode } if description: data['description'] = description api = api or cls._API extra = { 'resource': cls.__name__, 'query': data } logger.info('Creating oss volume', extra=extra) response = api.post(url=cls._URL['query'], data=data).json() return Volume(api=api, **response)
def create_oss_volume(cls, name, bucket, endpoint, access_key_id, secret_access_key, access_mode, description=None, prefix=None, properties=None, api=None)
Create oss volume. :param name: Volume name. :param bucket: Referenced bucket. :param access_key_id: Access key identifier. :param secret_access_key: Secret access key. :param access_mode: Access Mode. :param endpoint: Volume Endpoint. :param description: Volume description. :param prefix: Volume prefix. :param properties: Volume properties. :param api: Api instance. :return: Volume object.
2.551333
2.659571
0.959303
param = {'location': location} data = self._api.get(url=self._URL['object'].format( id=self.id), params=param).json() return VolumeObject(api=self._api, **data)
def get_volume_object_info(self, location)
Fetches information about single volume object - usually file :param location: object location :return:
5.035237
6.059106
0.83102
api = api if api else self._API response = api.get( url=self._URL['member'].format(id=self.id, username=username), ) data = response.json() return Member(api=api, **data)
def get_member(self, username, api=None)
Fetches information about a single volume member :param username: Member name :param api: Api instance :return: Member object
3.787394
4.135363
0.915855
data = {} volume = Transform.to_volume(volume) if project and parent: raise SbgError( 'Project and parent identifiers are mutually exclusive' ) elif project: project = Transform.to_project(project) destination = { 'project': project } elif parent: parent = Transform.to_file(parent) destination = { 'parent': parent } else: raise SbgError('Project or parent identifier is required.') source = { 'volume': volume, 'location': location } if name: destination['name'] = name data['source'] = source data['destination'] = destination data['overwrite'] = overwrite if not preserve_folder_structure: data['preserve_folder_structure'] = preserve_folder_structure if properties: data['properties'] = properties api = api if api else cls._API extra = { 'resource': cls.__name__, 'query': data } logger.info('Submitting import', extra=extra) _import = api.post(cls._URL['query'], data=data).json() return Import(api=api, **_import)
def submit_import(cls, volume, location, project=None, name=None, overwrite=False, properties=None, parent=None, preserve_folder_structure=True, api=None)
Submits new import job. :param volume: Volume identifier. :param location: Volume location. :param project: Project identifier. :param name: Optional file name. :param overwrite: If true it will overwrite file if exists. :param properties: Properties dictionary. :param parent: The ID of the target folder to which the item should be imported. Should not be used together with project. :param preserve_folder_structure: Whether to keep the exact source folder structure. The default value is true if the item being imported is a folder. Should not be used if you are importing a file. :param api: Api instance. :return: Import object.
2.763225
2.900041
0.952823
api = api or cls._API if project: project = Transform.to_project(project) if volume: volume = Transform.to_volume(volume) return super(Import, cls)._query( url=cls._URL['query'], project=project, volume=volume, state=state, fields='_all', offset=offset, limit=limit, api=api )
def query(cls, project=None, volume=None, state=None, offset=None, limit=None, api=None)
Query (List) imports. :param project: Optional project identifier. :param volume: Optional volume identifier. :param state: Optional import sate. :param offset: Pagination offset. :param limit: Pagination limit. :param api: Api instance. :return: Collection object.
3.568091
3.651994
0.977025
api = api or cls._API import_ids = [Transform.to_import(import_) for import_ in imports] data = {'import_ids': import_ids} response = api.post(url=cls._URL['bulk_get'], data=data) return ImportBulkRecord.parse_records(response=response, api=api)
def bulk_get(cls, imports, api=None)
Retrieve imports in bulk :param imports: Imports to be retrieved. :param api: Api instance. :return: List of ImportBulkRecord objects.
4.106706
4.128302
0.994769
if not imports: raise SbgError('Imports are required') api = api or cls._API items = [] for import_ in imports: volume = Transform.to_volume(import_.get('volume')) location = Transform.to_location(import_.get('location')) project = Transform.to_project(import_.get('project')) name = import_.get('name', None) overwrite = import_.get('overwrite', False) item = { 'source': { 'volume': volume, 'location': location }, 'destination': { 'project': project }, 'overwrite': overwrite } if name: item['destination']['name'] = name items.append(item) data = {'items': items} response = api.post(url=cls._URL['bulk_create'], data=data) return ImportBulkRecord.parse_records(response=response, api=api)
def bulk_submit(cls, imports, api=None)
Submit imports in bulk :param imports: Imports to be retrieved. :param api: Api instance. :return: List of ImportBulkRecord objects.
2.887975
2.830055
1.020466
if room.custom_server: return def _create_room(xmpp): muc = xmpp.plugin['xep_0045'] muc.joinMUC(room.jid, xmpp.requested_jid.user) muc.configureRoom(room.jid, _set_form_values(xmpp, room)) current_plugin.logger.info('Creating room %s', room.jid) _execute_xmpp(_create_room)
def create_room(room)
Creates a MUC room on the XMPP server.
6.142098
5.48779
1.11923
if room.custom_server: return def _update_room(xmpp): muc = xmpp.plugin['xep_0045'] muc.joinMUC(room.jid, xmpp.requested_jid.user) muc.configureRoom(room.jid, _set_form_values(xmpp, room, muc.getRoomConfig(room.jid))) current_plugin.logger.info('Updating room %s', room.jid) _execute_xmpp(_update_room)
def update_room(room)
Updates a MUC room on the XMPP server.
5.762975
5.004723
1.151507
if room.custom_server: return def _delete_room(xmpp): muc = xmpp.plugin['xep_0045'] muc.destroy(room.jid, reason=reason) current_plugin.logger.info('Deleting room %s', room.jid) _execute_xmpp(_delete_room) delete_logs(room)
def delete_room(room, reason='')
Deletes a MUC room from the XMPP server.
5.784105
5.009919
1.154531
mapping = { 'name': 'muc#roomconfig_roomname', 'description': 'muc#roomconfig_roomdesc', 'password': 'muc#roomconfig_roomsecret' } def _get_room_config(xmpp): muc = xmpp.plugin['xep_0045'] try: form = muc.getRoomConfig(jid) except ValueError: # probably the room doesn't exist return None fields = form.values['fields'] return {key: fields[muc_key].values['value'] for key, muc_key in mapping.iteritems()} return _execute_xmpp(_get_room_config)
def get_room_config(jid)
Retrieves basic data of a MUC room from the XMPP server. :return: dict containing name, description and password of the room
3.542911
3.567123
0.993213
def _room_exists(xmpp): disco = xmpp.plugin['xep_0030'] try: disco.get_info(jid) except IqError as e: if e.condition == 'item-not-found': return False raise else: return True return _execute_xmpp(_room_exists)
def room_exists(jid)
Checks if a MUC room exists on the server.
3.464161
3.267517
1.060182
jid = unicode_to_ascii(s).lower() jid = WHITESPACE.sub('-', jid) jid = INVALID_JID_CHARS.sub('', jid) return jid.strip()[:256]
def sanitize_jid(s)
Generates a valid JID node identifier from a string
3.820287
3.857224
0.990424
if not append_date: return sanitize_jid(name) return '{}-{}'.format(sanitize_jid(name), append_date.strftime('%Y-%m-%d'))
def generate_jid(name, append_date=None)
Generates a v alid JID based on the room name. :param append_date: appends the given date to the JID
2.95644
4.407722
0.670741