INSTRUCTION
stringlengths
1
8.43k
RESPONSE
stringlengths
75
104k
Return string of length elements chosen from alphabet.
def random_string(self, length, alphabet):
    """Build a string of `length` characters, each drawn via self.choice."""
    chars = [self.choice(alphabet) for _ in range(length)]
    return ''.join(chars)
Decorator to mark a method as an API endpoint for later registration.
def api_endpoint(path_or_func=None, decorate=True):
    """
    Decorator to mark a method as an API endpoint for later registration.

    Args:
        path_or_func: either the function to be decorated or its API path.
        decorate (bool): Apply API_ENDPOINT_DECORATORS if True (default).

    Returns:
        Callable: Decorated function (with optionally applied decorators).

    Examples:
        >>> from dddp.api import APIMixin, api_endpoint
        >>> class Counter(APIMixin):
        ...     value = 0
        ...
        ...     # default API path matches function name 'increment'.
        ...     @api_endpoint
        ...     def increment(self, amount):
        ...         '''Increment counter value by `amount`.'''
        ...         self.value += amount
        ...         return self.value
        ...
        ...     # explicitly set API path to 'Decrement'.
        ...     @api_endpoint('Decrement')
        ...     def decrement(self, amount):
        ...         '''Decrement counter value by `amount`.'''
        ...         self.value -= amount
        ...         return self.value
    """
    def maybe_decorated(func):
        """Apply API_ENDPOINT_DECORATORS to func."""
        if decorate:
            for decorator in API_ENDPOINT_DECORATORS:
                # each entry is a decorator *factory*: call it, then wrap.
                func = decorator()(func)
        return func

    if callable(path_or_func):
        # bare usage: @api_endpoint -- API path defaults to function name.
        path_or_func.api_path = path_or_func.__name__
        return maybe_decorated(path_or_func)
    else:
        # parameterised usage: @api_endpoint('Path') or @api_endpoint().
        def _api_endpoint(func):
            """Decorator inner."""
            if path_or_func is None:
                func.api_path = func.__name__
            else:
                func.api_path = path_or_func
            return maybe_decorated(func)
        return _api_endpoint
Iterator over all API endpoint names and callbacks.
def api_endpoints(obj):
    """Iterator over all API endpoint names and callbacks.

    Yields (full_api_path, bound_callable) pairs for `obj` itself, then
    recurses into each of `obj.api_providers`.
    """
    for name in dir(obj):
        attr = getattr(obj, name)
        # only attributes tagged by @api_endpoint carry `api_path`.
        api_path = getattr(attr, 'api_path', None)
        if api_path:
            yield (
                '%s%s' % (obj.api_path_prefix, api_path),
                attr,
            )
    # recurse into nested providers; their paths are already fully prefixed.
    for api_provider in obj.api_providers:
        for api_path, attr in api_endpoints(api_provider):
            yield (api_path, attr)
Cached dict of api_path: func.
def api_path_map(self):
    """Cached dict of api_path: func (lazily built on first access)."""
    if self._api_path_cache is None:
        # api_endpoints yields (path, func) pairs, which dict() consumes.
        self._api_path_cache = dict(api_endpoints(self))
    return self._api_path_cache
Clear out cache for api_path_map.
def clear_api_path_map_cache(self):
    """Clear out cache for api_path_map, recursively for all providers."""
    self._api_path_cache = None
    for api_provider in self.api_providers:
        # only recurse into *bound* methods -- an unbound (class-level)
        # reference has no instance to clear.
        if six.get_method_self(
            api_provider.clear_api_path_map_cache,
        ) is not None:
            api_provider.clear_api_path_map_cache()
Call `func(*args, **kwargs)` but NEVER raise an exception.
def safe_call(func, *args, **kwargs):
    """
    Call `func(*args, **kwargs)` but NEVER raise an exception.

    Returns a two-tuple (stack_trace, result) where exactly one element is
    None.  Useful in situations such as inside exception handlers where
    calls to `logging.error` try to send email, but the SMTP server isn't
    always available and you don't want your exception handler blowing up.
    """
    try:
        result = func(*args, **kwargs)
    except Exception:  # pylint: disable=broad-except
        # the call failed -- hand the formatted stack trace to the caller.
        return traceback.format_exc(), None
    return None, result
Debug print name and val.
def dprint(name, val):
    """Debug print name and a pretty-formatted val."""
    from pprint import pformat
    # pretty-print the value, then re-indent continuation lines.
    formatted = pformat(val, indent=4, width=75)
    body = '\n '.join(formatted.split('\n'))
    print('% 5s: %s' % (name, body))
Validate arguments to be supplied to func.
def validate_kwargs(func, kwargs):
    """Validate arguments to be supplied to func.

    Mutates `kwargs` in place (renaming keys like 'id' -> 'id_'), and
    raises MeteorError(400, ...) on missing or unknown arguments.
    NOTE(review): `inspect.getargspec` was removed in Python 3.11; this
    code targets the older interpreters the rest of the file assumes.
    """
    func_name = func.__name__
    argspec = inspect.getargspec(func)
    all_args = argspec.args[:]
    defaults = list(argspec.defaults or [])
    # ignore implicit 'self' argument
    if inspect.ismethod(func) and all_args[:1] == ['self']:
        all_args[:1] = []
    # don't require arguments that have defaults
    if defaults:
        required = all_args[:-len(defaults)]
    else:
        required = all_args[:]
    # translate 'foo_' to avoid reserved names like 'id' in error messages
    trans = {
        arg: arg.endswith('_') and arg[:-1] or arg
        for arg in all_args
    }
    # rename incoming keys to their trailing-underscore form where the
    # handler expects it (eg: client sends 'id', handler takes 'id_').
    for key in list(kwargs):
        key_adj = '%s_' % key
        if key_adj in all_args:
            kwargs[key_adj] = kwargs.pop(key)
    # figure out what we're missing
    supplied = sorted(kwargs)
    missing = [
        trans.get(arg, arg)
        for arg in required
        if arg not in supplied
    ]
    if missing:
        raise MeteorError(
            400, func.err,
            'Missing required arguments to %s: %s' % (
                func_name,
                ' '.join(missing),
            ),
        )
    # figure out what is extra
    extra = [
        arg for arg in supplied
        if arg not in all_args
    ]
    if extra:
        raise MeteorError(
            400, func.err,
            'Unknown arguments to %s: %s' % (func_name, ' '.join(extra)),
        )
Handle new websocket connection.
def on_open(self):
    """Handle new websocket connection: bind thread-locals, init TX queue."""
    # expose request/socket helpers on the thread-local `this` object.
    this.request = WSGIRequest(self.ws.environ)
    this.ws = self
    this.send = self.send
    this.reply = self.reply
    self.logger = self.ws.logger
    # per-collection sets of Meteor IDs the remote already knows about.
    self.remote_ids = collections.defaultdict(set)

    # `_tx_buffer` collects outgoing messages which must be sent in order
    self._tx_buffer = {}
    # track the head of the queue (buffer) and the next msg to be sent
    # NOTE(review): sys.maxint / irange are Python 2 constructs.
    self._tx_buffer_id_gen = itertools.cycle(irange(sys.maxint))
    self._tx_next_id_gen = itertools.cycle(irange(sys.maxint))
    # start by waiting for the very first message
    self._tx_next_id = next(self._tx_next_id_gen)

    this.remote_addr = self.remote_addr = \
        '{0[REMOTE_ADDR]}:{0[REMOTE_PORT]}'.format(
            self.ws.environ,
        )
    this.subs = {}
    safe_call(self.logger.info, '+ %s OPEN', self)
    # SockJS handshake: 'o' (open) frame, then DDP server_id message.
    self.send('o')
    self.send('a["{\\"server_id\\":\\"0\\"}"]')
Handle closing of websocket connection.
def on_close(self, *args, **kwargs):
    """Handle closing of websocket connection.

    Deregisters this socket from the PostgreSQL worker and deletes the
    persisted Connection row; safe to call more than once (also used as
    an atexit handler by recv_connect).
    """
    if self.connection is not None:
        del self.pgworker.connections[self.connection.pk]
        self.connection.delete()
        self.connection = None
    # let Django close per-request resources (eg: DB connections).
    signals.request_finished.send(sender=self.__class__)
    safe_call(self.logger.info, '- %s %s', self, args or 'CLOSE')
Process a message received from remote.
def on_message(self, message):
    """Process a message received from remote.

    Splits the raw SockJS frame into individual DDP messages and
    dispatches each one; closes the socket on WebSocket errors.
    """
    if self.ws.closed:
        return None
    try:
        safe_call(self.logger.debug, '< %s %r', self, message)
        # process individual messages
        for data in self.ddp_frames_from_message(message):
            self.process_ddp(data)
        # emit request_finished signal to close DB connections
        signals.request_finished.send(sender=self.__class__)
    except geventwebsocket.WebSocketError:
        self.ws.close()
Yield DDP messages from a raw WebSocket message.
def ddp_frames_from_message(self, message):
    """Yield DDP messages from a raw WebSocket message.

    `message` is an EJSON-encoded list whose elements are themselves
    EJSON-encoded DDP payloads.  Malformed input is reported back to the
    client via an `error` reply.
    """
    # parse message set
    try:
        msgs = ejson.loads(message)
    except ValueError:
        self.reply(
            'error', error=400,
            reason='Data is not valid EJSON',
        )
        # FIX: was `raise StopIteration` -- inside a generator that is a
        # RuntimeError under PEP 479 (Python 3.7+); `return` ends cleanly.
        return
    if not isinstance(msgs, list):
        self.reply(
            'error', error=400,
            reason='Invalid EJSON messages',
        )
        # FIX: was `raise StopIteration` (see above).
        return
    # process individual messages
    while msgs:
        # pop raw message from the list
        raw = msgs.pop(0)
        # parse message payload
        try:
            data = ejson.loads(raw)
        except (TypeError, ValueError):
            data = None
        if not isinstance(data, dict):
            self.reply(
                'error', error=400,
                reason='Invalid SockJS DDP payload',
                offendingMessage=raw,
            )
            # NOTE(review): the invalid (non-dict) payload is still yielded
            # below, matching the original control flow -- verify the caller
            # tolerates non-dict frames before tightening this.
        yield data
        if msgs:
            # yield to other greenlets before processing next msg
            gevent.sleep()
Process a single DDP message.
def process_ddp(self, data):
    """Process a single DDP message dict, replying with result or error."""
    msg_id = data.get('id', None)
    try:
        msg = data.pop('msg')
    except KeyError:
        self.reply(
            'error', reason='Bad request',
            offendingMessage=data,
        )
        return
    try:
        # dispatch message
        self.dispatch(msg, data)
    except Exception as err:  # pylint: disable=broad-except
        # This should be the only protocol exception handler
        # 'method' failures reply with msg='result'; all else msg='error'.
        kwargs = {
            'msg': {'method': 'result'}.get(msg, 'error'),
        }
        if msg_id is not None:
            kwargs['id'] = msg_id
        if isinstance(err, MeteorError):
            error = err.as_dict()
        else:
            # don't leak internals of unexpected server-side failures.
            error = {
                'error': 500,
                'reason': 'Internal server error',
            }
        if kwargs['msg'] == 'error':
            kwargs.update(error)
        else:
            kwargs['error'] = error
        if not isinstance(err, MeteorError):
            # not a client error, should always be logged.
            stack, _ = safe_call(
                self.logger.error, '%r %r', msg, data, exc_info=1,
            )
            if stack is not None:
                # something went wrong while logging the error, revert to
                # writing a stack trace to stderr.
                traceback.print_exc(file=sys.stderr)
                sys.stderr.write(
                    'Additionally, while handling the above error the '
                    'following error was encountered:\n'
                )
                sys.stderr.write(stack)
        elif settings.DEBUG:
            print('ERROR: %s' % err)
            dprint('msg', msg)
            dprint('data', data)
            error.setdefault('details', traceback.format_exc())
            # print stack trace for client errors when DEBUG is True.
            print(error['details'])
        self.reply(**kwargs)
    # failed method calls must still emit 'updated' per the DDP protocol.
    if msg_id and msg == 'method':
        self.reply('updated', methods=[msg_id])
Dispatch msg to appropriate recv_foo handler.
def dispatch(self, msg, kwargs): """Dispatch msg to appropriate recv_foo handler.""" # enforce calling 'connect' first if self.connection is None and msg != 'connect': self.reply('error', reason='Must connect first') return if msg == 'method': if ( 'method' not in kwargs ) or ( 'id' not in kwargs ): self.reply( 'error', error=400, reason='Malformed method invocation', ) return # lookup method handler try: handler = getattr(self, 'recv_%s' % msg) except (AttributeError, UnicodeEncodeError): raise MeteorError(404, 'Method not found') # validate handler arguments validate_kwargs(handler, kwargs) # dispatch to handler handler(**kwargs)
Send `data` (raw string or EJSON payload) to WebSocket client.
def send(self, data, tx_id=None): """Send `data` (raw string or EJSON payload) to WebSocket client.""" # buffer data until we get pre-requisite data if tx_id is None: tx_id = self.get_tx_id() self._tx_buffer[tx_id] = data # de-queue messages from buffer while self._tx_next_id in self._tx_buffer: # pull next message from buffer data = self._tx_buffer.pop(self._tx_next_id) if self._tx_buffer: safe_call(self.logger.debug, 'TX found %d', self._tx_next_id) # advance next message ID self._tx_next_id = next(self._tx_next_id_gen) if not isinstance(data, basestring): # ejson payload msg = data.get('msg', None) if msg in (ADDED, CHANGED, REMOVED): ids = self.remote_ids[data['collection']] meteor_id = data['id'] if msg == ADDED: if meteor_id in ids: msg = data['msg'] = CHANGED else: ids.add(meteor_id) elif msg == CHANGED: if meteor_id not in ids: # object has become visible, treat as `added`. msg = data['msg'] = ADDED ids.add(meteor_id) elif msg == REMOVED: try: ids.remove(meteor_id) except KeyError: continue # client doesn't have this, don't send. data = 'a%s' % ejson.dumps([ejson.dumps(data)]) # send message safe_call(self.logger.debug, '> %s %r', self, data) try: self.ws.send(data) except geventwebsocket.WebSocketError: self.ws.close() self._tx_buffer.clear() break num_waiting = len(self._tx_buffer) if num_waiting > 10: safe_call( self.logger.warn, 'TX received %d, waiting for %d, have %d waiting: %r.', tx_id, self._tx_next_id, num_waiting, self._tx_buffer, )
DDP connect handler.
def recv_connect(self, version=None, support=None, session=None):
    """DDP connect handler: negotiate protocol version, persist session."""
    del session  # Meteor doesn't even use this!
    if self.connection is not None:
        raise MeteorError(
            400, 'Session already established.',
            self.connection.connection_id,
        )
    elif None in (version, support) or version not in self.versions:
        # unsupported version -> tell client what we'd prefer.
        self.reply('failed', version=self.versions[0])
    elif version not in support:
        raise MeteorError(400, 'Client version/support mismatch.')
    else:
        from dddp.models import Connection
        cur = connection.cursor()
        # record which PostgreSQL backend serves this session.
        cur.execute('SELECT pg_backend_pid()')
        (backend_pid,) = cur.fetchone()
        this.version = version
        this.support = support
        self.connection = Connection.objects.create(
            server_addr='%d:%s' % (
                backend_pid,
                self.ws.handler.socket.getsockname(),
            ),
            remote_addr=self.remote_addr,
            version=version,
        )
        self.pgworker.connections[self.connection.pk] = self
        # ensure the Connection row is cleaned up on process shutdown.
        atexit.register(self.on_close, 'Shutting down.')
        self.reply('connected', session=self.connection.connection_id)
DDP ping handler.
def recv_ping(self, id_=None):
    """DDP ping handler: answer with a pong, echoing the ID if given."""
    kwargs = {}
    if id_ is not None:
        kwargs['id'] = id_
    self.reply('pong', **kwargs)
DDP sub handler.
def recv_sub(self, id_, name, params):
    """DDP sub handler: delegate the subscription request to the API."""
    self.api.sub(id_, name, *params)
DDP unsub handler.
def recv_unsub(self, id_=None):
    """DDP unsub handler: cancel the subscription, or nosub if no ID."""
    if not id_:
        # no subscription ID supplied -> nothing to tear down.
        self.reply('nosub')
        return
    self.api.unsub(id_)
DDP method handler.
def recv_method(self, method, params, id_, randomSeed=None):
    """DDP method handler: run the method, then signal data-complete."""
    if randomSeed is not None:
        # seed server-side random streams to mirror the client's IDs.
        this.random_streams.random_seed = randomSeed
        this.alea_random = alea.Alea(randomSeed)
    self.api.method(method, params, id_)
    self.reply('updated', methods=[id_])
Inform client that WebSocket service is available.
def ddpp_sockjs_info(environ, start_response):
    """Inform client that WebSocket service is available.

    WSGI app implementing the SockJS `/info` endpoint.
    """
    import random
    import ejson

    start_response(
        '200 OK',
        [
            ('Content-Type', 'application/json; charset=UTF-8'),
        ] + common_headers(environ),
    )
    yield ejson.dumps(collections.OrderedDict([
        ('websocket', True),
        ('origins', [
            '*:*',
        ]),
        ('cookie_needed', False),
        # entropy lets the client randomise its transport session IDs.
        ('entropy', random.getrandbits(32)),
    ]))
Convert a string of format host[:port] into Addr(host, port).
def addr(val, default_port=8000, defualt_host='localhost'):
    """
    Convert a string of format host[:port] into Addr(host, port).

    >>> addr('0:80')
    Addr(host='0', port=80)
    >>> addr('127.0.0.1:80')
    Addr(host='127.0.0.1', port=80)
    >>> addr('0.0.0.0', default_port=8000)
    Addr(host='0.0.0.0', port=8000)
    """
    # NOTE(review): `defualt_host` is misspelled, but the name is kept so
    # existing callers passing it by keyword keep working.
    import re
    import socket
    match = re.match(r'\A(?P<host>.*?)(:(?P<port>(\d+|\w+)))?\Z', val)
    if match is None:
        raise argparse.ArgumentTypeError(
            '%r is not a valid host[:port] address.' % val
        )
    host, port = match.group('host', 'port')
    host = host or defualt_host
    if not port:
        port = default_port
    elif port.isdigit():
        port = int(port)
    else:
        # named service (eg: 'http') -> numeric port via /etc/services.
        port = socket.getservbyname(port)
    return Addr(host, port)
Spawn greenlets for handling websockets and PostgreSQL calls.
def serve(listen, verbosity=1, debug_port=0, **ssl_args):
    """Spawn greenlets for handling websockets and PostgreSQL calls.

    Blocks until a stop signal (SIGINT/SIGQUIT) is received.
    """
    launcher = DDPLauncher(debug=verbosity == 3, verbosity=verbosity)
    if debug_port:
        launcher.servers.append(
            launcher.get_backdoor_server('localhost:%d' % debug_port)
        )
    launcher.add_web_servers(listen, **ssl_args)
    # die gracefully with SIGINT or SIGQUIT
    # map signal numbers back to names for readable log messages.
    sigmap = {
        val: name
        for name, val in vars(signal).items()
        if name.startswith('SIG')
    }

    def sighandler(signum=None, frame=None):
        """Log the received signal and stop the launcher."""
        launcher.logger.info(
            'Received signal %s in frame %r',
            sigmap.get(signum, signum), frame,
        )
        launcher.stop()

    for signum in [signal.SIGINT, signal.SIGQUIT]:
        gevent.signal(signum, sighandler)
    launcher.run()
Main entry point for dddp command.
def main():
    """Main entry point for `dddp` command: parse args and serve."""
    parser = argparse.ArgumentParser(description=__doc__)
    django = parser.add_argument_group('Django Options')
    django.add_argument(
        '--verbosity', '-v', metavar='VERBOSITY', dest='verbosity',
        type=int, default=1,
    )
    django.add_argument(
        '--debug-port', metavar='DEBUG_PORT', dest='debug_port',
        type=int, default=0,
    )
    django.add_argument(
        '--settings', metavar='SETTINGS', dest='settings',
        help="The Python path to a settings module, e.g. "
        "\"myproject.settings.main\". If this isn't provided, the "
        "DJANGO_SETTINGS_MODULE environment variable will be used.",
    )
    http = parser.add_argument_group('HTTP Options')
    http.add_argument(
        'listen', metavar='address[:port]', nargs='*', type=addr,
        help='Listening address for HTTP(s) server.',
    )
    ssl = parser.add_argument_group('SSL Options')
    ssl.add_argument('--ssl-version', metavar='SSL_VERSION', dest='ssl_version',
                     help="SSL version to use (see stdlib ssl module's) [3]",
                     choices=['1', '2', '3'], default='3')
    ssl.add_argument('--certfile', metavar='FILE', dest='certfile',
                     help="SSL certificate file [None]")
    ssl.add_argument('--ciphers', metavar='CIPHERS', dest='ciphers',
                     help="Ciphers to use (see stdlib ssl module's) [TLSv1]")
    ssl.add_argument('--ca-certs', metavar='FILE', dest='ca_certs',
                     help="CA certificates file [None]")
    ssl.add_argument('--keyfile', metavar='FILE', dest='keyfile',
                     help="SSL key file [None]")
    namespace = parser.parse_args()
    if namespace.settings:
        os.environ['DJANGO_SETTINGS_MODULE'] = namespace.settings
    # NOTE(review): --ssl-version, --ciphers and --ca-certs are parsed but
    # never forwarded to serve() -- confirm whether that is intentional.
    serve(
        namespace.listen or [Addr('localhost', 8000)],
        debug_port=namespace.debug_port,
        keyfile=namespace.keyfile,
        certfile=namespace.certfile,
        verbosity=namespace.verbosity,
    )
Print formatted msg if verbosity set at 1 or above.
def print(self, msg, *args, **kwargs):
    """Print formatted msg, but only when verbosity is at 1 or above."""
    if self.verbosity < 1:
        return
    print(msg, *args, **kwargs)
Add a WebSocketServer for each (host, port) pair in listen_addrs.
def add_web_servers(self, listen_addrs, debug=False, **ssl_args):
    """Create and register a WebSocketServer per (host, port) address."""
    for listen_addr in listen_addrs:
        server = self.get_web_server(listen_addr, debug=debug, **ssl_args)
        self.servers.append(server)
Set up a WebSocketServer on listen_addr (host, port).
def get_web_server(self, listen_addr, debug=False, **ssl_args):
    """Setup WebSocketServer on listen_addr (host, port)."""
    # drop unset (None) SSL options so the server falls back to defaults.
    options = {}
    for key, val in ssl_args.items():
        if val is not None:
            options[key] = val
    return geventwebsocket.WebSocketServer(
        listen_addr,
        self.resource,
        debug=debug,
        **options
    )
Add a backdoor ( debug ) server.
def get_backdoor_server(self, listen_addr, **context):
    """Add a backdoor (debug) server exposing launcher internals.

    Extra keyword arguments are merged into the interactive locals.
    """
    from django.conf import settings
    # convenient handles available in the telnet REPL.
    local_vars = {
        'launcher': self,
        'servers': self.servers,
        'pgworker': self.pgworker,
        'stop': self.stop,
        'api': self.api,
        'resource': self.resource,
        'settings': settings,
        'wsgi_app': self.wsgi_app,
        'wsgi_name': self.wsgi_name,
    }
    local_vars.update(context)
    return BackdoorServer(
        listen_addr,
        banner='Django DDP',
        locals=local_vars,
    )
Stop all green threads.
def stop(self):
    """Stop all green threads (web servers plus the PostgreSQL worker)."""
    self.logger.debug('PostgresGreenlet stop')
    self._stop_event.set()
    # ask all threads to stop.
    for server in self.servers + [DDPLauncher.pgworker]:
        self.logger.debug('Stopping %s', server)
        server.stop()
    # wait for all threads to stop.
    gevent.joinall(self.threads + [DDPLauncher.pgworker])
    self.threads = []
Run PostgresGreenlet and web/ debug servers.
def start(self):
    """Run PostgresGreenlet and web/debug servers."""
    self.logger.debug('PostgresGreenlet start')
    self._stop_event.clear()
    self.print('=> Discovering DDP endpoints...')
    if self.verbosity > 1:
        for api_path in sorted(self.api.api_path_map()):
            print(' %s' % api_path)
    # start greenlets
    self.pgworker.start()
    self.print('=> Started PostgresGreenlet.')
    for server in self.servers:
        thread = gevent.spawn(server.serve_forever)
        gevent.sleep()  # yield to thread in case it can't start
        self.threads.append(thread)
        if thread.dead:
            # thread died, stop everything and re-raise the exception.
            self.stop()
            thread.get()
        # announce the listening address appropriate to the server type.
        if isinstance(server, geventwebsocket.WebSocketServer):
            self.print(
                '=> App running at: %s://%s:%d/' % (
                    'https' if server.ssl_enabled else 'http',
                    server.server_host,
                    server.server_port,
                ),
            )
        elif isinstance(server, gevent.backdoor.BackdoorServer):
            self.print(
                '=> Debug service running at: telnet://%s:%d/' % (
                    server.server_host,
                    server.server_port,
                ),
            )
    self.print('=> Started your app (%s).' % self.wsgi_name)
Run DDP greenlets.
def run(self):
    """Run DDP greenlets, blocking until stop() is requested."""
    self.logger.debug('PostgresGreenlet run')
    self.start()
    self._stop_event.wait()
    # wait for all threads to stop.
    gevent.joinall(self.threads + [DDPLauncher.pgworker])
    self.threads = []
Initialisation for django-ddp (setup lookups and signal handlers).
def ready(self):
    """Initialisation for django-ddp (setup lookups and signal handlers).

    Raises ImproperlyConfigured when no databases are defined; warns on
    non-PostgreSQL engines (django-ddp relies on LISTEN/NOTIFY).
    """
    if not settings.DATABASES:
        raise ImproperlyConfigured('No databases configured.')
    for (alias, conf) in settings.DATABASES.items():
        engine = conf['ENGINE']
        if engine not in [
            'django.db.backends.postgresql',
            'django.db.backends.postgresql_psycopg2',
        ]:
            warnings.warn(
                'Database %r uses unsupported %r engine.' % (
                    alias, engine,
                ),
                UserWarning,
            )
    self.api = autodiscover()
    self.api.ready()
Spawn sub tasks wait for stop signal.
def _run(self):  # pylint: disable=method-hidden
    """Spawn sub tasks, wait for stop signal.

    Opens a dedicated async psycopg2 connection, LISTENs on the 'ddp'
    channel, and polls it until stop() sets the stop event.
    """
    conn_params = self.connection.get_connection_params()
    # See http://initd.org/psycopg/docs/module.html#psycopg2.connect and
    # http://www.postgresql.org/docs/current/static/libpq-connect.html
    # section 31.1.2 (Parameter Key Words) for details on available params.
    # NOTE(review): `async` became a reserved word in Python 3.7; this
    # keyword argument only parses on Python 2 / <= 3.6.
    conn_params.update(
        async=True,
        application_name='{} pid={} django-ddp'.format(
            socket.gethostname(),  # hostname
            os.getpid(),  # PID
        )[:64],  # 64 characters for default PostgreSQL build config
    )
    conn = None
    while conn is None:
        try:
            conn = psycopg2.connect(**conn_params)
        except psycopg2.OperationalError as err:
            # Some variants of the psycopg2 driver for Django add extra
            # params that aren't meant to be passed directly to
            # `psycopg2.connect()` -- issue a warning and try again.
            msg = ('%s' % err).strip()
            msg_prefix = 'invalid connection option "'
            if not msg.startswith(msg_prefix):
                # *waves hand* this is not the error you are looking for.
                raise
            key = msg[len(msg_prefix):-1]
            self.logger.warning(
                'Ignoring unknown settings.DATABASES[%r] option: %s=%r',
                self.connection.alias, key, conn_params.pop(key),
            )
    self.poll(conn)  # wait for connection to start
    import logging
    logging.getLogger('dddp').info('=> Started PostgresGreenlet.')
    cur = conn.cursor()
    cur.execute('LISTEN "ddp";')
    while not self._stop_event.is_set():
        try:
            # block (cooperatively) until the connection is readable.
            self.select_greenlet = gevent.spawn(
                gevent.select.select, [conn], [], [], timeout=None,
            )
            self.select_greenlet.get()
        except gevent.GreenletExit:
            self._stop_event.set()
        finally:
            self.select_greenlet = None
        self.poll(conn)
    # drain outstanding notifications before closing.
    self.poll(conn)
    cur.close()
    self.poll(conn)
    conn.close()
Stop subtasks and let run () finish.
def stop(self):
    """Stop subtasks and let run() finish."""
    self._stop_event.set()
    greenlet = self.select_greenlet
    if greenlet is not None:
        # interrupt the blocking select so _run can observe the stop event.
        greenlet.kill()
        greenlet.get()
    gevent.sleep()
Poll DB socket and process async tasks.
def poll(self, conn):
    """Poll DB socket and process async tasks.

    Drains NOTIFY payloads (which arrive split into ordered chunks),
    reassembles complete messages, and forwards them to the relevant
    websocket connections in this process.
    """
    while 1:
        state = conn.poll()
        if state == psycopg2.extensions.POLL_OK:
            while conn.notifies:
                notify = conn.notifies.pop()
                self.logger.info(
                    "Got NOTIFY (pid=%d, payload=%r)",
                    notify.pid, notify.payload,
                )

                # read the header and check seq/fin.
                hdr, chunk = notify.payload.split('|', 1)
                # print('RECEIVE: %s' % hdr)
                header = ejson.loads(hdr)
                uuid = header['uuid']
                size, chunks = self.chunks.setdefault(uuid, [0, {}])
                if header['fin']:
                    # final chunk carries the total chunk count in 'seq'.
                    size = self.chunks[uuid][0] = header['seq']

                # stash the chunk
                chunks[header['seq']] = chunk
                if len(chunks) != size:
                    # haven't got all the chunks yet
                    continue  # process next NOTIFY in loop

                # got the last chunk -> process it.
                data = ''.join(
                    chunk for _, chunk in sorted(chunks.items())
                )
                del self.chunks[uuid]  # don't forget to cleanup!

                data = ejson.loads(data)
                sender = data.pop('_sender', None)
                tx_id = data.pop('_tx_id', None)
                for connection_id in data.pop('_connection_ids'):
                    try:
                        websocket = self.connections[connection_id]
                    except KeyError:
                        continue  # connection not in this process
                    if connection_id == sender:
                        # preserve ordering for the originating connection.
                        websocket.send(data, tx_id=tx_id)
                    else:
                        websocket.send(data)
            break
        elif state == psycopg2.extensions.POLL_WRITE:
            gevent.select.select([], [conn.fileno()], [])
        elif state == psycopg2.extensions.POLL_READ:
            gevent.select.select([conn.fileno()], [], [])
        else:
            self.logger.warn('POLL_ERR: %s', state)
Patch threading and psycopg2 modules for green threads.
def greenify(): """Patch threading and psycopg2 modules for green threads.""" # don't greenify twice. if _GREEN: return _GREEN[True] = True from gevent.monkey import patch_all, saved if ('threading' in sys.modules) and ('threading' not in saved): import warnings warnings.warn('threading module loaded before patching!') patch_all() try: # Use psycopg2 by default import psycopg2 del psycopg2 except ImportError: # Fallback to psycopg2cffi if required (eg: pypy) from psycopg2cffi import compat compat.register() from psycogreen.gevent import patch_psycopg patch_psycopg()
Generate a new ID optionally using namespace of given name.
def meteor_random_id(name=None, length=17):
    """Generate a new ID, optionally using namespace of given `name`."""
    if name is None:
        stream = THREAD_LOCAL.alea_random
    else:
        # use the named, client-synchronised random stream.
        stream = THREAD_LOCAL.random_streams[name]
    return stream.random_string(length, METEOR_ID_CHARS)
Import all ddp submodules from settings. INSTALLED_APPS.
def autodiscover():
    """Import all `ddp` submodules from `settings.INSTALLED_APPS`.

    Returns the populated API registry.
    """
    from django.utils.module_loading import autodiscover_modules
    from dddp.api import API
    autodiscover_modules('ddp', register_to=API)
    return API
Return an error dict for self. args and kwargs.
def as_dict(self, **kwargs):
    """Return an error dict built from self.args, overridden by kwargs."""
    error, reason, details, err_kwargs = self.args
    result = {}
    # only include fields that are actually set.
    for key, val in (('error', error), ('reason', reason), ('details', details)):
        if val is not None:
            result[key] = val
    result.update(err_kwargs)
    result.update(kwargs)
    return result
Get attribute creating if required using specified factory.
def get(self, name, factory, *factory_args, **factory_kwargs):
    """Get attribute, creating via `factory` when absent.

    A factory may set `update_thread_local = False` to force a fresh
    object on every call (nothing is cached on self in that case).
    """
    update_thread_local = getattr(factory, 'update_thread_local', True)
    if update_thread_local and name in self.__dict__:
        # cache hit -- return the previously created object.
        return getattr(self, name)
    obj = factory(*factory_args, **factory_kwargs)
    if update_thread_local:
        setattr(self, name, obj)
    return obj
Emit a formatted log record via DDP.
def emit(self, record):
    """Emit a formatted log record via DDP.

    Only emits when the current client has subscribed to the logs
    collection; each record becomes an `added` message.
    """
    if getattr(this, 'subs', {}).get(LOGS_NAME, False):
        # populate computed fields (eg: record.message, record.asctime).
        self.format(record)
        this.send({
            'msg': ADDED,
            'collection': LOGS_NAME,
            'id': meteor_random_id('/collection/%s' % LOGS_NAME),
            'fields': {
                attr: {
                    # typecasting methods for specific attributes
                    'args': lambda args: [repr(arg) for arg in args],
                    'created': datetime.datetime.fromtimestamp,
                    'exc_info': stacklines_or_none,
                }.get(
                    attr, lambda val: val  # default typecasting method
                )(getattr(record, attr, None))
                for attr in (
                    'args', 'asctime', 'created', 'exc_info', 'filename',
                    'funcName', 'levelname', 'levelno', 'lineno', 'module',
                    'msecs', 'message', 'name', 'pathname', 'process',
                    'processName', 'relativeCreated', 'thread', 'threadName',
                )
            },
        })
Given a request, a list of renderers, and the ``force`` configuration option, return a two-tuple of (media type, render callable). Uses mimeparse to find the best media type match from the ACCEPT header.
def select_renderer(request: web.Request, renderers: OrderedDict, force=True):
    """
    Given a request, a list of renderers, and the ``force`` configuration
    option, return a two-tuple of: (media type, render callable). Uses
    mimeparse to find the best media type match from the ACCEPT header.
    """
    header = request.headers.get('ACCEPT', '*/*')
    best_match = mimeparse.best_match(renderers.keys(), header)
    if best_match and best_match in renderers:
        return best_match, renderers[best_match]
    # no acceptable media type: either force the first renderer or 406.
    if force:
        return tuple(renderers.items())[0]
    raise web.HTTPNotAcceptable
Middleware which selects a renderer for a given request then renders a handler s data to a aiohttp. web. Response.
def negotiation_middleware(
        renderers=DEFAULTS['RENDERERS'],
        negotiator=DEFAULTS['NEGOTIATOR'],
        force_negotiation=DEFAULTS['FORCE_NEGOTIATION']
):
    """Middleware which selects a renderer for a given request then renders
    a handler's data to a `aiohttp.web.Response`.
    """
    @asyncio.coroutine
    def factory(app, handler):
        @asyncio.coroutine
        def middleware(request):
            content_type, renderer = negotiator(
                request,
                renderers,
                force_negotiation,
            )
            request['selected_media_type'] = content_type
            response = yield from handler(request)

            if getattr(response, 'data', None):
                # Render data with the selected renderer
                if asyncio.iscoroutinefunction(renderer):
                    render_result = yield from renderer(request, response.data)
                else:
                    render_result = renderer(request, response.data)
            else:
                render_result = response

            # a renderer may return a complete Response of its own.
            if isinstance(render_result, web.Response):
                return render_result

            if getattr(response, 'data', None):
                response.body = render_result
                response.content_type = content_type

            return response

        return middleware
    return factory
Set up the negotiation middleware. Reads configuration from app [ AIOHTTP_UTILS ].
def setup(
    app: web.Application, *,
    negotiator: callable=DEFAULTS['NEGOTIATOR'],
    renderers: OrderedDict=DEFAULTS['RENDERERS'],
    force_negotiation: bool=DEFAULTS['FORCE_NEGOTIATION']
):
    """Set up the negotiation middleware. Reads configuration from
    ``app['AIOHTTP_UTILS']``.

    :param app: Application to set up.
    :param negotiator: Function that selects a renderer given a request,
        a dict of renderers, and a ``force`` parameter (whether to return
        a renderer even if the client passes an unsupported media type).
    :param renderers: Mapping of mediatypes to callable renderers.
    :param force_negotiation: Whether to return a renderer even if the
        client passes an unsupported media type.
    """
    config = app.get(CONFIG_KEY, {})

    def override(key, fallback):
        """Prefer app-level config over the argument default."""
        return config.get(key, fallback)

    middleware = negotiation_middleware(
        renderers=override('RENDERERS', renderers),
        negotiator=override('NEGOTIATOR', negotiator),
        force_negotiation=override('FORCE_NEGOTIATION', force_negotiation),
    )
    app.middlewares.append(middleware)
    return app
Context manager which yields a function for adding multiple routes from a given module.
def add_route_context(
        app: web.Application, module=None,
        url_prefix: str=None, name_prefix: str=None
):
    """Context manager which yields a function for adding multiple routes
    from a given module.

    Example:

    .. code-block:: python

        # myapp/app.py
        from myapp.articles import views

        with add_route_context(app, url_prefix='/api/',
                               name_prefix='articles') as route:
            route('GET', '/articles/', views.list_articles)
            route('POST', '/articles/', views.create_article)

        app.router['articles.list_articles'].url()  # /api/articles/

    If you prefer, you can also pass module and handler names as strings::

        with add_route_context(app, module='myapp.articles.views',
                               url_prefix='/api/',
                               name_prefix='articles') as route:
            route('GET', '/articles/', 'list_articles')
            route('POST', '/articles/', 'create_article')

    :param app: Application to add routes to.
    :param module: Import path to module (str) or module object which
        contains the handlers.
    :param url_prefix: Prefix to prepend to all route paths.
    :param name_prefix: Prefix to prepend to all route names.
    """
    if isinstance(module, (str, bytes)):
        # allow the module to be specified by import path.
        module = importlib.import_module(module)

    def add_route(method, path, handler, name=None):
        """Register one route.

        :param str method: HTTP method.
        :param str path: Path for the route.
        :param handler: A handler function or a name of a handler function
            contained in `module`.
        :param str name: Name for the route. If `None`, defaults to the
            handler's function name.
        """
        if isinstance(handler, (str, bytes)):
            if not module:
                raise ValueError(
                    'Must pass module to add_route_context if passing handler name strings.'
                )
            name = name or handler
            handler = getattr(module, handler)
        else:
            name = name or handler.__name__
        path = make_path(path, url_prefix)
        name = '.'.join((name_prefix, name)) if name_prefix else name
        return app.router.add_route(method, path, handler, name=name)

    yield add_route
Context manager which yields a function for adding multiple resources from a given module to an app using `ResourceRouter <aiohttp_utils.routing.ResourceRouter>`.
def add_resource_context(
    app: web.Application, module=None, url_prefix: str=None, name_prefix: str=None,
    make_resource=lambda cls: cls()
):
    """Context manager which yields a function for adding multiple resources
    from a given module to an app using
    `ResourceRouter <aiohttp_utils.routing.ResourceRouter>`.

    Example:

    .. code-block:: python

        from myapp.articles import views

        with add_resource_context(app, url_prefix='/api/') as route:
            route('/articles/', views.ArticleList())
            route('/articles/{pk}', views.ArticleDetail())

        app.router['ArticleList:get'].url()  # /api/articles/

    Resources may also be passed as class-name strings together with
    ``module``; the classes are then instantiated via ``make_resource``,
    which defaults to calling the class with no arguments. Override
    ``make_resource`` (globally or per-call) to pass constructor arguments.

    :param app: Application to add routes to.
    :param module: Import path to module (str) or module object which contains
        the resource classes.
    :param url_prefix: Prefix to prepend to all route paths.
    :param name_prefix: Prefix to prepend to all route names.
    :param make_resource: Function which receives a resource class and returns
        a resource instance.
    """
    assert isinstance(app.router, ResourceRouter), 'app must be using ResourceRouter'
    if isinstance(module, (str, bytes)):
        module = importlib.import_module(module)

    def get_base_name(resource, method_name, names):
        # An explicit override in `names` wins; otherwise use the router's default.
        return names.get(method_name, app.router.get_default_handler_name(resource, method_name))

    default_make_resource = make_resource

    def add_route(
        path: str, resource, names: Mapping=None, make_resource=None
    ):
        make_resource = make_resource or default_make_resource
        names = names or {}
        if isinstance(resource, (str, bytes)):
            if not module:
                # BUG FIX: message previously referred to add_route_context.
                raise ValueError(
                    'Must pass module to add_resource_context if passing resource name strings.'
                )
            resource_cls = getattr(module, resource)
            resource = make_resource(resource_cls)
        path = make_path(path, url_prefix)
        if name_prefix:
            # Prefix every generated route name, e.g. 'articles.ArticleList:get'.
            supported_method_names = get_supported_method_names(resource)
            names = {
                method_name: '.'.join(
                    (name_prefix, get_base_name(resource, method_name, names=names))
                )
                for method_name in supported_method_names
            }
        return app.router.add_resource_object(path, resource, names=names)
    yield add_route
Add routes for each of a resource instance's methods.
def add_resource_object(self, path: str, resource, methods: tuple=tuple(), names: Mapping=None):
    """Register a route for each supported HTTP method on a resource instance.

    :param path: route path. Should be started with slash (``'/'``).
    :param resource: A "resource" instance. May be an instance of a plain object.
    :param methods: Methods (strings) to register; all of
        ``self.HTTP_METHOD_NAMES`` when empty.
    :param names: Dictionary of ``name`` overrides.
    """
    overrides = names or {}
    candidates = methods if methods else self.HTTP_METHOD_NAMES
    for verb in candidates:
        handler = getattr(resource, verb, None)
        if not handler:
            # Resource does not implement this HTTP method — skip it.
            continue
        route_name = overrides.get(verb, self.get_default_handler_name(resource, verb))
        self.add_route(verb.upper(), path, handler, name=route_name)
Run an aiohttp. web. Application using gunicorn.
def run(app: web.Application, **kwargs):
    """Serve an `aiohttp.web.Application` with gunicorn.

    :param app: The app to run.
    :param str app_uri: Import path to `app` in the form
        ``$(MODULE_NAME):$(VARIABLE_NAME)``. The module name can be a full
        dotted path; the variable refers to the `aiohttp.web.Application`
        instance. Required when ``reload=True``.
    :param str host: Hostname to listen on.
    :param int port: Port of the server.
    :param bool reload: Reload the server on a code change; defaults to
        ``app.debug`` when unset. **EXPERIMENTAL**.
    :param \\*\\*kwargs: Extra configuration options to set on the
        ``GunicornApp's`` config object.
    """
    Runner(app, **kwargs).run()
Sends a push notification to this device via GCM
def send_message(self, message, **kwargs):
    """
    Sends a push notification to this device via GCM.

    Extra payload fields can be supplied via the ``extra`` kwarg (a dict);
    the message text itself is added under the ``"message"`` key.
    """
    from ..libs.gcm import gcm_send_message

    payload = kwargs.pop("extra", {})
    if message is not None:
        payload["message"] = message
    return gcm_send_message(registration_id=self.registration_id, data=payload, **kwargs)
Sends an APNS notification to one or more registration_ids. The registration_ids argument needs to be a list.
def apns_send_bulk_message(registration_ids, alert, **kwargs):
    """
    Sends an APNS notification to one or more registration_ids.
    The registration_ids argument needs to be a list.

    Note that if set alert should always be a string. If it is not set,
    it won't be included in the notification. You will need to pass None
    to this for silent notifications.
    """
    with closing(_apns_create_socket_to_push(**kwargs)) as conn:
        # The enumeration index doubles as the APNS notification identifier.
        for index, token in enumerate(registration_ids):
            _apns_send(token, alert, identifier=index, socket=conn, **kwargs)
        _apns_check_errors(conn)
Queries the APNS feedback server for IDs that are no longer active since the last fetch.
def apns_fetch_inactive_ids():
    """
    Queries the APNS feedback service for registration IDs that are no
    longer active since the last fetch. IDs are returned hex-encoded.
    """
    with closing(_apns_create_socket_to_feedback()) as conn:
        return [
            codecs.encode(token, 'hex_codec')
            for _, token in _apns_receive_feedback(conn)
        ]
Standalone method to send a single gcm notification
def gcm_send_message(registration_id, data, encoding='utf-8', **kwargs):
    """
    Standalone helper: send a single GCM notification (plain-text request).
    """
    return GCMMessenger(registration_id, data, encoding=encoding, **kwargs).send_plain()
Standalone method to send bulk gcm notifications
def gcm_send_bulk_message(registration_ids, data, encoding='utf-8', **kwargs):
    """
    Standalone helper: send a GCM notification to many registration ids
    in one JSON request.
    """
    return GCMMessenger(registration_ids, data, encoding=encoding, **kwargs).send_bulk()
Sends a text/ plain GCM message
def send_plain(self):
    """
    Sends a text/plain GCM message (form-encoded request body).

    Payload keys from ``self._data`` are sent as ``data.<key>`` form
    fields; extra kwargs are sent as top-level fields.

    :returns: The raw GCM response string. The two "stale registration"
        errors are returned as-is rather than raised.
    :raises GCMPushError: For any other ``Error=`` response.
    """
    values = {"registration_id": self._registration_id}
    # Each payload entry becomes a separate 'data.<key>' form field.
    for key, val in self._data.items():
        values["data.%s" % (key)] = val.encode(self.encoding)
    for key, val in self._kwargs.items():
        # Truthy booleans are sent as the integer 1 (False bools pass through).
        if val and isinstance(val, bool):
            val = 1
        values[key] = val
    # Sorting the items gives a deterministic request body.
    data = urlencode(sorted(values.items())).encode(self.encoding)
    result = self._send(data, "application/x-www-form-urlencoded;charset=UTF-8")
    if result.startswith("Error="):
        if result in ("Error=NotRegistered", "Error=InvalidRegistration"):
            ## TODO: deactivate the unregistered device
            return result
        raise GCMPushError(result)
    return result
Sends a json GCM message
def send_json(self, ids=None):
    """
    Sends a json GCM message.

    :param ids: Optional list of registration ids; defaults to
        ``self._registration_id`` (expected to be a list for bulk sends).
    :returns: Parsed GCM JSON response dict.
    :raises GCMPushError: If any per-device result reports an error other
        than NotRegistered/InvalidRegistration.
    """
    items = ids or self._registration_id
    values = {"registration_ids": items}
    if self._data is not None:
        values["data"] = self._data
    for key, val in self._kwargs.items():
        if val:
            values[key] = val
    # Compact separators + sorted keys give a deterministic payload.
    data = json.dumps(values, separators=(",", ":"), sort_keys=True).encode(
        self.encoding)
    result = json.loads(self._send(data, "application/json"))
    if ("failure" in result) and (result["failure"]):
        unregistered = []
        throw_error = False
        # Per-device results are positionally aligned with `items`.
        for index, error in enumerate(result.get("results", [])):
            error = error.get("error", "")
            if error in ("NotRegistered", "InvalidRegistration"):
                # Stale tokens are deactivated rather than raised.
                unregistered.append(items[index])
            elif error != "":
                throw_error = True
        self.deactivate_unregistered_devices(unregistered)
        if throw_error:
            raise GCMPushError(result)
    return result
Sends a GCM message with the given content type
def _send(self, data, content_type):
    """
    POST a prepared payload to the GCM endpoint and return the decoded
    response body.
    """
    headers = {
        "Content-Type": content_type,
        "Authorization": "key=%s" % (self.api_key),
        "Content-Length": str(len(data)),
    }
    response = urlopen(Request(self.api_url, data, headers))
    return response.read().decode(self.encoding)
Returns the instance of the given module location.
def get_model(module_location):
    """
    Return the class/attribute at the given dotted module location.

    :param module_location: Dotted path, e.g. ``'app.module.ModelName'``.
    :returns: The resolved attribute, or ``None`` when the module imports
        but the attribute is missing (AttributeError is swallowed).
    :raises ValueError: If the argument is not a string, or contains no dot.
    """
    # BUG FIX: `unicode` does not exist on Python 3 and raised NameError
    # before the isinstance check could run. Guard for both versions.
    try:
        string_types = (str, unicode)
    except NameError:
        string_types = (str,)
    if not isinstance(module_location, string_types):
        raise ValueError("The value provided should either be a string or "
                         "unicode instance. The value '%s' provided was %s "
                         "rather." % (module_location, type(module_location)))
    try:
        name_split = module_location.split(".")
        class_name = name_split.pop(-1)
        if not len(name_split):
            raise ValueError("The value should provide the module location "
                             "joined by '.' e.g. for model named 'test' in "
                             "/app/module.py, The value should be 'app.module.test'")
        module_path = ".".join(name_split)
        module = importlib.import_module(module_path)
        return getattr(module, class_name)
    except AttributeError:
        # Module imported fine but the attribute is missing — return None.
        pass
obj: case or network
def plot_line_power(obj, results, hour, ax=None):
    '''
    Plot the network with branch loading for a single hour.

    obj: case or network
    results: solved results object consumed by ``_generate_edges``
    hour: hour index to plot
    ax: optional matplotlib axis; when None a new 16x10 figure is created
        with the axis frame turned off

    Returns the matplotlib axis, with a vertical 'Loading Factor'
    colorbar appended on the right.
    '''
    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=(16, 10))
        ax.axis('off')
    case, network = _return_case_network(obj)
    # Draw the static network elements first.
    network.draw_buses(ax=ax)
    network.draw_loads(ax=ax)
    network.draw_generators(ax=ax)
    network.draw_connections('gen_to_bus', ax=ax)
    network.draw_connections('load_to_bus', ax=ax)
    # Branch colors/widths/labels encode the loading for this hour.
    edgelist, edge_color, edge_width, edge_labels = _generate_edges(results, case, hour)
    branches = network.draw_branches(ax=ax, edgelist=edgelist, edge_color=edge_color,
                                     width=edge_width, edge_labels=edge_labels)
    # Colorbar to the right of the plot, with label/ticks on its left side.
    divider = make_axes_locatable(ax)
    cax = divider.append_axes('right', size='5%', pad=0.05)
    cb = plt.colorbar(branches, cax=cax, orientation='vertical')
    cax.yaxis.set_label_position('left')
    cax.yaxis.set_ticks_position('left')
    cb.set_label('Loading Factor')
    return ax
Fast forward selection algorithm
def fast_forward_selection(scenarios, number_of_reduced_scenarios, probability=None):
    """Fast forward selection algorithm

    Parameters
    ----------
    scenarios : numpy.array
        Input scenarios. Columns represent individual scenarios; rows are
        the vector of values in each scenario.
    number_of_reduced_scenarios : int
        Final number of scenarios to keep. If equal to or greater than the
        number of input scenarios, the original set is returned unchanged.
    probability : numpy.array (default=None)
        Probability of each scenario; equal probabilities are assumed when
        not given.

    Returns
    -------
    reduced_scenarios : numpy.array
        Reduced set of scenarios.
    reduced_probability : numpy.array
        Probability of the reduced set of scenarios.
    reduced_scenario_set : list
        Scenario numbers of the reduced set of scenarios.
    """
    print("Running fast forward selection algorithm")
    number_of_scenarios = scenarios.shape[1]
    logger.debug("Input number of scenarios = %d", number_of_scenarios)
    # If probability is not defined, assign equal probability to all scenarios.
    if probability is None:
        probability = np.array([1 / number_of_scenarios for i in range(0, number_of_scenarios)])
    # Initialize z (Kantorovich-distance contributions), c (pairwise costs)
    # and J (remaining candidate scenarios).
    z = np.array([np.inf for i in range(0, number_of_scenarios)])
    c = np.zeros((number_of_scenarios, number_of_scenarios))
    # BUG FIX: `range(...)` has no `.remove()` on Python 3 — J must be a list.
    J = list(range(0, number_of_scenarios))
    # No reduction necessary.
    if number_of_reduced_scenarios >= number_of_scenarios:
        return (scenarios, probability, J)
    # Pairwise distances between all scenarios.
    for scenario_k in range(0, number_of_scenarios):
        for scenario_u in range(0, number_of_scenarios):
            c[scenario_k, scenario_u] = distance(scenarios[:, scenario_k], scenarios[:, scenario_u])
    # Initial selection: scenario minimizing the probability-weighted distance
    # to all others.
    for scenario_u in range(0, number_of_scenarios):
        summation = 0
        for scenario_k in range(0, number_of_scenarios):
            if scenario_k != scenario_u:
                summation = summation + probability[scenario_k] * c[scenario_k, scenario_u]
        z[scenario_u] = summation
    U = [np.argmin(z)]
    for u in U:
        J.remove(u)
    for _ in range(0, number_of_scenarios - number_of_reduced_scenarios - 1):
        print("Running {}".format(_))
        for scenario_u in J:
            for scenario_k in J:
                # Fold every already-selected scenario into the running
                # minimum. (The original only kept the min against the last
                # element of U — equivalent since earlier members are already
                # folded into c, but this form is robust.)
                lowest_value = c[scenario_k, scenario_u]
                for scenario_number in U:
                    lowest_value = min(lowest_value, c[scenario_k, scenario_number])
                c[scenario_k, scenario_u] = lowest_value
        for scenario_u in J:
            summation = 0
            for scenario_k in J:
                if scenario_k not in U:
                    summation = summation + probability[scenario_k] * c[scenario_k, scenario_u]
            z[scenario_u] = summation
        # Pick the next scenario among the remaining candidates only.
        u_i = np.argmin([item if i in J else np.inf for i, item in enumerate(z)])
        J.remove(u_i)
        U.append(u_i)
    reduced_scenario_set = U
    reduced_probability = copy.deepcopy(probability)
    # Reassign each deleted scenario's probability to its closest kept scenario.
    for deleted_scenario_number in J:
        lowest_value = np.inf
        for scenario_j in reduced_scenario_set:
            if c[deleted_scenario_number, scenario_j] < lowest_value:
                closest_scenario_number = scenario_j
                lowest_value = c[deleted_scenario_number, scenario_j]
        reduced_probability[closest_scenario_number] = (
            reduced_probability[closest_scenario_number]
            + reduced_probability[deleted_scenario_number]
        )
    reduced_scenarios = copy.deepcopy(scenarios[:, reduced_scenario_set])
    reduced_probability = reduced_probability[reduced_scenario_set]
    return reduced_scenarios, reduced_probability, reduced_scenario_set
Simultaneous backward reduction algorithm
def simultaneous_backward_reduction(scenarios, number_of_reduced_scenarios, probability=None):
    """Simultaneous backward reduction algorithm

    Iteratively *removes* scenarios (collected in ``J``), at each step
    deleting the one whose removal adds the least probability-weighted
    distance; removed probability mass is reassigned to the closest
    surviving scenario.

    Parameters
    ----------
    scenarios : numpy.array
        Input scenarios. Columns represent individual scenarios; rows are
        the vector of values in each scenario.
    number_of_reduced_scenarios : int
        Final number of scenarios to keep. If equal to or greater than the
        number of input scenarios, the original set is returned unchanged.
    probability : numpy.array (default=None)
        Probability of each scenario; equal probabilities are assumed when
        not given.

    Returns
    -------
    reduced_scenarios : numpy.array
        Reduced set of scenarios.
    reduced_probability : numpy.array
        Probability of the reduced set of scenarios.
    reduced_scenario_set : list
        Scenario numbers of the reduced set of scenarios.
    """
    print("Running simultaneous backward reduction algorithm")
    number_of_scenarios = scenarios.shape[1]
    logger.debug("Input number of scenarios = %d", number_of_scenarios)
    # If probability is not defined, assign equal probability to all scenarios.
    if probability is None:
        probability = np.array([1/number_of_scenarios for i in range(0, number_of_scenarios)])
    # Initialize z, c and J (J holds the *deleted* scenario indices here).
    z = np.array([np.inf for i in range(0, number_of_scenarios)])
    c = np.zeros((number_of_scenarios, number_of_scenarios))
    J = []
    # No reduction necessary.
    if number_of_reduced_scenarios >= number_of_scenarios:
        return(scenarios, probability, J)
    """compute the distance of scenario pairs"""
    for scenario_k in range(0, number_of_scenarios):
        for scenario_j in range(0, number_of_scenarios):
            c[scenario_k, scenario_j] = distance(scenarios[:, scenario_k], scenarios[:, scenario_j])
    # The diagonal is overwritten with each scenario's distance to its
    # nearest *other* scenario; z is that distance weighted by probability.
    for scenario_l in range(0, number_of_scenarios):
        lowest_value = np.inf
        for scenario_j in range(0, number_of_scenarios):
            if scenario_l == scenario_j:
                continue
            lowest_value = min(lowest_value, c[scenario_l, scenario_j])
        c[scenario_l, scenario_l] = lowest_value
        z[scenario_l] = probability[scenario_l]*c[scenario_l, scenario_l]
    # First deletion: cheapest scenario to remove.
    J.append(np.argmin(z))
    for _ in range(0, number_of_scenarios - number_of_reduced_scenarios - 1):
        # Update costs: for every surviving l and every k in J∪{l},
        # c[k, l] becomes the distance to the nearest surviving scenario.
        for scenario_l in range(0, number_of_scenarios):
            for scenario_k in range(0, number_of_scenarios):
                if scenario_k in J or scenario_k == scenario_l:
                    if scenario_l not in J:
                        lowest_value = np.inf
                        for scenario_j in range(0, number_of_scenarios):
                            if scenario_j not in J and scenario_j != scenario_l:
                                lowest_value = min(lowest_value, c[scenario_k, scenario_j])
                        c[scenario_k, scenario_l] = lowest_value
        # z[l]: cost of additionally deleting surviving scenario l.
        for scenario_l in range(0, number_of_scenarios):
            if scenario_l not in J:
                summation = 0
                for scenario_k in range(0, number_of_scenarios):
                    if scenario_k in J or scenario_k == scenario_l:
                        summation = summation + probability[scenario_k]*c[scenario_k, scenario_l]
                z[scenario_l] = summation
        # Delete the cheapest surviving scenario (deleted ones masked to inf).
        J.append(np.argmin([item if i not in J else np.inf for i, item in enumerate(z)]))
    # Everything not deleted survives.
    reduced_scenario_set = []
    for scenario_number in range(0, number_of_scenarios):
        if scenario_number not in J:
            reduced_scenario_set.append(scenario_number)
    reduced_probability = []
    reduced_probability = copy.deepcopy(probability)
    # Reassign each deleted scenario's probability to its closest kept scenario.
    for deleted_scenario_number in J:
        lowest_value = np.inf
        # find closest scenario_number
        for scenario_j in reduced_scenario_set:
            if c[deleted_scenario_number, scenario_j] < lowest_value:
                closest_scenario_number = scenario_j
                lowest_value = c[deleted_scenario_number, scenario_j]
        reduced_probability[closest_scenario_number] = reduced_probability[closest_scenario_number] + reduced_probability[deleted_scenario_number]
    reduced_scenarios = copy.deepcopy(scenarios[:, reduced_scenario_set])
    reduced_probability = reduced_probability[reduced_scenario_set]
    return(reduced_scenarios, reduced_probability, reduced_scenario_set)
Shorthand for creating a Giphy api wrapper with the given api key and then calling the search method. Note that this will return a generator
def search(term=None, phrase=None, limit=DEFAULT_SEARCH_LIMIT,
           api_key=GIPHY_PUBLIC_KEY, strict=False, rating=None):
    """
    Shorthand: build a Giphy api wrapper with the given api key and call
    its search method. Note that this returns a generator.
    """
    client = Giphy(api_key=api_key, strict=strict)
    return client.search(term=term, phrase=phrase, limit=limit, rating=rating)
Shorthand for creating a Giphy api wrapper with the given api key and then calling the translate method.
def translate(term=None, phrase=None, api_key=GIPHY_PUBLIC_KEY, strict=False, rating=None):
    """
    Shorthand: build a Giphy api wrapper with the given api key and call
    its translate method.
    """
    client = Giphy(api_key=api_key, strict=strict)
    return client.translate(term=term, phrase=phrase, rating=rating)
Shorthand for creating a Giphy api wrapper with the given api key and then calling the trending method. Note that this will return a generator
def trending(limit=DEFAULT_SEARCH_LIMIT, api_key=GIPHY_PUBLIC_KEY, strict=False, rating=None):
    """
    Shorthand: build a Giphy api wrapper with the given api key and call
    its trending method. Note that this returns a generator.
    """
    client = Giphy(api_key=api_key, strict=strict)
    return client.trending(limit=limit, rating=rating)
Shorthand for creating a Giphy api wrapper with the given api key and then calling the gif method.
def gif(gif_id, api_key=GIPHY_PUBLIC_KEY, strict=False):
    """
    Shorthand: build a Giphy api wrapper with the given api key and call
    its gif method.
    """
    client = Giphy(api_key=api_key, strict=strict)
    return client.gif(gif_id)
Shorthand for creating a Giphy api wrapper with the given api key and then calling the screensaver method.
def screensaver(tag=None, api_key=GIPHY_PUBLIC_KEY, strict=False):
    """
    Shorthand: build a Giphy api wrapper with the given api key and call
    its screensaver method.
    """
    client = Giphy(api_key=api_key, strict=strict)
    return client.screensaver(tag=tag)
Shorthand for creating a Giphy api wrapper with the given api key and then calling the upload method.
def upload(tags, file_path, username=None, api_key=GIPHY_PUBLIC_KEY, strict=False):
    """
    Shorthand: build a Giphy api wrapper with the given api key and call
    its upload method.
    """
    client = Giphy(api_key=api_key, strict=strict)
    return client.upload(tags, file_path, username)
Takes an image dict from the Giphy API and converts it to attributes. Fields expected to be int (width, height, size, frames) are converted when possible. The keys of the data serve as attribute names, with special handling: keys are split at the last underscore; the part before becomes the attribute name and the part after becomes a sub-attribute. For example, fixed_width_downsampled ends up at self.fixed_width.downsampled.
def _make_images(self, images): """ Takes an image dict from the giphy api and converts it to attributes. Any fields expected to be int (width, height, size, frames) will be attempted to be converted. Also, the keys of `data` serve as the attribute names, but with special action taken. Keys are split by the last underscore; anything prior becomes the attribute name, anything after becomes a sub-attribute. For example: fixed_width_downsampled will end up at `self.fixed_width.downsampled` """ # Order matters :) process = ('original', 'fixed_width', 'fixed_height', 'fixed_width_downsampled', 'fixed_width_still', 'fixed_height_downsampled', 'fixed_height_still', 'downsized') for key in process: data = images.get(key) # Ignore empties if not data: continue parts = key.split('_') # attr/subattr style if len(parts) > 2: attr, subattr = '_'.join(parts[:-1]), parts[-1] else: attr, subattr = '_'.join(parts), None # Normalize data img = AttrDict(self._normalized(data)) if subattr is None: setattr(self, attr, img) else: setattr(getattr(self, attr), subattr, img)
Does a normalization of sorts on image type data so that values that should be integers are converted from strings
def _normalized(self, data):
    """
    Normalize image-type data in place: values that should be integers
    ('frames', 'width', 'height', 'size') are converted from strings.
    Non-numeric values are left untouched.
    """
    for field in ('frames', 'width', 'height', 'size'):
        if field in data:
            try:
                data[field] = int(data[field])
            except ValueError:
                pass  # Ignored: keep the original value
    return data
Wrapper for making an api request from giphy
def _fetch(self, endpoint_name, **params):
    """
    Perform a GET against a giphy api endpoint and return the parsed
    JSON payload, raising on HTTP or api-level (meta) errors.
    """
    params['api_key'] = self.api_key
    response = requests.get(self._endpoint(endpoint_name), params=params)
    response.raise_for_status()
    payload = response.json()
    self._check_or_raise(payload.get('meta', {}))
    return payload
Search for GIFs with a given word or phrase; punctuation is ignored. By default this performs a term search. To search by phrase, use the phrase keyword argument. What's the difference between term and phrase searches? Simple: a term search returns results matching any of the given words, whereas a phrase search must match all words.
def search(self, term=None, phrase=None, limit=DEFAULT_SEARCH_LIMIT, rating=None):
    """
    Search for gifs with a given word or phrase. Punctuation is ignored.
    By default, this will perform a `term` search. If you want to search
    by phrase, use the `phrase` keyword argument. A term search matches
    any of the given words; a phrase search must match all words.

    Note that this method is a GiphyImage generator that automatically
    handles api paging. Optionally accepts a limit that will terminate the
    generation after a specified number of results have been yielded.
    This defaults to 25 results; a None implies no limit

    :param term: Search term or terms
    :type term: string
    :param phrase: Search phrase
    :type phrase: string
    :param limit: Maximum number of results to yield
    :type limit: int
    :param rating: limit results to those rated (y,g, pg, pg-13 or r).
    :type rating: string
    """
    assert any((term, phrase)), 'You must supply a term or phrase to search'
    # Phrases should have dashes and not spaces
    if phrase:
        phrase = phrase.replace(' ', '-')
    results_yielded = 0  # Count how many things we yield
    page, per_page = 0, 25
    params = {'q': (term or phrase)}
    if rating:
        params.update({'rating': rating})
    fetch = partial(self._fetch, 'search', **params)
    # Generate results until we 1) run out of pages 2) reach a limit
    while True:
        data = fetch(offset=page, limit=per_page)
        page += per_page
        # BUG FIX (PEP 479): `raise StopIteration` inside a generator raises
        # RuntimeError on Python 3.7+ — use `return` to end the generator.
        if not data['data']:
            return
        for item in data['data']:
            results_yielded += 1
            yield GiphyImage(item)
            if limit is not None and results_yielded >= limit:
                return
        # Check yielded limit and whether or not there are more items
        if (page >= data['pagination']['total_count'] or
                (limit is not None and results_yielded >= limit)):
            return
Suppose you expect the search method to just give you a list rather than a generator. This method will have that effect. Equivalent to::
def search_list(self, term=None, phrase=None, limit=DEFAULT_SEARCH_LIMIT, rating=None):
    """
    Like `search`, but eagerly consumes the generator and returns a
    plain list. Equivalent to::

        >>> g = Giphy()
        >>> results = list(g.search('foo'))
    """
    return list(self.search(term=term, phrase=phrase, limit=limit, rating=rating))
Retrieve a single image that represents a translation of a term or phrase into an animated GIF. Punctuation is ignored. By default this performs a term translation; to translate by phrase, use the phrase keyword argument.
def translate(self, term=None, phrase=None, strict=False, rating=None):
    """
    Retrieve a single image that represents a translation of a term or
    phrase into an animated gif. Punctuation is ignored. By default, this
    will perform a `term` translation. If you want to translate by
    phrase, use the `phrase` keyword argument.

    :param term: Search term or terms
    :type term: string
    :param phrase: Search phrase
    :type phrase: string
    :param strict: Whether an exception should be raised when no results
    :type strict: boolean
    :param rating: limit results to those rated (y,g, pg, pg-13 or r).
    :type rating: string
    """
    assert any((term, phrase)), 'You must supply a term or phrase to search'
    if phrase:
        # Phrases should have dashes and not spaces
        phrase = phrase.replace(' ', '-')
    params = {'s': (term or phrase)}
    if rating:
        params['rating'] = rating
    resp = self._fetch('translate', **params)
    if not resp['data']:
        if strict or self.strict:
            raise GiphyApiException(
                "Term/Phrase '%s' could not be translated into a GIF" %
                (term or phrase))
        return None
    return GiphyImage(resp['data'])
Retrieve GIFs currently trending online. The data returned mirrors that used to create The Hot 100 list of GIFs on Giphy.
def trending(self, rating=None, limit=DEFAULT_SEARCH_LIMIT):
    """
    Retrieve GIFs currently trending online. The data returned mirrors
    that used to create The Hot 100 list of GIFs on Giphy.

    This is a GiphyImage generator that automatically handles api paging,
    yielding at most `limit` results (None implies no limit).

    :param rating: limit results to those rated (y,g, pg, pg-13 or r).
    :type rating: string
    :param limit: Maximum number of results to yield
    :type limit: int
    """
    results_yielded = 0  # Count how many things we yield
    page, per_page = 0, 25
    params = {'rating': rating} if rating else {}
    fetch = partial(self._fetch, 'trending', **params)
    # Generate results until we 1) run out of pages 2) reach a limit
    while True:
        data = fetch(offset=page, limit=per_page)
        page += per_page
        # BUG FIX (PEP 479): `raise StopIteration` inside a generator raises
        # RuntimeError on Python 3.7+ — use `return` to end the generator.
        if not data['data']:
            return
        for item in data['data']:
            results_yielded += 1
            yield GiphyImage(item)
            if limit is not None and results_yielded >= limit:
                return
        # Check yielded limit and whether or not there are more items
        if (page >= data['pagination']['total_count'] or
                (limit is not None and results_yielded >= limit)):
            return
Suppose you expect the trending method to just give you a list rather than a generator. This method will have that effect. Equivalent to::
def trending_list(self, rating=None, limit=DEFAULT_SEARCH_LIMIT):
    """
    Like `trending`, but eagerly consumes the generator and returns a
    plain list. Equivalent to::

        >>> g = Giphy()
        >>> results = list(g.trending())
    """
    return list(self.trending(limit=limit, rating=rating))
Retrieves a specifc gif from giphy based on unique id
def gif(self, gif_id, strict=False):
    """
    Retrieves a specific gif from giphy based on unique id

    :param gif_id: Unique giphy gif ID
    :type gif_id: string
    :param strict: Whether an exception should be raised when no results
    :type strict: boolean
    """
    resp = self._fetch(gif_id)
    if not resp['data']:
        if strict or self.strict:
            raise GiphyApiException(
                "GIF with ID '%s' could not be found" % gif_id)
        return None
    return GiphyImage(resp['data'])
Returns a random giphy image optionally based on a search of a given tag. Note that this method will both query for a screensaver image and fetch the full details of that image ( 2 request calls )
def screensaver(self, tag=None, strict=False):
    """
    Returns a random giphy image, optionally based on a search of a given
    tag. Note that this method both queries for a screensaver image and
    fetches the full details of that image (2 request calls).

    :param tag: Tag to retrieve a screensaver image
    :type tag: string
    :param strict: Whether an exception should be raised when no results
    :type strict: boolean
    """
    params = {'tag': tag} if tag else {}
    resp = self._fetch('screensaver', **params)
    data = resp['data']
    if data and data['id']:
        return self.gif(data['id'])
    if strict or self.strict:
        raise GiphyApiException(
            "No screensaver GIF tagged '%s' found" % tag)
Uploads a gif from the filesystem to Giphy.
def upload(self, tags, file_path, username=None):
    """
    Uploads a gif from the filesystem to Giphy, then fetches and returns
    the uploaded image's details.

    :param tags: Tags to apply to the uploaded image
    :type tags: list
    :param file_path: Path at which the image can be found
    :type file_path: string
    :param username: Your channel username if not using public API key
    """
    params = {
        'api_key': self.api_key,
        'tags': ','.join(tags),
    }
    if username is not None:
        params['username'] = username
    with open(file_path, 'rb') as f:
        resp = requests.post(
            GIPHY_UPLOAD_ENDPOINT, params=params, files={'file': f})
    resp.raise_for_status()
    data = resp.json()
    self._check_or_raise(data.get('meta', {}))
    return self.gif(data['data']['id'])
Convert '(1,1)' to 'B2' and 'B2' to '(1,1)', auto-recognizing the input format.
def _convert(self, args):
    """Convert between '(row,col)' tuples and spreadsheet-style cells.

    Auto-recognizes the input format: a string containing a comma is
    treated as a '(row,col)' pair and converted to letter+number form
    (e.g. '(1,1)' -> 'B2'); otherwise the input is a cell like 'D1'
    and is converted back to '(0,3)'.

    :param args: coordinate string in either format
    :return: the coordinate string in the other format
    """
    if args.find(",") > -1:
        # '(row,col)' -> letter+number
        row, col = args.replace("(", "").replace(")", "").split(",")
        letter = chr(int(col) + 65)  # chr(65) is "A" and ord("A") is 65
        number = str(int(row) + 1)
        return letter + number
    else:
        # letter+number -> '(row,col)'
        # Use args[1:] (not args[1:2]) so multi-digit rows such as
        # 'A10' parse correctly instead of silently truncating to 'A1'.
        row = str(int(args[1:]) - 1)              # D1 -> (0,3): 1 -> 0
        col = str(ord(args[0].upper()) - 65)      # D1 -> (0,3): D -> 3
        return "(" + row + "," + col + ")"
Prepares the extension element for access control. The extension element is the optional parameter for the YouTubeVideoEntry; we use it to modify access control settings.
def _access_control(self, access_control, my_media_group=None):
    """
    Prepare the optional extension element that controls video access.

    The extension element is the optional parameter for the
    YouTubeVideoEntry; it is used here to modify access control
    settings.

    Returns:
        list of extension elements, or None when no extension is needed
    """
    extension = None
    if access_control is AccessControl.Private:
        # WARNING: this part of code is not tested
        # Mark the media group itself as private.
        if my_media_group:
            my_media_group.private = gdata.media.Private()
    elif access_control is AccessControl.Unlisted:
        # Unlisted: deny the 'list' permission via an accessControl element.
        from gdata.media import YOUTUBE_NAMESPACE
        from atom import ExtensionElement
        extension = [
            ExtensionElement(
                'accessControl',
                namespace=YOUTUBE_NAMESPACE,
                attributes={'action': 'list', 'permission': 'denied'},
            )
        ]
    return extension
Retrieve the video feed by username Returns: gdata. youtube. YouTubeVideoFeed object
def fetch_feed_by_username(self, username):
    """
    Retrieve the uploads video feed for the given username.

    :param username: YouTube username whose uploads feed to fetch
    Returns: gdata.youtube.YouTubeVideoFeed object
    """
    # Build the URI with explicit '/' separators. The previous
    # implementation used os.sep.join, which produces backslashes on
    # Windows and therefore an invalid URL. (No trailing slash.)
    uri = 'http://gdata.youtube.com/feeds/api/users/%s/uploads' % username
    return Api.yt_service.GetYouTubeVideoFeed(uri)
Authenticates the user and sets the GData Auth token. All params are optional; if not set, we will use the ones in the settings, and if no settings are found an AttributeError is raised. Params are email, password and source. Source is the app id.
def authenticate(self, email=None, password=None, source=None):
    """
    Authenticate the user and set the GData Auth token.

    All params are optional; any that are not given fall back to the
    Django settings (YOUTUBE_AUTH_EMAIL, YOUTUBE_AUTH_PASSWORD,
    YOUTUBE_CLIENT_ID). If no settings are found an AttributeError is
    raised. Source is the app id.

    Raises:
        ApiError: when the credentials are rejected
    """
    from gdata.service import BadAuthentication

    # Fall back to settings for any credential not passed in
    Api.yt_service.email = email or settings.YOUTUBE_AUTH_EMAIL
    Api.yt_service.password = password or settings.YOUTUBE_AUTH_PASSWORD
    Api.yt_service.source = source or settings.YOUTUBE_CLIENT_ID

    try:
        Api.yt_service.ProgrammaticLogin()
    except BadAuthentication:
        raise ApiError(_("Incorrect username or password"))
    self.authenticated = True
Direct upload method: Uploads the video directly from your server to Youtube and creates a video
def upload_direct(self, video_path, title, description="", keywords="",
                  developer_tags=None, access_control=AccessControl.Public):
    """
    Direct upload method: uploads the video directly from your server
    to YouTube and creates a video entry.

    :param video_path: filesystem path of the video to upload
    :param title: video title
    :param description: plain-text description
    :param keywords: comma separated keywords string
    :param developer_tags: optional tags attached to the entry
    :param access_control: one of the AccessControl values
    Returns: gdata.youtube.YouTubeVideoEntry
    See: https://developers.google.com/youtube/1.0/developers_guide_python#UploadingVideos
    """
    # Media group carries the video's meta-data
    media_group = gdata.media.Group(
        title=gdata.media.Title(text=title),
        description=gdata.media.Description(description_type='plain',
                                            text=description),
        keywords=gdata.media.Keywords(text=keywords),
        category=[gdata.media.Category(
            text='Autos',
            scheme='http://gdata.youtube.com/schemas/2007/categories.cat',
            label='Autos')],
        #player = None
    )

    # Access Control extension (may also mutate media_group for Private)
    extension = self._access_control(access_control, media_group)

    # Assemble the entry to be uploaded
    entry = gdata.youtube.YouTubeVideoEntry(media=media_group,
                                            extension_elements=extension)
    if developer_tags:
        entry.AddDeveloperTags(developer_tags)

    # Upload the file and return the freshly created entry
    return Api.yt_service.InsertVideoEntry(entry, video_path)
Browser based upload Creates the video entry and meta data to initiate a browser upload
def upload(self, title, description="", keywords="", developer_tags=None,
           access_control=AccessControl.Public):
    """
    Browser-based upload: creates the video entry and meta-data needed
    to initiate a browser upload.

    Authentication is needed.

    Params:
        title: string
        description: string
        keywords: comma seperated string
        developer_tags: tuple

    Return:
        dict with post_url and youtube_token, i.e.
        {'post_url': post_url, 'youtube_token': youtube_token}

    Raises:
        ApiError: on no authentication
    """
    if not self.authenticated:
        raise ApiError(_("Authentication is required"))

    # Media group carries the video's meta-data
    media_group = gdata.media.Group(
        title=gdata.media.Title(text=title),
        description=gdata.media.Description(description_type='plain',
                                            text=description),
        keywords=gdata.media.Keywords(text=keywords),
        category=[gdata.media.Category(
            text='Autos',
            scheme='http://gdata.youtube.com/schemas/2007/categories.cat',
            label='Autos')],
        #player = None
    )

    # Access Control extension (may also mutate media_group for Private)
    extension = self._access_control(access_control, media_group)

    entry = gdata.youtube.YouTubeVideoEntry(media=media_group,
                                            extension_elements=extension)
    if developer_tags:
        entry.AddDeveloperTags(developer_tags)

    # Upload meta data only; the browser posts the actual file later.
    post_url, youtube_token = Api.yt_service.GetFormUploadToken(entry)[:2]
    return {'post_url': post_url, 'youtube_token': youtube_token}
Checks the video upload status Newly uploaded videos may be in the processing state
def check_upload_status(self, video_id):
    """
    Check the video upload status.

    Newly uploaded videos may be in the processing state.
    Authentication is required.

    Returns:
        True if the video is available, otherwise a dict containing
        upload_state and detailed message, i.e.
        {"upload_state": "processing", "detailed_message": ""}
    """
    if not self.authenticated:
        raise ApiError(_("Authentication is required"))

    status = Api.yt_service.CheckUploadStatus(self.fetch_video(video_id))
    if status is None:
        # No pending status: video is fully available
        return True
    return {"upload_state": status[0], "detailed_message": status[1]}
Updates the video
def update_video(self, video_id, title="", description="", keywords="",
                 access_control=AccessControl.Unlisted):
    """
    Update the video's meta-data on YouTube.

    Authentication is required.

    Params:
        video_id: id of the video to update (fetched internally)
        title: string
        description: string
        keywords: string (currently accepted but NOT applied — the
            keyword update is deliberately disabled below)
        access_control: one of the AccessControl values

    Returns:
        a video entry on success, None otherwise
    """
    if not self.authenticated:
        raise ApiError(_("Authentication is required"))

    entry = self.fetch_video(video_id)

    # Replace the access-control extension if one was produced
    extension = self._access_control(access_control)
    if extension:
        entry.extension_elements = extension

    if title:
        entry.media.title.text = title
    if description:
        entry.media.description.text = description
    #if keywords:
    #    entry.media.keywords.text = keywords

    return Api.yt_service.UpdateVideoEntry(entry)
Deletes the video
def delete_video(self, video_id):
    """
    Delete the video from YouTube.

    Authentication is required.

    Params:
        video_id: id of the video to delete (entry fetched internally)

    Return:
        True if successful

    Raise:
        OperationError: on unsuccessful deletion
    """
    if not self.authenticated:
        raise ApiError(_("Authentication is required"))

    entry = self.fetch_video(video_id)
    if not Api.yt_service.DeleteVideoEntry(entry):
        raise OperationError(_("Cannot be deleted from Youtube"))
    return True
Controls the availability of the video. Newly uploaded videos are in processing stage. And others might be rejected.
def check_video_availability(request, video_id):
    """
    Report the availability of a video as JSON.

    Newly uploaded videos are in the processing stage, and others might
    be rejected; check_upload_status returns True only when the video
    is fully available.

    Returns: json response, e.g. {"success": true}
    """
    api = Api()
    api.authenticate()
    is_ready = api.check_upload_status(video_id) is True
    return HttpResponse(json.dumps({'success': is_ready}),
                        content_type="application/json")
Displays a video in an embed player
def video(request, video_id):
    """
    Display a video in an embed player.

    If the video is not yet available (still processing, failed or
    rejected), an informational page is rendered instead.
    """
    # Check video availability; available states include "processing"
    api = Api()
    api.authenticate()
    availability = api.check_upload_status(video_id)

    if availability is not True:
        # Video is not available -- pick a template/message by state.
        vid = Video.objects.filter(video_id=video_id).get()
        state = availability["upload_state"]
        # Add additional states here as they are discovered.
        if state in ("failed", "rejected"):
            template = "django_youtube/video_failed.html"
            message = _("Invalid video.")
        else:
            template = "django_youtube/video_unavailable.html"
            message = _("This video is currently being processed")
        return render_to_response(
            template,
            {"video": vid, "video_id": video_id,
             "message": message, "availability": availability},
            context_instance=RequestContext(request)
        )

    return render_to_response(
        "django_youtube/video.html",
        _video_params(request, video_id),
        context_instance=RequestContext(request)
    )
List of videos of a user; if username is not set, shows the currently logged-in user.
def video_list(request, username=None):
    """
    List the videos of a user.

    If username is not given, shows the videos of the currently
    logged-in user; anonymous requests without a username get a 404.
    """
    if username is None and not request.user.is_authenticated():
        from django.http import Http404
        raise Http404

    from django.contrib.auth.models import User
    owner = User.objects.get(username=username) if username else request.user

    # Collect player parameters for each of the user's videos
    params = [_video_params(request, v.video_id)
              for v in Video.objects.filter(user=owner).all()]

    return render_to_response(
        "django_youtube/videos.html",
        {"video_params": params},
        context_instance=RequestContext(request)
    )
Direct upload method: starts by uploading the video to our server, then sends the video file to YouTube.
def direct_upload(request):
    """
    Direct upload: the video is first uploaded to our server, then the
    file is sent on to YouTube.

    param: (optional) `only_data`: if set, a json response is returned,
        i.e. {'video_id': '124weg'}
    return: if `only_data` is set, a json object; otherwise redirects
        to the video display page
    """
    # Bind the flag up front. Previously it was only assigned inside
    # the success path of the try block, so GET requests and failed
    # POSTs raised NameError when it was referenced below.
    return_only_data = request.GET.get('only_data')

    if request.method == "POST":
        try:
            form = YoutubeDirectUploadForm(request.POST, request.FILES)
            # upload the file to our server
            if form.is_valid():
                uploaded_video = form.save()

                # send this file to youtube
                api = Api()
                api.authenticate()
                video_entry = api.upload_direct(
                    uploaded_video.file_on_server.path,
                    "Uploaded video from zuqqa")

                # get data from video entry
                swf_url = video_entry.GetSwfUrl()
                youtube_url = video_entry.id.text

                # getting video_id is tricky, I can only reach the url which
                # contains the video_id, so the only option is to parse it.
                # https://groups.google.com/forum/?fromgroups=#!topic/youtube-api-gdata/RRl_h4zuKDQ
                url_parts = youtube_url.split("/")
                url_parts.reverse()
                video_id = url_parts[0]

                # save video_id to video instance
                video = Video()
                video.user = request.user
                video.video_id = video_id
                video.title = 'tmp video'
                video.youtube_url = youtube_url
                video.swf_url = swf_url
                video.save()

                # send a signal
                video_created.send(sender=video, video=video)

                # delete the uploaded video instance
                uploaded_video.delete()

                # return the response
                if return_only_data:
                    return HttpResponse(json.dumps({"video_id": video_id}),
                                        content_type="application/json")
                # Redirect to the video page or the specified page
                try:
                    next_url = settings.YOUTUBE_UPLOAD_REDIRECT_URL
                except AttributeError:
                    next_url = reverse(
                        "django_youtube.views.video",
                        kwargs={"video_id": video_id})
                return HttpResponseRedirect(next_url)
        # Narrowed from a bare ``except:`` so SystemExit and
        # KeyboardInterrupt are not swallowed.
        except Exception:
            import sys
            logger.error("Unexpected error: %s - %s" % (
                sys.exc_info()[0], sys.exc_info()[1]))
            # @todo: proper error management
            return HttpResponse("error happened")

    # GET request, or POST with an invalid form: show a fresh form.
    form = YoutubeDirectUploadForm()

    if return_only_data:
        return HttpResponse(json.dumps({"error": 500}),
                            content_type="application/json")
    return render_to_response(
        "django_youtube/direct-upload.html",
        {"form": form},
        context_instance=RequestContext(request)
    )
Displays an upload form Creates upload url and token from youtube api and uses them on the form
def upload(request):
    """
    Display an upload form.

    Creates the upload url and token from the youtube api and uses
    them on the form.
    """
    # Optional parameters with sensible defaults
    title = request.GET.get(
        "title",
        "%s's video on %s" % (request.user.username, request.get_host()))
    description = request.GET.get("description", "")
    keywords = request.GET.get("keywords", "")

    # Try to create post_url and token to build the upload form
    try:
        api = Api()
        # upload method needs authentication
        api.authenticate()
        # Customize the following call to your needs; description,
        # keywords or developer keys can also be set here. I prefer to
        # update video information after the upload finishes.
        data = api.upload(title, description=description, keywords=keywords,
                          access_control=AccessControl.Unlisted)
    except ApiError as e:
        # An api error happened, redirect to homepage
        messages.add_message(request, messages.ERROR, e.message)
        return HttpResponseRedirect("/")
    except:
        # An unexpected error happened, redirect to homepage
        messages.add_message(request, messages.ERROR, _(
            'An error occurred during the upload, Please try again.'))
        return HttpResponseRedirect("/")

    form = YoutubeUploadForm(initial={"token": data["youtube_token"]})

    scheme = 'https' if request.is_secure() else 'http'
    next_url = '%s://%s%s/' % (scheme, request.get_host(),
                               reverse("django_youtube.views.upload_return"))

    return render_to_response(
        "django_youtube/upload.html",
        {"form": form, "post_url": data["post_url"], "next_url": next_url},
        context_instance=RequestContext(request)
    )
The upload result page Youtube will redirect to this page after upload is finished Saves the video data and redirects to the next page
def upload_return(request):
    """
    The upload result page.

    Youtube redirects here after the upload finishes; the video data is
    saved and the user is redirected to the next page.

    Params:
        status: status of the upload (200 for success)
        id: id number of the video
    """
    status = request.GET.get("status")
    video_id = request.GET.get("id")

    if status != "200" or not video_id:
        # upload failed, redirect to upload page
        from django.contrib import messages
        messages.add_message(
            request, messages.ERROR, _('Upload failed, Please try again.'))
        return HttpResponseRedirect(reverse("django_youtube.views.upload"))

    # upload is successful: persist the video entry
    video = Video()
    video.user = request.user
    video.video_id = video_id
    video.save()

    # send a signal
    video_created.send(sender=video, video=video)

    # Redirect to the video page or the specified page
    try:
        next_url = settings.YOUTUBE_UPLOAD_REDIRECT_URL
    except AttributeError:
        next_url = reverse(
            "django_youtube.views.video", kwargs={"video_id": video_id})
    return HttpResponseRedirect(next_url)
Removes the video from youtube and from db Requires POST
def remove(request, video_id):
    """
    Remove the video from the database (and, via the model's delete
    hook, from YouTube), then redirect.

    NOTE(review): the original docstring said "Requires POST", but the
    request method is never checked here — confirm enforcement happens
    at the URL/route level before relying on it.
    """
    # prepare redirection url
    try:
        next_url = settings.YOUTUBE_DELETE_REDIRECT_URL
    except AttributeError:
        next_url = reverse("django_youtube.views.upload")

    # Remove from db; failures just flash a message (best-effort).
    try:
        Video.objects.get(video_id=video_id).delete()
    # Narrowed from a bare ``except:`` so SystemExit and
    # KeyboardInterrupt are not swallowed.
    except Exception:
        from django.contrib import messages
        messages.add_message(
            request, messages.ERROR, _('Video could not be deleted.'))

    # Return to upload page or specified page
    return HttpResponseRedirect(next_url)
Connects to Youtube Api and retrieves the video entry object
def entry(self):
    """
    Connect to the Youtube API and retrieve this video's entry object.

    Return:
        gdata.youtube.YouTubeVideoEntry
    """
    client = Api()
    client.authenticate()
    return client.fetch_video(self.video_id)
Synchronize the video information in the db with the video on YouTube. Signals are not used here in order to avoid saving the video instance twice.
def save(self, *args, **kwargs):
    """
    Synchronize the video information in the db with the video on
    Youtube.

    Signals are deliberately not used here to avoid saving the video
    instance twice.
    """
    if not self.id:
        # New instance: pull the details from the api first.
        entry = self.entry()
        self.title = entry.media.title.text
        self.description = entry.media.description.text
        self.keywords = entry.media.keywords.text
        self.youtube_url = entry.media.player.url
        self.swf_url = entry.GetSwfUrl()
        if entry.media.private:
            self.access_control = AccessControl.Private
        else:
            self.access_control = AccessControl.Public

        # Persist the row before attaching thumbnails to it.
        super(Video, self).save(*args, **kwargs)

        for thumb in entry.media.thumbnail:
            snapshot = Thumbnail()
            snapshot.url = thumb.url
            snapshot.video = self
            snapshot.save()
    else:
        # Existing instance: push local changes to youtube first.
        api = Api()
        api.authenticate()  # update requires authentication
        # Update the info on youtube; raises on failure.
        api.update_video(self.video_id, self.title, self.description,
                         self.keywords, self.access_control)
        # Save the model
        return super(Video, self).save(*args, **kwargs)
Deletes the video from youtube
def delete(self, *args, **kwargs):
    """
    Delete the video from youtube, then from the database.

    Raises:
        OperationError: when youtube refuses the deletion
    """
    client = Api()
    # Authentication is required for deletion
    client.authenticate()
    # Raises OperationError on unsuccessful deletion
    client.delete_video(self.video_id)
    # Call the super method
    return super(Video, self).delete(*args, **kwargs)
Method for Regenerate Key <https:// m2x. att. com/ developer/ documentation/ v2/ keys#Regenerate - Key > _ endpoint.
def regenerate(self):
    """ Method for `Regenerate Key <https://m2x.att.com/developer/documentation/v2/keys#Regenerate-Key>`_ endpoint.

    :raises: :class:`~requests.exceptions.HTTPError` if an error occurs when sending the HTTP request
    """
    response = self.api.post(self.item_path(self.key) + '/regenerate')
    # Merge the regenerated key details into the cached data
    self.data.update(response)
Method for List Devices from an existing Distribution <https:// m2x. att. com/ developer/ documentation/ v2/ distribution#List - Devices - from - an - existing - Distribution > _ endpoint.
def devices(self, **params):
    """ Method for `List Devices from an existing Distribution <https://m2x.att.com/developer/documentation/v2/distribution#List-Devices-from-an-existing-Distribution>`_ endpoint.

    :param params: Query parameters passed as keyword arguments. View M2X API Docs for listing of available parameters.

    :return: List of Devices associated with this Distribution as :class:`.DistributionDevice` objects
    :rtype: `list <https://docs.python.org/2/library/functions.html#list>`_

    :raises: :class:`~requests.exceptions.HTTPError` if an error occurs when sending the HTTP request
    """
    # Scope the listing to this distribution's id
    return DistributionDevice.list(
        self.api, distribution_id=self.id, **params)
Method for Add Device to an Existing Distribution <https:// m2x. att. com/ developer/ documentation/ v2/ distribution#Add - Device - to - an - existing - Distribution > _ endpoint.
def add_device(self, params):
    """ Method for `Add Device to an Existing Distribution <https://m2x.att.com/developer/documentation/v2/distribution#Add-Device-to-an-existing-Distribution>`_ endpoint.

    :param params: Dict of query parameters, expanded into keyword arguments for the create call. View M2X API Docs for listing of available parameters.

    :return: The newly created DistributionDevice
    :rtype: DistributionDevice

    :raises: :class:`~requests.exceptions.HTTPError` if an error occurs when sending the HTTP request
    """
    # Unpack the params dict into keyword arguments for the API call
    return DistributionDevice.create(
        self.api, distribution_id=self.id, **params)
Method for List Devices from an existing Collection <https:// m2x. att. com/ developer/ documentation/ v2/ collections#List - Devices - from - an - existing - Collection > _ endpoint.
def devices(self, **params):
    """ Method for `List Devices from an existing Collection <https://m2x.att.com/developer/documentation/v2/collections#List-Devices-from-an-existing-Collection>`_ endpoint.

    :param params: Query parameters passed as keyword arguments. View M2X API Docs for listing of available parameters.

    :return: List of :class:`.Device` objects
    :rtype: `list <https://docs.python.org/2/library/functions.html#list>`_

    :raises: :class:`~requests.exceptions.HTTPError` if an error occurs when sending the HTTP request
    """
    # Scope the listing to this collection's id
    return CollectionDevice.list(self.api, collection_id=self.id, **params)