| sentence1 (string, length 52 to 3.87M) | sentence2 (string, length 1 to 47.2k) | label (1 class: entailment) |
|---|---|---|
def do_folder_update_metadata(client, args):
"""Update file metadata"""
client.update_folder_metadata(args.uri, foldername=args.foldername,
description=args.description,
mtime=args.mtime, privacy=args.privacy,
privacy_recursive=args.recursive)
return True
|
Update folder metadata
|
entailment
|
def main(): # pylint: disable=too-many-statements
"""Main entry point"""
parser = argparse.ArgumentParser(prog='mediafire-cli',
description=__doc__)
parser.add_argument('--debug', dest='debug', action='store_true',
default=False, help='Enable debug output')
parser.add_argument('--email', dest='email', required=False,
default=os.environ.get('MEDIAFIRE_EMAIL', None))
parser.add_argument('--password', dest='password', required=False,
default=os.environ.get('MEDIAFIRE_PASSWORD', None))
actions = parser.add_subparsers(title='Actions', dest='action')
# http://bugs.python.org/issue9253#msg186387
actions.required = True
# ls
subparser = actions.add_parser('ls',
help=do_ls.__doc__)
subparser.add_argument('uri', nargs='?',
help='MediaFire URI',
default='mf:///')
# file-upload
subparser = actions.add_parser('file-upload',
help=do_file_upload.__doc__)
subparser.add_argument('paths', nargs='+',
help='Path[s] to upload')
subparser.add_argument('dest_uri', help='Destination MediaFire URI')
# file-download
subparser = actions.add_parser('file-download',
help=do_file_download.__doc__)
subparser.add_argument('uris', nargs='+',
help='MediaFire File URI[s] to download')
subparser.add_argument('dest_path', help='Destination path')
# file-show
subparser = actions.add_parser('file-show',
help=do_file_show.__doc__)
subparser.add_argument('uris', nargs='+',
help='MediaFire File URI[s] to print out')
# folder-create
subparser = actions.add_parser('folder-create',
help=do_folder_create.__doc__)
subparser.add_argument('uris', nargs='+',
help='MediaFire folder path URI[s]')
# resource-delete
subparser = actions.add_parser('resource-delete',
help=do_resource_delete.__doc__)
subparser.add_argument('uris', nargs='+',
help='MediaFire resource URI[s]')
subparser.add_argument('--purge', help="Purge, don't send to trash",
dest="purge", action="store_true", default=False)
# file-update-metadata
subparser = actions.add_parser('file-update-metadata',
help=do_file_update_metadata.__doc__)
subparser.add_argument('uri', help='MediaFire file URI')
subparser.add_argument('--filename', help='Set file name',
default=None, dest='filename')
subparser.add_argument('--privacy', help='Set file privacy',
choices=['public', 'private'],
default=None, dest='privacy')
subparser.add_argument('--description',
help='Set file description',
dest='description', default=None)
subparser.add_argument('--mtime', help="Set file modification time",
dest='mtime', default=None)
# folder-update-metadata
subparser = actions.add_parser('folder-update-metadata',
help=do_folder_update_metadata.__doc__)
subparser.add_argument('uri', help='MediaFire folder URI')
subparser.add_argument('--foldername', help='Set folder name',
default=None, dest='foldername')
subparser.add_argument('--privacy', help='Set folder privacy',
choices=['public', 'private'],
default=None, dest='privacy')
subparser.add_argument('--recursive', help='Set privacy recursively',
action='store_true', default=None,
dest='recursive')
subparser.add_argument('--description',
help='Set folder description',
dest='description', default=None)
subparser.add_argument('--mtime', help='Set folder mtime',
default=None, dest='mtime')
# debug-get-resource
subparser = actions.add_parser('debug-get-resource',
help=do_debug_get_resource.__doc__)
subparser.add_argument('uri', help='MediaFire resource URI',
default='mf:///', nargs='?')
args = parser.parse_args()
if args.debug:
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logging.getLogger("mediafire.client").setLevel(logging.DEBUG)
client = MediaFireClient()
if args.email and args.password:
client.login(args.email, args.password, app_id=APP_ID)
router = {
"file-upload": do_file_upload,
"file-download": do_file_download,
"file-show": do_file_show,
"ls": do_ls,
"folder-create": do_folder_create,
"resource-delete": do_resource_delete,
"file-update-metadata": do_file_update_metadata,
"folder-update-metadata": do_folder_update_metadata,
"debug-get-resource": do_debug_get_resource
}
if args.action in router:
result = router[args.action](client, args)
if not result:
sys.exit(1)
else:
print('Unsupported action: {}'.format(args.action))
sys.exit(1)
|
Main entry point
|
entailment
|
def pub(self, topic, message):
'''Publish a message to a topic'''
return self.post('pub', params={'topic': topic}, data=message)
|
Publish a message to a topic
|
entailment
|
def mpub(self, topic, messages, binary=True):
'''Send multiple messages to a topic. Optionally pack the messages'''
if binary:
# Pack and ship the data
return self.post('mpub', data=pack(messages)[4:],
params={'topic': topic, 'binary': True})
elif any('\n' in m for m in messages):
# If any of the messages has a newline, then you must use the binary
# calling format
raise ClientException(
'Use `binary` flag in mpub for messages with newlines')
else:
return self.post(
'mpub', params={'topic': topic}, data='\n'.join(messages))
|
Send multiple messages to a topic. Optionally pack the messages
|
entailment
|
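A minimal usage sketch for mpub, assuming an nsqd instance listening on http://localhost:4151 (the default HTTP port); the topic name and payloads are hypothetical.
from nsq.http import nsqd
client = nsqd.Client('http://localhost:4151')
# Binary mode (default): messages are length-framed, so newlines are safe
client.mpub('events', ['multi\nline', 'another'])
# Plain mode: messages are joined with '\n', so none may contain one
client.mpub('events', ['first', 'second', 'third'], binary=False)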
def clean_stats(self):
'''Stats with topics and channels keyed on topic and channel names'''
stats = self.stats()
if 'topics' in stats: # pragma: no branch
topics = stats['topics']
topics = dict((t.pop('topic_name'), t) for t in topics)
for topic, data in topics.items():
if 'channels' in data: # pragma: no branch
channels = data['channels']
channels = dict(
(c.pop('channel_name'), c) for c in channels)
data['channels'] = channels
stats['topics'] = topics
return stats
|
Stats with topics and channels keyed on topic and channel names
|
entailment
|
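An illustrative before/after for clean_stats (field values are hypothetical): the list-of-dicts payload from nsqd becomes dicts keyed on topic and channel names.
raw = {'topics': [
{'topic_name': 'events', 'depth': 3,
'channels': [{'channel_name': 'workers', 'depth': 3}]}]}
# clean_stats() would reshape this into:
clean = {'topics': {
'events': {'depth': 3,
'channels': {'workers': {'depth': 3}}}}}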
def execute_add(args, root_dir=None):
"""Add a new command to the daemon queue.
Args:
args['command'] (list(str)): The actual program call. Something like ['ls', '-a'] or ['ls -al']
root_dir (string): The path to the root directory the daemon is running in.
"""
# We accept a list of strings.
# This is done to create a better commandline experience with argparse.
command = ' '.join(args['command'])
# Send new instruction to daemon
instruction = {
'command': command,
'path': os.getcwd()
}
print_command_factory('add')(instruction, root_dir)
|
Add a new command to the daemon queue.
Args:
args['command'] (list(str)): The actual program call. Something like ['ls', '-a'] or ['ls -al']
root_dir (string): The path to the root directory the daemon is running in.
|
entailment
|
def execute_edit(args, root_dir=None):
"""Edit a existing queue command in the daemon.
Args:
args['key'] int: The key of the queue entry to be edited
root_dir (string): The path to the root directory the daemon is running in.
"""
# Get editor
EDITOR = os.environ.get('EDITOR', 'vim')
# Get command from server
key = args['key']
status = command_factory('status')({}, root_dir=root_dir)
# Check if queue is not empty, the entry exists and is queued or stashed
if not isinstance(status['data'], str) and key in status['data']:
if status['data'][key]['status'] in ['queued', 'stashed']:
command = status['data'][key]['command']
else:
print("Entry is not 'queued' or 'stashed'")
sys.exit(1)
else:
print('No entry with this key')
sys.exit(1)
with tempfile.NamedTemporaryFile(suffix=".tmp") as tf:
tf.write(command.encode('utf-8'))
tf.flush()
call([EDITOR, tf.name])
# do the parsing with `tf` using regular File operations.
# for instance:
tf.seek(0)
edited_command = tf.read().decode('utf-8')
print_command_factory('edit')({
'key': key,
'command': edited_command,
}, root_dir=root_dir)
|
Edit an existing queue command in the daemon.
Args:
args['key'] int: The key of the queue entry to be edited
root_dir (string): The path to the root directory the daemon is running in.
|
entailment
|
def command_factory(command):
"""A factory which returns functions for direct daemon communication.
This factory will create a function which sends a payload to the daemon
and returns the unpickled object which is returned by the daemon.
Args:
command (string): The type of payload this should be. This determines
how the daemon will interpret this instruction.
Returns:
function: The created function.
"""
def communicate(body=None, root_dir=None):
"""Communicate with the daemon.
This function sends a payload to the daemon and returns the unpickled
object sent by the daemon.
Args:
body (dict): Any other arguments that should be put into the payload.
root_dir (str): The root directory in which we expect the daemon.
We need this to connect to the daemons socket.
Returns:
object: The unpickled payload returned by the daemon.
"""
if body is None:
body = {}
client = connect_socket(root_dir)
body['mode'] = command
# Delete the `func` entry argparse uses for dispatching, as functions
# can't be pickled and it shouldn't be sent to the daemon.
if 'func' in body:
del body['func']
data_string = pickle.dumps(body, -1)
client.send(data_string)
# Receive message, unpickle and return it
response = receive_data(client)
return response
return communicate
|
A factory which returns functions for direct daemon communication.
This factory will create a function which sends a payload to the daemon
and returns the unpickled object which is returned by the daemon.
Args:
command (string): The type of payload this should be. This determines
how the daemon will interpret this instruction.
Returns:
function: The created function.
|
entailment
|
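A usage sketch mirroring execute_edit above, assuming a pueue daemon is running; the root_dir value is hypothetical.
get_status = command_factory('status')
status = get_status({}, root_dir='/home/user')
if not isinstance(status['data'], str):
for key, entry in status['data'].items():
print(key, entry['status'], entry['command'])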
def get_descriptor(self, number):
"""Create file descriptors for process output."""
# Create stdout file and get file descriptor
stdout_path = os.path.join(self.config_dir,
'pueue_process_{}.stdout'.format(number))
if os.path.exists(stdout_path):
os.remove(stdout_path)
out_descriptor = open(stdout_path, 'w+')
# Create stderr file and get file descriptor
stderr_path = os.path.join(self.config_dir,
'pueue_process_{}.stderr'.format(number))
if os.path.exists(stderr_path):
os.remove(stderr_path)
err_descriptor = open(stderr_path, 'w+')
self.descriptors[number] = {}
self.descriptors[number]['stdout'] = out_descriptor
self.descriptors[number]['stdout_path'] = stdout_path
self.descriptors[number]['stderr'] = err_descriptor
self.descriptors[number]['stderr_path'] = stderr_path
return out_descriptor, err_descriptor
|
Create file descriptors for process output.
|
entailment
|
def clean_descriptor(self, number):
"""Close file descriptor and remove underlying files."""
self.descriptors[number]['stdout'].close()
self.descriptors[number]['stderr'].close()
if os.path.exists(self.descriptors[number]['stdout_path']):
os.remove(self.descriptors[number]['stdout_path'])
if os.path.exists(self.descriptors[number]['stderr_path']):
os.remove(self.descriptors[number]['stderr_path'])
|
Close file descriptor and remove underlying files.
|
entailment
|
def check_finished(self):
"""Poll all processes and handle any finished processes."""
changed = False
for key in list(self.processes.keys()):
# Poll the process and check if it finished
process = self.processes[key]
process.poll()
if process.returncode is not None:
# If a process is terminated by `stop` or `kill`
# we want to queue it again instead of closing it as failed.
if key not in self.stopping:
# Wait for the process and let it flush its output
process.communicate()
descriptor = self.descriptors[key]
descriptor['stdout'].seek(0)
descriptor['stderr'].seek(0)
output = get_descriptor_output(descriptor['stdout'], key, handler=self)
error_output = get_descriptor_output(descriptor['stderr'], key, handler=self)
# Mark queue entry as finished and save returncode
self.queue[key]['returncode'] = process.returncode
if process.returncode != 0:
self.queue[key]['status'] = 'failed'
else:
self.queue[key]['status'] = 'done'
# Add outputs to queue
self.queue[key]['stdout'] = output
self.queue[key]['stderr'] = error_output
self.queue[key]['end'] = str(datetime.now().strftime("%H:%M"))
self.queue.write()
changed = True
else:
self.stopping.remove(key)
if key in self.to_remove:
self.to_remove.remove(key)
del self.queue[key]
else:
if key in self.to_stash:
self.to_stash.remove(key)
self.queue[key]['status'] = 'stashed'
else:
self.queue[key]['status'] = 'queued'
self.queue[key]['start'] = ''
self.queue[key]['end'] = ''
self.queue.write()
self.clean_descriptor(key)
del self.processes[key]
# If anything should be logged we return True
return changed
|
Poll all processes and handle any finished processes.
|
entailment
|
def check_for_new(self):
"""Check if we can start a new process."""
free_slots = self.max_processes - len(self.processes)
for _ in range(free_slots):
key = self.queue.next()
if key is not None:
self.spawn_new(key)
|
Check if we can start a new process.
|
entailment
|
def spawn_new(self, key):
"""Spawn a new task and save it to the queue."""
# Check if path exists
if not os.path.exists(self.queue[key]['path']):
self.queue[key]['status'] = 'failed'
error_msg = "The directory for this command doesn't exist anymore: {}".format(self.queue[key]['path'])
self.logger.error(error_msg)
self.queue[key]['stdout'] = ''
self.queue[key]['stderr'] = error_msg
else:
# Get file descriptors
stdout, stderr = self.get_descriptor(key)
if self.custom_shell != 'default':
# Create subprocess
self.processes[key] = subprocess.Popen(
[
self.custom_shell,
'-i',
'-c',
self.queue[key]['command'],
],
stdout=stdout,
stderr=stderr,
stdin=subprocess.PIPE,
universal_newlines=True,
preexec_fn=os.setsid,
cwd=self.queue[key]['path']
)
else:
# Create subprocess
self.processes[key] = subprocess.Popen(
self.queue[key]['command'],
shell=True,
stdout=stdout,
stderr=stderr,
stdin=subprocess.PIPE,
universal_newlines=True,
preexec_fn=os.setsid,
cwd=self.queue[key]['path']
)
self.queue[key]['status'] = 'running'
self.queue[key]['start'] = str(datetime.now().strftime("%H:%M"))
self.queue.write()
|
Spawn a new task and save it to the queue.
|
entailment
|
def kill_all(self, kill_signal, kill_shell=False):
"""Kill all running processes."""
for key in self.processes.keys():
self.kill_process(key, kill_signal, kill_shell)
|
Kill all running processes.
|
entailment
|
def start_process(self, key):
"""Start a specific processes."""
if key in self.processes and key in self.paused:
os.killpg(os.getpgid(self.processes[key].pid), signal.SIGCONT)
self.queue[key]['status'] = 'running'
self.paused.remove(key)
return True
elif key not in self.processes:
if self.queue[key]['status'] in ['queued', 'stashed']:
self.spawn_new(key)
return True
return False
|
Start a specific process.
|
entailment
|
def pause_process(self, key):
"""Pause a specific processes."""
if key in self.processes and key not in self.paused:
os.killpg(os.getpgid(self.processes[key].pid), signal.SIGSTOP)
self.queue[key]['status'] = 'paused'
self.paused.append(key)
return True
return False
|
Pause a specific process.
|
entailment
|
def daemon_factory(path):
"""Create a closure which creates a running daemon.
We need to create a closure that contains the correct path the daemon should
be started with. This is needed as the `Daemonize` library
requires a callable function for daemonization and doesn't accept any arguments.
This function cleans up sockets and output files in case we encounter any exceptions.
"""
def start_daemon():
root_dir = path
config_dir = os.path.join(root_dir, '.config/pueue')
try:
daemon = Daemon(root_dir=root_dir)
daemon.main()
except KeyboardInterrupt:
print('Keyboard interrupt. Shutting down')
daemon.stop_daemon()
except Exception:
try:
daemon.stop_daemon()
except Exception:
pass
cleanup(config_dir)
raise
return start_daemon
|
Create a closure which creates a running daemon.
We need to create a closure that contains the correct path the daemon should
be started with. This is needed as the `Daemonize` library
requires a callable function for daemonization and doesn't accept any arguments.
This function cleans up sockets and output files in case we encounter any exceptions.
|
entailment
|
def main():
"""Execute entry function."""
args = parser.parse_args()
args_dict = vars(args)
root_dir = args_dict.get('root')
# If a root directory is specified, get the absolute path and
# check if it exists. Abort if it doesn't exist!
if root_dir:
root_dir = os.path.abspath(root_dir)
if not os.path.exists(root_dir):
print("The specified directory doesn't exist!")
sys.exit(1)
# Default to home directory if no root is specified
else:
root_dir = os.path.expanduser('~')
if args.stopdaemon:
print_command_factory('STOPDAEMON')(vars(args), root_dir)
elif args.nodaemon:
daemon_factory(root_dir)()
elif args.daemon:
config_dir = os.path.join(root_dir, '.config/pueue')
os.makedirs(config_dir, exist_ok=True)
daemon = Daemonize(app='pueue', pid=os.path.join(config_dir, 'pueue.pid'),
action=daemon_factory(root_dir), chdir=root_dir)
daemon.start()
elif hasattr(args, 'func'):
try:
args.func(args_dict, root_dir)
except EOFError:
print('Apparently the daemon just died. Sorry for that :/')
else:
print('Invalid Command. Please check -h')
|
Execute entry function.
|
entailment
|
def register(host=DFLT_ADDRESS[0], port=DFLT_ADDRESS[1],
signum=signal.SIGUSR1):
"""Register a pdb handler for signal 'signum'.
The handler sets pdb to listen on the ('host', 'port') internet address
and to start a remote debugging session on accepting a socket connection.
"""
_pdbhandler._register(host, port, signum)
|
Register a pdb handler for signal 'signum'.
The handler sets pdb to listen on the ('host', 'port') internet address
and to start a remote debugging session on accepting a socket connection.
|
entailment
|
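A minimal sketch: arm the handler in a long-running process, then trigger it externally; the signal and listen address default to SIGUSR1 and DFLT_ADDRESS as described above.
import os
register()
# From a shell: kill -USR1 <pid>, then connect to the (host, port)
# pair given by DFLT_ADDRESS to get a remote pdb session.
print('send SIGUSR1 to pid {} to start debugging'.format(os.getpid()))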
def get_handler():
"""Return the handler as a named tuple.
The named tuple attributes are 'host', 'port', 'signum'.
Return None when no handler has been registered.
"""
host, port, signum = _pdbhandler._registered()
if signum:
return Handler(host if host else DFLT_ADDRESS[0].encode(),
port if port else DFLT_ADDRESS[1], signum)
|
Return the handler as a named tuple.
The named tuple attributes are 'host', 'port', 'signum'.
Return None when no handler has been registered.
|
entailment
|
def wait(self, timeout):
'''Wait for the provided time to elapse'''
logger.debug('Waiting for %fs', timeout)
return self._event.wait(timeout)
|
Wait for the provided time to elapse
|
entailment
|
def delay(self):
'''How long to wait before the next check'''
if self._last_checked:
return self._interval - (time.time() - self._last_checked)
return self._interval
|
How long to wait before the next check
|
entailment
|
def callback(self):
'''Run the callback'''
self._callback(*self._args, **self._kwargs)
self._last_checked = time.time()
|
Run the callback
|
entailment
|
def run(self):
'''Run the callback periodically'''
while not self.wait(self.delay()):
try:
logger.info('Invoking callback %s', self.callback)
self.callback()
except StandardError:
logger.exception('Callback failed')
|
Run the callback periodically
|
entailment
|
def login(self, email=None, password=None, app_id=None, api_key=None):
"""Login to MediaFire account.
Keyword arguments:
email -- account email
password -- account password
app_id -- application ID
api_key -- API Key (optional)
"""
session_token = self.api.user_get_session_token(
app_id=app_id, email=email, password=password, api_key=api_key)
# install session token back into api client
self.api.session = session_token
|
Login to MediaFire account.
Keyword arguments:
email -- account email
password -- account password
app_id -- application ID
api_key -- API Key (optional)
|
entailment
|
def get_resource_by_uri(self, uri):
"""Return resource described by MediaFire URI.
uri -- MediaFire URI
Examples:
Folder (using folderkey):
mf:r5g3p2z0sqs3j
mf:r5g3p2z0sqs3j/folder/file.ext
File (using quickkey):
mf:xkr43dadqa3o2p2
Path:
mf:///Documents/file.ext
"""
location = self._parse_uri(uri)
if location.startswith("/"):
# Use path lookup only, root=myfiles
result = self.get_resource_by_path(location)
elif "/" in location:
# mf:abcdefjhijklm/name
resource_key, path = location.split('/', 1)
parent_folder = self.get_resource_by_key(resource_key)
if not isinstance(parent_folder, Folder):
raise NotAFolderError(resource_key)
# perform additional lookup by path
result = self.get_resource_by_path(
path, folder_key=parent_folder['folderkey'])
else:
# mf:abcdefjhijklm
result = self.get_resource_by_key(location)
return result
|
Return resource described by MediaFire URI.
uri -- MediaFire URI
Examples:
Folder (using folderkey):
mf:r5g3p2z0sqs3j
mf:r5g3p2z0sqs3j/folder/file.ext
File (using quickkey):
mf:xkr43dadqa3o2p2
Path:
mf:///Documents/file.ext
|
entailment
|
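A usage sketch, assuming an authenticated MediaFireClient; the URIs mirror the docstring examples and the credentials are hypothetical.
client = MediaFireClient()
client.login('user@example.com', 'secret', app_id=APP_ID)
file_by_path = client.get_resource_by_uri('mf:///Documents/file.ext')
folder_by_key = client.get_resource_by_uri('mf:r5g3p2z0sqs3j')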
def get_resource_by_key(self, resource_key):
"""Return resource by quick_key/folder_key.
resource_key -- quick_key or folder_key
"""
# search for quick_key by default
lookup_order = ["quick_key", "folder_key"]
if len(resource_key) == FOLDER_KEY_LENGTH:
lookup_order = ["folder_key", "quick_key"]
resource = None
for lookup_key in lookup_order:
try:
if lookup_key == "folder_key":
info = self.api.folder_get_info(folder_key=resource_key)
resource = Folder(info['folder_info'])
elif lookup_key == "quick_key":
info = self.api.file_get_info(quick_key=resource_key)
resource = File(info['file_info'])
except MediaFireApiError:
# TODO: Check response code
pass
if resource:
break
if not resource:
raise ResourceNotFoundError(resource_key)
return resource
|
Return resource by quick_key/folder_key.
resource_key -- quick_key or folder_key
|
entailment
|
def get_resource_by_path(self, path, folder_key=None):
"""Return resource by remote path.
path -- remote path
Keyword arguments:
folder_key -- what to use as the root folder (None for root)
"""
logger.debug("resolving %s", path)
# remove empty path components
path = posixpath.normpath(path)
components = [t for t in path.split(posixpath.sep) if t != '']
if not components:
# request for root
return Folder(
self.api.folder_get_info(folder_key)['folder_info']
)
resource = None
for component in components:
exists = False
for item in self._folder_get_content_iter(folder_key):
name = item['name'] if 'name' in item else item['filename']
if name == component:
exists = True
if components[-1] != component:
# still have components to go through
if 'filename' in item:
# found a file, expected a directory
raise NotAFolderError(item['filename'])
folder_key = item['folderkey']
else:
# found the leaf
resource = item
break
if resource is not None:
break
if not exists:
# intermediate component does not exist - bailing out
break
if resource is None:
raise ResourceNotFoundError(path)
if "quickkey" in resource:
file_info = self.api.file_get_info(
resource['quickkey'])['file_info']
result = File(file_info)
elif "folderkey" in resource:
folder_info = self.api.folder_get_info(
resource['folderkey'])['folder_info']
result = Folder(folder_info)
return result
|
Return resource by remote path.
path -- remote path
Keyword arguments:
folder_key -- what to use as the root folder (None for root)
|
entailment
|
def _folder_get_content_iter(self, folder_key=None):
"""Iterator for api.folder_get_content"""
lookup_params = [
{'content_type': 'folders', 'node': 'folders'},
{'content_type': 'files', 'node': 'files'}
]
for param in lookup_params:
more_chunks = True
chunk = 0
while more_chunks:
chunk += 1
content = self.api.folder_get_content(
content_type=param['content_type'], chunk=chunk,
folder_key=folder_key)['folder_content']
# empty folder/file list
if not content[param['node']]:
break
# no next page
if content['more_chunks'] == 'no':
more_chunks = False
for resource_info in content[param['node']]:
yield resource_info
|
Iterator for api.folder_get_content
|
entailment
|
def get_folder_contents_iter(self, uri):
"""Return iterator for directory contents.
uri -- mediafire URI
Example:
for item in get_folder_contents_iter('mf:///Documents'):
print(item)
"""
resource = self.get_resource_by_uri(uri)
if not isinstance(resource, Folder):
raise NotAFolderError(uri)
folder_key = resource['folderkey']
for item in self._folder_get_content_iter(folder_key):
if 'filename' in item:
# Work around https://mediafire.mantishub.com/view.php?id=5
# TODO: remove in 1.0
if ".patch." in item['filename']:
continue
yield File(item)
elif 'name' in item:
yield Folder(item)
|
Return iterator for directory contents.
uri -- mediafire URI
Example:
for item in get_folder_contents_iter('mf:///Documents'):
print(item)
|
entailment
|
def create_folder(self, uri, recursive=False):
"""Create folder.
uri -- MediaFire URI
Keyword arguments:
recursive -- set to True to create intermediate folders.
"""
logger.info("Creating %s", uri)
# check that folder exists already
try:
resource = self.get_resource_by_uri(uri)
if isinstance(resource, Folder):
return resource
else:
raise NotAFolderError(uri)
except ResourceNotFoundError:
pass
location = self._parse_uri(uri)
folder_name = posixpath.basename(location)
parent_uri = 'mf://' + posixpath.dirname(location)
try:
parent_node = self.get_resource_by_uri(parent_uri)
if not isinstance(parent_node, Folder):
raise NotAFolderError(parent_uri)
parent_key = parent_node['folderkey']
except ResourceNotFoundError:
if recursive:
result = self.create_folder(parent_uri, recursive=True)
parent_key = result['folderkey']
else:
raise
# We specify exact location, so don't allow duplicates
result = self.api.folder_create(
folder_name, parent_key=parent_key, action_on_duplicate='skip')
logger.info("Created folder '%s' [mf:%s]",
result['name'], result['folder_key'])
return self.get_resource_by_key(result['folder_key'])
|
Create folder.
uri -- MediaFire URI
Keyword arguments:
recursive -- set to True to create intermediate folders.
|
entailment
|
def delete_folder(self, uri, purge=False):
"""Delete folder.
uri -- MediaFire folder URI
Keyword arguments:
purge -- delete the folder without sending it to Trash
"""
try:
resource = self.get_resource_by_uri(uri)
except ResourceNotFoundError:
# Nothing to remove
return None
if not isinstance(resource, Folder):
raise ValueError("Folder expected, got {}".format(type(resource)))
if purge:
func = self.api.folder_purge
else:
func = self.api.folder_delete
try:
result = func(resource['folderkey'])
except MediaFireApiError as err:
if err.code == 100:
logger.debug(
"Delete folder returns error 900 but folder is deleted: "
"http://forum.mediafiredev.com/showthread.php?129")
result = {}
else:
raise
return result
|
Delete folder.
uri -- MediaFire folder URI
Keyword arguments:
purge -- delete the folder without sending it to Trash
|
entailment
|
def delete_file(self, uri, purge=False):
"""Delete file.
uri -- MediaFire file URI
Keyword arguments:
purge -- delete the file without sending it to Trash.
"""
try:
resource = self.get_resource_by_uri(uri)
except ResourceNotFoundError:
# Nothing to remove
return None
if not isinstance(resource, File):
raise ValueError("File expected, got {}".format(type(resource)))
if purge:
func = self.api.file_purge
else:
func = self.api.file_delete
return func(resource['quickkey'])
|
Delete file.
uri -- MediaFire file URI
Keyword arguments:
purge -- delete the file without sending it to Trash.
|
entailment
|
def delete_resource(self, uri, purge=False):
"""Delete file or folder
uri -- mediafire URI
Keyword arguments:
purge -- delete the resource without sending it to Trash.
"""
try:
resource = self.get_resource_by_uri(uri)
except ResourceNotFoundError:
# Nothing to remove
return None
if isinstance(resource, File):
result = self.delete_file(uri, purge)
elif isinstance(resource, Folder):
result = self.delete_folder(uri, purge)
else:
raise ValueError('Unsupported resource: {}'.format(type(resource)))
return result
|
Delete file or folder
uri -- mediafire URI
Keyword arguments:
purge -- delete the resource without sending it to Trash.
|
entailment
|
def _prepare_upload_info(self, source, dest_uri):
"""Prepare Upload object, resolve paths"""
try:
dest_resource = self.get_resource_by_uri(dest_uri)
except ResourceNotFoundError:
dest_resource = None
is_fh = hasattr(source, 'read')
folder_key = None
name = None
if dest_resource:
if isinstance(dest_resource, File):
folder_key = dest_resource['parent_folderkey']
name = dest_resource['filename']
elif isinstance(dest_resource, Folder):
if is_fh:
raise ValueError("Cannot determine target file name")
basename = posixpath.basename(source)
dest_uri = posixpath.join(dest_uri, basename)
try:
result = self.get_resource_by_uri(dest_uri)
if isinstance(result, Folder):
raise ValueError("Target is a folder (file expected)")
folder_key = result.get('parent_folderkey', None)
name = result['filename']
except ResourceNotFoundError:
# ok, neither a file nor folder, proceed
folder_key = dest_resource['folderkey']
name = basename
else:
raise Exception("Unknown resource type")
else:
# get parent resource
parent_uri = '/'.join(dest_uri.split('/')[0:-1])
result = self.get_resource_by_uri(parent_uri)
if not isinstance(result, Folder):
raise NotAFolderError("Parent component is not a folder")
folder_key = result['folderkey']
name = posixpath.basename(dest_uri)
return folder_key, name
|
Prepare Upload object, resolve paths
|
entailment
|
def upload_file(self, source, dest_uri):
"""Upload file to MediaFire.
source -- path to the file or a file-like object (e.g. io.BytesIO)
dest_uri -- MediaFire Resource URI
"""
folder_key, name = self._prepare_upload_info(source, dest_uri)
is_fh = hasattr(source, 'read')
fd = None
try:
if is_fh:
# Re-using filehandle
fd = source
else:
# Handling fs open/close
fd = open(source, 'rb')
return MediaFireUploader(self.api).upload(
fd, name, folder_key=folder_key,
action_on_duplicate='replace')
finally:
# Close filehandle if we opened it
if fd and not is_fh:
fd.close()
|
Upload file to MediaFire.
source -- path to the file or a file-like object (e.g. io.BytesIO)
dest_uri -- MediaFire Resource URI
|
entailment
|
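A usage sketch, assuming an authenticated client; paths and URIs are hypothetical. Note that a file-like source needs a full destination file URI, since _prepare_upload_info cannot derive a name from it.
import io
# Path source, folder destination: the basename is appended
client.upload_file('/tmp/report.pdf', 'mf:///Documents/')
# File-like source: destination must name the target file
client.upload_file(io.BytesIO(b'hello'), 'mf:///Documents/hello.txt')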
def download_file(self, src_uri, target):
"""Download file from MediaFire.
src_uri -- MediaFire file URI to download
target -- download path or file-like object in write mode
"""
resource = self.get_resource_by_uri(src_uri)
if not isinstance(resource, File):
raise MediaFireError("Only files can be downloaded")
quick_key = resource['quickkey']
result = self.api.file_get_links(quick_key=quick_key,
link_type='direct_download')
direct_download = result['links'][0]['direct_download']
# Force download over HTTPS
direct_download = direct_download.replace('http:', 'https:')
name = resource['filename']
target_is_filehandle = hasattr(target, 'write')
if not target_is_filehandle:
if (os.path.exists(target) and os.path.isdir(target)) or \
target.endswith("/"):
target = os.path.join(target, name)
dirname = os.path.dirname(target)
if dirname and not os.path.isdir(dirname):
os.makedirs(dirname)
logger.info("Downloading %s to %s", src_uri, target)
response = requests.get(direct_download, stream=True)
try:
if target_is_filehandle:
out_fd = target
else:
out_fd = open(target, 'wb')
checksum = hashlib.sha256()
for chunk in response.iter_content(chunk_size=4096):
if chunk:
out_fd.write(chunk)
checksum.update(chunk)
checksum_hex = checksum.hexdigest().lower()
if checksum_hex != resource['hash']:
raise DownloadError("Hash mismatch ({} != {})".format(
resource['hash'], checksum_hex))
logger.info("Download completed successfully")
finally:
if not target_is_filehandle:
out_fd.close()
|
Download file from MediaFire.
src_uri -- MediaFire file URI to download
target -- download path or file-like object in write mode
|
entailment
|
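A usage sketch, assuming an authenticated client; URIs and paths are hypothetical.
import io
# Download into a directory (the remote filename is appended)
client.download_file('mf:///Documents/file.ext', '/tmp/')
# Download into a file-like object opened in write mode
buf = io.BytesIO()
client.download_file('mf:///Documents/file.ext', buf)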
def update_file_metadata(self, uri, filename=None, description=None,
mtime=None, privacy=None):
"""Update file metadata.
uri -- MediaFire file URI
Supplying the following keyword arguments would change the
metadata on the server side:
filename -- rename file
description -- set file description string
mtime -- set file modification time
privacy -- set file privacy - 'private' or 'public'
"""
resource = self.get_resource_by_uri(uri)
if not isinstance(resource, File):
raise ValueError('Expected File, got {}'.format(type(resource)))
result = self.api.file_update(resource['quickkey'], filename=filename,
description=description,
mtime=mtime, privacy=privacy)
return result
|
Update file metadata.
uri -- MediaFire file URI
Supplying the following keyword arguments would change the
metadata on the server side:
filename -- rename file
description -- set file description string
mtime -- set file modification time
privacy -- set file privacy - 'private' or 'public'
|
entailment
|
def update_folder_metadata(self, uri, foldername=None, description=None,
mtime=None, privacy=None,
privacy_recursive=None):
"""Update folder metadata.
uri -- MediaFire folder URI
Supplying the following keyword arguments would change the
metadata on the server side:
foldername -- rename folder
description -- set folder description string
mtime -- set folder modification time
privacy -- set folder privacy - 'private' or 'public'
privacy_recursive -- update folder privacy recursively
"""
resource = self.get_resource_by_uri(uri)
if not isinstance(resource, Folder):
raise ValueError('Expected Folder, got {}'.format(type(resource)))
result = self.api.folder_update(resource['folderkey'],
foldername=foldername,
description=description,
mtime=mtime,
privacy=privacy,
privacy_recursive=privacy_recursive)
return result
|
Update folder metadata.
uri -- MediaFire folder URI
Supplying the following keyword arguments would change the
metadata on the server side:
foldername -- rename folder
description -- set folder description string
mtime -- set folder modification time
privacy -- set folder privacy - 'private' or 'public'
privacy_recursive -- update folder privacy recursively
|
entailment
|
def _parse_uri(uri):
"""Parse and validate MediaFire URI."""
tokens = urlparse(uri)
if tokens.netloc != '':
logger.error("Invalid URI: %s", uri)
raise ValueError("MediaFire URI format error: "
"host should be empty - mf:///path")
if tokens.scheme != '' and tokens.scheme != URI_SCHEME:
raise ValueError("MediaFire URI format error: "
"must start with 'mf:' or '/'")
return posixpath.normpath(tokens.path)
|
Parse and validate MediaFire URI.
|
entailment
|
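Illustrative results under the rules above: the scheme must be 'mf' or absent, and the host part must stay empty.
_parse_uri('mf:///Documents/file.ext')  # -> '/Documents/file.ext'
_parse_uri('/Documents/file.ext')       # -> '/Documents/file.ext'
_parse_uri('mf://host/file.ext')        # raises ValueError (non-empty host)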
def merged(self):
'''The clean stats from all the hosts reporting to this host.'''
stats = {}
for topic in self.client.topics()['topics']:
for producer in self.client.lookup(topic)['producers']:
hostname = producer['broadcast_address']
port = producer['http_port']
host = '%s_%s' % (hostname, port)
stats[host] = nsqd.Client(
'http://%s:%s/' % (hostname, port)).clean_stats()
return stats
|
The clean stats from all the hosts reporting to this host.
|
entailment
|
def raw(self):
'''All the raw, unaggregated stats (with duplicates).'''
topic_keys = (
'message_count',
'depth',
'backend_depth',
'paused'
)
channel_keys = (
'in_flight_count',
'timeout_count',
'paused',
'deferred_count',
'message_count',
'depth',
'backend_depth',
'requeue_count'
)
for host, host_stats in self.merged.items():
for topic, topic_stats in host_stats.get('topics', {}).items():
for key in topic_keys:
value = int(topic_stats.get(key, -1))
yield (
'host.%s.topic.%s.%s' % (host, topic, key),
value,
False
)
yield (
'topic.%s.%s' % (topic, key),
value,
True
)
yield (
'topics.%s' % key,
value,
True
)
for chan, chan_stats in topic_stats.get('channels', {}).items():
data = {
key: int(chan_stats.get(key, -1)) for key in channel_keys
}
data['clients'] = len(chan_stats.get('clients', []))
for key, value in data.items():
yield (
'host.%s.topic.%s.channel.%s.%s' % (host, topic, chan, key),
value,
False
)
yield (
'host.%s.topic.%s.channels.%s' % (host, topic, key),
value,
True
)
yield (
'topic.%s.channels.%s' % (topic, key),
value,
True
)
yield (
'channels.%s' % key,
value,
True
)
|
All the raw, unaggregated stats (with duplicates).
|
entailment
|
def stats(self):
'''Stats that have been aggregated appropriately.'''
data = Counter()
for name, value, aggregated in self.raw:
if aggregated:
data['%s.max' % name] = max(data['%s.max' % name], value)
data['%s.total' % name] += value
else:
data[name] = value
return sorted(data.items())
|
Stats that have been aggregated appropriately.
|
entailment
|
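Illustrative output shape (values hypothetical): aggregated names gain '.max' and '.total' suffixes, while host-scoped names pass through unchanged.
[('host.10.0.0.1_4151.topic.events.depth', 3),
('topic.events.depth.max', 3),
('topic.events.depth.total', 5)]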
def get_curline():
"""Return the current python source line."""
if Frame:
frame = Frame.get_selected_python_frame()
if frame:
line = ''
f = frame.get_pyop()
if f and not f.is_optimized_out():
cwd = os.path.join(os.getcwd(), '')
fname = f.filename()
if cwd in fname:
fname = fname[len(cwd):]
try:
line = f.current_line()
except IOError:
pass
if line:
# Use repr(line) to avoid UnicodeDecodeError on the
# following print invocation.
line = repr(line).strip("'")
line = line[:-2] if line.endswith(r'\n') else line
return ('-> %s(%s): %s' % (fname,
f.current_line_num(), line))
return ''
|
Return the current python source line.
|
entailment
|
def reconnected(self, conn):
'''Subscribe connection and manipulate its RDY state'''
conn.sub(self._topic, self._channel)
conn.rdy(1)
|
Subscribe connection and manipulate its RDY state
|
entailment
|
def distribute_ready(self):
'''Distribute the ready state across all of the connections'''
connections = [c for c in self.connections() if c.alive()]
if len(connections) > self._max_in_flight:
raise NotImplementedError(
'Max in flight must be greater than number of connections')
else:
# Distribute the ready count evenly among the connections
for count, conn in distribute(self._max_in_flight, connections):
# We cannot exceed the maximum RDY count for a connection
if count > conn.max_rdy_count:
logger.info(
'Using max_rdy_count (%i) instead of %i for %s RDY',
conn.max_rdy_count, count, conn)
count = conn.max_rdy_count
logger.info('Sending RDY %i to %s', count, conn)
conn.rdy(count)
|
Distribute the ready state across all of the connections
|
entailment
|
def needs_distribute_ready(self):
'''Determine whether or not we need to redistribute the ready state'''
# Try to pre-empt starvation by comparing current RDY against
# the last value sent.
alive = [c for c in self.connections() if c.alive()]
if any(c.ready <= (c.last_ready_sent * 0.25) for c in alive):
return True
|
Determine whether or not we need to redistribute the ready state
|
entailment
|
def read(self):
'''Read some number of messages'''
found = Client.read(self)
# Redistribute our ready state if necessary
if self.needs_distribute_ready():
self.distribute_ready()
# Finally, return all the results we've read
return found
|
Read some number of messages
|
entailment
|
from contextlib import contextmanager
@contextmanager
def profiler():
'''Profile the block'''
import cProfile
import pstats
pr = cProfile.Profile()
pr.enable()
yield
pr.disable()
ps = pstats.Stats(pr).sort_stats('tottime')
ps.print_stats()
|
Profile the block
|
entailment
|
def messages(count, size):
'''Generator for count messages of the provided size'''
import string
# Make sure we have at least 'size' letters
letters = islice(cycle(chain(string.lowercase, string.uppercase)), size)
return islice(cycle(''.join(l) for l in permutations(letters, size)), count)
|
Generator for count messages of the provided size
|
entailment
|
def grouper(iterable, n):
'''Collect data into fixed-length chunks or blocks'''
args = [iter(iterable)] * n
for group in izip_longest(fillvalue=None, *args):
group = [g for g in group if g is not None]
yield group
|
Collect data into fixed-length chunks or blocks
|
entailment
|
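A usage sketch: grouper yields batches of at most n items, with the final short group preserved because the None padding is filtered out.
list(grouper('abcdefg', 3))
# -> [['a', 'b', 'c'], ['d', 'e', 'f'], ['g']]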
def basic(topic='topic', channel='channel', count=1e6, size=10, gevent=False,
max_in_flight=2500, profile=False):
'''Basic benchmark'''
if gevent:
from gevent import monkey
monkey.patch_all()
# Check the types of the arguments
count = int(count)
size = int(size)
max_in_flight = int(max_in_flight)
from nsq.http import nsqd
from nsq.reader import Reader
print 'Publishing messages...'
for batch in grouper(messages(count, size), 1000):
nsqd.Client('http://localhost:4151').mpub(topic, batch)
print 'Consuming messages'
client = Reader(topic, channel, nsqd_tcp_addresses=['localhost:4150'],
max_in_flight=max_in_flight)
with closing(client):
start = -time.time()
if profile:
with profiler():
for message in islice(client, count):
message.fin()
else:
for message in islice(client, count):
message.fin()
start += time.time()
print 'Finished %i messages in %fs (%5.2f messages / second)' % (
count, start, count / start)
|
Basic benchmark
|
entailment
|
def stats():
'''Read a stream of floats and give summary statistics'''
import re
import sys
import math
values = []
for line in sys.stdin:
values.extend(map(float, re.findall(r'\d+\.?\d+', line)))
mean = sum(values) / len(values)
variance = sum((val - mean) ** 2 for val in values) / len(values)
print '%3i items; mean: %10.5f; std-dev: %10.5f' % (
len(values), mean, math.sqrt(variance))
|
Read a stream of floats and give summary statistics
|
entailment
|
def ready(self):
'''Whether or not enough time has passed since the last failure'''
if self._last_failed:
delta = time.time() - self._last_failed
return delta >= self.backoff()
return True
|
Whether or not enough time has passed since the last failure
|
entailment
|
def execute_status(args, root_dir=None):
"""Print the status of the daemon.
This function displays the current status of the daemon as well
as the whole queue and all available information about every entry
in the queue.
`terminaltables` is used to format and display the queue contents.
`colorclass` is used to color format the various items in the queue.
Args:
root_dir (string): The path to the root directory the daemon is running in.
"""
status = command_factory('status')({}, root_dir=root_dir)
# First rows, showing daemon status
if status['status'] == 'running':
status['status'] = Color('{autogreen}' + '{}'.format(status['status']) + '{/autogreen}')
elif status['status'] in ['paused']:
status['status'] = Color('{autoyellow}' + '{}'.format(status['status']) + '{/autoyellow}')
print('Daemon: {}\n'.format(status['status']))
# Handle queue data
data = status['data']
if isinstance(data, str):
print(data)
elif isinstance(data, dict):
# Format incoming data to be compatible with terminaltables
formatted_data = []
formatted_data.append(['Index', 'Status', 'Code',
'Command', 'Path', 'Start', 'End'])
for key, entry in sorted(data.items(), key=operator.itemgetter(0)):
formatted_data.append(
[
'#{}'.format(key),
entry['status'],
'{}'.format(entry['returncode']),
entry['command'],
entry['path'],
entry['start'],
entry['end']
]
)
# Create AsciiTable instance and define style
table = AsciiTable(formatted_data)
table.outer_border = False
table.inner_column_border = False
terminal_width = terminal_size()
customWidth = table.column_widths
# If the text is wider than the actual terminal size, we
# compute a new size for the Command and Path column.
if (sum(table.column_widths) + 10) > terminal_width[0]:
# We have to subtract 14 because of table paddings
left_space = math.floor((terminal_width[0] - customWidth[0] - customWidth[1] - customWidth[2] - customWidth[5] - customWidth[6] - 14)/2)
if customWidth[3] < left_space:
customWidth[4] = 2*left_space - customWidth[3]
elif customWidth[4] < left_space:
customWidth[3] = 2*left_space - customWidth[4]
else:
customWidth[3] = left_space
customWidth[4] = left_space
# Format long strings to match the console width
for i, entry in enumerate(table.table_data):
for j, string in enumerate(entry):
max_width = customWidth[j]
wrapped_string = '\n'.join(wrap(string, max_width))
if j == 1:
if wrapped_string in ['done', 'running', 'paused']:
wrapped_string = Color('{autogreen}' + '{}'.format(wrapped_string) + '{/autogreen}')
elif wrapped_string in ['queued', 'stashed']:
wrapped_string = Color('{autoyellow}' + '{}'.format(wrapped_string) + '{/autoyellow}')
elif wrapped_string in ['failed', 'stopping', 'killing']:
wrapped_string = Color('{autored}' + '{}'.format(wrapped_string) + '{/autored}')
elif j == 2:
if wrapped_string == '0':
wrapped_string = Color('{autogreen}' + '{}'.format(wrapped_string) + '{/autogreen}')
elif wrapped_string != 'Code':
wrapped_string = Color('{autored}' + '{}'.format(wrapped_string) + '{/autored}')
table.table_data[i][j] = wrapped_string
print(table.table)
print('')
|
Print the status of the daemon.
This function displays the current status of the daemon as well
as the whole queue and all available information about every entry
in the queue.
`terminaltables` is used to format and display the queue contents.
`colorclass` is used to color format the various items in the queue.
Args:
root_dir (string): The path to the root directory the daemon is running in.
|
entailment
|
def execute_log(args, root_dir):
"""Print the current log file.
Args:
args['keys'] (list): If given, we only look at the specified processes.
root_dir (string): The path to the root directory the daemon is running in.
"""
# Print the logs of all specified processes
if args.get('keys'):
config_dir = os.path.join(root_dir, '.config/pueue')
queue_path = os.path.join(config_dir, 'queue')
if os.path.exists(queue_path):
queue_file = open(queue_path, 'rb')
try:
queue = pickle.load(queue_file)
except Exception:
print('Queue log file seems to be corrupted. Aborting.')
return
queue_file.close()
else:
print('There is no queue log file. Aborting.')
return
for key in args.get('keys'):
# Check if there is an entry with this key
if queue.get(key) and queue[key]['status'] in ['failed', 'done']:
entry = queue[key]
print('Log of entry: {}'.format(key))
print('Returncode: {}'.format(entry['returncode']))
print('Command: {}'.format(entry['command']))
print('Path: {}'.format(entry['path']))
print('Start: {}, End: {} \n'.format(entry['start'], entry['end']))
# Write STDERR
if len(entry['stderr']) > 0:
print(Color('{autored}Stderr output: {/autored}\n ') + entry['stderr'])
# Write STDOUT
if len(entry['stdout']) > 0:
print(Color('{autogreen}Stdout output: {/autogreen}\n ') + entry['stdout'])
else:
print('No finished process with key {}.'.format(key))
# Print the log of all processes
else:
log_path = os.path.join(root_dir, '.local/share/pueue/queue.log')
log_file = open(log_path, 'r')
print(log_file.read())
|
Print the current log file.
Args:
args['keys'] (list): If given, we only look at the specified processes.
root_dir (string): The path to the root directory the daemon is running in.
|
entailment
|
def execute_show(args, root_dir):
"""Print stderr and stdout of the current running process.
Args:
args['watch'] (bool): If True, we open a curses session and tail
the output live in the console.
root_dir (string): The path to the root directory the daemon is running in.
"""
key = None
if args.get('key'):
key = args['key']
status = command_factory('status')({}, root_dir=root_dir)
if key not in status['data'] or status['data'][key]['status'] != 'running':
print('No running process with this key, use `log` to show finished processes.')
return
# In case no key provided, we take the oldest running process
else:
status = command_factory('status')({}, root_dir=root_dir)
if isinstance(status['data'], str):
print(status['data'])
return
for k in sorted(status['data'].keys()):
if status['data'][k]['status'] == 'running':
key = k
break
if key is None:
print('No running process, use `log` to show finished processes.')
return
config_dir = os.path.join(root_dir, '.config/pueue')
# Get the stdout and stderr files of the current process from the config directory
stdoutFile = os.path.join(config_dir, 'pueue_process_{}.stdout'.format(key))
stderrFile = os.path.join(config_dir, 'pueue_process_{}.stderr'.format(key))
stdoutDescriptor = open(stdoutFile, 'r')
stderrDescriptor = open(stderrFile, 'r')
running = True
# Continually print output with curses or just print once
if args['watch']:
# Initialize curses
stdscr = curses.initscr()
curses.noecho()
curses.cbreak()
curses.curs_set(2)
stdscr.keypad(True)
stdscr.refresh()
try:
# Update output every two seconds
while running:
stdscr.clear()
stdoutDescriptor.seek(0)
message = stdoutDescriptor.read()
stdscr.addstr(0, 0, message)
stdscr.refresh()
time.sleep(2)
except Exception:
# Curses cleanup
curses.nocbreak()
stdscr.keypad(False)
curses.echo()
curses.endwin()
else:
print('Stdout output:\n')
stdoutDescriptor.seek(0)
print(get_descriptor_output(stdoutDescriptor, key))
print('\n\nStderr output:\n')
stderrDescriptor.seek(0)
print(get_descriptor_output(stderrDescriptor, key))
|
Print stderr and stdout of the current running process.
Args:
args['watch'] (bool): If True, we open a curses session and tail
the output live in the console.
root_dir (string): The path to the root directory the daemon is running in.
|
entailment
|
def fetch_track(self, track_id, terr=KKBOXTerritory.TAIWAN):
'''
Fetches a song track by given ID.
:param track_id: the track ID.
:type track_id: str
:return: API response.
:rtype: dict
See `https://docs-en.kkbox.codes/v1.1/reference#tracks-track_id`.
'''
url = 'https://api.kkbox.com/v1.1/tracks/%s' % track_id
url += '?' + url_parse.urlencode({'territory': terr})
return self.http._post_data(url, None, self.http._headers_with_access_token())
|
Fetches a song track by given ID.
:param track_id: the track ID.
:type track_id: str
:return: API response.
:rtype: dict
See `https://docs-en.kkbox.codes/v1.1/reference#tracks-track_id`.
|
entailment
|
def show(self, user, feed, id):
"""
Show a specific indicator by id
:param user: feed username
:param feed: feed name
:param id: indicator endpoint id [INT]
:return: dict
Example:
ret = Indicator.show('csirtgadgets','port-scanners', '1234')
"""
uri = '/users/{}/feeds/{}/indicators/{}'.format(user, feed, id)
return self.client.get(uri)
|
Show a specific indicator by id
:param user: feed username
:param feed: feed name
:param id: indicator endpoint id [INT]
:return: dict
Example:
ret = Indicator.show('csirtgadgets','port-scanners', '1234')
|
entailment
|
def create(self):
"""
Submit action on the Indicator object
:return: Indicator Object
"""
uri = '/users/{0}/feeds/{1}/indicators'\
.format(self.user, self.feed)
data = {
"indicator": json.loads(str(self.indicator)),
"comment": self.comment,
"content": self.content
}
if self.attachment:
attachment = self._file_to_attachment(
self.attachment, filename=self.attachment_name)
data['attachment'] = {
'data': attachment['data'],
'filename': attachment['filename']
}
if not data['indicator'].get('indicator'):
data['indicator']['indicator'] = attachment['sha1']
if not data['indicator'].get('indicator'):
raise Exception('Missing indicator')
return self.client.post(uri, data)
|
Submit action on the Indicator object
:return: Indicator Object
|
entailment
|
def create_bulk(self, indicators, user, feed):
"""
Submit action against the IndicatorBulk endpoint
:param indicators: list of Indicator Objects
:param user: feed username
:param feed: feed name
:return: list of Indicator Objects submitted
from csirtgsdk.client import Client
from csirtgsdk.indicator import Indicator
remote = 'https://csirtg.io/api'
token = ''
verify_ssl = True
i = {
'indicator': 'example.com',
'feed': 'test',
'user': 'admin',
'comment': 'this is a test',
}
data = []
cli = Client(remote=remote, token=token, verify_ssl=verify_ssl)
for x in range(0, 5):
data.append(
Indicator(cli, i)
)
ret = cli.submit_bulk(data, 'csirtgadgets', 'test-feed')
"""
uri = '/users/{0}/feeds/{1}/indicators_bulk'.format(user, feed)
data = {
'indicators': [
{
'indicator': i.args.indicator,
'feed_id': i.args.feed,
'tag_list': i.args.tags,
"description": i.args.description,
"portlist": i.args.portlist,
"protocol": i.args.protocol,
'firsttime': i.args.firsttime,
'lasttime': i.args.lasttime,
'portlist_src': i.args.portlist_src,
'comment': {
'content': i.args.comment
},
'rdata': i.args.rdata,
'rtype': i.args.rtype,
'content': i.args.content,
'provider': i.args.provider,
} for i in indicators
]
}
return self.client.post(uri, data)
|
Submit action against the IndicatorBulk endpoint
:param indicators: list of Indicator Objects
:param user: feed username
:param feed: feed name
:return: list of Indicator Objects submitted
from csirtgsdk.client import Client
from csirtgsdk.indicator import Indicator
remote = 'https://csirtg.io/api'
token = ''
verify_ssl = True
i = {
'indicator': 'example.com',
'feed': 'test',
'user': 'admin',
'comment': 'this is a test',
}
data = []
cli = Client(remote=remote, token=token, verify_ssl=verify_ssl)
for x in range(0, 5):
data.append(
Indicator(cli, i)
)
ret = cli.submit_bulk(data, 'csirtgadgets', 'test-feed')
|
entailment
|
def bind_global_key(conn, event_type, key_string, cb):
"""
An alias for ``bind_key(event_type, ROOT_WINDOW, key_string, cb)``.
:param event_type: Either 'KeyPress' or 'KeyRelease'.
:type event_type: str
:param key_string: A string of the form 'Mod1-Control-a'.
Namely, a list of zero or more modifiers separated by
'-', followed by a single non-modifier key.
:type key_string: str
:param cb: A first class function with no parameters.
:type cb: function
:return: True if the binding was successful, False otherwise.
:rtype: bool
"""
root = conn.get_setup().roots[0].root
return bind_key(conn, event_type, root, key_string, cb)
|
An alias for ``bind_key(event_type, ROOT_WINDOW, key_string, cb)``.
:param event_type: Either 'KeyPress' or 'KeyRelease'.
:type event_type: str
:param key_string: A string of the form 'Mod1-Control-a'.
Namely, a list of zero or more modifiers separated by
'-', followed by a single non-modifier key.
:type key_string: str
:param cb: A first class function with no parameters.
:type cb: function
:return: True if the binding was successful, False otherwise.
:rtype: bool
|
entailment
|
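A usage sketch, assuming an xpybutil-style xcb connection object conn and that event.main() drives the dispatcher; the key string and callback are hypothetical.
def on_hotkey():
print('Mod4-Return pressed')
bind_global_key(conn, 'KeyPress', 'Mod4-Return', on_hotkey)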
def bind_key(conn, event_type, wid, key_string, cb):
"""
Binds a function ``cb`` to a particular key press ``key_string`` on a
window ``wid``. Whether it's a key release or key press binding is
determined by ``event_type``.
``bind_key`` will automatically hook into the ``event`` module's dispatcher,
so that if you're using ``event.main()`` for your main loop, everything
will be taken care of for you.
:param event_type: Either 'KeyPress' or 'KeyRelease'.
:type event_type: str
:param wid: The window to bind the key grab to.
:type wid: int
:param key_string: A string of the form 'Mod1-Control-a'.
Namely, a list of zero or more modifiers separated by
'-', followed by a single non-modifier key.
:type key_string: str
:param cb: A first class function with no parameters.
:type cb: function
:return: True if the binding was successful, False otherwise.
:rtype: bool
"""
assert event_type in ('KeyPress', 'KeyRelease')
mods, kc = parse_keystring(conn, key_string)
key = (wid, mods, kc)
if not kc:
print("Could not find a keycode for " + key_string)
return False
if not __keygrabs[key] and not grab_key(conn, wid, mods, kc):
return False
__keybinds[key].append(cb)
__keygrabs[key] += 1
return True
|
Binds a function ``cb`` to a particular key press ``key_string`` on a
window ``wid``. Whether it's a key release or key press binding is
determined by ``event_type``.
``bind_key`` will automatically hook into the ``event`` module's dispatcher,
so that if you're using ``event.main()`` for your main loop, everything
will be taken care of for you.
:param event_type: Either 'KeyPress' or 'KeyRelease'.
:type event_type: str
:param wid: The window to bind the key grab to.
:type wid: int
:param key_string: A string of the form 'Mod1-Control-a'.
Namely, a list of zero or more modifiers separated by
'-', followed by a single non-modifier key.
:type key_string: str
:param cb: A first class function with no parameters.
:type cb: function
:return: True if the binding was successful, False otherwise.
:rtype: bool
|
entailment
|
def parse_keystring(conn, key_string):
"""
A utility function to turn strings like 'Mod1+Mod4+a' into a pair
corresponding to its modifiers and keycode.
:param key_string: String starting with zero or more modifiers followed
by exactly one key press.
Available modifiers: Control, Mod1, Mod2, Mod3, Mod4,
Mod5, Shift, Lock
:type key_string: str
:return: Tuple of modifier mask and keycode
:rtype: (mask, int)
"""
# FIXME: this code is a temporary hack and requires better abstraction
from PyQt5.QtGui import QKeySequence
from PyQt5.QtCore import Qt
from .qt_keycodes import KeyTbl, ModsTbl
keysequence = QKeySequence(key_string)
ks = keysequence[0]
# Calculate the modifiers
mods = Qt.NoModifier
qtmods = Qt.NoModifier
modifiers = 0
if (ks & Qt.ShiftModifier == Qt.ShiftModifier):
mods |= ModsTbl.index(Qt.ShiftModifier)
qtmods |= Qt.ShiftModifier.real
modifiers |= getattr(xproto.KeyButMask, "Shift", 0)
if (ks & Qt.AltModifier == Qt.AltModifier):
mods |= ModsTbl.index(Qt.AltModifier)
qtmods |= Qt.AltModifier.real
modifiers |= getattr(xproto.KeyButMask, "Mod1", 0)
if (ks & Qt.ControlModifier == Qt.ControlModifier):
mods |= ModsTbl.index(Qt.ControlModifier)
qtmods |= Qt.ControlModifier.real
modifiers |= getattr(xproto.KeyButMask, "Control", 0)
# Calculate the keys
qtkeys = ks ^ qtmods
key = QKeySequence(Qt.Key(qtkeys)).toString().lower()
keycode = lookup_string(conn, key)
return modifiers, keycode
# Fallback logic
modifiers = 0
keycode = None
key_string = "Shift+Control+A"
for part in key_string.split('+'):
if hasattr(xproto.KeyButMask, part):
modifiers |= getattr(xproto.KeyButMask, part)
else:
if len(part) == 1:
part = part.lower()
keycode = lookup_string(conn, part)
return modifiers, keycode
|
A utility function to turn strings like 'Mod1+Mod4+a' into a pair
corresponding to its modifiers and keycode.
:param key_string: String starting with zero or more modifiers followed
by exactly one key press.
Available modifiers: Control, Mod1, Mod2, Mod3, Mod4,
Mod5, Shift, Lock
:type key_string: str
:return: Tuple of modifier mask and keycode
:rtype: (mask, int)
|
entailment
|
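A hedged sketch of the contract: the modifier mask is deterministic, while the returned keycode depends on the running X server's keyboard mapping.

mods, kc = parse_keystring(conn, 'Shift+Control+a')
# Both modifier bits should be present in the returned mask.
assert mods & getattr(xproto.KeyButMask, 'Shift', 0)
assert mods & getattr(xproto.KeyButMask, 'Control', 0)
print(kc)  # keycode mapped to 'a'; e.g. 38 on a common pc105 layout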
def lookup_string(conn, kstr):
"""
Finds the keycode associated with a string representation of a keysym.
:param kstr: English representation of a keysym.
:return: Keycode, if one exists.
:rtype: int
"""
if kstr in keysyms:
return get_keycode(conn, keysyms[kstr])
elif len(kstr) > 1 and kstr.capitalize() in keysyms:
return get_keycode(conn, keysyms[kstr.capitalize()])
return None
|
Finds the keycode associated with a string representation of a keysym.
:param kstr: English representation of a keysym.
:return: Keycode, if one exists.
:rtype: int
|
entailment
|
def get_keyboard_mapping(conn):
"""
Return a keyboard mapping cookie that can be used to fetch the table of
keysyms in the current X environment.
:rtype: xcb.xproto.GetKeyboardMappingCookie
"""
mn, mx = get_min_max_keycode(conn)
return conn.core.GetKeyboardMapping(mn, mx - mn + 1)
|
Return a keyboard mapping cookie that can be used to fetch the table of
keysyms in the current X environment.
:rtype: xcb.xproto.GetKeyboardMappingCookie
|
entailment
|
def get_keyboard_mapping_unchecked(conn):
"""
Return an unchecked keyboard mapping cookie that can be used to fetch the
table of keysyms in the current X environment.
:rtype: xcb.xproto.GetKeyboardMappingCookie
"""
    mn, mx = get_min_max_keycode(conn)
return conn.core.GetKeyboardMappingUnchecked(mn, mx - mn + 1)
|
Return an unchecked keyboard mapping cookie that can be used to fetch the
table of keysyms in the current X environment.
:rtype: xcb.xproto.GetKeyboardMappingCookie
|
entailment
|
def get_keysym(conn, keycode, col=0, kbmap=None):
"""
Get the keysym associated with a particular keycode in the current X
environment. Although we get a list of keysyms from X in
'get_keyboard_mapping', this list is really a table with
    'keysyms_per_keycode' columns and ``mx - mn + 1`` rows (where ``mx`` is the
maximum keycode and ``mn`` is the minimum keycode).
Thus, the index for a keysym given a keycode is:
``(keycode - mn) * keysyms_per_keycode + col``.
In most cases, setting ``col`` to 0 will work.
Witness the utter complexity:
http://tronche.com/gui/x/xlib/input/keyboard-encoding.html
You may also pass in your own keyboard mapping using the ``kbmap``
parameter, but xpybutil maintains an up-to-date version of this so you
shouldn't have to.
:param keycode: A physical key represented by an integer.
:type keycode: int
:param col: The column in the keysym table to use.
Unless you know what you're doing, just use 0.
:type col: int
:param kbmap: The keyboard mapping to use.
    :type kbmap: xcb.xproto.GetKeyboardMappingReply
"""
if kbmap is None:
kbmap = __kbmap
mn, mx = get_min_max_keycode(conn)
per = kbmap.keysyms_per_keycode
ind = (keycode - mn) * per + col
return kbmap.keysyms[ind]
|
Get the keysym associated with a particular keycode in the current X
environment. Although we get a list of keysyms from X in
'get_keyboard_mapping', this list is really a table with
'keysyms_per_keycode' columns and ``mx - mn + 1`` rows (where ``mx`` is the
maximum keycode and ``mn`` is the minimum keycode).
Thus, the index for a keysym given a keycode is:
``(keycode - mn) * keysyms_per_keycode + col``.
In most cases, setting ``col`` to 0 will work.
Witness the utter complexity:
http://tronche.com/gui/x/xlib/input/keyboard-encoding.html
You may also pass in your own keyboard mapping using the ``kbmap``
parameter, but xpybutil maintains an up-to-date version of this so you
shouldn't have to.
:param keycode: A physical key represented by an integer.
:type keycode: int
:param col: The column in the keysym table to use.
Unless you know what you're doing, just use 0.
:type col: int
:param kbmap: The keyboard mapping to use.
:type kbmap: xcb.xproto.GetKeyboardMappingReply
|
entailment
|
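The docstring's index formula is easy to check by hand. A worked sketch, where ``mn = 8`` and ``keysyms_per_keycode = 7`` are assumed sample values rather than guaranteed server constants:

mn, per = 8, 7          # assumed minimum keycode and table width
keycode, col = 38, 0
ind = (keycode - mn) * per + col
print(ind)              # (38 - 8) * 7 + 0 == 210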
def get_keycode(conn, keysym):
"""
Given a keysym, find the keycode mapped to it in the current X environment.
It is necessary to search the keysym table in order to do this, including
all columns.
:param keysym: An X keysym.
:return: A keycode or None if one could not be found.
:rtype: int
"""
mn, mx = get_min_max_keycode(conn)
cols = __kbmap.keysyms_per_keycode
for i in range(mn, mx + 1):
for j in range(0, cols):
ks = get_keysym(conn, i, col=j)
if ks == keysym:
return i
return None
|
Given a keysym, find the keycode mapped to it in the current X environment.
It is necessary to search the keysym table in order to do this, including
all columns.
:param keysym: An X keysym.
:return: A keycode or None if one could not be found.
:rtype: int
|
entailment
|
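A small round-trip sketch, assuming the module-level ``keysyms`` name table that ``lookup_string`` also consults:

kc = get_keycode(conn, keysyms['a'])
if kc is None:
    print("no keycode is currently mapped to 'a'")
else:
    print('a ->', hex(kc))  # e.g. 0x26 on a common pc105 layout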
def get_keys_to_mods(conn):
"""
Fetches and creates the keycode -> modifier mask mapping. Typically, you
shouldn't have to use this---xpybutil will keep this up to date if it
changes.
This function may be useful in that it should closely replicate the output
of the ``xmodmap`` command. For example:
::
        keymods = get_keys_to_mods(conn)
        for kc in sorted(keymods, key=lambda kc: keymods[kc]):
            print(keymods[kc], hex(kc), get_keysym_string(get_keysym(conn, kc)))

    This very closely replicates ``xmodmap``. I'm not getting precise
results quite yet, but I do believe I'm getting at least most of what
matters. (i.e., ``xmodmap`` returns valid keysym strings for some that
I cannot.)
:return: A dict mapping from keycode to modifier mask.
:rtype: dict
"""
mm = xproto.ModMask
modmasks = [mm.Shift, mm.Lock, mm.Control,
mm._1, mm._2, mm._3, mm._4, mm._5] # order matters
mods = conn.core.GetModifierMapping().reply()
res = {}
keyspermod = mods.keycodes_per_modifier
for mmi in range(0, len(modmasks)):
row = mmi * keyspermod
for kc in mods.keycodes[row:row + keyspermod]:
res[kc] = modmasks[mmi]
return res
|
Fetches and creates the keycode -> modifier mask mapping. Typically, you
shouldn't have to use this---xpybutil will keep this up to date if it
changes.
This function may be useful in that it should closely replicate the output
of the ``xmodmap`` command. For example:
::
keymods = get_keys_to_mods(conn)
for kc in sorted(keymods, key=lambda kc: keymods[kc]):
    print(keymods[kc], hex(kc), get_keysym_string(get_keysym(conn, kc)))
This very closely replicates ``xmodmap``. I'm not getting precise
results quite yet, but I do believe I'm getting at least most of what
matters. (i.e., ``xmodmap`` returns valid keysym strings for some that
I cannot.)
:return: A dict mapping from keycode to modifier mask.
:rtype: dict
|
entailment
|
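A runnable variant of the docstring's xmodmap-style dump, dropping the external ``get_keysym_string`` helper so the sketch stays self-contained; the sample output line is illustrative only, since actual keycodes depend on the server's modifier mapping.

keymods = get_keys_to_mods(conn)
for kc in sorted(keymods, key=lambda kc: keymods[kc]):
    print('mask=%s keycode=%s' % (hex(keymods[kc]), hex(kc)))
# e.g. mask=0x1 keycode=0x32  (a Shift key on many layouts)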
def get_modifiers(state):
"""
Takes a ``state`` (typically found in key press or button press events)
and returns a string list representation of the modifiers that were pressed
when generating the event.
:param state: Typically from ``some_event.state``.
:return: List of modifier string representations.
:rtype: [str]
"""
ret = []
if state & xproto.ModMask.Shift:
ret.append('Shift')
if state & xproto.ModMask.Lock:
ret.append('Lock')
if state & xproto.ModMask.Control:
ret.append('Control')
if state & xproto.ModMask._1:
ret.append('Mod1')
if state & xproto.ModMask._2:
ret.append('Mod2')
if state & xproto.ModMask._3:
ret.append('Mod3')
if state & xproto.ModMask._4:
ret.append('Mod4')
if state & xproto.ModMask._5:
ret.append('Mod5')
if state & xproto.KeyButMask.Button1:
ret.append('Button1')
if state & xproto.KeyButMask.Button2:
ret.append('Button2')
if state & xproto.KeyButMask.Button3:
ret.append('Button3')
if state & xproto.KeyButMask.Button4:
ret.append('Button4')
if state & xproto.KeyButMask.Button5:
ret.append('Button5')
return ret
|
Takes a ``state`` (typically found in key press or button press events)
and returns a string list representation of the modifiers that were pressed
when generating the event.
:param state: Typically from ``some_event.state``.
:return: List of modifier string representations.
:rtype: [str]
|
entailment
|
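Since ``get_modifiers`` only tests documented mask bits, its output can be shown deterministically:

state = xproto.ModMask.Shift | xproto.ModMask.Control | xproto.ModMask._1
print(get_modifiers(state))  # ['Shift', 'Control', 'Mod1']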
def grab_key(conn, wid, modifiers, key):
"""
Grabs a key for a particular window and a modifiers/key value.
If the grab was successful, return True. Otherwise, return False.
If your client is grabbing keys, it is useful to notify the user if a
key wasn't grabbed. Keyboard shortcuts not responding is disorienting!
Also, this function will grab several keys based on varying modifiers.
Namely, this accounts for all of the "trivial" modifiers that may have
    an effect on X events, but probably shouldn't affect key grabbing. (i.e.,
whether num lock or caps lock is on.)
N.B. You should probably be using 'bind_key' or 'bind_global_key' instead.
:param wid: A window identifier.
:type wid: int
:param modifiers: A modifier mask.
:type modifiers: int
:param key: A keycode.
:type key: int
:rtype: bool
"""
try:
for mod in TRIVIAL_MODS:
conn.core.GrabKeyChecked(True, wid, modifiers | mod, key, GM.Async,
GM.Async).check()
return True
except xproto.BadAccess:
return False
|
Grabs a key for a particular window and a modifiers/key value.
If the grab was successful, return True. Otherwise, return False.
If your client is grabbing keys, it is useful to notify the user if a
key wasn't grabbed. Keyboard shortcuts not responding is disorienting!
Also, this function will grab several keys based on varying modifiers.
Namely, this accounts for all of the "trivial" modifiers that may have
an effect on X events, but probably shouldn't affect key grabbing. (i.e.,
whether num lock or caps lock is on.)
N.B. You should probably be using 'bind_key' or 'bind_global_key' instead.
:param wid: A window identifier.
:type wid: int
:param modifiers: A modifier mask.
:type modifiers: int
:param key: A keycode.
:type key: int
:rtype: bool
|
entailment
|
def ungrab_key(conn, wid, modifiers, key):
"""
Ungrabs a key that was grabbed by ``grab_key``. Similarly, it will return
True on success and False on failure.
When ungrabbing a key, the parameters to this function should be
*precisely* the same as the parameters to ``grab_key``.
:param wid: A window identifier.
:type wid: int
:param modifiers: A modifier mask.
:type modifiers: int
:param key: A keycode.
:type key: int
:rtype: bool
"""
try:
for mod in TRIVIAL_MODS:
conn.core.UngrabKeyChecked(key, wid, modifiers | mod).check()
return True
except xproto.BadAccess:
return False
|
Ungrabs a key that was grabbed by ``grab_key``. Similarly, it will return
True on success and False on failure.
When ungrabbing a key, the parameters to this function should be
*precisely* the same as the parameters to ``grab_key``.
:param wid: A window identifier.
:type wid: int
:param modifiers: A modifier mask.
:type modifiers: int
:param key: A keycode.
:type key: int
:rtype: bool
|
entailment
|
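A pairing sketch for the two calls above; per the docstring, the ungrab must repeat the grab's exact parameters. ``conn``, ``root``, ``mods`` and ``kc`` are assumed from the caller's setup.

if grab_key(conn, root, mods, kc):
    try:
        pass  # handle Key{Press,Release} events for (mods, kc) here
    finally:
        ungrab_key(conn, root, mods, kc)  # same wid, modifiers, key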
def update_keyboard_mapping(conn, e):
"""
Whenever the keyboard mapping is changed, this function needs to be called
    to update xpybutil's internal representation of the current keysym table.
Indeed, xpybutil will do this for you automatically.
Moreover, if something is changed that affects the current keygrabs,
xpybutil will initiate a regrab with the changed keycode.
:param e: The MappingNotify event.
:type e: xcb.xproto.MappingNotifyEvent
:rtype: void
"""
    global __kbmap, __keysmods

    newmap = get_keyboard_mapping(conn).reply()

    if e is None:
        __kbmap = newmap
        __keysmods = get_keys_to_mods(conn)
        return

    if e.request == xproto.Mapping.Keyboard:
        changes = {}
        mn, mx = get_min_max_keycode(conn)
        for kc in range(mn, mx + 1):
            knew = get_keysym(conn, kc, kbmap=newmap)
            oldkc = get_keycode(conn, knew)
            if oldkc != kc:
                changes[oldkc] = kc

        __kbmap = newmap
        __regrab(conn, changes)
    elif e.request == xproto.Mapping.Modifier:
        __keysmods = get_keys_to_mods(conn)
|
Whenever the keyboard mapping is changed, this function needs to be called
to update xpybutil's internal representation of the current keysym table.
Indeed, xpybutil will do this for you automatically.
Moreover, if something is changed that affects the current keygrabs,
xpybutil will initiate a regrab with the changed keycode.
:param e: The MappingNotify event.
:type e: xcb.xproto.MappingNotifyEvent
:rtype: void
|
entailment
|
def run_keybind_callbacks(e):
"""
A function that intercepts all key press/release events, and runs
their corresponding callback functions. Nothing much to see here, except
that we must mask out the trivial modifiers from the state in order to
find the right callback.
Callbacks are called in the order that they have been added. (FIFO.)
:param e: A Key{Press,Release} event.
:type e: xcb.xproto.Key{Press,Release}Event
    :return: True if at least one callback was serviced.
    :rtype: bool
"""
kc, mods = e.detail, e.state
for mod in TRIVIAL_MODS:
mods &= ~mod
key = (e.event, mods, kc)
    serviced = False
    for cb in __keybinds.get(key, []):
        try:
            cb(e)
        except TypeError:
            # Fall back to zero-argument callbacks, per bind_key's contract.
            cb()
        serviced = True
    return serviced
|
A function that intercepts all key press/release events, and runs
their corresponding callback functions. Nothing much to see here, except
that we must mask out the trivial modifiers from the state in order to
find the right callback.
Callbacks are called in the order that they have been added. (FIFO.)
:param e: A Key{Press,Release} event.
:type e: xcb.xproto.Key{Press,Release}Event
:return: True if at least one callback was serviced.
:rtype: bool
|
entailment
|
def __regrab(conn, changes):
    """
    Takes a dictionary of changes (mapping old keycode to new keycode) and
    regrabs any keys that have been changed with the updated keycode.
    :param changes: Mapping of changes from old keycode to new keycode.
    :type changes: dict
    :rtype: void
    """
    # Iterate over a snapshot, since the dict is mutated while regrabbing.
    for wid, mods, kc in list(__keybinds):
        if kc in changes:
            ungrab_key(conn, wid, mods, kc)
            grab_key(conn, wid, mods, changes[kc])

            old = (wid, mods, kc)
            new = (wid, mods, changes[kc])
            __keybinds[new] = __keybinds[old]
            del __keybinds[old]
|
Takes a dictionary of changes (mapping old keycode to new keycode) and
regrabs any keys that have been changed with the updated keycode.
:param changes: Mapping of changes from old keycode to new keycode.
:type changes: dict
:rtype: void
|
entailment
|
def get_storages(self, storage_type='normal'):
"""
Return a list of Storage objects from the API.
Storage types: public, private, normal, backup, cdrom, template, favorite
"""
res = self.get_request('/storage/' + storage_type)
return Storage._create_storage_objs(res['storages'], cloud_manager=self)
|
Return a list of Storage objects from the API.
Storage types: public, private, normal, backup, cdrom, template, favorite
|
entailment
|
def get_storage(self, storage):
"""
Return a Storage object from the API.
"""
res = self.get_request('/storage/' + str(storage))
return Storage(cloud_manager=self, **res['storage'])
|
Return a Storage object from the API.
|
entailment
|
def create_storage(self, size=10, tier='maxiops', title='Storage disk', zone='fi-hel1', backup_rule=None):
"""
Create a Storage object. Returns an object based on the API's response.
"""
body = {
'storage': {
'size': size,
'tier': tier,
'title': title,
'zone': zone,
            'backup_rule': backup_rule or {}  # avoid a shared mutable default
}
}
res = self.post_request('/storage', body)
return Storage(cloud_manager=self, **res['storage'])
|
Create a Storage object. Returns an object based on the API's response.
|
entailment
|
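A hedged usage sketch; the ``CloudManager('user', 'password')`` construction is an assumption inferred from the method's use of ``self.post_request``, not a verified constructor.

manager = CloudManager('user', 'password')  # hypothetical construction
disk = manager.create_storage(size=20, title='app data', zone='fi-hel1')
print(disk.uuid, disk.size, disk.title)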
def modify_storage(self, storage, size, title, backup_rule=None):
"""
Modify a Storage object. Returns an object based on the API's response.
"""
    res = self._modify_storage(str(storage), size, title, backup_rule or {})
return Storage(cloud_manager=self, **res['storage'])
|
Modify a Storage object. Returns an object based on the API's response.
|
entailment
|
def attach_storage(self, server, storage, storage_type, address):
"""
Attach a Storage object to a Server. Return a list of the server's storages.
"""
body = {'storage_device': {}}
if storage:
body['storage_device']['storage'] = str(storage)
if storage_type:
body['storage_device']['type'] = storage_type
if address:
body['storage_device']['address'] = address
url = '/server/{0}/storage/attach'.format(server)
res = self.post_request(url, body)
return Storage._create_storage_objs(res['server']['storage_devices'], cloud_manager=self)
|
Attach a Storage object to a Server. Return a list of the server's storages.
|
entailment
|
def detach_storage(self, server, address):
"""
    Detach a Storage object from a Server. Return a list of the server's storages.
"""
body = {'storage_device': {'address': address}}
url = '/server/{0}/storage/detach'.format(server)
res = self.post_request(url, body)
return Storage._create_storage_objs(res['server']['storage_devices'], cloud_manager=self)
|
Detach a Storage object from a Server. Return a list of the server's storages.
|
entailment
|
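Sketch of the attach/detach cycle, continuing the hypothetical ``manager`` and ``disk`` above; the server UUID and address values are placeholders only.

devices = manager.attach_storage(server='server-uuid', storage=disk,
                                 storage_type='disk', address='virtio:1')
print([d.title for d in devices])
manager.detach_storage(server='server-uuid', address='virtio:1')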
def _reset(self, **kwargs):
"""
Reset after repopulating from API.
"""
    # there are some inconsistencies in the API regarding these
# note: this could be written in fancier ways, but this way is simpler
if 'uuid' in kwargs:
self.uuid = kwargs['uuid']
elif 'storage' in kwargs: # let's never use storage.storage internally
self.uuid = kwargs['storage']
if 'title' in kwargs:
self.title = kwargs['title']
elif 'storage_title' in kwargs:
self.title = kwargs['storage_title']
if 'size' in kwargs:
self.size = kwargs['size']
elif 'storage_size' in kwargs:
self.size = kwargs['storage_size']
# send the rest to super._reset
filtered_kwargs = dict(
(key, val)
for key, val in kwargs.items()
if key not in ['uuid', 'storage', 'title', 'storage_title', 'size', 'storage_size']
)
super(Storage, self)._reset(**filtered_kwargs)
|
Reset after repopulating from API.
|
entailment
|
def save(self):
"""
Save (modify) the storage to the API.
Note: only size and title are updateable fields.
"""
res = self.cloud_manager._modify_storage(self, self.size, self.title)
self._reset(**res['storage'])
|
Save (modify) the storage to the API.
Note: only size and title are updateable fields.
|
entailment
|
def to_dict(self):
"""
Return a dict that can be serialised to JSON and sent to UpCloud's API.
    Includes the required `tier`, `title` and `size` fields, plus the
    optional `address` and `zone` fields when they are set.
"""
body = {
'tier': self.tier,
'title': self.title,
'size': self.size,
}
# optionals
if hasattr(self, 'address') and self.address:
body['address'] = self.address
if hasattr(self, 'zone') and self.zone:
body['zone'] = self.zone
return body
|
Return a dict that can be serialised to JSON and sent to UpCloud's API.
Includes the required `tier`, `title` and `size` fields, plus the
optional `address` and `zone` fields when they are set.
|
entailment
|
def fetch_album(self, album_id, terr=KKBOXTerritory.TAIWAN):
'''
    Fetches an album by the given ID.
:param album_id: the album ID.
:type album_id: str
:param terr: the current territory.
:return: API response.
:rtype: dict
See `https://docs-en.kkbox.codes/v1.1/reference#albums-album_id`.
'''
url = 'https://api.kkbox.com/v1.1/albums/%s' % album_id
url += '?' + url_parse.urlencode({'territory': terr})
return self.http._post_data(url, None, self.http._headers_with_access_token())
|
Fetches an album by the given ID.
:param album_id: the album ID.
:type album_id: str
:param terr: the current territory.
:return: API response.
:rtype: dict
See `https://docs-en.kkbox.codes/v1.1/reference#albums-album_id`.
|
entailment
|
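Usage sketch; ``api`` stands for an authorised client instance, the album ID is a placeholder, and the printed fields are assumptions about KKBOX's album object rather than verified schema.

album = api.fetch_album('album-id-placeholder', terr=KKBOXTerritory.TAIWAN)
print(album.get('name'), album.get('release_date'))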
def lookup(self):
"""
Prints name, author, size and age
"""
print "%s by %s, size: %s, uploaded %s ago" % (self.name, self.author,
self.size, self.age)
|
Prints name, author, size and age
|
entailment
|
def _get_max_page(self, url):
"""
Open url and return amount of pages
"""
html = requests.get(url).text
pq = PyQuery(html)
try:
tds = int(pq("h2").text().split()[-1])
if tds % 25:
            return tds // 25 + 1
        return tds // 25
except ValueError:
raise ValueError("No results found!")
|
Open url and return amount of pages
|
entailment
|
def build(self, update=True):
"""
Build and return url. Also update max_page.
"""
page = "".join(("/", str(self.page), "/"))
if self.category:
category = " category:" + self.category
else:
category = ""
if self.order:
order = "".join(("?field=", self.order[0], "&sorder=", self.order[1]))
else:
order = ""
ret = "".join((self.base, self.query, category, page, order))
if update:
self.max_page = self._get_max_page(ret)
return ret
|
Build and return url. Also update max_page.
|
entailment
|
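To make the produced string concrete, a sketch with hypothetical attribute values matching the ones ``build`` reads:

url.base = 'https://kickass.to/usearch/'  # hypothetical values
url.query = 'ubuntu'
url.category = 'applications'
url.page = 2
url.order = ('size', 'desc')
print(url.build(update=False))
# -> https://kickass.to/usearch/ubuntu category:applications/2/?field=size&sorder=desc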
def build(self, update=True):
"""
Build and return url. Also update max_page.
URL structure for user torrent lists differs from other result lists
as the page number is part of the query string and not the URL path
"""
query_str = "?page={}".format(self.page)
if self.order:
query_str += "".join(("&field=", self.order[0], "&sorder=",self.order[1]))
ret = "".join((self.base, self.user, "/uploads/", query_str))
if update:
self.max_page = self._get_max_page(ret)
return ret
|
Build and return url. Also update max_page.
URL structure for user torrent lists differs from other result lists
as the page number is part of the query string and not the URL path
|
entailment
|
def _items(self):
"""
Parse url and yield namedtuple Torrent for every torrent on page
"""
torrents = map(self._get_torrent, self._get_rows())
for t in torrents:
yield t
|
Parse url and yield namedtuple Torrent for every torrent on page
|
entailment
|
def _get_torrent(self, row):
"""
Parse row into namedtuple
"""
td = row("td")
name = td("a.cellMainLink").text()
name = name.replace(" . ", ".").replace(" .", ".")
author = td("a.plain").text()
verified_author = True if td(".lightgrey>.ka-verify") else False
category = td("span").find("strong").find("a").eq(0).text()
verified_torrent = True if td(".icon16>.ka-green") else False
comments = td(".iaconbox>.icommentjs>.iconvalue").text()
torrent_link = "http://" + BASE.domain
if td("a.cellMainLink").attr("href") is not None:
torrent_link += td("a.cellMainLink").attr("href")
magnet_link = td("a[data-nop]").eq(1).attr("href")
download_link = td("a[data-download]").attr("href")
td_centers = row("td.center")
size = td_centers.eq(0).text()
files = td_centers.eq(1).text()
age = " ".join(td_centers.eq(2).text().split())
seed = td_centers.eq(3).text()
leech = td_centers.eq(4).text()
return Torrent(name, author, verified_author, category, size,
files, age, seed, leech, verified_torrent, comments,
torrent_link, magnet_link, download_link)
|
Parse row into namedtuple
|
entailment
|
def _get_rows(self):
"""
Return all rows on page
"""
html = requests.get(self.url.build()).text
if re.search('did not match any documents', html):
return []
pq = PyQuery(html)
rows = pq("table.data").find("tr")
    # skip the header row
    return [rows.eq(i) for i in range(1, rows.size())]
|
Return all rows on page
|
entailment
|
def pages(self, page_from, page_to):
"""
Yield torrents in range from page_from to page_to
"""
if not all([page_from < self.url.max_page, page_from > 0,
page_to <= self.url.max_page, page_to > page_from]):
raise IndexError("Invalid page numbers")
size = (page_to + 1) - page_from
    ret = []
page_list = range(page_from, page_to+1)
locks = [threading.Lock() for i in range(size)]
for lock in locks[1:]:
lock.acquire()
def t_function(pos):
"""
Thread function that fetch page for list of torrents
"""
res = self.page(page_list[pos]).list()
locks[pos].acquire()
ret.extend(res)
if pos != size-1:
locks[pos+1].release()
threads = [threading.Thread(target=t_function, args=(i,))
for i in range(size)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
for torrent in ret:
yield torrent
|
Yield torrents in range from page_from to page_to
|
entailment
|
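Iteration sketch; ``search`` stands for the surrounding result object that exposes ``pages()``, ``page()`` and ``list()``.

for torrent in search.pages(1, 3):  # pages 1 through 3, fetched in threads
    print(torrent.name, torrent.size, torrent.seed)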
def all(self):
"""
Yield torrents in range from current page to last page
"""
return self.pages(self.url.page, self.url.max_page)
|
Yield torrents in range from current page to last page
|
entailment
|
def order(self, field, order=None):
"""
Set field and order set by arguments
"""
if not order:
order = ORDER.DESC
self.url.order = (field, order)
self.url.set_page(1)
return self
|
Set field and order set by arguments
|
entailment
|
def category(self, category):
"""
Change category of current search and return self
"""
self.url.category = category
self.url.set_page(1)
return self
|
Change category of current search and return self
|
entailment
|
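The ``return self`` convention enables a fluent style; 'tv' and 'size' below are illustrative values, and ``page()``/``list()`` are assumed from their use inside ``pages()``.

results = search.category('tv').order('size', ORDER.DESC)
for torrent in results.page(1).list():
    print(torrent.name)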
def destroy(self):
"""
Remove this FirewallRule from the API.
This instance must be associated with a server for this method to work,
which is done by instantiating via server.get_firewall_rules().
"""
    if not hasattr(self, 'server') or not self.server:
        raise Exception(
            """FirewallRule not associated with a server;
            please use server.get_firewall_rules() to get objects
            that are associated with a server.
            """)
return self.server.cloud_manager.delete_firewall_rule(
self.server.uuid,
self.position
)
|
Remove this FirewallRule from the API.
This instance must be associated with a server for this method to work,
which is done by instantiating via server.get_firewall_rules().
|
entailment
|
def fetch_new_release_category(self, category_id, terr=KKBOXTerritory.TAIWAN):
'''
    Fetches a new release category by the given ID.
    :param category_id: the category ID.
:type category_id: str
:param terr: the current territory.
:return: API response.
:rtype: list
See `https://docs-en.kkbox.codes/v1.1/reference#newreleasecategories-category_id`
'''
url = 'https://api.kkbox.com/v1.1/new-release-categories/%s' % category_id
url += '?' + url_parse.urlencode({'territory': terr})
return self.http._post_data(url, None, self.http._headers_with_access_token())
|
Fetches a new release category by the given ID.
:param category_id: the category ID.
:type category_id: str
:param terr: the current territory.
:return: API response.
:rtype: list
See `https://docs-en.kkbox.codes/v1.1/reference#newreleasecategories-category_id`
|
entailment
|
def fetch_top_tracks_of_artist(self, artist_id, terr=KKBOXTerritory.TAIWAN):
'''
    Fetches the top tracks belonging to an artist by the given ID.
:param artist_id: the artist ID.
:type artist_id: str
:param terr: the current territory.
:return: API response.
:rtype: dict
See 'https://docs-en.kkbox.codes/v1.1/reference#artists-artist_id-toptracks'
'''
url = 'https://api.kkbox.com/v1.1/artists/%s/top-tracks' % artist_id
url += '?' + url_parse.urlencode({'territory': terr})
return self.http._post_data(url, None, self.http._headers_with_access_token())
|
Fetches the top tracks belonging to an artist by the given ID.
:param artist_id: the artist ID.
:type artist_id: str
:param terr: the current territory.
:return: API response.
:rtype: dict
See 'https://docs-en.kkbox.codes/v1.1/reference#artists-artist_id-toptracks'
|
entailment
|