| sentence1 (stringlengths 52–3.87M) | sentence2 (stringlengths 1–47.2k) | label (stringclasses 1 value) |
|---|---|---|
def do_down(self, arg):
"""d(own) [count]
Move the current frame count (default one) levels down in the
stack trace (to a newer frame).
"""
if self.curindex + 1 == len(self.stack):
self.error('Newest frame')
return
try:
count = int(arg or 1)
except ValueError:
self.error('Invalid frame count (%s)' % arg)
return
if count < 0:
newframe = len(self.stack) - 1
else:
newframe = min(len(self.stack) - 1, self.curindex + count)
self._select_frame(newframe)
|
d(own) [count]
Move the current frame count (default one) levels down in the
stack trace (to a newer frame).
|
entailment
|
def do_until(self, arg):
"""unt(il) [lineno]
Without argument, continue execution until the line with a
number greater than the current one is reached. With a line
number, continue execution until a line with a number greater
or equal to that is reached. In both cases, also stop when
the current frame returns.
"""
if arg:
try:
lineno = int(arg)
except ValueError:
self.error('Error in argument: %r' % arg)
return
if lineno <= self.curframe.f_lineno:
self.error('"until" line number is smaller than current '
'line number')
return
else:
lineno = None
self.set_until(self.curframe, lineno)
self.set_sigint_handler()
return 1
|
unt(il) [lineno]
Without argument, continue execution until the line with a
number greater than the current one is reached. With a line
number, continue execution until a line with a number greater
or equal to that is reached. In both cases, also stop when
the current frame returns.
|
entailment
|
def do_run(self, arg):
"""run [args...]
Restart the debugged python program. If a string is supplied
it is split with "shlex", and the result is used as the new
sys.argv. History, breakpoints, actions and debugger options
are preserved. "restart" is an alias for "run".
"""
if arg:
argv0 = sys.argv[0:1]
sys.argv = shlex.split(arg)
sys.argv[:0] = argv0
# this is caught in the main debugger loop
raise Restart
|
run [args...]
Restart the debugged python program. If a string is supplied
it is split with "shlex", and the result is used as the new
sys.argv. History, breakpoints, actions and debugger options
are preserved. "restart" is an alias for "run".
|
entailment
|
def do_jump(self, arg):
"""j(ump) lineno
Set the next line that will be executed. Only available in
the bottom-most frame. This lets you jump back and execute
code again, or jump forward to skip code that you don't want
to run.
It should be noted that not all jumps are allowed -- for
instance it is not possible to jump into the middle of a
for loop or out of a finally clause.
"""
if self.curindex + 1 != len(self.stack):
self.error('You can only jump within the bottom frame')
return
try:
arg = int(arg)
except ValueError:
self.error("The 'jump' command requires a line number")
else:
try:
# Do the jump, fix up our copy of the stack, and display the
# new position
self.curframe.f_lineno = arg
self.stack[self.curindex] = self.stack[self.curindex][0], arg
self.print_stack_entry(self.stack[self.curindex])
except ValueError as e:
self.error('Jump failed: %s' % e)
|
j(ump) lineno
Set the next line that will be executed. Only available in
the bottom-most frame. This lets you jump back and execute
code again, or jump forward to skip code that you don't want
to run.
It should be noted that not all jumps are allowed -- for
instance it is not possible to jump into the middle of a
for loop or out of a finally clause.
|
entailment
|
def do_debug(self, arg):
"""debug code
Enter a recursive debugger that steps through the code
argument (which is an arbitrary expression or statement to be
executed in the current environment).
"""
self.settrace(False)
globals = self.curframe.f_globals
locals = self.get_locals(self.curframe)
p = Pdb(self.completekey, self.stdin, self.stdout, debug=True)
p.prompt = "(%s) " % self.prompt.strip()
self.message("ENTERING RECURSIVE DEBUGGER")
sys.call_tracing(p.run, (arg, globals, locals))
self.message("LEAVING RECURSIVE DEBUGGER")
self.settrace(True)
self.lastcmd = p.lastcmd
|
debug code
Enter a recursive debugger that steps through the code
argument (which is an arbitrary expression or statement to be
executed in the current environment).
|
entailment
|
def do_quit(self, arg):
"""q(uit)\nexit
Quit from the debugger. The program being executed is aborted.
"""
if isinstance(self.stdin, RemoteSocket) and not self.is_debug_instance:
return self.do_detach(arg)
self._user_requested_quit = True
self.set_quit()
return 1
|
q(uit)\nexit
Quit from the debugger. The program being executed is aborted.
|
entailment
|
def do_args(self, arg):
"""a(rgs)
Print the argument list of the current function.
"""
co = self.curframe.f_code
dict = self.get_locals(self.curframe)
n = co.co_argcount
if co.co_flags & 4: n = n+1    # CO_VARARGS: count the *args slot
if co.co_flags & 8: n = n+1    # CO_VARKEYWORDS: count the **kwargs slot
for i in range(n):
name = co.co_varnames[i]
if name in dict:
self.message('%s = %s' % (name, bdb.safe_repr(dict[name])))
else:
self.message('%s = *** undefined ***' % (name,))
|
a(rgs)
Print the argument list of the current function.
|
entailment
|
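The flag arithmetic above, in isolation: flags 4 and 8 are CO_VARARGS and CO_VARKEYWORDS, which add the *args and **kwargs slots to the argument count. A minimal standalone check (not part of the debugger itself):

import inspect

def f(a, b, *args, **kwargs):
    pass

co = f.__code__
n = co.co_argcount
if co.co_flags & inspect.CO_VARARGS: n += 1      # same test as "co.co_flags & 4"
if co.co_flags & inspect.CO_VARKEYWORDS: n += 1  # same test as "co.co_flags & 8"
assert co.co_varnames[:n] == ('a', 'b', 'args', 'kwargs')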
def do_retval(self, arg):
"""retval
Print the return value for the last return of a function.
"""
locals = self.get_locals(self.curframe)
if '__return__' in locals:
self.message(bdb.safe_repr(locals['__return__']))
else:
self.error('Not yet returned!')
|
retval
Print the return value for the last return of a function.
|
entailment
|
def do_p(self, arg):
"""p expression
Print the value of the expression.
"""
try:
self.message(bdb.safe_repr(self._getval(arg)))
except Exception:
pass
|
p expression
Print the value of the expression.
|
entailment
|
def do_pp(self, arg):
"""pp expression
Pretty-print the value of the expression.
"""
obj = self._getval(arg)
try:
repr(obj)
except Exception:
self.message(bdb.safe_repr(obj))
else:
self.message(pprint.pformat(obj))
|
pp expression
Pretty-print the value of the expression.
|
entailment
|
def do_list(self, arg):
"""l(ist) [first [,last] | .]
List source code for the current file. Without arguments,
list 11 lines around the current line or continue the previous
listing. With . as argument, list 11 lines around the current
line. With one argument, list 11 lines around that line.
With two arguments, list the given range; if the second
argument is less than the first, it is a count.
The current line in the current frame is indicated by "->".
If an exception is being debugged, the line where the
exception was originally raised or propagated is indicated by
">>", if it differs from the current line.
"""
self.lastcmd = 'list'
last = None
if arg and arg != '.':
try:
if ',' in arg:
first, last = arg.split(',')
first = int(first.strip())
last = int(last.strip())
if last < first:
# assume it's a count
last = first + last
else:
first = int(arg.strip())
first = max(1, first - 5)
except ValueError:
self.error('Error in argument: %r' % arg)
return
elif self.lineno is None or arg == '.':
first = max(1, self.curframe.f_lineno - 5)
else:
first = self.lineno + 1
if last is None:
last = first + 10
filename = self.curframe.f_code.co_filename
breaklist = self.get_file_breaks(filename)
try:
lines = linecache.getlines(filename, self.curframe.f_globals)
self._print_lines(lines[first-1:last], first, breaklist,
self.curframe)
self.lineno = min(last, len(lines))
if len(lines) < last:
self.message('[EOF]')
except KeyboardInterrupt:
pass
|
l(ist) [first [,last] | .]
List source code for the current file. Without arguments,
list 11 lines around the current line or continue the previous
listing. With . as argument, list 11 lines around the current
line. With one argument, list 11 lines around that line.
With two arguments, list the given range; if the second
argument is less than the first, it is a count.
The current line in the current frame is indicated by "->".
If an exception is being debugged, the line where the
exception was originally raised or propagated is indicated by
">>", if it differs from the current line.
|
entailment
|
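The range arithmetic above, in isolation: with two arguments where the second is smaller than the first, the second is treated as a count, and a single argument centers the 11-line window. A small sketch with made-up inputs:

# 'list 100, 5' -> second number smaller, so it is a count: lines 100..105.
first, last = 100, 5
if last < first:
    last = first + last
assert (first, last) == (100, 105)

# 'list 100' -> start 5 lines earlier and show 11 lines: 95..105.
first = max(1, 100 - 5)
last = first + 10
assert (first, last) == (95, 105)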
def do_longlist(self, arg):
"""longlist | ll
List the whole source code for the current function or frame.
"""
filename = self.curframe.f_code.co_filename
breaklist = self.get_file_breaks(filename)
try:
lines, lineno = getsourcelines(self.curframe,
self.get_locals(self.curframe))
except IOError as err:
self.error(err)
return
self._print_lines(lines, lineno, breaklist, self.curframe)
|
longlist | ll
List the whole source code for the current function or frame.
|
entailment
|
def do_source(self, arg):
"""source expression
Try to get source code for the given object and display it.
"""
try:
obj = self._getval(arg)
except Exception:
return
try:
lines, lineno = getsourcelines(obj, self.get_locals(self.curframe))
except (IOError, TypeError) as err:
self.error(err)
return
self._print_lines(lines, lineno)
|
source expression
Try to get source code for the given object and display it.
|
entailment
|
def _print_lines(self, lines, start, breaks=(), frame=None):
"""Print a range of lines."""
if frame:
current_lineno = frame.f_lineno
exc_lineno = self.tb_lineno.get(frame, -1)
else:
current_lineno = exc_lineno = -1
for lineno, line in enumerate(lines, start):
s = str(lineno).rjust(3)
if len(s) < 4:
s += ' '
if lineno in breaks:
s += 'B'
else:
s += ' '
if lineno == current_lineno:
s += '->'
elif lineno == exc_lineno:
s += '>>'
self.message(s + '\t' + line.rstrip())
|
Print a range of lines.
|
entailment
|
def do_whatis(self, arg):
"""whatis arg
Print the type of the argument.
"""
try:
value = self._getval(arg)
except Exception:
# _getval() already printed the error
return
code = None
# Is it a function?
try:
code = value.__code__
except Exception:
pass
if code:
self.message('Function %s' % code.co_name)
return
# Is it an instance method?
try:
code = value.__func__.__code__
except Exception:
pass
if code:
self.message('Method %s' % code.co_name)
return
# Is it a class?
if value.__class__ is type:
self.message('Class %s.%s' % (value.__module__, value.__name__))
return
# None of the above...
self.message(type(value))
|
whatis arg
Print the type of the argument.
|
entailment
|
def do_display(self, arg):
"""display [expression]
Display the value of the expression if it changed, each time execution
stops in the current frame.
Without expression, list all display expressions for the current frame.
"""
if not arg:
self.message('Currently displaying:')
for key, val in self.displaying.get(self.curframe, {}).items():
self.message('%s: %s' % (key, bdb.safe_repr(val)))
else:
val = self._getval_except(arg)
self.displaying.setdefault(self.curframe, {})[arg] = val
self.message('display %s: %s' % (arg, bdb.safe_repr(val)))
|
display [expression]
Display the value of the expression if it changed, each time execution
stops in the current frame.
Without expression, list all display expressions for the current frame.
|
entailment
|
def do_undisplay(self, arg):
"""undisplay [expression]
Do not display the expression any more in the current frame.
Without expression, clear all display expressions for the current frame.
"""
if arg:
try:
del self.displaying.get(self.curframe, {})[arg]
except KeyError:
self.error('not displaying %s' % arg)
else:
self.displaying.pop(self.curframe, None)
|
undisplay [expression]
Do not display the expression any more in the current frame.
Without expression, clear all display expressions for the current frame.
|
entailment
|
def do_interact(self, arg):
"""interact
Start an interactive interpreter whose global namespace
contains all the (global and local) names found in the current scope.
"""
def readfunc(prompt):
self.stdout.write(prompt)
self.stdout.flush()
line = self.stdin.readline()
line = line.rstrip('\r\n')
if line == 'EOF':
raise EOFError
return line
ns = self.curframe.f_globals.copy()
ns.update(self.get_locals(self.curframe))
if isinstance(self.stdin, RemoteSocket):
# Main interpreter redirection of the code module.
if PY3:
import sys as _sys
else:
# Parent module 'pdb_clone' not found while handling absolute
# import.
_sys = __import__('sys', level=0)
code.sys = _sys
self.redirect(code.interact, local=ns, readfunc=readfunc)
else:
code.interact("*interactive*", local=ns)
|
interact
Start an interactive interpreter whose global namespace
contains all the (global and local) names found in the current scope.
|
entailment
|
def do_alias(self, arg):
"""alias [name [command [parameter parameter ...] ]]
Create an alias called 'name' that executes 'command'. The
command must *not* be enclosed in quotes. Replaceable
parameters can be indicated by %1, %2, and so on, while %* is
replaced by all the parameters. If no command is given, the
current alias for name is shown. If no name is given, all
aliases are listed.
Aliases may be nested and can contain anything that can be
legally typed at the pdb prompt. Note! You *can* override
internal pdb commands with aliases! Those internal commands
are then hidden until the alias is removed. Aliasing is
recursively applied to the first word of the command line; all
other words in the line are left alone.
As an example, here are two useful aliases (especially when
placed in the .pdbrc file):
# Print instance variables (usage "pi classInst")
alias pi for k in %1.__dict__.keys(): print("%1.",k,"=",%1.__dict__[k])
# Print instance variables in self
alias ps pi self
"""
args = arg.split()
if len(args) == 0:
keys = sorted(self.aliases.keys())
for alias in keys:
self.message("%s = %s" % (alias, self.aliases[alias]))
return
if args[0] in self.aliases and len(args) == 1:
self.message("%s = %s" % (args[0], self.aliases[args[0]]))
else:
self.aliases[args[0]] = ' '.join(args[1:])
|
alias [name [command [parameter parameter ...] ]]
Create an alias called 'name' that executes 'command'. The
command must *not* be enclosed in quotes. Replaceable
parameters can be indicated by %1, %2, and so on, while %* is
replaced by all the parameters. If no command is given, the
current alias for name is shown. If no name is given, all
aliases are listed.
Aliases may be nested and can contain anything that can be
legally typed at the pdb prompt. Note! You *can* override
internal pdb commands with aliases! Those internal commands
are then hidden until the alias is removed. Aliasing is
recursively applied to the first word of the command line; all
other words in the line are left alone.
As an example, here are two useful aliases (especially when
placed in the .pdbrc file):
# Print instance variables (usage "pi classInst")
alias pi for k in %1.__dict__.keys(): print("%1.",k,"=",%1.__dict__[k])
# Print instance variables in self
alias ps pi self
|
entailment
|
def do_unalias(self, arg):
"""unalias name
Delete the specified alias.
"""
args = arg.split()
if len(args) == 0: return
if args[0] in self.aliases:
del self.aliases[args[0]]
|
unalias name
Delete the specified alias.
|
entailment
|
def do_thread(self, arg):
"""th(read) [threadnumber]
Without argument, display a summary of all active threads.
The summary prints for each thread:
1. the thread number assigned by pdb
2. the thread name
3. the python thread identifier
4. the current stack frame summary for that thread
An asterisk '*' to the left of the pdb thread number indicates the
current thread, a plus sign '+' indicates the thread being traced by
pdb.
With a pdb thread number as argument, make this thread the current
thread. The 'where', 'up' and 'down' commands apply now to the frame
stack of this thread. The current scope is now the frame currently
executed by this thread at the time the command is issued and the
'list', 'll', 'args', 'p', 'pp', 'source' and 'interact' commands are
run in the context of that frame. Note that this frame may bear no
relationship (for a non-deadlocked thread) to that thread's current
activity by the time you are examining the frame.
This command does not stop the thread.
"""
# Import the threading module in the main interpreter to get an
# enumeration of the main interpreter threads.
if PY3:
try:
import threading
except ImportError:
import dummy_threading as threading
else:
# Do not use relative import detection to avoid the RuntimeWarning:
# Parent module 'pdb_clone' not found while handling absolute
# import.
try:
threading = __import__('threading', level=0)
except ImportError:
threading = __import__('dummy_threading', level=0)
if not self.pdb_thread:
self.pdb_thread = threading.current_thread()
if not self.current_thread:
self.current_thread = self.pdb_thread
current_frames = sys._current_frames()
tlist = sorted(threading.enumerate(), key=attrgetter('name', 'ident'))
try:
self._do_thread(arg, current_frames, tlist)
finally:
# For some reason this local must be explicitly deleted in order
# to release the subinterpreter.
del current_frames
|
th(read) [threadnumber]
Without argument, display a summary of all active threads.
The summary prints for each thread:
1. the thread number assigned by pdb
2. the thread name
3. the python thread identifier
4. the current stack frame summary for that thread
An asterisk '*' to the left of the pdb thread number indicates the
current thread, a plus sign '+' indicates the thread being traced by
pdb.
With a pdb thread number as argument, make this thread the current
thread. The 'where', 'up' and 'down' commands apply now to the frame
stack of this thread. The current scope is now the frame currently
executed by this thread at the time the command is issued and the
'list', 'll', 'args', 'p', 'pp', 'source' and 'interact' commands are
run in the context of that frame. Note that this frame may bear no
relationship (for a non-deadlocked thread) to that thread's current
activity by the time you are examining the frame.
This command does not stop the thread.
|
entailment
|
def do_help(self, arg):
"""h(elp)
Without argument, print the list of available commands.
With a command name as argument, print help about that command.
"help pdb" shows the full pdb documentation.
"help exec" gives help on the ! command.
"""
if not arg:
return cmd.Cmd.do_help(self, arg)
try:
try:
topic = getattr(self, 'help_' + arg)
return topic()
except AttributeError:
command = getattr(self, 'do_' + arg)
except AttributeError:
self.error('No help for %r' % arg)
else:
if sys.flags.optimize >= 2:
self.error('No help for %r; please do not run Python with -OO '
'if you need command help' % arg)
return
self.message(command.__doc__.rstrip())
|
h(elp)
Without argument, print the list of available commands.
With a command name as argument, print help about that command.
"help pdb" shows the full pdb documentation.
"help exec" gives help on the ! command.
|
entailment
|
def read(self, limit=-1):
"""Read content. See file.read"""
remaining = self.len - self.parent_fd.tell() + self.offset
if limit > remaining or limit == -1:
limit = remaining
return self.parent_fd.read(limit)
|
Read content. See file.read
|
entailment
|
def seek(self, offset, whence=os.SEEK_SET):
"""Seek to position in stream, see file.seek"""
pos = None
if whence == os.SEEK_SET:
pos = self.offset + offset
elif whence == os.SEEK_CUR:
pos = self.tell() + offset
elif whence == os.SEEK_END:
pos = self.offset + self.len + offset
else:
raise ValueError("invalid whence {}".format(whence))
if pos > self.offset + self.len or pos < self.offset:
raise ValueError("seek position beyond chunk area")
self.parent_fd.seek(pos, os.SEEK_SET)
|
Seek to position in stream, see file.seek
|
entailment
|
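A quick illustration of the whence arithmetic above, assuming a hypothetical wrapped region starting at parent offset 10 with length 20 (the wrapper class itself is not shown here, so the geometry is fabricated):

import io
import os

# Fabricated parent stream and chunk geometry for illustration.
parent = io.BytesIO(bytes(100))
offset, length = 10, 20

# SEEK_SET: chunk position 5 maps to parent offset 15.
parent.seek(offset + 5, os.SEEK_SET)
assert parent.tell() == 15

# SEEK_END: 3 bytes before the chunk end maps to parent offset 27.
parent.seek(offset + length - 3, os.SEEK_SET)
assert parent.tell() == 27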
def close(self):
"""Close file, see file.close"""
try:
self.parent_fd.fileno()
except io.UnsupportedOperation:
logger.debug("Not closing parent_fd - reusing existing")
else:
self.parent_fd.close()
|
Close file, see file.close
|
entailment
|
def _build_query(self, uri, params=None, action_token_type=None):
"""Prepare query string"""
if params is None:
params = QueryParams()
params['response_format'] = 'json'
session_token = None
if action_token_type in self._action_tokens:
# Favor action token
using_action_token = True
session_token = self._action_tokens[action_token_type]
else:
using_action_token = False
if self._session:
session_token = self._session['session_token']
if session_token:
params['session_token'] = session_token
# make order of parameters predictable for testing
keys = list(params.keys())
keys.sort()
query = urlencode([tuple([key, params[key]]) for key in keys])
if not using_action_token and self._session:
secret_key_mod = int(self._session['secret_key']) % 256
signature_base = (str(secret_key_mod) +
self._session['time'] +
uri + '?' + query).encode('ascii')
query += '&signature=' + hashlib.md5(signature_base).hexdigest()
return query
|
Prepare query string
|
entailment
|
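To make the signing step concrete, here is a minimal sketch with fabricated session values (the MD5 recipe mirrors the method above; the token and key are made up, and the original routes urlencode through six for Python 2):

import hashlib
from urllib.parse import urlencode

# Fabricated session values for illustration only.
session = {'secret_key': '123456789', 'time': '1234567890.1234'}
uri = '/api/1.3/user/get_info.php'
params = {'response_format': 'json', 'session_token': 'TOKEN'}
query = urlencode(sorted(params.items()))  # predictable parameter order

secret_key_mod = int(session['secret_key']) % 256
signature_base = (str(secret_key_mod) + session['time'] +
                  uri + '?' + query).encode('ascii')
query += '&signature=' + hashlib.md5(signature_base).hexdigest()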
def request(self, action, params=None, action_token_type=None,
upload_info=None, headers=None):
"""Perform request to MediaFire API
action -- "category/name" of method to call
params -- dict of parameters or query string
action_token_type -- action token to use: None, "upload", "image"
upload_info -- in case of upload, dict of "fd" and "filename"
headers -- additional headers to send (used for upload)
session_token and signature generation/update is handled automatically
"""
uri = self._build_uri(action)
if isinstance(params, six.text_type):
query = params
else:
query = self._build_query(uri, params, action_token_type)
if headers is None:
headers = {}
if upload_info is None:
# Use request body for query
data = query
headers['Content-Type'] = FORM_MIMETYPE
else:
# Use query string for query since payload is file
uri += '?' + query
if "filename" in upload_info:
data = MultipartEncoder(
fields={'file': (
upload_info["filename"],
upload_info["fd"],
UPLOAD_MIMETYPE
)}
)
headers["Content-Type"] = data.content_type
else:
data = upload_info["fd"]
headers["Content-Type"] = UPLOAD_MIMETYPE
logger.debug("uri=%s query=%s",
uri, query if not upload_info else None)
try:
# bytes from now on
url = (API_BASE + uri).encode('utf-8')
if isinstance(data, six.text_type):
# request's data is bytes, dict, or filehandle
data = data.encode('utf-8')
response = self.http.post(url, data=data,
headers=headers, stream=True)
except RequestException as ex:
logger.exception("HTTP request failed")
raise MediaFireConnectionError(
"RequestException: {}".format(ex))
return self._process_response(response)
|
Perform request to MediaFire API
action -- "category/name" of method to call
params -- dict of parameters or query string
action_token_type -- action token to use: None, "upload", "image"
upload_info -- in case of upload, dict of "fd" and "filename"
headers -- additional headers to send (used for upload)
session_token and signature generation/update is handled automatically
|
entailment
|
def _process_response(self, response):
"""Parse response"""
forward_raw = False
content_type = response.headers['Content-Type']
if content_type != 'application/json':
logger.debug("headers: %s", response.headers)
# API BUG: text/xml content-type with json payload
# http://forum.mediafiredev.com/showthread.php?136
if content_type == 'text/xml':
# we never request xml, so check it quacks like JSON
if not response.text.lstrip().startswith('{'):
forward_raw = True
else:
# _process_response can't deal with non-json,
# return response as is
forward_raw = True
if forward_raw:
response.raise_for_status()
return response
logger.debug("response: %s", response.text)
# if we are here, then most likely have json
try:
response_node = response.json()['response']
except ValueError:
# promised JSON but failed
raise MediaFireApiError("JSON decode failure")
if response_node.get('new_key', 'no') == 'yes':
self._regenerate_secret_key()
# check for errors
if response_node['result'] != 'Success':
raise MediaFireApiError(response_node['message'],
response_node['error'])
return response_node
|
Parse response
|
entailment
|
def _regenerate_secret_key(self):
"""Regenerate secret key
http://www.mediafire.com/developers/core_api/1.3/getting_started/#call_signature
"""
# Don't regenerate the key if we have none
if self._session and 'secret_key' in self._session:
self._session['secret_key'] = (
int(self._session['secret_key']) * 16807) % 2147483647
|
Regenerate secret key
http://www.mediafire.com/developers/core_api/1.3/getting_started/#call_signature
|
entailment
|
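The update above is the classic Lehmer (MINSTD) linear congruential generator, new = old * 16807 mod (2**31 - 1); one step with a fabricated key:

# Fabricated starting key; the modulus 2147483647 is 2**31 - 1.
old_key = 123456789
new_key = (old_key * 16807) % 2147483647
assert new_key == 469049721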
def session(self, value):
"""Set session token
value -- dict returned by user/get_session_token"""
# unset session token
if value is None:
self._session = None
return
if not isinstance(value, dict):
raise ValueError("session info is required")
session_parsed = {}
for key in ["session_token", "time", "secret_key"]:
if key not in value:
raise ValueError("Missing parameter: {}".format(key))
session_parsed[key] = value[key]
for key in ["ekey", "pkey"]:
# nice to have, but not mandatory
if key in value:
session_parsed[key] = value[key]
self._session = session_parsed
|
Set session token
value -- dict returned by user/get_session_token
|
entailment
|
def set_action_token(self, type_=None, action_token=None):
"""Set action tokens
type_ -- either "upload" or "image"
action_token -- string obtained from user/get_action_token,
set None to remove the token
"""
if action_token is None:
del self._action_tokens[type_]
else:
self._action_tokens[type_] = action_token
|
Set action tokens
type_ -- either "upload" or "image"
action_token -- string obtained from user/get_action_token,
set None to remove the token
|
entailment
|
def user_get_session_token(self, app_id=None, email=None, password=None,
ekey=None, fb_access_token=None,
tw_oauth_token=None,
tw_oauth_token_secret=None, api_key=None):
"""user/get_session_token
http://www.mediafire.com/developers/core_api/1.3/user/#get_session_token
"""
if app_id is None:
raise ValueError("app_id must be defined")
params = QueryParams({
'application_id': str(app_id),
'token_version': 2,
'response_format': 'json'
})
if fb_access_token:
params['fb_access_token'] = fb_access_token
signature_keys = ['fb_access_token']
elif tw_oauth_token and tw_oauth_token_secret:
params['tw_oauth_token'] = tw_oauth_token
params['tw_oauth_token_secret'] = tw_oauth_token_secret
signature_keys = ['tw_oauth_token',
'tw_oauth_token_secret']
elif (email or ekey) and password:
signature_keys = []
if email:
signature_keys.append('email')
params['email'] = email
if ekey:
signature_keys.append('ekey')
params['ekey'] = ekey
params['password'] = password
signature_keys.append('password')
else:
raise ValueError("Credentials not provided")
signature_keys.append('application_id')
signature = hashlib.sha1()
for key in signature_keys:
signature.update(str(params[key]).encode('ascii'))
# Note: If the app uses a callback URL to provide its API key,
# or if it does not have the "Require Secret Key" option checked,
# then the API key may be omitted from the signature
if api_key:
signature.update(api_key.encode('ascii'))
query = urlencode(params)
query += '&signature=' + signature.hexdigest()
return self.request('user/get_session_token', params=query)
|
user/get_session_token
http://www.mediafire.com/developers/core_api/1.3/user/#get_session_token
|
entailment
|
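A sketch of the credential signature for the email/password path, using made-up credentials (the key order mirrors signature_keys above: email, password, then application_id, with the API key optionally appended):

import hashlib

# Made-up credentials for illustration only.
email, password, app_id = 'user@example.com', 'hunter2', '42'
api_key = None  # may be omitted, per the note in the method above

signature = hashlib.sha1()
for value in (email, password, app_id):
    signature.update(str(value).encode('ascii'))
if api_key:
    signature.update(api_key.encode('ascii'))
print(signature.hexdigest())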
def user_set_avatar(self, action=None, quick_key=None, url=None):
"""user/set_avatar
http://www.mediafire.com/developers/core_api/1.3/user/#set_avatar
"""
return self.request("user/set_avatar", QueryParams({
"action": action,
"quick_key": quick_key,
"url": url
}))
|
user/set_avatar
http://www.mediafire.com/developers/core_api/1.3/user/#set_avatar
|
entailment
|
def user_update(self, display_name=None, first_name=None, last_name=None,
email=None, password=None, current_password=None,
birth_date=None, gender=None, website=None, subdomain=None,
location=None, newsletter=None, primary_usage=None,
timezone=None):
"""
user/update
http://www.mediafire.com/developers/core_api/1.3/user/#update
"""
return self.request("user/update", QueryParams({
"display_name": display_name,
"first_name": first_name,
"last_name": last_name,
"email": email,
"password": password,
"current_password": current_password,
"birth_date": birth_date,
"gender": gender,
"website": website,
"subdomain": subdomain,
"location": location,
"newsletter": newsletter,
"primary_usage": primary_usage,
"timezone": timezone
}))
|
user/update
http://www.mediafire.com/developers/core_api/1.3/user/#update
|
entailment
|
def folder_get_info(self, folder_key=None, device_id=None, details=None):
"""folder/get_info
http://www.mediafire.com/developers/core_api/1.3/folder/#get_info
"""
return self.request('folder/get_info', QueryParams({
'folder_key': folder_key,
'device_id': device_id,
'details': details
}))
|
folder/get_info
http://www.mediafire.com/developers/core_api/1.3/folder/#get_info
|
entailment
|
def folder_get_content(self, folder_key=None, content_type=None,
filter_=None, device_id=None, order_by=None,
order_direction=None, chunk=None, details=None,
chunk_size=None):
"""folder/get_content
http://www.mediafire.com/developers/core_api/1.3/folder/#get_content
"""
return self.request('folder/get_content', QueryParams({
'folder_key': folder_key,
'content_type': content_type,
'filter': filter_,
'device_id': device_id,
'order_by': order_by,
'order_direction': order_direction,
'chunk': chunk,
'details': details,
'chunk_size': chunk_size
}))
|
folder/get_content
http://www.mediafire.com/developers/core_api/1.3/folder/#get_content
|
entailment
|
def folder_update(self, folder_key, foldername=None, description=None,
privacy=None, privacy_recursive=None, mtime=None):
"""folder/update
http://www.mediafire.com/developers/core_api/1.3/folder/#update
"""
return self.request('folder/update', QueryParams({
'folder_key': folder_key,
'foldername': foldername,
'description': description,
'privacy': privacy,
'privacy_recursive': privacy_recursive,
'mtime': mtime
}))
|
folder/update
http://www.mediafire.com/developers/core_api/1.3/folder/#update
|
entailment
|
def folder_create(self, foldername=None, parent_key=None,
action_on_duplicate=None, mtime=None):
"""folder/create
http://www.mediafire.com/developers/core_api/1.3/folder/#create
"""
return self.request('folder/create', QueryParams({
'foldername': foldername,
'parent_key': parent_key,
'action_on_duplicate': action_on_duplicate,
'mtime': mtime
}))
|
folder/create
http://www.mediafire.com/developers/core_api/1.3/folder/#create
|
entailment
|
def upload_check(self, filename=None, folder_key=None, filedrop_key=None,
size=None, hash_=None, path=None, resumable=None):
"""upload/check
http://www.mediafire.com/developers/core_api/1.3/upload/#check
"""
return self.request('upload/check', QueryParams({
'filename': filename,
'folder_key': folder_key,
'filedrop_key': filedrop_key,
'size': size,
'hash': hash_,
'path': path,
'resumable': resumable
}))
|
upload/check
http://www.mediafire.com/developers/core_api/1.3/upload/#check
|
entailment
|
def upload_simple(self, fd, filename, folder_key=None, path=None,
filedrop_key=None, action_on_duplicate=None,
mtime=None, file_size=None, file_hash=None):
"""upload/simple
http://www.mediafire.com/developers/core_api/1.3/upload/#simple
"""
action = 'upload/simple'
params = QueryParams({
'folder_key': folder_key,
'path': path,
'filedrop_key': filedrop_key,
'action_on_duplicate': action_on_duplicate,
'mtime': mtime
})
headers = QueryParams({
'X-Filesize': str(file_size),
'X-Filehash': file_hash,
'X-Filename': filename.encode('utf-8')
})
upload_info = {
"fd": fd,
}
return self.request(action, params, action_token_type="upload",
upload_info=upload_info, headers=headers)
|
upload/simple
http://www.mediafire.com/developers/core_api/1.3/upload/#simple
|
entailment
|
def upload_resumable(self, fd, filesize, filehash, unit_hash, unit_id,
unit_size, quick_key=None, action_on_duplicate=None,
mtime=None, version_control=None, folder_key=None,
filedrop_key=None, path=None, previous_hash=None):
"""upload/resumable
http://www.mediafire.com/developers/core_api/1.3/upload/#resumable
"""
action = 'upload/resumable'
headers = {
'x-filesize': str(filesize),
'x-filehash': filehash,
'x-unit-hash': unit_hash,
'x-unit-id': str(unit_id),
'x-unit-size': str(unit_size)
}
params = QueryParams({
'quick_key': quick_key,
'action_on_duplicate': action_on_duplicate,
'mtime': mtime,
'version_control': version_control,
'folder_key': folder_key,
'filedrop_key': filedrop_key,
'path': path,
'previous_hash': previous_hash
})
upload_info = {
"fd": fd,
"filename": "chunk"
}
return self.request(action, params, action_token_type="upload",
upload_info=upload_info, headers=headers)
|
upload/resumable
http://www.mediafire.com/developers/core_api/1.3/upload/#resumable
|
entailment
|
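As a rough sketch of how a caller might produce the per-unit arguments for upload/resumable, here is one way to slice a file into units; the SHA-256 unit hashing is an assumption about the protocol, not something established by the code above:

import hashlib
import io

def iter_units(fd, unit_size):
    """Yield (unit_id, unit_hash, chunk) for each unit of the file.
    SHA-256 is an assumption; consult the MediaFire docs for the
    hash the API actually expects."""
    unit_id = 0
    while True:
        chunk = fd.read(unit_size)
        if not chunk:
            break
        yield unit_id, hashlib.sha256(chunk).hexdigest(), chunk
        unit_id += 1

# 10 bytes with 4-byte units -> units 0, 1, 2.
for unit_id, unit_hash, chunk in iter_units(io.BytesIO(b'0123456789'), 4):
    print(unit_id, len(chunk), unit_hash[:8])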
def upload_instant(self, filename, size, hash_, quick_key=None,
folder_key=None, filedrop_key=None, path=None,
action_on_duplicate=None, mtime=None,
version_control=None, previous_hash=None):
"""upload/instant
http://www.mediafire.com/developers/core_api/1.3/upload/#instant
"""
return self.request('upload/instant', QueryParams({
'filename': filename,
'size': size,
'hash': hash_,
'quick_key': quick_key,
'folder_key': folder_key,
'filedrop_key': filedrop_key,
'path': path,
'action_on_duplicate': action_on_duplicate,
'mtime': mtime,
'version_control': version_control,
'previous_hash': previous_hash
}))
|
upload/instant
http://www.mediafire.com/developers/core_api/1.3/upload/#instant
|
entailment
|
def file_update(self, quick_key, filename=None, description=None,
mtime=None, privacy=None):
"""file/update
http://www.mediafire.com/developers/core_api/1.3/file/#update
"""
return self.request('file/update', QueryParams({
'quick_key': quick_key,
'filename': filename,
'description': description,
'mtime': mtime,
'privacy': privacy
}))
|
file/update
http://www.mediafire.com/developers/core_api/1.3/file/#update
|
entailment
|
def file_update_file(self, quick_key, file_extension=None, filename=None,
description=None, mtime=None, privacy=None,
timezone=None):
"""file/update_file
http://www.mediafire.com/developers/core_api/1.3/file/#update_file
"""
return self.request('file/update_file', QueryParams({
'quick_key': quick_key,
'file_extension': file_extension,
'filename': filename,
'description': description,
'mtime': mtime,
'privacy': privacy,
'timezone': timezone
}))
|
file/update_file
http://www.mediafire.com/developers/core_api/1.3/file/#update_file
|
entailment
|
def file_zip(self, keys, confirm_download=None, meta_only=None):
"""file/zip
http://www.mediafire.com/developers/core_api/1.3/file/#zip
"""
return self.request('file/zip', QueryParams({
'keys': keys,
'confirm_download': confirm_download,
'meta_only': meta_only
}))
|
file/zip
http://www.mediafire.com/developers/core_api/1.3/file/#zip
|
entailment
|
def _reset(self):
'''Reset all of our stateful variables'''
self._socket = None
# The pending messages we have to send, and the current buffer we're
# sending
self._pending = deque()
self._out_buffer = ''
# Our read buffer
self._buffer = ''
# The identify response we last received from the server
self._identify_response = {}
# Our ready state
self.last_ready_sent = 0
self.ready = 0
|
Reset all of our stateful variables
|
entailment
|
def connect(self, force=False):
'''Establish a connection'''
# Don't re-establish existing connections
if not force and self.alive():
return True
self._reset()
# Otherwise, try to connect
with self._socket_lock:
try:
logger.info('Creating socket...')
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.settimeout(self._timeout)
logger.info('Connecting to %s, %s', self.host, self.port)
self._socket.connect((self.host, self.port))
# Set our socket's blocking state to whatever ours is
self._socket.setblocking(self._blocking)
# Safely write our magic
self._pending.append(constants.MAGIC_V2)
while self.pending():
self.flush()
# And send our identify command
self.identify(self._identify_options)
while self.pending():
self.flush()
self._reconnnection_counter.success()
# Wait until we've gotten a response to IDENTIFY, try to read
# one. Also, only spend up to the provided timeout waiting to
# establish the connection.
limit = time.time() + self._timeout
responses = self._read(1)
while (not responses) and (time.time() < limit):
responses = self._read(1)
if not responses:
raise ConnectionTimeoutException(
'Read identify response timed out (%ss)' % self._timeout)
self.identified(responses[0])
return True
except:
logger.exception('Failed to connect')
if self._socket:
self._socket.close()
self._reconnnection_counter.failed()
self._reset()
return False
|
Establish a connection
|
entailment
|
def close(self):
'''Close our connection'''
# Flush any unsent message
try:
while self.pending():
self.flush()
except socket.error:
pass
with self._socket_lock:
try:
if self._socket:
self._socket.close()
finally:
self._reset()
|
Close our connection
|
entailment
|
def socket(self, blocking=True):
'''Blockingly yield the socket'''
# If the socket is available, then yield it. Otherwise, yield nothing
if self._socket_lock.acquire(blocking):
try:
yield self._socket
finally:
self._socket_lock.release()
|
Blockingly yield the socket
|
entailment
|
def identified(self, res):
'''Handle a response to our 'identify' command. Returns response'''
# If they support it, they should give us a JSON blob which we should
# inspect.
try:
res.data = json.loads(res.data)
self._identify_response = res.data
logger.info('Got identify response: %s', res.data)
except:
logger.warn('Server does not support feature negotiation')
self._identify_response = {}
# Save our max ready count unless it's not provided
self.max_rdy_count = self._identify_response.get(
'max_rdy_count', self.max_rdy_count)
if self._identify_options.get('tls_v1', False):
if not self._identify_response.get('tls_v1', False):
raise UnsupportedException(
'NSQd instance does not support TLS')
else:
self._socket = TLSSocket.wrap_socket(self._socket)
# Now is the appropriate time to send auth
if self._identify_response.get('auth_required', False):
if not self._auth_secret:
raise UnsupportedException(
'Auth required but not provided')
else:
self.auth(self._auth_secret)
# If we're not talking over TLS, warn the user
if not self._identify_response.get('tls_v1', False):
logger.warn('Using AUTH without TLS')
elif self._auth_secret:
logger.warn('Authentication secret provided but not required')
return res
|
Handle a response to our 'identify' command. Returns response
|
entailment
|
def setblocking(self, blocking):
'''Set whether or not this connection is blocking'''
for sock in self.socket():
sock.setblocking(blocking)
self._blocking = blocking
|
Set whether or not this connection is blocking
|
entailment
|
def flush(self):
'''Flush some of the waiting messages, returns count written'''
# When profiling, we found that while there was some efficiency to be
# gained elsewhere, the big performance hit is sending lots of small
# messages at a time. In particular, consumers send many 'FIN' messages
# which are very small indeed and the cost of dispatching so many system
# calls is very high. Instead, we prefer to glom together many messages
# into a single string to send at once.
total = 0
for sock in self.socket(blocking=False):
# If there's nothing left in the out buffer, take whatever's in the
# pending queue.
#
# When using SSL, if the socket throws 'SSL_WANT_WRITE', then the
# subsequent send requests have to send the same buffer.
pending = self._pending
data = self._out_buffer or ''.join(
pending.popleft() for _ in xrange(len(pending)))
try:
# Try to send as much of the first message as possible
total = sock.send(data)
except socket.error as exc:
# Catch (errno, message)-type socket.errors
if exc.args[0] not in self.WOULD_BLOCK_ERRS:
raise
self._out_buffer = data
else:
self._out_buffer = None
finally:
if total < len(data):
# Save the rest of the message that could not be sent
self._pending.appendleft(data[total:])
return total
|
Flush some of the waiting messages, returns count written
|
entailment
|
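The coalescing trick described above can be reduced to a couple of lines: drain the pending deque once and send the concatenation, so many tiny FIN frames cost a single system call (bytes are used here; the original buffers str on Python 2):

from collections import deque

pending = deque([b'FIN 1\n', b'FIN 2\n', b'FIN 3\n'])
data = b''.join(pending.popleft() for _ in range(len(pending)))
assert data == b'FIN 1\nFIN 2\nFIN 3\n' and not pending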
def send(self, command, message=None):
'''Send a command over the socket with length encoded'''
if message:
joined = command + constants.NL + util.pack(message)
else:
joined = command + constants.NL
if self._blocking:
for sock in self.socket():
sock.sendall(joined)
else:
self._pending.append(joined)
|
Send a command over the socket with length encoded
|
entailment
|
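util.pack is not shown here, but a plausible stand-in (an assumption, not the library's actual code) is the NSQ convention of prefixing a body with its 4-byte big-endian size:

import struct

def pack(message):
    # Assumed stand-in for util.pack: 4-byte big-endian length prefix.
    return struct.pack('>l', len(message)) + message

frame = b'PUB topic' + b'\n' + pack(b'hello')
assert frame == b'PUB topic\n\x00\x00\x00\x05hello'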
def identify(self, data):
'''Send an identification message'''
return self.send(constants.IDENTIFY, json.dumps(data))
|
Send an identification message
|
entailment
|
def sub(self, topic, channel):
'''Subscribe to a topic/channel'''
return self.send(' '.join((constants.SUB, topic, channel)))
|
Subscribe to a topic/channel
|
entailment
|
def pub(self, topic, message):
'''Publish to a topic'''
return self.send(' '.join((constants.PUB, topic)), message)
|
Publish to a topic
|
entailment
|
def mpub(self, topic, *messages):
'''Publish multiple messages to a topic'''
return self.send(constants.MPUB + ' ' + topic, messages)
|
Publish multiple messages to a topic
|
entailment
|
def rdy(self, count):
'''Indicate that you're ready to receive'''
self.ready = count
self.last_ready_sent = count
return self.send(constants.RDY + ' ' + str(count))
|
Indicate that you're ready to receive
|
entailment
|
def req(self, message_id, timeout):
'''Re-queue a message'''
return self.send(constants.REQ + ' ' + message_id + ' ' + str(timeout))
|
Re-queue a message
|
entailment
|
def _read(self, limit=1000):
'''Return all the responses read'''
# It's important to know that it may return no responses or multiple
# responses. It depends on how the buffering works out. First, read from
# the socket
for sock in self.socket():
if sock is None:
# Race condition. Connection has been closed.
return []
try:
packet = sock.recv(4096)
except socket.timeout:
# If the socket times out, return nothing
return []
except socket.error as exc:
# Catch (errno, message)-type socket.errors
if exc.args[0] in self.WOULD_BLOCK_ERRS:
return []
else:
raise
# Append our newly-read data to our buffer
self._buffer += packet
responses = []
total = 0
buf = self._buffer
remaining = len(buf)
while limit and (remaining >= 4):
size = struct.unpack('>l', buf[total:(total + 4)])[0]
# Now check to see if there's enough left in the buffer to read
# the message.
if (remaining - 4) >= size:
responses.append(Response.from_raw(
self, buf[(total + 4):(total + size + 4)]))
total += (size + 4)
remaining -= (size + 4)
limit -= 1
else:
break
self._buffer = self._buffer[total:]
return responses
|
Return all the responses read
|
entailment
|
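The framing logic above, isolated: each response is a 4-byte big-endian size followed by that many payload bytes, and a trailing partial frame stays in the buffer for the next read. A self-contained sketch:

import struct

def split_frames(buf):
    frames, total = [], 0
    while len(buf) - total >= 4:
        size = struct.unpack('>l', buf[total:total + 4])[0]
        if len(buf) - total - 4 < size:
            break  # partial frame: wait for more data
        frames.append(buf[total + 4:total + 4 + size])
        total += 4 + size
    return frames, buf[total:]

buf = struct.pack('>l', 2) + b'OK' + struct.pack('>l', 5) + b'hel'
frames, rest = split_frames(buf)
assert frames == [b'OK'] and rest == struct.pack('>l', 5) + b'hel'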
def read(self):
'''Responses from an established socket'''
responses = self._read()
# Determine the number of messages in here and decrement our ready
# count appropriately
self.ready -= sum(
map(int, (r.frame_type == Message.FRAME_TYPE for r in responses)))
return responses
|
Responses from an established socket
|
entailment
|
def discover(self, topic):
'''Run the discovery mechanism'''
logger.info('Discovering on topic %s', topic)
producers = []
for lookupd in self._lookupd:
logger.info('Discovering on %s', lookupd)
try:
# Find all the current producers on this instance
for producer in lookupd.lookup(topic)['producers']:
logger.info('Found producer %s on %s', producer, lookupd)
producers.append(
(producer['broadcast_address'], producer['tcp_port']))
except ClientException:
logger.exception('Failed to query %s', lookupd)
new = []
for host, port in producers:
conn = self._connections.get((host, port))
if not conn:
logger.info('Discovered %s:%s', host, port)
new.append(self.connect(host, port))
elif not conn.alive():
logger.info('Reconnecting to %s:%s', host, port)
if conn.connect():
conn.setblocking(0)
self.reconnected(conn)
else:
logger.debug('Connection to %s:%s still alive', host, port)
# And return all the new connections
return [conn for conn in new if conn]
|
Run the discovery mechanism
|
entailment
|
def check_connections(self):
'''Connect to all the appropriate instances'''
logger.info('Checking connections')
if self._lookupd:
self.discover(self._topic)
# Make sure we're connected to all the prescribed hosts
for hostspec in self._nsqd_tcp_addresses:
logger.debug('Checking nsqd instance %s', hostspec)
host, port = hostspec.split(':')
port = int(port)
conn = self._connections.get((host, port), None)
# If there is no connection to it, we have to try to connect
if not conn:
logger.info('Connecting to %s:%s', host, port)
self.connect(host, port)
elif not conn.alive():
# If we've connected to it before, but it's no longer alive,
# we'll have to make a decision about when to try to reconnect
# to it, if we need to reconnect to it at all
if conn.ready_to_reconnect():
logger.info('Reconnecting to %s:%s', host, port)
if conn.connect():
conn.setblocking(0)
self.reconnected(conn)
else:
logger.debug('Checking freshness')
now = time.time()
time_check = math.ceil(now - self.last_recv_timestamp)
if time_check >= ((self.heartbeat_interval * 2) / 1000.0):
if conn.ready_to_reconnect():
logger.info('Reconnecting to %s:%s', host, port)
if conn.connect():
conn.setblocking(0)
self.reconnected(conn)
|
Connect to all the appropriate instances
|
entailment
|
def connection_checker(self):
'''Run periodic reconnection checks'''
thread = ConnectionChecker(self)
logger.info('Starting connection-checker thread')
thread.start()
try:
yield thread
finally:
logger.info('Stopping connection-checker')
thread.stop()
logger.info('Joining connection-checker')
thread.join()
|
Run periodic reconnection checks
|
entailment
|
def connect(self, host, port):
'''Connect to the provided host, port'''
conn = connection.Connection(host, port,
reconnection_backoff=self._reconnection_backoff,
auth_secret=self._auth_secret,
timeout=self._connect_timeout,
**self._identify_options)
if conn.alive():
conn.setblocking(0)
self.add(conn)
return conn
|
Connect to the provided host, port
|
entailment
|
def add(self, connection):
'''Add a connection'''
key = (connection.host, connection.port)
with self._lock:
if key not in self._connections:
self._connections[key] = connection
self.added(connection)
return connection
else:
return None
|
Add a connection
|
entailment
|
def remove(self, connection):
'''Remove a connection'''
key = (connection.host, connection.port)
with self._lock:
found = self._connections.pop(key, None)
try:
self.close_connection(found)
except Exception as exc:
logger.warn('Failed to close %s: %s', connection, exc)
return found
|
Remove a connection
|
entailment
|
def read(self):
'''Read from any of the connections that need it'''
# We'll check all living connections
connections = [c for c in self.connections() if c.alive()]
if not connections:
# If there are no connections, obviously we return no messages, but
# we should wait the duration of the timeout
time.sleep(self._timeout)
return []
# Not all connections need to be written to, so we'll only concern
# ourselves with those that require writes
writes = [c for c in connections if c.pending()]
try:
readable, writable, exceptable = select.select(
connections, writes, connections, self._timeout)
except exceptions.ConnectionClosedException:
logger.exception('Tried selecting on closed client')
return []
except select.error:
logger.exception('Error running select')
return []
# If we returned because the timeout interval passed, log it and return
if not (readable or writable or exceptable):
logger.debug('Timed out...')
return []
responses = []
# For each readable socket, we'll try to read some responses
for conn in readable:
try:
for res in conn.read():
# We'll capture heartbeats and respond to them automatically
if (isinstance(res, Response) and res.data == HEARTBEAT):
logger.info('Sending heartbeat to %s', conn)
conn.nop()
logger.debug('Setting last_recv_timestamp')
self.last_recv_timestamp = time.time()
continue
elif isinstance(res, Error):
nonfatal = (
exceptions.FinFailedException,
exceptions.ReqFailedException,
exceptions.TouchFailedException
)
if not isinstance(res.exception(), nonfatal):
# If it's not any of the non-fatal exceptions, then
# we have to close this connection
logger.error(
'Closing %s: %s', conn, res.exception())
self.close_connection(conn)
responses.append(res)
logger.debug('Setting last_recv_timestamp')
self.last_recv_timestamp = time.time()
except exceptions.NSQException:
logger.exception('Failed to read from %s', conn)
self.close_connection(conn)
except socket.error:
logger.exception('Failed to read from %s', conn)
self.close_connection(conn)
# For each writable socket, flush some data out
for conn in writable:
try:
conn.flush()
except socket.error:
logger.exception('Failed to flush %s', conn)
self.close_connection(conn)
# For each connection with an exception, try to close it and remove it
# from our connections
for conn in exceptable:
self.close_connection(conn)
return responses
|
Read from any of the connections that need it
|
entailment
|
def random_connection(self):
'''Pick a random living connection'''
# While at the moment there's no need for this to be a context manager
# per se, I would like to use that interface since I anticipate
# adding some wrapping around it at some point.
yield random.choice(
[conn for conn in self.connections() if conn.alive()])
|
Pick a random living connection
|
entailment
|
def wait_response(self):
'''Wait for a response'''
responses = self.read()
while not responses:
responses = self.read()
return responses
|
Wait for a response
|
entailment
|
def pub(self, topic, message):
'''Publish the provided message to the provided topic'''
with self.random_connection() as client:
client.pub(topic, message)
return self.wait_response()
|
Publish the provided message to the provided topic
|
entailment
|
def mpub(self, topic, *messages):
'''Publish messages to a topic'''
with self.random_connection() as client:
client.mpub(topic, *messages)
return self.wait_response()
|
Publish messages to a topic
|
entailment
|
def create_socket(self):
"""Create a socket for the daemon, depending on the directory location.
Args:
config_dir (str): The absolute path to the config directory used by the daemon.
Returns:
socket.socket: The daemon socket. Clients connect to this socket.
"""
socket_path = os.path.join(self.config_dir, 'pueue.sock')
# Create the socket and exit with 1 if it can't be created
try:
if os.path.exists(socket_path):
os.remove(socket_path)
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(socket_path)
self.socket.setblocking(0)
self.socket.listen(0)
# Set file permissions
os.chmod(socket_path, stat.S_IRWXU)
except Exception:
self.logger.error("Daemon couldn't socket. Aborting")
self.logger.exception()
sys.exit(1)
return self.socket
|
Create a socket for the daemon, depending on the directory location.
Args:
config_dir (str): The absolute path to the config directory used by the daemon.
Returns:
socket.socket: The daemon socket. Clients connect to this socket.
|
entailment
|
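For completeness, a hypothetical client side of this socket: connect to the same path, send a pickled payload, and read back the pickled answer (the payload shape follows respond_client and the main loop below; the path assumes the default config directory):

import os
import pickle
import socket

# Hypothetical client; assumes the daemon above is listening.
socket_path = os.path.join(os.path.expanduser('~'), '.config/pueue/pueue.sock')
client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
client.connect(socket_path)
client.sendall(pickle.dumps({'mode': 'status'}, -1))
answer = pickle.loads(client.recv(1048576))
client.close()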
def initialize_directories(self, root_dir):
"""Create all directories needed for logs and configs."""
if not root_dir:
root_dir = os.path.expanduser('~')
# Create config directory, if it doesn't exist
self.config_dir = os.path.join(root_dir, '.config/pueue')
if not os.path.exists(self.config_dir):
os.makedirs(self.config_dir)
|
Create all directories needed for logs and configs.
|
entailment
|
def respond_client(self, answer, socket):
"""Send an answer to the client."""
response = pickle.dumps(answer, -1)
socket.sendall(response)
self.read_list.remove(socket)
socket.close()
|
Send an answer to the client.
|
entailment
|
def read_config(self):
"""Read a previous configuration file or create a new with default values."""
config_file = os.path.join(self.config_dir, 'pueue.ini')
self.config = configparser.ConfigParser()
# Try to get configuration file and return it
# If this doesn't work, a new default config file will be created
if os.path.exists(config_file):
try:
self.config.read(config_file)
return
except Exception:
self.logger.error('Error while parsing config file. Deleting old config')
self.logger.exception()
self.config['default'] = {
'resumeAfterStart': False,
'maxProcesses': 1,
'customShell': 'default',
}
self.config['log'] = {
'logTime': 60*60*24*14,
}
self.write_config()
|
Read a previous configuration file or create a new with default values.
|
entailment
|
def write_config(self):
"""Write the current configuration to the config file."""
config_file = os.path.join(self.config_dir, 'pueue.ini')
with open(config_file, 'w') as file_descriptor:
self.config.write(file_descriptor)
|
Write the current configuration to the config file.
|
entailment
|
def main(self):
"""The main function containing the loop for communication and process management.
This function is the heart of the daemon.
It is responsible for:
- Client communication
- Executing commands from clients
- Updating the status of processes by polling the ProcessHandler.
- Logging
- Cleanup on exit
"""
try:
while self.running:
# Trigger the processing of finished processes by the ProcessHandler.
# If there are finished processes we write the log to keep it up to date.
if self.process_handler.check_finished():
self.logger.write(self.queue)
if self.reset and self.process_handler.all_finished():
# Rotate log and reset queue
self.logger.rotate(self.queue)
self.queue.reset()
self.reset = False
# Check if the ProcessHandler has any free slots to spawn a new process
if not self.paused and not self.reset and self.running:
self.process_handler.check_for_new()
# This is the communication section of the daemon.
# 1. Receive message from the client
# 2. Check payload and call respective function with payload as parameter.
# 3. Execute logic
# 4. Return payload with response to client
# Create list for waitable objects
readable, writable, failed = select.select(self.read_list, [], [], 1)
for waiting_socket in readable:
if waiting_socket is self.socket:
# Listening for clients to connect.
# Client sockets are added to readlist to be processed.
try:
client_socket, client_address = self.socket.accept()
self.read_list.append(client_socket)
except Exception:
self.logger.warning('Daemon rejected client')
else:
# Trying to receive instruction from client socket
try:
instruction = waiting_socket.recv(1048576)
except (EOFError, OSError):
self.logger.warning('Client died while sending message, dropping received data.')
# Remove client socket
self.read_list.remove(waiting_socket)
waiting_socket.close()
instruction = None
# Check for valid instruction
if instruction is not None:
# Check if received data can be unpickled.
try:
payload = pickle.loads(instruction)
except EOFError:
# Instruction is ignored if it can't be unpickled
self.logger.error('Received message is incomplete, dropping received data.')
self.read_list.remove(waiting_socket)
waiting_socket.close()
# Set invalid payload
payload = {'mode': ''}
functions = {
'add': self.add,
'remove': self.remove,
'edit': self.edit_command,
'switch': self.switch,
'send': self.pipe_to_process,
'status': self.send_status,
'start': self.start,
'pause': self.pause,
'stash': self.stash,
'enqueue': self.enqueue,
'restart': self.restart,
'kill': self.kill_process,
'reset': self.reset_everything,
'clear': self.clear,
'config': self.set_config,
'STOPDAEMON': self.stop_daemon,
}
if payload['mode'] in functions.keys():
self.logger.debug('Payload received:')
self.logger.debug(payload)
response = functions[payload['mode']](payload)
self.logger.debug('Sending payload:')
self.logger.debug(response)
try:
self.respond_client(response, waiting_socket)
except (BrokenPipeError):
self.logger.warning('Client disconnected during message dispatching. Function successfully executed anyway.')
# Remove client socket
self.read_list.remove(waiting_socket)
waiting_socket.close()
instruction = None
else:
self.respond_client({'message': 'Unknown Command',
'status': 'error'}, waiting_socket)
except Exception:
self.logger.exception()
# Wait for killed or stopped processes to finish (cleanup)
self.process_handler.wait_for_finish()
# Close socket, clean everything up and exit
self.socket.close()
cleanup(self.config_dir)
sys.exit(0)
|
The main function containing the loop for communication and process management.
This function is the heart of the daemon.
It is responsible for:
- Client communication
- Executing commands from clients
- Updating the status of processes by polling the ProcessHandler.
- Logging
- Cleanup on exit
|
entailment
|
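The command handling in the loop above is a plain dispatch table; reduced to its core, the pattern looks like this (the handler here is a stand-in, not the daemon's real method):

def add(payload):
    # Stand-in handler for illustration.
    return {'message': 'added', 'status': 'success'}

functions = {'add': add}
payload = {'mode': 'add'}
handler = functions.get(payload['mode'])
response = handler(payload) if handler else {'message': 'Unknown Command',
                                             'status': 'error'}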
def stop_daemon(self, payload=None):
"""Kill current processes and initiate daemon shutdown.
The daemon will shut down after a last check on all killed processes.
"""
kill_signal = signals['9']
self.process_handler.kill_all(kill_signal, True)
self.running = False
return {'message': 'Pueue daemon shutting down',
'status': 'success'}
|
Kill current processes and initiate daemon shutdown.
The daemon will shut down after a last check on all killed processes.
|
entailment
|
def set_config(self, payload):
"""Update the current config depending on the payload and save it."""
self.config['default'][payload['option']] = str(payload['value'])
if payload['option'] == 'maxProcesses':
self.process_handler.set_max(payload['value'])
if payload['option'] == 'customShell':
path = payload['value']
if os.path.isfile(path) and os.access(path, os.X_OK):
self.process_handler.set_shell(path)
elif path == 'default':
self.process_handler.set_shell()
else:
return {'message': "File at path doesn't exist or is not executable.",
'status': 'error'}
self.write_config()
return {'message': 'Configuration successfully updated.',
'status': 'success'}
|
Update the current config depending on the payload and save it.
|
entailment
|
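Two example payloads that set_config accepts, matching the two options handled above; the values are illustrative.

# Raise the number of concurrently running processes.
max_payload = {'mode': 'config', 'option': 'maxProcesses', 'value': 2}

# Use a custom shell; the path must exist and be executable,
# otherwise set_config answers with an error response.
shell_payload = {'mode': 'config', 'option': 'customShell', 'value': '/bin/bash'}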
def pipe_to_process(self, payload):
"""Send something to stdin of a specific process."""
message = payload['input']
key = payload['key']
if not self.process_handler.is_running(key):
return {'message': 'No running process for this key',
'status': 'error'}
self.process_handler.send_to_process(message, key)
return {'message': 'Message sent',
'status': 'success'}
|
Send something to stdin of a specific process.
|
entailment
|
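An example payload for pipe_to_process, e.g. to answer a confirmation prompt of the process at key 0 (the dict keys come from the function above, the values are illustrative):

payload = {'mode': 'send', 'key': 0, 'input': 'y\n'}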
def send_status(self, payload):
"""Send the daemon status and the current queue for displaying."""
answer = {}
data = []
# Get daemon status
if self.paused:
answer['status'] = 'paused'
else:
answer['status'] = 'running'
# Add the current queue, or a message that the queue is empty
if len(self.queue) > 0:
data = deepcopy(self.queue.queue)
# Remove stderr and stdout output for the transfer.
# Some outputs are way too big for the socket buffer,
# and the client doesn't need them.
for key, item in data.items():
if 'stderr' in item:
del item['stderr']
if 'stdout' in item:
del item['stdout']
else:
data = 'Queue is empty'
answer['data'] = data
return answer
|
Send the daemon status and the current queue for displaying.
|
entailment
|
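The deep copy matters here: stdout/stderr are deleted only from the copy, so the daemon's own queue entries keep their output. A sketch of the answer shape, with made-up entries:

answer = {
    'status': 'running',  # or 'paused'
    'data': {             # or the string 'Queue is empty'
        0: {'command': 'sleep 60', 'status': 'running'},
        1: {'command': 'make test', 'status': 'queued'},
    },
}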
def reset_everything(self, payload):
"""Kill all processes, delete the queue and clean everything up."""
kill_signal = signals['9']
self.process_handler.kill_all(kill_signal, True)
self.process_handler.wait_for_finish()
self.reset = True
answer = {'message': 'Resetting current queue', 'status': 'success'}
return answer
|
Kill all processes, delete the queue and clean everything up.
|
entailment
|
def clear(self, payload):
"""Clear queue from any `done` or `failed` entries.
The log will be rotated once. Otherwise we would lose all logs from
those finished processes.
"""
self.logger.rotate(self.queue)
self.queue.clear()
self.logger.write(self.queue)
answer = {'message': 'Finished entries have been removed.', 'status': 'success'}
return answer
|
Clear queue from any `done` or `failed` entries.
The log will be rotated once. Otherwise we would lose all logs from
those finished processes.
|
entailment
|
def start(self, payload):
"""Start the daemon and all processes or only specific processes."""
# Start specific processes, if `keys` is given in the payload
if payload.get('keys'):
succeeded = []
failed = []
for key in payload.get('keys'):
success = self.process_handler.start_process(key)
if success:
succeeded.append(str(key))
else:
failed.append(str(key))
message = ''
if len(succeeded) > 0:
message += 'Started processes: {}.'.format(', '.join(succeeded))
status = 'success'
if len(failed) > 0:
message += '\nNo paused, queued or stashed process for keys: {}'.format(', '.join(failed))
status = 'error'
answer = {'message': message.strip(), 'status': status}
# Start all processes and the daemon
else:
self.process_handler.start_all()
if self.paused:
self.paused = False
answer = {'message': 'Daemon and all processes started.',
'status': 'success'}
else:
answer = {'message': 'Daemon already running, starting all processes.',
'status': 'success'}
return answer
|
Start the daemon and all processes or only specific processes.
|
entailment
|
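start, pause, stash, kill_process, remove and restart all repeat the same succeeded/failed bookkeeping. A sketch of a shared helper that could factor it out (the helper is hypothetical, not part of the daemon):

def apply_to_keys(keys, action, ok_template, fail_template):
    """Apply `action` to each key and build the usual answer dict.

    `action` is a callable returning True on success; the templates
    are format strings receiving the joined key list.
    """
    succeeded, failed = [], []
    for key in keys:
        (succeeded if action(key) else failed).append(str(key))
    message = ''
    status = 'success'
    if succeeded:
        message += ok_template.format(', '.join(succeeded))
    if failed:
        message += '\n' + fail_template.format(', '.join(failed))
        status = 'error'
    return {'message': message.strip(), 'status': status}

# Usage, mirroring the keyed branch of `start` above:
# answer = apply_to_keys(payload['keys'], self.process_handler.start_process,
#                        'Started processes: {}.',
#                        'No paused, queued or stashed process for keys: {}')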
def pause(self, payload):
"""Start the daemon and all processes or only specific processes."""
# Pause specific processes, if `keys` is given in the payload
if payload.get('keys'):
succeeded = []
failed = []
for key in payload.get('keys'):
success = self.process_handler.pause_process(key)
if success:
succeeded.append(str(key))
else:
failed.append(str(key))
message = ''
if len(succeeded) > 0:
message += 'Paused processes: {}.'.format(', '.join(succeeded))
status = 'success'
if len(failed) > 0:
message += '\nNo running process for keys: {}'.format(', '.join(failed))
status = 'error'
answer = {'message': message.strip(), 'status': status}
# Pause all processes and the daemon
else:
if payload.get('wait'):
self.paused = True
answer = {'message': 'Pausing daemon, but waiting for processes to finish.',
'status': 'success'}
else:
self.process_handler.pause_all()
if not self.paused:
self.paused = True
answer = {'message': 'Daemon and all processes paused.',
'status': 'success'}
else:
answer = {'message': 'Daemon already paused, pausing all processes anyway.',
'status': 'success'}
return answer
|
Pause the daemon and all processes or only specific processes.
|
entailment
|
def edit_command(self, payload):
"""Edit the command of a specific entry."""
key = payload['key']
command = payload['command']
if self.queue.get(key) is not None:
if self.queue[key]['status'] in ['queued', 'stashed']:
self.queue[key]['command'] = command
answer = {'message': 'Command updated', 'status': 'success'}
else:
answer = {'message': "Entry is not 'queued' or 'stashed'",
'status': 'error'}
else:
answer = {'message': 'No entry with this key', 'status': 'error'}
return answer
|
Edit the command of a specific entry.
|
entailment
|
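An example payload for edit_command (the mode name comes from the dispatch table, the values are illustrative):

payload = {'mode': 'edit', 'key': 3, 'command': 'make test'}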
def stash(self, payload):
"""Stash the specified processes."""
succeeded = []
failed = []
for key in payload['keys']:
if self.queue.get(key) is not None:
if self.queue[key]['status'] == 'queued':
self.queue[key]['status'] = 'stashed'
succeeded.append(str(key))
else:
failed.append(str(key))
else:
failed.append(str(key))
message = ''
if len(succeeded) > 0:
message += 'Stashed entries: {}.'.format(', '.join(succeeded))
status = 'success'
if len(failed) > 0:
message += '\nNo queued entry for keys: {}'.format(', '.join(failed))
status = 'error'
answer = {'message': message.strip(), 'status': status}
return answer
|
Stash the specified processes.
|
entailment
|
def kill_process(self, payload):
"""Pause the daemon and kill all processes or kill a specific process."""
# Kill specific processes, if `keys` is given in the payload
kill_signal = signals[payload['signal'].lower()]
kill_shell = payload.get('all', False)
if payload.get('keys'):
succeeded = []
failed = []
for key in payload.get('keys'):
success = self.process_handler.kill_process(key, kill_signal, kill_shell)
if success:
succeeded.append(str(key))
else:
failed.append(str(key))
message = ''
if len(succeeded) > 0:
message += "Signal '{}' sent to processes: {}.".format(payload['signal'], ', '.join(succeeded))
status = 'success'
if len(failed) > 0:
message += '\nNo running process for keys: {}'.format(', '.join(failed))
status = 'error'
answer = {'message': message.strip(), 'status': status}
# Kill all processes and the daemon
else:
self.process_handler.kill_all(kill_signal, kill_shell)
if kill_signal in (signal.SIGINT, signal.SIGTERM, signal.SIGKILL):
self.paused = True
answer = {'message': 'Signal sent to all processes.',
'status': 'success'}
return answer
|
Pause the daemon and kill all processes or kill a specific process.
|
entailment
|
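The `signals` mapping is defined elsewhere. To satisfy both lookups seen in this excerpt — `signals['9']` and `signals[payload['signal'].lower()]` — it plausibly maps numeric strings and lowercase names to signal constants; this reconstruction is an assumption:

import signal

signals = {
    '2': signal.SIGINT, 'sigint': signal.SIGINT, 'int': signal.SIGINT,
    '9': signal.SIGKILL, 'sigkill': signal.SIGKILL, 'kill': signal.SIGKILL,
    '15': signal.SIGTERM, 'sigterm': signal.SIGTERM, 'term': signal.SIGTERM,
}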
def remove(self, payload):
"""Remove specified entries from the queue."""
succeeded = []
failed = []
for key in payload['keys']:
running = self.process_handler.is_running(key)
if not running:
removed = self.queue.remove(key)
if removed:
succeeded.append(str(key))
else:
failed.append(str(key))
else:
failed.append(str(key))
message = ''
if len(succeeded) > 0:
message += 'Removed entries: {}.'.format(', '.join(succeeded))
status = 'success'
if len(failed) > 0:
message += '\nRunning or non-existing entry for keys: {}'.format(', '.join(failed))
status = 'error'
answer = {'message': message.strip(), 'status': status}
return answer
|
Remove specified entries from the queue.
|
entailment
|
def switch(self, payload):
"""Switch the two specified entry positions in the queue."""
first = payload['first']
second = payload['second']
running = self.process_handler.is_running(first) or self.process_handler.is_running(second)
if running:
answer = {
'message': "Can't switch running processes; "
"please stop them before switching.",
'status': 'error'
}
else:
switched = self.queue.switch(first, second)
if switched:
answer = {
'message': 'Entries #{} and #{} switched'.format(first, second),
'status': 'success'
}
else:
answer = {'message': "One or both entries do not exist or are not queued/stashed.",
'status': 'error'}
return answer
|
Switch the two specified entry positions in the queue.
|
entailment
|
def restart(self, payload):
"""Restart the specified entries."""
succeeded = []
failed = []
for key in payload['keys']:
restarted = self.queue.restart(key)
if restarted:
succeeded.append(str(key))
else:
failed.append(str(key))
message = ''
if len(succeeded) > 0:
message += 'Restarted entries: {}.'.format(', '.join(succeeded))
status = 'success'
if len(failed) > 0:
message += '\nNo finished entry for keys: {}'.format(', '.join(failed))
status = 'error'
answer = {'message': message.strip(), 'status': status}
return answer
|
Restart the specified entries.
|
entailment
|
def sendall(self, data, flags=0):
'''Same as socket.sendall'''
count = len(data)
while count:
sent = self.send(data, flags)
# A memoryview would avoid re-copying data here (see the sketch below)
data = data[sent:]
count -= sent
|
Same as socket.sendall
|
entailment
|
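The comment in sendall hints at a buffer object: slicing bytes copies the remainder on every iteration, while slicing a memoryview is O(1). A sketch of that variant (same behaviour, hypothetical name):

def sendall_memoryview(self, data, flags=0):
    '''Same as socket.sendall, but without copying on each partial send.'''
    view = memoryview(data)
    while view:
        sent = self.send(view, flags)
        view = view[sent:]  # re-slicing a memoryview copies no bytes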
def do_ls(client, args):
"""List directory"""
for item in client.get_folder_contents_iter(args.uri):
# privacy flag
if item['privacy'] == 'public':
item['pf'] = '@'
else:
item['pf'] = '-'
if isinstance(item, Folder):
# type flag
item['tf'] = 'd'
item['key'] = item['folderkey']
item['size'] = ''
else:
item['tf'] = '-'
item['key'] = item['quickkey']
item['name'] = item['filename']
print("{tf}{pf} {key:>15} {size:>10} {created} {name}".format(**item))
return True
|
List directory
|
entailment
|
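Example output of do_ls under the format string above, with made-up keys, sizes and timestamps (first column: `d`/`-` = folder/file, `@`/`-` = public/private):

d-    myfolderkey1            2016-01-02 14:03:11 photos
-@    somequickkey    1048576 2016-01-03 09:12:45 report.pdf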
def do_file_upload(client, args):
"""Upload files"""
# Sanity check
if len(args.paths) > 1:
# destination must be a directory
try:
resource = client.get_resource_by_uri(args.dest_uri)
except ResourceNotFoundError:
resource = None
if resource and not isinstance(resource, Folder):
print("file-upload: "
"target '{}' is not a directory".format(args.dest_uri))
return None
with client.upload_session():
for src_path in args.paths:
print("Uploading {} to {}".format(src_path, args.dest_uri))
result = client.upload_file(src_path, args.dest_uri)
print("Uploaded {}, result={}".format(src_path, result))
return True
|
Upload files
|
entailment
|
def do_file_download(client, args):
"""Download file"""
# Sanity check
if not os.path.isdir(args.dest_path) and not args.dest_path.endswith('/'):
print("file-download: "
"target '{}' is not a directory".format(args.dest_path))
if not os.path.exists(args.dest_path):
print("\tHint: add trailing / to create one")
return None
for src_uri in args.uris:
print("Downloading {} to {}".format(src_uri, args.dest_path))
client.download_file(src_uri, args.dest_path)
print("Downloaded {}".format(src_uri))
return True
|
Download file
|
entailment
|
def do_file_show(client, args):
"""Output file contents to stdout"""
for src_uri in args.uris:
client.download_file(src_uri, sys.stdout.buffer)
return True
|
Output file contents to stdout
|
entailment
|
def do_folder_create(client, args):
"""Create directory"""
for folder_uri in args.uris:
client.create_folder(folder_uri, recursive=True)
return True
|
Create directory
|
entailment
|
def do_resource_delete(client, args):
"""Remove resource"""
for resource_uri in args.uris:
client.delete_resource(resource_uri, purge=args.purge)
print("Deleted {}".format(resource_uri))
return True
|
Remove resource
|
entailment
|
def do_file_update_metadata(client, args):
"""Update file metadata"""
client.update_file_metadata(args.uri, filename=args.filename,
description=args.description, mtime=args.mtime,
privacy=args.privacy)
return True
|
Update file metadata
|
entailment
|