Search is not available for this dataset
text stringlengths 75 104k |
|---|
def _nginx_stream_spec(port_spec, bridge_ip):
    """Render the nginx `stream` server block for a single port spec."""
    block_lines = [
        "\t server {",
        "\t \t {}".format(_nginx_listen_string(port_spec)),
        "\t \t {}".format(_nginx_proxy_string(port_spec, bridge_ip)),
        "\t }",
    ]
    return "".join(line + "\n" for line in block_lines)
def get_nginx_configuration_spec(port_spec_dict, docker_bridge_ip):
    """Turn a compiled port spec into nginx proxy config strings.

    Returns a dict with two entries: 'http' and 'stream', each a config
    string ready to be written to a file and used to run nginx.
    """
    http_parts, stream_parts = [], []
    for port_spec in port_spec_dict['nginx']:
        spec_type = port_spec['type']
        if spec_type == 'http':
            http_parts.append(_nginx_http_spec(port_spec, docker_bridge_ip))
        elif spec_type == 'stream':
            stream_parts.append(_nginx_stream_spec(port_spec, docker_bridge_ip))
    return {'http': ''.join(http_parts), 'stream': ''.join(stream_parts)}
def memoized(fn):
    """
    Decorator. Caches fn's return value keyed on its pickled arguments, so
    repeat calls with the same arguments are served from `cache` instead of
    being re-evaluated. The cache lasts for the duration of each request.
    """
    @functools.wraps(fn)
    def memoizer(*args, **kwargs):
        # key = function identity + pickled positional and keyword arguments
        cache_key = function_key(fn) + pickle.dumps(args) + pickle.dumps(_hash_kwargs(kwargs))
        if cache_key not in cache:
            cache[cache_key] = fn(*args, **kwargs)
        return cache[cache_key]
    return memoizer
def _load_ssh_auth_post_yosemite(mac_username):
    """Starting with Yosemite, launchd was rearchitected and now only one
    launchd process runs for all users. This allows us to much more easily
    impersonate a user through launchd and extract the environment
    variables from their running processes."""
    # `id -u` output ends with a newline; strip it so launchctl receives a
    # clean numeric uid argument rather than '501\n'.
    user_id = subprocess.check_output(['id', '-u', mac_username]).rstrip()
    ssh_auth_sock = subprocess.check_output(['launchctl', 'asuser', user_id,
                                             'launchctl', 'getenv', 'SSH_AUTH_SOCK']).rstrip()
    _set_ssh_auth_sock(ssh_auth_sock)
def _load_ssh_auth_pre_yosemite():
    """For OS X versions before Yosemite, many launchd processes run simultaneously under
    different users and different permission models. The simpler `asuser` trick we use
    in Yosemite doesn't work, since it gets routed to the wrong launchd. We instead need
    to find the running ssh-agent process and use its PID to navigate ourselves
    to the correct launchd."""
    for proc in psutil.process_iter():
        if proc.name() != 'ssh-agent':
            continue
        sock = subprocess.check_output(['launchctl', 'bsexec', str(proc.pid),
                                        'launchctl', 'getenv', 'SSH_AUTH_SOCK']).rstrip()
        if sock:
            _set_ssh_auth_sock(sock)
            break
    else:
        # no ssh-agent yielded a usable socket
        daemon_warnings.warn('ssh', 'No running ssh-agent found linked to SSH_AUTH_SOCK')
def check_and_load_ssh_auth():
    """
    Load the configured mac user's SSH_AUTH_SOCK into the daemon's
    environment so its git clones authenticate exactly like that user's
    own clones. No-op when mac_username is unset or we aren't on a Mac.
    """
    mac_username = get_config_value(constants.CONFIG_MAC_USERNAME_KEY)
    if not mac_username:
        logging.info("Can't setup ssh authorization; no mac_username specified")
        return
    if not _running_on_mac():  # give our Linux unit tests a way to not freak out
        logging.info("Skipping SSH load, we are not running on Mac")
        return
    # OS X version determines which launchd-impersonation trick works
    if _mac_version_is_post_yosemite():
        _load_ssh_auth_post_yosemite(mac_username)
    else:
        _load_ssh_auth_pre_yosemite()
def _cleanup_path(path):
"""Recursively delete a path upon exiting this context
manager. Supports targets that are files or directories."""
try:
yield
finally:
if os.path.exists(path):
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path) |
def copy_between_containers(source_name, source_path, dest_name, dest_path):
    """Copy a file from one container to another via a temporary staging
    area on the local filesystem.
    Both moves run without demotion because:
    1. There's no permissions exposure copying between containers, since
       the non-privileged user is assumed to have full access to all
       Dusty containers.
    2. The mkdtemp staging dir is owned by the daemon process's owner, so
       demoted moves to/from it would hit permission errors."""
    if not container_path_exists(source_name, source_path):
        raise RuntimeError('ERROR: Path {} does not exist inside container {}.'.format(source_path, source_name))
    staging_path = os.path.join(tempfile.mkdtemp(), str(uuid.uuid1()))
    with _cleanup_path(staging_path):
        copy_to_local(staging_path, source_name, source_path, demote=False)
        copy_from_local(staging_path, dest_name, dest_path, demote=False)
def copy_from_local(local_path, remote_name, remote_path, demote=True):
    """Copy a path from the local filesystem to a path inside a Dusty
    container. The files on the local filesystem must be accessible
    by the user specified in mac_username.

    Stages through the VM's shared cp directory, then moves the staged
    copy into place inside the container.
    """
    if not os.path.exists(local_path):
        raise RuntimeError('ERROR: Path {} does not exist'.format(local_path))
    temp_identifier = str(uuid.uuid1())
    # The sync step is identical for files and directories; only the final
    # move differs (previously this call was duplicated in both branches).
    sync_local_path_to_vm(local_path, os.path.join(vm_cp_path(remote_name), temp_identifier), demote=demote)
    container_staging_path = os.path.join(constants.CONTAINER_CP_DIR, temp_identifier)
    if os.path.isdir(local_path):
        move_dir_inside_container(remote_name, container_staging_path, remote_path)
    else:
        move_file_inside_container(remote_name, container_staging_path, remote_path)
def copy_to_local(local_path, remote_name, remote_path, demote=True):
    """Copy a path from inside a Dusty container to a path on the
    local filesystem. The path on the local filesystem must be
    write-accessible by the user specified in mac_username."""
    if not container_path_exists(remote_name, remote_path):
        raise RuntimeError('ERROR: Path {} does not exist inside container {}.'.format(remote_path, remote_name))
    temp_identifier = str(uuid.uuid1())
    # Stage the data out of the container into the VM's shared cp directory
    copy_path_inside_container(remote_name, remote_path, os.path.join(constants.CONTAINER_CP_DIR, temp_identifier))
    vm_path = os.path.join(vm_cp_path(remote_name), temp_identifier)
    # The local sync step needs to know whether it is pulling a directory
    is_dir = vm_path_is_directory(vm_path)
    sync_local_path_from_vm(local_path, vm_path, demote=demote, is_dir=is_dir)
def _mount_repo(repo, wait_for_server=False):
    """
    Create the VM directory where `repo` will be mounted (if needed) and
    mount it over NFS. With wait_for_server set, mounts that fail with
    'Connection refused' are retried once per second for up to 10 seconds
    to let nfsd finish restarting; otherwise the mount is attempted once.
    """
    check_call_on_vm('sudo mkdir -p {}'.format(repo.vm_path))
    if not wait_for_server:
        _run_mount_command(repo)
        return
    for _ in range(10):
        try:
            _run_mount_command(repo)
            return
        except CalledProcessError as e:
            if 'Connection refused' not in e.output:
                # not a server-startup race; surface the real failure
                logging.info(e.output)
                raise e
            logging.info('Failed to mount repo; waiting for nfsd to restart')
            time.sleep(1)
    log_to_client('Failed to mount repo {}'.format(repo.short_name))
    raise RuntimeError('Unable to mount repo with NFS')
def get_port_spec_document(expanded_active_specs, docker_vm_ip):
    """Given the expanded dusty DAG specs, return the port mappings used by
    downstream consumers: docker_compose, virtualbox, nginx and hosts_file."""
    next_port = 65000
    port_spec = {'docker_compose': {}, 'nginx': [], 'hosts_file': []}
    host_full_addresses, host_names, stream_host_ports = set(), set(), set()
    # Iterate apps in sorted order so repeated runs always assign the same
    # forwarding ports regardless of dict ordering.
    for app_name in sorted(expanded_active_specs['apps']):
        app_spec = expanded_active_specs['apps'][app_name]
        if 'host_forwarding' not in app_spec:
            continue
        compose_specs = port_spec['docker_compose'][app_name] = []
        for forwarding_spec in app_spec['host_forwarding']:
            # Validation only: these helpers check the spec set is consistent
            _add_full_addresses(forwarding_spec, host_full_addresses)
            if forwarding_spec['type'] == 'stream':
                _add_stream_host_port(forwarding_spec, stream_host_ports)
            compose_specs.append(_docker_compose_port_spec(forwarding_spec, next_port))
            port_spec['nginx'].append(_nginx_port_spec(forwarding_spec, next_port, docker_vm_ip))
            _add_host_names(forwarding_spec, docker_vm_ip, port_spec, host_names)
            next_port += 1
    return port_spec
def init_yaml_constructor():
    """
    Make yaml.safe_load return utf-8 encoded byte strings instead of the
    unicode objects it would otherwise produce for non-ascii scalars.
    """
    def _utf8_string_constructor(loader, node):
        # encode at construction time so every str scalar comes back utf-8
        return loader.construct_scalar(node).encode('utf-8')
    yaml.SafeLoader.add_constructor(u'tag:yaml.org,2002:str', _utf8_string_constructor)
def registry_from_image(image_name):
    """Return the Docker registry host implied by an image name."""
    if '/' not in image_name:  # official image, e.g. 'ubuntu'
        return constants.PUBLIC_DOCKER_REGISTRY
    prefix = image_name.split('/')[0]
    # A dotted prefix (e.g. quay.io) is a registry host; an undotted one is
    # a user namespace on the official registry, e.g. thieman/clojure.
    return prefix if '.' in prefix else constants.PUBLIC_DOCKER_REGISTRY
def get_authed_registries():
    """Reads the local Docker client config for the current user
    and returns all registries to which the user may be logged in.
    This is intended to be run client-side, not by the daemon."""
    result = set()
    if not os.path.exists(constants.DOCKER_CONFIG_PATH):
        return result
    # close the config file promptly instead of leaking the handle
    with open(constants.DOCKER_CONFIG_PATH, 'r') as f:
        config = json.load(f)
    for registry in config.get('auths', {}).iterkeys():
        try:
            parsed = urlparse(registry)
        except Exception:
            # BUG FIX: .format was previously called on log_to_client's
            # return value, not the message string; also skip this registry
            # since `parsed` is undefined after a failure.
            log_to_client('Error parsing registry {} from Docker config, will skip this registry'.format(registry))
            continue
        # Auth keys are either bare hosts (gamechanger.io), which parse into
        # `path`, or full URLs (https://index.docker.io/v1/), which parse
        # into `netloc`; any extra path after the host does not matter.
        result.add(parsed.netloc if parsed.netloc else parsed.path)
    return result
def streaming_to_client():
    """Puts the client logger into streaming mode, which sends
    unbuffered input through to the socket one character at a time.
    We also disable propagation so the root logger does not
    receive many one-byte emissions. This context handler
    was originally created for streaming Compose up's
    terminal output through to the client and should only be
    used for similarly complex circumstances."""
    # Find the first handler that supports newline suppression, if any
    for handler in client_logger.handlers:
        if hasattr(handler, 'append_newlines'):
            break
    else:
        handler = None
    old_propagate = client_logger.propagate
    client_logger.propagate = False
    if handler is not None:
        old_append = handler.append_newlines
        handler.append_newlines = False
    try:
        yield
    finally:
        # BUG FIX: restore logger state even when the managed block raises;
        # previously an exception left propagation off and newlines disabled.
        client_logger.propagate = old_propagate
        if handler is not None:
            handler.append_newlines = old_append
def pty_fork(*args):
    """Runs a subprocess with a PTY attached via fork and exec.
    The output from the PTY is streamed through log_to_client.
    This should not be necessary for most subprocesses, we
    built this to handle Compose up which only streams pull
    progress if it is attached to a TTY."""
    # Child environment: current env overlaid with the Docker VM's variables.
    updated_env = copy(os.environ)
    updated_env.update(get_docker_env())
    # os.execle expects the environment dict as the final positional argument.
    args += (updated_env,)
    executable = args[0]
    demote_fn = demote_to_user(get_config_value(constants.CONFIG_MAC_USERNAME_KEY))
    child_pid, pty_fd = pty.fork()
    if child_pid == 0:
        # Child process: drop privileges, then replace ourselves with the target.
        demote_fn()
        os.execle(_executable_path(executable), *args)
    else:
        # Parent: read the child's PTY one byte at a time (unbuffered) and
        # stream it to the client while the child is still running.
        child_process = psutil.Process(child_pid)
        terminal = os.fdopen(pty_fd, 'r', 0)
        with streaming_to_client():
            while child_process.status() == 'running':
                output = terminal.read(1)
                log_to_client(output)
        _, exit_code = os.waitpid(child_pid, 0)
        # NOTE(review): os.waitpid returns an encoded wait status, not a plain
        # exit code -- confirm callers expect this raw value in the error below.
        if exit_code != 0:
            raise subprocess.CalledProcessError(exit_code, ' '.join(args[:-1]))
def _compile_docker_commands(app_name, assembled_specs, port_spec):
    """Compile the shell command list run at container startup: install any
    libs the app uses, run its `once` command on first launch, then its
    `always` command."""
    app_spec = assembled_specs['apps'][app_name]
    commands = ['set -e']
    commands.extend(_lib_install_commands_for_app(app_name, assembled_specs))
    if app_spec['mount']:
        code_path = container_code_path(app_spec)
        commands.append("cd {}".format(code_path))
        commands.append("export PATH=$PATH:{}".format(code_path))
    commands.extend(_copy_assets_commands_for_app(app_spec, assembled_specs))
    commands.extend(_get_once_commands(app_spec, port_spec))
    commands.extend(_get_always_commands(app_spec))
    return commands
def _increase_file_handle_limit():
    """Raise the open-file-handle limit for the Dusty daemon and its
    children; the chosen value must stay within the OS X default kernel
    hard limit of 10240."""
    new_limit = constants.FILE_HANDLE_LIMIT
    logging.info('Increasing file handle limit to {}'.format(new_limit))
    resource.setrlimit(resource.RLIMIT_NOFILE, (new_limit, resource.RLIM_INFINITY))
def _start_http_server():
    """Run the daemon's HTTP server on a background thread; it only serves
    container status requests from Dusty's custom 502 page."""
    bind_ip = constants.DAEMON_HTTP_BIND_IP
    bind_port = constants.DAEMON_HTTP_BIND_PORT
    logging.info('Starting HTTP server at {}:{}'.format(bind_ip, bind_port))
    server_thread = threading.Thread(target=http_server.app.run, args=(bind_ip, bind_port))
    server_thread.daemon = True  # don't keep the daemon alive on shutdown
    server_thread.start()
def get_dusty_images():
    """Return the set of images referenced by app and service specs, in
    repository:tag form; the tag defaults to 'latest' when unspecified."""
    specs = get_specs()
    image_names = [spec['image']
                   for spec in specs['apps'].values() + specs['services'].values()
                   if 'image' in spec]
    return {name if ':' in name else "{}:latest".format(name) for name in image_names}
def get_docker_client():
    """Build a docker.Client from the Dusty VM's Docker environment
    variables (adapted from docker-py's kwargs_from_env utility)."""
    env = get_docker_env()
    host = env['DOCKER_HOST']
    cert_path = env['DOCKER_CERT_PATH']
    tls_verify = env['DOCKER_TLS_VERIFY']
    params = {'base_url': host.replace('tcp://', 'https://'),
              'timeout': None,
              'version': 'auto'}
    if tls_verify and cert_path:
        client_cert = (os.path.join(cert_path, 'cert.pem'),
                       os.path.join(cert_path, 'key.pem'))
        params['tls'] = docker.tls.TLSConfig(
            client_cert=client_cert,
            ca_cert=os.path.join(cert_path, 'ca.pem'),
            verify=True,
            ssl_version=None,
            assert_hostname=False)
    return docker.Client(**params)
def get_dusty_containers(services, include_exited=False):
    """Return the containers backing the given services; with no services,
    return every container whose name marks it as Dusty-managed."""
    client = get_docker_client()
    if services:
        found = (get_container_for_app_or_service(service, include_exited=include_exited)
                 for service in services)
        return [container for container in found if container]
    return [container
            for container in client.containers(all=include_exited)
            if any(name.startswith('/dusty') for name in container.get('Names', []))]
def configure_nfs_server():
    """
    Used with `dusty up`. Checks whether every active repo is already
    exported; if any are missing, replaces the current dusty exports with
    the full set needed for active repos and restarts the NFS server.
    Otherwise just makes sure the server is running.
    """
    repos_for_export = get_all_repos(active_only=True, include_specs_repo=False)
    current_exports = _get_current_exports()
    needed_exports = _get_exports_for_repos(repos_for_export)
    _ensure_managed_repos_dir_exists()
    missing_exports = needed_exports.difference(current_exports)
    if missing_exports:
        _write_exports_config(needed_exports)
        _restart_server()
    elif not _server_is_running():
        _restart_server()
def add_exports_for_repos(repos):
    """
    Add any missing entries for `repos` to /etc/exports without removing
    existing entries, restarting the NFS server only when needed.
    """
    current_exports = _get_current_exports()
    needed_exports = _get_exports_for_repos(repos)
    missing_exports = needed_exports.difference(current_exports)
    if not missing_exports:
        # nothing to write; just make sure the server is up
        if not _server_is_running():
            _restart_server()
        return
    _write_exports_config(current_exports.union(needed_exports))
    _restart_server()
def _ensure_managed_repos_dir_exists():
    """
    Create the managed-repos directory when missing; without it the exports
    file is invalid and the NFS server will not run correctly.
    """
    if os.path.exists(constants.REPOS_DIR):
        return
    os.makedirs(constants.REPOS_DIR)
def register_consumer():
    """Flask view: given a hostname and port being accessed (form params),
    register and return a unique consumer ID for reading logs from the
    container backing that address.

    Raises ValueError when no container exists for the resolved app.
    """
    global _consumers
    hostname, port = request.form['hostname'], request.form['port']
    app_name = _app_name_from_forwarding_info(hostname, port)
    # include exited containers so logs of crashed apps are still reachable
    containers = get_dusty_containers([app_name], include_exited=True)
    if not containers:
        raise ValueError('No container exists for app {}'.format(app_name))
    container = containers[0]
    new_id = uuid1()
    new_consumer = Consumer(container['Id'], datetime.utcnow())
    # registry is keyed by the string form of the UUID
    _consumers[str(new_id)] = new_consumer
    # NOTE(review): the response carries the raw UUID object while the
    # registry key is str(new_id) -- presumably jsonify stringifies it the
    # same way; verify clients echo back exactly the key used above.
    response = jsonify({'app_name': app_name, 'consumer_id': new_id})
    response.headers['Access-Control-Allow-Origin'] = '*'
    response.headers['Access-Control-Allow-Methods'] = 'GET, POST'
    return response
def consume(consumer_id):
    """Flask view: return any log lines newer than the consumer's last
    offset, plus the container's current status, then advance the offset.

    consumer_id must be a key previously issued by register_consumer.
    """
    global _consumers
    consumer = _consumers[consumer_id]
    client = get_docker_client()
    try:
        status = client.inspect_container(consumer.container_id)['State']['Status']
    except Exception:
        # best-effort: the container may be gone or the daemon unreachable
        # (unused `as e` binding removed)
        status = 'unknown'
    new_logs = client.logs(consumer.container_id,
                           stdout=True,
                           stderr=True,
                           stream=False,
                           timestamps=False,
                           since=calendar.timegm(consumer.offset.timetuple()))
    # advance the offset so the next poll only sees newer lines
    updated_consumer = Consumer(consumer.container_id, datetime.utcnow())
    _consumers[str(consumer_id)] = updated_consumer
    response = jsonify({'logs': new_logs, 'status': status})
    response.headers['Access-Control-Allow-Origin'] = '*'
    response.headers['Access-Control-Allow-Methods'] = 'GET, POST'
    return response
def get_app_volume_mounts(app_name, assembled_specs, test=False):
    """Return the formatted volume mount specs for an app: command files,
    assets, the app's own repo (when mounted), and every lib repo it
    depends on."""
    app_spec = assembled_specs['apps'][app_name]
    volumes = [get_command_files_volume_mount(app_name, test=test),
               get_asset_volume_mount(app_name)]
    repo_mount = _get_app_repo_volume_mount(app_spec)
    if repo_mount:
        volumes.append(repo_mount)
    return volumes + _get_app_libs_volume_mounts(app_name, assembled_specs)
def get_lib_volume_mounts(base_lib_name, assembled_specs):
    """Return the formatted volume mount specs for a lib: its own repo,
    command files, and the repos of every lib it depends on."""
    libs = assembled_specs['libs']
    volumes = [_get_lib_repo_volume_mount(libs[base_lib_name]),
               get_command_files_volume_mount(base_lib_name, test=True)]
    volumes.extend(_get_lib_repo_volume_mount(libs[dep_name])
                   for dep_name in libs[base_lib_name]['depends']['libs'])
    return volumes
def _get_app_libs_volume_mounts(app_name, assembled_specs):
    """Return formatted volume mounts for every lib the app depends on."""
    lib_specs = assembled_specs['libs']
    return ["{}:{}".format(Repo(lib_specs[lib_name]['repo']).vm_path,
                           container_code_path(lib_specs[lib_name]))
            for lib_name in assembled_specs['apps'][app_name]['depends']['libs']]
def _dusty_vm_exists():
    """Check for the Dusty VM via VBoxManage directly; it shaves ~0.5s off
    this check compared to Docker Machine."""
    quoted_name = '"{}"'.format(constants.VM_MACHINE_NAME)
    vm_listing = check_output_demoted(['VBoxManage', 'list', 'vms'])
    return any(quoted_name in line for line in vm_listing.splitlines())
def _init_docker_vm():
    """Create the Dusty VM through docker-machine unless it already exists."""
    if _dusty_vm_exists():
        return
    log_to_client('Initializing new Dusty VM with Docker Machine')
    machine_options = ['--driver', 'virtualbox',
                       '--virtualbox-cpu-count', '-1',
                       '--virtualbox-boot2docker-url', constants.CONFIG_BOOT2DOCKER_URL,
                       '--virtualbox-memory', str(get_config_value(constants.CONFIG_VM_MEM_SIZE)),
                       '--virtualbox-hostonly-nictype', constants.VM_NIC_TYPE]
    check_call_demoted(['docker-machine', 'create'] + machine_options + [constants.VM_MACHINE_NAME],
                       redirect_stderr=True)
def _start_docker_vm():
    """Start the Dusty VM if needed; return whether it was already running."""
    was_running = docker_vm_is_running()
    if not was_running:
        log_to_client('Starting docker-machine VM {}'.format(constants.VM_MACHINE_NAME))
        # apply NAT tweaks before boot so the VM comes up with them in place
        _apply_nat_dns_host_resolver()
        _apply_nat_net_less_greedy_subnet()
        check_and_log_output_and_error_demoted(['docker-machine', 'start', constants.VM_MACHINE_NAME], quiet_on_success=True)
    return was_running
def docker_vm_is_running():
    """Check the running-VM list via VBoxManage; ~0.5s faster than Machine."""
    quoted_name = '"{}"'.format(constants.VM_MACHINE_NAME)
    running_listing = check_output_demoted(['VBoxManage', 'list', 'runningvms'])
    return any(quoted_name in line for line in running_listing.splitlines())
def _get_localhost_ssh_port():
    """Inspect the VM's port-forwarding rules (set up by VirtualBox or
    Machine) to find which localhost port forwards to port 22 in the VM."""
    for line in _get_vm_config():
        if not line.startswith('Forwarding'):
            continue
        rule = line.split('=')[1].strip('"')
        name, protocol, _host, host_port, _target, target_port = rule.split(',')
        if name == 'ssh' and protocol == 'tcp' and target_port == '22':
            return host_port
    raise ValueError('Could not determine localhost port for SSH forwarding')
def _get_host_only_mac_address():
    """Returns the MAC address assigned to the host-only adapter,
    using output from VBoxManage. Returned MAC address has no colons
    and is lower-cased."""
    vm_config = _get_vm_config()
    # Find which adapter number is the host-only adapter, e.g.
    # hostonlyadapter2="vboxnet0" -> 2. Parse every digit up to the '='
    # (the old line[15:16] slice silently truncated multi-digit numbers).
    for line in vm_config:
        if line.startswith('hostonlyadapter'):
            adapter_number = int(line[len('hostonlyadapter'):line.index('=')])
            break
    else:
        raise ValueError('No host-only adapter is defined for the Dusty VM')
    for line in vm_config:
        if line.startswith('macaddress{}'.format(adapter_number)):
            return line.split('=')[1].strip('"').lower()
    raise ValueError('Could not find MAC address for adapter number {}'.format(adapter_number))
def _ip_for_mac_from_ip_addr_show(ip_addr_show, target_mac):
"""Given the rather-complex output from an 'ip addr show' command
on the VM, parse the output to determine the IP address
assigned to the interface with the given MAC."""
return_next_ip = False
for line in ip_addr_show.splitlines():
line = line.strip()
if line.startswith('link/ether'):
line_mac = line.split(' ')[1].replace(':', '')
if line_mac == target_mac:
return_next_ip = True
elif return_next_ip and line.startswith('inet') and not line.startswith('inet6'):
ip = line.split(' ')[1].split('/')[0]
return ip |
def _get_host_only_ip():
    """Determine the host-only IP of the Dusty VM by talking to VirtualBox
    and SSH directly — the same flow Docker Machine uses internally, but
    without Machine's ~600ms overhead."""
    target_mac = _get_host_only_mac_address()
    ssh_command = ['ssh', '-o', 'StrictHostKeyChecking=no',
                   '-o', 'UserKnownHostsFile=/dev/null',
                   '-i', _vm_key_path(), '-p', _get_localhost_ssh_port(),
                   'docker@127.0.0.1', 'ip addr show']
    return _ip_for_mac_from_ip_addr_show(check_output_demoted(ssh_command), target_mac)
def create_local_copy(cookie_file):
    """Make a local copy of the sqlite cookie database and return the new
    filename. Necessary because the database may still be written to while
    the user browses, which would cause sqlite locking errors.

    cookie_file may be a path or a list of paths (first entry is used).
    Raises BrowserCookieError when the source file does not exist.
    """
    if isinstance(cookie_file, list):
        cookie_file = cookie_file[0]
    if not os.path.exists(cookie_file):
        raise BrowserCookieError('Can not find cookie file at: ' + cookie_file)
    # copy to a random name in the tmp folder
    tmp_cookie_file = tempfile.NamedTemporaryFile(suffix='.sqlite').name
    # BUG FIX: use context managers so neither file handle is leaked
    # (previously both open() handles were never closed).
    with open(cookie_file, 'rb') as src:
        with open(tmp_cookie_file, 'wb') as dst:
            dst.write(src.read())
    return tmp_cookie_file
def create_cookie(host, path, secure, expires, name, value):
    """Build an http.cookiejar.Cookie, defaulting the fields browsers
    don't store (version 0, no port, no comment)."""
    domain_has_dot = host.startswith('.')
    return http.cookiejar.Cookie(
        0, name, value, None, False,
        host, domain_has_dot, domain_has_dot,
        path, True, secure, expires,
        False, None, None, {})
def load(domain_name=""):
    """Combine cookies from every supported browser into one CookieJar.
    Optionally pass a domain name to restrict which cookies are loaded.
    Browsers that raise BrowserCookieError are silently skipped."""
    combined_jar = http.cookiejar.CookieJar()
    for browser_fn in [chrome, firefox]:
        try:
            for cookie in browser_fn(domain_name=domain_name):
                combined_jar.set_cookie(cookie)
        except BrowserCookieError:
            pass
    return combined_jar
def load(self):
    """Load cookies from the temporary sqlite database into a CookieJar,
    decrypting values where necessary.
    """
    con = sqlite3.connect(self.tmp_cookie_file)
    cur = con.cursor()
    # SECURITY FIX: bind the LIKE pattern as a parameter instead of
    # formatting domain_name directly into the SQL string.
    domain_pattern = '%{}%'.format(self.domain_name)
    try:
        # chrome <=55
        cur.execute('SELECT host_key, path, secure, expires_utc, name, value, encrypted_value '
                    'FROM cookies WHERE host_key like ?;', (domain_pattern,))
    except sqlite3.OperationalError:
        # chrome >=56 renamed the `secure` column to `is_secure`
        cur.execute('SELECT host_key, path, is_secure, expires_utc, name, value, encrypted_value '
                    'FROM cookies WHERE host_key like ?;', (domain_pattern,))
    cj = http.cookiejar.CookieJar()
    for item in cur.fetchall():
        host, path, secure, expires, name = item[:5]
        # value is plaintext when present; otherwise decrypt encrypted_value
        value = self._decrypt(item[5], item[6])
        c = create_cookie(host, path, secure, expires, name, value)
        cj.set_cookie(c)
    con.close()
    return cj
def _decrypt(self, value, encrypted_value):
    """Decrypt a Chrome/Chromium cookie value.

    value: the plaintext column; non-empty when the cookie is unencrypted.
    encrypted_value: the encrypted column; 'v10'-prefixed when AES-encrypted.
    Returns the decrypted cookie string.
    """
    if sys.platform == 'win32':
        # Windows uses DPAPI rather than the AES scheme below
        return self._decrypt_windows_chrome(value, encrypted_value)
    # Unencrypted cookie, or an encryption scheme we don't recognize.
    if value or (encrypted_value[:3] != b'v10'):
        return value
    # Encrypted cookies should be prefixed with 'v10' according to the
    # Chromium code. Strip it off.
    encrypted_value = encrypted_value[3:]
    # Feed the ciphertext to the AES-CBC decrypter in two halves, then
    # flush padding with the final empty feed().
    encrypted_value_half_len = int(len(encrypted_value) / 2)
    cipher = pyaes.Decrypter(pyaes.AESModeOfOperationCBC(self.key, self.iv))
    decrypted = cipher.feed(encrypted_value[:encrypted_value_half_len])
    decrypted += cipher.feed(encrypted_value[encrypted_value_half_len:])
    decrypted += cipher.feed()
    return decrypted.decode("utf-8")
def _gen(d, limit=20, count=False, grouprefs=None):
    """Generate the strings matching a parsed regex token list `d`.

    When count is True, return the number of matching strings instead of
    the strings themselves. `limit` caps otherwise-unbounded repetitions;
    grouprefs accumulates text captured by numbered groups so
    backreferences can be resolved.
    """
    if grouprefs is None:
        grouprefs = {}
    ret = ['']
    strings = 0
    literal = False
    for i in d:
        if i[0] == sre_parse.IN:
            # character class, e.g. [a-z]
            subs = _in(i[1])
            if count:
                strings = (strings or 1) * len(subs)
            ret = comb(ret, subs)
        elif i[0] == sre_parse.LITERAL:
            literal = True
            ret = mappend(ret, unichr(i[1]))
        elif i[0] == sre_parse.CATEGORY:
            # \d, \w, \s and friends
            subs = CATEGORIES.get(i[1], [''])
            if count:
                strings = (strings or 1) * len(subs)
            ret = comb(ret, subs)
        elif i[0] == sre_parse.ANY:
            subs = CATEGORIES['category_any']
            if count:
                strings = (strings or 1) * len(subs)
            ret = comb(ret, subs)
        elif i[0] == sre_parse.MAX_REPEAT or i[0] == sre_parse.MIN_REPEAT:
            # repetition; unbounded ranges are clipped to `limit` repeats
            items = list(i[1][2])
            if i[1][1] + 1 - i[1][0] >= limit:
                r1 = i[1][0]
                r2 = i[1][0] + limit
            else:
                r1 = i[1][0]
                r2 = i[1][1] + 1
            ran = range(r1, r2)
            if count:
                branch_count = 0
                for p in ran:
                    branch_count += pow(_gen(items, limit, True, grouprefs), p)
                strings = (strings or 1) * branch_count
            ret = prods(ret, ran, items, limit, grouprefs)
        elif i[0] == sre_parse.BRANCH:
            # alternation a|b|c: counts add, results concatenate
            if count:
                for x in i[1][1]:
                    strings += _gen(x, limit, True, grouprefs) or 1
            ret = concit(ret, i[1][1], limit, grouprefs)
        elif i[0] == sre_parse.SUBPATTERN or i[0] == sre_parse.ASSERT:
            # group or lookahead; the token layout changed in Python 3.6
            subexpr = i[1][1]
            if IS_PY36_OR_GREATER and i[0] == sre_parse.SUBPATTERN:
                subexpr = i[1][3]
            if count:
                strings = (
                    strings or 1) * (sum(ggen([0], _gen, subexpr, limit=limit, count=True, grouprefs=grouprefs)) or 1)
            ret = ggen(ret, _gen, subexpr, limit=limit, count=False, grouprefs=grouprefs, groupref=i[1][0])
        # ignore ^ and $
        elif i[0] == sre_parse.AT:
            continue
        elif i[0] == sre_parse.NOT_LITERAL:
            # [^x]: any character except this one
            subs = list(CATEGORIES['category_any'])
            if unichr(i[1]) in subs:
                subs.remove(unichr(i[1]))
            if count:
                strings = (strings or 1) * len(subs)
            ret = comb(ret, subs)
        elif i[0] == sre_parse.GROUPREF:
            # backreference, e.g. \1: substitute the captured text
            ret = dappend(ret, grouprefs, i[1])
        elif i[0] == sre_parse.ASSERT_NOT:
            pass
        else:
            print('[!] cannot handle expression ' + repr(i))
    if count:
        # a pattern made only of literals (and anchors) matches exactly one string
        if strings == 0 and literal:
            inc = True
            for i in d:
                if i[0] not in (sre_parse.AT, sre_parse.LITERAL):
                    inc = False
            if inc:
                strings = 1
        return strings
    return ret
def _randone(d, limit=20, grouprefs=None):
    """Generate one random string matching the parsed regex token list `d`.

    :param d: token list produced by sre_parse.parse()
    :param limit: cap applied to otherwise-unbounded repetitions
    :param grouprefs: group number -> captured text, for backreferences
    """
    # BUG FIX: the docstring previously sat after the grouprefs
    # initialization, where it was a no-op string statement rather than
    # the function's documentation.
    if grouprefs is None:
        grouprefs = {}
    ret = ''
    for i in d:
        if i[0] == sre_parse.IN:
            ret += choice(_in(i[1]))
        elif i[0] == sre_parse.LITERAL:
            ret += unichr(i[1])
        elif i[0] == sre_parse.CATEGORY:
            ret += choice(CATEGORIES.get(i[1], ['']))
        elif i[0] == sre_parse.ANY:
            ret += choice(CATEGORIES['category_any'])
        elif i[0] == sre_parse.MAX_REPEAT or i[0] == sre_parse.MIN_REPEAT:
            # repeat count drawn uniformly from the (possibly clipped) range
            # (locals renamed from min/max to avoid shadowing builtins)
            if i[1][1] + 1 - i[1][0] >= limit:
                lo, hi = i[1][0], i[1][0] + limit - 1
            else:
                lo, hi = i[1][0], i[1][1]
            for _ in range(randint(lo, hi)):
                ret += _randone(list(i[1][2]), limit, grouprefs)
        elif i[0] == sre_parse.BRANCH:
            ret += _randone(choice(i[1][1]), limit, grouprefs)
        elif i[0] == sre_parse.SUBPATTERN or i[0] == sre_parse.ASSERT:
            # group or lookahead; the token layout changed in Python 3.6
            subexpr = i[1][1]
            if IS_PY36_OR_GREATER and i[0] == sre_parse.SUBPATTERN:
                subexpr = i[1][3]
            subp = _randone(subexpr, limit, grouprefs)
            if i[1][0]:
                # remember capture so later backreferences can reuse it
                grouprefs[i[1][0]] = subp
            ret += subp
        elif i[0] == sre_parse.AT:
            continue
        elif i[0] == sre_parse.NOT_LITERAL:
            c = list(CATEGORIES['category_any'])
            if unichr(i[1]) in c:
                c.remove(unichr(i[1]))
            ret += choice(c)
        elif i[0] == sre_parse.GROUPREF:
            ret += grouprefs[i[1]]
        elif i[0] == sre_parse.ASSERT_NOT:
            pass
        else:
            print('[!] cannot handle expression "%s"' % str(i))
    return ret
def sre_to_string(sre_obj, paren=True):
    """sre_parse object to string
    :param sre_obj: Output of sre_parse.parse()
    :type sre_obj: list
    :param paren: whether alternations should be wrapped in parentheses
    :rtype: str
    """
    ret = u''
    for i in sre_obj:
        if i[0] == sre_parse.IN:
            # character class; a leading NEGATE token means [^...]
            prefix = ''
            if len(i[1]) and i[1][0][0] == sre_parse.NEGATE:
                prefix = '^'
            ret += u'[{0}{1}]'.format(prefix, sre_to_string(i[1], paren=paren))
        elif i[0] == sre_parse.LITERAL:
            # escape characters that are special in regex syntax
            u = unichr(i[1])
            ret += u if u not in sre_parse.SPECIAL_CHARS else '\\{0}'.format(u)
        elif i[0] == sre_parse.CATEGORY:
            ret += REVERSE_CATEGORIES[i[1]]
        elif i[0] == sre_parse.ANY:
            ret += '.'
        elif i[0] == sre_parse.BRANCH:
            # TODO simplifications here
            parts = [sre_to_string(x, paren=paren) for x in i[1][1]]
            if not any(parts):
                continue
            if i[1][0]:
                if len(parts) == 1:
                    paren = False
                prefix = ''
            else:
                prefix = '?:'
            branch = '|'.join(parts)
            if paren:
                ret += '({0}{1})'.format(prefix, branch)
            else:
                ret += '{0}'.format(branch)
        elif i[0] == sre_parse.SUBPATTERN:
            # group; the token layout changed in Python 3.6
            subexpr = i[1][1]
            if IS_PY36_OR_GREATER and i[0] == sre_parse.SUBPATTERN:
                subexpr = i[1][3]
            if i[1][0]:
                ret += '({0})'.format(sre_to_string(subexpr, paren=False))
            else:
                ret += '{0}'.format(sre_to_string(subexpr, paren=paren))
        elif i[0] == sre_parse.NOT_LITERAL:
            ret += '[^{0}]'.format(unichr(i[1]))
        elif i[0] == sre_parse.MAX_REPEAT:
            # greedy repetition: {n}, *, + or {m,n}
            if i[1][0] == i[1][1]:
                range_str = '{{{0}}}'.format(i[1][0])
            else:
                if i[1][0] == 0 and i[1][1] - i[1][0] == sre_parse.MAXREPEAT:
                    range_str = '*'
                elif i[1][0] == 1 and i[1][1] - i[1][0] == sre_parse.MAXREPEAT - 1:
                    range_str = '+'
                else:
                    range_str = '{{{0},{1}}}'.format(i[1][0], i[1][1])
            ret += sre_to_string(i[1][2], paren=paren) + range_str
        elif i[0] == sre_parse.MIN_REPEAT:
            # non-greedy repetition: *?, +?, {m,}? or {m,n}?
            if i[1][0] == 0 and i[1][1] == sre_parse.MAXREPEAT:
                range_str = '*?'
            elif i[1][0] == 1 and i[1][1] == sre_parse.MAXREPEAT:
                range_str = '+?'
            elif i[1][1] == sre_parse.MAXREPEAT:
                range_str = '{{{0},}}?'.format(i[1][0])
            else:
                range_str = '{{{0},{1}}}?'.format(i[1][0], i[1][1])
            ret += sre_to_string(i[1][2], paren=paren) + range_str
        elif i[0] == sre_parse.GROUPREF:
            # backreference, e.g. \1
            ret += '\\{0}'.format(i[1])
        elif i[0] == sre_parse.AT:
            # anchors
            if i[1] == sre_parse.AT_BEGINNING:
                ret += '^'
            elif i[1] == sre_parse.AT_END:
                ret += '$'
        elif i[0] == sre_parse.NEGATE:
            # handled by the IN branch above via its prefix
            pass
        elif i[0] == sre_parse.RANGE:
            ret += '{0}-{1}'.format(unichr(i[1][0]), unichr(i[1][1]))
        elif i[0] == sre_parse.ASSERT:
            # lookahead
            if i[1][0]:
                ret += '(?={0})'.format(sre_to_string(i[1][1], paren=False))
            else:
                ret += '{0}'.format(sre_to_string(i[1][1], paren=paren))
        elif i[0] == sre_parse.ASSERT_NOT:
            pass
        else:
            print('[!] cannot handle expression "%s"' % str(i))
    return ret
def parse(s):
    """Regular expression parser
    :param s: Regular expression
    :type s: str
    :rtype: list
    """
    # Python 3 strings are already unicode; Python 2 needs an explicit decode
    pattern = s if IS_PY3 else s.decode('utf-8')
    return list(sre_parse.parse(pattern, flags=U))
def ib64_patched(self, attrsD, contentparams):
    """ Patch isBase64 to prevent Base64 encoding of JSON content

    Mirrors feedparser's _isBase64: content explicitly flagged with
    mode="base64" is still treated as Base64, while text, XML, and JSON
    payloads are passed through untouched.
    """
    # honour an explicit base64 flag (previously returned 0 here, which
    # made every check below dead code)
    if attrsD.get("mode", "") == "base64":
        return 1
    if self.contentparams["type"].startswith("text/"):
        return 0
    if self.contentparams["type"].endswith("+xml"):
        return 0
    if self.contentparams["type"].endswith("/xml"):
        return 0
    # the whole point of the patch: never Base64-decode JSON content
    if self.contentparams["type"].endswith("/json"):
        return 0
    return 0
def cleanwrap(func):
    """ Wrapper for Zotero._cleanup

    Turns a single-item cleanup function into one that lazily applies
    func to every positional argument, yielding one result per item.
    """
    def enc(self, *args, **kwargs):
        """ Send each item to _cleanup() """
        for single_item in args:
            yield func(self, single_item, **kwargs)
    return enc
def retrieve(func):
    """
    Decorator for Zotero read API methods; calls _retrieve_data() and passes
    the result to the correct processor, based on a lookup
    """
    def wrapped_f(self, *args, **kwargs):
        """
        Returns result of _retrieve_data()
        func's return value is part of a URI, and it's this
        which is intercepted and passed to _retrieve_data:
        '/users/123/items?key=abc123'
        """
        if kwargs:
            self.add_parameters(**kwargs)
        retrieved = self._retrieve_data(func(self, *args))
        # we now always have links in the header response
        self.links = self._extract_links()
        # determine content and format, based on url params
        content = (
            self.content.search(self.request.url)
            and self.content.search(self.request.url).group(0)
            or "bib"
        )
        # JSON by default: maps response Content-Type to an internal format tag
        formats = {
            "application/atom+xml": "atom",
            "application/x-bibtex": "bibtex",
            "application/json": "json",
            "text/html": "snapshot",
            "text/plain": "plain",
            "application/pdf; charset=utf-8": "pdf",
            "application/pdf": "pdf",
            "application/msword": "doc",
            "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": "xlsx",
            "application/vnd.openxmlformats-officedocument.wordprocessingml.document": "docx",
            "application/zip": "zip",
            "application/epub+zip": "zip",
            "audio/mpeg": "mp3",
            "video/mp4": "mp4",
            "audio/x-wav": "wav",
            "video/x-msvideo": "avi",
            "application/octet-stream": "octet",
            "application/x-tex": "tex",
            "application/x-texinfo": "texinfo",
            "image/jpeg": "jpeg",
            "image/png": "png",
            "image/gif": "gif",
            "image/tiff": "tiff",
            "application/postscript": "postscript",
            "application/rtf": "rtf",
        }
        # select format, or assume JSON
        # (a stray, unused `re.compile("\s+")` statement was removed here:
        # its result was discarded and the unescaped \s is a DeprecationWarning)
        content_type_header = self.request.headers["Content-Type"].lower() + ";"
        fmt = formats.get(
            # strip "; charset=..." segment
            content_type_header[0: content_type_header.index(";")],
            "json",
        )
        # clear all query parameters
        self.url_params = None
        # check to see whether it's tag data
        if "tags" in self.request.url:
            self.tag_data = False
            return self._tags_data(retrieved.json())
        if fmt == "atom":
            parsed = feedparser.parse(retrieved.text)
            # select the correct processor
            processor = self.processors.get(content)
            # process the content correctly with a custom rule
            return processor(parsed)
        if fmt == "snapshot":
            # we need to dump as a zip!
            self.snapshot = True
        if fmt == "bibtex":
            parser = bibtexparser.bparser.BibTexParser(common_strings=True)
            return parser.parse(retrieved.text)
        # it's binary, so return raw content
        elif fmt != "json":
            return retrieved.content
        # no need to do anything special, return JSON
        else:
            return retrieved.json()
    return wrapped_f
def ss_wrap(func):
    """ ensure that a SavedSearch object exists

    Decorator: lazily instantiates self.savedsearch before delegating
    to the wrapped method.
    """
    def wrapper(self, *args, **kwargs):
        # build the helper on first use; keep any existing instance
        self.savedsearch = self.savedsearch or SavedSearch(self)
        return func(self, *args, **kwargs)
    return wrapper
def error_handler(req):
    """ Error handler for HTTP requests

    Maps the HTTP status code of *req* onto a pyzotero exception and
    raises it. HTTP 429 is special-cased: the failed request is re-sent
    with exponential back-off (module-level `backoff` state); after the
    cumulative delay exceeds 62 seconds, TooManyRetries is raised.
    """
    # status code -> pyzotero exception class
    error_codes = {
        400: ze.UnsupportedParams,
        401: ze.UserNotAuthorised,
        403: ze.UserNotAuthorised,
        404: ze.ResourceNotFound,
        409: ze.Conflict,
        412: ze.PreConditionFailed,
        413: ze.RequestEntityTooLarge,
        428: ze.PreConditionRequired,
        429: ze.TooManyRequests,
    }
    def err_msg(req):
        """ Return a nicely-formatted error message
        """
        return "\nCode: %s\nURL: %s\nMethod: %s\nResponse: %s" % (
            req.status_code,
            # error.msg,
            req.url,
            req.request.method,
            req.text,
        )
    if error_codes.get(req.status_code):
        # check to see whether its 429
        if req.status_code == 429:
            # call our back-off function
            delay = backoff.delay
            if delay > 32:
                # we've waited a total of 62 seconds (2 + 4 … + 32), so give up
                backoff.reset()
                raise ze.TooManyRetries(
                    "Continuing to receive HTTP 429 \
        responses after 62 seconds. You are being rate-limited, try again later"
                )
            time.sleep(delay)
            # re-send the original prepared request on a fresh session;
            # recurse back into error_handler if it fails again
            sess = requests.Session()
            new_req = sess.send(req.request)
            try:
                new_req.raise_for_status()
            except requests.exceptions.HTTPError:
                error_handler(new_req)
        else:
            raise error_codes.get(req.status_code)(err_msg(req))
    else:
        # any other non-success status gets the generic wrapper
        raise ze.HTTPError(err_msg(req))
def default_headers(self):
    """Return the headers that should accompany every API request.

    Always includes the client User-Agent and the targeted API version;
    adds a Bearer authorization header when an API key is configured.
    """
    headers = {
        "User-Agent": "Pyzotero/%s" % __version__,
        "Zotero-API-Version": "%s" % __api_version__,
    }
    if self.api_key:
        headers["Authorization"] = "Bearer %s" % self.api_key
    return headers
def _cache(self, response, key):
    """
    Add a retrieved template to the cache for 304 checking

    Accepts a requests response and a key name; stores the parsed JSON
    body together with the retrieval time in self.templates, and returns
    a deep copy of the body so callers cannot mutate the cached copy.
    """
    # parse the body once instead of re-parsing it for the return value
    parsed = response.json()
    # cache template and retrieval time for subsequent calls
    thetime = datetime.datetime.utcnow().replace(tzinfo=pytz.timezone("GMT"))
    self.templates[key] = {"tmplt": parsed, "updated": thetime}
    return copy.deepcopy(parsed)
def _cleanup(self, to_clean, allow=()):
""" Remove keys we added for internal use
"""
# this item's been retrieved from the API, we only need the 'data'
# entry
if to_clean.keys() == ["links", "library", "version", "meta", "key", "data"]:
to_clean = to_clean["data"]
return dict(
[
[k, v]
for k, v in list(to_clean.items())
if (k in allow or k not in self.temp_keys)
]
) |
def _retrieve_data(self, request=None):
    """
    Retrieve Zotero items via the API
    Combine endpoint and request to access the specific resource
    Returns the response object (also stored on self.request)
    """
    # The API doesn't return a self link any more, so remember the fragment
    self.self_link = request
    response = requests.get(
        url="{}{}".format(self.endpoint, request),
        headers=self.default_headers(),
    )
    response.encoding = "utf-8"
    self.request = response
    try:
        response.raise_for_status()
    except requests.exceptions.HTTPError:
        error_handler(response)
    return response
def _extract_links(self):
"""
Extract self, first, next, last links from a request response
"""
extracted = dict()
try:
for key, value in self.request.links.items():
parsed = urlparse(value["url"])
fragment = "{path}?{query}".format(path=parsed[2], query=parsed[4])
extracted[key] = fragment
# add a 'self' link
parsed = list(urlparse(self.self_link))
# strip 'format' query parameter
stripped = "&".join(
[
"%s=%s" % (p[0], p[1])
for p in parse_qsl(parsed[4])
if p[0] != "format"
]
)
# rebuild url fragment
# this is a death march
extracted["self"] = urlunparse(
[parsed[0], parsed[1], parsed[2], parsed[3], stripped, parsed[5]]
)
return extracted
except KeyError:
# No links present, because it's a single item
return None |
def _updated(self, url, payload, template=None):
    """
    Generic call to see if a template request returns 304
    accepts:
    - a string to combine with the API endpoint
    - a dict of format values, in case they're required by 'url'
    - a template name to check for
    As per the API docs, a template less than 1 hour old is
    assumed to be fresh, and will immediately return False if found
    """
    # If the template is more than an hour old, try a 304
    if (
        abs(
            datetime.datetime.utcnow().replace(tzinfo=pytz.timezone("GMT"))
            - self.templates[template]["updated"]
        ).seconds
        > 3600
    ):
        query = self.endpoint + url.format(
            u=self.library_id, t=self.library_type, **payload
        )
        # ask the server whether the resource changed since our cached copy
        headers = {
            "If-Modified-Since": payload["updated"].strftime(
                "%a, %d %b %Y %H:%M:%S %Z"
            )
        }
        headers.update(self.default_headers())
        # perform the request, and check whether the response returns 304
        req = requests.get(query, headers=headers)
        try:
            req.raise_for_status()
        except requests.exceptions.HTTPError:
            error_handler(req)
        # 304 Not Modified means our cached template is still current
        return req.status_code == 304
    # Still plenty of life left in't
    return False
def add_parameters(self, **params):
    """
    Add URL parameters
    Also ensure that only valid format/content combinations are requested
    The encoded query string is stored on self.url_params
    """
    self.url_params = None
    # we want JSON by default
    if not params.get("format"):
        params["format"] = "json"
    # non-standard content must be retrieved as Atom
    if params.get("content"):
        params["format"] = "atom"
    # TODO: rewrite format=atom, content=json request
    if "limit" not in params or params.get("limit") == 0:
        params["limit"] = 100
    # Need ability to request arbitrary number of results for version
    # response
    # -1 value is hack that works with current version
    elif params["limit"] == -1 or params["limit"] is None:
        del params["limit"]
    # bib format can't have a limit; pop() rather than del so we don't
    # raise KeyError when 'limit' was already removed by the -1 hack above
    if params.get("format") == "bib":
        params.pop("limit", None)
    self.url_params = urlencode(params, doseq=True)
def _build_query(self, query_string, no_params=False):
"""
Set request parameters. Will always add the user ID if it hasn't
been specifically set by an API method
"""
try:
query = quote(query_string.format(u=self.library_id, t=self.library_type))
except KeyError as err:
raise ze.ParamNotPassed("There's a request parameter missing: %s" % err)
# Add the URL parameters and the user key, if necessary
if no_params is False:
if not self.url_params:
self.add_parameters()
query = "%s?%s" % (query, self.url_params)
return query |
def publications(self):
    """ Return the contents of My Publications

    Only valid for user libraries; group libraries raise CallDoesNotExist.
    """
    if self.library_type == "users":
        return self._build_query("/{t}/{u}/publications/items")
    raise ze.CallDoesNotExist(
        "This API call does not exist for group libraries"
    )
def num_collectionitems(self, collection):
    """ Return the total number of items in the specified collection
    """
    # collection keys are always upper-case
    target = "/{t}/{u}/collections/{c}/items".format(
        t=self.library_type, u=self.library_id, c=collection.upper()
    )
    return self._totals(target)
def num_tagitems(self, tag):
    """ Return the total number of items for the specified tag
    """
    target = "/{t}/{u}/tags/{ta}/items".format(
        t=self.library_type, u=self.library_id, ta=tag
    )
    return self._totals(target)
def _totals(self, query):
""" General method for returning total counts
"""
self.add_parameters(limit=1)
query = self._build_query(query)
self._retrieve_data(query)
self.url_params = None
# extract the 'total items' figure
return int(self.request.headers["Total-Results"]) |
def key_info(self, **kwargs):
    """
    Retrieve info about the permissions associated with the
    key associated to the given Zotero instance
    """
    return self._build_query("/keys/{k}".format(k=self.api_key))
def fulltext_item(self, itemkey, **kwargs):
    """ Get full-text content for a single item """
    return self._build_query(
        "/{t}/{u}/items/{itemkey}/fulltext".format(
            t=self.library_type, u=self.library_id, itemkey=itemkey
        )
    )
def set_fulltext(self, itemkey, payload):
    """
    Set full-text data for an item
    <itemkey> should correspond to an existing attachment item.
    payload should be a dict containing three keys:
    'content': the full-text content and either
    For text documents, 'indexedChars' and 'totalChars' OR
    For PDFs, 'indexedPages' and 'totalPages'.
    Returns True on success; HTTP failures are routed to error_handler.
    """
    # JSON body, so declare the content type explicitly
    headers = self.default_headers()
    headers.update({"Content-Type": "application/json"})
    req = requests.put(
        url=self.endpoint
        + "/{t}/{u}/items/{k}/fulltext".format(
            t=self.library_type, u=self.library_id, k=itemkey
        ),
        headers=headers,
        data=json.dumps(payload),
    )
    try:
        req.raise_for_status()
    except requests.exceptions.HTTPError:
        error_handler(req)
    return True
def new_fulltext(self, version):
    """
    Retrieve list of full-text content items and versions which are newer
    than <version>

    Returns the decoded JSON response.
    """
    query_string = "/{t}/{u}/fulltext".format(
        t=self.library_type, u=self.library_id
    )
    # NOTE(review): the Zotero v3 API documents 'since' as a ?since= query
    # parameter; sending it as a request header looks suspicious — confirm
    # against the API docs before relying on this
    headers = {"since": str(version)}
    headers.update(self.default_headers())
    req = requests.get(self.endpoint + query_string, headers=headers)
    try:
        req.raise_for_status()
    except requests.exceptions.HTTPError:
        error_handler(req)
    return req.json()
def last_modified_version(self, **kwargs):
    """ Get the last modified version of the library """
    # any items() call refreshes self.request with new response headers
    self.items(**kwargs)
    headers = self.request.headers
    return int(headers.get("last-modified-version", 0))
def file(self, item, **kwargs):
    """ Get the file from a specific item """
    # file downloads must not carry the usual URL parameters
    return self._build_query(
        "/{t}/{u}/items/{i}/file".format(
            t=self.library_type, u=self.library_id, i=item.upper()
        ),
        no_params=True,
    )
def dump(self, itemkey, filename=None, path=None):
    """
    Dump a file attachment to disk, with optional filename and path
    """
    # default to the attachment's own filename from its metadata
    target_name = filename or self.item(itemkey)["data"]["filename"]
    destination = os.path.join(path, target_name) if path else target_name
    contents = self.file(itemkey)
    if self.snapshot:
        # snapshots come down as zip archives
        self.snapshot = False
        destination = destination + ".zip"
    with open(destination, "wb") as handle:
        handle.write(contents)
def all_collections(self, collid=None):
    """
    Retrieve all collections and subcollections. Works for top-level collections
    or for a specific collection. Works at all collection depths.

    Returns a flat list of collection dicts.
    """
    all_collections = []

    def subcoll(clct):
        """ recursively add collections to a flat master list """
        all_collections.append(clct)
        # recurse into child collections, if any
        if clct["meta"].get("numCollections", 0) > 0:
            for child in self.everything(self.collections_sub(clct["data"]["key"])):
                subcoll(child)

    # select all top-level collections or a specific collection and
    # children
    if collid:
        toplevel = [self.collection(collid)]
    else:
        toplevel = self.everything(self.collections_top())
    # plain loops instead of list comprehensions used only for side effects
    for collection in toplevel:
        subcoll(collection)
    return all_collections
def collections_sub(self, collection, **kwargs):
    """ Get subcollections for a specific collection
    """
    # collection keys are always upper-case
    target = "/{t}/{u}/collections/{c}/collections".format(
        t=self.library_type, u=self.library_id, c=collection.upper()
    )
    return self._build_query(target)
def tags(self, **kwargs):
    """ Get tags for the whole library
    """
    # flag so the retrieve decorator routes the response to _tags_data
    self.tag_data = True
    return self._build_query("/{t}/{u}/tags")
def iterfollow(self):
    """ Generator for self.follow()

    Yields the next page of results while one exists, then stops.
    """
    # use same criterion as self.follow()
    if self.links is None:
        return
    if self.links.get("next"):
        yield self.follow()
    else:
        # PEP 479: raising StopIteration inside a generator is converted
        # to RuntimeError on Python 3.7+; a plain return ends iteration
        return
def everything(self, query):
    """
    Retrieve all items in the library for a particular query
    This method will override the 'limit' parameter if it's been set
    """
    try:
        items = []
        items.extend(query)
        # keep following 'next' links until the server stops sending them
        while self.links.get("next"):
            items.extend(self.follow())
    except TypeError:
        # we have a bibliography object ughh
        # (extend() raised because query isn't a list); page through the
        # feed and concatenate its entries instead
        items = copy.deepcopy(query)
        while self.links.get("next"):
            items.entries.extend(self.follow().entries)
    return items
def get_subset(self, subset):
    """
    Retrieve a subset of items
    Accepts a single argument: a list of item IDs (max. 50)
    """
    if len(subset) > 50:
        raise ze.TooManyItems("You may only retrieve 50 items per call")
    retr = []
    for itm in subset:
        retr.extend(self.item(itm))
    # clean up URL params when we're finished. NB: the previous code saved
    # the params, restored them, then immediately cleared them — the
    # save/restore was dead code, so it has been removed
    self.url_params = None
    return retr
def _json_processor(self, retrieved):
""" Format and return data from API calls which return Items
"""
json_kwargs = {}
if self.preserve_json_order:
json_kwargs["object_pairs_hook"] = OrderedDict
# send entries to _tags_data if there's no JSON
try:
items = [
json.loads(e["content"][0]["value"], **json_kwargs)
for e in retrieved.entries
]
except KeyError:
return self._tags_data(retrieved)
return items |
def _csljson_processor(self, retrieved):
""" Return a list of dicts which are dumped CSL JSON
"""
items = []
json_kwargs = {}
if self.preserve_json_order:
json_kwargs["object_pairs_hook"] = OrderedDict
for csl in retrieved.entries:
items.append(json.loads(csl["content"][0]["value"], **json_kwargs))
self.url_params = None
return items |
def _bib_processor(self, retrieved):
""" Return a list of strings formatted as HTML bibliography entries
"""
items = []
for bib in retrieved.entries:
items.append(bib["content"][0]["value"])
self.url_params = None
return items |
def _citation_processor(self, retrieved):
""" Return a list of strings formatted as HTML citation entries
"""
items = []
for cit in retrieved.entries:
items.append(cit["content"][0]["value"])
self.url_params = None
return items |
def item_template(self, itemtype):
    """ Get a template for a new item
    """
    template_name = "item_template_" + itemtype
    query_string = "/items/new?itemType={i}".format(i=itemtype)
    # serve from cache if we hold a copy and a 304 check says it's fresh
    cached = self.templates.get(template_name)
    if cached and not self._updated(query_string, cached, template_name):
        return copy.deepcopy(cached["tmplt"])
    # otherwise perform a normal request and cache the response
    return self._cache(self._retrieve_data(query_string), template_name)
def _attachment(self, payload, parentid=None):
    """
    Create attachments
    accepts a list of one or more attachment template dicts
    and an optional parent Item ID. If this is specified,
    attachments are created under this ID
    """
    # delegate the whole upload process to the Zupload helper
    return Zupload(self, payload, parentid).upload()
def show_condition_operators(self, condition):
    """ Show available operators for a given saved search condition """
    # look up the operator keys permitted for this condition, then
    # translate each key into its human-readable value
    permitted_keys = self.savedsearch.conditions_operators.get(condition)
    return {self.savedsearch.operators.get(op_key) for op_key in permitted_keys}
def saved_search(self, name, conditions):
    """ Create a saved search. conditions is a list of dicts
    containing search conditions, and must contain the following str keys:
    condition, operator, value
    """
    # validate conditions before touching the network
    self.savedsearch._validate(conditions)
    request_headers = dict(self.default_headers())
    request_headers["Zotero-Write-Token"] = token()
    req = requests.post(
        url=self.endpoint
        + "/{t}/{u}/searches".format(t=self.library_type, u=self.library_id),
        headers=request_headers,
        data=json.dumps([{"name": name, "conditions": conditions}]),
    )
    self.request = req
    try:
        req.raise_for_status()
    except requests.exceptions.HTTPError:
        error_handler(req)
    return req.json()
def delete_saved_search(self, keys):
    """ Delete one or more saved searches by passing a list of one or more
    unique search keys
    """
    request_headers = dict(self.default_headers())
    request_headers["Zotero-Write-Token"] = token()
    req = requests.delete(
        url=self.endpoint
        + "/{t}/{u}/searches".format(t=self.library_type, u=self.library_id),
        headers=request_headers,
        # the API accepts a comma-separated list of search keys
        params={"searchKey": ",".join(keys)},
    )
    self.request = req
    try:
        req.raise_for_status()
    except requests.exceptions.HTTPError:
        error_handler(req)
    return req.status_code
def upload_attachments(self, attachments, parentid=None, basedir=None):
    """Upload files to the already created (but never uploaded) attachments.

    attachments: list of attachment item dicts created beforehand;
    parentid: optional parent item key for child attachments;
    basedir: optional base directory the attachment filenames are relative to.
    Delegates the transfer to the Zupload helper and returns its result.
    """
    return Zupload(self, attachments, parentid, basedir=basedir).upload()
def add_tags(self, item, *tags):
    """
    Add one or more tags to a retrieved item,
    then update it on the server
    Accepts a dict, and one or more tags to add to it
    Returns the updated item from the server
    """
    # Make sure there's a (non-empty) tags field, or add one.
    # NB: the previous assert/except-AssertionError dance was control flow
    # via assert (stripped under python -O), and raised an uncaught
    # KeyError when the 'tags' key was missing entirely
    if not item["data"].get("tags"):
        item["data"]["tags"] = list()
    for tag in tags:
        item["data"]["tags"].append({"tag": "%s" % tag})
    # make sure everything's OK; raises InvalidItemFields on bad keys
    self.check_items([item])
    return self.update_item(item)
def check_items(self, items):
    """
    Check that items to be created contain no invalid dict keys
    Accepts a single argument: a list of one or more dicts
    The retrieved fields are cached and re-used until a 304 call fails
    Raises ze.InvalidItemFields if unknown keys are present; otherwise
    returns the items unchanged
    """
    # check for a valid cached version of the field list
    if self.templates.get("item_fields") and not self._updated(
        "/itemFields", self.templates["item_fields"], "item_fields"
    ):
        template = {t["field"] for t in self.templates["item_fields"]["tmplt"]}
    else:
        template = {t["field"] for t in self.item_fields()}
    # add fields we know to be OK
    template |= {
        "path",
        "tags",
        "notes",
        "itemType",
        "creators",
        "mimeType",
        "linkMode",
        "note",
        "charset",
        "dateAdded",
        "version",
        "collections",
        "dateModified",
        "relations",
        # attachment items
        "parentItem",
        "mtime",
        "contentType",
        "md5",
        "filename",
    }
    template |= set(self.temp_keys)
    for pos, item in enumerate(items):
        if set(item) == {"links", "library", "version", "meta", "key", "data"}:
            # we have an item that was retrieved from the API
            item = item["data"]
        difference = set(item.keys()).difference(template)
        if difference:
            raise ze.InvalidItemFields(
                "Invalid keys present in item %s: %s"
                % (pos + 1, " ".join(i for i in difference))
            )
    return items
def fields_types(self, tname, qstring, itemtype):
    """ Retrieve item fields or creator types
    """
    template_name = tname + itemtype
    query_string = qstring.format(i=itemtype)
    # serve the cached copy if present and a 304 check says it's fresh
    cached = self.templates.get(template_name)
    if cached and not self._updated(query_string, cached, template_name):
        return cached["tmplt"]
    # otherwise perform a normal request and cache the response
    return self._cache(self._retrieve_data(query_string), template_name)
def item_fields(self):
    """ Get all available item fields
    """
    # Check for a valid cached version first
    cached = self.templates.get("item_fields")
    if cached and not self._updated("/itemFields", cached, "item_fields"):
        return cached["tmplt"]
    # otherwise perform a normal request and cache the response
    return self._cache(self._retrieve_data("/itemFields"), "item_fields")
def create_items(self, payload, parentid=None, last_modified=None):
    """
    Create new Zotero items
    Accepts two arguments:
    a list containing one or more item dicts
    an optional parent item ID.
    Note that this can also be used to update existing items
    """
    if len(payload) > 50:
        raise ze.TooManyItems("You may only create up to 50 items per call")
    # TODO: strip extra data if it's an existing item
    headers = {"Zotero-Write-Token": token(), "Content-Type": "application/json"}
    if last_modified is not None:
        headers["If-Unmodified-Since-Version"] = str(last_modified)
    # NB: allow must be a tuple — ("key") is just the string "key", which
    # made the allow-check do substring matching on key names
    to_send = json.dumps([i for i in self._cleanup(*payload, allow=("key",))])
    headers.update(self.default_headers())
    req = requests.post(
        url=self.endpoint
        + "/{t}/{u}/items".format(t=self.library_type, u=self.library_id),
        data=to_send,
        headers=dict(headers),
    )
    self.request = req
    try:
        req.raise_for_status()
    except requests.exceptions.HTTPError:
        error_handler(req)
    resp = req.json()
    if parentid:
        # we need to create child items using PATCH
        # TODO: handle possibility of item creation + failed parent
        # attachment
        uheaders = {
            "If-Unmodified-Since-Version": req.headers["last-modified-version"]
        }
        uheaders.update(self.default_headers())
        for value in resp["success"].values():
            payload = json.dumps({"parentItem": parentid})
            presp = requests.patch(
                url=self.endpoint
                + "/{t}/{u}/items/{v}".format(
                    t=self.library_type, u=self.library_id, v=value
                ),
                data=payload,
                headers=dict(uheaders),
            )
            self.request = presp
            try:
                presp.raise_for_status()
            except requests.exceptions.HTTPError:
                error_handler(presp)
    return resp
def create_collections(self, payload, last_modified=None):
    """
    Create new Zotero collections
    Accepts one argument, a list of dicts containing the following keys:
    'name': the name of the collection
    'parentCollection': OPTIONAL, the parent collection to which you wish to add this
    """
    # no point in proceeding if there's no 'name' key
    for coll in payload:
        if "name" not in coll:
            raise ze.ParamNotPassed("The dict you pass must include a 'name' key")
        # add a blank 'parentCollection' key if it hasn't been passed
        coll.setdefault("parentCollection", "")
    request_headers = dict(self.default_headers())
    request_headers["Zotero-Write-Token"] = token()
    if last_modified is not None:
        request_headers["If-Unmodified-Since-Version"] = str(last_modified)
    req = requests.post(
        url=self.endpoint
        + "/{t}/{u}/collections".format(t=self.library_type, u=self.library_id),
        headers=request_headers,
        data=json.dumps(payload),
    )
    self.request = req
    try:
        req.raise_for_status()
    except requests.exceptions.HTTPError:
        error_handler(req)
    return req.json()
def update_collection(self, payload, last_modified=None):
    """
    Update a Zotero collection property such as 'name'
    Accepts one argument, a dict containing collection data retrieved
    using e.g. 'collections()'
    """
    version = payload["version"]
    if last_modified is not None:
        version = last_modified
    coll_key = payload["key"]
    # concurrency guard: the server rejects stale versions
    request_headers = dict(self.default_headers())
    request_headers["If-Unmodified-Since-Version"] = str(version)
    request_headers["Content-Type"] = "application/json"
    req = requests.put(
        url=self.endpoint
        + "/{t}/{u}/collections/{c}".format(
            t=self.library_type, u=self.library_id, c=coll_key
        ),
        headers=request_headers,
        data=json.dumps(payload),
    )
    self.request = req
    try:
        req.raise_for_status()
    except requests.exceptions.HTTPError:
        error_handler(req)
    return True
def attachment_simple(self, files, parentid=None):
    """
    Add attachments using filenames as title
    Arguments:
    One or more file paths to add as attachments:
    An optional Item ID, which will create child attachments
    """
    base = self._attachment_template("imported_file")
    to_add = []
    for filepath in files:
        entry = base.copy()
        entry["title"] = os.path.basename(filepath)
        entry["filename"] = filepath
        to_add.append(entry)
    if parentid:
        return self._attachment(to_add, parentid)
    return self._attachment(to_add)
def attachment_both(self, files, parentid=None):
    """
    Add child attachments using title, filename
    Arguments:
    One or more lists or tuples containing title, file path
    An optional Item ID, which will create child attachments
    """
    base = self._attachment_template("imported_file")
    to_add = []
    for pair in files:
        entry = base.copy()
        entry["title"] = pair[0]
        entry["filename"] = pair[1]
        to_add.append(entry)
    if parentid:
        return self._attachment(to_add, parentid)
    return self._attachment(to_add)
def update_item(self, payload, last_modified=None):
    """
    Update an existing item
    Accepts one argument, a dict containing Item data
    """
    # validate the payload keys before touching the network
    to_send = self.check_items([payload])[0]
    version = payload["version"] if last_modified is None else last_modified
    ident = payload["key"]
    # concurrency guard: the server rejects stale versions
    request_headers = dict(self.default_headers())
    request_headers["If-Unmodified-Since-Version"] = str(version)
    req = requests.patch(
        url=self.endpoint
        + "/{t}/{u}/items/{id}".format(
            t=self.library_type, u=self.library_id, id=ident
        ),
        headers=request_headers,
        data=json.dumps(to_send),
    )
    self.request = req
    try:
        req.raise_for_status()
    except requests.exceptions.HTTPError:
        error_handler(req)
    return True
def update_items(self, payload):
    """
    Update existing items
    Accepts one argument, a list of dicts containing Item data
    """
    # validate each item's keys before touching the network
    to_send = [self.check_items([p])[0] for p in payload]
    request_headers = dict(self.default_headers())
    # the API only accepts 50 items at a time, so split longer payloads
    for batch in chunks(to_send, 50):
        req = requests.post(
            url=self.endpoint
            + "/{t}/{u}/items/".format(t=self.library_type, u=self.library_id),
            headers=request_headers,
            data=json.dumps(batch),
        )
        self.request = req
        try:
            req.raise_for_status()
        except requests.exceptions.HTTPError:
            error_handler(req)
    return True
def addto_collection(self, collection, payload):
    """
    Add one or more items to a collection
    Accepts two arguments:
    The collection ID, and an item dict
    """
    ident = payload["key"]
    # append the target collection to the item's existing collection list
    updated_collections = payload["data"]["collections"] + [collection]
    # concurrency guard: the server rejects stale versions
    request_headers = dict(self.default_headers())
    request_headers["If-Unmodified-Since-Version"] = str(payload["version"])
    req = requests.patch(
        url=self.endpoint
        + "/{t}/{u}/items/{i}".format(
            t=self.library_type, u=self.library_id, i=ident
        ),
        data=json.dumps({"collections": updated_collections}),
        headers=request_headers,
    )
    self.request = req
    try:
        req.raise_for_status()
    except requests.exceptions.HTTPError:
        error_handler(req)
    return True
# NOTE(review): trailing dataset-viewer boilerplate ("Subsets and Splits" /
# "No community queries yet" / "The top public SQL queries…") is not part of
# this module and is not valid Python; it should be removed from the file.