| sentence1 (string, lengths 52–3.87M) | sentence2 (string, lengths 1–47.2k) | label (1 class) |
|---|---|---|
def interactive_shell(self, command=None, paster=False, detach=False):
"""
Launch an interactive shell session with all writable volumes
:param command: list of strings to execute instead of bash
"""
if not exists(self.target + '/.bash_profile'):
# this file is required for activating the virtualenv
self.create_bash_profile()
if not command:
command = []
use_tty = sys.stdin.isatty() and sys.stdout.isatty()
background = environ.get('CIRCLECI', False) or detach
if is_boot2docker():
venv_volumes = ['--volumes-from', self._get_container_name('venv')]
else:
venv_volumes = ['-v', self.datadir + '/venv:/usr/lib/ckan:rw']
self._create_run_ini(self.port, production=False, output='run.ini')
self._create_run_ini(self.port, production=True, output='test.ini',
source='ckan/test-core.ini', override_site_url=False)
script = scripts.get_script_path('shell.sh')
if paster:
script = scripts.get_script_path('paster.sh')
if command and command != ['help'] and command != ['--help']:
command += ['--config=/project/development.ini']
command = [self.extension_dir] + command
proxy_settings = self._proxy_settings()
if proxy_settings:
venv_volumes += ['-v',
self.sitedir + '/run/proxy-environment:/etc/environment:ro']
links = {self._get_container_name('solr'): 'solr',
self._get_container_name('postgres'): 'db'}
links.update({self._get_container_name(container): container for container
in self.extra_containers})
link_params = []
for link in links:
link_params.append('--link')
link_params.append(link + ':' + links[link])
if 'datapusher' in self.containers_running():
link_params.append('--link')
link_params.append(self._get_container_name('datapusher') + ':datapusher')
# FIXME: consider switching this to dockerpty
# using subprocess for docker client's interactive session
return subprocess.call([
DOCKER_EXE, 'run',
] + (['--rm'] if not background else []) + [
'-t' if use_tty else '',
'-d' if detach else '-i',
] + venv_volumes + [
'-v', self.target + ':/project:rw',
'-v', self.sitedir + '/files:/var/www/storage:rw',
'-v', script + ':/scripts/shell.sh:ro',
'-v', scripts.get_script_path('paster_cd.sh') + ':/scripts/paster_cd.sh:ro',
'-v', self.sitedir + '/run/run.ini:/project/development.ini:ro',
'-v', self.sitedir +
'/run/test.ini:/project/ckan/test-core.ini:ro'] +
link_params +
['--hostname', self.name,
'datacats/web', '/scripts/shell.sh'] + command)
|
Launch an interactive shell session with all writable volumes
:param command: list of strings to execute instead of bash
|
entailment
|
def install_package_requirements(self, psrc, stream_output=None):
"""
Install from requirements.txt file found in psrc
:param psrc: name of directory in environment directory
"""
package = self.target + '/' + psrc
assert isdir(package), package
reqname = '/requirements.txt'
if not exists(package + reqname):
reqname = '/pip-requirements.txt'
if not exists(package + reqname):
return
return self.user_run_script(
script=scripts.get_script_path('install_reqs.sh'),
args=['/project/' + psrc + reqname],
rw_venv=True,
rw_project=True,
stream_output=stream_output
)
|
Install from requirements.txt file found in psrc
:param psrc: name of directory in environment directory
|
entailment
|
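A minimal standalone sketch of the requirements-file lookup performed by `install_package_requirements` above; the helper name and the example path are hypothetical, and only the fallback order (requirements.txt, then pip-requirements.txt) is taken from the entry:

from os.path import exists, isdir, join

def find_requirements_file(package_dir):
    # Mirrors the lookup above: prefer requirements.txt, fall back to
    # pip-requirements.txt, return None if neither exists.
    assert isdir(package_dir), package_dir
    for name in ('requirements.txt', 'pip-requirements.txt'):
        candidate = join(package_dir, name)
        if exists(candidate):
            return candidate
    return None

# Hypothetical usage:
# find_requirements_file('/project/ckanext-example')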
def purge_data(self, which_sites=None, never_delete=False):
"""
Remove uploaded files, postgres db, solr index, venv
"""
# Default to the set of all sites
if not exists(self.datadir + '/.version'):
format_version = 1
else:
with open(self.datadir + '/.version') as f:
format_version = int(f.read().strip())
if format_version == 1:
print 'WARNING: Defaulting to old purge for version 1.'
datadirs = ['files', 'solr']
if is_boot2docker():
remove_container('datacats_pgdata_{}'.format(self.name))
remove_container('datacats_venv_{}'.format(self.name))
else:
datadirs += ['postgres', 'venv']
web_command(
command=['/scripts/purge.sh']
+ ['/project/data/' + d for d in datadirs],
ro={scripts.get_script_path('purge.sh'): '/scripts/purge.sh'},
rw={self.datadir: '/project/data'},
)
shutil.rmtree(self.datadir)
elif format_version == 2:
if not which_sites:
which_sites = list(self.sites)  # copy: sites are removed from self.sites below
datadirs = []
boot2docker = is_boot2docker()
if which_sites:
if self.target:
cp = SafeConfigParser()
cp.read([self.target + '/.datacats-environment'])
for site in which_sites:
if boot2docker:
remove_container(self._get_container_name('pgdata'))
else:
datadirs += [site + '/postgres']
# Always rm the site dir & solr & files
datadirs += [site, site + '/files', site + '/solr']
if self.target:
cp.remove_section('site_' + site)
self.sites.remove(site)
if self.target:
with open(self.target + '/.datacats-environment', 'w') as conf:
cp.write(conf)
datadirs = ['sites/' + datadir for datadir in datadirs]
if not self.sites and not never_delete:
datadirs.append('venv')
web_command(
command=['/scripts/purge.sh']
+ ['/project/data/' + d for d in datadirs],
ro={scripts.get_script_path('purge.sh'): '/scripts/purge.sh'},
rw={self.datadir: '/project/data'},
)
if not self.sites and not never_delete:
shutil.rmtree(self.datadir)
else:
raise DatacatsError('Unknown format version {}'.format(format_version))
|
Remove uploaded files, postgres db, solr index, venv
|
entailment
|
def logs(self, container, tail='all', follow=False, timestamps=False):
"""
:param container: 'web', 'solr' or 'postgres'
:param tail: number of lines to show
:param follow: True to return generator instead of list
:param timestamps: True to include timestamps
"""
return container_logs(
self._get_container_name(container),
tail,
follow,
timestamps)
|
:param container: 'web', 'solr' or 'postgres'
:param tail: number of lines to show
:param follow: True to return generator instead of list
:param timestamps: True to include timestamps
|
entailment
|
def _proxy_settings(self):
"""
Create/replace ~/.datacats/run/proxy-environment and return
entry for ro mount for containers
"""
if not ('https_proxy' in environ or 'HTTPS_PROXY' in environ
or 'http_proxy' in environ or 'HTTP_PROXY' in environ):
return {}
https_proxy = environ.get('https_proxy')
if https_proxy is None:
https_proxy = environ.get('HTTPS_PROXY')
http_proxy = environ.get('http_proxy')
if http_proxy is None:
http_proxy = environ.get('HTTP_PROXY')
no_proxy = environ.get('no_proxy')
if no_proxy is None:
no_proxy = environ.get('NO_PROXY', '')
no_proxy = no_proxy + ',solr,db'
out = [
'PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:'
'/bin:/usr/games:/usr/local/games"\n']
if https_proxy is not None:
out.append('https_proxy=' + posix_quote(https_proxy) + '\n')
out.append('HTTPS_PROXY=' + posix_quote(https_proxy) + '\n')
if http_proxy is not None:
out.append('http_proxy=' + posix_quote(http_proxy) + '\n')
out.append('HTTP_PROXY=' + posix_quote(http_proxy) + '\n')
if no_proxy is not None:
out.append('no_proxy=' + posix_quote(no_proxy) + '\n')
out.append('NO_PROXY=' + posix_quote(no_proxy) + '\n')
with open(self.sitedir + '/run/proxy-environment', 'w') as f:
f.write("".join(out))
return {self.sitedir + '/run/proxy-environment': '/etc/environment'}
|
Create/replace ~/.datacats/run/proxy-environment and return
entry for ro mount for containers
|
entailment
|
def _get_container_name(self, container_type):
"""
Gets the full name of a container of the type specified.
Currently the supported types are:
- 'venv'
- 'postgres'
- 'solr'
- 'web'
- 'pgdata'
- 'lessc'
- 'datapusher'
- 'redis'
The name will be formatted appropriately with any prefixes and postfixes
needed.
:param container_type: The type of container name to generate (see above).
"""
if container_type in ['venv']:
return 'datacats_{}_{}'.format(container_type, self.name)
else:
return 'datacats_{}_{}_{}'.format(container_type, self.name, self.site_name)
|
Gets the full name of a container of the type specified.
Currently the supported types are:
- 'venv'
- 'postgres'
- 'solr'
- 'web'
- 'pgdata'
- 'lessc'
- 'datapusher'
- 'redis'
The name will be formatted appropriately with any prefixes and postfixes
needed.
:param container_type: The type of container name to generate (see above).
|
entailment
|
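A small sketch of the naming rule documented above, with hypothetical environment ('myenv') and site ('primary') names; only the 'venv' type omits the site suffix:

def container_name(container_type, env_name, site_name):
    # venv containers are shared across sites, so no site suffix is added.
    if container_type == 'venv':
        return 'datacats_{}_{}'.format(container_type, env_name)
    return 'datacats_{}_{}_{}'.format(container_type, env_name, site_name)

print(container_name('venv', 'myenv', 'primary'))  # datacats_venv_myenv
print(container_name('web', 'myenv', 'primary'))   # datacats_web_myenv_primary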
def less(environment, opts):
# pylint: disable=unused-argument
"""Recompiles less files in an environment.
Usage:
datacats less [ENVIRONMENT]
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
"""
require_extra_image(LESSC_IMAGE)
print 'Converting .less files to .css...'
for log in environment.compile_less():
print log
|
Recompiles less files in an environment.
Usage:
datacats less [ENVIRONMENT]
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
|
entailment
|
def fetch_and_convert_dataset(source_files, target_filename):
"""
Decorator applied to a dataset conversion function that converts acquired
source files into a dataset file that BatchUp can use.
Parameters
----------
source_files: list of `AbstractSourceFile` instances
A list of files to be acquired
target_filename: str or callable
The name of the target file in which to store the converted data
either as a string or as a function of the form `fn() -> str`
that returns it.
The conversion function is of the form `fn(source_paths, target_path)`.
It should return `target_path` if successful, `None` otherwise.
After the conversion function is successfully applied, the temporary
source files that were downloaded or copied into BatchUp's temporary
directory are deleted, unless the conversion function moved or deleted
them in which case no action is taken.
Example
-------
In this example, we will show how to acquire the USPS dataset from an
online source. USPS is provided as an HDF5 file anyway, so the
conversion function simply moves it to the target path:
>>> import shutil
>>>
>>> _USPS_SRC_ONLINE = DownloadSourceFile(
... filename='usps.h5',
... url='https://github.com/Britefury/usps_dataset/raw/master/'
... 'usps.h5',
... sha256='ba768d9a9b11e79b31c1e40130647c4fc04e6afc1fb41a0d4b9f11'
... '76065482b4'
... )
>>>
>>> @fetch_and_convert_dataset([_USPS_SRC_ONLINE], 'usps.h5')
... def usps_data_online(source_paths, target_path):
... usps_path = source_paths[0]
... # For other datasets, you would convert the data here
... # In this case, we move the file
... shutil.move(usps_path, target_path)
... # Return the target path indicating success
... return target_path
>>>
>>> # Now use it:
>>> usps_path = usps_data_online() #doctest: +SKIP
In this example, the USPS dataset will be acquired from a file on the
filesystem. Note that the source path is fixed; the next example
shows how we can determine the source path dynamically:
>>> _USPS_SRC_OFFLINE_FIXED = CopySourceFile(
... filename='usps.h5',
... source_path='some/path/to/usps.h5',
... sha256='ba768d9a9b11e79b31c1e40130647c4fc04e6afc1fb41a0d4b9f11'
... '76065482b4'
... )
>>>
>>> @fetch_and_convert_dataset([_USPS_SRC_OFFLINE_FIXED], 'usps.h5')
... def usps_data_offline_fixed(source_paths, target_path):
... usps_path = source_paths[0]
... # For other datasets, you would convert the data here
... # In this case, we move the file
... shutil.move(usps_path, target_path)
... # Return the target path indicating success
... return target_path
>>>
>>> # Now use it:
>>> usps_path = usps_data_offline_fixed() #doctest: +SKIP
The source path is provided as an argument to the decorated fetch
function:
>>> _USPS_SRC_OFFLINE_DYNAMIC = CopySourceFile(
... filename='usps.h5',
... arg_name='usps_path',
... sha256='ba768d9a9b11e79b31c1e40130647c4fc04e6afc1fb41a0d4b9f11'
... '76065482b4'
... )
>>>
>>> @fetch_and_convert_dataset([_USPS_SRC_OFFLINE_DYNAMIC], 'usps.h5')
... def usps_data_offline_dynamic(source_paths, target_path):
... usps_path = source_paths[0]
... # For other datasets, you would convert the data here
... # In this case, we move the file
... shutil.move(usps_path, target_path)
... # Return the target path indicating success
... return target_path
>>>
>>> # Now use it (note that the KW-arg `usps_path` is the same
>>> # as the `arg_name` parameter given to `CopySourceFile` above):
>>> usps_path = usps_data_offline_dynamic(
... usps_path=get_config('mypath')) #doctest: +SKIP
"""
if not isinstance(target_filename, six.string_types) and \
not callable(target_filename):
raise TypeError(
'target_filename must either be a string or be callable (it is '
'a {})'.format(type(target_filename)))
for src in source_files:
if not isinstance(src, AbstractSourceFile):
raise TypeError('source_files should contain '
'`AbstractSourceFile` instances, '
'not {}'.format(type(src)))
def decorate_fetcher(convert_function):
def fetch(**kwargs):
target_fn = path_string(target_filename)
target_path = config.get_data_path(target_fn)
# If the target file does not exist, we need to acquire the
# source files and convert them
if not os.path.exists(target_path):
# Acquire the source files
source_paths = []
for src in source_files:
p = src.acquire(**kwargs)
if p is not None:
if p in source_paths:
raise ValueError(
'Duplicate source file {}'.format(p))
source_paths.append(p)
else:
print('Failed to acquire {}'.format(src))
return None
# Got the source files
# Convert
converted_path = convert_function(source_paths, target_path)
# If successful, delete the source files
if converted_path is not None:
for src in source_files:
src.clean_up()
return converted_path
else:
# Target file already exists
return target_path
fetch.__name__ = convert_function.__name__
return fetch
return decorate_fetcher
|
Decorator applied to a dataset conversion function that converts acquired
source files into a dataset file that BatchUp can use.
Parameters
----------
source_files: list of `AbstractSourceFile` instances
A list of files to be acquired
target_filename: str or callable
The name of the target file in which to store the converted data
either as a string or as a function of the form `fn() -> str`
that returns it.
The conversion function is of the form `fn(source_paths, target_path)`.
It should return `target_path` if successful, `None` otherwise.
After the conversion function is successfully applied, the temporary
source files that were downloaded or copied into BatchUp's temporary
directory are deleted, unless the conversion function moved or deleted
them in which case no action is taken.
Example
-------
In this example, we will show how to acquire the USPS dataset from an
online source. USPS is provided as an HDF5 file anyway, so the
conversion function simply moves it to the target path:
>>> import shutil
>>>
>>> _USPS_SRC_ONLINE = DownloadSourceFile(
... filename='usps.h5',
... url='https://github.com/Britefury/usps_dataset/raw/master/'
... 'usps.h5',
... sha256='ba768d9a9b11e79b31c1e40130647c4fc04e6afc1fb41a0d4b9f11'
... '76065482b4'
... )
>>>
>>> @fetch_and_convert_dataset([_USPS_SRC_ONLINE], 'usps.h5')
... def usps_data_online(source_paths, target_path):
... usps_path = source_paths[0]
... # For other datasets, you would convert the data here
... # In this case, we move the file
... shutil.move(usps_path, target_path)
... # Return the target path indicating success
... return target_path
>>>
>>> # Now use it:
>>> usps_path = usps_data_online() #doctest: +SKIP
In this example, the USPS dataset will be acquired from a file on the
filesystem. Note that the source path is fixed; the next example
shows how we can determine the source path dynamically:
>>> _USPS_SRC_OFFLINE_FIXED = CopySourceFile(
... filename='usps.h5',
... source_path='some/path/to/usps.h5',
... sha256='ba768d9a9b11e79b31c1e40130647c4fc04e6afc1fb41a0d4b9f11'
... '76065482b4'
... )
>>>
>>> @fetch_and_convert_dataset([_USPS_SRC_OFFLINE_FIXED], 'usps.h5')
... def usps_data_offline_fixed(source_paths, target_path):
... usps_path = source_paths[0]
... # For other datasets, you would convert the data here
... # In this case, we move the file
... shutil.move(usps_path, target_path)
... # Return the target path indicating success
... return target_path
>>>
>>> # Now use it:
>>> usps_path = usps_data_offline_fixed() #doctest: +SKIP
The source path is provided as an argument to the decorated fetch
function:
>>> _USPS_SRC_OFFLINE_DYNAMIC = CopySourceFile(
... filename='usps.h5',
... arg_name='usps_path',
... sha256='ba768d9a9b11e79b31c1e40130647c4fc04e6afc1fb41a0d4b9f11'
... '76065482b4'
... )
>>>
>>> @fetch_and_convert_dataset([_USPS_SRC_OFFLINE_DYNAMIC], 'usps.h5')
... def usps_data_offline_dynamic(source_paths, target_path):
... usps_path = source_paths[0]
... # For other datasets, you would convert the data here
... # In this case, we move the file
... shutil.move(usps_path, target_path)
... # Return the target path indicating success
... return target_path
>>>
>>> # Now use it (note that the KW-arg `usps_path` is the same
>>> # as the `arg_name` parameter given to `CopySourceFile` above):
>>> usps_path = usps_data_offline_dynamic(
... usps_path=get_config('mypath')) #doctest: +SKIP
|
entailment
|
def delete_dataset_cache(*filenames):
"""
Delete the cache (converted files) for a dataset.
Parameters
----------
filenames: str
Filenames of files to delete
"""
for filename in filenames:
filename = path_string(filename)
path = config.get_data_path(filename)
if os.path.exists(path):
os.remove(path)
|
Delete the cache (converted files) for a dataset.
Parameters
----------
filenames: str
Filenames of files to delete
|
entailment
|
def acquire(self, **kwargs):
"""
Download the file and return its path
Returns
-------
str or None
The path of the file in BatchUp's temporary directory or None if
the download failed.
"""
return config.download_data(self.temp_filename, self.url,
self.sha256)
|
Download the file and return its path
Returns
-------
str or None
The path of the file in BatchUp's temporary directory or None if
the download failed.
|
entailment
|
def acquire(self, **kwargs):
"""
Copy the file and return its path
Returns
-------
str or None
The path of the file in BatchUp's temporary directory or None if
the copy failed.
"""
if self.source_path is None:
source_path = kwargs[self.arg_name]
else:
source_path = self.source_path
return config.copy_data(self.temp_filename, source_path, self.sha256)
|
Copy the file and return its path
Returns
-------
str or None
The path of the file in BatchUp's temporary directory or None if
the copy failed.
|
entailment
|
def acquire(self, **kwargs):
"""
Verify that the file exists and return its path
Returns
-------
str or None
The path of the file or None if it does not exist or if
verification failed.
"""
path = path_string(self.path)
if os.path.exists(path):
if config.verify_file(path, self.sha256):
return path
return None
|
Verify that the file exists and return its path
Returns
-------
str or None
The path of the file or None if it does not exist or if
verification failed.
|
entailment
|
def _retry_func(func, param, num, retry_notif, error_msg):
"""
A function which retries a given function num times and calls retry_notif each
time the function is retried.
:param func: The function to retry num times.
:param num: The number of times to try before giving up.
:param retry_notif: Will be called with the same parameter as func if we have to retry the
function. Will also receive the number of retries so far as a second
parameter.
:param error_msg: The message for the DatacatsError raised when all retries fail.
Throws DatacatsError if we run out of retries. Returns otherwise.
"""
for retry_num in range(num):
if retry_num:
retry_notif(param, retry_num)
try:
func(param)
return
except DatacatsError:
pass
raise DatacatsError(error_msg)
|
A function which retries a given function num times and calls retry_notif each
time the function is retried.
:param func: The function to retry num times.
:param num: The number of times to try before giving up.
:param retry_notif: Will be called with the same parameter as func if we have to retry the
function. Will also receive the number of retries so far as a second
parameter.
:param error_msg: The message for the DatacatsError raised when all retries fail.
Throws DatacatsError if we run out of retries. Returns otherwise.
|
entailment
|
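A hedged usage sketch of `_retry_func` above; it assumes `_retry_func` and `DatacatsError` from the entry are in scope, and the flaky operation and retry count are made up for illustration:

state = {'calls': 0}

def flaky_remove(name):
    # Fails twice, then succeeds, standing in for a container removal.
    state['calls'] += 1
    if state['calls'] < 3:
        raise DatacatsError('{} is still busy'.format(name))

def notify(name, retry_num):
    print('retrying removal of {} (retry {})'.format(name, retry_num))

# Succeeds on the third attempt; with num=2 it would instead raise
# DatacatsError('could not remove web') once the retries run out.
_retry_func(flaky_remove, 'web', 5, notify, 'could not remove web')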
def retrieve(self):
"""
Retrieve a result from executing a task. Note that tasks are executed
in order and that if the next task has not yet completed, this call
will block until the result is available.
Returns
-------
A result from the result buffer.
"""
if len(self.__result_buffer) > 0:
res = self.__result_buffer.popleft()
value = res.get()
else:
return None
self.__populate_buffer()
return value
|
Retrieve a result from executing a task. Note that tasks are executed
in order and that if the next task has not yet completed, this call
will block until the result is available.
Returns
-------
A result from the result buffer.
|
entailment
|
def install(environment, opts):
"""Install or reinstall Python packages within this environment
Usage:
datacats install [-q] [--address=IP] [ENVIRONMENT [PACKAGE ...]]
datacats install -c [-q] [--address=IP] [ENVIRONMENT]
Options:
--address=IP The address to bind to when reloading after install
-c --clean Reinstall packages into a clean virtualenv
-q --quiet Do not show output from installing packages and requirements.
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
"""
environment.require_data()
install_all(environment, opts['--clean'], verbose=not opts['--quiet'],
packages=opts['PACKAGE'])
for site in environment.sites:
environment = Environment.load(environment.name, site)
if 'web' in environment.containers_running():
# FIXME: reload without changing debug setting?
manage.reload_(environment, {
'--address': opts['--address'],
'--background': False,
'--no-watch': False,
'--production': False,
'PORT': None,
'--syslog': False,
'--site-url': None,
'--interactive': False
})
|
Install or reinstall Python packages within this environment
Usage:
datacats install [-q] [--address=IP] [ENVIRONMENT [PACKAGE ...]]
datacats install -c [-q] [--address=IP] [ENVIRONMENT]
Options:
--address=IP The address to bind to when reloading after install
-c --clean Reinstall packages into a clean virtualenv
-q --quiet Do not show output from installing packages and requirements.
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
|
entailment
|
def migrate(opts):
"""Migrate an environment to a given revision of the datadir format.
Usage:
datacats migrate [-y] [-r VERSION] [ENVIRONMENT_DIR]
Options:
-r --revision=VERSION The version of the datadir format you want
to convert to [default: 2]
-y --yes Answer yes to all questions.
Defaults to '.' if ENVIRONMENT_DIR isn't specified.
"""
try:
version = int(opts['--revision'])
except:
raise DatacatsError('--revision parameter must be an integer.')
always_yes = opts['--yes']
if 'ENVIRONMENT_DIR' not in opts or not opts['ENVIRONMENT_DIR']:
cwd = getcwd()
# Get the dirname
opts['ENVIRONMENT_DIR'] = split(cwd if cwd[-1] != '/' else cwd[:-1])[1]
datadir = expanduser('~/.datacats/' + opts['ENVIRONMENT_DIR'])
if needs_format_conversion(datadir, version):
convert_environment(datadir, version, always_yes)
print 'Successfully converted datadir {} to format version {}'.format(datadir, version)
else:
print 'datadir {} is already at version {}.'.format(datadir, version)
|
Migrate an environment to a given revision of the datadir format.
Usage:
datacats migrate [-y] [-r VERSION] [ENVIRONMENT_DIR]
Options:
-r --revision=VERSION The version of the datadir format you want
to convert to [default: 2]
-y --yes Answer yes to all questions.
Defaults to '.' if ENVIRONMENT_DIR isn't specified.
|
entailment
|
def deploy(environment, opts, profile):
"""Deploy environment to production DataCats.com cloud service
Usage:
datacats deploy [--create] [ENVIRONMENT [TARGET_NAME]]
Options:
--create Create a new environment on DataCats.com instead
of updating an existing environment
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
TARGET_NAME is the name of the environment on DataCats.com. Defaults to
the environment name.
"""
target_name = opts['TARGET_NAME']
if target_name is None:
target_name = environment.name
if not valid_deploy_name(target_name):
raise DatacatsError(" `{target_name}` target name for deployment can't be accepted.\n"
"Can't have http://{target_name}.datacats.io for your datcat URL\n"
"Please choose a target name at least 5 characters long,\n"
"and containing only lowercase letters and numbers\n"
.format(target_name=target_name))
if opts['--create']:
profile.create(environment, target_name)
profile.deploy(environment, target_name, stdout)
print "Deployed source to http://{0}.datacats.io".format(target_name)
if opts['--create']:
try:
pw = confirm_password()
profile.admin_password(environment, target_name, pw)
except KeyboardInterrupt:
pass
|
Deploy environment to production DataCats.com cloud service
Usage:
datacats deploy [--create] [ENVIRONMENT [TARGET_NAME]]
Options:
--create Create a new environment on DataCats.com instead
of updating an existing environment
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
TARGET_NAME is the name of the environment on DataCats.com. Defaults to
the environment name.
|
entailment
|
def _trim_batch(batch, length):
"""Trim the mini-batch `batch` to the size `length`.
`batch` can be:
- a NumPy array, in which case its first axis will be trimmed to size
`length`
- a tuple, in which case `_trim_batch` is applied recursively to
each element and the resulting tuple is returned
As a consequence, mini-batches can be structured; lists and tuples can
be nested arbitrarily deep.
Parameters
----------
batch: tuple or NumPy array
the mini-batch to trim
length: int
the size to which `batch` is to be trimmed
Returns
-------
tuple or NumPy array of same structure as `batch`
The trimmed mini-batch
"""
if isinstance(batch, tuple):
return tuple([_trim_batch(b, length) for b in batch])
else:
return batch[:length]
|
Trim the mini-batch `batch` to the size `length`.
`batch` can be:
- a NumPy array, in which case its first axis will be trimmed to size
`length`
- a tuple, in which case `_trim_batch` is applied recursively to
each element and the resulting tuple is returned
As a consequence, mini-batches can be structured; lists and tuples can
be nested arbitrarily deep.
Parameters
----------
batch: tuple or NumPy array
the mini-batch to trim
length: int
the size to which `batch` is to be trimmed
Returns
-------
tuple or NumPy array of same structure as `batch`
The trimmed mini-batch
|
entailment
|
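A tiny example of the nested trimming described above, assuming `_trim_batch` from the entry is in scope:

import numpy as np

X = np.arange(10).reshape((5, 2))
y = np.arange(5)

# Tuples are handled recursively; each array's first axis is trimmed to 3.
trimmed = _trim_batch((X, (y,)), 3)
assert trimmed[0].shape == (3, 2)
assert trimmed[1][0].shape == (3,)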
def batch_map_concat(func, batch_iter, progress_iter_func=None,
n_batches=None, prepend_args=None):
"""
Apply a function to all the samples that are accessed as mini-batches
obtained from an iterator.
Returns the per-sample results.
The function `func` should return the result for each sample in the
mini-batch as an array. To return multiple results (e.g. loss and errors)
return a tuple of arrays (e.g. `(loss_array, error_array)`)
`batch_iter` must be an iterator that generates mini-batches that
contain samples
Parameters
----------
func: callable `func(*batch) -> results`
The function to call on each mini-batch. Note that the results
must be `None`, a tuple or a NumPy array
batch_iter: data set iterator
Iterator that generates mini-batches of data
progress_iter_func: [optional] callable
`progress_iter_func(iterator, total=total, leave=leave)`
A `tqdm` style function that will be passed the iterator that
generates training batches along with the total number of batches
and `False` for the `leave` parameter. By passing either
`tqdm.tqdm` or `tqdm.tqdm_notebook` as this argument you can have
the training loop display a progress bar.
n_batches: [optional] integer
Process at most this number of batches before returning.
prepend_args: [optional] tuple
Arguments to prepend to the arguments passed to `func`
Returns
-------
tuple
The per-sample results of the function `func`, concatenated across batches, e.g.
`(batch_A, batch_B, ...)`
Returns an empty tuple if there were 0 samples in the data set.
Examples
--------
In these examples we will demonstrate the use of `batch_map` to apply
a function (e.g. a Theano function that runs on the GPU) to samples
in a data set. We construct an iterator that generates mini-batches from
the data set and pass it to `batch_map` along with the function that
we wish to apply. The function will receive the batches and process them.
Define a function to apply to samples:
>>> def sqr_sum(x):
... # Ensure that we receive batches of the expected size:
... assert len(x) in {5, 2}
... return (x ** 2).sum(axis=1)
Construct data to process and create a data source:
>>> X = np.random.normal(size=(7, 10))
>>> ds = ArrayDataSource([X])
Apply the function defined above:
>>> batch_iter = ds.batch_iterator(batch_size=5)
>>> X_sqr_sum = batch_map_concat(sqr_sum, batch_iter)
>>> assert np.allclose(X_sqr_sum[0], (X ** 2).sum(axis=1))
There are also cases where we wish to limit the number of batches that
will be processed:
- when the iterator generates an infinite number of samples
- when the data set is huge and we wish to show results as we go
Use the `n_batches` argument to limit the number of batches to process:
>>> X_large = np.random.normal(size=(100, 10))
>>> ds_large = ArrayDataSource([X_large])
>>> iter_large = ds_large.batch_iterator(batch_size=5)
>>> for i in range(10):
... partial_result = batch_map_concat(sqr_sum, iter_large, n_batches=2)
... # Should have 10 samples per partial result
... assert len(partial_result[0]) == 10
... j = i * 10
... assert np.allclose(partial_result[0],
... (X_large[j:j + 10]**2).sum(axis=1))
"""
# Accumulator for results and number of samples
results = []
# If `progress_iter_func` is not `None`, apply it
if progress_iter_func is not None:
batch_iter = progress_iter_func(batch_iter, total=n_batches,
leave=False)
# Apply `func` to each batch
n_processed = 0
for batch in batch_iter:
# Apply on batch and check the type of the results
if prepend_args is not None:
batch_results = func(*(prepend_args + tuple(batch)))
else:
batch_results = func(*batch)
if batch_results is None:
pass
elif isinstance(batch_results, np.ndarray):
batch_results = (batch_results,)
elif isinstance(batch_results, tuple):
pass
else:
raise TypeError(
'Batch function should return a tuple of results, a '
'single result as a NumPy array, or None, '
'not {}'.format(type(batch_results)))
# Accumulate training results
if batch_results is not None:
results.append(batch_results)
n_processed += 1
if n_batches is not None and n_processed >= n_batches:
break
# Concatenate result arrays
if len(results) > 0:
results = zip(*results)
results = tuple([np.concatenate(list(r), axis=0) for r in results])
return results
else:
return None
|
Apply a function to all the samples that are accessed as mini-batches
obtained from an iterator.
Returns the per-sample results.
The function `func` should return the result for each sample in the
mini-batch as an array. To return multiple results (e.g. loss and errors)
return a tuple of arrays (e.g. `(loss_array, error_array)`)
`batch_iter` must be an iterator that generates mini-batches that
contain samples
Parameters
----------
func: callable `func(*batch) -> results`
The function to call on each mini-batch. Note that the results
must be `None`, a tuple or a NumPy array
batch_iter: data set iterator
Iterator that generates mini-batches of data
progress_iter_func: [optional] callable
`progress_iter_func(iterator, total=total, leave=leave)`
A `tqdm` style function that will be passed the iterator that
generates training batches along with the total number of batches
and `False` for the `leave` parameter. By passing either
`tqdm.tqdm` or `tqdm.tqdm_notebook` as this argument you can have
the training loop display a progress bar.
n_batches: [optional] integer
Process at most this number of batches before returning.
prepend_args: [optional] tuple
Arguments to prepend to the arguments passed to `func`
Returns
-------
tuple
The per-sample results of the function `func`, concatenated across batches, e.g.
`(batch_A, batch_B, ...)`
Returns an empty tuple if there were 0 samples in the data set.
Examples
--------
In these examples we will demonstrate the use of `batch_map` to apply
a function (e.g. a Theano function that runs on the GPU) to samples
in a data set. We construct an iterator that generates mini-batches from
the data set and pass it to `batch_map` along with the function that
we wish to apply. The function will receive the batches and process them.
Define a function to apply to samples:
>>> def sqr_sum(x):
... # Ensure that we receive batches of the expected size:
... assert len(x) in {5, 2}
... return (x ** 2).sum(axis=1)
Construct data to process and create a data source:
>>> X = np.random.normal(size=(7, 10))
>>> ds = ArrayDataSource([X])
Apply the function defined above:
>>> batch_iter = ds.batch_iterator(batch_size=5)
>>> X_sqr_sum = batch_map_concat(sqr_sum, batch_iter)
>>> assert np.allclose(X_sqr_sum[0], (X ** 2).sum(axis=1))
There are also cases where we wish to limit the number of batches that
will be processed:
- when the iterator generates an infinite number of samples
- when the data set is huge and we wish to show results as we go
Use the `n_batches` argument to limit the number of batches to process:
>>> X_large = np.random.normal(size=(100, 10))
>>> ds_large = ArrayDataSource([X_large])
>>> iter_large = ds_large.batch_iterator(batch_size=5)
>>> for i in range(10):
... partial_result = batch_map_concat(sqr_sum, iter_large, n_batches=2)
... # Should have 10 samples per partial result
... assert len(partial_result[0]) == 10
... j = i * 10
... assert np.allclose(partial_result[0],
... (X_large[j:j + 10]**2).sum(axis=1))
|
entailment
|
def batch_map_mean(func, batch_iter, progress_iter_func=None, sum_axis=None,
n_batches=None, prepend_args=None):
"""
Apply a function to all the samples that are accessed as mini-batches
obtained from an iterator.
Returns the across-samples mean of the results returned by `func`
The `sum_axis` arguments tells `mean_batch_map` how to process the
results of `func` before accumulating them:
- If `sum_axis` is `None`, `func` should return the
across-samples SUM of the results of operating on the mini-batch,
e.g. for loss and error it should
return `(sum([loss0, loss1, ... lossN]), sum([err0, err1, ... errN]))`
- Otherwise, `sum_axis` should specify the axis or axes over which
the batch results should be summed, e.g. if `func` returns a
per-sample loss and error in two arrays
`[[loss0, loss1, ... lossN], [err0, err1, ... errN]]`, give `sum_axis`
a value of `0` to sum over axis 0 to get the per-batch loss and error.
These results will be accumulated and divided by the number of samples
at the end to get the mean.
Parameters
----------
func: callable `func(*batch) -> results`
The function to call on each mini-batch. Note that the results
must be `None`, a tuple or a NumPy array
batch_iter: data set iterator
Iterator that generates mini-batches of data
progress_iter_func: [optional] callable
`progress_iter_func(iterator, total=total, leave=leave)`
A `tqdm` style function that will be passed the iterator that
generates training batches along with the total number of batches
and `False` for the `leave` parameter. By passing either
`tqdm.tqdm` or `tqdm.tqdm_notebook` as this argument you can have
the training loop display a progress bar.
sum_axis: (default=`None`) int, tuple of ints or None
If an integer or a tuple of integers, the results returned by `func`
will be summed across this axis / these axes before being accumulated;
e.g. if `func` returns an array of per-sample losses, with axis 0
being the sample dimension, passing a value of `0` as `sum_axis`
will cause these results to be summed along axis 0 to get the
per-batch sum before accumulating the losses. The total summed loss
will be divided by the number of samples at the end in order to
compute the mean loss.
n_batches: [optional] integer that specifies the number of mini-batches
to process before returning
prepend_args: [optional] tuple
Arguments to prepend to the arguments passed to `func`
Returns
-------
tuple
The sum of the results of the function `func` divided by the number of
samples processed, e.g.
`(sum(outA_per_batch) / n_samples,
sum(outB_per_batch) / n_samples,
...)`
Examples
--------
The following examples will demonstrate the use of `mean_batch_map`
to compute binary cross entropy loss over a data set.
A few variants will be demonstrated:
- the default behaviour in which the function being applied should
return the sum over the batch sample axis
- having the function return per-sample results and having
`mean_batch_map` perform the sum operation. This is easier to
understand but less efficient as a Theano function would have to
move more data back from the GPU.
- limiting the number of batches that will be processed in order to get
partial results when dealing with a large data set
Define a function to compute the per-sample binary cross entropy
loss:
>>> def binary_crossentropy_loss(pred, target):
... e = -target * np.log(pred) - (1 - target) * np.log(1 - pred)
... return e.mean(axis=1)
Now define a function that computes the *SUM* of the binary cross
entropy losses over the sample axis (axis 0), as the default
behaviour of `mean_batch_map` will sum them up and divide by the
number of samples at the end:
>>> def binary_crossentropy_loss_sum(pred, target):
... return binary_crossentropy_loss(pred, target).sum()
Construct prediction and target data
>>> pred = np.random.uniform(0.1, 0.9, size=(7, 10))
>>> tgt = np.random.uniform(0.1, 0.9, size=(7, 10))
>>> ds = ArrayDataSource([pred, tgt])
Apply the loss sum function defined above:
>>> batch_iter = ds.batch_iterator(batch_size=5)
>>> loss = batch_map_mean(binary_crossentropy_loss_sum, batch_iter)
>>> assert np.allclose(
... loss, binary_crossentropy_loss(pred, tgt).mean())
Have `mean_batch_map` sum over axis 0:
>>> batch_iter = ds.batch_iterator(batch_size=5)
>>> loss = batch_map_mean(binary_crossentropy_loss, batch_iter,
... sum_axis=0)
>>> assert np.allclose(
... loss, binary_crossentropy_loss(pred, tgt).mean())
Construct a large data set and use `batch_map_mean` with `n_batches` to compute partial results:
>>> pred_large = np.random.uniform(0.1, 0.9, size=(100, 10))
>>> tgt_large = np.random.uniform(0.1, 0.9, size=(100, 10))
>>> ds_large = ArrayDataSource([pred_large, tgt_large])
>>> iter_large = ds_large.batch_iterator(batch_size=5)
>>> for i in range(10):
... partial_loss = batch_map_mean(binary_crossentropy_loss_sum,
... iter_large, n_batches=2)
... j = i * 10
... assert np.allclose(
... partial_loss, binary_crossentropy_loss(
... pred_large[j:j + 10], tgt_large[j:j + 10]).mean())
"""
# Accumulator for results and number of samples
results_accum = None
n_samples_accum = 0
# If `progress_iter_func` is not `None`, apply it
if progress_iter_func is not None:
batch_iter = progress_iter_func(batch_iter, total=n_batches,
leave=False)
# Train on each batch
n_processed = 0
for batch in batch_iter:
# Get number of samples in batch; can vary
batch_n = _length_of_batch(batch)
# Apply on batch and check the type of the results
if prepend_args is not None:
batch_results = func(*(prepend_args + tuple(batch)))
else:
batch_results = func(*batch)
if batch_results is None:
pass
elif isinstance(batch_results, (np.ndarray, float)):
batch_results = (batch_results,)
elif isinstance(batch_results, tuple):
pass
else:
raise TypeError(
'Batch function should return a tuple of results, a '
'single result as a NumPy array or float, or None, '
'not {}'.format(type(batch_results)))
# Accumulate results and number of samples
if results_accum is None:
# Initialise the accumulator to the batch results if `func`
# returns summed results or if it returned None;
# don't attempt to iterate over None and sum each item
if batch_results is None:
pass
elif sum_axis is None:
results_accum = list(batch_results)
else:
results_accum = [br.sum(axis=sum_axis) for br in batch_results]
else:
if batch_results is not None:
for i in range(len(results_accum)):
br = batch_results[i]
if sum_axis is not None:
br = br.sum(axis=sum_axis)
results_accum[i] += br
n_samples_accum += batch_n
n_processed += 1
if n_batches is not None and n_processed >= n_batches:
break
# Divide by the number of training examples used to compute mean
if results_accum is not None:
results_accum = tuple([np.array(r).astype(float) / n_samples_accum
for r in results_accum])
return results_accum
|
Apply a function to all the samples that are accessed as mini-batches
obtained from an iterator.
Returns the across-samples mean of the results returned by `func`
The `sum_axis` arguments tells `mean_batch_map` how to process the
results of `func` before accumulating them:
- If `sum_axis` is `None`, `func` should return the
across-samples SUM of the results of operating on the mini-batch,
e.g. for loss and error it should
return `(sum([loss0, loss1, ... lossN]), sum([err0, err1, ... errN]))`
- Otherwise, `sum_axis` should specify the axis or axes over which
the batch results should be summed, e.g. if `func` returns a
per-sample loss and error in two arrays
`[[loss0, loss1, ... lossN], [err0, err1, ... errN]]`, give `sum_axis`
a value of `0` to sum over axis 0 to get the per-batch loss and error.
These results will be accumulated and divided by the number of samples
at the end to get the mean.
Parameters
----------
func: callable `func(*batch) -> results`
The function to call on each mini-batch. Note that the results
must be `None`, a tuple or a NumPy array
batch_iter: data set iterator
Iterator that generates mini-batches of data
progress_iter_func: [optional] callable
`progress_iter_func(iterator, total=total, leave=leave)`
A `tqdm` style function that will be passed the iterator that
generates training batches along with the total number of batches
and `False` for the `leave` parameter. By passing either
`tqdm.tqdm` or `tqdm.tqdm_notebook` as this argument you can have
the training loop display a progress bar.
sum_axis: (default=`None`) int, tuple of ints or None
If an integer or a tuple of integers, the results returned by `func`
will be summed across this axis / these axes before being accumulated;
e.g. if `func` returns an array of per-sample losses, with axis 0
being the sample dimension, passing a value of `0` as `sum_axis`
will cause these results to be summed along axis 0 to get the
per-batch sum before accumulating the losses. The total summed loss
will be divided by the number of samples at the end in order to
compute the mean loss.
n_batches: [optional] integer that specifies the number of mini-batches
to process before returning
prepend_args: [optional] tuple
Arguments to prepend to the arguments passed to `func`
Returns
-------
tuple
The sum of the results of the function `func` divided by the number of
samples processed, e.g.
`(sum(outA_per_batch) / n_samples,
sum(outB_per_batch) / n_samples,
...)`
Examples
--------
The following examples will demonstrate the use of `mean_batch_map`
to compute binary cross entropy loss over a data set.
A few variants will be demonstrated:
- the default behaviour in which the function being applied should
return the sum over the batch sample axis
- having the function return per-sample results and having
`mean_batch_map` perform the sum operation. This is easier to
understand but less efficient as a Theano function would have to
move more data back from the GPU.
- limiting the number of batches that will be processed in order to get
partial results when dealing with a large data set
Define a function to compute the per-sample binary cross entropy
loss:
>>> def binary_crossentropy_loss(pred, target):
... e = -target * np.log(pred) - (1 - target) * np.log(1 - pred)
... return e.mean(axis=1)
Now define a function that computes the *SUM* of the binary cross
entropy losses over the sample axis (axis 0), as the default
behaviour of `mean_batch_map` will sum them up and divide by the
number of samples at the end:
>>> def binary_crossentropy_loss_sum(pred, target):
... return binary_crossentropy_loss(pred, target).sum()
Construct prediction and target data
>>> pred = np.random.uniform(0.1, 0.9, size=(7, 10))
>>> tgt = np.random.uniform(0.1, 0.9, size=(7, 10))
>>> ds = ArrayDataSource([pred, tgt])
Apply the loss sum function defined above:
>>> batch_iter = ds.batch_iterator(batch_size=5)
>>> loss = batch_map_mean(binary_crossentropy_loss_sum, batch_iter)
>>> assert np.allclose(
... loss, binary_crossentropy_loss(pred, tgt).mean())
Have `mean_batch_map` sum over axis 0:
>>> batch_iter = ds.batch_iterator(batch_size=5)
>>> loss = batch_map_mean(binary_crossentropy_loss, batch_iter,
... sum_axis=0)
>>> assert np.allclose(
... loss, binary_crossentropy_loss(pred, tgt).mean())
Construct a large data set and use `batch_map_mean` with `n_batches` to compute partial results:
>>> pred_large = np.random.uniform(0.1, 0.9, size=(100, 10))
>>> tgt_large = np.random.uniform(0.1, 0.9, size=(100, 10))
>>> ds_large = ArrayDataSource([pred_large, tgt_large])
>>> iter_large = ds_large.batch_iterator(batch_size=5)
>>> for i in range(10):
... partial_loss = batch_map_mean(binary_crossentropy_loss_sum,
... iter_large, n_batches=2)
... j = i * 10
... assert np.allclose(
... partial_loss, binary_crossentropy_loss(
... pred_large[j:j + 10], tgt_large[j:j + 10]).mean())
|
entailment
|
def coerce_data_source(x):
"""
Helper function to coerce an object into a data source, selecting the
appropriate data source class for the given object. If `x` is already
a data source it is returned as is.
Parameters
----------
x: any
The object to coerce. If `x` is a data source, it is returned as is.
If it is a list or tuple of array-like objects they will be wrapped
in an `ArrayDataSource` that will be returned. If `x` is an iterator
it will be wrapped in an `IteratorDataSource`. If it is a callable
it will be wrapped in a `CallableDataSource`.
Returns
-------
`x` coerced into a data source
Raises
------
`TypeError` if `x` is not a data source, a list or tuple of array-like
objects, an iterator or a callable.
"""
if isinstance(x, AbstractDataSource):
return x
elif isinstance(x, (list, tuple)):
# Sequence of array-likes
items = []
for item in x:
if _is_array_like(item):
items.append(item)
else:
raise TypeError(
'Cannot convert x to a data source; x is a sequence and '
'one of the elements is not an array-like object, rather '
'a {}'.format(type(item)))
if len(items) == 0:
raise ValueError('Cannot convert x to a data source; x is an '
'empty sequence')
return ArrayDataSource(items)
elif isinstance(x, collections.Iterator):
return IteratorDataSource(x)
elif callable(x):
return CallableDataSource(x)
else:
raise TypeError('Cannot convert x to a data source; can only handle '
'iterators, callables, non-empty sequences of '
'array-like objects; cannot '
'handle {}'.format(type(x)))
|
Helper function to coerce an object into a data source, selecting the
appropriate data source class for the given object. If `x` is already
a data source it is returned as is.
Parameters
----------
x: any
The object to coerce. If `x` is a data source, it is returned as is.
If it is a list or tuple of array-like objects they will be wrapped
in an `ArrayDataSource` that will be returned. If `x` is an iterator
it will be wrapped in an `IteratorDataSource`. If it is a callable
it will be wrapped in a `CallableDataSource`.
Returns
-------
`x` coerced into a data source
Raises
------
`TypeError` if `x` is not a data source, a list or tuple of array-like
objects, an iterator or a callable.
|
entailment
|
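A brief usage sketch of the coercion rules above, assuming `coerce_data_source` and the data source classes it references (`ArrayDataSource`, `IteratorDataSource`, `CallableDataSource`) are in scope:

import numpy as np

X = np.random.normal(size=(20, 3))
y = np.random.randint(0, 2, size=(20,))

ds = coerce_data_source([X, y])              # list of arrays -> ArrayDataSource
ds_it = coerce_data_source(iter([[X, y]]))   # iterator -> IteratorDataSource
ds_same = coerce_data_source(ds)             # already a data source: returned as-is
assert ds_same is ds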
def batch_map_concat(self, func, batch_size, progress_iter_func=None,
n_batches=None, prepend_args=None, **kwargs):
"""A batch oriented implementation of `map`.
Applies a function to all the samples in this data source by breaking
the data into mini-batches and applying the function to each
mini-batch.
Returns the per-sample results.
This method is a wrapper around the :func:`batch_map` function;
please see its documentation for more information and examples.
The function `func` should return the result for each sample in the
mini-batch as an array. To return multiple results (e.g. loss and
errors) return a tuple of arrays (e.g. `(loss_array, error_array)`)
Parameters
----------
func: callable `func(*batch) -> results`
The function to call on each mini-batch. Note that the results
must be `None`, a tuple or a NumPy array
batch_size: int
The mini-batch size
progress_iter_func: [optional] callable
`progress_iter_func(iterator, total=total, leave=leave)`
A `tqdm` style function that will be passed the iterator that
generates training batches along with the total number of batches
and `False` for the `leave` parameter. By passing either
`tqdm.tqdm` or `tqdm.tqdm_notebook` as this argument you can have
the training loop display a progress bar.
n_batches: [optional] integer that specifies the number of mini-batches
to process before returning
prepend_args: [optional] tuple
Arguments to prepend to the arguments passed to `func`
Returns
-------
tuple
The per-sample results of the function `func`, concatenated across batches, e.g.
`(batch_A, batch_B, ...)`
Returns an empty tuple if there were 0 samples in the data set.
Examples
--------
Define a function to apply to samples:
>>> def sqr_sum(x):
... return (x ** 2).sum(axis=1)
Construct data to process and create a data source:
>>> X = np.random.normal(size=(7, 10))
>>> ds = ArrayDataSource([X])
Apply the function defined above:
>>> X_sqr_sum = ds.batch_map_concat(sqr_sum, batch_size=5)
>>> assert (X_sqr_sum[0] == (X ** 2).sum(axis=1)).all()
"""
if n_batches is None:
n = self.num_samples(**kwargs)
if n == np.inf:
raise ValueError('Data set has infinite size or sampler will '
'generate infinite samples but no n_batches '
'limit specified')
elif n is not None:
n_batches = sampling.num_batches(n, batch_size)
batch_iter = self.batch_iterator(batch_size, **kwargs)
return batch_map_concat(func, batch_iter, progress_iter_func,
n_batches, prepend_args)
|
A batch oriented implementation of `map`.
Applies a function to all the samples in this data source by breaking
the data into mini-batches and applying the function to each
mini-batch.
Returns the per-sample results.
This method is a wrapper around the :func:`batch_map` function;
please see its documentation for more information and examples.
The function `func` should return the result for each sample in the
mini-batch as an array. To return multiple results (e.g. loss and
errors) return a tuple of arrays (e.g. `(loss_array, error_array)`)
Parameters
----------
func: callable `func(*batch) -> results`
The function to call on each mini-batch. Note that the results
must be `None`, a tuple or a NumPy array
batch_size: int
The mini-batch size
progress_iter_func: [optional] callable
`progress_iter_func(iterator, total=total, leave=leave)`
A `tqdm` style function that will be passed the iterator that
generates training batches along with the total number of batches
and `False` for the `leave` parameter. By passing either
`tqdm.tqdm` or `tqdm.tqdm_notebook` as this argument you can have
the training loop display a progress bar.
n_batches: [optional] integer that specifies the number of mini-batches
to process before returning
prepend_args: [optional] tuple
Arguments to prepend to the arguments passed to `func`
Returns
-------
tuple
The per-sample results of the function `func`, concatenated across batches, e.g.
`(batch_A, batch_B, ...)`
Returns an empty tuple if there were 0 samples in the data set.
Examples
--------
Define a function to apply to samples:
>>> def sqr_sum(x):
... return (x ** 2).sum(axis=1)
Construct data to process and create a data source:
>>> X = np.random.normal(size=(7, 10))
>>> ds = ArrayDataSource([X])
Apply the function defined above:
>>> X_sqr_sum = ds.batch_map_concat(sqr_sum, batch_size=5)
>>> assert (X_sqr_sum[0] == (X ** 2).sum(axis=1)).all()
|
entailment
|
def samples_by_indices(self, indices):
"""
Gather a batch of samples by indices, applying the mapping
described by the (optional) `indices` array passed to the
constructor.
Parameters
----------
indices: 1D-array of ints or slice
The samples to retrieve
Returns
-------
list of arrays
A mini-batch in the form of a list of NumPy arrays
"""
indices = self.sampler.map_indices(indices)
return self.samples_by_indices_nomapping(indices)
|
Gather a batch of samples by indices, applying the mapping
described by the (optional) `indices` array passed to the
constructor.
Parameters
----------
indices: 1D-array of ints or slice
The samples to retrieve
Returns
-------
list of arrays
A mini-batch in the form of a list of NumPy arrays
|
entailment
|
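A hedged sketch of the index mapping described above. It assumes BatchUp's `ArrayDataSource` accepts an `indices` array in its constructor (as the docstring suggests) and that `samples_by_indices` maps requested indices through that array before gathering:

import numpy as np
from batchup.data_source import ArrayDataSource  # assumed import path

X = np.arange(100).reshape((20, 5))
subset = np.array([3, 7, 11, 15])

ds = ArrayDataSource([X], indices=subset)  # `indices` kwarg is an assumption

# Indices 0 and 2 are mapped through `subset`, giving rows 3 and 11 of X;
# samples_by_indices_nomapping(np.array([0, 2])) would return rows 0 and 2.
(batch_x,) = ds.samples_by_indices(np.array([0, 2]))
assert (batch_x == X[[3, 11]]).all()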
def batch_indices_iterator(self, batch_size, shuffle=None, **kwargs):
"""
Create an iterator that generates mini-batch sample indices.
The batches will have `batch_size` elements, with the exception
of the final batch which will have less if there are insufficient
elements left to make a complete batch.
If `shuffle` is `None` or `False` elements will be extracted in
order. If it is a `numpy.random.RandomState`, it will be used to
randomise the order in which elements are extracted from the data.
If it is `True`, NumPy's default random number generator will be
used to shuffle elements.
If an array of indices was provided to the constructor, the subset of
samples identified in that array is used, rather than the complete
set of samples.
The generated mini-batch indices take the form of 1D NumPy integer
arrays.
Parameters
----------
batch_size: int
Mini-batch size
shuffle: `numpy.random.RandomState` or `True` or `None`
Used to randomise element order. If `None`, elements will be
extracted in order. If it is a `RandomState` instance, that
RNG will be used to shuffle elements. If it is `True`, NumPy's
default RNG will be used.
Returns
-------
iterator
An iterator that generates mini-batches in the form of 1D NumPy
integer arrays.
"""
shuffle_rng = self._get_shuffle_rng(shuffle)
if shuffle_rng is not None:
return self.sampler.shuffled_indices_batch_iterator(
batch_size, shuffle_rng)
else:
return self.sampler.in_order_indices_batch_iterator(batch_size)
|
Create an iterator that generates mini-batch sample indices.
The batches will have `batch_size` elements, with the exception
of the final batch which will have less if there are insufficient
elements left to make a complete batch.
If `shuffle` is `None` or `False` elements will be extracted in
order. If it is a `numpy.random.RandomState`, it will be used to
randomise the order in which elements are extracted from the data.
If it is `True`, NumPy's default random number generator will be
used to shuffle elements.
If an array of indices was provided to the constructor, the subset of
samples identified in that array is used, rather than the complete
set of samples.
The generated mini-batch indices take the form of 1D NumPy integer
arrays.
Parameters
----------
batch_size: int
Mini-batch size
shuffle: `numpy.random.RandomState` or `True` or `None`
Used to randomise element order. If `None`, elements will be
extracted in order. If it is a `RandomState` instance, that
RNG will be used to shuffle elements. If it is `True`, NumPy's
default RNG will be used.
Returns
-------
iterator
An iterator that generates mini-batches in the form of 1D NumPy
integer arrays.
|
entailment
|
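A short hedged example of iterating mini-batch indices as documented above, assuming BatchUp's `ArrayDataSource` (import path is an assumption):

import numpy as np
from batchup.data_source import ArrayDataSource  # assumed import path

X = np.random.normal(size=(23, 4))
ds = ArrayDataSource([X])

# In order: four full batches of 5 and a final short batch of 3.
sizes = [len(ndx) for ndx in ds.batch_indices_iterator(batch_size=5)]
assert sizes == [5, 5, 5, 5, 3]

# Shuffled, with a seeded RNG for repeatability.
first = next(ds.batch_indices_iterator(batch_size=5,
                                       shuffle=np.random.RandomState(12345)))
assert len(first) == 5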
def batch_iterator(self, batch_size, shuffle=None, **kwargs):
"""
Create an iterator that generates mini-batches extracted from
this data source. The batches will have `batch_size` elements, with
the exception of the final batch which will have less if there are
insufficient elements left to make a complete batch.
If `shuffle` is `None` or `False` elements will be extracted in
order. If it is a `numpy.random.RandomState`, it will be used to
randomise the order in which elements are extracted from the data.
If it is `True`, NumPy's default random number generator will be
used to shuffle elements.
If an array of indices was provided to the constructor, the subset of
samples identified in that array is used, rather than the complete
set of samples.
The generated mini-batches take the form `[batch_x, batch_y, ...]`.
Parameters
----------
batch_size: int
Mini-batch size
shuffle: `numpy.random.RandomState` or `True` or `None`
Used to randomise element order. If `None`, elements will be
extracted in order. If it is a `RandomState` instance, that
RNG will be used to shuffle elements. If it is `True`, NumPy's
default RNG will be used.
Returns
-------
iterator
An iterator that generates items of type `[batch_x, batch_y, ...]`
where `batch_x`, `batch_y`, etc are themselves arrays.
"""
for batch_ndx in self.batch_indices_iterator(
batch_size, shuffle=shuffle, **kwargs):
yield self.samples_by_indices_nomapping(batch_ndx)
|
Create an iterator that generates mini-batches extracted from
this data source. The batches will have `batch_size` elements, with
the exception of the final batch which will have less if there are
insufficient elements left to make a complete batch.
If `shuffle` is `None` or `False` elements will be extracted in
order. If it is a `numpy.random.RandomState`, it will be used to
randomise the order in which elements are extracted from the data.
If it is `True`, NumPy's default random number generator will be
        used to shuffle elements.
If an array of indices was provided to the constructor, the subset of
samples identified in that array is used, rather than the complete
set of samples.
The generated mini-batches take the form `[batch_x, batch_y, ...]`.
Parameters
----------
batch_size: int
Mini-batch size
shuffle: `numpy.random.RandomState` or `True` or `None`
Used to randomise element order. If `None`, elements will be
extracted in order. If it is a `RandomState` instance, that
RNG will be used to shuffle elements. If it is `True`, NumPy's
default RNG will be used.
Returns
-------
iterator
An iterator that generates items of type `[batch_x, batch_y, ...]`
where `batch_x`, `batch_y`, etc are themselves arrays.
|
entailment
|
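A minimal standalone sketch of the iteration contract described above, for readers who want to see the shapes involved. The helper name simple_batch_iterator and the toy arrays are invented for illustration and are not part of the code above.

import numpy as np

def simple_batch_iterator(arrays, batch_size, shuffle=None):
    # Yield [batch_x, batch_y, ...] lists of up to batch_size rows each,
    # with a shorter final batch; shuffle may be a RandomState or None.
    n = len(arrays[0])
    order = shuffle.permutation(n) if shuffle is not None else np.arange(n)
    for start in range(0, n, batch_size):
        ndx = order[start:start + batch_size]
        yield [a[ndx] for a in arrays]

X = np.arange(20).reshape(10, 2)
y = np.arange(10)
for batch_X, batch_y in simple_batch_iterator([X, y], 4, np.random.RandomState(0)):
    print(batch_X.shape, batch_y.shape)   # (4, 2) (4,), (4, 2) (4,), (2, 2) (2,)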
def samples_by_indices_nomapping(self, indices):
"""
Gather a batch of samples by indices *without* applying any index
mapping resulting from the (optional) use of the `indices` array
passed to the constructor.
Parameters
----------
indices: 1D-array of ints or slice
The samples to retrieve
Returns
-------
list of arrays
A mini-batch in the form of a list of NumPy arrays
"""
batch = tuple([d[indices] for d in self.data])
if self.include_indices:
if isinstance(indices, slice):
indices = np.arange(indices.start, indices.stop,
indices.step)
return (indices,) + batch
else:
return batch
|
Gather a batch of samples by indices *without* applying any index
mapping resulting from the (optional) use of the `indices` array
passed to the constructor.
Parameters
----------
indices: 1D-array of ints or slice
The samples to retrieve
Returns
-------
list of arrays
A mini-batch in the form of a list of NumPy arrays
|
entailment
|
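The slice handling above (converting a slice to an explicit index array when include_indices is set) can be illustrated with a small self-contained sketch; gather_nomapping and the arrays below are hypothetical names, and the None-handling for the slice fields is an assumption added for safety.

import numpy as np

def gather_nomapping(data, indices, include_indices=False):
    # Index every array with the same indices; optionally prepend the indices.
    batch = tuple(d[indices] for d in data)
    if include_indices:
        if isinstance(indices, slice):
            indices = np.arange(indices.start or 0, indices.stop,
                                indices.step or 1)
        return (indices,) + batch
    return batch

X = np.arange(12).reshape(6, 2)
y = np.arange(6)
print(gather_nomapping([X, y], np.array([0, 3, 5]), include_indices=True))
print(gather_nomapping([X, y], slice(0, 4), include_indices=True))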
def num_samples(self, **kwargs):
"""
Get the number of samples in this data source.
Returns
-------
int, `np.inf` or `None`.
An int if the number of samples is known, `np.inf` if it is
infinite or `None` if the number of samples is unknown.
"""
if self.num_samples_fn is None:
return None
elif callable(self.num_samples_fn):
return self.num_samples_fn(**kwargs)
else:
return self.num_samples_fn
|
Get the number of samples in this data source.
Returns
-------
int, `np.inf` or `None`.
An int if the number of samples is known, `np.inf` if it is
infinite or `None` if the number of samples is unknown.
|
entailment
|
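The num_samples_fn convention above (None for unknown, a number such as np.inf, or a callable that receives the forwarded keyword arguments) is easy to see in isolation; the class and names below are illustrative only.

import numpy as np

class CountedSource(object):
    def __init__(self, num_samples_fn=None):
        self.num_samples_fn = num_samples_fn

    def num_samples(self, **kwargs):
        # Same three-way convention as documented above.
        if self.num_samples_fn is None:
            return None
        elif callable(self.num_samples_fn):
            return self.num_samples_fn(**kwargs)
        return self.num_samples_fn

print(CountedSource().num_samples())                                        # None (unknown)
print(CountedSource(np.inf).num_samples())                                  # inf (infinite stream)
print(CountedSource(lambda epochs=1: 1000 * epochs).num_samples(epochs=3))  # 3000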
def samples_by_indices_nomapping(self, indices):
"""
Gather a batch of samples by indices *without* applying any index
mapping.
Parameters
----------
indices: list of either 1D-array of ints or slice
A list of index arrays or slices; one for each data source
that identify the samples to access
Returns
-------
nested list of arrays
A mini-batch
"""
if not self._random_access:
raise TypeError('samples_by_indices_nomapping method not '
'supported as one or more of the underlying '
'data sources does not support random access')
if len(indices) != len(self.datasets):
raise ValueError(
'length mis-match: indices has {} items, self has {} data '
'sources, should be equal'.format(len(indices),
len(self.datasets)))
batch = tuple([ds.samples_by_indices_nomapping(ndx)
for ds, ndx in zip(self.datasets, indices)])
return self._prepare_batch(batch)
|
Gather a batch of samples by indices *without* applying any index
mapping.
Parameters
----------
indices: list of either 1D-array of ints or slice
A list of index arrays or slices; one for each data source
that identify the samples to access
Returns
-------
nested list of arrays
A mini-batch
|
entailment
|
def batch_indices_iterator(self, batch_size, **kwargs):
"""
Create an iterator that generates mini-batch sample indices
        The generated mini-batch indices take the form of nested lists of
either:
- 1D NumPy integer arrays
- slices
        The list nesting structure will match that of the tree of data sources
rooted at `self`
Parameters
----------
batch_size: int
Mini-batch size
Returns
-------
iterator
An iterator that generates items that are nested lists of slices
or 1D NumPy integer arrays.
"""
if not self._random_access:
raise TypeError('batch_indices_iterator method not supported as '
'one or more of the underlying data sources '
'does not support random access')
iterators = [d.batch_indices_iterator(batch_size, **kwargs)
for d in self.datasets]
for batch in six.moves.zip(*iterators):
yield self._prepare_index_batch(batch)
|
Create an iterator that generates mini-batch sample indices
        The generated mini-batch indices take the form of nested lists of
either:
- 1D NumPy integer arrays
- slices
        The list nesting structure will match that of the tree of data sources
rooted at `self`
Parameters
----------
batch_size: int
Mini-batch size
Returns
-------
iterator
An iterator that generates items that are nested lists of slices
or 1D NumPy integer arrays.
|
entailment
|
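A stripped-down sketch of the zip-of-iterators pattern used above: each underlying source produces its own index batches, and the composite yields one nested list per step, stopping with the shortest source. All names here are invented for illustration.

def composite_index_batches(iterators):
    # One nested index batch per step, one entry per underlying data source.
    for step in zip(*iterators):
        yield list(step)

a = iter([[0, 1], [2, 3], [4]])
b = iter([[10, 11], [12, 13]])
for nested in composite_index_batches([a, b]):
    print(nested)   # [[0, 1], [10, 11]] then [[2, 3], [12, 13]]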
def samples_by_indices_nomapping(self, indices):
"""
Gather a batch of samples by indices *without* applying any index
mapping.
Parameters
----------
indices: a tuple of the form `(dataset_index, sample_indices)`
The `dataset_index` identifies the dataset from which to draw
samples while `sample_indices` identifies the samples to draw
from it.
Returns
-------
nested list of arrays
A mini-batch
"""
if not self._random_access:
raise TypeError('samples_by_indices_nomapping method not '
'supported as one or more of the underlying '
'data sources does not support random access')
if not isinstance(indices, tuple):
raise TypeError('indices should be a tuple, not a {}'.format(
type(indices)
))
dataset_index, sample_indices = indices
ds = self.datasets[dataset_index]
return ds.samples_by_indices_nomapping(sample_indices)
|
Gather a batch of samples by indices *without* applying any index
mapping.
Parameters
----------
indices: a tuple of the form `(dataset_index, sample_indices)`
The `dataset_index` identifies the dataset from which to draw
samples while `sample_indices` identifies the samples to draw
from it.
Returns
-------
nested list of arrays
A mini-batch
|
entailment
|
def batch_indices_iterator(self, batch_size, shuffle=None, **kwargs):
"""
Create an iterator that generates mini-batch sample indices
        The generated mini-batch indices take the form of nested lists of
either:
- 1D NumPy integer arrays
- slices
        The list nesting structure will match that of the tree of data sources
rooted at `self`
Parameters
----------
batch_size: int
Mini-batch size
shuffle: `numpy.random.RandomState` or `True` or `None`
Used to randomise element order. If `None`, elements will be
extracted in order. If it is a `RandomState` instance, that
RNG will be used to shuffle elements. If it is `True`, NumPy's
default RNG will be used.
Returns
-------
iterator
An iterator that generates items that are nested lists of slices
or 1D NumPy integer arrays.
"""
if not self._random_access:
raise TypeError('batch_indices_iterator method not supported as '
'one or more of the underlying data sources '
'does not support random access')
shuffle_rng = self._get_shuffle_rng(shuffle)
iterators = [d.batch_indices_iterator(batch_size,
shuffle=shuffle_rng, **kwargs)
for d in self.datasets]
return self._ds_iterator(batch_size, iterators, shuffle_rng, **kwargs)
|
Create an iterator that generates mini-batch sample indices
        The generated mini-batch indices take the form of nested lists of
either:
- 1D NumPy integer arrays
- slices
        The list nesting structure will match that of the tree of data sources
rooted at `self`
Parameters
----------
batch_size: int
Mini-batch size
shuffle: `numpy.random.RandomState` or `True` or `None`
Used to randomise element order. If `None`, elements will be
extracted in order. If it is a `RandomState` instance, that
RNG will be used to shuffle elements. If it is `True`, NumPy's
default RNG will be used.
Returns
-------
iterator
An iterator that generates items that are nested lists of slices
or 1D NumPy integer arrays.
|
entailment
|
def samples_by_indices_nomapping(self, indices):
"""
Gather a batch of samples by indices *without* applying any index
mapping.
Parameters
----------
indices: 1D-array of ints or slice
An index array or a slice that selects the samples to retrieve
Returns
-------
nested list of arrays
A mini-batch
"""
if not self._random_access:
raise TypeError('samples_by_indices_nomapping method not '
'supported as one or more of the underlying '
'data sources does not support random access')
batch = self.source.samples_by_indices_nomapping(indices)
return self.fn(*batch)
|
Gather a batch of samples by indices *without* applying any index
mapping.
Parameters
----------
indices: 1D-array of ints or slice
An index array or a slice that selects the samples to retrieve
Returns
-------
nested list of arrays
A mini-batch
|
entailment
|
def samples_by_indices(self, indices):
"""
Gather a batch of samples by indices, applying any index
mapping defined by the underlying data sources.
Parameters
----------
indices: 1D-array of ints or slice
An index array or a slice that selects the samples to retrieve
Returns
-------
nested list of arrays
A mini-batch
"""
if not self._random_access:
raise TypeError('samples_by_indices method not supported as one '
'or more of the underlying data sources does '
'not support random access')
batch = self.source.samples_by_indices(indices)
return self.fn(*batch)
|
Gather a batch of samples by indices, applying any index
mapping defined by the underlying data sources.
Parameters
----------
indices: 1D-array of ints or slice
An index array or a slice that selects the samples to retrieve
Returns
-------
nested list of arrays
A mini-batch
|
entailment
|
def batch_indices_iterator(self, batch_size, **kwargs):
"""
Create an iterator that generates mini-batch sample indices
        The generated mini-batch indices take the form of nested lists of
either:
- 1D NumPy integer arrays
- slices
        The list nesting structure will match that of the tree of data sources
rooted at `self`
Parameters
----------
batch_size: int
Mini-batch size
Returns
-------
iterator
An iterator that generates items that are nested lists of slices
or 1D NumPy integer arrays.
"""
if not self._random_access:
raise TypeError('batch_indices_iterator method not supported as '
'one or more of the underlying data sources '
'does not support random access')
return self.source.batch_indices_iterator(batch_size, **kwargs)
|
Create an iterator that generates mini-batch sample indices
        The generated mini-batch indices take the form of nested lists of
either:
- 1D NumPy integer arrays
- slices
        The list nesting structure will match that of the tree of data sources
rooted at `self`
Parameters
----------
batch_size: int
Mini-batch size
Returns
-------
iterator
An iterator that generates items that are nested lists of slices
or 1D NumPy integer arrays.
|
entailment
|
def purge(opts):
"""Purge environment database and uploaded files
Usage:
datacats purge [-s NAME | --delete-environment] [-y] [ENVIRONMENT]
Options:
--delete-environment Delete environment directory as well as its data, as
well as the data for **all** sites.
  -s --site=NAME          Specify a site to be purged [default: primary]
-y --yes Respond yes to all prompts (i.e. force)
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
"""
old = False
try:
environment = Environment.load(opts['ENVIRONMENT'], opts['--site'])
except DatacatsError:
environment = Environment.load(opts['ENVIRONMENT'], opts['--site'], data_only=True)
if get_format_version(environment.datadir) == 1:
old = True
environment = Environment.load(opts['ENVIRONMENT'], opts['--site'], allow_old=True)
# We need a valid site if they don't want to blow away everything.
if not opts['--delete-environment'] and not old:
environment.require_valid_site()
sites = [opts['--site']] if not opts['--delete-environment'] else environment.sites
if not opts['--yes']:
y_or_n_prompt('datacats purge will delete all stored data')
environment.stop_ckan()
environment.stop_supporting_containers()
environment.purge_data(sites)
if opts['--delete-environment']:
if environment.target:
rmtree(environment.target)
else:
            raise DatacatsError(("Unable to find the environment source"
                                 " directory so that it can be deleted.\n"
                                 "Chances are it's because it already does not exist"))
|
Purge environment database and uploaded files
Usage:
datacats purge [-s NAME | --delete-environment] [-y] [ENVIRONMENT]
Options:
--delete-environment Delete environment directory as well as its data, as
well as the data for **all** sites.
  -s --site=NAME          Specify a site to be purged [default: primary]
-y --yes Respond yes to all prompts (i.e. force)
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
|
entailment
|
def pretty_print(self):
"""
Print the error message to stdout with colors and borders
"""
print colored.blue("-" * 40)
print colored.red("datacats: problem was encountered:")
print self.message
print colored.blue("-" * 40)
|
Print the error message to stdout with colors and borders
|
entailment
|
def generate_password():
"""
Return a 16-character alphanumeric random string generated by the
operating system's secure pseudo random number generator
"""
chars = uppercase + lowercase + digits
return ''.join(SystemRandom().choice(chars) for x in xrange(16))
|
Return a 16-character alphanumeric random string generated by the
operating system's secure pseudo random number generator
|
entailment
|
def _machine_check_connectivity():
"""
This method calls to docker-machine on the command line and
makes sure that it is up and ready.
Potential improvements to be made:
- Support multiple machine names (run a `docker-machine ls` and then
see which machines are active. Use a priority list)
"""
with open(devnull, 'w') as devnull_f:
try:
status = subprocess.check_output(
['docker-machine', 'status', 'dev'],
stderr=devnull_f).strip()
if status == 'Stopped':
raise DatacatsError('Please start your docker-machine '
'VM with "docker-machine start dev"')
# XXX HACK: This exists because of
# http://github.com/datacats/datacats/issues/63,
# as a temporary fix.
if 'tls' in _docker_kwargs:
# It will print out messages to the user otherwise.
_docker_kwargs['tls'].assert_hostname = False
except subprocess.CalledProcessError:
raise DatacatsError('Please create a docker-machine with '
'"docker-machine start dev"')
|
This method calls to docker-machine on the command line and
makes sure that it is up and ready.
Potential improvements to be made:
- Support multiple machine names (run a `docker-machine ls` and then
see which machines are active. Use a priority list)
|
entailment
|
def ro_rw_to_binds(ro, rw):
"""
ro and rw {localdir: binddir} dicts to docker-py's
{localdir: {'bind': binddir, 'ro': T/F}} binds dicts
"""
out = {}
if ro:
for localdir, binddir in ro.iteritems():
out[localdir] = {'bind': binddir, 'ro': True}
if rw:
for localdir, binddir in rw.iteritems():
out[localdir] = {'bind': binddir, 'ro': False}
return out
|
ro and rw {localdir: binddir} dicts to docker-py's
{localdir: {'bind': binddir, 'ro': T/F}} binds dicts
|
entailment
|
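A worked example of the conversion ro_rw_to_binds performs, spelled out with plain dicts so the output shape is visible; the host and container paths are made up.

ro = {'/home/user/project/scripts': '/scripts'}
rw = {'/var/lib/datacats/files': '/var/www/storage'}

binds = {}
for localdir, binddir in ro.items():
    binds[localdir] = {'bind': binddir, 'ro': True}
for localdir, binddir in rw.items():
    binds[localdir] = {'bind': binddir, 'ro': False}

print(binds)
# {'/home/user/project/scripts': {'bind': '/scripts', 'ro': True},
#  '/var/lib/datacats/files': {'bind': '/var/www/storage', 'ro': False}}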
def web_command(command, ro=None, rw=None, links=None,
image='datacats/web', volumes_from=None, commit=False,
clean_up=False, stream_output=None, entrypoint=None):
"""
Run a single command in a web image optionally preloaded with the ckan
    source and virtual environment.
:param command: command to execute
:param ro: {localdir: binddir} dict for read-only volumes
:param rw: {localdir: binddir} dict for read-write volumes
:param links: links passed to start
:param image: docker image name to use
:param volumes_from:
:param commit: True to create a new image based on result
:param clean_up: True to remove container even on error
:param stream_output: file to write stderr+stdout from command
:param entrypoint: override entrypoint (script that runs command)
:returns: image id if commit=True
"""
binds = ro_rw_to_binds(ro, rw)
c = _get_docker().create_container(
image=image,
command=command,
volumes=binds_to_volumes(binds),
detach=False,
        host_config=_get_docker().create_host_config(
            binds=binds, volumes_from=volumes_from, links=links),
entrypoint=entrypoint)
_get_docker().start(
container=c['Id'],
)
if stream_output:
for output in _get_docker().attach(
c['Id'], stdout=True, stderr=True, stream=True):
stream_output.write(output)
if _get_docker().wait(c['Id']):
# Before the (potential) cleanup, grab the logs!
logs = _get_docker().logs(c['Id'])
if clean_up:
remove_container(c['Id'])
raise WebCommandError(command, c['Id'][:12], logs)
if commit:
rval = _get_docker().commit(c['Id'])
if not remove_container(c['Id']):
# circle ci doesn't let us remove containers, quiet the warnings
if not environ.get('CIRCLECI', False):
warn('failed to remove container: {0}'.format(c['Id']))
if commit:
return rval['Id']
|
Run a single command in a web image optionally preloaded with the ckan
    source and virtual environment.
:param command: command to execute
:param ro: {localdir: binddir} dict for read-only volumes
:param rw: {localdir: binddir} dict for read-write volumes
:param links: links passed to start
:param image: docker image name to use
:param volumes_from:
:param commit: True to create a new image based on result
:param clean_up: True to remove container even on error
:param stream_output: file to write stderr+stdout from command
:param entrypoint: override entrypoint (script that runs command)
:returns: image id if commit=True
|
entailment
|
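A hedged usage sketch for web_command above. The import path is an assumption about where this helper lives, and the host directory is made up; only keyword arguments documented above are used.

from datacats.docker import web_command   # assumed module path

image_id = web_command(
    command=['/bin/ls', '/project'],
    ro={'/home/user/myproject': '/project'},   # read-only bind mount
    commit=True,                               # commit the container and return the image id
    clean_up=True)                             # remove the container even if the command fails
print(image_id)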
def remote_server_command(command, environment, user_profile, **kwargs):
"""
Wraps web_command function with docker bindings needed to connect to
a remote server (such as datacats.com) and run commands there
(for example, when you want to copy your catalog to that server).
    The files bound to the docker image include the user's ssh credentials:
ssh_config file,
rsa and rsa.pub user keys
    known_hosts with public keys of the remote server (if known)
The **kwargs (keyword arguments) are passed on to the web_command call
intact, see the web_command's doc string for details
"""
if environment.remote_server_key:
temp = tempfile.NamedTemporaryFile(mode="wb")
temp.write(environment.remote_server_key)
temp.seek(0)
known_hosts = temp.name
else:
known_hosts = get_script_path('known_hosts')
binds = {
user_profile.profiledir + '/id_rsa': '/root/.ssh/id_rsa',
known_hosts: '/root/.ssh/known_hosts',
get_script_path('ssh_config'): '/etc/ssh/ssh_config'
}
if kwargs.get("include_project_dir", None):
binds[environment.target] = '/project'
del kwargs["include_project_dir"]
kwargs["ro"] = binds
try:
web_command(command, **kwargs)
except WebCommandError as e:
e.user_description = 'Sending a command to remote server failed'
raise e
|
Wraps web_command function with docker bindings needed to connect to
a remote server (such as datacats.com) and run commands there
(for example, when you want to copy your catalog to that server).
    The files bound to the docker image include the user's ssh credentials:
ssh_config file,
rsa and rsa.pub user keys
    known_hosts with public keys of the remote server (if known)
The **kwargs (keyword arguments) are passed on to the web_command call
intact, see the web_command's doc string for details
|
entailment
|
def run_container(name, image, command=None, environment=None,
ro=None, rw=None, links=None, detach=True, volumes_from=None,
port_bindings=None, log_syslog=False):
"""
Wrapper for docker create_container, start calls
:param log_syslog: bool flag to redirect container's logs to host's syslog
:returns: container info dict or None if container couldn't be created
Raises PortAllocatedError if container couldn't start on the
requested port.
"""
binds = ro_rw_to_binds(ro, rw)
log_config = LogConfig(type=LogConfig.types.JSON)
if log_syslog:
log_config = LogConfig(
type=LogConfig.types.SYSLOG,
config={'syslog-tag': name})
    host_config = _get_docker().create_host_config(
        binds=binds, log_config=log_config, links=links,
        volumes_from=volumes_from, port_bindings=port_bindings)
c = _get_docker().create_container(
name=name,
image=image,
command=command,
environment=environment,
volumes=binds_to_volumes(binds),
detach=detach,
stdin_open=False,
tty=False,
ports=list(port_bindings) if port_bindings else None,
host_config=host_config)
try:
_get_docker().start(
container=c['Id'],
)
except APIError as e:
if 'address already in use' in e.explanation:
try:
_get_docker().remove_container(name, force=True)
except APIError:
pass
raise PortAllocatedError()
raise
return c
|
Wrapper for docker create_container, start calls
:param log_syslog: bool flag to redirect container's logs to host's syslog
:returns: container info dict or None if container couldn't be created
Raises PortAllocatedError if container couldn't start on the
requested port.
|
entailment
|
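A hedged sketch of calling run_container as documented above. The import path, container name and port mapping are illustrative assumptions; PortAllocatedError is the exception the function raises when the host port is taken.

from datacats.docker import run_container, PortAllocatedError   # assumed module path

try:
    info = run_container(
        name='datacats_web_example_primary',
        image='datacats/web',
        ro={'/home/user/myproject': '/project'},
        port_bindings={5000: ('127.0.0.1', 5105)},   # container port -> host address/port
        log_syslog=True)                             # tag syslog entries with the container name
    print(info['Id'][:12])
except PortAllocatedError:
    print('Requested host port is already in use')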
def remove_container(name, force=False):
"""
Wrapper for docker remove_container
:returns: True if container was found and removed
"""
try:
if not force:
_get_docker().stop(name)
except APIError:
pass
try:
_get_docker().remove_container(name, force=True)
return True
except APIError:
return False
|
Wrapper for docker remove_container
:returns: True if container was found and removed
|
entailment
|
def container_logs(name, tail, follow, timestamps):
"""
Wrapper for docker logs, attach commands.
"""
if follow:
return _get_docker().attach(
name,
stdout=True,
stderr=True,
stream=True
)
    return _get_docker().logs(
name,
stdout=True,
stderr=True,
tail=tail,
timestamps=timestamps,
)
|
Wrapper for docker logs, attach commands.
|
entailment
|
def collect_logs(name):
"""
Returns a string representation of the logs from a container.
This is similar to container_logs but uses the `follow` option
and flattens the logs into a string instead of a generator.
:param name: The container name to grab logs for
:return: A string representation of the logs
"""
logs = container_logs(name, "all", True, None)
string = ""
for s in logs:
string += s
return string
|
Returns a string representation of the logs from a container.
This is similar to container_logs but uses the `follow` option
and flattens the logs into a string instead of a generator.
:param name: The container name to grab logs for
:return: A string representation of the logs
|
entailment
|
def pull_stream(image):
"""
Return generator of pull status objects
"""
return (json.loads(s) for s in _get_docker().pull(image, stream=True))
|
Return generator of pull status objects
|
entailment
|
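A short sketch of consuming the pull_stream generator above; each yielded item is a decoded status object from the Docker pull API. The import path is an assumption.

from datacats.docker import pull_stream   # assumed module path

for status in pull_stream('datacats/web'):
    # Typical keys in a pull status object are 'status', 'id' and 'progressDetail'.
    if 'status' in status:
        print(status.get('id', ''), status['status'])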
def data_only_container(name, volumes):
"""
create "data-only container" if it doesn't already exist.
We'd like to avoid these, but postgres + boot2docker make
it difficult, see issue #5
"""
info = inspect_container(name)
if info:
return
c = _get_docker().create_container(
name=name,
image='datacats/postgres', # any image will do
command='true',
volumes=volumes,
detach=True)
return c
|
create "data-only container" if it doesn't already exist.
We'd like to avoid these, but postgres + boot2docker make
it difficult, see issue #5
|
entailment
|
def main():
"""
The main entry point for datacats cli tool
(as defined in setup.py's entry_points)
It parses the cli arguments for corresponding options
and runs the corresponding command
"""
# pylint: disable=bare-except
try:
command_fn, opts = _parse_arguments(sys.argv[1:])
# purge handles loading differently
# 1 - Bail and just call the command if it doesn't have ENVIRONMENT.
if command_fn == purge.purge or 'ENVIRONMENT' not in opts:
return command_fn(opts)
environment = Environment.load(
opts['ENVIRONMENT'] or '.',
opts['--site'] if '--site' in opts else 'primary')
if command_fn not in COMMANDS_THAT_USE_SSH:
return command_fn(environment, opts)
# for commands that communicate with a remote server
# we load UserProfile and test our communication
user_profile = UserProfile()
user_profile.test_ssh_key(environment)
return command_fn(environment, opts, user_profile)
except DatacatsError as e:
_error_exit(e)
except SystemExit:
raise
except:
exc_info = "\n".join([line.rstrip()
for line in traceback.format_exception(*sys.exc_info())])
user_message = ("Something that should not"
" have happened happened when attempting"
" to run this command:\n"
" datacats {args}\n\n"
"It is seems to be a bug.\n"
"Please report this issue to us by"
" creating an issue ticket at\n\n"
" https://github.com/datacats/datacats/issues\n\n"
"so that we would be able to look into that "
"and fix the issue."
).format(args=" ".join(sys.argv[1:]))
_error_exit(DatacatsError(user_message,
parent_exception=UndocumentedError(exc_info)))
|
The main entry point for datacats cli tool
(as defined in setup.py's entry_points)
It parses the cli arguments for corresponding options
and runs the corresponding command
|
entailment
|
def _subcommand_arguments(args):
"""
Return (subcommand, (possibly adjusted) arguments for that subcommand)
Returns (None, args) when no subcommand is found
Parsing our arguments is hard. Each subcommand has its own docopt
validation, and some subcommands (paster and shell) have positional
options (some options passed to datacats and others passed to
commands run inside the container)
"""
skip_site = False
# Find subcommand without docopt so that subcommand options may appear
# anywhere
for i, a in enumerate(args):
if skip_site:
skip_site = False
continue
if a.startswith('-'):
if a == '-s' or a == '--site':
skip_site = True
continue
if a == 'help':
return _subcommand_arguments(args[:i] + ['--help'] + args[i + 1:])
if a not in COMMANDS:
raise DatacatsError("\'{0}\' command is not recognized. \n"
"See \'datacats help\' for the list of available commands".format(a))
command = a
break
else:
return None, args
if command != 'shell' and command != 'paster':
return command, args
# shell requires the environment name, paster does not
remaining_positional = 2 if command == 'shell' else 1
# i is where the subcommand starts.
    # shell, paster are special: options might belong to the command being
    # run inside the container, so find where the inner command starts and
    # insert a '--' before it so that we can separate inner options from
    # the ones we need to parse
while i < len(args):
a = args[i]
if a.startswith('-'):
if a == '-s' or a == '--site':
# site name is coming
i += 2
continue
i += 1
continue
if remaining_positional:
remaining_positional -= 1
i += 1
continue
return command, args[:i] + ['--'] + args[i:]
return command, args
|
Return (subcommand, (possibly adjusted) arguments for that subcommand)
Returns (None, args) when no subcommand is found
Parsing our arguments is hard. Each subcommand has its own docopt
validation, and some subcommands (paster and shell) have positional
options (some options passed to datacats and others passed to
commands run inside the container)
|
entailment
|
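A hand-traced example (not taken from the source) of the '--' insertion described above for the 'paster' subcommand: the datacats-level options stay in front, and everything from the inner command onward is fenced off so docopt does not try to parse it.

args = ['paster', '--site', 'primary', 'db', 'init']
expected = ['paster', '--site', 'primary', '--', 'db', 'init']
# _subcommand_arguments(args) should return ('paster', expected):
# 'paster' consumes the single positional slot, '--site primary' is skipped as a
# datacats option, and the '--' lands just before the inner 'db init' command.
print(expected)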
def start(environment, opts):
"""Create containers and start serving environment
Usage:
datacats start [-b] [--site-url SITE_URL] [-p|--no-watch] [-s NAME]
[-i] [--syslog] [--address=IP] [ENVIRONMENT [PORT]]
datacats start -r [-b] [--site-url SITE_URL] [-s NAME] [--syslog]
[-i] [--address=IP] [ENVIRONMENT]
Options:
--address=IP Address to listen on (Linux-only)
-b --background Don't wait for response from web server
--no-watch Do not automatically reload templates and .py files on change
-i --interactive Calls out to docker via the command line, allowing
for interactivity with the web image.
-p --production Start with apache and debug=false
-s --site=NAME Specify a site to start [default: primary]
--syslog Log to the syslog
--site-url SITE_URL The site_url to use in API responses. Defaults to old setting or
will attempt to determine it. (e.g. http://example.org:{port}/)
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
"""
environment.require_data()
if environment.fully_running():
print 'Already running at {0}'.format(environment.web_address())
return
reload_(environment, opts)
|
Create containers and start serving environment
Usage:
datacats start [-b] [--site-url SITE_URL] [-p|--no-watch] [-s NAME]
[-i] [--syslog] [--address=IP] [ENVIRONMENT [PORT]]
datacats start -r [-b] [--site-url SITE_URL] [-s NAME] [--syslog]
[-i] [--address=IP] [ENVIRONMENT]
Options:
--address=IP Address to listen on (Linux-only)
-b --background Don't wait for response from web server
--no-watch Do not automatically reload templates and .py files on change
-i --interactive Calls out to docker via the command line, allowing
for interactivity with the web image.
-p --production Start with apache and debug=false
-s --site=NAME Specify a site to start [default: primary]
--syslog Log to the syslog
--site-url SITE_URL The site_url to use in API responses. Defaults to old setting or
will attempt to determine it. (e.g. http://example.org:{port}/)
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
|
entailment
|
def reload_(environment, opts):
"""Reload environment source and configuration
Usage:
datacats reload [-b] [-p|--no-watch] [--syslog] [-s NAME] [--site-url=SITE_URL]
[-i] [--address=IP] [ENVIRONMENT [PORT]]
datacats reload -r [-b] [--syslog] [-s NAME] [--address=IP] [--site-url=SITE_URL]
[-i] [ENVIRONMENT]
Options:
--address=IP Address to listen on (Linux-only)
-i --interactive Calls out to docker via the command line, allowing
for interactivity with the web image.
--site-url=SITE_URL The site_url to use in API responses. Can use Python template syntax
to insert the port and address (e.g. http://example.org:{port}/)
-b --background Don't wait for response from web server
--no-watch Do not automatically reload templates and .py files on change
-p --production Reload with apache and debug=false
-s --site=NAME Specify a site to reload [default: primary]
--syslog Log to the syslog
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
"""
if opts['--interactive']:
# We can't wait for the server if we're tty'd
opts['--background'] = True
if opts['--address'] and is_boot2docker():
raise DatacatsError('Cannot specify address on boot2docker.')
environment.require_data()
environment.stop_ckan()
if opts['PORT'] or opts['--address'] or opts['--site-url']:
if opts['PORT']:
environment.port = int(opts['PORT'])
if opts['--address']:
environment.address = opts['--address']
if opts['--site-url']:
site_url = opts['--site-url']
# TODO: Check it against a regex or use urlparse
try:
site_url = site_url.format(address=environment.address, port=environment.port)
environment.site_url = site_url
environment.save_site(False)
except (KeyError, IndexError, ValueError) as e:
raise DatacatsError('Could not parse site_url: {}'.format(e))
environment.save()
for container in environment.extra_containers:
require_extra_image(EXTRA_IMAGE_MAPPING[container])
environment.stop_supporting_containers()
environment.start_supporting_containers()
environment.start_ckan(
production=opts['--production'],
paster_reload=not opts['--no-watch'],
log_syslog=opts['--syslog'],
interactive=opts['--interactive'])
write('Starting web server at {0} ...'.format(environment.web_address()))
if opts['--background']:
write('\n')
return
try:
environment.wait_for_web_available()
finally:
write('\n')
|
Reload environment source and configuration
Usage:
datacats reload [-b] [-p|--no-watch] [--syslog] [-s NAME] [--site-url=SITE_URL]
[-i] [--address=IP] [ENVIRONMENT [PORT]]
datacats reload -r [-b] [--syslog] [-s NAME] [--address=IP] [--site-url=SITE_URL]
[-i] [ENVIRONMENT]
Options:
--address=IP Address to listen on (Linux-only)
-i --interactive Calls out to docker via the command line, allowing
for interactivity with the web image.
--site-url=SITE_URL The site_url to use in API responses. Can use Python template syntax
to insert the port and address (e.g. http://example.org:{port}/)
-b --background Don't wait for response from web server
--no-watch Do not automatically reload templates and .py files on change
-p --production Reload with apache and debug=false
-s --site=NAME Specify a site to reload [default: primary]
--syslog Log to the syslog
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
|
entailment
|
def info(environment, opts):
"""Display information about environment and running containers
Usage:
datacats info [-qr] [ENVIRONMENT]
Options:
-q --quiet Echo only the web URL or nothing if not running
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
"""
damaged = False
sites = environment.sites
if not environment.sites:
sites = []
damaged = True
if opts['--quiet']:
if damaged:
raise DatacatsError('Damaged datadir: cannot get address.')
for site in sites:
environment.site_name = site
print '{}: {}'.format(site, environment.web_address())
return
datadir = environment.datadir
if not environment.data_exists():
datadir = ''
elif damaged:
datadir += ' (damaged)'
print 'Environment name: ' + environment.name
print ' Environment dir: ' + environment.target
print ' Data dir: ' + datadir
print ' Sites: ' + ' '.join(environment.sites)
for site in environment.sites:
print
environment.site_name = site
print ' Site: ' + site
print ' Containers: ' + ' '.join(environment.containers_running())
sitedir = environment.sitedir + (' (damaged)' if not environment.data_complete() else '')
print ' Site dir: ' + sitedir
addr = environment.web_address()
if addr:
print ' Available at: ' + addr
|
Display information about environment and running containers
Usage:
datacats info [-qr] [ENVIRONMENT]
Options:
-q --quiet Echo only the web URL or nothing if not running
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
|
entailment
|
def logs(environment, opts):
"""Display or follow container logs
Usage:
datacats logs [--postgres | --solr | --datapusher] [-s NAME] [-tr] [--tail=LINES] [ENVIRONMENT]
datacats logs -f [--postgres | --solr | --datapusher] [-s NAME] [-r] [ENVIRONMENT]
Options:
--datapusher Show logs for datapusher instead of web logs
--postgres Show postgres database logs instead of web logs
-f --follow Follow logs instead of exiting immediately
--solr Show solr search logs instead of web logs
-t --timestamps Add timestamps to log lines
-s --site=NAME Specify a site for logs if needed [default: primary]
--tail=LINES Number of lines to show [default: all]
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
"""
container = 'web'
if opts['--solr']:
container = 'solr'
if opts['--postgres']:
container = 'postgres'
if opts['--datapusher']:
container = 'datapusher'
tail = opts['--tail']
if tail != 'all':
tail = int(tail)
l = environment.logs(container, tail, opts['--follow'],
opts['--timestamps'])
if not opts['--follow']:
print l
return
try:
for message in l:
write(message)
except KeyboardInterrupt:
print
|
Display or follow container logs
Usage:
datacats logs [--postgres | --solr | --datapusher] [-s NAME] [-tr] [--tail=LINES] [ENVIRONMENT]
datacats logs -f [--postgres | --solr | --datapusher] [-s NAME] [-r] [ENVIRONMENT]
Options:
--datapusher Show logs for datapusher instead of web logs
--postgres Show postgres database logs instead of web logs
-f --follow Follow logs instead of exiting immediately
--solr Show solr search logs instead of web logs
-t --timestamps Add timestamps to log lines
-s --site=NAME Specify a site for logs if needed [default: primary]
--tail=LINES Number of lines to show [default: all]
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
|
entailment
|
def open_(environment, opts):
# pylint: disable=unused-argument
"""Open web browser window to this environment
Usage:
datacats open [-r] [-s NAME] [ENVIRONMENT]
Options:
-s --site=NAME Choose a site to open [default: primary]
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
"""
environment.require_data()
addr = environment.web_address()
if not addr:
print "Site not currently running"
else:
webbrowser.open(addr)
|
Open web browser window to this environment
Usage:
datacats open [-r] [-s NAME] [ENVIRONMENT]
Options:
-s --site=NAME Choose a site to open [default: primary]
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
|
entailment
|
def tweak(environment, opts):
"""Commands operating on environment data
Usage:
datacats tweak --install-postgis [ENVIRONMENT]
datacats tweak --add-redis [ENVIRONMENT]
datacats tweak --admin-password [ENVIRONMENT]
Options:
--install-postgis Install postgis in ckan database
--add-redis Adds redis next time this environment reloads
-s --site=NAME Choose a site to tweak [default: primary]
-p --admin-password Prompt to change the admin password
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
"""
environment.require_data()
if opts['--install-postgis']:
print "Installing postgis"
environment.install_postgis_sql()
if opts['--add-redis']:
# Let the user know if they are trying to add it and it is already there
print ('Adding redis extra container... Please note that you will have '
'to reload your environment for these changes to take effect ("datacats reload {}")'
.format(environment.name))
environment.add_extra_container('redis', error_on_exists=True)
if opts['--admin-password']:
environment.create_admin_set_password(confirm_password())
|
Commands operating on environment data
Usage:
datacats tweak --install-postgis [ENVIRONMENT]
datacats tweak --add-redis [ENVIRONMENT]
datacats tweak --admin-password [ENVIRONMENT]
Options:
--install-postgis Install postgis in ckan database
--add-redis Adds redis next time this environment reloads
-s --site=NAME Choose a site to tweak [default: primary]
-p --admin-password Prompt to change the admin password
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
|
entailment
|
def _split_path(path):
"""
A wrapper around the normal split function that ignores any trailing /.
:return: A tuple of the form (dirname, last) where last is the last element
in the path.
"""
# Get around a quirk in path_split where a / at the end will make the
# dirname (split[0]) the entire path
path = path[:-1] if path[-1] == '/' else path
split = path_split(path)
return split
|
A wrapper around the normal split function that ignores any trailing /.
:return: A tuple of the form (dirname, last) where last is the last element
in the path.
|
entailment
|
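A small worked example of the trailing-slash quirk described above, using the same os.path.split behaviour the helper relies on; the path is made up.

from os.path import split as path_split

path = '/home/user/project/'
print(path_split(path))                       # ('/home/user/project', '') -- the quirk
path = path[:-1] if path[-1] == '/' else path
print(path_split(path))                       # ('/home/user', 'project')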
def _one_to_two(datadir):
"""After this command, your environment will be converted to format version {}.
and will only work with datacats version exceeding and including 1.0.0.
This migration is necessary to support multiple sites within the same environment.
Your current site will be kept and will be named "primary".
Would you like to continue the migration? (y/n) [n]:"""
new_site_name = 'primary'
split = _split_path(datadir)
print 'Making sure that containers are stopped...'
env_name = split[1]
# Old-style names on purpose! We need to stop old containers!
remove_container('datacats_web_' + env_name)
remove_container('datacats_solr_' + env_name)
remove_container('datacats_postgres_' + env_name)
print 'Doing conversion...'
# Begin the actual conversion
to_move = (['files', 'passwords.ini', 'run', 'solr'] +
(['postgres'] if not is_boot2docker() else []))
# Make a primary site
site_path = path_join(datadir, 'sites', new_site_name)
if not exists(site_path):
makedirs(site_path)
web_command(
command=['/scripts/migrate.sh',
'/project/data',
'/project/data/sites/' + new_site_name] +
to_move,
ro={scripts.get_script_path('migrate.sh'): '/scripts/migrate.sh'},
rw={datadir: '/project/data'},
clean_up=True
)
if is_boot2docker():
rename_container('datacats_pgdata_' + env_name,
'datacats_pgdata_' + env_name + '_' + new_site_name)
# Lastly, grab the project directory and update the ini file
with open(path_join(datadir, 'project-dir')) as pd:
project = pd.read()
cp = SafeConfigParser()
config_loc = path_join(project, '.datacats-environment')
cp.read([config_loc])
new_section = 'site_' + new_site_name
cp.add_section(new_section)
# Ports need to be moved into the new section
port = cp.get('datacats', 'port')
cp.remove_option('datacats', 'port')
cp.set(new_section, 'port', port)
with open(config_loc, 'w') as config:
cp.write(config)
# Make a session secret for it (make it per-site)
cp = SafeConfigParser()
config_loc = path_join(site_path, 'passwords.ini')
cp.read([config_loc])
# Generate a new secret
cp.set('passwords', 'beaker_session_secret', generate_password())
with open(config_loc, 'w') as config:
cp.write(config)
with open(path_join(datadir, '.version'), 'w') as f:
f.write('2')
|
After this command, your environment will be converted to format version {}
and will only work with datacats versions 1.0.0 and above.
This migration is necessary to support multiple sites within the same environment.
Your current site will be kept and will be named "primary".
Would you like to continue the migration? (y/n) [n]:
|
entailment
|
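A self-contained sketch of the .datacats-environment edit that _one_to_two performs: the port option moves out of [datacats] into the new per-site section. The config is built in memory here instead of read from disk, and the values are made up; the Python 2 ConfigParser import matches the one used in this code.

from ConfigParser import SafeConfigParser   # Python 2 import, as in this code
import io

cp = SafeConfigParser()
cp.add_section('datacats')
cp.set('datacats', 'name', 'example')
cp.set('datacats', 'port', '5105')

# Move the port into the new per-site section.
cp.add_section('site_primary')
port = cp.get('datacats', 'port')
cp.remove_option('datacats', 'port')
cp.set('site_primary', 'port', port)

out = io.BytesIO()          # SafeConfigParser.write writes str (bytes) on Python 2
cp.write(out)
print(out.getvalue())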
def _two_to_one(datadir):
"""After this command, your environment will be converted to format version {}
and will not work with datacats versions 1.0.0 and above.
This format version doesn't support multiple sites, and after this only your
"primary" site will be usable, though other sites will be maintained if you
wish to do a migration back to a version which supports multisite.
Would you like to continue the migration? (y/n) [n]:"""
_, env_name = _split_path(datadir)
print 'Making sure that containers are stopped...'
# New-style names
remove_container('datacats_web_{}_primary'.format(env_name))
remove_container('datacats_postgres_{}_primary'.format(env_name))
remove_container('datacats_solr_{}_primary'.format(env_name))
print 'Doing conversion...'
if exists(path_join(datadir, '.version')):
os.remove(path_join(datadir, '.version'))
to_move = (['files', 'passwords.ini', 'run', 'solr'] +
(['postgres'] if not is_boot2docker() else []))
web_command(
command=['/scripts/migrate.sh',
'/project/data/sites/primary',
'/project/data'] + to_move,
ro={scripts.get_script_path('migrate.sh'): '/scripts/migrate.sh'},
rw={datadir: '/project/data'}
)
pgdata_name = 'datacats_pgdata_{}_primary'.format(env_name)
if is_boot2docker() and inspect_container(pgdata_name):
rename_container(pgdata_name, 'datacats_pgdata_{}'.format(env_name))
print 'Doing cleanup...'
with open(path_join(datadir, 'project-dir')) as pd:
datacats_env_location = path_join(pd.read(), '.datacats-environment')
cp = SafeConfigParser()
cp.read(datacats_env_location)
# We need to move the port OUT of site_primary section and INTO datacats
cp.set('datacats', 'port', cp.get('site_primary', 'port'))
cp.remove_section('site_primary')
with open(datacats_env_location, 'w') as config:
cp.write(config)
cp = SafeConfigParser()
cp.read(path_join(datadir, 'passwords.ini'))
# This isn't needed in this version
cp.remove_option('passwords', 'beaker_session_secret')
with open(path_join(datadir, 'passwords.ini'), 'w') as config:
cp.write(config)
|
After this command, your environment will be converted to format version {}
and will not work with datacats versions 1.0.0 and above.
This format version doesn't support multiple sites, and after this only your
"primary" site will be usable, though other sites will be maintained if you
wish to do a migration back to a version which supports multisite.
Would you like to continue the migration? (y/n) [n]:
|
entailment
|
def convert_environment(datadir, version, always_yes):
"""
Converts an environment TO the version specified by `version`.
:param datadir: The datadir to convert.
:param version: The version to convert TO.
:param always_yes: True if the user shouldn't be prompted about the migration.
"""
# Since we don't call either load() or new() we have to call require_images ourselves.
require_images()
inp = None
    old_version = _get_current_format(datadir)
    if version > CURRENT_FORMAT_VERSION:
        raise DatacatsError('Cannot migrate to a version higher than the '
                            'current one.')
    if version < 1:
        raise DatacatsError('Datadir versioning starts at 1.')
    migration_func = migrations[(old_version, version)]
if not always_yes:
while inp != 'y' and inp != 'n':
inp = raw_input(migration_func.__doc__.format(version))
if inp == 'n':
sys.exit(1)
lockfile = LockFile(path_join(datadir, '.migration_lock'))
lockfile.acquire()
try:
# FIXME: If we wanted to, we could find a set of conversions which
# would bring us up to the one we want if there's no direct path.
# This isn't necessary with just two formats, but it may be useful
# at 3.
# Call the appropriate conversion function
migration_func(datadir)
finally:
lockfile.release()
|
Converts an environment TO the version specified by `version`.
:param datadir: The datadir to convert.
:param version: The version to convert TO.
:param always_yes: True if the user shouldn't be prompted about the migration.
|
entailment
|
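A hedged reconstruction of the migrations lookup used above: a dict keyed by (from_version, to_version) tuples mapping to conversion functions, so the prompt text shown to the user comes straight from the chosen function's docstring. The stub functions below stand in for the real _one_to_two and _two_to_one.

def _one_to_two(datadir):
    """Example prompt for converting format 1 to format {}."""

def _two_to_one(datadir):
    """Example prompt for converting format 2 to format {}."""

migrations = {
    (1, 2): _one_to_two,
    (2, 1): _two_to_one,
}

migration_func = migrations[(1, 2)]
print(migration_func.__doc__.format(2))   # the prompt text shown before migrating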
def get_history_by_flight_number(self, flight_number, page=1, limit=100):
"""Fetch the history of a flight by its number.
This method can be used to get the history of a flight route by the number.
It checks the user authentication and returns the data accordingly.
Args:
flight_number (str): The flight number, e.g. AI101
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
A list of dicts with the data; one dict for each row of data from flightradar24
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_history_by_flight_number('AI101')
f.get_history_by_flight_number('AI101',page=1,limit=10)
"""
url = FLT_BASE.format(flight_number, str(self.AUTH_TOKEN), page, limit)
return self._fr24.get_data(url)
|
Fetch the history of a flight by its number.
This method can be used to get the history of a flight route by the number.
It checks the user authentication and returns the data accordingly.
Args:
flight_number (str): The flight number, e.g. AI101
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
A list of dicts with the data; one dict for each row of data from flightradar24
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_history_by_flight_number('AI101')
f.get_history_by_flight_number('AI101',page=1,limit=10)
|
entailment
|
def get_history_by_tail_number(self, tail_number, page=1, limit=100):
"""Fetch the history of a particular aircraft by its tail number.
This method can be used to get the history of a particular aircraft by its tail number.
It checks the user authentication and returns the data accordingly.
Args:
tail_number (str): The tail number, e.g. VT-ANL
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
A list of dicts with the data; one dict for each row of data from flightradar24
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
            f.get_history_by_tail_number('VT-ANL')
            f.get_history_by_tail_number('VT-ANL',page=1,limit=10)
"""
url = REG_BASE.format(tail_number, str(self.AUTH_TOKEN), page, limit)
return self._fr24.get_data(url, True)
|
Fetch the history of a particular aircraft by its tail number.
This method can be used to get the history of a particular aircraft by its tail number.
It checks the user authentication and returns the data accordingly.
Args:
tail_number (str): The tail number, e.g. VT-ANL
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
A list of dicts with the data; one dict for each row of data from flightradar24
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
            f.get_history_by_tail_number('VT-ANL')
            f.get_history_by_tail_number('VT-ANL',page=1,limit=10)
|
entailment
|
def get_airports(self, country):
"""Returns a list of all the airports
For a given country this returns a list of dicts, one for each airport, with information like the iata code of the airport etc
Args:
country (str): The country for which the airports will be fetched
Example::
from pyflightdata import FlightData
f=FlightData()
f.get_airports('India')
"""
url = AIRPORT_BASE.format(country.replace(" ", "-"))
return self._fr24.get_airports_data(url)
|
Returns a list of all the airports
For a given country this returns a list of dicts, one for each airport, with information like the iata code of the airport etc
Args:
country (str): The country for which the airports will be fetched
Example::
from pyflightdata import FlightData
f=FlightData()
f.get_airports('India')
|
entailment
|
def get_info_by_tail_number(self, tail_number, page=1, limit=100):
"""Fetch the details of a particular aircraft by its tail number.
This method can be used to get the details of a particular aircraft by its tail number.
Details include the serial number, age etc along with links to the images of the aircraft.
It checks the user authentication and returns the data accordingly.
Args:
tail_number (str): The tail number, e.g. VT-ANL
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
A list of dicts with the data; one dict for each row of data from flightradar24
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
            f.get_info_by_tail_number('VT-ANL')
            f.get_info_by_tail_number('VT-ANL',page=1,limit=10)
"""
url = REG_BASE.format(tail_number, str(self.AUTH_TOKEN), page, limit)
return self._fr24.get_aircraft_data(url)
|
Fetch the details of a particular aircraft by its tail number.
This method can be used to get the details of a particular aircraft by its tail number.
Details include the serial number, age etc along with links to the images of the aircraft.
It checks the user authentication and returns the data accordingly.
Args:
tail_number (str): The tail number, e.g. VT-ANL
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
A list of dicts with the data; one dict for each row of data from flightradar24
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
            f.get_info_by_tail_number('VT-ANL')
            f.get_info_by_tail_number('VT-ANL',page=1,limit=10)
|
entailment
|
def get_fleet(self, airline_key):
"""Get the fleet for a particular airline.
        Given an airline code from the get_airlines() method output, this method returns the fleet for the airline.
Args:
airline_key (str): The code for the airline on flightradar24
Returns:
A list of dicts, one for each aircraft in the airlines fleet
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_fleet('ai-aic')
"""
url = AIRLINE_FLEET_BASE.format(airline_key)
return self._fr24.get_airline_fleet_data(url, self.AUTH_TOKEN != '')
|
Get the fleet for a particular airline.
        Given an airline code from the get_airlines() method output, this method returns the fleet for the airline.
Args:
airline_key (str): The code for the airline on flightradar24
Returns:
A list of dicts, one for each aircraft in the airlines fleet
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_fleet('ai-aic')
|
entailment
|
def get_flights(self, search_key):
"""Get the flights for a particular airline.
Given a full or partial flight number string, this method returns the first 100 flights matching that string.
Please note this method was different in earlier versions. The older versions took an airline code and returned all scheduled flights for that airline
Args:
search_key (str): Full or partial flight number for any airline e.g. MI47 to get all SilkAir flights starting with MI47
Returns:
A list of dicts, one for each scheduled flight in the airlines network
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_flights('MI47')
"""
# assume limit 100 to return first 100 of any wild card search
url = AIRLINE_FLT_BASE.format(search_key, 100)
return self._fr24.get_airline_flight_data(url)
|
Get the flights for a particular airline.
Given a full or partial flight number string, this method returns the first 100 flights matching that string.
Please note this method was different in earlier versions. The older versions took an airline code and returned all scheduled flights for that airline
Args:
search_key (str): Full or partial flight number for any airline e.g. MI47 to get all SilkAir flights starting with MI47
Returns:
A list of dicts, one for each scheduled flight in the airlines network
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_flights('MI47')
|
entailment
|
def get_flights_from_to(self, origin, destination):
"""Get the flights for a particular origin and destination.
Given an origin and destination this method returns the upcoming scheduled flights between these two points.
The data returned has the airline, airport and schedule information - this is subject to change in future.
Args:
origin (str): The origin airport code
destination (str): The destination airport code
Returns:
A list of dicts, one for each scheduled flight between the two points.
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_flights_from_to('SIN','HYD')
"""
# assume limit 100 to return first 100 of any wild card search
url = AIRLINE_FLT_BASE_POINTS.format(origin, destination)
return self._fr24.get_airline_flight_data(url, by_airports=True)
|
Get the flights for a particular origin and destination.
Given an origin and destination this method returns the upcoming scheduled flights between these two points.
The data returned has the airline, airport and schedule information - this is subject to change in future.
Args:
origin (str): The origin airport code
destination (str): The destination airport code
Returns:
A list of dicts, one for each scheduled flight between the two points.
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_flights_from_to('SIN','HYD')
|
entailment
|
def get_airport_weather(self, iata, page=1, limit=100):
"""Retrieve the weather at an airport
Given the IATA code of an airport, this method returns the weather information.
Args:
iata (str): The IATA code for an airport, e.g. HYD
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
A list of dicts with the data; one dict for each row of data from flightradar24
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_airport_weather('HYD')
f.get_airport_weather('HYD',page=1,limit=10)
"""
url = AIRPORT_DATA_BASE.format(iata, str(self.AUTH_TOKEN), page, limit)
weather = self._fr24.get_airport_weather(url)
mi = weather['sky']['visibility']['mi']
if (mi is not None) and (mi != "None"):
mi = float(mi)
km = mi * 1.6094
weather['sky']['visibility']['km'] = km
return weather
|
Retrieve the weather at an airport
Given the IATA code of an airport, this method returns the weather information.
Args:
iata (str): The IATA code for an airport, e.g. HYD
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
A list of dicts with the data; one dict for each row of data from flightradar24
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_airport_weather('HYD')
f.get_airport_weather('HYD',page=1,limit=10)
|
entailment
|
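A tiny worked example of the visibility conversion performed above, using the same 1.6094 miles-to-kilometres factor and the same None/"None" guard; the sample value is made up.

mi = "6.0"   # visibility in statute miles, as it may appear in the payload
if (mi is not None) and (mi != "None"):
    km = float(mi) * 1.6094
    print(round(km, 2))   # 9.66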
def get_airport_metars(self, iata, page=1, limit=100):
"""Retrieve the metar data at the current time
Given the IATA code of an airport, this method returns the metar information.
Args:
iata (str): The IATA code for an airport, e.g. HYD
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
The metar data for the airport
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_airport_metars('HYD')
"""
url = AIRPORT_DATA_BASE.format(iata, str(self.AUTH_TOKEN), page, limit)
w = self._fr24.get_airport_weather(url)
return w['metar']
|
Retrieve the metar data at the current time
Given the IATA code of an airport, this method returns the metar information.
Args:
iata (str): The IATA code for an airport, e.g. HYD
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
The metar data for the airport
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_airport_metars('HYD')
|
entailment
|
def get_airport_metars_hist(self, iata):
"""Retrieve the metar data for past 72 hours. The data will not be parsed to readable format.
Given the IATA code of an airport, this method returns the metar information for last 72 hours.
Args:
iata (str): The IATA code for an airport, e.g. HYD
Returns:
The metar data for the airport
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_airport_metars_hist('HYD')
"""
url = AIRPORT_BASE.format(iata) + "/weather"
return self._fr24.get_airport_metars_hist(url)
|
Retrieve the metar data for the past 72 hours. The data is not parsed into a readable format.
Given the IATA code of an airport, this method returns the metar information for last 72 hours.
Args:
iata (str): The IATA code for an airport, e.g. HYD
Returns:
The metar data for the airport
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_airport_metars_hist('HYD')
|
entailment
|
def get_airport_stats(self, iata, page=1, limit=100):
"""Retrieve the performance statistics at an airport
Given the IATA code of an airport, this method returns the performance statistics for the airport.
Args:
iata (str): The IATA code for an airport, e.g. HYD
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
A list of dicts with the data; one dict for each row of data from flightradar24
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_airport_stats('HYD')
f.get_airport_stats('HYD',page=1,limit=10)
"""
url = AIRPORT_DATA_BASE.format(iata, str(self.AUTH_TOKEN), page, limit)
return self._fr24.get_airport_stats(url)
|
Retrieve the performance statistics at an airport
Given the IATA code of an airport, this method returns the performance statistics for the airport.
Args:
iata (str): The IATA code for an airport, e.g. HYD
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
A list of dicts with the data; one dict for each row of data from flightradar24
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_airport_stats('HYD')
f.get_airport_stats('HYD',page=1,limit=10)
|
entailment
|
def get_airport_details(self, iata, page=1, limit=100):
"""Retrieve the details of an airport
Given the IATA code of an airport, this method returns detailed information such as latitude/longitude, full name, URL, codes, etc.
Args:
iata (str): The IATA code for an airport, e.g. HYD
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
A list of dicts with the data; one dict for each row of data from flightradar24
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_airport_details('HYD')
f.get_airport_details('HYD',page=1,limit=10)
"""
url = AIRPORT_DATA_BASE.format(iata, str(self.AUTH_TOKEN), page, limit)
details = self._fr24.get_airport_details(url)
weather = self._fr24.get_airport_weather(url)
# weather has more correct and standard elevation details in feet and meters
details['position']['elevation'] = weather['elevation']
return details
|
Retrieve the details of an airport
Given the IATA code of an airport, this method returns detailed information such as latitude/longitude, full name, URL, codes, etc.
Args:
iata (str): The IATA code for an airport, e.g. HYD
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
A list of dicts with the data; one dict for each row of data from flightradar24
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_airport_details('HYD')
f.get_airport_details('HYD',page=1,limit=10)
|
entailment
|
def get_images_by_tail_number(self, tail_number, page=1, limit=100):
"""Fetch the images of a particular aircraft by its tail number.
This method can be used to get the images of the aircraft. The images are in 3 sizes and you can use what suits your need.
Args:
tail_number (str): The tail number, e.g. VT-ANL
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
A dict with the images of the aircraft in various sizes
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_images_by_tail_number('VT-ANL')
f.get_images_by_tail_number('VT-ANL',page=1,limit=10)
"""
url = REG_BASE.format(tail_number, str(self.AUTH_TOKEN), page, limit)
return self._fr24.get_aircraft_image_data(url)
|
Fetch the images of a particular aircraft by its tail number.
This method can be used to get the images of the aircraft. The images are in 3 sizes and you can use what suits your need.
Args:
tail_number (str): The tail number, e.g. VT-ANL
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
A dict with the images of the aircraft in various sizes
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_images_by_tail_number('VT-ANL')
f.get_images_by_tail_number('VT-ANL',page=1,limit=10)
|
entailment
|
def login(self, email, password):
"""Login to the flightradar24 session
The API currently uses flightradar24 as the primary data source. The site provides different levels of data based on user plans.
For users who have signed up for a plan, this method allows logging in with their flightradar24 credentials. The API obtains
a token that is passed on all subsequent requests, which returns data according to the plan limits.
Args:
email (str): The email ID which is used to login to flightradar24
password (str): The password for the user ID
Example::
from pyflightdata import FlightData
f=FlightData()
f.login(myemail,mypassword)
"""
response = FlightData.session.post(
url=LOGIN_URL,
data={
'email': email,
'password': password,
'remember': 'true',
'type': 'web'
},
headers={
'Origin': 'https://www.flightradar24.com',
'Referer': 'https://www.flightradar24.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:28.0) Gecko/20100101 Firefox/28.0'
}
)
response = self._fr24.json_loads_byteified(
response.content) if response.status_code == 200 else None
if response:
token = response['userData']['subscriptionKey']
self.AUTH_TOKEN = token
|
Login to the flightradar24 session
The API currently uses flightradar24 as the primary data source. The site provides different levels of data based on user plans.
For users who have signed up for a plan, this method allows logging in with their flightradar24 credentials. The API obtains
a token that is passed on all subsequent requests, which returns data according to the plan limits.
Args:
email (str): The email ID which is used to login to flightradar24
password (str): The password for the user ID
Example::
from pyflightdata import FlightData
f=FlightData()
f.login(myemail,mypassword)
|
entailment
|
def decode_metar(self, metar):
"""
Simple method that decodes a given metar string.
Args:
metar (str): The metar data
Returns:
The metar data in readable format
Example::
from pyflightdata import FlightData
f=FlightData()
f.decode_metar('WSSS 181030Z 04009KT 010V080 9999 FEW018TCU BKN300 29/22 Q1007 NOSIG')
"""
try:
from metar import Metar
except ImportError:
    return "Unable to parse metars. Please install the parser from https://github.com/tomp/python-metar."
m = Metar.Metar(metar)
return m.string()
|
Simple method that decodes a given metar string.
Args:
metar (str): The metar data
Returns:
The metar data in readable format
Example::
from pyflightdata import FlightData
f=FlightData()
f.decode_metar('WSSS 181030Z 04009KT 010V080 9999 FEW018TCU BKN300 29/22 Q1007 NOSIG')
|
entailment
|
def _get_auth_packet(self, username, password, client):
"""
Get the pyrad authentication packet for the username/password and the
given pyrad client.
"""
pkt = client.CreateAuthPacket(code=AccessRequest,
User_Name=username)
pkt["User-Password"] = pkt.PwCrypt(password)
pkt["NAS-Identifier"] = 'django-radius'
for key, val in list(getattr(settings, 'RADIUS_ATTRIBUTES', {}).items()):
pkt[key] = val
return pkt
|
Get the pyrad authentication packet for the username/password and the
given pyrad client.
|
entailment
|
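Because the packet builder copies every key/value pair from the RADIUS_ATTRIBUTES setting onto the Access-Request, extra attributes can be attached without touching the backend code. A hedged sketch of such a setting; the attribute names below are standard RADIUS attributes chosen purely for illustration (not defaults of this backend) and must exist in the pyrad dictionary in use:
    # settings.py (sketch)
    RADIUS_ATTRIBUTES = {
        'NAS-Port': 0,                            # illustrative value
        'Called-Station-Id': 'vpn.example.com',   # illustrative value
    }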
def _get_client(self, server):
"""
Get the pyrad client for a given server. RADIUS server is described by
a 3-tuple: (<hostname>, <port>, <secret>).
"""
return Client(
server=server[0],
authport=server[1],
secret=server[2],
dict=self._get_dictionary(),
)
|
Get the pyrad client for a given server. RADIUS server is described by
a 3-tuple: (<hostname>, <port>, <secret>).
|
entailment
|
def _perform_radius_auth(self, client, packet):
"""
Perform the actual radius authentication by passing the given packet
to the server which `client` is bound to.
Returns True or False depending on whether the user is authenticated
successfully.
"""
try:
reply = client.SendPacket(packet)
except Timeout as e:
logging.error("RADIUS timeout occurred contacting %s:%s" % (
client.server, client.authport))
return False
except Exception as e:
logging.error("RADIUS error: %s" % e)
return False
if reply.code == AccessReject:
logging.warning("RADIUS access rejected for user '%s'" % (
packet['User-Name']))
return False
elif reply.code != AccessAccept:
logging.error("RADIUS access error for user '%s' (code %s)" % (
packet['User-Name'], reply.code))
return False
logging.info("RADIUS access granted for user '%s'" % (
packet['User-Name']))
return True
|
Perform the actual radius authentication by passing the given packet
to the server which `client` is bound to.
Returns True or False depending on whether the user is authenticated
successfully.
|
entailment
|
def _radius_auth(self, server, username, password):
"""
Authenticate the given username/password against the RADIUS server
described by `server`.
"""
client = self._get_client(server)
packet = self._get_auth_packet(username, password, client)
return self._perform_radius_auth(client, packet)
|
Authenticate the given username/password against the RADIUS server
described by `server`.
|
entailment
|
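Putting the helpers together, one authentication attempt looks roughly like the sketch below; `backend` is assumed to be an instance of the backend class, and the host, port and secret are placeholders following the (hostname, port, secret) convention described above.
    # Sketch only -- equivalent to what _radius_auth does internally.
    server = ('radius.example.com', 1812, 'shared-secret')
    client = backend._get_client(server)
    packet = backend._get_auth_packet('alice', 's3cr3t', client)
    ok = backend._perform_radius_auth(client, packet)   # True or False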
def get_django_user(self, username, password=None):
"""
Get the Django user with the given username, or create one if it
doesn't already exist. If `password` is given, then set the user's
password to that (regardless of whether the user was created or not).
"""
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
user = User(username=username)
if password is not None:
user.set_password(password)
user.save()
return user
|
Get the Django user with the given username, or create one if it
doesn't already exist. If `password` is given, then set the user's
password to that (regardless of whether the user was created or not).
|
entailment
|
def authenticate(self, request, username=None, password=None):
"""
Check credentials against RADIUS server and return a User object or
None.
"""
if isinstance(username, basestring):
username = username.encode('utf-8')
if isinstance(password, basestring):
password = password.encode('utf-8')
server = self._get_server_from_settings()
result = self._radius_auth(server, username, password)
if result:
return self.get_django_user(username, password)
return None
|
Check credentials against RADIUS server and return a User object or
None.
|
entailment
|
def authenticate(self, request, username=None, password=None, realm=None):
"""
Check credentials against the RADIUS server identified by `realm` and
return a User object or None. If no argument is supplied, Django will
skip this backend and try the next one (as a TypeError will be raised
and caught).
"""
if isinstance(username, basestring):
username = username.encode('utf-8')
if isinstance(password, basestring):
password = password.encode('utf-8')
server = self.get_server(realm)
if not server:
return None
result = self._radius_auth(server, username, password)
if result:
full_username = self.construct_full_username(username, realm)
return self.get_django_user(full_username, password)
return None
|
Check credentials against the RADIUS server identified by `realm` and
return a User object or None. If no argument is supplied, Django will
skip this backend and try the next one (as a TypeError will be raised
and caught).
|
entailment
|
def move(self, dst):
"Closes then moves the file to dst."
self.close()
shutil.move(self.path, dst)
|
Closes then moves the file to dst.
|
entailment
|
def sigma_clipping(date, mag, err, threshold=3, iteration=1):
"""
Remove outlying (fluctuating) data points based on magnitudes.
Parameters
----------
date : array_like
An array of dates.
mag : array_like
An array of magnitudes.
err : array_like
An array of magnitude errors.
threshold : float, optional
Threshold for sigma-clipping.
iteration : int, optional
The number of iterations.
Returns
-------
date : array_like
Sigma-clipped dates.
mag : array_like
Sigma-clipped magnitudes.
err : array_like
Sigma-clipped magnitude errors.
"""
# Check length.
if (len(date) != len(mag)) \
or (len(date) != len(err)) \
or (len(mag) != len(err)):
raise RuntimeError('The length of date, mag, and err must be same.')
# By magnitudes
for i in range(int(iteration)):
mean = np.median(mag)
std = np.std(mag)
index = (mag >= mean - threshold*std) & (mag <= mean + threshold*std)
date = date[index]
mag = mag[index]
err = err[index]
return date, mag, err
|
Remove outlying (fluctuating) data points based on magnitudes.
Parameters
----------
date : array_like
An array of dates.
mag : array_like
An array of magnitudes.
err : array_like
An array of magnitude errors.
threshold : float, optional
Threshold for sigma-clipping.
iteration : int, optional
The number of iterations.
Returns
-------
date : array_like
Sigma-clipped dates.
mag : array_like
Sigma-clipped magnitudes.
err : array_like
Sigma-clipped magnitude errors.
|
entailment
|
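A quick usage sketch with synthetic NumPy arrays; the import path for sigma_clipping is hypothetical, and the single 18.0 measurement lies far enough from the median to be clipped in one 3-sigma pass.
    import numpy as np
    # from upsilon.utils import sigma_clipping   # hypothetical import path

    date = np.arange(10, dtype=float)
    mag = np.array([15.0] * 9 + [18.0])   # one obvious outlier
    err = np.full(10, 0.01)

    date, mag, err = sigma_clipping(date, mag, err, threshold=3, iteration=1)
    print(len(mag))   # 9 -- the outlier is removed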
def from_spec(spec):
"""Return a schema object from a spec.
A spec is either a string for a scalar type, or a list of 0 or 1 specs,
or a dictionary with two elements: {'fields': { ... }, 'required': [...]}.
"""
if spec == '':
return any_schema
if framework.is_str(spec):
# Scalar type
if spec not in SCALAR_TYPES:
raise exceptions.SchemaError('Not a valid schema type: %r' % spec)
return ScalarSchema(spec)
if framework.is_list(spec):
return ListSchema(spec[0] if len(spec) else any_schema)
if framework.is_tuple(spec):
return TupleSchema(spec.get('fields', {}), spec.get('required', []))
raise exceptions.SchemaError('Not a valid schema spec: %r' % spec)
|
Return a schema object from a spec.
A spec is either a string for a scalar type, or a list of 0 or 1 specs,
or a dictionary with two elements: {'fields': { ... }, 'required': [...]}.
|
entailment
|
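For reference, the three spec shapes that from_spec accepts, written out as a sketch. The scalar type names ('int', 'string') are placeholders that must appear in SCALAR_TYPES, and the dictionary form must satisfy framework.is_tuple (in this library a "tuple" is its own dict-like object, so a plain Python dict may or may not qualify).
    scalar_spec = 'int'                            # -> ScalarSchema
    empty_list  = []                               # -> ListSchema(any_schema)
    int_list    = ['int']                          # -> ListSchema of ints
    tuple_spec  = {'fields': {'name': 'string'},   # -> TupleSchema
                   'required': ['name']}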
def validate(obj, schema):
"""Validate an object according to its own AND an externally imposed schema."""
if not framework.EvaluationContext.current().validate:
# Short circuit evaluation when disabled
return obj
# Validate returned object according to its own schema
if hasattr(obj, 'tuple_schema'):
obj.tuple_schema.validate(obj)
# Validate object according to externally imposed schema
if schema:
schema.validate(obj)
return obj
|
Validate an object according to its own AND an externally imposed schema.
|
entailment
|
def attach(obj, schema):
"""Attach the given schema to the given object."""
# We have a silly exception for lists, since they have no 'attach_schema'
# method, and I don't feel like making a subclass for List just to add it.
# So, we recursively search the list for tuples and attach the schema in
# there.
if framework.is_list(obj) and isinstance(schema, ListSchema):
for x in obj:
attach(x, schema.element_schema)
return
# Otherwise, the object should be able to handle its own schema attachment.
getattr(obj, 'attach_schema', nop)(schema)
|
Attach the given schema to the given object.
|
entailment
|
def get_feature_set_all():
"""
Return a list of entire features.
A set of entire features regardless of being used to train a model or
predict a class.
Returns
-------
feature_names : list
A list of features' names.
"""
features = get_feature_set()
features.append('cusum')
features.append('eta')
features.append('n_points')
features.append('period_SNR')
features.append('period_log10FAP')
features.append('period_uncertainty')
features.append('weighted_mean')
features.append('weighted_std')
features.sort()
return features
|
Return a list of entire features.
A set of entire features regardless of being used to train a model or
predict a class.
Returns
-------
feature_names : list
A list of features' names.
|
entailment
|
def parameters(self):
""" A property that returns all of the model's parameters. """
parameters = []
for hl in self.hidden_layers:
parameters.extend(hl.parameters)
parameters.extend(self.top_layer.parameters)
return parameters
|
A property that returns all of the model's parameters.
|
entailment
|
def parameters(self, value):
""" Used to set all of the model's parameters to new values.
**Parameters:**
value : array_like
New values for the model parameters. Must be of length
``self.n_parameters``.
"""
if len(value) != self.n_parameters:
raise ValueError("Incorrect length of parameter vector. "
"Model has %d parameters, but got %d" %
(self.n_parameters, len(value)))
i = 0
for hl in self.hidden_layers:
hl.parameters = value[i:i + hl.n_parameters]
i += hl.n_parameters
self.top_layer.parameters = value[-self.top_layer.n_parameters:]
|
Used to set all of the model's parameters to new values.
**Parameters:**
value : array_like
New values for the model parameters. Must be of length
``self.n_parameters``.
|
entailment
|
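Together, the getter and setter allow a read-modify-write round trip over all model parameters. A hedged sketch, assuming `model` is a constructed network and that n_parameters counts parameter arrays, as the slicing in the setter suggests:
    params = model.parameters          # flat list of per-layer parameter arrays
    assert len(params) == model.n_parameters
    model.parameters = params          # setter redistributes the list per layer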
def checksum(self):
""" Returns an MD5 digest of the model.
This can be used to easily identify whether two models have the
same architecture.
"""
m = md5()
for hl in self.hidden_layers:
m.update(str(hl.architecture))
m.update(str(self.top_layer.architecture))
return m.hexdigest()
|
Returns an MD5 digest of the model.
This can be used to easily identify whether two models have the
same architecture.
|
entailment
|
def evaluate(self, input_data, targets,
return_cache=False, prediction=True):
""" Evaluate the loss function without computing gradients.
**Parameters:**
input_data : GPUArray
Data to evaluate
targets: GPUArray
Targets
return_cache : bool, optional
Whether to return intermediary variables from the
computation and the hidden activations.
prediction : bool, optional
Whether to use prediction model. Only relevant when using
dropout. If true, then weights are multiplied by
1 - dropout if the layer uses dropout.
**Returns:**
loss : float
The value of the loss function.
hidden_cache : list, only returned if ``return_cache == True``
Cache as returned by :meth:`hebel.models.NeuralNet.feed_forward`.
activations : list, only returned if ``return_cache == True``
Hidden activations as returned by
:meth:`hebel.models.NeuralNet.feed_forward`.
"""
# Forward pass
activations, hidden_cache = self.feed_forward(
input_data, return_cache=True, prediction=prediction)
loss = self.top_layer.train_error(None,
targets, average=False, cache=activations,
prediction=prediction)
for hl in self.hidden_layers:
if hl.l1_penalty_weight: loss += hl.l1_penalty
if hl.l2_penalty_weight: loss += hl.l2_penalty
if self.top_layer.l1_penalty_weight: loss += self.top_layer.l1_penalty
if self.top_layer.l2_penalty_weight: loss += self.top_layer.l2_penalty
if not return_cache:
return loss
else:
return loss, hidden_cache, activations
|
Evaluate the loss function without computing gradients.
**Parameters:**
input_data : GPUArray
Data to evaluate
targets: GPUArray
Targets
return_cache : bool, optional
Whether to return intermediary variables from the
computation and the hidden activations.
prediction : bool, optional
Whether to use prediction model. Only relevant when using
dropout. If true, then weights are multiplied by
1 - dropout if the layer uses dropout.
**Returns:**
loss : float
The value of the loss function.
hidden_cache : list, only returned if ``return_cache == True``
Cache as returned by :meth:`hebel.models.NeuralNet.feed_forward`.
activations : list, only returned if ``return_cache == True``
Hidden activations as returned by
:meth:`hebel.models.NeuralNet.feed_forward`.
|
entailment
|
def training_pass(self, input_data, targets):
""" Perform a full forward and backward pass through the model.
**Parameters:**
input_data : GPUArray
Data to train the model with.
targets : GPUArray
Training targets.
**Returns:**
loss : float
Value of loss function as evaluated on the data and targets.
gradients : list of GPUArray
Gradients obtained from backpropagation in the backward pass.
"""
# Forward pass
loss, hidden_cache, logistic_cache = self.evaluate(
input_data, targets, return_cache=True, prediction=False)
if not np.isfinite(loss):
raise ValueError('Infinite activations!')
# Backpropagation
if self.hidden_layers:
hidden_activations = hidden_cache[-1][0]
else:
hidden_activations = input_data
df_top_layer = \
self.top_layer.backprop(hidden_activations, targets,
cache=logistic_cache)
gradients = list(df_top_layer[0][::-1])
df_hidden = df_top_layer[1]
if self.hidden_layers:
hidden_inputs = [input_data] + [c[0] for c in hidden_cache[:-1]]
for hl, hc, hi in \
zip(self.hidden_layers[::-1], hidden_cache[::-1],
hidden_inputs[::-1]):
g, df_hidden = hl.backprop(hi, df_hidden, cache=hc)
gradients.extend(g[::-1])
gradients.reverse()
return loss, gradients
|
Perform a full forward and backward pass through the model.
**Parameters:**
input_data : GPUArray
Data to train the model with.
targets : GPUArray
Training targets.
**Returns:**
loss : float
Value of loss function as evaluated on the data and targets.
gradients : list of GPUArray
Gradients obtained from backpropagation in the backward pass.
|
entailment
|
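In use, a single optimisation step calls training_pass and hands the returned gradients to whatever update rule is in play. The sketch below shows only the call itself; `model`, `input_data` and `targets` are assumed to be an already-constructed network and prepared GPUArrays.
    # One forward/backward pass (sketch).
    loss, gradients = model.training_pass(input_data, targets)
    print('loss:', loss)
    # `gradients` is a list of GPUArrays from backpropagation, to be consumed
    # by an optimizer / parameter update rule.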
def feed_forward(self, input_data, return_cache=False, prediction=True):
""" Run data forward through the model.
**Parameters:**
input_data : GPUArray
Data to run through the model.
return_cache : bool, optional
Whether to return the intermediary results.
prediction : bool, optional
Whether to run in prediction mode. Only relevant when
using dropout. If true, weights are multiplied by 1 - dropout.
If false, then half of hidden units are randomly dropped and
the dropout mask is returned in case ``return_cache==True``.
**Returns:**
prediction : GPUArray
Predictions from the model.
cache : list of GPUArray, only returned if ``return_cache == True``
Results of intermediary computations.
"""
hidden_cache = None # Create variable in case there are no hidden layers
if self.hidden_layers:
# Forward pass
hidden_cache = []
for i in range(len(self.hidden_layers)):
hidden_activations = hidden_cache[i - 1][0] if i else input_data
# Use dropout predict if previous layer has dropout
hidden_cache.append(self.hidden_layers[i]
.feed_forward(hidden_activations,
prediction=prediction))
hidden_activations = hidden_cache[-1][0]
else:
hidden_activations = input_data
# Use dropout_predict if last hidden layer has dropout
activations = \
self.top_layer.feed_forward(hidden_activations,
prediction=False)
if return_cache:
return activations, hidden_cache
return activations
|
Run data forward through the model.
**Parameters:**
input_data : GPUArray
Data to run through the model.
return_cache : bool, optional
Whether to return the intermediary results.
prediction : bool, optional
Whether to run in prediction mode. Only relevant when
using dropout. If true, weights are multiplied by 1 - dropout.
If false, then half of hidden units are randomly dropped and
the dropout mask is returned in case ``return_cache==True``.
**Returns:**
prediction : GPUArray
Predictions from the model.
cache : list of GPUArray, only returned if ``return_cache == True``
Results of intermediary computations.
|
entailment
|
def shallow_run(self):
"""Derive not-period-based features."""
# Number of data points
self.n_points = len(self.date)
# Weight calculation.
# All zero values.
if not self.err.any():
self.err = np.ones(len(self.mag)) * np.std(self.mag)
# Some zero values.
elif not self.err.all():
np.putmask(self.err, self.err==0, np.median(self.err))
self.weight = 1. / self.err
self.weighted_sum = np.sum(self.weight)
# Simple statistics, mean, median and std.
self.mean = np.mean(self.mag)
self.median = np.median(self.mag)
self.std = np.std(self.mag)
# Weighted mean and std.
self.weighted_mean = np.sum(self.mag * self.weight) / self.weighted_sum
self.weighted_std = np.sqrt(np.sum((self.mag - self.weighted_mean) ** 2 \
* self.weight) / self.weighted_sum)
# Skewness and kurtosis.
self.skewness = ss.skew(self.mag)
self.kurtosis = ss.kurtosis(self.mag)
# Normalization-test. Shapiro-Wilk test.
shapiro = ss.shapiro(self.mag)
self.shapiro_w = shapiro[0]
# self.shapiro_log10p = np.log10(shapiro[1])
# Percentile features.
self.quartile31 = np.percentile(self.mag, 75) \
- np.percentile(self.mag, 25)
# Stetson K.
self.stetson_k = self.get_stetson_k(self.mag, self.median, self.err)
# Ratio between higher and lower amplitude than average.
self.hl_amp_ratio = self.half_mag_amplitude_ratio(
self.mag, self.median, self.weight)
# This second function's value is very similar with the above one.
# self.hl_amp_ratio2 = self.half_mag_amplitude_ratio2(
# self.mag, self.median)
# Cusum
self.cusum = self.get_cusum(self.mag)
# Eta
self.eta = self.get_eta(self.mag, self.weighted_std)
|
Derive non-period-based features.
|
entailment
|
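For reference, the weighted statistics computed in shallow_run correspond to the following, with weights w_i = 1 / err_i as set in the code:
    w_i = \frac{1}{\sigma_i}, \qquad
    \bar{m}_w = \frac{\sum_i w_i m_i}{\sum_i w_i}, \qquad
    \sigma_w = \sqrt{\frac{\sum_i w_i (m_i - \bar{m}_w)^2}{\sum_i w_i}}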
def deep_run(self):
"""Derive period-based features."""
# Lomb-Scargle period finding.
self.get_period_LS(self.date, self.mag, self.n_threads, self.min_period)
# Features based on a phase-folded light curve
# such as Eta, slope-percentile, etc.
# These must be computed after get_period_LS() has been called.
# Create a phase-folded light curve.
# We use period * 2 to take eclipsing binaries into account.
phase_folded_date = self.date % (self.period * 2.)
sorted_index = np.argsort(phase_folded_date)
folded_date = phase_folded_date[sorted_index]
folded_mag = self.mag[sorted_index]
# phase Eta
self.phase_eta = self.get_eta(folded_mag, self.weighted_std)
# Slope percentile.
self.slope_per10, self.slope_per90 = \
self.slope_percentile(folded_date, folded_mag)
# phase Cusum
self.phase_cusum = self.get_cusum(folded_mag)
|
Derive period-based features.
|
entailment
|
def get_period_LS(self, date, mag, n_threads, min_period):
"""
Period finding using the Lomb-Scargle algorithm.
Finding two periods: the second period is estimated after whitening
the first period. Various other features are also calculated
using the derived periods.
Parameters
----------
date : array_like
An array of observed date, in days.
mag : array_like
An array of observed magnitude.
n_threads : int
The number of threads to use.
min_period : float
The minimum period to calculate.
"""
# DO NOT CHANGE THESE PARAMETERS.
oversampling = 3.
hifac = int((max(date) - min(date)) / len(date) / min_period * 2.)
# Minimum hifac
if hifac < 100:
hifac = 100
# Lomb-Scargle.
fx, fy, nout, jmax, prob = pLS.fasper(date, mag, oversampling, hifac,
n_threads)
self.f = fx[jmax]
self.period = 1. / self.f
self.period_uncertainty = self.get_period_uncertainty(fx, fy, jmax)
self.period_log10FAP = \
np.log10(pLS.getSignificance(fx, fy, nout, oversampling)[jmax])
# self.f_SNR1 = fy[jmax] / np.median(fy)
self.period_SNR = (fy[jmax] - np.median(fy)) / np.std(fy)
# Fit Fourier Series of order 3.
order = 3
# Initial guess of Fourier coefficients.
p0 = np.ones(order * 2 + 1)
date_period = (date % self.period) / self.period
p1, success = leastsq(self.residuals, p0,
args=(date_period, mag, order))
# fitted_y = self.FourierSeries(p1, date_period, order)
# print p1, self.mean, self.median
# plt.plot(date_period, self.mag, 'b+')
# plt.show()
# Derive Fourier features for the first period.
# Petersen, J. O., 1986, A&A
self.amplitude = np.sqrt(p1[1] ** 2 + p1[2] ** 2)
self.r21 = np.sqrt(p1[3] ** 2 + p1[4] ** 2) / self.amplitude
self.r31 = np.sqrt(p1[5] ** 2 + p1[6] ** 2) / self.amplitude
self.f_phase = np.arctan(-p1[1] / p1[2])
self.phi21 = np.arctan(-p1[3] / p1[4]) - 2. * self.f_phase
self.phi31 = np.arctan(-p1[5] / p1[6]) - 3. * self.f_phase
"""
# Derive a second period.
# Whitening a light curve.
residual_mag = mag - fitted_y
# Lomb-Scargle again to find the second period.
omega_top, power_top = search_frequencies(date, residual_mag, err,
#LS_kwargs={'generalized':True, 'subtract_mean':True},
n_eval=5000, n_retry=3, n_save=50)
self.period2 = 2*np.pi/omega_top[np.where(power_top==np.max(power_top))][0]
self.f2 = 1. / self.period2
self.f2_SNR = power_top[np.where(power_top==np.max(power_top))][0] \
* (len(self.date) - 1) / 2.
# Fit Fourier Series again.
p0 = [1.] * order * 2
date_period = (date % self.period) / self.period
p2, success = leastsq(self.residuals, p0,
args=(date_period, residual_mag, order))
fitted_y = self.FourierSeries(p2, date_period, order)
#plt.plot(date%self.period2, residual_mag, 'b+')
#plt.show()
# Derive Fourier features for the first second.
self.f2_amp = 2. * np.sqrt(p2[1]**2 + p2[2]**2)
self.f2_R21 = np.sqrt(p2[3]**2 + p2[4]**2) / self.f2_amp
self.f2_R31 = np.sqrt(p2[5]**2 + p2[6]**2) / self.f2_amp
self.f2_R41 = np.sqrt(p2[7]**2 + p2[8]**2) / self.f2_amp
self.f2_R51 = np.sqrt(p2[9]**2 + p2[10]**2) / self.f2_amp
self.f2_phase = np.arctan(-p2[1] / p2[2])
self.f2_phi21 = np.arctan(-p2[3] / p2[4]) - 2. * self.f2_phase
self.f2_phi31 = np.arctan(-p2[5] / p2[6]) - 3. * self.f2_phase
self.f2_phi41 = np.arctan(-p2[7] / p2[8]) - 4. * self.f2_phase
self.f2_phi51 = np.arctan(-p2[9] / p2[10]) - 5. * self.f2_phase
# Calculate features using the first and second periods.
self.f12_ratio = self.f2 / self.f1
self.f12_remain = self.f1 % self.f2 \
if self.f1 > self.f2 else self.f2 % self.f1
self.f12_amp = self.f2_amp / self.f1_amp
self.f12_phase = self.f2_phase - self.f1_phase
"""
|
Period finding using the Lomb-Scargle algorithm.
Finding two periods: the second period is estimated after whitening
the first period. Various other features are also calculated
using the derived periods.
Parameters
----------
date : array_like
An array of observed date, in days.
mag : array_like
An array of observed magnitude.
n_threads : int
The number of threads to use.
min_period : float
The minimum period to calculate.
|
entailment
|
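The Fourier features extracted from the order-3 fit above can be summarised as follows, writing the fitted coefficient vector as p1 = [a_0, a_1, b_1, a_2, b_2, a_3, b_3], where a_k multiplies sin(2*pi*k*phase) and b_k multiplies cos(2*pi*k*phase):
    A = \sqrt{a_1^2 + b_1^2}, \qquad
    R_{21} = \frac{\sqrt{a_2^2 + b_2^2}}{A}, \qquad
    R_{31} = \frac{\sqrt{a_3^2 + b_3^2}}{A}

    \phi_1 = \arctan\!\left(-\frac{a_1}{b_1}\right), \qquad
    \phi_{21} = \arctan\!\left(-\frac{a_2}{b_2}\right) - 2\phi_1, \qquad
    \phi_{31} = \arctan\!\left(-\frac{a_3}{b_3}\right) - 3\phi_1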
def get_period_uncertainty(self, fx, fy, jmax, fx_width=100):
"""
Get uncertainty of a period.
The uncertainty is defined as the half width of the range of frequencies
around the peak whose power drops below the average plus one standard
deviation of the power spectrum.
Since the resolution around the peak may be coarse,
the peak is not assumed to be Gaussian, so no scaling factor of
2.355 (= 2 * sqrt(2 * ln 2)) is applied.
Parameters
----------
fx : array_like
An array of frequencies.
fy : array_like
An array of amplitudes.
jmax : int
An index at the peak frequency.
fx_width : int, optional
Width of power spectrum to calculate uncertainty.
Returns
-------
p_uncertain : float
Period uncertainty.
"""
# Get subset
start_index = jmax - fx_width
end_index = jmax + fx_width
if start_index < 0:
start_index = 0
if end_index > len(fx) - 1:
end_index = len(fx) - 1
fx_subset = fx[start_index:end_index]
fy_subset = fy[start_index:end_index]
fy_mean = np.median(fy_subset)
fy_std = np.std(fy_subset)
# Find peak
max_index = np.argmax(fy_subset)
# Find list whose powers become lower than average + std.
index = np.where(fy_subset <= fy_mean + fy_std)[0]
# Find the edge at left and right. This is the full width.
left_index = index[(index < max_index)]
if len(left_index) == 0:
left_index = 0
else:
left_index = left_index[-1]
right_index = index[(index > max_index)]
if len(right_index) == 0:
right_index = len(fy_subset) - 1
else:
right_index = right_index[0]
# We assume the half of the full width is the period uncertainty.
half_width = (1. / fx_subset[left_index]
- 1. / fx_subset[right_index]) / 2.
period_uncertainty = half_width
return period_uncertainty
|
Get uncertainty of a period.
The uncertainty is defined as the half width of the range of frequencies
around the peak whose power drops below the average plus one standard
deviation of the power spectrum.
Since the resolution around the peak may be coarse,
the peak is not assumed to be Gaussian, so no scaling factor of
2.355 (= 2 * sqrt(2 * ln 2)) is applied.
Parameters
----------
fx : array_like
An array of frequencies.
fy : array_like
An array of amplitudes.
jmax : int
An index at the peak frequency.
fx_width : int, optional
Width of power spectrum to calculate uncertainty.
Returns
-------
p_uncertain : float
Period uncertainty.
|
entailment
|
def residuals(self, pars, x, y, order):
"""
Residual of Fourier Series.
Parameters
----------
pars : array_like
Fourier series parameters.
x : array_like
An array of date.
y : array_like
An array of true values to fit.
order : int
An order of Fourier Series.
"""
return y - self.fourier_series(pars, x, order)
|
Residual of Fourier Series.
Parameters
----------
pars : array_like
Fourier series parameters.
x : array_like
An array of date.
y : array_like
An array of true values to fit.
order : int
An order of Fourier Series.
|
entailment
|
def fourier_series(self, pars, x, order):
"""
Function to fit Fourier Series.
Parameters
----------
x : array_like
An array of date divided by period. It doesn't need to be sorted.
pars : array_like
Fourier series parameters.
order : int
An order of Fourier series.
"""
sum = pars[0]
for i in range(order):
sum += pars[i * 2 + 1] * np.sin(2 * np.pi * (i + 1) * x) \
+ pars[i * 2 + 2] * np.cos(2 * np.pi * (i + 1) * x)
return sum
|
Function to fit Fourier Series.
Parameters
----------
x : array_like
An array of date divided by period. It doesn't need to be sorted.
pars : array_like
Fourier series parameters.
order : int
An order of Fourier series.
|
entailment
|
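Written out, the series implemented above is
    m(x) = p_0 + \sum_{k=1}^{\mathrm{order}} \left[ p_{2k-1} \sin(2\pi k x) + p_{2k} \cos(2\pi k x) \right]
where x is the phase (date divided by period) and the p_i are the fitted coefficients.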
def get_stetson_k(self, mag, avg, err):
"""
Return Stetson K feature.
Parameters
----------
mag : array_like
An array of magnitude.
avg : float
An average value of magnitudes.
err : array_like
An array of magnitude errors.
Returns
-------
stetson_k : float
Stetson K value.
"""
residual = (mag - avg) / err
stetson_k = np.sum(np.fabs(residual)) \
/ np.sqrt(np.sum(residual * residual)) / np.sqrt(len(mag))
return stetson_k
|
Return Stetson K feature.
Parameters
----------
mag : array_like
An array of magnitude.
avg : float
An average value of magnitudes.
err : array_like
An array of magnitude errors.
Returns
-------
stetson_k : float
Stetson K value.
|
entailment
|
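Written out, the statistic implemented above is
    \delta_i = \frac{m_i - \bar{m}}{\sigma_i}, \qquad
    K = \frac{\sum_i |\delta_i|}{\sqrt{\sum_i \delta_i^2}\,\sqrt{N}}
where \bar{m} is the supplied central value (the median in shallow_run). K is the ratio of the mean absolute deviation to the root-mean-square deviation of the error-normalised residuals; for a Gaussian-distributed light curve it is roughly 0.8.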