| body (string, 26–98.2k chars) | body_hash (int64) | docstring (string, 1–16.8k chars) | path (string, 5–230 chars) | name (string, 1–96 chars) | repository_name (string, 7–89 chars) | lang (1 class: python) | body_without_docstring (string, 20–98.2k chars) |
|---|---|---|---|---|---|---|---|
def gsr(epi_data, mask, direction='y', ref_file=None, out_file=None):
'\n Computes the :abbr:`GSR (ghost to signal ratio)` [Giannelli2010]_. The\n procedure is as follows:\n\n #. Create a Nyquist ghost mask by circle-shifting the original mask by :math:`N/2`.\n\n #. Rotate by :math:`N/2`\n\n #. Remove the intersection with the original mask\n\n #. Generate a non-ghost background\n\n #. Calculate the :abbr:`GSR (ghost to signal ratio)`\n\n\n .. warning ::\n\n This should be used with EPI images for which the phase\n encoding direction is known.\n\n :param str epi_file: path to epi file\n :param str mask_file: path to brain mask\n :param str direction: the direction of phase encoding (x, y, all)\n :return: the computed gsr\n\n '
direction = direction.lower()
    if (direction[(- 1)] not in ['x', 'y']) and (direction != 'all'):
        raise Exception('Unknown direction %s, should be one of x, -x, y, -y, all' % direction)
if (direction == 'all'):
result = []
for newdir in ['x', 'y']:
ofile = None
if (out_file is not None):
                (fname, ext) = op.splitext(out_file)
if (ext == '.gz'):
(fname, ext2) = op.splitext(fname)
ext = (ext2 + ext)
ofile = ('%s_%s%s' % (fname, newdir, ext))
result += [gsr(epi_data, mask, newdir, ref_file=ref_file, out_file=ofile)]
return result
n2_mask = np.zeros_like(mask)
if (direction == 'x'):
        n2lim = int(np.floor(mask.shape[0] / 2))
n2_mask[:n2lim, :, :] = mask[n2lim:(n2lim * 2), :, :]
n2_mask[n2lim:(n2lim * 2), :, :] = mask[:n2lim, :, :]
elif (direction == 'y'):
        n2lim = int(np.floor(mask.shape[1] / 2))
n2_mask[:, :n2lim, :] = mask[:, n2lim:(n2lim * 2), :]
n2_mask[:, n2lim:(n2lim * 2), :] = mask[:, :n2lim, :]
elif (direction == 'z'):
        n2lim = int(np.floor(mask.shape[2] / 2))
n2_mask[:, :, :n2lim] = mask[:, :, n2lim:(n2lim * 2)]
n2_mask[:, :, n2lim:(n2lim * 2)] = mask[:, :, :n2lim]
n2_mask = (n2_mask * (1 - mask))
n2_mask = (n2_mask + (2 * ((1 - n2_mask) - mask)))
if ((ref_file is not None) and (out_file is not None)):
ref = nb.load(ref_file)
out = nb.Nifti1Image(n2_mask, ref.get_affine(), ref.get_header())
out.to_filename(out_file)
ghost = (epi_data[(n2_mask == 1)].mean() - epi_data[(n2_mask == 2)].mean())
signal = epi_data[(n2_mask == 0)].mean()
return float((ghost / signal))
| 1,144,839,233,202,535,200
|
Computes the :abbr:`GSR (ghost to signal ratio)` [Giannelli2010]_. The
procedure is as follows:
#. Create a Nyquist ghost mask by circle-shifting the original mask by :math:`N/2`.
#. Rotate by :math:`N/2`
#. Remove the intersection with the original mask
#. Generate a non-ghost background
#. Calculate the :abbr:`GSR (ghost to signal ratio)`
.. warning ::
This should be used with EPI images for which the phase
encoding direction is known.
:param str epi_file: path to epi file
:param str mask_file: path to brain mask
:param str direction: the direction of phase encoding (x, y, all)
:return: the computed gsr
|
packages/structural_dhcp_mriqc/structural_dhcp_mriqc/qc/functional.py
|
gsr
|
amakropoulos/structural-pipeline-measures
|
python
|
def gsr(epi_data, mask, direction='y', ref_file=None, out_file=None):
'\n Computes the :abbr:`GSR (ghost to signal ratio)` [Giannelli2010]_. The\n procedure is as follows:\n\n #. Create a Nyquist ghost mask by circle-shifting the original mask by :math:`N/2`.\n\n #. Rotate by :math:`N/2`\n\n #. Remove the intersection with the original mask\n\n #. Generate a non-ghost background\n\n #. Calculate the :abbr:`GSR (ghost to signal ratio)`\n\n\n .. warning ::\n\n This should be used with EPI images for which the phase\n encoding direction is known.\n\n :param str epi_file: path to epi file\n :param str mask_file: path to brain mask\n :param str direction: the direction of phase encoding (x, y, all)\n :return: the computed gsr\n\n '
direction = direction.lower()
    if (direction[(- 1)] not in ['x', 'y']) and (direction != 'all'):
        raise Exception('Unknown direction %s, should be one of x, -x, y, -y, all' % direction)
if (direction == 'all'):
result = []
for newdir in ['x', 'y']:
ofile = None
if (out_file is not None):
                (fname, ext) = op.splitext(out_file)
if (ext == '.gz'):
(fname, ext2) = op.splitext(fname)
ext = (ext2 + ext)
ofile = ('%s_%s%s' % (fname, newdir, ext))
result += [gsr(epi_data, mask, newdir, ref_file=ref_file, out_file=ofile)]
return result
n2_mask = np.zeros_like(mask)
if (direction == 'x'):
        n2lim = int(np.floor(mask.shape[0] / 2))
n2_mask[:n2lim, :, :] = mask[n2lim:(n2lim * 2), :, :]
n2_mask[n2lim:(n2lim * 2), :, :] = mask[:n2lim, :, :]
elif (direction == 'y'):
        n2lim = int(np.floor(mask.shape[1] / 2))
n2_mask[:, :n2lim, :] = mask[:, n2lim:(n2lim * 2), :]
n2_mask[:, n2lim:(n2lim * 2), :] = mask[:, :n2lim, :]
elif (direction == 'z'):
        n2lim = int(np.floor(mask.shape[2] / 2))
n2_mask[:, :, :n2lim] = mask[:, :, n2lim:(n2lim * 2)]
n2_mask[:, :, n2lim:(n2lim * 2)] = mask[:, :, :n2lim]
n2_mask = (n2_mask * (1 - mask))
n2_mask = (n2_mask + (2 * ((1 - n2_mask) - mask)))
if ((ref_file is not None) and (out_file is not None)):
ref = nb.load(ref_file)
out = nb.Nifti1Image(n2_mask, ref.get_affine(), ref.get_header())
out.to_filename(out_file)
ghost = (epi_data[(n2_mask == 1)].mean() - epi_data[(n2_mask == 2)].mean())
signal = epi_data[(n2_mask == 0)].mean()
return float((ghost / signal))
|
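The `gsr` body above can be exercised directly on in-memory arrays; the following is a minimal sketch with synthetic data (the volume shape, the box-shaped mask, and the `numpy` import are illustrative assumptions, not part of the dataset row).

```python
import numpy as np

# Synthetic EPI volume and a crude box-shaped brain mask (illustrative only).
epi = np.random.rand(64, 64, 32)
mask = np.zeros((64, 64, 32), dtype=int)
mask[16:48, 16:48, 8:24] = 1

# Ghost-to-signal ratio along the phase-encoding (y) axis.
print(gsr(epi, mask, direction='y'))
```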
def dvars(func, mask, output_all=False, out_file=None):
'\n Compute the mean :abbr:`DVARS (D referring to temporal\n derivative of timecourses, VARS referring to RMS variance over voxels)`\n [Power2012]_.\n\n Particularly, the *standardized* :abbr:`DVARS (D referring to temporal\n derivative of timecourses, VARS referring to RMS variance over voxels)`\n [Nichols2013]_ are computed.\n\n .. note:: Implementation details\n\n Uses the implementation of the `Yule-Walker equations\n from nitime\n <http://nipy.org/nitime/api/generated/nitime.algorithms.autoregressive.html#nitime.algorithms.autoregressive.AR_est_YW>`_\n for the :abbr:`AR (auto-regressive)` filtering of the fMRI signal.\n\n :param numpy.ndarray func: functional data, after head-motion-correction.\n :param numpy.ndarray mask: a 3D mask of the brain\n :param bool output_all: write out all dvars\n :param str out_file: a path to which the standardized dvars should be saved.\n :return: the standardized DVARS\n\n '
if (len(func.shape) != 4):
        raise RuntimeError('Input fMRI dataset should be 4-dimensional')
zv_mask = zero_variance(func, mask)
idx = np.where((zv_mask > 0))
mfunc = func[idx[0], idx[1], idx[2], :]
func_sd = ((np.percentile(mfunc, 75) - np.percentile(mfunc, 25)) / 1.349)
mfunc -= mfunc.mean(axis=1)[(..., np.newaxis)]
ak_coeffs = np.apply_along_axis(nta.AR_est_YW, 1, mfunc, 1)
func_sd_pd = np.squeeze((np.sqrt((2 * (1 - ak_coeffs[:, 0])).tolist()) * func_sd))
diff_sd_mean = func_sd_pd[(func_sd_pd > 0)].mean()
func_diff = np.diff(mfunc, axis=1)
dvars_nstd = func_diff.std(axis=0)
dvars_stdz = (dvars_nstd / diff_sd_mean)
diff_vx_stdz = (func_diff / np.array(([func_sd_pd] * func_diff.shape[(- 1)])).T)
dvars_vx_stdz = diff_vx_stdz.std(1, ddof=1)
if output_all:
gendvars = np.vstack((dvars_stdz, dvars_nstd, dvars_vx_stdz))
else:
gendvars = dvars_stdz.reshape(len(dvars_stdz), 1)
if (out_file is not None):
np.savetxt(out_file, gendvars, fmt='%.12f')
return gendvars
| 8,815,339,491,648,703,000
|
Compute the mean :abbr:`DVARS (D referring to temporal
derivative of timecourses, VARS referring to RMS variance over voxels)`
[Power2012]_.
Particularly, the *standardized* :abbr:`DVARS (D referring to temporal
derivative of timecourses, VARS referring to RMS variance over voxels)`
[Nichols2013]_ are computed.
.. note:: Implementation details
Uses the implementation of the `Yule-Walker equations
from nitime
<http://nipy.org/nitime/api/generated/nitime.algorithms.autoregressive.html#nitime.algorithms.autoregressive.AR_est_YW>`_
for the :abbr:`AR (auto-regressive)` filtering of the fMRI signal.
:param numpy.ndarray func: functional data, after head-motion-correction.
:param numpy.ndarray mask: a 3D mask of the brain
:param bool output_all: write out all dvars
:param str out_file: a path to which the standardized dvars should be saved.
:return: the standardized DVARS
|
packages/structural_dhcp_mriqc/structural_dhcp_mriqc/qc/functional.py
|
dvars
|
amakropoulos/structural-pipeline-measures
|
python
|
def dvars(func, mask, output_all=False, out_file=None):
'\n Compute the mean :abbr:`DVARS (D referring to temporal\n derivative of timecourses, VARS referring to RMS variance over voxels)`\n [Power2012]_.\n\n Particularly, the *standardized* :abbr:`DVARS (D referring to temporal\n derivative of timecourses, VARS referring to RMS variance over voxels)`\n [Nichols2013]_ are computed.\n\n .. note:: Implementation details\n\n Uses the implementation of the `Yule-Walker equations\n from nitime\n <http://nipy.org/nitime/api/generated/nitime.algorithms.autoregressive.html#nitime.algorithms.autoregressive.AR_est_YW>`_\n for the :abbr:`AR (auto-regressive)` filtering of the fMRI signal.\n\n :param numpy.ndarray func: functional data, after head-motion-correction.\n :param numpy.ndarray mask: a 3D mask of the brain\n :param bool output_all: write out all dvars\n :param str out_file: a path to which the standardized dvars should be saved.\n :return: the standardized DVARS\n\n '
if (len(func.shape) != 4):
        raise RuntimeError('Input fMRI dataset should be 4-dimensional')
zv_mask = zero_variance(func, mask)
idx = np.where((zv_mask > 0))
mfunc = func[idx[0], idx[1], idx[2], :]
func_sd = ((np.percentile(mfunc, 75) - np.percentile(mfunc, 25)) / 1.349)
mfunc -= mfunc.mean(axis=1)[(..., np.newaxis)]
ak_coeffs = np.apply_along_axis(nta.AR_est_YW, 1, mfunc, 1)
func_sd_pd = np.squeeze((np.sqrt((2 * (1 - ak_coeffs[:, 0])).tolist()) * func_sd))
diff_sd_mean = func_sd_pd[(func_sd_pd > 0)].mean()
func_diff = np.diff(mfunc, axis=1)
dvars_nstd = func_diff.std(axis=0)
dvars_stdz = (dvars_nstd / diff_sd_mean)
diff_vx_stdz = (func_diff / np.array(([func_sd_pd] * func_diff.shape[(- 1)])).T)
dvars_vx_stdz = diff_vx_stdz.std(1, ddof=1)
if output_all:
gendvars = np.vstack((dvars_stdz, dvars_nstd, dvars_vx_stdz))
else:
gendvars = dvars_stdz.reshape(len(dvars_stdz), 1)
if (out_file is not None):
np.savetxt(out_file, gendvars, fmt='%.12f')
return gendvars
|
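One detail worth calling out in `dvars` is the robust standard-deviation estimate it builds from the inter-quartile range (dividing the IQR by 1.349 recovers the standard deviation of a normal distribution). The snippet below is a small, self-contained sanity check of that constant with synthetic Gaussian data; it is an illustration, not code from the dataset.

```python
import numpy as np

rng = np.random.RandomState(0)
x = rng.normal(loc=0.0, scale=2.0, size=100000)

# For Gaussian data, IQR / 1.349 should recover the true scale (~2.0).
robust_sd = (np.percentile(x, 75) - np.percentile(x, 25)) / 1.349
print(robust_sd)
```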
def fd_jenkinson(in_file, rmax=80.0, out_file=None):
'\n Compute the :abbr:`FD (framewise displacement)` [Jenkinson2002]_\n on a 4D dataset, after ``3dvolreg`` has been executed\n (generally a file named ``*.affmat12.1D``).\n\n :param str in_file: path to epi file\n :param float rmax: the default radius (as in FSL) of a sphere represents\n the brain in which the angular displacements are projected.\n :param str out_file: a path for the output file with the FD\n\n :return: the output file with the FD, and the average FD along\n the time series\n :rtype: tuple(str, float)\n\n\n .. note ::\n\n :code:`infile` should have one 3dvolreg affine matrix in one row -\n NOT the motion parameters\n\n\n '
import sys
import math
if (out_file is None):
(fname, ext) = op.splitext(op.basename(in_file))
out_file = op.abspath(('%s_fdfile%s' % (fname, ext)))
if ('rel.rms' in in_file):
return in_file
pm_ = np.genfromtxt(in_file)
original_shape = pm_.shape
pm = np.zeros((pm_.shape[0], (pm_.shape[1] + 4)))
pm[:, :original_shape[1]] = pm_
pm[:, original_shape[1]:] = [0.0, 0.0, 0.0, 1.0]
T_rb_prev = np.matrix(np.eye(4))
flag = 0
X = [0]
for i in range(0, pm.shape[0]):
T_rb = np.matrix(pm[i].reshape(4, 4))
if (flag == 0):
flag = 1
else:
M = (np.dot(T_rb, T_rb_prev.I) - np.eye(4))
A = M[0:3, 0:3]
b = M[0:3, 3]
FD_J = math.sqrt(((((rmax * rmax) / 5) * np.trace(np.dot(A.T, A))) + np.dot(b.T, b)))
X.append(FD_J)
T_rb_prev = T_rb
np.savetxt(out_file, X)
return out_file
| 7,429,434,850,716,944,000
|
Compute the :abbr:`FD (framewise displacement)` [Jenkinson2002]_
on a 4D dataset, after ``3dvolreg`` has been executed
(generally a file named ``*.affmat12.1D``).
:param str in_file: path to epi file
:param float rmax: the default radius (as in FSL) of a sphere represents
the brain in which the angular displacements are projected.
:param str out_file: a path for the output file with the FD
:return: the output file with the FD, and the average FD along
the time series
:rtype: tuple(str, float)
.. note ::
:code:`infile` should have one 3dvolreg affine matrix in one row -
NOT the motion parameters
|
packages/structural_dhcp_mriqc/structural_dhcp_mriqc/qc/functional.py
|
fd_jenkinson
|
amakropoulos/structural-pipeline-measures
|
python
|
def fd_jenkinson(in_file, rmax=80.0, out_file=None):
'\n Compute the :abbr:`FD (framewise displacement)` [Jenkinson2002]_\n on a 4D dataset, after ``3dvolreg`` has been executed\n (generally a file named ``*.affmat12.1D``).\n\n :param str in_file: path to epi file\n :param float rmax: the default radius (as in FSL) of a sphere represents\n the brain in which the angular displacements are projected.\n :param str out_file: a path for the output file with the FD\n\n :return: the output file with the FD, and the average FD along\n the time series\n :rtype: tuple(str, float)\n\n\n .. note ::\n\n :code:`infile` should have one 3dvolreg affine matrix in one row -\n NOT the motion parameters\n\n\n '
import sys
import math
if (out_file is None):
(fname, ext) = op.splitext(op.basename(in_file))
out_file = op.abspath(('%s_fdfile%s' % (fname, ext)))
if ('rel.rms' in in_file):
return in_file
pm_ = np.genfromtxt(in_file)
original_shape = pm_.shape
pm = np.zeros((pm_.shape[0], (pm_.shape[1] + 4)))
pm[:, :original_shape[1]] = pm_
pm[:, original_shape[1]:] = [0.0, 0.0, 0.0, 1.0]
T_rb_prev = np.matrix(np.eye(4))
flag = 0
X = [0]
for i in range(0, pm.shape[0]):
T_rb = np.matrix(pm[i].reshape(4, 4))
if (flag == 0):
flag = 1
else:
M = (np.dot(T_rb, T_rb_prev.I) - np.eye(4))
A = M[0:3, 0:3]
b = M[0:3, 3]
FD_J = math.sqrt(((((rmax * rmax) / 5) * np.trace(np.dot(A.T, A))) + np.dot(b.T, b)))
X.append(FD_J)
T_rb_prev = T_rb
np.savetxt(out_file, X)
return out_file
|
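The framewise-displacement formula in `fd_jenkinson` reduces to the plain translation magnitude when there is no rotation, which makes it easy to spot-check. A minimal sketch follows; the file name and the two hand-made affines are hypothetical, and the input follows the expected ``*.affmat12.1D`` layout of 12 values per row (the top three rows of each 4x4 affine).

```python
import numpy as np

# Frame 0: identity; frame 1: pure 1 mm translation along x.
frames = np.array([
    [1, 0, 0, 0,   0, 1, 0, 0,   0, 0, 1, 0],
    [1, 0, 0, 1,   0, 1, 0, 0,   0, 0, 1, 0],
], dtype=float)
np.savetxt('toy.affmat12.1D', frames)

fd_path = fd_jenkinson('toy.affmat12.1D', rmax=80.0)
print(np.loadtxt(fd_path))   # expected: [0., 1.]
```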
def gcor(func, mask):
'\n Compute the :abbr:`GCOR (global correlation)`.\n\n :param numpy.ndarray func: input fMRI dataset, after motion correction\n :param numpy.ndarray mask: 3D brain mask\n :return: the computed GCOR value\n\n '
tv_mask = zero_variance(func, mask)
idx = np.where((tv_mask > 0))
zscores = scipy.stats.mstats.zscore(func[idx[0], idx[1], idx[2], :], axis=1)
avg_ts = zscores.mean(axis=0)
return float((avg_ts.transpose().dot(avg_ts) / len(avg_ts)))
| 1,447,277,806,473,772,000
|
Compute the :abbr:`GCOR (global correlation)`.
:param numpy.ndarray func: input fMRI dataset, after motion correction
:param numpy.ndarray mask: 3D brain mask
:return: the computed GCOR value
|
packages/structural_dhcp_mriqc/structural_dhcp_mriqc/qc/functional.py
|
gcor
|
amakropoulos/structural-pipeline-measures
|
python
|
def gcor(func, mask):
'\n Compute the :abbr:`GCOR (global correlation)`.\n\n :param numpy.ndarray func: input fMRI dataset, after motion correction\n :param numpy.ndarray mask: 3D brain mask\n :return: the computed GCOR value\n\n '
tv_mask = zero_variance(func, mask)
idx = np.where((tv_mask > 0))
zscores = scipy.stats.mstats.zscore(func[idx[0], idx[1], idx[2], :], axis=1)
avg_ts = zscores.mean(axis=0)
return float((avg_ts.transpose().dot(avg_ts) / len(avg_ts)))
|
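Because `gcor` averages z-scored voxel time series and then takes their mean squared value, feeding it voxels that all share one time course should return 1.0. A small synthetic check (the shapes and the sine time course are illustrative assumptions; `scipy` and the `zero_variance` helper in the next row are required):

```python
import numpy as np

t = np.linspace(0.0, 1.0, 40)
func = np.tile(np.sin(2 * np.pi * t), (4, 4, 4, 1))   # every voxel identical
mask = np.ones((4, 4, 4), dtype=int)

print(gcor(func, mask))   # expected: ~1.0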
def zero_variance(func, mask):
'\n Mask out voxels with zero variance across t-axis\n\n :param numpy.ndarray func: input fMRI dataset, after motion correction\n :param numpy.ndarray mask: 3D brain mask\n :return: the 3D mask of voxels with nonzero variance across :math:`t`.\n :rtype: numpy.ndarray\n\n '
idx = np.where((mask > 0))
func = func[idx[0], idx[1], idx[2], :]
tvariance = func.var(axis=1)
tv_mask = np.zeros_like(tvariance)
tv_mask[(tvariance > 0)] = 1
newmask = np.zeros_like(mask)
newmask[idx] = tv_mask
return newmask
| 7,738,469,890,951,027,000
|
Mask out voxels with zero variance across t-axis
:param numpy.ndarray func: input fMRI dataset, after motion correction
:param numpy.ndarray mask: 3D brain mask
:return: the 3D mask of voxels with nonzero variance across :math:`t`.
:rtype: numpy.ndarray
|
packages/structural_dhcp_mriqc/structural_dhcp_mriqc/qc/functional.py
|
zero_variance
|
amakropoulos/structural-pipeline-measures
|
python
|
def zero_variance(func, mask):
'\n Mask out voxels with zero variance across t-axis\n\n :param numpy.ndarray func: input fMRI dataset, after motion correction\n :param numpy.ndarray mask: 3D brain mask\n :return: the 3D mask of voxels with nonzero variance across :math:`t`.\n :rtype: numpy.ndarray\n\n '
idx = np.where((mask > 0))
func = func[idx[0], idx[1], idx[2], :]
tvariance = func.var(axis=1)
tv_mask = np.zeros_like(tvariance)
tv_mask[(tvariance > 0)] = 1
newmask = np.zeros_like(mask)
newmask[idx] = tv_mask
return newmask
|
def main():
'Run administrative tasks.'
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tutotrial.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError("Couldn't import Django. Are you sure it's installed and available on your PYTHONPATH environment variable? Did you forget to activate a virtual environment?") from exc
execute_from_command_line(sys.argv)
| 6,364,296,494,956,147,000
|
Run administrative tasks.
|
tutorial/manage.py
|
main
|
aiueocode/djangorest
|
python
|
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tutotrial.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError("Couldn't import Django. Are you sure it's installed and available on your PYTHONPATH environment variable? Did you forget to activate a virtual environment?") from exc
execute_from_command_line(sys.argv)
|
def forget(self):
'Forget about (and possibly remove the result of) this task.'
self._cache = None
self.backend.forget(self.id)
| 8,274,503,778,675,953,000
|
Forget about (and possibly remove the result of) this task.
|
venv/lib/python2.7/site-packages/celery/result.py
|
forget
|
CharleyFarley/ovvio
|
python
|
def forget(self):
self._cache = None
self.backend.forget(self.id)
|
def revoke(self, connection=None, terminate=False, signal=None, wait=False, timeout=None):
'Send revoke signal to all workers.\n\n Any worker receiving the task, or having reserved the\n task, *must* ignore it.\n\n :keyword terminate: Also terminate the process currently working\n on the task (if any).\n :keyword signal: Name of signal to send to process if terminate.\n Default is TERM.\n :keyword wait: Wait for replies from workers. Will wait for 1 second\n by default or you can specify a custom ``timeout``.\n :keyword timeout: Time in seconds to wait for replies if ``wait``\n enabled.\n\n '
self.app.control.revoke(self.id, connection=connection, terminate=terminate, signal=signal, reply=wait, timeout=timeout)
| 257,758,433,628,583,740
|
Send revoke signal to all workers.
Any worker receiving the task, or having reserved the
task, *must* ignore it.
:keyword terminate: Also terminate the process currently working
on the task (if any).
:keyword signal: Name of signal to send to process if terminate.
Default is TERM.
:keyword wait: Wait for replies from workers. Will wait for 1 second
by default or you can specify a custom ``timeout``.
:keyword timeout: Time in seconds to wait for replies if ``wait``
enabled.
|
venv/lib/python2.7/site-packages/celery/result.py
|
revoke
|
CharleyFarley/ovvio
|
python
|
def revoke(self, connection=None, terminate=False, signal=None, wait=False, timeout=None):
'Send revoke signal to all workers.\n\n Any worker receiving the task, or having reserved the\n task, *must* ignore it.\n\n :keyword terminate: Also terminate the process currently working\n on the task (if any).\n :keyword signal: Name of signal to send to process if terminate.\n Default is TERM.\n :keyword wait: Wait for replies from workers. Will wait for 1 second\n by default or you can specify a custom ``timeout``.\n :keyword timeout: Time in seconds to wait for replies if ``wait``\n enabled.\n\n '
self.app.control.revoke(self.id, connection=connection, terminate=terminate, signal=signal, reply=wait, timeout=timeout)
|
def get(self, timeout=None, propagate=True, interval=0.5, no_ack=True, follow_parents=True, EXCEPTION_STATES=states.EXCEPTION_STATES, PROPAGATE_STATES=states.PROPAGATE_STATES):
'Wait until task is ready, and return its result.\n\n .. warning::\n\n Waiting for tasks within a task may lead to deadlocks.\n Please read :ref:`task-synchronous-subtasks`.\n\n :keyword timeout: How long to wait, in seconds, before the\n operation times out.\n :keyword propagate: Re-raise exception if the task failed.\n :keyword interval: Time to wait (in seconds) before retrying to\n retrieve the result. Note that this does not have any effect\n when using the amqp result store backend, as it does not\n use polling.\n :keyword no_ack: Enable amqp no ack (automatically acknowledge\n message). If this is :const:`False` then the message will\n **not be acked**.\n :keyword follow_parents: Reraise any exception raised by parent task.\n\n :raises celery.exceptions.TimeoutError: if `timeout` is not\n :const:`None` and the result does not arrive within `timeout`\n seconds.\n\n If the remote call raised an exception then that exception will\n be re-raised.\n\n '
assert_will_not_block()
on_interval = None
if (follow_parents and propagate and self.parent):
on_interval = self._maybe_reraise_parent_error
on_interval()
if self._cache:
if propagate:
self.maybe_reraise()
return self.result
meta = self.backend.wait_for(self.id, timeout=timeout, interval=interval, on_interval=on_interval, no_ack=no_ack)
if meta:
self._maybe_set_cache(meta)
status = meta['status']
if ((status in PROPAGATE_STATES) and propagate):
raise meta['result']
return meta['result']
| 7,289,805,622,543,313,000
|
Wait until task is ready, and return its result.
.. warning::
Waiting for tasks within a task may lead to deadlocks.
Please read :ref:`task-synchronous-subtasks`.
:keyword timeout: How long to wait, in seconds, before the
operation times out.
:keyword propagate: Re-raise exception if the task failed.
:keyword interval: Time to wait (in seconds) before retrying to
retrieve the result. Note that this does not have any effect
when using the amqp result store backend, as it does not
use polling.
:keyword no_ack: Enable amqp no ack (automatically acknowledge
message). If this is :const:`False` then the message will
**not be acked**.
:keyword follow_parents: Reraise any exception raised by parent task.
:raises celery.exceptions.TimeoutError: if `timeout` is not
:const:`None` and the result does not arrive within `timeout`
seconds.
If the remote call raised an exception then that exception will
be re-raised.
|
venv/lib/python2.7/site-packages/celery/result.py
|
get
|
CharleyFarley/ovvio
|
python
|
def get(self, timeout=None, propagate=True, interval=0.5, no_ack=True, follow_parents=True, EXCEPTION_STATES=states.EXCEPTION_STATES, PROPAGATE_STATES=states.PROPAGATE_STATES):
'Wait until task is ready, and return its result.\n\n .. warning::\n\n Waiting for tasks within a task may lead to deadlocks.\n Please read :ref:`task-synchronous-subtasks`.\n\n :keyword timeout: How long to wait, in seconds, before the\n operation times out.\n :keyword propagate: Re-raise exception if the task failed.\n :keyword interval: Time to wait (in seconds) before retrying to\n retrieve the result. Note that this does not have any effect\n when using the amqp result store backend, as it does not\n use polling.\n :keyword no_ack: Enable amqp no ack (automatically acknowledge\n message). If this is :const:`False` then the message will\n **not be acked**.\n :keyword follow_parents: Reraise any exception raised by parent task.\n\n :raises celery.exceptions.TimeoutError: if `timeout` is not\n :const:`None` and the result does not arrive within `timeout`\n seconds.\n\n If the remote call raised an exception then that exception will\n be re-raised.\n\n '
assert_will_not_block()
on_interval = None
if (follow_parents and propagate and self.parent):
on_interval = self._maybe_reraise_parent_error
on_interval()
if self._cache:
if propagate:
self.maybe_reraise()
return self.result
meta = self.backend.wait_for(self.id, timeout=timeout, interval=interval, on_interval=on_interval, no_ack=no_ack)
if meta:
self._maybe_set_cache(meta)
status = meta['status']
if ((status in PROPAGATE_STATES) and propagate):
raise meta['result']
return meta['result']
|
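The `revoke` and `get` methods above belong to Celery's `AsyncResult`; a minimal end-to-end sketch of how such a result object is normally obtained and waited on is shown below. The app name, broker/backend URLs, and the `add` task are placeholders, and a broker plus a running worker are assumed.

```python
from celery import Celery

app = Celery('demo',
             broker='redis://localhost:6379/0',
             backend='redis://localhost:6379/0')

@app.task
def add(x, y):
    return x + y

# With a worker running against the same broker:
result = add.delay(2, 2)          # returns an AsyncResult
print(result.state)               # e.g. 'PENDING', then 'SUCCESS'
print(result.get(timeout=10))     # blocks for the return value -> 4
```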
def collect(self, intermediate=False, **kwargs):
'Iterator, like :meth:`get` will wait for the task to complete,\n but will also follow :class:`AsyncResult` and :class:`ResultSet`\n returned by the task, yielding ``(result, value)`` tuples for each\n result in the tree.\n\n An example would be having the following tasks:\n\n .. code-block:: python\n\n from celery import group\n from proj.celery import app\n\n @app.task(trail=True)\n def A(how_many):\n return group(B.s(i) for i in range(how_many))()\n\n @app.task(trail=True)\n def B(i):\n return pow2.delay(i)\n\n @app.task(trail=True)\n def pow2(i):\n return i ** 2\n\n Note that the ``trail`` option must be enabled\n so that the list of children is stored in ``result.children``.\n This is the default but enabled explicitly for illustration.\n\n Calling :meth:`collect` would return:\n\n .. code-block:: python\n\n >>> from celery.result import ResultBase\n >>> from proj.tasks import A\n\n >>> result = A.delay(10)\n >>> [v for v in result.collect()\n ... if not isinstance(v, (ResultBase, tuple))]\n [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]\n\n '
for (_, R) in self.iterdeps(intermediate=intermediate):
(yield (R, R.get(**kwargs)))
| 2,989,732,586,260,419,600
|
Iterator, like :meth:`get` will wait for the task to complete,
but will also follow :class:`AsyncResult` and :class:`ResultSet`
returned by the task, yielding ``(result, value)`` tuples for each
result in the tree.
An example would be having the following tasks:
.. code-block:: python
from celery import group
from proj.celery import app
@app.task(trail=True)
def A(how_many):
return group(B.s(i) for i in range(how_many))()
@app.task(trail=True)
def B(i):
return pow2.delay(i)
@app.task(trail=True)
def pow2(i):
return i ** 2
Note that the ``trail`` option must be enabled
so that the list of children is stored in ``result.children``.
This is the default but enabled explicitly for illustration.
Calling :meth:`collect` would return:
.. code-block:: python
>>> from celery.result import ResultBase
>>> from proj.tasks import A
>>> result = A.delay(10)
>>> [v for v in result.collect()
... if not isinstance(v, (ResultBase, tuple))]
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
|
venv/lib/python2.7/site-packages/celery/result.py
|
collect
|
CharleyFarley/ovvio
|
python
|
def collect(self, intermediate=False, **kwargs):
'Iterator, like :meth:`get` will wait for the task to complete,\n but will also follow :class:`AsyncResult` and :class:`ResultSet`\n returned by the task, yielding ``(result, value)`` tuples for each\n result in the tree.\n\n An example would be having the following tasks:\n\n .. code-block:: python\n\n from celery import group\n from proj.celery import app\n\n @app.task(trail=True)\n def A(how_many):\n return group(B.s(i) for i in range(how_many))()\n\n @app.task(trail=True)\n def B(i):\n return pow2.delay(i)\n\n @app.task(trail=True)\n def pow2(i):\n return i ** 2\n\n Note that the ``trail`` option must be enabled\n so that the list of children is stored in ``result.children``.\n This is the default but enabled explicitly for illustration.\n\n Calling :meth:`collect` would return:\n\n .. code-block:: python\n\n >>> from celery.result import ResultBase\n >>> from proj.tasks import A\n\n >>> result = A.delay(10)\n >>> [v for v in result.collect()\n ... if not isinstance(v, (ResultBase, tuple))]\n [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]\n\n '
for (_, R) in self.iterdeps(intermediate=intermediate):
(yield (R, R.get(**kwargs)))
|
def ready(self):
'Returns :const:`True` if the task has been executed.\n\n If the task is still running, pending, or is waiting\n for retry then :const:`False` is returned.\n\n '
return (self.state in self.backend.READY_STATES)
| 7,264,632,802,835,550,000
|
Returns :const:`True` if the task has been executed.
If the task is still running, pending, or is waiting
for retry then :const:`False` is returned.
|
venv/lib/python2.7/site-packages/celery/result.py
|
ready
|
CharleyFarley/ovvio
|
python
|
def ready(self):
'Returns :const:`True` if the task has been executed.\n\n If the task is still running, pending, or is waiting\n for retry then :const:`False` is returned.\n\n '
return (self.state in self.backend.READY_STATES)
|
def successful(self):
'Returns :const:`True` if the task executed successfully.'
return (self.state == states.SUCCESS)
| 4,838,101,157,056,682,000
|
Returns :const:`True` if the task executed successfully.
|
venv/lib/python2.7/site-packages/celery/result.py
|
successful
|
CharleyFarley/ovvio
|
python
|
def successful(self):
return (self.state == states.SUCCESS)
|
def failed(self):
'Returns :const:`True` if the task failed.'
return (self.state == states.FAILURE)
| 3,617,739,404,020,108,000
|
Returns :const:`True` if the task failed.
|
venv/lib/python2.7/site-packages/celery/result.py
|
failed
|
CharleyFarley/ovvio
|
python
|
def failed(self):
return (self.state == states.FAILURE)
|
def __str__(self):
'`str(self) -> self.id`'
return str(self.id)
| 4,040,552,640,130,330,600
|
`str(self) -> self.id`
|
venv/lib/python2.7/site-packages/celery/result.py
|
__str__
|
CharleyFarley/ovvio
|
python
|
def __str__(self):
return str(self.id)
|
def __hash__(self):
'`hash(self) -> hash(self.id)`'
return hash(self.id)
| 1,800,964,682,614,371,300
|
`hash(self) -> hash(self.id)`
|
venv/lib/python2.7/site-packages/celery/result.py
|
__hash__
|
CharleyFarley/ovvio
|
python
|
def __hash__(self):
return hash(self.id)
|
@property
def result(self):
'When the task has been executed, this contains the return value.\n If the task raised an exception, this will be the exception\n instance.'
return self._get_task_meta()['result']
| 1,858,990,990,539,996,000
|
When the task has been executed, this contains the return value.
If the task raised an exception, this will be the exception
instance.
|
venv/lib/python2.7/site-packages/celery/result.py
|
result
|
CharleyFarley/ovvio
|
python
|
@property
def result(self):
'When the task has been executed, this contains the return value.\n If the task raised an exception, this will be the exception\n instance.'
return self._get_task_meta()['result']
|
@property
def traceback(self):
'Get the traceback of a failed task.'
return self._get_task_meta().get('traceback')
| 3,479,075,735,694,560,000
|
Get the traceback of a failed task.
|
venv/lib/python2.7/site-packages/celery/result.py
|
traceback
|
CharleyFarley/ovvio
|
python
|
@property
def traceback(self):
return self._get_task_meta().get('traceback')
|
@property
def state(self):
'The tasks current state.\n\n Possible values includes:\n\n *PENDING*\n\n The task is waiting for execution.\n\n *STARTED*\n\n The task has been started.\n\n *RETRY*\n\n The task is to be retried, possibly because of failure.\n\n *FAILURE*\n\n The task raised an exception, or has exceeded the retry limit.\n The :attr:`result` attribute then contains the\n exception raised by the task.\n\n *SUCCESS*\n\n The task executed successfully. The :attr:`result` attribute\n then contains the tasks return value.\n\n '
return self._get_task_meta()['status']
| -8,797,558,300,920,828,000
|
The tasks current state.
Possible values includes:
*PENDING*
The task is waiting for execution.
*STARTED*
The task has been started.
*RETRY*
The task is to be retried, possibly because of failure.
*FAILURE*
The task raised an exception, or has exceeded the retry limit.
The :attr:`result` attribute then contains the
exception raised by the task.
*SUCCESS*
The task executed successfully. The :attr:`result` attribute
then contains the tasks return value.
|
venv/lib/python2.7/site-packages/celery/result.py
|
state
|
CharleyFarley/ovvio
|
python
|
@property
def state(self):
'The tasks current state.\n\n Possible values includes:\n\n *PENDING*\n\n The task is waiting for execution.\n\n *STARTED*\n\n The task has been started.\n\n *RETRY*\n\n The task is to be retried, possibly because of failure.\n\n *FAILURE*\n\n The task raised an exception, or has exceeded the retry limit.\n The :attr:`result` attribute then contains the\n exception raised by the task.\n\n *SUCCESS*\n\n The task executed successfully. The :attr:`result` attribute\n then contains the tasks return value.\n\n '
return self._get_task_meta()['status']
|
@property
def task_id(self):
'compat alias to :attr:`id`'
return self.id
| -4,713,316,254,543,998,000
|
compat alias to :attr:`id`
|
venv/lib/python2.7/site-packages/celery/result.py
|
task_id
|
CharleyFarley/ovvio
|
python
|
@property
def task_id(self):
return self.id
|
def add(self, result):
'Add :class:`AsyncResult` as a new member of the set.\n\n Does nothing if the result is already a member.\n\n '
if (result not in self.results):
self.results.append(result)
| 2,171,822,964,541,988,000
|
Add :class:`AsyncResult` as a new member of the set.
Does nothing if the result is already a member.
|
venv/lib/python2.7/site-packages/celery/result.py
|
add
|
CharleyFarley/ovvio
|
python
|
def add(self, result):
'Add :class:`AsyncResult` as a new member of the set.\n\n Does nothing if the result is already a member.\n\n '
if (result not in self.results):
self.results.append(result)
|
def remove(self, result):
'Remove result from the set; it must be a member.\n\n :raises KeyError: if the result is not a member.\n\n '
if isinstance(result, string_t):
result = self.app.AsyncResult(result)
try:
self.results.remove(result)
except ValueError:
raise KeyError(result)
| 4,068,160,251,530,570,000
|
Remove result from the set; it must be a member.
:raises KeyError: if the result is not a member.
|
venv/lib/python2.7/site-packages/celery/result.py
|
remove
|
CharleyFarley/ovvio
|
python
|
def remove(self, result):
'Remove result from the set; it must be a member.\n\n :raises KeyError: if the result is not a member.\n\n '
if isinstance(result, string_t):
result = self.app.AsyncResult(result)
try:
self.results.remove(result)
except ValueError:
raise KeyError(result)
|
def discard(self, result):
'Remove result from the set if it is a member.\n\n If it is not a member, do nothing.\n\n '
try:
self.remove(result)
except KeyError:
pass
| 1,490,687,160,468,694,800
|
Remove result from the set if it is a member.
If it is not a member, do nothing.
|
venv/lib/python2.7/site-packages/celery/result.py
|
discard
|
CharleyFarley/ovvio
|
python
|
def discard(self, result):
'Remove result from the set if it is a member.\n\n If it is not a member, do nothing.\n\n '
try:
self.remove(result)
except KeyError:
pass
|
def update(self, results):
'Update set with the union of itself and an iterable with\n results.'
self.results.extend((r for r in results if (r not in self.results)))
| 1,345,366,747,161,373,200
|
Update set with the union of itself and an iterable with
results.
|
venv/lib/python2.7/site-packages/celery/result.py
|
update
|
CharleyFarley/ovvio
|
python
|
def update(self, results):
'Update set with the union of itself and an iterable with\n results.'
self.results.extend((r for r in results if (r not in self.results)))
|
def clear(self):
'Remove all results from this set.'
self.results[:] = []
| -7,683,751,693,161,588,000
|
Remove all results from this set.
|
venv/lib/python2.7/site-packages/celery/result.py
|
clear
|
CharleyFarley/ovvio
|
python
|
def clear(self):
self.results[:] = []
|
def successful(self):
'Was all of the tasks successful?\n\n :returns: :const:`True` if all of the tasks finished\n successfully (i.e. did not raise an exception).\n\n '
return all((result.successful() for result in self.results))
| 3,133,950,208,285,973,500
|
Was all of the tasks successful?
:returns: :const:`True` if all of the tasks finished
successfully (i.e. did not raise an exception).
|
venv/lib/python2.7/site-packages/celery/result.py
|
successful
|
CharleyFarley/ovvio
|
python
|
def successful(self):
'Was all of the tasks successful?\n\n :returns: :const:`True` if all of the tasks finished\n successfully (i.e. did not raise an exception).\n\n '
return all((result.successful() for result in self.results))
|
def failed(self):
'Did any of the tasks fail?\n\n :returns: :const:`True` if one of the tasks failed.\n (i.e., raised an exception)\n\n '
return any((result.failed() for result in self.results))
| -4,198,079,692,271,458,000
|
Did any of the tasks fail?
:returns: :const:`True` if one of the tasks failed.
(i.e., raised an exception)
|
venv/lib/python2.7/site-packages/celery/result.py
|
failed
|
CharleyFarley/ovvio
|
python
|
def failed(self):
'Did any of the tasks fail?\n\n :returns: :const:`True` if one of the tasks failed.\n (i.e., raised an exception)\n\n '
return any((result.failed() for result in self.results))
|
def waiting(self):
'Are any of the tasks incomplete?\n\n :returns: :const:`True` if one of the tasks are still\n waiting for execution.\n\n '
return any(((not result.ready()) for result in self.results))
| -4,263,889,825,430,423,600
|
Are any of the tasks incomplete?
:returns: :const:`True` if one of the tasks are still
waiting for execution.
|
venv/lib/python2.7/site-packages/celery/result.py
|
waiting
|
CharleyFarley/ovvio
|
python
|
def waiting(self):
'Are any of the tasks incomplete?\n\n :returns: :const:`True` if one of the tasks are still\n waiting for execution.\n\n '
return any(((not result.ready()) for result in self.results))
|
def ready(self):
'Did all of the tasks complete? (either by success of failure).\n\n :returns: :const:`True` if all of the tasks has been\n executed.\n\n '
return all((result.ready() for result in self.results))
| 5,308,216,925,372,778,000
|
Did all of the tasks complete? (either by success of failure).
:returns: :const:`True` if all of the tasks has been
executed.
|
venv/lib/python2.7/site-packages/celery/result.py
|
ready
|
CharleyFarley/ovvio
|
python
|
def ready(self):
'Did all of the tasks complete? (either by success of failure).\n\n :returns: :const:`True` if all of the tasks has been\n executed.\n\n '
return all((result.ready() for result in self.results))
|
def completed_count(self):
'Task completion count.\n\n :returns: the number of tasks completed.\n\n '
return sum((int(result.successful()) for result in self.results))
| -7,257,203,177,105,533,000
|
Task completion count.
:returns: the number of tasks completed.
|
venv/lib/python2.7/site-packages/celery/result.py
|
completed_count
|
CharleyFarley/ovvio
|
python
|
def completed_count(self):
'Task completion count.\n\n :returns: the number of tasks completed.\n\n '
return sum((int(result.successful()) for result in self.results))
|
def forget(self):
'Forget about (and possible remove the result of) all the tasks.'
for result in self.results:
result.forget()
| -1,757,364,964,035,442,200
|
Forget about (and possible remove the result of) all the tasks.
|
venv/lib/python2.7/site-packages/celery/result.py
|
forget
|
CharleyFarley/ovvio
|
python
|
def forget(self):
for result in self.results:
result.forget()
|
def revoke(self, connection=None, terminate=False, signal=None, wait=False, timeout=None):
'Send revoke signal to all workers for all tasks in the set.\n\n :keyword terminate: Also terminate the process currently working\n on the task (if any).\n :keyword signal: Name of signal to send to process if terminate.\n Default is TERM.\n :keyword wait: Wait for replies from worker. Will wait for 1 second\n by default or you can specify a custom ``timeout``.\n :keyword timeout: Time in seconds to wait for replies if ``wait``\n enabled.\n\n '
self.app.control.revoke([r.id for r in self.results], connection=connection, timeout=timeout, terminate=terminate, signal=signal, reply=wait)
| 7,090,524,531,389,367,000
|
Send revoke signal to all workers for all tasks in the set.
:keyword terminate: Also terminate the process currently working
on the task (if any).
:keyword signal: Name of signal to send to process if terminate.
Default is TERM.
:keyword wait: Wait for replies from worker. Will wait for 1 second
by default or you can specify a custom ``timeout``.
:keyword timeout: Time in seconds to wait for replies if ``wait``
enabled.
|
venv/lib/python2.7/site-packages/celery/result.py
|
revoke
|
CharleyFarley/ovvio
|
python
|
def revoke(self, connection=None, terminate=False, signal=None, wait=False, timeout=None):
'Send revoke signal to all workers for all tasks in the set.\n\n :keyword terminate: Also terminate the process currently working\n on the task (if any).\n :keyword signal: Name of signal to send to process if terminate.\n Default is TERM.\n :keyword wait: Wait for replies from worker. Will wait for 1 second\n by default or you can specify a custom ``timeout``.\n :keyword timeout: Time in seconds to wait for replies if ``wait``\n enabled.\n\n '
self.app.control.revoke([r.id for r in self.results], connection=connection, timeout=timeout, terminate=terminate, signal=signal, reply=wait)
|
def __getitem__(self, index):
'`res[i] -> res.results[i]`'
return self.results[index]
| -5,759,615,132,893,857,000
|
`res[i] -> res.results[i]`
|
venv/lib/python2.7/site-packages/celery/result.py
|
__getitem__
|
CharleyFarley/ovvio
|
python
|
def __getitem__(self, index):
return self.results[index]
|
@deprecated('3.2', '3.3')
def iterate(self, timeout=None, propagate=True, interval=0.5):
'Deprecated method, use :meth:`get` with a callback argument.'
elapsed = 0.0
results = OrderedDict(((result.id, copy(result)) for result in self.results))
while results:
removed = set()
for (task_id, result) in items(results):
if result.ready():
(yield result.get(timeout=(timeout and (timeout - elapsed)), propagate=propagate))
removed.add(task_id)
elif result.backend.subpolling_interval:
time.sleep(result.backend.subpolling_interval)
for task_id in removed:
results.pop(task_id, None)
time.sleep(interval)
elapsed += interval
if (timeout and (elapsed >= timeout)):
raise TimeoutError('The operation timed out')
| -6,680,390,843,746,875,000
|
Deprecated method, use :meth:`get` with a callback argument.
|
venv/lib/python2.7/site-packages/celery/result.py
|
iterate
|
CharleyFarley/ovvio
|
python
|
@deprecated('3.2', '3.3')
def iterate(self, timeout=None, propagate=True, interval=0.5):
elapsed = 0.0
results = OrderedDict(((result.id, copy(result)) for result in self.results))
while results:
removed = set()
for (task_id, result) in items(results):
if result.ready():
(yield result.get(timeout=(timeout and (timeout - elapsed)), propagate=propagate))
removed.add(task_id)
elif result.backend.subpolling_interval:
time.sleep(result.backend.subpolling_interval)
for task_id in removed:
results.pop(task_id, None)
time.sleep(interval)
elapsed += interval
if (timeout and (elapsed >= timeout)):
raise TimeoutError('The operation timed out')
|
def get(self, timeout=None, propagate=True, interval=0.5, callback=None, no_ack=True):
'See :meth:`join`\n\n This is here for API compatibility with :class:`AsyncResult`,\n in addition it uses :meth:`join_native` if available for the\n current result backend.\n\n '
return (self.join_native if self.supports_native_join else self.join)(timeout=timeout, propagate=propagate, interval=interval, callback=callback, no_ack=no_ack)
| 2,206,958,811,034,429,400
|
See :meth:`join`
This is here for API compatibility with :class:`AsyncResult`,
in addition it uses :meth:`join_native` if available for the
current result backend.
|
venv/lib/python2.7/site-packages/celery/result.py
|
get
|
CharleyFarley/ovvio
|
python
|
def get(self, timeout=None, propagate=True, interval=0.5, callback=None, no_ack=True):
'See :meth:`join`\n\n This is here for API compatibility with :class:`AsyncResult`,\n in addition it uses :meth:`join_native` if available for the\n current result backend.\n\n '
return (self.join_native if self.supports_native_join else self.join)(timeout=timeout, propagate=propagate, interval=interval, callback=callback, no_ack=no_ack)
|
def join(self, timeout=None, propagate=True, interval=0.5, callback=None, no_ack=True):
'Gathers the results of all tasks as a list in order.\n\n .. note::\n\n This can be an expensive operation for result store\n backends that must resort to polling (e.g. database).\n\n You should consider using :meth:`join_native` if your backend\n supports it.\n\n .. warning::\n\n Waiting for tasks within a task may lead to deadlocks.\n Please see :ref:`task-synchronous-subtasks`.\n\n :keyword timeout: The number of seconds to wait for results before\n the operation times out.\n\n :keyword propagate: If any of the tasks raises an exception, the\n exception will be re-raised.\n\n :keyword interval: Time to wait (in seconds) before retrying to\n retrieve a result from the set. Note that this\n does not have any effect when using the amqp\n result store backend, as it does not use polling.\n\n :keyword callback: Optional callback to be called for every result\n received. Must have signature ``(task_id, value)``\n No results will be returned by this function if\n a callback is specified. The order of results\n is also arbitrary when a callback is used.\n To get access to the result object for a particular\n id you will have to generate an index first:\n ``index = {r.id: r for r in gres.results.values()}``\n Or you can create new result objects on the fly:\n ``result = app.AsyncResult(task_id)`` (both will\n take advantage of the backend cache anyway).\n\n :keyword no_ack: Automatic message acknowledgement (Note that if this\n is set to :const:`False` then the messages *will not be\n acknowledged*).\n\n :raises celery.exceptions.TimeoutError: if ``timeout`` is not\n :const:`None` and the operation takes longer than ``timeout``\n seconds.\n\n '
assert_will_not_block()
time_start = monotonic()
remaining = None
results = []
for result in self.results:
remaining = None
if timeout:
remaining = (timeout - (monotonic() - time_start))
if (remaining <= 0.0):
raise TimeoutError('join operation timed out')
value = result.get(timeout=remaining, propagate=propagate, interval=interval, no_ack=no_ack)
if callback:
callback(result.id, value)
else:
results.append(value)
return results
| 5,768,098,422,675,149,000
|
Gathers the results of all tasks as a list in order.
.. note::
This can be an expensive operation for result store
backends that must resort to polling (e.g. database).
You should consider using :meth:`join_native` if your backend
supports it.
.. warning::
Waiting for tasks within a task may lead to deadlocks.
Please see :ref:`task-synchronous-subtasks`.
:keyword timeout: The number of seconds to wait for results before
the operation times out.
:keyword propagate: If any of the tasks raises an exception, the
exception will be re-raised.
:keyword interval: Time to wait (in seconds) before retrying to
retrieve a result from the set. Note that this
does not have any effect when using the amqp
result store backend, as it does not use polling.
:keyword callback: Optional callback to be called for every result
received. Must have signature ``(task_id, value)``
No results will be returned by this function if
a callback is specified. The order of results
is also arbitrary when a callback is used.
To get access to the result object for a particular
id you will have to generate an index first:
``index = {r.id: r for r in gres.results.values()}``
Or you can create new result objects on the fly:
``result = app.AsyncResult(task_id)`` (both will
take advantage of the backend cache anyway).
:keyword no_ack: Automatic message acknowledgement (Note that if this
is set to :const:`False` then the messages *will not be
acknowledged*).
:raises celery.exceptions.TimeoutError: if ``timeout`` is not
:const:`None` and the operation takes longer than ``timeout``
seconds.
|
venv/lib/python2.7/site-packages/celery/result.py
|
join
|
CharleyFarley/ovvio
|
python
|
def join(self, timeout=None, propagate=True, interval=0.5, callback=None, no_ack=True):
'Gathers the results of all tasks as a list in order.\n\n .. note::\n\n This can be an expensive operation for result store\n backends that must resort to polling (e.g. database).\n\n You should consider using :meth:`join_native` if your backend\n supports it.\n\n .. warning::\n\n Waiting for tasks within a task may lead to deadlocks.\n Please see :ref:`task-synchronous-subtasks`.\n\n :keyword timeout: The number of seconds to wait for results before\n the operation times out.\n\n :keyword propagate: If any of the tasks raises an exception, the\n exception will be re-raised.\n\n :keyword interval: Time to wait (in seconds) before retrying to\n retrieve a result from the set. Note that this\n does not have any effect when using the amqp\n result store backend, as it does not use polling.\n\n :keyword callback: Optional callback to be called for every result\n received. Must have signature ``(task_id, value)``\n No results will be returned by this function if\n a callback is specified. The order of results\n is also arbitrary when a callback is used.\n To get access to the result object for a particular\n id you will have to generate an index first:\n ``index = {r.id: r for r in gres.results.values()}``\n Or you can create new result objects on the fly:\n ``result = app.AsyncResult(task_id)`` (both will\n take advantage of the backend cache anyway).\n\n :keyword no_ack: Automatic message acknowledgement (Note that if this\n is set to :const:`False` then the messages *will not be\n acknowledged*).\n\n :raises celery.exceptions.TimeoutError: if ``timeout`` is not\n :const:`None` and the operation takes longer than ``timeout``\n seconds.\n\n '
assert_will_not_block()
time_start = monotonic()
remaining = None
results = []
for result in self.results:
remaining = None
if timeout:
remaining = (timeout - (monotonic() - time_start))
if (remaining <= 0.0):
raise TimeoutError('join operation timed out')
value = result.get(timeout=remaining, propagate=propagate, interval=interval, no_ack=no_ack)
if callback:
callback(result.id, value)
else:
results.append(value)
return results
|
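`join` (and `join_native`) are usually reached through a group of tasks; a short sketch continuing the illustrative `app`/`add` definitions from the earlier Celery example (a running worker is still assumed):

```python
from celery import group

job = group(add.s(i, i) for i in range(5))
result = job.apply_async()           # a GroupResult (a ResultSet)

print(result.ready())                # True once all subtasks have finished
print(result.join(timeout=30))       # -> [0, 2, 4, 6, 8], in submission order
```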
def iter_native(self, timeout=None, interval=0.5, no_ack=True):
'Backend optimized version of :meth:`iterate`.\n\n .. versionadded:: 2.2\n\n Note that this does not support collecting the results\n for different task types using different backends.\n\n This is currently only supported by the amqp, Redis and cache\n result backends.\n\n '
results = self.results
if (not results):
return iter([])
return self.backend.get_many(set((r.id for r in results)), timeout=timeout, interval=interval, no_ack=no_ack)
| -1,819,789,941,179,141,000
|
Backend optimized version of :meth:`iterate`.
.. versionadded:: 2.2
Note that this does not support collecting the results
for different task types using different backends.
This is currently only supported by the amqp, Redis and cache
result backends.
|
venv/lib/python2.7/site-packages/celery/result.py
|
iter_native
|
CharleyFarley/ovvio
|
python
|
def iter_native(self, timeout=None, interval=0.5, no_ack=True):
'Backend optimized version of :meth:`iterate`.\n\n .. versionadded:: 2.2\n\n Note that this does not support collecting the results\n for different task types using different backends.\n\n This is currently only supported by the amqp, Redis and cache\n result backends.\n\n '
results = self.results
if (not results):
return iter([])
return self.backend.get_many(set((r.id for r in results)), timeout=timeout, interval=interval, no_ack=no_ack)
|
def join_native(self, timeout=None, propagate=True, interval=0.5, callback=None, no_ack=True):
'Backend optimized version of :meth:`join`.\n\n .. versionadded:: 2.2\n\n Note that this does not support collecting the results\n for different task types using different backends.\n\n This is currently only supported by the amqp, Redis and cache\n result backends.\n\n '
assert_will_not_block()
order_index = (None if callback else dict(((result.id, i) for (i, result) in enumerate(self.results))))
acc = (None if callback else [None for _ in range(len(self))])
for (task_id, meta) in self.iter_native(timeout, interval, no_ack):
value = meta['result']
if (propagate and (meta['status'] in states.PROPAGATE_STATES)):
raise value
if callback:
callback(task_id, value)
else:
acc[order_index[task_id]] = value
return acc
| 9,139,139,065,209,087,000
|
Backend optimized version of :meth:`join`.
.. versionadded:: 2.2
Note that this does not support collecting the results
for different task types using different backends.
This is currently only supported by the amqp, Redis and cache
result backends.
|
venv/lib/python2.7/site-packages/celery/result.py
|
join_native
|
CharleyFarley/ovvio
|
python
|
def join_native(self, timeout=None, propagate=True, interval=0.5, callback=None, no_ack=True):
'Backend optimized version of :meth:`join`.\n\n .. versionadded:: 2.2\n\n Note that this does not support collecting the results\n for different task types using different backends.\n\n This is currently only supported by the amqp, Redis and cache\n result backends.\n\n '
assert_will_not_block()
order_index = (None if callback else dict(((result.id, i) for (i, result) in enumerate(self.results))))
acc = (None if callback else [None for _ in range(len(self))])
for (task_id, meta) in self.iter_native(timeout, interval, no_ack):
value = meta['result']
if (propagate and (meta['status'] in states.PROPAGATE_STATES)):
raise value
if callback:
callback(task_id, value)
else:
acc[order_index[task_id]] = value
return acc
|
@property
def subtasks(self):
'Deprecated alias to :attr:`results`.'
return self.results
| -297,266,856,338,344,450
|
Deprecated alias to :attr:`results`.
|
venv/lib/python2.7/site-packages/celery/result.py
|
subtasks
|
CharleyFarley/ovvio
|
python
|
@property
def subtasks(self):
return self.results
|
def save(self, backend=None):
'Save group-result for later retrieval using :meth:`restore`.\n\n Example::\n\n >>> def save_and_restore(result):\n ... result.save()\n ... result = GroupResult.restore(result.id)\n\n '
return (backend or self.app.backend).save_group(self.id, self)
| 2,484,700,479,012,440,600
|
Save group-result for later retrieval using :meth:`restore`.
Example::
>>> def save_and_restore(result):
... result.save()
... result = GroupResult.restore(result.id)
|
venv/lib/python2.7/site-packages/celery/result.py
|
save
|
CharleyFarley/ovvio
|
python
|
def save(self, backend=None):
'Save group-result for later retrieval using :meth:`restore`.\n\n Example::\n\n >>> def save_and_restore(result):\n ... result.save()\n ... result = GroupResult.restore(result.id)\n\n '
return (backend or self.app.backend).save_group(self.id, self)
|
def delete(self, backend=None):
'Remove this result if it was previously saved.'
(backend or self.app.backend).delete_group(self.id)
| -5,962,614,933,109,574,000
|
Remove this result if it was previously saved.
|
venv/lib/python2.7/site-packages/celery/result.py
|
delete
|
CharleyFarley/ovvio
|
python
|
def delete(self, backend=None):
(backend or self.app.backend).delete_group(self.id)
|
@classmethod
def restore(self, id, backend=None):
'Restore previously saved group result.'
return (backend or (self.app.backend if self.app else current_app.backend)).restore_group(id)
| 7,488,101,914,846,607,000
|
Restore previously saved group result.
|
venv/lib/python2.7/site-packages/celery/result.py
|
restore
|
CharleyFarley/ovvio
|
python
|
@classmethod
def restore(self, id, backend=None):
return (backend or (self.app.backend if self.app else current_app.backend)).restore_group(id)
|
def itersubtasks(self):
'Deprecated. Use ``iter(self.results)`` instead.'
return iter(self.results)
| 4,639,782,609,641,679,000
|
Deprecated. Use ``iter(self.results)`` instead.
|
venv/lib/python2.7/site-packages/celery/result.py
|
itersubtasks
|
CharleyFarley/ovvio
|
python
|
def itersubtasks(self):
return iter(self.results)
|
@property
def total(self):
'Deprecated: Use ``len(r)``.'
return len(self)
| -8,613,052,794,903,339,000
|
Deprecated: Use ``len(r)``.
|
venv/lib/python2.7/site-packages/celery/result.py
|
total
|
CharleyFarley/ovvio
|
python
|
@property
def total(self):
return len(self)
|
@property
def taskset_id(self):
'compat alias to :attr:`self.id`'
return self.id
| 3,185,373,323,444,679,700
|
compat alias to :attr:`self.id`
|
venv/lib/python2.7/site-packages/celery/result.py
|
taskset_id
|
CharleyFarley/ovvio
|
python
|
@property
def taskset_id(self):
return self.id
|
@property
def result(self):
'The tasks return value'
return self._result
| -5,183,060,910,031,868,000
|
The tasks return value
|
venv/lib/python2.7/site-packages/celery/result.py
|
result
|
CharleyFarley/ovvio
|
python
|
@property
def result(self):
return self._result
|
@property
def state(self):
'The tasks state.'
return self._state
| 7,325,794,408,771,949,000
|
The tasks state.
|
venv/lib/python2.7/site-packages/celery/result.py
|
state
|
CharleyFarley/ovvio
|
python
|
@property
def state(self):
return self._state
|
@property
def traceback(self):
'The traceback if the task failed.'
return self._traceback
| 608,441,875,333,079,700
|
The traceback if the task failed.
|
venv/lib/python2.7/site-packages/celery/result.py
|
traceback
|
CharleyFarley/ovvio
|
python
|
@property
def traceback(self):
return self._traceback
|
def get_prepared_model(stage: str, no_classes: int, input_shape: list, loss: str, optimizer: str, metrics: list) -> Model:
"Function creates ANN model and compile.\n Args:\n stage ([str]): stage of experiment\n no_classes ([INT]): No of classes for classification\n input_shape ([int, int]): Input shape for model's input layer\n loss ([str]): Loss function for model\n optimizer ([str]): Optimizer for model\n metrics ([str]): Metrics to watch while training\n Returns:\n model: ANN demo model\n "
LAYERS = []
BASE_LAYERS = [tf.keras.layers.Flatten(input_shape=input_shape, name='input_layer'), tf.keras.layers.Dense(units=392, activation='relu', name='hidden1'), tf.keras.layers.Dense(units=196, activation='relu', name='hidden2'), tf.keras.layers.Dense(units=no_classes, activation='softmax', name='output_layer')]
KERNEL_INIT_LAYERS = [tf.keras.layers.Flatten(input_shape=input_shape, name='input_layer'), tf.keras.layers.Dense(units=392, activation='relu', name='hidden1', kernel_initializer='glorot_uniform', bias_initializer='zeros'), tf.keras.layers.Dense(units=196, activation='relu', name='hidden2', kernel_initializer='glorot_uniform', bias_initializer='zeros'), tf.keras.layers.Dense(units=no_classes, activation='softmax', name='output_layer')]
BN_BEFORE_LAYERS = [tf.keras.layers.Flatten(input_shape=input_shape, name='input_layer'), tf.keras.layers.Dense(units=392, name='hidden1', kernel_initializer='glorot_uniform'), tf.keras.layers.BatchNormalization(), tf.keras.layers.Activation('relu'), tf.keras.layers.Dense(units=196, name='hidden2', kernel_initializer='glorot_uniform'), tf.keras.layers.BatchNormalization(), tf.keras.layers.Activation('relu'), tf.keras.layers.Dense(units=no_classes, activation='softmax', name='output_layer')]
BN_AFTER_LAYERS = [tf.keras.layers.Flatten(input_shape=input_shape, name='input_layer'), tf.keras.layers.Dense(units=392, activation='relu', name='hidden1', kernel_initializer='glorot_uniform', bias_initializer='zeros'), tf.keras.layers.BatchNormalization(), tf.keras.layers.Dense(units=196, activation='relu', name='hidden2', kernel_initializer='glorot_uniform', bias_initializer='zeros'), tf.keras.layers.BatchNormalization(), tf.keras.layers.Dense(units=no_classes, activation='softmax', name='output_layer')]
logging.info('Creating Model..')
if (stage == 'BASE_MODEL'):
LAYERS = BASE_LAYERS
elif (stage == 'KERNEL_INIT_MODEL'):
LAYERS = KERNEL_INIT_LAYERS
elif (stage == 'BN_BEFORE_MODEL'):
LAYERS = BN_BEFORE_LAYERS
elif (stage == 'BN_AFTER_MODEL'):
LAYERS = BN_AFTER_LAYERS
model_ann = tf.keras.models.Sequential(LAYERS)
logging.info('Compiling Model..')
model_ann.compile(loss=loss, optimizer=optimizer, metrics=metrics)
return model_ann
| -1,242,451,499,876,563,000
|
Function creates ANN model and compile.
Args:
stage ([str]): stage of experiment
no_classes ([INT]): No of classes for classification
input_shape ([int, int]): Input shape for model's input layer
loss ([str]): Loss function for model
optimizer ([str]): Optimizer for model
metrics ([str]): Metrics to watch while training
Returns:
model: ANN demo model
|
src/utils/model.py
|
get_prepared_model
|
iDataAstro/MNIST_CLASSIFICATION
|
python
|
def get_prepared_model(stage: str, no_classes: int, input_shape: list, loss: str, optimizer: str, metrics: list) -> Model:
"Function creates ANN model and compile.\n Args:\n stage ([str]): stage of experiment\n no_classes ([INT]): No of classes for classification\n input_shape ([int, int]): Input shape for model's input layer\n loss ([str]): Loss function for model\n optimizer ([str]): Optimizer for model\n metrics ([str]): Metrics to watch while training\n Returns:\n model: ANN demo model\n "
LAYERS = []
BASE_LAYERS = [tf.keras.layers.Flatten(input_shape=input_shape, name='input_layer'), tf.keras.layers.Dense(units=392, activation='relu', name='hidden1'), tf.keras.layers.Dense(units=196, activation='relu', name='hidden2'), tf.keras.layers.Dense(units=no_classes, activation='softmax', name='output_layer')]
KERNEL_INIT_LAYERS = [tf.keras.layers.Flatten(input_shape=input_shape, name='input_layer'), tf.keras.layers.Dense(units=392, activation='relu', name='hidden1', kernel_initializer='glorot_uniform', bias_initializer='zeros'), tf.keras.layers.Dense(units=196, activation='relu', name='hidden2', kernel_initializer='glorot_uniform', bias_initializer='zeros'), tf.keras.layers.Dense(units=no_classes, activation='softmax', name='output_layer')]
BN_BEFORE_LAYERS = [tf.keras.layers.Flatten(input_shape=input_shape, name='input_layer'), tf.keras.layers.Dense(units=392, name='hidden1', kernel_initializer='glorot_uniform'), tf.keras.layers.BatchNormalization(), tf.keras.layers.Activation('relu'), tf.keras.layers.Dense(units=196, name='hidden2', kernel_initializer='glorot_uniform'), tf.keras.layers.BatchNormalization(), tf.keras.layers.Activation('relu'), tf.keras.layers.Dense(units=no_classes, activation='softmax', name='output_layer')]
BN_AFTER_LAYERS = [tf.keras.layers.Flatten(input_shape=input_shape, name='input_layer'), tf.keras.layers.Dense(units=392, activation='relu', name='hidden1', kernel_initializer='glorot_uniform', bias_initializer='zeros'), tf.keras.layers.BatchNormalization(), tf.keras.layers.Dense(units=196, activation='relu', name='hidden2', kernel_initializer='glorot_uniform', bias_initializer='zeros'), tf.keras.layers.BatchNormalization(), tf.keras.layers.Dense(units=no_classes, activation='softmax', name='output_layer')]
logging.info('Creating Model..')
if (stage == 'BASE_MODEL'):
LAYERS = BASE_LAYERS
elif (stage == 'KERNEL_INIT_MODEL'):
LAYERS = KERNEL_INIT_LAYERS
elif (stage == 'BN_BEFORE_MODEL'):
LAYERS = BN_BEFORE_LAYERS
elif (stage == 'BN_AFTER_MODEL'):
LAYERS = BN_AFTER_LAYERS
model_ann = tf.keras.models.Sequential(LAYERS)
logging.info('Compiling Model..')
model_ann.compile(loss=loss, optimizer=optimizer, metrics=metrics)
return model_ann
|
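A usage sketch for `get_prepared_model`; the MNIST-style input shape, loss, optimizer and metrics below are illustrative assumptions, not values taken from the record. Note that an unrecognized `stage` leaves `LAYERS` empty, so an empty Sequential model would be built silently; callers are expected to pass one of the four stage names.

# Hypothetical import path inferred from the file location shown above (src/utils/model.py).
from src.utils.model import get_prepared_model

model = get_prepared_model(stage='BN_BEFORE_MODEL', no_classes=10, input_shape=[28, 28],
                           loss='sparse_categorical_crossentropy', optimizer='adam',
                           metrics=['accuracy'])
model.summary()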
def save_model(model_dir: str, model: Model, model_suffix: str) -> None:
'\n args:\n model_dir: directory to save the model\n model: model object to save\n model_suffix: Suffix to save the model\n '
create_directories([model_dir])
model_file = os.path.join(model_dir, f'{model_suffix}.h5')
model.save(model_file)
logging.info(f'Saved model: {model_file}')
| 8,604,583,271,117,257,000
|
args:
model_dir: directory to save the model
model: model object to save
model_suffix: Suffix to save the model
|
src/utils/model.py
|
save_model
|
iDataAstro/MNIST_CLASSIFICATION
|
python
|
def save_model(model_dir: str, model: Model, model_suffix: str) -> None:
'\n args:\n model_dir: directory to save the model\n model: model object to save\n model_suffix: Suffix to save the model\n '
create_directories([model_dir])
model_file = os.path.join(model_dir, f'{model_suffix}.h5')
model.save(model_file)
logging.info(f'Saved model: {model_file}')
|
def save_history_plot(history, plot_dir: str, stage: str) -> None:
'\n Args:\n history: History object for plotting loss/accuracy curves\n plot_dir: Directory to save plot files\n stage: Stage name for training\n '
pd.DataFrame(history.history).plot(figsize=(10, 8))
plt.grid(True)
create_directories([plot_dir])
plot_file = os.path.join(plot_dir, (stage + '_loss_accuracy.png'))
plt.savefig(plot_file)
logging.info(f'Loss accuracy plot saved: {plot_file}')
| 1,670,037,681,986,658,600
|
Args:
history: History object for plotting loss/accuracy curves
plot_dir: Directory to save plot files
stage: Stage name for training
|
src/utils/model.py
|
save_history_plot
|
iDataAstro/MNIST_CLASSIFICATION
|
python
|
def save_history_plot(history, plot_dir: str, stage: str) -> None:
'\n Args:\n history: History object for plotting loss/accuracy curves\n plot_dir: Directory to save plot files\n stage: Stage name for training\n '
pd.DataFrame(history.history).plot(figsize=(10, 8))
plt.grid(True)
create_directories([plot_dir])
plot_file = os.path.join(plot_dir, (stage + '_loss_accuracy.png'))
plt.savefig(plot_file)
logging.info(f'Loss accuracy plot saved: {plot_file}')
|
def get_callbacks(checkpoint_dir: str, tensorboard_logs: str, stage: str) -> list:
'\n Args:\n checkpoint_dir: Directory to save the model at checkpoint\n tensorboard_logs: Directory to save tensorboard logs\n stage: Stage name for training\n Returns:\n callback_list: List of created callbacks\n '
create_directories([checkpoint_dir, tensorboard_logs])
tensorboard_cb = tf.keras.callbacks.TensorBoard(tensorboard_logs)
early_stopping_cb = tf.keras.callbacks.EarlyStopping(patience=3, restore_best_weights=True)
ckpt_file_path = os.path.join(checkpoint_dir, f'{stage}_ckpt_model.h5')
checkpoint_cb = tf.keras.callbacks.ModelCheckpoint(filepath=ckpt_file_path, save_best_only=True)
callback_list = [tensorboard_cb, early_stopping_cb, checkpoint_cb]
logging.info(f'Callbacks created: {callback_list}')
return callback_list
| 4,078,891,736,999,478,000
|
Args:
checkpoint_dir: Directory to save the model at checkpoint
tensorboard_logs: Directory to save tensorboard logs
stage: Stage name for training
Returns:
callback_list: List of created callbacks
|
src/utils/model.py
|
get_callbacks
|
iDataAstro/MNIST_CLASSIFICATION
|
python
|
def get_callbacks(checkpoint_dir: str, tensorboard_logs: str, stage: str) -> list:
'\n Args:\n checkpoint_dir: Directory to save the model at checkpoint\n tensorboard_logs: Directory to save tensorboard logs\n stage: Stage name for training\n Returns:\n callback_list: List of created callbacks\n '
create_directories([checkpoint_dir, tensorboard_logs])
tensorboard_cb = tf.keras.callbacks.TensorBoard(tensorboard_logs)
early_stopping_cb = tf.keras.callbacks.EarlyStopping(patience=3, restore_best_weights=True)
ckpt_file_path = os.path.join(checkpoint_dir, f'{stage}_ckpt_model.h5')
checkpoint_cb = tf.keras.callbacks.ModelCheckpoint(filepath=ckpt_file_path, save_best_only=True)
callback_list = [tensorboard_cb, early_stopping_cb, checkpoint_cb]
logging.info(f'Callbacks created: {callback_list}')
return callback_list
|
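A sketch of how `get_callbacks`, `save_model` and `save_history_plot` might be wired into one training run, continuing from the `get_prepared_model` sketch above; the directory names and the `x_train`/`y_train` arrays are placeholders.

stage = 'BASE_MODEL'
callbacks = get_callbacks(checkpoint_dir='artifacts/checkpoints',
                          tensorboard_logs='artifacts/tensorboard_logs', stage=stage)
history = model.fit(x_train, y_train, epochs=10, validation_split=0.2, callbacks=callbacks)
save_history_plot(history, plot_dir='artifacts/plots', stage=stage)
save_model(model_dir='artifacts/models', model=model, model_suffix=f'{stage}_final')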
def handle_event(context: events.ExecutionContext, event: events.ExecutionEvent) -> None:
'Short output style shows single symbols in the progress bar.\n\n Otherwise, identical to the default output style.\n '
if isinstance(event, events.Initialized):
default.handle_initialized(context, event)
if isinstance(event, events.AfterExecution):
context.hypothesis_output.extend(event.hypothesis_output)
handle_after_execution(context, event)
if isinstance(event, events.Finished):
default.handle_finished(context, event)
if isinstance(event, events.Interrupted):
default.handle_interrupted(context, event)
| -4,836,795,211,007,890,000
|
Short output style shows single symbols in the progress bar.
Otherwise, identical to the default output style.
|
src/schemathesis/cli/output/short.py
|
handle_event
|
RonnyPfannschmidt/schemathesis
|
python
|
def handle_event(context: events.ExecutionContext, event: events.ExecutionEvent) -> None:
'Short output style shows single symbols in the progress bar.\n\n Otherwise, identical to the default output style.\n '
if isinstance(event, events.Initialized):
default.handle_initialized(context, event)
if isinstance(event, events.AfterExecution):
context.hypothesis_output.extend(event.hypothesis_output)
handle_after_execution(context, event)
if isinstance(event, events.Finished):
default.handle_finished(context, event)
if isinstance(event, events.Interrupted):
default.handle_interrupted(context, event)
|
@staticmethod
def serialize_test_record(test_record):
'Override method to alter how test records are serialized to file data.'
return pickle.dumps(test_record, (- 1))
| 5,715,423,257,767,327,000
|
Override method to alter how test records are serialized to file data.
|
openhtf/output/callbacks/__init__.py
|
serialize_test_record
|
airdeng/openhtf
|
python
|
@staticmethod
def serialize_test_record(test_record):
return pickle.dumps(test_record, (- 1))
|
@staticmethod
def open_file(filename):
'Override method to alter file open behavior or file types.'
return Atomic(filename)
| 1,194,922,633,395,250,700
|
Override method to alter file open behavior or file types.
|
openhtf/output/callbacks/__init__.py
|
open_file
|
airdeng/openhtf
|
python
|
@staticmethod
def open_file(filename):
return Atomic(filename)
|
@contextlib.contextmanager
def open_output_file(self, test_record):
'Open file based on pattern.'
record_dict = data.convert_to_base_types(test_record, ignore_keys=('code_info', 'phases', 'log_records'))
pattern = self.filename_pattern
if (isinstance(pattern, six.string_types) or callable(pattern)):
output_file = self.open_file(util.format_string(pattern, record_dict))
try:
(yield output_file)
finally:
output_file.close()
elif hasattr(self.filename_pattern, 'write'):
(yield self.filename_pattern)
else:
raise ValueError('filename_pattern must be string, callable, or File-like object')
| 1,003,922,441,930,395,100
|
Open file based on pattern.
|
openhtf/output/callbacks/__init__.py
|
open_output_file
|
airdeng/openhtf
|
python
|
@contextlib.contextmanager
def open_output_file(self, test_record):
record_dict = data.convert_to_base_types(test_record, ignore_keys=('code_info', 'phases', 'log_records'))
pattern = self.filename_pattern
if (isinstance(pattern, six.string_types) or callable(pattern)):
output_file = self.open_file(util.format_string(pattern, record_dict))
try:
(yield output_file)
finally:
output_file.close()
elif hasattr(self.filename_pattern, 'write'):
(yield self.filename_pattern)
else:
raise ValueError('filename_pattern must be string, callable, or File-like object')
|
async def place_conditional_order(self, market: str, side: str, size: float, type: str='stop', limit_price: float=None, reduce_only: bool=False, cancel: bool=True, trigger_price: float=None, trail_value: float=None) -> dict:
"\n To send a Stop Market order, set type='stop' and supply a trigger_price\n To send a Stop Limit order, also supply a limit_price\n To send a Take Profit Market order, set type='trailing_stop' and supply a trigger_price\n To send a Trailing Stop order, set type='trailing_stop' and supply a trail_value\n "
assert (type in ('stop', 'take_profit', 'trailing_stop'))
assert ((type not in ('stop', 'take_profit')) or (trigger_price is not None)), 'Need trigger prices for stop losses and take profits'
assert ((type not in 'trailing_stop') or ((trigger_price is None) and (trail_value is not None))), 'Trailing stops need a trail value and cannot take a trigger price'
return (await self._request('POST', 'conditional_orders', json={'market': market, 'side': side, 'triggerPrice': trigger_price, 'size': size, 'reduceOnly': reduce_only, 'type': 'stop', 'cancelLimitOnTrigger': cancel, 'orderPrice': limit_price}))
| -751,737,147,964,922,100
|
To send a Stop Market order, set type='stop' and supply a trigger_price
To send a Stop Limit order, also supply a limit_price
To send a Take Profit Market order, set type='trailing_stop' and supply a trigger_price
To send a Trailing Stop order, set type='trailing_stop' and supply a trail_value
|
quant/platform/ftx.py
|
place_conditional_order
|
a04512/alphahunter
|
python
|
async def place_conditional_order(self, market: str, side: str, size: float, type: str='stop', limit_price: float=None, reduce_only: bool=False, cancel: bool=True, trigger_price: float=None, trail_value: float=None) -> dict:
"\n To send a Stop Market order, set type='stop' and supply a trigger_price\n To send a Stop Limit order, also supply a limit_price\n To send a Take Profit Market order, set type='trailing_stop' and supply a trigger_price\n To send a Trailing Stop order, set type='trailing_stop' and supply a trail_value\n "
assert (type in ('stop', 'take_profit', 'trailing_stop'))
assert ((type not in ('stop', 'take_profit')) or (trigger_price is not None)), 'Need trigger prices for stop losses and take profits'
assert ((type not in 'trailing_stop') or ((trigger_price is None) and (trail_value is not None))), 'Trailing stops need a trail value and cannot take a trigger price'
return (await self._request('POST', 'conditional_orders', json={'market': market, 'side': side, 'triggerPrice': trigger_price, 'size': size, 'reduceOnly': reduce_only, 'type': 'stop', 'cancelLimitOnTrigger': cancel, 'orderPrice': limit_price}))
|
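Usage sketches for `place_conditional_order`, following the rules stated in its docstring; the market, size and prices are placeholders, and `api` stands for an initialized instance of the REST client these methods belong to. Note that the request body above hard-codes 'type': 'stop' and never forwards trail_value, so only the stop / stop-limit variants actually reach the API as written; the take-profit line in the docstring also presumably means type='take_profit'.

async def demo_conditional_orders(api):
    # Stop market: triggers at 9000 and then sells 0.01 at market.
    await api.place_conditional_order('BTC-PERP', 'sell', 0.01,
                                      type='stop', trigger_price=9000.0)
    # Stop limit: same trigger, but places a limit order at 8990 once triggered.
    await api.place_conditional_order('BTC-PERP', 'sell', 0.01,
                                      type='stop', trigger_price=9000.0, limit_price=8990.0)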
def __init__(self, **kwargs):
'Initialize.'
self.cb = kwargs['cb']
state = None
self._platform = kwargs.get('platform')
self._symbols = kwargs.get('symbols')
self._strategy = kwargs.get('strategy')
self._account = kwargs.get('account')
self._access_key = kwargs.get('access_key')
self._secret_key = kwargs.get('secret_key')
self._subaccount_name = kwargs.get('subaccount_name')
if (not self._platform):
state = State(self._platform, self._account, 'param platform miss')
elif (self._account and ((not self._access_key) or (not self._secret_key))):
state = State(self._platform, self._account, 'param access_key or secret_key miss')
elif (not self._strategy):
state = State(self._platform, self._account, 'param strategy miss')
elif (not self._symbols):
state = State(self._platform, self._account, 'param symbols miss')
if state:
logger.error(state, caller=self)
SingleTask.run(self.cb.on_state_update_callback, state)
return
self._host = 'https://ftx.com'
self._wss = 'wss://ftx.com'
url = (self._wss + '/ws')
super(FTXTrader, self).__init__(url, send_hb_interval=15, **kwargs)
self.heartbeat_msg = {'op': 'ping'}
self._rest_api = FTXRestAPI(self._host, self._access_key, self._secret_key, self._subaccount_name)
self._orderbooks: DefaultDict[(str, Dict[(str, DefaultDict[(float, float)])])] = defaultdict((lambda : {side: defaultdict(float) for side in {'bids', 'asks'}}))
self._assets: DefaultDict[str:Dict[(str, float)]] = defaultdict((lambda : {k: 0.0 for k in {'free', 'locked', 'total'}}))
self._syminfo: DefaultDict[str:Dict[(str, Any)]] = defaultdict(dict)
if (self._account != None):
self.initialize()
if (self.cb.on_kline_update_callback or self.cb.on_orderbook_update_callback or self.cb.on_trade_update_callback or self.cb.on_ticker_update_callback):
FTXMarket(**kwargs)
| -8,882,331,551,842,784,000
|
Initialize.
|
quant/platform/ftx.py
|
__init__
|
a04512/alphahunter
|
python
|
def __init__(self, **kwargs):
self.cb = kwargs['cb']
state = None
self._platform = kwargs.get('platform')
self._symbols = kwargs.get('symbols')
self._strategy = kwargs.get('strategy')
self._account = kwargs.get('account')
self._access_key = kwargs.get('access_key')
self._secret_key = kwargs.get('secret_key')
self._subaccount_name = kwargs.get('subaccount_name')
if (not self._platform):
state = State(self._platform, self._account, 'param platform miss')
elif (self._account and ((not self._access_key) or (not self._secret_key))):
state = State(self._platform, self._account, 'param access_key or secret_key miss')
elif (not self._strategy):
state = State(self._platform, self._account, 'param strategy miss')
elif (not self._symbols):
state = State(self._platform, self._account, 'param symbols miss')
if state:
logger.error(state, caller=self)
SingleTask.run(self.cb.on_state_update_callback, state)
return
self._host = 'https://ftx.com'
self._wss = 'wss://ftx.com'
url = (self._wss + '/ws')
super(FTXTrader, self).__init__(url, send_hb_interval=15, **kwargs)
self.heartbeat_msg = {'op': 'ping'}
self._rest_api = FTXRestAPI(self._host, self._access_key, self._secret_key, self._subaccount_name)
self._orderbooks: DefaultDict[(str, Dict[(str, DefaultDict[(float, float)])])] = defaultdict((lambda : {side: defaultdict(float) for side in {'bids', 'asks'}}))
self._assets: DefaultDict[str:Dict[(str, float)]] = defaultdict((lambda : {k: 0.0 for k in {'free', 'locked', 'total'}}))
self._syminfo: DefaultDict[str:Dict[(str, Any)]] = defaultdict(dict)
if (self._account != None):
self.initialize()
if (self.cb.on_kline_update_callback or self.cb.on_orderbook_update_callback or self.cb.on_trade_update_callback or self.cb.on_ticker_update_callback):
FTXMarket(**kwargs)
|
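A sketch of the keyword arguments this `__init__` reads; every value below is a placeholder, and `callbacks` stands for whatever object the framework passes as `cb` (it only needs the `on_*_callback` attributes referenced above).

kwargs = {
    'platform': 'ftx',
    'symbols': ['BTC-PERP'],
    'strategy': 'demo_strategy',
    'account': 'main',
    'access_key': '<API_KEY>',
    'secret_key': '<API_SECRET>',
    'subaccount_name': None,
    'cb': callbacks,  # exposes on_order_update_callback, on_state_update_callback, ...
}
trader = FTXTrader(**kwargs)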
async def create_order(self, symbol, action, price, quantity, order_type=ORDER_TYPE_LIMIT, *args, **kwargs):
" Create an order.\n\n Args:\n symbol: Trade target\n action: Trade direction, `BUY` or `SELL`.\n price: Price of each contract.\n quantity: The buying or selling quantity.\n order_type: Order type, `MARKET` or `LIMIT`.\n\n Returns:\n order_no: Order ID if created successfully, otherwise it's None.\n error: Error information, otherwise it's None.\n "
if (action == ORDER_ACTION_BUY):
side = 'buy'
else:
side = 'sell'
size = abs(float(quantity))
price = float(price)
if (order_type == ORDER_TYPE_LIMIT):
ot = 'limit'
elif (order_type == ORDER_TYPE_MARKET):
ot = 'market'
price = None
else:
raise NotImplementedError
(success, error) = (await self._rest_api.place_order(symbol, side, price, size, ot))
if error:
return (None, error)
if (not success['success']):
return (None, 'place_order error')
result = success['result']
return (str(result['id']), None)
| 8,215,648,725,332,022,000
|
Create an order.
Args:
symbol: Trade target
action: Trade direction, `BUY` or `SELL`.
price: Price of each contract.
quantity: The buying or selling quantity.
order_type: Order type, `MARKET` or `LIMIT`.
Returns:
order_no: Order ID if created successfully, otherwise it's None.
error: Error information, otherwise it's None.
|
quant/platform/ftx.py
|
create_order
|
a04512/alphahunter
|
python
|
async def create_order(self, symbol, action, price, quantity, order_type=ORDER_TYPE_LIMIT, *args, **kwargs):
" Create an order.\n\n Args:\n symbol: Trade target\n action: Trade direction, `BUY` or `SELL`.\n price: Price of each contract.\n quantity: The buying or selling quantity.\n order_type: Order type, `MARKET` or `LIMIT`.\n\n Returns:\n order_no: Order ID if created successfully, otherwise it's None.\n error: Error information, otherwise it's None.\n "
if (action == ORDER_ACTION_BUY):
side = 'buy'
else:
side = 'sell'
size = abs(float(quantity))
price = float(price)
if (order_type == ORDER_TYPE_LIMIT):
ot = 'limit'
elif (order_type == ORDER_TYPE_MARKET):
ot = 'market'
price = None
else:
raise NotImplementedError
(success, error) = (await self._rest_api.place_order(symbol, side, price, size, ot))
if error:
return (None, error)
if (not success['success']):
return (None, 'place_order error')
result = success['result']
return (str(result['id']), None)
|
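A usage sketch for `create_order`; the symbol, price and quantity are placeholders, `trader` is assumed to be an initialized FTXTrader, and ORDER_ACTION_BUY / ORDER_TYPE_LIMIT are the constants used in the record above (their defining module is not shown here).

async def demo_create_order(trader):
    order_no, err = await trader.create_order('BTC-PERP', ORDER_ACTION_BUY,
                                              price=9500.0, quantity=0.01,
                                              order_type=ORDER_TYPE_LIMIT)
    if err:
        print('create_order failed:', err)
    else:
        print('order placed:', order_no)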
async def revoke_order(self, symbol, *order_nos):
    ' Revoke (an) order(s).\n\n Args:\n symbol: Trade target\n order_nos: Order id list, you can set this param to 0 or multiple items. If you set 0 param, you can cancel all orders for \n this symbol. If you set 1 or multiple param, you can cancel an or multiple order.\n\n Returns:\n When cancelling all orders: success=(True, None), failure=(False, error information)\n When cancelling one or more orders: (order ids cancelled successfully[], order ids that failed with error info[]); e.g. if all three succeed the result is ([1xx,2xx,3xx], [])\n '
if (len(order_nos) == 0):
(success, error) = (await self._rest_api.cancel_orders(symbol))
if error:
return (False, error)
if (not success['success']):
return (False, 'cancel_orders error')
return (True, None)
else:
result = []
for order_no in order_nos:
(_, e) = (await self._rest_api.cancel_order(order_no))
if e:
result.append((order_no, e))
else:
result.append((order_no, None))
return (tuple(result), None)
| 725,604,617,516,433,300
|
Revoke (an) order(s).
Args:
symbol: Trade target
order_nos: Order id list, you can set this param to 0 or multiple items. If you set 0 param, you can cancel all orders for
this symbol. If you set 1 or multiple param, you can cancel an or multiple order.
Returns:
When cancelling all orders: success=(True, None), failure=(False, error information)
When cancelling one or more orders: (order ids cancelled successfully[], order ids that failed with error info[]); e.g. if all three succeed the result is ([1xx,2xx,3xx], [])
|
quant/platform/ftx.py
|
revoke_order
|
a04512/alphahunter
|
python
|
async def revoke_order(self, symbol, *order_nos):
    ' Revoke (an) order(s).\n\n Args:\n symbol: Trade target\n order_nos: Order id list, you can set this param to 0 or multiple items. If you set 0 param, you can cancel all orders for \n this symbol. If you set 1 or multiple param, you can cancel an or multiple order.\n\n Returns:\n When cancelling all orders: success=(True, None), failure=(False, error information)\n When cancelling one or more orders: (order ids cancelled successfully[], order ids that failed with error info[]); e.g. if all three succeed the result is ([1xx,2xx,3xx], [])\n '
if (len(order_nos) == 0):
(success, error) = (await self._rest_api.cancel_orders(symbol))
if error:
return (False, error)
if (not success['success']):
return (False, 'cancel_orders error')
return (True, None)
else:
result = []
for order_no in order_nos:
(_, e) = (await self._rest_api.cancel_order(order_no))
if e:
result.append((order_no, e))
else:
result.append((order_no, None))
return (tuple(result), None)
|
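A sketch of handling the two return shapes of `revoke_order` described in its docstring; the symbol and order ids are placeholders and `trader` is the same hypothetical instance.

async def demo_revoke(trader):
    # No order ids: cancel everything for the symbol; returns (True, None) on success.
    ok, err = await trader.revoke_order('BTC-PERP')

    # Specific order ids: returns a tuple of (order_no, error-or-None) pairs.
    results, _ = await trader.revoke_order('BTC-PERP', '123', '456')
    failed = [order_no for order_no, e in results if e]
    print('cancel all ok:', ok, 'failed ids:', failed)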
async def get_assets(self):
" 获取交易账户资产信息\n\n Args:\n None\n\n Returns:\n assets: Asset if successfully, otherwise it's None.\n error: Error information, otherwise it's None.\n "
(success, error) = (await self._rest_api.get_account_info())
if error:
return (None, error)
if (not success['success']):
return (None, 'get_account_info error')
data = success['result']
assets = {}
total = float(data['collateral'])
free = float(data['freeCollateral'])
locked = (total - free)
assets['USD'] = {'total': total, 'free': free, 'locked': locked}
if (assets == self._assets):
update = False
else:
update = True
self._assets = assets
timestamp = tools.get_cur_timestamp_ms()
ast = Asset(self._platform, self._account, self._assets, timestamp, update)
return (ast, None)
| -8,438,721,267,337,178,000
|
Get asset information for the trading account
Args:
None
Returns:
assets: Asset if successfully, otherwise it's None.
error: Error information, otherwise it's None.
|
quant/platform/ftx.py
|
get_assets
|
a04512/alphahunter
|
python
|
async def get_assets(self):
" 获取交易账户资产信息\n\n Args:\n None\n\n Returns:\n assets: Asset if successfully, otherwise it's None.\n error: Error information, otherwise it's None.\n "
(success, error) = (await self._rest_api.get_account_info())
if error:
return (None, error)
if (not success['success']):
return (None, 'get_account_info error')
data = success['result']
assets = {}
total = float(data['collateral'])
free = float(data['freeCollateral'])
locked = (total - free)
assets['USD'] = {'total': total, 'free': free, 'locked': locked}
if (assets == self._assets):
update = False
else:
update = True
self._assets = assets
timestamp = tools.get_cur_timestamp_ms()
ast = Asset(self._platform, self._account, self._assets, timestamp, update)
return (ast, None)
|
def _convert_order_format(self, o):
    'Convert an exchange order structure into the standard order structure of this trading system\n '
order_no = str(o['id'])
state = o['status']
remain = float(o['remainingSize'])
filled = float(o['filledSize'])
size = float(o['size'])
price = (None if (o['price'] == None) else float(o['price']))
avg_price = (None if (o['avgFillPrice'] == None) else float(o['avgFillPrice']))
if (state == 'new'):
status = ORDER_STATUS_SUBMITTED
elif (state == 'open'):
if (remain < size):
status = ORDER_STATUS_PARTIAL_FILLED
else:
status = ORDER_STATUS_SUBMITTED
elif (state == 'closed'):
if (filled < size):
status = ORDER_STATUS_CANCELED
else:
status = ORDER_STATUS_FILLED
else:
return None
info = {'platform': self._platform, 'account': self._account, 'strategy': self._strategy, 'order_no': order_no, 'action': (ORDER_ACTION_BUY if (o['side'] == 'buy') else ORDER_ACTION_SELL), 'symbol': o['market'], 'price': price, 'quantity': size, 'order_type': (ORDER_TYPE_LIMIT if (o['type'] == 'limit') else ORDER_TYPE_MARKET), 'remain': remain, 'status': status, 'avg_price': avg_price}
order = Order(**info)
return order
| -6,512,945,056,998,619,000
|
Convert an exchange order structure into the standard order structure of this trading system
|
quant/platform/ftx.py
|
_convert_order_format
|
a04512/alphahunter
|
python
|
def _convert_order_format(self, o):
'\n '
order_no = str(o['id'])
state = o['status']
remain = float(o['remainingSize'])
filled = float(o['filledSize'])
size = float(o['size'])
price = (None if (o['price'] == None) else float(o['price']))
avg_price = (None if (o['avgFillPrice'] == None) else float(o['avgFillPrice']))
if (state == 'new'):
status = ORDER_STATUS_SUBMITTED
elif (state == 'open'):
if (remain < size):
status = ORDER_STATUS_PARTIAL_FILLED
else:
status = ORDER_STATUS_SUBMITTED
elif (state == 'closed'):
if (filled < size):
status = ORDER_STATUS_CANCELED
else:
status = ORDER_STATUS_FILLED
else:
return None
info = {'platform': self._platform, 'account': self._account, 'strategy': self._strategy, 'order_no': order_no, 'action': (ORDER_ACTION_BUY if (o['side'] == 'buy') else ORDER_ACTION_SELL), 'symbol': o['market'], 'price': price, 'quantity': size, 'order_type': (ORDER_TYPE_LIMIT if (o['type'] == 'limit') else ORDER_TYPE_MARKET), 'remain': remain, 'status': status, 'avg_price': avg_price}
order = Order(**info)
return order
|
async def get_orders(self, symbol):
" 获取当前挂单列表\n\n Args:\n symbol: Trade target\n\n Returns:\n orders: Order list if successfully, otherwise it's None.\n error: Error information, otherwise it's None.\n "
orders: List[Order] = []
(success, error) = (await self._rest_api.get_open_orders(symbol))
if error:
return (None, error)
if (not success['success']):
return (None, 'get_open_orders error')
data = success['result']
for o in data:
order = self._convert_order_format(o)
if (order == None):
return (None, 'get_open_orders error')
orders.append(order)
return (orders, None)
| -2,887,691,378,927,101,400
|
Get the list of currently open orders
Args:
symbol: Trade target
Returns:
orders: Order list if successfully, otherwise it's None.
error: Error information, otherwise it's None.
|
quant/platform/ftx.py
|
get_orders
|
a04512/alphahunter
|
python
|
async def get_orders(self, symbol):
" 获取当前挂单列表\n\n Args:\n symbol: Trade target\n\n Returns:\n orders: Order list if successfully, otherwise it's None.\n error: Error information, otherwise it's None.\n "
orders: List[Order] = []
(success, error) = (await self._rest_api.get_open_orders(symbol))
if error:
return (None, error)
if (not success['success']):
return (None, 'get_open_orders error')
data = success['result']
for o in data:
order = self._convert_order_format(o)
if (order == None):
return (None, 'get_open_orders error')
orders.append(order)
return (orders, None)
|
async def get_position(self, symbol):
" 获取当前持仓\n\n Args:\n symbol: Trade target\n\n Returns:\n position: Position if successfully, otherwise it's None.\n error: Error information, otherwise it's None.\n "
(success, error) = (await self._rest_api.get_positions(True))
if error:
return (None, error)
if (not success['success']):
return (None, 'get_position error')
p = next(filter((lambda x: (x['future'] == symbol)), success['result']), None)
if (p == None):
return (Position(self._platform, self._account, self._strategy, symbol), None)
if (p['netSize'] == 0):
return (Position(self._platform, self._account, self._strategy, symbol), None)
pos = Position(self._platform, self._account, self._strategy, symbol)
pos.margin_mode = MARGIN_MODE_CROSSED
pos.utime = tools.get_cur_timestamp_ms()
if (p['netSize'] < 0):
pos.long_quantity = 0
pos.long_avail_qty = 0
pos.long_open_price = 0
pos.long_hold_price = 0
pos.long_liquid_price = 0
pos.long_unrealised_pnl = 0
pos.long_leverage = 0
pos.long_margin = 0
pos.short_quantity = abs(p['netSize'])
pos.short_avail_qty = ((pos.short_quantity - p['longOrderSize']) if (p['longOrderSize'] < pos.short_quantity) else 0)
pos.short_open_price = p['recentAverageOpenPrice']
pos.short_hold_price = p['entryPrice']
pos.short_liquid_price = p['estimatedLiquidationPrice']
pos.short_unrealised_pnl = p['unrealizedPnl']
pos.short_leverage = int((1 / p['initialMarginRequirement']))
pos.short_margin = p['collateralUsed']
else:
pos.long_quantity = abs(p['netSize'])
pos.long_avail_qty = ((pos.long_quantity - p['shortOrderSize']) if (p['shortOrderSize'] < pos.long_quantity) else 0)
pos.long_open_price = p['recentAverageOpenPrice']
pos.long_hold_price = p['entryPrice']
pos.long_liquid_price = p['estimatedLiquidationPrice']
pos.long_unrealised_pnl = p['unrealizedPnl']
pos.long_leverage = int((1 / p['initialMarginRequirement']))
pos.long_margin = p['collateralUsed']
pos.short_quantity = 0
pos.short_avail_qty = 0
pos.short_open_price = 0
pos.short_hold_price = 0
pos.short_liquid_price = 0
pos.short_unrealised_pnl = 0
pos.short_leverage = 0
pos.short_margin = 0
return (pos, None)
| 6,262,820,115,701,591,000
|
Get the current position
Args:
symbol: Trade target
Returns:
position: Position if successfully, otherwise it's None.
error: Error information, otherwise it's None.
|
quant/platform/ftx.py
|
get_position
|
a04512/alphahunter
|
python
|
async def get_position(self, symbol):
" 获取当前持仓\n\n Args:\n symbol: Trade target\n\n Returns:\n position: Position if successfully, otherwise it's None.\n error: Error information, otherwise it's None.\n "
(success, error) = (await self._rest_api.get_positions(True))
if error:
return (None, error)
if (not success['success']):
return (None, 'get_position error')
p = next(filter((lambda x: (x['future'] == symbol)), success['result']), None)
if (p == None):
return (Position(self._platform, self._account, self._strategy, symbol), None)
if (p['netSize'] == 0):
return (Position(self._platform, self._account, self._strategy, symbol), None)
pos = Position(self._platform, self._account, self._strategy, symbol)
pos.margin_mode = MARGIN_MODE_CROSSED
pos.utime = tools.get_cur_timestamp_ms()
if (p['netSize'] < 0):
pos.long_quantity = 0
pos.long_avail_qty = 0
pos.long_open_price = 0
pos.long_hold_price = 0
pos.long_liquid_price = 0
pos.long_unrealised_pnl = 0
pos.long_leverage = 0
pos.long_margin = 0
pos.short_quantity = abs(p['netSize'])
pos.short_avail_qty = ((pos.short_quantity - p['longOrderSize']) if (p['longOrderSize'] < pos.short_quantity) else 0)
pos.short_open_price = p['recentAverageOpenPrice']
pos.short_hold_price = p['entryPrice']
pos.short_liquid_price = p['estimatedLiquidationPrice']
pos.short_unrealised_pnl = p['unrealizedPnl']
pos.short_leverage = int((1 / p['initialMarginRequirement']))
pos.short_margin = p['collateralUsed']
else:
pos.long_quantity = abs(p['netSize'])
pos.long_avail_qty = ((pos.long_quantity - p['shortOrderSize']) if (p['shortOrderSize'] < pos.long_quantity) else 0)
pos.long_open_price = p['recentAverageOpenPrice']
pos.long_hold_price = p['entryPrice']
pos.long_liquid_price = p['estimatedLiquidationPrice']
pos.long_unrealised_pnl = p['unrealizedPnl']
pos.long_leverage = int((1 / p['initialMarginRequirement']))
pos.long_margin = p['collateralUsed']
pos.short_quantity = 0
pos.short_avail_qty = 0
pos.short_open_price = 0
pos.short_hold_price = 0
pos.short_liquid_price = 0
pos.short_unrealised_pnl = 0
pos.short_leverage = 0
pos.short_margin = 0
return (pos, None)
|
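A sketch of reading the Position returned by `get_position`; only attributes assigned in the method above are used, and the getattr defaults guard the flat-position case, where the method returns a bare Position without setting the quantity fields.

async def demo_position(trader):
    pos, err = await trader.get_position('BTC-PERP')
    if err:
        return
    long_qty = getattr(pos, 'long_quantity', 0) or 0
    short_qty = getattr(pos, 'short_quantity', 0) or 0
    if long_qty:
        print('long', long_qty, 'avg open', pos.long_open_price)
    elif short_qty:
        print('short', short_qty, 'avg open', pos.short_open_price)
    else:
        print('flat')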
async def get_symbol_info(self, symbol):
" 获取指定符号相关信息\n\n Args:\n symbol: Trade target\n\n Returns:\n symbol_info: SymbolInfo if successfully, otherwise it's None.\n error: Error information, otherwise it's None.\n "
'\n {\n "success": true,\n "result": [\n {\n "name": "BTC-0628",\n "baseCurrency": null,\n "quoteCurrency": null,\n "type": "future",\n "underlying": "BTC",\n "enabled": true,\n "ask": 3949.25,\n "bid": 3949,\n "last": 10579.52,\n "priceIncrement": 0.25,\n "sizeIncrement": 0.001\n }\n ]\n }\n '
info = self._syminfo[symbol]
if (not info):
return (None, 'Symbol not exist')
price_tick = float(info['priceIncrement'])
size_tick = float(info['sizeIncrement'])
size_limit = None
value_tick = None
value_limit = None
if (info['type'] == 'future'):
base_currency = info['underlying']
quote_currency = 'USD'
settlement_currency = 'USD'
else:
base_currency = info['baseCurrency']
quote_currency = info['quoteCurrency']
settlement_currency = info['quoteCurrency']
symbol_type = info['type']
is_inverse = False
multiplier = 1
syminfo = SymbolInfo(self._platform, symbol, price_tick, size_tick, size_limit, value_tick, value_limit, base_currency, quote_currency, settlement_currency, symbol_type, is_inverse, multiplier)
return (syminfo, None)
| 2,875,572,908,821,740,500
|
Get information about the specified symbol
Args:
symbol: Trade target
Returns:
symbol_info: SymbolInfo if successfully, otherwise it's None.
error: Error information, otherwise it's None.
|
quant/platform/ftx.py
|
get_symbol_info
|
a04512/alphahunter
|
python
|
async def get_symbol_info(self, symbol):
" 获取指定符号相关信息\n\n Args:\n symbol: Trade target\n\n Returns:\n symbol_info: SymbolInfo if successfully, otherwise it's None.\n error: Error information, otherwise it's None.\n "
'\n {\n "success": true,\n "result": [\n {\n "name": "BTC-0628",\n "baseCurrency": null,\n "quoteCurrency": null,\n "type": "future",\n "underlying": "BTC",\n "enabled": true,\n "ask": 3949.25,\n "bid": 3949,\n "last": 10579.52,\n "priceIncrement": 0.25,\n "sizeIncrement": 0.001\n }\n ]\n }\n '
info = self._syminfo[symbol]
if (not info):
return (None, 'Symbol not exist')
price_tick = float(info['priceIncrement'])
size_tick = float(info['sizeIncrement'])
size_limit = None
value_tick = None
value_limit = None
if (info['type'] == 'future'):
base_currency = info['underlying']
quote_currency = 'USD'
settlement_currency = 'USD'
else:
base_currency = info['baseCurrency']
quote_currency = info['quoteCurrency']
settlement_currency = info['quoteCurrency']
symbol_type = info['type']
is_inverse = False
multiplier = 1
syminfo = SymbolInfo(self._platform, symbol, price_tick, size_tick, size_limit, value_tick, value_limit, base_currency, quote_currency, settlement_currency, symbol_type, is_inverse, multiplier)
return (syminfo, None)
|
async def invalid_indicate(self, symbol, indicate_type):
" update (an) callback function.\n\n Args:\n symbol: Trade target\n indicate_type: INDICATE_ORDER, INDICATE_ASSET, INDICATE_POSITION\n\n Returns:\n success: If execute successfully, return True, otherwise it's False.\n error: If execute failed, return error information, otherwise it's None.\n "
async def _task():
if ((indicate_type == INDICATE_ORDER) and self.cb.on_order_update_callback):
(success, error) = (await self.get_orders(symbol))
if error:
state = State(self._platform, self._account, 'get_orders error: {}'.format(error), State.STATE_CODE_GENERAL_ERROR)
SingleTask.run(self.cb.on_state_update_callback, state)
return
for order in success:
SingleTask.run(self.cb.on_order_update_callback, order)
elif ((indicate_type == INDICATE_ASSET) and self.cb.on_asset_update_callback):
(success, error) = (await self.get_assets())
if error:
state = State(self._platform, self._account, 'get_assets error: {}'.format(error), State.STATE_CODE_GENERAL_ERROR)
SingleTask.run(self.cb.on_state_update_callback, state)
return
SingleTask.run(self.cb.on_asset_update_callback, success)
elif ((indicate_type == INDICATE_POSITION) and self.cb.on_position_update_callback):
(success, error) = (await self.get_position(symbol))
if error:
state = State(self._platform, self._account, 'get_position error: {}'.format(error), State.STATE_CODE_GENERAL_ERROR)
SingleTask.run(self.cb.on_state_update_callback, state)
return
SingleTask.run(self.cb.on_position_update_callback, success)
if ((indicate_type == INDICATE_ORDER) or (indicate_type == INDICATE_ASSET) or (indicate_type == INDICATE_POSITION)):
SingleTask.run(_task)
return (True, None)
else:
logger.error('indicate_type error! indicate_type:', indicate_type, caller=self)
return (False, 'indicate_type error')
| 5,374,433,212,418,980,000
|
update (an) callback function.
Args:
symbol: Trade target
indicate_type: INDICATE_ORDER, INDICATE_ASSET, INDICATE_POSITION
Returns:
success: If execute successfully, return True, otherwise it's False.
error: If execute failed, return error information, otherwise it's None.
|
quant/platform/ftx.py
|
invalid_indicate
|
a04512/alphahunter
|
python
|
async def invalid_indicate(self, symbol, indicate_type):
" update (an) callback function.\n\n Args:\n symbol: Trade target\n indicate_type: INDICATE_ORDER, INDICATE_ASSET, INDICATE_POSITION\n\n Returns:\n success: If execute successfully, return True, otherwise it's False.\n error: If execute failed, return error information, otherwise it's None.\n "
async def _task():
if ((indicate_type == INDICATE_ORDER) and self.cb.on_order_update_callback):
(success, error) = (await self.get_orders(symbol))
if error:
state = State(self._platform, self._account, 'get_orders error: {}'.format(error), State.STATE_CODE_GENERAL_ERROR)
SingleTask.run(self.cb.on_state_update_callback, state)
return
for order in success:
SingleTask.run(self.cb.on_order_update_callback, order)
elif ((indicate_type == INDICATE_ASSET) and self.cb.on_asset_update_callback):
(success, error) = (await self.get_assets())
if error:
state = State(self._platform, self._account, 'get_assets error: {}'.format(error), State.STATE_CODE_GENERAL_ERROR)
SingleTask.run(self.cb.on_state_update_callback, state)
return
SingleTask.run(self.cb.on_asset_update_callback, success)
elif ((indicate_type == INDICATE_POSITION) and self.cb.on_position_update_callback):
(success, error) = (await self.get_position(symbol))
if error:
state = State(self._platform, self._account, 'get_position error: {}'.format(error), State.STATE_CODE_GENERAL_ERROR)
SingleTask.run(self.cb.on_state_update_callback, state)
return
SingleTask.run(self.cb.on_position_update_callback, success)
if ((indicate_type == INDICATE_ORDER) or (indicate_type == INDICATE_ASSET) or (indicate_type == INDICATE_POSITION)):
SingleTask.run(_task)
return (True, None)
else:
logger.error('indicate_type error! indicate_type:', indicate_type, caller=self)
return (False, 'indicate_type error')
|
async def _login(self):
    'The FTX websocket API is rather odd: it does not return any message when authentication succeeds'
ts = int((time.time() * 1000))
signature = hmac.new(self._secret_key.encode(), f'{ts}websocket_login'.encode(), 'sha256').hexdigest()
args = {'key': self._access_key, 'sign': signature, 'time': ts}
if self._subaccount_name:
args['subaccount'] = self._subaccount_name
data = {'op': 'login', 'args': args}
(await self.send_json(data))
| 3,940,037,901,716,461,600
|
The FTX websocket API is rather odd: it does not return any message when authentication succeeds
|
quant/platform/ftx.py
|
_login
|
a04512/alphahunter
|
python
|
async def _login(self):
ts = int((time.time() * 1000))
signature = hmac.new(self._secret_key.encode(), f'{ts}websocket_login'.encode(), 'sha256').hexdigest()
args = {'key': self._access_key, 'sign': signature, 'time': ts}
if self._subaccount_name:
args['subaccount'] = self._subaccount_name
data = {'op': 'login', 'args': args}
(await self.send_json(data))
|
async def connected_callback(self):
    'Callback invoked when the network connection is established\n '
if (self._account != None):
(await self._login())
(success, error) = (await self._rest_api.list_markets())
if error:
state = State(self._platform, self._account, 'list_markets error: {}'.format(error), State.STATE_CODE_GENERAL_ERROR)
SingleTask.run(self.cb.on_state_update_callback, state)
(await self.socket_close())
return
for info in success['result']:
self._syminfo[info['name']] = info
if (self.cb.on_order_update_callback != None):
for sym in self._symbols:
(orders, error) = (await self.get_orders(sym))
if error:
state = State(self._platform, self._account, 'get_orders error: {}'.format(error), State.STATE_CODE_GENERAL_ERROR)
SingleTask.run(self.cb.on_state_update_callback, state)
(await self.socket_close())
return
for o in orders:
SingleTask.run(self.cb.on_order_update_callback, o)
if (self.cb.on_position_update_callback != None):
for sym in self._symbols:
(pos, error) = (await self.get_position(sym))
if error:
state = State(self._platform, self._account, 'get_position error: {}'.format(error), State.STATE_CODE_GENERAL_ERROR)
SingleTask.run(self.cb.on_state_update_callback, state)
(await self.socket_close())
return
SingleTask.run(self.cb.on_position_update_callback, pos)
if (self.cb.on_asset_update_callback != None):
(ast, error) = (await self.get_assets())
if error:
state = State(self._platform, self._account, 'get_assets error: {}'.format(error), State.STATE_CODE_GENERAL_ERROR)
SingleTask.run(self.cb.on_state_update_callback, state)
(await self.socket_close())
return
SingleTask.run(self.cb.on_asset_update_callback, ast)
if (self.cb.on_order_update_callback != None):
(await self.send_json({'op': 'subscribe', 'channel': 'orders'}))
if (self.cb.on_fill_update_callback != None):
(await self.send_json({'op': 'subscribe', 'channel': 'fills'}))
self._subscribe_response_count = 0
| -7,279,199,030,973,096,000
|
Callback invoked when the network connection is established
|
quant/platform/ftx.py
|
connected_callback
|
a04512/alphahunter
|
python
|
async def connected_callback(self):
'\n '
if (self._account != None):
(await self._login())
(success, error) = (await self._rest_api.list_markets())
if error:
state = State(self._platform, self._account, 'list_markets error: {}'.format(error), State.STATE_CODE_GENERAL_ERROR)
SingleTask.run(self.cb.on_state_update_callback, state)
(await self.socket_close())
return
for info in success['result']:
self._syminfo[info['name']] = info
if (self.cb.on_order_update_callback != None):
for sym in self._symbols:
(orders, error) = (await self.get_orders(sym))
if error:
state = State(self._platform, self._account, 'get_orders error: {}'.format(error), State.STATE_CODE_GENERAL_ERROR)
SingleTask.run(self.cb.on_state_update_callback, state)
(await self.socket_close())
return
for o in orders:
SingleTask.run(self.cb.on_order_update_callback, o)
if (self.cb.on_position_update_callback != None):
for sym in self._symbols:
(pos, error) = (await self.get_position(sym))
if error:
state = State(self._platform, self._account, 'get_position error: {}'.format(error), State.STATE_CODE_GENERAL_ERROR)
SingleTask.run(self.cb.on_state_update_callback, state)
(await self.socket_close())
return
SingleTask.run(self.cb.on_position_update_callback, pos)
if (self.cb.on_asset_update_callback != None):
(ast, error) = (await self.get_assets())
if error:
state = State(self._platform, self._account, 'get_assets error: {}'.format(error), State.STATE_CODE_GENERAL_ERROR)
SingleTask.run(self.cb.on_state_update_callback, state)
(await self.socket_close())
return
SingleTask.run(self.cb.on_asset_update_callback, ast)
if (self.cb.on_order_update_callback != None):
(await self.send_json({'op': 'subscribe', 'channel': 'orders'}))
if (self.cb.on_fill_update_callback != None):
(await self.send_json({'op': 'subscribe', 'channel': 'fills'}))
self._subscribe_response_count = 0
|
async def process(self, msg):
' Process message that received from websocket.\n\n Args:\n msg: message received from websocket.\n\n Returns:\n None.\n '
if (not isinstance(msg, dict)):
return
logger.debug('msg:', json.dumps(msg), caller=self)
if (msg['type'] == 'error'):
state = State(self._platform, self._account, 'Websocket connection failed: {}'.format(msg), State.STATE_CODE_GENERAL_ERROR)
logger.error(state, caller=self)
SingleTask.run(self.cb.on_state_update_callback, state)
elif (msg['type'] == 'pong'):
return
elif (msg['type'] == 'info'):
if (msg['code'] == 20001):
@async_method_locker('FTXTrader._ws_close.locker')
async def _ws_close():
(await self.socket_close())
SingleTask.run(_ws_close)
elif (msg['type'] == 'unsubscribed'):
return
elif (msg['type'] == 'subscribed'):
self._subscribe_response_count = (self._subscribe_response_count + 1)
if (self._subscribe_response_count == 2):
state = State(self._platform, self._account, 'Environment ready', State.STATE_CODE_READY)
SingleTask.run(self.cb.on_state_update_callback, state)
elif (msg['type'] == 'update'):
channel = msg['channel']
if (channel == 'orders'):
self._update_order(msg)
elif (channel == 'fills'):
self._update_fill(msg)
| -7,256,647,120,417,005,000
|
Process message that received from websocket.
Args:
msg: message received from websocket.
Returns:
None.
|
quant/platform/ftx.py
|
process
|
a04512/alphahunter
|
python
|
async def process(self, msg):
' Process message that received from websocket.\n\n Args:\n msg: message received from websocket.\n\n Returns:\n None.\n '
if (not isinstance(msg, dict)):
return
logger.debug('msg:', json.dumps(msg), caller=self)
if (msg['type'] == 'error'):
state = State(self._platform, self._account, 'Websocket connection failed: {}'.format(msg), State.STATE_CODE_GENERAL_ERROR)
logger.error(state, caller=self)
SingleTask.run(self.cb.on_state_update_callback, state)
elif (msg['type'] == 'pong'):
return
elif (msg['type'] == 'info'):
if (msg['code'] == 20001):
@async_method_locker('FTXTrader._ws_close.locker')
async def _ws_close():
(await self.socket_close())
SingleTask.run(_ws_close)
elif (msg['type'] == 'unsubscribed'):
return
elif (msg['type'] == 'subscribed'):
self._subscribe_response_count = (self._subscribe_response_count + 1)
if (self._subscribe_response_count == 2):
state = State(self._platform, self._account, 'Environment ready', State.STATE_CODE_READY)
SingleTask.run(self.cb.on_state_update_callback, state)
elif (msg['type'] == 'update'):
channel = msg['channel']
if (channel == 'orders'):
self._update_order(msg)
elif (channel == 'fills'):
self._update_fill(msg)
|
def _update_order(self, order_info):
' Order update.\n\n Args:\n order_info: Order information.\n\n Returns:\n None.\n '
o = order_info['data']
order = self._convert_order_format(o)
if (order == None):
return
SingleTask.run(self.cb.on_order_update_callback, order)
| -7,402,522,007,243,926,000
|
Order update.
Args:
order_info: Order information.
Returns:
None.
|
quant/platform/ftx.py
|
_update_order
|
a04512/alphahunter
|
python
|
def _update_order(self, order_info):
' Order update.\n\n Args:\n order_info: Order information.\n\n Returns:\n None.\n '
o = order_info['data']
order = self._convert_order_format(o)
if (order == None):
return
SingleTask.run(self.cb.on_order_update_callback, order)
|
def _update_fill(self, fill_info):
' Fill update.\n\n Args:\n fill_info: Fill information.\n\n Returns:\n None.\n '
data = fill_info['data']
fill_no = str(data['id'])
order_no = str(data['orderId'])
price = float(data['price'])
size = float(data['size'])
fee = float(data['fee'])
ts = tools.utctime_str_to_mts(data['time'], '%Y-%m-%dT%H:%M:%S.%f+00:00')
liquidity = (LIQUIDITY_TYPE_TAKER if (data['liquidity'] == 'taker') else LIQUIDITY_TYPE_MAKER)
info = {'platform': self._platform, 'account': self._account, 'strategy': self._strategy, 'fill_no': fill_no, 'order_no': order_no, 'side': (ORDER_ACTION_BUY if (data['side'] == 'buy') else ORDER_ACTION_SELL), 'symbol': data['market'], 'price': price, 'quantity': size, 'liquidity': liquidity, 'fee': fee, 'ctime': ts}
fill = Fill(**info)
SingleTask.run(self.cb.on_fill_update_callback, fill)
| -9,178,885,241,940,646,000
|
Fill update.
Args:
fill_info: Fill information.
Returns:
None.
|
quant/platform/ftx.py
|
_update_fill
|
a04512/alphahunter
|
python
|
def _update_fill(self, fill_info):
' Fill update.\n\n Args:\n fill_info: Fill information.\n\n Returns:\n None.\n '
data = fill_info['data']
fill_no = str(data['id'])
order_no = str(data['orderId'])
price = float(data['price'])
size = float(data['size'])
fee = float(data['fee'])
ts = tools.utctime_str_to_mts(data['time'], '%Y-%m-%dT%H:%M:%S.%f+00:00')
liquidity = (LIQUIDITY_TYPE_TAKER if (data['liquidity'] == 'taker') else LIQUIDITY_TYPE_MAKER)
info = {'platform': self._platform, 'account': self._account, 'strategy': self._strategy, 'fill_no': fill_no, 'order_no': order_no, 'side': (ORDER_ACTION_BUY if (data['side'] == 'buy') else ORDER_ACTION_SELL), 'symbol': data['market'], 'price': price, 'quantity': size, 'liquidity': liquidity, 'fee': fee, 'ctime': ts}
fill = Fill(**info)
SingleTask.run(self.cb.on_fill_update_callback, fill)
|
@staticmethod
def mapping_layer():
    ' Get the symbol mapping relationship.\n Returns:\n layer: the symbol mapping relationship\n '
return None
| -4,603,296,640,271,726,600
|
Get the symbol mapping relationship.
Returns:
layer: the symbol mapping relationship
|
quant/platform/ftx.py
|
mapping_layer
|
a04512/alphahunter
|
python
|
@staticmethod
def mapping_layer():
    ' Get the symbol mapping relationship.\n Returns:\n layer: the symbol mapping relationship\n '
return None
|
def __init__(self, **kwargs):
'Initialize.'
self._platform = kwargs['platform']
self._symbols = kwargs['symbols']
self._host = 'https://ftx.com'
self._wss = 'wss://ftx.com'
url = (self._wss + '/ws')
super(FTXMarket, self).__init__(url, send_hb_interval=15, **kwargs)
self.heartbeat_msg = {'op': 'ping'}
self._rest_api = FTXRestAPI(self._host, None, None, None)
self._orderbooks: DefaultDict[(str, Dict[(str, DefaultDict[(float, float)])])] = defaultdict((lambda : {side: defaultdict(float) for side in {'bids', 'asks'}}))
self.initialize()
| -1,737,670,192,026,487,300
|
Initialize.
|
quant/platform/ftx.py
|
__init__
|
a04512/alphahunter
|
python
|
def __init__(self, **kwargs):
self._platform = kwargs['platform']
self._symbols = kwargs['symbols']
self._host = 'https://ftx.com'
self._wss = 'wss://ftx.com'
url = (self._wss + '/ws')
super(FTXMarket, self).__init__(url, send_hb_interval=15, **kwargs)
self.heartbeat_msg = {'op': 'ping'}
self._rest_api = FTXRestAPI(self._host, None, None, None)
self._orderbooks: DefaultDict[(str, Dict[(str, DefaultDict[(float, float)])])] = defaultdict((lambda : {side: defaultdict(float) for side in {'bids', 'asks'}}))
self.initialize()
|
async def connected_callback(self):
    'Callback invoked when the network connection is established\n '
for sym in self._symbols:
if (self.cb.on_trade_update_callback != None):
(await self.send_json({'op': 'subscribe', 'channel': 'trades', 'market': sym}))
if (self.cb.on_orderbook_update_callback != None):
(await self.send_json({'op': 'subscribe', 'channel': 'orderbook', 'market': sym}))
if (self.cb.on_ticker_update_callback != None):
(await self.send_json({'op': 'subscribe', 'channel': 'ticker', 'market': sym}))
if (self.cb.on_kline_update_callback != None):
LoopRunTask.register(self._kline_loop_query, 60, sym)
| 8,191,372,696,025,688,000
|
Callback invoked when the network connection is established
|
quant/platform/ftx.py
|
connected_callback
|
a04512/alphahunter
|
python
|
async def connected_callback(self):
'\n '
for sym in self._symbols:
if (self.cb.on_trade_update_callback != None):
(await self.send_json({'op': 'subscribe', 'channel': 'trades', 'market': sym}))
if (self.cb.on_orderbook_update_callback != None):
(await self.send_json({'op': 'subscribe', 'channel': 'orderbook', 'market': sym}))
if (self.cb.on_ticker_update_callback != None):
(await self.send_json({'op': 'subscribe', 'channel': 'ticker', 'market': sym}))
if (self.cb.on_kline_update_callback != None):
LoopRunTask.register(self._kline_loop_query, 60, sym)
|
async def process(self, msg):
' Process message that received from websocket.\n\n Args:\n msg: message received from websocket.\n\n Returns:\n None.\n '
if (not isinstance(msg, dict)):
return
logger.debug('msg:', json.dumps(msg), caller=self)
if (msg.get('type') == 'pong'):
return
elif (msg['type'] == 'error'):
state = State(self._platform, self._account, 'Websocket connection failed: {}'.format(msg), State.STATE_CODE_GENERAL_ERROR)
logger.error(state, caller=self)
SingleTask.run(self.cb.on_state_update_callback, state)
elif (msg['type'] == 'info'):
if (msg['code'] == 20001):
@async_method_locker('FTXMarket._ws_close.locker')
async def _ws_close():
(await self.socket_close())
SingleTask.run(_ws_close)
elif (msg['type'] == 'unsubscribed'):
return
elif (msg['type'] == 'subscribed'):
return
elif ((msg['type'] == 'update') or (msg['type'] == 'partial')):
channel = msg['channel']
if (channel == 'orderbook'):
self._update_orderbook(msg)
elif (channel == 'trades'):
self._update_trades(msg)
elif (channel == 'ticker'):
self._update_ticker(msg)
| -192,911,100,035,693,280
|
Process message that received from websocket.
Args:
msg: message received from websocket.
Returns:
None.
|
quant/platform/ftx.py
|
process
|
a04512/alphahunter
|
python
|
async def process(self, msg):
' Process message that received from websocket.\n\n Args:\n msg: message received from websocket.\n\n Returns:\n None.\n '
if (not isinstance(msg, dict)):
return
logger.debug('msg:', json.dumps(msg), caller=self)
if (msg.get('type') == 'pong'):
return
elif (msg['type'] == 'error'):
state = State(self._platform, self._account, 'Websocket connection failed: {}'.format(msg), State.STATE_CODE_GENERAL_ERROR)
logger.error(state, caller=self)
SingleTask.run(self.cb.on_state_update_callback, state)
elif (msg['type'] == 'info'):
if (msg['code'] == 20001):
@async_method_locker('FTXMarket._ws_close.locker')
async def _ws_close():
(await self.socket_close())
SingleTask.run(_ws_close)
elif (msg['type'] == 'unsubscribed'):
return
elif (msg['type'] == 'subscribed'):
return
elif ((msg['type'] == 'update') or (msg['type'] == 'partial')):
channel = msg['channel']
if (channel == 'orderbook'):
self._update_orderbook(msg)
elif (channel == 'trades'):
self._update_trades(msg)
elif (channel == 'ticker'):
self._update_ticker(msg)
|
def _update_ticker(self, ticker_info):
' ticker update.\n\n Args:\n ticker_info: ticker information.\n\n Returns:\n '
ts = int((float(ticker_info['data']['time']) * 1000))
p = {'platform': self._platform, 'symbol': ticker_info['market'], 'ask': ticker_info['data']['ask'], 'bid': ticker_info['data']['bid'], 'last': ticker_info['data']['last'], 'timestamp': ts}
ticker = Ticker(**p)
SingleTask.run(self.cb.on_ticker_update_callback, ticker)
| -2,724,322,725,391,350,300
|
ticker update.
Args:
ticker_info: ticker information.
Returns:
|
quant/platform/ftx.py
|
_update_ticker
|
a04512/alphahunter
|
python
|
def _update_ticker(self, ticker_info):
' ticker update.\n\n Args:\n ticker_info: ticker information.\n\n Returns:\n '
ts = int((float(ticker_info['data']['time']) * 1000))
p = {'platform': self._platform, 'symbol': ticker_info['market'], 'ask': ticker_info['data']['ask'], 'bid': ticker_info['data']['bid'], 'last': ticker_info['data']['last'], 'timestamp': ts}
ticker = Ticker(**p)
SingleTask.run(self.cb.on_ticker_update_callback, ticker)
|
def _update_trades(self, trades_info):
' trades update.\n\n Args:\n trades_info: trades information.\n\n Returns:\n '
for t in trades_info['data']:
ts = tools.utctime_str_to_mts(t['time'], '%Y-%m-%dT%H:%M:%S.%f+00:00')
p = {'platform': self._platform, 'symbol': trades_info['market'], 'action': (ORDER_ACTION_BUY if (t['side'] == 'buy') else ORDER_ACTION_SELL), 'price': t['price'], 'quantity': t['size'], 'timestamp': ts}
trade = Trade(**p)
SingleTask.run(self.cb.on_trade_update_callback, trade)
| 4,211,527,120,994,042,400
|
trades update.
Args:
trades_info: trades information.
Returns:
|
quant/platform/ftx.py
|
_update_trades
|
a04512/alphahunter
|
python
|
def _update_trades(self, trades_info):
' trades update.\n\n Args:\n trades_info: trades information.\n\n Returns:\n '
for t in trades_info['data']:
ts = tools.utctime_str_to_mts(t['time'], '%Y-%m-%dT%H:%M:%S.%f+00:00')
p = {'platform': self._platform, 'symbol': trades_info['market'], 'action': (ORDER_ACTION_BUY if (t['side'] == 'buy') else ORDER_ACTION_SELL), 'price': t['price'], 'quantity': t['size'], 'timestamp': ts}
trade = Trade(**p)
SingleTask.run(self.cb.on_trade_update_callback, trade)
|
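The trade timestamps above arrive as ISO-8601 UTC strings and are converted to millisecond timestamps by tools.utctime_str_to_mts. A minimal stdlib sketch of that conversion, assuming the helper returns integer milliseconds since the epoch (utc_str_to_ms is a hypothetical stand-in, not part of the project):
from datetime import datetime, timezone
def utc_str_to_ms(s, fmt='%Y-%m-%dT%H:%M:%S.%f+00:00'):
    # Parse an FTX-style UTC string and convert it to integer milliseconds.
    dt = datetime.strptime(s, fmt).replace(tzinfo=timezone.utc)
    return int(dt.timestamp() * 1000)
print(utc_str_to_ms('2021-03-01T12:00:00.123456+00:00'))  # -> 1614600000123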
def _update_orderbook(self, orderbook_info):
' orderbook update.\n\n Args:\n orderbook_info: orderbook information.\n\n Returns:\n '
market = orderbook_info['market']
data = orderbook_info['data']
if (data['action'] == 'partial'):
self._reset_orderbook(market)
for side in {'bids', 'asks'}:
book = self._orderbooks[market][side]
for (price, size) in data[side]:
if size:
book[price] = size
else:
del book[price]
checksum = data['checksum']
orderbook = self._get_orderbook(market)
checksum_data = [':'.join([f'{float(order[0])}:{float(order[1])}' for order in (bid, offer) if order]) for (bid, offer) in zip_longest(orderbook['bids'][:100], orderbook['asks'][:100])]
computed_result = int(zlib.crc32(':'.join(checksum_data).encode()))
if (computed_result != checksum):
@async_method_locker('FTXMarket._re_subscribe.locker')
async def _re_subscribe():
(await self.send_json({'op': 'unsubscribe', 'channel': 'orderbook', 'market': market}))
(await self.send_json({'op': 'subscribe', 'channel': 'orderbook', 'market': market}))
SingleTask.run(_re_subscribe)
return
logger.debug('orderbook:', json.dumps(orderbook), caller=self)
ts = int((float(data['time']) * 1000))
p = {'platform': self._platform, 'symbol': market, 'asks': orderbook['asks'], 'bids': orderbook['bids'], 'timestamp': ts}
ob = Orderbook(**p)
SingleTask.run(self.cb.on_orderbook_update_callback, ob)
| -5,539,249,548,123,632,000
|
orderbook update.
Args:
orderbook_info: orderbook information.
Returns:
|
quant/platform/ftx.py
|
_update_orderbook
|
a04512/alphahunter
|
python
|
def _update_orderbook(self, orderbook_info):
' orderbook update.\n\n Args:\n orderbook_info: orderbook information.\n\n Returns:\n '
market = orderbook_info['market']
data = orderbook_info['data']
if (data['action'] == 'partial'):
self._reset_orderbook(market)
for side in {'bids', 'asks'}:
book = self._orderbooks[market][side]
for (price, size) in data[side]:
if size:
book[price] = size
else:
del book[price]
checksum = data['checksum']
orderbook = self._get_orderbook(market)
checksum_data = [':'.join([f'{float(order[0])}:{float(order[1])}' for order in (bid, offer) if order]) for (bid, offer) in zip_longest(orderbook['bids'][:100], orderbook['asks'][:100])]
computed_result = int(zlib.crc32(':'.join(checksum_data).encode()))
if (computed_result != checksum):
@async_method_locker('FTXMarket._re_subscribe.locker')
async def _re_subscribe():
(await self.send_json({'op': 'unsubscribe', 'channel': 'orderbook', 'market': market}))
(await self.send_json({'op': 'subscribe', 'channel': 'orderbook', 'market': market}))
SingleTask.run(_re_subscribe)
return
logger.debug('orderbook:', json.dumps(orderbook), caller=self)
ts = int((float(data['time']) * 1000))
p = {'platform': self._platform, 'symbol': market, 'asks': orderbook['asks'], 'bids': orderbook['bids'], 'timestamp': ts}
ob = Orderbook(**p)
SingleTask.run(self.cb.on_orderbook_update_callback, ob)
|
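The checksum branch above mirrors FTX's orderbook integrity check: the first 100 bid and ask levels are interleaved as price:size pairs, CRC32-hashed, and a mismatch triggers a re-subscribe. A self-contained sketch of the same computation with toy numbers (not real market data):
import zlib
from itertools import zip_longest
bids = [(100.5, 3.0), (100.0, 1.5)]   # (price, size), best bid first
asks = [(101.0, 2.0), (101.5, 4.0)]   # best ask first
# Interleave bid/ask levels exactly as in _update_orderbook above.
parts = [':'.join(f'{float(level[0])}:{float(level[1])}' for level in (bid, ask) if level)
         for bid, ask in zip_longest(bids[:100], asks[:100])]
checksum = zlib.crc32(':'.join(parts).encode())
print(checksum)   # integer to compare against the 'checksum' field of the message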
def _update_kline(self, kline_info, symbol):
' kline update.\n\n Args:\n kline_info: kline information.\n\n Returns:\n None.\n '
info = {'platform': self._platform, 'symbol': symbol, 'open': kline_info['open'], 'high': kline_info['high'], 'low': kline_info['low'], 'close': kline_info['close'], 'volume': kline_info['volume'], 'timestamp': tools.utctime_str_to_mts(kline_info['startTime'], '%Y-%m-%dT%H:%M:%S+00:00'), 'kline_type': MARKET_TYPE_KLINE}
kline = Kline(**info)
SingleTask.run(self.cb.on_kline_update_callback, kline)
| -6,886,070,000,955,551,000
|
kline update.
Args:
kline_info: kline information.
Returns:
None.
|
quant/platform/ftx.py
|
_update_kline
|
a04512/alphahunter
|
python
|
def _update_kline(self, kline_info, symbol):
' kline update.\n\n Args:\n kline_info: kline information.\n\n Returns:\n None.\n '
info = {'platform': self._platform, 'symbol': symbol, 'open': kline_info['open'], 'high': kline_info['high'], 'low': kline_info['low'], 'close': kline_info['close'], 'volume': kline_info['volume'], 'timestamp': tools.utctime_str_to_mts(kline_info['startTime'], '%Y-%m-%dT%H:%M:%S+00:00'), 'kline_type': MARKET_TYPE_KLINE}
kline = Kline(**info)
SingleTask.run(self.cb.on_kline_update_callback, kline)
|
def main():
'\n Calls the other functions in this module to test and/or demonstrate them.\n '
drawing_speed = 10
window = rg.TurtleWindow()
window.tracer(drawing_speed)
draw_circles(rg.Point(100, 50))
draw_circles(rg.Point((- 200), 0))
window.update()
window.close_on_mouse_click()
| 2,244,172,143,528,723,200
|
Calls the other functions in this module to test and/or demonstrate them.
|
src/m5_why_parameters_are_powerful.py
|
main
|
brownme1/02-ObjectsFunctionsAndMethods
|
python
|
def main():
'\n \n '
drawing_speed = 10
window = rg.TurtleWindow()
window.tracer(drawing_speed)
draw_circles(rg.Point(100, 50))
draw_circles(rg.Point((- 200), 0))
window.update()
window.close_on_mouse_click()
|
def draw_circles(point):
'\n Constructs a SimpleTurtle, then uses the SimpleTurtle to draw 10 circles\n such that:\n -- Each is centered at the given Point, and\n -- They have radii: 15 30 45 60 75 ..., respectively.\n '
turtle = rg.SimpleTurtle()
turtle.pen_up()
turtle.go_to(point)
turtle.set_heading(0)
for k in range(1, 11):
turtle.pen_up()
turtle.right(90)
turtle.forward(15)
turtle.left(90)
turtle.pen_down()
turtle.draw_circle((15 * k))
| 8,336,630,033,255,436,000
|
Constructs a SimpleTurtle, then uses the SimpleTurtle to draw 10 circles
such that:
-- Each is centered at the given Point, and
-- They have radii: 15 30 45 60 75 ..., respectively.
|
src/m5_why_parameters_are_powerful.py
|
draw_circles
|
brownme1/02-ObjectsFunctionsAndMethods
|
python
|
def draw_circles(point):
'\n Constructs a SimpleTurtle, then uses the SimpleTurtle to draw 10 circles\n such that:\n -- Each is centered at the given Point, and\n -- They have radii: 15 30 45 60 75 ..., respectively.\n '
turtle = rg.SimpleTurtle()
turtle.pen_up()
turtle.go_to(point)
turtle.set_heading(0)
for k in range(1, 11):
turtle.pen_up()
turtle.right(90)
turtle.forward(15)
turtle.left(90)
turtle.pen_down()
turtle.draw_circle((15 * k))
|
def better_draw_circles(point):
'\n Starts out the same as the draw_circles function defined ABOVE.\n You Will make it an IMPROVED, MORE POWERFUL function per the above _TODO_.\n '
turtle = rg.SimpleTurtle()
turtle.pen_up()
turtle.go_to(point)
turtle.set_heading(0)
for k in range(1, 11):
turtle.pen_up()
turtle.right(90)
turtle.forward(15)
turtle.left(90)
turtle.pen_down()
print((15 * k))
| -5,529,178,186,068,817,000
|
Starts out the same as the draw_circles function defined ABOVE.
You Will make it an IMPROVED, MORE POWERFUL function per the above _TODO_.
|
src/m5_why_parameters_are_powerful.py
|
better_draw_circles
|
brownme1/02-ObjectsFunctionsAndMethods
|
python
|
def better_draw_circles(point):
'\n Starts out the same as the draw_circles function defined ABOVE.\n You Will make it an IMPROVED, MORE POWERFUL function per the above _TODO_.\n '
turtle = rg.SimpleTurtle()
turtle.pen_up()
turtle.go_to(point)
turtle.set_heading(0)
for k in range(1, 11):
turtle.pen_up()
turtle.right(90)
turtle.forward(15)
turtle.left(90)
turtle.pen_down()
print((15 * k))
|
def even_better_draw_circles(point):
' An improved version of draw_circles, per the _TODO_ above. '
| 5,236,924,129,325,634,000
|
An improved version of draw_circles, per the _TODO_ above.
|
src/m5_why_parameters_are_powerful.py
|
even_better_draw_circles
|
brownme1/02-ObjectsFunctionsAndMethods
|
python
|
def even_better_draw_circles(point):
' '
|
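The body of even_better_draw_circles is left as an exercise stub. Purely as an illustration (this is not the courseware's intended answer, which depends on a TODO not shown here), one plausible "more powerful" version parameterizes the radius step and circle count; it assumes the module's existing rosegraphics import as rg:
def even_better_draw_circles_sketch(point, radius_step=15, n=10):
    # Hypothetical completion: same drawing loop as draw_circles, but the
    # radius step and number of circles are now parameters.
    turtle = rg.SimpleTurtle()
    turtle.pen_up()
    turtle.go_to(point)
    turtle.set_heading(0)
    for k in range(1, n + 1):
        turtle.pen_up()
        turtle.right(90)
        turtle.forward(radius_step)
        turtle.left(90)
        turtle.pen_down()
        turtle.draw_circle(radius_step * k)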
def vol(volpath, ext='.npz', batch_size=1, expected_nb_files=(- 1), expected_files=None, data_proc_fn=None, relabel=None, nb_labels_reshape=0, keep_vol_size=False, name='single_vol', nb_restart_cycle=None, patch_size=None, patch_stride=1, collapse_2d=None, extract_slice=None, force_binary=False, nb_feats=1, patch_rand=False, patch_rand_seed=None, vol_rand_seed=None, binary=False, yield_incomplete_final_batch=True, verbose=False):
'\n generator for single volume (or volume patches) from a list of files\n\n simple volume generator that loads a volume (via npy/mgz/nii/niigz), processes it,\n and prepares it for keras model formats\n\n if a patch size is passed, breaks the volume into patches and generates those\n '
volfiles = _get_file_list(volpath, ext, vol_rand_seed)
nb_files = len(volfiles)
assert (nb_files > 0), ('Could not find any files at %s with extension %s' % (volpath, ext))
vol_data = _load_medical_volume(os.path.join(volpath, volfiles[0]), ext)
if (data_proc_fn is not None):
vol_data = data_proc_fn(vol_data)
nb_patches_per_vol = 1
if ((patch_size is not None) and all(((f is not None) for f in patch_size))):
if ((relabel is None) and (len(patch_size) == (len(vol_data.shape) - 1))):
tmp_patch_size = [f for f in patch_size]
patch_size = [*patch_size, vol_data.shape[(- 1)]]
patch_stride = [f for f in patch_stride]
patch_stride = [*patch_stride, vol_data.shape[(- 1)]]
assert (len(vol_data.shape) == len(patch_size)), ('Vol dims %d are not equal to patch dims %d' % (len(vol_data.shape), len(patch_size)))
nb_patches_per_vol = np.prod(pl.gridsize(vol_data.shape, patch_size, patch_stride))
if (nb_restart_cycle is None):
print('setting restart cycle to', nb_files)
nb_restart_cycle = nb_files
assert (nb_restart_cycle <= (nb_files * nb_patches_per_vol)), ('%s restart cycle (%s) too big (%s) in %s' % (name, nb_restart_cycle, (nb_files * nb_patches_per_vol), volpath))
if (expected_nb_files >= 0):
assert (nb_files == expected_nb_files), ('number of files do not match: %d, %d' % (nb_files, expected_nb_files))
if (expected_files is not None):
if (not (volfiles == expected_files)):
print('file lists did not match. You should probably stop execution.', file=sys.stderr)
print(len(volfiles), len(expected_files))
if verbose:
print('nb_restart_cycle:', nb_restart_cycle)
fileidx = (- 1)
batch_idx = (- 1)
feat_idx = 0
batch_shape = None
while 1:
fileidx = np.mod((fileidx + 1), nb_restart_cycle)
if (verbose and (fileidx == 0)):
print(('starting %s cycle' % name))
try:
if verbose:
print(('opening %s' % os.path.join(volpath, volfiles[fileidx])))
file_name = os.path.join(volpath, volfiles[fileidx])
vol_data = _load_medical_volume(file_name, ext, verbose)
except:
debug_error_msg = '#files: %d, fileidx: %d, nb_restart_cycle: %d. error: %s'
print((debug_error_msg % (len(volfiles), fileidx, nb_restart_cycle, sys.exc_info()[0])))
raise
if (data_proc_fn is not None):
vol_data = data_proc_fn(vol_data)
if (relabel is not None):
vol_data = _relabel(vol_data, relabel)
if (patch_size is None):
this_patch_size = vol_data.shape
patch_stride = [1 for f in this_patch_size]
else:
this_patch_size = [f for f in patch_size]
for (pi, p) in enumerate(this_patch_size):
if (p is None):
this_patch_size[pi] = vol_data.shape[pi]
patch_stride[pi] = 1
assert (~ np.any(np.isnan(vol_data))), ('Found a nan for %s' % volfiles[fileidx])
assert np.all(np.isfinite(vol_data)), ('Found a inf for %s' % volfiles[fileidx])
patch_gen = patch(vol_data, this_patch_size, patch_stride=patch_stride, nb_labels_reshape=nb_labels_reshape, batch_size=1, infinite=False, collapse_2d=collapse_2d, patch_rand=patch_rand, patch_rand_seed=patch_rand_seed, keep_vol_size=keep_vol_size)
empty_gen = True
patch_idx = (- 1)
for lpatch in patch_gen:
empty_gen = False
patch_idx += 1
if (np.mod(feat_idx, nb_feats) == 0):
vol_data_feats = lpatch
else:
vol_data_feats = np.concatenate([vol_data_feats, lpatch], (np.ndim(lpatch) - 1))
feat_idx += 1
if binary:
vol_data_feats = vol_data_feats.astype(bool)
if (np.mod(feat_idx, nb_feats) == 0):
feats_shape = vol_data_feats[1:]
if ((batch_shape is not None) and (feats_shape != batch_shape)):
batch_idx = (- 1)
batch_shape = None
print('switching patch sizes')
(yield np.vstack(vol_data_batch))
if (batch_idx == (- 1)):
vol_data_batch = [vol_data_feats]
batch_shape = vol_data_feats[1:]
else:
vol_data_batch = [*vol_data_batch, vol_data_feats]
batch_idx += 1
batch_done = (batch_idx == (batch_size - 1))
files_done = (np.mod((fileidx + 1), nb_restart_cycle) == 0)
final_batch = (yield_incomplete_final_batch and files_done and (patch_idx == (nb_patches_per_vol - 1)))
if final_batch:
print(('last batch in %s cycle %d. nb_batch:%d' % (name, fileidx, len(vol_data_batch))))
if (batch_done or final_batch):
batch_idx = (- 1)
q = np.vstack(vol_data_batch)
(yield q)
if empty_gen:
raise ValueError('Patch generator was empty for file %s', volfiles[fileidx])
| 2,719,363,371,483,586,600
|
generator for single volume (or volume patches) from a list of files
simple volume generator that loads a volume (via npy/mgz/nii/niigz), processes it,
and prepares it for keras model formats
if a patch size is passed, breaks the volume into patches and generates those
|
ext/neuron/neuron/generators.py
|
vol
|
adriaan16/brainstorm
|
python
|
def vol(volpath, ext='.npz', batch_size=1, expected_nb_files=(- 1), expected_files=None, data_proc_fn=None, relabel=None, nb_labels_reshape=0, keep_vol_size=False, name='single_vol', nb_restart_cycle=None, patch_size=None, patch_stride=1, collapse_2d=None, extract_slice=None, force_binary=False, nb_feats=1, patch_rand=False, patch_rand_seed=None, vol_rand_seed=None, binary=False, yield_incomplete_final_batch=True, verbose=False):
'\n generator for single volume (or volume patches) from a list of files\n\n simple volume generator that loads a volume (via npy/mgz/nii/niigz), processes it,\n and prepares it for keras model formats\n\n if a patch size is passed, breaks the volume into patches and generates those\n '
volfiles = _get_file_list(volpath, ext, vol_rand_seed)
nb_files = len(volfiles)
assert (nb_files > 0), ('Could not find any files at %s with extension %s' % (volpath, ext))
vol_data = _load_medical_volume(os.path.join(volpath, volfiles[0]), ext)
if (data_proc_fn is not None):
vol_data = data_proc_fn(vol_data)
nb_patches_per_vol = 1
if ((patch_size is not None) and all(((f is not None) for f in patch_size))):
if ((relabel is None) and (len(patch_size) == (len(vol_data.shape) - 1))):
tmp_patch_size = [f for f in patch_size]
patch_size = [*patch_size, vol_data.shape[(- 1)]]
patch_stride = [f for f in patch_stride]
patch_stride = [*patch_stride, vol_data.shape[(- 1)]]
assert (len(vol_data.shape) == len(patch_size)), ('Vol dims %d are not equal to patch dims %d' % (len(vol_data.shape), len(patch_size)))
nb_patches_per_vol = np.prod(pl.gridsize(vol_data.shape, patch_size, patch_stride))
if (nb_restart_cycle is None):
print('setting restart cycle to', nb_files)
nb_restart_cycle = nb_files
assert (nb_restart_cycle <= (nb_files * nb_patches_per_vol)), ('%s restart cycle (%s) too big (%s) in %s' % (name, nb_restart_cycle, (nb_files * nb_patches_per_vol), volpath))
if (expected_nb_files >= 0):
assert (nb_files == expected_nb_files), ('number of files do not match: %d, %d' % (nb_files, expected_nb_files))
if (expected_files is not None):
if (not (volfiles == expected_files)):
print('file lists did not match. You should probably stop execution.', file=sys.stderr)
print(len(volfiles), len(expected_files))
if verbose:
print('nb_restart_cycle:', nb_restart_cycle)
fileidx = (- 1)
batch_idx = (- 1)
feat_idx = 0
batch_shape = None
while 1:
fileidx = np.mod((fileidx + 1), nb_restart_cycle)
if (verbose and (fileidx == 0)):
print(('starting %s cycle' % name))
try:
if verbose:
print(('opening %s' % os.path.join(volpath, volfiles[fileidx])))
file_name = os.path.join(volpath, volfiles[fileidx])
vol_data = _load_medical_volume(file_name, ext, verbose)
except:
debug_error_msg = '#files: %d, fileidx: %d, nb_restart_cycle: %d. error: %s'
print((debug_error_msg % (len(volfiles), fileidx, nb_restart_cycle, sys.exc_info()[0])))
raise
if (data_proc_fn is not None):
vol_data = data_proc_fn(vol_data)
if (relabel is not None):
vol_data = _relabel(vol_data, relabel)
if (patch_size is None):
this_patch_size = vol_data.shape
patch_stride = [1 for f in this_patch_size]
else:
this_patch_size = [f for f in patch_size]
for (pi, p) in enumerate(this_patch_size):
if (p is None):
this_patch_size[pi] = vol_data.shape[pi]
patch_stride[pi] = 1
assert (~ np.any(np.isnan(vol_data))), ('Found a nan for %s' % volfiles[fileidx])
assert np.all(np.isfinite(vol_data)), ('Found a inf for %s' % volfiles[fileidx])
patch_gen = patch(vol_data, this_patch_size, patch_stride=patch_stride, nb_labels_reshape=nb_labels_reshape, batch_size=1, infinite=False, collapse_2d=collapse_2d, patch_rand=patch_rand, patch_rand_seed=patch_rand_seed, keep_vol_size=keep_vol_size)
empty_gen = True
patch_idx = (- 1)
for lpatch in patch_gen:
empty_gen = False
patch_idx += 1
if (np.mod(feat_idx, nb_feats) == 0):
vol_data_feats = lpatch
else:
vol_data_feats = np.concatenate([vol_data_feats, lpatch], (np.ndim(lpatch) - 1))
feat_idx += 1
if binary:
vol_data_feats = vol_data_feats.astype(bool)
if (np.mod(feat_idx, nb_feats) == 0):
feats_shape = vol_data_feats[1:]
if ((batch_shape is not None) and (feats_shape != batch_shape)):
batch_idx = (- 1)
batch_shape = None
print('switching patch sizes')
(yield np.vstack(vol_data_batch))
if (batch_idx == (- 1)):
vol_data_batch = [vol_data_feats]
batch_shape = vol_data_feats[1:]
else:
vol_data_batch = [*vol_data_batch, vol_data_feats]
batch_idx += 1
batch_done = (batch_idx == (batch_size - 1))
files_done = (np.mod((fileidx + 1), nb_restart_cycle) == 0)
final_batch = (yield_incomplete_final_batch and files_done and (patch_idx == (nb_patches_per_vol - 1)))
if final_batch:
print(('last batch in %s cycle %d. nb_batch:%d' % (name, fileidx, len(vol_data_batch))))
if (batch_done or final_batch):
batch_idx = (- 1)
q = np.vstack(vol_data_batch)
(yield q)
if empty_gen:
raise ValueError('Patch generator was empty for file %s', volfiles[fileidx])
|
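A hedged usage sketch for the vol generator above; the directory path is an assumption, the .npz files are expected to contain a 'vol_data' array (see _load_medical_volume below), and the patch settings are illustrative:
# Yield batches of 8 patches of size 32x32x32 from .npz volumes in a folder.
gen = vol('/path/to/train_vols', ext='.npz', batch_size=8,
          patch_size=[32, 32, 32], patch_stride=[16, 16, 16], verbose=True)
batch = next(gen)   # one stacked batch of patches (leading dimension = batch_size)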
def patch(vol_data, patch_size, patch_stride=1, nb_labels_reshape=1, keep_vol_size=False, batch_size=1, collapse_2d=None, patch_rand=False, patch_rand_seed=None, variable_batch_size=False, infinite=False):
'\n generate patches from volume for keras package\n\n Yields:\n patch: nd array of shape [batch_size, *patch_size], unless resized via nb_labels_reshape\n '
assert (batch_size >= 1), 'batch_size should be at least 1'
if (patch_size is None):
patch_size = vol_data.shape
for (pi, p) in enumerate(patch_size):
if (p is None):
patch_size[pi] = vol_data.shape[pi]
batch_idx = (- 1)
if variable_batch_size:
batch_size = (yield)
while True:
gen = pl.patch_gen(vol_data, patch_size, stride=patch_stride, rand=patch_rand, rand_seed=patch_rand_seed)
empty_gen = True
for lpatch in gen:
empty_gen = False
lpatch = _categorical_prep(lpatch, nb_labels_reshape, keep_vol_size, patch_size)
if (collapse_2d is not None):
lpatch = np.squeeze(lpatch, (collapse_2d + 1))
if (batch_idx == (- 1)):
if (batch_size == 1):
patch_data_batch = lpatch
else:
patch_data_batch = np.zeros([batch_size, *lpatch.shape[1:]])
patch_data_batch[0, :] = lpatch
else:
patch_data_batch[(batch_idx + 1), :] = lpatch
batch_idx += 1
if (batch_idx == (batch_size - 1)):
batch_idx = (- 1)
batch_size_y = (yield patch_data_batch)
if variable_batch_size:
batch_size = batch_size_y
assert (not empty_gen), ('generator was empty. vol size was %s' % ''.join([('%d ' % d) for d in vol_data.shape]))
if (not infinite):
if (batch_idx >= 0):
patch_data_batch = patch_data_batch[:(batch_idx + 1), :]
(yield patch_data_batch)
break
| -6,907,692,403,567,869,000
|
generate patches from volume for keras package
Yields:
patch: nd array of shape [batch_size, *patch_size], unless resized via nb_labels_reshape
|
ext/neuron/neuron/generators.py
|
patch
|
adriaan16/brainstorm
|
python
|
def patch(vol_data, patch_size, patch_stride=1, nb_labels_reshape=1, keep_vol_size=False, batch_size=1, collapse_2d=None, patch_rand=False, patch_rand_seed=None, variable_batch_size=False, infinite=False):
'\n generate patches from volume for keras package\n\n Yields:\n patch: nd array of shape [batch_size, *patch_size], unless resized via nb_labels_reshape\n '
assert (batch_size >= 1), 'batch_size should be at least 1'
if (patch_size is None):
patch_size = vol_data.shape
for (pi, p) in enumerate(patch_size):
if (p is None):
patch_size[pi] = vol_data.shape[pi]
batch_idx = (- 1)
if variable_batch_size:
batch_size = (yield)
while True:
gen = pl.patch_gen(vol_data, patch_size, stride=patch_stride, rand=patch_rand, rand_seed=patch_rand_seed)
empty_gen = True
for lpatch in gen:
empty_gen = False
lpatch = _categorical_prep(lpatch, nb_labels_reshape, keep_vol_size, patch_size)
if (collapse_2d is not None):
lpatch = np.squeeze(lpatch, (collapse_2d + 1))
if (batch_idx == (- 1)):
if (batch_size == 1):
patch_data_batch = lpatch
else:
patch_data_batch = np.zeros([batch_size, *lpatch.shape[1:]])
patch_data_batch[0, :] = lpatch
else:
patch_data_batch[(batch_idx + 1), :] = lpatch
batch_idx += 1
if (batch_idx == (batch_size - 1)):
batch_idx = (- 1)
batch_size_y = (yield patch_data_batch)
if variable_batch_size:
batch_size = batch_size_y
assert (not empty_gen), ('generator was empty. vol size was %s' % ''.join([('%d ' % d) for d in vol_data.shape]))
if (not infinite):
if (batch_idx >= 0):
patch_data_batch = patch_data_batch[:(batch_idx + 1), :]
(yield patch_data_batch)
break
|
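A small sketch of driving patch directly on an in-memory array (shapes are illustrative, and nb_labels_reshape=0 is assumed to leave intensity values untouched, as in the vol generator above):
import numpy as np
vol_data = np.random.rand(64, 64, 64)
gen = patch(vol_data, [32, 32, 32], patch_stride=16, batch_size=4, nb_labels_reshape=0)
first = next(gen)   # stacked patches with a leading batch dimension of 4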
def vol_seg(volpath, segpath, proc_vol_fn=None, proc_seg_fn=None, verbose=False, name='vol_seg', ext='.npz', nb_restart_cycle=None, nb_labels_reshape=(- 1), collapse_2d=None, force_binary=False, nb_input_feats=1, relabel=None, vol_rand_seed=None, seg_binary=False, vol_subname='norm', seg_subname='aseg', **kwargs):
'\n generator with (volume, segmentation)\n\n verbose is passed down to the base generators.py primitive generator (e.g. vol, here)\n\n ** kwargs are any named arguments for vol(...),\n except verbose, data_proc_fn, ext, nb_labels_reshape and name\n (which this function will control when calling vol())\n '
vol_gen = vol(volpath, **kwargs, ext=ext, nb_restart_cycle=nb_restart_cycle, collapse_2d=collapse_2d, force_binary=False, relabel=None, data_proc_fn=proc_vol_fn, nb_labels_reshape=1, name=(name + ' vol'), verbose=verbose, nb_feats=nb_input_feats, vol_rand_seed=vol_rand_seed)
vol_files = [f.replace(vol_subname, seg_subname) for f in _get_file_list(volpath, ext, vol_rand_seed)]
seg_gen = vol(segpath, **kwargs, ext=ext, nb_restart_cycle=nb_restart_cycle, collapse_2d=collapse_2d, force_binary=force_binary, relabel=relabel, vol_rand_seed=vol_rand_seed, data_proc_fn=proc_seg_fn, nb_labels_reshape=nb_labels_reshape, keep_vol_size=True, expected_files=vol_files, name=(name + ' seg'), binary=seg_binary, verbose=False)
while 1:
input_vol = next(vol_gen).astype('float16')
output_vol = next(seg_gen).astype('float16')
(yield (input_vol, output_vol))
| 847,370,237,624,995,200
|
generator with (volume, segmentation)
verbose is passed down to the base generators.py primitive generator (e.g. vol, here)
** kwargs are any named arguments for vol(...),
except verbose, data_proc_fn, ext, nb_labels_reshape and name
(which this function will control when calling vol())
|
ext/neuron/neuron/generators.py
|
vol_seg
|
adriaan16/brainstorm
|
python
|
def vol_seg(volpath, segpath, proc_vol_fn=None, proc_seg_fn=None, verbose=False, name='vol_seg', ext='.npz', nb_restart_cycle=None, nb_labels_reshape=(- 1), collapse_2d=None, force_binary=False, nb_input_feats=1, relabel=None, vol_rand_seed=None, seg_binary=False, vol_subname='norm', seg_subname='aseg', **kwargs):
'\n generator with (volume, segmentation)\n\n verbose is passed down to the base generators.py primitive generator (e.g. vol, here)\n\n ** kwargs are any named arguments for vol(...),\n except verbose, data_proc_fn, ext, nb_labels_reshape and name\n (which this function will control when calling vol())\n '
vol_gen = vol(volpath, **kwargs, ext=ext, nb_restart_cycle=nb_restart_cycle, collapse_2d=collapse_2d, force_binary=False, relabel=None, data_proc_fn=proc_vol_fn, nb_labels_reshape=1, name=(name + ' vol'), verbose=verbose, nb_feats=nb_input_feats, vol_rand_seed=vol_rand_seed)
vol_files = [f.replace(vol_subname, seg_subname) for f in _get_file_list(volpath, ext, vol_rand_seed)]
seg_gen = vol(segpath, **kwargs, ext=ext, nb_restart_cycle=nb_restart_cycle, collapse_2d=collapse_2d, force_binary=force_binary, relabel=relabel, vol_rand_seed=vol_rand_seed, data_proc_fn=proc_seg_fn, nb_labels_reshape=nb_labels_reshape, keep_vol_size=True, expected_files=vol_files, name=(name + ' seg'), binary=seg_binary, verbose=False)
while 1:
input_vol = next(vol_gen).astype('float16')
output_vol = next(seg_gen).astype('float16')
(yield (input_vol, output_vol))
|
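A hedged usage sketch for vol_seg: paired intensity/segmentation volumes whose filenames differ only by the 'norm'/'aseg' substrings. Paths are assumptions and 30 is a hypothetical label count:
# e.g. vols/subj01_norm.npz pairs with asegs/subj01_aseg.npz
gen = vol_seg('/path/to/vols', '/path/to/asegs',
              batch_size=4, nb_labels_reshape=30)
x, y = next(gen)   # x: intensity batch, y: one-hot segmentation batch
# the (x, y) pairs can be fed straight to a Keras fit_generator-style loop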
def vol_cat(volpaths, crop=None, resize_shape=None, rescale=None, verbose=False, name='vol_cat', ext='.npz', nb_labels_reshape=(- 1), vol_rand_seed=None, **kwargs):
'\n generator with (volume, binary_bit) (random order)\n ONLY works with batch size of 1 for now\n\n verbose is passed down to the base generators.py primitive generator (e.g. vol, here)\n '
folders = [f for f in sorted(os.listdir(volpaths))]
proc_vol_fn = (lambda x: nrn_proc.vol_proc(x, crop=crop, resize_shape=resize_shape, interp_order=2, rescale=rescale))
generators = ()
generators_len = ()
for folder in folders:
vol_gen = vol(os.path.join(volpaths, folder), **kwargs, ext=ext, vol_rand_seed=vol_rand_seed, data_proc_fn=proc_vol_fn, nb_labels_reshape=1, name=folder, verbose=False)
generators_len += (len(_get_file_list(os.path.join(volpaths, folder), '.npz')),)
generators += (vol_gen,)
bake_data_test = False
if bake_data_test:
print('fake_data_test', file=sys.stderr)
while 1:
order = np.hstack((np.zeros(generators_len[0]), np.ones(generators_len[1]))).astype('int')
np.random.shuffle(order)
for idx in order:
gen = generators[idx]
z = np.zeros([1, 2])
z[(0, idx)] = 1
data = next(gen).astype('float32')
if (bake_data_test and (idx == 0)):
data = (- data)
(yield (data, z))
| 6,298,195,965,135,200,000
|
generator with (volume, binary_bit) (random order)
ONLY works with batch size of 1 for now
verbose is passed down to the base generators.py primitive generator (e.g. vol, here)
|
ext/neuron/neuron/generators.py
|
vol_cat
|
adriaan16/brainstorm
|
python
|
def vol_cat(volpaths, crop=None, resize_shape=None, rescale=None, verbose=False, name='vol_cat', ext='.npz', nb_labels_reshape=(- 1), vol_rand_seed=None, **kwargs):
'\n generator with (volume, binary_bit) (random order)\n ONLY works with batch size of 1 for now\n\n verbose is passed down to the base generators.py primitive generator (e.g. vol, here)\n '
folders = [f for f in sorted(os.listdir(volpaths))]
proc_vol_fn = (lambda x: nrn_proc.vol_proc(x, crop=crop, resize_shape=resize_shape, interp_order=2, rescale=rescale))
generators = ()
generators_len = ()
for folder in folders:
vol_gen = vol(os.path.join(volpaths, folder), **kwargs, ext=ext, vol_rand_seed=vol_rand_seed, data_proc_fn=proc_vol_fn, nb_labels_reshape=1, name=folder, verbose=False)
generators_len += (len(_get_file_list(os.path.join(volpaths, folder), '.npz')),)
generators += (vol_gen,)
bake_data_test = False
if bake_data_test:
print('fake_data_test', file=sys.stderr)
while 1:
order = np.hstack((np.zeros(generators_len[0]), np.ones(generators_len[1]))).astype('int')
np.random.shuffle(order)
for idx in order:
gen = generators[idx]
z = np.zeros([1, 2])
z[(0, idx)] = 1
data = next(gen).astype('float32')
if (bake_data_test and (idx == 0)):
data = (- data)
(yield (data, z))
|
def add_prior(gen, proc_vol_fn=None, proc_seg_fn=None, prior_type='location', prior_file=None, prior_feed='input', patch_stride=1, patch_size=None, batch_size=1, collapse_2d=None, extract_slice=None, force_binary=False, verbose=False, patch_rand=False, patch_rand_seed=None):
'\n #\n # add a prior generator to a given generator\n # with the number of patches in batch matching output of gen\n '
if (prior_type == 'location'):
prior_vol = nd.volsize2ndgrid(vol_size)
prior_vol = np.transpose(prior_vol, [1, 2, 3, 0])
prior_vol = np.expand_dims(prior_vol, axis=0)
elif (prior_type == 'file'):
with timer.Timer('loading prior', True):
data = np.load(prior_file)
prior_vol = data['prior'].astype('float16')
else:
with timer.Timer('loading prior', True):
prior_vol = prior_file.astype('float16')
if force_binary:
nb_labels = prior_vol.shape[(- 1)]
prior_vol[:, :, :, 1] = np.sum(prior_vol[:, :, :, 1:nb_labels], 3)
prior_vol = np.delete(prior_vol, range(2, nb_labels), 3)
nb_channels = prior_vol.shape[(- 1)]
if (extract_slice is not None):
if isinstance(extract_slice, int):
prior_vol = prior_vol[:, :, extract_slice, np.newaxis, :]
else:
prior_vol = prior_vol[:, :, extract_slice, :]
assert ((np.ndim(prior_vol) == 4) or (np.ndim(prior_vol) == 3)), 'prior is the wrong size'
if (patch_size is None):
patch_size = prior_vol.shape[0:3]
assert (len(patch_size) == len(patch_stride))
prior_gen = patch(prior_vol, [*patch_size, nb_channels], patch_stride=[*patch_stride, nb_channels], batch_size=batch_size, collapse_2d=collapse_2d, keep_vol_size=True, infinite=True, patch_rand=patch_rand, patch_rand_seed=patch_rand_seed, variable_batch_size=True, nb_labels_reshape=0)
assert (next(prior_gen) is None), 'bad prior gen setup'
while 1:
gen_sample = next(gen)
gs_sample = _get_shape(gen_sample)
prior_batch = prior_gen.send(gs_sample)
(yield (gen_sample, prior_batch))
| 2,237,103,450,112,340,500
|
#
# add a prior generator to a given generator
# with the number of patches in batch matching output of gen
|
ext/neuron/neuron/generators.py
|
add_prior
|
adriaan16/brainstorm
|
python
|
def add_prior(gen, proc_vol_fn=None, proc_seg_fn=None, prior_type='location', prior_file=None, prior_feed='input', patch_stride=1, patch_size=None, batch_size=1, collapse_2d=None, extract_slice=None, force_binary=False, verbose=False, patch_rand=False, patch_rand_seed=None):
'\n #\n # add a prior generator to a given generator\n # with the number of patches in batch matching output of gen\n '
if (prior_type == 'location'):
prior_vol = nd.volsize2ndgrid(vol_size)
prior_vol = np.transpose(prior_vol, [1, 2, 3, 0])
prior_vol = np.expand_dims(prior_vol, axis=0)
elif (prior_type == 'file'):
with timer.Timer('loading prior', True):
data = np.load(prior_file)
prior_vol = data['prior'].astype('float16')
else:
with timer.Timer('loading prior', True):
prior_vol = prior_file.astype('float16')
if force_binary:
nb_labels = prior_vol.shape[(- 1)]
prior_vol[:, :, :, 1] = np.sum(prior_vol[:, :, :, 1:nb_labels], 3)
prior_vol = np.delete(prior_vol, range(2, nb_labels), 3)
nb_channels = prior_vol.shape[(- 1)]
if (extract_slice is not None):
if isinstance(extract_slice, int):
prior_vol = prior_vol[:, :, extract_slice, np.newaxis, :]
else:
prior_vol = prior_vol[:, :, extract_slice, :]
assert ((np.ndim(prior_vol) == 4) or (np.ndim(prior_vol) == 3)), 'prior is the wrong size'
if (patch_size is None):
patch_size = prior_vol.shape[0:3]
assert (len(patch_size) == len(patch_stride))
prior_gen = patch(prior_vol, [*patch_size, nb_channels], patch_stride=[*patch_stride, nb_channels], batch_size=batch_size, collapse_2d=collapse_2d, keep_vol_size=True, infinite=True, patch_rand=patch_rand, patch_rand_seed=patch_rand_seed, variable_batch_size=True, nb_labels_reshape=0)
assert (next(prior_gen) is None), 'bad prior gen setup'
while 1:
gen_sample = next(gen)
gs_sample = _get_shape(gen_sample)
prior_batch = prior_gen.send(gs_sample)
(yield (gen_sample, prior_batch))
|
def vol_prior(*args, proc_vol_fn=None, proc_seg_fn=None, prior_type='location', prior_file=None, prior_feed='input', patch_stride=1, patch_size=None, batch_size=1, collapse_2d=None, extract_slice=None, force_binary=False, nb_input_feats=1, verbose=False, vol_rand_seed=None, patch_rand=False, **kwargs):
'\n generator that appends prior to (volume, segmentation) depending on input\n e.g. could be ((volume, prior), segmentation)\n '
patch_rand_seed = None
if patch_rand:
patch_rand_seed = np.random.random()
vol_gen = vol(*args, **kwargs, collapse_2d=collapse_2d, force_binary=False, verbose=verbose, vol_rand_seed=vol_rand_seed)
gen = vol(*args, **kwargs, proc_vol_fn=None, proc_seg_fn=None, collapse_2d=collapse_2d, extract_slice=extract_slice, force_binary=force_binary, verbose=verbose, patch_size=patch_size, patch_stride=patch_stride, batch_size=batch_size, vol_rand_seed=vol_rand_seed, patch_rand=patch_rand, patch_rand_seed=patch_rand_seed, nb_input_feats=nb_input_feats)
pgen = add_prior(gen, proc_vol_fn=proc_vol_fn, proc_seg_fn=proc_seg_fn, prior_type=prior_type, prior_file=prior_file, prior_feed=prior_feed, patch_stride=patch_stride, patch_size=patch_size, batch_size=batch_size, collapse_2d=collapse_2d, extract_slice=extract_slice, force_binary=force_binary, verbose=verbose, patch_rand=patch_rand, patch_rand_seed=patch_rand_seed, vol_rand_seed=vol_rand_seed)
while 1:
(gen_sample, prior_batch) = next(pgen)
(input_vol, output_vol) = gen_sample
if (prior_feed == 'input'):
(yield ([input_vol, prior_batch], output_vol))
else:
assert (prior_feed == 'output')
(yield (input_vol, [output_vol, prior_batch]))
| -1,544,022,580,091,930,400
|
generator that appends prior to (volume, segmentation) depending on input
e.g. could be ((volume, prior), segmentation)
|
ext/neuron/neuron/generators.py
|
vol_prior
|
adriaan16/brainstorm
|
python
|
def vol_prior(*args, proc_vol_fn=None, proc_seg_fn=None, prior_type='location', prior_file=None, prior_feed='input', patch_stride=1, patch_size=None, batch_size=1, collapse_2d=None, extract_slice=None, force_binary=False, nb_input_feats=1, verbose=False, vol_rand_seed=None, patch_rand=False, **kwargs):
'\n generator that appends prior to (volume, segmentation) depending on input\n e.g. could be ((volume, prior), segmentation)\n '
patch_rand_seed = None
if patch_rand:
patch_rand_seed = np.random.random()
vol_gen = vol(*args, **kwargs, collapse_2d=collapse_2d, force_binary=False, verbose=verbose, vol_rand_seed=vol_rand_seed)
gen = vol(*args, **kwargs, proc_vol_fn=None, proc_seg_fn=None, collapse_2d=collapse_2d, extract_slice=extract_slice, force_binary=force_binary, verbose=verbose, patch_size=patch_size, patch_stride=patch_stride, batch_size=batch_size, vol_rand_seed=vol_rand_seed, patch_rand=patch_rand, patch_rand_seed=patch_rand_seed, nb_input_feats=nb_input_feats)
pgen = add_prior(gen, proc_vol_fn=proc_vol_fn, proc_seg_fn=proc_seg_fn, prior_type=prior_type, prior_file=prior_file, prior_feed=prior_feed, patch_stride=patch_stride, patch_size=patch_size, batch_size=batch_size, collapse_2d=collapse_2d, extract_slice=extract_slice, force_binary=force_binary, verbose=verbose, patch_rand=patch_rand, patch_rand_seed=patch_rand_seed, vol_rand_seed=vol_rand_seed)
while 1:
(gen_sample, prior_batch) = next(pgen)
(input_vol, output_vol) = gen_sample
if (prior_feed == 'input'):
(yield ([input_vol, prior_batch], output_vol))
else:
assert (prior_feed == 'output')
(yield (input_vol, [output_vol, prior_batch]))
|
def vol_seg_prior(*args, proc_vol_fn=None, proc_seg_fn=None, prior_type='location', prior_file=None, prior_feed='input', patch_stride=1, patch_size=None, batch_size=1, collapse_2d=None, extract_slice=None, force_binary=False, nb_input_feats=1, verbose=False, vol_rand_seed=None, patch_rand=None, **kwargs):
'\n generator that appends prior to (volume, segmentation) depending on input\n e.g. could be ((volume, prior), segmentation)\n '
patch_rand_seed = None
if patch_rand:
patch_rand_seed = np.random.random()
gen = vol_seg(*args, **kwargs, proc_vol_fn=None, proc_seg_fn=None, collapse_2d=collapse_2d, extract_slice=extract_slice, force_binary=force_binary, verbose=verbose, patch_size=patch_size, patch_stride=patch_stride, batch_size=batch_size, vol_rand_seed=vol_rand_seed, patch_rand=patch_rand, patch_rand_seed=patch_rand_seed, nb_input_feats=nb_input_feats)
pgen = add_prior(gen, proc_vol_fn=proc_vol_fn, proc_seg_fn=proc_seg_fn, prior_type=prior_type, prior_file=prior_file, prior_feed=prior_feed, patch_stride=patch_stride, patch_size=patch_size, batch_size=batch_size, collapse_2d=collapse_2d, extract_slice=extract_slice, force_binary=force_binary, verbose=verbose, patch_rand=patch_rand, patch_rand_seed=patch_rand_seed)
while 1:
(gen_sample, prior_batch) = next(pgen)
(input_vol, output_vol) = gen_sample
if (prior_feed == 'input'):
(yield ([input_vol, prior_batch], output_vol))
else:
assert (prior_feed == 'output')
(yield (input_vol, [output_vol, prior_batch]))
| 1,835,908,042,440,738,000
|
generator that appends prior to (volume, segmentation) depending on input
e.g. could be ((volume, prior), segmentation)
|
ext/neuron/neuron/generators.py
|
vol_seg_prior
|
adriaan16/brainstorm
|
python
|
def vol_seg_prior(*args, proc_vol_fn=None, proc_seg_fn=None, prior_type='location', prior_file=None, prior_feed='input', patch_stride=1, patch_size=None, batch_size=1, collapse_2d=None, extract_slice=None, force_binary=False, nb_input_feats=1, verbose=False, vol_rand_seed=None, patch_rand=None, **kwargs):
'\n generator that appends prior to (volume, segmentation) depending on input\n e.g. could be ((volume, prior), segmentation)\n '
patch_rand_seed = None
if patch_rand:
patch_rand_seed = np.random.random()
gen = vol_seg(*args, **kwargs, proc_vol_fn=None, proc_seg_fn=None, collapse_2d=collapse_2d, extract_slice=extract_slice, force_binary=force_binary, verbose=verbose, patch_size=patch_size, patch_stride=patch_stride, batch_size=batch_size, vol_rand_seed=vol_rand_seed, patch_rand=patch_rand, patch_rand_seed=patch_rand_seed, nb_input_feats=nb_input_feats)
pgen = add_prior(gen, proc_vol_fn=proc_vol_fn, proc_seg_fn=proc_seg_fn, prior_type=prior_type, prior_file=prior_file, prior_feed=prior_feed, patch_stride=patch_stride, patch_size=patch_size, batch_size=batch_size, collapse_2d=collapse_2d, extract_slice=extract_slice, force_binary=force_binary, verbose=verbose, patch_rand=patch_rand, patch_rand_seed=patch_rand_seed)
while 1:
(gen_sample, prior_batch) = next(pgen)
(input_vol, output_vol) = gen_sample
if (prior_feed == 'input'):
(yield ([input_vol, prior_batch], output_vol))
else:
assert (prior_feed == 'output')
(yield (input_vol, [output_vol, prior_batch]))
|
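A sketch of how the prior-augmented generator is typically consumed: with prior_feed='input' the prior patch rides along with the intensity volume as a second model input. The paths, prior file, label count and patch settings are all assumptions:
gen = vol_seg_prior('/path/to/vols', '/path/to/asegs',
                    prior_type='file', prior_file='/path/to/prior.npz',
                    prior_feed='input', batch_size=4, nb_labels_reshape=30,
                    patch_size=[32, 32, 32], patch_stride=[16, 16, 16])
(x_vol, x_prior), y = next(gen)   # two model inputs, one segmentation target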
def vol_seg_hack(volpath, segpath, proc_vol_fn=None, proc_seg_fn=None, verbose=False, name='vol_seg', ext='.npz', nb_restart_cycle=None, nb_labels_reshape=(- 1), collapse_2d=None, force_binary=False, nb_input_feats=1, relabel=None, vol_rand_seed=None, seg_binary=False, vol_subname='norm', seg_subname='aseg', **kwargs):
'\n generator with (volume, segmentation)\n\n verbose is passed down to the base generators.py primitive generator (e.g. vol, here)\n\n ** kwargs are any named arguments for vol(...),\n except verbose, data_proc_fn, ext, nb_labels_reshape and name\n (which this function will control when calling vol())\n '
vol_gen = vol(volpath, **kwargs, ext=ext, nb_restart_cycle=nb_restart_cycle, collapse_2d=collapse_2d, force_binary=False, relabel=None, data_proc_fn=proc_vol_fn, nb_labels_reshape=1, name=(name + ' vol'), verbose=verbose, nb_feats=nb_input_feats, vol_rand_seed=vol_rand_seed)
while 1:
input_vol = next(vol_gen).astype('float16')
(yield input_vol)
| -7,127,576,377,244,396,000
|
generator with (volume, segmentation)
verbose is passed down to the base generators.py primitive generator (e.g. vol, here)
** kwargs are any named arguments for vol(...),
except verbose, data_proc_fn, ext, nb_labels_reshape and name
(which this function will control when calling vol())
|
ext/neuron/neuron/generators.py
|
vol_seg_hack
|
adriaan16/brainstorm
|
python
|
def vol_seg_hack(volpath, segpath, proc_vol_fn=None, proc_seg_fn=None, verbose=False, name='vol_seg', ext='.npz', nb_restart_cycle=None, nb_labels_reshape=(- 1), collapse_2d=None, force_binary=False, nb_input_feats=1, relabel=None, vol_rand_seed=None, seg_binary=False, vol_subname='norm', seg_subname='aseg', **kwargs):
'\n generator with (volume, segmentation)\n\n verbose is passed down to the base generators.py primitive generator (e.g. vol, here)\n\n ** kwargs are any named arguments for vol(...),\n except verbose, data_proc_fn, ext, nb_labels_reshape and name\n (which this function will control when calling vol())\n '
vol_gen = vol(volpath, **kwargs, ext=ext, nb_restart_cycle=nb_restart_cycle, collapse_2d=collapse_2d, force_binary=False, relabel=None, data_proc_fn=proc_vol_fn, nb_labels_reshape=1, name=(name + ' vol'), verbose=verbose, nb_feats=nb_input_feats, vol_rand_seed=vol_rand_seed)
while 1:
input_vol = next(vol_gen).astype('float16')
(yield input_vol)
|
def vol_sr_slices(volpath, nb_input_slices, nb_slice_spacing, batch_size=1, ext='.npz', vol_rand_seed=None, nb_restart_cycle=None, name='vol_sr_slices', rand_slices=True, simulate_whole_sparse_vol=False, verbose=False):
'\n default generator for slice-wise super resolution\n '
def indices_to_batch(vol_data, start_indices, nb_slices_in_subvol, nb_slice_spacing):
idx = start_indices[0]
output_batch = np.expand_dims(vol_data[:, :, idx:(idx + nb_slices_in_subvol)], 0)
input_batch = np.expand_dims(vol_data[:, :, idx:(idx + nb_slices_in_subvol):(nb_slice_spacing + 1)], 0)
for idx in start_indices[1:]:
out_sel = np.expand_dims(vol_data[:, :, idx:(idx + nb_slices_in_subvol)], 0)
output_batch = np.vstack([output_batch, out_sel])
input_batch = np.vstack([input_batch, np.expand_dims(vol_data[:, :, idx:(idx + nb_slices_in_subvol):(nb_slice_spacing + 1)], 0)])
output_batch = np.reshape(output_batch, [batch_size, (- 1), output_batch.shape[(- 1)]])
return (input_batch, output_batch)
print('vol_sr_slices: SHOULD PROPERLY RANDOMIZE across different subjects', file=sys.stderr)
volfiles = _get_file_list(volpath, ext, vol_rand_seed)
nb_files = len(volfiles)
if (nb_restart_cycle is None):
nb_restart_cycle = nb_files
nb_slices_in_subvol = (((nb_input_slices - 1) * (nb_slice_spacing + 1)) + 1)
fileidx = (- 1)
while 1:
fileidx = np.mod((fileidx + 1), nb_restart_cycle)
if (verbose and (fileidx == 0)):
print(('starting %s cycle' % name))
try:
vol_data = _load_medical_volume(os.path.join(volpath, volfiles[fileidx]), ext, verbose)
except:
debug_error_msg = '#files: %d, fileidx: %d, nb_restart_cycle: %d. error: %s'
print((debug_error_msg % (len(volfiles), fileidx, nb_restart_cycle, sys.exc_info()[0])))
raise
nb_slices = vol_data.shape[2]
nb_start_slices = ((nb_slices - nb_slices_in_subvol) + 1)
if simulate_whole_sparse_vol:
init_slice = 0
if rand_slices:
init_slice = np.random.randint(0, high=(nb_start_slices - 1))
all_start_indices = list(range(init_slice, nb_start_slices, (nb_slice_spacing + 1)))
for batch_start in range(0, len(all_start_indices), (batch_size * (nb_input_slices - 1))):
start_indices = [all_start_indices[s] for s in range(batch_start, (batch_start + batch_size))]
(input_batch, output_batch) = indices_to_batch(vol_data, start_indices, nb_slices_in_subvol, nb_slice_spacing)
(yield (input_batch, output_batch))
elif rand_slices:
assert (not simulate_whole_sparse_vol)
start_indices = np.random.choice(range(nb_start_slices), size=batch_size, replace=False)
(input_batch, output_batch) = indices_to_batch(vol_data, start_indices, nb_slices_in_subvol, nb_slice_spacing)
(yield (input_batch, output_batch))
else:
for batch_start in range(0, nb_start_slices, batch_size):
start_indices = list(range(batch_start, (batch_start + batch_size)))
(input_batch, output_batch) = indices_to_batch(vol_data, start_indices, nb_slices_in_subvol, nb_slice_spacing)
(yield (input_batch, output_batch))
| 5,147,564,931,928,816,000
|
default generator for slice-wise super resolution
|
ext/neuron/neuron/generators.py
|
vol_sr_slices
|
adriaan16/brainstorm
|
python
|
def vol_sr_slices(volpath, nb_input_slices, nb_slice_spacing, batch_size=1, ext='.npz', vol_rand_seed=None, nb_restart_cycle=None, name='vol_sr_slices', rand_slices=True, simulate_whole_sparse_vol=False, verbose=False):
'\n \n '
def indices_to_batch(vol_data, start_indices, nb_slices_in_subvol, nb_slice_spacing):
idx = start_indices[0]
output_batch = np.expand_dims(vol_data[:, :, idx:(idx + nb_slices_in_subvol)], 0)
input_batch = np.expand_dims(vol_data[:, :, idx:(idx + nb_slices_in_subvol):(nb_slice_spacing + 1)], 0)
for idx in start_indices[1:]:
out_sel = np.expand_dims(vol_data[:, :, idx:(idx + nb_slices_in_subvol)], 0)
output_batch = np.vstack([output_batch, out_sel])
input_batch = np.vstack([input_batch, np.expand_dims(vol_data[:, :, idx:(idx + nb_slices_in_subvol):(nb_slice_spacing + 1)], 0)])
output_batch = np.reshape(output_batch, [batch_size, (- 1), output_batch.shape[(- 1)]])
return (input_batch, output_batch)
print('vol_sr_slices: SHOULD PROPERLY RANDOMIZE across different subjects', file=sys.stderr)
volfiles = _get_file_list(volpath, ext, vol_rand_seed)
nb_files = len(volfiles)
if (nb_restart_cycle is None):
nb_restart_cycle = nb_files
nb_slices_in_subvol = (((nb_input_slices - 1) * (nb_slice_spacing + 1)) + 1)
fileidx = (- 1)
while 1:
fileidx = np.mod((fileidx + 1), nb_restart_cycle)
if (verbose and (fileidx == 0)):
print(('starting %s cycle' % name))
try:
vol_data = _load_medical_volume(os.path.join(volpath, volfiles[fileidx]), ext, verbose)
except:
debug_error_msg = '#files: %d, fileidx: %d, nb_restart_cycle: %d. error: %s'
print((debug_error_msg % (len(volfiles), fileidx, nb_restart_cycle, sys.exc_info()[0])))
raise
nb_slices = vol_data.shape[2]
nb_start_slices = ((nb_slices - nb_slices_in_subvol) + 1)
if simulate_whole_sparse_vol:
init_slice = 0
if rand_slices:
init_slice = np.random.randint(0, high=(nb_start_slices - 1))
all_start_indices = list(range(init_slice, nb_start_slices, (nb_slice_spacing + 1)))
for batch_start in range(0, len(all_start_indices), (batch_size * (nb_input_slices - 1))):
start_indices = [all_start_indices[s] for s in range(batch_start, (batch_start + batch_size))]
(input_batch, output_batch) = indices_to_batch(vol_data, start_indices, nb_slices_in_subvol, nb_slice_spacing)
(yield (input_batch, output_batch))
elif rand_slices:
assert (not simulate_whole_sparse_vol)
start_indices = np.random.choice(range(nb_start_slices), size=batch_size, replace=False)
(input_batch, output_batch) = indices_to_batch(vol_data, start_indices, nb_slices_in_subvol, nb_slice_spacing)
(yield (input_batch, output_batch))
else:
for batch_start in range(0, nb_start_slices, batch_size):
start_indices = list(range(batch_start, (batch_start + batch_size)))
(input_batch, output_batch) = indices_to_batch(vol_data, start_indices, nb_slices_in_subvol, nb_slice_spacing)
(yield (input_batch, output_batch))
|
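To make the slice arithmetic in vol_sr_slices concrete: with nb_input_slices=3 and nb_slice_spacing=2, each example spans (3 - 1) * (2 + 1) + 1 = 7 consecutive slices; the sparse input keeps every third slice of that span and the dense target keeps all seven. A tiny indexing sketch on a toy array:
import numpy as np
nb_input_slices, nb_slice_spacing = 3, 2
nb_slices_in_subvol = (nb_input_slices - 1) * (nb_slice_spacing + 1) + 1   # 7
vol_data = np.arange(4 * 4 * 10).reshape(4, 4, 10)   # 10 slices along the last axis
idx = 0   # start slice of one subvolume
target = vol_data[:, :, idx:idx + nb_slices_in_subvol]                        # slices 0..6
sparse = vol_data[:, :, idx:idx + nb_slices_in_subvol:nb_slice_spacing + 1]   # slices 0, 3, 6
print(target.shape, sparse.shape)   # (4, 4, 7) (4, 4, 3)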
def img_seg(volpath, segpath, batch_size=1, verbose=False, nb_restart_cycle=None, name='img_seg', ext='.png', vol_rand_seed=None, **kwargs):
'\n generator for (image, segmentation)\n '
def imggen(path, ext, nb_restart_cycle=None):
'\n TODO: should really use the volume generators for this\n '
files = _get_file_list(path, ext, vol_rand_seed)
if (nb_restart_cycle is None):
nb_restart_cycle = len(files)
idx = (- 1)
while 1:
idx = np.mod((idx + 1), nb_restart_cycle)
im = scipy.misc.imread(os.path.join(path, files[idx]))[:, :, 0]
(yield im.reshape(((1,) + im.shape)))
img_gen = imggen(volpath, ext, nb_restart_cycle)
seg_gen = imggen(segpath, ext)
while 1:
input_vol = np.vstack([(next(img_gen).astype('float16') / 255) for i in range(batch_size)])
input_vol = np.expand_dims(input_vol, axis=(- 1))
output_vols = [np_utils.to_categorical(next(seg_gen).astype('int8'), num_classes=2) for i in range(batch_size)]
output_vol = np.vstack([np.expand_dims(f, axis=0) for f in output_vols])
(yield (input_vol, output_vol))
| 8,615,579,906,294,732,000
|
generator for (image, segmentation)
|
ext/neuron/neuron/generators.py
|
img_seg
|
adriaan16/brainstorm
|
python
|
def img_seg(volpath, segpath, batch_size=1, verbose=False, nb_restart_cycle=None, name='img_seg', ext='.png', vol_rand_seed=None, **kwargs):
'\n \n '
def imggen(path, ext, nb_restart_cycle=None):
'\n TODO: should really use the volume generators for this\n '
files = _get_file_list(path, ext, vol_rand_seed)
if (nb_restart_cycle is None):
nb_restart_cycle = len(files)
idx = (- 1)
while 1:
idx = np.mod((idx + 1), nb_restart_cycle)
im = scipy.misc.imread(os.path.join(path, files[idx]))[:, :, 0]
(yield im.reshape(((1,) + im.shape)))
img_gen = imggen(volpath, ext, nb_restart_cycle)
seg_gen = imggen(segpath, ext)
while 1:
input_vol = np.vstack([(next(img_gen).astype('float16') / 255) for i in range(batch_size)])
input_vol = np.expand_dims(input_vol, axis=(- 1))
output_vols = [np_utils.to_categorical(next(seg_gen).astype('int8'), num_classes=2) for i in range(batch_size)]
output_vol = np.vstack([np.expand_dims(f, axis=0) for f in output_vols])
(yield (input_vol, output_vol))
|
def _get_file_list(volpath, ext=None, vol_rand_seed=None):
'\n get a list of files at the given path with the given extension\n '
files = [f for f in sorted(os.listdir(volpath)) if ((ext is None) or f.endswith(ext))]
if (vol_rand_seed is not None):
np.random.seed(vol_rand_seed)
files = np.random.permutation(files).tolist()
return files
| 3,140,400,078,776,378,000
|
get a list of files at the given path with the given extension
|
ext/neuron/neuron/generators.py
|
_get_file_list
|
adriaan16/brainstorm
|
python
|
def _get_file_list(volpath, ext=None, vol_rand_seed=None):
'\n \n '
files = [f for f in sorted(os.listdir(volpath)) if ((ext is None) or f.endswith(ext))]
if (vol_rand_seed is not None):
np.random.seed(vol_rand_seed)
files = np.random.permutation(files).tolist()
return files
|
def _load_medical_volume(filename, ext, verbose=False):
'\n load a medical volume from one of a number of file types\n '
with timer.Timer('load_vol', (verbose >= 2)):
if (ext == '.npz'):
vol_file = np.load(filename)
vol_data = vol_file['vol_data']
elif (ext == 'npy'):
vol_data = np.load(filename)
elif ((ext == '.mgz') or (ext == '.nii') or (ext == '.nii.gz')):
vol_med = nib.load(filename)
vol_data = vol_med.get_data()
else:
raise ValueError(('Unexpected extension %s' % ext))
return vol_data
| 1,786,754,206,587,903,200
|
load a medical volume from one of a number of file types
|
ext/neuron/neuron/generators.py
|
_load_medical_volume
|
adriaan16/brainstorm
|
python
|
def _load_medical_volume(filename, ext, verbose=False):
'\n \n '
with timer.Timer('load_vol', (verbose >= 2)):
if (ext == '.npz'):
vol_file = np.load(filename)
vol_data = vol_file['vol_data']
elif (ext == 'npy'):
vol_data = np.load(filename)
elif ((ext == '.mgz') or (ext == '.nii') or (ext == '.nii.gz')):
vol_med = nib.load(filename)
vol_data = vol_med.get_data()
else:
raise ValueError(('Unexpected extension %s' % ext))
return vol_data
|
def _to_categorical(y, num_classes=None, reshape=True):
'\n # copy of keras.utils.np_utils.to_categorical, but with a boolean matrix instead of float\n\n Converts a class vector (integers) to binary class matrix.\n\n E.g. for use with categorical_crossentropy.\n\n # Arguments\n y: class vector to be converted into a matrix\n (integers from 0 to num_classes).\n num_classes: total number of classes.\n\n # Returns\n A binary matrix representation of the input.\n '
oshape = y.shape
y = np.array(y, dtype='int').ravel()
if (not num_classes):
num_classes = (np.max(y) + 1)
n = y.shape[0]
categorical = np.zeros((n, num_classes), bool)
categorical[(np.arange(n), y)] = 1
if reshape:
categorical = np.reshape(categorical, [*oshape, num_classes])
return categorical
| -1,377,369,910,124,192,800
|
# copy of keras.utils.np_utils.to_categorical, but with a boolean matrix instead of float
Converts a class vector (integers) to binary class matrix.
E.g. for use with categorical_crossentropy.
# Arguments
y: class vector to be converted into a matrix
(integers from 0 to num_classes).
num_classes: total number of classes.
# Returns
A binary matrix representation of the input.
|
ext/neuron/neuron/generators.py
|
_to_categorical
|
adriaan16/brainstorm
|
python
|
def _to_categorical(y, num_classes=None, reshape=True):
'\n # copy of keras.utils.np_utils.to_categorical, but with a boolean matrix instead of float\n\n Converts a class vector (integers) to binary class matrix.\n\n E.g. for use with categorical_crossentropy.\n\n # Arguments\n y: class vector to be converted into a matrix\n (integers from 0 to num_classes).\n num_classes: total number of classes.\n\n # Returns\n A binary matrix representation of the input.\n '
oshape = y.shape
y = np.array(y, dtype='int').ravel()
if (not num_classes):
num_classes = (np.max(y) + 1)
n = y.shape[0]
categorical = np.zeros((n, num_classes), bool)
categorical[(np.arange(n), y)] = 1
if reshape:
categorical = np.reshape(categorical, [*oshape, num_classes])
return categorical
|
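A quick worked example of _to_categorical: a 2x2 label map with three classes becomes a boolean one-hot array with an extra channel axis (the values are illustrative):
import numpy as np
y = np.array([[0, 2],
              [1, 1]])
onehot = _to_categorical(y, num_classes=3)
print(onehot.shape)   # (2, 2, 3)
print(onehot[0, 1])   # [False False  True]  -> class 2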
def _npz_headers(npz, namelist=None):
"\n taken from https://stackoverflow.com/a/43223420\n\n Takes a path to an .npz file, which is a Zip archive of .npy files.\n Generates a sequence of (name, shape, np.dtype).\n\n namelist is a list with variable names, ending in '.npy'. \n e.g. if variable 'var' is in the file, namelist could be ['var.npy']\n "
with zipfile.ZipFile(npz) as archive:
if (namelist is None):
namelist = archive.namelist()
for name in namelist:
if (not name.endswith('.npy')):
continue
npy = archive.open(name)
version = np.lib.format.read_magic(npy)
(shape, fortran, dtype) = np.lib.format._read_array_header(npy, version)
(yield (name[:(- 4)], shape, dtype))
| -6,422,049,485,589,492,000
|
taken from https://stackoverflow.com/a/43223420
Takes a path to an .npz file, which is a Zip archive of .npy files.
Generates a sequence of (name, shape, np.dtype).
namelist is a list with variable names, ending in '.npy'.
e.g. if variable 'var' is in the file, namelist could be ['var.npy']
|
ext/neuron/neuron/generators.py
|
_npz_headers
|
adriaan16/brainstorm
|
python
|
def _npz_headers(npz, namelist=None):
"\n taken from https://stackoverflow.com/a/43223420\n\n Takes a path to an .npz file, which is a Zip archive of .npy files.\n Generates a sequence of (name, shape, np.dtype).\n\n namelist is a list with variable names, ending in '.npy'. \n e.g. if variable 'var' is in the file, namelist could be ['var.npy']\n "
with zipfile.ZipFile(npz) as archive:
if (namelist is None):
namelist = archive.namelist()
for name in namelist:
if (not name.endswith('.npy')):
continue
npy = archive.open(name)
version = np.lib.format.read_magic(npy)
(shape, fortran, dtype) = np.lib.format._read_array_header(npy, version)
(yield (name[:(- 4)], shape, dtype))
|
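A usage sketch for _npz_headers, which reports array names, shapes and dtypes inside an .npz archive without loading the data (the file name and printed values are assumptions):
for name, shape, dtype in _npz_headers('/path/to/subj01_norm.npz'):
    print(name, shape, dtype)   # e.g. vol_data (256, 256, 256) float32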
def imggen(path, ext, nb_restart_cycle=None):
'\n TODO: should really use the volume generators for this\n '
files = _get_file_list(path, ext, vol_rand_seed)
if (nb_restart_cycle is None):
nb_restart_cycle = len(files)
idx = (- 1)
while 1:
idx = np.mod((idx + 1), nb_restart_cycle)
im = scipy.misc.imread(os.path.join(path, files[idx]))[:, :, 0]
(yield im.reshape(((1,) + im.shape)))
| -1,790,853,479,394,038,300
|
TODO: should really use the volume generators for this
|
ext/neuron/neuron/generators.py
|
imggen
|
adriaan16/brainstorm
|
python
|
def imggen(path, ext, nb_restart_cycle=None):
'\n \n '
files = _get_file_list(path, ext, vol_rand_seed)
if (nb_restart_cycle is None):
nb_restart_cycle = len(files)
idx = (- 1)
while 1:
idx = np.mod((idx + 1), nb_restart_cycle)
im = scipy.misc.imread(os.path.join(path, files[idx]))[:, :, 0]
(yield im.reshape(((1,) + im.shape)))
|
def main(argv):
'main method for standalone run'
config_generator = FaucetConfigGenerator()
filepath = '/tmp/faucet_config_dump'
egress = 2
access = 3
devices = 1
topo_type = STACK
argv = argv[1:]
help_msg = '\n <python3> build_config.py -e <egress_switches> -a <access_switches> -d <devices per switch>\n -p <config path> -t <topology type (flat, corp, stack)>\n '
try:
(opts, _) = getopt.getopt(argv, 'he:a:d:p:t:', ['egress=', 'access=', 'devices=', 'path=', 'type='])
except getopt.GetoptError:
print(help_msg)
sys.exit(2)
for (opt, arg) in opts:
if (opt == '-h'):
print(help_msg)
sys.exit()
elif (opt in ('-e', '--egress')):
egress = int(arg)
elif (opt in ('-a', '--access')):
access = int(arg)
elif (opt in ('-d', '--devices')):
devices = int(arg)
elif (opt in ('-p', '--path')):
filepath = arg
elif (opt in ('-t', '--type')):
topo_type = arg
if (topo_type == FLAT):
faucet_config = config_generator.create_flat_faucet_config(access, devices)
elif (topo_type == CORP):
faucet_config = config_generator.create_corp_faucet_config()
elif (topo_type == STACK):
faucet_config = config_generator.create_scale_faucet_config(egress, access, devices)
else:
raise Exception(('Unknown topology type: %s' % topo_type))
config_map = proto_dict(faucet_config)
with open(filepath, 'w') as config_file:
yaml.dump(config_map, config_file)
| 5,454,660,802,360,472,000
|
main method for standalone run
|
testing/python_lib/build_config.py
|
main
|
henry54809/forch
|
python
|
def main(argv):
config_generator = FaucetConfigGenerator()
filepath = '/tmp/faucet_config_dump'
egress = 2
access = 3
devices = 1
topo_type = STACK
argv = argv[1:]
help_msg = '\n <python3> build_config.py -e <egress_switches> -a <access_switches> -d <devices per switch>\n -p <config path> -t <topology type (flat, corp, stack)>\n '
try:
(opts, _) = getopt.getopt(argv, 'he:a:d:p:t:', ['egress=', 'access=', 'devices=', 'path=', 'type='])
except getopt.GetoptError:
print(help_msg)
sys.exit(2)
for (opt, arg) in opts:
if (opt == '-h'):
print(help_msg)
sys.exit()
elif (opt in ('-e', '--egress')):
egress = int(arg)
elif (opt in ('-a', '--access')):
access = int(arg)
elif (opt in ('-d', '--devices')):
devices = int(arg)
elif (opt in ('-p', '--path')):
filepath = arg
elif (opt in ('-t', '--type')):
topo_type = arg
if (topo_type == FLAT):
faucet_config = config_generator.create_flat_faucet_config(access, devices)
elif (topo_type == CORP):
faucet_config = config_generator.create_corp_faucet_config()
elif (topo_type == STACK):
faucet_config = config_generator.create_scale_faucet_config(egress, access, devices)
else:
raise Exception(('Unknown topology type: %s' % topo_type))
config_map = proto_dict(faucet_config)
with open(filepath, 'w') as config_file:
yaml.dump(config_map, config_file)
|
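A hedged example of driving the entry point above programmatically rather than from the shell; the flag values are illustrative, and it assumes the module-level STACK constant equals the string 'stack':

# equivalent to: python3 build_config.py -e 2 -a 3 -d 1 -p /tmp/faucet_config_dump -t stack
main(['build_config.py', '-e', '2', '-a', '3', '-d', '1',
      '-p', '/tmp/faucet_config_dump', '-t', 'stack'])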
def create_scale_faucet_config(self, t1_switches, t2_switches, access_ports):
'Create Faucet config with stacking topology'
setup_vlan = SETUP_VLAN
test_vlan = TEST_VLAN
vlans = {setup_vlan: Vlan(description='Faucet IoT'), test_vlan: Vlan(description='Orchestrated Testing')}
t1_dps = [('nz-kiwi-t1sw%s' % (dp_index + 1)) for dp_index in range(t1_switches)]
t2_dps = [('nz-kiwi-t2sw%s' % (dp_index + 1)) for dp_index in range(t2_switches)]
dps = {}
for (dp_index, dp_name) in enumerate(t1_dps):
tap_vlan = (test_vlan if (not dp_index) else None)
interfaces = self._build_dp_interfaces(dp_index, dps=t1_dps, t2_dps=t2_dps, tagged_vlans=[setup_vlan], tap_vlan=tap_vlan, egress_port=FAUCET_EGRESS_PORT, lacp=True)
dps[dp_name] = self._build_datapath_config((T1_DP_ID_START + dp_index), interfaces, self._generate_dp_mac(T1_DP, dp_index))
for (dp_index, dp_name) in enumerate(t2_dps):
interfaces = self._build_dp_interfaces(dp_index, t1_dps=t1_dps, access_ports=access_ports, native_vlan=setup_vlan, port_acl='uniform_acl', lacp=True)
dps[dp_name] = self._build_datapath_config((T2_DP_ID_START + dp_index), interfaces, self._generate_dp_mac(T2_DP, dp_index))
return FaucetConfig(dps=dps, version=2, include=['uniform.yaml'], vlans=vlans)
| 2,438,252,111,082,969,000
|
Create Faucet config with stacking topology
|
testing/python_lib/build_config.py
|
create_scale_faucet_config
|
henry54809/forch
|
python
|
def create_scale_faucet_config(self, t1_switches, t2_switches, access_ports):
setup_vlan = SETUP_VLAN
test_vlan = TEST_VLAN
vlans = {setup_vlan: Vlan(description='Faucet IoT'), test_vlan: Vlan(description='Orchestrated Testing')}
t1_dps = [('nz-kiwi-t1sw%s' % (dp_index + 1)) for dp_index in range(t1_switches)]
t2_dps = [('nz-kiwi-t2sw%s' % (dp_index + 1)) for dp_index in range(t2_switches)]
dps = {}
for (dp_index, dp_name) in enumerate(t1_dps):
tap_vlan = (test_vlan if (not dp_index) else None)
interfaces = self._build_dp_interfaces(dp_index, dps=t1_dps, t2_dps=t2_dps, tagged_vlans=[setup_vlan], tap_vlan=tap_vlan, egress_port=FAUCET_EGRESS_PORT, lacp=True)
dps[dp_name] = self._build_datapath_config((T1_DP_ID_START + dp_index), interfaces, self._generate_dp_mac(T1_DP, dp_index))
for (dp_index, dp_name) in enumerate(t2_dps):
interfaces = self._build_dp_interfaces(dp_index, t1_dps=t1_dps, access_ports=access_ports, native_vlan=setup_vlan, port_acl='uniform_acl', lacp=True)
dps[dp_name] = self._build_datapath_config((T2_DP_ID_START + dp_index), interfaces, self._generate_dp_mac(T2_DP, dp_index))
return FaucetConfig(dps=dps, version=2, include=['uniform.yaml'], vlans=vlans)
|
def create_flat_faucet_config(self, num_switches, num_access_ports):
'Create Faucet config with flat topology'
setup_vlan = SETUP_VLAN
switches = [('sw%s' % (sw_num + 1)) for sw_num in range(num_switches)]
dps = {}
vlans = {setup_vlan: Vlan(description='Faucet IoT')}
for (sw_num, sw_name) in enumerate(switches):
interfaces = self._build_dp_interfaces(sw_num, dps=switches, egress_port=FAUCET_EGRESS_PORT, tagged_vlans=[setup_vlan], access_ports=num_access_ports, native_vlan=setup_vlan, port_acl='uniform_acl', access_port_start=FLAT_ACCESS_PORT_START, lacp=True)
dps[sw_name] = self._build_datapath_config((FLAT_DP_ID_START + sw_num), interfaces, self._generate_dp_mac(T2_DP, sw_num))
return FaucetConfig(dps=dps, version=2, include=['uniform.yaml'], vlans=vlans)
| -516,401,817,215,027,650
|
Create Faucet config with flat topology
|
testing/python_lib/build_config.py
|
create_flat_faucet_config
|
henry54809/forch
|
python
|
def create_flat_faucet_config(self, num_switches, num_access_ports):
setup_vlan = SETUP_VLAN
switches = [('sw%s' % (sw_num + 1)) for sw_num in range(num_switches)]
dps = {}
vlans = {setup_vlan: Vlan(description='Faucet IoT')}
for (sw_num, sw_name) in enumerate(switches):
interfaces = self._build_dp_interfaces(sw_num, dps=switches, egress_port=FAUCET_EGRESS_PORT, tagged_vlans=[setup_vlan], access_ports=num_access_ports, native_vlan=setup_vlan, port_acl='uniform_acl', access_port_start=FLAT_ACCESS_PORT_START, lacp=True)
dps[sw_name] = self._build_datapath_config((FLAT_DP_ID_START + sw_num), interfaces, self._generate_dp_mac(T2_DP, sw_num))
return FaucetConfig(dps=dps, version=2, include=['uniform.yaml'], vlans=vlans)
|
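For completeness, a hedged sketch exercising the two generator methods above directly; FaucetConfigGenerator, proto_dict, and the module-level VLAN and port constants they rely on are assumed to be importable from the surrounding build_config.py:

generator = FaucetConfigGenerator()
flat_config = generator.create_flat_faucet_config(num_switches=3, num_access_ports=4)
stack_config = generator.create_scale_faucet_config(t1_switches=2, t2_switches=3, access_ports=1)
# both return FaucetConfig protos that main() converts with proto_dict() and dumps as YAML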