content stringlengths 5 1.05M |
|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import logging
import os
import sys
from os.path import expanduser
from bio.ensembl.ontology.loader.db import dal
from bio.ensembl.ontology.loader.models import Ontology, Term, Relation, RelationType, get_one_or_create
# allow ols.py to be run from any path: chdir to the directory containing this file
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
# Root logging configuration: timestamped INFO-level messages.
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(levelname)s - %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger(__name__)
# Silence noisy HTTP connection-pool output from urllib3.
logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)
if __name__ == "__main__":
    # Rebuild the 'phi' (PHIBase identifier) ontology inside the release
    # ensembl_ontology_<N> database: wipe any existing namespaced copy and
    # insert 10000 PHI:<i> terms hanging off an artificial root.
    parser = argparse.ArgumentParser(description='Produce a release calendar')
    parser.add_argument('-v', '--verbose', help='Verbose output', action='store_true')
    parser.add_argument('-e', '--release', type=int, required=True, help='Release number')
    parser.add_argument('-u', '--host_url', type=str, required=False,
                        help='Db Host Url format engine:///user:pass@host:port')
    args = parser.parse_args(sys.argv[1:])
    # 'store_true' makes args.verbose strictly True/False, so the original
    # three-way test (None/False/True) only ever had two reachable outcomes.
    logger.setLevel(logging.DEBUG if args.verbose else logging.INFO)
    logging.getLogger('sqlalchemy.engine').setLevel(logging.WARNING)
    logger.debug('Script arguments %s', args)
    db_name = 'ensembl_ontology_{}'.format(args.release)
    if args.host_url is None:
        # Default: a SQLite database file in the user's home directory.
        db_url = 'sqlite:///' + expanduser("~") + '/' + db_name + '.sqlite'
    else:
        # BUG FIX: the original concatenated host url and db name with no
        # separator, producing an invalid URL unless the user supplied a
        # trailing slash; normalise to exactly one '/'.
        db_url = '{}/{}'.format(args.host_url.rstrip('/'), db_name)
    logger.debug('Db Url set to %s', db_url)
    response = input("Confirm to proceed (y/N)? ")
    if response.upper() != 'Y':
        logging.info('Process cancelled')
        sys.exit(0)
    dal.db_init(db_url)
    with dal.session_scope() as session:
        # Wipe any existing namespaced 'phi' ontology (relations then terms).
        ontologies = session.query(Ontology).filter_by(name='phi', namespace='phibase_identifier').all()
        for ontology in ontologies:
            logger.info('Deleting namespaced ontology %s - %s', ontology.name, ontology.namespace)
            rel = session.query(Relation).filter_by(ontology=ontology).delete()
            res = session.query(Term).filter_by(ontology=ontology).delete()
            logger.info('Wiped %s Terms', res)
            logger.debug('...Done')
        m_ontology, created = get_one_or_create(Ontology, session,
                                                name='phi',
                                                namespace='phibase_identifier',
                                                create_method_kwargs=dict(
                                                    version='1.0',
                                                    title='PHIBase identifier')
                                                )
        relation_type, created = get_one_or_create(RelationType,
                                                   session,
                                                   name='is_a')
        for i in range(10000):
            accession = 'PHI:{}'.format(i)
            # BUG FIX: the original wrote name='{}' % i, which raises
            # TypeError ('%' formatting needs a %-style placeholder);
            # the intended name is the index formatted into the braces.
            term = Term(accession=accession, name='{}'.format(i))
            if i == 0:
                # PHI:0 is the artificial root of the hierarchy.
                term.name = 'phibase identifier'
                term.is_root = 1
            logger.debug('Adding Term %s', accession)
            session.add(term)
            m_ontology.terms.append(term)
            if i != 0:
                # Every non-root term is an 'is_a' child of the root.
                term.add_parent_relation(m_related, relation_type, session)
            else:
                m_related = term
            if i % 100 == 0:
                # Periodic commits keep the transaction small.
                logger.info('Committing transaction')
                session.commit()
        logger.info('...Done')
|
import os, sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from utils import _window_based_iterator
def test_window_based_iterator():
    """_window_based_iterator over '1'..'9' yields (w1, w2, d) triples where
    d equals |int(w2) - int(w1)|, or 1 when both windows are identical."""
    identity = lambda value: value
    for first, second, dist in _window_based_iterator(list('123456789'), 3, identity):
        if first == second:
            assert dist == 1
        else:
            want = abs(int(second) - int(first))
            assert want == dist, f"w1: {first}, w2: {second}, d: {dist}, expected: {want}"
if __name__ == "__main__":
    test_window_based_iterator()
|
import copy
import numpy as np
import time
t0 = time.time()  # wall-clock start for the final timing report
# Each input line is a row of single-digit risk levels.
grid = []
# for line in open("day15_example.txt"):
for line in open("day15_input.txt"):
# for line in open("day15_simple_break.txt"):
    line = line.strip()
    vec = []
    for digit in line:
        vec.append(int(digit))
    grid.append(vec)
def lookup(grid, row, col):
    """Risk level at (row, col) of the virtually tiled grid.

    The puzzle expands the grid by tiling it; every tile step to the right
    or down adds 1 to the cell value, and values above 9 wrap back to 1.

    The original implementation recursed once per tile step; the closed
    form below computes the same value directly (wrap 9 -> 1 is exactly
    "(v - 1) % 9 + 1").
    """
    base = grid[row % len(grid)][col % len(grid[0])]
    shift = row // len(grid) + col // len(grid[0])
    return (base + shift - 1) % 9 + 1
# Distance table, 5x the input grid in each dimension (part-2 expansion),
# initialised to infinity; risk[r][c] = best known path risk to (r, c).
row = [np.inf]*len(grid[0])*5
risk = []
for n in range(len(grid)*5):
    risk.append(row.copy())  # copy so rows are independent lists
def checkOne(risk, row, col, revisit):
    """Relax cell (row, col): set its risk to the best neighbour risk plus
    the cell's own level; queue its neighbours when the value improved."""
    if row == 0 and col == 0:
        # The start cell costs nothing.
        risk[row][col] = 0
        return
    previous = risk[row][col]
    candidates = []
    where = []
    if row > 0:
        candidates.append(risk[row - 1][col])
        where.append((row - 1, col))
    if col > 0:
        candidates.append(risk[row][col - 1])
        where.append((row, col - 1))
    if row + 1 < len(risk):
        candidates.append(risk[row + 1][col])
        where.append((row + 1, col))
    if col + 1 < len(risk[0]):
        candidates.append(risk[row][col + 1])
        where.append((row, col + 1))
    risk[row][col] = min(candidates) + lookup(grid, row, col)
    if risk[row][col] < previous:
        revisit.update(where)
def onePass(risk):
    """Run one full relaxation sweep over every cell; return the set of
    cells whose neighbours may need another look."""
    pending = set()
    for r in range(len(risk)):
        for c in range(len(risk[0])):
            checkOne(risk, r, c, pending)
    return pending
risk[0][0] = 0
# One full sweep seeds the distance table; improved cells queue their
# neighbours for re-relaxation.
revisit = onePass(risk)
pnum = 0
# Keep relaxing until no cell improves (Bellman-Ford-style fixpoint).
while (len(revisit)>0):
    newset = set()
    for (row, col) in revisit:
        checkOne(risk, row, col, newset)
    revisit = newset
    print(revisit)
    pnum += 1
    print(pnum)
# print(risk)
# The last entry of the last row is the puzzle answer.
print(risk[-1])
t1 = time.time()
print("elapsed time:", t1-t0)
|
"""Helper functions for interatcing with Magenta DDSP internals.
"""
import json
import os
from absl import logging
from ddsp.training import train_util
import tensorflow.compat.v2 as tf
from google.cloud import storage
def get_strategy(tpu='', gpus=None):
    """Chooses a distribution strategy.

    AI Platform automatically sets the TF_CONFIG environment variable based
    on the provided config file. When training runs across several VMs a
    different strategy is required than on a single VM; the decision is made
    from the cluster section of TF_CONFIG.

    Args:
      tpu:
        Argument for DDSP library function call.
        Address of the TPU. No TPU if left blank.
      gpus:
        Argument for DDSP library function call.
        List of GPU addresses for synchronous training.

    Returns:
      A distribution strategy.
    """
    tf_config_str = os.environ.get('TF_CONFIG')
    if tf_config_str is None:
        return train_util.get_strategy(tpu=tpu, gpus=gpus)
    logging.info("TFRecord %s", tf_config_str)
    tf_config_dict = json.loads(tf_config_str)
    # TF_CONFIG always names exactly one chief worker in its cluster
    # section; any additional entries mean multi-worker training.
    if len(tf_config_dict["cluster"]) > 1:
        strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
        logging.info('Cluster spec: %s', strategy.cluster_resolver.cluster_spec())
        return strategy
    return train_util.get_strategy(tpu=tpu, gpus=gpus)
def copy_config_file_from_gstorage(gstorage_path, container_path):
    """Downloads a configuration file from the bucket to the container.

    Args:
      gstorage_path:
        Path to the file inside the bucket that needs to be downloaded.
        Format: gs://bucket-name/path/to/file.txt
      container_path:
        Path inside the container where the downloaded file should be stored.
    """
    # BUG FIX: str.strip('gs:/') removes any of the characters g/s/:/ from
    # BOTH ends of the string, so e.g. ".../configs" lost its trailing 's'.
    # Remove only the literal scheme prefix instead.
    if gstorage_path.startswith('gs://'):
        gstorage_path = gstorage_path[len('gs://'):]
    # First path component is the bucket, the remainder is the blob name.
    bucket_name, _, blob_name = gstorage_path.partition('/')
    storage_client = storage.Client()
    bucket = storage_client.bucket(bucket_name)
    blob = bucket.blob(blob_name)
    blob.download_to_filename(container_path)
    logging.info('Downloaded config file inside the container. Current location: %s', container_path)
|
from osgeo import gdal, ogr, osr
import numpy as np
from scipy.interpolate import RectBivariateSpline
import os
import sys
import matplotlib.pyplot as plt
from region import region
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from descartes import PolygonPatch
class terrain:
    """Terrain model: ski-area region polygons plus gridded elevation data,
    with bivariate-spline interpolation for heights and slope gradients."""
    def __init__(self):
        # Make GDAL raise Python exceptions instead of returning error codes.
        gdal.UseExceptions()
        self.load_region("data_terrain/regions")
        self.load_elevation("data_terrain/elevation")
    def getFileNames(folder,file_ending):
        # Plain helper namespaced in the class (no self); called as
        # terrain.getFileNames(...). Recursively collects files under
        # `folder` whose extension matches `file_ending`.
        # NOTE(review): callers pass ('.img') which is a plain string, so
        # `ext in file_ending` is a substring test, not tuple membership —
        # confirm this is intended.
        only_files = []
        for root, dirs, files in os.walk(folder):
            for name in files:
                (base, ext) = os.path.splitext(name)
                if ext in file_ending:
                    only_files.append(os.path.join(root,name))
        return only_files
    def load_region(self,file_location):
        # Load region polygons and keep them for point-in-region queries.
        reg = region()
        reg.load_region(file_location)
        self.regions = reg
    def in_region(self,points):
        # Delegate point-in-region tests to the loaded region object.
        contained = self.regions.in_region(points)
        return contained
    def GetExtent(gt,cols,rows):
        ''' Return list of corner coordinates from a geotransform
        @type gt: C{tuple/list}
        @param gt: geotransform
        @type cols: C{int}
        @param cols: number of columns in the dataset
        @type rows: C{int}
        @param rows: number of rows in the dataset
        @rtype: C{[float,...,float]}
        @return: coordinates of each corner
        '''
        ext=[]
        xarr=[0,cols]
        yarr=[0,rows]
        # Apply the affine geotransform to the four corner pixels.
        for px in xarr:
            for py in yarr:
                x=gt[0]+(px*gt[1])+(py*gt[2])
                y=gt[3]+(px*gt[4])+(py*gt[5])
                ext.append([x,y])
            yarr.reverse()
        return ext
    def ReprojectCoords(coords,src_srs,tgt_srs):
        ''' Reproject a list of x,y coordinates.
        @type geom: C{tuple/list}
        @param geom: List of [[x,y],...[x,y]] coordinates
        @type src_srs: C{osr.SpatialReference}
        @param src_srs: OSR SpatialReference object
        @type tgt_srs: C{osr.SpatialReference}
        @param tgt_srs: OSR SpatialReference object
        @rtype: C{tuple/list}
        @return: List of transformed [[x,y],...[x,y]] coordinates
        '''
        trans_coords=[]
        transform = osr.CoordinateTransformation( src_srs, tgt_srs)
        for x,y in coords:
            # The z component of the transformed point is discarded.
            x,y,z = transform.TransformPoint(x,y)
            trans_coords.append([x,y])
        return trans_coords
    def get_bounds(ds):
        # Corner coordinates of a GDAL dataset, reprojected into the
        # dataset's own geographic CRS; returns two opposite corners.
        gt=ds.GetGeoTransform()
        cols = ds.RasterXSize
        rows = ds.RasterYSize
        ext=terrain.GetExtent(gt,cols,rows)
        src_srs=osr.SpatialReference()
        src_srs.ImportFromWkt(ds.GetProjection())
        #tgt_srs=osr.SpatialReference()
        #tgt_srs.ImportFromEPSG(4326)
        tgt_srs = src_srs.CloneGeogCS()
        geo_ext=terrain.ReprojectCoords(ext,src_srs,tgt_srs)
        return [geo_ext[0],geo_ext[2]]
    def load_elevation(self,file_location):
        # Load every .img elevation raster under `file_location`, crop each
        # to the overall region bounding box, and build per-dataset spline
        # interpolators for heights and gradients.
        print("Loading Elevation Data")
        file_names = terrain.getFileNames(file_location,('.img'))
        # Rough degrees-longitude to feet conversion factor.
        deg_to_feet=364287.
        self.overall_box = self.regions.box_region()
        data_array = []
        x_bounds = []
        y_bounds = []
        data_resolution = []
        x_vals = []
        y_vals = []
        x_points = []
        y_points = []
        interp = []
        gradients = []
        gradX = []
        gradY = []
        for single_name in file_names:
            sys.stdout.write('\rStarted Loading file: ' + single_name + '\033[K')
            geo = gdal.Open(single_name)
            data_array_single = geo.ReadAsArray()
            # Transpose so axis 0 indexes x (raster columns) and axis 1 y.
            data_array_single = np.transpose(data_array_single)
            sys.stdout.write('\rCalculating bounds of ' + single_name + '\033[K')
            bounds = terrain.get_bounds(geo)
            # Normalise bounds to (min, max), flipping the data to match.
            if bounds[0][0] < bounds[1][0]:
                x_bounds_single = (bounds[0][0],bounds[1][0])
            else:
                x_bounds_single = (bounds[1][0],bounds[0][0])
                # NOTE(review): after the transpose above, x varies along
                # axis 0, so this flip on axis=1 looks like it should be
                # axis=0 — confirm with a raster whose x bounds come back
                # reversed.
                data_array_single = np.flip(data_array_single,axis=1)
            if bounds[0][1] < bounds[1][1]:
                y_bounds_single = (bounds[0][1],bounds[1][1])
            else:
                y_bounds_single = (bounds[1][1],bounds[0][1])
                data_array_single = np.flip(data_array_single,axis=1)
            sys.stdout.write('\rGenerating point coordinates for ' + single_name + '\033[K')
            # Grid spacing in feet, used later for the finite differences.
            data_resolution_single = abs(x_bounds_single[1]-x_bounds_single[0])/float(data_array_single.shape[0])*deg_to_feet
            data_resolution.append(data_resolution_single)
            # NOTE(review): both linspaces use shape[0]; for a non-square
            # raster the y axis presumably needs shape[1] — confirm.
            x_vals_single = np.linspace(x_bounds_single[0],x_bounds_single[1],num=data_array_single.shape[0])
            y_vals_single = np.linspace(y_bounds_single[0],y_bounds_single[1],num=data_array_single.shape[0])
            x,y = np.meshgrid(x_vals_single,y_vals_single)
            # Keep only grid points inside the overall region bounding box.
            included_points = np.where(np.logical_and(np.logical_and(self.overall_box[0,0] <= x,self.overall_box[1,0] > x),np.logical_and(self.overall_box[0,1] <= y,self.overall_box[1,1] > y)))
            x = x[included_points]
            y = y[included_points]
            x_points.append(x)
            y_points.append(y)
            x_indices = np.where(np.logical_and(self.overall_box[0,0] <=x_vals_single,self.overall_box[1,0] > x_vals_single))
            y_indices = np.where(np.logical_and(self.overall_box[0,1] <=y_vals_single,self.overall_box[1,1] > y_vals_single))
            x_vals_single = x_vals_single[x_indices]
            y_vals_single = y_vals_single[y_indices]
            x_vals.append(x_vals_single)
            y_vals.append(y_vals_single)
            # Crop the elevation array to the same in-box window.
            data_array_single = data_array_single[x_indices]
            data_array_single = data_array_single[:,y_indices[0]]
            data_array.append(data_array_single)
            # Clamp the stored bounds to the overall region box.
            x_bounds_single = [max(x_bounds_single[0],self.overall_box[0,0]),min(x_bounds_single[1],self.overall_box[1,0])]
            y_bounds_single = [max(y_bounds_single[0],self.overall_box[0,1]),min(y_bounds_single[1],self.overall_box[1,1])]
            x_bounds.append(x_bounds_single)
            y_bounds.append(y_bounds_single)
            sys.stdout.write('\rBuilding interpolation function for ' + single_name + ' heights\033[K')
            interp.append(RectBivariateSpline(x_vals_single,y_vals_single,data_array_single))
            sys.stdout.write('\rDifferentiating and interpolating gradients for ' + single_name + '\033[K')
            gradients_single = terrain.calc_slopes(data_array_single,data_resolution_single)
            gradients.append(gradients_single)
            gradX.append(RectBivariateSpline(x_vals_single,y_vals_single,gradients_single[0]))
            gradY.append(RectBivariateSpline(x_vals_single,y_vals_single,gradients_single[1]))
            sys.stdout.write('\rDone loading ' + single_name + '\n')
        # Parallel per-dataset lists; consumers zip these with self.interp,
        # self.gradX and self.gradY, so their order must stay aligned.
        self.data_array = data_array
        self.x_bounds = x_bounds
        self.y_bounds = y_bounds
        self.data_resolution = data_resolution
        self.x_vals = x_vals
        self.y_vals = y_vals
        self.x_points = x_points
        self.y_points = y_points
        self.interp = interp
        self.gradients = gradients
        self.gradX = gradX
        self.gradY = gradY
        print("Done loading regions. Loaded " + str(len(file_names)) + " regions")
    def sort_by_data_set(self,coordinates):
        # Bucket `coordinates` (first axis = [x, y]) by which dataset's
        # bounding box contains them; returns [points, indices] pairs in
        # dataset order so zip() with self.interp/gradX/gradY lines up.
        # NOTE(review): points outside every box are silently dropped.
        out_array = []
        for x,y in zip(self.x_bounds, self.y_bounds):
            indices = np.where(np.logical_and(np.logical_and(x[0] <= coordinates[0], coordinates[0] < x[1]), np.logical_and(y[0] <= coordinates[1], coordinates[1] < y[1])))
            # np.where returns one index array per trailing dimension.
            if len(indices) == 2:
                out_array.append([coordinates[:,indices[0],indices[1]],indices])
            if len(indices) == 1:
                out_array.append([coordinates[:,indices[0]],indices])
        return out_array
    def height_at_coordinates(self,coordinate_in):
        # Interpolated terrain height for each input coordinate.
        interpolated = np.zeros(coordinate_in.shape[1:])
        coordinate = self.sort_by_data_set(coordinate_in)
        for area,interpolater in zip(coordinate,self.interp):
            inter_value = interpolater(area[0][0],area[0][1],grid=False)
            #print(area)
            # Scatter the per-dataset values back to their original slots.
            if len(coordinate_in.shape) == 2:
                interpolated[area[1][0]] = inter_value
            if len(coordinate_in.shape) == 3:
                interpolated[area[1][0],area[1][1]] = inter_value
        return interpolated
    def length_of_path(self,path):
        # 3D length of a lat/lon path: convert degrees to feet, append the
        # interpolated heights, then sum the per-segment distances.
        #print(path)
        heights = self.height_at_coordinates(path)
        path = np.concatenate((364287.*path,[heights]),axis=0)
        #print("path")
        #print(path)
        #print("heights")
        #print(heights)
        distances = np.sqrt(np.sum(np.square(np.diff(path,axis=1)),axis=0))
        #print("distances")
        #print(distances)
        return np.sum(distances)
    def gradient_at_coordinates(self,coordinate):
        # Interpolated (d/dx, d/dy) slope over a 2-D grid of coordinates.
        gradX = np.zeros((coordinate.shape[1],coordinate.shape[2]))
        gradY = np.zeros((coordinate.shape[1],coordinate.shape[2]))
        coordinate = self.sort_by_data_set(coordinate)
        for area,gradFuncX,gradFuncY in zip(coordinate,self.gradX,self.gradY):
            gradX[area[1][0],area[1][1]]=gradFuncX(area[0][0],area[0][1],grid=False)
            gradY[area[1][0],area[1][1]]=gradFuncY(area[0][0],area[0][1],grid=False)
        return np.array([gradX,gradY])
    def gradient_along_path(self,coordinate):
        # Same as gradient_at_coordinates but for a 1-D list of points.
        gradX = np.zeros((coordinate.shape[1]))
        gradY = np.zeros((coordinate.shape[1]))
        coordinate = self.sort_by_data_set(coordinate)
        for area,gradFuncX,gradFuncY in zip(coordinate,self.gradX,self.gradY):
            gradX[area[1][0]]=gradFuncX(area[0][0],area[0][1],grid=False)
            gradY[area[1][0]]=gradFuncY(area[0][0],area[0][1],grid=False)
        return np.array([gradX,gradY])
    def calc_slopes(data_array,data_resolution):
        # Finite-difference gradients of the elevation grid (feet spacing).
        gradients = np.gradient(data_array,data_resolution)
        return gradients
def main():
    # Smoke-test entry point: construct the terrain (loads regions and
    # elevation). The commented calls below are ad-hoc visualisation and
    # debugging probes kept for reference.
    ground = terrain()
    #ground.load_elevation("data_terrain/elevation")
    #ground.visualize_elevation(flat=True)
    #ground.calc_slopes()
    #ground.visualize_gradients()
    #print(ground.gradient_at_coordinates(np.transpose(np.array([[-111.2,41],[-111.3,41.01]]))))
    #print(ground.in_region(np.array([[-111,41],[-111.1,41],[-111,41.1],[-111.8,41.1],[-111.83,41.12],[-111.793,41.06],[-111.789,41.08]])))
    #ground.visualize_region(on_elevation=True)
    #ground.visualize_resort()
if __name__ == "__main__":
    main()
|
# MIT License
# Copyright (c) 2019 Runway AI, Inc
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import runway
from runway.data_types import number, image, category
from fasterai.visualize import get_image_colorizer, get_video_colorizer
architecture_description = "DeOldify model to use.\n" \
"Artistic achieves the highest quality results in image coloration, in terms of " \
"interesting details and vibrance. The most notable drawback however is that it's a bit " \
"of a pain to fiddle around with to get the best results.\n" \
"Stable achieves the best results with landscapes and portraits. Notably, it " \
"produces less 'zombies'- where faces or limbs stay gray rather than being colored " \
"in properly.\n" \
"Video is optimized for smooth, consistent and flicker-free video."
render_factor_description = "The default value of 35 has been carefully chosen and should work -ok- for most " \
"scenarios (but probably won't be the -best-). This determines resolution at which " \
"the color portion of the image is rendered. Lower resolution will render faster, and " \
"colors also tend to look more vibrant. Older and lower quality images in particular" \
" will generally benefit by lowering the render factor. Higher render factors are often " \
"better for higher quality images, but the colors may get slightly washed out."
@runway.setup(options={"architecture": category(description=architecture_description,
                                                choices=['Artistic', 'Stable', 'Video'],
                                                default='Artistic')})
def setup(opts):
    """Build and return the colorizer selected by the 'architecture' option."""
    architecture = opts['architecture']
    print('[SETUP] Ran with architecture "{}"'.format(architecture))
    if architecture == 'Artistic':
        return get_image_colorizer(artistic=True)
    if architecture == 'Stable':
        return get_image_colorizer(artistic=False)
    # 'Video': use the visualizer of the video colorizer.
    return get_video_colorizer().vis
@runway.command(name='generate',
                inputs={ 'image': image(description='Image to colorize'),
                         'render_factor': number(description=render_factor_description,
                                                 min=7, max=45, step=1, default=35) },
                outputs={ 'image': image(description='Colorized image') })
def generate(model, args):
    """Colorize the input image using the model selected in setup()."""
    factor = args['render_factor']
    print('[GENERATE] Ran with render_factor "{}"'.format(factor))
    source = args['image'].convert('RGB')
    # Free cached GPU memory before running the filter.
    model._clean_mem()
    colorized = model.filter.filter(source, source, render_factor=factor)
    return {'image': colorized}
if __name__ == '__main__':
    runway.run(host='0.0.0.0', port=8888)
|
# Test the difference in accuracy between sin() and Python's math.sin()
import math
import matplotlib.pyplot as plt
# Calculate sine of 'x' with 'n' iterations
def sin(x: float, n: int) -> float:
    """Approximate sin(x) with the first n terms of its Taylor series.

    Args:
        x: Angle in radians.
        n: Number of series terms to sum.

    Returns:
        The partial-sum approximation of sin(x).
    """
    result = 0.0
    for i in range(n):
        # BUG FIX: the original wrote (-1 ** i), which parses as -(1 ** i)
        # and is always -1, summing to -sinh(x) instead of sin(x).
        # The alternating sign needs (-1) ** i.
        result += ((-1) ** i) * (x ** (2 * i + 1)) / math.factorial(2 * i + 1)
    return result
# Graph the accuracy difference between 'math.sin(x)' and 'sin(x, n)'
def main(x: float, n: int):
    """Print and plot math.sin(x) against sin(x, i) for i in [0, n)."""
    iterations = []
    results = [[], []]
    for i in range(0, n):
        exact = math.sin(x)
        approx = sin(x, i)
        iterations.append(i)
        results[0].append(exact)
        results[1].append(approx)
        print(f"Iteration: {i}")
        print(f"\tmath.sin(x) = \t{exact}")
        print(f"\tsin(x, {i}) = \t{approx}")
        print(f"\tdifference = \t{exact - approx}\n")
    plt.plot(iterations, results[1], label="sin(x, n)")
    plt.plot(iterations, results[0], label="math.sin(x)")
    plt.xlabel("iterations")
    plt.ylabel("result")
    plt.legend()
    plt.show()
# NOTE(review): input() executes at import time as well, since these
# statements sit above the __main__ guard — confirm that is acceptable.
x = float(input("Radians: ")) # Radians
n = 80 # Iterations
# Over 85 iterations results in OverflowError
if __name__ == '__main__':
    main(x, n)
|
"""Tests for core module."""
from pathlib import Path
import pytest
from .helpers import append
from .helpers import branch
from .helpers import touch
from retrocookie import core
from retrocookie import git
from retrocookie import retrocookie
def in_template(path: Path) -> Path:
    """Prepend the template directory to the path."""
    template_dir = Path("{{cookiecutter.project_slug}}")
    return template_dir / path
class Example:
    """Example data for the test cases."""
    path = Path("README.md")           # file changed in the instance repo
    text = "Lorem Ipsum\n"             # content appended to that file
    template_path = in_template(path)  # same file inside the template dir
@pytest.mark.parametrize(
    "text, expected",
    [
        # Plain text passes through unchanged.
        ("Lorem Ipsum\n", "Lorem Ipsum\n"),
        (
            # The instance project name is replaced by the template variable.
            "This project is called example.\n",
            "This project is called {{cookiecutter.project_slug}}.\n",
        ),
        (
            # Literal {{ }} must be escaped so Jinja leaves them intact.
            "python-version: ${{ matrix.python-version }}",
            'python-version: ${{"{{"}} matrix.python-version {{"}}"}}',
        ),
    ],
)
def test_rewrite(
    cookiecutter_repository: git.Repository,
    cookiecutter_instance_repository: git.Repository,
    text: str,
    expected: str,
) -> None:
    """It rewrites the file contents as expected."""
    cookiecutter, instance = cookiecutter_repository, cookiecutter_instance_repository
    # Commit the text on a topic branch in the instance repository ...
    with branch(instance, "topic", create=True):
        append(instance, Example.path, text)
    # ... import it into the cookiecutter on a branch of the same name ...
    retrocookie(
        instance.path, path=cookiecutter.path, branch="topic", create_branch="topic",
    )
    # ... and expect the rewritten content inside the template directory.
    with branch(cookiecutter, "topic"):
        assert expected in cookiecutter.read_text(Example.template_path)
def test_branch(
    cookiecutter_repository: git.Repository,
    cookiecutter_instance_repository: git.Repository,
) -> None:
    """It creates the specified branch."""
    cookiecutter = cookiecutter_repository
    instance = cookiecutter_instance_repository
    # Commit on "topic" in the instance, import under a different name.
    with branch(instance, "topic", create=True):
        append(instance, Example.path, Example.text)
    retrocookie(
        instance.path,
        path=cookiecutter.path,
        branch="topic",
        create_branch="just-another-branch",
    )
    with branch(cookiecutter, "just-another-branch"):
        assert Example.text in cookiecutter.read_text(Example.template_path)
def test_single_commit(
    cookiecutter_repository: git.Repository,
    cookiecutter_instance_repository: git.Repository,
) -> None:
    """It cherry-picks the specified commit."""
    cookiecutter = cookiecutter_repository
    instance = cookiecutter_instance_repository
    append(instance, Example.path, Example.text)
    # Import only the tip commit of the instance's default branch.
    retrocookie(instance.path, ["HEAD"], path=cookiecutter.path)
    assert Example.text in cookiecutter.read_text(Example.template_path)
def test_multiple_commits_sequential(
    cookiecutter_repository: git.Repository,
    cookiecutter_instance_repository: git.Repository,
) -> None:
    """It cherry-picks the specified commits."""
    cookiecutter = cookiecutter_repository
    instance = cookiecutter_instance_repository
    names = ("first", "second")
    # Two sequential commits, imported via a commit range.
    for name in names:
        touch(instance, Path(name))
    retrocookie(instance.path, ["HEAD~2.."], path=cookiecutter.path)
    for name in names:
        assert cookiecutter.exists(in_template(Path(name)))
def test_multiple_commits_parallel(
    cookiecutter_repository: git.Repository,
    cookiecutter_instance_repository: git.Repository,
) -> None:
    """It cherry-picks the specified commits."""
    cookiecutter = cookiecutter_repository
    instance = cookiecutter_instance_repository
    names = ("first", "second")
    # One commit on each of two parallel branches, imported by branch name.
    for name in names:
        with branch(instance, name, create=True):
            touch(instance, Path(name))
    retrocookie(instance.path, names, path=cookiecutter.path)
    for name in names:
        assert cookiecutter.exists(in_template(Path(name)))
def test_find_template_directory_fails(tmp_path: Path) -> None:
    """It raises an exception when there is no template directory."""
    empty_repository = git.Repository.init(tmp_path)
    with pytest.raises(Exception):
        core.find_template_directory(empty_repository)
|
import argparse
import ipaddress
import itertools
import logging
import os
import re
import secrets
import socket
import ssl
import sys
from random import shuffle
from typing import List, Tuple, Union
from urllib.parse import urlsplit
from . import base
_logger = logging.getLogger(__name__)
def configure_logging(log_file: str, level: str) -> None:
    """Configure root logging to stdout and, optionally, to a file.

    Args:
        log_file: Path of an additional log file; falsy disables file logging.
        level: Name of the root log level, looked up in base.LOG_LEVELS
            (unknown names default to DEBUG).
    """
    log = logging.getLogger()
    log.setLevel(base.LOG_LEVELS.get(level.lower(), logging.DEBUG))
    # Handlers pass everything through; filtering happens at the root level.
    handlers = [logging.StreamHandler(sys.stdout)]
    if log_file:
        handlers.append(logging.FileHandler(log_file))
    # DRY: the original duplicated the level/formatter setup per handler.
    for handler in handlers:
        handler.setLevel(logging.DEBUG)
        handler.setFormatter(logging.Formatter(base.LOG_FORMAT, style="{"))
        log.addHandler(handler)
def format_transfer(b: int) -> str:
    """ Format a number of bytes in a more human readable format """
    if b < 0:
        raise ValueError("Must be bigger than 0")
    # Largest unit first; fall through to the raw number below 1 KiB.
    for suffix, threshold in (("T", 1 << 40), ("G", 1 << 30), ("M", 1 << 20), ("K", 1 << 10)):
        if b >= threshold:
            return f"{b / threshold:.1f} {suffix}"
    return str(b)
def generate_token() -> bytes:
    """ Generate a random token used for identification of clients and tunnels """
    # Cryptographically secure randomness; size fixed by the protocol.
    return secrets.token_bytes(base.CLIENT_NAME_SIZE)
def generate_ssl_context(
    *,
    cert: str = None,
    key: str = None,
    ca: str = None,
    server: bool = False,
    ciphers: List[str] = None,
    check_hostname: bool = False,
) -> ssl.SSLContext:
    """ Generate a SSL context for the tunnel """
    # Server and client sides use different TLS protocol setups.
    side = ssl.PROTOCOL_TLS_SERVER if server else ssl.PROTOCOL_TLS_CLIENT
    ctx = ssl.SSLContext(side)
    ctx.check_hostname = check_hostname
    ctx.minimum_version = ssl.TLSVersion.TLSv1_2
    if server:
        # Use fresh DH/ECDH parameters per connection.
        ctx.options |= ssl.OP_SINGLE_DH_USE | ssl.OP_SINGLE_ECDH_USE
    if cert:
        # Certificate and key for this side of the connection.
        ctx.load_cert_chain(cert, keyfile=key)
    if ca:
        # A CA enforces verification of the peer.
        ctx.verify_mode = ssl.CERT_REQUIRED
        ctx.load_verify_locations(cafile=ca)
    if ciphers:
        ctx.set_ciphers(ciphers)
    # Debug summary of the resulting configuration.
    _logger.info("CA usage: %s", bool(ca))
    _logger.info("Certificate: %s", bool(cert))
    _logger.info("Hostname verification: %s", bool(check_hostname))
    _logger.info("Minimal TLS Versions: %s", ctx.minimum_version.name)
    cipher_names = sorted(c["name"] for c in ctx.get_ciphers())
    _logger.info("Ciphers: %s", ", ".join(cipher_names))
    return ctx
def get_unused_port(min_port: int, max_port: int, udp: bool = False) -> int:
    """Returns a random unused port within the given range or None if all
    are used.

    Args:
        min_port: Lower bound of the range (inclusive).
        max_port: Upper bound of the range (inclusive).
        udp: Probe with a UDP socket instead of TCP.
    """
    ports = list(range(min_port, max_port + 1))
    shuffle(ports)
    kind = socket.SOCK_DGRAM if udp else socket.SOCK_STREAM
    for port in ports:
        # BUG FIX: the original created one socket outside the loop and
        # never closed it when every port was taken, leaking the file
        # descriptor. A context-managed socket per attempt always closes.
        with socket.socket(type=kind) as sock:
            try:
                sock.bind(("", port))
            except OSError:
                continue
            return port
    return None
def merge_settings(a: int, b: int) -> int:
    """Merge the settings of the tunnel. If one of them is 0 the other one will
    take place. otherwise the lower value will be used"""
    if a and b:
        return min(a, b)
    # At least one is 0 (unset): keep whichever is set.
    return max(a, b)
def optimize_networks(*networks: "base.IPvXNetwork") -> "List[base.IPvXNetwork]":
    """Try to optimize the list of networks by using the minimal network
    configuration.

    Within each IP version, a network that fully contains another given
    network is dropped in favour of the more specific one; the result is
    sorted with IPv4 before IPv6. (Annotations are quoted so the function
    can be defined without evaluating the `base` types.)
    """
    # BUG FIX: itertools.groupby only groups *consecutive* items, so
    # interleaved v4/v6 input produced several groups per version and the
    # dict below silently kept only the last one. Sorting by version first
    # guarantees one run per version (key= avoids comparing v4 with v6).
    ordered = sorted(networks, key=lambda n: n.version)
    grouped = itertools.groupby(ordered, lambda n: n.version)
    groups = {}
    for version, group in grouped:
        group = sorted(set(group))
        tmp = set()
        for i, a in enumerate(group):
            # Keep the most specific networks: drop `a` when a contained
            # subnet follows it in the sorted group.
            for b in group[i + 1 :]:
                if b.subnet_of(a):
                    tmp.add(b)
                    break
            else:
                tmp.add(a)
        groups[version] = sorted(tmp)
    # Concatenate the per-version results, v4 (version 4) first.
    return sum([g for _, g in sorted(groups.items())], [])
def parse_address(
    address: str, host: str = None, port: int = None, multiple: bool = False
) -> Tuple[Union[str, List[str]], int]:
    """Parse an address and split hostname and port. The port is required. The
    default host is "" which means all.

    Raises argparse.ArgumentTypeError on any malformed address, port or
    host, so the function can be used directly as an argparse `type=`.
    """
    # Only the address without scheme and path. We only support IPs if multiple hosts
    # are activated
    pattern = r"[0-9.:\[\],]*?" if multiple else r"[0-9a-zA-Z.:\[\],]*?"
    match = re.match(fr"^(?P<hosts>{pattern})(:(?P<port>\d+))?$", address)
    if not match:
        raise argparse.ArgumentTypeError(
            "Invalid address parsed. Only host and port are supported."
        )
    # Try to parse the port first
    data = match.groupdict()
    if data.get("port"):
        port = int(data["port"])
        if port <= 0 or port >= 65536:
            raise argparse.ArgumentTypeError("Invalid address parsed. Invalid port.")
    if port is None:
        raise argparse.ArgumentTypeError("Port required.")
    # Try parsing the different host addresses
    hosts = set()
    for h in data.get("hosts", "").split(","):
        if not h:
            # Empty entry: fall back to the caller-provided default host.
            hosts.add(h or host)
            continue
        try:
            # urlsplit normalises/validates the host (handles [v6] brackets).
            parsed = urlsplit(f"http://{h}")
            hosts.add(parsed.hostname)
        except Exception as e:
            raise argparse.ArgumentTypeError(
                "Invalid address parsed. Invalid host."
            ) from e
    # Multiple hosts are supported if the flag is set
    if len(hosts) > 1 and multiple:
        return sorted(hosts), port
    # Otherwise we fail
    if len(hosts) > 1:
        raise argparse.ArgumentTypeError(
            "Invalid address parsed. Only one host is required."
        )
    if len(hosts) == 1:
        host = hosts.pop() or host
    if host is not None:
        return host, port
    raise argparse.ArgumentTypeError("Invalid address parsed. Host required.")
def parse_networks(network: str) -> List[base.IPvXNetwork]:
    """ Try to parse multiple networks and return them optimized """
    try:
        parsed = map(ipaddress.ip_network, network.split(","))
        return optimize_networks(*parsed)
    except Exception as e:
        raise argparse.ArgumentTypeError("Invalid network format") from e
def valid_file(path: str) -> str:
    """Check if a file exists and return the absolute path otherwise raise an
    error. This function is used for the argument parsing"""
    absolute = os.path.abspath(path)
    if os.path.isfile(absolute):
        return absolute
    raise argparse.ArgumentTypeError("Not a file.")
def valid_ports(ports: str) -> Tuple[int, int]:
    """Check if the argument is a valid "min:max" port range.

    Args:
        ports: Range string of the form "<port>:<port>".

    Returns:
        The (lower, upper) port pair, ordered ascending.

    Raises:
        argparse.ArgumentTypeError: If the scheme or the range is invalid.
    """
    # BUG FIX: the second port was optional in the original pattern
    # (r"(\d+):(\d+)?"), so input like "123:" crashed with int(None)
    # instead of raising the argparse error; both ports are required.
    # (The annotation also claimed Tuple for the input, which is a str.)
    m = re.match(r"^(\d+):(\d+)$", ports)
    if m:
        a, b = sorted(map(int, m.groups()))
        if 0 < a < b < 65536:
            return a, b
        raise argparse.ArgumentTypeError("Port must be in range (1, 65536)")
    raise argparse.ArgumentTypeError("Invalid port scheme.")
|
# NOTE(review): presumably column indices into a per-run results record —
# confirm against the code that writes/reads these rows.
NU_ID = 0
GAMMA_ID = 1
EFFECTIVE_NUMBER_OF_COVARIATES_ID = 2
LOG_MARGINAL_LAPLACE_DIAG_VALIDATION_CRITERIA_ID = 3
TRAIN_ACCURACY_ID = 4
TEST_ACCURACY_ID = 5
TRAIN_CV_LOG_PROB_ID = 6
TRAIN_CV_ACC_ID = 7
ANMI_ID = 8
|
import django.utils.timezone
from django.db import models
from django.contrib.auth.models import User
# Baseball fielding/batting positions, as (db value, display value) choices.
POSITIONS = (
    ('P', 'P'),
    ('C', 'C'),
    ('1B', '1B'),
    ('2B', '2B'),
    ('3B', '3B'),
    ('SS', 'SS'),
    ('LF', 'LF'),
    ('CF', 'CF'),
    ('RF', 'RF'),
    ('DH', 'DH'),
    ('PH', 'PH'),
    ('PR', 'PR'),
)
class Team(models.Model):
    """A baseball team, owned by the user who created it."""
    location = models.CharField(max_length=50)
    name = models.CharField(max_length=50)
    abbr = models.CharField(max_length=3)  # short code, e.g. "NYA"
    owner = models.ForeignKey(User, on_delete=models.CASCADE)
    def __str__(self):
        return self.abbr
class Game(models.Model):
    """A single game between two teams with the final score."""
    team_home = models.ForeignKey(Team, on_delete=models.CASCADE, related_name='team_home')
    team_away = models.ForeignKey(Team, on_delete=models.CASCADE, related_name='team_away')
    runs_home = models.IntegerField()
    runs_away = models.IntegerField()
    date = models.DateField(default=django.utils.timezone.now)
    owner = models.ForeignKey(User, on_delete=models.CASCADE)
    def __str__(self):
        # e.g. "NYA: 3, BOS: 5 (2020-07-01)" — away side listed first.
        away = f'{self.team_away}: {self.runs_away}'
        home = f'{self.team_home}: {self.runs_home}'
        return f'{away}, {home} ({self.date})'
class Player(models.Model):
    """A player on a team's roster."""
    name = models.CharField(max_length=100)
    # Jersey number
    num = models.PositiveSmallIntegerField()
    team = models.ForeignKey(Team, on_delete=models.CASCADE)
    owner = models.ForeignKey(User, on_delete=models.CASCADE)
    def __str__(self):
        return self.name
class LineupSlot(models.Model):
    """One lineup entry: a player's batting slot and position in a game."""

    game = models.ForeignKey(Game, on_delete=models.CASCADE)
    player = models.ForeignKey(Player, on_delete=models.CASCADE)
    # Batting-order slot
    num = models.PositiveSmallIntegerField()
    pos = models.CharField(max_length=2, choices=POSITIONS)
    # 0 for substitutes; non-zero marks a starter
    starter = models.PositiveSmallIntegerField(default=0)

    def __str__(self):
        """Rendered like "#12 Jane Doe (SS)": jersey number, name, position."""
        return f"#{self.player.num} {self.player.name} ({self.pos})"
|
# © 2018 Software Freedom Conservancy (SFC)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
"""
cwlprov Command Line Tool
"""
__author__ = "Stian Soiland-Reyes <https://orcid.org/0000-0001-9842-9718>"
__copyright__ = "© 2018 Software Freedom Conservancy (SFC)"
__license__ = (
"Apache License, version 2.0 (https://www.apache.org/licenses/LICENSE-2.0)"
)
import argparse
import errno
import json
import logging
import os.path
import pathlib
import posixpath
import shlex
import shutil
import sys
import tempfile
import urllib.parse
from enum import IntEnum
from functools import partial
from pathlib import Path
from uuid import UUID

from bdbag.bdbagit import BagError, BDBag
from prov.identifier import Identifier
from prov.model import *

from cwlprov import __version__

from .prov import Provenance
from .ro import ResearchObject
from .utils import *
# Module logger; verbosity is configured later from CLI flags via _set_log_level()
_logger = logging.getLogger(__name__)
# BagIt profile identifiers that mark a bag as a Research Object
BAGIT_RO_PROFILES = (
    "https://w3id.org/ro/bagit/profile",
    "http://raw.githubusercontent.com/fair-research/bdbag/master/profiles/bdbag-ro-profile.json",
)
# CWLProv profile versions this tool understands
CWLPROV_SUPPORTED = (
    # Decreasing order as first item is output as example
    "https://w3id.org/cwl/prov/0.6.0",
    "https://w3id.org/cwl/prov/0.5.0",
    "https://w3id.org/cwl/prov/0.4.0",
    "https://w3id.org/cwl/prov/0.3.0",
)
# Tag-file path of the RO manifest inside the bag
MANIFEST_JSON = posixpath.join("metadata", "manifest.json")
# Used to align output columns when a timestamp is absent
TIME_PADDING = " " * 26  # len("2018-08-08 22:44:06.573330")
# PROV namespaces
CWLPROV = Namespace("cwlprov", "https://w3id.org/cwl/prov#")
class Status(IntEnum):
    """Exit codes from main().

    Several members deliberately share an errno value (IntEnum treats the
    later ones as aliases); they are only used as process exit codes.
    """

    OK = 0
    UNHANDLED_ERROR = errno.EPERM
    UNKNOWN_COMMAND = errno.EINVAL
    UNKNOWN_FORMAT = errno.EINVAL
    IO_ERROR = errno.EIO
    BAG_NOT_FOUND = errno.ENOENT
    PROV_NOT_FOUND = errno.ENOENT
    NOT_A_DIRECTORY = errno.ENOTDIR
    UNKNOWN_RUN = errno.ENODATA
    UNKNOWN_ACTIVITY = errno.ENODATA
    UNKNOWN_TOOL = errno.ENODATA
    PERMISSION_ERROR = errno.EACCES
    NOT_IMPLEMENTED = errno.ENOSYS
    # User-specified exit codes
    # http://www.tldp.org/LDP/abs/html/exitcodes.html
    MISSING_PROFILE = 166
    INVALID_BAG = 167
    UNSUPPORTED_CWLPROV_VERSION = 168
    CANT_LOAD_CWL = 169
    MISSING_PLAN = 170
    # BUG FIX: validate_bag() returns Status.MISSING_MANIFEST, which was
    # never defined and raised AttributeError instead of exiting cleanly.
    MISSING_MANIFEST = 171
def parse_args(args=None):
    """Build the cwlprov argument parser and parse *args*.

    Args:
        args: argument list to parse (default: sys.argv[1:] via argparse).

    Returns:
        argparse.Namespace with the global options plus the selected
        sub-command's options; the sub-command name is in ``.cmd``
        (None when no sub-command was given).
    """
    parser = argparse.ArgumentParser(
        description="cwlprov explores Research Objects containing provenance of Common Workflow Language executions. <https://w3id.org/cwl/prov/>"
    )
    parser.add_argument(
        "--version", action="version", version="%(prog)s " + __version__
    )
    # Common options
    parser.add_argument(
        "--directory",
        "-d",
        help="Path to CWLProv Research Object (default: .)",
        default=None,
    )
    parser.add_argument(
        "--relative",
        default=None,
        action="store_true",
        help="Output paths relative to current directory (default if -d is missing or relative)",
    )
    parser.add_argument(
        "--absolute",
        default=None,
        action="store_false",
        dest="relative",
        help="Output absolute paths (default if -d is absolute)",
    )
    parser.add_argument(
        "--output", "-o", default="-", help="File to write output to (default: stdout)"
    )
    parser.add_argument(
        "--verbose",
        "-v",
        default=0,
        action="count",
        help="Verbose logging (repeat for more verbose)",
    )
    parser.add_argument(
        "--quiet", "-q", default=False, action="store_true", help="No logging or hints"
    )
    parser.add_argument(
        "--hints", default=True, action="store_true", help="Show hints on cwlprov usage"
    )
    parser.add_argument(
        "--no-hints",
        default=True,
        action="store_false",
        dest="hints",
        help="Do not show hints",
    )
    # Sub-commands; the chosen one lands in args.cmd
    subparsers = parser.add_subparsers(title="commands", dest="cmd")
    parser_validate = subparsers.add_parser(
        "validate", help="validate the CWLProv Research Object"
    )
    parser_info = subparsers.add_parser("info", help="show research object metadata")
    parser_who = subparsers.add_parser("who", help="show who ran the workflow")
    parser_prov = subparsers.add_parser(
        "prov", help="export workflow execution provenance in PROV format"
    )
    parser_prov.add_argument("id", default=None, nargs="?", help="workflow run UUID")
    parser_prov.add_argument(
        "--format",
        "-f",
        default="files",
        choices=["files"] + list(MEDIA_TYPES.keys()),
        help="Output in PROV format (default: files)",
    )
    parser_prov.add_argument(
        "--formats",
        "-F",
        default=False,
        action="store_true",
        help="List available PROV formats",
    )
    # Common options for parser_input and parser_output
    run_option = argparse.ArgumentParser(add_help=False)
    run_option.add_argument(
        "--run", default=None, help="workflow run UUID that contains step"
    )
    run_option.add_argument(
        "id", default=None, nargs="?", help="step/workflow run UUID"
    )
    io_outputs = argparse.ArgumentParser(add_help=False)
    io_outputs.add_argument(
        "--parameters", default=True, action="store_true", help="Show parameter names"
    )
    io_outputs.add_argument(
        "--no-parameters",
        default=True,
        action="store_false",
        dest="parameters",
        help="Do not show parameter names",
    )
    io_outputs.add_argument(
        "--format",
        default="any",
        choices=["any", "files", "values", "uris", "json"],
        help="Output format, (default: any)",
    )
    # Tip: These formats are NOT the same as in parser_prov
    parser_input = subparsers.add_parser(
        "inputs",
        help="list workflow/step input files/values",
        parents=[run_option, io_outputs],
    )
    parser_output = subparsers.add_parser(
        "outputs",
        help="list workflow/step output files/values",
        parents=[run_option, io_outputs],
    )
    parser_run = subparsers.add_parser("run", help="show workflow execution log")
    parser_run.add_argument("id", default=None, nargs="?", help="workflow run UUID")
    parser_run.add_argument(
        "--step", "-s", default=None, help="Show only step with given UUID"
    )
    parser_run.add_argument(
        "--steps", default=True, action="store_true", help="List steps of workflow"
    )
    parser_run.add_argument(
        "--no-steps",
        default=True,
        action="store_false",
        dest="steps",
        help="Do not list steps",
    )
    parser_run.add_argument(
        "--start",
        default=True,
        action="store_true",
        help="Show start timestamps (default)",
    )
    parser_run.add_argument(
        "--no-start",
        "-S",
        default=True,
        action="store_false",
        dest="start",
        help="Do not show start timestamps",
    )
    parser_run.add_argument(
        "--end", "-e", default=False, action="store_true", help="Show end timestamps"
    )
    parser_run.add_argument(
        "--no-end",
        default=False,
        action="store_false",
        dest="end",
        help="Do not show end timestamps",
    )
    parser_run.add_argument(
        "--duration",
        default=True,
        action="store_true",
        help="Show step duration (default)",
    )
    parser_run.add_argument(
        "--no-duration",
        "-D",
        default=True,
        action="store_false",
        dest="duration",
        help="Do not show step duration",
    )
    parser_run.add_argument(
        "--labels", default=True, action="store_true", help="Show activity labels"
    )
    parser_run.add_argument(
        "--no-labels",
        "-L",
        default=True,
        action="store_false",
        dest="labels",
        help="Do not show activity labels",
    )
    parser_run.add_argument(
        "--inputs", "-i", default=False, action="store_true", help="Show inputs"
    )
    parser_run.add_argument(
        "--outputs", "-o", default=False, action="store_true", help="Show outputs"
    )
    parser_runs = subparsers.add_parser(
        "runs", help="List all workflow executions in RO"
    )
    parser_rerun = subparsers.add_parser(
        "rerun", help="Rerun a workflow or step", parents=[run_option]
    )
    parser_rerun.add_argument(
        "--cwlrunner",
        default="cwl-runner",
        help="Executable for running cwl (default: cwl-runner)",
    )
    parser_rerun.add_argument(
        "args", nargs=argparse.REMAINDER, help="Additional arguments to cwl runner"
    )
    parser_derived = subparsers.add_parser(
        "derived",
        help="List what was derived from a data item, based on activity usage/generation",
    )
    parser_derived.add_argument(
        "--run", default=None, help="workflow run UUID which provenance to examine"
    )
    parser_derived.add_argument("data", help="Data file, hash or UUID")
    parser_derived.add_argument(
        "--recurse",
        default=True,
        action="store_true",
        help="Recurse transitive derivations (default)",
    )
    parser_derived.add_argument(
        "--no-recurse",
        default=True,
        action="store_false",
        dest="recurse",
        help="Do not recurse transitive derivations",
    )
    parser_derived.add_argument(
        "--maxdepth",
        default=None,
        type=int,
        help="Maximum depth of transitive derivations (default: infinity)",
    )
    parser_stats = subparsers.add_parser(
        "runtimes",
        help="Calculate average step execution runtimes",
        parents=[run_option],
    )
    return parser.parse_args(args)
def _find_bagit_folder(folder=None):
    """Walk upwards from *folder* (default: cwd) looking for a bagit.txt.

    Returns the containing directory as an absolute pathlib.Path, or None
    once the filesystem root has been reached without a match.
    """
    # Start from an absolute path so the parent-walk terminates at the root
    # instead of cycling on relative paths.
    current = pathlib.Path(folder or "").absolute()
    while True:
        _logger.debug("Determining bagit folder: %s", current)
        marker = current / "bagit.txt"
        if marker.is_file():
            _logger.info("Detected %s", marker)
            return current
        _logger.debug("%s not found", marker)
        if current == current.parent:
            # At the filesystem root; nothing left to climb
            _logger.info("No bagit.txt detected")
            return None
        current = current.parent
def _info_set(bag, key):
v = bag.info.get(key, [])
if isinstance(v, list):
return set(v)
else:
return {v}
def _simpler_uuid(uri):
return str(uri).replace("urn:uuid:", "")
def _as_uuid(w):
try:
uuid = UUID(w.replace("urn:uuid:", ""))
return (uuid.urn, uuid, str(uuid))
except ValueError:
logger.warn("Invalid UUID %s", w)
# return -as-is
return w, None, str(w)
def _prov_with_attr(prov_doc, prov_type, attrib_value, with_attrib=PROV_ATTR_ACTIVITY):
    """Yield records of *prov_type* carrying the (with_attrib, attrib_value) pair."""
    wanted = (with_attrib, attrib_value)
    for record in prov_doc.get_records(prov_type):
        if wanted in record.attributes:
            yield record
def _prov_attr(attr, elem):
    """Convenience: the first value of attribute *attr* on *elem* (via utils.first)."""
    values = elem.get_attribute(attr)
    return first(values)
# Short format extension -> PROV serialisation media type
MEDIA_TYPES = {
    "ttl": 'text/turtle; charset="UTF-8"',
    "rdf": "application/rdf+xml",
    "json": "application/json",
    "jsonld": "application/ld+json",
    "xml": "application/xml",
    "provn": 'text/provenance-notation; charset="UTF-8"',
    "nt": "application/n-triples",
}
# Reverse lookup: media type -> short extension
EXTENSIONS = {media_type: extension for extension, media_type in MEDIA_TYPES.items()}
def _prov_format(ro, uri, media_type):
for prov in ro.provenance(uri) or ():
if media_type == ro.mediatype(prov):
return ro.resolve_path(prov)
def _prov_document(ro, uri, args):
    """Load and unify a PROV document for *uri*, trying formats in preference order.

    Returns the unified ProvDocument, or None when no compatible
    serialisation is available.
    """
    # Preferred order
    candidates = ("xml", "json", "nt", "ttl", "rdf")
    # Note: Not all of these parse consistently with rdflib in py3
    rdf_candidates = ("ttl", "nt", "rdf", "jsonld")
    for ext in candidates:
        prov = _prov_format(ro, uri, MEDIA_TYPES.get(ext))
        if not prov:
            continue
        _logger.info("Loading %s", prov)
        if ext in rdf_candidates:
            doc = ProvDocument.deserialize(source=prov, format="rdf", rdf_format=ext)
        else:
            doc = ProvDocument.deserialize(source=prov, format=ext)
        return doc.unified()
    _logger.warning("No PROV compatible format found for %s", uri)
    return None
def _set_log_level(quiet=None, verbose=0):
if quiet: # -q
log_level = logging.ERROR
if not verbose: # default
log_level = logging.WARNING
elif verbose == 1: # -v
log_level = logging.INFO
else: # -v -v
log_level = logging.DEBUG
logging.basicConfig(level=log_level)
class Tool:
def __init__(self, args=None):
self.args = parse_args(args)
if self.args.output != "-":
self.output = open(self.args.output, mode="w", encoding="UTF-8")
else:
self.output = None # sys.stdout
def close(self):
if self.output:
# Close --output file
self.output.close()
self.output = None
    def __enter__(self):
        """Context-manager entry: return self so ``with Tool(...) as t`` works."""
        return self
    def __exit__(self, type, value, traceback):
        """Context-manager exit: ensure any --output file is closed."""
        self.close()
    def _determine_relative(self):
        """Decide whether printed paths are relative or absolute.

        Sets self.relative_paths to the base Path to relativise against, or
        None for absolute output, honouring explicit --relative/--absolute
        and otherwise using heuristics based on --output and --directory.
        """
        args = self.args
        if args.relative is False:
            # Explicit --absolute
            _logger.debug("Output paths absolute")
            self.relative_paths = None
            return
        _logger.debug("Determining output paths")
        if not self.output:  # stdout
            _logger.debug("Output paths relative to current directory?")
            relative_to = Path()
        else:
            _logger.debug(
                "Output paths relative to (resolved parent) path of %s?", args.output
            )
            relative_to = Path(args.output).resolve().parent
        if args.relative:
            # We'll respect the parameter
            # Either calculate --relative ; or None for --absolute
            _logger.debug("Output paths relative to %s", relative_to)
            self.relative_paths = args.relative and relative_to or None
            return
        assert args.relative is None  # only remaining option
        _logger.debug("Neither --relative nor --absolute given")
        if self.output:
            _logger.debug(
                "Considering if paths should be relative to --output %s", args.output
            )
            # Only relative if we can avoid ../ (use --relative to force)
            # assuming _determine_folder has already parsed args.directory
            try:
                # Check if bag folder is reachable from output folder
                f = (
                    self.folder.resolve()
                )  # Compare as resolved - following symlinks etc
                # relative_to() raising ValueError is the signal; rel is unused
                rel = f.relative_to(relative_to)
                self.relative_paths = relative_to
                _logger.debug(
                    "Relative as bag %s is within output folder %s", f, relative_to
                )
            except ValueError:
                # Would need ../ - bail out to absolute paths
                self.relative_paths = None
                _logger.debug(
                    "Absolute as bag %s not within output folder %s",
                    self.folder,
                    relative_to,
                )
        elif args.directory and Path(args.directory).is_absolute():
            _logger.debug(
                "Output paths absolute as --directory %s is absolute", args.directory
            )
            self.relative_paths = None
        else:
            _logger.debug(
                "Output paths relative to %s as --directory %s is relative",
                relative_to,
                args.directory,
            )
            self.relative_paths = relative_to
def _determine_logging(self):
if self.args.quiet and self.args.verbose:
_logger.error("Incompatible parameters: --quiet --verbose")
return Status.UNKNOWN_COMMAND
_set_log_level(self.args.quiet, self.args.verbose)
    def _determine_folder(self):
        """Locate and sanity-check the bag folder into self.folder.

        Uses --directory when given, otherwise searches upwards for a
        bagit.txt. Returns an error Status on failure, None on success.
        """
        folder = self.args.directory or _find_bagit_folder()
        if not folder:
            _logger.error("Could not find bagit.txt, try cwlprov -d mybag/")
            return Status.BAG_NOT_FOUND
        self.folder = pathlib.Path(folder)
        if not self.folder.exists():
            _logger.error("No such file or directory: %s", self.folder)
            return Status.BAG_NOT_FOUND
        if not self.folder.is_dir():
            _logger.error("Not a directory: %s", folder)
            return Status.NOT_A_DIRECTORY
        # A bag must have bagit.txt at its top level
        bagit_file = self.folder / "bagit.txt"
        if not bagit_file.is_file():
            _logger.error("File not found: %s", bagit_file)
            return Status.BAG_NOT_FOUND
def _determine_hints(self):
# Don't output hints for --quiet or --output
self.hints = self.args.hints and not self.args.quiet and not self.output
    def main(self):
        # type: (...) -> Status
        """cwlprov command line tool entry point.

        Configures logging and path policy, validates the BagIt and RO
        layers, then dispatches to the selected sub-command.
        Returns a Status exit code.
        """
        args = self.args
        status = self._determine_logging()
        if status:
            return status
        # Now that logging is determined we can report on args.output
        if self.output:
            _logger.debug("Output to %s", args.output)
        status = self._determine_hints()
        if status:
            return status
        status = self._determine_folder()
        if status:
            return status
        status = self._determine_relative()
        if status:
            return status
        # "validate" additionally verifies checksums and profiles below
        full_validation = args.cmd == "validate"
        _logger.info("Opening BagIt %s", self.folder)
        ## BagIt check
        try:
            bag = BDBag(str(self.folder))
        except BagError as e:
            _logger.fatal(e)
            return Status.INVALID_BAG
        except PermissionError as e:
            _logger.fatal(e)
            return Status.PERMISSION_ERROR
        except OSError as e:
            _logger.fatal(e)
            return Status.IO_ERROR
        # Unhandled errors will show Python stacktrace
        invalid = self.validate_bag(bag, full_validation)
        if invalid:
            return invalid
        self.ro = ResearchObject(bag)
        invalid = self.validate_ro(full_validation)
        if invalid:
            return invalid
        if full_validation:
            if not args.quiet:
                self.print(
                    "Valid CWLProv RO: %s"
                    % self._absolute_or_relative_path(self.folder)
                )
            return Status.OK
        # Else, find the other commands
        COMMANDS = {
            "info": self.info,
            "who": self.who,
            "prov": self.prov,
            "inputs": self.inputs,
            "outputs": self.outputs,
            "run": self.run,
            "runs": self.runs,
            "rerun": self.rerun,
            "derived": self.derived,
            "runtimes": self.runtimes,
        }
        cmd = COMMANDS.get(args.cmd)
        if not cmd:
            # Light-weight validation
            if not args.quiet:
                self.print("Detected CWLProv Research Object: %s" % self.folder)
            else:
                self.print(self.folder)
            return Status.OK
        return cmd()
def _resource_path(self, path, absolute=False):
p = self.ro.resolve_path(str(path))
return self._absolute_or_relative_path(p, absolute)
def _absolute_or_relative_path(self, path, absolute=False):
p = Path(path)
if not absolute and self.relative_paths:
cwd = Path(self.relative_paths)
return os.path.relpath(p, cwd)
else:
return Path(p).absolute()
def _wf_id(self, run=None):
w = run or self.args.id or self.ro.workflow_id
# ensure consistent UUID URIs
return _as_uuid(w)
    def validate_bag(self, bag, full_validation=False):
        """Validate the BagIt layer of the Research Object.

        Args:
            bag: the opened BDBag.
            full_validation: when True also verify payload checksums (slow)
                and treat a missing BdBag profile as fatal.

        Returns:
            Status.OK, or an error Status describing the failure.
        """
        try:
            valid_bag = bag.validate(fast=not full_validation)
        except BagError as e:
            _logger.error("BagIt validation failed for: %s: %s", bag.path, e)
            return Status.INVALID_BAG
        if not valid_bag:
            _logger.error("Invalid BagIt folder: %s", bag.path)
            # Specific errors already output from bagit library
            return Status.INVALID_BAG
        # Check we follow right profile
        profiles = _info_set(bag, "BagIt-Profile-Identifier")
        supported_ro = set(BAGIT_RO_PROFILES).intersection(profiles)
        if not supported_ro:
            _logger.warning("Missing BdBag profile: %s", bag.path)
            if self.hints:
                print("Try adding to %s/bag-info.txt:" % bag.path)
                print("BagIt-Profile-Identifier: %s" % BAGIT_RO_PROFILES[0])
            if full_validation:
                return Status.MISSING_PROFILE
        # Check we have a manifest
        has_manifest = MANIFEST_JSON in bag.tagfile_entries()
        if not has_manifest:
            _logger.warning("Missing from tagmanifest: %s", MANIFEST_JSON)
            # NOTE(review): requires Status.MISSING_MANIFEST to be defined
            return Status.MISSING_MANIFEST
        return Status.OK
    def validate_ro(self, full_validation=False):
        """Check the RO declares a (supported) CWLProv profile.

        Returns:
            Status.OK; Status.MISSING_PROFILE when no CWLProv profile is
            declared; under full validation also
            Status.UNSUPPORTED_CWLPROV_VERSION for unknown versions.
        """
        ro = self.ro
        args = self.args
        # If it has this prefix, it's probably OK
        cwlprov = {
            p for p in ro.conformsTo if p.startswith("https://w3id.org/cwl/prov/")
        }
        if not cwlprov:
            if full_validation or not args.quiet:
                _logger.warning("Missing CWLProv profile: %s", ro.bag.path)
            if full_validation and self.hints:
                print("Try adding to %s/metadata/manifest.json:" % ro.bag.path)
                print(
                    '{\n "id": "/",\n "conformsTo", "%s",\n ...\n}'
                    % CWLPROV_SUPPORTED[0]
                )
            return Status.MISSING_PROFILE
        supported_cwlprov = set(CWLPROV_SUPPORTED).intersection(cwlprov)
        if cwlprov and not supported_cwlprov:
            # Probably a newer one this code don't support yet; it will
            # probably be fine
            _logger.warning("Unsupported CWLProv version: %s", cwlprov)
            if self.hints:
                print("Supported profiles:\n %s" % "\n ".join(CWLPROV_SUPPORTED))
            if full_validation:
                return Status.UNSUPPORTED_CWLPROV_VERSION
        return Status.OK
    def info(self):
        """Show research object metadata (``info`` command)."""
        ro = self.ro
        args = self.args
        # About RO?
        if not args.quiet:
            self.print(ro.bag.info.get("External-Description", "Research Object"))
        self.print("Research Object ID: %s" % ro.id)
        # Report only CWLProv conformance claims
        cwlprov = {
            p for p in ro.conformsTo if p.startswith("https://w3id.org/cwl/prov/")
        }
        if cwlprov:
            self.print("Profile: %s" % many(cwlprov))
        w = ro.workflow_id
        if w:
            self.print("Workflow run ID: %s" % w)
        when = ro.bag.info.get("Bagging-Date")
        if when:
            self.print("Packaged: %s" % when)
        return Status.OK
def who(self):
ro = self.ro
args = self.args
# about RO?
createdBy = many(ro.createdBy)
authoredBy = many(ro.authoredBy)
if createdBy or not args.quiet: # skip (unknown) on -q)
self.print("Packaged By: %s" % createdBy or "(unknown)")
if authoredBy or not args.quiet:
self.print("Executed By: %s" % authoredBy or "(unknown)")
return Status.OK
    def _derived_from(self, uuid):
        """Placeholder for derivation traversal (not yet implemented)."""
        # TODO: walk wasDerivedFrom chains starting from *uuid*
        pass
def _entity_from_data_argument(self):
# Is it a UUID?
data_uuid = None
data_file = None
try:
data_uuid = UUID(args.data)
except ValueError:
pass
if data_uuid:
_logger.debug("Assuming UUID %s", data_uuid)
else:
# Is it a filename within the RO?
try:
data_file = self.research_object.resolve_path(args.data)
except OSError:
pass
# A file from our current directory?
if os.path.exists(args.data):
data_file = pathlib.Path(args.data)
def derived(self):
ro = self.ro
args = self.args
_data_entity = _entity_from_data_argument(args.data)
return Status.OK
    def runtimes(self):
        """Show min/avg/max execution times per step plan (``runtimes`` command)."""
        ro = self.ro
        args = self.args
        error, activity = self._load_activity_from_provenance()
        if error:
            return error
        plans = {}
        # Group each step's duration under its plan (or its identifier
        # when no plan was recorded)
        for step in activity.steps():
            plan = step.plan()
            if not plan:
                plan = step.identifier
            durations = plans.setdefault(plan, [])
            dur = step.duration()
            if dur:
                durations.append(dur)
        # TODO: Support CSV output?
        # Assume no more than 99 hours for well-aligned columns
        format = "%015s %015s %015s %04s %s"
        if not args.quiet:
            # NOTE(review): assumes self.print forwards extra args for
            # %-style formatting (its definition is not in this view) — confirm
            self.print(format, *("min avg max n step".split()))
        # TODO: Sort from max-to-lowest average?
        for plan in plans:
            durations = plans[plan]
            self.print(
                format,
                min(durations),
                average(durations),
                max(durations),
                len(durations),
                plan.localpart or plan,
            )
        return Status.OK
    def prov(self):
        """Export workflow execution provenance (``prov`` command).

        With --format files (the default) lists the provenance files,
        optionally prefixed by their short format name (-F); otherwise
        streams the requested serialisation to the output.
        """
        ro = self.ro
        args = self.args
        uri, uuid, name = self._wf_id()
        if args.format == "files":
            for prov in ro.provenance(uri) or ():
                if args.formats:
                    # Prefix each path with its short format extension
                    format = ro.mediatype(prov) or ""
                    format = EXTENSIONS.get(format, format)
                    self.print(f"{format} {(self._resource_path(prov))}")
                else:
                    self.print("%s" % self._resource_path(prov))
        else:
            media_type = MEDIA_TYPES.get(args.format, args.format)
            prov = _prov_format(ro, uri, media_type)
            if not prov:
                _logger.error("Unrecognized format: %s", args.format)
                return Status.UNKNOWN_FORMAT
            with prov.open(encoding="UTF-8") as f:
                shutil.copyfileobj(f, self.output or sys.stdout)
                self.print()  # workaround for missing trailing newline
        return Status.OK
    def inputs(self):
        """List workflow/step input files/values (``inputs`` command)."""
        return self._inputs_or_outputs(is_inputs=True)
    def outputs(self):
        """List workflow/step output files/values (``outputs`` command)."""
        return self._inputs_or_outputs(is_inputs=False)
    def _load_provenance(self, wf_uri):
        """Load Provenance for *wf_uri* into self.provenance.

        When *wf_uri* has no recorded provenance and --run was not given
        explicitly, falls back to the RO's primary workflow run.

        Returns:
            Status.OK on success, otherwise an error Status.
        """
        if not self.ro.provenance(wf_uri):
            if self.args.run:
                _logger.error("No provenance found for specified run: %s", wf_uri)
                # We'll need to give up
                return Status.UNKNOWN_RUN
            else:
                _logger.debug("No provenance found for activity: %s", wf_uri)
                _logger.info(
                    "Assuming primary provenance --run %s", self.ro.workflow_id
                )
                wf_uri, _, _ = _as_uuid(self.ro.workflow_id)
                if not self.ro.provenance(wf_uri):
                    _logger.error("No provenance found for: %s", wf_uri)
                    return Status.UNKNOWN_RUN
        try:
            provenance = Provenance(self.ro, wf_uri)
        except OSError:
            # assume Error already printed by _prov_document
            return Status.PROV_NOT_FOUND
        self.provenance = provenance
        return Status.OK
    def _load_activity_from_provenance(self):
        """Resolve the --run/id arguments to a provenance Activity.

        Returns:
            (Status.OK, activity) on success, or (error Status, None).
        """
        wf_uri, wf_uuid, wf_name = self._wf_id(self.args.run)
        a_uri, a_uuid, a_name = self._wf_id()
        error = self._load_provenance(wf_uri)
        if error != Status.OK:
            return (error, None)
        activity = self.provenance.activity(a_uri)
        if activity:
            return (Status.OK, activity)
        else:
            _logger.error("Provenance does not describe step %s: %s", wf_name, a_uri)
            if not self.args.run and self.hints:
                print(
                    "If the step is in nested provenance, try '--run UUID' as found in 'cwlprov run'"
                )
            return (Status.UNKNOWN_ACTIVITY, None)
    def _inputs_or_outputs(self, is_inputs):
        """Shared implementation of the ``inputs``/``outputs`` commands.

        Resolves the requested run/step, then prints each used (inputs) or
        generated (outputs) entity in the style selected by --format:
        "uris", "files", "values", "json" or "any".

        Args:
            is_inputs: True for usage records, False for generation records.
        """
        if is_inputs:
            put_s = "Input"
        else:
            put_s = "Output"
        ro = self.ro
        args = self.args
        wf_uri, wf_uuid, wf_name = self._wf_id(self.args.run)
        a_uri, a_uuid, a_name = self._wf_id()
        if not self.ro.provenance(wf_uri):
            if args.run:
                _logger.error("No provenance found for: %s", wf_name)
                # We'll need to give up
                return Status.UNKNOWN_RUN
            else:
                _logger.debug("No provenance found for: %s", wf_name)
                _logger.info("Assuming primary provenance --run %s", ro.workflow_id)
                wf_uri, wf_uuid, wf_name = _as_uuid(ro.workflow_id)
                if not ro.provenance(wf_uri):
                    _logger.error("No provenance found for: %s", wf_name)
                    return Status.UNKNOWN_RUN
        try:
            provenance = Provenance(self.ro, wf_uri)
        except OSError:
            # assume Error already printed by _prov_document
            return Status.UNKNOWN_RUN
        activity = provenance.activity(a_uri)
        if not activity:
            _logger.error("Provenance does not describe step %s: %s", wf_name, a_uri)
            if not self.args.run and self.hints:
                print(
                    "If the step is in nested provenance, try '--run UUID' as found in 'cwlprov run'"
                )
            return Status.UNKNOWN_RUN
        activity_id = activity.id
        if wf_uri != a_uri:
            _logger.info("%ss for step %s in workflow %s", put_s, a_name, wf_name)
        else:
            _logger.info("%ss for workflow %s", put_s, wf_name)
        job = {}
        if is_inputs:
            records = activity.usage()
        else:
            records = activity.generation()
        for u in records:
            entity_id = u.entity_id
            role = u.role
            # Naively assume CWL identifier structure of URI
            if not role:
                _logger.warning("Unknown role for %s, skipping", u)
                role_name = None
                continue
            # poor mans CWL parameter URI deconstruction
            role_name = str(role)
            role_name = role_name.split("/")[-1]
            role_name = urllib.parse.unquote(role_name)
            if args.parameters and not args.quiet and args.format != "json":
                # NOTE(review): assumes self.print supports %-style varargs
                # (definition not in this view) — confirm
                self.print("%s %s:", put_s, role_name)
            time = u.time
            entity = u.entity()
            if not entity:
                _logger.warning("No provenance for used entity %s", entity_id)
                continue
            # The entity plus anything it specializes may carry the bundled file
            file_candidates = [entity]
            file_candidates.extend(entity.specializationOf())
            if self.args.format in ("uris", "any"):
                self.print(file_candidates[-1].uri)
                continue
            printed_file = False
            for file_candidate in file_candidates:
                bundled = self.ro.bundledAs(uri=file_candidate.uri)
                if not bundled:
                    continue
                _logger.debug("entity %s bundledAs %s", file_candidate.uri, bundled)
                bundled_path = self._resource_path(bundled)
                job[role_name] = {}
                job[role_name]["class"] = "File"
                job[role_name]["path"] = str(bundled_path)
                if self.args.format in ("files", "any"):
                    self.print(bundled_path)
                    printed_file = True
                if self.args.format == "values":
                    # Warning: This will print all of the file straight to stdout
                    with open(self._resource_path(bundled, absolute=False)) as f:
                        shutil.copyfileobj(f, self.output or sys.stdout)
                        printed_file = True
                    break
            if printed_file:
                continue  # next usage
            # Still here? Perhaps it has prov:value ?
            value = entity.value
            if value is not None:  # but might be False
                job[role_name] = value
                if self.args.format in ("values", "any"):
                    self.print(value)
                elif self.args.format == "files":
                    # We'll have to make a new file!
                    with tempfile.NamedTemporaryFile(delete=False) as f:
                        b = str(value).encode("utf-8")
                        f.write(b)
                    self.print(f.name)
        if self.args.format == "json":
            self.print(json.dumps(job))
def _entity_as_json(self, entity, absolute=True):
_logger.debug("json from %s", entity)
file_candidates = [entity]
file_candidates.extend(entity.specializationOf())
for file_candidate in file_candidates:
bundled = self.ro.bundledAs(uri=file_candidate.uri)
if not bundled:
continue
_logger.debug("entity %s bundledAs %s", file_candidate.uri, bundled)
bundled_path = self._resource_path(bundled, absolute=absolute)
json = {
"class": "File",
"path": str(bundled_path),
}
if entity.basename:
json["basename"] = entity.basename
if entity.nameroot:
json["nameroot"] = entity.nameroot
if entity.nameext:
json["nameext"] = entity.nameext
f = partial(self._entity_as_json, absolute=absolute)
secondaries = list(map(f, entity.secondary_files()))
if secondaries:
json["secondaries"] = secondaries
_logger.debug("file as json: %s", json)
return json
# Perhaps it has prov:value ?
value = entity.value
if value is not None: # ..but might be False
_logger.debug("value as json: %s", value)
return value
# TODO: Handle Directory
# TODO: Handle collection
# Still here? No idea, fallback is just return the URI :(
json = {
"class": "File", # ??
"location": entity.uri,
}
_logger.debug("uri as json: %s", json)
return json
    def _inputs_or_outputs_job(self, activity, is_inputs, absolute):
        """Build a CWL job dict mapping parameter names to JSON values.

        Args:
            activity: provenance Activity to inspect.
            is_inputs: True for usage records (inputs), False for
                generation records (outputs).
            absolute: render file paths absolutely.

        Returns:
            dict of role name -> JSON-ish value from _entity_as_json().
        """
        activity_id = activity.id
        job = {}
        if is_inputs:
            records = activity.usage()
        else:
            records = activity.generation()
        for u in records:
            entity_id = u.entity_id
            role = u.role
            # Naively assume CWL identifier structure of URI
            if not role:
                _logger.warning("Unknown role for %s, skipping", u)
                role_name = None
                continue
            # poor mans CWL parameter URI deconstruction
            role_name = str(role)
            role_name = role_name.split("/")[-1]
            role_name = urllib.parse.unquote(role_name)
            entity = u.entity()
            if not entity:
                _logger.warning("No provenance for entity %s", entity_id)
                continue
            job[role_name] = self._entity_as_json(entity, absolute=absolute)
        return job
    def runs(self):
        """List all workflow executions in the RO (``runs`` command).

        The master workflow run is marked with "*"; unless --quiet, each
        run's prov:label is shown alongside its UUID.
        """
        ro = self.ro
        args = self.args
        for run in ro.resources_with_provenance():
            name = _simpler_uuid(run)
            if args.verbose or not args.quiet:
                # Also load up the provenance to find its name
                prov_doc = _prov_document(ro, run, args)
                if not prov_doc:
                    self.print(name)
                    _logger.warning("No provenance found for: %s", name)
                    continue
                activity_id = Identifier(run)
                activity = first(prov_doc.get_record(activity_id))
                if not activity:
                    _logger.error("Provenance does not describe activity %s", run)
                    return Status.UNKNOWN_RUN
                label = first(activity.get_attribute("prov:label")) or ""
                is_master = run == ro.workflow_id
                self.print("{} {} {}".format(name, is_master and "*" or " ", label))
            else:
                self.print(name)
        if self.hints:
            self.print("Legend:")
            self.print(" * master workflow")
    def rerun(self):
        """Re-execute the packaged workflow or one of its steps (``rerun``).

        For the master workflow, reuses the packed workflow and primary job
        file. For a step, recreates a job file from the recorded inputs and
        resolves the tool behind the step's plan. Finally exec()s the
        configured cwl runner (does not return on success).
        """
        if not self.args.id or self.args.id == "-":
            # Might be used to rerun default workflow
            self.args.id = None
        if not self.args.id and not self.args.run:
            wf_file = self._find_workflow()
            _logger.debug("Master workflow, re-using level 0 primary job")
            wf_arg = wf_file
            job_file = self._find_primary_job()
        else:
            _logger.debug("Recreating job from level 1 provenance")
            (error, a) = self._load_activity_from_provenance()
            if error:
                return error
            _logger.info("Rerunning step <%s> %s", a.id.uri, a.label)
            # Create job JSON from original input values
            job = self._recreate_job(a, absolute=True)
            job_file = self._temporary_job(job)
            # Now find which tool was rerun
            p = a.plan()
            if not p:
                _logger.warning("Could not find Association with Plan for %s" % a)
                return Status.MISSING_PLAN
            _logger.info("Step was executed with plan %s", p.uri)
            wf_file = self.ro.resolve_path(p.uri)
            if not "#" in p.uri:
                # Top-level cwl file
                wf_arg = wf_file
            else:
                # part of cwl file (e.g. a command line tool)
                # Workaround as Association links to the identified workflow step,
                # but cwltool needs the (sometimes not identified) reference behind
                # the step's "run" property
                cwl = self._load_cwl(wf_file)
                if not cwl:
                    return Status.CANT_LOAD_CWL
                step_id = "#%s" % p.localpart
                run = self._find_step_run(cwl, step_id)
                if not isinstance(run, str):
                    _logger.error(
                        "Not implemented: rerun of inline 'run' of step %s", step_id
                    )
                    return Status.NOT_IMPLEMENTED
                if run.startswith("#"):
                    # Fragment reference within the same packed file
                    wf_arg = f"{wf_file}{run}"
                    _logger.info("Tool %s", wf_arg)
                else:
                    _logger.warning(
                        "Non-local 'run' %s might be missing from RO", step_id
                    )
                    wf_arg = run
                    # TODO: Check if it's in snapshot/ or an absolute URI?
        return self._exec_cwlrunner(wf_arg, job_file)
def _load_cwl(self, wf_file):
_logger.debug("Loading CWL as JSON: %s", wf_file)
with open(wf_file) as f:
# FIXME: Load as yaml in case it is not JSON?
cwl = json.load(f)
ver = cwl["cwlVersion"]
_logger.debug("Loaded CWL version: %s", ver)
if not ver.startswith("v1."):
_logger.fatal("Unsupported cwlVersion %s in %s", ver, wf_file)
return None
return cwl
def _find_step_run(self, cwl, step_id):
step = find_dict_with_item(cwl, step_id)
if not step:
_logger.error("Could not find step for ")
_logger.debug("Found CWL step: %s", step)
if step.get("class") in ("Workflow", "CommandLineTool", "ExpressionTool"):
# We can execute directly
return step_id
return step.get("run")
    def _exec_cwlrunner(self, wf_arg, job_file):
        """Replace this process with the configured cwl runner.

        Does not return on success (os.execlp takes over the process);
        returns Status.UNHANDLED_ERROR only if exec itself fails.
        """
        # Switch to a new temporary directory
        tmpdir = tempfile.mkdtemp(prefix="cwlprov.", suffix=".tmp")
        _logger.debug("cd %s", tmpdir)
        os.chdir(tmpdir)
        # Change to the new cwl runner process so Ctrl-C etc works
        if " " in self.args.cwlrunner:
            # Split out any cwl-runner arguments
            cwlargs = shlex.split(self.args.cwlrunner)
        else:
            cwlargs = [self.args.cwlrunner]
        cwlargs.append(str(wf_arg))
        cwlargs.append(str(job_file))
        # Pass through any extra arguments given after "rerun"
        cwlargs.extend(self.args.args)
        _logger.info("%s", " ".join(cwlargs))
        os.execlp(cwlargs[0], *cwlargs)
        # Still here? Above should have taken over this python process!
        _logger.fatal("Could not execute cwl-runner")
        return Status.UNHANDLED_ERROR
def _find_workflow(self):
# TODO find path in manifest
path = "workflow/packed.cwl"
p = self.ro.resolve_path(str(path))
return p
def _find_primary_job(self):
# TODO find path in manifest
path = "workflow/primary-job.json"
p = self.ro.resolve_path(str(path))
return p
def _recreate_job(self, activity, absolute):
# TODO: Actually do it
job = self._inputs_or_outputs_job(activity, is_inputs=True, absolute=absolute)
_logger.debug("Recreated job: %s", job)
return job
def _temporary_job(self, job):
with tempfile.NamedTemporaryFile(
mode="w", prefix="rerun-", suffix=".json", delete=False, encoding="UTF-8"
) as f:
json.dump(job, f, indent=2)
_logger.info("Temporary job: %s", f.name)
return f.name
    def _usage(self, activity_id, prov_doc):
        """Print one "In" line per entity used by *activity_id*.

        No-op unless --inputs was requested.
        """
        args = self.args
        if not args.inputs:
            return
        # All prov:Usage records where this activity is the using activity.
        usage = _prov_with_attr(prov_doc, ProvUsage, activity_id, PROV_ATTR_ACTIVITY)
        for u in usage:
            entity = _prov_attr(PROV_ATTR_ENTITY, u)
            # Strip the hash prefix so only the bare checksum/uuid remains.
            entity_id = entity and _simpler_uuid(entity.uri).replace(
                "urn:hash::sha1:", ""
            )
            role = _prov_attr(PROV_ROLE, u)
            time = _prov_attr(PROV_ATTR_TIME, u)
            if args.start and args.end:
                # 2 col timestamps: usage time in the start column,
                # padding in the end column.
                time_part = "{} {} ".format(
                    time or "(unknown usage time) ",
                    TIME_PADDING,
                )
            elif args.start or args.end:
                # 1 col timestamp
                time_part = "%s " % (time or "(unknown usage time) ")
            else:
                time_part = ""
            self.print("{}In {} < {}".format(time_part, entity_id, role or ""))
    def _generation(self, activity_id, prov_doc):
        """Print one "Out" line per entity generated by *activity_id*.

        No-op unless --outputs was requested.
        """
        args = self.args
        if not args.outputs:
            return
        # All prov:Generation records where this activity is the generator.
        gen = _prov_with_attr(prov_doc, ProvGeneration, activity_id, PROV_ATTR_ACTIVITY)
        for g in gen:
            entity = _prov_attr(PROV_ATTR_ENTITY, g)
            # Strip the hash prefix so only the bare checksum/uuid remains.
            entity_id = entity and _simpler_uuid(entity.uri).replace(
                "urn:hash::sha1:", ""
            )
            role = _prov_attr(PROV_ROLE, g)
            time = _prov_attr(PROV_ATTR_TIME, g)
            if args.start and args.end:
                # 2 col timestamps: padding in the start column,
                # generation time in the end column (mirror of _usage).
                time_part = "{} {} ".format(
                    TIME_PADDING,
                    time or "(unknown generation time)",
                )
            elif args.start or args.end:
                # 1 col timestamp
                time_part = "%s " % (time or "(unknown generation time)")
            else:
                time_part = ""
            self.print("{}Out {} > {}".format(time_part, entity_id, role or ""))
def print(self, msg="", *args):
if args and isinstance(msg, str) and "%" in msg:
msg = msg % args
print(msg, file=self.output or sys.stdout)
else:
print(msg, *args, file=self.output or sys.stdout)
def run(self):
ro = self.ro
args = self.args
uri, uuid, name = self._wf_id()
if not ro.provenance(uri):
_logger.error("No provenance found for: %s", name)
# if self.hints:
# print("Try --search to examine all provenance files")
return Status.UNKNOWN_RUN
prov_doc = _prov_document(ro, uri, args)
if not prov_doc:
# Error already printed by _prov_document
return Status.UNKNOWN_RUN
if args.verbose:
self.print("Workflow run:", name)
activity_id = Identifier(uri)
activity = first(prov_doc.get_record(activity_id))
if not activity:
_logger.error("Provenance does not describe activity %s", uri)
return Status.UNKNOWN_RUN
if args.verbose:
self.print(activity)
label = ""
if args.labels:
label = " %s " % (first(activity.get_attribute("prov:label")) or "")
start = first(_prov_with_attr(prov_doc, ProvStart, activity_id))
start_time = start and _prov_attr(PROV_ATTR_TIME, start)
end = first(_prov_with_attr(prov_doc, ProvEnd, activity_id))
end_time = end and _prov_attr(PROV_ATTR_TIME, end)
if args.verbose and start:
self.print(start)
padded_start_time = ""
if args.end and args.start:
# 2 columns
padded_start_time = f"{start_time} {TIME_PADDING} "
elif args.end or args.start:
# 1 column, we don't care which
padded_start_time = "%s " % (start_time)
self.print(f"{padded_start_time}Flow {name} [{label}")
# inputs
self._usage(activity_id, prov_doc)
# steps
have_nested = False
if args.steps:
started = _prov_with_attr(
prov_doc, ProvStart, activity_id, PROV_ATTR_STARTER
)
steps = map(partial(_prov_attr, PROV_ATTR_ACTIVITY), started)
for child in steps:
c_activity = first(prov_doc.get_record(child))
if args.verbose:
self.print(c_activity)
c_label = ""
if args.labels:
c_label = " %s " % (
first(c_activity.get_attribute("prov:label")) or ""
)
c_start = first(_prov_with_attr(prov_doc, ProvStart, child))
c_start_time = c_start and _prov_attr(PROV_ATTR_TIME, c_start)
c_end = first(_prov_with_attr(prov_doc, ProvEnd, child))
c_end_time = c_end and _prov_attr(PROV_ATTR_TIME, c_end)
c_duration = ""
if args.duration:
if c_start_time and c_end_time:
c_duration = " (%s)" % (c_end_time - c_start_time)
else:
c_duration = " (unknown duration)"
c_provenance = ro.provenance(child.uri)
have_nested = have_nested or c_provenance
c_id = _simpler_uuid(child.uri)
c_start_time = args.start and (
"%s " % c_start_time or "(unknown start time) "
)
c_end_time = args.end and "%s " % (c_end_time or TIME_PADDING)
self.print(
"%s%sStep %s %s%s%s"
% (
c_start_time or "",
c_end_time or "",
c_id,
c_provenance and "*" or " ",
c_label,
c_duration,
)
)
self._usage(child, prov_doc)
self._generation(child, prov_doc)
# generated
self._generation(activity_id, prov_doc)
if args.verbose and end:
self.print(end)
# end
padded_end_time = ""
if args.end and args.start:
padded_end_time = f"{TIME_PADDING} {end_time} "
elif args.end or args.start:
padded_end_time = "%s " % (end_time)
w_duration = ""
if args.duration:
if start_time and end_time:
w_duration = " (%s)" % (end_time - start_time)
else:
w_duration = " (unknown duration)"
self.print(f"{padded_end_time}Flow {name} ]{label}{w_duration}")
if self.hints:
print("Legend:")
print(" [ Workflow start")
if args.inputs:
print(" < Used as input")
if args.outputs:
print(" > Generated as output")
if have_nested:
print(
" * Nested provenance, use UUID to explore: cwlprov run %s" % c_id
)
print(" ] Workflow end")
return Status.OK
def main(args=None):
    """CLI entry point: run the Tool, mapping OS failures to IO_ERROR."""
    with Tool(args) as tool:
        try:
            return tool.main()
        except OSError as err:
            _logger.fatal(err)
            return Status.IO_ERROR
if __name__ == "__main__":
    # Script entry: forward the CLI arguments (without argv[0]) to main().
    main(sys.argv[1:])
|
# Given a n-ary tree, find its maximum depth.
#
# The maximum depth is the number of nodes along the longest path
# from the root node down to the farthest leaf node.
#
# Input: root = [1,null,3,2,4,null,5,6]
# Output: 3
#
# Input: root = [1,null,2,3,4,5,null,null,6,7,null,8,null,9,10,null,null,11,null,12,null,13,null,null,14]
# Output: 5
class Node:
    """N-ary tree node.

    Attributes:
        val: payload value of the node.
        children: list of child Nodes; always a list (a fresh one per
            instance), so consumers can iterate it unconditionally.
    """

    def __init__(self, val=None, children=None):
        self.val = val
        # BUGFIX: storing the raw None default made any consumer that
        # iterates node.children (e.g. Solution.maxDepth) crash on leaves
        # built as Node(v). Normalize to a per-instance empty list; a
        # mutable default argument would be shared across instances.
        self.children = children if children is not None else []
class Solution:
    """Iterative maximum-depth of an n-ary tree (DFS with explicit stack)."""

    def maxDepth(self, root):
        """Return the number of nodes on the longest root-to-leaf path.

        An empty tree has depth 0. Nodes whose `children` attribute is
        None are treated as leaves (the original crashed with TypeError
        when iterating None, which Node's default produces).
        """
        if not root:
            return 0
        deepest = 0
        stack = [(root, 1)]
        while stack:
            node, depth = stack.pop()
            deepest = max(deepest, depth)
            # `or []` guards against children=None leaves.
            for child in node.children or []:
                stack.append((child, depth + 1))
        return deepest
|
FREECADPATH = '/usr/lib/freecad/lib'
import sys
sys.path.append(FREECADPATH)
from collections import defaultdict
import FreeCAD
import Mesh
import os
def export_stl(body_obj, filename):
    ## Export the named body of the "rs_winder" document as an STL mesh.
    ## NOTE(review): document name is hard-coded -- must match the opened
    ## rs-winder.FCStd project.
    document = FreeCAD.getDocument("rs_winder")
    Mesh.export([document.getObject(body_obj)], filename)
## List of sizes to generate
## Large range = h=2.8, d=12-18
## Normal range = h=1.6, d=7-12
# Diameters (mm) are kept as strings: they are written into spreadsheet
# cells via sheet.set() and embedded verbatim in output filenames and
# version labels below.
spring1_d = ["7.0", "7.5", "8.0", "8.5", "9.0", "9.5", "10.0", "10.5", "11.0", "11.5", "12.0", "12.5"]
bowl1_d = ["9.0", "9.5", "10.0", "10.5", "11.0", "11.5", "11.8", "12.0", "12.3", "12.5", "13.0", "13.5", "14.0", "14.5"]
spring2_d = ["13.0", "13.5", "14.0", "14.5", "15.0", "15.5", "16.0", "16.5", "17.0", "17.5", "18.0", "18.5"]
bowl2_d = ["15.0", "15.5", "16.0", "16.5", "17.0", "17.3", "17.5", "18.0", "18.5", "19.0", "19.5", "20.0", "20.5"]
## Open project file and spreadsheet
FreeCAD.openDocument("rs-winder.FCStd")
# BUGFIX: use the FreeCAD module directly instead of the GUI-only `App`
# alias -- this script runs under plain `python` (it imports FreeCAD),
# where `App` is undefined and raised NameError.
doc = FreeCAD.ActiveDocument
sheet = doc.Spreadsheet002
## Create dirs
# makedirs(..., exist_ok=True) creates parents and lets the script be
# re-run without crashing on already-existing release directories
# (os.mkdir raised FileExistsError on a second run).
for _range in ("normal", "large"):
    for _part in ("winder-base", "housing-barrel", "barrel-bowl", "plunger"):
        os.makedirs(os.path.join("Release", _range, _part), exist_ok=True)
###############################
## Generate normal winder ##
###############################
## Setup normal winder params
# spr_h = spring height (mm, the "h=1.6" normal range above); arb_d /
# hook_d / body_d drive the arbor, hook and body dimensions -- TODO
# confirm exact meanings against the spreadsheet.
sheet.set("spr_h", "1.6")
sheet.set("arb_d", "2.1")
sheet.set("hook_d", "0.5")
sheet.set("body_d", "25.0")
# Full recompute so all features pick up the new spreadsheet values.
doc.recompute(None,True,True)
## Generate winder base (with arbor)
# Selecting the Tip feature and Pocket005.Reversed toggles between the
# solid-arbor and arbor-hole variants of Body002.
doc.getObject('Body002').Tip=doc.getObject('Pocket005')
doc.getObject('Pocket005').Reversed = 0
doc.recompute(None,True,True)
export_stl("Body002", "Release/normal/winder-base/rs-winder-base-normal-arbor.stl")
## Generate winder base (with arbor hole)
doc.getObject('Body002').Tip=doc.getObject('Pocket015')
doc.getObject('Pocket005').Reversed = 1
doc.recompute(None,True,True)
export_stl("Body002", "Release/normal/winder-base/rs-winder-base-normal-hole.stl")
## Generate housing/plunger (based on spring diameter)
for n in spring1_d:
    sheet.set("spr_d", n)
    sheet.set("version", n+"MM")
    # Wider version labels (4+ characters, e.g. "10.0") need a larger
    # x-offset to stay centered.
    if len(n) < 4:
        sheet.set("version_x_offs", "-5.50")
    else:
        sheet.set("version_x_offs", "-6.60")
    doc.recompute(None,True,True)
    export_stl("Body", "Release/normal/plunger/rs-winder-plunger-"+n+"mm.stl")
    export_stl("Body001", "Release/normal/housing-barrel/rs-winder-housing-"+n+"mm.stl")
## Generate bowls (based actual barrel diameter)
for n in bowl1_d:
    sheet.set("barrel_d", n)
    doc.recompute(None,True,True)
    export_stl("Body003", "Release/normal/barrel-bowl/rs-winder-bowl-"+n+"mm.stl")
###############################
## Generate large winder ##
###############################
## Setup large winder params
# Same parameters as the normal run, sized for the "h=2.8, d=12-18"
# large range listed above.
sheet.set("spr_h", "2.8")
sheet.set("arb_d", "3.125")
sheet.set("hook_d", "0.55")
sheet.set("body_d", "30.0")
# All large diameters are 4+ characters, so the wide label offset is
# set once instead of per-iteration.
sheet.set("version_x_offs", "-6.60")
doc.recompute(None,True,True)
## Generate winder base (with arbor)
doc.getObject('Body002').Tip=doc.getObject('Pocket005')
doc.getObject('Pocket005').Reversed = 0
doc.recompute(None,True,True)
export_stl("Body002", "Release/large/winder-base/rs-winder-base-large-arbor.stl")
## Generate winder base (with arbor hole)
doc.getObject('Body002').Tip=doc.getObject('Pocket015')
doc.getObject('Pocket005').Reversed = 1
doc.recompute(None,True,True)
export_stl("Body002", "Release/large/winder-base/rs-winder-base-large-hole.stl")
## Generate housing/plunger (based on spring diameter)
for n in spring2_d:
    sheet.set("spr_d", n)
    sheet.set("version", n+"MM")
    doc.recompute(None,True,True)
    export_stl("Body", "Release/large/plunger/rs-winder-plunger-"+n+"mm.stl")
    export_stl("Body001", "Release/large/housing-barrel/rs-winder-housing-"+n+"mm.stl")
## Generate bowls (based actual barrel diameter)
for n in bowl2_d:
    sheet.set("barrel_d", n)
    doc.recompute(None,True,True)
    export_stl("Body003", "Release/large/barrel-bowl/rs-winder-bowl-"+n+"mm.stl")
|
#utility functions for demo
import os
import yaml
import sys
import json
import pandas as pd
import numpy as np
import urllib
import xmltodict
import csv
import warnings
import tensorflow as tf
from tqdm import tqdm
from keras_retinanet import models
from keras.utils import multi_gpu_model
from deepforest import _ROOT
def label_to_name(class_dict, label):
    """ Map label to name.
    """
    return class_dict[label]
def read_config(config_path):
    """Load a YAML configuration file and return its contents.

    Args:
        config_path (str): path to the .yml config file
    Raises:
        FileNotFoundError: if the file cannot be opened or parsed; the
        original error is chained for context.
    """
    try:
        with open(config_path, 'r') as f:
            config = yaml.load(f, Loader=yaml.FullLoader)
    except Exception as e:
        # BUGFIX: the original message had a single {} placeholder for two
        # format arguments, so the underlying error was silently dropped.
        raise FileNotFoundError(
            "There is no config at {}, yields: {}".format(config_path, e)) from e
    return config
def read_model(model_path, config):
    """
    Read keras retinanet model from keras.model.save()
    """
    #Suppress user warning, module does not need to be compiled for prediction
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', UserWarning)
        loaded = models.load_model(model_path, backbone_name='resnet50')
    return loaded
#Download progress bar
class DownloadProgressBar(tqdm):
    # tqdm subclass whose update_to() matches the urllib.request.urlretrieve
    # reporthook signature (block_count, block_size, total_size).
    def update_to(self, b=1, bsize=1, tsize=None):
        """Advance the bar to b*bsize bytes; set the total once known."""
        if tsize is not None:
            self.total = tsize
        # self.n is tqdm's current count; update() takes the increment.
        self.update(b * bsize - self.n)
def use_release(save_dir=os.path.join(_ROOT, "data/"), prebuilt_model="NEON"):
    '''Check the existance of, or download the latest model release from github
    Args:
        save_dir (str): Directory to save filepath, default to "data" in deepforest repo
        prebuilt_model: Currently only accepts "NEON", but could be expanded to include other prebuilt models. The local model will be called {prebuilt_model}.h5 on disk.
    Returns:
        release_tag, output_path (str): path to downloaded model
    '''
    #Find latest github tag release from the DeepLidar repo
    _json = json.loads(
        urllib.request.urlopen(
            urllib.request.Request(
                'https://api.github.com/repos/Weecology/DeepForest/releases/latest',
                headers={'Accept': 'application/vnd.github.v3+json'},
            )).read())
    asset = _json['assets'][0]
    url = asset['browser_download_url']
    #Naming based on pre-built model
    output_path = os.path.join(save_dir, prebuilt_model + ".h5")
    # BUGFIX: os.path.join instead of `save_dir + "current_release.csv"`,
    # which produced a wrong path when save_dir lacked a trailing separator.
    release_csv = os.path.join(save_dir, "current_release.csv")
    #Check the release tagged locally
    try:
        release_txt = pd.read_csv(release_csv)
    except (FileNotFoundError, pd.errors.EmptyDataError):
        # Narrowed from a bare `except:`: only "no/empty local record"
        # should silently force a re-download; other errors now surface.
        release_txt = pd.DataFrame({"current_release": [None]})
    #Download the current release it doesn't exist
    if not release_txt.current_release[0] == _json["html_url"]:
        print("Downloading model from DeepForest release {}, see {} for details".format(
            _json["tag_name"], _json["html_url"]))
        with DownloadProgressBar(unit='B',
                                 unit_scale=True,
                                 miniters=1,
                                 desc=url.split('/')[-1]) as t:
            urllib.request.urlretrieve(url, filename=output_path, reporthook=t.update_to)
        print("Model was downloaded and saved to {}".format(output_path))
        #record the release tag locally
        release_txt = pd.DataFrame({"current_release": [_json["html_url"]]})
        release_txt.to_csv(release_csv)
    else:
        print(
            "Model from DeepForest release {} was already downloaded. Loading model from file."
            .format(_json["html_url"]))
    return _json["html_url"], output_path
def xml_to_annotations(xml_path):
    """Load annotations from xml format (e.g. RectLabel editor) and convert them into retinanet annotations format.
    Args:
        xml_path (str): Path to the annotations xml, formatted by RectLabel
    Returns:
        Annotations (pandas dataframe): in the format -> path/to/image.png,x1,y1,x2,y2,class_name
    """
    #parse
    with open(xml_path) as fd:
        doc = xmltodict.parse(fd.read())
    #grab xml objects
    try:
        tile_xml = doc["annotation"]["object"]
    except Exception as e:
        raise Exception("error {} for path {} with doc annotation{}".format(
            e, xml_path, doc["annotation"]))
    # xmltodict yields a list for multiple <object> tags but a single dict
    # for exactly one; normalize to a list so one loop handles both
    # (replaces the `type(...) == list` check and a duplicated branch; the
    # never-used treeID variable was dropped).
    if not isinstance(tile_xml, list):
        tile_xml = [tile_xml]
    xmin = []
    xmax = []
    ymin = []
    ymax = []
    label = []
    for tree in tile_xml:
        xmin.append(tree["bndbox"]["xmin"])
        xmax.append(tree["bndbox"]["xmax"])
        ymin.append(tree["bndbox"]["ymin"])
        ymax.append(tree["bndbox"]["ymax"])
        label.append(tree['name'])
    rgb_name = os.path.basename(doc["annotation"]["filename"])
    #set dtypes: RectLabel stores coordinates as strings
    xmin = [int(x) for x in xmin]
    xmax = [int(x) for x in xmax]
    ymin = [int(x) for x in ymin]
    ymax = [int(x) for x in ymax]
    annotations = pd.DataFrame({
        "image_path": rgb_name,
        "xmin": xmin,
        "ymin": ymin,
        "xmax": xmax,
        "ymax": ymax,
        "label": label
    })
    return (annotations)
def create_classes(annotations_file):
    """Create a class list in the format accepted by keras retinanet
    Args:
        annotations_file: an annotation csv in the retinanet format path/to/image.png,x1,y1,x2,y2,class_name
    Returns:
        path to classes file
    """
    column_names = ["image_path", "xmin", "ymin", "xmax", "ymax", "label"]
    annotations = pd.read_csv(annotations_file, names=column_names)
    #get dir to place along file annotations
    dirname = os.path.split(annotations_file)[0]
    classes_path = os.path.join(dirname, "classes.csv")
    #get unique labels (order of first appearance)
    labels = annotations.label.dropna().unique()
    n_classes = labels.shape[0]
    print("There are {} unique labels: {} ".format(n_classes, list(labels)))
    #write label
    with open(classes_path, 'w') as csv_file:
        writer = csv.writer(csv_file)
        writer.writerows((name, index) for index, name in enumerate(labels))
    return classes_path
def number_of_images(annotations_file):
    """How many images in the annotations file?
    Args:
        annotations_file (str):
    Returns:
        n (int): Number of images
    """
    column_names = ["image_path", "xmin", "ymin", "xmax", "ymax"]
    annotations = pd.read_csv(annotations_file, index_col=False, names=column_names)
    return len(annotations.image_path.unique())
def format_args(annotations_file, classes_file, config, images_per_epoch=None):
    """Format config file to match argparse list for retinainet
    Args:
        annotations_file: a path to a csv dataframe of annotations to get number of images, no header
        config (dict): a dictionary object to convert into a list for argparse
        images_per_epoch (int): Override default steps per epoch (n images/batch size) by manually setting a number of images
    Returns:
        arg_list (list): a list structure that mimics argparse input arguments for retinanet
    """
    #Format args. Retinanet uses argparse, so they need to be passed as a list
    #remember that .yml reads None as a str
    pairs = []
    if config["weights"] != 'None':
        pairs.append(("--weights", config["weights"]))
    pairs.append(("--backbone", config["backbone"]))
    pairs.append(("--image-min-side", config["image-min-side"]))
    pairs.append(("--multi-gpu", config["multi-gpu"]))
    pairs.append(("--epochs", config["epochs"]))
    batch_size = int(config["batch_size"])
    if images_per_epoch:
        steps = round(images_per_epoch / batch_size)
    else:
        steps = round(int(number_of_images(annotations_file)) / batch_size)
    pairs.append(("--steps", steps))
    pairs.append(("--batch-size", config["batch_size"]))
    pairs.append(("--tensorboard-dir", None))
    pairs.append(("--workers", config["workers"]))
    pairs.append(("--max-queue-size", config["max_queue_size"]))
    pairs.append(("--freeze-layers", config["freeze_layers"]))
    pairs.append(("--score-threshold", config["score_threshold"]))
    if config["save_path"]:
        pairs.append(("--save-path", config["save_path"]))
    if config["snapshot_path"]:
        pairs.append(("--snapshot-path", config["snapshot_path"]))
    #flatten the (flag, value) pairs in declaration order
    arg_list = [token for pair in pairs for token in pair]
    #boolean arguments
    if config["save-snapshot"] is False:
        print("Disabling snapshot saving")
        arg_list.append("--no-snapshots")
    if config["freeze_resnet"] is True:
        arg_list.append("--freeze-backbone")
    if config["random_transform"] is True:
        print("Turning on random transform generator")
        arg_list.append("--random-transform")
    if config["multi-gpu"] > 1:
        arg_list.append("--multi-gpu-force")
    if config["multiprocessing"]:
        arg_list.append("--multiprocessing")
    #positional arguments first
    arg_list += ["csv", annotations_file, classes_file]
    if config["validation_annotations"] != "None":
        arg_list += ["--val-annotations", config["validation_annotations"]]
    #All need to be str classes to mimic sys.arg
    return [str(x) for x in arg_list]
|
# https://app.codesignal.com/arcade/code-arcade/loop-tunnel/7BFPq6TpsNjzgcpXy/
def leastFactorial(n):
    """Return the smallest factorial that is >= n (for n >= 1).

    Keeps multiplying a cumulative factorial until it reaches or exceeds
    n; if n is itself a factorial it is returned unchanged.

    Uses the integer comparison `n > fact` instead of the original float
    division `n / fact > 1` -- equivalent for positive integers, but
    exact and overflow-free for arbitrarily large n (float division of
    huge ints can lose precision or raise OverflowError).
    """
    i, fact = 1, 1
    while n > fact:
        fact *= i
        i += 1
    return fact
|
# Demonstration: list.clear() mutates the list in place and returns None,
# so rebinding the name to its return value replaces the list with None.
myl1 = [1,2,3,4]
print("Before clearing type is: "+ str(type(myl1)))
# NOTE: this assignment makes myl1 None (NoneType below); to keep the
# (now empty) list, call myl1.clear() without assigning the result.
myl1 = myl1.clear()
print(myl1)
print("After clearing type is: "+ str(type(myl1)))
|
# © 2015-2020 Deltatech
# See README.rst file on addons root folder for license details
import datetime
import functools
import itertools
import logging
from ast import literal_eval
import psycopg2
from odoo import _, api, fields, models
from odoo.exceptions import UserError, ValidationError
from odoo.tools import mute_logger
_logger = logging.getLogger("merge.object")
class MergeDummy(models.TransientModel):
    # Placeholder target model: concrete merge wizards override
    # _model_merge/_table_merge on MergeObject with a real model.
    _name = "merge.dummy"
    _description = "Merge Object Dummy"
    name = fields.Char()
class MergeObjectLine(models.TransientModel):
    # One line = one group of candidate records to merge together.
    _name = "merge.object.line"
    _description = "Merge Object Line"
    _order = "min_id asc"
    wizard_id = fields.Many2one("merge.object.wizard", "Wizard")
    # Smallest id of the group; also the display-order key (see _order).
    min_id = fields.Integer("MinID")
    # Stringified list of the grouped record ids (sql array_agg output).
    aggr_ids = fields.Char("Ids", required=True)
class MergeObject(models.TransientModel):
    """
    The idea behind this wizard is to create a list of potential objects to
    merge. We use two objects, the first one is the wizard for the end-user.
    And the second will contain the object list to merge.
    """

    _name = "merge.object.wizard"
    _description = "Merge Object Wizard"
    # Subclasses point these at the real model/table whose records are
    # merged; merge.dummy is only a safe placeholder.
    _model_merge = "merge.dummy"
    _table_merge = "merge_dummy"
    @api.model
    def default_get(self, fields_list):
        """Pre-select records when launched from the merge model's views.

        When opened with active_ids of the merge model in context, jump
        straight to the "selection" state with those records pre-filled
        and the oldest one proposed as destination.
        """
        res = super(MergeObject, self).default_get(fields_list)
        active_ids = self.env.context.get("active_ids")
        if self.env.context.get("active_model") == self._model_merge and active_ids:
            res["state"] = "selection"
            res["object_ids"] = [(6, 0, active_ids)]
            # _get_ordered_object sorts newest-first, so [-1] is the oldest.
            res["dst_object_id"] = self._get_ordered_object(active_ids)[-1].id
        return res
# Group by
group_by_name = fields.Boolean("Name")
state = fields.Selection(
[("option", "Option"), ("selection", "Selection"), ("finished", "Finished")],
readonly=True,
required=True,
string="State",
default="option",
)
number_group = fields.Integer("Group of Objects", readonly=True)
current_line_id = fields.Many2one("merge.object.line", string="Current Line")
line_ids = fields.One2many("merge.object.line", "wizard_id", string="Lines")
object_ids = fields.Many2many(_model_merge, string="Objects")
dst_object_id = fields.Many2one(_model_merge, string="Destination Object")
maximum_group = fields.Integer("Maximum of Group of Objects")
# ----------------------------------------
# Update method. Core methods to merge steps
# ----------------------------------------
    def _get_fk_on(self, table):
        """return a list of many2one relation with the given table.
        :param table : the name of the sql table to return relations
        :returns a list of tuple 'table name', 'column name'.
        """
        # Introspect the PostgreSQL catalogs for single-column foreign
        # keys (contype 'f') whose target column is <table>.id.
        query = """
            SELECT cl1.relname as table, att1.attname as column
            FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
                 pg_attribute as att1, pg_attribute as att2
            WHERE con.conrelid = cl1.oid
                AND con.confrelid = cl2.oid
                AND array_lower(con.conkey, 1) = 1
                AND con.conkey[1] = att1.attnum
                AND att1.attrelid = cl1.oid
                AND cl2.relname = %s
                AND att2.attname = 'id'
                AND array_lower(con.confkey, 1) = 1
                AND con.confkey[1] = att2.attnum
                AND att2.attrelid = cl2.oid
                AND con.contype = 'f'
        """
        self._cr.execute(query, (table,))
        return self._cr.fetchall()
    @api.model
    def _update_foreign_keys(self, src_objects, dst_object):
        """Update all foreign key from the src_object to dst_object. All many2one fields will be updated.
        :param src_objects : merge source res.object recordset (does not include destination one)
        :param dst_object : record of destination res.object
        """
        _logger.debug(
            "_update_foreign_keys for dst_object: %s for src_objects: %s", dst_object.id, str(src_objects.ids)
        )
        # find the many2one relation to a object
        Object = self.env[self._model_merge]
        relations = self._get_fk_on(self._table_merge)
        # flush pending ORM writes so the raw SQL below sees current data
        self.flush()
        for table, column in relations:
            if "merge_object_" in table:  # ignore two tables
                continue
            # get list of columns of current table (exept the current fk column)
            # pylint: disable=E8103
            query = "SELECT column_name FROM information_schema.columns WHERE table_name LIKE '%s'" % (table)
            self._cr.execute(query, ())
            columns = []
            for data in self._cr.fetchall():
                if data[0] != column:
                    columns.append(data[0])
            # do the update for the current table/column in SQL
            query_dic = {
                "table": table,
                "column": column,
                "value": columns[0],
            }
            if len(columns) <= 1:
                # unique key treated
                # Only repoint rows whose (fk, value) pair would not
                # collide with an existing row already on dst_object.
                query = (
                    """
                    UPDATE "%(table)s" as ___tu
                    SET "%(column)s" = %%s
                    WHERE
                        "%(column)s" = %%s AND
                        NOT EXISTS (
                            SELECT 1
                            FROM "%(table)s" as ___tw
                            WHERE
                                "%(column)s" = %%s AND
                                ___tu.%(value)s = ___tw.%(value)s
                        )"""
                    % query_dic
                )
                for src_object in src_objects:
                    self._cr.execute(query, (dst_object.id, src_object.id, dst_object.id))
            else:
                try:
                    # savepoint so a failed bulk update can be rolled back
                    # and handled row-destructively below
                    with mute_logger("odoo.sql_db"), self._cr.savepoint():
                        query = 'UPDATE "%(table)s" SET "%(column)s" = %%s WHERE "%(column)s" IN %%s' % query_dic
                        self._cr.execute(
                            query,
                            (
                                dst_object.id,
                                tuple(src_objects.ids),
                            ),
                        )
                        # handle the recursivity with parent relation
                        if column == Object._parent_name and table == self._table_merge:
                            query = (
                                """
                                WITH RECURSIVE cycle(id, parent_id) AS (
                                    SELECT id, parent_id FROM %(table)s
                                    UNION
                                    SELECT cycle.id, %(table)s.parent_id
                                    FROM %(table)s, cycle
                                    WHERE %(table)s.id = cycle.parent_id AND
                                        cycle.id != cycle.parent_id
                                )
                                SELECT id FROM cycle WHERE id = parent_id AND id = %%s
                            """
                                % query_dic
                            )
                            self._cr.execute(query, (dst_object.id,))
                except psycopg2.Error:
                    # updating fails, most likely due to a violated unique constraint
                    # keeping record with nonexistent object_id is useless, better delete it
                    query = 'DELETE FROM "%(table)s" WHERE "%(column)s" IN %%s' % query_dic
                    self._cr.execute(query, (tuple(src_objects.ids),))
        # drop ORM caches that the raw SQL above made stale
        self.invalidate_cache()
    @api.model
    def _update_reference_fields(self, src_objects, dst_object):
        """Update all reference fields from the src_object to dst_object.
        :param src_objects : merge source res.object recordset (does not include destination one)
        :param dst_object : record of destination res.object
        """
        _logger.debug("_update_reference_fields for dst_object: %s for src_objects: %r", dst_object.id, src_objects.ids)

        def update_records(model, src, field_model="model", field_id="res_id"):
            # Repoint generic (model, res_id)-style references from src to
            # dst; delete the records if the write hits a unique constraint.
            Model = self.env[model] if model in self.env else None
            if Model is None:
                # model not installed in this database -> nothing to do
                return
            records = Model.sudo().search([(field_model, "=", self._model_merge), (field_id, "=", src.id)])
            try:
                with mute_logger("odoo.sql_db"), self._cr.savepoint(), self.env.clear_upon_failure():
                    records.sudo().write({field_id: dst_object.id})
                    records.flush()
            except psycopg2.Error:
                # updating fails, most likely due to a violated unique constraint
                # keeping record with nonexistent object_id is useless, better delete it
                records.sudo().unlink()

        # NOTE(review): this partial() binds no arguments, so it is a no-op
        # wrapper -- kept for parity with the upstream implementation.
        update_records = functools.partial(update_records)
        for scr_object in src_objects:
            update_records("calendar.event", src=scr_object, field_model="res_model")
            update_records("ir.attachment", src=scr_object, field_model="res_model")
            update_records("mail.followers", src=scr_object, field_model="res_model")
            update_records("portal.share", src=scr_object, field_model="res_model")
            update_records("rating.rating", src=scr_object, field_model="res_model")
            update_records("mail.activity", src=scr_object, field_model="res_model")
            update_records("mail.message", src=scr_object)
            update_records("ir.model.data", src=scr_object)
        # Also rewrite every Reference-typed field value "model,id" that
        # still points at a source record.
        records = self.env["ir.model.fields"].search([("ttype", "=", "reference")])
        for record in records.sudo():
            try:
                Model = self.env[record.model]
                field = Model._fields[record.name]
            except KeyError:
                # unknown model or field => skip
                continue
            if field.compute is not None:
                # computed fields are derived -> never written directly
                continue
            for src_object in src_objects:
                records_ref = Model.sudo().search([(record.name, "=", "%s,%d" % (self._model_merge, src_object.id))])
                values = {
                    record.name: "%s,%d" % (self._model_merge, dst_object.id),
                }
                records_ref.sudo().write(values)
        self.flush()
def _get_summable_fields(self):
"""Returns the list of fields that should be summed when merging objects"""
return []
    @api.model
    def _update_values(self, src_objects, dst_object):
        """Update values of dst_object with the ones from the src_objects.
        :param src_objects : recordset of source res.object
        :param dst_object : record of destination res.object
        """
        _logger.debug("_update_values for dst_object: %s for src_objects: %r", dst_object.id, src_objects.ids)
        model_fields = dst_object.fields_get().keys()
        summable_fields = self._get_summable_fields()

        def write_serializer(item):
            # many2one values are written as plain database ids
            if isinstance(item, models.BaseModel):
                return item.id
            else:
                return item

        # get all fields that are not computed or x2many
        values = dict()
        for column in model_fields:
            field = dst_object._fields[column]
            if field.type not in ("many2many", "one2many") and field.compute is None:
                for item in itertools.chain(src_objects, [dst_object]):
                    if item[column]:
                        if column in summable_fields and values.get(column):
                            # summable fields accumulate across all records
                            values[column] += write_serializer(item[column])
                        else:
                            # last truthy value wins; dst_object is chained
                            # last, so its own values take precedence
                            values[column] = write_serializer(item[column])
        # remove fields that can not be updated (id and parent_id)
        values.pop("id", None)
        parent_id = values.pop("parent_id", None)
        dst_object.write(values)
        # try to update the parent_id
        if parent_id and parent_id != dst_object.id:
            try:
                dst_object.write({"parent_id": parent_id})
            except ValidationError:
                # a recursive hierarchy would be created -> keep old parent
                _logger.info(
                    "Skip recursive object hierarchies for parent_id %s of object: %s", parent_id, dst_object.id
                )
    def _merge(self, object_ids, dst_object=None, extra_checks=True):
        """private implementation of merge object
        :param object_ids : ids of object to merge
        :param dst_object : record of destination res.object
        :param extra_checks: pass False to bypass extra sanity check (e.g. email address)

        NOTE(review): extra_checks is not referenced in this body --
        presumably consumed by overrides; confirm before removing.
        """
        Object = self.env[self._model_merge]
        # drop ids of records that were deleted meanwhile
        object_ids = Object.browse(object_ids).exists()
        if len(object_ids) < 2:
            return
        if len(object_ids) > 3:
            raise UserError(
                _(
                    "For safety reasons, you cannot merge more than 3 objects together."
                    " You can re-open the wizard several times if needed."
                )
            )
        # check if the list of objects to merge contains child/parent relation
        if "parent_id" in Object._fields:
            child_ids = self.env[self._model_merge]
            for object_id in object_ids:
                child_ids |= Object.search([("id", "child_of", [object_id.id])]) - object_id
            if object_ids & child_ids:
                raise UserError(_("You cannot merge a object with one of his parent."))
        # remove dst_object from objects to merge
        if dst_object and dst_object in object_ids:
            src_objects = object_ids - dst_object
        else:
            # default destination: the oldest record (list is newest-first)
            ordered_objects = self._get_ordered_object(object_ids.ids)
            dst_object = ordered_objects[-1]
            src_objects = ordered_objects[:-1]
        _logger.info("dst_object: %s", dst_object.id)
        # call sub methods to do the merge
        self._update_foreign_keys(src_objects, dst_object)
        self._update_reference_fields(src_objects, dst_object)
        self._update_values(src_objects, dst_object)
        self._log_merge_operation(src_objects, dst_object)
        # delete source object, since they are merged
        src_objects.unlink()
    def _log_merge_operation(self, src_objects, dst_object):
        """Audit-log which user merged which records into which destination."""
        _logger.info("(uid = %s) merged the objects %r with %s", self._uid, src_objects.ids, dst_object.id)
# ----------------------------------------
# Helpers
# ----------------------------------------
@api.model
def _generate_query(self, fields, maximum_group=100):
"""Build the SQL query on res.object table to group them according to given criteria
:param fields : list of column names to group by the objects
:param maximum_group : limit of the query
"""
# make the list of column to group by in sql query
sql_fields = []
for field in fields:
if field in ["email", "name"]:
sql_fields.append("lower(%s)" % field)
elif field in ["vat"]:
sql_fields.append("replace(%s, ' ', '')" % field)
else:
sql_fields.append(field)
group_fields = ", ".join(sql_fields)
# where clause : for given group by columns, only keep the 'not null' record
filters = []
for field in fields:
if field in ["email", "name", "vat"]:
filters.append((field, "IS NOT", "NULL"))
criteria = " AND ".join("{} {} {}".format(field, operator, value) for field, operator, value in filters)
# build the query
text = [
"SELECT min(id), array_agg(id)",
"FROM %s" % self._table_merge,
]
if criteria:
text.append("WHERE %s" % criteria)
text.extend(["GROUP BY %s" % group_fields, "HAVING COUNT(*) >= 2", "ORDER BY min(id)"])
if maximum_group:
text.append(
"LIMIT %s" % maximum_group,
)
return " ".join(text)
@api.model
def _compute_selected_groupby(self):
"""Returns the list of field names the object can be grouped (as merge
criteria) according to the option checked on the wizard
"""
groups = []
group_by_prefix = "group_by_"
for field_name in self._fields:
if field_name.startswith(group_by_prefix):
if getattr(self, field_name, False):
groups.append(field_name[len(group_by_prefix) :])
if not groups:
raise UserError(_("You have to specify a filter for your selection."))
return groups
@api.model
def _object_use_in(self, aggr_ids, models):
"""Check if there is no occurence of this group of object in the selected model
:param aggr_ids : stringified list of object ids separated with a comma (sql array_agg)
:param models : dict mapping a model name with its foreign key with res_object table
"""
return any(self.env[model].search_count([(field, "in", aggr_ids)]) for model, field in models.items())
@api.model
def _get_ordered_object(self, object_ids):
    """Helper: return a recordset of ``self._model_merge`` sorted by creation
    date, most recent first (records without a create_date sort last).

    :param object_ids: list of record ids to sort
    """
    # records lacking a create_date fall back to the epoch so they end up last
    epoch = datetime.datetime(1970, 1, 1)
    records = self.env[self._model_merge].browse(object_ids)
    return records.sorted(key=lambda rec: rec.create_date or epoch, reverse=True)
def _compute_models(self):
""" Compute the different models needed by the system if you want to exclude some objects. """
model_mapping = {}
return model_mapping
# ----------------------------------------
# Actions
# ----------------------------------------
def action_skip(self):
    """Skip the current wizard line without merging anything and move on
    to the next screen.
    """
    line = self.current_line_id
    if line:
        line.unlink()
    return self._action_next_screen()
def _action_next_screen(self):
    """return the action of the next screen ; this means the wizard is set to treat the
    next wizard line. Each line is a subset of object that can be merged together.
    If no line left, the end screen will be displayed (but an action is still returned).
    """
    self.invalidate_cache()  # FIXME: is this still necessary?
    values = {}
    if self.line_ids:
        # in this case, we try to find the next record.
        current_line = self.line_ids[0]
        # aggr_ids is stored as a stringified list of ids; parse it back to a list
        current_object_ids = literal_eval(current_line.aggr_ids)
        values.update(
            {
                "current_line_id": current_line.id,
                # (6, 0, ids): replace the many2many content with these records
                "object_ids": [(6, 0, current_object_ids)],
                # _get_ordered_object sorts newest-first, so [-1] is the oldest
                # record, which becomes the destination of the merge
                "dst_object_id": self._get_ordered_object(current_object_ids)[-1].id,
                "state": "selection",
            }
        )
    else:
        # no line left: clear the selection and show the end screen
        values.update({"current_line_id": False, "object_ids": [], "state": "finished"})
    self.write(values)
    # reopen this very wizard in a dialog so the user sees the updated state
    return {
        "type": "ir.actions.act_window",
        "res_model": self._name,
        "res_id": self.id,
        "view_mode": "form",
        "target": "new",
    }
def _process_query(self, query):
    """Execute the select request and write the result in this wizard
    :param query : the SQL query used to fill the wizard line

    Each fetched row is (min_id, aggr_ids); a ``merge.object.line`` is
    created per group that survives the accessibility and exclusion checks.
    """
    self.ensure_one()
    model_mapping = self._compute_models()

    # group object query
    self._cr.execute(query)

    counter = 0
    for min_id, aggr_ids in self._cr.fetchall():
        # To ensure that the used objects are accessible by the user
        objects = self.env[self._model_merge].search([("id", "in", aggr_ids)])
        if len(objects) < 2:
            # fewer than two accessible records: nothing to merge in this group
            continue
        # exclude object according to options
        if model_mapping and self._object_use_in(objects.ids, model_mapping):
            continue
        self.env["merge.object.line"].create({"wizard_id": self.id, "min_id": min_id, "aggr_ids": objects.ids})
        counter += 1

    self.write({"state": "selection", "number_group": counter})
    _logger.info("counter: %s", counter)
def action_start_manual_process(self):
    """Start the process 'Merge with Manual Check': fill the wizard lines
    according to the checked group-by/exclude options, then redirect to the
    first step (the treatment of the first wizard line).
    """
    self.ensure_one()
    selected_groups = self._compute_selected_groupby()
    self._process_query(self._generate_query(selected_groups, self.maximum_group))
    return self._action_next_screen()
def action_start_automatic_process(self):
    """Start the process 'Merge Automatically': fill the wizard with the same
    mechanism as the manual flow, then jump straight to the finished state
    instead of iterating screen by screen.
    """
    self.ensure_one()
    # here we don't redirect to the next screen, since it is automatic process
    self.action_start_manual_process()
    self.write({"state": "finished"})
    return {
        "res_id": self.id,
        "res_model": self._name,
        "target": "new",
        "type": "ir.actions.act_window",
        "view_mode": "form",
    }
def parent_migration_process_cb(self):
    """Callback: reopen this wizard form in a new dialog."""
    self.ensure_one()
    action = {
        "res_id": self.id,
        "res_model": self._name,
        "target": "new",
        "type": "ir.actions.act_window",
        "view_mode": "form",
    }
    return action
def action_update_all_process(self):
    """Proceed to the next wizard screen."""
    self.ensure_one()
    return self._action_next_screen()
def action_merge(self):
    """Merge Object button: merge the selected records into the destination
    record, then redirect to the next screen (or the end screen when there
    was nothing selected).
    """
    if not self.object_ids:
        # nothing selected: go straight to the end screen
        self.write({"state": "finished"})
        return {
            "res_id": self.id,
            "res_model": self._name,
            "target": "new",
            "type": "ir.actions.act_window",
            "view_mode": "form",
        }
    self._merge(self.object_ids.ids, self.dst_object_id)
    line = self.current_line_id
    if line:
        line.unlink()
    return self._action_next_screen()
|
# Import packages
from video_funcs import peri_stimulus_video_clip, register_arena, get_background
from termcolor import colored
# ========================================================
# SET PARAMETERS
# ========================================================
# file path of the input behaviour video
video_file_path = 'Z:\\branco\\Federico\\raw_behaviour\\maze\\video\\180606_CA2762.avi'
# folder where the clip and flight image are saved
save_file_path = 'C:\\Users\\Federico\\Desktop'
# file path of fisheye correction -- set to an invalid location such as '' to skip fisheye correction
# A corrective mapping for the branco lab's typical camera is included in the repo!
fisheye_map_location = 'gibb.npy'
# frame of stimulus onset
stim_frame = 3000
# seconds before stimulus to start video
window_pre = 5
# seconds after stimulus to end video
window_post = 10
# frames per second of video
fps = 30
# name of experiment
experiment = 'Barnes wall up'
# name of mouse
mouse_id = 'CA3481'
# stimulus type
stim_type = 'visual'
# x and y offset as set in the behaviour software
x_offset = 120
y_offset = 300
# for flight image analysis: darkness relative to background threshold
# (relative darkness, number of dark pixels)
dark_threshold = [.55,950]
# ========================================================
# GET BACKGROUND
# ========================================================
print(colored('Fetching background', 'green'))
background_image = get_background(
    video_file_path,start_frame=1, avg_over=1)
# ========================================================
# REGISTER ARENA
# ========================================================
print(colored('Registering arena', 'green'))
registration = register_arena(
    background_image, fisheye_map_location, x_offset, y_offset)
# ========================================================
# SAVE CLIPS AND FLIGHT IMAGES
# ========================================================
print(colored('Creating flight clip and image', 'green'))
start_frame = int(stim_frame-(window_pre*fps))
stop_frame = int(stim_frame+(window_post*fps))
# clip name: experiment_mouse_stimtype-minute; the trailing apostrophe
# presumably marks the minute value — confirm against video_funcs naming
videoname = '{}_{}_{}-{}\''.format(experiment,mouse_id,stim_type, round(stim_frame / fps / 60))
peri_stimulus_video_clip(video_file_path, videoname, save_file_path, start_frame, stop_frame, stim_frame,
                         registration, x_offset, y_offset, dark_threshold,
                         save_clip = True, display_clip = True, counter = True, make_flight_image = True)
|
# Generated by Django 3.2.6 on 2021-08-20 01:03
from django.db import migrations
class Migration(migrations.Migration):
    # Removes the employment-related fields from the custom user model.

    dependencies = [
        ('account', '0001_initial'),
    ]

    operations = [
        # drop hiring date
        migrations.RemoveField(
            model_name='user',
            name='data_contratacao',
        ),
        # drop "dismissed" flag
        migrations.RemoveField(
            model_name='user',
            name='demitido',
        ),
        # drop last-change timestamp
        migrations.RemoveField(
            model_name='user',
            name='ultima_mudanca',
        ),
    ]
|
def new():
    """Add a new world.

    Placeholder: not implemented yet, always returns None.
    """
    pass
|
from typing import Optional
import numpy as np
from ... import BaseTask
from assembly_gym.environment.generic import RobotComponent
from .. import Reward
class EndEffectorAccelerationReward(Reward[BaseTask]):
    """
    A reward for punishing high (linear and angular) end-effector accelerations.

    The unnormalized reward is the negative sum of the magnitudes of the
    finite-difference linear and angular accelerations of the gripper.
    """

    def __init__(self, robot_name: str = "ur10", intermediate_timestep_reward_scale: float = 0.8,
                 final_timestep_reward_scale: Optional[float] = None, max_acceleration: float = 100.0):
        """
        :param robot_name: name of the robot whose gripper accelerations are penalized
        :param intermediate_timestep_reward_scale: scaling factor (applied to the reward at every step in which the gym
            environment does not terminate)
        :param final_timestep_reward_scale: scaling factor (applied to the reward at the step in which the gym
            environment terminates)
        :param max_acceleration: the maximum acceleration to use for normalizing the (unscaled)
            reward to lie in [-1, 0]
        """
        name = "endeffector_acceleration_reward"
        super().__init__(name, intermediate_timestep_reward_scale, final_timestep_reward_scale, clip=False,
                         abbreviated_name="ee_acc")
        self.__robot_name = robot_name
        self.__gripper: Optional[RobotComponent] = None
        self.__max_acceleration: float = max_acceleration

    def _reset(self) -> None:
        # Re-resolve the gripper handle and reset the velocity memory used for
        # the finite-difference acceleration estimate.
        self.__gripper = self.task.environment.robots[self.__robot_name].gripper
        self.__previous_linear_velocity = np.zeros(3)
        self.__previous_angular_velocity = np.zeros(3)

    def _calculate_reward_unnormalized(self) -> float:
        linear_velocity, angular_velocity = self.__gripper.velocity
        # NOTE(review): __previous_*_velocity is only written in _reset(), never
        # updated here — so the "acceleration" is measured against the reset-time
        # velocities rather than the previous step's. Confirm whether an update
        # of the previous velocities is missing.
        time_step = self.task.environment.time_step
        linear_acceleration = (linear_velocity - self.__previous_linear_velocity) / time_step
        angular_acceleration = (angular_velocity - self.__previous_angular_velocity) / time_step
        # Fix: the original wrapped the scalar sum of the two norms in another
        # np.linalg.norm call — a redundant no-op for non-negative scalars.
        cost = np.linalg.norm(linear_acceleration) + np.linalg.norm(angular_acceleration)
        return -cost

    def _get_min_reward_unnormalized(self) -> float:
        # Most negative achievable unscaled reward, used for normalization.
        return -self.__max_acceleration
|
from bokeh.plotting import figure
from bokeh.embed import components
from cachetools import cached
import datetime
from flask import current_app
import logging
import numpy as np
import pandas as pd
import requests
class InstrumdatKey:
    """Hashable cache key identifying a (ticker, as-of date) pair.

    The ticker is stored upper-cased; the date defaults to today.
    """

    def __init__(self, ticker, date=None):
        self.ticker = ticker.upper()
        self.date = datetime.date.today() if date is None else date

    def __str__(self):
        return '{}: {}'.format(self.date, self.ticker)

    def __hash__(self):
        # hash the string form so equal keys hash equally
        return hash(str(self))

    def __eq__(self, other):
        return (self.ticker, self.date) == (other.ticker, other.date)
@cached(cache={})
def _fetch_instr(instrumdat_key, years=1):
    """Download daily WIKI/PRICES rows for the key's ticker from Quandl.

    Results are memoized per InstrumdatKey for the process lifetime (the
    @cached dict is unbounded; keys include the date, so entries expire
    naturally when the day changes).

    :param instrumdat_key: InstrumdatKey naming the ticker and as-of date
    :param years: number of years of history to request
    :returns: pandas.DataFrame built from the returned datatable
    :raises ValueError: for a non-today date, a non-200 response, or empty data
    """
    ticker = instrumdat_key.ticker
    if instrumdat_key.date != datetime.date.today():
        # only "as of today" requests are supported by this endpoint usage
        s = 'cannot fetch data as of {}'
        raise ValueError(s.format(instrumdat_key.date))
    api_key = current_app.config['QUANDL_API_KEY']
    url = 'https://www.quandl.com/api/v3/datatables/WIKI/PRICES.json'
    start = instrumdat_key.date - datetime.timedelta(days=years * 365)
    logging.info("Fetching data for ticker `{}'".format(ticker))
    r = requests.get(url, params={
        'ticker': ticker,
        'date.gt': start.isoformat(),
        'api_key': api_key,
    })
    # redact the API key before logging the request URL
    logging.info("{}".format(r.url.replace(api_key, 'API_KEY')))
    logging.info("Status code: {}".format(r.status_code))
    if r.status_code != 200:
        raise ValueError("invalid ticker `{}'".format(ticker))
    jdata = r.json()
    df = pd.DataFrame(
        jdata['datatable']['data'],
        columns=[x['name'] for x in jdata['datatable']['columns']]
    )
    if df.empty:
        raise ValueError("ticker `{}' returned no data".format(ticker))
    return df
def fetch_instr(ticker):
    """Fetch historical data for a financial instrument.

    :param ticker: instrument symbol (case-insensitive)
    :raises ValueError: when the ticker is missing or empty
    """
    if ticker is None or len(ticker) == 0:
        raise ValueError('missing ticker argument')
    key = InstrumdatKey(ticker)
    return _fetch_instr(key)
def moving_average(x, window, fill=True):
    """Return moving average.

    :param x: input series (sequence or 1-D array)
    :param window: averaging window length
    :param fill: when True, left-pad with NaNs so the result matches len(x)
    :raises ValueError: when the series is shorter than the window
    """
    if len(x) < window:
        s = 'series length {} is smaller than window({})'
        raise ValueError(s.format(len(x), window))
    kernel = np.full(window, 1.0 / window)
    averaged = np.convolve(x, kernel, 'valid')
    if fill:
        padding = np.nan * np.ones(window - 1)
        averaged = np.concatenate((padding, averaged))
    return averaged
def build_plot(df, instrument_variables, maxdays=252):
    """Build a bokeh price chart for the requested price variables.

    :param df: DataFrame with 'date' plus the price columns referenced below
    :param instrument_variables: iterable of column names to render
        (any of 'close', 'adj_close', 'open', 'adj_open')
    :param maxdays: show at most this many of the latest rows (~1 trading year)
    :returns: the (script, div) pair from bokeh's components()
    """
    # NOTE(review): the `legend=` keyword below is the legacy bokeh API
    # (replaced by legend_label in newer releases) — confirm the pinned version.
    n = min(len(df), maxdays)
    df = df[-n:]
    # prepare some data
    dates = np.array(df['date'].values, dtype=np.datetime64)
    window = 21  # ~one month of trading days for the moving average
    # create a new plot with a a datetime axis type
    p = figure(width=800, height=350, x_axis_type="datetime")
    # add renderers: raw points as circles, moving average as a line
    if 'close' in instrument_variables:
        p.circle(dates, df['close'],
                 size=4, color='darkgrey', alpha=0.2, legend='Close')
        avg = moving_average(df['close'], window)
        p.line(dates, avg, color='navy', legend='Average close')
    if 'adj_close' in instrument_variables:
        p.circle(dates, df['adj_close'],
                 size=4, color='darkgreen', alpha=0.2, legend='Adjusted close')
        avg = moving_average(df['adj_close'], window)
        p.line(dates, avg, color='green', legend='Avg. adj. close')
    if 'open' in instrument_variables:
        p.circle(dates, df['open'],
                 size=4, color='darkred', alpha=0.2, legend='Open')
        avg = moving_average(df['open'], window)
        p.line(dates, avg, color='red', legend='Average open')
    if 'adj_open' in instrument_variables:
        p.circle(dates, df['adj_open'],
                 size=4, color='orange', alpha=0.2, legend='Adjusted open')
        avg = moving_average(df['adj_open'], window)
        p.line(dates, avg, color='brown', legend='Avg. adj. open')
    # customize by setting attributes
    p.title.text = \
        "One-month moving average, as of {}".format(max(dates))
    p.legend.location = "top_left"
    p.grid.grid_line_alpha = 0
    p.xaxis.axis_label = 'Date'
    p.yaxis.axis_label = 'Price'
    p.ygrid.band_fill_color = "orange"
    p.ygrid.band_fill_alpha = 0.1
    return components(p)
|
from torch import Tensor
from torch._C import dtype
import torch.nn
import torch.nn.functional
from kge import Config, Dataset
from kge.job import Job
from kge.model import KgeEmbedder
from kge.misc import round_to_points
from typing import List
class DiachronicEmbedder(KgeEmbedder):
    """Time-aware (diachronic) embedder.

    Each embedding is the concatenation of a static part (``_ent_emb``) and a
    temporal part computed as ``amp * sin(freq * t + phi)`` from three
    per-entity tables (``_amp_emb``, ``_freq_emb``, ``_phi_emb``).
    ``se_prop`` controls the share of static dimensions.
    """

    def __init__(
        self,
        config: Config,
        dataset: Dataset,
        configuration_key: str,
        vocab_size: int,
        init_for_load_only=False,
    ):
        super().__init__(
            config, dataset, configuration_key, init_for_load_only=init_for_load_only
        )

        # read config
        self.normalize_p = self.get_option("normalize.p")
        self.regularize = self.check_option("regularize", ["", "lp"])
        self.sparse = self.get_option("sparse")
        self.se_prop = self.get_option("se_prop")  # static-embedding proportion
        self.config.check("train.trace_level", ["batch", "epoch"])
        self.vocab_size = vocab_size
        self.time_size = self.get_option("time_size")

        # optionally round the embedding dimension to a configured set of values
        round_embedder_dim_to = self.get_option("round_dim_to")
        if len(round_embedder_dim_to) > 0:
            self.dim = round_to_points(round_embedder_dim_to, self.dim)

        # split total dimension into static and temporal parts
        self.t_emb_dim = self.dim - int(self.se_prop * self.dim)
        self.s_emb_dim = int(self.se_prop * self.dim)

        # static entity embedding + amplitude/frequency/phase of the temporal part
        self._ent_emb = torch.nn.Embedding(
            self.vocab_size, self.s_emb_dim, sparse=self.sparse
        )
        self._freq_emb = torch.nn.Embedding(
            self.vocab_size, self.t_emb_dim, sparse=self.sparse
        )
        self._phi_emb = torch.nn.Embedding(
            self.vocab_size, self.t_emb_dim, sparse=self.sparse
        )
        self._amp_emb = torch.nn.Embedding(
            self.vocab_size, self.t_emb_dim, sparse=self.sparse
        )

        if not init_for_load_only:
            # initialize weights
            self.initialize(self._ent_emb.weight.data)
            self.initialize(self._freq_emb.weight.data)
            self.initialize(self._phi_emb.weight.data)
            self.initialize(self._amp_emb.weight.data)
            self._normalize_embeddings()

        # TODO handling negative dropout because using it with ax searches for now
        dropout = self.get_option("dropout")
        if dropout < 0 and config.get("train.auto_correct"):
            config.log(
                "Setting {}.dropout to 0., "
                "was set to {}.".format(configuration_key, dropout)
            )
            dropout = 0
        self.dropout = torch.nn.Dropout(dropout)

    def _normalize_embeddings(self):
        """L_p-normalize every embedding table in place (when normalize.p > 0)."""
        if self.normalize_p > 0:
            with torch.no_grad():
                # BUG FIX: previously _ent_emb, _phi_emb and _amp_emb were
                # overwritten with normalized copies of _freq_emb's weights
                # (copy-paste error); each table is now normalized from its
                # own weights.
                for emb in (self._ent_emb, self._freq_emb, self._phi_emb, self._amp_emb):
                    emb.weight.data = torch.nn.functional.normalize(
                        emb.weight.data, p=self.normalize_p, dim=-1
                    )

    def prepare_job(self, job: Job, **kwargs):
        from kge.job import TrainingJob

        super().prepare_job(job, **kwargs)
        if self.normalize_p > 0 and isinstance(job, TrainingJob):
            # just to be sure it's right initially
            job.pre_run_hooks.append(lambda job: self._normalize_embeddings())

            # normalize after each batch
            job.post_batch_hooks.append(lambda job: self._normalize_embeddings())

    @torch.no_grad()
    def init_pretrained(self, pretrained_embedder: "DiachronicEmbedder") -> None:
        """Copy rows for the shared vocabulary from a pretrained embedder."""
        (
            self_intersect_ind,
            pretrained_intersect_ind,
        ) = self._intersect_ids_with_pretrained_embedder(pretrained_embedder)
        # copy every table pairwise (static, frequency, phase, amplitude)
        pairs = (
            (self._ent_emb, pretrained_embedder._ent_emb),
            (self._freq_emb, pretrained_embedder._freq_emb),
            (self._phi_emb, pretrained_embedder._phi_emb),
            (self._amp_emb, pretrained_embedder._amp_emb),
        )
        for own_emb, pretrained_emb in pairs:
            device = own_emb.weight.device
            rows = torch.from_numpy(self_intersect_ind).to(device).long()
            own_emb.weight[rows] = pretrained_embedder._postprocess(
                pretrained_emb(torch.from_numpy(pretrained_intersect_ind).to(device))
            )

    def _embed(self, indexes: Tensor, time: Tensor) -> Tensor:
        # TODO: add other activations than sin
        # static part || amp * sin(freq * t + phi)
        return torch.cat(
            (
                self._ent_emb(indexes),
                (
                    self._amp_emb(indexes)
                    * torch.sin(
                        self._freq_emb(indexes) * time.view(-1, 1)
                        + self._phi_emb(indexes)
                    )
                ),
            ),
            1,
        )

    def embed(self, indexes: Tensor, time: Tensor) -> Tensor:
        return self._postprocess(self._embed(indexes, time))

    def embed_all(self) -> Tensor:
        return self._postprocess(self._embeddings_all())

    def _postprocess(self, embeddings: Tensor) -> Tensor:
        # apply dropout when configured; otherwise pass through unchanged
        if self.dropout.p > 0:
            embeddings = self.dropout(embeddings)
        return embeddings

    def _embeddings_all(self) -> Tensor:
        # NOTE(review): the index tensor has vocab_size entries while the time
        # tensor has time_size entries; _embed broadcasts time against the
        # per-index embeddings, so this only lines up when time_size equals
        # vocab_size (or 1) — confirm intended semantics.
        return self._embed(
            torch.arange(
                self.vocab_size, dtype=torch.long, device=self._freq_emb.weight.device
            ),
            torch.arange(
                self.time_size, dtype=torch.long, device=self._freq_emb.weight.device
            ),
        )

    def _get_regularize_weight(self) -> Tensor:
        return self.get_option("regularize_weight")

    def penalty(self, **kwargs) -> List[Tensor]:
        # TODO factor out to a utility method
        result = super().penalty(**kwargs)
        if self.regularize == "" or self.get_option("regularize_weight") == 0.0:
            pass
        elif self.regularize == "lp":
            p = (
                self.get_option("regularize_args.p")
                if self.has_option("regularize_args.p")
                else 2
            )
            regularize_weight = self._get_regularize_weight()
            if not self.get_option("regularize_args.weighted"):
                # unweighted Lp regularization
                parameters = self._embeddings_all()
                result += [
                    (
                        f"{self.configuration_key}.L{p}_penalty",
                        (regularize_weight / p * parameters.norm(p=p) ** p).sum(),
                    )
                ]
            else:
                # weighted Lp regularization
                unique_indexes, counts = torch.unique(
                    kwargs["indexes"], return_counts=True
                )
                # NOTE(review): _embed requires a time argument; this call will
                # raise TypeError if the weighted branch is ever taken — the
                # correct time tensor to use is not determinable here. TODO fix.
                parameters = self._embed(unique_indexes)
                if p % 2 == 1:
                    parameters = torch.abs(parameters)
                result += [
                    (
                        f"{self.configuration_key}.L{p}_penalty",
                        (
                            regularize_weight
                            / p
                            * (parameters ** p * counts.float().view(-1, 1))
                        ).sum()
                        # In contrast to unweighted Lp regularization, rescaling by
                        # number of triples/indexes is necessary here so that penalty
                        # term is correct in expectation
                        / len(kwargs["indexes"]),
                    )
                ]
        else:  # unknown regularization
            raise ValueError(f"Invalid value regularize={self.regularize}")

        return result
|
from flask import Flask, app
from config import config_options,DevConfig
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
# Shared extension instances; bound to the application in create_app().
db = SQLAlchemy()
login_manager = LoginManager()
# 'strong' session protection: invalidate the session on identity changes
login_manager.session_protection = 'strong'
# endpoint users are redirected to when login is required
login_manager.login_view = 'main.login'
# bootstrap alert category used for the "please log in" flash message
login_manager.login_message_category = 'info'
def create_app(config_name):
    """Application factory: build and configure a Flask app.

    :param config_name: key into config_options selecting the config class
    :returns: the configured Flask application
    """
    flask_app = Flask(__name__)

    # Load the selected configuration object
    flask_app.config.from_object(config_options[config_name])

    # Bind the shared extensions to this application instance
    db.init_app(flask_app)
    login_manager.init_app(flask_app)

    # Configure request handling (imported lazily to avoid circular imports)
    from .request import configure_request
    configure_request(flask_app)

    # Register the blueprints
    from .main import main as main_blueprint
    flask_app.register_blueprint(main_blueprint)

    return flask_app
|
"""
Rackspace Cloud Backup API Utilities
"""
from __future__ import print_function
import cloudbackup.utils.printer # noqa
import logging
import os
def announce_banner(banner):
    """
    Output a banner to both the log and normal output functionality
    """
    log = logging.getLogger(__name__)
    marker = '===================================================='

    # framed banner goes to the log...
    for line in (marker, banner, marker):
        log.info(line)

    # ...and (without the trailing marker) to stdout
    print(marker)
    print(banner)
def normpath(platfrm, fpath):
    """
    Normalize the path in an expected manner

    Uses backslashes when the platform string mentions 'windows',
    forward slashes otherwise.
    """
    if 'windows' in platfrm:
        return fpath.replace('/', '\\')
    return fpath.replace('\\', '/')
def joinpath(platfrm, dirname, *base):
    """Join path components and normalize the result for the given platform."""
    joined = os.path.join(dirname, *base)
    return normpath(platfrm, joined)
def basename(platfrm, fpath):
    """
    Return the basename of the path given
    """
    if 'windows' in platfrm:
        # convert to forward slashes first so os.path.basename works on a
        # posix host for a windows-style path
        return os.path.basename(normpath('linux', fpath))
    return os.path.basename(fpath)
def splitall(platfrm, fpath):
    """Return a list with the path broken up into its components."""
    if 'windows' in platfrm:
        # work on a forward-slash copy so os.path.split behaves on posix hosts
        remaining = normpath('linux', fpath)
    else:
        remaining = fpath[:]

    parts = list()
    while True:
        head, tail = os.path.split(remaining)
        if len(head) == 0:
            # relative/windows-style path fully consumed
            parts.append(tail)
            break
        elif len(tail) == 0:
            # posix absolute root reached
            parts.append(head)
            break
        parts.append(tail)
        remaining = head

    parts.reverse()
    return parts
|
from typing import Optional
from ._db_span import db_span
from ._const import BEGIN, COMMIT, ROLLBACK, SQLITE
try:
    import tortoise.backends.sqlite
except ImportError:
    # sqlite backend unavailable: define nothing, so install_patch() will
    # fail its globals() check instead of crashing at import time
    pass
else:
    # keep references to the unpatched coroutine methods so the wrappers
    # below can delegate to the original implementations
    _tortoise_sqlite_client_execute_query = (
        tortoise.backends.sqlite.client.SqliteClient.execute_query
    )
    _tortoise_sqlite_client_execute_insert = (
        tortoise.backends.sqlite.client.SqliteClient.execute_insert
    )
    _tortoise_sqlite_client_execute_query_dict = (
        tortoise.backends.sqlite.client.SqliteClient.execute_query_dict
    )
    """transaction"""
    _tortoise_sqlite_client_execute_many = (
        tortoise.backends.sqlite.client.TransactionWrapper.execute_many
    )
    _tortoise_sqlite_client_start = (
        tortoise.backends.sqlite.client.TransactionWrapper.start
    )
    _tortoise_sqlite_client_commit = (
        tortoise.backends.sqlite.client.TransactionWrapper.commit
    )
    _tortoise_sqlite_client_rollback = (
        tortoise.backends.sqlite.client.TransactionWrapper.rollback
    )
    # names install_patch() expects to find in this module's globals
    item_list = [
        "_tortoise_sqlite_client_execute_query",
        "_tortoise_sqlite_client_execute_insert",
        "_tortoise_sqlite_client_execute_query_dict",
        "_tortoise_sqlite_client_execute_many",
        "_tortoise_sqlite_client_start",
        "_tortoise_sqlite_client_commit",
        "_tortoise_sqlite_client_rollback",
    ]
async def sqlite_execute_query_wrapper(
    self, query: str, values: Optional[list] = None
):
    # trace the query, then delegate to the saved original
    with await db_span(self, query=query, db_instance=SQLITE):
        return await _tortoise_sqlite_client_execute_query(self, query, values)


async def sqlite_execute_insert_wrapper(self, query: str, values: list):
    # trace the insert, then delegate to the saved original
    with await db_span(self, query=query, db_instance=SQLITE):
        return await _tortoise_sqlite_client_execute_insert(
            self, query, values
        )


async def sqlite_execute_query_dict_wrapper(
    self, query: str, values: Optional[list] = None
):
    # trace the query, then delegate to the saved original
    with await db_span(self, query=query, db_instance=SQLITE):
        return await _tortoise_sqlite_client_execute_query_dict(
            self, query, values
        )


"""transaction"""


async def sqlite_execute_many_wrapper(self, query: str, values: list):
    # trace the batched statement inside a transaction
    with await db_span(self, query=query, db_instance=SQLITE):
        return await _tortoise_sqlite_client_execute_many(self, query, values)


async def sqlite_trans_start_wrapper(self):
    # record transaction start as a BEGIN span
    with await db_span(self, query=BEGIN, db_instance=SQLITE):
        return await _tortoise_sqlite_client_start(self)


async def sqlite_trans_commit_wrapper(self):
    # record transaction commit as a COMMIT span
    with await db_span(self, query=COMMIT, db_instance=SQLITE):
        return await _tortoise_sqlite_client_commit(self)


async def sqlite_trans_rollback_wrapper(self):
    # record transaction rollback as a ROLLBACK span
    with await db_span(self, query=ROLLBACK, db_instance=SQLITE):
        return await _tortoise_sqlite_client_rollback(self)
def install_patch():
    """Monkey-patch tortoise's sqlite client so every query/transaction
    operation is wrapped in a tracing span.

    :raises Exception: when the saved originals are missing (sqlite backend
        was not importable), to avoid installing broken wrappers
    """
    if any(item not in globals() for item in item_list):
        raise Exception("sqlite patch install fail")

    client = tortoise.backends.sqlite.client

    # plain client methods
    client.SqliteClient.execute_query = sqlite_execute_query_wrapper
    client.SqliteClient.execute_insert = sqlite_execute_insert_wrapper
    client.SqliteClient.execute_query_dict = sqlite_execute_query_dict_wrapper

    # transaction wrapper methods
    client.TransactionWrapper.execute_many = sqlite_execute_many_wrapper
    client.TransactionWrapper.start = sqlite_trans_start_wrapper
    client.TransactionWrapper.commit = sqlite_trans_commit_wrapper
    client.TransactionWrapper.rollback = sqlite_trans_rollback_wrapper
|
# -*- coding: utf-8 -*-
import cv2
import numpy as np
# Emboss filter
def emboss_filter(img, K_size=3):
    """Apply a 3x3 emboss filter to a grayscale image.

    :param img: 2-D array (H, W); values are treated as intensities in [0, 255]
    :param K_size: padding/window size. NOTE(review): the kernel itself is a
        fixed 3x3, so values other than 3 will not broadcast — confirm intent.
    :returns: filtered image as uint8, same shape as the input
    """
    H, W = img.shape

    # Zero padding so border pixels get a full window
    pad = K_size // 2
    # np.float was removed in numpy >= 1.20; use the explicit float64 dtype
    out = np.zeros((H + pad * 2, W + pad * 2), dtype=np.float64)
    out[pad: pad + H, pad: pad + W] = img.copy().astype(np.float64)
    tmp = out.copy()

    # Emboss kernel (hoisted to an ndarray for elementwise multiplication)
    K = np.array([[-2., -1., 0.], [-1., 1., 1.], [0., 1., 2.]])

    ## filtering
    for y in range(H):
        for x in range(W):
            out[pad + y, pad + x] = np.sum(K * tmp[y:y + K_size, x:x + K_size])

    # clamp to the displayable range and restore uint8
    out = np.clip(out, 0, 255)
    out = out[pad:pad + H, pad:pad + W].astype(np.uint8)

    return out
# Read image and convert to single-channel grayscale
img = cv2.imread("imori.jpg")
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Emboss filter
out = emboss_filter(img)

# Show and save image
# NOTE(review): imwrite presumably requires the "Myresult" directory to
# already exist — confirm before running
cv2.imwrite("Myresult/out18.jpg", out)
# display in a resizable 256x256 window until a key is pressed
cv2.namedWindow("result", 0)
cv2.resizeWindow("result", 256, 256)
cv2.imshow("result", out)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
import time
from .smoothing import smooth_path
from .rrt import TreeNode, configs
from .utils import irange, argmin, RRT_ITERATIONS, RRT_RESTARTS, RRT_SMOOTHING, INF, elapsed_time
def asymmetric_extend(q1, q2, extend_fn, backward=False):
    """Extend from q1 toward q2; when backward, generate the extension from
    q2 toward q1 and return it reversed (so it still reads q1 -> q2)."""
    if not backward:
        return extend_fn(q1, q2)
    return reversed(list(extend_fn(q2, q1)))
def rrt_connect(q1, q2, distance_fn, sample_fn, extend_fn, collision_fn,
                iterations=RRT_ITERATIONS, max_time=INF):
    """Bidirectional RRT-Connect planner.

    :param q1: start configuration
    :param q2: goal configuration
    :param distance_fn: distance_fn(q_a, q_b) -> float
    :param sample_fn: sample_fn() -> random configuration
    :param extend_fn: extend_fn(q_a, q_b) -> iterable of configurations from q_a toward q_b
    :param collision_fn: collision_fn(q) -> True when q is in collision
    :param iterations: maximum number of tree-growing iterations
    :param max_time: wall-clock time budget in seconds
    :returns: list of configurations joining q1 and q2, or None on failure
    """
    # TODO: collision(q1, q2)
    start_time = time.time()
    if collision_fn(q1) or collision_fn(q2):
        return None
    nodes1, nodes2 = [TreeNode(q1)], [TreeNode(q2)]
    for iteration in irange(iterations):
        if max_time <= elapsed_time(start_time):
            break
        # always grow the smaller tree toward the random sample
        swap = len(nodes1) > len(nodes2)
        tree1, tree2 = nodes1, nodes2
        if swap:
            tree1, tree2 = nodes2, nodes1
        s = sample_fn()

        # extend tree1 from its nearest node toward the sample, as far as possible
        last1 = argmin(lambda n: distance_fn(n.config, s), tree1)
        for q in asymmetric_extend(last1.config, s, extend_fn, swap):
            if collision_fn(q):
                break
            last1 = TreeNode(q, parent=last1)
            tree1.append(last1)

        # then extend tree2 toward tree1's new frontier node
        last2 = argmin(lambda n: distance_fn(n.config, last1.config), tree2)
        for q in asymmetric_extend(last2.config, last1.config, extend_fn, not swap):
            if collision_fn(q):
                break
            last2 = TreeNode(q, parent=last2)
            tree2.append(last2)
        else:
            # for/else: the extension completed without collision, so the
            # trees are connected — stitch the two retraced branches together
            path1, path2 = last1.retrace(), last2.retrace()
            if swap:
                path1, path2 = path2, path1
            #print('{} iterations, {} nodes'.format(iteration, len(nodes1) + len(nodes2)))
            return configs(path1[:-1] + path2[::-1])
    return None
# TODO: version which checks whether the segment is valid
def direct_path(q1, q2, extend_fn, collision_fn):
    """Try to connect q1 to q2 with a single straight extension.

    :returns: the path as a list of configurations (starting at q1),
              or None when an endpoint or intermediate configuration collides
    """
    if collision_fn(q1) or collision_fn(q2):
        return None
    path = [q1]
    for config in extend_fn(q1, q2):
        if collision_fn(config):
            return None
        path.append(config)
    return path
def birrt(q1, q2, distance, sample, extend, collision,
          restarts=RRT_RESTARTS, iterations=RRT_ITERATIONS, smooth=RRT_SMOOTHING, max_time=INF):
    """Bidirectional RRT with random restarts and optional path smoothing.

    :param restarts: number of extra rrt_connect attempts after the first
    :param smooth: smoothing iterations, or None to return the raw path
    :param max_time: wall-clock time budget in seconds, shared by all attempts
    :returns: list of configurations joining q1 and q2, or None on failure
    """
    start_time = time.time()
    if collision(q1) or collision(q2):
        return None
    # cheap first attempt: straight-line connection
    path = direct_path(q1, q2, extend, collision)
    if path is not None:
        return path
    for attempt in irange(restarts + 1):
        if max_time <= elapsed_time(start_time):
            break
        # give each attempt only the remaining time budget
        path = rrt_connect(q1, q2, distance, sample, extend, collision,
                           iterations=iterations, max_time=max_time - elapsed_time(start_time))
        if path is not None:
            #print('{} attempts'.format(attempt))
            if smooth is None:
                return path
            # post-process with shortcut smoothing
            return smooth_path(path, extend, collision, iterations=smooth)
    return None
|
# Adapters
# Protean
from protean.adapters.broker import Brokers
from protean.adapters.broker.celery import CeleryBroker, ProteanTask
from protean.adapters.broker.inline import InlineBroker
from protean.adapters.email import EmailProviders
from protean.adapters.email.dummy import DummyEmailProvider
from protean.adapters.email.sendgrid import SendgridEmailProvider
from protean.adapters.repository import Providers
from protean.adapters.repository.elasticsearch import ElasticsearchModel, ESProvider
from protean.adapters.repository.memory import MemoryModel, MemoryProvider
from protean.adapters.repository.sqlalchemy import SAProvider, SqlalchemyModel
# Public API of the adapters package (every name imported above).
__all__ = (
    "Brokers",
    "CeleryBroker",
    "ProteanTask",
    "InlineBroker",
    "EmailProviders",
    "DummyEmailProvider",
    "SendgridEmailProvider",
    "Providers",
    "ESProvider",
    "ElasticsearchModel",
    "SAProvider",
    "SqlalchemyModel",
    "MemoryProvider",
    "MemoryModel",
)
|
# -*- coding: utf-8 -*-
from __future__ import division
__copyright__ = "Copyright (C) 2014 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from django.utils.translation import ugettext as _
from django.shortcuts import ( # noqa
get_object_or_404, redirect)
from relate.utils import retry_transaction_decorator
from django.core.exceptions import ( # noqa
PermissionDenied, SuspiciousOperation,
ObjectDoesNotExist)
from django import http
from course.models import (
FlowSession, FlowPageVisitGrade,
get_flow_grading_opportunity,
get_feedback_for_grade,
update_bulk_feedback)
from course.constants import participation_role
from course.utils import (
course_view, render_course_page,
get_session_grading_rule,
FlowPageContext)
from course.views import get_now_or_fake_time
from django.conf import settings
from django.utils import translation
# {{{ grading driver
@course_view
@retry_transaction_decorator()
def grade_flow_page(pctx, flow_session_id, page_ordinal):
    """Instructor/TA view for grading a single page of a flow session.

    GET renders the student's submission read-only next to a grading form;
    POST validates the grading form, records a new FlowPageVisitGrade and
    re-grades the whole session.  Also computes prev/next session ids so a
    grader can step ("zap") through all sessions of the same flow.

    :arg pctx: course page context (supplied by the ``course_view`` decorator)
    :arg flow_session_id: primary key of the FlowSession being graded
    :arg page_ordinal: ordinal of the page within the flow
    :raises PermissionDenied: requesting user is not instructor/TA
    :raises SuspiciousOperation: session belongs to another course or is
        anonymous
    """
    page_ordinal = int(page_ordinal)

    # Only course staff may see grading pages.
    if pctx.role not in [
            participation_role.instructor,
            participation_role.teaching_assistant]:
        raise PermissionDenied(
                _("must be instructor or TA to view grades"))

    flow_session = get_object_or_404(FlowSession, id=int(flow_session_id))

    if flow_session.course.pk != pctx.course.pk:
        raise SuspiciousOperation(
                _("Flow session not part of specified course"))
    if flow_session.participation is None:
        raise SuspiciousOperation(
                _("Cannot grade anonymous session"))

    fpctx = FlowPageContext(pctx.repo, pctx.course, flow_session.flow_id,
            page_ordinal, participation=flow_session.participation,
            flow_session=flow_session, request=pctx.request)

    if fpctx.page_desc is None:
        raise http.Http404()

    # {{{ enable flow session zapping

    # All sessions of this flow (with the same in-progress state), ordered
    # by student name, so the grader can step to the previous/next one.
    all_flow_sessions = list(FlowSession.objects
            .filter(
                course=pctx.course,
                flow_id=flow_session.flow_id,
                participation__isnull=False,
                in_progress=flow_session.in_progress)
            .order_by(
                "participation__user__last_name",
                "start_time"))

    next_flow_session_id = None
    prev_flow_session_id = None
    for i, other_flow_session in enumerate(all_flow_sessions):
        if other_flow_session.pk == flow_session.pk:
            if i > 0:
                prev_flow_session_id = all_flow_sessions[i-1].id
            if i + 1 < len(all_flow_sessions):
                next_flow_session_id = all_flow_sessions[i+1].id

    # }}}

    # {{{ reproduce student view

    form = None
    feedback = None
    answer_data = None
    grade_data = None
    most_recent_grade = None

    if fpctx.page.expects_answer():
        if fpctx.prev_answer_visit is not None:
            answer_data = fpctx.prev_answer_visit.answer

            most_recent_grade = fpctx.prev_answer_visit.get_most_recent_grade()
            if most_recent_grade is not None:
                feedback = get_feedback_for_grade(most_recent_grade)
                grade_data = most_recent_grade.grade_data
            else:
                feedback = None
                grade_data = None

        else:
            feedback = None

        from course.page.base import PageBehavior
        # Show the page as the student sees it after submission:
        # correctness visible, answer locked.
        page_behavior = PageBehavior(
                show_correctness=True,
                show_answer=False,
                may_change_answer=False)

        form = fpctx.page.make_form(
                fpctx.page_context, fpctx.page_data.data,
                answer_data, page_behavior)

    if form is not None:
        form_html = fpctx.page.form_to_html(
                pctx.request, fpctx.page_context, form, answer_data)
    else:
        form_html = None

    # }}}

    # {{{ grading form

    # A grading form only makes sense for gradable pages with a submitted
    # answer, on sessions that have ended.
    if (fpctx.page.expects_answer()
            and fpctx.page.is_answer_gradable()
            and fpctx.prev_answer_visit is not None
            and not flow_session.in_progress):
        request = pctx.request
        if pctx.request.method == "POST":
            grading_form = fpctx.page.post_grading_form(
                    fpctx.page_context, fpctx.page_data, grade_data,
                    request.POST, request.FILES)
            if grading_form.is_valid():
                grade_data = fpctx.page.update_grade_data_from_grading_form(
                        fpctx.page_context, fpctx.page_data, grade_data,
                        grading_form, request.FILES)

                # Generate feedback under the admin locale so stored
                # feedback text is rendered consistently for all graders.
                with translation.override(settings.RELATE_ADMIN_EMAIL_LOCALE):
                    feedback = fpctx.page.grade(
                            fpctx.page_context, fpctx.page_data,
                            answer_data, grade_data)

                if feedback is not None:
                    correctness = feedback.correctness
                else:
                    correctness = None

                if feedback is not None:
                    feedback_json, bulk_feedback_json = feedback.as_json()
                else:
                    feedback_json = bulk_feedback_json = None

                most_recent_grade = FlowPageVisitGrade(
                        visit=fpctx.prev_answer_visit,
                        grader=pctx.request.user,
                        graded_at_git_commit_sha=pctx.course_commit_sha,
                        grade_data=grade_data,
                        max_points=fpctx.page.max_points(fpctx.page_data),
                        correctness=correctness,
                        feedback=feedback_json)

                most_recent_grade.save()

                update_bulk_feedback(
                        fpctx.prev_answer_visit.page_data,
                        most_recent_grade,
                        bulk_feedback_json)

                # Propagate the new page grade into the overall session grade.
                grading_rule = get_session_grading_rule(
                        flow_session, flow_session.participation.role,
                        fpctx.flow_desc, get_now_or_fake_time(request))

                from course.flow import grade_flow_session
                grade_flow_session(fpctx, flow_session, grading_rule)

        else:
            grading_form = fpctx.page.make_grading_form(
                    fpctx.page_context, fpctx.page_data, grade_data)

    else:
        grading_form = None

    if grading_form is not None:
        from crispy_forms.layout import Submit
        grading_form.helper.form_class += " relate-grading-form"
        grading_form.helper.add_input(
                Submit(
                    "submit", _("Submit"),
                    accesskey="s",
                    css_class="relate-grading-save-button"))

        grading_form_html = fpctx.page.grading_form_to_html(
                pctx.request, fpctx.page_context, grading_form, grade_data)

    else:
        grading_form_html = None

    # }}}

    # {{{ compute points_awarded

    max_points = None
    points_awarded = None
    if (fpctx.page.expects_answer()
            and fpctx.page.is_answer_gradable()):
        max_points = fpctx.page.max_points(fpctx.page_data)
        if feedback is not None and feedback.correctness is not None:
            points_awarded = max_points * feedback.correctness

    # }}}

    grading_rule = get_session_grading_rule(
            flow_session, flow_session.participation.role,
            fpctx.flow_desc, get_now_or_fake_time(pctx.request))

    if grading_rule.grade_identifier is not None:
        grading_opportunity = get_flow_grading_opportunity(
                pctx.course, flow_session.flow_id, fpctx.flow_desc,
                grading_rule)
    else:
        grading_opportunity = None

    return render_course_page(
            pctx,
            "course/grade-flow-page.html",
            {
                "flow_identifier": fpctx.flow_id,
                "flow_session": flow_session,
                "flow_desc": fpctx.flow_desc,
                "ordinal": fpctx.ordinal,
                "page_data": fpctx.page_data,
                "body": fpctx.page.body(
                    fpctx.page_context, fpctx.page_data.data),
                "form": form,
                "form_html": form_html,
                "feedback": feedback,
                "max_points": max_points,
                "points_awarded": points_awarded,
                "most_recent_grade": most_recent_grade,
                "grading_opportunity": grading_opportunity,
                "prev_flow_session_id": prev_flow_session_id,
                "next_flow_session_id": next_flow_session_id,
                "grading_form": grading_form,
                "grading_form_html": grading_form_html,
                })
# }}}
# {{{ grading statistics
@course_view
def show_grading_statistics(pctx, flow_id):
    """Instructor/TA view: table of how many pages each grader has graded.

    Only the *most recent* grade per visit is counted, and autograder
    grades (``grader`` is NULL) are excluded entirely.
    """
    if pctx.role not in [
            participation_role.instructor,
            participation_role.teaching_assistant]:
        raise PermissionDenied(
                _("must be instructor or TA to view grading stats"))

    grades = (FlowPageVisitGrade.objects
            .filter(
                visit__flow_session__course=pctx.course,
                visit__flow_session__flow_id=flow_id,

                # There are just way too many autograder grades, which makes this
                # report super slow.
                grader__isnull=False)
            .order_by(
                "visit__id",
                "grade_time")
            .select_related("visit")
            .select_related("grader")
            .select_related("visit__page_data"))

    graders = set()

    # tuples: (ordinal, id)
    pages = set()

    counts = {}
    grader_counts = {}
    page_counts = {}

    def commit_grade_info(grade):
        # Tally one (page, grader) pair into the aggregate counters.
        grader = grade.grader
        page = (grade.visit.page_data.ordinal,
                grade.visit.page_data.group_id + "/" + grade.visit.page_data.page_id)

        graders.add(grader)
        pages.add(page)

        key = (page, grade.grader)
        counts[key] = counts.get(key, 0) + 1

        grader_counts[grader] = grader_counts.get(grader, 0) + 1
        page_counts[page] = page_counts.get(page, 0) + 1

    last_grade = None

    # Because grades are ordered by visit id then grade_time, the last row
    # seen for each visit is that visit's most recent grade.
    for grade in grades.iterator():
        if last_grade is not None and last_grade.visit != grade.visit:
            commit_grade_info(last_grade)

        last_grade = grade

    if last_grade is not None:
        commit_grade_info(last_grade)

    # NOTE(review): the None branch of the sort key is defensive only --
    # the grader__isnull=False filter above means no grader should be None.
    graders = sorted(graders,
            key=lambda grader: grader.last_name if grader is not None else None)
    pages = sorted(pages)

    stats_table = [
            [
                counts.get((page, grader), 0)
                for grader in graders
                ]
            for page in pages
            ]
    page_counts = [
            page_counts.get(page, 0)
            for page in pages
            ]
    grader_counts = [
            grader_counts.get(grader, 0)
            for grader in graders
            ]

    return render_course_page(
            pctx,
            "course/grading-statistics.html",
            {
                "flow_id": flow_id,
                "pages": pages,
                "graders": graders,
                "pages_stats_counts": zip(pages, stats_table, page_counts),
                "grader_counts": grader_counts,
                })
# }}}
# vim: foldmethod=marker
|
# Read numbers 0-20 and print their Portuguese names until the user quits.
tupla = ('zero', 'um', 'dois', 'três', 'quatro', 'cinco', 'seis', 'sete',
         'oito', 'nove', 'dez', 'onze', 'doze', 'treze', 'quatorze', 'quinze',
         'dezesseis', 'dezessete', 'dezoito', 'dezenove', 'vinte')
while True:
    escolher = int(input('Digite um número entre 0 e 20: '))
    # Re-prompt until the number is inside the tuple's range.
    while escolher > 20 or escolher < 0:
        escolher = int(input('Digite um número entre 0 e 20: '))
    print(f'Você digitou o número {tupla[escolher]}')
    continuar = str(input('Quer continuar: [S/N] ')).upper()
    # BUG FIX: the re-prompt previously did not apply .upper(), so a
    # lowercase 's'/'n' entered here could never leave the loop; also
    # `x in 'SN'` used substring semantics, accepting the empty string.
    while continuar not in ('S', 'N'):
        continuar = str(input('Quer continuar: [S/N] ')).upper()
    if continuar == 'N':
        break
|
import av
import av.datasets
# Fetch a bundled sample video (cached locally by PyAV on first use).
content = av.datasets.curated('pexels/time-lapse-video-of-night-sky-857195.mp4')
with av.open(content) as container:
    # Signal that we only want to look at keyframes.
    stream = container.streams.video[0]
    stream.codec_context.skip_frame = 'NONKEY'
    for frame in container.decode(stream):
        print(frame)
        # We use `frame.pts` as `frame.index` won't make much sense with
        # `skip_frame` active (non-key frames are never decoded).
        frame.to_image().save(
            'night-sky.{:04d}.jpg'.format(frame.pts),
            quality=80,
        )
|
class Solution:
    def largestAltitude(self, gain: list[int]) -> int:
        """Return the highest altitude reached (LeetCode 1732).

        Starting at altitude 0, ``gain[i]`` is the net altitude change of
        step ``i``; the answer is the maximum running prefix sum, never
        below the starting altitude 0 (so an empty ``gain`` yields 0).
        """
        # BUG FIX: the annotation previously used `List[int]` without
        # importing `typing.List`, which raises NameError when the class is
        # defined; the builtin generic `list[int]` (3.9+) needs no import.
        highest = 0
        temp = 0
        for num in gain:
            temp += num
            if temp > highest:
                highest = temp
        return highest
|
# The code for AlexNet is copied and adapted from the TensorFlow repository
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/models/image/alexnet/alexnet_benchmark.py.
import ray
import numpy as np
import tarfile, io
import boto3
import PIL.Image as Image
import tensorflow as tf
import ray.array.remote as ra
STDDEV = 0.001 # The standard deviation of the network weight initialization.
def load_chunk(tarfile, size=None):
    """Load a number of images from a single imagenet .tar file.

    This function also converts the image from grayscale to RGB if necessary.

    Args:
      tarfile (tarfile.TarFile): The archive from which the files get loaded.
      size (Optional[Tuple[int, int]]): Resize the image to this size if provided.

    Returns:
      Tuple[numpy.ndarray, List[str]]: The image data in format
      [batch, w, h, c], and the corresponding archive member names.
    """
    result = []
    filenames = []
    for member in tarfile.getmembers():
        filename = member.path
        content = tarfile.extractfile(member)
        img = Image.open(content)
        # Paste onto a fresh RGB canvas so grayscale inputs become 3-channel.
        rgbimg = Image.new("RGB", img.size)
        rgbimg.paste(img)
        # BUG FIX: `size != None` replaced by the idiomatic identity test.
        if size is not None:
            # NOTE(review): Image.ANTIALIAS is deprecated in Pillow >= 9.1
            # (alias of Image.LANCZOS) -- left as-is for compatibility.
            rgbimg = rgbimg.resize(size, Image.ANTIALIAS)
        result.append(np.array(rgbimg).reshape(1, rgbimg.size[0], rgbimg.size[1], 3))
        filenames.append(filename)
    return np.concatenate(result), filenames
@ray.remote(num_return_vals=2)
def load_tarfile_from_s3(bucket, s3_key, size=[]):
    """Load an imagenet .tar file.

    Args:
      bucket (str): Bucket holding the imagenet .tar.
      s3_key (str): s3 key from which the .tar file is loaded.
      size (List[int]): Resize the image to this size if size != []; len(size) == 2 required.

    Returns:
      np.ndarray: The image data (see load_chunk).
    """
    # NOTE(review): the mutable default `size=[]` is never mutated here, so
    # it is harmless, but `size=None` would be the safer idiom.
    s3 = boto3.client("s3")
    response = s3.get_object(Bucket=bucket, Key=s3_key)
    # Stream the S3 body into an in-memory buffer in 8 KiB chunks.
    output = io.BytesIO()
    chunk = response["Body"].read(1024 * 8)
    while chunk:
        output.write(chunk)
        chunk = response["Body"].read(1024 * 8)
    output.seek(0)  # go to the beginning of the .tar file
    tar = tarfile.open(mode="r", fileobj=output)
    return load_chunk(tar, size=size if size != [] else None)
def load_tarfiles_from_s3(bucket, s3_keys, size=[]):
    """Kick off remote loads for a number of imagenet .tar files.

    Args:
      bucket (str): Bucket holding the imagenet .tars.
      s3_keys (List[str]): List of s3 keys from which the .tar files are
        being loaded.
      size (List[int]): Resize the image to this size if size does not
        equal []. The length of size must be 2.

    Returns:
      List of object IDs to the chunks of the images (see load_chunk).
    """
    object_ids = []
    for key in s3_keys:
        object_ids.append(load_tarfile_from_s3.remote(bucket, key, size))
    return object_ids
def setup_variables(params, placeholders, kernelshape, biasshape):
    """Create the variables for each layer.

    Appends the freshly created kernel/bias variables to ``params`` and the
    matching feed placeholders to ``placeholders`` -- both lists are
    mutated in place.

    Args:
      params (List): Network parameters used for creating feed_dicts
      placeholders (List): Placeholders used for feeding weights into
      kernelshape (List): Shape of the kernel used for the conv layer
      biasshape (List): Shape of the bias used

    Returns:
      None
    """
    kernel = tf.Variable(tf.truncated_normal(kernelshape, stddev=STDDEV))
    biases = tf.Variable(tf.constant(0.0, shape=biasshape, dtype=tf.float32),
                         trainable=True, name='biases')
    kernel_new = tf.placeholder(tf.float32, shape=kernel.get_shape())
    biases_new = tf.placeholder(tf.float32, shape=biases.get_shape())
    # NOTE(review): the assign ops below are added to the graph but their
    # Python handles are unused; they appear to exist so the weights can be
    # overwritten through the placeholders -- confirm before removing.
    update_kernel = kernel.assign(kernel_new)
    update_biases = biases.assign(biases_new)
    params += [kernel, biases]
    placeholders += [kernel_new, biases_new]
def conv_layer(parameters, prev_layer, shape, scope):
    """Construct a convolutional layer for the network.

    Uses the kernel/bias most recently appended to ``parameters`` (by
    setup_variables).

    Args:
      parameters (List): Parameters used in constructing layer.
      prev_layer (Tensor): The previous layer to connect the network together.
      shape (List): The strides used for convolution.
      scope (Scope): Current tensorflow name scope.

    Returns:
      Tensor: Activation of the layer.
    """
    kernel, bias = parameters[-2], parameters[-1]
    convolved = tf.nn.conv2d(prev_layer, kernel, shape, padding='SAME')
    return tf.nn.relu(tf.nn.bias_add(convolved, bias), name=scope)
def net_initialization():
    """Build the AlexNet graph (TF1 style) and everything needed to drive it.

    Returns:
      Tuple of (gradient ops, session, apply-gradients op, accuracy op,
      image placeholder, label placeholder, dropout placeholder, weight
      placeholders, init op, get_weights, set_weights) -- workers unpack
      this from ray.reusables.net_vars.
    """
    images = tf.placeholder(tf.float32, shape=[None, 224, 224, 3])
    y_true = tf.placeholder(tf.float32, shape=[None, 1000])
    # Filled in by setup_variables() as each layer is created.
    parameters = []
    placeholders = []
    # conv1
    with tf.name_scope('conv1') as scope:
        setup_variables(parameters, placeholders, [11, 11, 3, 96], [96])
        conv1 = conv_layer(parameters, images, [1, 4, 4, 1], scope)
    # pool1
    pool1 = tf.nn.max_pool(conv1,
                           ksize=[1, 3, 3, 1],
                           strides=[1, 2, 2, 1],
                           padding='VALID',
                           name='pool1')
    # lrn1
    pool1_lrn = tf.nn.lrn(pool1, depth_radius=5, bias=1.0,
                          alpha=0.0001, beta=0.75,
                          name="LocalResponseNormalization")
    # conv2
    with tf.name_scope('conv2') as scope:
        setup_variables(parameters, placeholders, [5, 5, 96, 256], [256])
        conv2 = conv_layer(parameters, pool1_lrn, [1, 1, 1, 1], scope)
    # pool2
    pool2 = tf.nn.max_pool(conv2,
                           ksize=[1, 3, 3, 1],
                           strides=[1, 2, 2, 1],
                           padding='VALID',
                           name='pool2')
    # lrn2
    pool2_lrn = tf.nn.lrn(pool2, depth_radius=5, bias=1.0,
                          alpha=0.0001, beta=0.75,
                          name="LocalResponseNormalization")
    # conv3
    with tf.name_scope('conv3') as scope:
        setup_variables(parameters, placeholders, [3, 3, 256, 384], [384])
        conv3 = conv_layer(parameters, pool2_lrn, [1, 1, 1, 1], scope)
    # conv4
    with tf.name_scope('conv4') as scope:
        setup_variables(parameters, placeholders, [3, 3, 384, 384], [384])
        conv4 = conv_layer(parameters, conv3, [1, 1, 1, 1], scope)
    # conv5
    with tf.name_scope('conv5') as scope:
        setup_variables(parameters, placeholders, [3, 3, 384, 256], [256])
        conv5 = conv_layer(parameters, conv4, [1, 1, 1, 1], scope)
    # pool5
    pool5 = tf.nn.max_pool(conv5,
                           ksize=[1, 3, 3, 1],
                           strides=[1, 2, 2, 1],
                           padding='VALID',
                           name='pool5')
    # lrn5
    pool5_lrn = tf.nn.lrn(pool5, depth_radius=5, bias=1.0,
                          alpha=0.0001, beta=0.75,
                          name="LocalResponseNormalization")
    # Shared dropout keep-probability (fed as 0.5 for training, 1.0 for eval).
    dropout = tf.placeholder(tf.float32)
    # Three fully connected layers; fc3 produces the 1000-class softmax.
    with tf.name_scope('fc1') as scope:
        n_input = int(np.prod(pool5_lrn.get_shape().as_list()[1:]))
        setup_variables(parameters, placeholders, [n_input, 4096], [4096])
        fc_in = tf.reshape(pool5_lrn, [-1, n_input])
        fc_layer1 = tf.nn.tanh(tf.nn.bias_add(tf.matmul(fc_in, parameters[-2]), parameters[-1]))
        fc_out1 = tf.nn.dropout(fc_layer1, dropout)
    with tf.name_scope('fc2') as scope:
        n_input = int(np.prod(fc_out1.get_shape().as_list()[1:]))
        setup_variables(parameters, placeholders, [n_input, 4096], [4096])
        fc_in = tf.reshape(fc_out1, [-1, n_input])
        fc_layer2 = tf.nn.tanh(tf.nn.bias_add(tf.matmul(fc_in, parameters[-2]), parameters[-1]))
        fc_out2 = tf.nn.dropout(fc_layer2, dropout)
    with tf.name_scope('fc3') as scope:
        n_input = int(np.prod(fc_out2.get_shape().as_list()[1:]))
        setup_variables(parameters, placeholders, [n_input, 1000], [1000])
        fc_in = tf.reshape(fc_out2, [-1, n_input])
        fc_layer3 = tf.nn.softmax(tf.nn.bias_add(tf.matmul(fc_in, parameters[-2]), parameters[-1]))
    # Renormalize so the rows sum to exactly one before taking the log.
    y_pred = fc_layer3 / tf.reduce_sum(fc_layer3,
                                       reduction_indices=len(fc_layer3.get_shape()) - 1,
                                       keep_dims=True)
    # manual computation of crossentropy; clip to avoid log(0).
    y_pred = tf.clip_by_value(y_pred, tf.cast(1e-10, dtype=tf.float32),
                              tf.cast(1. - 1e-10, dtype=tf.float32))
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_true * tf.log(y_pred),
                                                  reduction_indices=len(y_pred.get_shape()) - 1))
    opt = tf.train.MomentumOptimizer(learning_rate=0.01, momentum=0.9)  # Any other optimizier can be placed here
    correct_pred = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y_true, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    comp_grads = opt.compute_gradients(cross_entropy, parameters)
    # NOTE(review): gradients are applied via the feed placeholders, so the
    # driver can average gradients from many workers before applying.
    application = opt.apply_gradients(zip(placeholders, parameters))
    sess = tf.Session()
    init_all_variables = tf.initialize_all_variables()

    # In order to set the weights of the TensorFlow graph on a worker, we add
    # assignment nodes. To get the network weights (as a list of numpy arrays)
    # and to set the network weights (from a list of numpy arrays), use the
    # methods get_weights and set_weights. This can be done from within a remote
    # function or on the driver.
    def get_and_set_weights_methods():
        assignment_placeholders = []
        assignment_nodes = []
        for var in tf.trainable_variables():
            assignment_placeholders.append(tf.placeholder(var.value().dtype, var.get_shape().as_list()))
            assignment_nodes.append(var.assign(assignment_placeholders[-1]))

        def get_weights():
            # Snapshot all trainable variables as numpy arrays.
            return [v.eval(session=sess) for v in tf.trainable_variables()]

        def set_weights(new_weights):
            # Overwrite all trainable variables from numpy arrays.
            sess.run(assignment_nodes, feed_dict={p: w for p, w in zip(assignment_placeholders, new_weights)})
        return get_weights, set_weights

    get_weights, set_weights = get_and_set_weights_methods()
    return comp_grads, sess, application, accuracy, images, y_true, dropout, placeholders, init_all_variables, get_weights, set_weights
def net_reinitialization(net_vars):
    # Reinitialization hook for ray.reusables: reuse the graph/session
    # built by net_initialization rather than rebuilding it.
    return net_vars
@ray.remote
def num_images(batches):
    """Count the total number of images across all batches.

    Args:
      batches (List): Collection of batches of images and labels.

    Returns:
      int: The number of images.
    """
    shapes = ray.get([ra.shape.remote(batch) for batch in batches])
    return sum(shape[0] for shape in shapes)
@ray.remote
def compute_mean_image(batches):
    """Compute the pixel-wise mean image over a list of image batches.

    Args:
      batches (List[ObjectID]): A list of batches of images.

    Returns:
      ndarray: The mean image (float64).

    Raises:
      ValueError: If ``batches`` is empty.
    """
    # Robustness: raise a specific exception type instead of bare Exception
    # (ValueError subclasses Exception, so existing handlers still match),
    # and use the truthiness idiom instead of len(...) == 0.
    if not batches:
        raise ValueError("No images were passed into `compute_mean_image`.")
    sum_image_ids = [ra.sum.remote(batch, axis=0) for batch in batches]
    n_images = num_images.remote(batches)
    return np.sum(ray.get(sum_image_ids), axis=0).astype("float64") / ray.get(n_images)
@ray.remote(num_return_vals=4)
def shuffle_arrays(first_images, first_labels, second_images, second_labels):
    """Shuffle the images and labels from two batches.

    The two batches are pooled, randomly permuted, and split back into two
    batches of the original sizes (image/label pairing is preserved).

    Args:
      first_images (ndarray): First batch of images.
      first_labels (ndarray): First batch of labels.
      second_images (ndarray): Second batch of images.
      second_labels (ndarray): Second batch of labels.

    Returns:
      The two shuffled batches, as four arrays:
      (first images, first labels, second images, second labels).
    """
    all_images = np.concatenate((first_images, second_images))
    all_labels = np.concatenate((first_labels, second_labels))
    n_first = len(first_images)
    permutation = np.random.permutation(len(all_images))
    first_idx = permutation[:n_first]
    second_idx = permutation[n_first:]
    return (all_images[first_idx], all_labels[first_idx],
            all_images[second_idx], all_labels[second_idx])
def shuffle_pair(first_batch, second_batch):
    """Shuffle two batches of data.

    Args:
      first_batch (Tuple[ObjectID, ObjectID]): The first batch to be
        shuffled: (images object ID, labels object ID).
      second_batch (Tuple[ObjectID, ObjectID]): The second batch to be
        shuffled, same layout.

    Returns:
      Two (images, labels) tuples of object IDs -- the two shuffled batches.
    """
    shuffled = shuffle_arrays.remote(first_batch[0], first_batch[1],
                                     second_batch[0], second_batch[1])
    images1, labels1, images2, labels2 = shuffled
    return (images1, labels1), (images2, labels2)
@ray.remote
def filenames_to_labels(filenames, filename_label_dict):
    """Convert filename strings to integer labels.

    Args:
      filenames (List[str]): The filenames of the images.
      filename_label_dict (Dict[str, int]): A dictionary mapping filenames
        to integer labels.

    Returns:
      ndarray: Integer labels, in the order of ``filenames``.
    """
    labels = [int(filename_label_dict[name]) for name in filenames]
    return np.asarray(labels)
def one_hot(x, num_classes=1000):
    """Convert an integer label to a one-hot vector.

    Generalized: the hard-coded 1000 (ImageNet classes) is now a keyword
    parameter whose default preserves the original behavior.

    Args:
      x (int): Index to be set to one.
      num_classes (int): Length of the returned vector.

    Returns:
      ndarray: One-hot vector of length ``num_classes``.
    """
    vec = np.zeros([num_classes])
    vec[x] = 1.0
    return vec
def crop_images(images):
    """Randomly crop a batch of 256x256 images down to 224x224.

    The same random offset is applied to every image in the batch; this is
    used to generate many slightly different images from each training
    example.

    Args:
      images (ndarray): A batch of images to crop, shaped
        batch_size x height x width x channels.

    Returns:
      ndarray: A batch of cropped images.
    """
    src_height, src_width = 256, 256
    out_height, out_width = 224, 224
    top = np.random.randint(src_height - out_height + 1)
    left = np.random.randint(src_width - out_width + 1)
    return images[:, top:top + out_height, left:left + out_width, :]
def shuffle(batches):
    """Shuffle the data.

    This method groups the batches together in pairs and within each pair
    shuffles the data between the two members.

    Args:
      batches (List[Tuple[ObjectID, ObjectID]]): A list of tuples, where
        each tuple consists of two object IDs: a batch of images and the
        corresponding batch of labels.

    Returns:
      List[Tuple[ObjectID, ObjectID]]: The shuffled data.
    """
    # Randomly permute the order of the batches.
    permuted_batches = np.random.permutation(batches)
    new_batches = []
    # BUG FIX: `len(batches) / 2` is a float under Python 3 and makes
    # range() raise TypeError; use integer (floor) division instead.
    for i in range(len(batches) // 2):
        # Swap data between consecutive batches.
        shuffled_batch1, shuffled_batch2 = shuffle_pair(
            permuted_batches[2 * i], permuted_batches[2 * i + 1])
        new_batches += [shuffled_batch1, shuffled_batch2]
    if len(batches) % 2 == 1:
        # If there is an odd number of batches, don't forget the last one.
        new_batches.append(permuted_batches[-1])
    return new_batches
@ray.remote
def compute_grad(X, Y, mean, weights):
    """Computes the gradient of the network.

    Args:
      X (ndarray): Numpy array of images in the form of [224, 224,3]
      Y (ndarray): Labels corresponding to each image
      mean (ndarray): Mean image to subtract from images
      weights (List[ndarray]): The network weights.

    Returns:
      List of gradients for each variable
    """
    comp_grads, sess, _, _, images, y_true, dropout, placeholders, _, get_weights, set_weights = ray.reusables.net_vars
    # Set the network weights.
    set_weights(weights)
    # Choose a subset of the batch to compute on and crop the images.
    # NOTE(review): samples a fixed mini-batch of 128 *with* replacement.
    random_indices = np.random.randint(0, len(X), size=128)
    subset_X = crop_images(X[random_indices] - mean)
    subset_Y = np.asarray([one_hot(label) for label in Y[random_indices]])
    # Compute the gradients (dropout keep-probability 0.5 for training).
    return sess.run([g for (g, v) in comp_grads], feed_dict={images: subset_X, y_true: subset_Y, dropout: 0.5})
@ray.remote
def compute_accuracy(X, Y, weights):
    """Returns the accuracy of the network.

    Args:
      X (ndarray): A batch of images.
      Y (ndarray): A batch of labels.
      weights (List[ndarray]): The network weights.

    Returns:
      The accuracy of the network on the given batch.
    """
    _, sess, _, accuracy, images, y_true, dropout, placeholders, _, get_weights, set_weights = ray.reusables.net_vars
    # Set the network weights.
    set_weights(weights)
    one_hot_Y = np.asarray([one_hot(label) for label in Y])
    # NOTE(review): unlike compute_grad, the mean image is not subtracted
    # here -- confirm whether that is intentional.
    cropped_X = crop_images(X)
    # dropout keep-probability 1.0: no dropout at evaluation time.
    return sess.run(accuracy, feed_dict={images: cropped_X, y_true: one_hot_Y, dropout: 1.0})
|
#!/usr/bin/env python
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
REST API with Bigtable storage
"""
from google.cloud import bigtable
from google.cloud.bigtable import row_filters
from google.cloud.bigtable.row_set import RowSet,RowRange
from proto.instancemetric_pb2 import Metrics
from google.protobuf.json_format import MessageToDict
import json
import time
from flask import Flask
from flask import request
from flask import Response
def rowkey(host, dc, region, t):
    """Build a Bigtable row key of the form ``host#dc#region#<t>``.

    When ``t`` is None the key ends in ``$`` instead of a timestamp --
    presumably as a scan boundary past all timestamped keys of that
    host/dc/region prefix.
    """
    if t is None:
        return host + "#" + dc + "#" + region + "$"
    return host + "#" + dc + "#" + region + "#" + str(t)
class QueryHandler(object):
    """Thin wrapper around a Bigtable table for time-windowed metric scans."""

    def __init__(self, project, instance_id, table_id):
        self.project = project
        self.instance_id = instance_id
        self.table_id = table_id
        # Data-plane client only (admin=False): this handler just reads rows.
        self.client = bigtable.Client(project=self.project, admin=False)
        self.instance = self.client.instance(self.instance_id)
        self.table = self.instance.table(self.table_id)

    def query(self, host, dc, region, t, limit=1, window=60):
        """Scan rows for host/dc/region with timestamps in [t - window, t].

        :param t: end of the time window (unix seconds)
        :param limit: maximum number of rows to return
        :param window: window length in seconds
        :return: iterable of rows; CellsColumnLimitFilter(1) keeps at most
            one cell per column.
        """
        t0 = int(t) - window
        t1 = int(t)
        start_key = rowkey(host, dc, region, t0)
        end_key = rowkey(host, dc, region, t1)
        row_set = RowSet()
        row_set.add_row_range(RowRange(start_key, end_key))
        return self.table.read_rows(
            limit=limit,
            filter_=row_filters.CellsColumnLimitFilter(1),
            row_set=row_set
        )
app = Flask('MetricsApp')


@app.before_first_request
def init():
    """Create the Bigtable-backed QueryHandler before serving requests.

    Connection settings come from the PROJECT / INSTANCE / TABLE
    environment variables, with defaults for local testing.
    """
    import os
    project = os.environ.get('PROJECT', 'myproject')
    instance_id = os.environ.get('INSTANCE', 'metrics')
    table_id = os.environ.get('TABLE', 'metrics')
    app.QUERY_HANDLER = QueryHandler(project, instance_id, table_id)
def read_metrics(host, dc, region, limit, window):
    """Query Bigtable and deserialize every stored cell into a Metrics proto.

    :param host, dc, region: row-key prefix components
    :param limit: maximum number of rows to scan
    :param window: lookback window in seconds, ending now
    :return: list of Metrics protobuf messages
    """
    t = int(time.time())
    rows = app.QUERY_HANDLER.query(host, dc, region, t, limit, window)
    parsed = []
    # row.cells maps column family -> column -> list of cells; we only need
    # the cell values, so iterate the dict values directly instead of
    # iterating keys and re-indexing (PERF102).
    for row in rows:
        for columns in row.cells.values():
            for cells in columns.values():
                for cell in cells:
                    m = Metrics()
                    m.ParseFromString(cell.value)
                    parsed.append(m)
    return parsed
@app.route('/metrics')
def metrics():
    """HTTP endpoint returning recent metrics as a JSON array.

    Query parameters: host, dc, region (row-key components), limit
    (max rows, default 1) and w (window seconds, default 1 hour).
    """
    host = request.args.get('host')
    dc = request.args.get('dc')
    region = request.args.get('region')
    limit_arg = request.args.get('limit')
    window_arg = request.args.get('w')
    limit = 1 if limit_arg is None else int(limit_arg)
    window = 60 * 60 if window_arg is None else int(window_arg)  # default: 1 hour
    rows = read_metrics(host, dc, region, limit, window)
    encoded = [
        json.dumps(MessageToDict(row, including_default_value_fields=True,
                                 preserving_proto_field_name=True))
        for row in rows
    ]
    return Response(response='[' + ",".join(encoded) + ']',
                    status=200,
                    mimetype='application/json')
def get_cpu(vm):
    """Return the VM's CPU-time utilization, or None if not reported."""
    if 'cpu' not in vm:
        return None
    cpu = vm['cpu']
    if 'cpu_data_cputime_percent' not in cpu:
        return None
    return cpu['cpu_data_cputime_percent']
def filter_vms_by_cpu(vm):
    """Predicate: True when the VM reports CPU utilization above 0.8."""
    utilization = get_cpu(vm)
    return utilization is not None and utilization > 0.8
@app.route('/top')
def top():
    """HTTP endpoint: per host/timestamp, the top-N VMs by CPU utilization
    among those above 80%.

    Query parameters: host, dc, region (row-key components), limit
    (max rows, default 1), n (VMs per result, default 3), w (window
    seconds, default 3600) and t (window end, default: now).
    """
    host = request.args.get('host')
    dc = request.args.get('dc')
    region = request.args.get('region')
    limit = request.args.get('limit')
    top_n = request.args.get('n')
    window = request.args.get('w')
    t = request.args.get('t')
    t = int(time.time()) if t is None else int(t)
    top_n = 3 if top_n is None else int(top_n)
    limit = 1 if limit is None else int(limit)
    window = 3600 if window is None else int(window)
    rows = app.QUERY_HANDLER.query(host=host, dc=dc, region=region, t=t, limit=limit, window=window)
    # Deserialize every cell into a dict form of the Metrics proto.
    msgs = []
    for row in rows:
        for cf in row.cells:
            cell = row.cells[cf]
            for col in cell:
                for x in cell[col]:
                    m = Metrics()
                    m.ParseFromString(x.value)
                    d = MessageToDict(m, including_default_value_fields=True, preserving_proto_field_name=True)
                    msgs.append(d)
    results = []
    for msg in msgs:
        ts = msg['timestamp']
        if 'vm' in msg:
            top_vms = []
            # BUG FIX: under Python 3, filter() returns an iterator which
            # has no .sort(); the original crashed with AttributeError.
            highcpu = sorted(filter(filter_vms_by_cpu, msg['vm']), key=get_cpu)
            # NOTE(review): ascending sort picks the *lowest*-CPU of the
            # high-CPU VMs; a true "top N" likely wants reverse=True --
            # order preserved here pending confirmation of intent.
            vms = highcpu[:top_n]
            for vm in vms:
                top_vms.append({'vmid': vm['vmid'], 'cpu': get_cpu(vm)})
            results.append({'host': msg['host_info']['host'], 'ts': ts, 'vms': top_vms})
    result = [json.dumps(vm) for vm in results]
    return Response(response='[' + ",".join(result) + ']',
                    status=200,
                    mimetype='application/json')
if __name__ == '__main__':
    # No-op when run directly -- presumably the app is served through a
    # WSGI container rather than Flask's development server; confirm.
    pass
|
# uncompyle6 version 3.3.5
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.7.3 (default, Apr 24 2019, 15:29:51) [MSC v.1915 64 bit (AMD64)]
# Embedded file name: c:\Jenkins\live\output\win_64_static\Release\python-bundle\MIDI Remote Scripts\Push2\setup_component.py
# Compiled at: 2018-11-30 15:48:11
from __future__ import absolute_import, print_function, unicode_literals
from ableton.v2.base import CompoundDisconnectable, SerializableListenableProperties, EventObject, clamp, listenable_property
from ableton.v2.control_surface import Component
from ableton.v2.control_surface.control import RadioButtonControl, StepEncoderControl, ToggleButtonControl, ButtonControl, control_list
from ableton.v2.control_surface.mode import ModesComponent
from .pad_velocity_curve import PadVelocityCurveSettings
PAD_SETTING_STEP_SIZE = 20
MAX_DISPLAY_BRIGHTNESS = 255
MIN_USER_FACING_LED_BRIGHTNESS = 13
MIN_USER_FACING_DISPLAY_BRIGHTNESS = 2
class GeneralSettings(EventObject):
    """Session-only general settings (created fresh, not persisted via the
    preferences mapping like the other setting classes)."""
    # Workflow mode; toggled between b'scene' and b'clip' by
    # GeneralSettingsComponent's workflow encoder.
    workflow = listenable_property.managed(b'scene')
class HardwareSettings(SerializableListenableProperties):
    """Persisted hardware settings: LED and display brightness.

    The *_brightness values are clamped to [min_*, max_*] by the encoder
    handlers in GeneralSettingsComponent; the user-facing minimums are
    above zero, presumably so the hardware never goes fully dark.
    """
    min_led_brightness = MIN_USER_FACING_LED_BRIGHTNESS
    max_led_brightness = 127
    led_brightness = listenable_property.managed(max_led_brightness)
    min_display_brightness = MIN_USER_FACING_DISPLAY_BRIGHTNESS
    max_display_brightness = MAX_DISPLAY_BRIGHTNESS
    display_brightness = listenable_property.managed(max_display_brightness)
class DisplayDebugSettings(SerializableListenableProperties):
    """Persisted toggles for display-layout debug overlays (all off by
    default); exposed only in _Push2DeveloperMode."""
    show_row_spaces = listenable_property.managed(False)
    show_row_margins = listenable_property.managed(False)
    show_row_middle = listenable_property.managed(False)
    show_button_spaces = listenable_property.managed(False)
    show_unlit_button = listenable_property.managed(False)
    show_lit_button = listenable_property.managed(False)
class Settings(CompoundDisconnectable):
    """Aggregates all setting objects and ties their lifetime to this
    compound disconnectable, exposing each one as a read-only property."""

    def __init__(self, preferences=None, *a, **k):
        assert preferences is not None
        super(Settings, self).__init__(*a, **k)
        # General settings are session-only; the other three are persisted
        # in (and restored from) the preferences mapping via setdefault.
        self._general = self.register_disconnectable(GeneralSettings())
        self._pad_settings = self.register_disconnectable(preferences.setdefault(b'settings_pad_velocity_curve', PadVelocityCurveSettings()))
        self._hardware = self.register_disconnectable(preferences.setdefault(b'settings_hardware', HardwareSettings()))
        self._display_debug = self.register_disconnectable(preferences.setdefault(b'settings_display_debug', DisplayDebugSettings()))
        return

    @property
    def general(self):
        return self._general

    @property
    def pad_settings(self):
        return self._pad_settings

    @property
    def hardware(self):
        return self._hardware

    @property
    def display_debug(self):
        return self._display_debug
class GeneralSettingsComponent(Component):
    """Maps the workflow/LED/display encoders onto the setting objects."""
    workflow_encoder = StepEncoderControl()
    led_brightness_encoder = StepEncoderControl(num_steps=60)
    display_brightness_encoder = StepEncoderControl(num_steps=120)

    def __init__(self, settings=None, hardware_settings=None, *a, **k):
        assert settings is not None
        assert hardware_settings is not None
        super(GeneralSettingsComponent, self).__init__(*a, **k)
        self._settings = settings
        self._hardware_settings = hardware_settings
        # Any positive encoder step selects the b'clip' workflow, any
        # non-positive step selects b'scene'.
        self.workflow_encoder.connect_property(settings, b'workflow', lambda v: b'clip' if v > 0 else b'scene')
        return

    @led_brightness_encoder.value
    def led_brightness_encoder(self, value, encoder):
        # Accumulate encoder steps, clamped to the allowed LED range.
        self._hardware_settings.led_brightness = clamp(self._hardware_settings.led_brightness + value, self._hardware_settings.min_led_brightness, self._hardware_settings.max_led_brightness)

    @display_brightness_encoder.value
    def display_brightness_encoder(self, value, encoder):
        # Accumulate encoder steps, clamped to the allowed display range.
        self._hardware_settings.display_brightness = clamp(self._hardware_settings.display_brightness + value, self._hardware_settings.min_display_brightness, self._hardware_settings.max_display_brightness)
class PadSettingsComponent(Component):
    """Maps the sensitivity/gain/dynamics encoders onto the pad velocity
    curve settings, clamping each value to its allowed range."""
    sensitivity_encoder = StepEncoderControl(num_steps=PAD_SETTING_STEP_SIZE)
    gain_encoder = StepEncoderControl(num_steps=PAD_SETTING_STEP_SIZE)
    dynamics_encoder = StepEncoderControl(num_steps=PAD_SETTING_STEP_SIZE)

    def __init__(self, pad_settings=None, hardware_settings=None, *a, **k):
        # NOTE(review): `hardware_settings` is accepted but never stored or
        # used here -- possibly vestigial.
        assert pad_settings is not None
        super(PadSettingsComponent, self).__init__(*a, **k)
        self._pad_settings = pad_settings
        return

    @sensitivity_encoder.value
    def sensitivity_encoder(self, value, encoder):
        self._pad_settings.sensitivity = clamp(self._pad_settings.sensitivity + value, self._pad_settings.min_sensitivity, self._pad_settings.max_sensitivity)

    @gain_encoder.value
    def gain_encoder(self, value, encoder):
        self._pad_settings.gain = clamp(self._pad_settings.gain + value, self._pad_settings.min_gain, self._pad_settings.max_gain)

    @dynamics_encoder.value
    def dynamics_encoder(self, value, encoder):
        self._pad_settings.dynamics = clamp(self._pad_settings.dynamics + value, self._pad_settings.min_dynamics, self._pad_settings.max_dynamics)
class DisplayDebugSettingsComponent(Component):
    """Binds one toggle button to each DisplayDebugSettings property."""
    show_row_spaces_button = ToggleButtonControl()
    show_row_margins_button = ToggleButtonControl()
    show_row_middle_button = ToggleButtonControl()
    show_button_spaces_button = ToggleButtonControl()
    show_unlit_button_button = ToggleButtonControl()
    show_lit_button_button = ToggleButtonControl()

    def __init__(self, settings=None, *a, **k):
        assert settings is not None
        super(DisplayDebugSettingsComponent, self).__init__(*a, **k)
        # Two-way binding: each button mirrors and drives its setting.
        self.show_row_spaces_button.connect_property(settings, b'show_row_spaces')
        self.show_row_margins_button.connect_property(settings, b'show_row_margins')
        self.show_row_middle_button.connect_property(settings, b'show_row_middle')
        self.show_button_spaces_button.connect_property(settings, b'show_button_spaces')
        self.show_unlit_button_button.connect_property(settings, b'show_unlit_button')
        self.show_lit_button_button.connect_property(settings, b'show_lit_button')
        return
class InfoComponent(Component):
    """Exposes a button to install new firmware via the firmware switcher."""
    install_firmware_button = ButtonControl()

    def __init__(self, firmware_switcher=None, *a, **k):
        assert firmware_switcher is not None
        super(InfoComponent, self).__init__(*a, **k)
        self._firmware_switcher = firmware_switcher
        # Only enable the button when a firmware switch is actually possible.
        self.install_firmware_button.enabled = self._firmware_switcher.can_switch_firmware
        return

    @install_firmware_button.pressed
    def install_firmware_button(self, button):
        self._firmware_switcher.switch_firmware()
class SetupComponent(ModesComponent):
    """Top-level setup page that switches between the Settings and Info
    categories (plus Display Debug in developer mode) via radio buttons.
    """
    category_radio_buttons = control_list(RadioButtonControl, checked_color=b'Option.Selected', unchecked_color=b'Option.Unselected')
    make_it_go_boom_button = ButtonControl()
    # Listenable flag flipped by the hidden 'make it go boom' button.
    make_it_go_boom = listenable_property.managed(False)

    def __init__(self, settings=None, pad_curve_sender=None, firmware_switcher=None, *a, **k):
        assert settings is not None
        super(SetupComponent, self).__init__(*a, **k)
        self._settings = settings
        self._pad_settings = pad_settings if False else None  # placeholder removed
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys

# Make the documented package importable for autodoc.
sys.path.insert(0, os.path.abspath("../fsmpy"))

# -- Project information -----------------------------------------------------

project = 'fsmpy - fuzzy_set_measures'
copyright = '2022, Machine Learning and Vision Research Group'
author = 'Machine Learning and Vision Research Group'

# -- General configuration ---------------------------------------------------

# Follow imported members when resolving viewcode links.
viewcode_follow_imported_members = True

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'numpydoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.linkcode',
]
def linkcode_resolve(domain, info):
    """Resolve a GitHub source URL for ``sphinx.ext.linkcode``.

    Locates the line of the documented object in the local source tree and
    builds a ``#L<line>`` anchor for the repository on GitHub.

    :param domain: Sphinx object domain; only ``'py'`` is handled.
    :param info: dict with ``'module'`` (dotted module path) and
        ``'fullname'`` (object name, possibly dotted for class members).
    :returns: URL string, or ``None`` when no link can be produced.
    """
    if domain != 'py':
        return None
    if not info['module']:
        return None
    filename = info['module'].replace('.', '/')
    # For the 'py' domain this yields the '.py' source path.
    filepath = os.path.abspath(os.path.join("../src/", filename + "." + domain))
    if len(info["fullname"].split(".")) > 1:
        # Dotted name (e.g. Class.method): find the class first, then scan
        # forward for each subsequent part on a 'def' line.
        parts = info["fullname"].split(".")
        with open(filepath, "r") as source_file:  # close the handle (was leaked)
            lines = source_file.readlines()
        start_index = 0
        for part in parts:
            start_index = next(
                i + start_index
                for i, l in enumerate(lines[start_index:])
                if (start_index == 0 and part in l) or (start_index > 0 and part in l and "def" in l)
            )
        line_index = start_index
    else:
        try:
            with open(filepath, "r") as source_file:
                lines = source_file.readlines()
        except FileNotFoundError:
            # The module may be a package: fall back to its __init__ file.
            filepath = os.path.abspath(os.path.join("../src/", filename, "__init__." + domain))
            with open(filepath, "r") as source_file:
                lines = source_file.readlines()
        line_index = next(
            i
            for i, l in enumerate(lines)
            if info["fullname"] in l
        )
    return "https://github.com/MachineLearningVisionRG/fsmpy/tree/main/src/{}.py#L{}".format(filename, line_index + 1)
# Run every numpydoc docstring validation check.
numpydoc_validation_checks = {
    "all"
}

# Map short type names used in docstrings to their fully qualified targets.
numpydoc_xref_aliases = {
    "BaseEstimator": "sklearn.base.BaseEstimator"
}

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# Cross-reference targets for external projects used by intersphinx.
intersphinx_mapping = {
    'python': ('http://docs.python.org/2', None),
    'numpy': ('https://numpy.org/doc/stable/', None),
    'scipy': ('http://docs.scipy.org/doc/scipy/reference', None),
    'matplotlib': ('http://matplotlib.sourceforge.net', None),
    'sklearn': ("https://scikit-learn.org/stable/", None)
}

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
]

# Strip module prefixes from object names in the generated docs.
add_module_names = False

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'pydata_sphinx_theme'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
|
"""Dark matter - electron scattering
"""
import numericalunits as nu
import numpy as np
from scipy.interpolate import RegularGridInterpolator, interp1d
from scipy.integrate import quad, dblquad
from scipy.stats import binom
import wimprates as wr
export, __all__ = wr.exporter()
__all__ += ['dme_shells', 'l_to_letter', 'l_to_number']

# Load form factor and construct interpolators.
# Each shell entry gets an interpolator of log10(|f_ion|^2) over the
# (ln k, ln q) grid; outside the grid the fill value -inf makes the
# form factor vanish (conservative).
shell_data = wr.load_pickle('dme/dme_ionization_ff.pkl')
for _shell, _sd in shell_data.items():
    _sd['log10ffsquared_itp'] = RegularGridInterpolator(
        (_sd['lnks'], _sd['lnqs']),
        np.log10(_sd['ffsquared']),
        bounds_error=False, fill_value=-float('inf'),)

# Xenon (n, l) shells included in the calculation.
dme_shells = [(5, 1), (5, 0), (4, 2), (4, 1), (4, 0)]
# Spectroscopic letter <-> angular momentum quantum number maps.
l_to_number = dict(s=0, p=1, d=2, f=3)
l_to_letter = {v: k for k, v in l_to_number.items()}
@export
def shell_str(n, l):
    """Return a shell name such as '4p' for quantum numbers (n, l).

    `l` may already be a spectroscopic letter, or an angular momentum
    quantum number to be translated via l_to_letter.
    """
    suffix = l if isinstance(l, str) else l_to_letter[l]
    return '{}{}'.format(n, suffix)
@export
def dme_ionization_ff(shell, e_er, q):
    """Return dark matter electron scattering ionization form factor

    Outside the parametrized range, the form factor is assumed 0
    to give conservative results.

    :param shell: Name of atomic shell, e.g. '4p'
        Note not all shells are included in the data.
    :param e_er: Electronic recoil energy
    :param q: Momentum transfer
    """
    if isinstance(shell, tuple):
        shell = shell_str(*shell)
    # Express q in atomic momentum units (me * c * alpha) before the log.
    lnq = np.log(q / (nu.me * nu.c0 * nu.alphaFS))
    # From Mathematica: (*ER*) (2 lnkvalues[[j]])/Log[10]
    # log10 (E/Ry) = 2 lnk / ln10
    # lnk = log10(E/Ry) * ln10 / 2
    #     = ln(E/Ry) / 2
    # Ry = rydberg = 13.6 eV
    ry = nu.me * nu.e ** 4 / (8 * nu.eps0 ** 2 * nu.hPlanck ** 2)
    lnk = np.log(e_er / ry) / 2
    # Interpolator stores log10(ff^2); convert back before returning.
    return 10**(shell_data[shell]['log10ffsquared_itp'](np.vstack([lnk, lnq]).T))
@export
def binding_es_for_dme(n, l):
    """Return binding energy of Xenon's (n, l) orbital
    according to Essig et al. 2017 Table II

    Note these are different from e.g. Ibe et al. 2017!
    """
    binding_energies_ev = {
        '4s': 213.8,
        '4p': 163.5,
        '4d': 75.6,
        '5s': 25.7,
        '5p': 12.4,
    }
    return binding_energies_ev[shell_str(n, l)] * nu.eV
@export
def rates_to_ne(e_er, drs, W=None, max_n_el=16, p_primary=1, p_secondary=0.83, swap_4s4p=False):
    """Return (n_electrons, {shell: rate / (kg day) for each electron count})

    :param e_er: Energies at which the differential rates were computed
    :param drs: {shell: differential rate array over e_er}
    :param W: Work function (energy need to produce a quantum)
    :param max_n_el: Maximum number of electrons to consider.
    :param p_primary: Probability that primary electron survives
    :param p_secondary: Probability that secondary quanta survives as electron
    :param swap_4s4p: If True, swap differential rates of 4s and 4p
    """
    # Extra quanta produced per shell on top of floor(E/W).
    additional_quanta = {'4s': 3, '4p': 6, '4d': 4, '5s': 0, '5p': 0}
    if W is None:
        W = 13.8 * nu.eV
    # Fix: np.int was removed from NumPy; the builtin int is the correct dtype.
    n_el = np.arange(max_n_el + 1, dtype=int)
    result = dict()

    # We need an "energy bin size" to multiply with (or do some fancy integration)
    # I'll use the differences between the points at which the differential
    # rates were computed.
    # To ensure this doesn't give a bias, nearby bins can't differ too much
    # (e.g. use a linspace or a high-n logspace/geomspace)
    binsizes = np.array(np.diff(e_er).tolist() + [e_er[-1] - e_er[-2]])

    for shell, rates in drs.items():
        if swap_4s4p:
            # Somehow we can reproduce 1703.00910
            # if we swap 4s <-> 4p here??
            if shell == '4s':
                rates = drs['4p']
            elif shell == '4p':
                rates = drs['4s']

        # Convert from energy to n_electrons: the primary electron survives
        # with p_primary, each secondary quantum becomes an electron with
        # p_secondary; combine the two binomial outcomes.
        r_n = np.zeros(len(n_el))
        for e, r in zip(e_er, rates * binsizes):
            n_secondary = int(np.floor(e / W)) + additional_quanta[shell]
            r_n += r * (p_primary * binom.pmf(n_el-1, n=n_secondary, p=p_secondary)
                        + (1 - p_primary) * binom.pmf(n_el, n=n_secondary, p=p_secondary))

        # We can't see "0-electron events"
        # Set their rate to 0 so we don't sum them accidentally
        r_n[0] = 0
        result[shell] = r_n

    return n_el, result
@export
def v_min_dme(eb, erec, q, mw):
    """Minimal DM velocity for DM-electron scattering

    :param eb: binding energy of shell
    :param erec: electronic recoil energy
    :param q: momentum transfer
    :param mw: DM mass
    """
    deposited_energy = erec + eb
    return deposited_energy / q + q / (2 * mw)
# Precompute velocity integrals for t=None
@export
def velocity_integral_without_time(halo_model=None):
    """Precompute the inverse-mean-speed integral for the time-independent case.

    Fixes the upper integration bound to v_max and varies the lower bound
    v_min; only used when there is no time dependence in v_max, i.e. t=None.

    :param halo_model: halo model to use; defaults to the standard halo model
    :returns: interpolator mapping a unit-carrying v_min to the integral of
        f(v)/v from v_min to v_max (carries units of 1/velocity). Outside
        0 < v_min < v_max it returns 0 so the integrand vanishes there.
    """
    halo_model = wr.StandardHaloModel() if halo_model is None else halo_model
    _v_mins = np.linspace(0.0001, 1, 1000) * wr.v_max(None, halo_model.v_esc)  # v_max carries units
    _ims = np.array([quad(lambda v: 1 / v * halo_model.velocity_dist(v, None),
                          _v_min, wr.v_max(None, halo_model.v_esc))[0]
                     for _v_min in _v_mins])

    # Store interpolator in unit-dependent numbers: pass a unit-carrying
    # velocity in, get the unit-carrying integral [1/velocity] back.
    inv_mean_speed = interp1d(_v_mins, _ims, fill_value=0, bounds_error=False)
    # If we don't have 0 < v_min < v_max, we want to return 0
    # so the integrand vanishes
    return inv_mean_speed
@export
@wr.vectorize_first  # For-loop-vectorize the first argument of the function
def rate_dme(erec, n, l, mw, sigma_dme, inv_mean_speed=None, halo_model=None, f_dm='1', t=None, **kwargs):
    r"""Return differential rate of dark matter electron scattering vs energy
    (i.e. dr/dE, not dr/dlogE)

    :param erec: Electronic recoil energy
    :param n: Principal quantum number of the shell that is hit
    :param l: Angular momentum quantum number of the shell that is hit
    :param mw: DM mass
    :param sigma_dme: DM-free electron scattering cross-section at fixed
        momentum transfer q=0
    :param f_dm: One of the following:
        '1': |F_DM|^2 = 1, contact interaction / heavy mediator (default)
        '1_q': |F_DM|^2 = (\alpha m_e c / q), dipole moment
        '1_q2': |F_DM|^2 = (\alpha m_e c / q)^2, ultralight mediator
    :param t: A J2000.0 timestamp.
    :param halo_model: Halo model to be used; if not given, the standard
        halo model is used.
    :param inv_mean_speed: precomputed function giving the inverse-mean-speed
        integral for a given vmin (only valid for t=None)
    """
    halo_model = wr.StandardHaloModel() if halo_model is None else halo_model
    shell = shell_str(n, l)
    eb = binding_es_for_dme(n, l)

    # Replace the form-factor key by the corresponding |F_DM| function.
    f_dm = {
        '1': lambda q: 1,
        '1_q': lambda q: nu.alphaFS * nu.me * nu.c0 / q,
        '1_q2': lambda q: (nu.alphaFS * nu.me * nu.c0 / q) ** 2
    }[f_dm]

    # No bounds are given for the q integral,
    # but the form factors are only specified in a limited range of q;
    # integrate up to that maximum.
    qmax = (np.exp(shell_data[shell]['lnqs'].max())
            * (nu.me * nu.c0 * nu.alphaFS))

    if t is None and inv_mean_speed is not None:
        # Use precomputed inverse mean speed so we only have to do a single integral
        def diff_xsec(q):
            vmin = v_min_dme(eb, erec, q, mw)
            # Note the interpolator's return value carries units
            return q * dme_ionization_ff(shell, erec, q) * f_dm(q) ** 2 * inv_mean_speed(vmin)

        r = quad(diff_xsec, 0, qmax)[0]
    else:
        # Have to do double integral
        # Note dblquad expects the function to be f(y, x), not f(x, y)...
        def diff_xsec(v, q):
            result = q * dme_ionization_ff(shell, erec, q) * f_dm(q) ** 2
            result *= 1 / v * halo_model.velocity_dist(v, t)
            return result

        r = dblquad(
            diff_xsec,
            0,
            qmax,
            lambda q: v_min_dme(eb, erec, q, mw),
            lambda _: wr.v_max(t, halo_model.v_esc),
            **kwargs)[0]

    # DM-electron reduced mass.
    mu_e = mw * nu.me / (mw + nu.me)

    return (
        halo_model.rho_dm / mw * (1 / wr.mn())
        # d/lnE -> d/E
        * 1 / erec
        # Prefactors in cross-section
        * sigma_dme / (8 * mu_e ** 2)
        * r)
@export
def rate_dme_wrapper(m_gev, s_cm2, f_dm, t=None, halo_model=None, max_n_el=7):
    """Return (n_electrons, total rate) summed over all shells for
    dark matter electron scattering.

    :param m_gev: DM mass
    :param s_cm2: DM-free electron cross-section at q=0
    :param f_dm: DM form factor key ('1', '1_q' or '1_q2'); see rate_dme
    :param t: A J2000.0 timestamp, or None for the time-averaged rate
    :param halo_model: halo model; None selects the standard halo model
    :param max_n_el: maximum number of electrons to consider
    """
    e_er = np.geomspace(1, 400, 100)
    drs = dict()
    # Precompute the inverse mean speed integral for t=None
    if t is None:  # fix: was 't == None'
        inv_mean_speed = wr.velocity_integral_without_time(halo_model=halo_model)  # halo_model=None --> standard halo
    else:
        inv_mean_speed = None
    for n, l in wr.dme_shells:
        drs[wr.shell_str(n, l)] = rate_dme(e_er * nu.eV, n, l, mw=m_gev, sigma_dme=s_cm2,
                                           inv_mean_speed=inv_mean_speed,
                                           halo_model=halo_model, f_dm=f_dm, t=t)
    n_el, drsn = rates_to_ne(e_er * nu.eV, drs, swap_4s4p=True, max_n_el=max_n_el)
    # Set total rate units outside the function: e.g /(1/(kg*day))
    total_rate = np.sum(list(drsn.values()), axis=0)
    return n_el, total_rate
from volt_meter import VoltMeter
#volt_meter = VoltMeter(17, .03, 2.25)
volt_meter = VoltMeter(23, .03, 2.25)
#volt_meter = VoltMeter(25, .03, 2.35)

def get_input():
    """Prompt for a value in [0, 100] and point the meter needle at it."""
    value1 = input("Input value from 0 to 100: ")
    volt_meter.set_meter_value(float(value1), 0, 100)

# Fix: a bare get_input() call used to run here at module level as well,
# so merely importing this module triggered a prompt. Keep the call only
# under the main guard.
if __name__ == "__main__":
    get_input()
|
# Approach 4: Search Space Reduction
class Solution(object):
    def searchMatrix(self, matrix, target):
        """Search a row- and column-sorted matrix for target.

        Starts at the bottom-left corner and discards one row or one
        column per step (search-space reduction), O(rows + cols) time.

        :type matrix: List[List[int]]
        :type target: int
        :rtype: bool
        """
        if not matrix or not matrix[0]:
            return False
        num_cols = len(matrix[0])
        row, col = len(matrix) - 1, 0
        while row >= 0 and col < num_cols:
            current = matrix[row][col]
            if current < target:
                col += 1      # everything above in this column is smaller
            elif current > target:
                row -= 1      # everything right in this row is larger
            else:
                return True
        return False
"""
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import re
import cv2 as cv
import numpy as np
import torch
from torch.utils.data import Dataset
class CasiaSurfDataset(Dataset):
    """CASIA-SURF face anti-spoofing dataset.

    Each item is a tensor concatenating the requested modalities
    (profile RGB, optionally depth and IR) along the channel axis,
    paired with an integer label.
    """

    # Maps dataset mode to the suffix of the list-file name on disk.
    PROTOCOLS = {'train': 'train', 'dev': 'dev_ref', 'test': 'test_res'}

    def __init__(self, protocol: int, dir_: str = 'data/CASIA_SURF', mode: str = 'train', depth=False, ir=False,
                 transform=None):
        """
        :param protocol: protocol number selecting the '4@<protocol>_<submode>.txt' list file
        :param dir_: dataset root directory
        :param mode: one of 'train', 'dev', 'test'
        :param depth: include the depth modality
        :param ir: include the IR modality
        :param transform: optional transform called as transform(label=..., img=...)
        """
        self.dir = dir_
        self.mode = mode
        # Fix: PROTOCOLS is a class attribute; the bare name 'PROTOCOLS'
        # raised NameError at runtime — look it up through self.
        submode = self.PROTOCOLS[mode]
        file_name = '4@{}_{}.txt'.format(protocol, submode)
        with open(os.path.join(dir_, file_name), 'r') as list_file:
            self.items = []
            for line in list_file:
                if self.mode == 'train':
                    img_name, label = tuple(line[:-1].split(' '))
                    self.items.append(
                        (self.get_all_modalities(img_name, depth, ir), label))
                elif self.mode == 'dev':
                    folder_name, label = tuple(line[:-1].split(' '))
                    profile_dir = os.path.join(
                        self.dir, folder_name, 'profile')
                    # 'frame' no longer shadows the open file handle ('file').
                    for frame in os.listdir(profile_dir):
                        img_name = os.path.join(folder_name, 'profile', frame)
                        self.items.append(
                            (self.get_all_modalities(img_name, depth, ir), label))
                elif self.mode == 'test':
                    # Test lists carry no labels; use -1 as a placeholder.
                    folder_name = line[:-1].split(' ')[0]
                    profile_dir = os.path.join(
                        self.dir, folder_name, 'profile')
                    for frame in os.listdir(profile_dir):
                        img_name = os.path.join(folder_name, 'profile', frame)
                        self.items.append(
                            (self.get_all_modalities(img_name, depth, ir), -1))
        self.transform = transform

    def __len__(self):
        """Number of (images, label) items."""
        return len(self.items)

    def __getitem__(self, idx):
        """Load, optionally transform, and stack all modalities for item idx."""
        if torch.is_tensor(idx):
            idx = idx.tolist()
        img_names, label = self.items[idx]
        images = []
        for img_name in img_names:
            img_path = os.path.join(self.dir, img_name)
            img = cv.imread(img_path, flags=1)  # flags=1: always load 3-channel BGR
            img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
            if self.transform is not None:
                img = self.transform(label=label, img=img)['image']
            img = np.transpose(img, (2, 0, 1)).astype(np.float32)  # HWC -> CHW
            images += [torch.tensor(img)]
        # Label is returned inverted (1 - label) — NOTE(review): presumably
        # maps file labels to the model's target convention; confirm.
        return torch.cat(images, dim=0), 1 - int(label)

    def get_all_modalities(self, img_path: str, depth: bool = True, ir: bool = True) -> list:
        """Return the profile path plus the requested depth/IR variants
        (derived by substituting 'profile' in the path)."""
        result = [img_path]
        if depth:
            result += [re.sub('profile', 'depth', img_path)]
        if ir:
            result += [re.sub('profile', 'ir', img_path)]
        return result
|
def generate_instance_identity_document(instance):
    """Build a minimal identity document for *instance* (only its id)."""
    document = dict(instanceId=instance.id)
    return document
|
#!/usr/bin/env python3
"""
$ ./tools/js-dep-visualizer.py
$ dot -Tpng var/zulip-deps.dot -o var/zulip-deps.png
"""
import os
import re
import subprocess
import sys
from collections import defaultdict
from typing import Any, DefaultDict, Dict, List, Set, Tuple
# Type aliases for the dependency graph: an edge (parent, child) means
# code in module `parent` calls a method on module `child`.
Edge = Tuple[str, str]
EdgeSet = Set[Edge]
Method = str
MethodDict = DefaultDict[Edge, List[Method]]

# Make the repository root importable so tools.lib.graph resolves.
TOOLS_DIR = os.path.abspath(os.path.dirname(__file__))
ROOT_DIR = os.path.dirname(TOOLS_DIR)
sys.path.insert(0, ROOT_DIR)
from tools.lib.graph import (
    Graph,
    make_dot_file,
    best_edge_to_remove,
)

# Input directory and output artifact locations.
JS_FILES_DIR = os.path.join(ROOT_DIR, 'static/js')
OUTPUT_FILE_PATH = os.path.relpath(os.path.join(ROOT_DIR, 'var/zulip-deps.dot'))
PNG_FILE_PATH = os.path.relpath(os.path.join(ROOT_DIR, 'var/zulip-deps.png'))
def get_js_edges():
    # type: () -> Tuple[EdgeSet, MethodDict]
    """Scan static/js and return (edges, methods), where an edge
    (parent, child) records that `parent` calls some method of `child`,
    and methods maps each edge to the list of method names called."""
    names = set()
    modules = []  # type: List[Dict[str, Any]]
    for js_file in os.listdir(JS_FILES_DIR):
        if not (js_file.endswith('.js') or js_file.endswith('.ts')):
            continue
        module_name = js_file[:-3]  # Remove .js or .ts
        names.add(module_name)
        modules.append(dict(
            name=module_name,
            path=os.path.join(JS_FILES_DIR, js_file),
            regex=re.compile(r'[^_]{}\.\w+\('.format(module_name))
        ))

    comment_regex = re.compile(r'\s+//')
    call_regex = re.compile(r'[^_](\w+\.\w+)\(')

    methods = defaultdict(list)  # type: DefaultDict[Edge, List[Method]]
    edges = set()
    for module in modules:
        parent = module['name']
        with open(module['path']) as f:
            for line in f:
                # Skip full-line comments and a known false positive.
                if comment_regex.match(line) or 'subs.forEach' in line:
                    continue
                match = call_regex.search(line)
                if not match:
                    continue
                for call_text in match.groups():
                    child, method = call_text.split('.')
                    if child not in names or child == parent:
                        continue
                    edge = (parent, child)
                    edges.add(edge)
                    methods[edge].append(method)
    return edges, methods
def find_edges_to_remove(graph, methods):
    # type: (Graph, MethodDict) -> Tuple[Graph, List[Edge]]
    """Greedily remove approved edges until the dependency graph is empty
    (success) or the best candidate edge is not on the approved list
    (partial result; the blocking edge and its methods are printed).

    Returns the remaining graph and the list of removed edges.
    """
    # Edges we never want to cut: these are sensible dependencies.
    EXEMPT_EDGES = [
        # These are sensible dependencies, so don't cut them.
        ('rows', 'message_store'),
        ('filter', 'stream_data'),
        ('server_events', 'user_events'),
        ('compose_fade', 'stream_data'),
        ('narrow', 'message_list'),
        ('stream_list', 'topic_list',),
        ('subs', 'stream_muting'),
        ('hashchange', 'settings'),
        ('tutorial', 'narrow'),
        ('activity', 'resize'),
        ('hashchange', 'drafts'),
        ('compose', 'echo'),
        ('compose', 'resize'),
        ('settings', 'resize'),
        ('compose', 'unread_ops'),
        ('compose', 'drafts'),
        ('echo', 'message_edit'),
        ('echo', 'stream_list'),
        ('hashchange', 'narrow'),
        ('hashchange', 'subs'),
        ('message_edit', 'echo'),
        ('popovers', 'message_edit'),
        ('unread_ui', 'activity'),
        ('message_fetch', 'message_util'),
        ('message_fetch', 'resize'),
        ('message_util', 'resize'),
        ('notifications', 'tutorial'),
        ('message_util', 'unread_ui'),
        ('muting_ui', 'stream_list'),
        ('muting_ui', 'unread_ui'),
        ('stream_popover', 'subs'),
        ('stream_popover', 'muting_ui'),
        ('narrow', 'message_fetch'),
        ('narrow', 'message_util'),
        ('narrow', 'navigate'),
        ('unread_ops', 'unread_ui'),
        ('narrow', 'unread_ops'),
        ('navigate', 'unread_ops'),
        ('pm_list', 'unread_ui'),
        ('stream_list', 'unread_ui'),
        ('popovers', 'compose'),
        ('popovers', 'muting_ui'),
        ('popovers', 'narrow'),
        ('popovers', 'resize'),
        ('pm_list', 'resize'),
        ('notifications', 'navigate'),
        ('compose', 'socket'),
        ('stream_muting', 'message_util'),
        ('subs', 'stream_list'),
        ('ui', 'message_fetch'),
        ('ui', 'unread_ops'),
        ('condense', 'message_viewport'),
        ('compose_actions', 'compose'),
        ('compose_actions', 'resize'),
        ('settings_streams', 'stream_data'),
        ('drafts', 'hashchange'),
        ('settings_notifications', 'stream_edit'),
        ('compose', 'stream_edit'),
        ('subs', 'stream_edit'),
        ('narrow_state', 'stream_data'),
        ('stream_edit', 'stream_list'),
        ('reactions', 'emoji_picker'),
        ('message_edit', 'resize'),
    ]  # type: List[Edge]

    def is_exempt(edge):
        # type: (Tuple[str, str]) -> bool
        # Event-handling parents and the 'rows' child are exempt wholesale,
        # except for the explicitly-allowed (server_events, reload) cut.
        parent, child = edge
        if edge == ('server_events', 'reload'):
            return False
        if parent in ['server_events', 'user_events', 'stream_events',
                      'message_events', 'reload']:
            return True
        if child == 'rows':
            return True
        return edge in EXEMPT_EDGES

    # Edges that have been reviewed and may be removed from the graph.
    APPROVED_CUTS = [
        ('stream_edit', 'stream_events'),
        ('unread_ui', 'pointer'),
        ('typing_events', 'narrow'),
        ('echo', 'message_events'),
        ('resize', 'navigate'),
        ('narrow', 'search'),
        ('subs', 'stream_events'),
        ('stream_color', 'tab_bar'),
        ('stream_color', 'subs'),
        ('stream_data', 'narrow'),
        ('unread', 'narrow'),
        ('composebox_typeahead', 'compose'),
        ('message_list', 'message_edit'),
        ('message_edit', 'compose'),
        ('message_store', 'compose'),
        ('settings_notifications', 'subs'),
        ('settings', 'settings_muting'),
        ('message_fetch', 'tutorial'),
        ('settings', 'subs'),
        ('activity', 'narrow'),
        ('compose', 'compose_actions'),
        ('compose', 'subs'),
        ('compose_actions', 'drafts'),
        ('compose_actions', 'narrow'),
        ('compose_actions', 'unread_ops'),
        ('drafts', 'compose'),
        ('drafts', 'echo'),
        ('echo', 'compose'),
        ('echo', 'narrow'),
        ('echo', 'pm_list'),
        ('echo', 'ui'),
        ('message_fetch', 'activity'),
        ('message_fetch', 'narrow'),
        ('message_fetch', 'pm_list'),
        ('message_fetch', 'stream_list'),
        ('message_fetch', 'ui'),
        ('narrow', 'ui'),
        ('message_util', 'compose'),
        ('subs', 'compose'),
        ('narrow', 'hashchange'),
        ('subs', 'hashchange'),
        ('navigate', 'narrow'),
        ('navigate', 'stream_list'),
        ('pm_list', 'narrow'),
        ('pm_list', 'stream_popover'),
        ('muting_ui', 'stream_popover'),
        ('popovers', 'stream_popover'),
        ('topic_list', 'stream_popover'),
        ('stream_edit', 'subs'),
        ('topic_list', 'narrow'),
        ('stream_list', 'narrow'),
        ('stream_list', 'pm_list'),
        ('stream_list', 'unread_ops'),
        ('notifications', 'ui'),
        ('notifications', 'narrow'),
        ('notifications', 'unread_ops'),
        ('typing', 'narrow'),
        ('message_events', 'compose'),
        ('stream_muting', 'stream_list'),
        ('subs', 'narrow'),
        ('unread_ui', 'pm_list'),
        ('unread_ui', 'stream_list'),
        ('overlays', 'hashchange'),
        ('emoji_picker', 'reactions'),
    ]

    def cut_is_legal(edge):
        # type: (Edge) -> bool
        # Cuts into certain leaf-like children are always legal; otherwise
        # the edge must be explicitly approved.
        parent, child = edge
        if child in ['reload', 'popovers', 'overlays', 'notifications',
                     'server_events', 'compose_actions']:
            return True
        return edge in APPROVED_CUTS

    graph.remove_exterior_nodes()
    removed_edges = list()
    print()
    while graph.num_edges() > 0:
        edge = best_edge_to_remove(graph, is_exempt)
        if edge is None:
            print('we may not be allowing edge cuts!!!')
            break
        if cut_is_legal(edge):
            graph = graph.minus_edge(edge)
            graph.remove_exterior_nodes()
            removed_edges.append(edge)
        else:
            # Blocked: show what was removed so far, then the offending
            # edge and the methods that create it.
            for removed_edge in removed_edges:
                print(removed_edge)
            print()
            edge_str = str(edge) + ','
            print(edge_str)
            for method in methods[edge]:
                print('    ' + method)
            break

    return graph, removed_edges
def report_roadmap(edges, methods):
    # type: (List[Edge], MethodDict) -> None
    """For each child module, print the methods that other modules call on
    it and which modules are the callers."""
    child_modules = {child for parent, child in edges}
    module_methods = defaultdict(set)  # type: DefaultDict[str, Set[str]]
    callers = defaultdict(set)  # type: DefaultDict[Tuple[str, str], Set[str]]
    for parent, child in edges:
        for method in methods[parent, child]:
            module_methods[child].add(method)
            callers[child, method].add(parent)

    for child in sorted(child_modules):
        # Since children are found using the regex pattern, it is difficult
        # to know if they are JS or TS files without checking which files
        # exist. Instead, just print the name of the module.
        print(child)
        for method in module_methods[child]:
            print('    ' + child + '.' + method)
            for caller in sorted(callers[child, method]):
                print('        ' + caller)
            print()
        print()
def produce_partial_output(graph):
    # type: (Graph) -> None
    """Write the remaining dependency graph as a dot file and render a PNG."""
    print(graph.num_edges())
    dot_source = make_dot_file(graph)
    graph.report()
    with open(OUTPUT_FILE_PATH, 'w') as f:
        f.write(dot_source)
    subprocess.check_call(["dot", "-Tpng", OUTPUT_FILE_PATH, "-o", PNG_FILE_PATH])
    print()
    print('See dot file here: {}'.format(OUTPUT_FILE_PATH))
    print('See output png file: {}'.format(PNG_FILE_PATH))
def run():
    # type: () -> None
    """Entry point: build the JS dependency graph, then print either the
    full cut roadmap (graph fully reduced) or partial dot/png output."""
    edges, methods = get_js_edges()
    dependency_graph = Graph(edges)
    dependency_graph, removed_edges = find_edges_to_remove(dependency_graph, methods)
    fully_reduced = dependency_graph.num_edges() == 0
    if fully_reduced:
        report_roadmap(removed_edges, methods)
    else:
        produce_partial_output(dependency_graph)

if __name__ == '__main__':
    run()
|
# -*- coding: utf-8 -*-
import os
import pyloco
from .error import FreError
from .app import AppBuildAnalyzer
from .control import FrelptController
class FrelptTask(pyloco.Task):
    """Pyloco task driving the frelpt 'pushdown' transformation.

    First analyzes how the target application is built, then runs the
    frelpt controller on the target source file with the analysis results
    forwarded to it.
    """

    _name_ = "frelpt"
    _version_ = "0.1.1"

    def __init__(self, parent):
        # Register command-line arguments with the pyloco framework.
        self.add_data_argument("target", help="filepath to the source file having 'pushdown' frelpt directive")
        self.add_data_argument("build", help="Linux command to compile a target application")
        self.add_data_argument("clean", help="Linux command to clean a target application")

        self.add_option_argument("-o", "--outdir", default=os.getcwd(), help="output directory")

    def perform(self, targs):
        """Run the build analysis and then the frelpt controller.

        :param targs: parsed task arguments (target, build, clean, outdir)
        :returns: exit status of the last sub-task
        :raises FreError: if the target file does not exist

        Note: the previous try/except blocks only re-raised every exception
        unchanged, so they have been removed as no-ops.
        """
        # Check that the target file exists before doing any work.
        if not os.path.isfile(targs.target):
            raise FreError("Failed target argument of '%s'" % str(targs.target))

        parent = self.get_proxy()

        # Run application build analyzer to collect build information.
        app_analyzer = AppBuildAnalyzer(parent)
        argv = [targs.build, targs.clean, "--outdir", targs.outdir]
        retval, _forward = app_analyzer.run(argv)

        # Run frelpt controller, forwarding the build analysis results.
        ctrl = FrelptController(parent)
        argv = [targs.target, "--outdir", targs.outdir]
        retval, _ = ctrl.run(argv, forward=_forward)

        return retval
|
"""
Management of Docker Containers
.. versionadded:: 2015.8.0
.. versionchanged:: 2017.7.0
This module has replaced the legacy docker execution module.
:depends: docker_ Python module
.. _`create_container()`: http://docker-py.readthedocs.io/en/stable/api.html#docker.api.container.ContainerApiMixin.create_container
.. _`create_host_config()`: http://docker-py.readthedocs.io/en/stable/api.html#docker.api.container.ContainerApiMixin.create_host_config
.. _`connect_container_to_network()`: http://docker-py.readthedocs.io/en/stable/api.html#docker.api.network.NetworkApiMixin.connect_container_to_network
.. _`create_network()`: http://docker-py.readthedocs.io/en/stable/api.html#docker.api.network.NetworkApiMixin.create_network
.. _`logs()`: http://docker-py.readthedocs.io/en/stable/api.html#docker.api.container.ContainerApiMixin.logs
.. _`IPAM pool`: http://docker-py.readthedocs.io/en/stable/api.html#docker.types.IPAMPool
.. _docker: https://pypi.python.org/pypi/docker
.. _docker-py: https://pypi.python.org/pypi/docker-py
.. _lxc-attach: https://linuxcontainers.org/lxc/manpages/man1/lxc-attach.1.html
.. _nsenter: http://man7.org/linux/man-pages/man1/nsenter.1.html
.. _docker-exec: http://docs.docker.com/reference/commandline/cli/#exec
.. _`docker-py Low-level API`: http://docker-py.readthedocs.io/en/stable/api.html
.. _timelib: https://pypi.python.org/pypi/timelib
.. _`trusted builds`: https://blog.docker.com/2013/11/introducing-trusted-builds/
.. _`Docker Engine API`: https://docs.docker.com/engine/api/v1.33/#operation/ContainerCreate
.. note::
Older releases of the Python bindings for Docker were called docker-py_ in
PyPI. All releases of docker_, and releases of docker-py_ >= 1.6.0 are
supported. These python bindings can easily be installed using
:py:func:`pip.install <salt.modules.pip.install>`:
.. code-block:: bash
salt myminion pip.install docker
To upgrade from docker-py_ to docker_, you must first uninstall docker-py_,
and then install docker_:
.. code-block:: bash
salt myminion pip.uninstall docker-py
salt myminion pip.install docker
.. _docker-authentication:
Authentication
--------------
If you have previously performed a ``docker login`` from the minion, then the
credentials saved in ``~/.docker/config.json`` will be used for any actions
which require authentication. If not, then credentials can be configured in
any of the following locations:
- Minion config file
- Grains
- Pillar data
- Master config file (requires :conf_minion:`pillar_opts` to be set to ``True``
in Minion config file in order to work)
.. important::
Versions prior to 3000 require that Docker credentials are configured in
Pillar data. Be advised that Pillar data is still recommended though,
because this keeps the configuration from being stored on the Minion.
Also, keep in mind that if one gets your ``~/.docker/config.json``, the
password can be decoded from its contents.
The configuration schema is as follows:
.. code-block:: yaml
docker-registries:
<registry_url>:
username: <username>
password: <password>
For example:
.. code-block:: yaml
docker-registries:
hub:
username: foo
password: s3cr3t
.. note::
As of the 2016.3.7, 2016.11.4, and 2017.7.0 releases of Salt, credentials
for the Docker Hub can be configured simply by specifying ``hub`` in place
of the registry URL. In earlier releases, it is necessary to specify the
actual registry URL for the Docker Hub (i.e.
``https://index.docker.io/v1/``).
More than one registry can be configured. Salt will look for Docker credentials
in the ``docker-registries`` Pillar key, as well as any key ending in
``-docker-registries``. For example:
.. code-block:: yaml
docker-registries:
'https://mydomain.tld/registry:5000':
username: foo
password: s3cr3t
foo-docker-registries:
https://index.foo.io/v1/:
username: foo
password: s3cr3t
bar-docker-registries:
https://index.bar.io/v1/:
username: foo
password: s3cr3t
To login to the configured registries, use the :py:func:`docker.login
<salt.modules.dockermod.login>` function. This only needs to be done once for a
given registry, and it will store/update the credentials in
``~/.docker/config.json``.
.. note::
For Salt releases before 2016.3.7 and 2016.11.4, :py:func:`docker.login
<salt.modules.dockermod.login>` is not available. Instead, Salt will try to
authenticate using each of your configured registries for each push/pull,
behavior which is not correct and has been resolved in newer releases.
Configuration Options
---------------------
The following configuration options can be set to fine-tune how Salt uses
Docker:
- ``docker.url``: URL to the docker service (default: local socket).
- ``docker.version``: API version to use (should not need to be set manually in
the vast majority of cases)
- ``docker.exec_driver``: Execution driver to use, one of ``nsenter``,
``lxc-attach``, or ``docker-exec``. See the :ref:`Executing Commands Within a
Running Container <docker-execution-driver>` section for more details on how
this config parameter is used.
These configuration options are retrieved using :py:mod:`config.get
<salt.modules.config.get>` (click the link for further information).
.. _docker-execution-driver:
Executing Commands Within a Running Container
---------------------------------------------
.. note::
With the release of Docker 1.13.1, the Execution Driver has been removed.
Starting in versions 2016.3.6, 2016.11.4, and 2017.7.0, Salt defaults to
using ``docker exec`` to run commands in containers, however for older Salt
releases it will be necessary to set the ``docker.exec_driver`` config
option to either ``docker-exec`` or ``nsenter`` for Docker versions 1.13.1
and newer.
Multiple methods exist for executing commands within Docker containers:
- lxc-attach_: Default for older versions of docker
- nsenter_: Enters container namespace to run command
- docker-exec_: Native support for executing commands in Docker containers
(added in Docker 1.3)
Adding a configuration option (see :py:func:`config.get
<salt.modules.config.get>`) called ``docker.exec_driver`` will tell Salt which
execution driver to use:
.. code-block:: yaml
docker.exec_driver: docker-exec
If this configuration option is not found, Salt will use the appropriate
interface (either nsenter_ or lxc-attach_) based on the ``Execution Driver``
value returned from ``docker info``. docker-exec_ will not be used by default,
as it is presently (as of version 1.6.2) only able to execute commands as the
effective user of the container. Thus, if a ``USER`` directive was used to run
as a non-privileged user, docker-exec_ would be unable to perform the action as
root. Salt can still use docker-exec_ as an execution driver, but must be
explicitly configured (as in the example above) to do so at this time.
If possible, try to manually specify the execution driver, as it will save Salt
a little work.
This execution module provides functions that shadow those from the :mod:`cmd
<salt.modules.cmdmod>` module. They are as follows:
- :py:func:`docker.retcode <salt.modules.dockermod.retcode>`
- :py:func:`docker.run <salt.modules.dockermod.run>`
- :py:func:`docker.run_all <salt.modules.dockermod.run_all>`
- :py:func:`docker.run_stderr <salt.modules.dockermod.run_stderr>`
- :py:func:`docker.run_stdout <salt.modules.dockermod.run_stdout>`
- :py:func:`docker.script <salt.modules.dockermod.script>`
- :py:func:`docker.script_retcode <salt.modules.dockermod.script_retcode>`
Detailed Function Documentation
-------------------------------
"""
import bz2
import copy
import fnmatch
import functools
import gzip
import json
import logging
import os
import pipes
import re
import shlex
import shutil
import string
import subprocess
import sys
import time
import uuid

import salt.client.ssh.state
import salt.exceptions
import salt.fileclient
import salt.pillar
import salt.utils.dockermod.translate.container
import salt.utils.dockermod.translate.network
import salt.utils.functools
import salt.utils.json
import salt.utils.path
from salt.exceptions import CommandExecutionError, SaltInvocationError
from salt.state import HighState
__docformat__ = "restructuredtext en"
# pylint: disable=import-error
# docker-py is the one hard requirement for this module; __virtual__() refuses
# to load the module when it is absent.
try:
    import docker
    HAS_DOCKER_PY = True
except ImportError:
    HAS_DOCKER_PY = False
# Optional: lzma support for compressed image import/export.
try:
    import lzma
    HAS_LZMA = True
except ImportError:
    HAS_LZMA = False
# Optional: timelib allows parsing human-readable time expressions.
try:
    import timelib
    HAS_TIMELIB = True
except ImportError:
    HAS_TIMELIB = False
# pylint: enable=import-error
# The nsenter binary is required for the "nsenter" exec driver.
HAS_NSENTER = bool(salt.utils.path.which("nsenter"))
# Set up logging
log = logging.getLogger(__name__)
# Don't shadow built-in's.
__func_alias__ = {
    "import_": "import",
    "ps_": "ps",
    "rm_": "rm",
    "signal_": "signal",
    "start_": "start",
    "tag_": "tag",
    "apply_": "apply",
}
# Minimum supported versions
MIN_DOCKER = (1, 9, 0)
MIN_DOCKER_PY = (1, 6, 0)
VERSION_RE = r"([\d.]+)"
# Sentinel distinguishing "argument not supplied" from an explicit None.
NOTSET = object()
# Define the module's virtual name and alias
__virtualname__ = "docker"
__virtual_aliases__ = ("dockerng", "moby")
__proxyenabled__ = ["docker"]
__outputter__ = {
    "sls": "highstate",
    "apply_": "highstate",
    "highstate": "highstate",
}
def __virtual__():
    """
    Only load if docker libs are present
    """
    # Guard clauses: bail out early on each unmet requirement so the success
    # path falls through at the end.
    if not HAS_DOCKER_PY:
        return (False, "Could not import docker module, is docker-py installed?")
    try:
        docker_py_versioninfo = _get_docker_py_versioninfo()
    except Exception:  # pylint: disable=broad-except
        # May fail if we try to connect to a docker daemon but can't
        return (False, "Docker module found, but no version could be extracted")
    # Don't let a failure to interpret the version keep this module from
    # loading. Log a warning (log happens in _get_docker_py_versioninfo()).
    if docker_py_versioninfo is None:
        return (False, "Docker module found, but no version could be extracted")
    if docker_py_versioninfo < MIN_DOCKER_PY:
        return (
            False,
            "Insufficient docker-py version (required: {}, "
            "installed: {})".format(
                ".".join(map(str, MIN_DOCKER_PY)),
                ".".join(map(str, docker_py_versioninfo)),
            ),
        )
    try:
        docker_versioninfo = version().get("VersionInfo")
    except Exception:  # pylint: disable=broad-except
        docker_versioninfo = None
    # An unknown daemon version is tolerated; only a known-too-old one blocks.
    if docker_versioninfo is None or docker_versioninfo >= MIN_DOCKER:
        return __virtualname__
    return (
        False,
        "Insufficient Docker version (required: {}, "
        "installed: {})".format(
            ".".join(map(str, MIN_DOCKER)),
            ".".join(map(str, docker_versioninfo)),
        ),
    )
class DockerJSONDecoder(json.JSONDecoder):
    """
    JSON decoder that accepts newline-delimited JSON documents (as emitted by
    the Docker API's streaming endpoints) and returns them as a list of
    decoded objects, skipping empty lines.
    """

    def decode(self, s, _w=None):
        decoded = []
        for chunk in s.splitlines():
            if not chunk:
                continue
            parsed, _ = self.raw_decode(chunk)
            decoded.append(parsed)
        return decoded
def _get_docker_py_versioninfo():
    """
    Returns the version_info tuple from docker-py, or None if the installed
    docker-py does not expose one.
    """
    return getattr(docker, "version_info", None)
def _get_client(timeout=NOTSET, **kwargs):
    """
    Build a docker-py low-level client configured from minion config, the
    environment, and (optionally) docker-machine.

    timeout
        Socket timeout for the client; when the NOTSET sentinel is passed,
        docker-py's own default is used.
    """
    client_kwargs = {}
    if timeout is not NOTSET:
        client_kwargs["timeout"] = timeout
    # Map minion config options onto docker-py client arguments
    for key, val in (("base_url", "docker.url"), ("version", "docker.version")):
        param = __salt__["config.option"](val, NOTSET)
        if param is not NOTSET:
            client_kwargs[key] = param
    if "base_url" not in client_kwargs and "DOCKER_HOST" in os.environ:
        # Check if the DOCKER_HOST environment variable has been set
        client_kwargs["base_url"] = os.environ.get("DOCKER_HOST")
    if "version" not in client_kwargs:
        # Let docker-py auto detect docker version in case
        # it's not defined by user.
        client_kwargs["version"] = "auto"
    # When docker.machine is configured, resolve the daemon endpoint and TLS
    # material by shelling out to "docker-machine inspect".
    docker_machine = __salt__["config.option"]("docker.machine", NOTSET)
    if docker_machine is not NOTSET:
        docker_machine_json = __salt__["cmd.run"](
            ["docker-machine", "inspect", docker_machine], python_shell=False
        )
        try:
            docker_machine_json = salt.utils.json.loads(docker_machine_json)
            docker_machine_tls = docker_machine_json["HostOptions"]["AuthOptions"]
            docker_machine_ip = docker_machine_json["Driver"]["IPAddress"]
            # 2376 is the conventional TLS port for the Docker daemon
            client_kwargs["base_url"] = "https://" + docker_machine_ip + ":2376"
            client_kwargs["tls"] = docker.tls.TLSConfig(
                client_cert=(
                    docker_machine_tls["ClientCertPath"],
                    docker_machine_tls["ClientKeyPath"],
                ),
                ca_cert=docker_machine_tls["CaCertPath"],
                assert_hostname=False,
                verify=True,
            )
        except Exception as exc:  # pylint: disable=broad-except
            raise CommandExecutionError(
                "Docker machine {} failed: {}".format(docker_machine, exc)
            )
    try:
        # docker-py 2.0 renamed this client attribute
        ret = docker.APIClient(**client_kwargs)
    except AttributeError:
        # pylint: disable=not-callable
        ret = docker.Client(**client_kwargs)
        # pylint: enable=not-callable
    log.debug("docker-py API version: %s", getattr(ret, "api_version", None))
    return ret
def _get_state(inspect_results):
"""
Helper for deriving the current state of the container from the inspect
results.
"""
if inspect_results.get("State", {}).get("Paused", False):
return "paused"
elif inspect_results.get("State", {}).get("Running", False):
return "running"
else:
return "stopped"
# Decorators
def _docker_client(wrapped):
    """
    Decorator to run a function that requires the use of a docker.Client()
    instance.
    """

    @functools.wraps(wrapped)
    def wrapper(*args, **kwargs):
        """
        Ensure that the client is present
        """
        kwargs = __utils__["args.clean_kwargs"](**kwargs)
        timeout = kwargs.pop("client_timeout", NOTSET)
        # Lazily create and cache the client; the hasattr check guards against
        # a stale/invalid cached object.
        if "docker.client" not in __context__ or not hasattr(
            __context__["docker.client"], "timeout"
        ):
            __context__["docker.client"] = _get_client(timeout=timeout, **kwargs)
        orig_timeout = None
        if (
            timeout is not NOTSET
            and hasattr(__context__["docker.client"], "timeout")
            and __context__["docker.client"].timeout != timeout
        ):
            # Temporarily override timeout
            orig_timeout = __context__["docker.client"].timeout
            __context__["docker.client"].timeout = timeout
        # Bugfix: restore the original timeout even if the wrapped function
        # raises; previously an exception would leak the temporary override
        # into all subsequent calls using the cached client.
        try:
            return wrapped(*args, **kwargs)
        finally:
            if orig_timeout is not None:
                __context__["docker.client"].timeout = orig_timeout

    return wrapper
def _refresh_mine_cache(wrapped):
    """
    Decorator to trigger a refresh of salt mine data.
    """

    @functools.wraps(wrapped)
    def wrapper(*args, **kwargs):
        """
        refresh salt mine on exit.
        """
        cleaned_kwargs = __utils__["args.clean_kwargs"](**kwargs)
        result = wrapped(*args, **cleaned_kwargs)
        # Push fresh container info to the mine unless disabled via config
        if _check_update_mine():
            __salt__["mine.send"]("docker.ps", verbose=True, all=True, host=True)
        return result

    return wrapper
def _check_update_mine():
    """
    Return the docker.update_mine config value, caching it in __context__ on
    first use (defaults to True).
    """
    if "docker.update_mine" not in __context__:
        __context__["docker.update_mine"] = __salt__["config.option"](
            "docker.update_mine", default=True
        )
    return __context__["docker.update_mine"]
# Helper functions
def _change_state(name, action, expected, *args, **kwargs):
    """
    Change the state of a container
    """
    pre = state(name)
    # No-op when the container is already in the desired state (restart is
    # always performed regardless).
    if pre == expected and action != "restart":
        return {
            "result": False,
            "state": {"old": expected, "new": expected},
            "comment": ("Container '{}' already {}".format(name, expected)),
        }
    _client_wrapper(action, name, *args, **kwargs)
    _clear_context()
    try:
        post = state(name)
    except CommandExecutionError:
        # Container doesn't exist anymore
        post = None
    return {"result": post == expected, "state": {"old": pre, "new": post}}
def _clear_context():
    """
    Clear the state/exists values stored in context
    """
    # These cached values survive the purge.
    preserved = (
        "docker.client",
        "docker.exec_driver",
        "docker._pull_status",
        "docker.docker_version",
        "docker.docker_py_version",
    )
    # Iterate over a snapshot, since we mutate __context__ while looping.
    for key in list(__context__):
        try:
            is_docker_key = key.startswith("docker.")
        except AttributeError:
            # Non-string key; leave it alone
            continue
        if is_docker_key and key not in preserved:
            __context__.pop(key)
def _get_md5(name, path):
    """
    Get the MD5 checksum of a file from a container

    name
        Container name or ID
    path
        Path to the file inside the container

    Returns the checksum string, or None if the file does not exist or could
    not be accessed.
    """
    # shlex.quote replaces pipes.quote: the pipes module is deprecated since
    # Python 3.11 and removed in 3.13; the two functions are identical.
    output = run_stdout(
        name, "md5sum {}".format(shlex.quote(path)), ignore_retcode=True
    )
    try:
        return output.split()[0]
    except IndexError:
        # Destination file does not exist or could not be accessed
        return None
def _get_exec_driver():
    """
    Get the method to be used in shell commands: one of ``docker-exec``,
    ``lxc-attach``, or ``nsenter``. The result is cached in __context__.
    """
    contextkey = "docker.exec_driver"
    if contextkey not in __context__:
        # Explicit minion config takes precedence over auto-detection
        from_config = __salt__["config.option"](contextkey, None)
        # This if block can be removed once we make docker-exec a default
        # option, as it is part of the logic in the commented block above.
        if from_config is not None:
            __context__[contextkey] = from_config
            return from_config
        # The execution driver was removed in Docker 1.13.1, docker-exec is now
        # the default.
        driver = info().get("ExecutionDriver", "docker-exec")
        if driver == "docker-exec":
            __context__[contextkey] = driver
        elif driver.startswith("lxc-"):
            __context__[contextkey] = "lxc-attach"
        elif driver.startswith("native-") and HAS_NSENTER:
            __context__[contextkey] = "nsenter"
        elif not driver.strip() and HAS_NSENTER:
            # Blank driver string: fall back to nsenter if the binary exists
            log.warning(
                "ExecutionDriver from 'docker info' is blank, falling "
                "back to using 'nsenter'. To squelch this warning, set "
                "docker.exec_driver. See the Salt documentation for the "
                "docker module for more information."
            )
            __context__[contextkey] = "nsenter"
        else:
            raise NotImplementedError(
                "Unknown docker ExecutionDriver '{}', or didn't find "
                "command to attach to the container".format(driver)
            )
    return __context__[contextkey]
def _get_top_level_images(imagedata, subset=None):
"""
Returns a list of the top-level images (those which are not parents). If
``subset`` (an iterable) is passed, the top-level images in the subset will
be returned, otherwise all top-level images will be returned.
"""
try:
parents = [imagedata[x]["ParentId"] for x in imagedata]
filter_ = subset if subset is not None else imagedata
return [x for x in filter_ if x not in parents]
except (KeyError, TypeError):
raise CommandExecutionError(
"Invalid image data passed to _get_top_level_images(). Please "
"report this issue. Full image data: {}".format(imagedata)
)
def _prep_pull():
    """
    Populate __context__ with the current (pre-pull) image IDs (see the
    docstring for _pull_status for more information).
    """
    # Truncate to the conventional 12-character short image ID
    pre_pull_ids = [image_id[:12] for image_id in images(all=True)]
    __context__["docker._pull_status"] = pre_pull_ids
def _scrub_links(links, name):
"""
Remove container name from HostConfig:Links values to enable comparing
container configurations correctly.
"""
if isinstance(links, list):
ret = []
for l in links:
ret.append(l.replace("/{}/".format(name), "/", 1))
else:
ret = links
return ret
def _ulimit_sort(ulimit_val):
if isinstance(ulimit_val, list):
return sorted(
ulimit_val,
key=lambda x: (x.get("Name"), x.get("Hard", 0), x.get("Soft", 0)),
)
return ulimit_val
def _size_fmt(num):
"""
Format bytes as human-readable file sizes
"""
try:
num = int(num)
if num < 1024:
return "{} bytes".format(num)
num /= 1024.0
for unit in ("KiB", "MiB", "GiB", "TiB", "PiB"):
if num < 1024.0:
return "{:3.1f} {}".format(num, unit)
num /= 1024.0
except Exception: # pylint: disable=broad-except
log.error("Unable to format file size for '%s'", num)
return "unknown"
@_docker_client
def _client_wrapper(attr, *args, **kwargs):
    """
    Common functionality for running low-level API calls

    attr
        Name of the docker-py client method to invoke (e.g. ``pull``).
    catch_api_errors : True
        Popped from kwargs; when True, docker API errors are re-raised as
        CommandExecutionError instead of propagating to the caller.
    """
    catch_api_errors = kwargs.pop("catch_api_errors", True)
    # Resolve the requested method on the cached docker-py client
    func = getattr(__context__["docker.client"], attr, None)
    if func is None or not hasattr(func, "__call__"):
        raise SaltInvocationError("Invalid client action '{}'".format(attr))
    if attr in ("push", "pull"):
        try:
            # Refresh auth config from config.json
            __context__["docker.client"].reload_config()
        except AttributeError:
            # Older docker-py versions have no reload_config()
            pass
    err = ""
    try:
        log.debug(
            'Attempting to run docker-py\'s "%s" function '
            "with args=%s and kwargs=%s",
            attr,
            args,
            kwargs,
        )
        ret = func(*args, **kwargs)
    except docker.errors.APIError as exc:
        if catch_api_errors:
            # Generic handling of Docker API errors
            raise CommandExecutionError(
                "Error {}: {}".format(exc.response.status_code, exc.explanation)
            )
        else:
            # Allow API errors to be caught further up the stack
            raise
    except docker.errors.DockerException as exc:
        # More general docker exception (catches InvalidVersion, etc.)
        raise CommandExecutionError(exc.__str__())
    except Exception as exc:  # pylint: disable=broad-except
        # Remember the message; the generic failure is raised below
        err = exc.__str__()
    else:
        return ret
    # If we're here, it's because an exception was caught earlier, and the
    # API command failed.
    msg = "Unable to perform {}".format(attr)
    if err:
        msg += ": {}".format(err)
    raise CommandExecutionError(msg)
def _build_status(data, item):
"""
Process a status update from a docker build, updating the data structure
"""
stream = item["stream"]
if "Running in" in stream:
data.setdefault("Intermediate_Containers", []).append(
stream.rstrip().split()[-1]
)
if "Successfully built" in stream:
data["Id"] = stream.rstrip().split()[-1]
def _import_status(data, item, repo_name, repo_tag):
"""
Process a status update from docker import, updating the data structure
"""
status = item["status"]
try:
if "Downloading from" in status:
return
elif all(x in string.hexdigits for x in status):
# Status is an image ID
data["Image"] = "{}:{}".format(repo_name, repo_tag)
data["Id"] = status
except (AttributeError, TypeError):
pass
def _pull_status(data, item):
    """
    Process a status update from a docker pull, updating the data structure.

    For containers created with older versions of Docker, there is no
    distinction in the status updates between layers that were already present
    (and thus not necessary to download), and those which were actually
    downloaded. Because of this, any function that needs to invoke this
    function needs to pre-fetch the image IDs by running _prep_pull() in any
    function that calls _pull_status(). It is important to grab this
    information before anything is pulled so we aren't looking at the state of
    the images post-pull.

    We can't rely on the way that __context__ is utilized by the images()
    function, because by design we clear the relevant context variables once
    we've made changes to allow the next call to images() to pick up any
    changes that were made.
    """

    def _already_exists(id_):
        """
        Layer already exists
        """
        already_pulled = data.setdefault("Layers", {}).setdefault("Already_Pulled", [])
        if id_ not in already_pulled:
            already_pulled.append(id_)

    def _new_layer(id_):
        """
        Pulled a new layer
        """
        pulled = data.setdefault("Layers", {}).setdefault("Pulled", [])
        if id_ not in pulled:
            pulled.append(id_)

    if "docker._pull_status" not in __context__:
        log.warning(
            "_pull_status context variable was not populated, information on "
            "downloaded layers may be inaccurate. Please report this to the "
            "SaltStack development team, and if possible include the image "
            "(and tag) that was being pulled."
        )
        __context__["docker._pull_status"] = NOTSET
    status = item["status"]
    if status == "Already exists":
        _already_exists(item["id"])
    elif status == "Pull complete":
        # Bugfix: this was previously ``status in "Pull complete"``, a
        # substring test which wrongly matched statuses like "Pull" or an
        # empty string; an exact comparison is intended.
        _new_layer(item["id"])
    elif status.startswith("Status: "):
        data["Status"] = status[8:]
    elif status == "Download complete":
        if __context__["docker._pull_status"] is not NOTSET:
            id_ = item["id"]
            # Only count as a fresh pull if the layer wasn't present pre-pull
            if id_ in __context__["docker._pull_status"]:
                _already_exists(id_)
            else:
                _new_layer(id_)
def _push_status(data, item):
"""
Process a status update from a docker push, updating the data structure
"""
status = item["status"].lower()
if "id" in item:
if "already pushed" in status or "already exists" in status:
# Layer already exists
already_pushed = data.setdefault("Layers", {}).setdefault(
"Already_Pushed", []
)
already_pushed.append(item["id"])
elif "successfully pushed" in status or status == "pushed":
# Pushed a new layer
pushed = data.setdefault("Layers", {}).setdefault("Pushed", [])
pushed.append(item["id"])
def _error_detail(data, item):
"""
Process an API error, updating the data structure
"""
err = item["errorDetail"]
if "code" in err:
try:
msg = ": ".join(
(item["errorDetail"]["code"], item["errorDetail"]["message"])
)
except TypeError:
msg = "{}: {}".format(
item["errorDetail"]["code"], item["errorDetail"]["message"],
)
else:
msg = item["errorDetail"]["message"]
data.append(msg)
# Functions to handle docker-py client args
def get_client_args(limit=None):
    """
    .. versionadded:: 2016.3.6,2016.11.4,2017.7.0
    .. versionchanged:: 2017.7.0
        Replaced the container config args with the ones from the API's
        ``create_container`` function.
    .. versionchanged:: 2018.3.0
        Added ability to limit the input to specific client functions

    Many functions in Salt have been written to support the full list of
    arguments for a given function in the `docker-py Low-level API`_. However,
    depending on the version of docker-py installed on the minion, the
    available arguments may differ. This function will get the arguments for
    various functions in the installed version of docker-py, to be used as a
    reference.

    limit
        An optional list of categories for which to limit the return. This is
        useful if only a specific set of arguments is desired, and also keeps
        other function's argspecs from needlessly being examined.

    **AVAILABLE LIMITS**

    - ``create_container`` - arguments accepted by `create_container()`_ (used
      by :py:func:`docker.create <salt.modules.dockermod.create>`)
    - ``host_config`` - arguments accepted by `create_host_config()`_ (used to
      build the host config for :py:func:`docker.create
      <salt.modules.dockermod.create>`)
    - ``connect_container_to_network`` - arguments used by
      `connect_container_to_network()`_ to construct an endpoint config when
      connecting to a network (used by
      :py:func:`docker.connect_container_to_network
      <salt.modules.dockermod.connect_container_to_network>`)
    - ``create_network`` - arguments accepted by `create_network()`_ (used by
      :py:func:`docker.create_network <salt.modules.dockermod.create_network>`)
    - ``ipam_config`` - arguments used to create an `IPAM pool`_ (used by
      :py:func:`docker.create_network <salt.modules.dockermod.create_network>`
      in the process of constructing an IPAM config dictionary)

    CLI Example:

    .. code-block:: bash

        salt myminion docker.get_client_args
        salt myminion docker.get_client_args logs
        salt myminion docker.get_client_args create_container,connect_container_to_network
    """
    # Introspection itself lives in the shared utils module
    return __utils__["docker.get_client_args"](limit=limit)
def _get_create_kwargs(
    skip_translate=None,
    ignore_collisions=False,
    validate_ip_addrs=True,
    client_args=None,
    **kwargs
):
    """
    Take input kwargs and return a kwargs dict to pass to docker-py's
    create_container() function.

    Returns a 2-tuple of (create_kwargs, unused_kwargs), where unused_kwargs
    contains any input arguments that matched neither create_container() nor
    create_host_config() in the installed docker-py.
    """
    networks = kwargs.pop("networks", {})
    # Only the network matching network_mode can be configured at create time;
    # any other networks are connected after the container is created.
    if kwargs.get("network_mode", "") in networks:
        networks = {kwargs["network_mode"]: networks[kwargs["network_mode"]]}
    else:
        networks = {}
    # Normalize/validate user input into docker-py argument names
    kwargs = __utils__["docker.translate_input"](
        salt.utils.dockermod.translate.container,
        skip_translate=skip_translate,
        ignore_collisions=ignore_collisions,
        validate_ip_addrs=validate_ip_addrs,
        **__utils__["args.clean_kwargs"](**kwargs)
    )
    if networks:
        kwargs["networking_config"] = _create_networking_config(networks)
    if client_args is None:
        try:
            client_args = get_client_args(["create_container", "host_config"])
        except CommandExecutionError as exc:
            log.error(
                "docker.create: Error getting client args: '%s'",
                exc.__str__(),
                exc_info=True,
            )
            raise CommandExecutionError("Failed to get client args: {}".format(exc))
    full_host_config = {}
    host_kwargs = {}
    create_kwargs = {}
    # Using list() because we'll be altering kwargs during iteration
    for arg in list(kwargs):
        if arg in client_args["host_config"]:
            host_kwargs[arg] = kwargs.pop(arg)
            continue
        if arg in client_args["create_container"]:
            if arg == "host_config":
                full_host_config.update(kwargs.pop(arg))
            else:
                create_kwargs[arg] = kwargs.pop(arg)
            continue
    create_kwargs["host_config"] = _client_wrapper("create_host_config", **host_kwargs)
    # In the event that a full host_config was passed, overlay it on top of the
    # one we just created.
    create_kwargs["host_config"].update(full_host_config)
    # The "kwargs" dict at this point will only contain unused args
    return create_kwargs, kwargs
def _compare_conf_item(ret, conf_dict, item, val1, val2, first, second):
    """
    Compare a single Config/HostConfig key between two containers, recording
    any difference in ``ret``. Shared by both passes of compare_containers()
    (this logic was previously duplicated verbatim in two loops).
    """
    if item in ("OomKillDisable",) or (val1 is None or val2 is None):
        # Booleans and one-sided values are compared by truthiness only
        if bool(val1) != bool(val2):
            ret.setdefault(conf_dict, {})[item] = {"old": val1, "new": val2}
    elif item == "Image":
        # Resolve image names/tags to IDs before comparing
        image1 = inspect_image(val1)["Id"]
        image2 = inspect_image(val2)["Id"]
        if image1 != image2:
            ret.setdefault(conf_dict, {})[item] = {"old": image1, "new": image2}
    else:
        # Normalize order-insensitive values before comparison
        if item == "Links":
            val1 = sorted(_scrub_links(val1, first))
            val2 = sorted(_scrub_links(val2, second))
        if item == "Ulimits":
            val1 = _ulimit_sort(val1)
            val2 = _ulimit_sort(val2)
        if item == "Env":
            val1 = sorted(val1)
            val2 = sorted(val2)
        if val1 != val2:
            ret.setdefault(conf_dict, {})[item] = {"old": val1, "new": val2}


def compare_containers(first, second, ignore=None):
    """
    .. versionadded:: 2017.7.0
    .. versionchanged:: 2018.3.0
        Renamed from ``docker.compare_container`` to
        ``docker.compare_containers`` (old function name remains as an alias)

    Compare two containers' Config and HostConfig and return any
    differences between the two.

    first
        Name or ID of first container

    second
        Name or ID of second container

    ignore
        A comma-separated list (or Python list) of keys to ignore when
        comparing. This is useful when comparing two otherwise identical
        containers which have different hostnames.

    CLI Examples:

    .. code-block:: bash

        salt myminion docker.compare_containers foo bar
        salt myminion docker.compare_containers foo bar ignore=Hostname
    """
    ignore = __utils__["args.split_input"](ignore or [])
    result1 = inspect_container(first)
    result2 = inspect_container(second)
    ret = {}
    for conf_dict in ("Config", "HostConfig"):
        for item in result1[conf_dict]:
            if item in ignore:
                continue
            _compare_conf_item(
                ret,
                conf_dict,
                item,
                result1[conf_dict][item],
                result2[conf_dict].get(item),
                first,
                second,
            )
        # Check for optionally-present items that were in the second container
        # and not the first.
        for item in result2[conf_dict]:
            if item in ignore or item in ret.get(conf_dict, {}):
                # We're either ignoring this or we already processed this
                # when iterating through result1. Either way, skip it.
                continue
            _compare_conf_item(
                ret,
                conf_dict,
                item,
                result1[conf_dict].get(item),
                result2[conf_dict][item],
                first,
                second,
            )
    return ret
# Preserve the pre-2018.3.0 name as a backward-compatible alias
compare_container = salt.utils.functools.alias_function(
    compare_containers, "compare_container"
)
def compare_container_networks(first, second):
    """
    .. versionadded:: 2018.3.0

    Returns the differences between two containers' networks. When a network is
    only present one of the two containers, that network's diff will simply be
    represented with ``True`` for the side of the diff in which the network is
    present) and ``False`` for the side of the diff in which the network is
    absent.

    This function works by comparing the contents of both containers'
    ``Networks`` keys (under ``NetworkSettings``) in the return data from
    :py:func:`docker.inspect_container
    <salt.modules.dockermod.inspect_container>`. Because each network contains
    some items that either A) only set at runtime, B) naturally varying from
    container to container, or both, by default the following keys in each
    network are examined:

    - **Aliases**
    - **Links**
    - **IPAMConfig**

    The exception to this is if ``IPAMConfig`` is unset (i.e. null) in one
    container but not the other. This happens when no static IP configuration
    is set, and automatic IP configuration is in effect. So, in order to report
    on changes between automatic IP configuration in one container and static
    IP configuration in another container (as we need to do for the
    :py:func:`docker_container.running <salt.states.docker_container.running>`
    state), automatic IP configuration will also be checked in these cases.

    This function uses the :conf_minion:`docker.compare_container_networks`
    minion config option to determine which keys to examine. This provides
    flexibility in the event that features added in a future Docker release
    necessitate changes to how Salt compares networks. In these cases, rather
    than waiting for a new Salt release one can just set
    :conf_minion:`docker.compare_container_networks`.

    .. versionchanged:: 3000
        This config option can now also be set in pillar data and grains.
        Additionally, it can be set in the master config file, provided that
        :conf_minion:`pillar_opts` is enabled on the minion.

    .. note::
        The checks for automatic IP configuration described above only apply if
        ``IPAMConfig`` is among the keys set for static IP checks in
        :conf_minion:`docker.compare_container_networks`.

    first
        Name or ID of first container (old)

    second
        Name or ID of second container (new)

    CLI Example:

    .. code-block:: bash

        salt myminion docker.compare_container_networks foo bar
    """

    def _get_nets(data):
        # Networks dict from an inspect_container result (may be empty)
        return data.get("NetworkSettings", {}).get("Networks", {})

    compare_keys = __salt__["config.option"]("docker.compare_container_networks")
    # Accept either container names/IDs or pre-fetched inspect results
    result1 = inspect_container(first) if not isinstance(first, dict) else first
    result2 = inspect_container(second) if not isinstance(second, dict) else second
    nets1 = _get_nets(result1)
    nets2 = _get_nets(result2)
    state1 = state(first)
    state2 = state(second)
    # When you attempt and fail to set a static IP (for instance, because the
    # IP is not in the network's subnet), Docker will raise an exception but
    # will (incorrectly) leave the record for that network in the inspect
    # results for the container. Work around this behavior (bug?) by checking
    # which containers are actually connected.
    all_nets = set(nets1)
    all_nets.update(nets2)
    for net_name in all_nets:
        try:
            connected_containers = inspect_network(net_name).get("Containers", {})
        except Exception as exc:  # pylint: disable=broad-except
            # Shouldn't happen unless a network was removed outside of Salt
            # between the time that a docker_container.running state started
            # and when this comparison took place.
            log.warning("Failed to inspect Docker network %s: %s", net_name, exc)
            continue
        else:
            # Drop phantom network entries that the daemon says are not
            # actually connected (see workaround note above)
            if (
                state1 == "running"
                and net_name in nets1
                and result1["Id"] not in connected_containers
            ):
                del nets1[net_name]
            if (
                state2 == "running"
                and net_name in nets2
                and result2["Id"] not in connected_containers
            ):
                del nets2[net_name]
    ret = {}

    def _check_ipconfig(ret, net_name, **kwargs):
        # Compare IPAMConfig between the two containers, distinguishing
        # static IP configuration from automatic (no IPAMConfig) and from a
        # missing network, and record differences in ``ret``.
        # Make some variables to make the logic below easier to understand
        nets1_missing = "old" not in kwargs
        if nets1_missing:
            nets1_static = False
        else:
            nets1_static = bool(kwargs["old"])
        nets1_autoip = not nets1_static and not nets1_missing
        nets2_missing = "new" not in kwargs
        if nets2_missing:
            nets2_static = False
        else:
            nets2_static = bool(kwargs["new"])
        nets2_autoip = not nets2_static and not nets2_missing
        autoip_keys = compare_keys.get("automatic", [])
        if nets1_autoip and (nets2_static or nets2_missing):
            # Old container used automatic IP; new is static or disconnected
            for autoip_key in autoip_keys:
                autoip_val = nets1[net_name].get(autoip_key)
                if autoip_val:
                    ret.setdefault(net_name, {})[autoip_key] = {
                        "old": autoip_val,
                        "new": None,
                    }
            if nets2_static:
                ret.setdefault(net_name, {})["IPAMConfig"] = {
                    "old": None,
                    "new": kwargs["new"],
                }
            if not any(x in ret.get(net_name, {}) for x in autoip_keys):
                ret.setdefault(net_name, {})["IPConfiguration"] = {
                    "old": "automatic",
                    "new": "static" if nets2_static else "not connected",
                }
        elif nets2_autoip and (nets1_static or nets1_missing):
            # New container uses automatic IP; old was static or disconnected
            for autoip_key in autoip_keys:
                autoip_val = nets2[net_name].get(autoip_key)
                if autoip_val:
                    ret.setdefault(net_name, {})[autoip_key] = {
                        "old": None,
                        "new": autoip_val,
                    }
            if not any(x in ret.get(net_name, {}) for x in autoip_keys):
                ret.setdefault(net_name, {})["IPConfiguration"] = {
                    "old": "static" if nets1_static else "not connected",
                    "new": "automatic",
                }
            if nets1_static:
                ret.setdefault(net_name, {})["IPAMConfig"] = {
                    "old": kwargs["old"],
                    "new": None,
                }
        else:
            old_val = kwargs.get("old")
            new_val = kwargs.get("new")
            if old_val != new_val:
                # Static IP configuration present in both containers and there
                # are differences, so report them
                ret.setdefault(net_name, {})["IPAMConfig"] = {
                    "old": old_val,
                    "new": new_val,
                }

    for net_name in (x for x in nets1 if x not in nets2):
        # Network is not in the network_settings, but the container is attached
        # to the network
        for key in compare_keys.get("static", []):
            val = nets1[net_name].get(key)
            if key == "IPAMConfig":
                _check_ipconfig(ret, net_name, old=val)
            # NOTE(review): unlike the nets2-only branch below, this loop does
            # not ``continue`` after handling IPAMConfig, so a truthy
            # IPAMConfig is also recorded by the generic path below — confirm
            # this asymmetry is intentional.
            if val:
                if key == "Aliases":
                    try:
                        val.remove(result1["Config"]["Hostname"])
                    except (ValueError, AttributeError):
                        pass
                    else:
                        if not val:
                            # The only alias was the default one for the
                            # hostname
                            continue
                ret.setdefault(net_name, {})[key] = {"old": val, "new": None}
    for net_name in nets2:
        if net_name not in nets1:
            # Container is not attached to the network, but network is present
            # in the network_settings
            for key in compare_keys.get("static", []):
                val = nets2[net_name].get(key)
                if key == "IPAMConfig":
                    _check_ipconfig(ret, net_name, new=val)
                    continue
                elif val:
                    if key == "Aliases":
                        try:
                            val.remove(result2["Config"]["Hostname"])
                        except (ValueError, AttributeError):
                            pass
                        else:
                            if not val:
                                # The only alias was the default one for the
                                # hostname
                                continue
                    ret.setdefault(net_name, {})[key] = {"old": None, "new": val}
        else:
            # Network present on both sides; compare each configured key
            for key in compare_keys.get("static", []):
                old_val = nets1[net_name][key]
                new_val = nets2[net_name][key]
                for item in (old_val, new_val):
                    # Normalize for list order
                    try:
                        item.sort()
                    except AttributeError:
                        pass
                if key == "Aliases":
                    # Normalize for hostname alias
                    try:
                        old_val.remove(result1["Config"]["Hostname"])
                    except (AttributeError, ValueError):
                        pass
                    try:
                        old_val.remove(result1["Id"][:12])
                    except (AttributeError, ValueError):
                        pass
                    if not old_val:
                        old_val = None
                    try:
                        new_val.remove(result2["Config"]["Hostname"])
                    except (AttributeError, ValueError):
                        pass
                    try:
                        new_val.remove(result2["Id"][:12])
                    except (AttributeError, ValueError):
                        pass
                    if not new_val:
                        new_val = None
                elif key == "IPAMConfig":
                    _check_ipconfig(ret, net_name, old=old_val, new=new_val)
                    # We don't need the final check since it's included in the
                    # _check_ipconfig helper
                    continue
                if bool(old_val) is bool(new_val) is False:
                    continue
                elif old_val != new_val:
                    ret.setdefault(net_name, {})[key] = {"old": old_val, "new": new_val}
    return ret
def compare_networks(first, second, ignore="Name,Id,Created,Containers"):
    """
    .. versionadded:: 2018.3.0

    Compare two networks and return any differences between the two

    first
        Name or ID of first container
    second
        Name or ID of second container
    ignore : Name,Id,Created,Containers
        A comma-separated list (or Python list) of keys to ignore when
        comparing.

    CLI Example:

    .. code-block:: bash

        salt myminion docker.compare_network foo bar
    """
    ignore = __utils__["args.split_input"](ignore or [])
    # Accept either a name/ID (inspect it) or an already-inspected dict
    net1 = first if isinstance(first, dict) else inspect_network(first)
    net2 = second if isinstance(second, dict) else inspect_network(second)
    ret = {}

    def _kvsort(conf):
        # Deterministic sort key for a list of IPAM config dicts
        return (list(conf.keys()), list(conf.values()))

    for key in net1:
        if key in ignore:
            continue
        # Mark as seen so the second pass over net2 below skips it
        ignore.append(key)
        val1 = net1[key]
        val2 = net2.get(key)
        if bool(val1) is bool(val2) is False:
            # Both empty/missing — no difference worth reporting
            continue
        if key == "IPAM":
            # IPAM is a nested dict; compare each sub-key individually
            for subkey in val1:
                subval1 = val1[subkey]
                subval2 = val2.get(subkey)
                if bool(subval1) is bool(subval2) is False:
                    continue
                if subkey == "Config":
                    # Order of config entries is not significant
                    config1 = sorted(val1["Config"], key=_kvsort)
                    config2 = sorted(val2.get("Config", []), key=_kvsort)
                    if config1 != config2:
                        ret.setdefault("IPAM", {})["Config"] = {
                            "old": config1,
                            "new": config2,
                        }
                elif subval1 != subval2:
                    ret.setdefault("IPAM", {})[subkey] = {
                        "old": subval1,
                        "new": subval2,
                    }
        elif val1 != val2:
            ret[key] = {"old": val1, "new": val2}
    # Check for optionally-present items that were in the second network
    # and not the first.
    for key in (x for x in net2 if x not in ignore):
        val1 = net1.get(key)
        val2 = net2[key]
        if bool(val1) is bool(val2) is False:
            continue
        if val1 != val2:
            ret[key] = {"old": val1, "new": val2}
    return ret
def connected(name, verbose=False):
    """
    .. versionadded:: 2018.3.0

    Return a list of running containers attached to the specified network

    name
        Network name
    verbose : False
        If ``True``, return extended info about each container (IP
        configuration, etc.)

    CLI Example:

    .. code-block:: bash

        salt myminion docker.connected net_name
    """
    containers = inspect_network(name).get("Containers", {})
    ret = {}
    for cid, cinfo in containers.items():
        # The Containers dict is keyed by container ID, but we want the
        # results to be keyed by container name, so we need to pop off the
        # Name and then add the Id key to the cinfo dict.
        #
        # Use a dedicated variable for the container name instead of
        # rebinding ``name`` — the original code clobbered the network-name
        # argument here, which made the warning below report a container
        # name (from a previous iteration) as the network name.
        try:
            c_name = cinfo.pop("Name")
        except (KeyError, AttributeError):
            # Should never happen
            log.warning(
                "'Name' key not present in container definition for "
                "container ID '%s' within inspect results for Docker "
                "network '%s'. Full container definition: %s",
                cid,
                name,
                cinfo,
            )
            continue
        else:
            cinfo["Id"] = cid
            ret[c_name] = cinfo
    if not verbose:
        return list(ret)
    return ret
def login(*registries):
    """
    .. versionadded:: 2016.3.7,2016.11.4,2017.7.0

    Performs a ``docker login`` to authenticate to one or more configured
    repositories. See the documentation at the top of this page to configure
    authentication credentials.

    Multiple registry URLs (matching those configured in Pillar) can be passed,
    and Salt will attempt to login to *just* those registries. If no registry
    URLs are provided, Salt will attempt to login to *all* configured
    registries.

    **RETURN DATA**

    A dictionary containing the following keys:

    - ``Results`` - A dictionary mapping registry URLs to the authentication
      result. ``True`` means a successful login, ``False`` means a failed
      login.
    - ``Errors`` - A list of errors encountered during the course of this
      function.

    CLI Example:

    .. code-block:: bash

        salt myminion docker.login
        salt myminion docker.login hub
        salt myminion docker.login hub https://mydomain.tld/registry/
    """
    # NOTE: This function uses the "docker login" CLI command so that login
    # information is added to the config.json, since docker-py isn't designed
    # to do so.
    registry_auth = __salt__["config.get"]("docker-registries", {})
    ret = {"retcode": 0}
    errors = ret.setdefault("Errors", [])
    if not isinstance(registry_auth, dict):
        errors.append("'docker-registries' Pillar value must be a dictionary")
        registry_auth = {}
    # Merge in any "<prefix>-docker-registries" config blocks
    wildcard_confs = __salt__["config.option"]("*-docker-registries", wildcard=True)
    for reg_name, reg_conf in wildcard_confs.items():
        try:
            registry_auth.update(reg_conf)
        except TypeError:
            errors.append(
                "Docker registry '{}' was not specified as a "
                "dictionary".format(reg_name)
            )
    if not registries:
        # No registries requested explicitly: authenticate to all of them
        registries = list(registry_auth)
    results = ret.setdefault("Results", {})
    for registry in registries:
        if registry not in registry_auth:
            errors.append("No match found for registry '{}'".format(registry))
            continue
        try:
            creds = registry_auth[registry]
            username = creds["username"]
            password = creds["password"]
        except TypeError:
            errors.append("Invalid configuration for registry '{}'".format(registry))
        except KeyError as exc:
            errors.append("Missing {} for registry '{}'".format(exc, registry))
        else:
            cmd = ["docker", "login", "-u", username, "-p", password]
            if registry.lower() != "hub":
                cmd.append(registry)
            log.debug(
                "Attempting to login to docker registry '%s' as user '%s'",
                registry,
                username,
            )
            login_cmd = __salt__["cmd.run_all"](
                cmd, python_shell=False, output_loglevel="quiet",
            )
            succeeded = login_cmd["retcode"] == 0
            results[registry] = succeeded
            if not succeeded:
                # Prefer stderr; fall back to stdout for the error detail
                if login_cmd["stderr"]:
                    errors.append(login_cmd["stderr"])
                elif login_cmd["stdout"]:
                    errors.append(login_cmd["stdout"])
    if errors:
        ret["retcode"] = 1
    return ret
def logout(*registries):
    """
    .. versionadded:: 3001

    Performs a ``docker logout`` to remove the saved authentication details for
    one or more configured repositories.

    Multiple registry URLs (matching those configured in Pillar) can be passed,
    and Salt will attempt to logout of *just* those registries. If no registry
    URLs are provided, Salt will attempt to logout of *all* configured
    registries.

    **RETURN DATA**

    A dictionary containing the following keys:

    - ``Results`` - A dictionary mapping registry URLs to the authentication
      result. ``True`` means a successful logout, ``False`` means a failed
      logout.
    - ``Errors`` - A list of errors encountered during the course of this
      function.

    CLI Example:

    .. code-block:: bash

        salt myminion docker.logout
        salt myminion docker.logout hub
        salt myminion docker.logout hub https://mydomain.tld/registry/
    """
    # NOTE: This function uses the "docker logout" CLI command to remove
    # authentication information from config.json. docker-py does not support
    # this usecase (see https://github.com/docker/docker-py/issues/1091)

    # To logout of all known (to Salt) docker registries, they have to be collected first
    registry_auth = __salt__["config.get"]("docker-registries", {})
    ret = {"retcode": 0}
    errors = ret.setdefault("Errors", [])
    if not isinstance(registry_auth, dict):
        errors.append("'docker-registries' Pillar value must be a dictionary")
        registry_auth = {}
    # Merge in any "<prefix>-docker-registries" config blocks
    wildcard_confs = __salt__["config.option"]("*-docker-registries", wildcard=True)
    for reg_name, reg_conf in wildcard_confs.items():
        try:
            registry_auth.update(reg_conf)
        except TypeError:
            errors.append(
                "Docker registry '{}' was not specified as a "
                "dictionary".format(reg_name)
            )
    if not registries:
        # No registries requested explicitly: log out of all known ones
        registries = list(registry_auth)
    results = ret.setdefault("Results", {})
    for registry in registries:
        if registry not in registry_auth:
            errors.append("No match found for registry '{}'".format(registry))
            continue
        cmd = ["docker", "logout"]
        if registry.lower() != "hub":
            cmd.append(registry)
        log.debug("Attempting to logout of docker registry '%s'", registry)
        logout_cmd = __salt__["cmd.run_all"](
            cmd, python_shell=False, output_loglevel="quiet",
        )
        succeeded = logout_cmd["retcode"] == 0
        results[registry] = succeeded
        if not succeeded:
            # Prefer stderr; fall back to stdout for the error detail
            if logout_cmd["stderr"]:
                errors.append(logout_cmd["stderr"])
            elif logout_cmd["stdout"]:
                errors.append(logout_cmd["stdout"])
    if errors:
        ret["retcode"] = 1
    return ret
# Functions for information gathering
def depends(name):
    """
    Returns the containers and images, if any, which depend on the given image

    name
        Name or ID of image

    **RETURN DATA**

    A dictionary containing the following keys:

    - ``Containers`` - A list of containers which depend on the specified image
    - ``Images`` - A list of IDs of images which depend on the specified image

    CLI Example:

    .. code-block:: bash

        salt myminion docker.depends myimage
        salt myminion docker.depends 0123456789ab
    """
    # Resolve tag or short-SHA to full SHA
    image_id = inspect_image(name)["Id"]
    dependent_containers = []
    for cinfo in ps_(all=True, verbose=True).values():
        if cinfo["Info"]["Image"] == image_id:
            dependent_containers += [n.lstrip("/") for n in cinfo["Names"]]
    dependent_images = [
        img_id[:12]
        for img_id, img_info in images(all=True).items()
        if img_info["ParentId"] == image_id
    ]
    return {"Containers": dependent_containers, "Images": dependent_images}
def diff(name):
    """
    Get information on changes made to container's filesystem since it was
    created. Equivalent to running the ``docker diff`` Docker CLI command.

    name
        Container name or ID

    **RETURN DATA**

    A dictionary containing any of the following keys:

    - ``Added`` - A list of paths that were added.
    - ``Changed`` - A list of paths that were changed.
    - ``Deleted`` - A list of paths that were deleted.

    These keys will only be present if there were changes, so if the container
    has no differences the return dict will be empty.

    CLI Example:

    .. code-block:: bash

        salt myminion docker.diff mycontainer
    """
    # Map the API's numeric "Kind" field to human-readable bucket names
    kind_map = {0: "Changed", 1: "Added", 2: "Deleted"}
    ret = {}
    for change in _client_wrapper("diff", name):
        bucket = kind_map.get(change["Kind"], "Unknown")
        ret.setdefault(bucket, []).append(change["Path"])
    if "Unknown" in ret:
        log.error(
            "Unknown changes detected in docker.diff of container %s. "
            "This is probably due to a change in the Docker API. Please "
            "report this to the SaltStack developers",
            name,
        )
    return ret
def exists(name):
    """
    Check if a given container exists

    name
        Container name or ID

    **RETURN DATA**

    A boolean (``True`` if the container exists, otherwise ``False``)

    CLI Example:

    .. code-block:: bash

        salt myminion docker.exists mycontainer
    """
    # Cache the answer in __context__ so repeated checks in the same run
    # don't hit the Docker API again.
    contextkey = "docker.exists.{}".format(name)
    if contextkey in __context__:
        return __context__[contextkey]
    try:
        # The inspect result itself is not needed (the original bound it to
        # an unused local); we only care whether the call raises or not.
        _client_wrapper("inspect_container", name, catch_api_errors=False)
    except docker.errors.APIError:
        __context__[contextkey] = False
    else:
        __context__[contextkey] = True
    return __context__[contextkey]
def history(name, quiet=False):
    """
    Return the history for an image. Equivalent to running the ``docker
    history`` Docker CLI command.

    name
        Container name or ID

    quiet : False
        If ``True``, the return data will simply be a list of the commands run
        to build the container.

        .. code-block:: bash

            $ salt myminion docker.history nginx:latest quiet=True
            myminion:
                - FROM scratch
                - ADD file:ef063ed0ae9579362871b9f23d2bc0781ef7cd4de6ac822052cf6c9c5a12b1e2 in /
                - CMD [/bin/bash]
                - MAINTAINER NGINX Docker Maintainers "docker-maint@nginx.com"
                - apt-key adv --keyserver pgp.mit.edu --recv-keys 573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62
                - echo "deb http://nginx.org/packages/mainline/debian/ wheezy nginx" >> /etc/apt/sources.list
                - ENV NGINX_VERSION=1.7.10-1~wheezy
                - apt-get update && apt-get install -y ca-certificates nginx=${NGINX_VERSION} && rm -rf /var/lib/apt/lists/*
                - ln -sf /dev/stdout /var/log/nginx/access.log
                - ln -sf /dev/stderr /var/log/nginx/error.log
                - VOLUME [/var/cache/nginx]
                - EXPOSE map[80/tcp:{} 443/tcp:{}]
                - CMD [nginx -g daemon off;]
                https://github.com/saltstack/salt/pull/22421

    **RETURN DATA**

    If ``quiet=False``, the return value will be a list of dictionaries
    containing information about each step taken to build the image. The keys
    in each step include the following:

    - ``Command`` - The command executed in this build step
    - ``Id`` - Layer ID
    - ``Size`` - Cumulative image size, in bytes
    - ``Size_Human`` - Cumulative image size, in human-readable units
    - ``Tags`` - Tag(s) assigned to this layer
    - ``Time_Created_Epoch`` - Time this build step was completed (Epoch
      time)
    - ``Time_Created_Local`` - Time this build step was completed (Minion's
      local timezone)

    CLI Example:

    .. code-block:: bash

        salt myminion docker.history nginx:latest
    """
    # NOTE: the CLI example above previously read "docker.exists mycontainer",
    # a copy/paste error from another function's docstring.
    response = _client_wrapper("history", name)
    # Rename selected API keys to the names used in the return data
    key_map = {
        "CreatedBy": "Command",
        "Created": "Time_Created_Epoch",
    }
    # Strip the shell-invocation prefix docker prepends to each build command
    command_prefix = re.compile(r"^/bin/sh -c (?:#\(nop\) )?")
    ret = []
    # history is most-recent first, reverse this so it is ordered top-down
    for item in reversed(response):
        step = {}
        for key, val in item.items():
            step_key = key_map.get(key, key)
            if step_key == "Command":
                if not val:
                    # We assume that an empty build step is 'FROM scratch'
                    val = "FROM scratch"
                else:
                    val = command_prefix.sub("", val)
            step[step_key] = val
        if "Time_Created_Epoch" in step:
            step["Time_Created_Local"] = time.strftime(
                "%Y-%m-%d %H:%M:%S %Z", time.localtime(step["Time_Created_Epoch"])
            )
        for param in ("Size",):
            if param in step:
                step["{}_Human".format(param)] = _size_fmt(step[param])
        # Deep-copy defensively so the returned steps don't share mutable
        # values with the raw API response.
        ret.append(copy.deepcopy(step))
    if quiet:
        return [x.get("Command") for x in ret]
    return ret
def images(verbose=False, **kwargs):
    """
    Returns information about the Docker images on the Minion. Equivalent to
    running the ``docker images`` Docker CLI command.

    all : False
        If ``True``, untagged images will also be returned

    verbose : False
        If ``True``, a ``docker inspect`` will be run on each image returned.

    **RETURN DATA**

    A dictionary with each key being an image ID, and each value some general
    info about that image (time created, size, tags associated with the image,
    etc.)

    CLI Example:

    .. code-block:: bash

        salt myminion docker.images
        salt myminion docker.images all=True
    """
    # Results are cached in __context__ under "docker.images", bucketed into
    # "tagged" and "untagged" sub-dicts; the API is only queried on a miss.
    if "docker.images" not in __context__:
        response = _client_wrapper("images", all=kwargs.get("all", False))
        # Rename selected API keys to the names used in the return data
        key_map = {
            "Created": "Time_Created_Epoch",
        }
        for img in response:
            img_id = img.pop("Id", None)
            if img_id is None:
                continue
            for item in img:
                # Classify the image so it lands in the right cache bucket;
                # the "untagged" sentinel differs between Docker API versions.
                img_state = (
                    "untagged"
                    if img["RepoTags"]
                    in (
                        ["<none>:<none>"],  # docker API <1.24
                        None,  # docker API >=1.24
                    )
                    else "tagged"
                )
                bucket = __context__.setdefault("docker.images", {})
                bucket = bucket.setdefault(img_state, {})
                img_key = key_map.get(item, item)
                bucket.setdefault(img_id, {})[img_key] = img[item]
            # NOTE(review): ``bucket`` is only bound inside the loop above,
            # so this would NameError if an image dict contained no keys
            # besides Id — presumably the API always returns more. Confirm.
            if "Time_Created_Epoch" in bucket.get(img_id, {}):
                bucket[img_id]["Time_Created_Local"] = time.strftime(
                    "%Y-%m-%d %H:%M:%S %Z",
                    time.localtime(bucket[img_id]["Time_Created_Epoch"]),
                )
            # Add human-readable sizes alongside the raw byte counts
            for param in ("Size", "VirtualSize"):
                if param in bucket.get(img_id, {}):
                    bucket[img_id]["{}_Human".format(param)] = _size_fmt(
                        bucket[img_id][param]
                    )
    # Deep-copy out of the cache so callers can't mutate the cached data
    context_data = __context__.get("docker.images", {})
    ret = copy.deepcopy(context_data.get("tagged", {}))
    if kwargs.get("all", False):
        ret.update(copy.deepcopy(context_data.get("untagged", {})))
    # If verbose info was requested, go get it
    if verbose:
        for img_id in ret:
            ret[img_id]["Info"] = inspect_image(img_id)
    return ret
def info():
    """
    Returns a dictionary of system-wide information. Equivalent to running
    the ``docker info`` Docker CLI command.

    CLI Example:

    .. code-block:: bash

        salt myminion docker.info
    """
    # Thin pass-through to docker-py's info() via the client wrapper
    sysinfo = _client_wrapper("info")
    return sysinfo
def inspect(name):
    """
    .. versionchanged:: 2017.7.0
        Volumes and networks are now checked, in addition to containers and
        images.

    This is a generic container/image/volume/network inspecton function. It
    will run the following functions in order:

    - :py:func:`docker.inspect_container
      <salt.modules.dockermod.inspect_container>`
    - :py:func:`docker.inspect_image <salt.modules.dockermod.inspect_image>`
    - :py:func:`docker.inspect_volume <salt.modules.dockermod.inspect_volume>`
    - :py:func:`docker.inspect_network <salt.modules.dockermod.inspect_network>`

    The first of these to find a match will be returned.

    name
        Container/image/volume/network name or ID

    **RETURN DATA**

    A dictionary of container/image/volume/network information

    CLI Example:

    .. code-block:: bash

        salt myminion docker.inspect mycontainer
        salt myminion docker.inspect busybox
    """
    # Try each inspector in turn. Each entry pairs the inspect function with
    # a predicate that recognizes its "not found" error message; any other
    # CommandExecutionError is re-raised immediately.
    lookups = (
        (inspect_container, lambda msg: "does not exist" in msg),
        (inspect_image, lambda msg: msg.startswith("Error 404")),
        (inspect_volume, lambda msg: msg.startswith("Error 404")),
        (inspect_network, lambda msg: msg.startswith("Error 404")),
    )
    for inspector, is_not_found in lookups:
        try:
            return inspector(name)
        except CommandExecutionError as exc:
            if not is_not_found(exc.strerror):
                raise
    raise CommandExecutionError(
        "Error 404: No such image/container/volume/network: {}".format(name)
    )
def inspect_container(name):
    """
    Retrieves container information. Equivalent to running the ``docker
    inspect`` Docker CLI command, but will only look for container information.

    name
        Container name or ID

    **RETURN DATA**

    A dictionary of container information

    CLI Example:

    .. code-block:: bash

        salt myminion docker.inspect_container mycontainer
        salt myminion docker.inspect_container 0123456789ab
    """
    # Thin pass-through to docker-py's inspect_container()
    c_info = _client_wrapper("inspect_container", name)
    return c_info
def inspect_image(name):
    """
    Retrieves image information. Equivalent to running the ``docker inspect``
    Docker CLI command, but will only look for image information.

    .. note::
        To inspect an image, it must have been pulled from a registry or built
        locally. Images on a Docker registry which have not been pulled cannot
        be inspected.

    name
        Image name or ID

    **RETURN DATA**

    A dictionary of image information

    CLI Examples:

    .. code-block:: bash

        salt myminion docker.inspect_image busybox
        salt myminion docker.inspect_image centos:6
        salt myminion docker.inspect_image 0123456789ab
    """
    img_info = _client_wrapper("inspect_image", name)
    # Augment raw byte counts with human-readable equivalents
    for size_key in ("Size", "VirtualSize"):
        if size_key in img_info:
            img_info["{}_Human".format(size_key)] = _size_fmt(img_info[size_key])
    return img_info
def list_containers(**kwargs):
    """
    Returns a list of containers by name. This is different from
    :py:func:`docker.ps <salt.modules.dockermod.ps_>` in that
    :py:func:`docker.ps <salt.modules.dockermod.ps_>` returns its results
    organized by container ID.

    all : False
        If ``True``, stopped containers will be included in return data

    CLI Example:

    .. code-block:: bash

        salt myminion docker.list_containers all=True
    """
    # NOTE: the CLI example previously read "docker.inspect_image <image>",
    # a copy/paste error from another function's docstring.
    ret = set()
    for item in ps_(all=kwargs.get("all", False)).values():
        names = item.get("Names")
        if not names:
            continue
        # Container names from the API carry a leading slash; strip it
        ret.update(x.lstrip("/") for x in names)
    return sorted(ret)
def list_tags():
    """
    Returns a list of tagged images

    CLI Example:

    .. code-block:: bash

        salt myminion docker.list_tags
    """
    tags = set()
    for img_info in images().values():
        repo_tags = img_info.get("RepoTags")
        if repo_tags:
            tags.update(repo_tags)
    return sorted(tags)
def resolve_image_id(name):
    """
    .. versionadded:: 2018.3.0

    Given an image name (or partial image ID), return the full image ID. If no
    match is found among the locally-pulled images, then ``False`` will be
    returned.

    CLI Examples:

    .. code-block:: bash

        salt myminion docker.resolve_image_id foo
        salt myminion docker.resolve_image_id foo:bar
        salt myminion docker.resolve_image_id 36540f359ca3
    """
    inspect_result = None
    try:
        inspect_result = inspect_image(name)
        return inspect_result["Id"]
    except CommandExecutionError:
        # No matching image pulled locally, or inspect_image otherwise failed
        pass
    except KeyError:
        # Inspect succeeded but returned something without an Id key
        log.error(
            "Inspecting docker image '%s' returned an unexpected data structure: %s",
            name,
            inspect_result,
        )
    return False
def resolve_tag(name, **kwargs):
    """
    .. versionadded:: 2017.7.2
    .. versionchanged:: 2018.3.0
        Instead of matching against pulled tags using
        :py:func:`docker.list_tags <salt.modules.dockermod.list_tags>`, this
        function now simply inspects the passed image name using
        :py:func:`docker.inspect_image <salt.modules.dockermod.inspect_image>`
        and returns the first matching tag. If no matching tags are found, it
        is assumed that the passed image is an untagged image ID, and the full
        ID is returned.

    Inspects the specified image name and returns the first matching tag in the
    inspect results. If the specified image is not pulled locally, this
    function will return ``False``.

    name
        Image name to resolve. If the image is found but there are no tags,
        this means that the image name passed was an untagged image. In this
        case the image ID will be returned.

    all : False
        If ``True``, a list of all matching tags will be returned. If the image
        is found but there are no tags, then a list will still be returned, but
        it will simply contain the image ID.

        .. versionadded:: 2018.3.0

    tags
        .. deprecated:: 2018.3.0

    CLI Examples:

    .. code-block:: bash

        salt myminion docker.resolve_tag busybox
        salt myminion docker.resolve_tag centos:7 all=True
        salt myminion docker.resolve_tag c9f378ac27d9
    """
    kwargs = __utils__["args.clean_kwargs"](**kwargs)
    all_ = kwargs.pop("all", False)
    if kwargs:
        # Reject any remaining (unsupported) keyword arguments
        __utils__["args.invalid_kwargs"](kwargs)
    try:
        inspect_result = inspect_image(name)
        tags = inspect_result["RepoTags"]
        if all_:
            if tags:
                return tags
            # If the image is untagged, don't return an empty list, return
            # back the resolved ID at he end of this function.
        else:
            # No tags raises IndexError, handled below
            return tags[0]
    except CommandExecutionError:
        # No matching image pulled locally, or inspect_image otherwise failed
        return False
    except KeyError:
        log.error(
            "Inspecting docker image '%s' returned an unexpected data " "structure: %s",
            name,
            inspect_result,
        )
    except IndexError:
        # The image passed is an untagged image ID
        pass
    # Reached when the image exists but has no tags (or on the KeyError
    # branch above): fall back to the image's resolved ID.
    return [inspect_result["Id"]] if all_ else inspect_result["Id"]
def logs(name, **kwargs):
    """
    .. versionchanged:: 2018.3.0
        Support for all of docker-py's `logs()`_ function's arguments, with the
        exception of ``stream``.

    Returns the logs for the container. An interface to docker-py's `logs()`_
    function.

    name
        Container name or ID

    stdout : True
        Return stdout lines

    stderr : True
        Return stderr lines

    timestamps : False
        Show timestamps

    tail : all
        Output specified number of lines at the end of logs. Either an integer
        number of lines or the string ``all``.

    since
        Show logs since the specified time, passed as a UNIX epoch timestamp.
        Optionally, if timelib_ is installed on the minion the timestamp can be
        passed as a string which will be resolved to a date using
        ``timelib.strtodatetime()``.

    follow : False
        If ``True``, this function will block until the container exits and
        return the logs when it does. The default behavior is to return what is
        in the log at the time this function is executed.

        .. note:
            Since it blocks, this option should be used with caution.

    CLI Examples:

    .. code-block:: bash

        # All logs
        salt myminion docker.logs mycontainer
        # Last 100 lines of log
        salt myminion docker.logs mycontainer tail=100
        # Just stderr
        salt myminion docker.logs mycontainer stdout=False
        # Logs since a specific UNIX timestamp
        salt myminion docker.logs mycontainer since=1511688459
        # Flexible format for "since" argument (requires timelib)
        salt myminion docker.logs mycontainer since='1 hour ago'
        salt myminion docker.logs mycontainer since='1 week ago'
        salt myminion docker.logs mycontainer since='1 fortnight ago'
    """
    # NOTE: the docstring previously described ``stderr`` as "Return stdout
    # lines" — a copy/paste error; it controls stderr lines.
    kwargs = __utils__["args.clean_kwargs"](**kwargs)
    if "stream" in kwargs:
        raise SaltInvocationError("The 'stream' argument is not supported")
    try:
        kwargs["since"] = int(kwargs["since"])
    except KeyError:
        # 'since' not passed; nothing to normalize
        pass
    except (ValueError, TypeError):
        # Try to resolve down to a datetime.datetime object using timelib. If
        # it's not installed, pass the value as-is and let docker-py throw an
        # APIError.
        if HAS_TIMELIB:
            try:
                kwargs["since"] = timelib.strtodatetime(kwargs["since"])
            except Exception as exc:  # pylint: disable=broad-except
                log.warning(
                    "docker.logs: Failed to parse '%s' using timelib: %s",
                    kwargs["since"],
                    exc,
                )
    # logs() returns output as bytestrings
    return salt.utils.stringutils.to_unicode(_client_wrapper("logs", name, **kwargs))
def pid(name):
    """
    Returns the PID of a container

    name
        Container name or ID

    CLI Example:

    .. code-block:: bash

        salt myminion docker.pid mycontainer
        salt myminion docker.pid 0123456789ab
    """
    c_info = inspect_container(name)
    return c_info["State"]["Pid"]
def port(name, private_port=None):
    """
    Returns port mapping information for a given container. Equivalent to
    running the ``docker port`` Docker CLI command.

    name
        Container name or ID

        .. versionchanged:: 2019.2.0
            This value can now be a pattern expression (using the
            pattern-matching characters defined in fnmatch_). If a pattern
            expression is used, this function will return a dictionary mapping
            container names which match the pattern to the mappings for those
            containers. When no pattern expression is used, a dictionary of the
            mappings for the specified container name will be returned.

        .. _fnmatch: https://docs.python.org/2/library/fnmatch.html

    private_port : None
        If specified, get information for that specific port. Can be specified
        either as a port number (i.e. ``5000``), or as a port number plus the
        protocol (i.e. ``5000/udp``).

        If this argument is omitted, all port mappings will be returned.

    **RETURN DATA**

    A dictionary of port mappings, with the keys being the port and the values
    being the mapping(s) for that port.

    CLI Examples:

    .. code-block:: bash

        salt myminion docker.port mycontainer
        salt myminion docker.port mycontainer 5000
        salt myminion docker.port mycontainer 5000/udp
    """
    # A name containing fnmatch wildcard characters selects many containers
    pattern_used = re.search(r"[*?\[]", name) is not None
    if pattern_used:
        matched_names = fnmatch.filter(list_containers(all=True), name)
    else:
        matched_names = [name]

    # Translate private_port into an fnmatch pattern over "port/proto" keys
    if private_port is None:
        port_pattern = "*"
    elif isinstance(private_port, int):
        port_pattern = "{}/*".format(private_port)
    else:
        err = (
            "Invalid private_port '{}'. Must either be a port number, "
            "or be in port/protocol notation (e.g. 5000/tcp)".format(private_port)
        )
        try:
            port_num, _, protocol = private_port.partition("/")
            protocol = protocol.lower()
            if not port_num.isdigit() or protocol not in ("tcp", "udp"):
                raise SaltInvocationError(err)
            port_pattern = port_num + "/" + protocol
        except AttributeError:
            # private_port was not a string (no .partition)
            raise SaltInvocationError(err)

    ret = {}
    for c_name in matched_names:
        # docker.client.Client.port() doesn't do what we need, so just inspect
        # the container and get the information from there. It's what they're
        # already doing (poorly) anyway.
        mappings = inspect_container(c_name).get("NetworkSettings", {}).get("Ports", {})
        ret[c_name] = {
            key: mappings[key] for key in fnmatch.filter(mappings, port_pattern)
        }
    if pattern_used:
        return ret
    return ret.get(name, {})
def ps_(filters=None, **kwargs):
    """
    Returns information about the Docker containers on the Minion. Equivalent
    to running the ``docker ps`` Docker CLI command.

    all : False
        If ``True``, stopped containers will also be returned

    host: False
        If ``True``, local host's network topology will be included

    verbose : False
        If ``True``, a ``docker inspect`` will be run on each container
        returned.

    filters: None
        A dictionary of filters to be processed on the container list.
        Available filters:

        - exited (int): Only containers with specified exit code
        - status (str): One of restarting, running, paused, exited
        - label (str): format either "key" or "key=value"

    **RETURN DATA**

    A dictionary with each key being an container ID, and each value some
    general info about that container (time created, name, command, etc.)

    CLI Example:

    .. code-block:: bash

        salt myminion docker.ps
        salt myminion docker.ps all=True
        salt myminion docker.ps filters="{'label': 'role=web'}"
    """
    # Always query all containers; the running/stopped split is done locally
    response = _client_wrapper("containers", all=True, filters=filters)
    # Rename selected API keys to the names used in the return data
    key_map = {
        "Created": "Time_Created_Epoch",
    }
    context_data = {}
    for container in response:
        c_id = container.pop("Id", None)
        if c_id is None:
            continue
        for item in container:
            # Bucket each container as running/stopped based on its Status
            c_state = (
                "running"
                if container.get("Status", "").lower().startswith("up ")
                else "stopped"
            )
            bucket = context_data.setdefault(c_state, {})
            c_key = key_map.get(item, item)
            bucket.setdefault(c_id, {})[c_key] = container[item]
        # NOTE(review): ``bucket`` is only bound inside the loop above, so
        # this would NameError if a container dict contained no keys besides
        # Id — presumably the API always returns more. Confirm.
        if "Time_Created_Epoch" in bucket.get(c_id, {}):
            bucket[c_id]["Time_Created_Local"] = time.strftime(
                "%Y-%m-%d %H:%M:%S %Z",
                time.localtime(bucket[c_id]["Time_Created_Epoch"]),
            )
    # Deep-copy so callers can't mutate the locally-built data
    ret = copy.deepcopy(context_data.get("running", {}))
    if kwargs.get("all", False):
        ret.update(copy.deepcopy(context_data.get("stopped", {})))
    # If verbose info was requested, go get it
    if kwargs.get("verbose", False):
        for c_id in ret:
            ret[c_id]["Info"] = inspect_container(c_id)
    if kwargs.get("host", False):
        ret.setdefault("host", {}).setdefault("interfaces", {}).update(
            __salt__["network.interfaces"]()
        )
    return ret
def state(name):
    """
    Returns the state of the container

    name
        Container name or ID

    **RETURN DATA**

    A string representing the current state of the container (either
    ``running``, ``paused``, or ``stopped``)

    CLI Example:

    .. code-block:: bash

        salt myminion docker.state mycontainer
    """
    # Cache the computed state in __context__ for the duration of this run
    contextkey = "docker.state.{}".format(name)
    if contextkey not in __context__:
        __context__[contextkey] = _get_state(inspect_container(name))
    return __context__[contextkey]
def search(name, official=False, trusted=False):
    """
    Searches the registry for an image

    name
        Search keyword

    official : False
        Limit results to official builds

    trusted : False
        Limit results to `trusted builds`_

    **RETURN DATA**

    A dictionary with each key being the name of an image, and the following
    information for each image:

    - ``Description`` - Image description
    - ``Official`` - A boolean (``True`` if an official build, ``False`` if
      not)
    - ``Stars`` - Number of stars the image has on the registry
    - ``Trusted`` - A boolean (``True`` if a trusted build, ``False`` if not)

    CLI Example:

    .. code-block:: bash

        salt myminion docker.search centos
        salt myminion docker.search centos official=True
    """
    response = _client_wrapper("search", name)
    if not response:
        raise CommandExecutionError(
            "No images matched the search string '{}'".format(name)
        )
    # Rename the API's lowercase keys to the documented return keys
    key_map = {
        "description": "Description",
        "is_official": "Official",
        "is_trusted": "Trusted",
        "star_count": "Stars",
    }
    # Flags the caller asked to filter on
    limit = [flag for enabled, flag in ((official, "Official"), (trusted, "Trusted")) if enabled]
    results = {}
    for item in response:
        c_name = item.pop("name", None)
        if c_name is None:
            continue
        entry = results.setdefault(c_name, {})
        for key, val in item.items():
            entry[key_map.get(key, key)] = val
    if not limit:
        return results
    # Keep only images matching at least one requested flag
    return {
        img_name: img_data
        for img_name, img_data in results.items()
        if any(img_data.get(flag, False) for flag in limit)
    }
def top(name):
    """
    Runs the `docker top` command on a specific container

    name
        Container name or ID

    **RETURN DATA**

    A list of dictionaries containing information about each process

    CLI Example:

    .. code-block:: bash

        salt myminion docker.top mycontainer
        salt myminion docker.top 0123456789ab
    """
    response = _client_wrapper("top", name)
    # Map column index -> column title, then zip each process row onto it
    columns = dict(enumerate(response["Titles"]))
    return [
        {columns[idx]: val for idx, val in enumerate(process)}
        for process in response["Processes"]
    ]
def version():
    """
    Returns a dictionary of Docker version information. Equivalent to running
    the ``docker version`` Docker CLI command.

    CLI Example:

    .. code-block:: bash

        salt myminion docker.version
    """
    ret = _client_wrapper("version")
    version_re = re.compile(VERSION_RE)
    # Add tuple-form "<Key>Info" entries for the keys that carry versions
    for key in ("Version", "ApiVersion"):
        if key in ret:
            match = version_re.match(str(ret[key]))
            if match:
                ret[key + "Info"] = tuple(int(x) for x in match.group(1).split("."))
    return ret
def _create_networking_config(networks):
    """
    Build a docker-py networking config from a mapping of network name to
    endpoint-config keyword arguments.
    """
    # Use lazy %-style logging args instead of str.format so the formatting
    # work is skipped unless DEBUG logging is actually enabled.
    log.debug("creating networking config from %s", networks)
    return _client_wrapper(
        "create_networking_config",
        {
            net_name: _client_wrapper("create_endpoint_config", **endpoint_kwargs)
            for net_name, endpoint_kwargs in networks.items()
        },
    )
# Functions to manage containers
@_refresh_mine_cache
def create(
    image,
    name=None,
    start=False,
    skip_translate=None,
    ignore_collisions=False,
    validate_ip_addrs=True,
    client_timeout=salt.utils.dockermod.CLIENT_TIMEOUT,
    **kwargs
):
    """
    Create a new container

    image
        Image from which to create the container

    name
        Name for the new container. If not provided, Docker will randomly
        generate one for you (it will be included in the return data).

    start : False
        If ``True``, start container after creating it

        .. versionadded:: 2018.3.0

    skip_translate
        This function translates Salt CLI or SLS input into the format which
        docker-py expects. However, in the event that Salt's translation logic
        fails (due to potential changes in the Docker Remote API, or to bugs in
        the translation code), this argument can be used to exert granular
        control over which arguments are translated and which are not.

        Pass this argument as a comma-separated list (or Python list) of
        arguments, and translation for each passed argument name will be
        skipped. Alternatively, pass ``True`` and *all* translation will be
        skipped.

        Skipping translation allows for arguments to be formatted directly in
        the format which docker-py expects. This allows for API changes and
        other issues to be more easily worked around. An example of using this
        option to skip translation would be:

        .. code-block:: bash

            salt myminion docker.create image=centos:7.3.1611 skip_translate=environment environment="{'FOO': 'bar'}"

        See the following links for more information:

        - `docker-py Low-level API`_
        - `Docker Engine API`_

    ignore_collisions : False
        Since many of docker-py's arguments differ in name from their CLI
        counterparts (with which most Docker users are more familiar), Salt
        detects usage of these and aliases them to the docker-py version of
        that argument. However, if both the alias and the docker-py version of
        the same argument (e.g. ``env`` and ``environment``) are used, an error
        will be raised. Set this argument to ``True`` to suppress these errors
        and keep the docker-py version of the argument.

    validate_ip_addrs : True
        For parameters which accept IP addresses as input, IP address
        validation will be performed. To disable, set this to ``False``

    client_timeout : 60
        Timeout in seconds for the Docker client. This is not a timeout for
        this function, but for receiving a response from the API.

        .. note::
            This is only used if Salt needs to pull the requested image.

    **CONTAINER CONFIGURATION ARGUMENTS**

    auto_remove (or *rm*) : False
        Enable auto-removal of the container on daemon side when the
        container’s process exits (analogous to running a docker container with
        ``--rm`` on the CLI).

        Examples:

        - ``auto_remove=True``
        - ``rm=True``

    binds
        Files/directories to bind mount. Each bind mount should be passed in
        one of the following formats:

        - ``<host_path>:<container_path>`` - ``host_path`` is mounted within
          the container as ``container_path`` with read-write access.
        - ``<host_path>:<container_path>:<selinux_context>`` - ``host_path`` is
          mounted within the container as ``container_path`` with read-write
          access. Additionally, the specified selinux context will be set
          within the container.
        - ``<host_path>:<container_path>:<read_only>`` - ``host_path`` is
          mounted within the container as ``container_path``, with the
          read-only or read-write setting explicitly defined.
        - ``<host_path>:<container_path>:<read_only>,<selinux_context>`` -
          ``host_path`` is mounted within the container as ``container_path``,
          with the read-only or read-write setting explicitly defined.
          Additionally, the specified selinux context will be set within the
          container.

        ``<read_only>`` can be either ``rw`` for read-write access, or ``ro``
        for read-only access. When omitted, it is assumed to be read-write.

        ``<selinux_context>`` can be ``z`` if the volume is shared between
        multiple containers, or ``Z`` if the volume should be private.

        .. note::
            When both ``<read_only>`` and ``<selinux_context>`` are specified,
            there must be a comma before ``<selinux_context>``.

        Binds can be expressed as a comma-separated list or a Python list,
        however in cases where both ro/rw and an selinux context are specified,
        the binds *must* be specified as a Python list.

        Examples:

        - ``binds=/srv/www:/var/www:ro``
        - ``binds=/srv/www:/var/www:rw``
        - ``binds=/srv/www:/var/www``
        - ``binds="['/srv/www:/var/www:ro,Z']"``
        - ``binds="['/srv/www:/var/www:rw,Z']"``
        - ``binds=/srv/www:/var/www:Z``

        .. note::
            The second and third examples above are equivalent to each other,
            as are the last two examples.

    blkio_weight
        Block IO weight (relative weight), accepts a weight value between 10
        and 1000.

        Example: ``blkio_weight=100``

    blkio_weight_device
        Block IO weight (relative device weight), specified as a list of
        expressions in the format ``PATH:WEIGHT``

        Example: ``blkio_weight_device=/dev/sda:100``

    cap_add
        List of capabilities to add within the container. Can be passed as a
        comma-separated list or a Python list. Requires Docker 1.2.0 or
        newer.

        Examples:

        - ``cap_add=SYS_ADMIN,MKNOD``
        - ``cap_add="[SYS_ADMIN, MKNOD]"``

    cap_drop
        List of capabilities to drop within the container. Can be passed as a
        comma-separated string or a Python list. Requires Docker 1.2.0 or
        newer.

        Examples:

        - ``cap_drop=SYS_ADMIN,MKNOD``,
        - ``cap_drop="[SYS_ADMIN, MKNOD]"``

    command (or *cmd*)
        Command to run in the container

        Example: ``command=bash`` or ``cmd=bash``

        .. versionchanged:: 2015.8.1
            ``cmd`` is now also accepted

    cpuset_cpus (or *cpuset*)
        CPUs on which to allow execution, specified as a string
        containing a range (e.g. ``0-3``) or a comma-separated list of CPUs
        (e.g. ``0,1``).

        Examples:

        - ``cpuset_cpus="0-3"``
        - ``cpuset="0,1"``

    cpuset_mems
        Memory nodes on which to allow execution, specified as a string
        containing a range (e.g. ``0-3``) or a comma-separated list of MEMs
        (e.g. ``0,1``). Only effective on NUMA systems.

        Examples:

        - ``cpuset_mems="0-3"``
        - ``cpuset_mems="0,1"``

    cpu_group
        The length of a CPU period in microseconds

        Example: ``cpu_group=100000``

    cpu_period
        Microseconds of CPU time that the container can get in a CPU period

        Example: ``cpu_period=50000``

    cpu_shares
        CPU shares (relative weight), specified as an integer between 2 and 1024.

        Example: ``cpu_shares=512``

    detach : False
        If ``True``, run the container's command in the background (daemon
        mode)

        Example: ``detach=True``

    devices
        List of host devices to expose within the container

        Examples:

        - ``devices="/dev/net/tun,/dev/xvda1:/dev/xvda1,/dev/xvdb1:/dev/xvdb1:r"``
        - ``devices="['/dev/net/tun', '/dev/xvda1:/dev/xvda1', '/dev/xvdb1:/dev/xvdb1:r']"``

    device_read_bps
        Limit read rate (bytes per second) from a device, specified as a list
        of expressions in the format ``PATH:RATE``, where ``RATE`` is either an
        integer number of bytes, or a string ending in ``kb``, ``mb``, or
        ``gb``.

        Examples:

        - ``device_read_bps="/dev/sda:1mb,/dev/sdb:5mb"``
        - ``device_read_bps="['/dev/sda:100mb', '/dev/sdb:5mb']"``

    device_read_iops
        Limit read rate (I/O per second) from a device, specified as a list
        of expressions in the format ``PATH:RATE``, where ``RATE`` is a number
        of I/O operations.

        Examples:

        - ``device_read_iops="/dev/sda:1000,/dev/sdb:500"``
        - ``device_read_iops="['/dev/sda:1000', '/dev/sdb:500']"``

    device_write_bps
        Limit write rate (bytes per second) from a device, specified as a list
        of expressions in the format ``PATH:RATE``, where ``RATE`` is either an
        integer number of bytes, or a string ending in ``kb``, ``mb`` or
        ``gb``.

        Examples:

        - ``device_write_bps="/dev/sda:100mb,/dev/sdb:50mb"``
        - ``device_write_bps="['/dev/sda:100mb', '/dev/sdb:50mb']"``

    device_write_iops
        Limit write rate (I/O per second) from a device, specified as a list
        of expressions in the format ``PATH:RATE``, where ``RATE`` is a number
        of I/O operations.

        Examples:

        - ``device_write_iops="/dev/sda:1000,/dev/sdb:500"``
        - ``device_write_iops="['/dev/sda:1000', '/dev/sdb:500']"``

    dns
        List of DNS nameservers. Can be passed as a comma-separated list or a
        Python list.

        Examples:

        - ``dns=8.8.8.8,8.8.4.4``
        - ``dns="['8.8.8.8', '8.8.4.4']"``

        .. note::
            To skip IP address validation, use ``validate_ip_addrs=False``

    dns_opt
        Additional options to be added to the container’s ``resolv.conf`` file

        Example: ``dns_opt=ndots:9``

    dns_search
        List of DNS search domains. Can be passed as a comma-separated list
        or a Python list.

        Examples:

        - ``dns_search=foo1.domain.tld,foo2.domain.tld``
        - ``dns_search="[foo1.domain.tld, foo2.domain.tld]"``

    domainname
        The domain name to use for the container

        Example: ``domainname=domain.tld``

    entrypoint
        Entrypoint for the container. Either a string (e.g. ``"mycmd --arg1
        --arg2"``) or a Python list (e.g. ``"['mycmd', '--arg1', '--arg2']"``)

        Examples:

        - ``entrypoint="cat access.log"``
        - ``entrypoint="['cat', 'access.log']"``

    environment (or *env*)
        Either a dictionary of environment variable names and their values, or
        a Python list of strings in the format ``VARNAME=value``.

        Examples:

        - ``environment='VAR1=value,VAR2=value'``
        - ``environment="['VAR1=value', 'VAR2=value']"``
        - ``environment="{'VAR1': 'value', 'VAR2': 'value'}"``

    extra_hosts
        Additional hosts to add to the container's /etc/hosts file. Can be
        passed as a comma-separated list or a Python list. Requires Docker
        1.3.0 or newer.

        Examples:

        - ``extra_hosts=web1:10.9.8.7,web2:10.9.8.8``
        - ``extra_hosts="['web1:10.9.8.7', 'web2:10.9.8.8']"``
        - ``extra_hosts="{'web1': '10.9.8.7', 'web2': '10.9.8.8'}"``

        .. note::
            To skip IP address validation, use ``validate_ip_addrs=False``

    group_add
        List of additional group names and/or IDs that the container process
        will run as

        Examples:

        - ``group_add=web,network``
        - ``group_add="['web', 'network']"``

    hostname
        Hostname of the container. If not provided, and if a ``name`` has been
        provided, the ``hostname`` will default to the ``name`` that was
        passed.

        Example: ``hostname=web1``

        .. warning::
            If the container is started with ``network_mode=host``, the
            hostname will be overridden by the hostname of the Minion.

    interactive (or *stdin_open*): False
        Leave stdin open, even if not attached

        Examples:

        - ``interactive=True``
        - ``stdin_open=True``

    ipc_mode (or *ipc*)
        Set the IPC mode for the container. The default behavior is to create a
        private IPC namespace for the container, but this option can be
        used to change that behavior:

        - ``container:<container_name_or_id>`` reuses another container shared
          memory, semaphores and message queues
        - ``host``: use the host's shared memory, semaphores and message queues

        Examples:

        - ``ipc_mode=container:foo``
        - ``ipc=host``

        .. warning::
            Using ``host`` gives the container full access to local shared
            memory and is therefore considered insecure.

    isolation
        Specifies the type of isolation technology used by containers

        Example: ``isolation=hyperv``

        .. note::
            The default value on Windows server is ``process``, while the
            default value on Windows client is ``hyperv``. On Linux, only
            ``default`` is supported.

    labels (or *label*)
        Add metadata to the container. Labels can be set both with and without
        values:

        Examples:

        - ``labels=foo,bar=baz``
        - ``labels="['foo', 'bar=baz']"``

        .. versionchanged:: 2018.3.0
            Labels both with and without values can now be mixed. Earlier
            releases only permitted one method or the other.

    links
        Link this container to another. Links should be specified in the format
        ``<container_name_or_id>:<link_alias>``. Multiple links can be passed,
        either as a comma separated list or a Python list.

        Examples:

        - ``links=web1:link1,web2:link2``,
        - ``links="['web1:link1', 'web2:link2']"``
        - ``links="{'web1': 'link1', 'web2': 'link2'}"``

    log_driver
        Set container's logging driver. Requires Docker 1.6 or newer.

        Example:

        - ``log_driver=syslog``

        .. note::
            The logging driver feature was improved in Docker 1.13 introducing
            option name changes. Please see Docker's `Configure logging
            drivers`_ documentation for more information.

        .. _`Configure logging drivers`: https://docs.docker.com/engine/admin/logging/overview/

    log_opt
        Config options for the ``log_driver`` config option. Requires Docker
        1.6 or newer.

        Example:

        - ``log_opt="syslog-address=tcp://192.168.0.42,syslog-facility=daemon"``
        - ``log_opt="['syslog-address=tcp://192.168.0.42', 'syslog-facility=daemon']"``
        - ``log_opt="{'syslog-address': 'tcp://192.168.0.42', 'syslog-facility': 'daemon'}"``

    lxc_conf
        Additional LXC configuration parameters to set before starting the
        container.

        Examples:

        - ``lxc_conf="lxc.utsname=docker,lxc.arch=x86_64"``
        - ``lxc_conf="['lxc.utsname=docker', 'lxc.arch=x86_64']"``
        - ``lxc_conf="{'lxc.utsname': 'docker', 'lxc.arch': 'x86_64'}"``

        .. note::
            These LXC configuration parameters will only have the desired
            effect if the container is using the LXC execution driver, which
            has been deprecated for some time.

    mac_address
        MAC address to use for the container. If not specified, a random MAC
        address will be used.

        Example: ``mac_address=01:23:45:67:89:0a``

    mem_limit (or *memory*) : 0
        Memory limit. Can be specified in bytes or using single-letter units
        (i.e. ``512M``, ``2G``, etc.). A value of ``0`` (the default) means no
        memory limit.

        Examples:

        - ``mem_limit=512M``
        - ``memory=1073741824``

    mem_swappiness
        Tune a container's memory swappiness behavior. Accepts an integer
        between 0 and 100.

        Example: ``mem_swappiness=60``

    memswap_limit (or *memory_swap*) : -1
        Total memory limit (memory plus swap). Set to ``-1`` to disable swap. A
        value of ``0`` means no swap limit.

        Examples:

        - ``memswap_limit=1G``
        - ``memory_swap=2147483648``

    network_disabled : False
        If ``True``, networking will be disabled within the container

        Example: ``network_disabled=True``

    network_mode : bridge
        One of the following:

        - ``bridge`` - Creates a new network stack for the container on the
          docker bridge
        - ``none`` - No networking (equivalent of the Docker CLI argument
          ``--net=none``). Not to be confused with Python's ``None``.
        - ``container:<name_or_id>`` - Reuses another container's network stack
        - ``host`` - Use the host's network stack inside the container

          .. warning::
              Using ``host`` mode gives the container full access to the hosts
              system's services (such as D-Bus), and is therefore considered
              insecure.

        Examples:

        - ``network_mode=null``
        - ``network_mode=container:web1``

    oom_kill_disable
        Whether to disable OOM killer

        Example: ``oom_kill_disable=False``

    oom_score_adj
        An integer value containing the score given to the container in order
        to tune OOM killer preferences

        Example: ``oom_score_adj=500``

    pid_mode
        Set to ``host`` to use the host container's PID namespace within the
        container. Requires Docker 1.5.0 or newer.

        Example: ``pid_mode=host``

    pids_limit
        Set the container's PID limit. Set to ``-1`` for unlimited.

        Example: ``pids_limit=2000``

    port_bindings (or *publish*)
        Bind exposed ports which were exposed using the ``ports`` argument to
        :py:func:`docker.create <salt.modules.dockermod.create>`. These
        should be passed in the same way as the ``--publish`` argument to the
        ``docker run`` CLI command:

        - ``ip:hostPort:containerPort`` - Bind a specific IP and port on the
          host to a specific port within the container.
        - ``ip::containerPort`` - Bind a specific IP and an ephemeral port to a
          specific port within the container.
        - ``hostPort:containerPort`` - Bind a specific port on all of the
          host's interfaces to a specific port within the container.
        - ``containerPort`` - Bind an ephemeral port on all of the host's
          interfaces to a specific port within the container.

        Multiple bindings can be separated by commas, or passed as a Python
        list. The below two examples are equivalent:

        - ``port_bindings="5000:5000,2123:2123/udp,8080"``
        - ``port_bindings="['5000:5000', '2123:2123/udp', 8080]"``

        Port bindings can also include ranges:

        - ``port_bindings="14505-14506:4505-4506"``

        .. note::
            When specifying a protocol, it must be passed in the
            ``containerPort`` value, as seen in the examples above.

    ports
        A list of ports to expose on the container. Can be passed as
        comma-separated list or a Python list. If the protocol is omitted, the
        port will be assumed to be a TCP port.

        Examples:

        - ``ports=1111,2222/udp``
        - ``ports="[1111, '2222/udp']"``

    privileged : False
        If ``True``, runs the exec process with extended privileges

        Example: ``privileged=True``

    publish_all_ports (or *publish_all*): False
        Publish all ports to the host

        Example: ``publish_all_ports=True``

    read_only : False
        If ``True``, mount the container’s root filesystem as read only

        Example: ``read_only=True``

    restart_policy (or *restart*)
        Set a restart policy for the container. Must be passed as a string in
        the format ``policy[:retry_count]`` where ``policy`` is one of
        ``always``, ``unless-stopped``, or ``on-failure``, and ``retry_count``
        is an optional limit to the number of retries. The retry count is ignored
        when using the ``always`` or ``unless-stopped`` restart policy.

        Examples:

        - ``restart_policy=on-failure:5``
        - ``restart_policy=always``

    security_opt
        Security configuration for MLS systems such as SELinux and AppArmor.
        Can be passed as a comma-separated list or a Python list.

        Examples:

        - ``security_opt=apparmor:unconfined,param2:value2``
        - ``security_opt='["apparmor:unconfined", "param2:value2"]'``

        .. important::
            Some security options can contain commas. In these cases, this
            argument *must* be passed as a Python list, as splitting by comma
            will result in an invalid configuration.

        .. note::
            See the documentation for security_opt at
            https://docs.docker.com/engine/reference/run/#security-configuration

    shm_size
        Size of /dev/shm

        Example: ``shm_size=128M``

    stop_signal
        The signal used to stop the container. The default is ``SIGTERM``.

        Example: ``stop_signal=SIGRTMIN+3``

    stop_timeout
        Timeout to stop the container, in seconds

        Example: ``stop_timeout=5``

    storage_opt
        Storage driver options for the container

        Examples:

        - ``storage_opt='dm.basesize=40G'``
        - ``storage_opt="['dm.basesize=40G']"``
        - ``storage_opt="{'dm.basesize': '40G'}"``

    sysctls (or *sysctl*)
        Set sysctl options for the container

        Examples:

        - ``sysctl='fs.nr_open=1048576,kernel.pid_max=32768'``
        - ``sysctls="['fs.nr_open=1048576', 'kernel.pid_max=32768']"``
        - ``sysctls="{'fs.nr_open': '1048576', 'kernel.pid_max': '32768'}"``

    tmpfs
        A map of container directories which should be replaced by tmpfs
        mounts, and their corresponding mount options. Can be passed as Python
        list of PATH:VALUE mappings, or a Python dictionary. However, since
        commas usually appear in the values, this option *cannot* be passed as
        a comma-separated list.

        Examples:

        - ``tmpfs="['/run:rw,noexec,nosuid,size=65536k', '/var/lib/mysql:rw,noexec,nosuid,size=600m']"``
        - ``tmpfs="{'/run': 'rw,noexec,nosuid,size=65536k', '/var/lib/mysql': 'rw,noexec,nosuid,size=600m'}"``

    tty : False
        Attach TTYs

        Example: ``tty=True``

    ulimits (or *ulimit*)
        List of ulimits. These limits should be passed in the format
        ``<ulimit_name>:<soft_limit>:<hard_limit>``, with the hard limit being
        optional. Can be passed as a comma-separated list or a Python list.

        Examples:

        - ``ulimits="nofile=1024:1024,nproc=60"``
        - ``ulimits="['nofile=1024:1024', 'nproc=60']"``

    user
        User under which to run exec process

        Example: ``user=foo``

    userns_mode (or *user_ns_mode*)
        Sets the user namespace mode, when the user namespace remapping option
        is enabled.

        Example: ``userns_mode=host``

    volumes (or *volume*)
        List of directories to expose as volumes. Can be passed as a
        comma-separated list or a Python list.

        Examples:

        - ``volumes=/mnt/vol1,/mnt/vol2``
        - ``volume="['/mnt/vol1', '/mnt/vol2']"``

    volumes_from
        Container names or IDs from which the container will get volumes. Can
        be passed as a comma-separated list or a Python list.

        Example: ``volumes_from=foo``, ``volumes_from=foo,bar``,
        ``volumes_from="[foo, bar]"``

    volume_driver
        Sets the container's volume driver

        Example: ``volume_driver=foobar``

    working_dir (or *workdir*)
        Working directory inside the container

        Examples:

        - ``working_dir=/var/log/nginx``
        - ``workdir=/var/www/myapp``

    **RETURN DATA**

    A dictionary containing the following keys:

    - ``Id`` - ID of the newly-created container
    - ``Name`` - Name of the newly-created container

    CLI Example:

    .. code-block:: bash

        # Create a data-only container
        salt myminion docker.create myuser/mycontainer volumes="/mnt/vol1,/mnt/vol2"
        # Create a CentOS 7 container that will stay running once started
        salt myminion docker.create centos:7 name=mycent7 interactive=True tty=True command=bash
    """
    # Pull the image first if it is not present locally. Passing inspect=False
    # skips this check (used internally when the caller already knows the
    # image exists).
    if kwargs.pop("inspect", True) and not resolve_image_id(image):
        pull(image, client_timeout=client_timeout)

    # Translate Salt-style kwargs into docker-py's expected format; anything
    # docker-py doesn't recognize comes back in unused_kwargs.
    kwargs, unused_kwargs = _get_create_kwargs(
        skip_translate=skip_translate,
        ignore_collisions=ignore_collisions,
        validate_ip_addrs=validate_ip_addrs,
        **kwargs
    )

    if unused_kwargs:
        log.warning(
            "The following arguments were ignored because they are not "
            "recognized by docker-py: %s",
            sorted(unused_kwargs),
        )

    log.debug(
        "docker.create: creating container %susing the following " "arguments: %s",
        "with name '{}' ".format(name) if name is not None else "",
        kwargs,
    )
    time_started = time.time()
    response = _client_wrapper("create_container", image, name=name, **kwargs)
    response["Time_Elapsed"] = time.time() - time_started
    # The cached container/image context is now stale; drop it.
    _clear_context()

    if name is None:
        # Docker generated a random name; look it up so it can be returned.
        name = inspect_container(response["Id"])["Name"].lstrip("/")
    response["Name"] = name

    if start:
        try:
            start_(name)
        except CommandExecutionError as exc:
            raise CommandExecutionError(
                "Failed to start container after creation",
                info={"response": response, "error": exc.__str__()},
            )
        else:
            response["Started"] = True

    return response
@_refresh_mine_cache
def run_container(
    image,
    name=None,
    skip_translate=None,
    ignore_collisions=False,
    validate_ip_addrs=True,
    client_timeout=salt.utils.dockermod.CLIENT_TIMEOUT,
    bg=False,
    replace=False,
    force=False,
    networks=None,
    **kwargs
):
    """
    .. versionadded:: 2018.3.0

    Equivalent to ``docker run`` on the Docker CLI. Runs the container, waits
    for it to exit, and returns the container's logs when complete.

    .. note::
        Not to be confused with :py:func:`docker.run
        <salt.modules.dockermod.run>`, which provides a :py:func:`cmd.run
        <salt.modules.cmdmod.run>`-like interface for executing commands in a
        running container.

    This function accepts the same arguments as :py:func:`docker.create
    <salt.modules.dockermod.create>`, with the exception of ``start``. In
    addition, it accepts the arguments from :py:func:`docker.logs
    <salt.modules.dockermod.logs>`, with the exception of ``follow``, to
    control how logs are returned. Finally, the ``bg`` argument described below
    can be used to optionally run the container in the background (the default
    behavior is to block until the container exits).

    bg : False
        If ``True``, this function will not wait for the container to exit and
        will not return its logs. It will however return the container's name
        and ID, allowing for :py:func:`docker.logs
        <salt.modules.dockermod.logs>` to be used to view the logs.

        .. note::
            The logs will be inaccessible once the container exits if
            ``auto_remove`` is set to ``True``, so keep this in mind.

    replace : False
        If ``True``, and if the named container already exists, this will
        remove the existing container. The default behavior is to return a
        ``False`` result when the container already exists.

    force : False
        If ``True``, and the named container already exists, *and* ``replace``
        is also set to ``True``, then the container will be forcibly removed.
        Otherwise, the state will not proceed and will return a ``False``
        result.

    networks
        Networks to which the container should be connected. If automatic IP
        configuration is being used, the networks can be a simple list of
        network names. If custom IP configuration is being used, then this
        argument must be passed as a dictionary.

    CLI Examples:

    .. code-block:: bash

        salt myminion docker.run_container myuser/myimage command=/usr/local/bin/myscript.sh
        # Run container in the background
        salt myminion docker.run_container myuser/myimage command=/usr/local/bin/myscript.sh bg=True
        # Connecting to two networks using automatic IP configuration
        salt myminion docker.run_container myuser/myimage command='perl /scripts/sync.py' networks=net1,net2
        # net1 using automatic IP, net2 using static IPv4 address
        salt myminion docker.run_container myuser/myimage command='perl /scripts/sync.py' networks='{"net1": {}, "net2": {"ipv4_address": "192.168.27.12"}}'
    """
    # Pull the image first if it is not present locally. Passing inspect=False
    # skips this check.
    if kwargs.pop("inspect", True) and not resolve_image_id(image):
        pull(image, client_timeout=client_timeout)

    removed_ids = None
    if name is not None:
        try:
            pre_state = __salt__["docker.state"](name)
        except CommandExecutionError:
            # Container doesn't exist yet; nothing to replace.
            pass
        else:
            if pre_state == "running" and not (replace and force):
                raise CommandExecutionError(
                    "Container '{}' exists and is running. Run with "
                    "replace=True and force=True to force removal of the "
                    "existing container.".format(name)
                )
            elif not replace:
                raise CommandExecutionError(
                    "Container '{}' exists. Run with replace=True to "
                    "remove the existing container".format(name)
                )
            else:
                # We don't have to try/except this, we want it to raise a
                # CommandExecutionError if we fail to remove the existing
                # container so that we gracefully abort before attempting to go
                # any further.
                removed_ids = rm_(name, force=force)

    # Split the logs() arguments out of kwargs; they control how the logs are
    # read back after the container exits.
    log_kwargs = {}
    for argname in get_client_args("logs")["logs"]:
        try:
            log_kwargs[argname] = kwargs.pop(argname)
        except KeyError:
            pass
    # Ignore the stream argument if passed
    log_kwargs.pop("stream", None)

    kwargs, unused_kwargs = _get_create_kwargs(
        skip_translate=skip_translate,
        ignore_collisions=ignore_collisions,
        validate_ip_addrs=validate_ip_addrs,
        **kwargs
    )

    # _get_create_kwargs() will have processed auto_remove and put it into the
    # host_config, so check the host_config to see whether or not auto_remove
    # was enabled.
    auto_remove = kwargs.get("host_config", {}).get("AutoRemove", False)

    if unused_kwargs:
        log.warning(
            "The following arguments were ignored because they are not "
            "recognized by docker-py: %s",
            sorted(unused_kwargs),
        )

    if networks:
        # Normalize a comma-separated string into {name: {}} (automatic IP
        # configuration); anything else must already be a dict of dicts.
        if isinstance(networks, str):
            networks = {x: {} for x in networks.split(",")}
        if not isinstance(networks, dict) or not all(
            isinstance(x, dict) for x in networks.values()
        ):
            raise SaltInvocationError("Invalid format for networks argument")

    log.debug(
        "docker.create: creating container %susing the following " "arguments: %s",
        "with name '{}' ".format(name) if name is not None else "",
        kwargs,
    )

    time_started = time.time()
    # Create the container
    ret = _client_wrapper("create_container", image, name=name, **kwargs)

    if removed_ids:
        ret["Replaces"] = removed_ids

    if name is None:
        name = inspect_container(ret["Id"])["Name"].lstrip("/")
    ret["Name"] = name

    def _append_warning(ret, msg):
        """
        Add ``msg`` to the "Warnings" key of ``ret``, normalizing it to a list
        so multiple warnings accumulate.
        """
        warnings = ret.pop("Warnings", None)
        if warnings is None:
            warnings = [msg]
        # BUGFIX: was ``isinstance(ret, list)``, which is always False since
        # ``ret`` is the return dict. That made a second warning hit the
        # fallback branch and nest the existing list instead of appending.
        elif isinstance(warnings, list):
            warnings.append(msg)
        else:
            warnings = [warnings, msg]
        ret["Warnings"] = warnings

    exc_info = {"return": ret}
    try:
        if networks:
            try:
                for net_name, net_conf in networks.items():
                    __salt__["docker.connect_container_to_network"](
                        ret["Id"], net_name, **net_conf
                    )
            except CommandExecutionError as exc:
                # Make an effort to remove the container if auto_remove was enabled
                if auto_remove:
                    try:
                        rm_(name)
                    except CommandExecutionError as rm_exc:
                        exc_info.setdefault("other_errors", []).append(
                            "Failed to auto_remove container: {}".format(rm_exc)
                        )
                # Raise original exception with additional info
                raise CommandExecutionError(exc.__str__(), info=exc_info)

        # Start the container
        output = []
        start_(ret["Id"])
        if not bg:
            # Can't use logs() here because we've disabled "stream" in that
            # function. Also, note that if you want to troubleshoot this for loop
            # in a debugger like pdb or pudb, you'll want to use auto_remove=False
            # when running the function, since the container will likely exit
            # before you finish stepping through with a debugger. If the container
            # exits during iteration, the next iteration of the generator will
            # raise an exception since the container will no longer exist.
            try:
                for line in _client_wrapper(
                    "logs", ret["Id"], stream=True, timestamps=False
                ):
                    output.append(salt.utils.stringutils.to_unicode(line))
            except CommandExecutionError:
                msg = (
                    "Failed to get logs from container. This may be because "
                    "the container exited before Salt was able to attach to "
                    "it to retrieve the logs. Consider setting auto_remove "
                    "to False."
                )
                _append_warning(ret, msg)
        # Container has exited, note the elapsed time
        ret["Time_Elapsed"] = time.time() - time_started
        _clear_context()

        if not bg:
            ret["Logs"] = "".join(output)
            if not auto_remove:
                try:
                    cinfo = inspect_container(ret["Id"])
                except CommandExecutionError:
                    _append_warning(ret, "Failed to inspect container after running")
                else:
                    cstate = cinfo.get("State", {})
                    cstatus = cstate.get("Status")
                    if cstatus != "exited":
                        _append_warning(ret, "Container state is not 'exited'")
                    ret["ExitCode"] = cstate.get("ExitCode")

    except CommandExecutionError as exc:
        try:
            exc_info.update(exc.info)
        except (TypeError, ValueError):
            # In the event exc.info wasn't a dict (extremely unlikely), append
            # it to other_errors as a fallback.
            exc_info.setdefault("other_errors", []).append(exc.info)
        # Re-raise with all of the available additional info
        raise CommandExecutionError(exc.__str__(), info=exc_info)

    return ret
def copy_from(name, source, dest, overwrite=False, makedirs=False):
    """
    Copy a file from inside a container to the Minion

    name
        Container name

    source
        Path of the file on the container's filesystem

    dest
        Destination on the Minion. Must be an absolute path. If the destination
        is a directory, the file will be copied into that directory.

    overwrite : False
        Unless this option is set to ``True``, then if a file exists at the
        location specified by the ``dest`` argument, an error will be raised.

    makedirs : False
        Create the parent directory of ``dest`` on the Minion if it does not
        already exist.

    **RETURN DATA**

    A boolean (``True`` if successful, otherwise ``False``)

    CLI Example:

    .. code-block:: bash

        salt myminion docker.copy_from mycontainer /var/log/nginx/access.log /home/myuser
    """
    # The container must be running for the in-container existence checks
    # (and ``docker cp``) below to work.
    c_state = state(name)
    if c_state != "running":
        raise CommandExecutionError("Container '{}' is not running".format(name))

    # Destination file sanity checks
    if not os.path.isabs(dest):
        raise SaltInvocationError("Destination path must be absolute")

    if os.path.isdir(dest):
        # Destination is a directory, full path to dest file will include the
        # basename of the source file.
        dest = os.path.join(dest, os.path.basename(source))
        dest_dir = dest
    else:
        # Destination was not a directory. We will check to see if the parent
        # dir is a directory, and then (if makedirs=True) attempt to create the
        # parent directory.
        dest_dir = os.path.split(dest)[0]
        if not os.path.isdir(dest_dir):
            if makedirs:
                try:
                    os.makedirs(dest_dir)
                except OSError as exc:
                    raise CommandExecutionError(
                        "Unable to make destination directory {}: {}".format(
                            dest_dir, exc
                        )
                    )
            else:
                raise SaltInvocationError(
                    "Directory {} does not exist".format(dest_dir)
                )
    if not overwrite and os.path.exists(dest):
        raise CommandExecutionError(
            "Destination path {} already exists. Use overwrite=True to "
            "overwrite it".format(dest)
        )

    # Source file sanity checks — probed by running ``test`` inside the
    # container, since the source path lives on the container's filesystem.
    if not os.path.isabs(source):
        raise SaltInvocationError("Source path must be absolute")
    else:
        if (
            retcode(name, "test -e {}".format(pipes.quote(source)), ignore_retcode=True)
            == 0
        ):
            if (
                retcode(
                    name, "test -f {}".format(pipes.quote(source)), ignore_retcode=True
                )
                != 0
            ):
                raise SaltInvocationError("Source must be a regular file")
        else:
            raise SaltInvocationError("Source file {} does not exist".format(source))

    # Before we try to replace the file, compare checksums.
    source_md5 = _get_md5(name, source)
    if source_md5 == __salt__["file.get_sum"](dest, "md5"):
        log.debug("%s:%s and %s are the same file, skipping copy", name, source, dest)
        return True

    log.debug("Copying %s from container '%s' to local path %s", source, name, dest)

    try:
        src_path = ":".join((name, source))
    except TypeError:
        src_path = "{}:{}".format(name, source)
    cmd = ["docker", "cp", src_path, dest_dir]
    # List-form command with python_shell=False avoids shell interpretation
    # of the paths.
    __salt__["cmd.run"](cmd, python_shell=False)
    # Success == the copied file's checksum now matches the source's.
    return source_md5 == __salt__["file.get_sum"](dest, "md5")
# ``docker cp`` copies files out of a container, so expose ``copy_from``
# under the shorter ``cp`` alias for CLI parity.
cp = salt.utils.functools.alias_function(copy_from, "cp")
def copy_to(name, source, dest, exec_driver=None, overwrite=False, makedirs=False):
    """
    Copy a file from the host into a container
    name
        Container name
    source
        File to copy into the container. May be a local path on the Minion or
        a remote file from the Salt fileserver.
    dest
        Destination inside the container. Must be an absolute path. When the
        destination is a directory, the file is placed inside it.
    exec_driver : None
        If not passed, the execution driver will be detected as described
        :ref:`above <docker-execution-driver>`.
    overwrite : False
        Unless set to ``True``, an error is raised if a file already exists at
        the location given by the ``dest`` argument.
    makedirs : False
        Create the parent directory on the container if it does not already
        exist.
    **RETURN DATA**
    A boolean (``True`` if successful, otherwise ``False``)
    CLI Example:
    .. code-block:: bash
        salt myminion docker.copy_to mycontainer /tmp/foo /root/foo
    """
    # Resolve the driver lazily so detection only runs when not supplied.
    driver = _get_exec_driver() if exec_driver is None else exec_driver
    # Fileserver URLs (salt://...) are cached locally before the copy.
    cached_source = __salt__["container_resource.cache_file"](source)
    return __salt__["container_resource.copy_to"](
        name,
        cached_source,
        dest,
        container_type=__virtualname__,
        exec_driver=driver,
        overwrite=overwrite,
        makedirs=makedirs,
    )
def export(name, path, overwrite=False, makedirs=False, compression=None, **kwargs):
    """
    Exports a container to a tar archive. It can also optionally compress that
    tar archive, and push it up to the Master.
    name
        Container name or ID
    path
        Absolute path on the Minion where the container will be exported
    overwrite : False
        Unless this option is set to ``True``, then if a file exists at the
        location specified by the ``path`` argument, an error will be raised.
    makedirs : False
        If ``True``, then if the parent directory of the file specified by the
        ``path`` argument does not exist, Salt will attempt to create it.
    compression : None
        Can be set to any of the following:
        - ``gzip`` or ``gz`` for gzip compression
        - ``bzip2`` or ``bz2`` for bzip2 compression
        - ``xz`` or ``lzma`` for XZ compression (requires `xz-utils`_, as well
          as the ``lzma`` module from Python 3.3, available in Python 2 and
          Python 3.0-3.2 as `backports.lzma`_)
        This parameter can be omitted and Salt will attempt to determine the
        compression type by examining the filename passed in the ``path``
        parameter.
    .. _`xz-utils`: http://tukaani.org/xz/
    .. _`backports.lzma`: https://pypi.python.org/pypi/backports.lzma
    push : False
        If ``True``, the container will be pushed to the master using
        :py:func:`cp.push <salt.modules.cp.push>`.
    .. note::
        This requires :conf_master:`file_recv` to be set to ``True`` on the
        Master.
    **RETURN DATA**
    A dictionary containing the following keys:
    - ``Path`` - Path of the file that was exported
    - ``Push`` - Reports whether or not the file was successfully pushed to the
      Master
      *(Only present if push=True)*
    - ``Size`` - Size of the file, in bytes
    - ``Size_Human`` - Size of the file, in human-readable units
    - ``Time_Elapsed`` - Time in seconds taken to perform the export
    CLI Examples:
    .. code-block:: bash
        salt myminion docker.export mycontainer /tmp/mycontainer.tar
        salt myminion docker.export mycontainer /tmp/mycontainer.tar.xz push=True
    """
    err = "Path '{}' is not absolute".format(path)
    try:
        if not os.path.isabs(path):
            raise SaltInvocationError(err)
    except AttributeError:
        # Non-string path (e.g. an int passed from the CLI) has no .isabs
        raise SaltInvocationError(err)
    if os.path.exists(path) and not overwrite:
        raise CommandExecutionError("{} already exists".format(path))
    if compression is None:
        # Infer the compression type from the destination filename
        if path.endswith(".tar.gz") or path.endswith(".tgz"):
            compression = "gzip"
        elif path.endswith(".tar.bz2") or path.endswith(".tbz2"):
            compression = "bzip2"
        elif path.endswith(".tar.xz") or path.endswith(".txz"):
            if HAS_LZMA:
                compression = "xz"
            else:
                raise CommandExecutionError(
                    "XZ compression unavailable. Install the backports.lzma "
                    "module and xz-utils to enable XZ compression."
                )
    elif compression == "gz":
        compression = "gzip"
    elif compression == "bz2":
        compression = "bzip2"
    elif compression == "lzma":
        compression = "xz"
    if compression and compression not in ("gzip", "bzip2", "xz"):
        raise SaltInvocationError("Invalid compression type '{}'".format(compression))
    parent_dir = os.path.dirname(path)
    if not os.path.isdir(parent_dir):
        if not makedirs:
            raise CommandExecutionError(
                "Parent dir {} of destination path does not exist. Use "
                "makedirs=True to create it.".format(parent_dir)
            )
        try:
            os.makedirs(parent_dir)
        except OSError as exc:
            raise CommandExecutionError(
                "Unable to make parent dir {}: {}".format(parent_dir, exc)
            )
    # Initialize so the finally block below never hits an unbound name if the
    # non-gzip open inside the try block raises.
    out = None
    if compression == "gzip":
        try:
            out = gzip.open(path, "wb")
        except OSError as exc:
            raise CommandExecutionError(
                "Unable to open {} for writing: {}".format(path, exc)
            )
    elif compression == "bzip2":
        compressor = bz2.BZ2Compressor()
    elif compression == "xz":
        compressor = lzma.LZMACompressor()
    time_started = time.time()
    try:
        if compression != "gzip":
            # gzip doesn't use a Compressor object, it uses a .open() method to
            # open the filehandle. If not using gzip, we need to open the
            # filehandle here. We make sure to close it in the "finally" block
            # below.
            out = __utils__["files.fopen"](
                path, "wb"
            )  # pylint: disable=resource-leakage
        response = _client_wrapper("export", name)
        buf = None
        while buf != "":
            buf = response.read(4096)
            if buf:
                if compression in ("bzip2", "xz"):
                    data = compressor.compress(buf)
                    if data:
                        out.write(data)
                else:
                    out.write(buf)
        if compression in ("bzip2", "xz"):
            # Flush any remaining data out of the compressor
            data = compressor.flush()
            if data:
                out.write(data)
        out.flush()
    except Exception as exc:  # pylint: disable=broad-except
        # Remove the (possibly partial) output file before re-raising
        try:
            os.remove(path)
        except OSError:
            pass
        raise CommandExecutionError(
            "Error occurred during container export: {}".format(exc)
        )
    finally:
        if out is not None:
            out.close()
    ret = {"Time_Elapsed": time.time() - time_started}
    ret["Path"] = path
    ret["Size"] = os.stat(path).st_size
    ret["Size_Human"] = _size_fmt(ret["Size"])
    # Process push. NOTE: the kwargs key is the string "push" -- the previous
    # code looked up the bare name ``push``, which was a NameError at runtime.
    if kwargs.get("push", False):
        ret["Push"] = __salt__["cp.push"](path)
    return ret
@_refresh_mine_cache
def rm_(name, force=False, volumes=False, **kwargs):
    """
    Removes a container
    name
        Container name or ID
    force : False
        If ``True``, kill the container before removing it; the Docker API
        refuses to remove a running container. Defaults to ``False`` to guard
        against accidental removal of a running container.
    stop : False
        If ``True``, stop the container before removing it; the Docker API
        refuses to remove a running container. Defaults to ``False`` to guard
        against accidental removal of a running container.
        .. versionadded:: 2017.7.0
    timeout
        Optional timeout to be passed to :py:func:`docker.stop
        <salt.modules.dockermod.stop>` if stopping the container.
        .. versionadded:: 2018.3.0
    volumes : False
        Also remove volumes associated with container
    **RETURN DATA**
    A list of the IDs of containers which were removed
    CLI Example:
    .. code-block:: bash
        salt myminion docker.rm mycontainer
        salt myminion docker.rm mycontainer force=True
    """
    kwargs = __utils__["args.clean_kwargs"](**kwargs)
    requested_stop = kwargs.pop("stop", False)
    stop_timeout = kwargs.pop("timeout", None)
    will_auto_remove = False
    if kwargs:
        __utils__["args.invalid_kwargs"](kwargs)
    # Refuse to remove a running container unless the caller opted in.
    if state(name) == "running" and not (force or requested_stop):
        raise CommandExecutionError(
            "Container '{}' is running, use force=True to forcibly "
            "remove this container".format(name)
        )
    if requested_stop and not force:
        # Honor the container's AutoRemove setting: if Docker removes the
        # container itself once stopped, skip the explicit removal below.
        inspect_results = inspect_container(name)
        try:
            will_auto_remove = inspect_results["HostConfig"]["AutoRemove"]
        except KeyError:
            log.error(
                "Failed to find AutoRemove in inspect results, Docker API may "
                "have changed. Full results: %s",
                inspect_results,
            )
        stop(name, timeout=stop_timeout)
    pre = ps_(all=True)
    if not will_auto_remove:
        _client_wrapper("remove_container", name, v=volumes, force=force)
    _clear_context()
    # Whatever disappeared from the container list is what was removed.
    return [cid for cid in pre if cid not in ps_(all=True)]
def rename(name, new_name):
    """
    .. versionadded:: 2017.7.0
    Renames a container. Returns ``True`` on success; raises an error if the
    API returns one. If the API reports neither success nor an error (which
    should not happen), ``False`` is returned.
    name
        Name or ID of existing container
    new_name
        New name to assign to container
    CLI Example:
    .. code-block:: bash
        salt myminion docker.rename foo bar
    """
    orig_id = inspect_container(name)["Id"]
    log.debug("Renaming container '%s' (ID: %s) to '%s'", name, orig_id, new_name)
    _client_wrapper("rename", orig_id, new_name)
    # Verify: the container now reachable under new_name must be the very
    # same container (same ID) we started with.
    return inspect_container(new_name)["Id"] == orig_id
# Functions to manage images
def build(
    path=None,
    repository=None,
    tag=None,
    cache=True,
    rm=True,
    api_response=False,
    fileobj=None,
    dockerfile=None,
    buildargs=None,
):
    """
    .. versionchanged:: 2018.3.0
        If the built image should be tagged, then the repository and tag must
        now be passed separately using the ``repository`` and ``tag``
        arguments, rather than together in the (now deprecated) ``image``
        argument.
    Builds a docker image from a Dockerfile or a URL
    path
        Path to directory on the Minion containing a Dockerfile
    repository
        Optional repository name for the image being built
        .. versionadded:: 2018.3.0
    tag : latest
        Tag name for the image (required if ``repository`` is passed)
        .. versionadded:: 2018.3.0
    image
        .. deprecated:: 2018.3.0
            Use both ``repository`` and ``tag`` instead
    cache : True
        Set to ``False`` to force the build process not to use the Docker image
        cache, and pull all required intermediate image layers
    rm : True
        Remove intermediate containers created during build
    api_response : False
        If ``True``: an ``API_Response`` key will be present in the return
        data, containing the raw output from the Docker API.
    fileobj
        Allows for a file-like object containing the contents of the Dockerfile
        to be passed in place of a file ``path`` argument. This argument should
        not be used from the CLI, only from other Salt code.
    dockerfile
        Allows for an alternative Dockerfile to be specified. Path to
        alternative Dockerfile is relative to the build path for the Docker
        container.
        .. versionadded:: 2016.11.0
    buildargs
        A dictionary of build arguments provided to the docker build process.
    **RETURN DATA**
    A dictionary containing one or more of the following keys:
    - ``Id`` - ID of the newly-built image
    - ``Time_Elapsed`` - Time in seconds taken to perform the build
    - ``Intermediate_Containers`` - IDs of containers created during the course
      of the build process
      *(Only present if rm=False)*
    - ``Images`` - A dictionary containing one or more of the following keys:
        - ``Already_Pulled`` - Layers that that were already present on the
          Minion
        - ``Pulled`` - Layers that that were pulled
      *(Only present if the image specified by the "repository" and "tag"
      arguments was not present on the Minion, or if cache=False)*
    - ``Status`` - A string containing a summary of the pull action (usually a
      message saying that an image was downloaded, or that it was up to date).
      *(Only present if the image specified by the "repository" and "tag"
      arguments was not present on the Minion, or if cache=False)*
    CLI Example:
    .. code-block:: bash
        salt myminion docker.build /path/to/docker/build/dir
        salt myminion docker.build https://github.com/myuser/myrepo.git repository=myimage tag=latest
        salt myminion docker.build /path/to/docker/build/dir dockerfile=Dockefile.different repository=myimage tag=dev
    """
    _prep_pull()
    if repository or tag:
        # Bugfix: the previous check (``not repository and tag``) only caught
        # tag-without-repository; repository-without-tag slipped through and
        # the image ended up tagged as "<repo>:None". Require both or neither.
        if not repository or not tag:
            raise SaltInvocationError(
                "If tagging, both a repository and tag are required"
            )
        else:
            # Numeric repos/tags may arrive from the CLI; coerce to str
            if not isinstance(repository, str):
                repository = str(repository)
            if not isinstance(tag, str):
                tag = str(tag)
    # For the build function in the low-level API, the "tag" refers to the full
    # tag (e.g. myuser/myimage:mytag). This is different than in other
    # functions, where the repo and tag are passed separately.
    image_tag = "{}:{}".format(repository, tag) if repository and tag else None
    time_started = time.time()
    response = _client_wrapper(
        "build",
        path=path,
        tag=image_tag,
        quiet=False,
        fileobj=fileobj,
        rm=rm,
        nocache=not cache,
        dockerfile=dockerfile,
        buildargs=buildargs,
    )
    ret = {"Time_Elapsed": time.time() - time_started}
    _clear_context()
    if not response:
        raise CommandExecutionError(
            "Build failed for {}, no response returned from Docker API".format(path)
        )
    stream_data = []
    for line in response:
        stream_data.extend(salt.utils.json.loads(line, cls=DockerJSONDecoder))
    errors = []
    # Iterate through API response and collect information
    for item in stream_data:
        try:
            item_type = next(iter(item))
        except StopIteration:
            continue
        # "status" events come from the implicit pull of base layers, "stream"
        # events from the build steps themselves.
        if item_type == "status":
            _pull_status(ret, item)
        if item_type == "stream":
            _build_status(ret, item)
        elif item_type == "errorDetail":
            _error_detail(errors, item)
    if "Id" not in ret:
        # API returned information, but there was no confirmation of a
        # successful build.
        msg = "Build failed for {}".format(path)
        log.error(msg)
        log.error(stream_data)
        if errors:
            msg += ". Error(s) follow:\n\n{}".format("\n\n".join(errors))
        raise CommandExecutionError(msg)
    resolved_tag = resolve_tag(ret["Id"], all=True)
    if resolved_tag:
        ret["Image"] = resolved_tag
    else:
        ret["Warning"] = "Failed to tag image as {}".format(image_tag)
    if api_response:
        ret["API_Response"] = stream_data
    if rm:
        ret.pop("Intermediate_Containers", None)
    return ret
def commit(name, repository, tag="latest", message=None, author=None):
    """
    .. versionchanged:: 2018.3.0
        The repository and tag must now be passed separately using the
        ``repository`` and ``tag`` arguments, rather than together in the (now
        deprecated) ``image`` argument.
    Commits a container, promoting it to an image. Equivalent to running the
    ``docker commit`` Docker CLI command.
    name
        Container name or ID to commit
    repository
        Repository name for the image being committed
        .. versionadded:: 2018.3.0
    tag : latest
        Tag name for the image
        .. versionadded:: 2018.3.0
    image
        .. deprecated:: 2018.3.0
            Use both ``repository`` and ``tag`` instead
    message
        Commit message (Optional)
    author
        Author name (Optional)
    **RETURN DATA**
    A dictionary containing the following keys:
    - ``Id`` - ID of the newly-created image
    - ``Image`` - Name of the newly-created image
    - ``Time_Elapsed`` - Time in seconds taken to perform the commit
    CLI Example:
    .. code-block:: bash
        salt myminion docker.commit mycontainer myuser/myimage mytag
    """
    # Coerce non-string repo/tag (e.g. numeric tags from the CLI) to str;
    # str() on an existing str is a no-op.
    repository = str(repository)
    tag = str(tag)
    time_started = time.time()
    response = _client_wrapper(
        "commit", name, repository=repository, tag=tag, message=message, author=author
    )
    ret = {"Time_Elapsed": time.time() - time_started}
    _clear_context()
    # API versions have disagreed on the key's capitalization; accept any.
    image_id = next(
        (response[key] for key in ("Id", "id", "ID") if key in response), None
    )
    if image_id is None:
        raise CommandExecutionError("No image ID was returned in API response")
    ret["Id"] = image_id
    return ret
def dangling(prune=False, force=False):
    """
    Return top-level images (those on which no other images depend) which do
    not have a tag assigned to them. These include:
    - Images which were once tagged but were later untagged, such as those
      which were superseded by committing a new copy of an existing tagged
      image.
    - Images which were loaded using :py:func:`docker.load
      <salt.modules.dockermod.load>` (or the ``docker load`` Docker CLI
      command), but not tagged.
    prune : False
        Remove these images
    force : False
        If ``True``, and if ``prune=True``, then forcibly remove these images.
    **RETURN DATA**
    If ``prune=False``, the return data will be a list of dangling image IDs.
    If ``prune=True``, the return data will be a dictionary with each key being
    the ID of the dangling image, and the following information for each image:
    - ``Comment`` - Any error encountered when trying to prune a dangling image
      *(Only present if prune failed)*
    - ``Removed`` - A boolean (``True`` if prune was successful, ``False`` if
      not)
    CLI Example:
    .. code-block:: bash
        salt myminion docker.dangling
        salt myminion docker.dangling prune=True
    """
    all_images = images(all=True)
    # A dangling image is a top-level layer with no repo tags; report the
    # short (12-char) ID form.
    untagged = []
    for image_id in _get_top_level_images(all_images):
        if all_images[image_id]["RepoTags"] is None:
            untagged.append(image_id[:12])
    if not prune:
        return untagged
    results = {}
    for image in untagged:
        try:
            results.setdefault(image, {})["Removed"] = rmi(image, force=force)
        except Exception as exc:  # pylint: disable=broad-except
            # Record the failure but keep pruning the rest.
            err = exc.__str__()
            log.error(err)
            results.setdefault(image, {})["Comment"] = err
            results[image]["Removed"] = False
    return results
def import_(source, repository, tag="latest", api_response=False):
    """
    .. versionchanged:: 2018.3.0
        The repository and tag must now be passed separately using the
        ``repository`` and ``tag`` arguments, rather than together in the (now
        deprecated) ``image`` argument.
    Imports content from a local tarball or a URL as a new docker image
    source
        Content to import (URL or absolute path to a tarball). URL can be a
        file on the Salt fileserver (i.e.
        ``salt://path/to/rootfs/tarball.tar.xz``. To import a file from a
        saltenv other than ``base`` (e.g. ``dev``), pass it at the end of the
        URL (ex. ``salt://path/to/rootfs/tarball.tar.xz?saltenv=dev``).
    repository
        Repository name for the image being imported
        .. versionadded:: 2018.3.0
    tag : latest
        Tag name for the image
        .. versionadded:: 2018.3.0
    image
        .. deprecated:: 2018.3.0
            Use both ``repository`` and ``tag`` instead
    api_response : False
        If ``True`` an ``api_response`` key will be present in the return data,
        containing the raw output from the Docker API.
    **RETURN DATA**
    A dictionary containing the following keys:
    - ``Id`` - ID of the newly-created image
    - ``Image`` - Name of the newly-created image
    - ``Time_Elapsed`` - Time in seconds taken to perform the commit
    CLI Example:
    .. code-block:: bash
        salt myminion docker.import /tmp/cent7-minimal.tar.xz myuser/centos
        salt myminion docker.import /tmp/cent7-minimal.tar.xz myuser/centos:7
        salt myminion docker.import salt://dockerimages/cent7-minimal.tar.xz myuser/centos:7
    """
    # Coerce non-string repo/tag to str; str() on an existing str is a no-op.
    repository = str(repository)
    tag = str(tag)
    # Fileserver URLs are cached locally first.
    path = __salt__["container_resource.cache_file"](source)
    time_started = time.time()
    response = _client_wrapper("import_image", path, repository=repository, tag=tag)
    ret = {"Time_Elapsed": time.time() - time_started}
    _clear_context()
    if not response:
        raise CommandExecutionError(
            "Import failed for {}, no response returned from Docker API".format(source)
        )
    elif api_response:
        ret["API_Response"] = response
    error_msgs = []
    # Walk the API event stream and collect status/error information
    for event in response:
        try:
            event_type = next(iter(event))
        except StopIteration:
            continue
        if event_type == "status":
            _import_status(ret, event, repository, tag)
        elif event_type == "errorDetail":
            _error_detail(error_msgs, event)
    if "Id" not in ret:
        # API returned information, but there was no confirmation of a
        # successful import.
        msg = "Import failed for {}".format(source)
        if error_msgs:
            msg += ". Error(s) follow:\n\n{}".format("\n\n".join(error_msgs))
        raise CommandExecutionError(msg)
    return ret
def load(path, repository=None, tag=None):
    """
    .. versionchanged:: 2018.3.0
        If the loaded image should be tagged, then the repository and tag must
        now be passed separately using the ``repository`` and ``tag``
        arguments, rather than together in the (now deprecated) ``image``
        argument.
    Load a tar archive that was created using :py:func:`docker.save
    <salt.modules.dockermod.save>` (or via the Docker CLI using ``docker save``).
    path
        Path to docker tar archive. Path can be a file on the Minion, or the
        URL of a file on the Salt fileserver (i.e.
        ``salt://path/to/docker/saved/image.tar``). To load a file from a
        saltenv other than ``base`` (e.g. ``dev``), pass it at the end of the
        URL (ex. ``salt://path/to/rootfs/tarball.tar.xz?saltenv=dev``).
    repository
        If specified, the topmost layer of the newly-loaded image will be
        tagged with the specified repo using :py:func:`docker.tag
        <salt.modules.dockermod.tag_>`. If a repository name is provided, then
        the ``tag`` argument is also required.
        .. versionadded:: 2018.3.0
    tag
        Tag name to go along with the repository name, if the loaded image is
        to be tagged.
        .. versionadded:: 2018.3.0
    image
        .. deprecated:: 2018.3.0
            Use both ``repository`` and ``tag`` instead
    **RETURN DATA**
    A dictionary will be returned, containing the following keys:
    - ``Path`` - Path of the file that was saved
    - ``Layers`` - A list containing the IDs of the layers which were loaded.
      Any layers in the file that was loaded, which were already present on the
      Minion, will not be included.
    - ``Image`` - Name of tag applied to topmost layer
      *(Only present if tag was specified and tagging was successful)*
    - ``Time_Elapsed`` - Time in seconds taken to load the file
    - ``Warning`` - Message describing any problems encountered in attempt to
      tag the topmost layer
      *(Only present if tag was specified and tagging failed)*
    CLI Example:
    .. code-block:: bash
        salt myminion docker.load /path/to/image.tar
        salt myminion docker.load salt://path/to/docker/saved/image.tar repository=myuser/myimage tag=mytag
    """
    # Tagging requires both pieces, or neither.
    if (repository or tag) and not (repository and tag):
        raise SaltInvocationError("If tagging, both a repository and tag are required")
    local_path = __salt__["container_resource.cache_file"](path)
    if not os.path.isfile(local_path):
        raise CommandExecutionError("Source file {} does not exist".format(path))
    pre_load = images(all=True)
    load_cmd = ["docker", "load", "-i", local_path]
    time_started = time.time()
    result = __salt__["cmd.run_all"](load_cmd)
    ret = {"Time_Elapsed": time.time() - time_started}
    _clear_context()
    post_load = images(all=True)
    if result["retcode"] != 0:
        msg = "Failed to load image(s) from {}".format(path)
        if result["stderr"]:
            msg += ": {}".format(result["stderr"])
        raise CommandExecutionError(msg)
    ret["Path"] = path
    # Newly-loaded layers are those present after the load but not before.
    new_layers = [layer for layer in post_load if layer not in pre_load]
    ret["Layers"] = [layer[:12] for layer in new_layers]
    top_level_images = _get_top_level_images(post_load, subset=new_layers)
    if repository or tag:
        if len(top_level_images) > 1:
            ret["Warning"] = (
                "More than one top-level image layer was loaded ({}), no "
                "image was tagged".format(", ".join(top_level_images))
            )
        else:
            # The repo and tag may not both be strings when passed (e.g. a
            # numeric tag would be loaded as an int or float); tag_ stringifies
            # them if needed, so str.format is the correct way to build the
            # name here.
            tagged_image = "{}:{}".format(repository, tag)
            try:
                result = tag_(top_level_images[0], repository=repository, tag=tag)
                ret["Image"] = tagged_image
            except IndexError:
                ret["Warning"] = (
                    "No top-level image layers were loaded, no " "image was tagged"
                )
            except Exception as exc:  # pylint: disable=broad-except
                ret["Warning"] = "Failed to tag {} as {}: {}".format(
                    top_level_images[0], tagged_image, exc
                )
    return ret
def layers(name):
    """
    Returns a list of the IDs of layers belonging to the specified image, with
    the top-most layer (the one corresponding to the passed name) appearing
    last.
    name
        Image name or ID
    CLI Example:
    .. code-block:: bash
        salt myminion docker.layers centos:7
    """
    cmd = ["docker", "history", "-q", name]
    # ``docker history`` lists the newest layer first; reverse so the
    # top-most layer comes last, as documented above.
    history = __salt__["cmd.run_stdout"](cmd, python_shell=False).splitlines()
    layer_ids = history[::-1]
    if not layer_ids:
        raise CommandExecutionError("Image '{}' not found".format(name))
    return layer_ids
def pull(
    image,
    insecure_registry=False,
    api_response=False,
    client_timeout=salt.utils.dockermod.CLIENT_TIMEOUT,
):
    """
    .. versionchanged:: 2018.3.0
        If no tag is specified in the ``image`` argument, all tags for the
        image will be pulled. For this reason is it recommended to pass
        ``image`` using the ``repo:tag`` notation.
    Pulls an image from a Docker registry
    image
        Image to be pulled
    insecure_registry : False
        If ``True``, the Docker client will permit the use of insecure
        (non-HTTPS) registries.
    api_response : False
        If ``True``, an ``API_Response`` key will be present in the return
        data, containing the raw output from the Docker API.
        .. note::
            This may result in a **lot** of additional return data, especially
            for larger images.
    client_timeout
        Timeout in seconds for the Docker client. This is not a timeout for
        this function, but for receiving a response from the API.
    **RETURN DATA**
    A dictionary will be returned, containing the following keys:
    - ``Layers`` - A dictionary containing one or more of the following keys:
        - ``Already_Pulled`` - Layers that that were already present on the
          Minion
        - ``Pulled`` - Layers that that were pulled
    - ``Status`` - A string containing a summary of the pull action (usually a
      message saying that an image was downloaded, or that it was up to date).
    - ``Time_Elapsed`` - Time in seconds taken to perform the pull
    CLI Example:
    .. code-block:: bash
        salt myminion docker.pull centos
        salt myminion docker.pull centos:6
    """
    _prep_pull()
    client_kwargs = {"stream": True, "client_timeout": client_timeout}
    if insecure_registry:
        client_kwargs["insecure_registry"] = insecure_registry
    time_started = time.time()
    response = _client_wrapper("pull", image, **client_kwargs)
    ret = {"Time_Elapsed": time.time() - time_started, "retcode": 0}
    _clear_context()
    if not response:
        raise CommandExecutionError(
            "Pull failed for {}, no response returned from Docker API".format(image)
        )
    if api_response:
        ret["API_Response"] = response
    error_msgs = []
    # Walk the API event stream and collect status/error information
    for event in response:
        log.debug("pull event: %s", event)
        try:
            event = salt.utils.json.loads(event)
        except Exception as exc:  # pylint: disable=broad-except
            raise CommandExecutionError(
                "Unable to interpret API event: '{}'".format(event),
                info={"Error": exc.__str__()},
            )
        try:
            event_type = next(iter(event))
        except StopIteration:
            continue
        if event_type == "status":
            _pull_status(ret, event)
        elif event_type == "errorDetail":
            _error_detail(error_msgs, event)
    if error_msgs:
        ret["Errors"] = error_msgs
        ret["retcode"] = 1
    return ret
def push(
    image,
    insecure_registry=False,
    api_response=False,
    client_timeout=salt.utils.dockermod.CLIENT_TIMEOUT,
):
    """
    .. versionchanged:: 2015.8.4
        The ``Id`` and ``Image`` keys are no longer present in the return data.
        This is due to changes in the Docker Remote API.
    Pushes an image to a Docker registry. See the documentation at top of this
    page to configure authentication credentials.
    image
        Image to be pushed. If just the repository name is passed, then all
        tagged images for the specified repo will be pushed. If the image name
        is passed in ``repo:tag`` notation, only the specified image will be
        pushed.
    insecure_registry : False
        If ``True``, the Docker client will permit the use of insecure
        (non-HTTPS) registries.
    api_response : False
        If ``True``, an ``API_Response`` key will be present in the return
        data, containing the raw output from the Docker API.
    client_timeout
        Timeout in seconds for the Docker client. This is not a timeout for
        this function, but for receiving a response from the API.
    **RETURN DATA**
    A dictionary will be returned, containing the following keys:
    - ``Layers`` - A dictionary containing one or more of the following keys:
        - ``Already_Pushed`` - Layers that that were already present on the
          Minion
        - ``Pushed`` - Layers that that were pushed
    - ``Time_Elapsed`` - Time in seconds taken to perform the push
    CLI Example:
    .. code-block:: bash
        salt myminion docker.push myuser/mycontainer
        salt myminion docker.push myuser/mycontainer:mytag
    """
    # Coerce non-string image names (e.g. numeric repos) to str
    if not isinstance(image, str):
        image = str(image)
    client_kwargs = {"stream": True, "client_timeout": client_timeout}
    if insecure_registry:
        client_kwargs["insecure_registry"] = insecure_registry
    time_started = time.time()
    response = _client_wrapper("push", image, **client_kwargs)
    ret = {"Time_Elapsed": time.time() - time_started, "retcode": 0}
    _clear_context()
    if not response:
        raise CommandExecutionError(
            "Push failed for {}, no response returned from Docker API".format(image)
        )
    if api_response:
        ret["API_Response"] = response
    error_msgs = []
    # Walk the API event stream and collect status/error information
    for event in response:
        try:
            event = salt.utils.json.loads(event)
        except Exception as exc:  # pylint: disable=broad-except
            raise CommandExecutionError(
                "Unable to interpret API event: '{}'".format(event),
                info={"Error": exc.__str__()},
            )
        try:
            event_type = next(iter(event))
        except StopIteration:
            continue
        if event_type == "status":
            _push_status(ret, event)
        elif event_type == "errorDetail":
            _error_detail(error_msgs, event)
    if error_msgs:
        ret["Errors"] = error_msgs
        ret["retcode"] = 1
    return ret
def rmi(*names, **kwargs):
    """
    Removes an image
    name
        Name (in ``repo:tag`` notation) or ID of image.
    force : False
        If ``True``, the image will be removed even if the Minion has
        containers created from that image
    prune : True
        If ``True``, untagged parent image layers will be removed as well, set
        this to ``False`` to keep them.
    **RETURN DATA**
    A dictionary will be returned, containing the following two keys:
    - ``Layers`` - A list of the IDs of image layers that were removed
    - ``Tags`` - A list of the tags that were removed
    - ``Errors`` - A list of any errors that were encountered
    CLI Examples:
    .. code-block:: bash
        salt myminion docker.rmi busybox
        salt myminion docker.rmi busybox force=True
        salt myminion docker.rmi foo bar baz
    """
    # Snapshot layers/tags up front so we can report what disappeared.
    pre_images = images(all=True)
    pre_tags = list_tags()
    force = kwargs.get("force", False)
    noprune = not kwargs.get("prune", True)
    error_msgs = []
    for name in names:
        image_id = inspect_image(name)["Id"]
        try:
            _client_wrapper(
                "remove_image",
                image_id,
                force=force,
                noprune=noprune,
                catch_api_errors=False,
            )
        except docker.errors.APIError as exc:
            if exc.response.status_code != 409:
                error_msgs.append(
                    "Error {}: {}".format(exc.response.status_code, exc.explanation)
                )
                continue
            # 409 Conflict: the image is still referenced; report what holds it
            error_msgs.append(exc.explanation)
            deps = depends(name)
            if deps["Containers"] or deps["Images"]:
                err = "Image is in use by "
                if deps["Containers"]:
                    err += "container(s): {}".format(", ".join(deps["Containers"]))
                if deps["Images"]:
                    if deps["Containers"]:
                        err += " and "
                    err += "image(s): {}".format(", ".join(deps["Images"]))
                error_msgs.append(err)
    _clear_context()
    ret = {
        "Layers": [x for x in pre_images if x not in images(all=True)],
        "Tags": [x for x in pre_tags if x not in list_tags()],
        "retcode": 0,
    }
    if error_msgs:
        ret["Errors"] = error_msgs
        ret["retcode"] = 1
    return ret
def save(name, path, overwrite=False, makedirs=False, compression=None, **kwargs):
    """
    Saves an image to a file on the minion. Equivalent to running the
    ``docker save`` Docker CLI command, but unlike ``docker save`` this will
    also work on named images instead of just image IDs.

    name
        Name or ID of image. Specify a specific tag by using the ``repo:tag``
        notation.

    path
        Absolute path on the Minion where the image will be exported

    overwrite : False
        Unless this option is set to ``True``, then if the destination file
        exists an error will be raised.

    makedirs : False
        If ``True``, then if the parent directory of the file specified by the
        ``path`` argument does not exist, Salt will attempt to create it.

    compression : None
        Can be set to any of the following:

        - ``gzip`` or ``gz`` for gzip compression
        - ``bzip2`` or ``bz2`` for bzip2 compression
        - ``xz`` or ``lzma`` for XZ compression (requires `xz-utils`_, as well
          as the ``lzma`` module from Python 3.3, available in Python 2 and
          Python 3.0-3.2 as `backports.lzma`_)

        This parameter can be omitted and Salt will attempt to determine the
        compression type by examining the filename passed in the ``path``
        parameter.

        .. note::
            Since the Docker API does not support ``docker save``, compression
            will be a bit slower with this function than with
            :py:func:`docker.export <salt.modules.dockermod.export>` since the
            image(s) will first be saved and then the compression done
            afterwards.

        .. _`xz-utils`: http://tukaani.org/xz/
        .. _`backports.lzma`: https://pypi.python.org/pypi/backports.lzma

    push : False
        If ``True``, the container will be pushed to the master using
        :py:func:`cp.push <salt.modules.cp.push>`.

        .. note::
            This requires :conf_master:`file_recv` to be set to ``True`` on the
            Master.

    **RETURN DATA**

    A dictionary will be returned, containing the following keys:

    - ``Path`` - Path of the file that was saved
    - ``Push`` - Reports whether or not the file was successfully pushed to the
      Master

      *(Only present if push=True)*
    - ``Size`` - Size of the file, in bytes
    - ``Size_Human`` - Size of the file, in human-readable units
    - ``Time_Elapsed`` - Time in seconds taken to perform the save

    CLI Examples:

    .. code-block:: bash

        salt myminion docker.save centos:7 /tmp/cent7.tar
        salt myminion docker.save 0123456789ab cdef01234567 /tmp/saved.tar
    """
    err = "Path '{}' is not absolute".format(path)
    try:
        if not os.path.isabs(path):
            raise SaltInvocationError(err)
    except AttributeError:
        # path is not a string-like object
        raise SaltInvocationError(err)
    if os.path.exists(path) and not overwrite:
        raise CommandExecutionError("{} already exists".format(path))
    if compression is None:
        # Infer the compression type from the destination filename
        if path.endswith(".tar.gz") or path.endswith(".tgz"):
            compression = "gzip"
        elif path.endswith(".tar.bz2") or path.endswith(".tbz2"):
            compression = "bzip2"
        elif path.endswith(".tar.xz") or path.endswith(".txz"):
            if HAS_LZMA:
                compression = "xz"
            else:
                raise CommandExecutionError(
                    "XZ compression unavailable. Install the backports.lzma "
                    "module and xz-utils to enable XZ compression."
                )
    elif compression == "gz":
        compression = "gzip"
    elif compression == "bz2":
        compression = "bzip2"
    elif compression == "lzma":
        compression = "xz"
    if compression and compression not in ("gzip", "bzip2", "xz"):
        raise SaltInvocationError("Invalid compression type '{}'".format(compression))
    parent_dir = os.path.dirname(path)
    if not os.path.isdir(parent_dir):
        if not makedirs:
            raise CommandExecutionError(
                "Parent dir '{}' of destination path does not exist. Use "
                "makedirs=True to create it.".format(parent_dir)
            )
    if compression:
        # Save uncompressed to a temp file first, then compress into `path`
        saved_path = __utils__["files.mkstemp"]()
    else:
        saved_path = path
    # use the image name if its valid if not use the image id
    image_to_save = (
        name if name in inspect_image(name)["RepoTags"] else inspect_image(name)["Id"]
    )
    cmd = ["docker", "save", "-o", saved_path, image_to_save]
    time_started = time.time()
    result = __salt__["cmd.run_all"](cmd, python_shell=False)
    if result["retcode"] != 0:
        err = "Failed to save image(s) to {}".format(path)
        if result["stderr"]:
            err += ": {}".format(result["stderr"])
        raise CommandExecutionError(err)
    if compression:
        if compression == "gzip":
            try:
                out = gzip.open(path, "wb")
            except OSError as exc:
                raise CommandExecutionError(
                    "Unable to open {} for writing: {}".format(path, exc)
                )
        elif compression == "bzip2":
            compressor = bz2.BZ2Compressor()
        elif compression == "xz":
            compressor = lzma.LZMACompressor()
        try:
            with __utils__["files.fopen"](saved_path, "rb") as uncompressed:
                # No need to decode on read and encode on write, since we're
                # reading and immediately writing out bytes.
                if compression != "gzip":
                    # gzip doesn't use a Compressor object, it uses a .open()
                    # method to open the filehandle. If not using gzip, we need
                    # to open the filehandle here.
                    out = __utils__["files.fopen"](path, "wb")
                while True:
                    buf = uncompressed.read(4096)
                    if not buf:
                        # EOF reached. BUGFIX: the previous loop condition
                        # compared the bytes buffer against the str literal
                        # "" (never equal to b"" on Python 3), so the loop
                        # never terminated once the file was exhausted.
                        break
                    if compression in ("bzip2", "xz"):
                        data = compressor.compress(buf)
                        if data:
                            out.write(data)
                    else:
                        out.write(buf)
                if compression in ("bzip2", "xz"):
                    # Flush any remaining data out of the compressor
                    data = compressor.flush()
                    if data:
                        out.write(data)
                out.flush()
        except Exception as exc:  # pylint: disable=broad-except
            # Don't leave a partial/corrupt destination file behind
            try:
                os.remove(path)
            except OSError:
                pass
            raise CommandExecutionError(
                "Error occurred during image save: {}".format(exc)
            )
        finally:
            try:
                # Clean up temp file
                os.remove(saved_path)
            except OSError:
                pass
            out.close()
    ret = {"Time_Elapsed": time.time() - time_started}
    ret["Path"] = path
    ret["Size"] = os.stat(path).st_size
    ret["Size_Human"] = _size_fmt(ret["Size"])
    # Process push
    if kwargs.get("push", False):
        ret["Push"] = __salt__["cp.push"](path)
    return ret
def tag_(name, repository, tag="latest", force=False):
    """
    .. versionchanged:: 2018.3.0
        The repository and tag must now be passed separately using the
        ``repository`` and ``tag`` arguments, rather than together in the (now
        deprecated) ``image`` argument.

    Tag an image into a repository and return ``True``. If the tag was
    unsuccessful, an error will be raised.

    name
        ID of image

    repository
        Repository name for the image to be built

        .. versionadded:: 2018.3.0

    tag : latest
        Tag name for the image to be built

        .. versionadded:: 2018.3.0

    force : False
        Force apply tag

    CLI Example:

    .. code-block:: bash

        salt myminion docker.tag 0123456789ab myrepo/mycontainer mytag
    """
    # The Docker API expects string values for both repository and tag
    repository = repository if isinstance(repository, str) else str(repository)
    tag = tag if isinstance(tag, str) else str(tag)
    response = _client_wrapper(
        "tag", inspect_image(name)["Id"], repository=repository, tag=tag, force=force
    )
    _clear_context()
    # Only non-error return case is a True return, so just return the response
    return response
# Network Management
def networks(names=None, ids=None):
    """
    .. versionchanged:: 2017.7.0
        The ``names`` and ``ids`` can be passed as a comma-separated list now,
        as well as a Python list.
    .. versionchanged:: 2018.3.0
        The ``Containers`` key for each network is no longer always empty.

    List existing networks

    names
        Filter by name

    ids
        Filter by id

    CLI Example:

    .. code-block:: bash

        salt myminion docker.networks names=network-web
        salt myminion docker.networks ids=1f9d2454d0872b68dd9e8744c6e7a4c66b86f10abaccc21e14f7f014f729b2bc
    """
    split_input = __utils__["args.split_input"]
    if names is not None:
        names = split_input(names)
    if ids is not None:
        ids = split_input(ids)
    response = _client_wrapper("networks", names=names, ids=ids)
    # Work around https://github.com/docker/docker-py/issues/1775
    for idx, netinfo in enumerate(response):
        try:
            containers = inspect_network(netinfo["Id"])["Containers"]
        except Exception:  # pylint: disable=broad-except
            # Network may have disappeared between list and inspect; skip it
            pass
        else:
            if containers:
                response[idx]["Containers"] = containers
    return response
def create_network(
    name,
    skip_translate=None,
    ignore_collisions=False,
    validate_ip_addrs=True,
    client_timeout=salt.utils.dockermod.CLIENT_TIMEOUT,
    **kwargs
):
    """
    .. versionchanged:: 2018.3.0
        Support added for network configuration options other than ``driver``
        and ``driver_opts``, as well as IPAM configuration.

    Create a new network

    .. note::
        This function supports all arguments for network and IPAM pool
        configuration which are available for the release of docker-py
        installed on the minion. For that reason, the arguments described below
        in the :ref:`NETWORK CONFIGURATION ARGUMENTS
        <salt-modules-dockermod-create-network-netconf>` and :ref:`IP ADDRESS
        MANAGEMENT (IPAM) <salt-modules-dockermod-create-network-ipam>`
        sections may not accurately reflect what is available on the minion.
        The :py:func:`docker.get_client_args
        <salt.modules.dockermod.get_client_args>` function can be used to check
        the available arguments for the installed version of docker-py (they
        are found in the ``network_config`` and ``ipam_config`` sections of the
        return data), but Salt will not prevent a user from attempting to use
        an argument which is unsupported in the release of Docker which is
        installed. In those cases, network creation will be attempted but will
        fail.

    name
        Network name

    skip_translate
        This function translates Salt CLI or SLS input into the format which
        docker-py expects. However, in the event that Salt's translation logic
        fails (due to potential changes in the Docker Remote API, or to bugs in
        the translation code), this argument can be used to exert granular
        control over which arguments are translated and which are not.

        Pass this argument as a comma-separated list (or Python list) of
        arguments, and translation for each passed argument name will be
        skipped. Alternatively, pass ``True`` and *all* translation will be
        skipped.

        Skipping translation allows for arguments to be formatted directly in
        the format which docker-py expects. This allows for API changes and
        other issues to be more easily worked around. See the following links
        for more information:

        - `docker-py Low-level API`_
        - `Docker Engine API`_

        .. versionadded:: 2018.3.0

    ignore_collisions : False
        Since many of docker-py's arguments differ in name from their CLI
        counterparts (with which most Docker users are more familiar), Salt
        detects usage of these and aliases them to the docker-py version of
        that argument. However, if both the alias and the docker-py version of
        the same argument (e.g. ``options`` and ``driver_opts``) are used, an error
        will be raised. Set this argument to ``True`` to suppress these errors
        and keep the docker-py version of the argument.

        .. versionadded:: 2018.3.0

    validate_ip_addrs : True
        For parameters which accept IP addresses as input, IP address
        validation will be performed. To disable, set this to ``False``

        .. note::
            When validating subnets, whether or not the IP portion of the
            subnet is a valid subnet boundary will not be checked. The IP
            portion will be validated, and the subnet size will be checked to
            confirm it is a valid number (1-32 for IPv4, 1-128 for IPv6).

        .. versionadded:: 2018.3.0

    .. _salt-modules-dockermod-create-network-netconf:

    **NETWORK CONFIGURATION ARGUMENTS**

    driver
        Network driver

        Example: ``driver=macvlan``

    driver_opts (or *driver_opt*, or *options*)
        Options for the network driver. Either a dictionary of option names and
        values or a Python list of strings in the format ``varname=value``.

        Examples:

        - ``driver_opts='macvlan_mode=bridge,parent=eth0'``
        - ``driver_opts="['macvlan_mode=bridge', 'parent=eth0']"``
        - ``driver_opts="{'macvlan_mode': 'bridge', 'parent': 'eth0'}"``

    check_duplicate : True
        If ``True``, checks for networks with duplicate names. Since networks
        are primarily keyed based on a random ID and not on the name, and
        network name is strictly a user-friendly alias to the network which is
        uniquely identified using ID, there is no guaranteed way to check for
        duplicates. This option provides a best effort, checking for any
        networks which have the same name, but it is not guaranteed to catch
        all name collisions.

        Example: ``check_duplicate=False``

    internal : False
        If ``True``, restricts external access to the network

        Example: ``internal=True``

    labels
        Add metadata to the network. Labels can be set both with and without
        values:

        Examples (*with* values):

        - ``labels="label1=value1,label2=value2"``
        - ``labels="['label1=value1', 'label2=value2']"``
        - ``labels="{'label1': 'value1', 'label2': 'value2'}"``

        Examples (*without* values):

        - ``labels=label1,label2``
        - ``labels="['label1', 'label2']"``

    enable_ipv6 (or *ipv6*) : False
        Enable IPv6 on the network

        Example: ``enable_ipv6=True``

        .. note::
            While it should go without saying, this argument must be set to
            ``True`` to :ref:`configure an IPv6 subnet
            <salt-states-docker-network-present-ipam>`. Also, if this option is
            turned on without an IPv6 subnet explicitly configured, you will
            get an error unless you have set up a fixed IPv6 subnet. Consult
            the `Docker IPv6 docs`_ for information on how to do this.

            .. _`Docker IPv6 docs`: https://docs.docker.com/v17.09/engine/userguide/networking/default_network/ipv6/

    attachable : False
        If ``True``, and the network is in the global scope, non-service
        containers on worker nodes will be able to connect to the network.

        Example: ``attachable=True``

        .. note::
            While support for this option was added in API version 1.24, its
            value was not added to the inspect results until API version 1.26.
            The version of Docker which is available for CentOS 7 runs API
            version 1.24, meaning that while Salt can pass this argument to the
            API, it has no way of knowing the value of this config option in an
            existing Docker network.

    scope
        Specify the network's scope (``local``, ``global`` or ``swarm``)

        Example: ``scope=local``

    ingress : False
        If ``True``, create an ingress network which provides the routing-mesh in
        swarm mode

        Example: ``ingress=True``

    .. _salt-modules-dockermod-create-network-ipam:

    **IP ADDRESS MANAGEMENT (IPAM)**

    This function supports networks with either IPv4, or both IPv4 and IPv6. If
    configuring IPv4, then you can pass the IPAM arguments as shown below, as
    individual arguments on the Salt CLI. However, if configuring IPv4 and
    IPv6, the arguments must be passed as a list of dictionaries, in the
    ``ipam_pools`` argument. See the **CLI Examples** below. `These docs`_ also
    have more information on these arguments.

    .. _`These docs`: http://docker-py.readthedocs.io/en/stable/api.html#docker.types.IPAMPool

    *IPAM ARGUMENTS*

    ipam_driver
        IPAM driver to use, if different from the default one

        Example: ``ipam_driver=foo``

    ipam_opts
        Options for the IPAM driver. Either a dictionary of option names
        and values or a Python list of strings in the format
        ``varname=value``.

        Examples:

        - ``ipam_opts='foo=bar,baz=qux'``
        - ``ipam_opts="['foo=bar', 'baz=quz']"``
        - ``ipam_opts="{'foo': 'bar', 'baz': 'qux'}"``

    *IPAM POOL ARGUMENTS*

    subnet
        Subnet in CIDR format that represents a network segment

        Example: ``subnet=192.168.50.0/25``

    iprange (or *ip_range*)
        Allocate container IP from a sub-range within the subnet

        Subnet in CIDR format that represents a network segment

        Example: ``iprange=192.168.50.64/26``

    gateway
        IPv4 gateway for the master subnet

        Example: ``gateway=192.168.50.1``

    aux_addresses (or *aux_address*)
        A dictionary of mapping container names to IP addresses which should be
        allocated for them should they connect to the network. Either a
        dictionary of option names and values or a Python list of strings in
        the format ``host=ipaddr``.

        Examples:

        - ``aux_addresses='foo.bar.tld=192.168.50.10,hello.world.tld=192.168.50.11'``
        - ``aux_addresses="['foo.bar.tld=192.168.50.10', 'hello.world.tld=192.168.50.11']"``
        - ``aux_addresses="{'foo.bar.tld': '192.168.50.10', 'hello.world.tld': '192.168.50.11'}"``

    CLI Examples:

    .. code-block:: bash

        salt myminion docker.create_network web_network driver=bridge
        # IPv4
        salt myminion docker.create_network macvlan_network driver=macvlan driver_opts="{'parent':'eth0'}" gateway=172.20.0.1 subnet=172.20.0.0/24
        # IPv4 and IPv6
        salt myminion docker.create_network mynet ipam_pools='[{"subnet": "10.0.0.0/24", "gateway": "10.0.0.1"}, {"subnet": "fe3f:2180:26:1::60/123", "gateway": "fe3f:2180:26:1::61"}]'
    """
    # Translate CLI/SLS-shaped kwargs into the form docker-py expects
    kwargs = __utils__["docker.translate_input"](
        salt.utils.dockermod.translate.network,
        skip_translate=skip_translate,
        ignore_collisions=ignore_collisions,
        validate_ip_addrs=validate_ip_addrs,
        **__utils__["args.clean_kwargs"](**kwargs)
    )
    if "ipam" not in kwargs:
        # Collect any IPAM-related kwargs into a separate dict
        ipam_kwargs = {}
        for key in [
            x
            for x in ["ipam_driver", "ipam_opts"]
            + get_client_args("ipam_config")["ipam_config"]
            if x in kwargs
        ]:
            ipam_kwargs[key] = kwargs.pop(key)
        ipam_pools = kwargs.pop("ipam_pools", ())
        # Don't go through the work of building a config dict if no
        # IPAM-specific configuration was passed. Just create the network
        # without specifying IPAM configuration.
        if ipam_pools or ipam_kwargs:
            kwargs["ipam"] = __utils__["docker.create_ipam_config"](
                *ipam_pools, **ipam_kwargs
            )
    response = _client_wrapper("create_network", name, **kwargs)
    _clear_context()
    # _client_wrapper raises on error, so just return the response
    return response
def remove_network(network_id):
    """
    Remove a network

    network_id
        Network name or ID

    CLI Examples:

    .. code-block:: bash

        salt myminion docker.remove_network mynet
        salt myminion docker.remove_network 1f9d2454d0872b68dd9e8744c6e7a4c66b86f10abaccc21e14f7f014f729b2bc
    """
    # _client_wrapper raises on failure, so the response payload is not
    # needed (the previous `response` local was never used)
    _client_wrapper("remove_network", network_id)
    _clear_context()
    return True
def inspect_network(network_id):
    """
    Inspect Network

    network_id
        ID of network

    CLI Example:

    .. code-block:: bash

        salt myminion docker.inspect_network 1f9d2454d0872b68dd9e8744c6e7a4c66b86f10abaccc21e14f7f014f729b2bc
    """
    inspect_result = _client_wrapper("inspect_network", network_id)
    _clear_context()
    # _client_wrapper raises on error, so whatever comes back is valid data
    return inspect_result
def connect_container_to_network(container, net_id, **kwargs):
    """
    .. versionadded:: 2015.8.3
    .. versionchanged:: 2017.7.0
        Support for ``ipv4_address`` argument added
    .. versionchanged:: 2018.3.0
        All arguments are now passed through to
        `connect_container_to_network()`_, allowing for any new arguments added
        to this function to be supported automagically.

    Connect container to network. See the `connect_container_to_network()`_
    docs for information on supported arguments.

    container
        Container name or ID

    net_id
        Network name or ID

    CLI Examples:

    .. code-block:: bash

        salt myminion docker.connect_container_to_network web-1 mynet
        salt myminion docker.connect_container_to_network web-1 mynet ipv4_address=10.20.0.10
        salt myminion docker.connect_container_to_network web-1 1f9d2454d0872b68dd9e8744c6e7a4c66b86f10abaccc21e14f7f014f729b2bc
    """
    kwargs = __utils__["args.clean_kwargs"](**kwargs)
    log.debug(
        "Connecting container '%s' to network '%s' with the following "
        "configuration: %s",
        container,
        net_id,
        kwargs,
    )
    # Failures surface as exceptions from the client wrapper
    _client_wrapper("connect_container_to_network", container, net_id, **kwargs)
    log.debug(
        "Successfully connected container '%s' to network '%s'", container, net_id
    )
    _clear_context()
    return True
def disconnect_container_from_network(container, network_id):
    """
    .. versionadded:: 2015.8.3

    Disconnect container from network

    container
        Container name or ID

    network_id
        Network name or ID

    CLI Examples:

    .. code-block:: bash

        salt myminion docker.disconnect_container_from_network web-1 mynet
        salt myminion docker.disconnect_container_from_network web-1 1f9d2454d0872b68dd9e8744c6e7a4c66b86f10abaccc21e14f7f014f729b2bc
    """
    log.debug("Disconnecting container '%s' from network '%s'", container, network_id)
    # Failures surface as exceptions from the client wrapper
    _client_wrapper("disconnect_container_from_network", container, network_id)
    log.debug(
        "Successfully disconnected container '%s' from network '%s'",
        container,
        network_id,
    )
    _clear_context()
    return True
def disconnect_all_containers_from_network(network_id):
    """
    .. versionadded:: 2018.3.0

    Runs :py:func:`docker.disconnect_container_from_network
    <salt.modules.dockermod.disconnect_container_from_network>` on all
    containers connected to the specified network, and returns the names of all
    containers that were disconnected.

    network_id
        Network name or ID

    CLI Examples:

    .. code-block:: bash

        salt myminion docker.disconnect_all_containers_from_network mynet
        salt myminion docker.disconnect_all_containers_from_network 1f9d2454d0872b68dd9e8744c6e7a4c66b86f10abaccc21e14f7f014f729b2bc
    """
    disconnected = []
    errors = []
    for cname in connected(network_id):
        try:
            disconnect_container_from_network(cname, network_id)
        except CommandExecutionError as exc:
            msg = exc.__str__()
            # If 404 was in the error, the container no longer exists, so
            # to avoid a race condition we don't treat 404 errors as
            # meaning the removal failed.
            if "404" not in msg:
                errors.append(msg)
        else:
            disconnected.append(cname)
    if errors:
        raise CommandExecutionError(
            "One or more containers failed to be removed",
            info={"removed": disconnected, "errors": errors},
        )
    return disconnected
# Volume Management
def volumes(filters=None):
    """
    List existing volumes

    .. versionadded:: 2015.8.4

    filters
        There is one available filter: dangling=true

    CLI Example:

    .. code-block:: bash

        salt myminion docker.volumes filters="{'dangling': True}"
    """
    # _client_wrapper raises on error, so the response can be returned as-is
    return _client_wrapper("volumes", filters=filters)
def create_volume(name, driver=None, driver_opts=None):
    """
    Create a new volume

    .. versionadded:: 2015.8.4

    name
        name of volume

    driver
        Driver of the volume

    driver_opts
        Options for the driver volume

    CLI Example:

    .. code-block:: bash

        salt myminion docker.create_volume my_volume driver=local
    """
    created = _client_wrapper(
        "create_volume", name, driver=driver, driver_opts=driver_opts
    )
    _clear_context()
    # Errors raise inside the client wrapper; anything returned here is valid
    return created
def remove_volume(name):
    """
    Remove a volume

    .. versionadded:: 2015.8.4

    name
        Name of volume

    CLI Example:

    .. code-block:: bash

        salt myminion docker.remove_volume my_volume
    """
    # The client wrapper raises on failure; the response payload is unused
    _client_wrapper("remove_volume", name)
    _clear_context()
    return True
def inspect_volume(name):
    """
    Inspect Volume

    .. versionadded:: 2015.8.4

    name
        Name of volume

    CLI Example:

    .. code-block:: bash

        salt myminion docker.inspect_volume my_volume
    """
    volume_info = _client_wrapper("inspect_volume", name)
    _clear_context()
    # Errors raise inside the client wrapper, so this is valid inspect data
    return volume_info
# Functions to manage container state
@_refresh_mine_cache
def kill(name):
    """
    Kill all processes in a running container instead of performing a graceful
    shutdown

    name
        Container name or ID

    **RETURN DATA**

    A dictionary will be returned, containing the following keys:

    - ``status`` - A dictionary showing the prior state of the container as
      well as the new state
    - ``result`` - A boolean noting whether or not the action was successful
    - ``comment`` - Only present if the container cannot be killed

    CLI Example:

    .. code-block:: bash

        salt myminion docker.kill mycontainer
    """
    # Hard-kill the container; the expected post-state is "stopped"
    return _change_state(name, "kill", "stopped")
@_refresh_mine_cache
def pause(name):
    """
    Pauses a container

    name
        Container name or ID

    **RETURN DATA**

    A dictionary will be returned, containing the following keys:

    - ``status`` - A dictionary showing the prior state of the container as
      well as the new state
    - ``result`` - A boolean noting whether or not the action was successful
    - ``comment`` - Only present if the container cannot be paused

    CLI Example:

    .. code-block:: bash

        salt myminion docker.pause mycontainer
    """
    current = state(name)
    if current != "stopped":
        return _change_state(name, "pause", "paused")
    # A stopped container has no running processes to pause
    return {
        "result": False,
        "state": {"old": current, "new": current},
        "comment": "Container '{}' is stopped, cannot pause".format(name),
    }
# CLI alias: ``docker.freeze`` behaves identically to ``docker.pause``
freeze = salt.utils.functools.alias_function(pause, "freeze")
def restart(name, timeout=10):
    """
    Restarts a container

    name
        Container name or ID

    timeout : 10
        Timeout in seconds after which the container will be killed (if it has
        not yet gracefully shut down)

    **RETURN DATA**

    A dictionary will be returned, containing the following keys:

    - ``status`` - A dictionary showing the prior state of the container as
      well as the new state
    - ``result`` - A boolean noting whether or not the action was successful
    - ``restarted`` - If restart was successful, this key will be present and
      will be set to ``True``.

    CLI Examples:

    .. code-block:: bash

        salt myminion docker.restart mycontainer
        salt myminion docker.restart mycontainer timeout=20
    """
    result = _change_state(name, "restart", "running", timeout=timeout)
    if result["result"]:
        # Flag the successful restart for callers that look for this key
        result["restarted"] = True
    return result
@_refresh_mine_cache
def signal_(name, signal):
    """
    Send a signal to a container. Signals can be either strings or numbers, and
    are defined in the **Standard Signals** section of the ``signal(7)``
    manpage. Run ``man 7 signal`` on a Linux host to browse this manpage.

    name
        Container name or ID

    signal
        Signal to send to container

    **RETURN DATA**

    If the signal was successfully sent, ``True`` will be returned. Otherwise,
    an error will be raised.

    CLI Example:

    .. code-block:: bash

        salt myminion docker.signal mycontainer SIGHUP
    """
    # The Docker "kill" API call is the mechanism for sending arbitrary signals
    _client_wrapper("kill", name, signal=signal)
    return True
@_refresh_mine_cache
def start_(name):
    """
    Start a container

    name
        Container name or ID

    **RETURN DATA**

    A dictionary will be returned, containing the following keys:

    - ``status`` - A dictionary showing the prior state of the container as
      well as the new state
    - ``result`` - A boolean noting whether or not the action was successful
    - ``comment`` - Only present if the container cannot be started

    CLI Example:

    .. code-block:: bash

        salt myminion docker.start mycontainer
    """
    current = state(name)
    if current == "paused":
        # A paused container must be unpaused, not started
        return {
            "result": False,
            "state": {"old": current, "new": current},
            "comment": "Container '{}' is paused, cannot start".format(name),
        }
    return _change_state(name, "start", "running")
@_refresh_mine_cache
def stop(name, timeout=None, **kwargs):
    """
    Stops a running container

    name
        Container name or ID

    unpause : False
        If ``True`` and the container is paused, it will be unpaused before
        attempting to stop the container.

    timeout
        Timeout in seconds after which the container will be killed (if it has
        not yet gracefully shut down)

        .. versionchanged:: 2017.7.0
            If this argument is not passed, then the container's configuration
            will be checked. If the container was created using the
            ``stop_timeout`` argument, then the configured timeout will be
            used, otherwise the timeout will be 10 seconds.

    **RETURN DATA**

    A dictionary will be returned, containing the following keys:

    - ``status`` - A dictionary showing the prior state of the container as
      well as the new state
    - ``result`` - A boolean noting whether or not the action was successful
    - ``comment`` - Only present if the container can not be stopped

    CLI Examples:

    .. code-block:: bash

        salt myminion docker.stop mycontainer
        salt myminion docker.stop mycontainer unpause=True
        salt myminion docker.stop mycontainer timeout=20
    """
    if timeout is None:
        # Prefer the timeout configured on the container itself
        try:
            timeout = inspect_container(name)["Config"]["StopTimeout"]
        except KeyError:
            # Fall back to a global default defined in salt.utils.dockermod
            timeout = salt.utils.dockermod.SHUTDOWN_TIMEOUT
    orig_state = state(name)
    if orig_state == "paused":
        if not kwargs.get("unpause", False):
            return {
                "result": False,
                "state": {"old": orig_state, "new": orig_state},
                "comment": (
                    "Container '{}' is paused, run with "
                    "unpause=True to unpause before stopping".format(name)
                ),
            }
        unpause_result = _change_state(name, "unpause", "running")
        if unpause_result["result"] is False:
            unpause_result["comment"] = "Failed to unpause container '{}'".format(
                name
            )
            return unpause_result
    ret = _change_state(name, "stop", "stopped", timeout=timeout)
    # Report the state the container was in before any unpause step
    ret["state"]["old"] = orig_state
    return ret
@_refresh_mine_cache
def unpause(name):
    """
    Unpauses a container

    name
        Container name or ID

    **RETURN DATA**

    A dictionary will be returned, containing the following keys:

    - ``status`` - A dictionary showing the prior state of the container as
      well as the new state
    - ``result`` - A boolean noting whether or not the action was successful
    - ``comment`` - Only present if the container can not be unpaused

    CLI Example:

    .. code-block:: bash

        salt myminion docker.unpause mycontainer
    """
    # Note: the CLI example previously showed ``docker.pause`` by mistake
    orig_state = state(name)
    if orig_state == "stopped":
        # A stopped container has nothing to unpause
        return {
            "result": False,
            "state": {"old": orig_state, "new": orig_state},
            "comment": "Container '{}' is stopped, cannot unpause".format(name),
        }
    return _change_state(name, "unpause", "running")
# CLI alias: ``docker.unfreeze`` behaves identically to ``docker.unpause``
unfreeze = salt.utils.functools.alias_function(unpause, "unfreeze")
def wait(name, ignore_already_stopped=False, fail_on_exit_status=False):
    """
    Wait for the container to exit gracefully, and return its exit code

    .. note::
        This function will block until the container is stopped.

    name
        Container name or ID

    ignore_already_stopped
        Boolean flag that prevents execution to fail, if a container
        is already stopped.

    fail_on_exit_status
        Boolean flag to report execution as failure if ``exit_status``
        is different than 0.

    **RETURN DATA**

    A dictionary will be returned, containing the following keys:

    - ``status`` - A dictionary showing the prior state of the container as
      well as the new state
    - ``result`` - A boolean noting whether or not the action was successful
    - ``exit_status`` - Exit status for the container
    - ``comment`` - Only present if the container is already stopped

    CLI Example:

    .. code-block:: bash

        salt myminion docker.wait mycontainer
    """
    try:
        pre = state(name)
    except CommandExecutionError:
        # Container doesn't exist anymore
        return {
            "result": ignore_already_stopped,
            "comment": "Container '{}' absent".format(name),
        }
    already_stopped = pre == "stopped"
    exit_status = _client_wrapper("wait", name)
    _clear_context()
    try:
        post = state(name)
    except CommandExecutionError:
        # Container was removed while (or after) we waited
        post = None
    if already_stopped:
        success = ignore_already_stopped
    else:
        success = post == "stopped"
    result = {
        "result": success,
        "state": {"old": pre, "new": post},
        "exit_status": exit_status,
    }
    if already_stopped:
        result["comment"] = "Container '{}' already stopped".format(name)
    if fail_on_exit_status and result["result"]:
        # Downgrade success to failure on a nonzero exit code when requested
        result["result"] = result["exit_status"] == 0
    return result
def prune(
    containers=False,
    networks=False,
    images=False,
    build=False,
    volumes=False,
    system=None,
    **filters
):
    """
    .. versionadded:: 2019.2.0

    Prune Docker's various subsystems

    .. note::
        This requires docker-py version 2.1.0 or later.

    containers : False
        If ``True``, prunes stopped containers (documentation__)

        .. __: https://docs.docker.com/engine/reference/commandline/container_prune/#filtering

    images : False
        If ``True``, prunes unused images (documentation__)

        .. __: https://docs.docker.com/engine/reference/commandline/image_prune/#filtering

    networks : False
        If ``True``, prunes unreferenced networks (documentation__)

        .. __: https://docs.docker.com/engine/reference/commandline/network_prune/#filtering)

    build : False
        If ``True``, clears the builder cache

        .. note::
            Only supported in Docker 17.07.x and newer. Additionally, filters
            do not apply to this argument.

    volumes : False
        If ``True``, prunes unreferenced volumes (documentation__)

        .. __: https://docs.docker.com/engine/reference/commandline/volume_prune/

    system
        If ``True``, prunes containers, images, networks, and builder cache.
        Assumed to be ``True`` if none of ``containers``, ``images``,
        ``networks``, or ``build`` are set to ``True``.

        .. note::
            ``volumes=True`` must still be used to prune volumes

    filters
        - ``dangling=True`` (images only) - remove only dangling images
        - ``until=<timestamp>`` - only remove objects created before given
          timestamp. Not applicable to volumes. See the documentation links
          above for examples of valid time expressions.
        - ``label`` - only remove objects matching the label expression. Valid
          expressions include ``labelname`` or ``labelname=value``.

    CLI Examples:

    .. code-block:: bash

        salt myminion docker.prune system=True
        salt myminion docker.prune system=True until=12h
        salt myminion docker.prune images=True dangling=True
        salt myminion docker.prune images=True label=foo,bar=baz
    """
    # With no explicit subsystem selected, default to a full system prune
    if system is None and not any((containers, images, networks, build)):
        system = True
    filters = __utils__["args.clean_kwargs"](**filters)
    for fname in list(filters):
        if not isinstance(filters[fname], bool):
            # support comma-separated values
            filters[fname] = salt.utils.args.split_input(filters[fname])
    ret = {}
    if system or containers:
        ret["containers"] = _client_wrapper("prune_containers", filters=filters)
    if system or images:
        ret["images"] = _client_wrapper("prune_images", filters=filters)
    if system or networks:
        ret["networks"] = _client_wrapper("prune_networks", filters=filters)
    if system or build:
        try:
            # Doesn't exist currently in docker-py as of 3.0.1
            ret["build"] = _client_wrapper("prune_build", filters=filters)
        except SaltInvocationError:
            # It's not in docker-py yet, POST directly to the API endpoint
            ret["build"] = _client_wrapper(
                "_result",
                _client_wrapper("_post", _client_wrapper("_url", "/build/prune")),
                True,
            )
    if volumes:
        ret["volumes"] = _client_wrapper("prune_volumes", filters=filters)
    return ret
# Functions to run commands inside containers
@_refresh_mine_cache
def _run(
    name,
    cmd,
    exec_driver=None,
    output=None,
    stdin=None,
    python_shell=True,
    output_loglevel="debug",
    ignore_retcode=False,
    use_vt=False,
    keep_env=None,
):
    """
    Shared implementation behind the public ``docker.run*`` wrappers.

    Executes ``cmd`` inside the container ``name`` through the
    ``container_resource.run`` execution module and returns either the
    full result dict (when ``output`` is ``None`` or ``"all"``) or the
    single requested result field.
    """
    driver = _get_exec_driver() if exec_driver is None else exec_driver
    result = __salt__["container_resource.run"](
        name,
        cmd,
        container_type=__virtualname__,
        exec_driver=driver,
        output=output,
        stdin=stdin,
        python_shell=python_shell,
        output_loglevel=output_loglevel,
        ignore_retcode=ignore_retcode,
        use_vt=use_vt,
        keep_env=keep_env,
    )
    # None/"all" means the caller wants the whole result dict.
    if output not in (None, "all"):
        return result[output]
    return result
@_refresh_mine_cache
def _script(
    name,
    source,
    saltenv="base",
    args=None,
    template=None,
    exec_driver=None,
    stdin=None,
    python_shell=True,
    output_loglevel="debug",
    ignore_retcode=False,
    use_vt=False,
    keep_env=None,
):
    """
    Common logic to run a script on a container.

    The script is cached (and optionally templated) into a tempfile on the
    minion, copied into the container at the same path, made executable,
    run via ``run_all``, and then removed from both the minion and the
    container. Returns the ``run_all`` result dict, or a stub dict with
    ``cache_error: True`` when the script could not be fetched.
    """
    def _cleanup_tempfile(path):
        """
        Remove the tempfile allocated for the script
        """
        try:
            os.remove(path)
        except OSError as exc:
            log.error("cmd.script: Unable to clean tempfile '%s': %s", path, exc)
    # Keep the source's extension so interpreters keyed off it still work.
    path = __utils__["files.mkstemp"](
        dir="/tmp", prefix="salt", suffix=os.path.splitext(source)[1]
    )
    if template:
        # Render the source through the requested templating engine
        # directly into the tempfile.
        fn_ = __salt__["cp.get_template"](source, path, template, saltenv)
        if not fn_:
            _cleanup_tempfile(path)
            return {
                "pid": 0,
                "retcode": 1,
                "stdout": "",
                "stderr": "",
                "cache_error": True,
            }
    else:
        # No templating: cache the file and copy it into the tempfile.
        fn_ = __salt__["cp.cache_file"](source, saltenv)
        if not fn_:
            _cleanup_tempfile(path)
            return {
                "pid": 0,
                "retcode": 1,
                "stdout": "",
                "stderr": "",
                "cache_error": True,
            }
        shutil.copyfile(fn_, path)
    if exec_driver is None:
        exec_driver = _get_exec_driver()
    # The script lands at the same path inside the container.
    copy_to(name, path, path, exec_driver=exec_driver)
    run(name, "chmod 700 " + path)
    ret = run_all(
        name,
        path + " " + str(args) if args else path,
        exec_driver=exec_driver,
        stdin=stdin,
        python_shell=python_shell,
        output_loglevel=output_loglevel,
        ignore_retcode=ignore_retcode,
        use_vt=use_vt,
        keep_env=keep_env,
    )
    # Clean up on the minion and inside the container.
    _cleanup_tempfile(path)
    run(name, "rm " + path)
    return ret
def retcode(
    name,
    cmd,
    exec_driver=None,
    stdin=None,
    python_shell=True,
    output_loglevel="debug",
    use_vt=False,
    ignore_retcode=False,
    keep_env=None,
):
    """
    Run :py:func:`cmd.retcode <salt.modules.cmdmod.retcode>` within a container

    name
        Container name or ID in which to run the command

    cmd
        Command to run

    exec_driver : None
        If not passed, the execution driver will be detected as described
        :ref:`above <docker-execution-driver>`.

    stdin : None
        Standard input to be used for the command

    output_loglevel : debug
        Level at which to log the output from the command. Set to ``quiet`` to
        suppress logging.

    use_vt : False
        Use SaltStack's utils.vt to stream output to console.

    keep_env : None
        If not passed, only a sane default PATH environment variable will be
        set. If ``True``, all environment variables from the container's host
        will be kept. Otherwise, a comma-separated list (or Python list) of
        environment variable names can be passed, and those environment
        variables will be kept.

    CLI Example:

    .. code-block:: bash

        salt myminion docker.retcode mycontainer 'ls -l /etc'
    """
    # Delegate to the common runner, selecting only the return code.
    common = dict(
        exec_driver=exec_driver,
        stdin=stdin,
        python_shell=python_shell,
        output_loglevel=output_loglevel,
        use_vt=use_vt,
        ignore_retcode=ignore_retcode,
        keep_env=keep_env,
    )
    return _run(name, cmd, output="retcode", **common)
def run(
    name,
    cmd,
    exec_driver=None,
    stdin=None,
    python_shell=True,
    output_loglevel="debug",
    use_vt=False,
    ignore_retcode=False,
    keep_env=None,
):
    """
    Run :py:func:`cmd.run <salt.modules.cmdmod.run>` within a container

    name
        Container name or ID in which to run the command

    cmd
        Command to run

    exec_driver : None
        If not passed, the execution driver will be detected as described
        :ref:`above <docker-execution-driver>`.

    stdin : None
        Standard input to be used for the command

    output_loglevel : debug
        Level at which to log the output from the command. Set to ``quiet`` to
        suppress logging.

    use_vt : False
        Use SaltStack's utils.vt to stream output to console.

    keep_env : None
        If not passed, only a sane default PATH environment variable will be
        set. If ``True``, all environment variables from the container's host
        will be kept. Otherwise, a comma-separated list (or Python list) of
        environment variable names can be passed, and those environment
        variables will be kept.

    CLI Example:

    .. code-block:: bash

        salt myminion docker.run mycontainer 'ls -l /etc'
    """
    # output=None makes the common runner hand back the full result dict,
    # matching cmd.run semantics.
    common = dict(
        exec_driver=exec_driver,
        stdin=stdin,
        python_shell=python_shell,
        output_loglevel=output_loglevel,
        use_vt=use_vt,
        ignore_retcode=ignore_retcode,
        keep_env=keep_env,
    )
    return _run(name, cmd, output=None, **common)
def run_all(
    name,
    cmd,
    exec_driver=None,
    stdin=None,
    python_shell=True,
    output_loglevel="debug",
    use_vt=False,
    ignore_retcode=False,
    keep_env=None,
):
    """
    Run :py:func:`cmd.run_all <salt.modules.cmdmod.run_all>` within a container

    .. note::
        While the command is run within the container, it is initiated from the
        host. Therefore, the PID in the return dict is from the host, not from
        the container.

    name
        Container name or ID in which to run the command

    cmd
        Command to run

    exec_driver : None
        If not passed, the execution driver will be detected as described
        :ref:`above <docker-execution-driver>`.

    stdin : None
        Standard input to be used for the command

    output_loglevel : debug
        Level at which to log the output from the command. Set to ``quiet`` to
        suppress logging.

    use_vt : False
        Use SaltStack's utils.vt to stream output to console.

    keep_env : None
        If not passed, only a sane default PATH environment variable will be
        set. If ``True``, all environment variables from the container's host
        will be kept. Otherwise, a comma-separated list (or Python list) of
        environment variable names can be passed, and those environment
        variables will be kept.

    CLI Example:

    .. code-block:: bash

        salt myminion docker.run_all mycontainer 'ls -l /etc'
    """
    # "all" selects the complete result dict (pid/retcode/stdout/stderr).
    common = dict(
        exec_driver=exec_driver,
        stdin=stdin,
        python_shell=python_shell,
        output_loglevel=output_loglevel,
        use_vt=use_vt,
        ignore_retcode=ignore_retcode,
        keep_env=keep_env,
    )
    return _run(name, cmd, output="all", **common)
def run_stderr(
    name,
    cmd,
    exec_driver=None,
    stdin=None,
    python_shell=True,
    output_loglevel="debug",
    use_vt=False,
    ignore_retcode=False,
    keep_env=None,
):
    """
    Run :py:func:`cmd.run_stderr <salt.modules.cmdmod.run_stderr>` within a
    container

    name
        Container name or ID in which to run the command

    cmd
        Command to run

    exec_driver : None
        If not passed, the execution driver will be detected as described
        :ref:`above <docker-execution-driver>`.

    stdin : None
        Standard input to be used for the command

    output_loglevel : debug
        Level at which to log the output from the command. Set to ``quiet`` to
        suppress logging.

    use_vt : False
        Use SaltStack's utils.vt to stream output to console.

    keep_env : None
        If not passed, only a sane default PATH environment variable will be
        set. If ``True``, all environment variables from the container's host
        will be kept. Otherwise, a comma-separated list (or Python list) of
        environment variable names can be passed, and those environment
        variables will be kept.

    CLI Example:

    .. code-block:: bash

        salt myminion docker.run_stderr mycontainer 'ls -l /etc'
    """
    # Only the stderr field of the result is returned.
    common = dict(
        exec_driver=exec_driver,
        stdin=stdin,
        python_shell=python_shell,
        output_loglevel=output_loglevel,
        use_vt=use_vt,
        ignore_retcode=ignore_retcode,
        keep_env=keep_env,
    )
    return _run(name, cmd, output="stderr", **common)
def run_stdout(
    name,
    cmd,
    exec_driver=None,
    stdin=None,
    python_shell=True,
    output_loglevel="debug",
    use_vt=False,
    ignore_retcode=False,
    keep_env=None,
):
    """
    Run :py:func:`cmd.run_stdout <salt.modules.cmdmod.run_stdout>` within a
    container

    name
        Container name or ID in which to run the command

    cmd
        Command to run

    exec_driver : None
        If not passed, the execution driver will be detected as described
        :ref:`above <docker-execution-driver>`.

    stdin : None
        Standard input to be used for the command

    output_loglevel : debug
        Level at which to log the output from the command. Set to ``quiet`` to
        suppress logging.

    use_vt : False
        Use SaltStack's utils.vt to stream output to console.

    keep_env : None
        If not passed, only a sane default PATH environment variable will be
        set. If ``True``, all environment variables from the container's host
        will be kept. Otherwise, a comma-separated list (or Python list) of
        environment variable names can be passed, and those environment
        variables will be kept.

    CLI Example:

    .. code-block:: bash

        salt myminion docker.run_stdout mycontainer 'ls -l /etc'
    """
    # Only the stdout field of the result is returned.
    common = dict(
        exec_driver=exec_driver,
        stdin=stdin,
        python_shell=python_shell,
        output_loglevel=output_loglevel,
        use_vt=use_vt,
        ignore_retcode=ignore_retcode,
        keep_env=keep_env,
    )
    return _run(name, cmd, output="stdout", **common)
def script(
    name,
    source,
    saltenv="base",
    args=None,
    template=None,
    exec_driver=None,
    stdin=None,
    python_shell=True,
    output_loglevel="debug",
    ignore_retcode=False,
    use_vt=False,
    keep_env=None,
):
    """
    Run :py:func:`cmd.script <salt.modules.cmdmod.script>` within a container

    .. note::
        While the command is run within the container, it is initiated from the
        host. Therefore, the PID in the return dict is from the host, not from
        the container.

    name
        Container name or ID

    source
        Path to the script. Can be a local path on the Minion or a remote file
        from the Salt fileserver.

    args
        A string containing additional command-line options to pass to the
        script.

    template : None
        Templating engine to use on the script before running.

    exec_driver : None
        If not passed, the execution driver will be detected as described
        :ref:`above <docker-execution-driver>`.

    stdin : None
        Standard input to be used for the script

    output_loglevel : debug
        Level at which to log the output from the script. Set to ``quiet`` to
        suppress logging.

    use_vt : False
        Use SaltStack's utils.vt to stream output to console.

    keep_env : None
        If not passed, only a sane default PATH environment variable will be
        set. If ``True``, all environment variables from the container's host
        will be kept. Otherwise, a comma-separated list (or Python list) of
        environment variable names can be passed, and those environment
        variables will be kept.

    CLI Example:

    .. code-block:: bash

        salt myminion docker.script mycontainer salt://docker_script.py
        salt myminion docker.script mycontainer salt://scripts/runme.sh 'arg1 arg2 "arg 3"'
        salt myminion docker.script mycontainer salt://scripts/runme.sh stdin='one\\ntwo\\nthree\\nfour\\nfive\\n' output_loglevel=quiet
    """
    # Thin wrapper: all of the heavy lifting happens in _script().
    common = dict(
        saltenv=saltenv,
        args=args,
        template=template,
        exec_driver=exec_driver,
        stdin=stdin,
        python_shell=python_shell,
        output_loglevel=output_loglevel,
        ignore_retcode=ignore_retcode,
        use_vt=use_vt,
        keep_env=keep_env,
    )
    return _script(name, source, **common)
def script_retcode(
    name,
    source,
    saltenv="base",
    args=None,
    template=None,
    exec_driver=None,
    stdin=None,
    python_shell=True,
    output_loglevel="debug",
    ignore_retcode=False,
    use_vt=False,
    keep_env=None,
):
    """
    Run :py:func:`cmd.script_retcode <salt.modules.cmdmod.script_retcode>`
    within a container

    name
        Container name or ID

    source
        Path to the script. Can be a local path on the Minion or a remote file
        from the Salt fileserver.

    args
        A string containing additional command-line options to pass to the
        script.

    template : None
        Templating engine to use on the script before running.

    exec_driver : None
        If not passed, the execution driver will be detected as described
        :ref:`above <docker-execution-driver>`.

    stdin : None
        Standard input to be used for the script

    output_loglevel : debug
        Level at which to log the output from the script. Set to ``quiet`` to
        suppress logging.

    use_vt : False
        Use SaltStack's utils.vt to stream output to console.

    keep_env : None
        If not passed, only a sane default PATH environment variable will be
        set. If ``True``, all environment variables from the container's host
        will be kept. Otherwise, a comma-separated list (or Python list) of
        environment variable names can be passed, and those environment
        variables will be kept.

    CLI Example:

    .. code-block:: bash

        salt myminion docker.script_retcode mycontainer salt://docker_script.py
        salt myminion docker.script_retcode mycontainer salt://scripts/runme.sh 'arg1 arg2 "arg 3"'
        salt myminion docker.script_retcode mycontainer salt://scripts/runme.sh stdin='one\\ntwo\\nthree\\nfour\\nfive\\n' output_loglevel=quiet
    """
    # Run the script and surface only its exit code.
    result = _script(
        name,
        source,
        saltenv=saltenv,
        args=args,
        template=template,
        exec_driver=exec_driver,
        stdin=stdin,
        python_shell=python_shell,
        output_loglevel=output_loglevel,
        ignore_retcode=ignore_retcode,
        use_vt=use_vt,
        keep_env=keep_env,
    )
    return result["retcode"]
def _mk_fileclient():
    """
    Lazily create a file client and cache it in ``__context__``.
    """
    # Reuse an existing client if one was already created for this run.
    if "cp.fileclient" in __context__:
        return
    __context__["cp.fileclient"] = salt.fileclient.get_file_client(__opts__)
def _generate_tmp_path():
return os.path.join("/tmp", "salt.docker.{}".format(uuid.uuid4().hex[:6]))
def _prepare_trans_tar(name, sls_opts, mods=None, pillar=None, extra_filerefs=""):
    """
    Build a self-contained tarball carrying the state data to be applied
    inside the container.
    """
    _mk_fileclient()
    low_chunks = _compile_state(sls_opts, mods)
    # Borrowed from salt-ssh: resolve the file references the lowstate
    # needs so they can be bundled into the tarball.
    file_refs = salt.client.ssh.state.lowstate_file_refs(low_chunks, extra_filerefs)
    return salt.client.ssh.state.prep_trans_tar(
        __context__["cp.fileclient"], low_chunks, file_refs, pillar, name
    )
def _compile_state(sls_opts, mods=None):
    """
    Generates the chunks of lowdata from the list of modules.

    Returns the compiled low chunks on success, or the accumulated list of
    rendering/verification errors if any step fails.
    """
    st_ = HighState(sls_opts)
    # With no explicit modules, compile the full lowstate.
    if not mods:
        return st_.compile_low_chunks()
    high_data, errors = st_.render_highstate({sls_opts["saltenv"]: mods})
    high_data, ext_errors = st_.state.reconcile_extend(high_data)
    errors += ext_errors
    errors += st_.state.verify_high(high_data)
    if errors:
        return errors
    high_data, req_in_errors = st_.state.requisite_in(high_data)
    errors += req_in_errors
    high_data = st_.state.apply_exclude(high_data)
    # Verify that the high data is structurally sound
    if errors:
        return errors
    # Compile and verify the raw chunks
    return st_.state.compile_high_data(high_data)
def call(name, function, *args, **kwargs):
    """
    Executes a Salt function inside a running container

    .. versionadded:: 2016.11.0

    The container does not need to have Salt installed, but Python is required.

    name
        Container name or ID

    function
        Salt execution module function

    CLI Example:

    .. code-block:: bash

        salt myminion docker.call test.ping
        salt myminion test.arg arg1 arg2 key1=val1
        salt myminion dockerng.call compassionate_mirzakhani test.arg arg1 arg2 key1=val1
    """
    # Validate input before doing any work inside the container. The check
    # previously ran after the mkdir below, leaking an empty scratch
    # directory in the container on invalid invocations.
    if function is None:
        raise CommandExecutionError("Missing function parameter")
    # where to put the salt-thin
    thin_dest_path = _generate_tmp_path()
    mkdirp_thin_argv = ["mkdir", "-p", thin_dest_path]
    # make thin_dest_path in the container
    ret = run_all(name, subprocess.list2cmdline(mkdirp_thin_argv))
    if ret["retcode"] != 0:
        return {"result": False, "comment": ret["stderr"]}
    # move salt into the container
    thin_path = __utils__["thin.gen_thin"](
        __opts__["cachedir"],
        extra_mods=__salt__["config.option"]("thin_extra_mods", ""),
        so_mods=__salt__["config.option"]("thin_so_mods", ""),
    )
    ret = copy_to(
        name, thin_path, os.path.join(thin_dest_path, os.path.basename(thin_path))
    )
    # untar archive
    untar_cmd = [
        "python",
        "-c",
        ("import tarfile; " 'tarfile.open("{0}/{1}").extractall(path="{0}")').format(
            thin_dest_path, os.path.basename(thin_path)
        ),
    ]
    ret = run_all(name, subprocess.list2cmdline(untar_cmd))
    if ret["retcode"] != 0:
        return {"result": False, "comment": ret["stderr"]}
    try:
        # Invoke the unpacked salt-call in --local mode with JSON output so
        # the result can be parsed from stdout below.
        salt_argv = (
            [
                "python{}".format(sys.version_info[0]),
                os.path.join(thin_dest_path, "salt-call"),
                "--metadata",
                "--local",
                "--log-file",
                os.path.join(thin_dest_path, "log"),
                "--cachedir",
                os.path.join(thin_dest_path, "cache"),
                "--out",
                "json",
                "-l",
                "quiet",
                "--",
                function,
            ]
            + list(args)
            + [
                "{}={}".format(key, value)
                for (key, value) in kwargs.items()
                if not key.startswith("__")
            ]
        )
        ret = run_all(name, subprocess.list2cmdline(map(str, salt_argv)))
        # python not found
        if ret["retcode"] != 0:
            raise CommandExecutionError(ret["stderr"])
        # process "real" result in stdout
        try:
            data = __utils__["json.find_json"](ret["stdout"])
            local = data.get("local", data)
            if isinstance(local, dict):
                if "retcode" in local:
                    __context__["retcode"] = local["retcode"]
                return local.get("return", data)
        except ValueError:
            return {"result": False, "comment": "Can't parse container command output"}
    finally:
        # delete the thin dir so that it does not end in the image
        rm_thin_argv = ["rm", "-rf", thin_dest_path]
        run_all(name, subprocess.list2cmdline(rm_thin_argv))
def apply_(name, mods=None, **kwargs):
    """
    .. versionadded:: 2019.2.0

    Apply states! This function will call highstate or state.sls based on the
    arguments passed in, ``apply`` is intended to be the main gateway for
    all state executions.

    CLI Example:

    .. code-block:: bash

        salt 'docker' docker.apply web01
        salt 'docker' docker.apply web01 test
        salt 'docker' docker.apply web01 test,pkgs
    """
    # No module list means a full highstate run.
    if not mods:
        return highstate(name, **kwargs)
    return sls(name, mods, **kwargs)
def sls(name, mods=None, **kwargs):
    """
    Apply the states defined by the specified SLS modules to the running
    container

    .. versionadded:: 2016.11.0

    The container does not need to have Salt installed, but Python is required.

    name
        Container name or ID

    mods : None
        A string containing comma-separated list of SLS with defined states to
        apply to the container.

    saltenv : base
        Specify the environment from which to retrieve the SLS indicated by the
        `mods` parameter.

    pillarenv
        Specify a Pillar environment to be used when applying states. This
        can also be set in the minion config file using the
        :conf_minion:`pillarenv` option. When neither the
        :conf_minion:`pillarenv` minion config option nor this CLI argument is
        used, all Pillar environments will be merged together.

        .. versionadded:: 2018.3.0

    pillar
        Custom Pillar values, passed as a dictionary of key-value pairs

        .. note::
            Values passed this way will override Pillar values set via
            ``pillar_roots`` or an external Pillar source.

        .. versionadded:: 2018.3.0

    CLI Example:

    .. code-block:: bash

        salt myminion docker.sls compassionate_mirzakhani mods=rails,web
    """
    mods = [item.strip() for item in mods.split(",")] if mods else []
    # Figure out the saltenv/pillarenv to use
    pillar_override = kwargs.pop("pillar", None)
    if "saltenv" not in kwargs:
        kwargs["saltenv"] = "base"
    sls_opts = __utils__["state.get_sls_opts"](__opts__, **kwargs)
    # gather grains from the container
    grains = call(name, "grains.items")
    # compile pillar with container grains
    pillar = salt.pillar.get_pillar(
        __opts__,
        grains,
        __opts__["id"],
        pillar_override=pillar_override,
        pillarenv=sls_opts["pillarenv"],
    ).compile_pillar()
    # CLI-supplied pillar wins over compiled pillar values.
    if pillar_override and isinstance(pillar_override, dict):
        pillar.update(pillar_override)
    sls_opts["grains"].update(grains)
    sls_opts["pillar"].update(pillar)
    trans_tar = _prepare_trans_tar(
        name,
        sls_opts,
        mods=mods,
        pillar=pillar,
        extra_filerefs=kwargs.get("extra_filerefs", ""),
    )
    # where to put the salt trans tar
    trans_dest_path = _generate_tmp_path()
    mkdirp_trans_argv = ["mkdir", "-p", trans_dest_path]
    # put_archive requires the path to exist
    ret = run_all(name, subprocess.list2cmdline(mkdirp_trans_argv))
    if ret["retcode"] != 0:
        return {"result": False, "comment": ret["stderr"]}
    ret = None
    try:
        trans_tar_sha256 = __utils__["hashutils.get_hash"](trans_tar, "sha256")
        copy_to(
            name,
            trans_tar,
            os.path.join(trans_dest_path, "salt_state.tgz"),
            exec_driver=_get_exec_driver(),
            overwrite=True,
        )
        # Now execute the state into the container
        ret = call(
            name,
            "state.pkg",
            os.path.join(trans_dest_path, "salt_state.tgz"),
            trans_tar_sha256,
            "sha256",
        )
    finally:
        # delete the trans dir so that it does not end in the image
        rm_trans_argv = ["rm", "-rf", trans_dest_path]
        run_all(name, subprocess.list2cmdline(rm_trans_argv))
        # delete the local version of the trans tar
        try:
            os.remove(trans_tar)
        except OSError as exc:
            log.error(
                "docker.sls: Unable to remove state tarball '%s': %s", trans_tar, exc
            )
    # Map the outcome onto the module retcode: 1 = execution failure,
    # 2 = states ran but at least one failed, 0 = success.
    if not isinstance(ret, dict):
        __context__["retcode"] = 1
    elif not __utils__["state.check_result"](ret):
        __context__["retcode"] = 2
    else:
        __context__["retcode"] = 0
    return ret
def highstate(name, saltenv="base", **kwargs):
    """
    Apply a highstate to the running container

    .. versionadded:: 2019.2.0

    The container does not need to have Salt installed, but Python is required.

    name
        Container name or ID

    saltenv : base
        Specify the environment from which to retrieve the SLS indicated by the
        `mods` parameter.

    CLI Example:

    .. code-block:: bash

        salt myminion docker.highstate compassionate_mirzakhani
    """
    # Bug fix: forward the caller's saltenv instead of hard-coding "base",
    # which silently ignored any saltenv passed by the user.
    return sls(name, saltenv=saltenv, **kwargs)
def sls_build(
    repository, tag="latest", base="opensuse/python", mods=None, dryrun=False, **kwargs
):
    """
    Build a Docker image by applying the given SLS modules on top of a base
    image.

    .. versionadded:: 2016.11.0

    .. versionchanged:: 2018.3.0
        The repository and tag must now be passed separately using the
        ``repository`` and ``tag`` arguments, rather than together in the (now
        deprecated) ``image`` argument.

    The base image does not need to have Salt installed, but Python is
    required.

    repository
        Repository name for the image to be built

    tag : latest
        Tag name for the image to be built

    base : opensuse/python
        Name or ID of the base image

    mods
        A string containing comma-separated list of SLS with defined states to
        apply to the base image.

    saltenv / pillarenv / pillar
        Forwarded to :py:func:`docker.sls <salt.modules.dockermod.sls>`.

    dryrun: False
        When set to ``True`` the container will not be committed at the end of
        the build, and the state result is returned instead of the new image
        ID. The dryrun succeeds even when the state contains errors.

    CLI Example:

    .. code-block:: bash

        salt myminion docker.sls_build imgname base=mybase mods=rails,web
    """
    create_kwargs = __utils__["args.clean_kwargs"](**copy.deepcopy(kwargs))
    # These are fixed by the build container below and must not leak
    # through from the caller's kwargs.
    for key in ("image", "name", "cmd", "interactive", "tty", "extra_filerefs"):
        create_kwargs.pop(key, None)
    # start a new container
    created = create(
        image=base, cmd="sleep infinity", interactive=True, tty=True, **create_kwargs
    )
    container_id = created["Id"]
    try:
        start_(container_id)
        # Now execute the state into the container
        ret = sls(container_id, mods, **kwargs)
        # fail if the state was not successful
        if not dryrun and not __utils__["state.check_result"](ret):
            raise CommandExecutionError(ret)
        if dryrun is False:
            ret = commit(container_id, repository, tag=tag)
    finally:
        # Always tear the build container down, even on failure.
        stop(container_id)
        rm_(container_id)
    return ret
|
"""Models module."""
from enum import Enum
class Gender(str, Enum):
    """Gender enum; members compare equal to their plain-string values."""

    # str mixin makes members JSON/ORM friendly (Gender.MALE == "male").
    MALE = "male"
    FEMALE = "female"
|
import argparse
# Command-line interface for filtering glycopeptide assay libraries.
parser = argparse.ArgumentParser(
    description='Filter assays.'
)
parser.add_argument(
    '--in', nargs='+',
    help='input assay files'
)
parser.add_argument(
    '--out',
    help='output assay file'
)
parser.add_argument(
    '--swath_windows',
    help='SWATH isolation window file'
)
# Assay-level (as opposed to fragment-level) filter options.
assay_filter_group = parser.add_argument_group('assay filters')
assay_filter_group.add_argument(
    '--min_precursor_mz', type=float,
    help='lower m/z limit of precursor ions'
)
assay_filter_group.add_argument(
    '--max_precursor_mz', type=float,
    help='upper m/z limit of precursor ions'
)
assay_filter_group.add_argument(
    '--min_fragment_number', default=6, type=int,
    help='remove assays with < N fragments (default: %(default)s)'
)
assay_filter_group.add_argument(
    '--min_peptide_fragment_number', default=3, type=int,
    help='remove assays with < N peptide fragments (default: %(default)s)'
)
assay_filter_group.add_argument(
    '--min_glycan_fragment_number', default=3, type=int,
    help='remove assays with < N glycan fragments (default: %(default)s)'
)
def add_fragment_filter_args(parser):
    """
    Register fragment-filter CLI options on *parser*.

    Generates six argument groups: the cross product of
    {main, quantifying transition} x {main, prior peptide, prior glycan},
    with option names prefixed accordingly (e.g.
    ``--quantify_prior_glycan_fragment_number``).
    """
    for quantify_group in ['main', 'quantify']:
        if quantify_group == 'quantify':
            arg_quantify_group = 'quantify_'
            help_quantify_group = 'quantifying '
        else:
            arg_quantify_group = ''
            help_quantify_group = ''
        for prior_group in ['main', 'prior_peptide', 'prior_glycan']:
            # Build the option-name and help-text prefixes for this group.
            if prior_group == 'prior_peptide':
                arg_group = arg_quantify_group + 'prior_peptide_'
                help_group = help_quantify_group + 'prior peptide '
            elif prior_group == 'prior_glycan':
                arg_group = arg_quantify_group + 'prior_glycan_'
                help_group = help_quantify_group + 'prior glycan '
            else:
                arg_group = arg_quantify_group
                help_group = help_quantify_group
            frag_filter_group = parser.add_argument_group(help_group + 'fragment filters')
            if prior_group == 'prior_peptide' or prior_group == 'prior_glycan':
                # Prior groups select a target number of fragments.
                if quantify_group == 'quantify':
                    default = 6
                else:
                    default = 10
                frag_filter_group.add_argument(
                    '--%sfragment_number' % arg_group, default=default, type=int,
                    help='try to select top N %sfragments' % help_group + ' (default: %(default)s)'
                )
            else:
                # Main groups cap the total number of fragments instead.
                if quantify_group == 'quantify':
                    default = 12
                else:
                    default = 20
                frag_filter_group.add_argument(
                    '--%smax_fragment_number' % arg_group, default=default, type=int,
                    help='maximal number of fragments (default: %(default)s)'
                )
            frag_filter_group.add_argument(
                '--%sfragment_type' % arg_group, type=str, nargs='+',
                help='list of %sfragment types' % help_group
            )
            if prior_group == 'main' or prior_group == 'prior_peptide':
                frag_filter_group.add_argument(
                    '--%smin_fragment_amino_acid_number' % arg_group, type=int,
                    help='lower limit of amino acid number of %sfragment ions' % help_group
                )
                frag_filter_group.add_argument(
                    '--%sfragment_charge' % arg_group, type=int, nargs='+',
                    help='list of allowed charge states of %sfragment ions' % help_group
                )
                frag_filter_group.add_argument(
                    '--%sfragment_loss_type' % arg_group, type=str, nargs='+',
                    help='list of neutral loss types of %sfragment ions' % help_group
                )
                frag_filter_group.add_argument(
                    '--%smin_fragment_mz' % arg_group, type=float,
                    help='lower m/z limit of %sfragment ions' % help_group
                )
                frag_filter_group.add_argument(
                    '--%smax_fragment_mz' % arg_group, type=float,
                    help='upper m/z limit of %sfragment ions' % help_group
                )
            if prior_group == 'main' or prior_group == 'prior_glycan':
                # SECURITY NOTE(review): type=eval executes arbitrary Python
                # from the command line (presumably to accept dict/int
                # literals). Prefer ast.literal_eval — confirm the accepted
                # value shapes before changing.
                frag_filter_group.add_argument(
                    '--%smin_fragment_monosaccharide_number' % arg_group, type=eval, default=1,
                    help='lower limit of monosaccharide number of %sfragment ions' % help_group + ' (default: %(default)s)'
                )
            if prior_group == 'main':
                frag_filter_group.add_argument(
                    '--%smin_relative_fragment_intensity' % arg_group, type=float,
                    help='lower relative intensity limit of %sfragment ions' % help_group
                )
add_fragment_filter_args(parser)
args = parser.parse_args()
# 'in' is a Python keyword, so the attribute must be fetched via getattr.
assay_files = getattr(args, 'in')
out_file = args.out
swath_window_file = args.swath_windows
# vars() returns the namespace's own dict; popping the I/O options leaves
# only the filter criteria behind (and mutates args in the process).
filter_args = vars(args)
filter_args.pop('in')
filter_args.pop('out')
filter_args.pop('swath_windows')
def arrange_filter_args(filter_args):
    """
    Reorganize the flat CLI argument dict into nested filter criteria.

    Keys prefixed with ``quantify_`` are placed under
    ``quantifying_transition_criteria``. Within each level, keys prefixed
    with ``prior_peptide_`` / ``prior_glycan_`` — except the
    ``*_fragment_number`` selectors themselves — are grouped into the
    corresponding ``*_fragment_criteria`` sub-dict.
    """
    arranged = {
        'prior_peptide_fragment_criteria': {},
        'prior_glycan_fragment_criteria': {},
        'quantifying_transition_criteria': {
            'prior_peptide_fragment_criteria': {},
            'prior_glycan_fragment_criteria': {}
        }
    }
    for key, value in filter_args.items():
        if key.startswith('quantify_'):
            level = arranged['quantifying_transition_criteria']
            key = key[len('quantify_'):]
        else:
            level = arranged
        for prefix, bucket in (
            ('prior_peptide_', 'prior_peptide_fragment_criteria'),
            ('prior_glycan_', 'prior_glycan_fragment_criteria'),
        ):
            # The "*_fragment_number" selector stays at the current level.
            if key.startswith(prefix) and key != prefix + 'fragment_number':
                level = level[bucket]
                key = key[len(prefix):]
                break
        level[key] = value
    # Deliberately aliases (not copies) the prior criteria dicts.
    arranged['min_peptide_fragment_criteria'] = arranged['prior_peptide_fragment_criteria']
    arranged['min_glycan_fragment_criteria'] = arranged['prior_glycan_fragment_criteria']
    return arranged
filter_criteria = arrange_filter_args(filter_args)
# %%
import logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s %(filename)s: [%(levelname)s] %(message)s'
)
# %%
from util import list_files
# Fallback for interactive (cell-by-cell) use: pick up any assay pickles
# in the working directory when no --in arguments were given.
if globals().get('assay_files', None) is None:
    assay_files = list_files(
        path='.',
        pattern='\\.assay\\.pickle$'
    )
if len(assay_files) == 0:
    raise ValueError('no assay files')
# %%
import os
# Derive a default output name from the first input file.
if globals().get('out_file', None) is None:
    out_file = os.path.splitext(assay_files[0])[0]
    if out_file.endswith('.assay'):
        out_file = out_file[:-len('.assay')]
    if len(assay_files) > 1:
        out_file += '_' + str(len(assay_files))
    out_file += '_filtered.assay.pickle'
# %%
from util import save_pickle, load_pickle
from assay import GlycoAssayBuilder
import pandas as pd
# %%
# Load and concatenate all input assay libraries.
assays = []
for assay_file in assay_files:
    logging.info('loading assays: ' + assay_file)
    assay_data = load_pickle(assay_file)
    assays.extend(assay_data)
    logging.info('assays loaded: {0}, {1} spectra' \
        .format(assay_file, len(assay_data)))
logging.info('assays loaded: {0} spectra totally' \
    .format(len(assays)))
# %%
# SWATH windows are optional; tab-separated when provided.
if swath_window_file is not None:
    logging.info('loading SWATH windows: ' + swath_window_file)
    swath_windows = pd.read_csv(swath_window_file, sep='\t')
    logging.info('SWATH windows loaded: {0} windows' \
        .format(len(swath_windows)))
else:
    swath_windows = None
# %%
logging.info(
    'filtering assays using the following parameters: \n' + \
    '\n'.join((
        k + '=' + str(v)
        for k, v in filter_args.items()
        if v is not None
    ))
)
assay_builder = GlycoAssayBuilder()
assays = assay_builder.filter_assays(
    assays,
    swath_windows=swath_windows,
    **filter_criteria
)
logging.info('assays filtered: {0} spectra remaining' \
    .format(len(assays)))
# %%
logging.info('saving assays: {0}' \
    .format(out_file))
save_pickle(assays, out_file)
logging.info('assays saved: {0}, {1} spectra' \
    .format(out_file, len(assays)))
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
from mindspore import context
from mindspore import Tensor, nn
from mindspore.ops import composite as C
from mindspore.common import dtype as mstype
from mindspore.common.parameter import Parameter
# Gradient operator returning gradients w.r.t. all network inputs.
grad_all = C.GradOperation(get_all=True)
class IfAfterIfInForNet(nn.Cell):
    """Control-flow case: an if after an if nested inside a for loop,
    written inline in construct."""

    def __init__(self):
        super().__init__()
        self.param_a = Parameter(Tensor(5, mstype.int32), name='a')
        self.param_b = Parameter(Tensor(4, mstype.int32), name='b')

    def construct(self, x):
        out = x + self.param_b
        # param_b is mutated every iteration, so the trailing if sees the
        # accumulated value.
        for _ in range(4):
            if out <= 20:
                out += self.param_a
            self.param_b += 3
        if x < self.param_b:
            out -= self.param_b
        return out
class IfAfterIfInForNet1(nn.Cell):
    """Same control flow as IfAfterIfInForNet, but with the for/if body
    extracted into a helper method."""

    def __init__(self):
        super().__init__()
        self.param_a = Parameter(Tensor(5, mstype.int32), name='a')
        self.param_b = Parameter(Tensor(4, mstype.int32), name='b')

    def construct(self, x):
        out = self.func(x)
        if x < self.param_b:
            out -= self.param_b
        return out

    def func(self, x):
        # for loop containing the inner if; mutates param_b each iteration.
        out = x + self.param_b
        for _ in range(4):
            if out <= 20:
                out += self.param_a
            self.param_b += 3
        return out
class IfAfterIfInForNet2(nn.Cell):
    """Variant with the inner if extracted into a second-level helper
    (subfunc) called from inside the loop."""

    def __init__(self):
        super().__init__()
        self.param_a = Parameter(Tensor(5, mstype.int32), name='a')
        self.param_b = Parameter(Tensor(4, mstype.int32), name='b')

    def construct(self, x):
        out = self.func(x)
        if x < self.param_b:
            out -= self.param_b
        return out

    def func(self, x):
        out = x + self.param_b
        for _ in range(4):
            out = self.subfunc(out)
            self.param_b += 3
        return out

    def subfunc(self, x):
        # The inner if from the other variants, as its own method.
        if x <= 20:
            x += self.param_a
        return x
class IfAfterIfInForNet3(nn.Cell):
    """Variant whose inner helper returns one of two parameters based on an `if`."""
    def __init__(self):
        super().__init__()
        self.param_a = Parameter(Tensor(5, mstype.int32), name='a')
        self.param_b = Parameter(Tensor(4, mstype.int32), name='b')
    def construct(self, x):
        out = self.func(x)
        if x < self.param_b:
            out -= self.param_b
        return out
    def func(self, x):
        out = x + self.param_b
        # Note: only 3 iterations here, unlike the other nets (4).
        for _ in range(3):
            out += self.subfunc(x)
            self.param_b += 3
        return out
    def subfunc(self, x):
        # Selects which parameter contributes, based on the input.
        if x > 10:
            return self.param_a
        return self.param_b
class GradNet(nn.Cell):
    """Wrapper cell that returns gradients of `net` w.r.t. all of its inputs."""
    def __init__(self, net):
        super(GradNet, self).__init__()
        self.net = net
    def construct(self, *inputs):
        return grad_all(self.net)(*inputs)
def control_flow_if_after_if_in_for(input_net, x, expect1, expect2):
    """Run *input_net* in graph mode and check forward output and gradients.

    Two independent instances are used (one wrapped by GradNet) because the
    nets mutate their parameters during the forward pass.
    """
    context.set_context(mode=context.GRAPH_MODE)
    grad_net = GradNet(input_net())
    forward_net = input_net()
    forward_res = forward_net(x)
    backward_res = grad_net(x)
    assert forward_res == expect1
    assert backward_res == expect2
# @pytest.mark.skip(reason="ME EvalCNode error")
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_if_after_if_in_for():
    """Forward/backward graph-mode check for the monolithic net."""
    input_x = Tensor(2, mstype.int32)
    expected_forward = Tensor(14, mstype.int32)
    expected_grads = (Tensor(1, mstype.int32),)
    control_flow_if_after_if_in_for(IfAfterIfInForNet, input_x, expected_forward, expected_grads)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_if_after_if_in_for_01():
    """Same check with the loop factored into a helper method."""
    input_x = Tensor(2, mstype.int32)
    expected_forward = Tensor(14, mstype.int32)
    expected_grads = (Tensor(1, mstype.int32),)
    control_flow_if_after_if_in_for(IfAfterIfInForNet1, input_x, expected_forward, expected_grads)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_if_after_if_in_for_02():
    """Same check with the conditional pushed into a sub-helper."""
    input_x = Tensor(2, mstype.int32)
    expected_forward = Tensor(14, mstype.int32)
    expected_grads = (Tensor(1, mstype.int32),)
    control_flow_if_after_if_in_for(IfAfterIfInForNet2, input_x, expected_forward, expected_grads)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_if_after_if_in_for_03():
    """Same check for the parameter-selecting variant (3 loop iterations)."""
    input_x = Tensor(2, mstype.int32)
    expected_forward = Tensor(11, mstype.int32)
    expected_grads = (Tensor(1, mstype.int32),)
    control_flow_if_after_if_in_for(IfAfterIfInForNet3, input_x, expected_forward, expected_grads)
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from datetime import datetime, timedelta
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from django.utils.translation import ugettext as _
from desktop.lib.paths import get_apps_root
from useradmin.models import install_sample_user
from hbased.ttypes import AlreadyExists
from hbase.api import HbaseApi
LOG = logging.getLogger(__name__)
class Command(BaseCommand):
    """Management command that creates and populates HBase/MapR-DB demo tables.

    Both install phases are best-effort: a failure (e.g. unreachable cluster)
    is logged and the command moves on instead of aborting.
    """
    help = 'Create and fill some demo tables in the first configured cluster.'
    args = '<username>'

    def handle(self, *args, **options):
        # Run as the given username when provided, otherwise as the sample user.
        if args:
            user = args[0]
        else:
            user = install_sample_user()
        api = HbaseApi(user=user)
        cluster_name = api.getClusters()[0]['name']  # Currently pick first configured cluster
        # HBASE Tables (best-effort; was a silent bare `except: pass`)
        try:
            # Check connectivity
            api.connectCluster(cluster_name)
            self.create_analytics_table(api, cluster_name, 'analytics_demo')
            self.load_analytics_table(api, cluster_name, 'analytics_demo')
            self.create_binary_table(api, cluster_name, 'document_demo')
            self.load_binary_table(api, cluster_name, 'document_demo')
        except Exception:
            LOG.exception('Could not install the HBase demo tables')
        # MapR-DB Tables (best-effort as well)
        try:
            self.create_analytics_table(api, cluster_name, '/analytics_demo')
            self.load_analytics_table(api, cluster_name, '/analytics_demo')
            self.create_binary_table(api, cluster_name, '/document_demo')
            self.load_binary_table(api, cluster_name, '/document_demo')
        except Exception:
            LOG.exception('Could not install the MapR-DB demo tables')

    def create_analytics_table(self, api, cluster_name, table_name):
        """Create the 3-family analytics table, ignoring it if it already exists."""
        try:
            api.createTable(cluster_name, table_name, [{'properties': {'name': 'hour'}}, {'properties': {'name': 'day'}}, {'properties': {'name': 'total'}}])
        except AlreadyExists:
            pass

    def load_analytics_table(self, api, cluster_name, table_name):
        """Bulk-upload the bundled analytics TSV into *table_name*."""
        table_data = os.path.join(get_apps_root(), 'hbase', 'example', 'analytics', 'hbase-analytics.tsv')
        # Close the data file once the upload finishes (previously leaked).
        with open(table_data) as data_file:
            api.bulkUpload(cluster_name, table_name, data_file)

    def create_binary_table(self, api, cluster_name, table_name):
        """Create the single-family document table, ignoring it if it already exists."""
        try:
            api.createTable(cluster_name, table_name, [{'properties': {'name': 'doc'}}])
        except AlreadyExists:
            pass

    def load_binary_table(self, api, cluster_name, table_name):
        """Insert demo rows of mixed content types (text, json, xml, binary files)."""
        today = datetime.now().strftime('%Y%m%d')
        tomorrow = (datetime.now() + timedelta(days=1)).strftime('%Y%m%d')
        api.putRow(cluster_name, table_name, today, {'doc:txt': 'Hue is awesome!'})
        api.putRow(cluster_name, table_name, today, {'doc:json': '{"user": "hue", "coolness": "extra"}'})
        api.putRow(cluster_name, table_name, tomorrow, {'doc:version': '<xml>I like HBase</xml>'})
        api.putRow(cluster_name, table_name, tomorrow, {'doc:version': '<xml>I LOVE HBase</xml>'})
        root = os.path.join(get_apps_root(), 'hbase', 'example', 'documents')
        # Close each binary file after reading it (previously leaked handles).
        with open(root + '/hue-logo.png', "rb") as img:
            api.putRow(cluster_name, table_name, today, {'doc:img': img.read()})
        with open(root + '/gethue.com.html', "rb") as html:
            api.putRow(cluster_name, table_name, today, {'doc:html': html.read()})
        with open(root + '/gethue.pdf', "rb") as pdf:
            api.putRow(cluster_name, table_name, today, {'doc:pdf': pdf.read()})
|
# Straightforward implementation of the Singleton Pattern
import yaml
import pathlib
#import picamera_colllecter
class Configuration(object):
    """Singleton holding the application's YAML configuration.

    The first instantiation reads `app_settings.yaml` and `app_current.yaml`
    from the module directory; subsequent instantiations return the same
    object without touching disk.
    """
    # The one shared instance, created lazily on first call.
    _instance = None
    def __new__(cls):
        if cls._instance is None:
            cls._instance = super(Configuration, cls).__new__(cls)
            # Put any initialization here.
            # NOTE: both documents are stored on the *class*, so every caller
            # sees the same configuration data.
            path = pathlib.Path(__file__).parent
            with open(path / 'app_settings.yaml') as file:
                cls.config_data = yaml.load(file, Loader=yaml.FullLoader)
            with open(path / 'app_current.yaml') as file:
                cls.current_config = yaml.load(file, Loader=yaml.FullLoader)
        return cls._instance
    def save_current(cls, camera_args):
        """Update the current camera settings from a form mapping and persist
        them to `app_current.yaml`.

        NOTE(review): despite its `cls` name, the first argument is an
        ordinary `self` — this is a plain instance method.
        """
        cls.current_config['iso'] = int(camera_args.get('ddlISO'))
        cls.current_config['mode'] = camera_args.get('ddlMode')
        cls.current_config['resolution'] = camera_args.get('ddlResolution')
        cls.current_config['jpegquality'] = int(camera_args.get('ddlJPEG'))
        cls.current_config['method'] = camera_args.get('ddlMethod')
        cls.current_config['shutter_speed'] = camera_args.get('ddlShutterSpeed')
        path = pathlib.Path(__file__).parent
        with open(path / 'app_current.yaml', "w") as file:
            yaml.dump(cls.current_config, file)
|
"""
Check subclassing strange thingies.
"""
import support
# Python 2 / Jython regression check: attempting to subclass an *instance*
# (the integer 12) must raise TypeError, since bases have to be classes.
try:
    class foo(12): pass
except TypeError, e:
    pass
else:
    raise support.TestError("expecting a TypeError for attempting to subclass integer instance")
|
from typing import Dict
class BaseModel(object):
    """Abstract base class for learner model
    Defines necessary operations for the underlying
    learner model; training, prediction, classification
    probabilities, and decision function results. In order
    for an initial or improved learner to use a future-defined
    model, the model must include these operations
    """
    def train(self, instances):
        """Train on the set of training instances.
        Returns:
            None.
        """
        raise NotImplementedError
    def predict(self, instances):
        """Predict classification labels for the set of instances.
        Returns:
            label classifications (List(int))
        """
        raise NotImplementedError
    def predict_proba(self, instances):
        """Use the model to determine probability of adversarial classification.
        Returns:
            probability of adversarial classification (List(int))
        """
        raise NotImplementedError
    def predict_log_proba(self, instances):
        """Use the model to determine the log probability of adversarial classification.
        Returns:
            log probability of adversarial classification (List(int))
        """
        raise NotImplementedError
    def decision_function_(self, instances):
        """Use the model to determine the decision function for each instance.
        Returns:
            decision values (List(int))
        """
        raise NotImplementedError
    def set_params(self, params: Dict):
        """Set params for the model.
        Args:
            params (Dict): set of available params with updated values
        """
        raise NotImplementedError
    def get_available_params(self) -> Dict:
        """Get the set of params defined in the model usage.
        Returns:
            dictionary mapping param names to current values
        """
        raise NotImplementedError
    def get_alg(self):
        """Return the underlying model algorithm.
        Returns:
            algorithm used to train and test instances
        """
        raise NotImplementedError
|
# Create your views here.
from django.shortcuts import render_to_response, get_object_or_404
from django.http import HttpResponseRedirect
from django.contrib.comments.models import Comment
from models import Post
from django.template import RequestContext
def comment_posted(request):
    """After a comment is saved, redirect back to the blog post it belongs to."""
    comment_id = request.GET['c']
    comment = Comment.objects.get(id=comment_id)
    post_id = comment.content_object.id
    return HttpResponseRedirect('/blog/' + str(post_id))
def post_detail(request, post_id):
    """Render a single blog post, returning a 404 for unknown ids."""
    post = get_object_or_404(Post, pk=post_id)
    template_context = {'post': post}
    return render_to_response('example/post_detail.html', template_context, context_instance=RequestContext(request))
|
def CuboidVolume():
    """Prompt for cuboid dimensions and return its volume."""
    panjang = int(input("Masukkan panjang : "))
    lebar = int(input("Masukkan lebar : "))
    tinggi = int(input("Masukkan ketinggian : "))
    return panjang * lebar * tinggi
def SilinderVolume(pi):
    """Prompt for cylinder dimensions and return its volume (π·r²·h)."""
    jejari = int(input("Masukkan jejari : "))
    tinggi = int(input("Masukkan ketinggian : "))
    return pi * jejari ** 2 * tinggi
def ConeVolume(pi):
    """Prompt for cone dimensions and return its volume (⅓·π·r²·h)."""
    jejari = int(input("Masukkan jejari : "))
    tinggi = int(input("Masukkan ketinggian : "))
    luas_tapak = pi * jejari ** 2
    return 1 / 3 * luas_tapak * tinggi
def SphereVolume(pi):
    """Prompt for a radius and return the sphere volume (4/3·π·r³)."""
    jejari = int(input("Masukkan jejari : "))
    return 4 / 3 * pi * jejari ** 3
def Menu():
    """Print the volume-calculator menu and return the chosen option (1-4)."""
    print("*" * 40)
    print(" " * 7, "Menu Mengira Isi Padu", " " * 7)
    print("*" * 40)
    print("1. Kuboid")
    print("2. Silinder")  # fixed: was "2.Silinder", inconsistent with the other entries
    print("3. Kon")
    print("4. Sfera")
    print("*" * 40)
    choose = int(input("Masukkan pilihan anda: [1 - 4] : "))
    return choose
def Calc(choose):
    """Dispatch on the menu choice and print the computed volume."""
    if choose == 1:
        volume = CuboidVolume()
    elif choose == 2:
        volume = SilinderVolume(22/7)
    elif choose == 3:
        volume = ConeVolume(22/7)
    elif choose == 4:
        volume = SphereVolume(22/7)
    else:
        # Unknown menu option: report and compute nothing.
        print("This choice is unavailable")
        return
    print("Isi padu ialah", volume)
# Main interactive loop: run the calculator until the user answers "n".
# BUG FIX: the original guard `while resume != "y" or "n"` was always truthy
# (`or "n"` is a constant truthy operand), so any invalid answer locked the
# program in an infinite re-prompt loop; answering "y" after a round also
# incorrectly fell into the "not an option" branch.
resume = "y"
while resume == "y":
    chooseValue = Menu()
    Calc(chooseValue)
    resume = str(input("Type y to continue or n to stop: "))
    while resume not in ("y", "n"):
        resume = str(input("This is not an option. Continue? [y/n] "))
    if resume == "n":
        break
print("Thanks for using the calculator.")
|
# -*- coding: utf-8 -*-
"""
jishaku.cog
~~~~~~~~~~~~
The Jishaku debugging and diagnostics cog implementation.
:copyright: (c) 2021 Devon (Gorialis) R
:license: MIT, see LICENSE for more details.
"""
from discord.ext import commands
from jishaku.features.filesystem import FilesystemFeature
from jishaku.features.guild import GuildFeature
from jishaku.features.invocation import InvocationFeature
from jishaku.features.management import ManagementFeature
from jishaku.features.python import PythonFeature
from jishaku.features.root_command import RootCommand
from jishaku.features.shell import ShellFeature
from jishaku.features.voice import VoiceFeature
__all__ = (
    "Jishaku",
    "STANDARD_FEATURES",
    "OPTIONAL_FEATURES",
    "setup",
)
# Feature mixins that are always part of the cog.
STANDARD_FEATURES = (VoiceFeature, GuildFeature, FilesystemFeature, InvocationFeature, ShellFeature, PythonFeature, ManagementFeature, RootCommand)
OPTIONAL_FEATURES = []
try:
    from jishaku.features.youtube import YouTubeFeature
except ImportError:
    # The YouTube feature's dependencies are optional; skip it when absent.
    pass
else:
    OPTIONAL_FEATURES.insert(0, YouTubeFeature)
# Optional features are listed first so their overrides take MRO precedence.
class Jishaku(*OPTIONAL_FEATURES, *STANDARD_FEATURES):  # pylint: disable=too-few-public-methods
    """
    The frontend subclass that mixes in to form the final Jishaku cog.
    """
def setup(bot: commands.Bot):
    """
    The setup function defining the jishaku.cog and jishaku extensions.
    """
    # Instantiate the combined cog and attach it to the bot.
    bot.add_cog(Jishaku(bot=bot))
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import zipfile
import io
import pydicom
from pydicom import dcmread
from pydicom.filebase import DicomBytesIO
import requests
from tqdm import tqdm
# Global progress bar shared by getImages(); total=197 appears to be the
# expected number of series — TODO confirm against the query result size.
pbar = tqdm(total=197)
def getImageUrl(sid):
    """Build the TCIA v3 getImage query URL for a SeriesInstanceUID."""
    base = 'https://services.cancerimagingarchive.net/services/v3/TCIA'
    endpoint = '/query/getImage?'
    params = f'SeriesInstanceUID={sid}&format=zip'
    return base + endpoint + params
def download_extract_zip(url):
    """
    Download a ZIP file and extract its contents in raw bytes format
    in memory.
    Yields (filename, raw bytes) pairs.
    """
    payload = requests.get(url).content
    with zipfile.ZipFile(io.BytesIO(payload)) as archive:
        for member_name in archive.namelist():
            yield member_name, archive.read(member_name)
def getImages(SID):
    """Download DICOM files for the first few series in *SID*.

    Only the first 4 series and the first 4 files per series are kept.
    Returns the collected raw file payloads (bytes); the commented-out line
    shows the previous behaviour of parsing each payload with pydicom.
    Advances the module-level `pbar` once per processed series.
    """
    imagedataset = []
    for s, sid in enumerate(SID):
        # Cap the number of series downloaded.
        if s > 3: break
        myurl = getImageUrl(sid)
        files = []
        for i, j in download_extract_zip(myurl):
            files.append(j)
        for i, f in enumerate(files):
            # Cap the number of files kept per series.
            if i > 3:
                break
            try:
                imagedataset.append(f)
                # imagedataset.append(dcmread(DicomBytesIO(f)))
            except:
                # Best-effort: skip any file that cannot be processed.
                continue
        pbar.update(1)
    return imagedataset
|
"""
Print messages to terminal depending on a given verbosity level.
Similar to the Python logging module. Also meant to be used as Singleton.
The verbosity levels are:
- CRITICAL
- ERROR
- WARNING
- INFO
- DETAILS
- DEBUG
- DEBUG2
The default verbosity is `WARNING`.
"""
import textwrap
# should match the values defined in the logging module!
CRITICAL = 50
ERROR = 40
WARNING = 30
INFO = 20
DETAILS = 15
DEBUG = 10
DEBUG2 = 5
DEFAULT = WARNING
WIDTH = 70 # default line width for output
VERBOSITY_TO_NAME = {
CRITICAL: "CRITICAL",
ERROR: "ERROR",
WARNING: "WARNING",
INFO: "INFO",
DETAILS: "DETAILS",
DEBUG: "DEBUG",
DEBUG2: "DEBUG2",
}
class Output:
    """
    Handling the output based on the current verbosity level.
    This is inspired by the class ``logging.Logger()``. A verbosity level is stored,
    only messages printed with methods with higher importance will be printed.
    Parameters
    ----------
    verbosity : int
        Verbosity level.
        Minimum level of importance of messages to be printed, as defined by
        constants in this module
    logger : logging.Logger, optional
        Optional logger.
        Can be used to send messages also to a log file or elsewhere, log level is separate
        from verbosity.
    """
    def __init__(self, verbosity=DEFAULT, logger=None):
        """
        Initialize the unique Output object.
        At the moment only one instance will created: in the global scope of this module. An
        application might use the `setup()` method to set the verbosity and a logger.
        """
        self.verbosity = verbosity
        self.logger = logger
    def set_verbosity(self, verbosity=DEFAULT):
        """
        Set verbosity level.
        Parameters
        ----------
        verbosity : int
            Verbosity level.
        """
        self.verbosity = verbosity
    def _print(self, verbosity, msg, wrap, indent):
        # Central gate: emit only messages at least as important as the
        # configured verbosity level.
        if verbosity >= self.verbosity:
            if wrap:
                # Wrap each original line separately so embedded newlines
                # are preserved.
                input_msg = msg.split("\n")
                msg = "\n".join(
                    [
                        textwrap.fill(
                            line,
                            width=WIDTH,
                            break_long_words=False,
                            initial_indent=indent,
                            subsequent_indent=indent,
                        )
                        for line in input_msg
                    ]
                )
            print(msg)
            if self.logger:
                # DETAILS and DEBUG2 are not standard logging levels; map them
                # to DEBUG when forwarding to the logger.
                self.logger.log(verbosity if verbosity not in (DETAILS, DEBUG2) else DEBUG, msg)
    def debug2(self, msg, wrap=True, indent=""):
        """
        Print a message with verbosity level DEBUG2.
        Parameters
        ----------
        msg : str
            The message.
        wrap : bool, optional
            Wrap the message at ``WIDTH`` (default 70) characters (if too long).
        indent : str, optional
            Indent each line with this string.
        """
        self._print(DEBUG2, msg, wrap, indent)
    def debug(self, msg, wrap=True, indent=""):
        """
        Print a message with verbosity level DEBUG.
        Parameters
        ----------
        msg : str
            The message.
        wrap : bool, optional
            Wrap the message at ``WIDTH`` (default 70) characters (if too long).
        indent : str, optional
            Indent each line with this string.
        """
        # this is the old verbose >= 3
        self._print(DEBUG, msg, wrap, indent)
    def details(self, msg, wrap=True, indent=""):
        """
        Print a message with verbosity level DETAILS.
        Parameters
        ----------
        msg : str
            The message.
        wrap : bool, optional
            Wrap the message at ``WIDTH`` (default 70) characters (if too long).
        indent : str, optional
            Indent each line with this string.
        """
        # this is the old verbose >= 2
        self._print(DETAILS, msg, wrap, indent)
    def info(self, msg, wrap=True, indent=""):
        """
        Print a message with verbosity level INFO.
        Parameters
        ----------
        msg : str
            The message.
        wrap : bool, optional
            Wrap the message at ``WIDTH`` (default 70) characters (if too long).
        indent : str, optional
            Indent each line with this string.
        """
        # this is the old verbose >= 1
        self._print(INFO, msg, wrap, indent)
    def warning(self, msg, wrap=True, indent=""):
        """
        Print a message with verbosity level WARNING.
        Parameters
        ----------
        msg : str
            The message.
        wrap : bool, optional
            Wrap the message at ``WIDTH`` (default 70) characters (if too long).
        indent : str, optional
            Indent each line with this string.
        """
        # this is the old verbose >= 0
        self._print(WARNING, msg, wrap, indent)
    def error(self, msg, wrap=True, indent=""):
        """
        Print a message with verbosity level ERROR.
        Parameters
        ----------
        msg : str
            The message.
        wrap : bool, optional
            Wrap the message at ``WIDTH`` (default 70) characters (if too long).
        indent : str, optional
            Indent each line with this string.
        """
        # to print errors, probably not used atm
        self._print(ERROR, msg, wrap, indent)
    def critical(self, msg, wrap=True, indent=""):
        """
        Print a message with verbosity level CRITICAL.
        Parameters
        ----------
        msg : str
            The message.
        wrap : bool, optional
            Wrap the message at ``WIDTH`` (default 70) characters (if too long).
        indent : str, optional
            Indent each line with this string.
        """
        # just for consistency with the logging module
        self._print(CRITICAL, msg, wrap, indent)
output = Output()
|
import asyncio
import json
import random
import time
from collections import namedtuple
from functools import partial
from typing import Dict, Iterable, List, Tuple
import discord
import lxml
import markovify
from aiofile import AIOFile
from discord.ext import commands
from requests_html import HTML, HTMLSession
from ..utils.caching import get_cached
from ..utils.exceptions import CommandError
from ..utils.experimental import get_ctx
from ..utils.json import dump_json
from .base_cog import BaseCog
# Shared HTML session for scraping Twitter timelines.
session = HTMLSession()
USERS_FILE = "db/twitter/users.json"
# Per-user record persisted in USERS_FILE.
# NOTE(review): `defaults=[[]]` makes a single shared mutable list the default
# for `aliases` across all instances — confirm it is never mutated in place.
TwitterUser = namedtuple("TwitterUser", "user modified tweets aliases", defaults=[[]])
Tweet = namedtuple("Tweet", "text url")
class TwitterCog(BaseCog):
    """Twitter commands"""
    EMOJI = "<:twitter:572746936658952197>"
    DIRS = ["db/twitter"]
    FILES = [USERS_FILE]
    TWITTER_PAGES = 20  # timeline pages fetched per user
    MARKOV_LEN = 140  # max length of generated markov sentences
    def __init__(self, bot: commands.Bot) -> None:
        super().__init__(bot)
        # Key: Username, Value: markovify text model
        self.text_models: Dict[str, markovify.NewlineText] = {}
        # Register the per-user commands for every user already on disk.
        for user in self.users.values():
            self.create_commands(user)
    @property
    def users(self) -> Dict[str, TwitterUser]:
        # Builds a *fresh* dict from the cached file contents on every access.
        users = get_cached(USERS_FILE, category="twitter")
        return {user: TwitterUser(*values) for user, values in users.items()}
    async def update_user(self, user: TwitterUser) -> None:
        # NOTE(review): `self.users` is a property returning a new dict each
        # call, so this item assignment mutates a temporary and the dump below
        # re-reads the property — verify the update is actually persisted.
        self.users[user.user] = user
        await dump_json(USERS_FILE, self.users)
    def create_commands(self, user: TwitterUser) -> None:
        """Register two bot commands for *user*: one posting a random tweet
        URL, one posting a markov-generated fake tweet."""
        username = user.user.lower()
        aliases = user.aliases
        # Make command that fetches random tweet URL
        _u_cmd = asyncio.coroutine(partial(self._twitter_url_cmd, user=username))
        url_cmd = commands.command(name=f"{username}", aliases=aliases)(_u_cmd)
        # Make command that generates tweet using markov chain
        _m_cmd = asyncio.coroutine(partial(self._twitter_markov_cmd, user=username))
        markov_cmd = commands.command(name=f"{username}_markov",
                                      aliases=[f"{alias}m" for alias in aliases] or [f"{username}m"]
                                      )(_m_cmd)
        self.bot.add_command(url_cmd)
        self.bot.add_command(markov_cmd)
    async def _twitter_url_cmd(self, ctx: commands.Context, user: str) -> None:
        # Tweets are stored as (url, text) pairs; element [0] is the URL.
        tweets = self.users[user].tweets
        await ctx.send(random.choice(tweets)[0])
    async def _twitter_markov_cmd(self, ctx: commands.Context, user: str) -> None:
        await ctx.send(await self.generate_sentence(user))
    @commands.group(name="twitter", enabled=False)
    async def twitter(self, ctx: commands.Context) -> None:
        # Group entry point: list the available subcommands when none given.
        cmds = ", ".join([f"**`{command.name}`**" for command in ctx.command.commands])
        if ctx.invoked_subcommand is None:
            await ctx.send(f"No argument provided! Must be one of: {cmds}.")
    @twitter.command(name="add")
    async def add_twitter_user(self, ctx: commands.Context, user: str, *aliases) -> None:
        """Add twitter user."""
        await ctx.message.channel.trigger_typing()
        user = user.lower()
        if user in self.users:
            raise CommandError(f"{user} is already added! "
                               f"Type **`{self.bot.command_prefix}twitter update {user}`** to fetch newest tweets for {user}.")
        try:
            await self.get_tweets(ctx, user, aliases=aliases)
        except:
            raise
        else:
            self.create_commands(self.users[user])
            await ctx.send(f"Added {user}!")
    @twitter.command(name="update")
    async def update_tweets(self, ctx: commands.Context, user: str) -> None:
        """Update tweets for a specific user."""
        user = user.lower()
        if user in self.users:
            try:
                await self.get_tweets(ctx, user)
            except:
                raise
            else:
                await ctx.send(f"Updated {user} successfully!")
        else:
            await ctx.send("User is not added! "
                           f"Type **`{self.bot.command_prefix}twitter add <user>`** or ")
    @twitter.command(name="users", aliases=["show", "list"])
    async def show_twitter_users(self, ctx: commands.Context) -> None:
        """Displays added twitter users."""
        users = "\n".join([user for user in self.users])
        if not users:
            return await ctx.send("No twitter users added! "
                                  f"Type `{self.bot.command_prefix}twitter add <user>` to add a user"
                                  )
        await self.send_embed_message(ctx, "Twitter users", users)
    async def get_tweets(self, ctx: commands.Context, user: str, aliases=None) -> List[Tuple[str, str]]:
        """Retrieves tweets for a specific user.
        If user already has saved tweets, new tweets are added to
        existing list of tweets.
        """
        if not aliases:
            aliases = []
        tweets = []
        msg = await ctx.send("Fetching tweets...")
        try:
            # Scraping is blocking; run it in an executor to keep the loop free.
            to_run = partial(self._get_tweets, user, pages=self.TWITTER_PAGES)
            async with ctx.typing():
                tweets = await self.bot.loop.run_in_executor(None, to_run)
        except ValueError:
            raise IOError(f"Unable not fetch tweets for {user}")
        except lxml.etree.ParserError:
            # Malformed page: treat as "no tweets fetched".
            pass
        else:
            if not tweets:
                return
            # Add fetched tweets to existing user's tweets if they exist
            _user = self.users.get(user)
            if _user:
                tweets = set(tuple(tweets))
                old_tweets = set(tuple((tweet[0], tweet[1]) for tweet in _user.tweets))
                tweets.update(old_tweets)
                tweets = list(tweets)
                aliases = _user.aliases
            # Create new TwitterUser object with updated tweets
            tu = TwitterUser(user, time.time(), tweets, aliases)
            # Save updated user
            await self.update_user(tu)
        finally:
            # Always remove the "Fetching tweets..." placeholder message.
            await msg.delete()
    def _get_tweets(self, user: str, pages: int) -> Iterable[str]:
        """Modified version of https://github.com/kennethreitz/twitter-scraper.
        Generator of tweet URLs or tweet text from a specific user.
        Parameters
        ----------
        user : `str`
            Username of user to get tweets from
        pages : `int`, optional
            Number of pages to return tweets from.
            25 is the maximum allowed number of pages.
        Raises
        ------
        ValueError
            Raised if user cannot be found
        Returns
        -------
        `Iterable[str]`
            Tweets
        """
        url = f'https://twitter.com/i/profiles/show/{user}/timeline/tweets?include_available_features=1&include_entities=1&include_new_items_bar=true'
        headers = {
            'Accept': 'application/json, text/javascript, */*; q=0.01',
            'Referer': f'https://twitter.com/{user}',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8',
            'X-Twitter-Active-User': 'yes',
            'X-Requested-With': 'XMLHttpRequest'
        }
        def gen_tweets(pages: int):
            r = session.get(url, headers=headers)
            while pages > 0:
                try:
                    html = HTML(html=r.json()['items_html'],
                                url='bunk', default_encoding='utf-8')
                except KeyError:
                    raise ValueError(
                        f'Oops! Either "{user}" does not exist or is private.')
                tweets = []
                for tweet in html.find('.stream-item'):
                    try:
                        # Strip embedded media/links from the tweet text.
                        text = tweet.find('.tweet-text')[0].full_text.split(
                            "pic.twitter.com")[0].split("https://")[0]
                        _url = f"https://twitter.com/{user}/status/{tweet.attrs['data-item-id']}"
                        tweets.append((_url, text))
                    except:
                        # Best-effort: skip items without parseable text.
                        pass
                last_tweet = html.find('.stream-item')[-1].attrs['data-item-id']
                for tweet in tweets:
                    if tweet:
                        yield tweet
                # Paginate from the last seen tweet id.
                r = session.get(
                    url, params={'max_position': last_tweet}, headers=headers)
                pages += -1
        yield from gen_tweets(pages)
    async def generate_sentence(self, user: str, length: int = 140) -> str:
        """Generate a fake tweet for *user* via a markov chain over their tweets."""
        # Add user if username passed in is not added to database
        if user not in self.users:
            ctx = get_ctx()
            await self.get_tweets(ctx, user)
        # Get tweet text only
        tweets = [tweet[1] for tweet in self.users[user].tweets]
        # Create text model based on tweets
        text_model = await self.get_text_model(user, tweets)
        to_run = partial(text_model.make_short_sentence, length, tries=300)
        sentence = await self.bot.loop.run_in_executor(None, to_run)
        if not sentence:
            raise OSError("Could not generate text!")  # I'll find a better exception class
        return sentence
    async def get_text_model(self, user: str, text: List[str]) -> markovify.Text:
        # Check if we have already created a text model for this user
        text_model = self.text_models.get(user)
        if not text_model:
            t = "\n".join([_t for _t in text])
            text_model = markovify.NewlineText(t)
            self.text_models[user] = text_model
        return text_model
|
import requests
import json
import yaml
def search(text):
    """Search YouTube for *text* and return the parsed video results.

    Fetches the (filtered: videos only) results page, extracts the embedded
    ytInitialData payload and flattens each entry via get_video().
    """
    words = "+".join(text.split())
    url = f"https://www.youtube.com/results?search_query={words}&sp=EgIQAQ%253D%253D"
    response = requests.get(url)
    response_json = get_json(response.text)
    write_json("response", response_json)  # debug dump of the raw payload
    videos = get_videos(response_json)
    # BUG FIX: a stray bare `return` made the line below unreachable, so the
    # function always returned None instead of the parsed results.
    return [get_video(video) for video in videos]
def write_json(file, data):
    """Dump *data* as pretty-printed JSON into `<file>.json`."""
    serialized = json.dumps(data, indent=4)
    with open(f"{file}.json", "w") as handle:
        handle.write(serialized)
def get_json(html):
    """Extract the ``ytInitialData`` object embedded in a YouTube results page.

    Locates the ``var ytInitialData = {...};`` assignment in the HTML, slices
    out the object literal (dropping the trailing ``;``) and parses it.
    """
    START_TEXT = "var ytInitialData = "
    END_TEXT = "</script>"
    reduced_html = html[html.find(START_TEXT) + len(START_TEXT):]
    json_text = reduced_html[:reduced_html.find(END_TEXT)].strip()[:-1]
    # The payload is strict JSON: parse it with the stdlib json module rather
    # than yaml.load, which is far slower on large documents and applies
    # YAML 1.1 scalar rules that JSON does not have.
    return json.loads(json_text)
def get_videos(data):
data = data["contents"]["twoColumnSearchResultsRenderer"]
data = data["primaryContents"]["sectionListRenderer"]["contents"]
return data[0]["itemSectionRenderer"]["contents"]
def get_video(data):
    """Flatten a raw ``videoRenderer`` payload into a compact video dict."""
    renderer = data["videoRenderer"]
    owner = renderer["ownerText"]["runs"][0]
    channel_id = owner["navigationEndpoint"]["browseEndpoint"]["browseId"]
    video_id = renderer["videoId"]
    channel = {
        "title": owner["text"],
        "id": channel_id,
        "thumbnail": renderer["channelThumbnailSupportedRenderers"]["channelThumbnailWithLinkRenderer"]["thumbnail"]["thumbnails"][-1]["url"],
        "url": f"https://www.youtube.com/channel/{channel_id}",
    }
    return {
        "id": video_id,
        "title": renderer["title"]["runs"][0]["text"],
        "thumbnail": renderer["thumbnail"]["thumbnails"][-1]["url"],
        "channel": channel,
        "url": f"https://www.youtube.com/watch?v={video_id}",
    }
def print_json(data):
    """Pretty-print *data* to stdout as indented JSON."""
    formatted = json.dumps(data, indent=4)
    print(formatted)
def main():
    """Run a throwaway search and pretty-print whatever comes back."""
    query = "ahefao esfbfalsbdvakdsfabsdff"
    results = search(query)
    print_json(results)
    return results
# Allow running the module directly for a quick manual check.
if __name__ == "__main__":
    main()
import networkx as nx
import matplotlib.pyplot as plt
from random import randint
from math import inf
import geopy.distance
# Module-level graph shared by make_nodes(), dijkstra() and the rendering code.
G = nx.Graph()
def make_nodes():
    """Populate the global graph G with suburb/university nodes (edges weighted
    by great-circle distance in km) and zero-weight person-location edges."""
    # node list
    locations = {
        "Melbourne University": (-37.798248386903616, 144.9609632686033),
        "Monash University": (-37.907144361086004, 145.13717270067085),
        "Swinburne University": (-37.820634057054306, 145.03683630636752),
        "RMIT University": (-37.80674215563578, 144.96438807402902),
        "Victoria University": (-37.79331726178111, 144.89875939620745),
        "KFC": (-37.86817774432651, 144.729787441064),
        "McDonald's": (-37.89468475952529, 144.7529148686743),
        "Oporto": (-37.83391390548008, 144.69030112965373),
        "Hungry Jacks": (-37.87556724886548, 144.6819279538855),
        "Werribee": (-37.89869509112579, 144.66345105480698),
        "Point Cook": (-37.918329529191574, 144.74669353277324),
        "Hoppers Crossing": (-37.86233985503369, 144.68476425340285),
        "Burnside": (-37.753250721869605, 144.75228098106768),
        "Tarneit": (-37.808604569947896, 144.66686217407914),
        "Manor Lakes": (-37.8736488572485, 144.58036378564364),
        "Gladstone Park": (-37.69183386250476, 144.89449104693733),
        "Wollert": (-37.60740317084443, 145.03031352102934),
        "Reservior": (-37.71298544191029, 145.0060242796427),
        "Altona North": (-37.83764214052825, 144.84601862975424)
    }
    # add nodes: a complete graph over all locations, weighted by distance (km)
    for i in locations.keys():
        for j in locations.keys():
            if not i == j:
                G.add_edge(i, j, weight=geopy.distance.distance(locations[i], locations[j]).km)
    # NOTE(review): the first entries map person -> [suburb] while the later
    # ones map suburb -> [person]; since the graph is undirected both produce
    # the same zero-weight person–suburb edge, but the layout is inconsistent.
    friends = {
        "Me": ["Werribee"],
        "Aaryan": ["Werribee"],
        "James": ["Point Cook"],
        "Yuktha": ["Point Cook"],
        "Andy": ["Point Cook"],
        "Prabhas": ["Point Cook"],
        "Jay": ["Hoppers Crossing"],
        "Akira": ["Burnside"],
        "Tarneit": ["Nathan"],
        "Manor Lakes": ["Angna"],
        "Gladstone Park": ["Yaseen"],
        "Wollert": ["Alex"],
        "Reservior": ["Nicky"],
        "Altona North": ["Lina"]
    }
    for i in friends.keys():
        for j in friends[i]:
            # Zero weight: being picked up adds no travel distance at the node.
            G.add_edge(j, i, weight=0)
make_nodes()
# NOTE(review): `uni` is read but never used afterwards — confirm whether the
# chosen university was meant to influence the route computation below.
uni = input("Select a University: ")
def say_friends():
    """Pick three distinct random friends to collect and return their universities."""
    friends = {"Raida": ["Computer Science", "Melbourne University"], "Yuktha": ["Law", "Monash University"], "Phuong": ["Engineering", "Monash University"], "Levan": ["Science", "Melbourne University"]}
    names = list(friends.keys())
    print("The friends you have to pick up are:")
    dest = []
    for _ in range(3):
        idx = randint(0, len(names) - 1)
        name = names.pop(idx)
        print(f"{name} studying {friends[name][0]} in {friends[name][1]}")
        dest.append(friends[name][1])  # append university
    return dest
def dijkstra():
    """Single-source shortest paths over the global graph G from "Werribee".

    Stores the results on the nodes themselves: 'dist' (shortest distance)
    and 'prev' (predecessor on the shortest path).
    """
    nx.set_node_attributes(G, inf, "dist")
    G.nodes["Werribee"]["dist"] = 0
    nx.set_node_attributes(G, None, "prev")
    pending = [node for node in G.nodes]
    while pending:
        # Extract the closest unvisited node (ties broken by node name,
        # matching the (dist, node) tuple ordering).
        _, u = min((G.nodes[node]["dist"], node) for node in pending)
        pending.remove(u)
        for edge in G.edges(u):
            neighbor = edge[1]
            candidate = G.nodes[u]['dist'] + G.edges[edge]['weight']
            if candidate < G.nodes[neighbor]['dist']:
                G.nodes[neighbor]['dist'] = candidate
                G.nodes[neighbor]['prev'] = u
dijkstra()
dests = say_friends()
# color map for nodes: pickup destinations and the start suburb in red.
color_map = []
for node in G:
    if node in dests:
        color_map.append('red')
    elif node == "Werribee":
        color_map.append('red')
    else:
        color_map.append('blue')
# render the graph with edge distances as labels
pos = nx.random_layout(G)
#labels = nx.get_node_attributes(G, 'dist')
nx.draw(G, pos, node_color=color_map, with_labels=True)
labels = nx.get_edge_attributes(G, 'weight')
nx.draw_networkx_edge_labels(G, pos, edge_labels=labels)
plt.show()
|
import numpy as np
from dasy.core import DataSynthesizer
#TODO: switch from numpy random to Generator
class UniformSynth(DataSynthesizer):
    """
    Synthesizer drawing i.i.d. samples uniformly from the interval [low, high).
    """
    def __init__(self, dim, low=-10., high=10.):
        super().__init__(dim)
        self.low = low
        self.high = high

    def sample(self, n=100):
        """Return an (n, dim) array of uniform draws."""
        shape = (n, self.dim)
        return np.random.uniform(low=self.low, high=self.high, size=shape)
|
import luigi
from typing import Dict, Any
import torch
import torch.nn as nn
import numpy as np
from numpy.random.mtrand import RandomState
from mars_gym.model.bandit import BanditPolicy
from typing import Dict, Any, List, Tuple, Union
from mars_gym.meta_config import ProjectConfig
from mars_gym.model.abstract import RecommenderModule
class SimpleLinearModel(RecommenderModule):
    """Scores a (user, item) pair with a small MLP over the concatenation of
    user/item embeddings, the flattened interaction-history embeddings, the
    item position and the item metadata; output is a sigmoid score in (0, 1).
    """
    def __init__(
        self,
        project_config: ProjectConfig,
        index_mapping: Dict[str, Dict[Any, int]],
        n_factors: int,
        metadata_size: int,
        window_hist_size: int,
    ):
        super().__init__(project_config, index_mapping)
        self.user_embeddings = nn.Embedding(self._n_users, n_factors)
        self.item_embeddings = nn.Embedding(self._n_items, n_factors)
        # user + item + flatten hist + position + metadata
        num_dense = 2 * n_factors + window_hist_size * n_factors + 1 + metadata_size
        self.dense = nn.Sequential(
            nn.Linear(num_dense, 500), nn.SELU(), nn.Linear(500, 1),
        )
    def flatten(self, input: torch.Tensor):
        # Collapse all trailing dimensions, keeping the batch dimension.
        return input.view(input.size(0), -1)
    def forward(
        self,
        user_ids: torch.Tensor,
        item_ids: torch.Tensor,
        pos_item_id: torch.Tensor,
        list_reference_item: torch.Tensor,
        list_metadata: torch.Tensor,
    ):
        # assumes list_reference_item is (batch, window_hist_size) and
        # list_metadata is (batch, metadata_size) — TODO confirm with callers
        user_emb = self.user_embeddings(user_ids)
        item_emb = self.item_embeddings(item_ids)
        history_items_emb = self.item_embeddings(list_reference_item)
        x = torch.cat(
            (
                user_emb,
                item_emb,
                self.flatten(history_items_emb),
                pos_item_id.float().unsqueeze(1),
                list_metadata.float(),
            ),
            dim=1,
        )
        x = self.dense(x)
        return torch.sigmoid(x)
class EGreedyPolicy(BanditPolicy):
    """Epsilon-greedy arm selection on top of a reward model.

    With probability ``epsilon`` a uniformly random arm index is chosen
    (exploration); otherwise the arm with the highest score is chosen
    (exploitation).
    """

    def __init__(self, reward_model: nn.Module, epsilon: float = 0.1, seed: int = 42) -> None:
        super().__init__(reward_model)
        self._epsilon = epsilon
        self._rng = RandomState(seed)

    def _select_idx(
        self,
        arm_indices: List[int],
        arm_contexts: Tuple[np.ndarray, ...] = None,
        arm_scores: List[float] = None,
        pos: int = 0,
    ) -> Union[int, Tuple[int, float]]:
        """Return an arm index: random with probability epsilon, else argmax score."""
        explore = self._rng.choice([True, False], p=[self._epsilon, 1.0 - self._epsilon])
        if explore:
            # Exploration: uniform over the available arms.
            return self._rng.choice(len(arm_indices))
        # Exploitation: arm with the maximal predicted reward.
        return np.argmax(arm_scores)
|
# Lint as: python3
#
# Copyright 2021 The XLS Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for xls.public.runtime_build_actions."""
import sys
import tempfile
from absl.testing import absltest
from xls.common.python import init_xls
from xls.public.python import runtime_build_actions
def setUpModule():
    """Initialize the XLS runtime before any test in this module runs.

    This is required so that module initializers are called, including those
    which register delay models.
    """
    init_xls.init_xls(sys.argv)
class RuntimeBuildActionsTest(absltest.TestCase):
    """Exercises the public runtime build actions (DSLX -> IR -> optimized IR)."""

    def test_convert_dslx_to_ir(self):
        # Convert a trivial add-one function and spot-check the emitted IR.
        program = 'pub fn foo(v:u8) -> u8{ v+u8:1 }'
        ir_text = runtime_build_actions.convert_dslx_to_ir(
            program, '/path/to/foo', 'foo', [])
        for fragment in ('package foo', 'bits[8]', 'add', 'literal'):
            self.assertIn(fragment, ir_text)

    def test_convert_dslx_path_to_ir(self):
        # Same conversion, but starting from a DSLX file on disk.
        with tempfile.NamedTemporaryFile(prefix='foo', suffix='.x') as tmp:
            tmp.write(b'pub fn foo(v:u8) -> u8{ v+u8:1 }')
            tmp.flush()
            ir_text = runtime_build_actions.convert_dslx_path_to_ir(tmp.name, [])
        for fragment in ('package foo', 'bits[8]', 'add', 'literal'):
            self.assertIn(fragment, ir_text)

    def test_optimize_ir(self):
        # Optimization should change the IR text (e.g. folding u8:2-u8:1).
        program = 'pub fn foo(v:u8) -> u8{ v+u8:2-u8:1 }'
        ir_text = runtime_build_actions.convert_dslx_to_ir(
            program, '/path/to/foo', 'foo', [])
        opt_ir_text = runtime_build_actions.optimize_ir(ir_text, '__foo__foo')
        self.assertNotEqual(ir_text, opt_ir_text)

    def test_mangle_dslx_name(self):
        # Module + function names mangle into a single IR symbol.
        mangled = runtime_build_actions.mangle_dslx_name('foo', 'bar')
        self.assertEqual('__foo__bar', mangled)

    def test_proto_to_dslx(self):
        # A schema plus a text proto converts into DSLX structure definitions.
        binding_name = 'MY_TEST_MESSAGE'
        proto_def = """syntax = "proto2";
package xls_public_test;
message TestMessage {
optional int32 test_field = 1;
}
message TestRepeatedMessage {
repeated TestMessage messages = 1;
}"""
        text_proto = """messages: {
test_field: 42
}
messages: {
test_field: 64
}"""
        message_name = 'xls_public_test.TestRepeatedMessage'
        dslx_text = runtime_build_actions.proto_to_dslx(
            proto_def, message_name, text_proto, binding_name)
        for fragment in ('TestMessage', 'message', 'test_field', binding_name):
            self.assertIn(fragment, dslx_text)
# Script entry point: run every test in this module under absltest.
if __name__ == '__main__':
    absltest.main()
|
import numpy as np
def getPoints(width, height, H):
    """Project the four corners of a width x height image through homography H.

    Args:
        width: Image width in pixels.
        height: Image height in pixels.
        H: 3x3 homography matrix.

    Returns:
        points: (4, 2) array of projected corner coordinates in the order
            top-left (0,0), bottom-left (0,h-1), top-right (w-1,0),
            bottom-right (w-1,h-1).
        point: the last homogeneous source corner used, i.e. [w-1, 0, 1]
            (kept for backward compatibility with existing callers).
    """
    points = np.zeros((4, 2))
    point = np.zeros(3)
    point[2] = 1  # homogeneous coordinate

    def _project(p):
        # Apply H once and dehomogenize; the original computed the product twice.
        q = np.matmul(H, p)
        return (q / q[2])[:2]

    points[0] = _project(point)          # (0, 0)
    point[1] = height - 1
    points[1] = _project(point)          # (0, height-1)
    point[0] = width - 1
    points[3] = _project(point)          # (width-1, height-1)
    point[1] = 0
    points[2] = _project(point)          # (width-1, 0)
    return points, point
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
Constants should be defined here.
"""
import os
# Absolute path to the package root (two directory levels above this module).
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
class AssetstoreType:
    """
    All possible assetstore implementation types.
    """
    FILESYSTEM = 0  # Files stored on the local filesystem.
    GRIDFS = 1      # Files stored via GridFS.
    S3 = 2          # Files stored in Amazon S3.
class AccessType:
    """
    Represents the level of access granted to a user or group on an
    AccessControlledModel. Having a higher access level on a resource also
    confers all of the privileges of the lower levels.

    Semantically, READ access on a resource means that the user can see all
    the information pertaining to the resource, but cannot modify it.

    WRITE access usually means the user can modify aspects of the resource.

    ADMIN access confers total control; the user can delete the resource and
    also manage permissions for other users on it.
    """
    NONE = -1   # No access at all.
    READ = 0    # View the resource without modifying it.
    WRITE = 1   # Modify aspects of the resource.
    ADMIN = 2   # Full control, including deletion and permission management.
|
from datetime import date
from dateutil.relativedelta import relativedelta
from django.db.models import Count, Sum
from django.db.models.functions import TruncMonth
from project.models import Project, ProjectUserMembership
from stats.models import ComputeDaily
from .util import parse_efficiency_result_set, seconds_to_hours
class UserStatsParser:
    '''
    User stats parser.

    Aggregates a user's ComputeDaily records, restricted to projects the user
    is an authorised member of, into per-month statistics over the last
    12 months.
    '''

    def __init__(self, user, project_filter):
        '''
        Args:
            user: The user whose stats are being queried.
            project_filter: Either 'all' or a single project code.
        '''
        self.user = user
        self.project_ids = self._parse_project_ids(project_filter)
        # Default to last 12 months
        self.start_date = date.today() + relativedelta(months=-12)
        self.end_date = date.today()

    def _monthly_usage_queryset(self):
        '''
        Return the user's ComputeDaily rows in the configured date range,
        grouped by month and annotated with summed wait/cpu/wall times.
        Shared by the rate-of-usage and cumulative-usage queries.
        '''
        return ComputeDaily.objects.filter(
            project__in=self.project_ids,
            user=self.user,
            date__range=[self.start_date, self.end_date],
        ).annotate(month=TruncMonth('date')).values('month').annotate(
            c=Count('id'),
            wait_time=Sum('wait_time'),
            cpu_time=Sum('cpu_time'),
            wall_time=Sum('wall_time'),
        ).order_by('month')

    def rate_of_usage_per_month(self):
        '''
        Return the rate of usage grouped by month.
        '''
        try:
            result = self._monthly_usage_queryset()
            # Parse result into parallel per-month lists (hours).
            dates = []
            wait_time = []
            cpu_time = []
            wall_time = []
            for row in result:
                dates.append(row['month'].strftime('%b %Y'))
                wait_time.append(seconds_to_hours(row['wait_time'].total_seconds()))
                cpu_time.append(seconds_to_hours(row['cpu_time'].total_seconds()))
                wall_time.append(seconds_to_hours(row['wall_time'].total_seconds()))
            data = {
                'dates': dates,
                'wait_time': wait_time,
                'cpu_time': cpu_time,
                'wall_time': wall_time,
            }
        except Exception:
            # Best-effort API: any query/parse failure degrades to an empty dict.
            data = {}
        return data

    def cumulative_total_usage_per_month(self):
        '''
        Return the cumulative total usage grouped by month.
        '''
        try:
            result = self._monthly_usage_queryset()
            # Seed the running totals with the first month's values; an empty
            # result raises IndexError and yields the empty response below.
            dates = [result[0]['month'].strftime('%b %Y')]
            wait_time = [seconds_to_hours(result[0]['wait_time'].total_seconds())]
            cpu_time = [seconds_to_hours(result[0]['cpu_time'].total_seconds())]
            wall_time = [seconds_to_hours(result[0]['wall_time'].total_seconds())]
            # Accumulate each subsequent month on top of the previous total.
            for row in result[1:]:
                dates.append(row['month'].strftime('%b %Y'))
                wait_time.append(seconds_to_hours(row['wait_time'].total_seconds()) + wait_time[-1])
                cpu_time.append(seconds_to_hours(row['cpu_time'].total_seconds()) + cpu_time[-1])
                wall_time.append(seconds_to_hours(row['wall_time'].total_seconds()) + wall_time[-1])
            data = {
                'dates': dates,
                'wait_time': wait_time,
                'cpu_time': cpu_time,
                'wall_time': wall_time,
            }
        except Exception:
            data = {}
        return data

    def efficiency_per_month(self):
        '''
        Return the efficiency (cpu time vs wall time) grouped by month.
        '''
        try:
            # Query cpu and wall time in date range
            results_in_date_range = ComputeDaily.objects.filter(
                project__in=self.project_ids,
                user=self.user,
                date__range=[self.start_date, self.end_date],
            ).annotate(month=TruncMonth('date')).values('month').annotate(
                c=Count('id'),
                cpu_time_sum=Sum('cpu_time'),
                wall_time_sum=Sum('wall_time'),
            ).order_by('month')
            # Parse in date range results
            dates, efficiency = parse_efficiency_result_set(results_in_date_range)
            data = {
                'dates': dates,
                'efficiency': efficiency,
            }
        except Exception:
            data = {}
        return data

    def num_jobs_per_month(self):
        '''
        Return the number of jobs grouped by month.
        '''
        try:
            # Query number of jobs in date range
            results_in_date_range = ComputeDaily.objects.filter(
                project__in=self.project_ids,
                user=self.user,
                date__range=[self.start_date, self.end_date],
            ).annotate(month=TruncMonth('date')).values('month').annotate(
                c=Count('id'),
                number_jobs=Sum('number_jobs'),
            ).order_by('month')
            # Parse in date range results
            dates = []
            number_jobs = []
            for row in results_in_date_range:
                dates.append(row['month'].strftime('%b %Y'))
                number_jobs.append(row['number_jobs'])
            data = {
                'dates': dates,
                'number_jobs': number_jobs,
            }
        except Exception:
            data = {}
        return data

    def _parse_project_ids(self, project_filter):
        '''
        Return project ids from a given list of project codes.

        Only projects where the user holds an AUTHORISED membership are
        considered; anything else silently yields an empty list.
        '''
        project_ids = []
        # Validate the user has a project user membership to the project codes.
        valid_projects = ProjectUserMembership.objects.filter(
            user=self.user,
            status=ProjectUserMembership.AUTHORISED,
        ).values('project__code', 'project__id')
        if valid_projects:
            if project_filter == 'all':
                # Return the project id's for all valid project codes.
                project_ids = [project['project__id'] for project in valid_projects]
            else:
                # Return the project id of the chosen project code.
                try:
                    # Check the project code provided is in valid projects.
                    valid = any(
                        project['project__code'] == project_filter
                        for project in valid_projects
                    )
                    if valid:
                        project = Project.objects.get(code=project_filter)
                        project_ids = [project.id]
                except Exception:
                    # A user is most likely trying to access a project
                    # in which they do not have a project user membership.
                    pass
        return project_ids
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the Sobol indexes sampling strategy
Created on May 21, 2016
@author: alfoa
supercedes Samplers.py from talbpw
"""
#for future compatibility with Python 3--------------------------------------------------------------
from __future__ import division, print_function, unicode_literals, absolute_import
import warnings
warnings.simplefilter('default',DeprecationWarning)
#End compatibility block for Python 3----------------------------------------------------------------
#External Modules------------------------------------------------------------------------------------
import numpy as np
from operator import mul
from functools import reduce
import itertools
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from .SparseGridCollocation import SparseGridCollocation
from .Grid import Grid
from utils import utils
import Distributions
import SupervisedLearning
import Quadratures
import IndexSets
import MessageHandler
#Internal Modules End--------------------------------------------------------------------------------
class Sobol(SparseGridCollocation):
    """
    Sobol indexes sampling strategy.

    Builds one sparse-grid collocation per subset (combination) of input
    variables up to the HDMR expansion order, then collects the union of all
    collocation points (plus the reference/mean point) as the samples to run.
    """

    def __init__(self):
        """
        Default Constructor that will initialize member variables with reasonable
        defaults or empty lists/dictionaries where applicable.
        @ In, None
        @ Out, None
        """
        Grid.__init__(self)
        self.type = 'SobolSampler'
        self.printTag = 'SAMPLER SOBOL'
        self.assemblerObjects={} #dict of external objects required for assembly
        self.maxPolyOrder = None #L, the relative maximum polynomial order to use in any dimension
        self.sobolOrder = None #S, the order of the HDMR expansion (1,2,3), queried from the sobol ROM
        self.indexSetType = None #the type of index set to use, queried from the sobol ROM
        self.polyDict = {} #varName-indexed dict of polynomial types
        self.quadDict = {} #varName-indexed dict of quadrature types
        self.importanceDict = {} #varName-indexed dict of importance weights
        self.references = {} #reference (mean) values for distributions, by var
        self.solns = None #pointer to output dataObjects object
        self.ROM = None #pointer to sobol ROM
        self.jobHandler = None #pointer to job handler for parallel runs
        self.doInParallel = True #compute sparse grid in parallel flag, recommended True
        self.distinctPoints = set() #tracks distinct points used in creating this ROM
        self.sparseGridType = 'smolyak' #type of sparse grid quadrature to build per combo
        self.addAssemblerObject('ROM','1',True)

    def _localWhatDoINeed(self):
        """
        Used to obtain necessary objects.
        @ In, None
        @ Out, gridDict, dict, the dict listing the needed objects
        """
        gridDict = Grid._localWhatDoINeed(self)
        # Also requires the job handler for parallel sparse-grid construction.
        gridDict['internal'] = [(None,'jobHandler')]
        return gridDict

    def _localGenerateAssembler(self,initDict):
        """
        Used to obtain necessary objects.
        @ In, initDict, dict, dictionary of objects required to initialize
        @ Out, None
        """
        Grid._localGenerateAssembler(self, initDict)
        self.jobHandler = initDict['internal']['jobHandler']
        self.dists = self.transformDistDict()
        # ND distributions over the original input space are not supported here.
        for dist in self.dists.values():
            if isinstance(dist,Distributions.NDimensionalDistributions):
                self.raiseAnError(IOError,'ND Distributions containing the variables in the original input space are not supported for this sampler!')

    def localInitialize(self):
        """
        Will perform all initialization specific to this Sampler.
        @ In, None
        @ Out, None
        """
        SVL = self.readFromROM()
        #make combination of ROMs that we need
        self.sobolOrder = SVL.sobolOrder
        self._generateQuadsAndPolys(SVL)
        self.features = SVL.features
        # All variable subsets of size 0..sobolOrder (size-0 combo is skipped below).
        needCombos = itertools.chain.from_iterable(itertools.combinations(self.features,r) for r in range(self.sobolOrder+1))
        self.SQs={}
        self.ROMs={} #keys are [combo]
        for combo in needCombos:
            if len(combo)==0:
                continue
            # Per-combo restriction of the distribution/quadrature/poly/importance dicts.
            distDict={}
            quadDict={}
            polyDict={}
            imptDict={}
            limit=0
            for c in combo:
                distDict[c]=self.dists[c]
                quadDict[c]=self.quadDict[c]
                polyDict[c]=self.polyDict[c]
                imptDict[c]=self.importanceDict[c]
            # Build the index set and sparse grid for this variable subset.
            iset=IndexSets.returnInstance(SVL.indexSetType,self)
            iset.initialize(combo,imptDict,SVL.maxPolyOrder)
            self.SQs[combo] = Quadratures.returnInstance(self.sparseGridType,self)
            self.SQs[combo].initialize(combo,iset,distDict,quadDict,self.jobHandler,self.messageHandler)
            initDict={'IndexSet'       :iset.type,        # type of index set
                      'PolynomialOrder':SVL.maxPolyOrder, # largest polynomial
                      'Interpolation'  :SVL.itpDict,      # polys, quads per input
                      'Target'         :','.join(SVL.target), # set below, per-case basis
                      'Features'       :','.join(combo)}  # input variables
            #initializeDict is for SVL.initialize()
            initializeDict={'SG'   :self.SQs[combo],      # sparse grid
                            'dists':distDict,             # distributions
                            'quads':quadDict,             # quadratures
                            'polys':polyDict,             # polynomials
                            'iSet' :iset}                 # index set
            self.ROMs[combo] = SupervisedLearning.returnInstance('GaussPolynomialRom',self,**initDict)
            self.ROMs[combo].initialize(initializeDict)
            self.ROMs[combo].messageHandler = self.messageHandler
        #make combined sparse grids
        # Variables absent from a combo are fixed at their reference (mean) value.
        self.references={}
        for var in self.features:
            self.references[var]=self.dists[var].untruncatedMean()
        self.pointsToRun=[]
        #make sure reference case gets in there
        newpt = np.zeros(len(self.features))
        for v,var in enumerate(self.features):
            newpt[v] = self.references[var]
        self.pointsToRun.append(tuple(newpt))
        self.distinctPoints.add(tuple(newpt))
        #now do the rest: expand each combo's grid points to the full input space
        for combo,rom in sorted(self.ROMs.items()):
            SG = rom.sparseGrid #they all should have the same sparseGrid
            SG._remap(combo)
            for l in range(len(SG)):
                pt,wt = SG[l]
                newpt = np.zeros(len(self.features))
                for v,var in enumerate(self.features):
                    if var in combo:
                        newpt[v] = pt[combo.index(var)]
                    else:
                        newpt[v] = self.references[var]
                newpt=tuple(newpt)
                self.distinctPoints.add(newpt)
                # Deduplicate: run each distinct point only once.
                if newpt not in self.pointsToRun:
                    self.pointsToRun.append(newpt)
        self.limit = len(self.pointsToRun)
        self.raiseADebug('Needed points: %i' %self.limit)
        # Hand all per-combo grids/ROMs to the HDMR ROM engine.
        initdict={'ROMs':self.ROMs,
                  'SG':self.SQs,
                  'dists':self.dists,
                  'quads':self.quadDict,
                  'polys':self.polyDict,
                  'refs':self.references,
                  'numRuns':len(self.distinctPoints)}
        #for target in self.targets:
        self.ROM.supervisedEngine.supervisedContainer[0].initialize(initdict)

    def localGenerateInput(self,model,myInput):
        """
        Function to select the next most informative point
        @ In, model, model instance, an instance of a model
        @ In, myInput, list, a list of the original needed inputs for the model (e.g. list of files, etc.)
        @ Out, None
        """
        try:
            pt = self.pointsToRun[self.counter-1]
        except IndexError:
            self.raiseADebug('All sparse grids are complete! Moving on...')
            raise utils.NoMoreSamplesNeeded
        for v,varName in enumerate(self.features):
            # compute the SampledVarsPb for 1-D distribution
            if self.variables2distributionsMapping[varName]['totDim'] == 1:
                for key in varName.strip().split(','):
                    self.values[key] = pt[v]
                self.inputInfo['SampledVarsPb'][varName] = self.distDict[varName].pdf(pt[v])
                self.inputInfo['ProbabilityWeight-'+varName] = self.inputInfo['SampledVarsPb'][varName]
            # compute the SampledVarsPb for N-D distribution
            # (handled once per distribution, on its first/reduced dimension)
            elif self.variables2distributionsMapping[varName]['totDim'] > 1 and self.variables2distributionsMapping[varName]['reducedDim'] == 1:
                dist = self.variables2distributionsMapping[varName]['name']
                ndCoordinates = np.zeros(len(self.distributions2variablesMapping[dist]))
                positionList = self.distributions2variablesIndexList[dist]
                for varDict in self.distributions2variablesMapping[dist]:
                    var = utils.first(varDict.keys())
                    position = utils.first(varDict.values())
                    location = -1
                    # Find this variable's index in the feature list (aliases comma-split).
                    for key in var.strip().split(','):
                        if key in self.features:
                            location = self.features.index(key)
                            break
                    if location > -1:
                        ndCoordinates[positionList.index(position)] = pt[location]
                    else:
                        self.raiseAnError(IOError,'The variables ' + var + ' listed in sobol sampler, but not used in the ROM!' )
                    for key in var.strip().split(','):
                        self.values[key] = pt[location]
                self.inputInfo['SampledVarsPb'][varName] = self.distDict[varName].pdf(ndCoordinates)
                self.inputInfo['ProbabilityWeight-'+dist] = self.inputInfo['SampledVarsPb'][varName]
        self.inputInfo['PointProbability'] = reduce(mul,self.inputInfo['SampledVarsPb'].values())
        self.inputInfo['ProbabilityWeight'] = np.atleast_1d(1.0) # weight has no meaning for sobol
        self.inputInfo['SamplerType'] = 'Sparse Grids for Sobol'
|
import numpy
import cv2
import matplotlib.pyplot as plt
class Chess_calibration_points:
    """Camera calibration from chessboard images.

    On construction, chessboard corners are detected in every supplied image
    and OpenCV camera calibration is run; the resulting camera matrix and
    distortion coefficients are kept on the instance for undistorting
    further images.
    """

    # Class-level defaults, replaced per instance once calibration has run.
    mtx = []
    dist = []

    def __init__(self, image_names, chess_size):
        """Calibrate from the given chessboard image file names.

        Args:
            image_names: Iterable of chessboard image file paths.
            chess_size: Tuple (cols, rows) of inner chessboard corners.
        """
        obj_points = []  # 3d points in real world space
        img_points = []  # 2d points in image plane
        img_shape = []
        # Template of corner coordinates on the z=0 plane: (0,0,0), (1,0,0), ...
        objp = numpy.zeros((chess_size[0] * chess_size[1], 3), numpy.float32)
        objp[:, :2] = numpy.mgrid[0:chess_size[0], 0:chess_size[1]].T.reshape(-1, 2)
        # Step through the images and search for chessboard corners.
        for fname in image_names:
            img = cv2.imread(fname)
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            if not img_shape:
                img_shape = gray.shape
            found, corners = cv2.findChessboardCorners(
                gray, (chess_size[0], chess_size[1]), None)
            # If found, record the object/image point correspondences.
            if found:
                obj_points.append(objp)
                img_points.append(corners)
                # Draw and display the detected corners for visual inspection.
                img = cv2.drawChessboardCorners(
                    img, (chess_size[0], chess_size[1]), corners, found)
                plt.imshow(img)
        ret, self.mtx, self.dist, rvecs, tvecs = cv2.calibrateCamera(
            obj_points, img_points, img_shape[::-1], None, None)

    def undist_image(self, image):
        """Return ``image`` undistorted with the calibrated parameters."""
        return cv2.undistort(image, self.mtx, self.dist, None, self.mtx)
|
#!/usr/bin/env python3
import unittest
import os
import sys
from pathlib import Path
import numpy as np
from PIL import Image
from daltonlens import convert, simulate, generate
# Directory holding the ground-truth reference images for the tests below.
test_images_path = Path(__file__).parent.absolute() / "images"
class TestCVD(unittest.TestCase):
    """Pixel-level regression tests of the CVD simulators against stored images."""

    def checkModels(self, im, models_to_test, tolerance=1e-8):
        """Simulate each (simulator, deficiency, severity, ground-truth) case on
        ``im`` and fail if any output differs from its reference image by more
        than ``tolerance``."""
        for sim, deficiency, severity, gt_name in models_to_test:
            result = sim.simulate_cvd(im, deficiency=deficiency, severity=severity)
            # Uncomment to generate a new ground truth.
            # Image.fromarray(result).save(test_images_path / ('generated_' + str(gt_name)))
            expected = np.asarray(Image.open(test_images_path / gt_name).convert('RGB'))
            if not np.allclose(result, expected, atol=tolerance):
                maxDelta = np.max(np.abs(result.astype(float) - expected))
                self.fail(f"The images are different, max delta = {maxDelta}")

    def test_vienot1999(self):
        sim = simulate.Simulator_Vienot1999(convert.LMSModel_sRGB_SmithPokorny75(ignoreJuddVosCorrection=False))
        cases = [
            (sim, simulate.Deficiency.PROTAN, 1.0, "vienot1999_protan_1.0.png"),
            (sim, simulate.Deficiency.DEUTAN, 1.0, "vienot1999_deutan_1.0.png"),
            (sim, simulate.Deficiency.TRITAN, 1.0, "vienot1999_tritan_1.0.png"),
            (sim, simulate.Deficiency.PROTAN, 0.55, "vienot1999_protan_0.55.png"),
            (sim, simulate.Deficiency.DEUTAN, 0.55, "vienot1999_deutan_0.55.png"),
            (sim, simulate.Deficiency.TRITAN, 0.55, "vienot1999_tritan_0.55.png"),
        ]
        self.checkModels(generate.rgb_span(27*8, 27*8), cases, 2)

    def test_brettel1997(self):
        sim = simulate.Simulator_Brettel1997(convert.LMSModel_sRGB_SmithPokorny75(ignoreJuddVosCorrection=False))
        # wn means use_white_as_neutral = true
        cases = [
            (sim, simulate.Deficiency.PROTAN, 1.0, "brettel1997_protan_wn_1.0.png"),
            (sim, simulate.Deficiency.DEUTAN, 1.0, "brettel1997_deutan_wn_1.0.png"),
            (sim, simulate.Deficiency.TRITAN, 1.0, "brettel1997_tritan_wn_1.0.png"),
            (sim, simulate.Deficiency.PROTAN, 0.55, "brettel1997_protan_wn_0.55.png"),
            (sim, simulate.Deficiency.DEUTAN, 0.55, "brettel1997_deutan_wn_0.55.png"),
            (sim, simulate.Deficiency.TRITAN, 0.55, "brettel1997_tritan_wn_0.55.png"),
        ]
        self.checkModels(generate.rgb_span(27*8, 27*8), cases)

    def test_vischeck(self):
        sim = simulate.Simulator_Vischeck()
        # wn means use_white_as_neutral = true
        cases = [
            (sim, simulate.Deficiency.PROTAN, 1.0, "vischeck_gimp_protan.png"),
            (sim, simulate.Deficiency.DEUTAN, 1.0, "vischeck_gimp_deutan.png"),
            (sim, simulate.Deficiency.TRITAN, 1.0, "vischeck_gimp_tritan.png"),
        ]
        # Add a small tolerance due to round, etc.
        self.checkModels(generate.rgb_span(27*8, 27*8), cases, tolerance=1)

    def test_machado2009(self):
        sim = simulate.Simulator_Machado2009()
        cases = [
            (sim, simulate.Deficiency.PROTAN, 1.0, "machado2009_protan_1.0.png"),
            (sim, simulate.Deficiency.DEUTAN, 1.0, "machado2009_deutan_1.0.png"),
            (sim, simulate.Deficiency.TRITAN, 1.0, "machado2009_tritan_1.0.png"),
            (sim, simulate.Deficiency.PROTAN, 0.55, "machado2009_protan_0.55.png"),
            (sim, simulate.Deficiency.DEUTAN, 0.55, "machado2009_deutan_0.55.png"),
            (sim, simulate.Deficiency.TRITAN, 0.55, "machado2009_tritan_0.55.png"),
        ]
        self.checkModels(generate.rgb_span(27*8, 27*8), cases)

    def test_coblisV1(self):
        sim = simulate.Simulator_CoblisV1()
        cases = [
            (sim, simulate.Deficiency.PROTAN, 1.0, "coblisv1_protan_1.0.png"),
            (sim, simulate.Deficiency.DEUTAN, 1.0, "coblisv1_deutan_1.0.png"),
            (sim, simulate.Deficiency.TRITAN, 1.0, "coblisv1_tritan_1.0.png"),
        ]
        self.checkModels(generate.rgb_span(27*8, 27*8), cases, tolerance=1)

    def test_coblisV2(self):
        sim = simulate.Simulator_CoblisV2()
        cases = [
            (sim, simulate.Deficiency.PROTAN, 1.0, "coblisv2_protan_1.0.png"),
            (sim, simulate.Deficiency.DEUTAN, 1.0, "coblisv2_deutan_1.0.png"),
            (sim, simulate.Deficiency.TRITAN, 1.0, "coblisv2_tritan_1.0.png"),
        ]
        self.checkModels(generate.rgb_span(27*8, 27*8), cases, tolerance=1)

    def test_auto(self):
        # The auto-selecting simulator must match the specific simulator it
        # is expected to delegate to for each deficiency/severity combination.
        im = generate.rgb_span(27, 27)
        machado2009 = simulate.Simulator_Machado2009()
        brettel1997 = simulate.Simulator_Brettel1997(convert.LMSModel_sRGB_SmithPokorny75())
        vienot1999 = simulate.Simulator_Vienot1999(convert.LMSModel_sRGB_SmithPokorny75())
        auto = simulate.Simulator_AutoSelect()

        out_auto = auto.simulate_cvd(im, simulate.Deficiency.PROTAN, severity=0.3)
        out_ref = machado2009.simulate_cvd(im, simulate.Deficiency.PROTAN, severity=0.3)
        self.assertTrue(np.allclose(out_auto, out_ref))

        out_auto = auto.simulate_cvd(im, simulate.Deficiency.TRITAN, severity=0.3)
        out_ref = brettel1997.simulate_cvd(im, simulate.Deficiency.TRITAN, severity=0.3)
        self.assertTrue(np.allclose(out_auto, out_ref))

        out_auto = auto.simulate_cvd(im, simulate.Deficiency.DEUTAN, severity=1.0)
        out_ref = vienot1999.simulate_cvd(im, simulate.Deficiency.DEUTAN, severity=1.0)
        self.assertTrue(np.allclose(out_auto, out_ref))
# Script entry point: run every test in this module.
if __name__ == '__main__':
    unittest.main()
|
#!python
# External dependencies
import numpy as np
import pandas as pd
"""
Usage :
data = {
"data": {
"candles": [
["05-09-2013", 5553.75, 5625.75, 5552.700195, 5592.950195, 274900],
["06-09-2013", 5617.450195, 5688.600098, 5566.149902, 5680.399902, 253000],
["10-09-2013", 5738.5, 5904.850098, 5738.200195, 5896.75, 275200],
["11-09-2013", 5887.25, 5924.350098, 5832.700195, 5913.149902, 265000],
["12-09-2013", 5931.149902, 5932, 5815.799805, 5850.700195, 273000],
...
["27-01-2014", 6186.299805, 6188.549805, 6130.25, 6135.850098, 190400],
["28-01-2014", 6131.850098, 6163.600098, 6085.950195, 6126.25, 184100],
["29-01-2014", 6161, 6170.450195, 6109.799805, 6120.25, 146700],
["30-01-2014", 6067, 6082.850098, 6027.25, 6073.700195, 208100],
["31-01-2014", 6082.75, 6097.850098, 6067.350098, 6089.5, 146700]
]
}
}
# Data must be provided as a Pandas DataFrame with ['date', 'open', 'high', 'low', 'close', 'volume'] as columns
df = pd.DataFrame(data["data"]["candles"], columns=['date', 'open', 'high', 'low', 'close', 'volume'])
# Columns as added by each function specific to their computations
EMA(df, 'close', 'ema_5', 5)
ATR(df, 14)
SuperTrend(df, 10, 3)
MACD(df)
"""
def HA(df, ohlc=['Open', 'High', 'Low', 'Close']):
    """
    Function to compute Heiken Ashi Candles (HA)

    Args :
        df : Pandas DataFrame which contains ['date', 'open', 'high', 'low', 'close', 'volume'] columns
        ohlc: List defining OHLC Column names (default ['Open', 'High', 'Low', 'Close'])

    Returns :
        df : Pandas DataFrame with new columns added for
            Heiken Ashi Close (HA_$ohlc[3])
            Heiken Ashi Open (HA_$ohlc[0])
            Heiken Ashi High (HA_$ohlc[1])
            Heiken Ashi Low (HA_$ohlc[2])
    """
    ha_open = 'HA_' + ohlc[0]
    ha_high = 'HA_' + ohlc[1]
    ha_low = 'HA_' + ohlc[2]
    ha_close = 'HA_' + ohlc[3]

    # HA close is the plain average of the raw OHLC values.
    df[ha_close] = (df[ohlc[0]] + df[ohlc[1]] + df[ohlc[2]] + df[ohlc[3]]) / 4

    # HA open is recursive (midpoint of the previous HA open and HA close), so
    # it is built in a plain list and assigned once. This avoids the original
    # per-row `df[col].iat[i] = ...` chained-assignment writes and also
    # handles an empty DataFrame cleanly.
    ha_open_values = []
    if len(df) > 0:
        # First candle: midpoint of the raw open and close.
        ha_open_values.append((df[ohlc[0]].iat[0] + df[ohlc[3]].iat[0]) / 2)
        ha_close_values = df[ha_close].values
        for i in range(1, len(df)):
            ha_open_values.append((ha_open_values[i - 1] + ha_close_values[i - 1]) / 2)
    df[ha_open] = ha_open_values

    # HA high/low are the extremes over {HA open, HA close, raw high/low}.
    df[ha_high] = df[[ha_open, ha_close, ohlc[1]]].max(axis=1)
    df[ha_low] = df[[ha_open, ha_close, ohlc[2]]].min(axis=1)

    return df
def SMA(df, base, target, period):
    """
    Function to compute Simple Moving Average (SMA)

    Args :
        df : Pandas DataFrame which contains ['date', 'open', 'high', 'low', 'close', 'volume'] columns
        base : String indicating the column name from which the SMA needs to be computed from
        target : String indicates the column name to which the computed data needs to be stored
        period : Integer indicates the period of computation in terms of number of candles

    Returns :
        df : Pandas DataFrame with new column added with name 'target'
    """
    # The rolling mean is NaN for the first (period - 1) rows; fill those with
    # 0 in one expression instead of an in-place fillna on a column slice
    # (which relies on chained assignment and warns in modern pandas).
    df[target] = df[base].rolling(window=period).mean().fillna(0)
    return df
def STDDEV(df, base, target, period):
    """
    Function to compute rolling Standard Deviation (STDDEV)

    Args :
        df : Pandas DataFrame which contains ['date', 'open', 'high', 'low', 'close', 'volume'] columns
        base : String indicating the column name from which the standard deviation needs to be computed from
        target : String indicates the column name to which the computed data needs to be stored
        period : Integer indicates the period of computation in terms of number of candles

    Returns :
        df : Pandas DataFrame with new column added with name 'target'
    """
    # The rolling std is NaN for the first (period - 1) rows; fill those with
    # 0 directly instead of an in-place fillna on a column slice.
    df[target] = df[base].rolling(window=period).std().fillna(0)
    return df
def EMA(df, base, target, period, alpha=False):
    """
    Function to compute Exponential Moving Average (EMA)

    Args :
        df : Pandas DataFrame which contains ['date', 'open', 'high', 'low', 'close', 'volume'] columns
        base : String indicating the column name from which the EMA needs to be computed from
        target : String indicates the column name to which the computed data needs to be stored
        period : Integer indicates the period of computation in terms of number of candles
        alpha : Boolean if True indicates to use the formula for computing EMA using alpha (default is False)

    Returns :
        df : Pandas DataFrame with new column added with name 'target'
    """
    # Seed the EMA with the SMA of the first `period` rows: the seed series is
    # NaN everywhere except index period-1, followed by the raw values.
    con = pd.concat([df[:period][base].rolling(window=period).mean(), df[period:][base]])

    # Note: `== True` is kept deliberately — callers documented `alpha` as a
    # strict Boolean flag, and any other truthy value must take the span branch.
    if (alpha == True):
        # (1 - alpha) * previous_val + alpha * current_val where alpha = 1 / period
        df[target] = con.ewm(alpha=1 / period, adjust=False).mean()
    else:
        # ((current_val - previous_val) * coeff) + previous_val where coeff = 2 / (period + 1)
        df[target] = con.ewm(span=period, adjust=False).mean()

    # Replace the leading NaNs (before the seed) with 0 without relying on
    # chained-assignment in-place fillna.
    df[target] = df[target].fillna(0)
    return df
def ATR(df, period, ohlc=['Open', 'High', 'Low', 'Close']):
    """
    Function to compute Average True Range (ATR)

    Args :
        df : Pandas DataFrame which contains ['date', 'open', 'high', 'low', 'close', 'volume'] columns
        period : Integer indicates the period of computation in terms of number of candles
        ohlc: List defining OHLC Column names (default ['Open', 'High', 'Low', 'Close'])

    Returns :
        df : Pandas DataFrame with new columns added for
            True Range (TR)
            ATR (ATR_$period)
    """
    atr = 'ATR_' + str(period)

    # Compute true range only if it is not computed and stored earlier in the df
    if 'TR' not in df.columns:
        # TR = max of (high - low, |high - prev close|, |low - prev close|);
        # the scratch columns are dropped once TR is derived.
        df['h-l'] = df[ohlc[1]] - df[ohlc[2]]
        df['h-yc'] = abs(df[ohlc[1]] - df[ohlc[3]].shift())
        df['l-yc'] = abs(df[ohlc[2]] - df[ohlc[3]].shift())
        df['TR'] = df[['h-l', 'h-yc', 'l-yc']].max(axis=1)
        df.drop(['h-l', 'h-yc', 'l-yc'], inplace=True, axis=1)

    # Compute EMA of true range using ATR formula after ignoring first row
    EMA(df, 'TR', atr, period, alpha=True)
    return df
def SuperTrend(df, period, multiplier, ohlc=['Open', 'High', 'Low', 'Close']):
    """
    Function to compute SuperTrend

    Args :
        df : Pandas DataFrame which contains ['date', 'open', 'high', 'low', 'close', 'volume'] columns
        period : Integer indicates the period of computation in terms of number of candles
        multiplier : Integer indicates value to multiply the ATR
        ohlc: List defining OHLC Column names (default ['Open', 'High', 'Low', 'Close'])

    Returns :
        df : Pandas DataFrame with new columns added for
            True Range (TR), ATR (ATR_$period)
            SuperTrend (ST_$period_$multiplier)
            SuperTrend Direction (STX_$period_$multiplier)
    """
    # ATR (and its TR helper column) is the band-width input for SuperTrend.
    ATR(df, period, ohlc=ohlc)
    atr = 'ATR_' + str(period)
    st = 'ST_' + str(period) + '_' + str(multiplier)
    stx = 'STX_' + str(period) + '_' + str(multiplier)
    """
    SuperTrend Algorithm :
        BASIC UPPERBAND = (HIGH + LOW) / 2 + Multiplier * ATR
        BASIC LOWERBAND = (HIGH + LOW) / 2 - Multiplier * ATR
        FINAL UPPERBAND = IF( (Current BASICUPPERBAND < Previous FINAL UPPERBAND) or (Previous Close > Previous FINAL UPPERBAND))
                            THEN (Current BASIC UPPERBAND) ELSE Previous FINALUPPERBAND)
        FINAL LOWERBAND = IF( (Current BASIC LOWERBAND > Previous FINAL LOWERBAND) or (Previous Close < Previous FINAL LOWERBAND))
                            THEN (Current BASIC LOWERBAND) ELSE Previous FINAL LOWERBAND)
        SUPERTREND = IF((Previous SUPERTREND = Previous FINAL UPPERBAND) and (Current Close <= Current FINAL UPPERBAND)) THEN
                        Current FINAL UPPERBAND
                    ELSE
                        IF((Previous SUPERTREND = Previous FINAL UPPERBAND) and (Current Close > Current FINAL UPPERBAND)) THEN
                            Current FINAL LOWERBAND
                        ELSE
                            IF((Previous SUPERTREND = Previous FINAL LOWERBAND) and (Current Close >= Current FINAL LOWERBAND)) THEN
                                Current FINAL LOWERBAND
                            ELSE
                                IF((Previous SUPERTREND = Previous FINAL LOWERBAND) and (Current Close < Current FINAL LOWERBAND)) THEN
                                    Current FINAL UPPERBAND
    """
    # Compute basic upper and lower bands
    df['basic_ub'] = (df[ohlc[1]] + df[ohlc[2]]) / 2 + multiplier * df[atr]
    df['basic_lb'] = (df[ohlc[1]] + df[ohlc[2]]) / 2 - multiplier * df[atr]
    # Compute final upper and lower bands; rows before index `period` have no
    # ATR history yet and stay at their 0.00 initialisation.
    df['final_ub'] = 0.00
    df['final_lb'] = 0.00
    for i in range(period, len(df)):
        # Band ratchets: a band only moves when price action justifies it.
        df['final_ub'].iat[i] = df['basic_ub'].iat[i] if df['basic_ub'].iat[i] < df['final_ub'].iat[i - 1] or df[ohlc[3]].iat[i - 1] > df['final_ub'].iat[i - 1] else df['final_ub'].iat[i - 1]
        df['final_lb'].iat[i] = df['basic_lb'].iat[i] if df['basic_lb'].iat[i] > df['final_lb'].iat[i - 1] or df[ohlc[3]].iat[i - 1] < df['final_lb'].iat[i - 1] else df['final_lb'].iat[i - 1]
    # Set the Supertrend value; the chained conditionals implement the IF table
    # from the algorithm string above, in the same order.
    df[st] = 0.00
    for i in range(period, len(df)):
        df[st].iat[i] = df['final_ub'].iat[i] if df[st].iat[i - 1] == df['final_ub'].iat[i - 1] and df[ohlc[3]].iat[i] <= df['final_ub'].iat[i] else \
                        df['final_lb'].iat[i] if df[st].iat[i - 1] == df['final_ub'].iat[i - 1] and df[ohlc[3]].iat[i] > df['final_ub'].iat[i] else \
                        df['final_lb'].iat[i] if df[st].iat[i - 1] == df['final_lb'].iat[i - 1] and df[ohlc[3]].iat[i] >= df['final_lb'].iat[i] else \
                        df['final_ub'].iat[i] if df[st].iat[i - 1] == df['final_lb'].iat[i - 1] and df[ohlc[3]].iat[i] < df['final_lb'].iat[i] else 0.00
    # Mark the trend direction up/down
    # NOTE(review): the inner np.where yields strings, so np.NaN is likely
    # coerced to the literal string 'nan' here, which the fillna(0) below would
    # NOT replace — confirm whether early rows are meant to read 'nan'.
    df[stx] = np.where((df[st] > 0.00), np.where((df[ohlc[3]] < df[st]), 'down', 'up'), np.NaN)
    # Remove basic and final bands from the columns
    df.drop(['basic_ub', 'basic_lb', 'final_ub', 'final_lb'], inplace=True, axis=1)
    df.fillna(0, inplace=True)
    return df
def MACD(df, fastEMA=12, slowEMA=26, signal=9, base='Close'):
    """
    Compute Moving Average Convergence Divergence (MACD).

    Args :
        df : Pandas DataFrame with the price data
        fastEMA : Integer period of the faster EMA
        slowEMA : Integer period of the slower EMA
        signal : Integer period of the MACD signal line
        base : String column name the MACD is computed from (Default Close)

    Returns :
        df : Pandas DataFrame with new columns added for
            Fast EMA (ema_$fastEMA)
            Slow EMA (ema_$slowEMA)
            MACD (macd_$fastEMA_$slowEMA_$signal)
            MACD Signal (signal_$fastEMA_$slowEMA_$signal)
            MACD Histogram (hist_$fastEMA_$slowEMA_$signal)
    """
    suffix = '_' + str(fastEMA) + '_' + str(slowEMA) + '_' + str(signal)
    fast_col = 'ema_' + str(fastEMA)
    slow_col = 'ema_' + str(slowEMA)
    macd_col = 'macd' + suffix
    sig_col = 'signal' + suffix
    hist_col = 'hist' + suffix

    # Fast and slow EMAs of the base series.
    EMA(df, base, fast_col, fastEMA)
    EMA(df, base, slow_col, slowEMA)

    # MACD line; rows where either EMA is still 0 (warm-up) are kept at 0.
    df[macd_col] = np.where((df[fast_col] != 0) & (df[slow_col] != 0), df[fast_col] - df[slow_col], 0)

    # The signal line is an EMA of the MACD line itself.
    EMA(df, macd_col, sig_col, signal)

    # Histogram: MACD minus signal, again gated on the warm-up zeros.
    df[hist_col] = np.where((df[macd_col] != 0) & (df[sig_col] != 0), df[macd_col] - df[sig_col], 0)

    return df
def BBand(df, base='Close', period=20, multiplier=2):
    """
    Compute Bollinger Bands (BBand) around a simple moving average.

    Args :
        df : Pandas DataFrame with the price data
        base : String column name the bands are computed from (Default Close)
        period : Integer window of the moving average, in candles
        multiplier : Integer number of standard deviations for the band width

    Returns :
        df : Pandas DataFrame with new columns added for
            Upper Band (UpperBB_$period_$multiplier)
            Lower Band (LowerBB_$period_$multiplier)
    """
    suffix = '_' + str(period) + '_' + str(multiplier)
    upper_col = 'UpperBB' + suffix
    lower_col = 'LowerBB' + suffix

    mid = df[base].rolling(window=period, min_periods=period - 1).mean()
    deviation = df[base].rolling(window=period).std() * multiplier

    df[upper_col] = mid + deviation
    df[lower_col] = mid - deviation

    # Rows without enough history have no band yet; report them as 0.
    df[upper_col].fillna(0, inplace=True)
    df[lower_col].fillna(0, inplace=True)

    return df
def RSI(df, base="Close", period=21):
    """
    Compute the Relative Strength Index (RSI) with Wilder-style smoothing.

    Args :
        df : Pandas DataFrame with the price data
        base : String column name the RSI is computed from (Default Close)
        period : Integer smoothing period, in candles

    Returns :
        df : Pandas DataFrame with a new column added for
            Relative Strength Index (RSI_$period)
    """
    col = 'RSI_' + str(period)
    change = df[base].diff()

    # Split the move into gains and (still-signed) losses.
    gains = change.clip(lower=0)
    losses = change.clip(upper=0)

    avg_gain = gains.ewm(com=period - 1, adjust=False).mean()
    avg_loss = losses.ewm(com=period - 1, adjust=False).mean().abs()

    # Classic RSI formula: 100 - 100 / (1 + RS).
    df[col] = 100 - 100 / (1 + avg_gain / avg_loss)
    df[col].fillna(0, inplace=True)
    return df
def Ichimoku(df, ohlc=['Open', 'High', 'Low', 'Close'], param=[9, 26, 52, 26]):
    """
    Compute the five Ichimoku Cloud lines.

    Args :
        df : Pandas DataFrame with the price data
        ohlc: List defining OHLC Column names (default ['Open', 'High', 'Low', 'Close'])
        param: Periods used in computation, as
            [tenkan_sen_period, kijun_sen_period, senkou_span_period, chikou_span_period]
            (default [9, 26, 52, 26])

    Returns :
        df : Pandas DataFrame with new columns added for
            ['Tenkan Sen', 'Kijun Sen', 'Senkou Span A', 'Senkou Span B', 'Chikou Span']
    """
    high, low, close = df[ohlc[1]], df[ohlc[2]], df[ohlc[3]]
    tenkan_p, kijun_p, senkou_p, chikou_p = param

    def channel_mid(window):
        # Every Ichimoku line is the midpoint of a rolling high/low channel.
        return (high.rolling(window=window).max() + low.rolling(window=window).min()) / 2

    # Tenkan-sen (Conversion Line) and Kijun-sen (Base Line).
    df['Tenkan Sen'] = channel_mid(tenkan_p)
    df['Kijun Sen'] = channel_mid(kijun_p)
    # Senkou Span A: mid of conversion/base lines, projected forward by the kijun period.
    df['Senkou Span A'] = ((df['Tenkan Sen'] + df['Kijun Sen']) / 2).shift(kijun_p)
    # Senkou Span B: longer-period channel midpoint, projected forward likewise.
    df['Senkou Span B'] = channel_mid(senkou_p).shift(kijun_p)
    # Chikou Span: the close plotted chikou_p periods behind.
    df['Chikou Span'] = close.shift(-1 * chikou_p)
    return df
# Generated by Django 2.1 on 2020-09-07 11:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional phone number to the Person model.

    The original migration declared ``phone`` as an ``IntegerField`` with
    ``max_length=13``; ``max_length`` is ignored on integer fields (Django
    system check fields.W122), and phone numbers should not be stored as
    integers anyway — leading zeros and '+' prefixes would be lost.  A
    ``CharField`` keeps the digits verbatim.
    """

    dependencies = [
        ('people', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='person',
            name='phone',
            # CharField so max_length is honoured and formatting is preserved.
            field=models.CharField(blank=True, max_length=13, null=True),
        ),
    ]
|
# -*- coding: utf-8 -*-
# @Author: jerry
# @Date: 2017-09-26 20:06:50
# @Last Modified by: jerry
# @Last Modified time: 2017-09-26 20:07:47
class Duck:
    """A real duck: it quacks and shows its feathers."""

    def quack(self):
        # Announce the duck's call.
        print("這鴨子在呱呱叫")

    def feathers(self):
        # Describe the duck's plumage.
        print("這鴨子擁有白色與灰色羽毛")
class Person:
    """A person imitating a duck — same interface, different implementation."""

    def quack(self):
        # Imitate the duck's call.
        print("這人正在模仿鴨子")

    def feathers(self):
        # Show a feather picked up from the ground.
        print("這人在地上拿起1根羽毛然後給其他人看")
def in_the_forest(d):
    """Exercise any duck-like object: it only needs quack() and feathers()."""
    for behaviour in (d.quack, d.feathers):
        behaviour()
def game():
    """Run the duck-typing demo once with a real duck and an impostor."""
    for actor in (Duck(), Person()):
        in_the_forest(actor)


game()
|
import logging
import os
import csv
from datetime import datetime, timedelta
from pathlib import Path
def new_transaction_lists(impact, t_lists, account_name, network_name):
    """Transforms new transaction records by adding the Impact tracker ID for the environment (desktop/mobile),
    finding the correct Impact partner ID from the network publisher ID, and determining the commission rate.
    See README for contract payout groups requirements for correct payments.

    Arguments:
        impact {object} -- Impact client exposing existing_partner_dict, program_id and the action tracker IDs
        t_lists {list} -- list of transaction lists (e.g. [approved, declined, pending]); each transaction is a dict
        account_name {string} -- Network program/account name
        network_name {string} -- Network name

    Returns:
        tuple -- (list of Impact conversions upload headers, list of amended transaction rows)
    """
    headrow = ["CampaignId","ActionTrackerId","EventDate","OrderId","MediaPartnerId",'CustomerStatus',"CurrencyCode","Amount","Category","Sku","Quantity",'Text1','PromoCode','Country','OrderLocation','Text2','Date1','Note','Numeric1','OrderStatus','VoucherCode','Modified']
    t_list = []
    for l in t_lists:
        for t in l:
            # Map the network publisher id to an Impact media partner id; the
            # dict may be keyed by int or str, so both forms are tried.
            # Unknown publishers are logged and the record is skipped.
            try:
                mpid = impact.existing_partner_dict[t['publisherId']]
            except KeyError as e:
                try:
                    mpid = impact.existing_partner_dict[str(t['publisherId'])]
                except KeyError as e:
                    logging.warning(f'No valid partner {t["publisherId"]} found for {network_name} transaction {t["id"]}')
                    # mpid = 'NOMPID'
                    continue
            # Commission rate as a whole-number percentage of the sale amount.
            try:
                commission_rate = int(round(float(t['commissionAmount']['amount']) / float(t['saleAmount']['amount']), 2) * 100)
            except ZeroDivisionError as e:
                commission_rate = 0
            # NOTE(review): a device value other than Desktop/Mobile leaves
            # at_id unset (UnboundLocalError on the first record) or stale from
            # the previous record — confirm whether other values can occur.
            if t['device'] == 'Desktop':
                at_id = impact.desktop_action_tracker_id
            elif t['device'] == 'Mobile':
                at_id = impact.mobile_action_tracker_id
            # Row order must match headrow above (22 columns).
            transaction = [
                impact.program_id,
                at_id,
                t['transactionDate'],
                t['id'],
                mpid,
                t['status'],
                t['saleAmount']['currency'],
                t['saleAmount']['amount'],
                'cat',
                'sku',
                1,
                account_name,
                # NOTE(review): the PromoCode column receives account_name since
                # the voucherCode line below was commented out — confirm intent.
                account_name,
                # t['voucherCode'],
                t['customerCountry'],
                t['customerCountry'],
                t['advertiserCountry'],
                t['transactionDate'],
                account_name,
                commission_rate,
                'pending',
                t['voucherCode'],
                f'{datetime.now():%Y-%m-%dT%H:%M:%S}'
            ]
            t_list.append(transaction)
    return headrow, t_list
def modified_transaction_lists(impact, approved, declined):
    """Build Impact modification rows for already-reported conversions.

    Approved transactions keep their sale amount with reason VALIDATED_ORDER;
    declined/reversed ones are zeroed with reason RETURNED.  The Impact action
    tracker ID is chosen per record from the transaction's device type.

    Arguments:
        impact {object} -- Impact client exposing the desktop/mobile action tracker IDs
        approved {list} -- List of approved transactions (dictionaries)
        declined {list} -- List of declined/reversed transactions (dictionaries)

    Returns:
        tuple -- Two items, the Impact modification file standard headers, and the amended list of transactions to be modifed
    """
    headrow = ['ActionTrackerID','Oid','Amount','Reason']
    rows = []
    batches = (
        (approved, 'VALIDATED_ORDER', lambda t: t['saleAmount']['amount']),
        (declined, 'RETURNED', lambda t: 0),
    )
    for batch, reason, amount_of in batches:
        for t in batch:
            if t['device'] == 'Desktop':
                at_id = impact.desktop_action_tracker_id
            elif t['device'] == 'Mobile':
                at_id = impact.mobile_action_tracker_id
            rows.append([at_id, t['id'], amount_of(t), reason])
    return headrow, rows
def prepare_transactions(impact, account_id, network, target_date):
    """Extract approved/declined/pending transactions for one account from a network.

    Linkshare returns all three statuses in a single call (its declined list is
    discarded and replaced with an empty list); every other network is queried
    once per status.

    TODO :: move the date formatting to the network objects
    TODO :: historical data functions

    Arguments:
        impact {object} -- Impact client (unused here, kept for a uniform signature)
        account_id {string} -- Network specific authentication token to get publisher list
        network {object} -- Network object (currently one of AWin, Admitad, Linkshare)
        target_date -- date the network-specific date_formatter turns into a (start, end) range

    Returns:
        tuple -- (approved, declined, pending, target_date), each list amended to fit Impact formatting
    """
    logging.info(f'Getting {network.network_name} transactions and modifications for account {account_id}')
    start, end = network.date_formatter(target_date)

    if network.network_name == 'Linkshare':
        # Single call returns every status; declines are intentionally dropped.
        approved, pending, _ = network.get_all_transactions(account_id, start, end)
        declined = []
    else:
        fetch = network.get_all_transactions
        approved = fetch(account_id, start, end, 'approved')
        declined = fetch(account_id, start, end, 'declined')
        pending = fetch(account_id, start, end, 'pending')

    return approved, declined, pending, target_date
def transactions_process(impact, account_id, account_name, network, target_date):
    """Run the extract/transform pipeline for one network account and write the
    results to CSV ready for upload to Impact.

    Arguments:
        impact {object} -- Impact client (tracker IDs, partner map, program ID)
        account_id {string} -- Network account ID
        account_name {string} -- Network program/account name
        network {object} -- Network object (currently one of AWin, Admitad, Linkshare)
        target_date -- date the transactions are pulled for

    Returns:
        file_path_m {Path} -- path to the modifications file
        file_path_p {Path} -- path to the new/pending transactions file
    """
    approved, declined, pending, end = prepare_transactions(impact, account_id, network, target_date)

    safe_name = account_name.replace(" ", "_")
    transactions_filepath = f'transactions/{end.year}/{end:%m}/{end:%d}/{safe_name}'
    modifications_filepath = f'modifications/{end.year}/{end:%m}/{end:%d}/{safe_name}'
    # exist_ok replaces the old try/except FileExistsError dance.
    os.makedirs(transactions_filepath, exist_ok=True)
    os.makedirs(modifications_filepath, exist_ok=True)

    file_path_p = Path(f'{transactions_filepath}/{safe_name}_{end:%Y-%m-%d}.csv')
    file_path_m = Path(f'{modifications_filepath}/{safe_name}_{end:%Y-%m-%d}.csv')

    logging.info(f'Approved transactions {len(approved)}')
    logging.info(f'New pending transactions {len(pending)}')
    logging.info(f'Declined transactions {len(declined)}')

    # New conversions (all statuses) go to the transactions file.
    headrow, t_list = new_transaction_lists(impact, [approved, declined, pending], account_name, network.network_name)
    _write_csv(file_path_p, headrow, t_list)

    # Approvals/reversals of previously-reported conversions go to the modifications file.
    headrow, t_list = modified_transaction_lists(impact, approved, declined)
    _write_csv(file_path_m, headrow, t_list)

    return file_path_m, file_path_p


def _write_csv(path, headrow, rows):
    """Write one header row plus data rows to *path* as comma-separated CSV."""
    with open(path, 'w', newline="") as f:
        csvwriter = csv.writer(f, delimiter=',')
        csvwriter.writerow(headrow)
        csvwriter.writerows(rows)
from typing import List
import torch.nn as nn
import torch
class GramMatrix(nn.Module):
    """
    Gram matrix of feature activations (Gatys et al. 2015), normalised by h*w.
    """

    def forward(self, input):
        batch, channels, height, width = input.size()
        # Flatten spatial dims so each channel becomes one feature vector.
        feats = input.view(batch, channels, height * width)
        gram = feats.bmm(feats.transpose(1, 2))
        return gram.div_(height * width)
class NormalizedGramMatrix(nn.Module):
    """
    Gram matrix computed on per-channel stddev-normalised features; the author
    found this converges better than the raw Gram matrix.
    """

    def forward(self, input):
        batch, channels, height, width = input.size()
        feats = input.view(batch, channels, height * width)
        # Normalise each channel before the outer product.
        feats = normalize_by_stddev(feats)
        gram = feats.bmm(feats.transpose(1, 2))
        return gram.div_(height * width)
def normalize_by_stddev(tensor):
    """
    Divide each channel by its standard deviation (taken over batch and
    positions); a tiny epsilon guards against zero-variance channels.
    """
    n_channels = tensor.shape[1]
    per_channel_std = tensor.std(dim=(0, 2)).view(1, n_channels, 1)
    return tensor.div(per_channel_std + 10e-16)
def style_loss(
    generated_activations: List[torch.Tensor],
    style_gram_matrices: List[torch.Tensor],
    gram_class: nn.Module = GramMatrix
) -> List[torch.Tensor]:
    """
    MSE between the Gram matrices of the generated activations and precomputed
    target Gram matrices, one loss per layer.

    Note: the two inputs deliberately have different shapes — activations on
    one side, ready-made Gram matrices on the other, so the targets are never
    recomputed.

    Parameters
    ----------
    - generated_activations: list of generated activations
    - style_gram_matrices: list of target GRAM MATRICES
    - gram_class: module used to turn activations into Gram matrices

    Returns
    -------
    list of losses corresponding with each activation that was passed
    """
    mse = nn.MSELoss()
    losses = []
    for activation, target in zip(generated_activations, style_gram_matrices):
        losses.append(mse(gram_class()(activation), target))
    return losses
class NSTLoss(nn.Module):
    """
    Utility module computing the Neural Style Transfer loss.

    Notes
    -----
    - Currently only style loss is implemented; passing any content argument
      raises NotImplementedError.
    - Target gram matrices, style weights and the loss function are stored on
      the instance so they are not rebuilt on every forward pass.
    """

    def __init__(
        self,
        style_targets=None,
        style_weights=None,
        style_loss_fn=None,
        style_gram_class: nn.Module = None,
        content_targets=None,
        content_weights=None,
        content_loss_fn=None,
        alpha=None
    ):
        super(NSTLoss, self).__init__()
        self._init_style(style_targets, style_weights, style_loss_fn, style_gram_class)
        self._init_content(content_targets, content_weights, content_loss_fn)
        self.alpha = alpha

    def _init_content(self, content_targets, content_weights, content_loss_fn):
        # Content loss is not supported yet; reject any attempt to configure it.
        if content_targets or content_weights or content_loss_fn:
            raise NotImplementedError

    def _init_style(self, style_targets, style_weights, style_loss_fn, style_gram_class):
        self.style_targets = style_targets
        # Default to uniform weights over the style layers.
        if not style_weights:
            style_weights = [1 / len(style_targets) for _ in style_targets]
        self.style_weights = style_weights
        self.style_loss_fn = style_loss_fn
        self.style_gram_class = style_gram_class

    def forward(self, generated_style_activations=None, generated_content_activations=None):
        """
        Parameters
        ----------
        generated_style_activations: activations from given layers in a given model
        generated_content_activations: activations from given layers in a given model

        Todo
        ----
        - Implement structured logging instead of print statements
        """
        style_losses = []
        content_losses = []
        if generated_style_activations:
            raw = self.style_loss_fn(generated_style_activations, self.style_targets, self.style_gram_class)
            style_losses = [weight * loss for weight, loss in zip(self.style_weights, raw)]
        if generated_content_activations:
            raw = self.content_loss_fn(generated_content_activations, self.content_targets)
            content_losses = [weight * loss for weight, loss in zip(self.content_weights, raw)]
        # alpha blends content vs style when set; otherwise plain sum.
        if self.alpha:
            return (sum(content_losses) * self.alpha) + (sum(style_losses) * (1 - self.alpha))
        print([float(i) for i in style_losses])
        return sum(content_losses) + sum(style_losses)
|
import random
import string
# Directly copied from anki utils!
# used in ankiweb
def _base62(num, extra=""):
s = string
table = s.ascii_letters + s.digits + extra
buf = ""
while num:
num, i = divmod(num, len(table))
buf = table[i] + buf
return buf
# All printable characters minus quotes, backslash and separators.
_base91_extra_chars = "!#$%&()*+,-./:;<=>?@[]^_`{|}~"


def _base91(num):
    """Encode *num* with the base62 alphabet extended by punctuation (base 91)."""
    return _base62(num, extra=_base91_extra_chars)
def _guid64():
    """Return a base91-encoded 64bit random number."""
    rand64 = random.randint(0, 2 ** 64 - 1)
    return _base91(rand64)
def guid():
    """Return a globally unique ID string (thin alias for _guid64)."""
    return _guid64()
|
import unittest
from os import environ
from nbs.prime import Prime
class TestRequest(unittest.TestCase):
    """Integration test: Prime must connect and collect a host list."""

    def test_api(self):
        # Credentials come from the environment so they never live in the repo.
        address = environ.get('PRIME_ADDRESS')
        username = environ.get('PRIME_USER')
        password = environ.get('PRIME_PASS')

        prime = Prime(address, username, password, False, 'unknown')
        self.assertIsInstance(prime, Prime)

        prime.run()
        self.assertIsInstance(prime.hosts, list)
if __name__ == '__main__':
unittest.main()
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Stub functions that are used by the Amazon API Gateway unit tests.
When tests are run against an actual AWS account, the stubber class does not
set up stubs and passes all calls through to the Boto3 client.
"""
import json
from test_tools.example_stubber import ExampleStubber
class ApiGatewayStubber(ExampleStubber):
    """
    A class that implements a variety of stub functions that are used by the
    Amazon API Gateway unit tests.

    The stubbed functions all expect certain parameters to be passed to them as
    part of the tests, and will raise errors when the actual parameters differ from
    the expected.
    """
    def __init__(self, client, use_stubs=True):
        """
        Initializes the object with a specific client and configures it for
        stubbing or AWS passthrough.

        :param client: A Boto3 API Gateway client.
        :param use_stubs: When True, use stubs to intercept requests. Otherwise,
                          pass requests through to AWS.
        """
        super().__init__(client, use_stubs)

    def stub_create_rest_api(self, api_name, api_id, error_code=None):
        """Stubs create_rest_api: expects the API name, answers with api_id."""
        expected_params = {'name': api_name}
        response = {'id': api_id}
        self._stub_bifurcator(
            'create_rest_api', expected_params, response, error_code=error_code)

    def stub_get_resources(self, api_id, resources, error_code=None):
        """Stubs get_resources: answers with the given resource list."""
        expected_params = {'restApiId': api_id}
        response = {'items': resources}
        self._stub_bifurcator(
            'get_resources', expected_params, response, error_code=error_code)

    def stub_create_resource(
            self, api_id, parent_id, path, resource_id, error_code=None):
        """Stubs create_resource under a parent path part; answers with resource_id."""
        expected_params = {'restApiId': api_id, 'parentId': parent_id, 'pathPart': path}
        response = {'id': resource_id}
        self._stub_bifurcator(
            'create_resource', expected_params, response, error_code=error_code)

    def stub_put_method(
            self, api_id, resource_id, error_code=None, http_method='ANY'):
        """Stubs put_method with no authorization; no response payload expected."""
        expected_params = {
            'restApiId': api_id, 'resourceId': resource_id, 'httpMethod': http_method,
            'authorizationType': 'NONE'}
        self._stub_bifurcator(
            'put_method', expected_params, error_code=error_code)

    def stub_put_method_response(
            self, api_id, resource_id, response_models, error_code=None,
            http_method='ANY'):
        """Stubs put_method_response for a 200 status with the given models."""
        expected_params = {
            'restApiId': api_id, 'resourceId': resource_id, 'httpMethod': http_method,
            'statusCode': '200', 'responseModels': response_models}
        self._stub_bifurcator(
            'put_method_response', expected_params, error_code=error_code)

    def stub_put_integration(
            self, api_id, resource_id, uri, error_code=None, http_method='ANY',
            integ_type='AWS_PROXY', integ_method='POST', integ_role_arn=None,
            integ_templates=None, passthrough=None):
        """Stubs put_integration; optional credentials, request templates and
        passthrough behavior are only expected when the caller supplies them."""
        expected_params = {
            'restApiId': api_id,
            'resourceId': resource_id,
            'httpMethod': http_method,
            'type': integ_type,
            'integrationHttpMethod': integ_method,
            'uri': uri}
        if integ_role_arn is not None:
            expected_params['credentials'] = integ_role_arn
        if integ_templates is not None:
            # Templates are keyed by content type and serialized as JSON.
            expected_params['requestTemplates'] = {
                'application/json': json.dumps(integ_templates)}
        if passthrough is not None:
            expected_params['passthroughBehavior'] = passthrough
        self._stub_bifurcator(
            'put_integration', expected_params, error_code=error_code)

    def stub_put_integration_response(
            self, api_id, resource_id, response_templates, error_code=None,
            http_method='ANY'):
        """Stubs put_integration_response for a 200 status with the given templates."""
        expected_params = {
            'restApiId': api_id, 'resourceId': resource_id, 'httpMethod': http_method,
            'statusCode': '200', 'responseTemplates': response_templates}
        self._stub_bifurcator(
            'put_integration_response', expected_params, error_code=error_code)

    def stub_create_deployment(self, api_id, api_stage, error_code=None):
        """Stubs create_deployment of the API to a named stage."""
        expected_params = {'restApiId': api_id, 'stageName': api_stage}
        self._stub_bifurcator(
            'create_deployment', expected_params, error_code=error_code)

    def stub_delete_rest_api(self, api_id, error_code=None):
        """Stubs delete_rest_api for the given API id."""
        expected_params = {'restApiId': api_id}
        self._stub_bifurcator(
            'delete_rest_api', expected_params, error_code=error_code)

    def stub_get_rest_apis(self, rest_apis, error_code=None):
        """Stubs get_rest_apis: no parameters expected, answers with the API list."""
        expected_params = {}
        response = {'items': rest_apis}
        self._stub_bifurcator(
            'get_rest_apis', expected_params, response, error_code=error_code)
|
import database.sqlite3
import datetime
def is_new_article(title):
    """Return True when no article row with this exact title exists yet."""
    database.sqlite3.set_conn()
    db = database.sqlite3.get_conn()
    # Parameterized query keeps the title safe from SQL injection.
    result = db.cursor().execute(''' SELECT * FROM article WHERE title = ? ''', [title])
    unseen = result.fetchone() is None
    print("Checking if article with title '" + title + "' is new..." + str(unseen))
    return unseen
def create(article):
    """Insert one article row and commit immediately.

    Expects *article* to expose title, full_url and source attributes.
    """
    db = database.sqlite3.get_conn()
    print("Inserting article" + article.title)
    # Timestamp stored naive-UTC at insert time.
    now = datetime.datetime.utcnow()
    # NOTE(review): full_url is written to BOTH the url and content columns —
    # content probably should come from an article body field; confirm.
    db.cursor().execute(''' INSERT INTO article (title, timestamp, url, content, source) VALUES(?, ?, ?, ?, ?)''',
                        [article.title, now, article.full_url, article.full_url, article.source])
    db.commit()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
import progressbar
# Token -> integer-id vocabulary, shared by the whole module.
# NOTE(review): this first binding is immediately rebound below; it is redundant.
vec_repr = {}
# Single-character punctuation tokens recognised by the tokenizer.
DOT = '.'
COMMA = ','
COLON = ':'
SEMICOLON = ';'
QUOTATION = '"'
DOLLAR = '$'
PERCENT = '%'
EXCLAMATION = '!'
QUESTION = '?'
DIV = '/'
R_OPEN = '('
R_CLOSE = ')'
S_OPEN = '['
S_CLOSE = ']'
# Unicode ellipsis/dash plus the three-dot "improvised" ellipsis.
ELLIPSIS = '…'
LONG_DASH = '–'
IMP_ELLIP = '...'
EOL = '\n'
# Symbols that may prefix a word.
START = [IMP_ELLIP, QUOTATION, R_OPEN, S_OPEN, DOLLAR, ELLIPSIS]
# Symbols that may suffix a word.
END = [R_CLOSE, S_CLOSE, PERCENT, DOT, COMMA, COLON, SEMICOLON,
       EXCLAMATION, QUOTATION, QUESTION, ELLIPSIS, IMP_ELLIP]
ALL_SYM = [DOT, COMMA, COLON, SEMICOLON, QUOTATION, DOLLAR, PERCENT,
           EXCLAMATION, QUESTION, DIV, R_OPEN, R_CLOSE, S_OPEN,
           S_CLOSE, ELLIPSIS, LONG_DASH, IMP_ELLIP, EOL]
# Seed the vocabulary with the punctuation symbols at ids 0..len(ALL_SYM)-1;
# words are appended lazily by add_word().
vec_repr = dict(zip(ALL_SYM, range(0, len(ALL_SYM))))
def process_word(word):
    """Strip leading/trailing punctuation from *word*.

    Returns (stripped_word, l_accum, r_accum) where the two lists hold the
    vec_repr ids of the punctuation peeled from the left and right side,
    in the order it was removed.
    """
    l_accum = []
    r_accum = []
    # print len(word)
    # Peel punctuation off the front; '...' is three characters, hence word[:3].
    while word[0] in START or word[:3] in START:
        for sym in START:
            if word.startswith(sym):
                l_accum.append(vec_repr[sym])
                if sym != IMP_ELLIP:
                    word = word[1:]
                else:
                    word = word[3:]
        # Stop before re-testing word[0] on an emptied string.
        if len(word) == 0:
            break
    if len(word) > 0:
        # Peel punctuation off the back, accumulating ids in original order.
        while word[-1] in END or word[-3:] in END:
            for sym in END:
                if word.endswith(sym):
                    r_accum = [vec_repr[sym]] + r_accum
                    if sym != IMP_ELLIP:
                        word = word[:-1]
                    else:
                        word = word[:-3]
            if len(word) == 0:
                break
    return word, l_accum, r_accum
def add_word(word):
    """Intern *word* (lower-cased) in vec_repr and return its integer id."""
    key = word.lower()
    if key not in vec_repr:
        # New words get the next free id after everything already present.
        vec_repr[key] = len(vec_repr)
    return vec_repr[key]
def process_line(line):
    """Tokenize one line into a flat list of vocabulary ids.

    Words are split on spaces, punctuation is peeled off by process_word, and
    slash-separated compounds are indexed part by part.  An empty line maps to
    the single EOL id.
    """
    if len(line) == 0:
        return [vec_repr[EOL]]

    token_ids = []
    for raw in line.split(' '):
        if len(raw) == 0:
            continue
        core, prefix_ids, suffix_ids = process_word(raw)
        if DIV in core:
            # 'a/b' style compounds: index each part separately.
            for part in core.split(DIV):
                prefix_ids.append(add_word(part))
        else:
            prefix_ids.append(add_word(core))
        token_ids += prefix_ids + suffix_ids
    return token_ids
def main():
    """Tokenize speeches.txt into an id sequence and save it with the vocabulary."""
    bar = progressbar.ProgressBar()
    _input = []
    # NOTE(review): the file is opened in binary mode, so under Python 3 each
    # line would be bytes and process_line's str operations would fail — this
    # script appears to target Python 2; confirm before porting.
    with open('speeches.txt', 'rb') as fp:
        for line in bar(fp):
            line = line.rstrip()
            post_pr = process_line(line)
            _input += post_pr
    # Persist both the id stream and the token->id mapping in one archive.
    np.savez('dictionary', data=_input, vec_repr=vec_repr)


if __name__ == '__main__':
    main()
|
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import time
# Training hyperparameters for the baseline run.
BATCH_SIZE = 16
EPOCHS = 5
# MNIST: 60k train / 10k test grayscale 28x28 digit images.
mnist = tf.keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Scale pixels to [0, 1] and flatten each image to a 784-vector.
# NOTE(review): expand_dims adds a channel axis that the reshape immediately
# discards — the expand_dims calls look redundant; confirm.
train_images = (np.expand_dims(train_images, axis=-1)/255).astype(np.float32).reshape((60000, 784))
print(train_images.shape)
train_labels = (train_labels).astype(np.int64)
test_images = (np.expand_dims(test_images, axis=-1)/255.).astype(np.float32).reshape((10000, 784))
test_labels = (test_labels).astype(np.int64)
print(train_images.shape, train_labels.shape)
def build_ann_model():
    """Build a single-layer softmax classifier over flattened 784-pixel MNIST digits."""
    layers = tf.keras.layers
    net = tf.keras.models.Sequential([
        layers.Input((784, ), batch_size=BATCH_SIZE),
        layers.Flatten(),
        layers.Dense(10, activation=tf.keras.activations.softmax, name='dense2'),
    ])
    return net
model = build_ann_model()
# Integer class labels, hence sparse categorical cross-entropy; plain SGD lr=0.1.
model.compile(loss='sparse_categorical_crossentropy', optimizer=tf.keras.optimizers.SGD(1e-1), metrics=['accuracy'])
# Warm-start the dense layer from pre-trained parameters stored on disk.
# NOTE(review): weights.npy is assumed transposed relative to Keras' kernel
# layout (hence the .T) — confirm the files match the (784, 10) layer.
weights = np.load('weights.npy', allow_pickle=True)
bias = np.load('bias.npy', allow_pickle=True)
model.trainable_variables[0].assign(weights.T)
model.trainable_variables[1].assign(bias)
start = time.time()
model.fit(train_images, train_labels, batch_size=BATCH_SIZE, epochs=EPOCHS)
test_loss, test_acc = model.evaluate(test_images, test_labels)
print(test_acc)
print('Elapsed time: {}'.format(time.time() - start))
'''
dataset_size = [10, 100, 500, 1000, 5000, 10000, 30000, len(train_images)]
total_accuracy = []
for size in dataset_size:
model = build_ann_model()
model.compile(loss='sparse_categorical_crossentropy', optimizer=tf.keras.optimizers.SGD(1e-1), metrics=['accuracy'])
BATCH_SIZE = 64
EPOCHS = 5
indexes = np.random.randint(0, len(train_images), size)
new_train_images = []
new_train_labels = []
for i in indexes:
new_train_images.append(train_images[i])
new_train_labels.append(train_labels[i])
model.fit(np.array(new_train_images), np.array(new_train_labels), batch_size=BATCH_SIZE, epochs=EPOCHS)
test_loss, test_acc = model.evaluate(test_images, test_labels)
total_accuracy.append(test_acc)
print('Test accuracy:', test_acc)
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.plot(dataset_size, total_accuracy, label='1 Layer Regular NN')
ax1.plot(dataset_size, [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8], label='1 Layer Inverse NN (Our model)')
plt.legend(loc='bottom right')
plt.show()
''' |
#!/usr/bin/env python
import rospy
import cv2
import numpy as np
from sensor_msgs.msg import Image, CompressedImage
from cv_bridge import CvBridge
from geometry_msgs.msg import Vector3Stamped
from byakugan.msg import BoolStamped
def dispo(img, circles):
    """Publish the largest detected circle (centre x/y, radius as z) and a flag
    telling whether any circle was found.

    Uses the module-level messages/publishers created under __main__:
    coordenadas (Vector3Stamped), circulo (BoolStamped), pub, pub2.
    The img parameter is currently unused.
    """
    if (circles is not None):
        circles = np.uint16(np.around(circles))
        circulo.existe.data = True
        maiorRaio = 0
        # Keep only the circle with the largest radius.
        for i in circles[0, :]:
            x, y, r = i[0], i[1], i[2]
            if(r > maiorRaio):
                maiorRaio = r
                coordenadas.vector.x = x
                coordenadas.vector.y = y
                coordenadas.vector.z = r
    else:
        # No circle found: flag False and zero the published coordinates.
        circulo.existe.data = False
        coordenadas.vector.x = 0
        coordenadas.vector.y = 0
        coordenadas.vector.z = 0
    pub.publish(coordenadas)
    pub2.publish(circulo)
def pubCirculosEm(img):
    """Detect circles in a BGR frame with the Hough transform and hand the
    result (or None) to dispo() for publishing."""
    copiaImg = img.copy()
    cinza = cv2.cvtColor(copiaImg, cv2.COLOR_BGR2GRAY)
    # Median blur suppresses speckle noise before the Hough transform.
    cinza = cv2.medianBlur(cinza, 5)
    # NOTE(review): shape[1] is the image WIDTH, not the row count — the
    # variable name and the minDist=rows/8 heuristic may not match the intent;
    # confirm.
    rows = cinza.shape[1]
    circles = cv2.HoughCircles(cinza, cv2.HOUGH_GRADIENT, 1.2, rows/8,
                               param1=45, param2=30, minRadius=10, maxRadius=100)
    dispo(copiaImg, circles)
def callback(img):
    """ROS subscriber callback: decode the CompressedImage payload and run
    circle detection on the resulting BGR frame."""
    # np.frombuffer replaces np.fromstring, which is deprecated for binary
    # buffers and removed in newer NumPy releases; behavior is identical here.
    np_arr = np.frombuffer(img.data, np.uint8)
    imgCV = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
    pubCirculosEm(imgCV)


'''
imgCV = ponte.imgmsg_to_cv2(img, 'bgr8')
pubCirculosEm(imgCV)
cv2.imshow('img', imgCV)
cv2.waitKey(1)
'''
def listenerImg():
    """Create the detector node, subscribe to the compressed camera feed and
    block processing frames until shutdown."""
    rospy.init_node('pubCircle', anonymous=False)
    rospy.Subscriber('/raspicam_node/image/compressed', CompressedImage, callback)
    rospy.spin()
if __name__ == "__main__":
    # Shared bridge, publishers and message objects used by dispo(); they must
    # exist before the subscriber starts delivering frames.
    ponte = CvBridge()
    pub = rospy.Publisher('coordenadas_circulos', Vector3Stamped, queue_size=10)
    pub2 = rospy.Publisher('tem_circulos', BoolStamped, queue_size=10)
    circulo = BoolStamped()
    coordenadas = Vector3Stamped()
    listenerImg()
|
import random
import matplotlib.pyplot as plt
import numpy as np
from sklearn import decomposition as sk_dec
from sklearn import preprocessing as sk_prep
import torch
def seed(value: int = 42):
    """Seed every random number generator in use.

    Covers the stdlib ``random`` module, NumPy's legacy global generator,
    and torch's CPU and current-CUDA-device generators.

    :param value: seed applied to all generators (default 42).
    """
    random.seed(value)
    np.random.seed(value)
    torch.manual_seed(value)
    torch.cuda.manual_seed(value)
def plot_vectors(latent: torch.Tensor, labels: torch.Tensor):
    """Scatter-plot latent vectors in 2-D, one colour per label.

    Vectors are L2-normalised, projected to two components with PCA, and
    drawn on a 10x10 figure with a legend of label values.

    :returns: the matplotlib figure.
    """
    normalised = sk_prep.normalize(X=latent, norm="l2")
    projected = sk_dec.PCA(n_components=2).fit_transform(normalised)
    fig, ax = plt.subplots(figsize=(10, 10))
    for label in labels.unique():
        mask = labels == label
        ax.scatter(
            projected[mask, 0], projected[mask, 1],
            marker=".", label=label.item(),
        )
    fig.legend()
    return fig
|
# To run the program correctly, change the lines below with your database server or local database.
# SECURITY(review): credentials are hard-coded in source control; prefer
# loading them from environment variables or an untracked config file.
host_name = 'academic-mysql.cc.gatech.edu'
database_name = 'cs4400_group1'
user_name = 'cs4400_group1'
password = 'ehiHGsY7'
"""Script for analysing the results from the 16 different seeds from the 11 systems."""
# It also parses the gsd format trajectory stored in each output analysis folder (obtained by executing conv_traj.py before this script) to get the RDFs."""
import os
import shutil
from glob import glob
import freud
import matplotlib.pyplot as plt
import mdtraj as md
import numpy as np
import signac
from scipy import stats
def main():
    """Read run files from all the independent seeds and get the final energies.

    Walks every (molecule, ensemble, T, P, cutoff, LRC) state point of the
    signac project; for each MCCCS job it parses the energy breakdown from
    the job's ``run.melt`` output and writes a ``log-spe.txt`` table into
    the state-point directory under ``spe_data``.
    """
    data_path = "spe_data"
    # Start from a clean output tree on every invocation.
    if os.path.exists(data_path):
        shutil.rmtree(data_path)
    os.makedirs(data_path)
    os.chdir(data_path)
    project = signac.get_project()
    for (
        molecule,
        ensemble,
        temperature,
        pressure,
        cutoff_style,
        long_range_correction,
    ), group in project.groupby(
        (
            "molecule",
            "ensemble",
            "temperature",
            "pressure",
            "cutoff_style",
            "long_range_correction",
        )
    ):
        print("-----------------------------------------------------")
        print(
            molecule,
            ensemble,
            temperature,
            pressure,
            cutoff_style,
            long_range_correction,
        )
        # Build the state-point directory name once (the original repeated
        # this .format() call three times).
        state_dir = "{}_{}_{}K_{}kPa_cutoff_{}_lrc_{}".format(
            molecule,
            ensemble,
            temperature,
            pressure,
            cutoff_style,
            str(long_range_correction),
        )
        # exist_ok=True replaces the isdir()/makedirs() dance.
        os.makedirs(state_dir, exist_ok=True)
        os.chdir(state_dir)
        base_dir = os.getcwd()
        # (The original initialised an unused density_list for the NPT
        # ensemble here; it was never read and has been removed.)
        for job in group:
            # Only MCCCS engine output is analysed by this script.
            if job.sp.engine != "mcccs":
                continue
            os.chdir(job.ws)
            potential_energy = get_pe()
            lj_inter_energy = get_lj_inter()
            lj_intra_energy = get_lj_intra()
            tail_correction = get_tailc()
            coulomb_energy = get_coul()
            bond_energy = get_bond()
            angle_energy = get_angle()
            dihedral_energy = get_torsion()
            print(potential_energy)
            print(lj_inter_energy)
            print(lj_intra_energy)
            print(tail_correction)
            print(coulomb_energy)
            print(bond_energy)
            print(angle_energy)
            print(dihedral_energy)
            print(job)
            csv_pe = potential_energy
            csv_lj_energy = (
                lj_inter_energy + lj_intra_energy + tail_correction
            )
            csv_tail_energy = tail_correction
            csv_coulomb_energy = coulomb_energy
            # kspace energy is not produced by this engine; report 0.
            csv_kspace_energy = 0
            csv_pair_energy = csv_lj_energy + csv_coulomb_energy
            csv_bond_energy = bond_energy
            csv_angle_energy = angle_energy
            csv_dihedral_energy = dihedral_energy
            csv_intermolecular_energy = lj_inter_energy
            csv_intramolecular_energy = lj_intra_energy
            csv_total_coulombic = coulomb_energy
            csv_short_range = lj_inter_energy + lj_intra_energy
            output_string = ""
            print(output_string)
            os.chdir(base_dir)
            # 0.00831441... presumably the gas constant in kJ/(mol K),
            # converting energies reported in Kelvin to kJ/mol -- confirm.
            # NOTE(review): log-spe.txt is rewritten once per job, so the
            # last mcccs seed in the group wins -- confirm this is intended.
            energies = 0.00831441001625545 * np.array(
                [
                    [
                        csv_pe,
                        csv_lj_energy,
                        csv_tail_energy,
                        csv_coulomb_energy,
                        csv_kspace_energy,
                        csv_pair_energy,
                        csv_bond_energy,
                        csv_angle_energy,
                        csv_dihedral_energy,
                        csv_intramolecular_energy,
                        csv_intermolecular_energy,
                        csv_total_coulombic,
                        csv_short_range,
                    ]
                ]
            )
            header = "potential_energy \t vdw_energy \t tail_energy \t coul_energy \t kspace_energy \t pair_energy \t bonds_energy \t angles_energy \t dihedrals_energy \t intramolecular_lj_energy \t intermolecular_lj_energy \t total_coulombic_energy \t short_range_energy"
            np.savetxt("log-spe.txt", energies, header=header, delimiter="\t")
        os.chdir("..")
def get_pe():
    """Get the total potential energy for the system.

    Greps 'total energy' lines out of the MCCCS output file ``run.melt`` in
    the current directory and parses the third field of each match.

    Returns:
        The first matched energy value, or None if the temp file was never
        created (e.g. the shell command could not run).
    """
    os.system("grep 'total energy' run.melt | awk '{print $3}' > temp_pe.txt")
    if not os.path.exists("temp_pe.txt"):
        return None
    # The original's ``except Exception as e: raise e`` was a no-op re-raise;
    # a plain try/finally propagates identically while still cleaning up.
    try:
        pe_one_seed = np.genfromtxt("temp_pe.txt")
    finally:
        os.remove("temp_pe.txt")
    # NOTE(review): genfromtxt yields a 0-d array when only one line matched,
    # in which case [0] would raise IndexError -- assumes >= 2 matches.
    return np.mean(pe_one_seed[0])
def get_lj_inter():
    """Get the intermolecular LJ energy for the system.

    Greps 'inter lj energy' lines from ``run.melt`` and parses the fourth
    field of each match.

    Returns:
        The first matched energy value, or None if the temp file was never
        created.
    """
    os.system(
        "grep 'inter lj energy' run.melt | awk '{print $4}' > temp_lj_inter.txt"
    )
    if not os.path.exists("temp_lj_inter.txt"):
        return None
    # Plain try/finally replaces the original's pointless re-raise clause.
    try:
        lj_inter_one_seed = np.genfromtxt("temp_lj_inter.txt")
    finally:
        os.remove("temp_lj_inter.txt")
    return np.mean(lj_inter_one_seed[0])
def get_lj_intra():
    """Get the intramolecular LJ energy for the system.

    Greps 'intra lj energy' lines from ``run.melt`` and parses the fourth
    field of each match.

    Returns:
        The first matched energy value, or None if the temp file was never
        created.
    """
    os.system(
        "grep 'intra lj energy' run.melt | awk '{print $4}' > temp_lj_intra.txt"
    )
    if not os.path.exists("temp_lj_intra.txt"):
        return None
    # Plain try/finally replaces the original's pointless re-raise clause.
    try:
        lj_intra_one_seed = np.genfromtxt("temp_lj_intra.txt")
    finally:
        os.remove("temp_lj_intra.txt")
    return np.mean(lj_intra_one_seed[0])
def get_tailc():
    """Get the tail correction energy for the system.

    Greps 'Tail correction' lines from ``run.melt`` and parses the third
    field of each match.

    Returns:
        The first matched energy value, or None if the temp file was never
        created.
    """
    os.system(
        "grep 'Tail correction' run.melt | awk '{print $3}' > temp_tailc.txt"
    )
    if not os.path.exists("temp_tailc.txt"):
        return None
    # Plain try/finally replaces the original's pointless re-raise clause.
    try:
        tailc_one_seed = np.genfromtxt("temp_tailc.txt")
    finally:
        os.remove("temp_tailc.txt")
    return np.mean(tailc_one_seed[0])
def get_coul():
    """Get the coulombic energy for the system.

    Greps 'coulombic energy' lines from ``run.melt`` and parses the third
    field of each match.

    Returns:
        The first matched energy value, or None if the temp file was never
        created.
    """
    os.system("grep 'coulombic energy' run.melt | awk '{print $3}' > temp.txt")
    if not os.path.exists("temp.txt"):
        return None
    # Plain try/finally replaces the original's pointless re-raise clause.
    try:
        one_seed = np.genfromtxt("temp.txt")
    finally:
        os.remove("temp.txt")
    return np.mean(one_seed[0])
def get_bond():
    """Get the bond (vibration) energy for the system.

    Greps 'bond vibration' lines from ``run.melt`` and parses the third
    field of each match.

    Returns:
        The first matched energy value, or None if the temp file was never
        created.
    """
    os.system("grep 'bond vibration' run.melt | awk '{print $3}' > temp.txt")
    if not os.path.exists("temp.txt"):
        return None
    # Plain try/finally replaces the original's pointless re-raise clause.
    try:
        one_seed = np.genfromtxt("temp.txt")
    finally:
        os.remove("temp.txt")
    return np.mean(one_seed[0])
def get_angle():
    """Get the angle-bending energy for the system.

    Greps 'bond bending' lines from ``run.melt`` and parses the third field
    of each match.

    Returns:
        The first matched energy value, or None if the temp file was never
        created.
    """
    os.system("grep 'bond bending' run.melt | awk '{print $3}' > temp.txt")
    if not os.path.exists("temp.txt"):
        return None
    # Plain try/finally replaces the original's pointless re-raise clause.
    try:
        one_seed = np.genfromtxt("temp.txt")
    finally:
        os.remove("temp.txt")
    return np.mean(one_seed[0])
def get_torsion():
    """Get the torsional (dihedral) energy for the system.

    Greps 'torsional' lines from ``run.melt`` and parses the second field
    of each match. Unlike the other getters, this returns the LAST match.

    Returns:
        The last matched energy value, or None if the temp file was never
        created.
    """
    os.system("grep 'torsional' run.melt | awk '{print $2}' > temp.txt")
    if not os.path.exists("temp.txt"):
        return None
    # Plain try/finally replaces the original's pointless re-raise clause.
    try:
        one_seed = np.genfromtxt("temp.txt")
    finally:
        os.remove("temp.txt")
    return np.mean(one_seed[-1])
def avg_one_seed_density_box1(prod_run_files):
    """For one particular seed, average the box-1 specific density (g/ml).

    Greps 'specific density ' lines from all ``run.prod*`` files in the
    current directory and averages the sixth field of every match.

    :param prod_run_files: list of production run files; an empty list
        short-circuits to None without touching the filesystem.
    :returns: mean density, or None when there is nothing to parse.
    """
    # Truthiness check replaces ``len(...) == 0``.
    if not prod_run_files:
        return None
    # NOTE(review): with several matching run.prod* files grep prefixes each
    # line with the filename, shifting the awk fields -- confirm inputs.
    os.system(
        "grep 'specific density ' run.prod* | awk '{print $6}' > temp_density.txt"
    )
    if not os.path.exists("temp_density.txt"):
        return None
    # Plain try/finally replaces the original's pointless re-raise clause.
    try:
        density_list_one_seed = np.genfromtxt("temp_density.txt")
    finally:
        os.remove("temp_density.txt")
    return np.mean(density_list_one_seed)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
# Copyright (c) 2016-2020 Dell Inc. or its subsidiaries.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import paramiko
import subprocess
logger = logging.getLogger(__name__)
class Scp():
    """SFTP file-transfer helpers layered on top of Ssh.get_client()."""

    @staticmethod
    def get_file(address, localfile, remotefile,
                 user=None, password=None, pkey=None):
        """Download ``remotefile`` from ``address`` into ``localfile``."""
        logger.debug("Copying {}@{}:{} to {}".format(user, address, remotefile,
                                                     localfile))
        ssh_client = Ssh.get_client(address, user, password, pkey)
        channel = ssh_client.open_sftp()
        channel.get(remotefile, localfile)
        channel.close()
        ssh_client.close()

    @staticmethod
    def put_file(address, localfile, remotefile,
                 user=None, password=None, pkey=None):
        """Upload ``localfile`` to ``remotefile`` on ``address``."""
        logger.debug("Copying {} to {}@{}:{}".format(localfile, user, address,
                                                     remotefile))
        ssh_client = Ssh.get_client(address, user, password, pkey)
        channel = ssh_client.open_sftp()
        channel.put(localfile, remotefile)
        channel.close()
        ssh_client.close()
class Ssh():
    """Paramiko-based remote-execution helpers."""

    @staticmethod
    def get_client(address, user=None, password=None, pkey=None):
        """Return a connected SSHClient, auto-accepting unknown host keys."""
        client = paramiko.SSHClient()
        client.load_system_host_keys()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(address, username=user, password=password, pkey=pkey)
        return client

    @staticmethod
    def execute_command(address, command, user=None, password=None, pkey=None):
        """Run ``command`` on ``address``; return (exit_code, stdout, stderr).

        Returns (-1, "host not up", "host not up") when the host is
        unreachable. stdout/stderr are returned exactly as paramiko
        produced them (bytes).
        """
        try:
            logger.debug("ssh {}@{}, running: {}".format(user, address,
                                                         command))
            client = Ssh.get_client(address, user, password, pkey)
            _, stdout_stream, stderr_stream = client.exec_command(command)
            stdout, stderr = stdout_stream.read(), stderr_stream.read()
            exit_code = stdout_stream.channel.recv_exit_status()
            # BUG FIX: ``"stdout: " + stdout`` concatenated str + bytes on
            # Python 3 and raised TypeError. Lazy %s formatting handles both
            # bytes and str (and skips the work when DEBUG is disabled).
            logger.debug("exit_code: %s", exit_code)
            logger.debug("stdout: %s", stdout)
            logger.debug("stderr: %s", stderr)
            client.close()
        except IOError:
            logger.warning(".. host " + address + " is not up")
            return -1, "host not up", "host not up"
        return exit_code, stdout, stderr
class Exec():
    """Local command execution via subprocess."""

    @staticmethod
    def execute_command(cmd):
        """Run ``cmd`` locally; return (exit_code, stdout, stderr) with the
        streams as bytes."""
        logger.debug("Executing command: " + str(cmd))
        proc = subprocess.Popen(cmd,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()
        code = proc.returncode
        logger.debug("Got back:\n"
                     " returncode={}\n"
                     " stdout={}\n"
                     " stderr={}".format(str(proc.returncode),
                                         out, err))
        return code, out, err
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Juergen Brendel, Cisco Systems Inc.
# @author: Abhishek Raut, Cisco Systems Inc.
from mock import patch
from neutron import context
import neutron.db.api as db
from neutron.plugins.cisco.db import n1kv_db_v2
from neutron.plugins.cisco.db import n1kv_models_v2
from neutron.plugins.cisco.db import network_db_v2 as cdb
from neutron.plugins.cisco.extensions import n1kv_profile
from neutron.plugins.cisco.n1kv import n1kv_client
from neutron.plugins.cisco.n1kv import n1kv_neutron_plugin
from neutron.tests import base
from neutron.tests.unit import test_db_plugin as test_plugin
class FakeResponse(object):
    """Stand-in returned by the mocked httplib instead of a real response.

    Construct it with the HTTP status code, the body text that ``read()``
    should hand back, and a content type.
    """

    def __init__(self, status, response_text, content_type):
        # NOTE(review): content_type is accepted but never stored; header
        # lookups below always answer "application/xml" regardless.
        self.status = status
        self.buffer = response_text

    def __getitem__(self, val):
        # Every header query (e.g. 'content-type') gets the same answer.
        return "application/xml"

    def read(self, *args, **kwargs):
        """Return the canned body, ignoring any read-size arguments."""
        return self.buffer
def _fake_add_dummy_profile_for_test(self, obj):
    """
    Replacement for a function in the N1KV neutron plugin module.
    Since VSM is not available at the time of tests, we have no
    policy profiles. Hence we inject a dummy policy/network profile into the
    port/network object.
    """
    dummy_profile_name = "dummy_profile"
    dummy_tenant_id = "test-tenant"
    db_session = db.get_session()
    if 'port' in obj:
        # Ports get a fixed dummy policy-profile id.
        dummy_profile_id = "00000000-1111-1111-1111-000000000000"
        self._add_policy_profile(dummy_profile_name,
                                 dummy_profile_id,
                                 dummy_tenant_id)
        obj['port'][n1kv_profile.PROFILE_ID] = dummy_profile_id
    elif 'network' in obj:
        # Networks get a freshly created VLAN network profile.
        # NOTE(review): 'phsy1' looks like a typo for 'phys1', but it is a
        # runtime value the allocations below key on -- confirm before fixing.
        profile = {'name': 'dummy_profile',
                   'segment_type': 'vlan',
                   'physical_network': 'phsy1',
                   'segment_range': '3968-4047'}
        self.network_vlan_ranges = {profile[
            'physical_network']: [(3968, 4047)]}
        n1kv_db_v2.sync_vlan_allocations(db_session, self.network_vlan_ranges)
        # Note: local 'np' here is a network profile, not numpy.
        np = n1kv_db_v2.create_network_profile(db_session, profile)
        obj['network'][n1kv_profile.PROFILE_ID] = np.id
def _fake_setup_vsm(self):
"""Fake establish Communication with Cisco Nexus1000V VSM."""
self.agent_vsm = True
self._poll_policies(event_type="port_profile")
class N1kvPluginTestCase(test_plugin.NeutronDbPluginV2TestCase):
    """Base test case that wires the N1KV plugin to a fully mocked VSM."""

    _plugin_name = ('neutron.plugins.cisco.n1kv.'
                    'n1kv_neutron_plugin.N1kvNeutronPluginV2')
    tenant_id = "some_tenant"
    # Subclasses may override these BEFORE calling setUp() (see its docstring).
    DEFAULT_RESP_BODY = ""
    DEFAULT_RESP_CODE = 200
    DEFAULT_CONTENT_TYPE = ""
    def _make_test_policy_profile(self, id):
        """Create a policy profile record for testing purpose."""
        # Note: parameter 'id' shadows the builtin; kept for API stability.
        profile = {'id': id,
                   'name': 'TestGrizzlyPP'}
        profile_obj = n1kv_db_v2.create_policy_profile(profile)
        return profile_obj
    def _make_test_profile(self):
        """Create a profile record for testing purposes."""
        # NOTE(review): alloc_obj is created and flagged unallocated but never
        # added to the session -- confirm it is intentionally unused.
        alloc_obj = n1kv_models_v2.N1kvVlanAllocation(physical_network='foo',
                                                      vlan_id=123)
        alloc_obj.allocated = False
        segment_range = "100-900"
        segment_type = 'vlan'
        physical_network = 'phys1'
        profile_obj = n1kv_models_v2.NetworkProfile(
            name="test_np",
            segment_type=segment_type,
            segment_range=segment_range,
            physical_network=physical_network)
        session = db.get_session()
        session.add(profile_obj)
        session.flush()
        return profile_obj
    def setUp(self):
        """
        Setup method for n1kv plugin tests.
        First step is to define an acceptable response from the VSM to
        our requests. This needs to be done BEFORE the setUp() function
        of the super-class is called.
        This default here works for many cases. If you need something
        extra, please define your own setUp() function in your test class,
        and set your DEFAULT_RESPONSE value also BEFORE calling the
        setUp() of the super-function (this one here). If you have set
        a value already, it will not be overwritten by this code.
        """
        if not self.DEFAULT_RESP_BODY:
            self.DEFAULT_RESP_BODY = (
                """<?xml version="1.0" encoding="utf-8"?>
                <set name="events_set">
                <instance name="1" url="/api/hyper-v/events/1">
                <properties>
                <cmd>configure terminal ; port-profile type vethernet grizzlyPP
                (SUCCESS)
                </cmd>
                <id>42227269-e348-72ed-bdb7-7ce91cd1423c</id>
                <time>1369223611</time>
                <name>grizzlyPP</name>
                </properties>
                </instance>
                <instance name="2" url="/api/hyper-v/events/2">
                <properties>
                <cmd>configure terminal ; port-profile type vethernet havanaPP
                (SUCCESS)
                </cmd>
                <id>3fc83608-ae36-70e7-9d22-dec745623d06</id>
                <time>1369223661</time>
                <name>havanaPP</name>
                </properties>
                </instance>
                </set>
                """)
        # Creating a mock HTTP connection object for httplib. The N1KV client
        # interacts with the VSM via HTTP. Since we don't have a VSM running
        # in the unit tests, we need to 'fake' it by patching the HTTP library
        # itself. We install a patch for a fake HTTP connection class.
        # Using __name__ to avoid having to enter the full module path.
        http_patcher = patch(n1kv_client.httplib2.__name__ + ".Http")
        FakeHttpConnection = http_patcher.start()
        self.addCleanup(http_patcher.stop)
        # Now define the return values for a few functions that may be called
        # on any instance of the fake HTTP connection class.
        instance = FakeHttpConnection.return_value
        instance.getresponse.return_value = (FakeResponse(
            self.DEFAULT_RESP_CODE,
            self.DEFAULT_RESP_BODY,
            'application/xml'))
        instance.request.return_value = (instance.getresponse.return_value,
                                         self.DEFAULT_RESP_BODY)
        # Patch some internal functions in a few other parts of the system.
        # These help us move along, without having to mock up even more systems
        # in the background.
        # Return a dummy VSM IP address
        get_vsm_hosts_patcher = patch(n1kv_client.__name__ +
                                      ".Client._get_vsm_hosts")
        fake_get_vsm_hosts = get_vsm_hosts_patcher.start()
        self.addCleanup(get_vsm_hosts_patcher.stop)
        fake_get_vsm_hosts.return_value = ["127.0.0.1"]
        # Return dummy user profiles
        get_cred_name_patcher = patch(cdb.__name__ + ".get_credential_name")
        fake_get_cred_name = get_cred_name_patcher.start()
        self.addCleanup(get_cred_name_patcher.stop)
        fake_get_cred_name.return_value = {"user_name": "admin",
                                           "password": "admin_password"}
        # Patch a dummy profile creation into the N1K plugin code. The original
        # function in the plugin is a noop for production, but during test, we
        # need it to return a dummy network profile.
        (n1kv_neutron_plugin.N1kvNeutronPluginV2.
         _add_dummy_profile_only_if_testing) = _fake_add_dummy_profile_for_test
        n1kv_neutron_plugin.N1kvNeutronPluginV2._setup_vsm = _fake_setup_vsm
        super(N1kvPluginTestCase, self).setUp(self._plugin_name)
        # Create some of the database entries that we require.
        profile_obj = self._make_test_profile()
        policy_profile_obj = (self._make_test_policy_profile(
            '41548d21-7f89-4da0-9131-3d4fd4e8BBB8'))
        # Additional args for create_network(), create_port(), etc.
        self.more_args = {
            "network": {"n1kv:profile_id": profile_obj.id},
            "port": {"n1kv:profile_id": policy_profile_obj.id}
        }
    def test_plugin(self):
        """Smoke test: list networks and check tenant_id is present."""
        self._make_network('json',
                           'some_net',
                           True,
                           tenant_id=self.tenant_id,
                           set_context=True)
        req = self.new_list_request('networks', params="fields=tenant_id")
        req.environ['neutron.context'] = context.Context('', self.tenant_id)
        res = req.get_response(self.api)
        self.assertEqual(res.status_int, 200)
        body = self.deserialize('json', res)
        self.assertIn('tenant_id', body['networks'][0])
class TestN1kvBasicGet(test_plugin.TestBasicGet,
                       N1kvPluginTestCase):
    """Run the generic basic-GET test suite against the N1KV plugin."""
    pass
class TestN1kvHTTPResponse(test_plugin.TestV2HTTPResponse,
                           N1kvPluginTestCase):
    """Run the generic HTTP-response test suite against the N1KV plugin."""
    pass
class TestN1kvPorts(test_plugin.TestPortsV2,
                    N1kvPluginTestCase):
    """Port CRUD tests run against the N1KV plugin."""

    def _make_other_tenant_profile(self):
        """Underlying test uses other tenant Id for tests."""
        profile_obj = self._make_test_profile()
        policy_profile_obj = self._make_test_policy_profile(
            '41548d21-7f89-4da0-9131-3d4fd4e8BBB9')
        self.more_args = {
            "network": {"n1kv:profile_id": profile_obj.id},
            "port": {"n1kv:profile_id": policy_profile_obj.id}
        }
    def test_create_port_public_network(self):
        # The underlying test function needs a profile for a different tenant.
        self._make_other_tenant_profile()
        super(TestN1kvPorts, self).test_create_port_public_network()
    def test_create_port_public_network_with_ip(self):
        # The underlying test function needs a profile for a different tenant.
        self._make_other_tenant_profile()
        super(TestN1kvPorts, self).test_create_port_public_network_with_ip()
    def test_create_ports_bulk_emulated(self):
        # The underlying test function needs a profile for a different tenant.
        self._make_other_tenant_profile()
        super(TestN1kvPorts,
              self).test_create_ports_bulk_emulated()
    def test_create_ports_bulk_emulated_plugin_failure(self):
        # The underlying test function needs a profile for a different tenant.
        self._make_other_tenant_profile()
        super(TestN1kvPorts,
              self).test_create_ports_bulk_emulated_plugin_failure()
    def test_delete_port_public_network(self):
        # Same other-tenant profile requirement as the create tests above.
        self._make_other_tenant_profile()
        super(TestN1kvPorts, self).test_delete_port_public_network()
class TestN1kvNetworks(test_plugin.TestNetworksV2,
                       N1kvPluginTestCase):
    """Network CRUD tests run against the N1KV plugin."""

    _default_tenant = "somebody_else"  # Tenant-id determined by underlying
                                       # DB-plugin test cases. Need to use this
                                       # one for profile creation
    def test_update_network_set_not_shared_single_tenant(self):
        # The underlying test function needs a profile for a different tenant.
        profile_obj = self._make_test_profile()
        policy_profile_obj = self._make_test_policy_profile(
            '41548d21-7f89-4da0-9131-3d4fd4e8BBB9')
        self.more_args = {
            "network": {"n1kv:profile_id": profile_obj.id},
            "port": {"n1kv:profile_id": policy_profile_obj.id}
        }
        super(TestN1kvNetworks,
              self).test_update_network_set_not_shared_single_tenant()
class TestN1kvNonDbTest(base.BaseTestCase):
    """
    This test class here can be used to test the plugin directly,
    without going through the DB plugin test cases.
    None of the set-up done in N1kvPluginTestCase applies here.
    """
    def test_db(self):
        # Smoke test: the DB layer can be initialized without the plugin.
        n1kv_db_v2.initialize()
|
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
def fun():
    """Load conveter.csv, drop any column containing NaN, and show a
    seaborn pairplot coloured by SSID with histogram diagonals."""
    # NOTE(review): "conveter.csv" looks like a typo for "converter.csv" --
    # confirm against the actual data file before renaming anything.
    frame = pd.read_csv("conveter.csv")
    cleaned = frame.dropna(axis=1, how='any', thresh=None, subset=None,
                           inplace=False)
    #cleaned.to_csv("edited.csv",index=False)
    sns.pairplot(cleaned, dropna=True, hue='SSID', diag_kind='hist')
    plt.show()
#fun() |
import pandas as pd
import quandl
import math
import numpy as np
from sklearn import preprocessing, cross_validation, svm
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
from matplotlib import style
import datetime
import time
import pickle
# NOTE: this script is Python 2 (print statements) and uses the long-removed
# sklearn.cross_validation module; it runs top to bottom as a demo.
style.use('ggplot')
# Pull the full GOOGL daily history from Quandl's WIKI dataset.
df = quandl.get('WIKI/GOOGL')
df = df [['Adj. Open','Adj. High','Adj. Low','Adj. Close','Adj. Volume',]]
# High-to-close spread and open-to-close change, both as percentages.
df['HL_PCT'] = (df['Adj. High']-df['Adj. Close'])/df['Adj. Close']*100
df['PCT_change'] = (df['Adj. Close']-df['Adj. Open'])/df['Adj. Open']*100
df = df[['Adj. Close','HL_PCT','PCT_change','Adj. Volume']] #features
forecast_col = 'Adj. Close'
# Sentinel for missing data: treated as an outlier rather than dropping rows.
df.fillna(-99999,inplace = True)
# Predict 1% of the dataset length into the future.
forecast_out = int(math.ceil(0.01*len(df)))
# Label = the close price forecast_out days ahead.
df['label'] = df[forecast_col].shift(-forecast_out)
x = np.array(df.drop(['label'],1))
x = preprocessing.scale(x)
# The newest rows have no label (shifted off the end); hold them out as the
# inputs to forecast from.
x_lately = x[-forecast_out:]
x = x[:-forecast_out]
df.dropna(inplace=True)
y = np.array(df['label'])
x_train, x_test, y_train, y_test = cross_validation.train_test_split(x,y,test_size = 0.2)
#clf = svm.SVR(kernel = 'poly')
clf = LinearRegression()
clf.fit(x_train, y_train)
# Persist the trained model, then immediately reload it (pickling demo; the
# reloaded object replaces the in-memory clf).
with open ('linearregression.pickle', 'wb') as f:
    pickle.dump(clf, f)
pickle_in = open ('linearregression.pickle','rb')
clf = pickle.load(pickle_in)
# R^2 score on the held-out test split.
accuracy = clf.score(x_test,y_test)
print 'accuracy' , accuracy
forecast_set = clf.predict(x_lately)
print forecast_set, forecast_out
df['Forecast'] = np.nan
# Extend the index one calendar day at a time past the last known date so
# the forecast plots after the historical series.
last_date = df.iloc[-1].name
last_unix = time.mktime(last_date.timetuple())
one_day = 86400
next_unix = last_unix+one_day
for i in forecast_set:
    next_date = datetime.datetime.fromtimestamp(next_unix)
    next_unix += one_day
    # All feature columns NaN for forecast rows; only Forecast gets a value.
    df.loc[next_date] = [np.nan for _ in range(len(df.columns)-1)]+[i]
df['Adj. Close'].plot()
df['Forecast'].plot()
plt.legend(loc = 4)
plt.xlabel('Date')
plt.ylabel('Price')
plt.show()
##################################################################################################################
"""Linear regression"""
|
# Copyright 2018 Huawei Technologies Co.,LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import mock
import netaddr
from oslo_db import exception as db_exc
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from oslo_context import context
from cyborg import db
from cyborg.common import exception
from cyborg import objects
from cyborg.objects import base
from cyborg import tests as test
from cyborg.tests.unit import fake_accelerator
from cyborg.tests.unit import fake_deployable
from cyborg.tests.unit.objects import test_objects
from cyborg.tests.unit.db.base import DbTestCase
class _TestDeployableObject(DbTestCase):
    """Exercise objects.Deployable CRUD against the test DB layer."""

    @property
    def fake_deployable(self):
        # Fresh fake deployable DB record (id=2) on every access.
        db_deploy = fake_deployable.fake_db_deployable(id=2)
        return db_deploy

    @property
    def fake_accelerator(self):
        # Fresh fake accelerator DB record; 'acceleraotr' (sic) matches the
        # helper's actual name in the fake_accelerator module.
        db_acc = fake_accelerator.fake_db_acceleraotr(id=2)
        return db_acc

    def _create_deployable(self):
        """Create an accelerator and an attached deployable.

        Factors out the setup that every test below repeated verbatim.

        :returns: tuple of (fake DB record, created Deployable object).
        """
        db_acc = self.fake_accelerator
        acc = objects.Accelerator(context=self.context,
                                  **db_acc)
        acc.create(self.context)
        acc_get = objects.Accelerator.get(self.context, acc.uuid)
        db_dpl = self.fake_deployable
        dpl = objects.Deployable(context=self.context,
                                 **db_dpl)
        dpl.accelerator_id = acc_get.id
        dpl.create(self.context)
        return db_dpl, dpl

    def test_create(self):
        # BUG FIX: the original signature was test_create(self, mock_create)
        # with no matching @mock.patch decorator, so the test runner could
        # never invoke it (TypeError on the missing argument).
        db_dpl, dpl = self._create_deployable()
        self.assertEqual(db_dpl['uuid'], dpl.uuid)

    def test_get(self):
        _, dpl = self._create_deployable()
        dpl_get = objects.Deployable.get(self.context, dpl.uuid)
        self.assertEqual(dpl_get.uuid, dpl.uuid)

    def test_get_by_filter(self):
        _, dpl = self._create_deployable()
        query = {"uuid": dpl['uuid']}
        dpl_get_list = objects.Deployable.get_by_filter(self.context, query)
        self.assertEqual(dpl_get_list[0].uuid, dpl.uuid)

    def test_save(self):
        _, dpl = self._create_deployable()
        dpl.host = 'test_save'
        dpl.save(self.context)
        dpl_get = objects.Deployable.get(self.context, dpl.uuid)
        self.assertEqual(dpl_get.host, 'test_save')

    def test_destroy(self):
        db_dpl, dpl = self._create_deployable()
        self.assertEqual(db_dpl['uuid'], dpl.uuid)
        dpl.destroy(self.context)
        # After destroy, lookups must raise DeployableNotFound.
        self.assertRaises(exception.DeployableNotFound,
                          objects.Deployable.get, self.context,
                          dpl.uuid)
class TestDeployableObject(test_objects._LocalTest,
                           _TestDeployableObject):
    """Run the deployable tests locally and check FK-constraint handling."""

    def _test_save_objectfield_fk_constraint_fails(self, foreign_key,
                                                   expected_exception):
        """Verify each per-field save surfaces DB FK errors as
        ``expected_exception``."""
        error = db_exc.DBReferenceError('table', 'constraint', foreign_key,
                                        'key_table')
        # Prevent lazy-loading any fields, results in InstanceNotFound
        deployable = fake_deployable.fake_deployable_obj(self.context)
        fields_with_save_methods = [field for field in deployable.fields
                                    if hasattr(deployable, '_save_%s' % field)]
        for field in fields_with_save_methods:
            # The decorated closure is invoked immediately inside the loop,
            # so capturing 'field' from the enclosing scope is safe here
            # (no late-binding issue).
            @mock.patch.object(deployable, '_save_%s' % field)
            @mock.patch.object(deployable, 'obj_attr_is_set')
            def _test(mock_is_set, mock_save_field):
                mock_is_set.return_value = True
                mock_save_field.side_effect = error
                deployable.obj_reset_changes(fields=[field])
                deployable._changed_fields.add(field)
                self.assertRaises(expected_exception, deployable.save)
                deployable.obj_reset_changes(fields=[field])
            _test()
#!/usr/bin/env python
"""
Inmarsat Modbus Proxy Adapter for ClearBlade connects to your ClearBlade cloud/platform using secure credentials.
In ClearBlade you define Collections that map an IDP MobileID to a proxy IP address and TCP port.
The Adapter runs on an Edge device such as Raspberry Pi, reads the mapping from the platform
and sets up concurrent Modbus servers using PyModbus and the Twisted.internet framework.
The Adapter creates IP aliases on the Edge and listens for Modbus TCP connections on each of the virtual interfaces.
ClearBlade Collection Example ``ModbusProxyRtus`` for mapping MobileID to a local IP address:
+-----------------+---------------+----------+----------+-------------+----------+-----------+----------+
| mobile_id | ip_address | tcp_port | slave_id | config_file | latitude | longitude | altitude |
+-----------------+---------------+----------+----------+-------------+----------+-----------+----------+
| 00000000SKYEE3D | 192.168.1.200 | 502 | 1 | <blob> | <meta> | <meta> | <meta> |
+-----------------+---------------+----------+----------+-------------+----------+-----------+----------+
ClearBlade Collection Example ``ModbusProxyData`` that holds the latest reported data from the field:
+---------------+----------+----------+------------------+---------------+---------------+---------------------+
| ip_address | tcp_port | slave_id | register_address | register_data | register_type | timestamp |
+---------------+----------+----------+------------------+---------------+---------------+---------------------+
| 192.168.1.200 | 502 | 1 | 0 | 123 | ir | 01/21/2019 07:00:00 |
+---------------+----------+----------+------------------+---------------+---------------+---------------------+
Register Types:
* ``ir`` **Input Register** also sometimes referred to as read-only analog inputs
* ``hr`` **Holding Register** sometimes referred to as read/write or output registers
* ``di`` **Discrete Input** read-only boolean values sometimes called digital inputs or contact closures
* ``co`` **Coil** read/write boolean values associated with digital outputs
.. todo::
* Setup for multiple instances running on virtual IP address/ports from a single host
* Proper credits for ClearBlade / Jim Bouquet
"""
__version__ = "0.1.0"
import sys
import argparse
import subprocess
import time
from clearblade.ClearBladeCore import System, Query
from pymodbus.server.async import StartTcpServer
from pymodbus.device import ModbusDeviceIdentification
from twisted.internet import reactor
import headless
from context import ClearBladeModbusProxyServerContext
from constants import ADAPTER_DEVICE_ID, ADAPTER_CONFIG_COLLECTION, DEVICE_PROXY_CONFIG_COLLECTION, DATA_COLLECTION
from constants import COL_PROXY_IP_ADDRESS, COL_PROXY_IP_PORT
def get_parser():
    """
    Build the command line argument parser for the adapter.

    .. todo::
        Adapter services related to alerts/messaging, and local device/Edge management

    :returns: the configured argument parser
    :rtype: argparse.ArgumentParser
    """
    parser = argparse.ArgumentParser(description="Inmarsat Modbus Proxy Adapter for ClearBlade")
    parser.add_argument('--url', default='https://platform.clearblade.com',
                        help="The URL of the ClearBlade Platform the adapter will connect to.")
    parser.add_argument('--systemKey', required=True,
                        help="The System Key of the ClearBlade platform System the adapter will connect to.")
    parser.add_argument('--systemSecret', required=True,
                        help="The System Secret of the ClearBlade platform System the adapter will connect to.")
    parser.add_argument('--deviceName', default=ADAPTER_DEVICE_ID,
                        help="The id/name of the device that will be used for device \
                        authentication against the ClearBlade platform or Edge, defined \
                        within the devices table of the ClearBlade platform.")
    parser.add_argument('--deviceKey', required=True,
                        help="The active key of the device that will be used for device \
                        authentication against the ClearBlade platform or Edge, defined within \
                        the Devices table of the ClearBlade platform.")
    # NOTE(review): the flag is literally "--_slaves" (leading underscore).
    # It looks like a typo for "--slaves" but is kept for CLI backward
    # compatibility; confirm before renaming.
    parser.add_argument('--_slaves', dest='slaves_collection', default=DEVICE_PROXY_CONFIG_COLLECTION,
                        help="The ClearBlade Collection name with RTU proxy definitions")
    parser.add_argument('--data', dest='data_collection', default=DATA_COLLECTION,
                        help="The ClearBlade Collection name with proxy data")
    parser.add_argument('--net', dest='net_if', default='eth0',
                        help="The physical port of the network listener")
    parser.add_argument('--ip', dest='ip_address', default='localhost',
                        help="The local IP Address the PyModbus server will listen on")
    # type=int so a command-line value matches the integer default; without it
    # a user-supplied port would reach the TCP bind address tuple as a string.
    parser.add_argument('--tcp', dest='tcp_port', default=502, type=int,
                        help="The local TCP Port the PyModbus server will listen on")
    parser.add_argument('--logLevel', dest='log_level', default='INFO',
                        choices=['INFO', 'DEBUG'],
                        help="The level of logging that will be utilized by the adapter.")
    # type=int so the heartbeat interval compares cleanly against elapsed
    # seconds in _heartbeat (a string here would raise a TypeError on Python 3).
    parser.add_argument('--heartbeat', dest='heartbeat', default=30, type=int,
                        help="The logging heartbeat interval in seconds.")
    # TODO: planned-but-unimplemented options previously sketched here as
    # commented-out code: --messagingUrl, --messagingPort, --topicRoot, and the
    # device provision/health/logs/status/decommission service names.
    return parser
def get_adapter_config(cb_system, cb_auth, log):
    """
    Placeholder for fetching the adapter's runtime configuration from a
    ClearBlade platform Collection.

    .. todo::
        Not implemented — currently only emits a warning.

    :param clearblade.ClearBladeCore.System cb_system: the ClearBlade System being used
    :param clearblade.ClearBladeCore.System.Device cb_auth: the ClearBlade device authentication
    :param logging.Logger log: a logger
    """
    log.warning("get_adapter_config not implemented for {}".format(ADAPTER_CONFIG_COLLECTION))
    # Intended implementation sketch (not yet active):
    #   collection = cb_system.Collection(authenticatedUser=cb_auth,
    #                                     collectionName=ADAPTER_CONFIG_COLLECTION)
    #   for row in collection.getItems(Query()):
    #       log.info("ClearBlade Adapter Config: {}".format(row))
def _heartbeat(log, time_ref, interval=30):
    """
    Periodically log a heartbeat so the service is visibly alive even when no
    data is flowing from polling clients. Runs forever; intended for a worker
    thread (see run_async_server, which schedules it via the reactor).

    :param logging.Logger log: the service logger
    :param float time_ref: a time.time() reference for the first heartbeat
    :param int interval: seconds between heartbeat messages
    """
    if headless.is_logger(log):
        log.debug("Starting heartbeat ({}s)".format(interval))
    while True:
        elapsed = time.time() - time_ref
        if elapsed >= interval:
            log.debug("Heartbeat ({}s)".format(interval))
            time_ref = time.time()
        time.sleep(1)
def run_async_server():
    """
    Main service loop: discovers Modbus RTU proxy definitions from a
    ClearBlade platform Collection and starts one PyModbus TCP server per
    proxy IP address/port.

    On Linux/cygwin, each proxy IP is aliased onto the physical NIC via
    ``ifconfig``; on Windows the servers fall back to ``localhost`` and only
    the first proxy is served. Servers are started in the Twisted reactor's
    thread pool and a heartbeat thread keeps a periodic log message flowing.
    Cleanup (reactor stop, alias teardown) happens in the ``finally`` block.
    """
    log = None
    virtual_ifs = []  # Linux interface aliases created below, torn down in finally
    err_msg = None
    defer_reactor = False  # becomes True once a server defers reactor.run() to us
    try:
        parser = get_parser()
        user_options = parser.parse_args()
        local_ip_address = user_options.ip_address
        local_tcp_port = user_options.tcp_port
        net_if = user_options.net_if
        if user_options.log_level == 'DEBUG':
            _debug = True
        else:
            _debug = False
        HEARTBEAT = user_options.heartbeat
        log = headless.get_wrapping_logger(name=ADAPTER_DEVICE_ID, debug=_debug)
        # NOTE(review): server_log is created but never handed to the Modbus
        # server below — presumably intended for StartTcpServer; confirm.
        server_log = headless.get_wrapping_logger(name="pymodbus.server", debug=_debug)
        log.info("Initializing ClearBlade System connection")
        cb_system = System(systemKey=user_options.systemKey, systemSecret=user_options.systemSecret, url=user_options.url)
        cb_auth = cb_system.Device(name=user_options.deviceName, key=user_options.deviceKey)
        cb_slave_config = user_options.slaves_collection
        cb_data = user_options.data_collection
        ip_proxies = []
        proxy_ports = []
        ip_address = None
        # Pull proxy definitions (rows with a non-empty IP address) from the Collection
        collection = cb_system.Collection(cb_auth, collectionName=cb_slave_config)
        query = Query()
        query.notEqualTo(COL_PROXY_IP_ADDRESS, '')
        rows = collection.getItems(query)
        for row in rows:
            # TODO: allow for possibility of multiple IPs with same port or same IP with multiple ports
            ip_address = str(row[COL_PROXY_IP_ADDRESS])
            tcp_port = int(row[COL_PROXY_IP_PORT])
            if ip_address not in ip_proxies:
                log.info("Found slave at {} on ClearBlade adapter config".format(ip_address))
                ip_proxies.append(ip_address)
                proxy_ports.append(tcp_port)
            else:
                log.warning("Duplicate proxy IP address {} found in configuration - ignoring".format(ip_address))
        log.debug("Processing {} slaves".format(len(ip_proxies)))
        for i in range(0, len(ip_proxies)):
            log.debug("Getting server context for {}".format(ip_proxies[i]))
            # The context proxies Modbus register reads/writes through to the
            # ClearBlade data Collection for this slave's IP address.
            context = ClearBladeModbusProxyServerContext(cb_system=cb_system, cb_auth=cb_auth,
                                                         cb_slaves_config=cb_slave_config, cb_data=cb_data,
                                                         ip_address=ip_proxies[i], log=log)
            # Create IP aliases
            local_ip_address = ip_proxies[i]
            ip_mask = '255.255.255.0'
            local_tcp_port = proxy_ports[i]
            if sys.platform.startswith('win'):
                # Windows: no ifconfig-style aliasing — bind to localhost instead
                log.info("I'm on Windows!")
                local_ip_address = 'localhost'
            elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
                # Linux/cygwin: alias the proxy IP onto the physical NIC (e.g. eth0:0)
                virtual_if = '{nif}:{alias}'.format(nif=net_if, alias=i)
                virtual_ifs.append(virtual_if)
                linux_command = "ifconfig {vif} {ip}".format(vif=virtual_if, ip=local_ip_address)
                if ip_mask is not None:
                    linux_command += " netmask {mask}".format(mask=ip_mask)
                log.info("Creating virtual IP address / alias via $ {}".format(linux_command))
                subprocess.call(linux_command, shell=True)
            # Create Server Identification
            identity = ModbusDeviceIdentification()
            identity.VendorName = 'PyModbus'
            identity.VendorUrl = 'http://github.com/bashwork/pymodbus/'
            identity.ProductName = 'Inmarsat/ClearBlade Modbus Server Adapter'
            identity.ModelName = ip_proxies[i]
            identity.MajorMinorRevision = '1.0'
            # Setup Modbus TCP Server
            log.info("Starting Modbus TCP server on {}:{}".format(local_ip_address, local_tcp_port))
            modbus_server_args = {
                'context': context,
                'identity': identity,
                'address': (local_ip_address, local_tcp_port),
                # 'console': _debug,
                'defer_reactor_run': True,  # all servers share one reactor, started below
            }
            if modbus_server_args['defer_reactor_run']:
                defer_reactor = True
            reactor.callInThread(StartTcpServer, **modbus_server_args)
            if local_ip_address == 'localhost':
                # Only one server can bind localhost - stop after the first proxy
                log.info("Windows retricted environment prevents IP alias - running localhost for {}"
                         .format(ip_proxies[i]))
                break
        reactor.callInThread(_heartbeat, log, time.time(), HEARTBEAT)
        if defer_reactor:
            reactor.suggestThreadPoolSize(len(ip_proxies))
            reactor.run()  # blocks until the reactor is stopped
    except KeyboardInterrupt:
        err_msg = "modbus_server_adapter.py halted by Keyboard Interrupt"
        if log is not None:
            log.info(err_msg)
        else:
            print(err_msg)
        sys.exit("modbus_server_adapter.py halted by Keyboard Interrupt")
    except Exception as e:
        err_msg = "EXCEPTION: {}".format(e)
        if log is not None:
            log.info(err_msg)
        else:
            print(err_msg)
        sys.exit("modbus_server_adapter.py halted by exception {}".format(e))
    finally:
        # Best-effort cleanup: stop the reactor and remove any Linux IP aliases
        if defer_reactor and reactor.running:
            reactor.stop()
        for vif in virtual_ifs:
            debug_msg = "Taking down virtual interface {}".format(vif)
            if log is not None:
                log.debug(debug_msg)
            else:
                print(debug_msg)
            linux_command = "ifconfig {} down".format(vif)
            subprocess.call(linux_command, shell=True)
        print("Exiting...")
if __name__ == '__main__':
    # Script entry point: run the (blocking) adapter service loop.
    run_async_server()
|
import sys, os
from os.path import dirname
root_path = dirname(dirname(os.getcwd()))
sys.path.insert(0, root_path) # insert root directory to environmental variables
from GFMM.accelbatchgfmm import AccelBatchGFMM
import numpy as np
from functionhelper.prepocessinghelper import loadDataset
def _sweep_thresholds(Xtr, patClassIdTr, Xtest, patClassIdTest, teta, simil, sing, thresholds):
    """Train and evaluate AccelBatchGFMM across a range of bthres values.

    Extracted from four near-identical copy-pasted loops in the original
    script; one call per (simil, sing) configuration.

    :param Xtr: training patterns (used as both lower and upper bounds)
    :param patClassIdTr: training class labels
    :param Xtest: testing patterns
    :param patClassIdTest: testing class labels
    :param teta: maximum hyperbox size
    :param simil: similarity measure ('short', 'long' or 'mid')
    :param sing: 'max' or 'min' selection used with the 'mid' measure
    :param thresholds: iterable of bthres similarity thresholds to sweep
    :returns: (num_hyperboxes, training_times, testing_errors) 1-D arrays
    """
    num_hyperboxes = np.array([], dtype=np.int64)
    training_times = np.array([])
    testing_errors = np.array([])
    for bthres in thresholds:
        classifier = AccelBatchGFMM(gamma=1, teta=teta, bthres=bthres, simil=simil,
                                    sing=sing, isDraw=False, oper='min', isNorm=False)
        classifier.fit(Xtr, Xtr, patClassIdTr)
        training_times = np.append(training_times, classifier.elapsed_training_time)
        num_hyperboxes = np.append(num_hyperboxes, len(classifier.classId))
        result = classifier.predict(Xtest, Xtest, patClassIdTest)
        # predict may return None; only record an error rate when it succeeds
        # (is-not-None replaces the original `result != None` comparison)
        if result is not None:
            testing_errors = np.append(testing_errors, result.summis / Xtest.shape[0])
    return num_hyperboxes, training_times, testing_errors


if __name__ == '__main__':
    # os.path.join generalizes the original hard-coded '\\' separators so the
    # script also runs on non-Windows platforms (identical paths on Windows).
    save_result_folder_path = os.path.join(root_path, 'Experiment', 'Acel_Agglo')
    dataset_path = os.path.join(root_path, 'Dataset', 'train_test')
    dataset_names = ['ringnorm', 'twonorm', 'waveform']
    teta = 0.4
    thresholds = np.arange(0.02, 1, 0.02)
    # (simil, sing) configurations evaluated for every dataset, in the
    # column order expected by the result-file header below.
    configs = [('short', 'max'), ('long', 'max'), ('mid', 'max'), ('mid', 'min')]
    for dataset_name in dataset_names:
        print('Current dataset: ', dataset_name)
        training_file = os.path.join(dataset_path, dataset_name + '_train.dat')
        testing_file = os.path.join(dataset_path, dataset_name + '_test.dat')
        # Read training file
        Xtr, _, patClassIdTr, _ = loadDataset(training_file, 1, False)
        # Read testing file
        _, Xtest, _, patClassIdTest = loadDataset(testing_file, 0, False)
        columns = [thresholds.reshape(-1, 1)]
        for simil, sing in configs:
            num_boxes, times, errors = _sweep_thresholds(
                Xtr, patClassIdTr, Xtest, patClassIdTest, teta, simil, sing, thresholds)
            columns.extend([num_boxes.reshape(-1, 1), times.reshape(-1, 1),
                            errors.reshape(-1, 1)])
        data_save = np.hstack(columns)
        # Save result to file; mode 'w' truncates any existing file, replacing
        # the original open/close-then-append sequence.
        filename = os.path.join(save_result_folder_path, dataset_name + '.txt')
        with open(filename, 'w') as f_handle:
            f_handle.write('Teta = %f \n' % (teta))
            # Single clean header line (the original backslash-continued string
            # literal embedded large runs of indentation whitespace).
            f_handle.write('simi thres, No hyperboxes Short simi, Training time Short simi, '
                           'Testing error Short simi, No hyperboxes Long simi, '
                           'Training time Long simi, Testing error Long simi, '
                           'No hyperboxes Mid max simi, Training time Mid max simi, '
                           'Testing error Mid max simi, No hyperboxes Mid min simi, '
                           'Training time Mid min simi, Testing error Mid min simi \n')
            np.savetxt(f_handle, data_save, fmt='%f', delimiter=', ')
    print('---Finish---')
# Generated by Django 3.0.14 on 2021-06-09 17:48
from django.db import migrations
class Migration(migrations.Migration):
    # No-op migration: `operations` is empty, so no schema changes are applied.
    # Declaring two same-app parents (0090 and 0089) makes this look like a
    # merge migration reconciling two branches of migration history —
    # NOTE(review): confirm 0090 does not already depend on 0089, in which
    # case the second dependency would be redundant.

    dependencies = [
        ("iaso", "0090_auto_20210604_1553"),
        ("iaso", "0089_profile_language"),
    ]

    operations = []
|
from guet.commands import CommandFactory
from guet.committers import Committers2 as Committers
from guet.committers import CurrentCommitters
from guet.files import FileSystem
from guet.steps import Step
from guet.steps.check import HelpCheck, VersionCheck
from guet.steps.preparation import InitializePreparation, SwapToLocal
from guet.util import HelpMessageBuilder
from ._action import GetCommittersAction
# Usage/help text for `guet get`, built once at import time and displayed by
# the HelpCheck step when the command is invoked without arguments or with a
# help flag.
GET_HELP_MESSAGE = HelpMessageBuilder('guet get <identifier> [-flag, ...]',
                                      'Get currently set information.') \
    .explanation(('Valid Identifier\n\tcurrent - lists currently set committers'
                  '\n\tall - lists all committers')) \
    .build()
class GetCommandFactory(CommandFactory):
    """Factory that assembles the step chain backing `guet get`."""

    def __init__(self,
                 file_system: FileSystem,
                 committers: Committers,
                 current_committers: CurrentCommitters):
        self.file_system = file_system
        self.committers = committers
        self.current = current_committers

    def build(self) -> Step:
        """Chain checks, preparations, and the final get action, in order."""
        chain = VersionCheck()
        chain = chain.next(HelpCheck(GET_HELP_MESSAGE, stop_on_no_args=True))
        chain = chain.next(InitializePreparation(self.file_system))
        chain = chain.next(SwapToLocal(self.committers))
        chain = chain.next(GetCommittersAction(self.committers, self.current))
        return chain
|
"""
==============================================
Plot which vertices are inside the same voxels
==============================================
Show lines connecting vertices on the flatmap that are actually within the same
voxels in a given scan.
Here, we used advanced compositing to be explicit about display options for the
connecting lines.
"""
import cortex
import numpy as np
import matplotlib.pyplot as plt
# Create an empty pycortex Volume (all voxels NaN) for subject S1 with the
# 'retinotopy' transform — we only need its geometry, not data values.
volume = cortex.Volume.empty(subject='S1', xfmname='retinotopy', value=np.nan)
# Plot a flatmap with the data projected onto the surface
fig = cortex.quickflat.make_figure(volume, with_curvature=True, with_colorbar=False)
# Advanced compositing addition of connected vertices.
# Note that this will not currently resize correctly with a figure.
# NOTE(review): both the RGBA `color` alpha (0.6) and `alpha=0.3` are passed;
# presumably `alpha` takes precedence — confirm against the
# cortex.quickflat.composite.add_connected_vertices documentation.
lines = cortex.quickflat.composite.add_connected_vertices(fig, volume,
    exclude_border_width=None, color=(1.0, 0.5, 0.1, 0.6), linewidth=0.75,
    alpha=0.3, recache=True)
plt.show()
|
"""
Tests for the Command Manager.
"""
# ----------------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------------
import unittest
import os
from enso.commands.manager import CommandObjectRegistry
from enso.commands.manager import CommandAlreadyRegisteredError
from enso.commands.interfaces import CommandExpression
# ----------------------------------------------------------------------------
# Object Registry Unit Tests
# ----------------------------------------------------------------------------
class FakeCommand:
    """Minimal test double for a command object; carries only a name."""

    def __init__(self, name):
        self.name = name
class RegistryTester(unittest.TestCase):
    """Exercises CommandObjectRegistry registration and lookup.

    Uses the modern assertRaises/assertEqual methods — the failUnless*
    aliases used previously were deprecated in Python 2.7/3.2 and removed
    in Python 3.12.
    """

    def setUp(self):
        # One single-letter command name per lowercase letter.
        # NOTE(review): range(ord("a"), ord("z")) excludes "z"; preserved
        # as-is in case the off-by-one was intentional.
        self.TESTS = [chr(i) for i in range(ord("a"), ord("z"))]
        self.registry = CommandObjectRegistry()
        for name in self.TESTS:
            expr = CommandExpression(name)
            self.registry.addCommandObj(FakeCommand(name), expr)

    def tearDown(self):
        self.registry = None

    def testAlreadyRegistered(self):
        """Re-registering an existing name must raise."""
        for name in self.TESTS:
            expr = CommandExpression(name)
            self.assertRaises(CommandAlreadyRegisteredError,
                              self.registry.addCommandObj,
                              FakeCommand(name), expr)

    def testRegistered(self):
        """Every registered name resolves to its command object."""
        for name in self.TESTS:
            cmd = self.registry.getCommandObj(name)
            self.assertEqual(name, cmd.name)
        self.assertEqual(set(self.registry.getCommandList()),
                         set(self.TESTS))
# TODO: Match testing.
# TODO: Suggestion testing.
# ----------------------------------------------------------------------------
# Script
# ----------------------------------------------------------------------------
if __name__ == "__main__":
unittest.main()
|
from spotify.app import app
|
import sys
sys.path.insert(0, "../computerVision/")
import FaceDetection
import VideoStream
import cv2
# Initialise the face detection on camera 1.
# If you are using your laptop camera, or have no embedded camera on your
# laptop, change 1 to 0.
faceStream = FaceDetection.FaceStream(1)

# Renamed from `exit` to avoid shadowing the builtin of the same name.
done = False

# Parenthesized print is valid on both Python 2 (as a parenthesized
# expression) and Python 3 for a single argument.
print(faceStream.getRes())

while not done:
    # Grab the next frame from the camera
    frame = faceStream.nextFrame()
    # Display the frame from the camera on screen
    faceStream.display()
    key = cv2.waitKey()
    # Quit when the 'q' key has been pressed
    if key == ord('q'):
        done = True
|
from .classifiers.linear import MulticlassLogisticRegression
from .encoders.sklearnencoders import ExperimentalPCA, ExperimentalIncrementalPCA, ExperimentalUMAP
from .encoders.dictembedding import DictEmbedding
# Registry mapping classifier names (as used by configuration) to classes.
classifiers_dict = {
    'LogisticRegression': MulticlassLogisticRegression,
}

# Registry mapping encoder names to classes: sklearn-style dimensionality
# reducers plus the dictionary-based embedding.
encoders_dict = {
    'PCA': ExperimentalPCA,
    'IncrementalPCA': ExperimentalIncrementalPCA,
    'UMAP': ExperimentalUMAP,
    'embedding_dict': DictEmbedding
}
|
# MINLP written by GAMS Convert at 05/07/21 17:13:00
#
# Equation counts
# Total E G L N X C B
# 189 5 0 184 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 145 45 100 0 0 0 0 0
# FX 0
#
# Nonzero counts
# Total const NL
# 788 488 300
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()

# The generated source declared each of the 145 variables on its own line;
# these loops create identical components with identical attribute names:
# m.b1 .. m.b100   binary decision variables
# m.x101 .. m.x145 continuous variables bounded to [0, 10]
# setattr on a ConcreteModel is equivalent to `m.<name> = Var(...)`, so the
# objective and constraints below can keep referencing m.b1, m.x101, etc.
for _k in range(1, 101):
    setattr(m, 'b{0}'.format(_k), Var(within=Binary, bounds=(0, 1), initialize=0))
for _k in range(101, 146):
    setattr(m, 'x{0}'.format(_k), Var(within=Reals, bounds=(0, 10), initialize=0))
# Minimize the sum of the slack variables, which the constraints below bound
# from below by absolute coordinate differences.
m.obj = Objective(sense=minimize, expr= m.x103 + m.x106 + m.x109 + m.x111 +
    m.x113 + m.x115 + m.x117 + m.x119 + m.x121 + m.x123 + m.x125 + m.x127 +
    m.x128 + m.x129 + m.x130 + m.x131 + m.x132 + m.x133 + m.x134 + m.x135 +
    m.x136 + m.x137 + m.x138 + m.x139 + m.x140 + m.x141 + m.x142 + m.x143 +
    m.x144 + m.x145)
# e1..e60 come in pairs: e(2k-1)/e(2k) linearize an absolute difference,
# encoding |u - v| <= s via (u - v - s <= 0) and (-u + v - s <= 0), where s is
# one of the slack variables minimized in the objective.
m.e1 = Constraint(expr= m.x101 - m.x102 - m.x103 <= 0)
m.e2 = Constraint(expr= -m.x101 + m.x102 - m.x103 <= 0)
m.e3 = Constraint(expr= m.x104 - m.x105 - m.x106 <= 0)
m.e4 = Constraint(expr= -m.x104 + m.x105 - m.x106 <= 0)
m.e5 = Constraint(expr= m.x107 - m.x108 - m.x109 <= 0)
m.e6 = Constraint(expr= -m.x107 + m.x108 - m.x109 <= 0)
m.e7 = Constraint(expr= m.x101 - m.x110 - m.x111 <= 0)
m.e8 = Constraint(expr= -m.x101 + m.x110 - m.x111 <= 0)
m.e9 = Constraint(expr= m.x104 - m.x112 - m.x113 <= 0)
m.e10 = Constraint(expr= -m.x104 + m.x112 - m.x113 <= 0)
m.e11 = Constraint(expr= m.x107 - m.x114 - m.x115 <= 0)
m.e12 = Constraint(expr= -m.x107 + m.x114 - m.x115 <= 0)
m.e13 = Constraint(expr= m.x101 - m.x116 - m.x117 <= 0)
m.e14 = Constraint(expr= -m.x101 + m.x116 - m.x117 <= 0)
m.e15 = Constraint(expr= m.x104 - m.x118 - m.x119 <= 0)
m.e16 = Constraint(expr= -m.x104 + m.x118 - m.x119 <= 0)
m.e17 = Constraint(expr= m.x107 - m.x120 - m.x121 <= 0)
m.e18 = Constraint(expr= -m.x107 + m.x120 - m.x121 <= 0)
m.e19 = Constraint(expr= m.x101 - m.x122 - m.x123 <= 0)
m.e20 = Constraint(expr= -m.x101 + m.x122 - m.x123 <= 0)
m.e21 = Constraint(expr= m.x104 - m.x124 - m.x125 <= 0)
m.e22 = Constraint(expr= -m.x104 + m.x124 - m.x125 <= 0)
m.e23 = Constraint(expr= m.x107 - m.x126 - m.x127 <= 0)
m.e24 = Constraint(expr= -m.x107 + m.x126 - m.x127 <= 0)
m.e25 = Constraint(expr= m.x102 - m.x110 - m.x128 <= 0)
m.e26 = Constraint(expr= -m.x102 + m.x110 - m.x128 <= 0)
m.e27 = Constraint(expr= m.x105 - m.x112 - m.x129 <= 0)
m.e28 = Constraint(expr= -m.x105 + m.x112 - m.x129 <= 0)
m.e29 = Constraint(expr= m.x108 - m.x114 - m.x130 <= 0)
m.e30 = Constraint(expr= -m.x108 + m.x114 - m.x130 <= 0)
m.e31 = Constraint(expr= m.x102 - m.x116 - m.x131 <= 0)
m.e32 = Constraint(expr= -m.x102 + m.x116 - m.x131 <= 0)
m.e33 = Constraint(expr= m.x105 - m.x118 - m.x132 <= 0)
m.e34 = Constraint(expr= -m.x105 + m.x118 - m.x132 <= 0)
m.e35 = Constraint(expr= m.x108 - m.x120 - m.x133 <= 0)
m.e36 = Constraint(expr= -m.x108 + m.x120 - m.x133 <= 0)
m.e37 = Constraint(expr= m.x102 - m.x122 - m.x134 <= 0)
m.e38 = Constraint(expr= -m.x102 + m.x122 - m.x134 <= 0)
m.e39 = Constraint(expr= m.x105 - m.x124 - m.x135 <= 0)
m.e40 = Constraint(expr= -m.x105 + m.x124 - m.x135 <= 0)
m.e41 = Constraint(expr= m.x108 - m.x126 - m.x136 <= 0)
m.e42 = Constraint(expr= -m.x108 + m.x126 - m.x136 <= 0)
m.e43 = Constraint(expr= m.x110 - m.x116 - m.x137 <= 0)
m.e44 = Constraint(expr= -m.x110 + m.x116 - m.x137 <= 0)
m.e45 = Constraint(expr= m.x112 - m.x118 - m.x138 <= 0)
m.e46 = Constraint(expr= -m.x112 + m.x118 - m.x138 <= 0)
m.e47 = Constraint(expr= m.x114 - m.x120 - m.x139 <= 0)
m.e48 = Constraint(expr= -m.x114 + m.x120 - m.x139 <= 0)
m.e49 = Constraint(expr= m.x110 - m.x122 - m.x140 <= 0)
m.e50 = Constraint(expr= -m.x110 + m.x122 - m.x140 <= 0)
m.e51 = Constraint(expr= m.x112 - m.x124 - m.x141 <= 0)
m.e52 = Constraint(expr= -m.x112 + m.x124 - m.x141 <= 0)
m.e53 = Constraint(expr= m.x114 - m.x126 - m.x142 <= 0)
m.e54 = Constraint(expr= -m.x114 + m.x126 - m.x142 <= 0)
m.e55 = Constraint(expr= m.x116 - m.x122 - m.x143 <= 0)
m.e56 = Constraint(expr= -m.x116 + m.x122 - m.x143 <= 0)
m.e57 = Constraint(expr= m.x118 - m.x124 - m.x144 <= 0)
m.e58 = Constraint(expr= -m.x118 + m.x124 - m.x144 <= 0)
m.e59 = Constraint(expr= m.x120 - m.x126 - m.x145 <= 0)
m.e60 = Constraint(expr= -m.x120 + m.x126 - m.x145 <= 0)
# Big-M covering disjunction for point 1 (coordinates m.x101, m.x104, m.x107).
# For each of the 20 data sites i with binary selector b_i:
#   ||site_i - point_1||^2 + M_i * b_i <= M_i + 1
# so b_i = 1 forces the point to lie within squared distance 1 of site i,
# while b_i = 0 leaves the constraint slack.  Note each RHS equals its
# big-M coefficient plus 1.
# NOTE(review): M_i varies per site — presumably precomputed from the
# variable bounds by the model generator; confirm before editing values.
m.e61 = Constraint(expr= (0.483311857356823 - m.x101)**2 + (0.114242198506904
- m.x104)**2 + (7.12048883659032 - m.x107)**2 + 188.522461227626 * m.b1
<= 189.522461227626)
m.e62 = Constraint(expr= (5.2590135790233 - m.x101)**2 + (7.33259189570392 -
m.x104)**2 + (5.312333476343 - m.x107)**2 + 98.8166159288294 * m.b2
<= 99.8166159288294)
m.e63 = Constraint(expr= (7.41517046461879 - m.x101)**2 + (9.62332773098117 -
m.x104)**2 + (4.79943898486809 - m.x107)**2 + 167.849028003939 * m.b3
<= 168.849028003939)
m.e64 = Constraint(expr= (6.671843981803 - m.x101)**2 + (8.10658123259484 -
m.x104)**2 + (8.43381689055527 - m.x107)**2 + 144.62434214578 * m.b4
<= 145.62434214578)
m.e65 = Constraint(expr= (9.05870575338678 - m.x101)**2 + (8.3311941216586 -
m.x104)**2 + (2.43718333261179 - m.x107)**2 + 188.522461227626 * m.b5
<= 189.522461227626)
m.e66 = Constraint(expr= (2.45247392282192 - m.x101)**2 + (3.04490781414335 -
m.x104)**2 + (3.74797873360784 - m.x107)**2 + 119.618424440661 * m.b6
<= 120.618424440661)
m.e67 = Constraint(expr= (3.17249885664207 - m.x101)**2 + (0.899014640298569 -
m.x104)**2 + (6.53554769882638 - m.x107)**2 + 128.282312211875 * m.b7
<= 129.282312211875)
m.e68 = Constraint(expr= (7.19140474364188 - m.x101)**2 + (6.78752778006733 -
m.x104)**2 + (7.10371917668867 - m.x107)**2 + 108.45575250014 * m.b8
<= 109.45575250014)
m.e69 = Constraint(expr= (0.581905599074722 - m.x101)**2 + (8.05664566308502 -
m.x104)**2 + (0.465270839540525 - m.x107)**2 + 163.557092366753 * m.b9
<= 164.557092366753)
m.e70 = Constraint(expr= (2.89314656575976 - m.x101)**2 + (2.98350648433744 -
m.x104)**2 + (4.94095686412664 - m.x107)**2 + 101.809153524392 * m.b10
<= 102.809153524392)
m.e71 = Constraint(expr= (2.18223181481477 - m.x101)**2 + (6.36734447251869 -
m.x104)**2 + (6.99053555821422 - m.x107)**2 + 135.200072571286 * m.b11
<= 136.200072571286)
m.e72 = Constraint(expr= (8.39213303571845 - m.x101)**2 + (0.0966493493157039
- m.x104)**2 + (0.992650538147096 - m.x107)**2 + 168.474583620344 * m.b12
<= 169.474583620344)
m.e73 = Constraint(expr= (6.8673656213906 - m.x101)**2 + (8.47463209326542 -
m.x104)**2 + (0.494039939513553 - m.x107)**2 + 188.895677706624 * m.b13
<= 189.895677706624)
m.e74 = Constraint(expr= (2.07334522686175 - m.x101)**2 + (0.611759422337085 -
m.x104)**2 + (7.49872182399417 - m.x107)**2 + 157.156134140441 * m.b14
<= 158.156134140441)
m.e75 = Constraint(expr= (5.58287553353321 - m.x101)**2 + (7.41023187669618 -
m.x104)**2 + (5.78186220125907 - m.x107)**2 + 102.681819435286 * m.b15
<= 103.681819435286)
m.e76 = Constraint(expr= (3.75663662491927 - m.x101)**2 + (2.16100057183036 -
m.x104)**2 + (9.4954261517135 - m.x107)**2 + 153.416411666872 * m.b16
<= 154.416411666872)
m.e77 = Constraint(expr= (4.04360404243071 - m.x101)**2 + (7.5903513366217 -
m.x104)**2 + (3.71685851137678 - m.x107)**2 + 100.651007858618 * m.b17
<= 101.651007858618)
m.e78 = Constraint(expr= (1.45072437530262 - m.x101)**2 + (1.11420059440894 -
m.x104)**2 + (9.42819884441584 - m.x107)**2 + 188.895677706624 * m.b18
<= 189.895677706624)
m.e79 = Constraint(expr= (8.44626629441698 - m.x101)**2 + (8.81210793727421 -
m.x104)**2 + (9.26767041565757 - m.x107)**2 + 168.474583620344 * m.b19
<= 169.474583620344)
m.e80 = Constraint(expr= (4.74415255019913 - m.x101)**2 + (2.8194183128037 -
m.x104)**2 + (1.76655535189797 - m.x107)**2 + 126.464760843581 * m.b20
<= 127.464760843581)
# Point 1 must be assigned to exactly one of the 20 sites.
m.e81 = Constraint(expr= m.b1 + m.b2 + m.b3 + m.b4 + m.b5 + m.b6 + m.b7 + m.b8
+ m.b9 + m.b10 + m.b11 + m.b12 + m.b13 + m.b14 + m.b15 + m.b16 + m.b17 +
m.b18 + m.b19 + m.b20 == 1)
# Big-M covering disjunction for point 2 (m.x102, m.x105, m.x108) over the
# same 20 sites, with selectors m.b21 .. m.b40.  Structure is identical to
# the e61-e81 group: b_i = 1 forces squared distance <= 1 to site i.
m.e82 = Constraint(expr= (0.483311857356823 - m.x102)**2 + (0.114242198506904
- m.x105)**2 + (7.12048883659032 - m.x108)**2 + 188.522461227626 * m.b21
<= 189.522461227626)
m.e83 = Constraint(expr= (5.2590135790233 - m.x102)**2 + (7.33259189570392 -
m.x105)**2 + (5.312333476343 - m.x108)**2 + 98.8166159288294 * m.b22
<= 99.8166159288294)
m.e84 = Constraint(expr= (7.41517046461879 - m.x102)**2 + (9.62332773098117 -
m.x105)**2 + (4.79943898486809 - m.x108)**2 + 167.849028003939 * m.b23
<= 168.849028003939)
m.e85 = Constraint(expr= (6.671843981803 - m.x102)**2 + (8.10658123259484 -
m.x105)**2 + (8.43381689055527 - m.x108)**2 + 144.62434214578 * m.b24
<= 145.62434214578)
m.e86 = Constraint(expr= (9.05870575338678 - m.x102)**2 + (8.3311941216586 -
m.x105)**2 + (2.43718333261179 - m.x108)**2 + 188.522461227626 * m.b25
<= 189.522461227626)
m.e87 = Constraint(expr= (2.45247392282192 - m.x102)**2 + (3.04490781414335 -
m.x105)**2 + (3.74797873360784 - m.x108)**2 + 119.618424440661 * m.b26
<= 120.618424440661)
m.e88 = Constraint(expr= (3.17249885664207 - m.x102)**2 + (0.899014640298569 -
m.x105)**2 + (6.53554769882638 - m.x108)**2 + 128.282312211875 * m.b27
<= 129.282312211875)
m.e89 = Constraint(expr= (7.19140474364188 - m.x102)**2 + (6.78752778006733 -
m.x105)**2 + (7.10371917668867 - m.x108)**2 + 108.45575250014 * m.b28
<= 109.45575250014)
m.e90 = Constraint(expr= (0.581905599074722 - m.x102)**2 + (8.05664566308502 -
m.x105)**2 + (0.465270839540525 - m.x108)**2 + 163.557092366753 * m.b29
<= 164.557092366753)
m.e91 = Constraint(expr= (2.89314656575976 - m.x102)**2 + (2.98350648433744 -
m.x105)**2 + (4.94095686412664 - m.x108)**2 + 101.809153524392 * m.b30
<= 102.809153524392)
m.e92 = Constraint(expr= (2.18223181481477 - m.x102)**2 + (6.36734447251869 -
m.x105)**2 + (6.99053555821422 - m.x108)**2 + 135.200072571286 * m.b31
<= 136.200072571286)
m.e93 = Constraint(expr= (8.39213303571845 - m.x102)**2 + (0.0966493493157039
- m.x105)**2 + (0.992650538147096 - m.x108)**2 + 168.474583620344 * m.b32
<= 169.474583620344)
m.e94 = Constraint(expr= (6.8673656213906 - m.x102)**2 + (8.47463209326542 -
m.x105)**2 + (0.494039939513553 - m.x108)**2 + 188.895677706624 * m.b33
<= 189.895677706624)
m.e95 = Constraint(expr= (2.07334522686175 - m.x102)**2 + (0.611759422337085 -
m.x105)**2 + (7.49872182399417 - m.x108)**2 + 157.156134140441 * m.b34
<= 158.156134140441)
m.e96 = Constraint(expr= (5.58287553353321 - m.x102)**2 + (7.41023187669618 -
m.x105)**2 + (5.78186220125907 - m.x108)**2 + 102.681819435286 * m.b35
<= 103.681819435286)
m.e97 = Constraint(expr= (3.75663662491927 - m.x102)**2 + (2.16100057183036 -
m.x105)**2 + (9.4954261517135 - m.x108)**2 + 153.416411666872 * m.b36
<= 154.416411666872)
m.e98 = Constraint(expr= (4.04360404243071 - m.x102)**2 + (7.5903513366217 -
m.x105)**2 + (3.71685851137678 - m.x108)**2 + 100.651007858618 * m.b37
<= 101.651007858618)
m.e99 = Constraint(expr= (1.45072437530262 - m.x102)**2 + (1.11420059440894 -
m.x105)**2 + (9.42819884441584 - m.x108)**2 + 188.895677706624 * m.b38
<= 189.895677706624)
m.e100 = Constraint(expr= (8.44626629441698 - m.x102)**2 + (8.81210793727421 -
m.x105)**2 + (9.26767041565757 - m.x108)**2 + 168.474583620344 * m.b39
<= 169.474583620344)
m.e101 = Constraint(expr= (4.74415255019913 - m.x102)**2 + (2.8194183128037 -
m.x105)**2 + (1.76655535189797 - m.x108)**2 + 126.464760843581 * m.b40
<= 127.464760843581)
# Point 2 must be assigned to exactly one site.
m.e102 = Constraint(expr= m.b21 + m.b22 + m.b23 + m.b24 + m.b25 + m.b26 + m.b27
+ m.b28 + m.b29 + m.b30 + m.b31 + m.b32 + m.b33 + m.b34 + m.b35 + m.b36 +
m.b37 + m.b38 + m.b39 + m.b40 == 1)
# Big-M covering disjunction for point 3 (m.x110, m.x112, m.x114) over the
# same 20 sites, with selectors m.b41 .. m.b60 (same structure as e61-e81).
m.e103 = Constraint(expr= (0.483311857356823 - m.x110)**2 + (0.114242198506904
- m.x112)**2 + (7.12048883659032 - m.x114)**2 + 188.522461227626 * m.b41
<= 189.522461227626)
m.e104 = Constraint(expr= (5.2590135790233 - m.x110)**2 + (7.33259189570392 -
m.x112)**2 + (5.312333476343 - m.x114)**2 + 98.8166159288294 * m.b42
<= 99.8166159288294)
m.e105 = Constraint(expr= (7.41517046461879 - m.x110)**2 + (9.62332773098117 -
m.x112)**2 + (4.79943898486809 - m.x114)**2 + 167.849028003939 * m.b43
<= 168.849028003939)
m.e106 = Constraint(expr= (6.671843981803 - m.x110)**2 + (8.10658123259484 -
m.x112)**2 + (8.43381689055527 - m.x114)**2 + 144.62434214578 * m.b44
<= 145.62434214578)
m.e107 = Constraint(expr= (9.05870575338678 - m.x110)**2 + (8.3311941216586 -
m.x112)**2 + (2.43718333261179 - m.x114)**2 + 188.522461227626 * m.b45
<= 189.522461227626)
m.e108 = Constraint(expr= (2.45247392282192 - m.x110)**2 + (3.04490781414335 -
m.x112)**2 + (3.74797873360784 - m.x114)**2 + 119.618424440661 * m.b46
<= 120.618424440661)
m.e109 = Constraint(expr= (3.17249885664207 - m.x110)**2 + (0.899014640298569
- m.x112)**2 + (6.53554769882638 - m.x114)**2 + 128.282312211875 * m.b47
<= 129.282312211875)
m.e110 = Constraint(expr= (7.19140474364188 - m.x110)**2 + (6.78752778006733 -
m.x112)**2 + (7.10371917668867 - m.x114)**2 + 108.45575250014 * m.b48
<= 109.45575250014)
m.e111 = Constraint(expr= (0.581905599074722 - m.x110)**2 + (8.05664566308502
- m.x112)**2 + (0.465270839540525 - m.x114)**2 + 163.557092366753 * m.b49
<= 164.557092366753)
m.e112 = Constraint(expr= (2.89314656575976 - m.x110)**2 + (2.98350648433744 -
m.x112)**2 + (4.94095686412664 - m.x114)**2 + 101.809153524392 * m.b50
<= 102.809153524392)
m.e113 = Constraint(expr= (2.18223181481477 - m.x110)**2 + (6.36734447251869 -
m.x112)**2 + (6.99053555821422 - m.x114)**2 + 135.200072571286 * m.b51
<= 136.200072571286)
m.e114 = Constraint(expr= (8.39213303571845 - m.x110)**2 + (0.0966493493157039
- m.x112)**2 + (0.992650538147096 - m.x114)**2 + 168.474583620344 * m.b52
<= 169.474583620344)
m.e115 = Constraint(expr= (6.8673656213906 - m.x110)**2 + (8.47463209326542 -
m.x112)**2 + (0.494039939513553 - m.x114)**2 + 188.895677706624 * m.b53
<= 189.895677706624)
m.e116 = Constraint(expr= (2.07334522686175 - m.x110)**2 + (0.611759422337085
- m.x112)**2 + (7.49872182399417 - m.x114)**2 + 157.156134140441 * m.b54
<= 158.156134140441)
m.e117 = Constraint(expr= (5.58287553353321 - m.x110)**2 + (7.41023187669618 -
m.x112)**2 + (5.78186220125907 - m.x114)**2 + 102.681819435286 * m.b55
<= 103.681819435286)
m.e118 = Constraint(expr= (3.75663662491927 - m.x110)**2 + (2.16100057183036 -
m.x112)**2 + (9.4954261517135 - m.x114)**2 + 153.416411666872 * m.b56
<= 154.416411666872)
m.e119 = Constraint(expr= (4.04360404243071 - m.x110)**2 + (7.5903513366217 -
m.x112)**2 + (3.71685851137678 - m.x114)**2 + 100.651007858618 * m.b57
<= 101.651007858618)
m.e120 = Constraint(expr= (1.45072437530262 - m.x110)**2 + (1.11420059440894 -
m.x112)**2 + (9.42819884441584 - m.x114)**2 + 188.895677706624 * m.b58
<= 189.895677706624)
m.e121 = Constraint(expr= (8.44626629441698 - m.x110)**2 + (8.81210793727421 -
m.x112)**2 + (9.26767041565757 - m.x114)**2 + 168.474583620344 * m.b59
<= 169.474583620344)
m.e122 = Constraint(expr= (4.74415255019913 - m.x110)**2 + (2.8194183128037 -
m.x112)**2 + (1.76655535189797 - m.x114)**2 + 126.464760843581 * m.b60
<= 127.464760843581)
# Point 3 must be assigned to exactly one site.
m.e123 = Constraint(expr= m.b41 + m.b42 + m.b43 + m.b44 + m.b45 + m.b46 + m.b47
+ m.b48 + m.b49 + m.b50 + m.b51 + m.b52 + m.b53 + m.b54 + m.b55 + m.b56 +
m.b57 + m.b58 + m.b59 + m.b60 == 1)
# Big-M covering disjunction for point 4 (m.x116, m.x118, m.x120) over the
# same 20 sites, with selectors m.b61 .. m.b80 (same structure as e61-e81).
m.e124 = Constraint(expr= (0.483311857356823 - m.x116)**2 + (0.114242198506904
- m.x118)**2 + (7.12048883659032 - m.x120)**2 + 188.522461227626 * m.b61
<= 189.522461227626)
m.e125 = Constraint(expr= (5.2590135790233 - m.x116)**2 + (7.33259189570392 -
m.x118)**2 + (5.312333476343 - m.x120)**2 + 98.8166159288294 * m.b62
<= 99.8166159288294)
m.e126 = Constraint(expr= (7.41517046461879 - m.x116)**2 + (9.62332773098117 -
m.x118)**2 + (4.79943898486809 - m.x120)**2 + 167.849028003939 * m.b63
<= 168.849028003939)
m.e127 = Constraint(expr= (6.671843981803 - m.x116)**2 + (8.10658123259484 -
m.x118)**2 + (8.43381689055527 - m.x120)**2 + 144.62434214578 * m.b64
<= 145.62434214578)
m.e128 = Constraint(expr= (9.05870575338678 - m.x116)**2 + (8.3311941216586 -
m.x118)**2 + (2.43718333261179 - m.x120)**2 + 188.522461227626 * m.b65
<= 189.522461227626)
m.e129 = Constraint(expr= (2.45247392282192 - m.x116)**2 + (3.04490781414335 -
m.x118)**2 + (3.74797873360784 - m.x120)**2 + 119.618424440661 * m.b66
<= 120.618424440661)
m.e130 = Constraint(expr= (3.17249885664207 - m.x116)**2 + (0.899014640298569
- m.x118)**2 + (6.53554769882638 - m.x120)**2 + 128.282312211875 * m.b67
<= 129.282312211875)
m.e131 = Constraint(expr= (7.19140474364188 - m.x116)**2 + (6.78752778006733 -
m.x118)**2 + (7.10371917668867 - m.x120)**2 + 108.45575250014 * m.b68
<= 109.45575250014)
m.e132 = Constraint(expr= (0.581905599074722 - m.x116)**2 + (8.05664566308502
- m.x118)**2 + (0.465270839540525 - m.x120)**2 + 163.557092366753 * m.b69
<= 164.557092366753)
m.e133 = Constraint(expr= (2.89314656575976 - m.x116)**2 + (2.98350648433744 -
m.x118)**2 + (4.94095686412664 - m.x120)**2 + 101.809153524392 * m.b70
<= 102.809153524392)
m.e134 = Constraint(expr= (2.18223181481477 - m.x116)**2 + (6.36734447251869 -
m.x118)**2 + (6.99053555821422 - m.x120)**2 + 135.200072571286 * m.b71
<= 136.200072571286)
m.e135 = Constraint(expr= (8.39213303571845 - m.x116)**2 + (0.0966493493157039
- m.x118)**2 + (0.992650538147096 - m.x120)**2 + 168.474583620344 * m.b72
<= 169.474583620344)
m.e136 = Constraint(expr= (6.8673656213906 - m.x116)**2 + (8.47463209326542 -
m.x118)**2 + (0.494039939513553 - m.x120)**2 + 188.895677706624 * m.b73
<= 189.895677706624)
m.e137 = Constraint(expr= (2.07334522686175 - m.x116)**2 + (0.611759422337085
- m.x118)**2 + (7.49872182399417 - m.x120)**2 + 157.156134140441 * m.b74
<= 158.156134140441)
m.e138 = Constraint(expr= (5.58287553353321 - m.x116)**2 + (7.41023187669618 -
m.x118)**2 + (5.78186220125907 - m.x120)**2 + 102.681819435286 * m.b75
<= 103.681819435286)
m.e139 = Constraint(expr= (3.75663662491927 - m.x116)**2 + (2.16100057183036 -
m.x118)**2 + (9.4954261517135 - m.x120)**2 + 153.416411666872 * m.b76
<= 154.416411666872)
m.e140 = Constraint(expr= (4.04360404243071 - m.x116)**2 + (7.5903513366217 -
m.x118)**2 + (3.71685851137678 - m.x120)**2 + 100.651007858618 * m.b77
<= 101.651007858618)
m.e141 = Constraint(expr= (1.45072437530262 - m.x116)**2 + (1.11420059440894 -
m.x118)**2 + (9.42819884441584 - m.x120)**2 + 188.895677706624 * m.b78
<= 189.895677706624)
m.e142 = Constraint(expr= (8.44626629441698 - m.x116)**2 + (8.81210793727421 -
m.x118)**2 + (9.26767041565757 - m.x120)**2 + 168.474583620344 * m.b79
<= 169.474583620344)
m.e143 = Constraint(expr= (4.74415255019913 - m.x116)**2 + (2.8194183128037 -
m.x118)**2 + (1.76655535189797 - m.x120)**2 + 126.464760843581 * m.b80
<= 127.464760843581)
# Point 4 must be assigned to exactly one site.
m.e144 = Constraint(expr= m.b61 + m.b62 + m.b63 + m.b64 + m.b65 + m.b66 + m.b67
+ m.b68 + m.b69 + m.b70 + m.b71 + m.b72 + m.b73 + m.b74 + m.b75 + m.b76 +
m.b77 + m.b78 + m.b79 + m.b80 == 1)
# Big-M covering disjunction for point 5 (m.x122, m.x124, m.x126) over the
# same 20 sites, with selectors m.b81 .. m.b100 (same structure as e61-e81).
m.e145 = Constraint(expr= (0.483311857356823 - m.x122)**2 + (0.114242198506904
- m.x124)**2 + (7.12048883659032 - m.x126)**2 + 188.522461227626 * m.b81
<= 189.522461227626)
m.e146 = Constraint(expr= (5.2590135790233 - m.x122)**2 + (7.33259189570392 -
m.x124)**2 + (5.312333476343 - m.x126)**2 + 98.8166159288294 * m.b82
<= 99.8166159288294)
m.e147 = Constraint(expr= (7.41517046461879 - m.x122)**2 + (9.62332773098117 -
m.x124)**2 + (4.79943898486809 - m.x126)**2 + 167.849028003939 * m.b83
<= 168.849028003939)
m.e148 = Constraint(expr= (6.671843981803 - m.x122)**2 + (8.10658123259484 -
m.x124)**2 + (8.43381689055527 - m.x126)**2 + 144.62434214578 * m.b84
<= 145.62434214578)
m.e149 = Constraint(expr= (9.05870575338678 - m.x122)**2 + (8.3311941216586 -
m.x124)**2 + (2.43718333261179 - m.x126)**2 + 188.522461227626 * m.b85
<= 189.522461227626)
m.e150 = Constraint(expr= (2.45247392282192 - m.x122)**2 + (3.04490781414335 -
m.x124)**2 + (3.74797873360784 - m.x126)**2 + 119.618424440661 * m.b86
<= 120.618424440661)
m.e151 = Constraint(expr= (3.17249885664207 - m.x122)**2 + (0.899014640298569
- m.x124)**2 + (6.53554769882638 - m.x126)**2 + 128.282312211875 * m.b87
<= 129.282312211875)
m.e152 = Constraint(expr= (7.19140474364188 - m.x122)**2 + (6.78752778006733 -
m.x124)**2 + (7.10371917668867 - m.x126)**2 + 108.45575250014 * m.b88
<= 109.45575250014)
m.e153 = Constraint(expr= (0.581905599074722 - m.x122)**2 + (8.05664566308502
- m.x124)**2 + (0.465270839540525 - m.x126)**2 + 163.557092366753 * m.b89
<= 164.557092366753)
m.e154 = Constraint(expr= (2.89314656575976 - m.x122)**2 + (2.98350648433744 -
m.x124)**2 + (4.94095686412664 - m.x126)**2 + 101.809153524392 * m.b90
<= 102.809153524392)
m.e155 = Constraint(expr= (2.18223181481477 - m.x122)**2 + (6.36734447251869 -
m.x124)**2 + (6.99053555821422 - m.x126)**2 + 135.200072571286 * m.b91
<= 136.200072571286)
m.e156 = Constraint(expr= (8.39213303571845 - m.x122)**2 + (0.0966493493157039
- m.x124)**2 + (0.992650538147096 - m.x126)**2 + 168.474583620344 * m.b92
<= 169.474583620344)
m.e157 = Constraint(expr= (6.8673656213906 - m.x122)**2 + (8.47463209326542 -
m.x124)**2 + (0.494039939513553 - m.x126)**2 + 188.895677706624 * m.b93
<= 189.895677706624)
m.e158 = Constraint(expr= (2.07334522686175 - m.x122)**2 + (0.611759422337085
- m.x124)**2 + (7.49872182399417 - m.x126)**2 + 157.156134140441 * m.b94
<= 158.156134140441)
m.e159 = Constraint(expr= (5.58287553353321 - m.x122)**2 + (7.41023187669618 -
m.x124)**2 + (5.78186220125907 - m.x126)**2 + 102.681819435286 * m.b95
<= 103.681819435286)
m.e160 = Constraint(expr= (3.75663662491927 - m.x122)**2 + (2.16100057183036 -
m.x124)**2 + (9.4954261517135 - m.x126)**2 + 153.416411666872 * m.b96
<= 154.416411666872)
m.e161 = Constraint(expr= (4.04360404243071 - m.x122)**2 + (7.5903513366217 -
m.x124)**2 + (3.71685851137678 - m.x126)**2 + 100.651007858618 * m.b97
<= 101.651007858618)
m.e162 = Constraint(expr= (1.45072437530262 - m.x122)**2 + (1.11420059440894 -
m.x124)**2 + (9.42819884441584 - m.x126)**2 + 188.895677706624 * m.b98
<= 189.895677706624)
m.e163 = Constraint(expr= (8.44626629441698 - m.x122)**2 + (8.81210793727421 -
m.x124)**2 + (9.26767041565757 - m.x126)**2 + 168.474583620344 * m.b99
<= 169.474583620344)
m.e164 = Constraint(expr= (4.74415255019913 - m.x122)**2 + (2.8194183128037 -
m.x124)**2 + (1.76655535189797 - m.x126)**2 + 126.464760843581 * m.b100
<= 127.464760843581)
# Point 5 must be assigned to exactly one site.
m.e165 = Constraint(expr= m.b81 + m.b82 + m.b83 + m.b84 + m.b85 + m.b86 + m.b87
+ m.b88 + m.b89 + m.b90 + m.b91 + m.b92 + m.b93 + m.b94 + m.b95 + m.b96 +
m.b97 + m.b98 + m.b99 + m.b100 == 1)
# Each site may be selected by at most one of the five points: for site i,
# the five selectors b_i, b_{i+20}, b_{i+40}, b_{i+60}, b_{i+80} sum to <= 1.
m.e166 = Constraint(expr= m.b1 + m.b21 + m.b41 + m.b61 + m.b81 <= 1)
m.e167 = Constraint(expr= m.b2 + m.b22 + m.b42 + m.b62 + m.b82 <= 1)
m.e168 = Constraint(expr= m.b3 + m.b23 + m.b43 + m.b63 + m.b83 <= 1)
m.e169 = Constraint(expr= m.b4 + m.b24 + m.b44 + m.b64 + m.b84 <= 1)
m.e170 = Constraint(expr= m.b5 + m.b25 + m.b45 + m.b65 + m.b85 <= 1)
m.e171 = Constraint(expr= m.b6 + m.b26 + m.b46 + m.b66 + m.b86 <= 1)
m.e172 = Constraint(expr= m.b7 + m.b27 + m.b47 + m.b67 + m.b87 <= 1)
m.e173 = Constraint(expr= m.b8 + m.b28 + m.b48 + m.b68 + m.b88 <= 1)
m.e174 = Constraint(expr= m.b9 + m.b29 + m.b49 + m.b69 + m.b89 <= 1)
m.e175 = Constraint(expr= m.b10 + m.b30 + m.b50 + m.b70 + m.b90 <= 1)
m.e176 = Constraint(expr= m.b11 + m.b31 + m.b51 + m.b71 + m.b91 <= 1)
m.e177 = Constraint(expr= m.b12 + m.b32 + m.b52 + m.b72 + m.b92 <= 1)
m.e178 = Constraint(expr= m.b13 + m.b33 + m.b53 + m.b73 + m.b93 <= 1)
m.e179 = Constraint(expr= m.b14 + m.b34 + m.b54 + m.b74 + m.b94 <= 1)
m.e180 = Constraint(expr= m.b15 + m.b35 + m.b55 + m.b75 + m.b95 <= 1)
m.e181 = Constraint(expr= m.b16 + m.b36 + m.b56 + m.b76 + m.b96 <= 1)
m.e182 = Constraint(expr= m.b17 + m.b37 + m.b57 + m.b77 + m.b97 <= 1)
m.e183 = Constraint(expr= m.b18 + m.b38 + m.b58 + m.b78 + m.b98 <= 1)
m.e184 = Constraint(expr= m.b19 + m.b39 + m.b59 + m.b79 + m.b99 <= 1)
m.e185 = Constraint(expr= m.b20 + m.b40 + m.b60 + m.b80 + m.b100 <= 1)
# Ordering on the first coordinate of each point:
# x101 <= x102 <= x110 <= x116 <= x122.
# NOTE(review): presumably a symmetry-breaking device, since the five
# points are otherwise interchangeable — confirm against the model source.
m.e186 = Constraint(expr= m.x101 - m.x102 <= 0)
m.e187 = Constraint(expr= m.x102 - m.x110 <= 0)
m.e188 = Constraint(expr= m.x110 - m.x116 <= 0)
m.e189 = Constraint(expr= m.x116 - m.x122 <= 0)
# NOTE(review): the lines below are dataset-viewer boilerplate that was
# accidentally appended to this file; they are not Python and would raise a
# SyntaxError, so they are preserved here as comments only.
# |
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.