| code (string, lengths 3–1.05M) | repo_name (string, lengths 5–104) | path (string, lengths 4–251) | language (string, 1 class) | license (string, 15 classes) | size (int64, 3–1.05M) |
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-01 02:50
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('products', '0008_product_branch'),
]
operations = [
migrations.RemoveField(
model_name='product',
name='branch',
),
]
|
jojoriveraa/titulacion-NFCOW
|
NFCow/products/migrations/0009_remove_product_branch.py
|
Python
|
apache-2.0
| 388
|
import ftplib
import logging
from io import BytesIO
from os.path import join
from urllib.error import URLError
from urllib.parse import urlparse
from urllib.request import urlopen
import cachecontrol.caches
import numpy as np
import requests
from PIL import ImageFile
from PIL.Image import LANCZOS
from PIL.Image import open as open_image
from requests.exceptions import RequestException
from Orange.misc.environ import cache_dir
log = logging.getLogger(__name__)
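# Let Pillow decode partially downloaded/truncated files instead of raising an error.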
ImageFile.LOAD_TRUNCATED_IMAGES = True
class ImageLoader:
def __init__(self):
self._session = cachecontrol.CacheControl(
requests.session(),
cache=cachecontrol.caches.FileCache(
join(cache_dir(), __name__ + ".ImageEmbedder.httpcache")
),
)
def load_image_or_none(self, file_path, target_size=None):
if file_path is None:
return None
image = self._load_image_from_url_or_local_path(file_path)
if image is None:
return image
if image.mode != "RGB":
try:
image = image.convert("RGB")
except ValueError:
return None
if target_size is not None:
image = image.resize(target_size, LANCZOS)
return image
def load_image_bytes(self, file_path, target_size=None):
image = self.load_image_or_none(file_path, target_size)
if image is None:
return None
image_bytes_io = BytesIO()
image.save(image_bytes_io, format="JPEG")
image.close()
image_bytes_io.seek(0)
image_bytes = image_bytes_io.read()
image_bytes_io.close()
return image_bytes
def _load_image_from_url_or_local_path(self, file_path):
urlparts = urlparse(file_path)
if urlparts.scheme in ("http", "https"):
try:
file = self._session.get(file_path, stream=True).raw
except RequestException:
log.warning("Image skipped", exc_info=True)
return None
elif urlparts.scheme in ("ftp", "data"):
try:
file = urlopen(file_path)
except (URLError,) + ftplib.all_errors:
log.warning("Image skipped", exc_info=True)
return None
else:
file = file_path
try:
return open_image(file)
except (IOError, ValueError):
log.warning("Image skipped", exc_info=True)
return None
@staticmethod
def preprocess_squeezenet(image):
mean_pixel = [104.006, 116.669, 122.679] # imagenet centering
image = np.array(image, dtype=float)
if len(image.shape) < 4:
image = image[None, ...]
swap_img = np.array(image)
img_out = np.array(swap_img)
img_out[..., 0] = swap_img[..., 2] # from rgb to bgr on the channel axis - caffe mode
img_out[..., 2] = swap_img[..., 0]
return img_out - mean_pixel
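# A minimal usage sketch (not part of the original module), assuming a local
# file path or an http(s)/ftp/data URL; "example.png" and the (227, 227)
# target size are illustrative values only.
if __name__ == "__main__":
    loader = ImageLoader()
    image = loader.load_image_or_none("example.png", target_size=(227, 227))
    if image is not None:
        batch = ImageLoader.preprocess_squeezenet(image)
        log.info("preprocessed batch shape: %s", batch.shape)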
|
biolab/orange3-imageanalytics
|
orangecontrib/imageanalytics/utils/embedder_utils.py
|
Python
|
gpl-3.0
| 3,000
|
#!/bin/python
import serial, sys, time
config = {
'serial' : {
'port' : {
'printer' : "/dev/ttyACM0",
'arduino' : "/dev/ttyACM1"
},
'speed' : 115200,
'timeout' : 2, # seconds
'eol' : "\n\r"
},
'printer' : {
'command_delay': 0.02, # seconds
'general_gcode': 'G91',
'extrusion': {
'step' : 0.20
},
'travel' : {
'step' : 2.10
},
'plate' : {
'step' : 0.05
}
},
'analog' : {
'zero' : 1024 / 2,
'tolerance' : 30
}
}
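# The controller below reads settings with dotted attribute access
# (e.g. config.serial.eol), which a plain dict does not support. A minimal
# sketch of a recursive wrapper, assuming that access style is intended;
# the name _AttrDict is illustrative, not part of the original script.
class _AttrDict(dict):
    def __getattr__(self, name):
        value = self[name]
        return _AttrDict(value) if isinstance(value, dict) else value

config = _AttrDict(config)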
class RepRapMCPrinterController:
def __init__(self):
self.ser_printer = None
self.ser_arduino = None
def sendGCode(self, gcode):
self.ser_printer.write(gcode + config.serial.eol)
# Flush
self.ser_printer.flushOutput()
def bootstrap(self):
print "Sending startup gcodes"
# Homes
self.sendGCode('G28')
# Set units to millimeters
self.sendGCode('G21')
# Use relative coordinates for traveling
self.sendGCode('G91')
# Use relative displacement for extrusion
self.sendGCode('G92 E0')
# Heats the head
self.sendGCode('M104 S200')
# Heats the plate
self.sendGCode('M140 S60')
# Boosts speed by 200%
self.sendGCode('M220 S200')
# Lower Z-plate
self.sendGCode('G1 Z' + str(config.printer.plate.step) + ' F2400')
# Absolute positioning for these guys
self.z = 0.2
self.extrusion = 0
def connect(self):
print "Connecting serial ports"
# Open the ports
self.ser_printer = serial.Serial(port=config.serial.port.printer, baudrate=config.serial.speed, timeout=config.serial.timeout)
self.ser_arduino = serial.Serial(port=config.serial.port.arduino, baudrate=config.serial.speed, timeout=config.serial.timeout)
# Wait for ports to initialize
time.sleep(2)
# Flush
self.flushAll()
def flushAll(self):
self.ser_printer.flushOutput()
self.ser_printer.flushInput()
self.ser_arduino.flushInput()
def movePlateTo(self, absolute_z):
self.sendGCode(config.printer.general_gcode + ' Z' + str(absolute_z) + ' F2400')
def start(self):
print "Starting process ..."
# Let's compute this here first
lower_bound = config.analog.zero - config.analog.tolerance
upper_bound = config.analog.zero + config.analog.tolerance
while True:
self.flushAll()
command_available = False
command = self.ser_arduino.readline().split(",")
# A correct command line contains 4 items : x, y, extrude and plate
if len(command) == 4:
try:
x = int(command[0])
y = int(command[1])
extrude = int(command[2])
plate = int(command[3])
command_available = True
except ValueError:
# We have a rogue serial line, skip it
command_available = False
if command_available == True:
print "Command received :", x, y, extrude, plate
degrees = {}
if x < lower_bound:
# We want to move the extruder to the left
degrees['X'] = +config.printer.travel.step
elif x > upper_bound:
# ... to the right
degrees['X'] = -config.printer.travel.step
if y < lower_bound:
# We're going further from the face of the printer
degrees['Y'] = -config.printer.travel.step
elif y > upper_bound:
# ... and closer
degrees['Y'] = +config.printer.travel.step
if extrude == 1:
self.extrusion += config.printer.extrusion.step
degrees['E'] = self.extrusion
if plate == 1:
self.z += config.printer.plate.step
self.movePlateTo(self.z)
# TODO insert timer to avoid multiple bed descents
continue # When moving the bed down, we don't allow any other command
# We need to "move" somehow on any degree of freedom including extrusion
if len(degrees) > 0:
gcode = config.printer.general_gcode
for key, value in degrees.iteritems():
gcode += " " + key + str(value)
self.sendGCode(gcode)
time.sleep(config.printer.command_delay)
mc = RepRapMCPrinterController()
mc.connect()
mc.bootstrap()
# Start !
mc.start()
|
tchapi/RepRapMC
|
reprap-mc.py
|
Python
|
mit
| 4,419
|
"""Dominoes most basic computer player
This player just puts any available tile. You may use this file to create
your own player.
"""
from ai import *
class player1:
def __init__(self, dealed_tiles):
self.tiles = dealed_tiles
self.knowledge = []
self.knowledge.append(put_any_double())
self.knowledge.append(put_anyone())
self.player_position = "1" # player_pos = 1 - Player that starts this hand
# 2 - Second player
# 3 - Third player, plays with player 1
# 4 - Fourth player, plays with player 2
def human(self):
return False
def computer(self):
return True
def player_pos(self, pos):
self.player_position = pos
def down_tile(self, left_tile, right_tile, board, tiles, log):
ai = AI(left_tile, right_tile, board, self.tiles, log)
return ai.go(self.knowledge)
def game_status(self):
pass
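# A minimal sketch of a custom player built from this template, as the module
# docstring suggests; it reuses only the knowledge rules already imported from
# ai (put_anyone, put_any_double) and simply tries them in the opposite order.
# The class name my_player is illustrative.
class my_player(player1):
    def __init__(self, dealed_tiles):
        player1.__init__(self, dealed_tiles)
        self.knowledge = [put_anyone(), put_any_double()]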
|
3oheme/dominous
|
player_basic.py
|
Python
|
gpl-3.0
| 1,026
|
# -*- coding: utf-8 -*-
from bpy.types import PropertyGroup
from bpy.props import StringProperty, IntProperty, BoolProperty, FloatProperty, FloatVectorProperty
from mmd_tools.core.bone import FnBone
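# The update/get/set callbacks below keep the bone name in sync with a stable
# bone_id, flag the additional ("append") transform as dirty, and re-apply it
# when the edited bone is the active pose bone.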
def _updateMMDBoneAdditionalTransform(prop, context):
prop['is_additional_transform_dirty'] = True
p_bone = context.active_pose_bone
if p_bone and p_bone.mmd_bone.as_pointer() == prop.as_pointer():
FnBone.apply_additional_transformation(prop.id_data)
def _getAdditionalTransformBone(prop):
arm = prop.id_data
bone_id = prop.get('additional_transform_bone_id', -1)
if bone_id < 0:
return ''
fnBone = FnBone.from_bone_id(arm, bone_id)
if not fnBone:
return ''
return fnBone.pose_bone.name
def _setAdditionalTransformBone(prop, value):
arm = prop.id_data
prop['is_additional_transform_dirty'] = True
if value not in arm.pose.bones.keys():
prop['additional_transform_bone_id'] = -1
return
pose_bone = arm.pose.bones[value]
bone = FnBone(pose_bone)
prop['additional_transform_bone_id'] = bone.bone_id
class MMDBone(PropertyGroup):
name_j = StringProperty(
name='Name',
description='Japanese Name',
default='',
)
name_e = StringProperty(
name='Name(Eng)',
description='English Name',
default='',
)
bone_id = IntProperty(
name='Bone ID',
default=-1,
)
transform_order = IntProperty(
name='Transform Order',
description='Deformation tier',
min=0,
max=100,
)
is_visible = BoolProperty(
name='Visible',
description='Is visible',
default=True,
)
is_controllable = BoolProperty(
name='Controllable',
description='Is controllable',
default=True,
)
transform_after_dynamics = BoolProperty(
name='After Dynamics',
description='After physics',
default=False,
)
enabled_fixed_axis = BoolProperty(
name='Fixed Axis',
description='Use fixed axis',
default=False,
)
fixed_axis = FloatVectorProperty(
name='Fixed Axis',
description='Fixed axis',
subtype='XYZ',
size=3,
default=[0, 0, 0],
)
enabled_local_axes = BoolProperty(
name='Local Axes',
description='Use local axes',
default=False,
)
local_axis_x = FloatVectorProperty(
name='Local X-Axis',
description='Local x-axis',
subtype='XYZ',
size=3,
default=[1, 0, 0],
)
local_axis_z = FloatVectorProperty(
name='Local Z-Axis',
description='Local z-axis',
subtype='XYZ',
size=3,
default=[0, 0, 1],
)
is_tip = BoolProperty(
name='Tip Bone',
description='Is zero length bone',
default=False,
)
ik_rotation_constraint = FloatProperty(
name='IK Rotation Constraint',
description='The unit angle of IK',
subtype='ANGLE',
soft_min=0,
soft_max=4,
default=1,
)
has_additional_rotation = BoolProperty(
name='Additional Rotation',
description='Additional rotation',
default=False,
update=_updateMMDBoneAdditionalTransform,
)
has_additional_location = BoolProperty(
name='Additional Location',
description='Additional location',
default=False,
update=_updateMMDBoneAdditionalTransform,
)
additional_transform_bone = StringProperty(
name='Additional Transform Bone',
description='Additional transform bone',
set=_setAdditionalTransformBone,
get=_getAdditionalTransformBone,
update=_updateMMDBoneAdditionalTransform,
)
additional_transform_bone_id = IntProperty(
name='Additional Transform Bone ID',
default=-1,
update=_updateMMDBoneAdditionalTransform,
)
additional_transform_influence = FloatProperty(
name='Additional Transform Influence',
description='Additional transform influence',
default=1,
soft_min=-1,
soft_max=1,
update=_updateMMDBoneAdditionalTransform,
)
is_additional_transform_dirty = BoolProperty(
name='',
default=True
)
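# A minimal sketch, not the addon's actual registration code, of how a
# PropertyGroup like this is typically attached so that pose_bone.mmd_bone
# (used in the callbacks above) resolves; assumes Blender's standard
# register_class / PointerProperty pattern.
import bpy
from bpy.props import PointerProperty

def register():
    bpy.utils.register_class(MMDBone)
    bpy.types.PoseBone.mmd_bone = PointerProperty(type=MMDBone)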
|
lordscales91/blender_mmd_tools
|
mmd_tools/properties/bone.py
|
Python
|
mit
| 4,418
|
# -*- coding: utf-8 -*-
# Copyright 2009-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the collection module."""
import re
import sys
import threading
from collections import defaultdict
sys.path[0:0] = [""]
from bson.regex import Regex
from bson.code import Code
from bson.objectid import ObjectId
from bson.py3compat import u, itervalues
from bson.son import SON
from pymongo import (ASCENDING, DESCENDING, GEO2D,
GEOHAYSTACK, GEOSPHERE, HASHED, TEXT)
from pymongo import MongoClient
from pymongo.collection import Collection, ReturnDocument
from pymongo.command_cursor import CommandCursor
from pymongo.cursor import CursorType
from pymongo.errors import (DuplicateKeyError,
InvalidDocument,
InvalidName,
InvalidOperation,
OperationFailure)
from pymongo.operations import IndexModel
from pymongo.read_preferences import ReadPreference
from pymongo.results import (InsertOneResult,
InsertManyResult,
UpdateResult,
DeleteResult)
from pymongo.write_concern import WriteConcern
from test.test_client import IntegrationTest
from test.utils import (is_mongos, enable_text_search, get_pool,
rs_or_single_client, wait_until)
from test import client_context, host, port, unittest
class TestCollectionNoConnect(unittest.TestCase):
@classmethod
def setUpClass(cls):
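# connect=False defers any connection attempt, so these tests run without a reachable server.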
client = MongoClient(host, port, connect=False)
cls.db = client.pymongo_test
def test_collection(self):
self.assertRaises(TypeError, Collection, self.db, 5)
def make_col(base, name):
return base[name]
self.assertRaises(InvalidName, make_col, self.db, "")
self.assertRaises(InvalidName, make_col, self.db, "te$t")
self.assertRaises(InvalidName, make_col, self.db, ".test")
self.assertRaises(InvalidName, make_col, self.db, "test.")
self.assertRaises(InvalidName, make_col, self.db, "tes..t")
self.assertRaises(InvalidName, make_col, self.db.test, "")
self.assertRaises(InvalidName, make_col, self.db.test, "te$t")
self.assertRaises(InvalidName, make_col, self.db.test, ".test")
self.assertRaises(InvalidName, make_col, self.db.test, "test.")
self.assertRaises(InvalidName, make_col, self.db.test, "tes..t")
self.assertRaises(InvalidName, make_col, self.db.test, "tes\x00t")
self.assertTrue(isinstance(self.db.test, Collection))
self.assertEqual(self.db.test, self.db["test"])
self.assertEqual(self.db.test, Collection(self.db, "test"))
self.assertEqual(self.db.test.mike, self.db["test.mike"])
self.assertEqual(self.db.test["mike"], self.db["test.mike"])
def test_getattr(self):
coll = self.db.test
self.assertTrue(isinstance(coll['_does_not_exist'], Collection))
with self.assertRaises(AttributeError) as context:
coll._does_not_exist
# Message should be:
# "AttributeError: Collection has no attribute '_does_not_exist'. To
# access the test._does_not_exist collection, use
# database['test._does_not_exist']."
self.assertIn("has no attribute '_does_not_exist'",
str(context.exception))
def test_iteration(self):
self.assertRaises(TypeError, next, self.db)
class TestCollection(IntegrationTest):
@classmethod
def setUpClass(cls):
super(TestCollection, cls).setUpClass()
cls.w = client_context.w
@classmethod
def tearDownClass(cls):
cls.db.drop_collection("test_large_limit")
def test_drop_nonexistent_collection(self):
self.db.drop_collection('test')
self.assertFalse('test' in self.db.collection_names())
# No exception
self.db.drop_collection('test')
@client_context.require_version_min(2, 6)
def test_create_indexes(self):
db = self.db
self.assertRaises(TypeError, db.test.create_indexes, 'foo')
self.assertRaises(TypeError, db.test.create_indexes, ['foo'])
self.assertRaises(TypeError, IndexModel, 5)
self.assertRaises(ValueError, IndexModel, [])
db.test.drop_indexes()
db.test.insert_one({})
self.assertEqual(len(db.test.index_information()), 1)
db.test.create_indexes([IndexModel("hello")])
db.test.create_indexes([IndexModel([("hello", DESCENDING),
("world", ASCENDING)])])
# Tuple instead of list.
db.test.create_indexes([IndexModel((("world", ASCENDING),))])
self.assertEqual(len(db.test.index_information()), 4)
db.test.drop_indexes()
names = db.test.create_indexes([IndexModel([("hello", DESCENDING),
("world", ASCENDING)],
name="hello_world")])
self.assertEqual(names, ["hello_world"])
db.test.drop_indexes()
self.assertEqual(len(db.test.index_information()), 1)
db.test.create_indexes([IndexModel("hello")])
self.assertTrue("hello_1" in db.test.index_information())
db.test.drop_indexes()
self.assertEqual(len(db.test.index_information()), 1)
names = db.test.create_indexes([IndexModel([("hello", DESCENDING),
("world", ASCENDING)]),
IndexModel("hello")])
info = db.test.index_information()
for name in names:
self.assertTrue(name in info)
db.test.drop()
db.test.insert_one({'a': 1})
db.test.insert_one({'a': 1})
self.assertRaises(
DuplicateKeyError,
db.test.create_indexes,
[IndexModel('a', unique=True)])
def test_create_index(self):
db = self.db
self.assertRaises(TypeError, db.test.create_index, 5)
self.assertRaises(TypeError, db.test.create_index, {"hello": 1})
self.assertRaises(ValueError, db.test.create_index, [])
db.test.drop_indexes()
db.test.insert_one({})
self.assertEqual(len(db.test.index_information()), 1)
db.test.create_index("hello")
db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)])
# Tuple instead of list.
db.test.create_index((("world", ASCENDING),))
self.assertEqual(len(db.test.index_information()), 4)
db.test.drop_indexes()
ix = db.test.create_index([("hello", DESCENDING),
("world", ASCENDING)], name="hello_world")
self.assertEqual(ix, "hello_world")
db.test.drop_indexes()
self.assertEqual(len(db.test.index_information()), 1)
db.test.create_index("hello")
self.assertTrue("hello_1" in db.test.index_information())
db.test.drop_indexes()
self.assertEqual(len(db.test.index_information()), 1)
db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)])
self.assertTrue("hello_-1_world_1" in db.test.index_information())
db.test.drop()
db.test.insert_one({'a': 1})
db.test.insert_one({'a': 1})
self.assertRaises(
DuplicateKeyError, db.test.create_index, 'a', unique=True)
def test_drop_index(self):
db = self.db
db.test.drop_indexes()
db.test.create_index("hello")
name = db.test.create_index("goodbye")
self.assertEqual(len(db.test.index_information()), 3)
self.assertEqual(name, "goodbye_1")
db.test.drop_index(name)
# Drop it again.
with self.assertRaises(OperationFailure):
db.test.drop_index(name)
self.assertEqual(len(db.test.index_information()), 2)
self.assertTrue("hello_1" in db.test.index_information())
db.test.drop_indexes()
db.test.create_index("hello")
name = db.test.create_index("goodbye")
self.assertEqual(len(db.test.index_information()), 3)
self.assertEqual(name, "goodbye_1")
db.test.drop_index([("goodbye", ASCENDING)])
self.assertEqual(len(db.test.index_information()), 2)
self.assertTrue("hello_1" in db.test.index_information())
def test_reindex(self):
db = self.db
db.drop_collection("test")
db.test.insert_one({"foo": "bar", "who": "what", "when": "how"})
db.test.create_index("foo")
db.test.create_index("who")
db.test.create_index("when")
info = db.test.index_information()
def check_result(result):
self.assertEqual(4, result['nIndexes'])
indexes = result['indexes']
names = [idx['name'] for idx in indexes]
for name in names:
self.assertTrue(name in info)
for key in info:
self.assertTrue(key in names)
reindexed = db.test.reindex()
if 'raw' in reindexed:
# mongos
for result in itervalues(reindexed['raw']):
check_result(result)
else:
check_result(reindexed)
def test_list_indexes(self):
db = self.db
db.test.drop()
db.test.insert_one({}) # create collection
def map_indexes(indexes):
return dict([(index["name"], index) for index in indexes])
indexes = list(db.test.list_indexes())
self.assertEqual(len(indexes), 1)
self.assertTrue("_id_" in map_indexes(indexes))
db.test.create_index("hello")
indexes = list(db.test.list_indexes())
self.assertEqual(len(indexes), 2)
self.assertEqual(map_indexes(indexes)["hello_1"]["key"],
SON([("hello", ASCENDING)]))
db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)],
unique=True)
indexes = list(db.test.list_indexes())
self.assertEqual(len(indexes), 3)
index_map = map_indexes(indexes)
self.assertEqual(index_map["hello_-1_world_1"]["key"],
SON([("hello", DESCENDING), ("world", ASCENDING)]))
self.assertEqual(True, index_map["hello_-1_world_1"]["unique"])
def test_index_info(self):
db = self.db
db.test.drop()
db.test.insert_one({}) # create collection
self.assertEqual(len(db.test.index_information()), 1)
self.assertTrue("_id_" in db.test.index_information())
db.test.create_index("hello")
self.assertEqual(len(db.test.index_information()), 2)
self.assertEqual(db.test.index_information()["hello_1"]["key"],
[("hello", ASCENDING)])
db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)],
unique=True)
self.assertEqual(db.test.index_information()["hello_1"]["key"],
[("hello", ASCENDING)])
self.assertEqual(len(db.test.index_information()), 3)
self.assertEqual([("hello", DESCENDING), ("world", ASCENDING)],
db.test.index_information()["hello_-1_world_1"]["key"]
)
self.assertEqual(
True, db.test.index_information()["hello_-1_world_1"]["unique"])
def test_index_geo2d(self):
db = self.db
db.test.drop_indexes()
self.assertEqual('loc_2d', db.test.create_index([("loc", GEO2D)]))
index_info = db.test.index_information()['loc_2d']
self.assertEqual([('loc', '2d')], index_info['key'])
@client_context.require_no_mongos
def test_index_haystack(self):
db = self.db
db.test.drop()
_id = db.test.insert_one({
"pos": {"long": 34.2, "lat": 33.3},
"type": "restaurant"
}).inserted_id
db.test.insert_one({
"pos": {"long": 34.2, "lat": 37.3}, "type": "restaurant"
})
db.test.insert_one({
"pos": {"long": 59.1, "lat": 87.2}, "type": "office"
})
db.test.create_index(
[("pos", GEOHAYSTACK), ("type", ASCENDING)],
bucketSize=1
)
results = db.command(SON([
("geoSearch", "test"),
("near", [33, 33]),
("maxDistance", 6),
("search", {"type": "restaurant"}),
("limit", 30),
]))['results']
self.assertEqual(2, len(results))
self.assertEqual({
"_id": _id,
"pos": {"long": 34.2, "lat": 33.3},
"type": "restaurant"
}, results[0])
@client_context.require_version_min(2, 3, 2)
@client_context.require_no_mongos
def test_index_text(self):
enable_text_search(self.client)
db = self.db
db.test.drop_indexes()
self.assertEqual("t_text", db.test.create_index([("t", TEXT)]))
index_info = db.test.index_information()["t_text"]
self.assertTrue("weights" in index_info)
if client_context.version.at_least(2, 5, 5):
db.test.insert_many([
{'t': 'spam eggs and spam'},
{'t': 'spam'},
{'t': 'egg sausage and bacon'}])
# MongoDB 2.6 text search. Create 'score' field in projection.
cursor = db.test.find(
{'$text': {'$search': 'spam'}},
{'score': {'$meta': 'textScore'}})
# Sort by 'score' field.
cursor.sort([('score', {'$meta': 'textScore'})])
results = list(cursor)
self.assertTrue(results[0]['score'] >= results[1]['score'])
db.test.drop_indexes()
@client_context.require_version_min(2, 3, 2)
def test_index_2dsphere(self):
db = self.db
db.test.drop_indexes()
self.assertEqual("geo_2dsphere",
db.test.create_index([("geo", GEOSPHERE)]))
for dummy, info in db.test.index_information().items():
field, idx_type = info['key'][0]
if field == 'geo' and idx_type == '2dsphere':
break
else:
self.fail("2dsphere index not found.")
poly = {"type": "Polygon",
"coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]]}
query = {"geo": {"$within": {"$geometry": poly}}}
# This query will error without a 2dsphere index.
db.test.find(query)
db.test.drop_indexes()
@client_context.require_version_min(2, 3, 2)
def test_index_hashed(self):
db = self.db
db.test.drop_indexes()
self.assertEqual("a_hashed",
db.test.create_index([("a", HASHED)]))
for dummy, info in db.test.index_information().items():
field, idx_type = info['key'][0]
if field == 'a' and idx_type == 'hashed':
break
else:
self.fail("hashed index not found.")
db.test.drop_indexes()
def test_index_sparse(self):
db = self.db
db.test.drop_indexes()
db.test.create_index([('key', ASCENDING)], sparse=True)
self.assertTrue(db.test.index_information()['key_1']['sparse'])
def test_index_background(self):
db = self.db
db.test.drop_indexes()
db.test.create_index([('keya', ASCENDING)])
db.test.create_index([('keyb', ASCENDING)], background=False)
db.test.create_index([('keyc', ASCENDING)], background=True)
self.assertFalse('background' in db.test.index_information()['keya_1'])
self.assertFalse(db.test.index_information()['keyb_1']['background'])
self.assertTrue(db.test.index_information()['keyc_1']['background'])
def _drop_dups_setup(self, db):
db.drop_collection('test')
db.test.insert_one({'i': 1})
db.test.insert_one({'i': 2})
db.test.insert_one({'i': 2}) # duplicate
db.test.insert_one({'i': 3})
@client_context.require_version_max(2, 6)
def test_index_drop_dups(self):
# Try dropping duplicates
db = self.db
self._drop_dups_setup(db)
# No error, just drop the duplicate
db.test.create_index([('i', ASCENDING)], unique=True, dropDups=True)
# Duplicate was dropped
self.assertEqual(3, db.test.count())
# Index was created, plus the index on _id
self.assertEqual(2, len(db.test.index_information()))
def test_index_dont_drop_dups(self):
# Try *not* dropping duplicates
db = self.db
self._drop_dups_setup(db)
# There's a duplicate
def test_create():
db.test.create_index(
[('i', ASCENDING)],
unique=True,
dropDups=False
)
self.assertRaises(DuplicateKeyError, test_create)
# Duplicate wasn't dropped
self.assertEqual(4, db.test.count())
# Index wasn't created, only the default index on _id
self.assertEqual(1, len(db.test.index_information()))
def test_field_selection(self):
db = self.db
db.drop_collection("test")
doc = {"a": 1, "b": 5, "c": {"d": 5, "e": 10}}
db.test.insert_one(doc)
# Test field inclusion
doc = next(db.test.find({}, ["_id"]))
self.assertEqual(list(doc), ["_id"])
doc = next(db.test.find({}, ["a"]))
l = list(doc)
l.sort()
self.assertEqual(l, ["_id", "a"])
doc = next(db.test.find({}, ["b"]))
l = list(doc)
l.sort()
self.assertEqual(l, ["_id", "b"])
doc = next(db.test.find({}, ["c"]))
l = list(doc)
l.sort()
self.assertEqual(l, ["_id", "c"])
doc = next(db.test.find({}, ["a"]))
self.assertEqual(doc["a"], 1)
doc = next(db.test.find({}, ["b"]))
self.assertEqual(doc["b"], 5)
doc = next(db.test.find({}, ["c"]))
self.assertEqual(doc["c"], {"d": 5, "e": 10})
# Test inclusion of fields with dots
doc = next(db.test.find({}, ["c.d"]))
self.assertEqual(doc["c"], {"d": 5})
doc = next(db.test.find({}, ["c.e"]))
self.assertEqual(doc["c"], {"e": 10})
doc = next(db.test.find({}, ["b", "c.e"]))
self.assertEqual(doc["c"], {"e": 10})
doc = next(db.test.find({}, ["b", "c.e"]))
l = list(doc)
l.sort()
self.assertEqual(l, ["_id", "b", "c"])
doc = next(db.test.find({}, ["b", "c.e"]))
self.assertEqual(doc["b"], 5)
# Test field exclusion
doc = next(db.test.find({}, {"a": False, "b": 0}))
l = list(doc)
l.sort()
self.assertEqual(l, ["_id", "c"])
doc = next(db.test.find({}, {"_id": False}))
l = list(doc)
self.assertFalse("_id" in l)
def test_options(self):
db = self.db
db.drop_collection("test")
db.create_collection("test", capped=True, size=4096)
result = db.test.options()
# mongos 2.2.x adds an $auth field when auth is enabled.
result.pop('$auth', None)
self.assertEqual(result, {"capped": True, 'size': 4096})
db.drop_collection("test")
def test_insert_one(self):
db = self.db
db.test.drop()
document = {"_id": 1000}
result = db.test.insert_one(document)
self.assertTrue(isinstance(result, InsertOneResult))
self.assertTrue(isinstance(result.inserted_id, int))
self.assertEqual(document["_id"], result.inserted_id)
self.assertTrue(result.acknowledged)
self.assertIsNotNone(db.test.find_one({"_id": document["_id"]}))
self.assertEqual(1, db.test.count())
document = {"foo": "bar"}
result = db.test.insert_one(document)
self.assertTrue(isinstance(result, InsertOneResult))
self.assertTrue(isinstance(result.inserted_id, ObjectId))
self.assertEqual(document["_id"], result.inserted_id)
self.assertTrue(result.acknowledged)
self.assertIsNotNone(db.test.find_one({"_id": document["_id"]}))
self.assertEqual(2, db.test.count())
db = db.client.get_database(db.name,
write_concern=WriteConcern(w=0))
result = db.test.insert_one(document)
self.assertTrue(isinstance(result, InsertOneResult))
self.assertTrue(isinstance(result.inserted_id, ObjectId))
self.assertEqual(document["_id"], result.inserted_id)
self.assertFalse(result.acknowledged)
# The unacknowledged insert fails with a duplicate _id, so the count stays at 2.
wait_until(lambda: 2 == db.test.count(), 'forcing duplicate key error')
def test_insert_many(self):
db = self.db
db.test.drop()
docs = [{} for _ in range(5)]
result = db.test.insert_many(docs)
self.assertTrue(isinstance(result, InsertManyResult))
self.assertTrue(isinstance(result.inserted_ids, list))
self.assertEqual(5, len(result.inserted_ids))
for doc in docs:
_id = doc["_id"]
self.assertTrue(isinstance(_id, ObjectId))
self.assertTrue(_id in result.inserted_ids)
self.assertEqual(1, db.test.count({'_id': _id}))
self.assertTrue(result.acknowledged)
docs = [{"_id": i} for i in range(5)]
result = db.test.insert_many(docs)
self.assertTrue(isinstance(result, InsertManyResult))
self.assertTrue(isinstance(result.inserted_ids, list))
self.assertEqual(5, len(result.inserted_ids))
for doc in docs:
_id = doc["_id"]
self.assertTrue(isinstance(_id, int))
self.assertTrue(_id in result.inserted_ids)
self.assertEqual(1, db.test.count({"_id": _id}))
self.assertTrue(result.acknowledged)
db = db.client.get_database(db.name,
write_concern=WriteConcern(w=0))
docs = [{} for _ in range(5)]
result = db.test.insert_many(docs)
self.assertTrue(isinstance(result, InsertManyResult))
self.assertFalse(result.acknowledged)
self.assertEqual(15, db.test.count())
def test_delete_one(self):
self.db.test.drop()
self.db.test.insert_one({"x": 1})
self.db.test.insert_one({"y": 1})
self.db.test.insert_one({"z": 1})
result = self.db.test.delete_one({"x": 1})
self.assertTrue(isinstance(result, DeleteResult))
self.assertEqual(1, result.deleted_count)
self.assertTrue(result.acknowledged)
self.assertEqual(2, self.db.test.count())
result = self.db.test.delete_one({"y": 1})
self.assertTrue(isinstance(result, DeleteResult))
self.assertEqual(1, result.deleted_count)
self.assertTrue(result.acknowledged)
self.assertEqual(1, self.db.test.count())
db = self.db.client.get_database(self.db.name,
write_concern=WriteConcern(w=0))
result = db.test.delete_one({"z": 1})
self.assertTrue(isinstance(result, DeleteResult))
self.assertRaises(InvalidOperation, lambda: result.deleted_count)
self.assertFalse(result.acknowledged)
wait_until(lambda: 0 == db.test.count(), 'delete 1 document')
def test_delete_many(self):
self.db.test.drop()
self.db.test.insert_one({"x": 1})
self.db.test.insert_one({"x": 1})
self.db.test.insert_one({"y": 1})
self.db.test.insert_one({"y": 1})
result = self.db.test.delete_many({"x": 1})
self.assertTrue(isinstance(result, DeleteResult))
self.assertEqual(2, result.deleted_count)
self.assertTrue(result.acknowledged)
self.assertEqual(0, self.db.test.count({"x": 1}))
db = self.db.client.get_database(self.db.name,
write_concern=WriteConcern(w=0))
result = db.test.delete_many({"y": 1})
self.assertTrue(isinstance(result, DeleteResult))
self.assertRaises(InvalidOperation, lambda: result.deleted_count)
self.assertFalse(result.acknowledged)
wait_until(lambda: 0 == db.test.count(), 'delete 2 documents')
def test_find_by_default_dct(self):
db = self.db
db.test.insert_one({'foo': 'bar'})
dct = defaultdict(dict, [('foo', 'bar')])
self.assertIsNotNone(db.test.find_one(dct))
self.assertEqual(dct, defaultdict(dict, [('foo', 'bar')]))
def test_find_w_fields(self):
db = self.db
db.test.delete_many({})
db.test.insert_one({"x": 1, "mike": "awesome",
"extra thing": "abcdefghijklmnopqrstuvwxyz"})
self.assertEqual(1, db.test.count())
doc = next(db.test.find({}))
self.assertTrue("x" in doc)
doc = next(db.test.find({}))
self.assertTrue("mike" in doc)
doc = next(db.test.find({}))
self.assertTrue("extra thing" in doc)
doc = next(db.test.find({}, ["x", "mike"]))
self.assertTrue("x" in doc)
doc = next(db.test.find({}, ["x", "mike"]))
self.assertTrue("mike" in doc)
doc = next(db.test.find({}, ["x", "mike"]))
self.assertFalse("extra thing" in doc)
doc = next(db.test.find({}, ["mike"]))
self.assertFalse("x" in doc)
doc = next(db.test.find({}, ["mike"]))
self.assertTrue("mike" in doc)
doc = next(db.test.find({}, ["mike"]))
self.assertFalse("extra thing" in doc)
def test_fields_specifier_as_dict(self):
db = self.db
db.test.delete_many({})
db.test.insert_one({"x": [1, 2, 3], "mike": "awesome"})
self.assertEqual([1, 2, 3], db.test.find_one()["x"])
self.assertEqual([2, 3],
db.test.find_one(
projection={"x": {"$slice": -2}})["x"])
self.assertTrue("x" not in db.test.find_one(projection={"x": 0}))
self.assertTrue("mike" in db.test.find_one(projection={"x": 0}))
def test_find_w_regex(self):
db = self.db
db.test.delete_many({})
db.test.insert_one({"x": "hello_world"})
db.test.insert_one({"x": "hello_mike"})
db.test.insert_one({"x": "hello_mikey"})
db.test.insert_one({"x": "hello_test"})
self.assertEqual(db.test.find().count(), 4)
self.assertEqual(db.test.find({"x":
re.compile("^hello.*")}).count(), 4)
self.assertEqual(db.test.find({"x":
re.compile("ello")}).count(), 4)
self.assertEqual(db.test.find({"x":
re.compile("^hello$")}).count(), 0)
self.assertEqual(db.test.find({"x":
re.compile("^hello_mi.*$")}).count(), 2)
def test_id_can_be_anything(self):
db = self.db
db.test.delete_many({})
auto_id = {"hello": "world"}
db.test.insert_one(auto_id)
self.assertTrue(isinstance(auto_id["_id"], ObjectId))
numeric = {"_id": 240, "hello": "world"}
db.test.insert_one(numeric)
self.assertEqual(numeric["_id"], 240)
obj = {"_id": numeric, "hello": "world"}
db.test.insert_one(obj)
self.assertEqual(obj["_id"], numeric)
for x in db.test.find():
self.assertEqual(x["hello"], u("world"))
self.assertTrue("_id" in x)
def test_invalid_key_names(self):
db = self.db
db.test.drop()
db.test.insert_one({"hello": "world"})
db.test.insert_one({"hello": {"hello": "world"}})
self.assertRaises(InvalidDocument, db.test.insert_one,
{"$hello": "world"})
self.assertRaises(InvalidDocument, db.test.insert_one,
{"hello": {"$hello": "world"}})
db.test.insert_one({"he$llo": "world"})
db.test.insert_one({"hello": {"hello$": "world"}})
self.assertRaises(InvalidDocument, db.test.insert_one,
{".hello": "world"})
self.assertRaises(InvalidDocument, db.test.insert_one,
{"hello": {".hello": "world"}})
self.assertRaises(InvalidDocument, db.test.insert_one,
{"hello.": "world"})
self.assertRaises(InvalidDocument, db.test.insert_one,
{"hello": {"hello.": "world"}})
self.assertRaises(InvalidDocument, db.test.insert_one,
{"hel.lo": "world"})
self.assertRaises(InvalidDocument, db.test.insert_one,
{"hello": {"hel.lo": "world"}})
def test_unique_index(self):
db = self.db
db.drop_collection("test")
db.test.create_index("hello")
# No error.
db.test.insert_one({"hello": "world"})
db.test.insert_one({"hello": "world"})
db.drop_collection("test")
db.test.create_index("hello", unique=True)
with self.assertRaises(DuplicateKeyError):
db.test.insert_one({"hello": "world"})
db.test.insert_one({"hello": "world"})
def test_duplicate_key_error(self):
db = self.db
db.drop_collection("test")
db.test.create_index("x", unique=True)
db.test.insert_one({"_id": 1, "x": 1})
with self.assertRaises(DuplicateKeyError) as context:
db.test.insert_one({"x": 1})
self.assertIsNotNone(context.exception.details)
with self.assertRaises(DuplicateKeyError) as context:
db.test.insert_one({"x": 1})
self.assertIsNotNone(context.exception.details)
self.assertEqual(1, db.test.count())
def test_wtimeout(self):
# Ensure setting wtimeout doesn't disable write concern altogether.
# See SERVER-12596.
collection = self.db.test
collection.drop()
collection.insert_one({'_id': 1})
coll = collection.with_options(
write_concern=WriteConcern(w=1, wtimeout=1000))
self.assertRaises(DuplicateKeyError, coll.insert_one, {'_id': 1})
coll = collection.with_options(
write_concern=WriteConcern(wtimeout=1000))
self.assertRaises(DuplicateKeyError, coll.insert_one, {'_id': 1})
def test_error_code(self):
try:
self.db.test.update_many({}, {"$thismodifierdoesntexist": 1})
except OperationFailure as exc:
self.assertTrue(exc.code in (9, 10147, 16840, 17009))
# Just check that we set the error document. Fields
# vary by MongoDB version.
self.assertTrue(exc.details is not None)
else:
self.fail("OperationFailure was not raised")
def test_index_on_subfield(self):
db = self.db
db.drop_collection("test")
db.test.insert_one({"hello": {"a": 4, "b": 5}})
db.test.insert_one({"hello": {"a": 7, "b": 2}})
db.test.insert_one({"hello": {"a": 4, "b": 10}})
db.drop_collection("test")
db.test.create_index("hello.a", unique=True)
db.test.insert_one({"hello": {"a": 4, "b": 5}})
db.test.insert_one({"hello": {"a": 7, "b": 2}})
self.assertRaises(DuplicateKeyError,
db.test.insert_one,
{"hello": {"a": 4, "b": 10}})
def test_replace_one(self):
db = self.db
db.drop_collection("test")
self.assertRaises(ValueError,
lambda: db.test.replace_one({}, {"$set": {"x": 1}}))
id1 = db.test.insert_one({"x": 1}).inserted_id
result = db.test.replace_one({"x": 1}, {"y": 1})
self.assertTrue(isinstance(result, UpdateResult))
self.assertEqual(1, result.matched_count)
self.assertTrue(result.modified_count in (None, 1))
self.assertIsNone(result.upserted_id)
self.assertTrue(result.acknowledged)
self.assertEqual(1, db.test.count({"y": 1}))
self.assertEqual(0, db.test.count({"x": 1}))
self.assertEqual(db.test.find_one(id1)["y"], 1)
result = db.test.replace_one({"x": 2}, {"y": 2}, True)
self.assertTrue(isinstance(result, UpdateResult))
self.assertEqual(0, result.matched_count)
self.assertTrue(result.modified_count in (None, 0))
self.assertTrue(isinstance(result.upserted_id, ObjectId))
self.assertTrue(result.acknowledged)
self.assertEqual(1, db.test.count({"y": 2}))
db = db.client.get_database(db.name,
write_concern=WriteConcern(w=0))
result = db.test.replace_one({"x": 0}, {"y": 0})
self.assertTrue(isinstance(result, UpdateResult))
self.assertRaises(InvalidOperation, lambda: result.matched_count)
self.assertRaises(InvalidOperation, lambda: result.modified_count)
self.assertRaises(InvalidOperation, lambda: result.upserted_id)
self.assertFalse(result.acknowledged)
def test_update_one(self):
db = self.db
db.drop_collection("test")
self.assertRaises(ValueError,
lambda: db.test.update_one({}, {"x": 1}))
id1 = db.test.insert_one({"x": 5}).inserted_id
result = db.test.update_one({}, {"$inc": {"x": 1}})
self.assertTrue(isinstance(result, UpdateResult))
self.assertEqual(1, result.matched_count)
self.assertTrue(result.modified_count in (None, 1))
self.assertIsNone(result.upserted_id)
self.assertTrue(result.acknowledged)
self.assertEqual(db.test.find_one(id1)["x"], 6)
id2 = db.test.insert_one({"x": 1}).inserted_id
result = db.test.update_one({"x": 6}, {"$inc": {"x": 1}})
self.assertTrue(isinstance(result, UpdateResult))
self.assertEqual(1, result.matched_count)
self.assertTrue(result.modified_count in (None, 1))
self.assertIsNone(result.upserted_id)
self.assertTrue(result.acknowledged)
self.assertEqual(db.test.find_one(id1)["x"], 7)
self.assertEqual(db.test.find_one(id2)["x"], 1)
result = db.test.update_one({"x": 2}, {"$set": {"y": 1}}, True)
self.assertTrue(isinstance(result, UpdateResult))
self.assertEqual(0, result.matched_count)
self.assertTrue(result.modified_count in (None, 0))
self.assertTrue(isinstance(result.upserted_id, ObjectId))
self.assertTrue(result.acknowledged)
db = db.client.get_database(db.name,
write_concern=WriteConcern(w=0))
result = db.test.update_one({"x": 0}, {"$inc": {"x": 1}})
self.assertTrue(isinstance(result, UpdateResult))
self.assertRaises(InvalidOperation, lambda: result.matched_count)
self.assertRaises(InvalidOperation, lambda: result.modified_count)
self.assertRaises(InvalidOperation, lambda: result.upserted_id)
self.assertFalse(result.acknowledged)
def test_update_many(self):
db = self.db
db.drop_collection("test")
self.assertRaises(ValueError,
lambda: db.test.update_many({}, {"x": 1}))
db.test.insert_one({"x": 4, "y": 3})
db.test.insert_one({"x": 5, "y": 5})
db.test.insert_one({"x": 4, "y": 4})
result = db.test.update_many({"x": 4}, {"$set": {"y": 5}})
self.assertTrue(isinstance(result, UpdateResult))
self.assertEqual(2, result.matched_count)
self.assertTrue(result.modified_count in (None, 2))
self.assertIsNone(result.upserted_id)
self.assertTrue(result.acknowledged)
self.assertEqual(3, db.test.count({"y": 5}))
result = db.test.update_many({"x": 5}, {"$set": {"y": 6}})
self.assertTrue(isinstance(result, UpdateResult))
self.assertEqual(1, result.matched_count)
self.assertTrue(result.modified_count in (None, 1))
self.assertIsNone(result.upserted_id)
self.assertTrue(result.acknowledged)
self.assertEqual(1, db.test.count({"y": 6}))
result = db.test.update_many({"x": 2}, {"$set": {"y": 1}}, True)
self.assertTrue(isinstance(result, UpdateResult))
self.assertEqual(0, result.matched_count)
self.assertTrue(result.modified_count in (None, 0))
self.assertTrue(isinstance(result.upserted_id, ObjectId))
self.assertTrue(result.acknowledged)
db = db.client.get_database(db.name,
write_concern=WriteConcern(w=0))
result = db.test.update_many({"x": 0}, {"$inc": {"x": 1}})
self.assertTrue(isinstance(result, UpdateResult))
self.assertRaises(InvalidOperation, lambda: result.matched_count)
self.assertRaises(InvalidOperation, lambda: result.modified_count)
self.assertRaises(InvalidOperation, lambda: result.upserted_id)
self.assertFalse(result.acknowledged)
def test_update_with_invalid_keys(self):
self.db.drop_collection("test")
self.assertTrue(self.db.test.insert_one({"hello": "world"}))
doc = self.db.test.find_one()
doc['a.b'] = 'c'
expected = InvalidDocument
if client_context.version.at_least(2, 5, 4, -1):
expected = OperationFailure
# Replace
self.assertRaises(expected, self.db.test.replace_one,
{"hello": "world"}, doc)
# Upsert
self.assertRaises(expected, self.db.test.replace_one,
{"foo": "bar"}, doc, upsert=True)
# Check that the last two ops didn't actually modify anything
self.assertTrue('a.b' not in self.db.test.find_one())
# Modify shouldn't check keys...
self.assertTrue(self.db.test.update_one({"hello": "world"},
{"$set": {"foo.bar": "baz"}},
upsert=True))
# I know this seems like testing the server but I'd like to be notified
# by CI if the server's behavior changes here.
doc = SON([("$set", {"foo.bar": "bim"}), ("hello", "world")])
self.assertRaises(OperationFailure, self.db.test.update_one,
{"hello": "world"}, doc, upsert=True)
# This is going to cause keys to be checked and raise InvalidDocument.
# That's OK assuming the server's behavior in the previous assert
# doesn't change. If the behavior changes checking the first key for
# '$' in update won't be good enough anymore.
doc = SON([("hello", "world"), ("$set", {"foo.bar": "bim"})])
self.assertRaises(expected, self.db.test.replace_one,
{"hello": "world"}, doc, upsert=True)
# Replace with empty document
self.assertNotEqual(0,
self.db.test.replace_one(
{"hello": "world"}, {}).matched_count)
def test_acknowledged_delete(self):
db = self.db
db.drop_collection("test")
db.create_collection("test", capped=True, size=1000)
db.test.insert_one({"x": 1})
self.assertEqual(1, db.test.count())
# Can't remove from capped collection.
self.assertRaises(OperationFailure, db.test.delete_one, {"x": 1})
db.drop_collection("test")
db.test.insert_one({"x": 1})
db.test.insert_one({"x": 1})
self.assertEqual(2, db.test.delete_many({}).deleted_count)
self.assertEqual(0, db.test.delete_many({}).deleted_count)
def test_manual_last_error(self):
coll = self.db.get_collection("test", write_concern=WriteConcern(w=0))
coll.insert_one({"x": 1})
self.db.command("getlasterror", w=1, wtimeout=1)
def test_count(self):
db = self.db
db.drop_collection("test")
self.assertEqual(db.test.count(), 0)
db.test.insert_many([{}, {}])
self.assertEqual(db.test.count(), 2)
db.test.insert_many([{'foo': 'bar'}, {'foo': 'baz'}])
self.assertEqual(db.test.find({'foo': 'bar'}).count(), 1)
self.assertEqual(db.test.count({'foo': 'bar'}), 1)
self.assertEqual(db.test.find({'foo': re.compile(r'ba.*')}).count(), 2)
self.assertEqual(
db.test.count({'foo': re.compile(r'ba.*')}), 2)
def test_aggregate(self):
db = self.db
db.drop_collection("test")
db.test.insert_one({'foo': [1, 2]})
self.assertRaises(TypeError, db.test.aggregate, "wow")
pipeline = {"$project": {"_id": False, "foo": True}}
result = db.test.aggregate([pipeline], useCursor=False)
self.assertTrue(isinstance(result, CommandCursor))
self.assertEqual([{'foo': [1, 2]}], list(result))
@client_context.require_version_min(2, 5, 1)
def test_aggregation_cursor_validation(self):
db = self.db
projection = {'$project': {'_id': '$_id'}}
cursor = db.test.aggregate([projection], cursor={})
self.assertTrue(isinstance(cursor, CommandCursor))
cursor = db.test.aggregate([projection], useCursor=True)
self.assertTrue(isinstance(cursor, CommandCursor))
@client_context.require_version_min(2, 5, 1)
def test_aggregation_cursor(self):
db = self.db
if client_context.replica_set_name:
# Test that getMore messages are sent to the right server.
db = self.client.get_database(
db.name,
read_preference=ReadPreference.SECONDARY,
write_concern=WriteConcern(w=self.w))
for collection_size in (10, 1000):
db.drop_collection("test")
db.test.insert_many([{'_id': i} for i in range(collection_size)])
expected_sum = sum(range(collection_size))
# Use batchSize to ensure multiple getMore messages
cursor = db.test.aggregate(
[{'$project': {'_id': '$_id'}}],
batchSize=5)
self.assertEqual(
expected_sum,
sum(doc['_id'] for doc in cursor))
# Test that batchSize is handled properly.
cursor = db.test.aggregate([], batchSize=5)
self.assertEqual(5, len(cursor._CommandCursor__data))
# Force a getMore
cursor._CommandCursor__data.clear()
next(cursor)
# startingFrom for a command cursor doesn't include the initial batch
# returned by the command.
self.assertEqual(5, cursor._CommandCursor__retrieved)
# batchSize - 1
self.assertEqual(4, len(cursor._CommandCursor__data))
# Exhaust the cursor. There shouldn't be any errors.
for doc in cursor:
pass
@client_context.require_version_min(2, 5, 1)
def test_aggregation_cursor_alive(self):
self.db.test.delete_many({})
self.db.test.insert_many([{} for _ in range(3)])
self.addCleanup(self.db.test.delete_many, {})
cursor = self.db.test.aggregate(pipeline=[], cursor={'batchSize': 2})
n = 0
while True:
cursor.next()
n += 1
if 3 == n:
self.assertFalse(cursor.alive)
break
self.assertTrue(cursor.alive)
@client_context.require_version_min(2, 5, 5)
@client_context.require_no_mongos
def test_parallel_scan(self):
db = self.db
db.drop_collection("test")
if client_context.replica_set_name:
# Test that getMore messages are sent to the right server.
db = self.client.get_database(
db.name,
read_preference=ReadPreference.SECONDARY,
write_concern=WriteConcern(w=self.w))
coll = db.test
coll.insert_many([{'_id': i} for i in range(8000)])
docs = []
threads = [threading.Thread(target=docs.extend, args=(cursor,))
for cursor in coll.parallel_scan(3)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertEqual(
set(range(8000)),
set(doc['_id'] for doc in docs))
def test_group(self):
db = self.db
db.drop_collection("test")
self.assertEqual([],
db.test.group([], {}, {"count": 0},
"function (obj, prev) { prev.count++; }"
))
db.test.insert_many([{"a": 2}, {"b": 5}, {"a": 1}])
self.assertEqual([{"count": 3}],
db.test.group([], {}, {"count": 0},
"function (obj, prev) { prev.count++; }"
))
self.assertEqual([{"count": 1}],
db.test.group([], {"a": {"$gt": 1}}, {"count": 0},
"function (obj, prev) { prev.count++; }"
))
db.test.insert_one({"a": 2, "b": 3})
self.assertEqual([{"a": 2, "count": 2},
{"a": None, "count": 1},
{"a": 1, "count": 1}],
db.test.group(["a"], {}, {"count": 0},
"function (obj, prev) { prev.count++; }"
))
# modifying finalize
self.assertEqual([{"a": 2, "count": 3},
{"a": None, "count": 2},
{"a": 1, "count": 2}],
db.test.group(["a"], {}, {"count": 0},
"function (obj, prev) "
"{ prev.count++; }",
"function (obj) { obj.count++; }"))
# returning finalize
self.assertEqual([2, 1, 1],
db.test.group(["a"], {}, {"count": 0},
"function (obj, prev) "
"{ prev.count++; }",
"function (obj) { return obj.count; }"))
# keyf
self.assertEqual([2, 2],
db.test.group("function (obj) { if (obj.a == 2) "
"{ return {a: true} }; "
"return {b: true}; }", {}, {"count": 0},
"function (obj, prev) "
"{ prev.count++; }",
"function (obj) { return obj.count; }"))
# no key
self.assertEqual([{"count": 4}],
db.test.group(None, {}, {"count": 0},
"function (obj, prev) { prev.count++; }"
))
self.assertRaises(OperationFailure, db.test.group,
[], {}, {}, "5 ++ 5")
def test_group_with_scope(self):
db = self.db
db.drop_collection("test")
db.test.insert_many([{"a": 1}, {"b": 1}])
reduce_function = "function (obj, prev) { prev.count += inc_value; }"
self.assertEqual(2, db.test.group([], {}, {"count": 0},
Code(reduce_function,
{"inc_value": 1}))[0]['count'])
self.assertEqual(4, db.test.group([], {}, {"count": 0},
Code(reduce_function,
{"inc_value": 2}))[0]['count'])
self.assertEqual(1,
db.test.group([], {}, {"count": 0},
Code(reduce_function,
{"inc_value": 0.5}))[0]['count'])
self.assertEqual(2, db.test.group(
[], {}, {"count": 0},
Code(reduce_function, {"inc_value": 1}))[0]['count'])
self.assertEqual(4, db.test.group(
[], {}, {"count": 0},
Code(reduce_function, {"inc_value": 2}))[0]['count'])
self.assertEqual(1, db.test.group(
[], {}, {"count": 0},
Code(reduce_function, {"inc_value": 0.5}))[0]['count'])
def test_large_limit(self):
db = self.db
db.drop_collection("test_large_limit")
db.test_large_limit.create_index([('x', 1)])
my_str = "mongomongo" * 1000
for i in range(2000):
doc = {"x": i, "y": my_str}
db.test_large_limit.insert_one(doc)
i = 0
y = 0
for doc in db.test_large_limit.find(limit=1900).sort([('x', 1)]):
i += 1
y += doc["x"]
self.assertEqual(1900, i)
self.assertEqual((1900 * 1899) / 2, y)
def test_find_kwargs(self):
db = self.db
db.drop_collection("test")
for i in range(10):
db.test.insert_one({"x": i})
self.assertEqual(10, db.test.count())
total = 0
for x in db.test.find({}, skip=4, limit=2):
total += x["x"]
self.assertEqual(9, total)
def test_rename(self):
db = self.db
db.drop_collection("test")
db.drop_collection("foo")
self.assertRaises(TypeError, db.test.rename, 5)
self.assertRaises(InvalidName, db.test.rename, "")
self.assertRaises(InvalidName, db.test.rename, "te$t")
self.assertRaises(InvalidName, db.test.rename, ".test")
self.assertRaises(InvalidName, db.test.rename, "test.")
self.assertRaises(InvalidName, db.test.rename, "tes..t")
self.assertEqual(0, db.test.count())
self.assertEqual(0, db.foo.count())
for i in range(10):
db.test.insert_one({"x": i})
self.assertEqual(10, db.test.count())
db.test.rename("foo")
self.assertEqual(0, db.test.count())
self.assertEqual(10, db.foo.count())
x = 0
for doc in db.foo.find():
self.assertEqual(x, doc["x"])
x += 1
db.test.insert_one({})
self.assertRaises(OperationFailure, db.foo.rename, "test")
db.foo.rename("test", dropTarget=True)
def test_find_one(self):
db = self.db
db.drop_collection("test")
_id = db.test.insert_one({"hello": "world", "foo": "bar"}).inserted_id
self.assertEqual("world", db.test.find_one()["hello"])
self.assertEqual(db.test.find_one(_id), db.test.find_one())
self.assertEqual(db.test.find_one(None), db.test.find_one())
self.assertEqual(db.test.find_one({}), db.test.find_one())
self.assertEqual(db.test.find_one({"hello": "world"}),
db.test.find_one())
self.assertTrue("hello" in db.test.find_one(projection=["hello"]))
self.assertTrue("hello" not in db.test.find_one(projection=["foo"]))
self.assertEqual(["_id"], list(db.test.find_one(projection=[])))
self.assertEqual(None, db.test.find_one({"hello": "foo"}))
self.assertEqual(None, db.test.find_one(ObjectId()))
def test_find_one_non_objectid(self):
db = self.db
db.drop_collection("test")
db.test.insert_one({"_id": 5})
self.assertTrue(db.test.find_one(5))
self.assertFalse(db.test.find_one(6))
def test_find_one_with_find_args(self):
db = self.db
db.drop_collection("test")
db.test.insert_many([{"x": i} for i in range(1, 4)])
self.assertEqual(1, db.test.find_one()["x"])
self.assertEqual(2, db.test.find_one(skip=1, limit=2)["x"])
def test_find_with_sort(self):
db = self.db
db.drop_collection("test")
db.test.insert_many([{"x": 2}, {"x": 1}, {"x": 3}])
self.assertEqual(2, db.test.find_one()["x"])
self.assertEqual(1, db.test.find_one(sort=[("x", 1)])["x"])
self.assertEqual(3, db.test.find_one(sort=[("x", -1)])["x"])
def to_list(things):
return [thing["x"] for thing in things]
self.assertEqual([2, 1, 3], to_list(db.test.find()))
self.assertEqual([1, 2, 3], to_list(db.test.find(sort=[("x", 1)])))
self.assertEqual([3, 2, 1], to_list(db.test.find(sort=[("x", -1)])))
self.assertRaises(TypeError, db.test.find, sort=5)
self.assertRaises(TypeError, db.test.find, sort="hello")
self.assertRaises(ValueError, db.test.find, sort=["hello", 1])
# TODO doesn't actually test functionality, just that it doesn't blow up
def test_cursor_timeout(self):
list(self.db.test.find(no_cursor_timeout=True))
list(self.db.test.find(no_cursor_timeout=False))
def test_exhaust(self):
if is_mongos(self.db.client):
self.assertRaises(InvalidOperation,
self.db.test.find,
cursor_type=CursorType.EXHAUST)
return
# Limit is incompatible with exhaust.
self.assertRaises(InvalidOperation,
self.db.test.find,
cursor_type=CursorType.EXHAUST,
limit=5)
cur = self.db.test.find(cursor_type=CursorType.EXHAUST)
self.assertRaises(InvalidOperation, cur.limit, 5)
cur = self.db.test.find(limit=5)
self.assertRaises(InvalidOperation, cur.add_option, 64)
cur = self.db.test.find()
cur.add_option(64)
self.assertRaises(InvalidOperation, cur.limit, 5)
self.db.drop_collection("test")
# Insert enough documents to require more than one batch
self.db.test.insert_many([{'i': i} for i in range(150)])
client = rs_or_single_client(maxPoolSize=1)
socks = get_pool(client).sockets
# Make sure the socket is returned after exhaustion.
cur = client[self.db.name].test.find(cursor_type=CursorType.EXHAUST)
next(cur)
self.assertEqual(0, len(socks))
for _ in cur:
pass
self.assertEqual(1, len(socks))
# Same as previous but don't call next()
for _ in client[self.db.name].test.find(cursor_type=CursorType.EXHAUST):
pass
self.assertEqual(1, len(socks))
# If the Cursor instance is discarded before being
# completely iterated we have to close and
# discard the socket.
cur = client[self.db.name].test.find(cursor_type=CursorType.EXHAUST)
next(cur)
self.assertEqual(0, len(socks))
if sys.platform.startswith('java') or 'PyPy' in sys.version:
# Don't wait for GC or use gc.collect(), it's unreliable.
cur.close()
cur = None
# The socket should be discarded.
self.assertEqual(0, len(socks))
def test_distinct(self):
self.db.drop_collection("test")
test = self.db.test
test.insert_many([{"a": 1}, {"a": 2}, {"a": 2}, {"a": 2}, {"a": 3}])
distinct = test.distinct("a")
distinct.sort()
self.assertEqual([1, 2, 3], distinct)
distinct = test.find({'a': {'$gt': 1}}).distinct("a")
distinct.sort()
self.assertEqual([2, 3], distinct)
distinct = test.distinct('a', {'a': {'$gt': 1}})
distinct.sort()
self.assertEqual([2, 3], distinct)
self.db.drop_collection("test")
test.insert_one({"a": {"b": "a"}, "c": 12})
test.insert_one({"a": {"b": "b"}, "c": 12})
test.insert_one({"a": {"b": "c"}, "c": 12})
test.insert_one({"a": {"b": "c"}, "c": 12})
distinct = test.distinct("a.b")
distinct.sort()
self.assertEqual(["a", "b", "c"], distinct)
def test_query_on_query_field(self):
self.db.drop_collection("test")
self.db.test.insert_one({"query": "foo"})
self.db.test.insert_one({"bar": "foo"})
self.assertEqual(1,
self.db.test.find({"query": {"$ne": None}}).count())
self.assertEqual(1,
len(list(self.db.test.find({"query": {"$ne": None}})))
)
def test_min_query(self):
self.db.drop_collection("test")
self.db.test.insert_many([{"x": 1}, {"x": 2}])
self.db.test.create_index("x")
self.assertEqual(1, len(list(self.db.test.find({"$min": {"x": 2},
"$query": {}}))))
self.assertEqual(2, self.db.test.find({"$min": {"x": 2},
"$query": {}})[0]["x"])
def test_numerous_inserts(self):
# Insert more than the server's 1000-document batch limit to exercise batch splitting.
self.db.test.drop()
n_docs = 2100
self.db.test.insert_many([{} for _ in range(n_docs)])
self.assertEqual(n_docs, self.db.test.count())
self.db.test.drop()
def test_map_reduce(self):
db = self.db
db.drop_collection("test")
db.test.insert_one({"id": 1, "tags": ["dog", "cat"]})
db.test.insert_one({"id": 2, "tags": ["cat"]})
db.test.insert_one({"id": 3, "tags": ["mouse", "cat", "dog"]})
db.test.insert_one({"id": 4, "tags": []})
map = Code("function () {"
" this.tags.forEach(function(z) {"
" emit(z, 1);"
" });"
"}")
reduce = Code("function (key, values) {"
" var total = 0;"
" for (var i = 0; i < values.length; i++) {"
" total += values[i];"
" }"
" return total;"
"}")
result = db.test.map_reduce(map, reduce, out='mrunittests')
self.assertEqual(3, result.find_one({"_id": "cat"})["value"])
self.assertEqual(2, result.find_one({"_id": "dog"})["value"])
self.assertEqual(1, result.find_one({"_id": "mouse"})["value"])
db.test.insert_one({"id": 5, "tags": ["hampster"]})
result = db.test.map_reduce(map, reduce, out='mrunittests')
self.assertEqual(1, result.find_one({"_id": "hampster"})["value"])
db.test.delete_one({"id": 5})
result = db.test.map_reduce(map, reduce,
out={'merge': 'mrunittests'})
self.assertEqual(3, result.find_one({"_id": "cat"})["value"])
self.assertEqual(1, result.find_one({"_id": "hampster"})["value"])
result = db.test.map_reduce(map, reduce,
out={'reduce': 'mrunittests'})
self.assertEqual(6, result.find_one({"_id": "cat"})["value"])
self.assertEqual(4, result.find_one({"_id": "dog"})["value"])
self.assertEqual(2, result.find_one({"_id": "mouse"})["value"])
self.assertEqual(1, result.find_one({"_id": "hampster"})["value"])
result = db.test.map_reduce(
map,
reduce,
out={'replace': 'mrunittests'}
)
self.assertEqual(3, result.find_one({"_id": "cat"})["value"])
self.assertEqual(2, result.find_one({"_id": "dog"})["value"])
self.assertEqual(1, result.find_one({"_id": "mouse"})["value"])
result = db.test.map_reduce(map, reduce,
out=SON([('replace', 'mrunittests'),
('db', 'mrtestdb')
]))
self.assertEqual(3, result.find_one({"_id": "cat"})["value"])
self.assertEqual(2, result.find_one({"_id": "dog"})["value"])
self.assertEqual(1, result.find_one({"_id": "mouse"})["value"])
self.client.drop_database('mrtestdb')
full_result = db.test.map_reduce(map, reduce,
out='mrunittests', full_response=True)
self.assertEqual(6, full_result["counts"]["emit"])
result = db.test.map_reduce(map, reduce, out='mrunittests', limit=2)
self.assertEqual(2, result.find_one({"_id": "cat"})["value"])
self.assertEqual(1, result.find_one({"_id": "dog"})["value"])
self.assertEqual(None, result.find_one({"_id": "mouse"}))
result = db.test.map_reduce(map, reduce, out={'inline': 1})
self.assertTrue(isinstance(result, dict))
self.assertTrue('results' in result)
self.assertTrue(result['results'][1]["_id"] in ("cat",
"dog",
"mouse"))
result = db.test.inline_map_reduce(map, reduce)
self.assertTrue(isinstance(result, list))
self.assertEqual(3, len(result))
self.assertTrue(result[1]["_id"] in ("cat", "dog", "mouse"))
full_result = db.test.inline_map_reduce(map, reduce,
full_response=True)
self.assertEqual(6, full_result["counts"]["emit"])
def test_messages_with_unicode_collection_names(self):
db = self.db
db[u("Employés")].insert_one({"x": 1})
db[u("Employés")].replace_one({"x": 1}, {"x": 2})
db[u("Employés")].delete_many({})
db[u("Employés")].find_one()
list(db[u("Employés")].find())
def test_drop_indexes_non_existent(self):
self.db.drop_collection("test")
self.db.test.drop_indexes()
# This is really a bson test but easier to just reproduce it here...
# (Shame on me)
def test_bad_encode(self):
c = self.db.test
c.drop()
self.assertRaises(InvalidDocument, c.insert_one, {"x": c})
class BadGetAttr(dict):
def __getattr__(self, name):
pass
bad = BadGetAttr([('foo', 'bar')])
c.insert_one({'bad': bad})
self.assertEqual('bar', c.find_one()['bad']['foo'])
def test_find_one_and(self):
c = self.db.test
c.drop()
c.insert_one({'_id': 1, 'i': 1})
self.assertEqual({'_id': 1, 'i': 1},
c.find_one_and_update({'_id': 1}, {'$inc': {'i': 1}}))
self.assertEqual({'_id': 1, 'i': 3},
c.find_one_and_update(
{'_id': 1}, {'$inc': {'i': 1}},
return_document=ReturnDocument.AFTER))
self.assertEqual({'_id': 1, 'i': 3},
c.find_one_and_delete({'_id': 1}))
self.assertEqual(None, c.find_one({'_id': 1}))
self.assertEqual(None,
c.find_one_and_update({'_id': 1}, {'$inc': {'i': 1}}))
self.assertEqual({'_id': 1, 'i': 1},
c.find_one_and_update(
{'_id': 1}, {'$inc': {'i': 1}},
return_document=ReturnDocument.AFTER,
upsert=True))
self.assertEqual({'_id': 1, 'i': 2},
c.find_one_and_update(
{'_id': 1}, {'$inc': {'i': 1}},
return_document=ReturnDocument.AFTER))
self.assertEqual({'_id': 1, 'i': 3},
c.find_one_and_replace(
{'_id': 1}, {'i': 3, 'j': 1},
projection=['i'],
return_document=ReturnDocument.AFTER))
self.assertEqual({'i': 4},
c.find_one_and_update(
{'_id': 1}, {'$inc': {'i': 1}},
projection={'i': 1, '_id': 0},
return_document=ReturnDocument.AFTER))
c.drop()
for j in range(5):
c.insert_one({'j': j, 'i': 0})
sort = [('j', DESCENDING)]
self.assertEqual(4, c.find_one_and_update({},
{'$inc': {'i': 1}},
sort=sort)['j'])
def test_find_with_nested(self):
c = self.db.test
c.drop()
c.insert_many([{'i': i} for i in range(5)]) # [0, 1, 2, 3, 4]
self.assertEqual(
[2],
[i['i'] for i in c.find({
'$and': [
{
# This clause gives us [1,2,4]
'$or': [
{'i': {'$lte': 2}},
{'i': {'$gt': 3}},
],
},
{
# This clause gives us [2,3]
'$or': [
{'i': 2},
{'i': 3},
]
},
]
})]
)
self.assertEqual(
[0, 1, 2],
[i['i'] for i in c.find({
'$or': [
{
# This clause gives us [2]
'$and': [
{'i': {'$gte': 2}},
{'i': {'$lt': 3}},
],
},
{
# This clause gives us [0,1]
'$and': [
{'i': {'$gt': -100}},
{'i': {'$lt': 2}},
]
},
]
})]
)
def test_find_regex(self):
c = self.db.test
c.drop()
c.insert_one({'r': re.compile('.*')})
self.assertTrue(isinstance(c.find_one()['r'], Regex))
for doc in c.find():
self.assertTrue(isinstance(doc['r'], Regex))
if __name__ == "__main__":
unittest.main()
|
felixonmars/mongo-python-driver
|
test/test_collection.py
|
Python
|
apache-2.0
| 66,325
|
"""
Array to a Feature Class - OGR Compilant
"""
from osgeo import ogr
def ogr_btw_driver(inShp, outShp, supportForSpatialLite=None):
"""
    Convert a vector file from one OGR driver (format) to another
Set supportForSpatialLite to True if outShp is a sqlite db and if you
want SpatialLite support for that database.
"""
import os
from gasp.gdal import get_driver_name
from gasp.oss.shell import execute_cmd
out_driver = get_driver_name(outShp)
if out_driver == 'SQLite' and supportForSpatialLite:
splite = ' -dsco "SPATIALITE=YES"'
else:
splite = ''
cmd = 'ogr2ogr -f "{drv}" {out} {_in}{lite}'.format(
drv=out_driver, out=outShp, _in=inShp,
lite=splite
)
# Run command
cmdout = execute_cmd(cmd)
return outShp
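# Minimal usage sketch for ogr_btw_driver (illustrative only: the paths are
# assumptions, ogr2ogr must be on the PATH, and the output formats depend on
# what gasp.gdal.get_driver_name maps those extensions to).
def _example_ogr_btw_driver():
    # Plain driver-to-driver conversion, e.g. ESRI Shapefile -> GML
    ogr_btw_driver('/tmp/roads.shp', '/tmp/roads.gml')
    # SQLite output with SpatiaLite support enabled
    ogr_btw_driver('/tmp/roads.shp', '/tmp/roads.sqlite',
                   supportForSpatialLite=True)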
def loop_btw_driver(inFld, outFld, destiny_file_format,
file_format=None):
"""
Execute ogr_btw_driver for every file in a folder
"""
import os
from gasp.oss.info import list_files
if not os.path.exists(outFld):
from gasp.oss.ops import create_folder
create_folder(outFld)
geo_files = list_files(inFld, file_format=file_format)
for _file in geo_files:
ogr_btw_driver(
_file,
os.path.join(
outFld, '{}.{}'.format(
os.path.splitext(os.path.basename(_file))[0],
destiny_file_format
)
)
)
def to_polygon(inRst, outShp, epsg):
"""
Raster to polygon
"""
import os
from osgeo import gdal
from osgeo import ogr
from osgeo import osr
from gasp.gdal import get_driver_name
src = gdal.Open(inRst)
bnd = src.GetRasterBand(1)
output = ogr.GetDriverByName(
        get_driver_name(outShp)).CreateDataSource(outShp)
srs = osr.SpatialReference()
srs.ImportFromEPSG(epsg)
lyr = output.CreateLayer(
os.path.splitext(os.path.basename(outShp))[0],
srs
)
lyr.CreateField(ogr.FieldDefn('VALUE', ogr.OFTInteger))
gdal.Polygonize(bnd, None, lyr, 0, [], callback=None)
return outShp
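# Usage sketch for to_polygon (illustrative: file names and the EPSG code are
# assumptions). Band 1 of the raster is vectorised into polygons carrying a
# 'VALUE' attribute.
def _example_to_polygon():
    return to_polygon('/tmp/landcover.tif', '/tmp/landcover.shp', 4326)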
def array_to_shp(array_like, outFile, x='x', y='y', epsg=None):
"""
    Convert an array with geometric data into a file with geometry (GML, ESRI
Shapefile or others).
Example of an array_like object:
data = [
{col_1: value, col_2: value, x: value, y: value},
{col_1: value, col_2: value, x: value, y: value},
]
    The array must contain 'x' and 'y' keys (or
    equivalent).
TODO: Now works only for points
"""
import os
from gasp.encoding import unicode_to_str
from gasp.gdal import get_driver_name
from gasp.gdal import create_point
from gasp.gdal.proj import get_sref_from_epsg
from gasp.gdal.manage.fields import map_pyType_fldCode
ogr.UseExceptions()
# Create output file
shp = ogr.GetDriverByName(
get_driver_name(outFile)).CreateDataSource(outFile)
lyr = shp.CreateLayer(
os.path.splitext(os.path.basename(outFile))[0],
None if not epsg else get_sref_from_epsg(epsg),
geom_type=ogr.wkbPoint,
)
# Create fields of output file
fields = []
keys_fields = {}
for k in array_like[0]:
if k != x and k != y:
fld_name = k[:9]
if type(fld_name) == unicode:
fld_name = unicode_to_str(fld_name)
if fld_name not in fields:
fields.append(fld_name)
else:
# Get All similar fields in the fields list
tmp_fld = []
for i in fields:
if i[:8] == fld_name[:8]:
tmp_fld.append(i[:8])
c = len(tmp_fld)
fld_name = fld_name[:8] + '_{n}'.format(n=str(c))
fields.append(fld_name)
# TODO: Automatic mapping of filters types needs further testing
#fld_type = map_pyType_fldCode(array_like[0][k])
lyr.CreateField(
ogr.FieldDefn(fld_name, ogr.OFTString)
)
keys_fields[k] = fld_name
defn = lyr.GetLayerDefn()
for i in range(len(array_like)):
feat = ogr.Feature(defn)
feat.SetGeometry(
create_point(array_like[i][x], array_like[i][y]))
for k in array_like[i]:
if k != x and k != y:
value = array_like[i][k]
if type(value) == unicode:
value = unicode_to_str(value)
if len(value) >= 254:
value = value[:253]
feat.SetField(
keys_fields[k], value
)
lyr.CreateFeature(feat)
feat = None
shp.Destroy()
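# Usage sketch for array_to_shp following the array_like layout documented in
# its docstring (coordinates and attribute values are made up for illustration).
def _example_array_to_shp():
    data = [
        {'name': 'pt_a', 'x': -8.61, 'y': 41.15},
        {'name': 'pt_b', 'x': -8.60, 'y': 41.16},
    ]
    array_to_shp(data, '/tmp/points.shp', x='x', y='y', epsg=4326)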
def xls_to_shape(inXls, outShp):
"""
    Creates a new feature class by reading an xls file
    In this first version, the program only supports creating a polygon
    layer containing a single polygon.
    This block of code will be updated as needed.
"""
import os
import xlrd
from osgeo import ogr
from gasp.gdal import get_driver_name
# Create Output
shp = ogr.GetDriverByName(
get_driver_name(outShp)).CreateDataSource(outShp)
lyr = shp.CreateLayer(
os.path.splitext(os.path.basename(inXls))[0],
geom_type=ogr.wkbPolygon
)
# Open xls
excel = xlrd.open_workbook(inXls)
sheet = excel.sheet_by_name(str(excel.sheets()[0].name))
ring = ogr.Geometry(ogr.wkbLinearRing)
poly = ogr.Geometry(ogr.wkbPolygon)
outFeat = ogr.Feature(lyr.GetLayerDefn())
for row in range(1, sheet.nrows):
x = float(sheet.cell(row, 1).value)
y = float(sheet.cell(row, 2).value)
ring.AddPoint(x, y)
poly.AddGeometry(ring)
outFeat.SetGeometry(poly)
lyr.CreateFeature(outFeat)
shp.Destroy()
return outShp
def osm_to_featurecls(xmlOsm, output):
"""
OSM to ESRI Shapefile
"""
import os
# Convert xml to sqliteDB
sqDB = os.path.join(
os.path.dirname(output),
os.path.splitext(os.path.basename(output))[0] + '.sqlite'
)
ogr_btw_driver(xmlOsm, sqDB)
# sqliteDB to file
ogr_btw_driver(sqDB, output)
return output
def getosm_to_featurecls(inBoundary, outVector, boundaryEpsg=4326,
vectorEpsg=4326):
"""
Get OSM Data from the Internet and convert the file to regular vector file
"""
import os
    # Download data from the web (download_by_boundary is expected to be
    # available in this module's namespace; it is not imported above)
osmData = download_by_boundary(
inBoundary, os.path.join(
os.path.dirname(outVector),
os.path.splitext(os.path.basename(outVector))[0] + '.xml'
), boundaryEpsg
)
# Convert data to regular vector file
return osm_to_featurecls(osmData, outVector)
|
JoaquimPatriarca/senpy-for-gis
|
gasp/toshp/gdal.py
|
Python
|
gpl-3.0
| 7,217
|
r"""OS routines for Mac, NT, or Posix depending on what system we're on.
This exports:
- all functions from posix, nt or ce, e.g. unlink, stat, etc.
- os.path is either posixpath or ntpath
- os.name is either 'posix', 'nt' or 'ce'.
- os.curdir is a string representing the current directory ('.' or ':')
- os.pardir is a string representing the parent directory ('..' or '::')
- os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- os.extsep is the extension separator (always '.')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
#'
import sys, errno
import stat as st
_names = sys.builtin_module_names
# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep",
"defpath", "name", "path", "devnull", "SEEK_SET", "SEEK_CUR",
"SEEK_END", "fsencode", "fsdecode", "get_exec_path", "fdopen",
"popen", "extsep"]
def _exists(name):
return name in globals()
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
# Any new dependencies of the os module and/or changes in path separator
# require updating importlib as well.
if 'posix' in _names:
name = 'posix'
linesep = '\n'
from posix import *
try:
from posix import _exit
__all__.append('_exit')
except ImportError:
pass
import posixpath as path
try:
from posix import _have_functions
except ImportError:
pass
elif 'nt' in _names:
name = 'nt'
linesep = '\r\n'
from nt import *
try:
from nt import _exit
__all__.append('_exit')
except ImportError:
pass
import ntpath as path
import nt
__all__.extend(_get_exports_list(nt))
del nt
try:
from nt import _have_functions
except ImportError:
pass
elif 'ce' in _names:
name = 'ce'
linesep = '\r\n'
from ce import *
try:
from ce import _exit
__all__.append('_exit')
except ImportError:
pass
# We can use the standard Windows path.
import ntpath as path
import ce
__all__.extend(_get_exports_list(ce))
del ce
try:
from ce import _have_functions
except ImportError:
pass
else:
raise ImportError('no os specific module found')
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
del _names
if _exists("_have_functions"):
_globals = globals()
def _add(str, fn):
if (fn in _globals) and (str in _have_functions):
_set.add(_globals[fn])
_set = set()
_add("HAVE_FACCESSAT", "access")
_add("HAVE_FCHMODAT", "chmod")
_add("HAVE_FCHOWNAT", "chown")
_add("HAVE_FSTATAT", "stat")
_add("HAVE_FUTIMESAT", "utime")
_add("HAVE_LINKAT", "link")
_add("HAVE_MKDIRAT", "mkdir")
_add("HAVE_MKFIFOAT", "mkfifo")
_add("HAVE_MKNODAT", "mknod")
_add("HAVE_OPENAT", "open")
_add("HAVE_READLINKAT", "readlink")
_add("HAVE_RENAMEAT", "rename")
_add("HAVE_SYMLINKAT", "symlink")
_add("HAVE_UNLINKAT", "unlink")
_add("HAVE_UNLINKAT", "rmdir")
_add("HAVE_UTIMENSAT", "utime")
supports_dir_fd = _set
_set = set()
_add("HAVE_FACCESSAT", "access")
supports_effective_ids = _set
_set = set()
_add("HAVE_FCHDIR", "chdir")
_add("HAVE_FCHMOD", "chmod")
_add("HAVE_FCHOWN", "chown")
_add("HAVE_FDOPENDIR", "listdir")
_add("HAVE_FEXECVE", "execve")
_set.add(stat) # fstat always works
_add("HAVE_FTRUNCATE", "truncate")
_add("HAVE_FUTIMENS", "utime")
_add("HAVE_FUTIMES", "utime")
_add("HAVE_FPATHCONF", "pathconf")
if _exists("statvfs") and _exists("fstatvfs"): # mac os x10.3
_add("HAVE_FSTATVFS", "statvfs")
supports_fd = _set
_set = set()
_add("HAVE_FACCESSAT", "access")
# Some platforms don't support lchmod(). Often the function exists
    # anyway, as a stub that always returns ENOTSUP or perhaps EOPNOTSUPP.
# (No, I don't know why that's a good design.) ./configure will detect
# this and reject it--so HAVE_LCHMOD still won't be defined on such
# platforms. This is Very Helpful.
#
# However, sometimes platforms without a working lchmod() *do* have
# fchmodat(). (Examples: Linux kernel 3.2 with glibc 2.15,
# OpenIndiana 3.x.) And fchmodat() has a flag that theoretically makes
# it behave like lchmod(). So in theory it would be a suitable
# replacement for lchmod(). But when lchmod() doesn't work, fchmodat()'s
# flag doesn't work *either*. Sadly ./configure isn't sophisticated
# enough to detect this condition--it only determines whether or not
# fchmodat() minimally works.
#
# Therefore we simply ignore fchmodat() when deciding whether or not
# os.chmod supports follow_symlinks. Just checking lchmod() is
# sufficient. After all--if you have a working fchmodat(), your
# lchmod() almost certainly works too.
#
# _add("HAVE_FCHMODAT", "chmod")
_add("HAVE_FCHOWNAT", "chown")
_add("HAVE_FSTATAT", "stat")
_add("HAVE_LCHFLAGS", "chflags")
_add("HAVE_LCHMOD", "chmod")
if _exists("lchown"): # mac os x10.3
_add("HAVE_LCHOWN", "chown")
_add("HAVE_LINKAT", "link")
_add("HAVE_LUTIMES", "utime")
_add("HAVE_LSTAT", "stat")
_add("HAVE_FSTATAT", "stat")
_add("HAVE_UTIMENSAT", "utime")
_add("MS_WINDOWS", "stat")
supports_follow_symlinks = _set
del _set
del _have_functions
del _globals
del _add
# Python uses fixed values for the SEEK_ constants; they are mapped
# to native constants if necessary in posixmodule.c
# Other possible SEEK values are directly imported from posixmodule.c
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(name [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones. Works like
mkdir, except that any intermediate path segment (not just the rightmost)
will be created if it does not exist. If the target directory already
exists, raise an OSError if exist_ok is False. Otherwise no exception is
raised. This is recursive.
"""
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
if head and tail and not path.exists(head):
try:
makedirs(head, mode, exist_ok)
except FileExistsError:
# be happy if someone already created the path
pass
cdir = curdir
if isinstance(tail, bytes):
cdir = bytes(curdir, 'ASCII')
if tail == cdir: # xxx/newdir/. exists if xxx/newdir exists
return
try:
mkdir(name, mode)
except OSError as e:
if not exist_ok or e.errno != errno.EEXIST or not path.isdir(name):
raise
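# Minimal usage sketch for makedirs (the path below is an arbitrary assumption).
# With exist_ok=True a second call on the same tree is a no-op instead of
# raising FileExistsError.
def _makedirs_example(base="/tmp/makedirs_demo/a/b/c"):
    makedirs(base, exist_ok=True)
    makedirs(base, exist_ok=True)   # silently succeeds the second time
    removedirs(base)                # prune the now-empty directories again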
def removedirs(name):
"""removedirs(name)
Super-rmdir; remove a leaf directory and all empty intermediate
ones. Works like rmdir except that, if the leaf directory is
successfully removed, directories corresponding to rightmost path
segments will be pruned away until either the whole path is
consumed or an error occurs. Errors during this latter phase are
ignored -- they generally mean that a directory was not empty.
"""
rmdir(name)
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
while head and tail:
try:
rmdir(head)
except OSError:
break
head, tail = path.split(head)
def renames(old, new):
"""renames(old, new)
Super-rename; create directories as necessary and delete any left
empty. Works like rename, except creation of any intermediate
directories needed to make the new pathname good is attempted
first. After the rename, directories corresponding to rightmost
    path segments of the old name will be pruned away until either the
whole path is consumed or a nonempty directory is found.
Note: this function can fail with the new directory structure made
if you lack permissions needed to unlink the leaf directory or
file.
"""
head, tail = path.split(new)
if head and tail and not path.exists(head):
makedirs(head)
rename(old, new)
head, tail = path.split(old)
if head and tail:
try:
removedirs(head)
except OSError:
pass
__all__.extend(["makedirs", "removedirs", "renames"])
def walk(top, topdown=True, onerror=None, followlinks=False):
"""Directory tree generator.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), yields a 3-tuple
dirpath, dirnames, filenames
dirpath is a string, the path to the directory. dirnames is a list of
the names of the subdirectories in dirpath (excluding '.' and '..').
filenames is a list of the names of the non-directory files in dirpath.
Note that the names in the lists are just names, with no path components.
To get a full path (which begins with top) to a file or directory in
dirpath, do os.path.join(dirpath, name).
If optional arg 'topdown' is true or not specified, the triple for a
directory is generated before the triples for any of its subdirectories
(directories are generated top down). If topdown is false, the triple
for a directory is generated after the triples for all of its
subdirectories (directories are generated bottom up).
When topdown is true, the caller can modify the dirnames list in-place
(e.g., via del or slice assignment), and walk will only recurse into the
subdirectories whose names remain in dirnames; this can be used to prune
the search, or to impose a specific order of visiting. Modifying
dirnames when topdown is false is ineffective, since the directories in
dirnames have already been generated by the time dirnames itself is
generated.
By default errors from the os.listdir() call are ignored. If
optional arg 'onerror' is specified, it should be a function; it
will be called with one argument, an OSError instance. It can
report the error to continue with the walk, or raise the exception
to abort the walk. Note that the filename is available as the
filename attribute of the exception object.
By default, os.walk does not follow symbolic links to subdirectories on
systems that support them. In order to get this functionality, set the
optional argument 'followlinks' to true.
Caution: if you pass a relative pathname for top, don't change the
current working directory between resumptions of walk. walk never
changes the current directory, and assumes that the client doesn't
either.
Example:
import os
from os.path import join, getsize
for root, dirs, files in os.walk('python/Lib/email'):
print(root, "consumes", end="")
print(sum([getsize(join(root, name)) for name in files]), end="")
print("bytes in", len(files), "non-directory files")
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
islink, join, isdir = path.islink, path.join, path.isdir
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
# Note that listdir is global in this module due
# to earlier import-*.
names = listdir(top)
except OSError as err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
new_path = join(top, name)
if followlinks or not islink(new_path):
yield from walk(new_path, topdown, onerror, followlinks)
if not topdown:
yield top, dirs, nondirs
__all__.append("walk")
if {open, stat} <= supports_dir_fd and {listdir, stat} <= supports_fd:
def fwalk(top=".", topdown=True, onerror=None, *, follow_symlinks=False, dir_fd=None):
"""Directory tree generator.
This behaves exactly like walk(), except that it yields a 4-tuple
dirpath, dirnames, filenames, dirfd
`dirpath`, `dirnames` and `filenames` are identical to walk() output,
and `dirfd` is a file descriptor referring to the directory `dirpath`.
The advantage of fwalk() over walk() is that it's safe against symlink
races (when follow_symlinks is False).
If dir_fd is not None, it should be a file descriptor open to a directory,
and top should be relative; top will then be relative to that directory.
(dir_fd is always supported for fwalk.)
Caution:
Since fwalk() yields file descriptors, those are only valid until the
next iteration step, so you should dup() them if you want to keep them
for a longer period.
Example:
import os
for root, dirs, files, rootfd in os.fwalk('python/Lib/email'):
print(root, "consumes", end="")
print(sum([os.stat(name, dir_fd=rootfd).st_size for name in files]),
end="")
print("bytes in", len(files), "non-directory files")
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
# Note: To guard against symlink races, we use the standard
# lstat()/open()/fstat() trick.
orig_st = stat(top, follow_symlinks=False, dir_fd=dir_fd)
topfd = open(top, O_RDONLY, dir_fd=dir_fd)
try:
if (follow_symlinks or (st.S_ISDIR(orig_st.st_mode) and
path.samestat(orig_st, stat(topfd)))):
yield from _fwalk(topfd, top, topdown, onerror, follow_symlinks)
finally:
close(topfd)
def _fwalk(topfd, toppath, topdown, onerror, follow_symlinks):
# Note: This uses O(depth of the directory tree) file descriptors: if
# necessary, it can be adapted to only require O(1) FDs, see issue
# #13734.
names = listdir(topfd)
dirs, nondirs = [], []
for name in names:
try:
# Here, we don't use AT_SYMLINK_NOFOLLOW to be consistent with
# walk() which reports symlinks to directories as directories.
# We do however check for symlinks before recursing into
# a subdirectory.
if st.S_ISDIR(stat(name, dir_fd=topfd).st_mode):
dirs.append(name)
else:
nondirs.append(name)
except FileNotFoundError:
try:
# Add dangling symlinks, ignore disappeared files
if st.S_ISLNK(stat(name, dir_fd=topfd, follow_symlinks=False)
.st_mode):
nondirs.append(name)
except FileNotFoundError:
continue
if topdown:
yield toppath, dirs, nondirs, topfd
for name in dirs:
try:
orig_st = stat(name, dir_fd=topfd, follow_symlinks=follow_symlinks)
dirfd = open(name, O_RDONLY, dir_fd=topfd)
except OSError as err:
if onerror is not None:
onerror(err)
return
try:
if follow_symlinks or path.samestat(orig_st, stat(dirfd)):
dirpath = path.join(toppath, name)
yield from _fwalk(dirfd, dirpath, topdown, onerror, follow_symlinks)
finally:
close(dirfd)
if not topdown:
yield toppath, dirs, nondirs, topfd
__all__.append("fwalk")
# Make sure os.environ exists, at least
try:
environ
except NameError:
environ = {}
def execl(file, *args):
"""execl(file, *args)
Execute the executable file with argument list args, replacing the
current process. """
execv(file, args)
def execle(file, *args):
"""execle(file, *args, env)
Execute the executable file with argument list args and
environment env, replacing the current process. """
env = args[-1]
execve(file, args[:-1], env)
def execlp(file, *args):
"""execlp(file, *args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process. """
execvp(file, args)
def execlpe(file, *args):
"""execlpe(file, *args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env, replacing the current
process. """
env = args[-1]
execvpe(file, args[:-1], env)
def execvp(file, args):
"""execvp(file, args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process.
args may be a list or tuple of strings. """
_execvpe(file, args)
def execvpe(file, args, env):
"""execvpe(file, args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env , replacing the
current process.
args may be a list or tuple of strings. """
_execvpe(file, args, env)
__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
def _execvpe(file, args, env=None):
if env is not None:
exec_func = execve
argrest = (args, env)
else:
exec_func = execv
argrest = (args,)
env = environ
head, tail = path.split(file)
if head:
exec_func(file, *argrest)
return
last_exc = saved_exc = None
saved_tb = None
path_list = get_exec_path(env)
if name != 'nt':
file = fsencode(file)
path_list = map(fsencode, path_list)
for dir in path_list:
fullname = path.join(dir, file)
try:
exec_func(fullname, *argrest)
except OSError as e:
last_exc = e
tb = sys.exc_info()[2]
if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR
and saved_exc is None):
saved_exc = e
saved_tb = tb
if saved_exc:
raise saved_exc.with_traceback(saved_tb)
raise last_exc.with_traceback(tb)
def get_exec_path(env=None):
"""Returns the sequence of directories that will be searched for the
named executable (similar to a shell) when launching a process.
*env* must be an environment variable dict or None. If *env* is None,
os.environ will be used.
"""
# Use a local import instead of a global import to limit the number of
# modules loaded at startup: the os module is always loaded at startup by
# Python. It may also avoid a bootstrap issue.
import warnings
if env is None:
env = environ
# {b'PATH': ...}.get('PATH') and {'PATH': ...}.get(b'PATH') emit a
# BytesWarning when using python -b or python -bb: ignore the warning
with warnings.catch_warnings():
warnings.simplefilter("ignore", BytesWarning)
try:
path_list = env.get('PATH')
except TypeError:
path_list = None
if supports_bytes_environ:
try:
path_listb = env[b'PATH']
except (KeyError, TypeError):
pass
else:
if path_list is not None:
raise ValueError(
"env cannot contain 'PATH' and b'PATH' keys")
path_list = path_listb
if path_list is not None and isinstance(path_list, bytes):
path_list = fsdecode(path_list)
if path_list is None:
path_list = defpath
return path_list.split(pathsep)
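# Minimal sketch of get_exec_path behaviour (the PATH value below is an
# assumption, using a POSIX-style separator, purely for illustration).
def _get_exec_path_example():
    live = get_exec_path()          # consults os.environ
    custom = get_exec_path({'PATH': '/usr/local/bin:/usr/bin'})
    return live, custom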
# Change environ to automatically call putenv(), unsetenv if they exist.
from _collections_abc import MutableMapping
class _Environ(MutableMapping):
def __init__(self, data, encodekey, decodekey, encodevalue, decodevalue, putenv, unsetenv):
self.encodekey = encodekey
self.decodekey = decodekey
self.encodevalue = encodevalue
self.decodevalue = decodevalue
self.putenv = putenv
self.unsetenv = unsetenv
self._data = data
def __getitem__(self, key):
try:
value = self._data[self.encodekey(key)]
except KeyError:
# raise KeyError with the original key value
raise KeyError(key) from None
return self.decodevalue(value)
def __setitem__(self, key, value):
key = self.encodekey(key)
value = self.encodevalue(value)
self.putenv(key, value)
self._data[key] = value
def __delitem__(self, key):
encodedkey = self.encodekey(key)
self.unsetenv(encodedkey)
try:
del self._data[encodedkey]
except KeyError:
# raise KeyError with the original key value
raise KeyError(key) from None
def __iter__(self):
for key in self._data:
yield self.decodekey(key)
def __len__(self):
return len(self._data)
def __repr__(self):
return 'environ({{{}}})'.format(', '.join(
('{!r}: {!r}'.format(self.decodekey(key), self.decodevalue(value))
for key, value in self._data.items())))
def copy(self):
return dict(self)
def setdefault(self, key, value):
if key not in self:
self[key] = value
return self[key]
try:
_putenv = putenv
except NameError:
_putenv = lambda key, value: None
else:
if "putenv" not in __all__:
__all__.append("putenv")
try:
_unsetenv = unsetenv
except NameError:
_unsetenv = lambda key: _putenv(key, "")
else:
if "unsetenv" not in __all__:
__all__.append("unsetenv")
def _createenviron():
if name == 'nt':
# Where Env Var Names Must Be UPPERCASE
def check_str(value):
if not isinstance(value, str):
raise TypeError("str expected, not %s" % type(value).__name__)
return value
encode = check_str
decode = str
def encodekey(key):
return encode(key).upper()
data = {}
for key, value in environ.items():
data[encodekey(key)] = value
else:
# Where Env Var Names Can Be Mixed Case
encoding = sys.getfilesystemencoding()
def encode(value):
if not isinstance(value, str):
raise TypeError("str expected, not %s" % type(value).__name__)
return value.encode(encoding, 'surrogateescape')
def decode(value):
return value.decode(encoding, 'surrogateescape')
encodekey = encode
data = environ
return _Environ(data,
encodekey, decode,
encode, decode,
_putenv, _unsetenv)
# unicode environ
environ = _createenviron()
del _createenviron
def getenv(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default.
key, default and the result are str."""
return environ.get(key, default)
supports_bytes_environ = (name != 'nt')
__all__.extend(("getenv", "supports_bytes_environ"))
if supports_bytes_environ:
def _check_bytes(value):
if not isinstance(value, bytes):
raise TypeError("bytes expected, not %s" % type(value).__name__)
return value
# bytes environ
environb = _Environ(environ._data,
_check_bytes, bytes,
_check_bytes, bytes,
_putenv, _unsetenv)
del _check_bytes
def getenvb(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default.
key, default and the result are bytes."""
return environb.get(key, default)
__all__.extend(("environb", "getenvb"))
def _fscodec():
encoding = sys.getfilesystemencoding()
if encoding == 'mbcs':
errors = 'strict'
else:
errors = 'surrogateescape'
def fsencode(filename):
"""
Encode filename to the filesystem encoding with 'surrogateescape' error
handler, return bytes unchanged. On Windows, use 'strict' error handler if
the file system encoding is 'mbcs' (which is the default encoding).
"""
if isinstance(filename, bytes):
return filename
elif isinstance(filename, str):
return filename.encode(encoding, errors)
else:
raise TypeError("expect bytes or str, not %s" % type(filename).__name__)
def fsdecode(filename):
"""
Decode filename from the filesystem encoding with 'surrogateescape' error
handler, return str unchanged. On Windows, use 'strict' error handler if
the file system encoding is 'mbcs' (which is the default encoding).
"""
if isinstance(filename, str):
return filename
elif isinstance(filename, bytes):
return filename.decode(encoding, errors)
else:
raise TypeError("expect bytes or str, not %s" % type(filename).__name__)
return fsencode, fsdecode
fsencode, fsdecode = _fscodec()
del _fscodec
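# Round-trip sketch for fsencode()/fsdecode() (illustrative file name; the
# exact bytes depend on the active filesystem encoding).
def _fscodec_example(name="caf\u00e9.txt"):
    encoded = fsencode(name)            # str -> bytes
    assert fsdecode(encoded) == name    # bytes -> str restores the original
    return encoded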
# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):
P_WAIT = 0
P_NOWAIT = P_NOWAITO = 1
__all__.extend(["P_WAIT", "P_NOWAIT", "P_NOWAITO"])
# XXX Should we support P_DETACH? I suppose it could fork()**2
# and close the std I/O streams. Also, P_OVERLAY is the same
# as execv*()?
def _spawnvef(mode, file, args, env, func):
# Internal helper; func is the exec*() function to use
pid = fork()
if not pid:
# Child
try:
if env is None:
func(file, args)
else:
func(file, args, env)
except:
_exit(127)
else:
# Parent
if mode == P_NOWAIT:
return pid # Caller is responsible for waiting!
while 1:
wpid, sts = waitpid(pid, 0)
if WIFSTOPPED(sts):
continue
elif WIFSIGNALED(sts):
return -WTERMSIG(sts)
elif WIFEXITED(sts):
return WEXITSTATUS(sts)
else:
raise OSError("Not stopped, signaled or exited???")
def spawnv(mode, file, args):
"""spawnv(mode, file, args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execv)
def spawnve(mode, file, args, env):
"""spawnve(mode, file, args, env) -> integer
Execute file with arguments from args in a subprocess with the
specified environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execve)
    # Note: spawnvp[e] isn't currently supported on Windows
def spawnvp(mode, file, args):
"""spawnvp(mode, file, args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execvp)
def spawnvpe(mode, file, args, env):
"""spawnvpe(mode, file, args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execvpe)
__all__.extend(["spawnv", "spawnve", "spawnvp", "spawnvpe"])
if _exists("spawnv"):
# These aren't supplied by the basic Windows code
# but can be easily implemented in Python
def spawnl(mode, file, *args):
"""spawnl(mode, file, *args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnv(mode, file, args)
def spawnle(mode, file, *args):
"""spawnle(mode, file, *args, env) -> integer
Execute file with arguments from args in a subprocess with the
supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnve(mode, file, args[:-1], env)
__all__.extend(["spawnl", "spawnle"])
if _exists("spawnvp"):
# At the moment, Windows doesn't implement spawnvp[e],
# so it won't have spawnlp[e] either.
def spawnlp(mode, file, *args):
"""spawnlp(mode, file, *args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnvp(mode, file, args)
def spawnlpe(mode, file, *args):
"""spawnlpe(mode, file, *args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnvpe(mode, file, args[:-1], env)
__all__.extend(["spawnlp", "spawnlpe"])
# Supply os.popen()
def popen(cmd, mode="r", buffering=-1):
if not isinstance(cmd, str):
raise TypeError("invalid cmd type (%s, expected string)" % type(cmd))
if mode not in ("r", "w"):
raise ValueError("invalid mode %r" % mode)
if buffering == 0 or buffering is None:
raise ValueError("popen() does not support unbuffered streams")
import subprocess, io
if mode == "r":
proc = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
bufsize=buffering)
return _wrap_close(io.TextIOWrapper(proc.stdout), proc)
else:
proc = subprocess.Popen(cmd,
shell=True,
stdin=subprocess.PIPE,
bufsize=buffering)
return _wrap_close(io.TextIOWrapper(proc.stdin), proc)
# Helper for popen() -- a proxy for a file whose close waits for the process
class _wrap_close:
def __init__(self, stream, proc):
self._stream = stream
self._proc = proc
def close(self):
self._stream.close()
returncode = self._proc.wait()
if returncode == 0:
return None
if name == 'nt':
return returncode
else:
return returncode << 8 # Shift left to match old behavior
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __getattr__(self, name):
return getattr(self._stream, name)
def __iter__(self):
return iter(self._stream)
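# Minimal usage sketch for popen() (the echo command is an assumption and
# relies on a shell being available). _wrap_close supports the with-statement,
# so close(), which waits for the child process, happens on block exit.
def _popen_example():
    with popen("echo hello") as stream:
        return stream.read().strip()    # -> 'hello'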
# Supply os.fdopen()
def fdopen(fd, *args, **kwargs):
if not isinstance(fd, int):
raise TypeError("invalid fd type (%s, expected integer)" % type(fd))
import io
return io.open(fd, *args, **kwargs)
|
seet61/one
|
v2/os.py
|
Python
|
bsd-2-clause
| 34,596
|
from django.utils import timezone
from django.views import generic
from django.shortcuts import render
from events.models import Event
# home page
class IndexView(generic.ListView):
template_name = 'mysite/index.html'
def get_queryset(self):
return Event.objects.filter(
event_time__gte=timezone.now()
)[:5]
# Holding page
def coming_soon(request):
event_list = Event.objects.filter(
event_time__gte=timezone.now()
)[:5]
return render(request, 'mysite/coming_soon.html', {
'event_list': event_list,
})
|
cs98jrb/Trinity
|
mysite/accounts/views/index.py
|
Python
|
gpl-2.0
| 577
|
from django.contrib.auth.models import Group
from django.contrib import messages
from django.shortcuts import render, redirect
from django.db import transaction
from jsonview.decorators import json_view
from airmozilla.base import mozillians
from airmozilla.manage import forms
from .decorators import (
staff_required,
permission_required,
cancel_redirect
)
@staff_required
@permission_required('auth.change_group')
def groups(request):
"""Group editor: view groups and change group permissions."""
groups = Group.objects.all()
return render(request, 'manage/groups.html', {'groups': groups})
@staff_required
@permission_required('auth.change_group')
@cancel_redirect('manage:groups')
@transaction.commit_on_success
def group_edit(request, id):
"""Edit an individual group."""
group = Group.objects.get(id=id)
if request.method == 'POST':
form = forms.GroupEditForm(request.POST, instance=group)
if form.is_valid():
form.save()
messages.info(request, 'Group "%s" saved.' % group.name)
return redirect('manage:groups')
else:
form = forms.GroupEditForm(instance=group)
return render(request, 'manage/group_edit.html',
{'form': form, 'group': group})
@staff_required
@permission_required('auth.add_group')
@transaction.commit_on_success
def group_new(request):
"""Add a new group."""
group = Group()
if request.method == 'POST':
form = forms.GroupEditForm(request.POST, instance=group)
if form.is_valid():
form.save()
messages.success(request, 'Group "%s" created.' % group.name)
return redirect('manage:groups')
else:
form = forms.GroupEditForm(instance=group)
return render(request, 'manage/group_new.html', {'form': form})
@staff_required
@permission_required('auth.delete_group')
@transaction.commit_on_success
def group_remove(request, id):
if request.method == 'POST':
group = Group.objects.get(id=id)
group.delete()
messages.info(request, 'Group "%s" removed.' % group.name)
return redirect('manage:groups')
@permission_required('main.change_event')
@json_view
def curated_groups_autocomplete(request):
q = request.GET.get('q', '').strip()
if not q:
return {'groups': []}
    all_groups = mozillians.get_all_groups_cached()
    def describe_group(group):
        if group['number_of_members'] == 1:
            return '%s (1 member)' % (group['name'],)
        else:
            return (
                '%s (%s members)' % (group['name'], group['number_of_members'])
            )
    groups = [
        (x['name'], describe_group(x))
        for x in all_groups
        if q.lower() in x['name'].lower()
    ]
return {'groups': groups}
|
tannishk/airmozilla
|
airmozilla/manage/views/groups.py
|
Python
|
bsd-3-clause
| 2,799
|
import re
import os.path
import sys
from netaddr import *
from pprint import pprint as pp
import mysql.connector
from datetime import datetime
import configparser as ConfigParser
"""
This script is pretty lazy, and will just add ips all day.
Uses mysql at the end to actually make sure each record is only
in the db once.
"""
class subnetToSql():
def __init__(self):
configFile = './config.cfg'
config = ConfigParser.ConfigParser()
config.read(configFile)
my_config = {
'user': config.get('mysql','user'),
'password': config.get('mysql','password'),
'host': config.get('mysql','host'),
'database': config.get('mysql','database')
}
self.subnetFile = config.get('subnetToSql','file')
self.sql = mysql.connector.connect(**my_config)
self.cursor = self.sql.cursor()
def __exit__(self):
self.cursor.close()
self.sql.close()
def main(self):
add_ip = ("INSERT INTO ip_address (ip) VALUES (%s)")
time = datetime.now()
        print(time.strftime("%Y-%m-%d %H:%M:%S.%f"))
f = open(self.subnetFile,'r')
regex = re.compile('\"(\d+\.\d+\.\d+\.\d+)\"\,(\d+)')
for line in f:
matched = regex.match(line)
time = datetime.now()
if matched:
subnet = IPNetwork(matched.group(1) + "/" + matched.group(2))
if subnet[0].is_private():
print("Skipping private subnet")
continue
ip_array = []
ip_count = 0
for ip in subnet:
# print '%s' % ip.value
ip_number = [ip.value]
ip_array.append(ip_number)
ip_count = ip_count + 1
if ip_count > 1000:
print "INSERTING 1000 records"
self.cursor.executemany(add_ip,ip_array)
self.sql.commit()
ip_count = 0
ip_array = []
print "INSERTING %s records" % ip_count
print time.strftime("%Y-%m-%d %H:%M:%S.%f")
# pp(ip_array)
self.cursor.executemany(add_ip,ip_array)
self.sql.commit()
def dedupe(self):
print("RUNNING: INSERT INTO ip_address_unique (ip) select distinct(ip) from ip_address;")
make_unique = "INSERT INTO ip_address_unique (ip) select distinct(ip) from ip_address;"
self.cursor.execute(make_unique)
self.sql.commit()
if __name__ == "__main__":
main = subnetToSql()
main.main()
main.dedupe()
|
allmightyspiff/domainThing
|
subnetToSQL.py
|
Python
|
mit
| 2,715
|
#coding=UTF-8
'''
Created on 2011-7-6
@author: Administrator
'''
from urlparse import urlparse
import cookielib
from pyquery.pyquery import PyQuery #@UnresolvedImport
import re
import datetime #@UnusedImport
import urllib2
from lxml import etree #@UnresolvedImport
from lxml.cssselect import CSSSelector #@UnresolvedImport
import simplejson as js #@UnusedImport @UnresolvedImport
from config import housetype, checkPath, makePath,fitment,toward,deposit
import threading
from BeautifulSoup import BeautifulSoup #@UnresolvedImport
from spider.globalvars import fetch_quere
import time
import gc
homepath="e:\\home\\spider\\"
class LinkCrawl(object):
def __init__(self,citycode="",kind=""):
cj = cookielib.MozillaCookieJar()
self.br=urllib2.build_opener(urllib2.HTTPHandler(),urllib2.HTTPCookieProcessor(cj),urllib2.HTTPRedirectHandler())
self.header={
"User-Agent":'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; GTB6.6; .NET CLR 3.5.30729)',
}
self.endtime=str(datetime.date.today() -datetime.timedelta(days=7))
self.clinks=[]
self.pn=[]
self.citycode=citycode
self.baseUrl="http://%s.ganji.com"%self.citycode
self.kind=kind
if kind=="1":#出售
self.urlpath="/fang5/a1u2%s/"
self.folder="sell\\"
elif kind=="2":#出租
self.urlpath="/fang1/u2%s/"
self.folder="rent\\"
elif kind=="3":#求购
self.urlpath="/fang4/u2f0/a1%s/"
self.folder="buy\\"
elif kind=="4":#求租
self.urlpath="/fang2/u2f0/a1%s/"
self.folder="req\\"
def __getAllNeedLinks(self):
cond=True
idx=0
checkit="0"
while cond:
url=self.baseUrl+self.urlpath%("f"+str(idx*32))
#url="http://gz.ganji.com/fang2/u2f0/a1f768/"
print url
try:
req=urllib2.Request(url, None, self.header)
p=self.br.open(req).read()
except:
continue
else:
check=PyQuery(p)("ul.pageLink li a.c").text()
if check==None or check==checkit:
cond=False
break
else:
checkit=check
links=PyQuery(p)("div.list dl")
p=None
print len(links)
for link in links:
lk=self.baseUrl+PyQuery(link)(" a.list_title").attr("href")
if self.kind=="3" or self.kind=="4":
tm=PyQuery(link)("dd span.time").text()
if re.match('''\d{2}-\d{2}''', tm):
Y=int(time.strftime('%Y', time.localtime()))
tm="%s-%s"%(Y,tm.strip())
if tm<self.endtime:
break
elif "分钟" in tm:
pass
elif "小时" in tm:
pass
else:
cond=False
break
if not checkPath(homepath,self.folder,lk):
fetch_quere.put({"mod":"ganji","link":lk,"citycode":self.citycode,"kind":self.kind})
# if lk not in self.clinks:
# self.clinks.append(lk)
idx=idx+1
print len(self.clinks)
def runme(self):
#self.__initPageNum()
self.__getAllNeedLinks()
class ContentCrawl(object):
def __init__(self,links,citycode,kind):
cj = cookielib.MozillaCookieJar()
self.br=urllib2.build_opener(urllib2.HTTPHandler(),urllib2.HTTPCookieProcessor(cj),urllib2.HTTPRedirectHandler())
self.pdb={}
self.header={
"User-Agent":'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; GTB6.6; .NET CLR 3.5.30729)',
}
self.urls=links
self.kind=kind
self.fd={}
self.citycode=citycode
if kind=="1":
self.folder="sell\\"
elif kind=="2":
self.folder="rent\\"
elif kind=="3":
self.folder="buy\\"
else:
self.folder="req\\"
        # regexes for fields embedded in the page's JS
self.xiaoqu_regex="xiaoqu : '(.*?)',"
self.address_regex="address : '(.*?)',"
self.house_room_regex="(\d+)室"
self.house_hall_regex="(\d+)厅"
self.house_toilet_regex="(\d+)卫"
self.house_desc_regex="房屋概况</p>(.*?)</p>"
self.house_floor_regex="<li>楼层: 第(\d+)层/总(\d+)层</li>"
self.house_totalarea_regex="<li>面积: (\d+) ㎡</li>"
self.house_totalarea_regex_qiu="(\d+)㎡"
self.house_type_regex3="<li>户型: (.*)</li>"
self.house_toward_regex="<li>朝向: (.*)</li>"
self.house_type_regex="<li>类型: (.*)</li>"
self.cityarea_regex="<li>区域:([\s\S]*?)</li>"
self.house_age_regex="<li>房龄: (\d+) 年</li>"
self.house_fitment_regex="<li>装修: (.*)</li>"
self.house_support_regex="<li>配置: (.*) </li>"
self.house_price_regex="<li>售价: <span>(.*)</span>.*</li>"
self.house_price_regex_2="<li>租金: <span>(.*)</span>.*</li>"
self.borough_name_regex="<li>小区:(.*)</li>"
self.house_deposit_regex="<li>租金: (.*)</li>"
self.house_price_regex_zu = "<li>期望租金: (.*)</li>"
self.borough_name_regex_reg = "<li>期望小区: (.*)</li>"
self.house_addr_regex_reg = "<li>小区地址:(.*)</li>"
self.house_price_regex_gou = "<li>期望售价: (.*)</li>"
def __addText(self,tag, no_tail=False):
text = []
if tag.text:
text.append(tag.text)
for child in tag.getchildren():
text.append(self.__addText(child))
if not no_tail and tag.tail:
text.append(tag.tail)
return "".join(text)
def getText(self,html):
text=[]
for tag in html:
text.append(self.__addText(tag, no_tail=True))
return ' '.join([t.strip() for t in text if t.strip()])
def sell(self,url):
request = urllib2.Request(url, None, self.header)
response = urllib2.urlopen(request).read()
tree = etree.HTML(response)
soup =BeautifulSoup(response)
self.fd['house_flag'] = 1
self.fd['belong']=0
detail_mer = soup.find('div',{'class':'detail_mer'})
#非个人房源 return
if u"个人房源" not in str(detail_mer):return
Dname = detail_mer.find('span',{'class':'Dname'})
if Dname:
self.fd['owner_name'] = Dname.string
else:
self.fd['owner_name'] = None
ganji_phone_call_class = detail_mer.find('span',{'class':'ganji_phone_call_class'})
if ganji_phone_call_class:
self.fd['owner_phone'] = ganji_phone_call_class.contents[0]
if str(ganji_phone_call_class).find('src='):
self.fd['owner_phone'] = 'http://'+urlparse(url)[1]+ganji_phone_call_class.img['src']
else:
self.fd['owner_phone'] = None
else:
self.fd['owner_phone'] = None
        # no contact phone number -> return
if not self.fd['owner_phone']:return
if re.search("<span class=\"city\"><a .*?>(.*?)</a>", response):
cityname=re.search("<span class=\"city\"><a .*?>(.*?)</a>", response).group(1)
self.fd['cityname'] = cityname
else:
return
if re.search(self.house_floor_regex, response):
house_floor=re.search(self.house_floor_regex, response).group(1)
house_topfloor=re.search(self.house_floor_regex, response).group(2)
self.fd['house_floor'] = house_floor
self.fd['house_topfloor'] = house_topfloor
else:
self.fd['house_floor'] = None
self.fd['house_topfloor'] = None
if re.search(self.house_totalarea_regex, response):
house_totalarea=re.search(self.house_totalarea_regex, response).group(1)
self.fd['house_totalarea'] = house_totalarea
else:
self.fd['house_totalarea'] = None
        # property type
if re.search(self.house_type_regex, response):
house_type=re.search(self.house_type_regex, response).group(1)
self.fd['house_type'] = housetype(house_type)
else:
self.fd['house_type'] = None
if re.search(self.house_price_regex, response):
house_price=re.search(self.house_price_regex, response).group(1)
if house_price=="面议":
house_price="0"
self.fd['house_price'] = house_price
else:
self.fd['house_price'] = None
posttime=CSSSelector('span.pub_time')(tree)!=None and CSSSelector('span.pub_time')(tree)[0].text.strip() or None
if posttime:
Y=int(time.strftime('%Y', time.localtime()))
M=int(posttime.split(' ')[0].split('-')[0])
D=int(posttime.split(' ')[0].split('-')[1])
s = datetime.datetime(Y,M,D,0,0)
posttime=int(time.mktime(s.timetuple()))
self.fd['posttime'] =posttime
else:
self.fd['posttime'] =None
if re.search(self.house_room_regex, response):
house_room=re.search(self.house_room_regex, response).group(1)
self.fd['house_room'] = house_room
else:
self.fd['house_room'] = '0'
if re.search(self.house_hall_regex, response):
house_hall=re.search(self.house_hall_regex, response).group(1)
self.fd['house_hall'] = house_hall
else:
self.fd['house_hall'] = '0'
if re.search(self.house_toilet_regex, response):
house_toilet=re.search(self.house_toilet_regex, response).group(1)
self.fd['house_toilet'] = house_toilet
else:
self.fd['house_toilet'] = '0'
house_title=CSSSelector("div.detail_title h1")(tree)[0] !=None and CSSSelector("div.detail_title h1")(tree)[0].text.strip() or None
self.fd['house_title'] = house_title
        # description
detail_box = soup.find('div',{'class':'detail_box'})
if detail_box:
house_desc = str(detail_box('p')[1])
self.fd['house_desc'] = re.sub("<.*?>|\n|\r|\t|联系我时请说明是从赶集网上看到的","",house_desc)
else:
self.fd['house_desc'] = None
d_i = soup.find('ul',{'class':'d_i'})
        # residential complex (xiaoqu) name
        # handle the JS-embedded fields first
if re.search(self.xiaoqu_regex, response):
borough_name=re.search(self.xiaoqu_regex, response).group(1)
self.fd['borough_name'] = borough_name
if re.search(self.address_regex, response):
house_addr=re.search(self.address_regex, response).group(1)
self.fd['house_addr'] = house_addr
else:
if d_i.find(text="小区: "):
borough_box = d_i.find(text="小区: ").parent
borough_name = borough_box.find("a")
if borough_name:
self.fd['borough_name'] = borough_name.string
else:
self.fd['borough_name'] = None
                # address
if borough_name and borough_name.nextSibling:
house_addr = borough_name.nextSibling.string
self.fd['house_addr'] = re.sub("\(|\)| ","",house_addr)
else:
self.fd['house_addr'] = None
else:
if re.search(self.borough_name_regex, response):
borough_name=re.search(self.borough_name_regex, response).group(1)
self.fd['borough_name'] = re.sub("\(.*\)| ","",borough_name)
        # district / area
area_box = d_i.find(text="区域: ").parent
area_a = area_box('a')
if area_a and len(area_a)>1:
self.fd['cityarea'] = area_a[0].string
self.fd['section'] = area_a[1].string
elif area_a and len(area_a)==1:
self.fd['cityarea'] = area_a[0].string
self.fd['section'] = None
else:
self.fd['cityarea'] = None
self.fd['section'] = None
if re.search(self.house_age_regex, response):
house_age=re.search(self.house_age_regex, response).group(1)
self.fd['house_age'] = house_age
else:
self.fd['house_age'] = None
        # orientation (facing direction)
if re.search(self.house_toward_regex, response):
house_toward=re.search(self.house_toward_regex, response).group(1)
self.fd['house_toward'] = toward(house_toward)
else:
self.fd['house_toward'] = None
if re.search(self.house_fitment_regex, response):
house_fitment=re.search(self.house_fitment_regex, response).group(1)
self.fd['house_fitment'] = fitment(house_fitment)
else:
self.fd['house_fitment'] = 2
request = None
response = None
soup=None
tree=None
del tree
del request
del response
del soup
def buy(self,url):
self.fd['city'] = self.citycode
self.fd['house_flag'] = 3
# self.fd['belong']="1"
request = urllib2.Request(url, None, self.header)
response = urllib2.urlopen(request).read()
tree = etree.HTML(response)
soup =BeautifulSoup(response)
detail_mer = soup.find('div',{'class':'detail_mer'})
        # not an individual (private owner) listing -> return
if u"个人房源" not in str(detail_mer):return
Dname = detail_mer.find('span',{'class':'Dname'})
if Dname:
self.fd['owner_name'] = Dname.string
else:
self.fd['owner_name'] = None
ganji_phone_call_class = detail_mer.find('span',{'class':'ganji_phone_call_class'})
if ganji_phone_call_class:
self.fd['owner_phone'] = ganji_phone_call_class.contents[0]
if str(ganji_phone_call_class).find('src='):
self.fd['owner_phone'] = 'http://'+urlparse(url)[1]+ganji_phone_call_class.img['src']
else:
self.fd['owner_phone'] = None
else:
self.fd['owner_phone'] = None
        # no contact phone number -> return
if not self.fd['owner_phone']:return
if re.search("<span class=\"city\"><a .*?>(.*?)</a>", response):
cityname=re.search("<span class=\"city\"><a .*?>(.*?)</a>", response).group(1)
self.fd['cityname'] = cityname
else:
return
self.fd['house_floor'] = 0
self.fd['house_topfloor'] = 0
self.fd['house_type'] = 0
self.fd['house_age'] = 0
self.fd['house_toward'] = 0
self.fd['house_fitment'] = 0
if re.search(self.house_totalarea_regex_qiu, response):
house_totalarea=re.search(self.house_totalarea_regex_qiu, response).group(1)
self.fd['house_totalarea'] = house_totalarea
self.fd['house_totalarea_max'] = house_totalarea
self.fd['house_totalarea_min'] = house_totalarea
else:
self.fd['house_totalarea'] = 0
self.fd['house_totalarea_max'] = 0
self.fd['house_totalarea_min'] = 0
if re.search(self.house_price_regex_gou, response):
house_price_zu = re.search(self.house_price_regex_gou, response).group(1)
house_price_zu = house_price_zu.replace('万','')
if house_price_zu.find("以上") != -1:
self.fd['house_price_max'] = 0
self.fd['house_price_min'] = house_price_zu.replace('以上','')
self.fd['house_price'] = self.fd['house_price_min']
elif house_price_zu.find("以下") != -1:
self.fd['house_price_max'] = house_price_zu.replace('以下','')
self.fd['house_price_min'] = 0
self.fd['house_price'] = self.fd['house_price_max']
elif house_price_zu.find("-") != -1:
self.fd['house_price_max'] = house_price_zu.split('-')[1]
self.fd['house_price_min'] = house_price_zu.split('-')[0]
self.fd['house_price'] = house_price_zu.split('-')[1]
else:
self.fd['house_price_max'] = 0
self.fd['house_price_min'] = 0
self.fd['house_price'] = 0
else:
self.fd['house_price_max'] = 0
self.fd['house_price_min'] = 0
self.fd['house_price'] = 0
posttime=CSSSelector('span.pub_time')(tree)!=None and CSSSelector('span.pub_time')(tree)[0].text.strip() or None
if posttime:
Y=int(time.strftime('%Y', time.localtime()))
M=int(posttime.split(' ')[0].split('-')[0])
D=int(posttime.split(' ')[0].split('-')[1])
s = datetime.datetime(Y,M,D,0,0)
posttime=int(time.mktime(s.timetuple()))
self.fd['posttime'] =posttime
else:
self.fd['posttime'] =None
if re.search(self.house_room_regex, response):
house_room=re.search(self.house_room_regex, response).group(1)
self.fd['house_room'] = house_room
else:
self.fd['house_room'] = '0'
if re.search(self.house_hall_regex, response):
house_hall=re.search(self.house_hall_regex, response).group(1)
self.fd['house_hall'] = house_hall
else:
self.fd['house_hall'] = '0'
if re.search(self.house_toilet_regex, response):
house_toilet=re.search(self.house_toilet_regex, response).group(1)
self.fd['house_toilet'] = house_toilet
else:
self.fd['house_toilet'] = '0'
        house_title = CSSSelector("div.detail_title h1")(tree) and CSSSelector("div.detail_title h1")(tree)[0].text.strip() or None
self.fd['house_title'] = house_title
        # description
detail_box = soup.find('div',{'class':'detail_box'})
if detail_box:
house_desc = str(detail_box('p')[1])
self.fd['house_desc'] = re.sub("<.*?>|\n|\r|\t|联系我时请说明是从赶集网上看到的","",house_desc)
else:
self.fd['house_desc'] = None
d_i = soup.find('ul',{'class':'d_i'})
        # residential community name
        # try the JS-embedded value first
if re.search(self.xiaoqu_regex, response):
borough_name=re.search(self.xiaoqu_regex, response).group(1)
self.fd['borough_name'] = borough_name
if re.search(self.address_regex, response):
house_addr=re.search(self.address_regex, response).group(1)
self.fd['house_addr'] = house_addr
else:
if d_i.find(text="小区: "):
borough_box = d_i.find(text="小区: ").parent
borough_name = borough_box.find("a")
if borough_name:
self.fd['borough_name'] = borough_name.string
else:
self.fd['borough_name'] = None
else:
if re.search(self.borough_name_regex_reg, response):
borough_name=re.search(self.borough_name_regex_reg, response).group(1)
self.fd['borough_name'] = borough_name
if re.search(self.house_addr_regex_reg, response):
house_addr=re.search(self.house_addr_regex_reg, response).group(1)
self.fd['house_addr'] = house_addr
else:
self.fd['house_addr'] = ''
        # district / area
area_box = d_i.find(text="区域: ").parent
area_a = area_box('a')
if area_a and len(area_a)>1:
self.fd['cityarea'] = area_a[0].string
self.fd['section'] = area_a[1].string
elif area_a and len(area_a)==1:
self.fd['cityarea'] = area_a[0].string
self.fd['section'] = None
else:
self.fd['cityarea'] = None
self.fd['section'] = None
request = None
response = None
soup=None
tree=None
del tree
del request
del response
del soup
def rent(self,url):
self.fd['city'] = urlparse(url)[1].replace('.ganji.com',"")
request = urllib2.Request(url, None, self.header)
response = urllib2.urlopen(request).read()
tree = etree.HTML(response)
if re.search("<span class=\"city\"><a .*?>(.*?)</a>", response):
cityname=re.search("<span class=\"city\"><a .*?>(.*?)</a>", response).group(1)
self.fd['cityname'] = cityname
else:
return
self.fd['house_flag'] = 2
self.fd['house_type'] = 0
self.fd['house_floor'] = ""
self.fd['house_topfloor'] = ""
soup =BeautifulSoup(response)
detail_mer = soup.find('div',{'class':'detail_mer'})
        # not a personal (owner-posted) listing: return
if u"个人房源" not in str(detail_mer):return
Dname = detail_mer.find('span',{'class':'Dname'})
if Dname:
self.fd['owner_name'] = Dname.string
else:
self.fd['owner_name'] = None
ganji_phone_call_class = detail_mer.find('span',{'class':'ganji_phone_call_class'})
if ganji_phone_call_class:
self.fd['owner_phone'] = ganji_phone_call_class.contents[0]
            if str(ganji_phone_call_class).find('src=') != -1:
self.fd['owner_phone'] = 'http://'+urlparse(url)[1]+ganji_phone_call_class.img['src']
else:
self.fd['owner_phone'] = None
else:
self.fd['owner_phone'] = None
        # no contact phone: return
if not self.fd['owner_phone']:return
if re.search(self.house_totalarea_regex, response):
house_totalarea=re.search(self.house_totalarea_regex, response).group(1)
self.fd['house_totalarea'] = house_totalarea
else:
self.fd['house_totalarea'] = None
if re.search(self.house_price_regex_2, response):
house_price=re.search(self.house_price_regex_2, response).group(1)
if house_price=="面议":
house_price="0"
self.fd['house_price'] = house_price
else:
self.fd['house_price'] = None
# house_price=tree.xpath("/html/body/div[2]/div/div/ul/li/span") and tree.xpath("/html/body/div[2]/div/div/ul/li/span")[0].text.strip() or None
# v['house_price'] = house_price
        posttime = CSSSelector('span.pub_time')(tree) and CSSSelector('span.pub_time')(tree)[0].text.strip() or None
if posttime:
Y=int(time.strftime('%Y', time.localtime()))
M=int(posttime.split(' ')[0].split('-')[0])
D=int(posttime.split(' ')[0].split('-')[1])
s = datetime.datetime(Y,M,D,0,0)
posttime=int(time.mktime(s.timetuple()))
self.fd['posttime'] =posttime
else:
self.fd['posttime'] =None
        house_title = CSSSelector("div.detail_title h1")(tree) and CSSSelector("div.detail_title h1")(tree)[0].text.strip() or None
self.fd['house_title'] = house_title
if re.search(self.house_room_regex, response):
house_room=re.search(self.house_room_regex, response).group(1)
self.fd['house_room'] = house_room
else:
self.fd['house_room'] = '0'
if re.search(self.house_hall_regex, response):
house_hall=re.search(self.house_hall_regex, response).group(1)
self.fd['house_hall'] = house_hall
else:
self.fd['house_hall'] = '0'
if re.search(self.house_toilet_regex, response):
house_toilet=re.search(self.house_toilet_regex, response).group(1)
self.fd['house_toilet'] = house_toilet
else:
self.fd['house_toilet'] = '0'
        house_title = CSSSelector("div.detail_title h1")(tree) and CSSSelector("div.detail_title h1")(tree)[0].text.strip() or None
self.fd['house_title'] = house_title
        # description
detail_box = soup.find('div',{'class':'detail_box'})
if detail_box:
house_desc = str(detail_box('p')[1])
self.fd['house_desc'] = re.sub("<.*?>|\n|\r|\t|联系我时请说明是从赶集网上看到的","",house_desc)
else:
self.fd['house_desc'] = None
d_i = soup.find('ul',{'class':'d_i'})
        # residential community name
        # try the JS-embedded value first
if re.search(self.xiaoqu_regex, response):
borough_name=re.search(self.xiaoqu_regex, response).group(1)
self.fd['borough_name'] = borough_name
if re.search(self.address_regex, response):
house_addr=re.search(self.address_regex, response).group(1)
self.fd['house_addr'] = house_addr
else:
if d_i.find(text="小区: "):
borough_box = d_i.find(text="小区: ").parent
borough_name = borough_box.find("a")
if borough_name:
self.fd['borough_name'] = borough_name.string
else:
self.fd['borough_name'] = None
                # address
if borough_name and borough_name.nextSibling:
house_addr = borough_name.nextSibling.string
self.fd['house_addr'] = re.sub("\(|\)| ","",house_addr)
else:
self.fd['house_addr'] = None
else:
if re.search(self.borough_name_regex, response):
borough_name=re.search(self.borough_name_regex, response).group(1)
self.fd['borough_name'] = re.sub("\(.*\)| ","",borough_name)
        # district / area
area_box = d_i.find(text="区域: ").parent
area_a = area_box('a')
if area_a and len(area_a)>1:
self.fd['cityarea'] = area_a[0].string
self.fd['section'] = area_a[1].string
elif area_a and len(area_a)==1:
self.fd['cityarea'] = area_a[0].string
self.fd['section'] = None
else:
self.fd['cityarea'] = None
self.fd['section'] = None
if re.search(self.house_age_regex, response):
house_age=re.search(self.house_age_regex, response).group(1)
self.fd['house_age'] = house_age
else:
self.fd['house_age'] = None
        # orientation (facing direction)
if re.search(self.house_toward_regex, response):
house_toward=re.search(self.house_toward_regex, response).group(1)
self.fd['house_toward'] = toward(house_toward)
else:
self.fd['house_toward'] = None
if re.search(self.house_fitment_regex, response):
house_fitment=re.search(self.house_fitment_regex, response).group(1)
self.fd['house_fitment'] = fitment(house_fitment)
else:
self.fd['house_fitment'] = 2
if re.search(self.house_deposit_regex, response):
house_deposit=re.search(self.house_deposit_regex, response).group(1)
self.fd['house_deposit'] = deposit(house_deposit)
else:
self.fd['house_deposit'] = None
request = None
response = None
soup=None
tree=None
del tree
del request
del response
del soup
def require(self,url):
self.fd['city'] = urlparse(url)[1].replace('.ganji.com',"")
request = urllib2.Request(url, None, self.header)
response = urllib2.urlopen(request).read()
tree = etree.HTML(response)
if re.search("<span class=\"city\"><a .*?>(.*?)</a>", response):
cityname=re.search("<span class=\"city\"><a .*?>(.*?)</a>", response).group(1)
self.fd['cityname'] = cityname
else:
return
self.fd['house_flag'] = 4
self.fd['house_type'] = 0
self.fd['house_floor'] = ""
self.fd['house_topfloor'] = ""
self.fd['house_totalarea']=0
self.fd['house_age'] = 0
self.fd['house_toward'] = 0
self.fd['house_fitment'] = 0
self.fd['house_deposit'] = 0
self.fd['house_totalarea_max'] = 0
self.fd['house_totalarea_min'] = 0
self.fd['house_totalarea'] = 0
soup =BeautifulSoup(response)
detail_mer = soup.find('div',{'class':'detail_mer'})
        # not a personal (owner-posted) listing: return
if u"个人房源" not in str(detail_mer):return
Dname = detail_mer.find('span',{'class':'Dname'})
if Dname:
self.fd['owner_name'] = Dname.string
else:
self.fd['owner_name'] = None
ganji_phone_call_class = detail_mer.find('span',{'class':'ganji_phone_call_class'})
if ganji_phone_call_class:
self.fd['owner_phone'] = ganji_phone_call_class.contents[0]
            if str(ganji_phone_call_class).find('src=') != -1:
self.fd['owner_phone'] = 'http://'+urlparse(url)[1]+ganji_phone_call_class.img['src']
else:
self.fd['owner_phone'] = None
else:
self.fd['owner_phone'] = None
        # no contact phone: return
if not self.fd['owner_phone']:return
if re.search(self.house_price_regex_zu, response):
house_price_zu = re.search(self.house_price_regex_zu, response).group(1)
house_price_zu = house_price_zu.replace('元/月','')
if house_price_zu.find("以上") != -1:
self.fd['house_price_max'] = 0
self.fd['house_price_min'] = house_price_zu.replace('以上','')
self.fd['house_price'] = house_price_zu.replace('以上','')
elif house_price_zu.find("以下") != -1:
self.fd['house_price_max'] = house_price_zu.replace('以下','')
self.fd['house_price_min'] = 0
self.fd['house_price'] = house_price_zu.replace('以下','')
elif house_price_zu.find("-") != -1:
self.fd['house_price_max'] = house_price_zu.split('-')[1]
self.fd['house_price_min'] = house_price_zu.split('-')[0]
self.fd['house_price'] = house_price_zu.split('-')[1]
else:
self.fd['house_price_max'] = 0
self.fd['house_price_min'] = 0
self.fd['house_price'] = 0
else:
self.fd['house_price_max'] = 0
self.fd['house_price_min'] = 0
self.fd['house_price'] = 0
        posttime = CSSSelector('span.pub_time')(tree) and CSSSelector('span.pub_time')(tree)[0].text.strip() or None
if posttime:
Y=int(time.strftime('%Y', time.localtime()))
M=int(posttime.split(' ')[0].split('-')[0])
D=int(posttime.split(' ')[0].split('-')[1])
s = datetime.datetime(Y,M,D,0,0)
posttime=int(time.mktime(s.timetuple()))
self.fd['posttime'] =posttime
else:
self.fd['posttime'] =None
        house_title = CSSSelector("div.detail_title h1")(tree) and CSSSelector("div.detail_title h1")(tree)[0].text.strip() or None
self.fd['house_title'] = house_title
if re.search(self.house_room_regex, response):
house_room=re.search(self.house_room_regex, response).group(1)
self.fd['house_room'] = house_room
else:
self.fd['house_room'] = '0'
if re.search(self.house_hall_regex, response):
house_hall=re.search(self.house_hall_regex, response).group(1)
self.fd['house_hall'] = house_hall
else:
self.fd['house_hall'] = '0'
if re.search(self.house_toilet_regex, response):
house_toilet=re.search(self.house_toilet_regex, response).group(1)
self.fd['house_toilet'] = house_toilet
else:
self.fd['house_toilet'] = '0'
        house_title = CSSSelector("div.detail_title h1")(tree) and CSSSelector("div.detail_title h1")(tree)[0].text.strip() or None
self.fd['house_title'] = house_title
        # description
detail_box = soup.find('div',{'class':'detail_box'})
if detail_box:
house_desc = str(detail_box('p')[1])
self.fd['house_desc'] = re.sub("<.*?>|\n|\r|\t|联系我时请说明是从赶集网上看到的","",house_desc)
else:
self.fd['house_desc'] = None
d_i = soup.find('ul',{'class':'d_i'})
        # residential community name
        # try the JS-embedded value first
if re.search(self.xiaoqu_regex, response):
borough_name=re.search(self.xiaoqu_regex, response).group(1)
self.fd['borough_name'] = borough_name
if re.search(self.address_regex, response):
house_addr=re.search(self.address_regex, response).group(1)
self.fd['house_addr'] = house_addr
else:
if re.search(self.borough_name_regex_reg, response):
borough_name=re.search(self.borough_name_regex_reg, response).group(1)
self.fd['borough_name'] = borough_name
if re.search(self.house_addr_regex_reg, response):
house_addr=re.search(self.house_addr_regex_reg, response).group(1)
self.fd['house_addr'] = house_addr
else:
self.fd['house_addr'] = ''
        # district / area
area_box = d_i.find(text="区域: ").parent
area_a = area_box('a')
if area_a and len(area_a)>1:
self.fd['cityarea'] = area_a[0].string
self.fd['section'] = area_a[1].string
elif area_a and len(area_a)==1:
self.fd['cityarea'] = area_a[0].string
self.fd['section'] = None
else:
self.fd['cityarea'] = None
self.fd['section'] = None
request = None
response = None
soup=None
tree=None
del tree
del request
del response
del soup
def extractDict(self):
if checkPath(homepath,self.folder,self.urls):
pass
else:
try:
if self.kind=="1":
self.sell(self.urls)
elif self.kind=="2":
self.rent(self.urls)
elif self.kind=="3":
self.buy(self.urls)
else:
self.require(self.urls)
makePath(homepath,self.folder,self.urls)
                # skip listings older than seven days
                if (time.time() - self.fd["posttime"]) > 7*24*3600: return
except:pass
self.fd["c"]="houseapi"
self.fd["a"]="savehouse"
self.fd["is_checked"] = 1
self.fd["web_flag"] = "gj"
return self.fd
if not self.fd["is_checked"]:
for i in self.fd.items():
print i[0],i[1]
print "*"*80
# if len(self.fd)==7 or len(self.fd)==17:
# print "#####################################"
# continue
# req=urllib2.Request("http://site.jjr360.com/app.php", urllib.urlencode(self.fd))
# p=self.br.open(req).read().strip()
# print p.decode('gbk')
# print "*"*80
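# --- Illustrative sketch, not part of the original spider --------------------
# buy() and require() above repeat the same inline parsing of price-range
# strings such as u'80-100', u'50以上' ("50 or more") and u'30以下' ("30 or
# less").  parse_price_range is a hypothetical helper that exists only in this
# sketch to make that logic explicit.
def parse_price_range(raw, unit=u'万'):
    value = raw.replace(unit, '')
    if value.find(u'以上') != -1:            # lower bound only
        low = value.replace(u'以上', '')
        return {'price': low, 'min': low, 'max': 0}
    if value.find(u'以下') != -1:            # upper bound only
        high = value.replace(u'以下', '')
        return {'price': high, 'min': 0, 'max': high}
    if value.find('-') != -1:                # explicit "low-high" range
        low, high = value.split('-')[0], value.split('-')[1]
        return {'price': high, 'min': low, 'max': high}
    return {'price': 0, 'min': 0, 'max': 0}
# e.g. parse_price_range(u'80-100万') == {'price': u'100', 'min': u'80', 'max': u'100'}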
class fetchData(threading.Thread):
def __init__(self,d):
threading.Thread.__init__(self)
self.d=d
def run(self):
lc=LinkCrawl(self.d["citycode"],self.d["kind"])
clinks=lc.runme()
cc=ContentCrawl(clinks,self.d["citycode"],self.d["kind"])
cc.extractDict()
class getLinksThread(threading.Thread):
def __init__(self,d):
threading.Thread.__init__(self)
self.d=d
def run(self):
gc.enable()
lc=LinkCrawl(self.d["citycode"],self.d["kind"])
lc.runme()
del gc.garbage[:]
def getLinks(d):
gc.enable()
lc=LinkCrawl(d["citycode"],d["kind"])
lc.runme()
del gc.garbage[:]
def getContent(clinks,citycode,kind):
gc.enable()
cc=ContentCrawl(clinks,citycode,kind)
fd=cc.extractDict()
del gc.garbage[:]
return fd
if __name__=="__main__":
lc=LinkCrawl(citycode="su",kind="1")
lc.runme()#
#url1 = "http://su.ganji.com/fang5/11071015_233901.htm"
#url2 = "http://su.ganji.com/fang1/11071017_418972.htm"
#url3 = "http://su.ganji.com/fang4/11062413_4152.htm"
#url4 = "http://su.ganji.com/fang2/11070900_21214.htm"
#cc=ContentCrawl([url3],citycode="su",kind="3")
#cc.extractDict()
# while 1:
# for i in range(1,5):
# k = "%s" % str(i)
# try:
# lc=LinkCrawl(citycode="su",kind=k)
# clinks=lc.runme()
# cc=ContentCrawl(clinks,citycode="su",kind=k)
# cc.extractDict()
# except:
# pass
|
ptphp/PyLib
|
src/webpy1/src/spider/ganji.py
|
Python
|
apache-2.0
| 38,456
|
from __future__ import absolute_import
from six import text_type
from typing import Any, Dict, Union, Mapping, Optional
from django.conf import settings
from django.utils import timezone
from django.contrib.sessions.models import Session as djSession
import sockjs.tornado
from sockjs.tornado.session import ConnectionInfo
import tornado.ioloop
import ujson
import logging
import time
from zerver.models import UserProfile, get_user_profile_by_id, get_client
from zerver.lib.queue import queue_json_publish
from zerver.lib.actions import check_send_message, extract_recipients
from zerver.decorator import JsonableError
from zerver.lib.utils import statsd
from zerver.lib.event_queue import get_client_descriptor
from zerver.middleware import record_request_start_data, record_request_stop_data, \
record_request_restart_data, write_log_line, format_timedelta
from zerver.lib.redis_utils import get_redis_client
from zerver.lib.session_user import get_session_user
logger = logging.getLogger('zulip.socket')
def get_user_profile(session_id):
# type: (Optional[text_type]) -> Optional[UserProfile]
if session_id is None:
return None
try:
djsession = djSession.objects.get(expire_date__gt=timezone.now(),
session_key=session_id)
except djSession.DoesNotExist:
return None
try:
return UserProfile.objects.get(pk=get_session_user(djsession))
except (UserProfile.DoesNotExist, KeyError):
return None
connections = dict() # type: Dict[Union[int, str], SocketConnection]
def get_connection(id):
# type: (Union[int, str]) -> SocketConnection
return connections.get(id)
def register_connection(id, conn):
# type: (Union[int, str], SocketConnection) -> None
# Kill any old connections if they exist
if id in connections:
connections[id].close()
conn.client_id = id
connections[conn.client_id] = conn
def deregister_connection(conn):
# type: (SocketConnection) -> None
del connections[conn.client_id]
redis_client = get_redis_client()
def req_redis_key(req_id):
# type: (text_type) -> text_type
return u'socket_req_status:%s' % (req_id,)
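# --- Illustrative sketch, not part of zerver ---------------------------------
# Shows how a request id's status round-trips through Redis: on_message()
# below records 'received' under req_redis_key(), and authenticate_client()
# later reads it back for 'status_inquiries'.  The function name here is an
# assumption of this example only.
def _demo_request_status_roundtrip(req_id):
    # type: (text_type) -> Dict[str, Any]
    key = req_redis_key(req_id)
    with redis_client.pipeline() as pipeline:
        pipeline.hmset(key, {'status': 'received'})
        pipeline.expire(key, 60 * 60 * 24)  # keep the status around for a day
        pipeline.execute()
    status = redis_client.hgetall(key)
    if len(status) == 0:
        status['status'] = 'not_received'
    if 'response' in status:
        status['response'] = ujson.loads(status['response'])
    return status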
class SocketAuthError(Exception):
def __init__(self, msg):
# type: (str) -> None
self.msg = msg
class CloseErrorInfo(object):
def __init__(self, status_code, err_msg):
# type: (int, str) -> None
self.status_code = status_code
self.err_msg = err_msg
class SocketConnection(sockjs.tornado.SockJSConnection):
client_id = None # type: Optional[Union[int, str]]
def on_open(self, info):
# type: (ConnectionInfo) -> None
log_data = dict(extra='[transport=%s]' % (self.session.transport_name,))
record_request_start_data(log_data)
ioloop = tornado.ioloop.IOLoop.instance()
self.authenticated = False
self.session.user_profile = None
self.close_info = None # type: CloseErrorInfo
self.did_close = False
try:
self.browser_session_id = info.get_cookie(settings.SESSION_COOKIE_NAME).value
self.csrf_token = info.get_cookie(settings.CSRF_COOKIE_NAME).value
except AttributeError:
# The request didn't contain the necessary cookie values. We can't
# close immediately because sockjs-tornado doesn't expect a close
# inside on_open(), so do it on the next tick.
self.close_info = CloseErrorInfo(403, "Initial cookie lacked required values")
ioloop.add_callback(self.close)
return
def auth_timeout():
# type: () -> None
self.close_info = CloseErrorInfo(408, "Timeout while waiting for authentication")
self.close()
self.timeout_handle = ioloop.add_timeout(time.time() + 10, auth_timeout)
write_log_line(log_data, path='/socket/open', method='SOCKET',
remote_ip=info.ip, email='unknown', client_name='?')
def authenticate_client(self, msg):
# type: (Dict[str, Any]) -> None
if self.authenticated:
self.session.send_message({'req_id': msg['req_id'], 'type': 'response',
'response': {'result': 'error', 'msg': 'Already authenticated'}})
return
user_profile = get_user_profile(self.browser_session_id)
if user_profile is None:
raise SocketAuthError('Unknown or missing session')
self.session.user_profile = user_profile
if msg['request']['csrf_token'] != self.csrf_token:
raise SocketAuthError('CSRF token does not match that in cookie')
if 'queue_id' not in msg['request']:
raise SocketAuthError("Missing 'queue_id' argument")
queue_id = msg['request']['queue_id']
client = get_client_descriptor(queue_id)
if client is None:
raise SocketAuthError('Bad event queue id: %s' % (queue_id,))
if user_profile.id != client.user_profile_id:
raise SocketAuthError("You are not the owner of the queue with id '%s'" % (queue_id,))
self.authenticated = True
register_connection(queue_id, self)
response = {'req_id': msg['req_id'], 'type': 'response',
'response': {'result': 'success', 'msg': ''}}
status_inquiries = msg['request'].get('status_inquiries')
if status_inquiries is not None:
results = {}
for inquiry in status_inquiries:
status = redis_client.hgetall(req_redis_key(inquiry))
if len(status) == 0:
status['status'] = 'not_received'
if 'response' in status:
status['response'] = ujson.loads(status['response'])
results[str(inquiry)] = status
response['response']['status_inquiries'] = results
self.session.send_message(response)
ioloop = tornado.ioloop.IOLoop.instance()
ioloop.remove_timeout(self.timeout_handle)
def on_message(self, msg_raw):
# type: (str) -> None
log_data = dict(extra='[transport=%s' % (self.session.transport_name,))
record_request_start_data(log_data)
msg = ujson.loads(msg_raw)
if self.did_close:
logger.info("Received message on already closed socket! transport=%s user=%s client_id=%s"
% (self.session.transport_name,
self.session.user_profile.email if self.session.user_profile is not None else 'unknown',
self.client_id))
self.session.send_message({'req_id': msg['req_id'], 'type': 'ack'})
if msg['type'] == 'auth':
log_data['extra'] += ']'
try:
self.authenticate_client(msg)
# TODO: Fill in the correct client
write_log_line(log_data, path='/socket/auth', method='SOCKET',
remote_ip=self.session.conn_info.ip,
email=self.session.user_profile.email,
client_name='?')
except SocketAuthError as e:
response = {'result': 'error', 'msg': e.msg}
self.session.send_message({'req_id': msg['req_id'], 'type': 'response',
'response': response})
write_log_line(log_data, path='/socket/auth', method='SOCKET',
remote_ip=self.session.conn_info.ip,
email='unknown', client_name='?',
status_code=403, error_content=ujson.dumps(response))
return
else:
if not self.authenticated:
response = {'result': 'error', 'msg': "Not yet authenticated"}
self.session.send_message({'req_id': msg['req_id'], 'type': 'response',
'response': response})
write_log_line(log_data, path='/socket/service_request', method='SOCKET',
remote_ip=self.session.conn_info.ip,
email='unknown', client_name='?',
status_code=403, error_content=ujson.dumps(response))
return
redis_key = req_redis_key(msg['req_id'])
with redis_client.pipeline() as pipeline:
pipeline.hmset(redis_key, {'status': 'received'})
pipeline.expire(redis_key, 60 * 60 * 24)
pipeline.execute()
record_request_stop_data(log_data)
queue_json_publish("message_sender",
dict(request=msg['request'],
req_id=msg['req_id'],
server_meta=dict(user_id=self.session.user_profile.id,
client_id=self.client_id,
return_queue="tornado_return",
log_data=log_data,
request_environ=dict(REMOTE_ADDR=self.session.conn_info.ip))),
fake_message_sender)
def on_close(self):
# type: () -> None
log_data = dict(extra='[transport=%s]' % (self.session.transport_name,))
record_request_start_data(log_data)
if self.close_info is not None:
write_log_line(log_data, path='/socket/close', method='SOCKET',
remote_ip=self.session.conn_info.ip, email='unknown',
client_name='?', status_code=self.close_info.status_code,
error_content=self.close_info.err_msg)
else:
deregister_connection(self)
email = self.session.user_profile.email \
if self.session.user_profile is not None else 'unknown'
write_log_line(log_data, path='/socket/close', method='SOCKET',
remote_ip=self.session.conn_info.ip, email=email,
client_name='?')
self.did_close = True
def fake_message_sender(event):
# type: (Dict[str, Any]) -> None
log_data = dict() # type: Dict[str, Any]
record_request_start_data(log_data)
req = event['request']
try:
sender = get_user_profile_by_id(event['server_meta']['user_id'])
client = get_client(req['client'])
msg_id = check_send_message(sender, client, req['type'],
extract_recipients(req['to']),
req['subject'], req['content'],
local_id=req.get('local_id', None),
sender_queue_id=req.get('queue_id', None))
resp = {"result": "success", "msg": "", "id": msg_id}
except JsonableError as e:
resp = {"result": "error", "msg": str(e)}
server_meta = event['server_meta']
server_meta.update({'worker_log_data': log_data,
'time_request_finished': time.time()})
result = {'response': resp, 'req_id': event['req_id'],
'server_meta': server_meta}
respond_send_message(result)
def respond_send_message(data):
# type: (Mapping[str, Any]) -> None
log_data = data['server_meta']['log_data']
record_request_restart_data(log_data)
worker_log_data = data['server_meta']['worker_log_data']
forward_queue_delay = worker_log_data['time_started'] - log_data['time_stopped']
return_queue_delay = log_data['time_restarted'] - data['server_meta']['time_request_finished']
service_time = data['server_meta']['time_request_finished'] - worker_log_data['time_started']
log_data['extra'] += ', queue_delay: %s/%s, service_time: %s]' % (
format_timedelta(forward_queue_delay), format_timedelta(return_queue_delay),
format_timedelta(service_time))
client_id = data['server_meta']['client_id']
connection = get_connection(client_id)
if connection is None:
logger.info("Could not find connection to send response to! client_id=%s" % (client_id,))
else:
connection.session.send_message({'req_id': data['req_id'], 'type': 'response',
'response': data['response']})
# TODO: Fill in client name
# TODO: Maybe fill in the status code correctly
write_log_line(log_data, path='/socket/service_request', method='SOCKET',
remote_ip=connection.session.conn_info.ip,
email=connection.session.user_profile.email, client_name='?')
# We disable the eventsource and htmlfile transports because they cannot
# securely send us the zulip.com cookie, which we use as part of our
# authentication scheme.
sockjs_router = sockjs.tornado.SockJSRouter(SocketConnection, "/sockjs",
{'sockjs_url': 'https://%s/static/third/sockjs/sockjs-0.3.4.js' % (
settings.EXTERNAL_HOST,),
'disabled_transports': ['eventsource', 'htmlfile']})
def get_sockjs_router():
# type: () -> sockjs.tornado.SockJSRouter
return sockjs_router
|
paxapy/zulip
|
zerver/lib/socket.py
|
Python
|
apache-2.0
| 13,348
|
import logging
import os.path
import sys
import time
dirname = os.path.dirname(os.path.abspath(__file__))
sys.path.append(dirname)
sys.path.append(os.path.join(dirname, '..'))
from cassandra.cluster import Cluster
from cassandra.io.asyncorereactor import AsyncoreConnection
from cassandra.query import SimpleStatement
log = logging.getLogger()
log.setLevel('INFO')
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(name)s: %(message)s"))
log.addHandler(handler)
supported_reactors = [AsyncoreConnection]
try:
from cassandra.io.libevreactor import LibevConnection
supported_reactors.append(LibevConnection)
except ImportError, exc:
log.warning("Not benchmarking libev reactor: %s" % (exc,))
KEYSPACE = "testkeyspace"
TABLE = "testtable"
NUM_QUERIES = 10000
def setup():
cluster = Cluster(['127.0.0.1'])
session = cluster.connect()
rows = session.execute("SELECT keyspace_name FROM system.schema_keyspaces")
if KEYSPACE in [row[0] for row in rows]:
log.debug("dropping existing keyspace...")
session.execute("DROP KEYSPACE " + KEYSPACE)
log.debug("Creating keyspace...")
session.execute("""
CREATE KEYSPACE %s
WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': '2' }
""" % KEYSPACE)
log.debug("Setting keyspace...")
session.set_keyspace(KEYSPACE)
log.debug("Creating table...")
session.execute("""
CREATE TABLE %s (
thekey text,
col1 text,
col2 text,
PRIMARY KEY (thekey, col1)
)
""" % TABLE)
def teardown():
cluster = Cluster(['127.0.0.1'])
session = cluster.connect()
session.execute("DROP KEYSPACE " + KEYSPACE)
def benchmark(run_fn):
for conn_class in supported_reactors:
setup()
log.info("==== %s ====" % (conn_class.__name__,))
cluster = Cluster(['127.0.0.1'])
cluster.connection_class = conn_class
session = cluster.connect(KEYSPACE)
log.debug("Sleeping for two seconds...")
time.sleep(2.0)
query = SimpleStatement("""
INSERT INTO {table} (thekey, col1, col2)
VALUES (%(key)s, %(a)s, %(b)s)
""".format(table=TABLE))
values = {'key': 'key', 'a': 'a', 'b': 'b'}
log.debug("Beginning inserts...")
start = time.time()
try:
run_fn(session, query, values, NUM_QUERIES)
end = time.time()
finally:
teardown()
total = end - start
log.info("Total time: %0.2fs" % total)
log.info("Average throughput: %0.2f/sec" % (NUM_QUERIES / total))
|
aholmberg/python-driver
|
benchmarks/base.py
|
Python
|
apache-2.0
| 2,702
|
#!/usr/bin/env python
# Copyright 2014 RethinkDB, all rights reserved.
from __future__ import print_function
import os, pprint, sys, time, threading, traceback
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')))
import driver, scenario_common, utils, vcoptparse
r = utils.import_python_driver()
"""The `interface.system_changefeeds` test checks that changefeeds on system tables
correctly notify when changes occur."""
op = vcoptparse.OptParser()
scenario_common.prepare_option_parser_mode_flags(op)
opts = op.parse(sys.argv)
class AsyncChangefeed(object):
def __init__(self, host, port, query):
self.conn = r.connect(host, port)
self.stopping = False
self.err = None
self.changes = []
self.thr = threading.Thread(target = self.run, args = (query, ))
self.thr.daemon = True
self.thr.start()
time.sleep(0.5)
def run(self, query):
try:
for x in eval(query).changes().run(self.conn):
# Throw away initial values
if "old_val" in x:
self.changes.append(x)
except Exception, e:
self.err = sys.exc_info()
def check(self):
if self.err is not None:
print("Exception from other thread:")
traceback.print_exception(*self.err)
sys.exit(1)
with driver.Metacluster() as metacluster:
cluster1 = driver.Cluster(metacluster)
_, command_prefix, serve_options = scenario_common.parse_mode_flags(opts)
print("Spinning up two processes...")
files1 = driver.Files(metacluster, console_output="create-output-1", server_name="a", server_tags=["a_tag"], command_prefix=command_prefix)
proc1 = driver.Process(cluster1, files1, console_output="serve-output-1", command_prefix=command_prefix, extra_options=serve_options)
files2 = driver.Files(metacluster, console_output="create-output-2", server_name="b", server_tags=["b_tag"], command_prefix=command_prefix)
proc2 = driver.Process(cluster1, files2, console_output="serve-output-2", command_prefix=command_prefix, extra_options=serve_options)
proc1.wait_until_started_up()
proc2.wait_until_started_up()
cluster1.check()
# This is necessary because a few log messages may be printed even after
# `wait_until_started_up()` returns.
time.sleep(5.0)
conn = r.connect(proc1.host, proc1.driver_port)
tables = ["cluster_config", "db_config", "current_issues", "logs", "server_config",
"server_status", "table_config", "table_status"]
feeds = { }
for name in tables:
feeds[name] = AsyncChangefeed(proc1.host, proc1.driver_port,
"r.db('rethinkdb').table(%r)" % name)
def check(expected, timer):
time.sleep(timer)
for name, feed in feeds.iteritems():
feed.check()
if name in expected:
assert len(feed.changes) > 0, \
"Expected changes on %s, found none." % name
feed.changes = []
else:
assert len(feed.changes) == 0, \
"Expected no changes on %s, found %s." % (name, feed.changes)
check([], 5.0)
print("Changing auth key...")
res = r.db("rethinkdb").table("cluster_config").get("auth") \
.update({"auth_key": "foo"}).run(conn)
assert res["replaced"] == 1 and res["errors"] == 0, res
res = r.db("rethinkdb").table("cluster_config").get("auth") \
.update({"auth_key": None}).run(conn)
check(["cluster_config"], 1.0)
print("Creating database...")
res = r.db_create("test").run(conn)
assert res.get("dbs_created", 0) == 1, res
check(["db_config"], 1.0)
print("Creating tables...")
res = r.table_create("test", replicas={"a_tag": 1}, primary_replica_tag="a_tag").run(conn)
assert res["tables_created"] == 1, res
res = r.table_create("test2", replicas={"b_tag": 1}, primary_replica_tag="b_tag").run(conn)
assert res["tables_created"] == 1, res
check(["table_config", "table_status"], 1.0)
feeds["test_config"] = AsyncChangefeed(proc1.host, proc1.driver_port,
"r.table('test').config()")
feeds["test_status"] = AsyncChangefeed(proc1.host, proc1.driver_port,
"r.table('test').status()")
feeds["test2_config"] = AsyncChangefeed(proc1.host, proc1.driver_port,
"r.table('test2').config()")
feeds["test2_status"] = AsyncChangefeed(proc1.host, proc1.driver_port,
"r.table('test2').status()")
res = r.table("test").config() \
.update({"shards": [{"primary_replica": "a", "replicas": ["a", "b"]}]}).run(conn)
assert res["errors"] == 0, res
r.table("test").wait().run(conn)
check(["table_config", "table_status", "test_config", "test_status"], 1.0)
print("Renaming server...")
res = r.db("rethinkdb").table("server_config").filter({"name": "b"}) \
.update({"name": "c"}).run(conn)
assert res["replaced"] == 1 and res["errors"] == 0, res
check(["logs", "server_config", "server_status", "table_config", "table_status",
"test_config", "test_status", "test2_config", "test2_status"], 1.0)
print("Killing one server...")
proc2.check_and_stop()
check(["logs", "server_status", "table_status", "current_issues",
"test_status", "test2_status"], 1.0)
print("Declaring it dead...")
res = r.db("rethinkdb").table("server_config").filter({"name": "c"}).delete() \
.run(conn)
assert res["deleted"] == 1 and res["errors"] == 0, res
check(["server_config", "server_status", "table_config", "table_status", "current_issues",
"test_config", "test_status", "test2_config", "test2_status"], 1.0)
print("Shutting everything down...")
cluster1.check_and_stop()
print("Done.")
|
urandu/rethinkdb
|
test/interface/system_changefeeds.py
|
Python
|
agpl-3.0
| 5,839
|
import unittest
from test_initializer import TestInitializer
from page_objects import LoginPage, EditExercisePage, CourseName
class EditExercisePageTest(unittest.TestCase):
def setUp(self):
self.driver = TestInitializer().getDefaultDriver()
TestInitializer().recreateDatabase()
LoginPage(self.driver).loginAsTeacher()
def testShouldSaveExercise(self):
exerciseName = "Testiharjoitus"
maxSubmissions = "5"
maxPoints = "99"
pointToPass = "50"
exerciseNumber = 1
editExercisePage = EditExercisePage(self.driver, exerciseNumber)
editExercisePage.setExerciseName(exerciseName)
editExercisePage.setMaxSubmissions(maxSubmissions)
editExercisePage.setMaxPoints(maxPoints)
editExercisePage.setPointsToPass(pointToPass)
editExercisePage.submit()
self.assertTrue(editExercisePage.isSuccessfulSave())
editExercisePage = EditExercisePage(self.driver, exerciseNumber)
self.assertEqual(editExercisePage.getExerciseName(), exerciseName)
self.assertEqual(editExercisePage.getMaxSubmissions(), maxSubmissions)
self.assertEqual(editExercisePage.getMaxPoints(), maxPoints)
self.assertEqual(editExercisePage.getPointsToPass(), pointToPass)
def tearDown(self):
self.driver.quit()
if __name__ == '__main__':
unittest.main(verbosity=2)
|
Aalto-LeTech/a-plus
|
selenium_test/test/edit_exercise_page_test.py
|
Python
|
gpl-3.0
| 1,406
|
import os
import config.settings as settings
import config.paths as paths
import maya.standalone as std
import maya.cmds as cmds
import maya.mel as mel
from utils.maya_connector import MayaConnector
from utils.list_utils import ListUtils
from model_base import ModelBase
class TextureMap(ModelBase):
def __init__(self, attr, shader, obj, version, default_dir, asset_path):
self.attr = attr
self.shader = shader
self.obj = obj
self.version = version
self.default_dir = default_dir
self.asset_path = asset_path
def is_missing(self):
""" Checks if the referenced file of the texture node is missing. """
pass
def is_connected(self):
""" Checks if the map exists but is not connectedto the material for two reasons:
1. It hasn't been done. 2. Material doesn't exists. """
is_connected = False
MayaConnector.set_project()
cmds.file(self.asset_path, open = True)
if self.__is_valid_shader():
node = str(cmds.listConnections('%s.%s' % (self.shader, self.attr))[0])
if self.__is_valid_file(node):
return True
else:
is_connected = False
if self.__is_valid_gamma(node):
value_node = str(cmds.listConnections('%s.%s' % (node, 'value'))[0])
                if self.__is_valid_file(value_node):
return True
else:
is_connected = False
else:
is_connected = False
else:
is_connected = False
return is_connected
def is_broken(self):
""" Checks if texture map file is not connected to a node or missing. """
        return self.is_missing() or not self.is_connected()
def connect(self):
""" Connects the texture map file to the texture node if it's not connected.
If there's no texture node, it's created too. """
pass
def __is_valid_shader(self):
try:
cmds.select(self.shader)
cmds.select(cmds.listConnections('%s.%s' % (self.shader, self.attr))[0])
return True
except:
return False
def __is_valid_gamma(self, node):
try:
if cmds.nodeType(node) == 'gammaCorrect':
value_node = cmds.listConnections('%s.%s' % (node, 'value'))[0]
cmds.select(value_node)
return True
else:
return False
except:
return False
def __is_valid_file(self, node):
try:
if cmds.nodeType(node) == 'file':
if cmds.getAttr('%s.fileTextureName' % (node)) != '':
return True
else:
return False
else:
return False
except:
return False
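# --- Illustrative usage sketch, not part of the original module --------------
# All names and paths below are made up; the sketch only shows how a
# TextureMap would typically be constructed and queried from a Maya
# standalone session with a real scene on disk.
if __name__ == '__main__':
    std.initialize(name='python')  # boot Maya standalone before using cmds
    tex = TextureMap(attr='color',
                     shader='lambert1',
                     obj='pCube1',
                     version='v001',
                     default_dir='/tmp/textures',
                     asset_path='/tmp/assets/cube_v001.ma')
    print('is_connected: %s' % tex.is_connected())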
|
caedocha/sceneTasks
|
lib/models/texture_map.py
|
Python
|
gpl-2.0
| 2,346
|
import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='mdotdevs',
version='0.1',
packages=['mdotdevs'],
include_package_data=True,
install_requires = [
'setuptools',
'django<1.9rc1',
],
license='Apache License, Version 2.0', # example license
    description='A Django app for developer resources complementary to mdot_web client.',
long_description=README,
url='http://www.example.com/',
author='Your Name',
author_email='yourname@example.com',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License', # example license
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
|
uw-it-aca/mdot-developers
|
setup.py
|
Python
|
apache-2.0
| 1,212
|
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, include, url
from rest_framework import routers
from . import views
router = routers.DefaultRouter()
router.register(r'picture', views.PictureViewSet)
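# DefaultRouter generates the standard viewset routes here, e.g. a list route
# like ^picture/$ and a detail route like ^picture/<pk>/$, plus a browsable
# API root view.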
urlpatterns = patterns(
'',
url(r'^', include(router.urls)),
)
|
scailer/picarchive
|
apps/picture/urls.py
|
Python
|
mit
| 289
|
T = int(raw_input())
for i in range(T):
N = int(raw_input())
dolls = {}
for j in range(N):
a = int(raw_input())
if dolls.get(a,0):
dolls[a] += 1
else:
dolls[a] = 1
for key in dolls:
if dolls[key] % 2:
print key
break
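# --- Alternative sketch, not part of the original submission -----------------
# Since exactly one doll type occurs an odd number of times, XOR-ing all ids
# yields that type in O(N) time and O(1) extra space; the helper name is an
# assumption of this example.
def missing_doll_xor(ids):
    acc = 0
    for a in ids:
        acc ^= a
    return acc
# e.g. missing_doll_xor([1, 2, 1, 2, 3]) == 3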
|
rohit91/codechef
|
codechef/MISSP.py
|
Python
|
gpl-2.0
| 324
|
import re
import sys
import traceback
import time
import fcntl
import random
import datetime
import uuid
import functools
from pandaserver.brokerage import ErrorCode
from pandaserver.taskbuffer import ProcessGroups
from pandaserver.dataservice import DataServiceUtils
from pandaserver.dataservice.DataServiceUtils import select_scope
from pandaserver.dataservice.DDM import rucioAPI
from pandaserver.config import panda_config
from pandacommon.pandalogger.PandaLogger import PandaLogger
_log = PandaLogger().getLogger('broker')
try:
long
except NameError:
long = int
# all known sites
_allSites = []
# non LRC checking
_disableLRCcheck = []
# lock for uuidgen
_lockGetUU = open(panda_config.lockfile_getUU, 'w')
# short-long mapping
shortLongMap = {'ANALY_BNL_SHORT': 'ANALY_BNL_LONG'}
# processingType to skip brokerage
skipBrokerageProTypes = ['prod_test']
# comparison function for sort
def _compFunc(jobA,jobB):
# append site if not in list
if jobA.computingSite not in _allSites:
_allSites.append(jobA.computingSite)
if jobB.computingSite not in _allSites:
_allSites.append(jobB.computingSite)
# compare
indexA = _allSites.index(jobA.computingSite)
indexB = _allSites.index(jobB.computingSite)
if indexA > indexB:
return 1
elif indexA < indexB:
return -1
else:
return 0
# release checker
def _checkRelease(jobRels,siteRels):
# all on/off
if "True" in siteRels:
return True
if "False" in siteRels:
return False
# loop over all releases
for tmpRel in jobRels.split('\n'):
relVer = re.sub('^Atlas-','',tmpRel)
# not available releases
if relVer not in siteRels:
return False
return True
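# --- Illustrative sketch, not part of the original brokerage module ----------
# _checkRelease() takes the job's newline-separated release list and the
# site's validated releases; all release tags below are made-up examples.
def _demo_check_release():
    assert _checkRelease('Atlas-17.2.0\nAtlas-17.2.1', ['17.2.0', '17.2.1'])
    assert not _checkRelease('Atlas-17.2.0', ['16.6.5'])
    assert _checkRelease('Atlas-17.2.0', ['True'])  # site accepts any release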
# get list of files which already exist at the site
def _getOkFiles(v_ce, v_files, allLFNs, allOkFilesMap, prodsourcelabel, job_label, tmpLog=None, allScopeList=None):
scope_association_input, scope_association_output = select_scope(v_ce, prodsourcelabel, job_label)
dq2IDs = list(v_ce.setokens_input[scope_association_input].values())
try:
dq2IDs.remove('')
except Exception:
pass
dq2IDs.sort()
if dq2IDs == []:
dq2ID = v_ce.ddm_input[scope_association_input]
else:
dq2ID = ''
for tmpID in dq2IDs:
dq2ID += '%s,' % tmpID
dq2ID = dq2ID[:-1]
# set LFC and SE name
dq2URL = 'rucio://atlas-rucio.cern.ch:/grid/atlas'
tmpSE = v_ce.ddm_endpoints_input[scope_association_input].getAllEndPoints()
if tmpLog is not None:
tmpLog.debug('getOkFiles for %s with dq2ID:%s,LFC:%s,SE:%s' % (v_ce.sitename,dq2ID,dq2URL,str(tmpSE)))
anyID = 'any'
# use bulk lookup
if allLFNs != []:
# get all replicas
if dq2URL not in allOkFilesMap:
allOkFilesMap[dq2URL] = {}
tmpStat,tmpAvaFiles = rucioAPI.listFileReplicas(allScopeList, allLFNs, tmpSE)
if not tmpStat and tmpLog is not None:
tmpLog.debug('getOkFile failed to get file replicas')
tmpAvaFiles = {}
allOkFilesMap[dq2URL][anyID] = tmpAvaFiles
# get files for each dq2ID
if dq2ID not in allOkFilesMap[dq2URL]:
allOkFilesMap[dq2URL][dq2ID] = allOkFilesMap[dq2URL][anyID]
# make return map
retMap = {}
for tmpLFN in v_files:
if tmpLFN in allOkFilesMap[dq2URL][dq2ID]:
retMap[tmpLFN] = allOkFilesMap[dq2URL][dq2ID][tmpLFN]
        if tmpLog is not None:
            tmpLog.debug('getOkFiles done')
# return
return retMap
else:
# old style
        if tmpLog is not None:
            tmpLog.debug('getOkFiles old')
return {}
# check reprocessing or not
def _isReproJob(tmpJob):
if tmpJob is not None:
if tmpJob.processingType in ['reprocessing']:
return True
if tmpJob.transformation in ['csc_cosmics_trf.py','csc_BSreco_trf.py','BStoESDAODDPD_trf.py']:
return True
return False
# set 'ready' if files are already there
def _setReadyToFiles(tmpJob, okFiles, siteMapper, tmpLog):
tmpLog.debug(str(okFiles))
allOK = True
tmpSiteSpec = siteMapper.getSite(tmpJob.computingSite)
tmpSrcSpec = siteMapper.getSite(siteMapper.getCloud(tmpJob.getCloud())['source'])
scope_association_site_input, scope_association_site_output = select_scope(tmpSiteSpec, tmpJob.prodSourceLabel,
tmpJob.job_label)
scope_association_src_input, scope_association_src_output = select_scope(tmpSrcSpec, tmpJob.prodSourceLabel,
tmpJob.job_label)
tmpTapeEndPoints = tmpSiteSpec.ddm_endpoints_input[scope_association_site_input].getTapeEndPoints()
# direct usage of remote SE
if tmpSiteSpec.ddm_input[scope_association_site_input] != tmpSrcSpec.ddm_input[scope_association_src_input] \
and tmpSrcSpec.ddm_input[scope_association_src_input] in tmpSiteSpec.setokens_input[scope_association_site_input].values():
tmpSiteSpec = tmpSrcSpec
tmpLog.debug('%s uses remote SiteSpec of %s for %s' % (tmpJob.PandaID,tmpSrcSpec.sitename,tmpJob.computingSite))
for tmpFile in tmpJob.Files:
if tmpFile.type == 'input':
if tmpFile.status == 'ready':
tmpFile.dispatchDBlock = 'NULL'
elif DataServiceUtils.isCachedFile(tmpFile.dataset,tmpSiteSpec):
# cached file
tmpFile.status = 'cached'
tmpFile.dispatchDBlock = 'NULL'
elif tmpJob.computingSite == siteMapper.getCloud(tmpJob.getCloud())['source'] or \
tmpSiteSpec.ddm_input[scope_association_site_input] == tmpSrcSpec.ddm_input[scope_association_src_input]:
# use DDM prestage only for on-tape files
if len(tmpTapeEndPoints) > 0 and tmpFile.lfn in okFiles:
tapeOnly = True
tapeCopy = False
for tmpSE in okFiles[tmpFile.lfn]:
if tmpSE not in tmpTapeEndPoints:
tapeOnly = False
else:
# there is a tape copy
tapeCopy = True
# trigger prestage when disk copy doesn't exist or token is TAPE
if tapeOnly or (tapeCopy and tmpFile.dispatchDBlockToken in ['ATLASDATATAPE','ATLASMCTAPE']):
allOK = False
else:
# set ready
tmpFile.status = 'ready'
tmpFile.dispatchDBlock = 'NULL'
else:
# set ready anyway even if LFC is down. i.e. okFiles doesn't contain the file
tmpFile.status = 'ready'
tmpFile.dispatchDBlock = 'NULL'
else:
# set ready if the file exists and the site doesn't use prestage
tmpFile.status = 'ready'
tmpFile.dispatchDBlock = 'NULL'
# unset disp dataset
if allOK:
tmpJob.dispatchDBlock = 'NULL'
# check number/size of inputs
def _isTooManyInput(nFilesPerJob,inputSizePerJob):
# the number of inputs is larger than 5 or
# size of inputs is larger than 500MB
if nFilesPerJob > 5 or inputSizePerJob > 500*1024*1024:
return True
return False
# send analysis brokerage info to logger
def sendMsgToLogger(message):
_log.debug(message)
# send analysis brokerage info to logger with HTTP
def sendMsgToLoggerHTTP(msgList,job):
try:
# logging
iMsg = 0
# message type
msgType = 'analy_brokerage'
# make header
if job.jobsetID not in [None,'NULL']:
msgHead = "dn='%s' : jobset=%s jobdef=%s" % (job.prodUserName,job.jobsetID,job.jobDefinitionID)
else:
msgHead = "dn='%s' : jobdef=%s" % (job.prodUserName,job.jobDefinitionID)
for msgBody in msgList:
# make message
message = msgHead + ' : ' + msgBody
# dump locally
_log.debug(message)
# get logger
_pandaLogger = PandaLogger()
_pandaLogger.lock()
_pandaLogger.setParams({'Type':msgType})
logger = _pandaLogger.getHttpLogger(panda_config.loggername)
# add message
logger.info(message)
# release HTTP handler
_pandaLogger.release()
# sleep
iMsg += 1
if iMsg % 5 == 0:
time.sleep(1)
except Exception:
errType,errValue = sys.exc_info()[:2]
_log.error("sendMsgToLoggerHTTP : %s %s" % (errType,errValue))
# get T2 candidates when files are missing at T2
def getT2CandList(tmpJob,siteMapper,t2FilesMap):
if tmpJob is None:
return []
# no cloud info
if tmpJob.getCloud() not in t2FilesMap:
return []
# loop over all files
tmpCandT2s = None
for tmpFile in tmpJob.Files:
if tmpFile.type == 'input' and tmpFile.status == 'missing':
# no dataset info
if tmpFile.dataset not in t2FilesMap[tmpJob.getCloud()]:
return []
# initial candidates
if tmpCandT2s is None:
tmpCandT2s = t2FilesMap[tmpJob.getCloud()][tmpFile.dataset]['sites']
# check all candidates
newCandT2s = []
for tmpCandT2 in tmpCandT2s:
# site doesn't have the dataset
if tmpCandT2 not in t2FilesMap[tmpJob.getCloud()][tmpFile.dataset]['sites']:
continue
# site has the file
if tmpFile.lfn in t2FilesMap[tmpJob.getCloud()][tmpFile.dataset]['sites'][tmpCandT2]:
if tmpCandT2 not in newCandT2s:
newCandT2s.append(tmpCandT2)
# set new candidates
tmpCandT2s = newCandT2s
if tmpCandT2s == []:
break
# return [] if no missing files
if tmpCandT2s is None:
return []
# return
tmpCandT2s.sort()
return tmpCandT2s
# make compact dialog message
def makeCompactDiagMessage(header,results):
# limit
maxSiteList = 5
# types for compact format
compactTypeList = ['status','cpucore']
# message mapping
messageMap = {'rel' : 'missing rel/cache',
'pilot' : 'no pilot',
'status' : 'not online',
'disk' : 'SE full',
'memory' : 'RAM shortage',
'transferring' : 'many transferring',
'share' : 'zero share',
'maxtime' : 'short walltime',
'cpucore' : 'CPU core mismatch',
'scratch' : 'small scratch disk'
}
# put header
if header in ['',None]:
retStr = 'No candidate - '
else:
retStr = 'special brokerage for %s - ' % header
# count number of sites per type
numTypeMap = {}
for resultType in results:
resultList = results[resultType]
# ignore empty
if len(resultList) == 0:
continue
# add
nSites = len(resultList)
if nSites not in numTypeMap:
numTypeMap[nSites] = []
numTypeMap[nSites].append(resultType)
# sort
numTypeKeys = list(numTypeMap)
numTypeKeys.sort()
# use compact format for largest one
largeTypes = None
if len(numTypeKeys) > 0:
largeTypes = numTypeMap[numTypeKeys[-1]]
# loop over all types
for numTypeKey in numTypeKeys:
for resultType in numTypeMap[numTypeKey]:
# label
if resultType in messageMap:
retStr += '%s at ' % messageMap[resultType]
else:
                retStr += '%s at ' % resultType
            # use compact format or not
if (resultType in compactTypeList+largeTypes \
or len(results[resultType]) >= maxSiteList) \
and header in ['',None,'reprocessing'] :
if len(results[resultType]) == 1:
retStr += '%s site' % len(results[resultType])
else:
retStr += '%s sites' % len(results[resultType])
else:
for tmpSite in results[resultType]:
retStr += '%s,' % tmpSite
retStr = retStr[:-1]
retStr += '. '
retStr = retStr[:-2]
# return
return retStr
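# --- Illustrative sketch, not part of the original brokerage module ----------
# Shows the compact diagnostic string produced for a made-up brokerage result
# where only the 'status' check rejected a (fictional) site.
def _demo_compact_diag():
    results = {'status': ['ANALY_SITE_A'], 'pilot': []}
    return makeCompactDiagMessage('', results)
    # -> 'No candidate - not online at 1 site'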
# message class
class MsgWrapper:
def __init__(self):
self.timestamp = datetime.datetime.utcnow().isoformat('/')
def info(self,msg):
_log.info(self.timestamp + ' ' + msg)
def debug(self,msg):
_log.debug(self.timestamp + ' ' + msg)
def error(self,msg):
_log.error(self.timestamp + ' ' + msg)
def warning(self,msg):
_log.warning(self.timestamp + ' ' + msg)
# schedule
def schedule(jobs,taskBuffer,siteMapper,forAnalysis=False,setScanSiteList=[],trustIS=False,
distinguishedName=None,specialWeight={},getWeight=False,sizeMapForCheck={},
datasetSize=0,replicaMap={},pd2pT1=False,reportLog=False,minPriority=None,
t2FilesMap={},preferredCountries=[],siteReliability=None):
# make a message instance
tmpLog = MsgWrapper()
try:
tmpLog.debug('start %s %s %s %s minPrio=%s pref=%s siteRel=%s' % (forAnalysis,str(setScanSiteList),trustIS,
distinguishedName,minPriority,
str(preferredCountries),
siteReliability))
if specialWeight != {}:
tmpLog.debug('PD2P weight : %s' % str(specialWeight))
tmpLog.debug('replicaMap : %s' % str(replicaMap))
# no jobs
if len(jobs) == 0:
tmpLog.debug('finished : no jobs')
return
allOkFilesMap = {}
nJob = 20
iJob = 0
nFile = 20
fileList = []
scopeList = []
okFiles = {}
prioInterval = 50
totalNumInputs = 0
totalInputSize = 0
chosen_ce = None
prodDBlock = None
computingSite = None
dispatchDBlock = None
previousCloud = None
prevRelease = None
prevMemory = None
prevCmtConfig = None
prevProType = None
prevSourceLabel= None
prevDiskCount = None
prevHomePkg = None
prevDirectAcc = None
prevCoreCount = None
prevIsJEDI = None
prevDDM = None
prevBrokergageSiteList = None
prevManualPreset = None
prevGoToT2Flag = None
prevWorkingGroup = None
prevMaxCpuCount = None
prevBrokerageNote = None
prevPriority = None
nWNmap = {}
indexJob = 0
diskThresholdT1 = 20 * 1024
diskThresholdT2 = 200
diskThresholdAna = 200
diskThresholdPD2P = 1024 * 3
manyInputsThr = 20
weightUsedByBrokerage = {}
prestageSites = []
# check if only JEDI
onlyJEDI = True
for tmpJob in jobs:
if tmpJob.lockedby != 'jedi':
onlyJEDI = False
break
# get statistics
faresharePolicy = {}
newJobStatWithPrio = {}
jobStatBrokerClouds = {}
jobStatBrokerCloudsWithPrio = {}
hospitalQueueMap = {}
if len(jobs) > 0 and (jobs[0].processingType.startswith('gangarobot') or \
jobs[0].processingType.startswith('hammercloud') or \
jobs[0].processingType in ['pandamover','usermerge'] or \
onlyJEDI):
# disable redundant counting for HC
jobStatistics = {}
jobStatBroker = {}
jobStatBrokerClouds = {}
nRunningMap = {}
else:
jobStatistics = taskBuffer.getJobStatistics(forAnal=forAnalysis)
if not forAnalysis:
jobStatBroker = {}
jobStatBrokerClouds = taskBuffer.getJobStatisticsBrokerage()
faresharePolicy = taskBuffer.getFaresharePolicy()
else:
if minPriority is None:
jobStatBroker = taskBuffer.getJobStatisticsAnalBrokerage()
else:
jobStatBroker = taskBuffer.getJobStatisticsAnalBrokerage(minPriority=minPriority)
nRunningMap = taskBuffer.getnRunningInSiteData()
# sort jobs by siteID. Some jobs may already define computingSite
jobs = sorted(jobs, key=functools.cmp_to_key(_compFunc))
# brokerage for analysis
candidateForAnal = True
relCloudMap = {}
loggerMessages = []
# get all input files for bulk LFC lookup
allLFNs = []
allGUIDs = []
allScopes = []
for tmpJob in jobs:
if tmpJob.prodSourceLabel in ('test','managed') or tmpJob.prodUserName in ['gangarbt']:
for tmpFile in tmpJob.Files:
if tmpFile.type == 'input' and tmpFile.lfn not in allLFNs:
allLFNs.append(tmpFile.lfn)
allGUIDs.append(tmpFile.GUID)
allScopes.append(tmpFile.scope)
# loop over all jobs + terminator(None)
for job in jobs+[None]:
indexJob += 1
# ignore failed jobs
if job is None:
pass
elif job.jobStatus == 'failed':
continue
# list of sites for special brokerage
specialBrokergageSiteList = []
# note for brokerage
brokerageNote = ''
# send jobs to T2 when files are missing at T1
goToT2Flag = False
if job is not None and job.computingSite == 'NULL' and job.prodSourceLabel in ('test','managed') \
and specialBrokergageSiteList == []:
currentT2CandList = getT2CandList(job,siteMapper,t2FilesMap)
if currentT2CandList != []:
goToT2Flag = True
specialBrokergageSiteList = currentT2CandList
tmpLog.debug('PandaID:%s -> set SiteList=%s to use T2 for missing files at T1' % (job.PandaID,specialBrokergageSiteList))
brokerageNote = 'useT2'
# set computingSite to T1 for high priority jobs
if job is not None and job.currentPriority not in [None,'NULL'] and job.currentPriority >= 950 and job.computingSite == 'NULL' \
and job.prodSourceLabel in ('test','managed') and specialBrokergageSiteList == []:
specialBrokergageSiteList = [siteMapper.getCloud(job.getCloud())['source']]
# set site list to use T1 and T1_VL
if job.getCloud() in hospitalQueueMap:
specialBrokergageSiteList += hospitalQueueMap[job.getCloud()]
tmpLog.debug('PandaID:%s -> set SiteList=%s for high prio' % (job.PandaID,specialBrokergageSiteList))
brokerageNote = 'highPrio'
# use limited sites for MP jobs
if job is not None and job.computingSite == 'NULL' and job.prodSourceLabel in ('test','managed') \
and job.coreCount not in [None,'NULL'] and job.coreCount > 1 and specialBrokergageSiteList == []:
for tmpSiteName in siteMapper.getCloud(job.getCloud())['sites']:
if siteMapper.checkSite(tmpSiteName):
tmpSiteSpec = siteMapper.getSite(tmpSiteName)
if tmpSiteSpec.coreCount > 1:
specialBrokergageSiteList.append(tmpSiteName)
tmpLog.debug('PandaID:%s -> set SiteList=%s for MP=%scores' % (job.PandaID,specialBrokergageSiteList,job.coreCount))
brokerageNote = 'MP=%score' % job.coreCount
# use limited sites for reprocessing
if job is not None and job.computingSite == 'NULL' and job.prodSourceLabel in ('test','managed') \
and job.processingType in ['reprocessing'] and specialBrokergageSiteList == []:
for tmpSiteName in siteMapper.getCloud(job.getCloud())['sites']:
if siteMapper.checkSite(tmpSiteName):
tmpSiteSpec = siteMapper.getSite(tmpSiteName)
if _checkRelease(job.AtlasRelease,tmpSiteSpec.validatedreleases):
specialBrokergageSiteList.append(tmpSiteName)
tmpLog.debug('PandaID:%s -> set SiteList=%s for processingType=%s' % (job.PandaID,specialBrokergageSiteList,job.processingType))
brokerageNote = '%s' % job.processingType
# manually set site
manualPreset = False
if job is not None and job.computingSite != 'NULL' and job.prodSourceLabel in ('test','managed') \
and specialBrokergageSiteList == []:
specialBrokergageSiteList = [job.computingSite]
manualPreset = True
brokerageNote = 'presetSite'
overwriteSite = False
# check JEDI
isJEDI = False
if job is not None and job.lockedby == 'jedi' and job.processingType != 'evtest':
isJEDI = True
# new bunch or terminator
if job is None or len(fileList) >= nFile \
or (dispatchDBlock is None and job.homepackage.startswith('AnalysisTransforms')) \
or prodDBlock != job.prodDBlock or job.computingSite != computingSite or iJob > nJob \
or previousCloud != job.getCloud() or prevRelease != job.AtlasRelease \
or prevCmtConfig != job.cmtConfig \
or (computingSite in ['RAL_REPRO','INFN-T1_REPRO'] and len(fileList)>=2) \
or (prevProType in skipBrokerageProTypes and iJob > 0) \
or prevDirectAcc != job.transferType \
or (prevMemory != job.minRamCount and not isJEDI) \
or (prevDiskCount != job.maxDiskCount and not isJEDI) \
or prevCoreCount != job.coreCount \
or prevWorkingGroup != job.workingGroup \
or prevProType != job.processingType \
or (prevMaxCpuCount != job.maxCpuCount and not isJEDI) \
or prevBrokergageSiteList != specialBrokergageSiteList \
or prevIsJEDI != isJEDI \
or prevDDM != job.getDdmBackEnd():
if indexJob > 1:
tmpLog.debug('new bunch')
tmpLog.debug(' iJob %s' % iJob)
tmpLog.debug(' cloud %s' % previousCloud)
tmpLog.debug(' rel %s' % prevRelease)
tmpLog.debug(' sourceLabel %s' % prevSourceLabel)
tmpLog.debug(' cmtConfig %s' % prevCmtConfig)
tmpLog.debug(' memory %s' % prevMemory)
tmpLog.debug(' priority %s' % prevPriority)
tmpLog.debug(' prodDBlock %s' % prodDBlock)
tmpLog.debug(' computingSite %s' % computingSite)
tmpLog.debug(' processingType %s' % prevProType)
tmpLog.debug(' workingGroup %s' % prevWorkingGroup)
tmpLog.debug(' coreCount %s' % prevCoreCount)
tmpLog.debug(' maxCpuCount %s' % prevMaxCpuCount)
tmpLog.debug(' transferType %s' % prevDirectAcc)
tmpLog.debug(' goToT2 %s' % prevGoToT2Flag)
tmpLog.debug(' DDM %s' % prevDDM)
# brokerage decisions
resultsForAnal = {'rel':[],'pilot':[],'disk':[],'status':[],'weight':[],'memory':[],
'share':[],'transferring':[],'prefcountry':[],'cpucore':[],
'reliability':[],'maxtime':[],'scratch':[]}
# determine site
if (iJob == 0 or chosen_ce != 'TOBEDONE') and prevBrokergageSiteList in [None,[]]:
# file scan for pre-assigned jobs
jobsInBunch = jobs[indexJob-iJob-1:indexJob-1]
if jobsInBunch != [] and fileList != [] and (computingSite not in prestageSites) \
and (jobsInBunch[0].prodSourceLabel in ['managed','software'] or \
re.search('test',jobsInBunch[0].prodSourceLabel) is not None):
# get site spec
tmp_chosen_ce = siteMapper.getSite(computingSite)
# get files from LRC
okFiles = _getOkFiles(tmp_chosen_ce, fileList, allLFNs, allOkFilesMap,
jobsInBunch[0].prodSourceLabel, jobsInBunch[0].job_label,
tmpLog, allScopes)
nOkFiles = len(okFiles)
tmpLog.debug('site:%s - nFiles:%s/%s %s %s' % (computingSite,nOkFiles,len(fileList),str(fileList),str(okFiles)))
# loop over all jobs
for tmpJob in jobsInBunch:
# set 'ready' if files are already there
_setReadyToFiles(tmpJob,okFiles,siteMapper,tmpLog)
else:
# load balancing
minSites = {}
nMinSites = 2
if prevBrokergageSiteList != []:
# special brokerage
scanSiteList = prevBrokergageSiteList
elif setScanSiteList == []:
if siteMapper.checkCloud(previousCloud):
# use cloud sites
scanSiteList = siteMapper.getCloud(previousCloud)['sites']
else:
# use default sites
scanSiteList = siteMapper.getCloud('default')['sites']
else:
# use given sites
scanSiteList = setScanSiteList
# add long queue
for tmpShortQueue in shortLongMap:
tmpLongQueue = shortLongMap[tmpShortQueue]
if tmpShortQueue in scanSiteList:
if tmpLongQueue not in scanSiteList:
scanSiteList.append(tmpLongQueue)
# the number/size of inputs per job
nFilesPerJob = float(totalNumInputs)/float(iJob)
inputSizePerJob = float(totalInputSize)/float(iJob)
# use T1 for jobs with many inputs when weight is negative
if (not forAnalysis) and _isTooManyInput(nFilesPerJob,inputSizePerJob) and \
siteMapper.getCloud(previousCloud)['weight'] < 0 and prevManualPreset is False and \
(prevCoreCount not in ['NULL',None] and prevCoreCount > 1):
scanSiteList = [siteMapper.getCloud(previousCloud)['source']]
# set site list to use T1 and T1_VL
if previousCloud in hospitalQueueMap:
scanSiteList += hospitalQueueMap[previousCloud]
                    # get available sites with cache
useCacheVersion = False
siteListWithCache = []
if forAnalysis:
if prevRelease not in ['','NULL',None] and prevRelease.startswith('ROOT'):
if prevCmtConfig not in ['NULL',None,'']:
usePattern = True
if 'x86_64' in prevCmtConfig:
tmpCmtConfig = 'x86_64%'
else:
tmpCmtConfig = 'i686%'
# extract OS ver
tmpMatch = re.search('(slc\d+)',prevCmtConfig)
if tmpMatch is not None:
tmpCmtConfig += tmpMatch.group(1)
tmpCmtConfig += '%'
useCacheVersion = True
siteListWithCache = taskBuffer.checkSitesWithRelease(scanSiteList,
cmtConfig=tmpCmtConfig,
onlyCmtConfig=True,
cmtConfigPattern=usePattern)
tmpLog.debug(' using installSW for ROOT:cmtConfig %s' % prevCmtConfig)
else:
# reset release info for backward compatibility
prevRelease = ''
elif re.search('-\d+\.\d+\.\d+\.\d+',prevRelease) is not None:
useCacheVersion = True
siteListWithCache = taskBuffer.checkSitesWithRelease(scanSiteList,caches=prevRelease,cmtConfig=prevCmtConfig)
tmpLog.debug(' using installSW for cache %s' % prevRelease)
elif re.search('-\d+\.\d+\.\d+$',prevRelease) is not None:
useCacheVersion = True
siteListWithCache = taskBuffer.checkSitesWithRelease(scanSiteList,releases=prevRelease,cmtConfig=prevCmtConfig)
tmpLog.debug(' using installSW for release %s' % prevRelease)
elif re.search(':rel_\d+$$',prevRelease) is not None:
useCacheVersion = True
# FIXME
#siteListWithCache = taskBuffer.checkSitesWithRelease(scanSiteList,
# releases='nightlies',
# cmtConfig=prevCmtConfig)
siteListWithCache = taskBuffer.checkSitesWithRelease(scanSiteList,
releases='CVMFS')
tmpLog.debug(' using installSW for release:cache %s' % prevRelease)
elif previousCloud in ['DE','NL','FR','CA','ES','IT','TW','UK','US','ND','CERN','RU']:
useCacheVersion = True
# change / to -
convedPrevHomePkg = prevHomePkg.replace('/','-')
if re.search('rel_\d+(\n|$)',prevHomePkg) is None:
# only cache is used for normal jobs
siteListWithCache = taskBuffer.checkSitesWithRelease(scanSiteList,caches=convedPrevHomePkg,
cmtConfig=prevCmtConfig)
else:
# for nightlies
siteListWithCache = taskBuffer.checkSitesWithRelease(scanSiteList,
releases='nightlies',
cmtConfig=prevCmtConfig)
tmpLog.debug(' cache %s' % prevHomePkg)
if useCacheVersion:
tmpLog.debug(' cache/relSites %s' % str(siteListWithCache))
# release/cmtconfig check
foundRelease = False
# found candidate
foundOneCandidate = False
# randomize the order
if forAnalysis:
random.shuffle(scanSiteList)
                    # get candidates
if True:
# loop over all sites
for site in scanSiteList:
tmpLog.debug('calculate weight for site:%s' % site)
                            # _allSites may contain NULL after sort()
if site == 'NULL':
continue
if prevIsJEDI:
foundRelease = True
winv = 1
else:
# get SiteSpec
if siteMapper.checkSite(site):
tmpSiteSpec = siteMapper.getSite(site)
else:
tmpLog.debug(" skip: %s doesn't exist in DB" % site)
continue
# ignore test sites
if (prevManualPreset is False) and (site.endswith('test') or \
site.endswith('Test') or site.startswith('Test')):
continue
# ignore analysis queues
if (not forAnalysis) and (not tmpSiteSpec.runs_production()):
continue
# check status
if tmpSiteSpec.status in ['offline','brokeroff'] and computingSite in ['NULL',None,'']:
if forAnalysis and tmpSiteSpec.status == 'brokeroff' and tmpSiteSpec.accesscontrol == 'grouplist':
# ignore brokeroff for grouplist site
pass
elif forAnalysis and prevProType in ['hammercloud','gangarobot','gangarobot-squid']:
# ignore site status for HC
pass
else:
tmpLog.debug(' skip: status %s' % tmpSiteSpec.status)
resultsForAnal['status'].append(site)
continue
if tmpSiteSpec.status == 'test' and (prevProType not in ['prod_test','hammercloud','gangarobot','gangarobot-squid']) \
and prevSourceLabel not in ['test','prod_test']:
tmpLog.debug(' skip: status %s for %s' % (tmpSiteSpec.status,prevProType))
resultsForAnal['status'].append(site)
continue
tmpLog.debug(' status=%s' % tmpSiteSpec.status)
# check core count
if tmpSiteSpec.coreCount > 1:
# use multi-core queue for MP jobs
if prevCoreCount not in [None,'NULL'] and prevCoreCount > 1:
pass
else:
tmpLog.debug(' skip: MP site (%s core) for job.coreCount=%s' % (tmpSiteSpec.coreCount,
prevCoreCount))
resultsForAnal['cpucore'].append(site)
continue
else:
# use single core for non-MP jobs
if prevCoreCount not in [None,'NULL'] and prevCoreCount > 1:
tmpLog.debug(' skip: single core site (%s core) for job.coreCount=%s' % (tmpSiteSpec.coreCount,
prevCoreCount))
resultsForAnal['cpucore'].append(site)
continue
# check max memory
if tmpSiteSpec.memory != 0 and prevMemory not in [None,0,'NULL']:
try:
if int(tmpSiteSpec.memory) < int(prevMemory):
tmpLog.debug(' skip: site memory shortage %s<%s' % (tmpSiteSpec.memory,prevMemory))
resultsForAnal['memory'].append(site)
continue
except Exception:
errtype,errvalue = sys.exc_info()[:2]
tmpLog.error("max memory check : %s %s" % (errtype,errvalue))
# check maxcpucount
if tmpSiteSpec.maxtime != 0 and prevMaxCpuCount not in [None,0,'NULL']:
try:
if int(tmpSiteSpec.maxtime) < int(prevMaxCpuCount):
tmpLog.debug(' skip: insufficient maxtime %s<%s' % (tmpSiteSpec.maxtime,prevMaxCpuCount))
resultsForAnal['maxtime'].append(site)
continue
except Exception:
errtype,errvalue = sys.exc_info()[:2]
tmpLog.error("maxtime check : %s %s" % (errtype,errvalue))
if tmpSiteSpec.mintime != 0 and prevMaxCpuCount not in [None,0,'NULL']:
try:
if int(tmpSiteSpec.mintime) > int(prevMaxCpuCount):
tmpLog.debug(' skip: insufficient job maxtime %s<%s' % (prevMaxCpuCount,tmpSiteSpec.mintime))
resultsForAnal['maxtime'].append(site)
continue
except Exception:
errtype,errvalue = sys.exc_info()[:2]
tmpLog.error("mintime check : %s %s" % (errtype,errvalue))
# check max work dir size
if tmpSiteSpec.maxwdir != 0 and (prevDiskCount not in [None,0,'NULL']):
try:
if int(tmpSiteSpec.maxwdir) < int(prevDiskCount):
tmpLog.debug(' skip: not enough disk %s<%s' % (tmpSiteSpec.maxwdir, prevDiskCount))
resultsForAnal['scratch'].append(site)
continue
except Exception:
errtype,errvalue = sys.exc_info()[:2]
tmpLog.error("disk check : %s %s" % (errtype,errvalue))
tmpLog.debug(' maxwdir=%s' % tmpSiteSpec.maxwdir)
# reliability
if forAnalysis and isinstance(siteReliability, (int, long)):
if tmpSiteSpec.reliabilityLevel is not None and tmpSiteSpec.reliabilityLevel > siteReliability:
tmpLog.debug(' skip: insufficient reliability %s > %s' % (tmpSiteSpec.reliabilityLevel,siteReliability))
resultsForAnal['reliability'].append(site)
continue
# change NULL cmtconfig to slc3/4
if prevCmtConfig in ['NULL','',None]:
if forAnalysis:
tmpCmtConfig = 'i686-slc4-gcc34-opt'
else:
tmpCmtConfig = 'i686-slc3-gcc323-opt'
else:
tmpCmtConfig = prevCmtConfig
# set release
releases = tmpSiteSpec.releases
origReleases = releases
if prevProType in ['reprocessing']:
# use validated releases for reprocessing
releases = tmpSiteSpec.validatedreleases
if not useCacheVersion:
tmpLog.debug(' %s' % str(releases))
if origReleases == ['ANY']:
# doesn't check releases for catch all
tmpLog.debug(' no release check due to releases=%s' % origReleases)
foundRelease = True
elif forAnalysis and (tmpSiteSpec.cloud in ['ND'] or prevRelease==''):
# doesn't check releases for analysis
tmpLog.debug(' no release check')
pass
elif forAnalysis and useCacheVersion:
# cache matching
if site not in siteListWithCache:
tmpLog.debug(' skip: cache %s/%s not found' % (prevRelease.replace('\n',' '),prevCmtConfig))
if trustIS:
resultsForAnal['rel'].append(site)
continue
elif prevRelease is not None and \
(useCacheVersion and tmpSiteSpec.cloud not in ['ND'] and site not in ['CERN-RELEASE']) and \
(prevProType not in ['reprocessing']) and \
(site not in siteListWithCache):
tmpLog.debug(' skip: cache %s/%s not found' % (prevHomePkg.replace('\n',' '), prevCmtConfig))
# send message to logger
try:
if prevSourceLabel in ['managed','test']:
resultsForAnal['rel'].append(site)
# make message
message = '%s - cache %s/%s not found' % (site,prevHomePkg.replace('\n',' '),prevCmtConfig)
if message not in loggerMessages:
loggerMessages.append(message)
except Exception:
pass
continue
elif prevRelease is not None and \
((not useCacheVersion and releases != [] and tmpSiteSpec.cloud not in ['ND'] and site not in ['CERN-RELEASE']) or prevProType in ['reprocessing']) and \
(((not _checkRelease(prevRelease,releases) and prevManualPreset is False) or site not in siteListWithCache) and tmpSiteSpec.cloud not in ['ND'] and site not in ['CERN-RELEASE']):
# release matching
if not useCacheVersion:
tmpLog.debug(' skip: release %s/%s not found' % (prevRelease.replace('\n',' '),prevCmtConfig))
else:
tmpLog.debug(' skip: repro cache %s/%s not found' % (prevHomePkg.replace('\n',' '),prevCmtConfig))
resultsForAnal['rel'].append(site)
continue
elif not foundRelease:
# found at least one site has the release
foundRelease = True
# get pilot statistics
nPilotsGet = 0
nPilotsUpdate = 0
if nWNmap == {}:
nWNmap = taskBuffer.getCurrentSiteData()
if site in nWNmap:
nPilots = nWNmap[site]['getJob'] + nWNmap[site]['updateJob']
nPilotsGet = nWNmap[site]['getJob']
nPilotsUpdate = nWNmap[site]['updateJob']
elif site.split('/')[0] in nWNmap:
tmpID = site.split('/')[0]
nPilots = nWNmap[tmpID]['getJob'] + nWNmap[tmpID]['updateJob']
nPilotsGet = nWNmap[tmpID]['getJob']
nPilotsUpdate = nWNmap[tmpID]['updateJob']
else:
nPilots = 0
tmpLog.debug(' original nPilots:%s get:%s update:%s' % (nPilots,nPilotsGet,nPilotsUpdate))
# limit on (G+1)/(U+1)
limitOnGUmax = 1.1
limitOnGUmin = 0.9
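                                # keep the getJob/updateJob ratio inside [limitOnGUmin, limitOnGUmax]
                                # before it enters the weight calculation further below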
guRatio = float(1+nPilotsGet)/float(1+nPilotsUpdate)
if guRatio > limitOnGUmax:
nPilotsGet = limitOnGUmax * float(1+nPilotsUpdate) - 1.0
elif guRatio < limitOnGUmin:
nPilotsGet = limitOnGUmin * float(1+nPilotsUpdate) - 1.0
tmpLog.debug(' limited nPilots:%s get:%s update:%s' % (nPilots,nPilotsGet,nPilotsUpdate))
# if no pilots
if nPilots == 0 and nWNmap != {}:
tmpLog.debug(" skip: %s no pilot" % site)
resultsForAnal['pilot'].append(site)
continue
# if no jobs in jobsActive/jobsDefined
jobStatistics.setdefault(site,
{'assigned':0,'activated':0,'running':0,'transferring':0})
# set nRunning
if forAnalysis:
nRunningMap.setdefault(site, 0)
# check space
if specialWeight != {}:
# for PD2P
if site in sizeMapForCheck:
# threshold for PD2P max(5%,3TB)
thrForThisSite = long(sizeMapForCheck[site]['total'] * 5 / 100)
if thrForThisSite < diskThresholdPD2P:
thrForThisSite = diskThresholdPD2P
remSpace = sizeMapForCheck[site]['total'] - sizeMapForCheck[site]['used']
tmpLog.debug(' space available=%s remain=%s thr=%s' % (sizeMapForCheck[site]['total'],
remSpace,thrForThisSite))
if remSpace-datasetSize < thrForThisSite:
tmpLog.debug(' skip: disk shortage %s-%s< %s' % (remSpace,datasetSize,thrForThisSite))
if getWeight:
weightUsedByBrokerage[site] = "NA : disk shortage"
continue
else:
if tmpSiteSpec.space:
# production
if not forAnalysis:
# take assigned/activated/running jobs into account for production
nJobsIn = float(jobStatistics[site]['assigned'])
nJobsOut = float(jobStatistics[site]['activated']+jobStatistics[site]['running'])
# get remaining space and threshold
if site == siteMapper.getCloud(previousCloud)['source']:
# T1
remSpace = float(tmpSiteSpec.space) - 0.2 * nJobsOut
remSpace = int(remSpace)
diskThreshold = diskThresholdT1
else:
# T2
remSpace = float(tmpSiteSpec.space) - 0.2 * nJobsOut - 2.0 * nJobsIn
remSpace = int(remSpace)
diskThreshold = diskThresholdT2
else:
# analysis
remSpace = tmpSiteSpec.space
diskThreshold = diskThresholdAna
tmpLog.debug(' space available=%s remain=%s' % (tmpSiteSpec.space,remSpace))
if remSpace < diskThreshold:
tmpLog.debug(' skip: disk shortage < %s' % diskThreshold)
resultsForAnal['disk'].append(site)
# keep message to logger
try:
if prevSourceLabel in ['managed','test']:
# make message
message = '%s - disk %s < %s' % (site,remSpace,diskThreshold)
if message not in loggerMessages:
loggerMessages.append(message)
except Exception:
pass
continue
# get the process group
tmpProGroup = ProcessGroups.getProcessGroup(prevProType)
if prevProType in skipBrokerageProTypes:
                            # use the original processingType since prod_test is in the test category and would otherwise be lumped together with validations
tmpProGroup = prevProType
# production share
skipDueToShare = False
try:
if not forAnalysis and prevSourceLabel in ['managed'] and site in faresharePolicy:
for tmpPolicy in faresharePolicy[site]['policyList']:
# ignore priority policy
if tmpPolicy['priority'] is not None:
continue
# only zero share
if tmpPolicy['share'] != '0%':
continue
# check group
if tmpPolicy['group'] is not None:
if '*' in tmpPolicy['group']:
# wildcard
tmpPatt = '^' + tmpPolicy['group'].replace('*','.*') + '$'
if re.search(tmpPatt,prevWorkingGroup) is None:
continue
else:
# normal definition
if prevWorkingGroup != tmpPolicy['group']:
continue
else:
# catch all except WGs used by other policies
groupInDefList = faresharePolicy[site]['groupList']
usedByAnother = False
# loop over all groups
for groupInDefItem in groupInDefList:
if '*' in groupInDefItem:
# wildcard
tmpPatt = '^' + groupInDefItem.replace('*','.*') + '$'
if re.search(tmpPatt,prevWorkingGroup) is not None:
usedByAnother = True
break
else:
# normal definition
if prevWorkingGroup == groupInDefItem:
usedByAnother = True
break
if usedByAnother:
continue
# check type
if tmpPolicy['type'] is not None:
if tmpPolicy['type'] == tmpProGroup:
skipDueToShare = True
break
else:
# catch all except PGs used by other policies
typeInDefList = faresharePolicy[site]['typeList'][tmpPolicy['group']]
usedByAnother = False
for typeInDefItem in typeInDefList:
if typeInDefItem == tmpProGroup:
usedByAnother = True
break
if not usedByAnother:
skipDueToShare = True
break
# skip
if skipDueToShare:
tmpLog.debug(" skip: %s zero share" % site)
resultsForAnal['share'].append(site)
continue
except Exception:
errtype,errvalue = sys.exc_info()[:2]
tmpLog.error("share check : %s %s" % (errtype,errvalue))
# the number of assigned and activated
if not forAnalysis:
jobStatBrokerClouds.setdefault(previousCloud, {})
# use number of jobs in the cloud
jobStatBroker = jobStatBrokerClouds[previousCloud]
if site not in jobStatBroker:
jobStatBroker[site] = {}
if tmpProGroup not in jobStatBroker[site]:
jobStatBroker[site][tmpProGroup] = {'assigned':0,'activated':0,'running':0,'transferring':0}
                            # count # of assigned and activated jobs for prod by taking priorities into account
nRunJobsPerGroup = None
if not forAnalysis and prevSourceLabel in ['managed','test']:
jobStatBrokerCloudsWithPrio.setdefault(prevPriority,
taskBuffer.getJobStatisticsBrokerage(
prevPriority,
prevPriority+prioInterval))
jobStatBrokerCloudsWithPrio[prevPriority].setdefault(previousCloud, {})
jobStatBrokerCloudsWithPrio[prevPriority][previousCloud].setdefault(site, {})
jobStatBrokerCloudsWithPrio[prevPriority][previousCloud][site].setdefault(
tmpProGroup, {'assigned':0,'activated':0,'running':0,'transferring':0})
nAssJobs = jobStatBrokerCloudsWithPrio[prevPriority][previousCloud][site][tmpProGroup]['assigned']
nActJobs = jobStatBrokerCloudsWithPrio[prevPriority][previousCloud][site][tmpProGroup]['activated']
nRunJobsPerGroup = jobStatBrokerCloudsWithPrio[prevPriority][previousCloud][site][tmpProGroup]['running']
# add newly assigned jobs
for tmpNewPriority in newJobStatWithPrio:
if tmpNewPriority < prevPriority:
continue
if previousCloud not in newJobStatWithPrio[tmpNewPriority]:
continue
if site not in newJobStatWithPrio[tmpNewPriority][previousCloud]:
continue
if tmpProGroup not in newJobStatWithPrio[tmpNewPriority][previousCloud][site]:
continue
nAssJobs += newJobStatWithPrio[tmpNewPriority][previousCloud][site][tmpProGroup]
else:
nAssJobs = jobStatBroker[site][tmpProGroup]['assigned']
if forAnalysis and 'defined' in jobStatBroker[site][tmpProGroup]:
nAssJobs += jobStatBroker[site][tmpProGroup]['defined']
nActJobs = jobStatBroker[site][tmpProGroup]['activated']
# number of jobs per node
if site not in nWNmap:
nJobsPerNode = 1
elif jobStatistics[site]['running']==0 or nWNmap[site]['updateJob']==0:
nJobsPerNode = 1
else:
if nRunJobsPerGroup is None:
nJobsPerNode = float(jobStatistics[site]['running'])/float(nWNmap[site]['updateJob'])
else:
if nRunJobsPerGroup == 0:
nJobsPerNode = 1.0/float(nWNmap[site]['updateJob'])
else:
nJobsPerNode = float(nRunJobsPerGroup)/float(nWNmap[site]['updateJob'])
# limit of the number of transferring jobs
if tmpSiteSpec.transferringlimit == 0:
maxTransferring = 2000
else:
maxTransferring = tmpSiteSpec.transferringlimit
                                # get ratio of transferring to running
if not forAnalysis and tmpSiteSpec.cloud not in ['ND']:
nTraJobs = 0
nRunJobs = 0
for tmpGroupForTra in jobStatBroker[site]:
tmpCountsForTra = jobStatBroker[site][tmpGroupForTra]
if 'running' in tmpCountsForTra:
nRunJobs += tmpCountsForTra['running']
if 'transferring' in tmpCountsForTra:
nTraJobs += tmpCountsForTra['transferring']
tmpLog.debug(' running=%s transferring=%s max=%s' % (nRunJobs,nTraJobs,maxTransferring))
if max(maxTransferring,2*nRunJobs) < nTraJobs:
tmpLog.debug(" skip: %s many transferring=%s > max(%s,2*running=%s)" % (site,nTraJobs,maxTransferring,nRunJobs))
resultsForAnal['transferring'].append(site)
if prevSourceLabel in ['managed','test']:
# make message
message = '%s - too many transferring' % site
if message not in loggerMessages:
loggerMessages.append(message)
continue
# get ratio of running jobs = run(cloud)/run(all) for multi cloud (disabled)
multiCloudFactor = 1
# country preference
preferredCountryWeight = 1.0
preferredCountryWeightStr = ''
if forAnalysis:
if preferredCountries != [] and tmpSiteSpec.countryGroup != []:
for tmpCountry in preferredCountries:
if tmpCountry in tmpSiteSpec.countryGroup:
# avoid negative weight or zero-divide
if tmpSiteSpec.availableCPU >= tmpSiteSpec.pledgedCPU and tmpSiteSpec.pledgedCPU > 0:
preferredCountryWeight = float(tmpSiteSpec.availableCPU) / float(tmpSiteSpec.pledgedCPU)
preferredCountryWeightStr = "*(%s/%s)" % (tmpSiteSpec.availableCPU,tmpSiteSpec.pledgedCPU)
resultsForAnal['prefcountry'].append((site,tmpCountry))
break
tmpLog.debug(' country preference=%s' % preferredCountryWeightStr[1:])
# calculate weight
if specialWeight != {}:
if not pd2pT1:
# weight for T2 PD2P
nSubs = 1
if site in specialWeight:
nSubs = specialWeight[site]
tmpLog.debug(' %s nSubs:%s assigned:%s activated:%s running:%s nWNsG:%s nWNsU:%s' % \
(site,nSubs,nAssJobs,nActJobs,nRunningMap[site],nPilotsGet,nPilotsUpdate))
winv = float(nSubs) * float(nAssJobs+nActJobs) / float(1+nRunningMap[site]) / (1.0+float(nPilotsGet)/float(1+nPilotsUpdate))
if getWeight:
weightUsedByBrokerage[site] = "(1+%s/%s)*%s/%s/%s" % (nPilotsGet,1+nPilotsUpdate,1+nRunningMap[site],nAssJobs+nActJobs,nSubs)
else:
# weight for T1 PD2P
tmpLog.debug(' %s MoU:%s' % (site,specialWeight[site]))
winv = 1.0 / float(specialWeight[site])
if getWeight:
weightUsedByBrokerage[site] = "%s" % specialWeight[site]
else:
if not forAnalysis:
if nRunJobsPerGroup is None:
tmpLog.debug(' %s assigned:%s activated:%s running:%s nPilotsGet:%s nPilotsUpdate:%s multiCloud:%s' %
(site,nAssJobs,nActJobs,jobStatistics[site]['running'],nPilotsGet,nPilotsUpdate,multiCloudFactor))
else:
tmpLog.debug(' %s assigned:%s activated:%s runningGroup:%s nPilotsGet:%s nPilotsUpdate:%s multiCloud:%s' %
(site,nAssJobs,nActJobs,nRunJobsPerGroup,nPilotsGet,nPilotsUpdate,multiCloudFactor))
else:
tmpLog.debug(' %s assigned:%s activated:%s running:%s nWNsG:%s nWNsU:%s' %
(site,nAssJobs,nActJobs,nRunningMap[site],nPilotsGet,nPilotsUpdate))
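                                # the brokerage ranks candidates by an inverse weight: roughly
                                #   winv ~ (assigned + activated) / (1 + running), damped by the pilot getJob/updateJob ratio,
                                # so a short queue, many running jobs and a healthy pilot flow give a small winv,
                                # which wins when the minimum 1/weight is selected further below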
if forAnalysis:
winv = float(nAssJobs+nActJobs) / float(1+nRunningMap[site]) / (1.0+float(nPilotsGet)/float(1+nPilotsUpdate))
else:
if nRunJobsPerGroup is None:
winv = float(nAssJobs+nActJobs) / float(1+jobStatistics[site]['running']) / (float(1+nPilotsGet)/float(1+nPilotsUpdate))
else:
winv = float(nAssJobs+nActJobs) / float(1+nRunJobsPerGroup) / (float(1+nPilotsGet)/float(1+nPilotsUpdate))
winv *= float(multiCloudFactor)
# send jobs to T1 when they require many or large inputs
if _isTooManyInput(nFilesPerJob,inputSizePerJob):
if site == siteMapper.getCloud(previousCloud)['source'] or \
(site=='NIKHEF-ELPROD' and previousCloud=='NL' and prevProType=='reprocessing') or \
(previousCloud in hospitalQueueMap and site in hospitalQueueMap[previousCloud]):
cloudT1Weight = 2.0
# use weight in cloudconfig
try:
tmpCloudT1Weight = float(siteMapper.getCloud(previousCloud)['weight'])
if tmpCloudT1Weight != 0.0:
cloudT1Weight = tmpCloudT1Weight
except Exception:
pass
winv /= cloudT1Weight
tmpLog.debug(' special weight for %s : nInputs/Job=%s inputSize/Job=%s weight=%s' %
(site,nFilesPerJob,inputSizePerJob,cloudT1Weight))
# found at least one candidate
foundOneCandidate = True
tmpLog.debug('Site:%s 1/Weight:%s' % (site, winv))
if forAnalysis and trustIS and reportLog:
resultsForAnal['weight'].append((site,'(1+%s/%s)*%s/%s%s' % (nPilotsGet,1+nPilotsUpdate,1+nRunningMap[site],
nAssJobs+nActJobs,preferredCountryWeightStr)))
# choose largest nMinSites weights
minSites[site] = winv
if len(minSites) > nMinSites:
maxSite = site
maxWinv = winv
for tmpSite in minSites:
tmpWinv = minSites[tmpSite]
if tmpWinv > maxWinv:
maxSite = tmpSite
maxWinv = tmpWinv
                                    # delete max one
del minSites[maxSite]
# remove too different weights
if len(minSites) >= 2:
# look for minimum
minSite = list(minSites)[0]
minWinv = minSites[minSite]
for tmpSite in minSites:
tmpWinv = minSites[tmpSite]
if tmpWinv < minWinv:
minSite = tmpSite
minWinv = tmpWinv
# look for too different weights
difference = 2
removeSites = []
for tmpSite in minSites:
tmpWinv = minSites[tmpSite]
if tmpWinv > minWinv*difference:
removeSites.append(tmpSite)
# remove
for tmpSite in removeSites:
del minSites[tmpSite]
# set default
if len(minSites) == 0:
# cloud's list
if forAnalysis or siteMapper.checkCloud(previousCloud):
minSites[scanSiteList[0]] = 0
else:
minSites[panda_config.def_sitename] = 0
# release not found
if forAnalysis and trustIS:
candidateForAnal = False
# use only one site for prod_test to skip LFC scan
if prevProType in skipBrokerageProTypes:
if len(minSites) > 1:
minSites = {list(minSites)[0]:0}
# choose site
tmpLog.debug('Min Sites:%s' % minSites)
if len(fileList) ==0 or prevIsJEDI is True:
# choose min 1/weight
minSite = list(minSites)[0]
minWinv = minSites[minSite]
for tmpSite in minSites:
tmpWinv = minSites[tmpSite]
if tmpWinv < minWinv:
minSite = tmpSite
minWinv = tmpWinv
chosenCE = siteMapper.getSite(minSite)
else:
# compare # of files in LRC
maxNfiles = -1
for site in minSites:
tmp_chosen_ce = siteMapper.getSite(site)
# search LRC
if site in _disableLRCcheck:
tmpOKFiles = {}
else:
# get files from LRC
tmpOKFiles = _getOkFiles(tmp_chosen_ce, fileList, allLFNs, allOkFilesMap,
                                                    job.prodSourceLabel, job.job_label, tmpLog, allScopes)
nFiles = len(tmpOKFiles)
tmpLog.debug('site:%s - nFiles:%s/%s %s' % (site,nFiles,len(fileList),str(tmpOKFiles)))
# choose site holding max # of files
if nFiles > maxNfiles:
chosenCE = tmp_chosen_ce
maxNfiles = nFiles
okFiles = tmpOKFiles
# set job spec
tmpLog.debug('indexJob : %s' % indexJob)
tmpLog.debug('nInputs/Job : %s' % nFilesPerJob)
tmpLog.debug('inputSize/Job : %s' % inputSizePerJob)
for tmpJob in jobs[indexJob-iJob-1:indexJob-1]:
# set computingSite
if (not candidateForAnal) and forAnalysis and trustIS:
resultsForAnalStr = 'ERROR : No candidate. '
if resultsForAnal['rel'] != []:
if prevCmtConfig in ['','NULL',None]:
resultsForAnalStr += 'Release:%s was not found at %s. ' % (prevRelease,str(resultsForAnal['rel']))
else:
resultsForAnalStr += 'Release:%s/%s was not found at %s. ' % (prevRelease,prevCmtConfig,str(resultsForAnal['rel']))
if resultsForAnal['pilot'] != []:
resultsForAnalStr += '%s are inactive (no pilots for last 3 hours). ' % str(resultsForAnal['pilot'])
if resultsForAnal['disk'] != []:
resultsForAnalStr += 'Disk shortage < %sGB at %s. ' % (diskThresholdAna,str(resultsForAnal['disk']))
if resultsForAnal['memory'] != []:
resultsForAnalStr += 'Insufficient RAM at %s. ' % str(resultsForAnal['memory'])
if resultsForAnal['maxtime'] != []:
resultsForAnalStr += 'Shorter walltime limit than maxCpuCount:%s at ' % prevMaxCpuCount
for tmpItem in resultsForAnal['maxtime']:
if siteMapper.checkSite(tmpItem):
resultsForAnalStr += '%s:%s,' % (tmpItem,siteMapper.getSite(tmpItem).maxtime)
resultsForAnalStr = resultsForAnalStr[:-1]
resultsForAnalStr += '. '
if resultsForAnal['status'] != []:
resultsForAnalStr += '%s are not online. ' % str(resultsForAnal['status'])
if resultsForAnal['reliability'] != []:
resultsForAnalStr += 'Insufficient reliability at %s. ' % str(resultsForAnal['reliability'])
resultsForAnalStr = resultsForAnalStr[:-1]
tmpJob.computingSite = resultsForAnalStr
else:
tmpJob.computingSite = chosenCE.sitename
tmpLog.debug('PandaID:%s -> site:%s' % (tmpJob.PandaID,tmpJob.computingSite))
# fail jobs if no sites have the release
if (not foundRelease or (tmpJob.relocationFlag != 1 and not foundOneCandidate)) and (tmpJob.prodSourceLabel in ['managed','test']):
# reset
if tmpJob.relocationFlag not in [1,2]:
tmpJob.computingSite = None
tmpJob.computingElement = None
# go to waiting
tmpJob.jobStatus = 'waiting'
tmpJob.brokerageErrorCode = ErrorCode.EC_Release
if tmpJob.relocationFlag in [1,2]:
try:
if resultsForAnal['pilot'] != []:
tmpJob.brokerageErrorDiag = '%s no pilots' % tmpJob.computingSite
elif resultsForAnal['disk'] != []:
tmpJob.brokerageErrorDiag = 'SE full at %s' % tmpJob.computingSite
elif resultsForAnal['memory'] != []:
tmpJob.brokerageErrorDiag = 'RAM shortage at %s' % tmpJob.computingSite
elif resultsForAnal['status'] != []:
tmpJob.brokerageErrorDiag = '%s not online' % tmpJob.computingSite
elif resultsForAnal['share'] != []:
tmpJob.brokerageErrorDiag = '%s zero share' % tmpJob.computingSite
elif resultsForAnal['cpucore'] != []:
tmpJob.brokerageErrorDiag = "CPU core mismatch at %s" % tmpJob.computingSite
elif resultsForAnal['maxtime'] != []:
tmpJob.brokerageErrorDiag = "short walltime at %s" % tmpJob.computingSite
elif resultsForAnal['transferring'] != []:
tmpJob.brokerageErrorDiag = 'too many transferring at %s' % tmpJob.computingSite
elif resultsForAnal['scratch'] != []:
tmpJob.brokerageErrorDiag = 'small scratch disk at %s' % tmpJob.computingSite
elif useCacheVersion:
tmpJob.brokerageErrorDiag = '%s/%s not found at %s' % (tmpJob.homepackage,tmpJob.cmtConfig,tmpJob.computingSite)
else:
tmpJob.brokerageErrorDiag = '%s/%s not found at %s' % (tmpJob.AtlasRelease,tmpJob.cmtConfig,tmpJob.computingSite)
except Exception:
errtype,errvalue = sys.exc_info()[:2]
tmpLog.error("failed to set diag for %s: %s %s" % (tmpJob.PandaID,errtype,errvalue))
tmpJob.brokerageErrorDiag = 'failed to set diag. see brokerage log in the panda server'
elif prevBrokergageSiteList not in [[],None]:
try:
# make message
tmpJob.brokerageErrorDiag = makeCompactDiagMessage(prevBrokerageNote,resultsForAnal)
except Exception:
errtype,errvalue = sys.exc_info()[:2]
tmpLog.error("failed to set special diag for %s: %s %s" % (tmpJob.PandaID,errtype,errvalue))
tmpJob.brokerageErrorDiag = 'failed to set diag. see brokerage log in the panda server'
elif prevProType in ['reprocessing']:
tmpJob.brokerageErrorDiag = '%s/%s not found at reprocessing sites' % (tmpJob.homepackage,tmpJob.cmtConfig)
elif not useCacheVersion:
tmpJob.brokerageErrorDiag = '%s/%s not found at online sites with enough memory and disk' % \
(tmpJob.AtlasRelease,tmpJob.cmtConfig)
else:
try:
tmpJob.brokerageErrorDiag = makeCompactDiagMessage('',resultsForAnal)
except Exception:
errtype,errvalue = sys.exc_info()[:2]
tmpLog.error("failed to set compact diag for %s: %s %s" % (tmpJob.PandaID,errtype,errvalue))
tmpJob.brokerageErrorDiag = 'failed to set diag. see brokerage log in the panda server'
tmpLog.debug('PandaID:%s %s' % (tmpJob.PandaID,tmpJob.brokerageErrorDiag))
continue
# set ready if files are already there
if prevIsJEDI is False:
_setReadyToFiles(tmpJob,okFiles,siteMapper,tmpLog)
# update statistics
tmpProGroup = ProcessGroups.getProcessGroup(tmpJob.processingType)
if tmpJob.processingType in skipBrokerageProTypes:
                # use the original processingType since prod_test is in the test category and would otherwise be lumped together with validations
tmpProGroup = tmpJob.processingType
jobStatistics.setdefault(tmpJob.computingSite, {'assigned':0,'activated':0,'running':0})
jobStatBroker.setdefault(tmpJob.computingSite, {})
jobStatBroker[tmpJob.computingSite].setdefault(tmpProGroup,
{'assigned':0,'activated':0,'running':0})
jobStatistics[tmpJob.computingSite]['assigned'] += 1
jobStatBroker[tmpJob.computingSite][tmpProGroup]['assigned'] += 1
# update statistics by taking priorities into account
if not forAnalysis and prevSourceLabel in ['managed','test']:
newJobStatWithPrio.setdefault(prevPriority, {})
newJobStatWithPrio[prevPriority].setdefault(tmpJob.getCloud(), {})
newJobStatWithPrio[prevPriority][tmpJob.getCloud()].setdefault(tmpJob.computingSite, {})
newJobStatWithPrio[prevPriority][tmpJob.getCloud()][tmpJob.computingSite].setdefault(
tmpProGroup, 0)
newJobStatWithPrio[prevPriority][tmpJob.getCloud()][tmpJob.computingSite][tmpProGroup] += 1
# terminate
if job is None:
break
# reset iJob
iJob = 0
# reset file list
fileList = []
scopeList = []
okFiles = {}
totalNumInputs = 0
totalInputSize = 0
# create new dispDBlock
if job.prodDBlock != 'NULL':
# get datatype
try:
tmpDataType = job.prodDBlock.split('.')[-2]
except Exception:
# default
tmpDataType = 'GEN'
if len(tmpDataType) > 20:
# avoid too long name
tmpDataType = 'GEN'
transferType = 'transfer'
if job.useInputPrestaging():
transferType = 'prestaging'
dispatchDBlock = "panda.%s.%s.%s.%s.%s_dis%s" % (job.taskID, time.strftime('%m.%d'),
tmpDataType, transferType,
str(uuid.uuid4()), job.PandaID)
tmpLog.debug('New dispatchDBlock: %s' % dispatchDBlock)
prodDBlock = job.prodDBlock
            # computingSite is already defined
if job.computingSite != 'NULL':
# instantiate KnownSite
chosen_ce = siteMapper.getSite(job.computingSite)
# if site doesn't exist, use the default site
if job.homepackage.startswith('AnalysisTransforms'):
if chosen_ce.sitename == panda_config.def_sitename:
chosen_ce = siteMapper.getSite(panda_config.def_queue)
overwriteSite = True
else:
# default for Analysis jobs
if job.homepackage.startswith('AnalysisTransforms'):
chosen_ce = siteMapper.getSite(panda_config.def_queue)
overwriteSite = True
else:
# set chosen_ce
chosen_ce = 'TOBEDONE'
# increment iJob
iJob += 1
# reserve computingSite and cloud
computingSite = job.computingSite
previousCloud = job.getCloud()
prevRelease = job.AtlasRelease
prevMemory = job.minRamCount
prevCmtConfig = job.cmtConfig
prevProType = job.processingType
prevSourceLabel = job.prodSourceLabel
prevDiskCount = job.maxDiskCount
prevHomePkg = job.homepackage
prevDirectAcc = job.transferType
prevCoreCount = job.coreCount
prevMaxCpuCount = job.maxCpuCount
prevBrokergageSiteList = specialBrokergageSiteList
prevManualPreset = manualPreset
prevGoToT2Flag = goToT2Flag
prevWorkingGroup = job.workingGroup
prevBrokerageNote = brokerageNote
prevIsJEDI = isJEDI
prevDDM = job.getDdmBackEnd()
# truncate prio to avoid too many lookups
if job.currentPriority not in [None,'NULL']:
prevPriority = (job.currentPriority / prioInterval) * prioInterval
# assign site
if chosen_ce != 'TOBEDONE':
job.computingSite = chosen_ce.sitename
if job.computingElement == 'NULL':
if job.prodSourceLabel == 'ddm':
# use nickname for ddm jobs
job.computingElement = chosen_ce.nickname
# update statistics
jobStatistics.setdefault(job.computingSite, {'assigned':0,'activated':0,'running':0})
jobStatistics[job.computingSite]['assigned'] += 1
tmpLog.debug('PandaID:%s -> preset site:%s' % (job.PandaID,chosen_ce.sitename))
# set cloud
if job.cloud in ['NULL',None,'']:
job.cloud = chosen_ce.cloud
# set destinationSE
destSE = job.destinationSE
if siteMapper.checkCloud(job.getCloud()):
                # use cloud dest for non-existing sites
if job.prodSourceLabel != 'user' and job.destinationSE not in siteMapper.siteSpecList \
and job.destinationSE != 'local':
if DataServiceUtils.checkJobDestinationSE(job) is not None:
destSE = DataServiceUtils.checkJobDestinationSE(job)
else:
destSE = siteMapper.getCloud(job.getCloud())['dest']
job.destinationSE = destSE
# use CERN-PROD_EOSDATADISK for CERN-EOS jobs
if job.computingSite in ['CERN-EOS']:
overwriteSite = True
if overwriteSite:
# overwrite SE for analysis jobs which set non-existing sites
destSE = job.computingSite
job.destinationSE = destSE
# set dispatchDBlock and destinationSE
first = True
for file in job.Files:
# dispatchDBlock. Set dispDB for prestaging jobs too
if file.type == 'input' and file.dispatchDBlock == 'NULL' and \
((file.status not in ['ready', 'missing', 'cached']) or job.computingSite in prestageSites):
if first:
first = False
job.dispatchDBlock = dispatchDBlock
file.dispatchDBlock = dispatchDBlock
file.status = 'pending'
if file.lfn not in fileList:
fileList.append(file.lfn)
scopeList.append(file.scope)
try:
# get total number/size of inputs except DBRelease
# tgz inputs for evgen may be negligible
if re.search('\.tar\.gz', file.lfn) is None:
totalNumInputs += 1
totalInputSize += file.fsize
except Exception:
pass
# destinationSE
if file.type in ['output','log'] and destSE != '':
if job.prodSourceLabel == 'user' and job.computingSite == file.destinationSE:
pass
elif job.prodSourceLabel == 'user' and prevIsJEDI is True and file.destinationSE not in ['','NULL']:
pass
elif destSE == 'local':
pass
elif DataServiceUtils.getDistributedDestination(file.destinationDBlockToken) is not None:
pass
else:
file.destinationSE = destSE
# pre-assign GUID to log
if file.type == 'log':
# get lock
fcntl.flock(_lockGetUU.fileno(), fcntl.LOCK_EX)
# generate GUID
file.GUID = str(uuid.uuid4())
# release lock
fcntl.flock(_lockGetUU.fileno(), fcntl.LOCK_UN)
# send log messages
try:
for message in loggerMessages:
# get logger
_pandaLogger = PandaLogger()
_pandaLogger.lock()
_pandaLogger.setParams({'Type':'brokerage'})
logger = _pandaLogger.getHttpLogger(panda_config.loggername)
# add message
logger.warning(message)
# release HTTP handler
_pandaLogger.release()
time.sleep(1)
except Exception:
pass
# send analysis brokerage info when jobs are submitted
if len(jobs) > 0 and jobs[0] is not None and not forAnalysis and not pd2pT1 and specialWeight=={}:
# for analysis job. FIXME once ganga is updated to send analy brokerage info
if jobs[0].prodSourceLabel in ['user','panda'] and jobs[0].processingType in ['pathena','prun']:
# send countryGroup
tmpMsgList = []
tmpNumJobs = len(jobs)
if jobs[0].prodSourceLabel == 'panda':
tmpNumJobs -= 1
tmpMsg = 'nJobs=%s ' % tmpNumJobs
if jobs[0].countryGroup in ['NULL','',None]:
tmpMsg += 'countryGroup=None'
else:
tmpMsg += 'countryGroup=%s' % jobs[0].countryGroup
tmpMsgList.append(tmpMsg)
# send log
sendMsgToLoggerHTTP(tmpMsgList,jobs[0])
# finished
tmpLog.debug('N lookup for prio : {0}'.format(len(jobStatBrokerCloudsWithPrio)))
tmpLog.debug('finished')
if getWeight:
return weightUsedByBrokerage
except Exception as e:
tmpLog.error("schedule : %s %s" % (str(e), traceback.format_exc()))
if getWeight:
return {}
|
PanDAWMS/panda-server
|
pandaserver/brokerage/broker.py
|
Python
|
apache-2.0
| 93,553
|
from cmath import *
import os
import math
import sys
import time
import subprocess
import string
import getopt
from numpy import zeros, eye
import uuid
i = j = 1j
"""
File that prepares the input and reads the output from the .exe executable file.
The executable contains a convex semidefinite programming code that is a faster version of MLE.
This code was originally developed by NATHAN LANGFORD.
"""
#-------------------------------------------------------------------------
def bases(dim, triangular=False):
'''
Generate the Gell-Mann bases as a list of non-zero values
(they tend to be very sparse)
if triangular==True, do only the upper triangular region
'''
invsqrtdim = 1/float(math.sqrt(dim))
invsqrt2 = 1.0/math.sqrt(2)
gellmann = []
for m in range(dim):
for n in range(m+1):
B = []
if m == 0 and n == 0:
for ii in range(dim):
B.append((ii, ii, invsqrtdim))
gellmann.append(B)
elif m == n:
v = -1.0/math.sqrt(m*(m+1))
for ii in range(m):
B.append((ii, ii, v))
B.append((m, m, -m*v))
gellmann.append(B)
else:
if not triangular:
B.append((m, n, invsqrt2))
B.append((n, m, invsqrt2))
gellmann.append(B)
B = []
if not triangular:
B.append((m, n, 1j*invsqrt2))
B.append((n, m, -1j*invsqrt2))
gellmann.append(B)
return gellmann
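# Illustrative helper (hypothetical, not part of the original interface): expand the
# sparse (row, col, value) triplets returned by bases() into dense matrices. For dim=2
# this reproduces the normalised Pauli set {I, sigma_x, sigma_y, -sigma_z}/sqrt(2).
def _dense_gellmann(dim):
    '''Return the basis produced by bases(dim) as dense complex arrays.'''
    dense = []
    for triplets in bases(dim):
        M = zeros([dim, dim], dtype=complex)
        for (row, col, val) in triplets:
            M[row, col] += val
        dense.append(M)
    return dense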
#-------------------------------------------------------------------------
def readsol(filename, OPT):
'''
structure of solution file:
csdp:
vector x in one line
(primal) matrix Z in sparse sdpa format
(dual) matrix X in sparse sdpa format
dsdp5:
vector x in one line
... other stuff ...
sdplr:
vector -x one number per line
... other stuff ...
'''
x = []
# XXX make this more robust to initial comments?
# first line contains x
fp = open(filename)
if OPT['solver'] == "sdplr":
# skip first line
line = fp.readline()
# now read list of numbers 1 per line
# (next section has multiple numbers / line)
while True:
line = fp.readline().strip().split()
if len(line) > 1:
break
else:
# sdplr gives -x
x.append(-float(line[0]))
elif OPT['solver'] == "csdp" or OPT['solver'] == "dsdp5":
# solution on first line
line = fp.readline()
x = list(map(float, line.strip().split()))
else:
        raise ValueError('Unknown solver "%s"' % OPT['solver'])
fp.close()
return x
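# Note on the returned vector: for the state problem written by writesdpa_state()
# below, x has d**2+1 entries (the objective value first, then the Gell-Mann
# coefficients); see reconstructrho_state() for how it is turned back into rho.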
def writesol(rho, filename):
'''
Write out reconstructed matrix. Real and imaginary parts are separate files for
ease of reading with other programs. The files are the comma separated entries
for rho.
'''
fpr = open(filename+'.rhor', 'w')
fpi = open(filename+'.rhoi', 'w')
for ii in range(rho.dim):
lr = []
li = []
for jj in range(rho.dim):
v = complex(rho[ii, jj])
lr.append(str(v.real))
li.append(str(v.imag))
        fpr.write(', '.join(lr)+'\n')
        fpi.write(', '.join(li)+'\n')
fpr.close()
fpi.close()
#-------------------------------------------------------------------------
def pretty_print(rho, tol=1e-4, dp=3):
'''
print matrix in a nice way
'''
print()
for ii in range(rho.dim):
values = []
for jj in range(rho.dim):
v = complex(rho[ii, jj])
if abs(v) < tol:
values.append("0")
elif abs(v.imag) < tol:
values.append("% g" % round(v.real, dp))
elif abs(v.real) < tol:
values.append("% g*i" % round(v.imag, dp))
else:
values.append("% g+% g*i" %
(round(v.real, dp), round(v.imag, dp)))
        print(',\t'.join(values))
print()
#-------------------------------------------------------------------------
def writesdpa_state(data, observables, weights, filename, fixedweight, OPT):
if fixedweight:
return writesdpa_state_fw(data, observables, weights, filename, OPT)
else:
return writesdpa_state_ml(data, observables, weights, filename, OPT)
def writesdpa_state_fw(data, observables, weights, filename, OPT):
'''
The Problem
~~~~~~~~~~~
The primal form:
(P) min c1*x1+c2*x2+...+cm*xm
st F1*x1+F2*x2+...+Fm*xn-F0=X>=0
Here all of the matrices F0, F1, ..., Fm, and X are assumed to be
symmetric of size n by n. The constraints X>=0 mean that X
must be positive semidefinite.
SDPA File Format
~~~~~~~~~~~~~~~~
The file consists of six sections:
1. Comments. The file can begin with arbitrarily many lines of comments.
Each line of comments must begin with '"' or '*'.
2. The first line after the comments contains m, the number of constraint
matrices. Additional text on this line after m is ignored.
3. The second line after the comments contains nblocks, the number of
blocks in the block diagonal structure of the matrices. Additional text
on this line after nblocks is ignored.
4. The third line after the comments contains a vector of numbers that
give the sizes of the individual blocks.
Negative numbers may be used to indicate that a block is actually a diagonal
submatrix. Thus a block size of "-5" indicates a 5 by 5 block in which
only the diagonal elements are nonzero.
5. The fourth line after the comments contains the objective function
vector c.
6. The remaining lines of the file contain entries in the constraint
matrices, with one entry per line. The format for each line is
<matno> <blkno> <i> <j> <entry>
Here <matno> is the number of the matrix to which this entry belongs,
<blkno> specifies the block within this matrix, <i> and <j> specify a
location within the block, and <entry> gives the value of the entry in
the matrix. Note that since all matrices are assumed to be symmetric,
only entries in the upper triangle of a matrix are given.
'''
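    # A toy SDPA file consistent with the layout described above (hypothetical
    # numbers, just to illustrate the "<matno> <blkno> <i> <j> <entry>" lines):
    #   * comment line
    #   2            <- m, number of constraint matrices
    #   1            <- nblocks
    #   2            <- block sizes
    #   1.0 0.0      <- objective vector c
    #   0 1 1 1 1.0  <- entry (1,1) of block 1 of F0
    #   1 1 1 2 0.5  <- entry (1,2) of block 1 of F1
    #   2 1 2 2 1.0  <- entry (2,2) of block 1 of F2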
d = observables[0].shape[0]
# d^2 - (identity component) + (slack variable) + N variable
    nconstraints = d**2+1
nblocks = 2 # fixed by algorithm
dim1 = len(data)+1
fp = open(filename, 'w')
fp.write('* File generated by tomoc on %s\n' %
time.strftime("%c", time.localtime(time.time())))
    fp.write('%d\n%d\n%d %d\n' % (nconstraints, nblocks, dim1, 2*d))
fp.write('1.0 '+'0.0 '*(d**2)+'\n')
gellmann = bases(d)
gellmann_ut = bases(d, triangular=True)
# F0 -------------------------------------------
# block 1
for ii in range(dim1-1):
#trE = observables[ii].trace()/float(d)
v = data[ii]
if v != 0:
fp.write('0 1 1 %d %f\n' % (ii+2, -v))
fp.write('0 1 %d %d %f\n' % (ii+2, ii+2, -v))
# Ft -------------------------------------------
fp.write('1 1 1 1 1.0\n')
# Fmu -------------------------------------------
for mu in range(0, d**2):
if OPT['verbose']:
print("writing matrix %d/%d ..." % (mu+2, d**2+2))
# block 1
Gmu = gellmann[mu]
for kk in range(dim1-1):
Ek = observables[kk]
v = 0
for item in Gmu:
v += item[-1]*Ek[item[1], item[0]]*weights[kk]
# XXX should check imaginary component is small
if isinstance(v, complex):
v = v.real
if v == 0:
continue
fp.write('%d 1 1 %d %f\n' % (mu+2, kk+2, -v))
#fp.write('%d 1 %d %d %f\n'%(mu+2,kk+2,kk+2,v))
# block 2
# we want all of the imaginary part:
# Gmu=gellmann[mu]
for item in Gmu:
Gi = complex(item[2]).imag
if Gi != 0:
fp.write('%d 2 %d %d %f\n' %
(mu+2, item[0]+1, item[1]+1+d, -Gi))
# but only want the upper triangular of the real part:
Gmu = gellmann_ut[mu]
for item in Gmu:
Gr = complex(item[2]).real
if Gr != 0:
fp.write('%d 2 %d %d %f\n' % (mu+2, item[0]+1, item[1]+1, Gr))
fp.write('%d 2 %d %d %f\n' %
(mu+2, item[0]+1+d, item[1]+1+d, Gr))
fp.close()
return d
def writesdpa_state_ml(data, observables, weights, filename, OPT):
'''
The Problem
~~~~~~~~~~~
The primal form:
(P) min c1*x1+c2*x2+...+cm*xm
st F1*x1+F2*x2+...+Fm*xn-F0=X>=0
Here all of the matrices F0, F1, ..., Fm, and X are assumed to be
symmetric of size n by n. The constraints X>=0 mean that X
must be positive semidefinite.
SDPA File Format
~~~~~~~~~~~~~~~~
The file consists of six sections:
1. Comments. The file can begin with arbitrarily many lines of comments.
Each line of comments must begin with '"' or '*'.
2. The first line after the comments contains m, the number of constraint
matrices. Additional text on this line after m is ignored.
3. The second line after the comments contains nblocks, the number of
blocks in the block diagonal structure of the matrices. Additional text
on this line after nblocks is ignored.
4. The third line after the comments contains a vector of numbers that
give the sizes of the individual blocks.
Negative numbers may be used to indicate that a block is actually a diagonal
submatrix. Thus a block size of "-5" indicates a 5 by 5 block in which
only the diagonal elements are nonzero.
5. The fourth line after the comments contains the objective function
vector c.
6. The remaining lines of the file contain entries in the constraint
matrices, with one entry per line. The format for each line is
<matno> <blkno> <i> <j> <entry>
Here <matno> is the number of the matrix to which this entry belongs,
<blkno> specifies the block within this matrix, <i> and <j> specify a
location within the block, and <entry> gives the value of the entry in
the matrix. Note that since all matrices are assumed to be symmetric,
only entries in the upper triangle of a matrix are given.
'''
d = observables[0].shape[0]
# d^2 - (identity component) + (slack variable) + N variable
nconstraints = d**2+1
nblocks = 2 # fixed by algorithm
dim1 = len(data)+1
fp = open(filename, 'w')
fp.write('* File generated by tomoc on %s\n' %
time.strftime("%c", time.localtime(time.time())))
fp.write('%d\n%d\n%d %d\n' % (nconstraints, nblocks, dim1, 2*d))
fp.write('1.0 '+'0.0 '*(d**2)+'\n')
gellmann = bases(d)
gellmann_ut = bases(d, triangular=True)
# F0 -------------------------------------------
# block 1
for ii in range(dim1-1):
#trE = observables[ii].trace()/float(d)
v = data[ii]
if v != 0:
fp.write('0 1 1 %d %f\n' % (ii+2, -v))
# Ft -------------------------------------------
fp.write('1 1 1 1 1.0\n')
# Fmu -------------------------------------------
for mu in range(0, d**2):
if OPT['verbose']:
print("writing matrix %d/%d ..." % (mu+2, d**2+2))
# block 1
Gmu = gellmann[mu]
for kk in range(dim1-1):
Ek = observables[kk]
v = 0
for item in Gmu:
v += item[-1]*Ek[item[1], item[0]]*weights[kk]
# XXX should check imaginary component is small
if isinstance(v, complex):
v = v.real
if v == 0:
continue
fp.write('%d 1 1 %d %f\n' % (mu+2, kk+2, -v))
fp.write('%d 1 %d %d %f\n' % (mu+2, kk+2, kk+2, v))
# block 2
# we want all of the imaginary part:
# Gmu=gellmann[mu]
for item in Gmu:
Gi = complex(item[2]).imag
if Gi != 0:
fp.write('%d 2 %d %d %f\n' %
(mu+2, item[0]+1, item[1]+1+d, -Gi))
# but only want the upper triangular of the real part:
Gmu = gellmann_ut[mu]
for item in Gmu:
Gr = complex(item[2]).real
if Gr != 0:
fp.write('%d 2 %d %d %f\n' % (mu+2, item[0]+1, item[1]+1, Gr))
fp.write('%d 2 %d %d %f\n' %
(mu+2, item[0]+1+d, item[1]+1+d, Gr))
fp.close()
return d
#-------------------------------------------------------------------------
def writesdpa_process(data, inputs, observables,
weights, filename, fixedweight,
OPT):
if fixedweight:
return writesdpa_process_fw(data, inputs,
observables, weights,
filename, OPT)
else:
return writesdpa_process_ml(data, inputs,
observables,
weights, filename,
OPT)
def writesdpa_process_fw(data, inputs, observables,
weights, filename, OPT):
'''
The Problem
~~~~~~~~~~~
The primal form:
(P) min c1*x1+c2*x2+...+cm*xm
st F1*x1+F2*x2+...+Fm*xn-F0=X>=0
Here all of the matrices F0, F1, ..., Fm, and X are assumed to be
symmetric of size n by n. The constraints X>=0 mean that X
must be positive semidefinite.
SDPA File Format
~~~~~~~~~~~~~~~~
The file consists of six sections:
1. Comments. The file can begin with arbitrarily many lines of comments.
Each line of comments must begin with '"' or '*'.
2. The first line after the comments contains m, the number of constraint
matrices. Additional text on this line after m is ignored.
3. The second line after the comments contains nblocks, the number of
blocks in the block diagonal structure of the matrices. Additional text
on this line after nblocks is ignored.
4. The third line after the comments contains a vector of numbers that
give the sizes of the individual blocks.
Negative numbers may be used to indicate that a block is actually a diagonal
submatrix. Thus a block size of "-5" indicates a 5 by 5 block in which
only the diagonal elements are nonzero.
5. The fourth line after the comments contains the objective function
vector c.
6. The remaining lines of the file contain entries in the constraint
matrices, with one entry per line. The format for each line is
<matno> <blkno> <i> <j> <entry>
Here <matno> is the number of the matrix to which this entry belongs,
<blkno> specifies the block within this matrix, <i> and <j> specify a
location within the block, and <entry> gives the value of the entry in
the matrix. Note that since all matrices are assumed to be symmetric,
only entries in the upper triangle of a matrix are given.
'''
d = observables[0].shape[0]
    nconstraints = d**4-d+2 # d^4 - d (TP cond) + (slack variable) + N variable
nblocks = 2 # fixed by algorithm
dim1 = len(data)+1
fp = open(filename, 'w')
fp.write('* File generated by protomoc on %s\n' %
time.strftime("%c", time.localtime(time.time())))
    fp.write('%d\n%d\n%d %d\n' % (nconstraints, nblocks, dim1, 2*d**2))
fp.write('1 '+'0 '*(d**4)+'\n')
gellmann = bases(d)
gellmann_ut = bases(d, triangular=True)
# F0 -------------------------------------------
# block 1
for mn in range(dim1-1):
v = data[mn]
if v != 0:
fp.write('0 1 1 %d %f\n' % (mn+2, -v))
fp.write('0 1 %d %d %f\n' % (mn+2, mn+2, -v))
# Ft -------------------------------------------
fp.write('1 1 1 1 1.0\n')
# FN -------------------------------------------
# block 1
for mn in range(dim1-1):
v = weights[mn]*observables[mn].trace()/float(d)
if v != 0:
fp.write('2 1 1 %d %f\n' % (mn+2, -v))
#fp.write('2 1 %d %d %f\n'%(mn+2,mn+2,v) )
# block 2
for jj in range(0, 2*d**2):
        # to be real symmetric we have [Fr,-Fi;Fi,Fr] and want
# upper diagonal only, since this term is identity we can
# range over 2d**2 instead
fp.write('2 2 %d %d %f\n' % (jj+1, jj+1, 1/float(d**2)))
# Fjj -------------------------------------------
jk = 1 # this is the matrix number
for jj in range(0, d**2):
for kk in range(1, d**2):
if OPT['verbose']:
print("writing matrix %d/%d ..." % (jj*d**2+kk, d**4-d+1))
# block 1
Gj = gellmann[jj]
Gk = gellmann[kk]
for mn in range(dim1-1):
Emn = observables[mn]
Rmn = inputs[mn]
TrEmnGk = 0
TrRmnGj = 0
# only need to do the non-zero terms
                # this is trace(Rmn^T Gj) = vec(Rmn)^T vec(Gj)
for Gjnz in Gj:
TrRmnGj += Rmn[Gjnz[0], Gjnz[1]]*Gjnz[-1]
                # this is trace(Emn Gk) = vec(Emn^T)^T vec(Gk)
for Gknz in Gk:
TrEmnGk += Emn[Gknz[1], Gknz[0]]*Gknz[-1]
# XXX should check imaginary component is small
v = d*TrEmnGk*TrRmnGj*weights[mn]
                # both TrEmnGk and TrRmnGj should be real (Gj, Gk hermitian)
if isinstance(v, complex):
v = v.real
if v == 0:
continue
fp.write('%d 1 1 %d %f\n' % (jk+2, mn+2, -v))
#fp.write('%d 1 %d %d %f\n'%(jk+2,mn+2,mn+2,v))
# block 2
for gj in Gj:
for gk in Gk:
# work out tensor product Gj . Gk
gjgk = (gj[0]*d+gk[0], gj[1]*d+gk[1], gj[2]*gk[2])
# need to split this up hermitian -> real symmetric
# we only want the upper triangular of the real part:
if gjgk[1] >= gjgk[0]:
v = complex(gjgk[2]).real
if v != 0:
fp.write('%d 2 %d %d %f\n' %
(jk+2, gjgk[0]+1, gjgk[1]+1, v))
fp.write('%d 2 %d %d %f\n' %
(jk+2, gjgk[0]+1+d**2, gjgk[1]+1+d**2, v))
# Gjj=gellmann_ut[jj]
# for item in Gjj:
# Gr = complex(item[2]).real
# if Gr != 0:
# fp.write('%d 2 %d %d %f\n'%(jj+2, item[0]+1, item[1]+1,Gr))
# fp.write('%d 2 %d %d %f\n'%(jj+2, item[0]+1+d, item[1]+1+d,Gr))
# but all of the imaginary part:
v = complex(gjgk[2]).imag
fp.write('%d 2 %d %d %f\n' %
(jk+2, gjgk[0]+1, gjgk[1]+1+d**2, -v))
# Gjj=gellmann[jj]
# for item in Gjj:
# Gi = complex(item[2]).imag
# if Gi != 0:
# fp.write('%d 2 %d %d %f\n'%(jj+2, item[0]+1, item[1]+1+d,-Gi))
jk += 1
fp.close()
return d
def writesdpa_process_ml(data, inputs, observables, weights, filename, OPT):
'''
The Problem
~~~~~~~~~~~
The primal form:
(P) min c1*x1+c2*x2+...+cm*xm
st F1*x1+F2*x2+...+Fm*xn-F0=X>=0
Here all of the matrices F0, F1, ..., Fm, and X are assumed to be
symmetric of size n by n. The constraints X>=0 mean that X
must be positive semidefinite.
SDPA File Format
~~~~~~~~~~~~~~~~
The file consists of six sections:
1. Comments. The file can begin with arbitrarily many lines of comments.
Each line of comments must begin with '"' or '*'.
2. The first line after the comments contains m, the number of constraint
matrices. Additional text on this line after m is ignored.
3. The second line after the comments contains nblocks, the number of
blocks in the block diagonal structure of the matrices. Additional text
on this line after nblocks is ignored.
4. The third line after the comments contains a vector of numbers that
give the sizes of the individual blocks.
Negative numbers may be used to indicate that a block is actually a diagonal
submatrix. Thus a block size of "-5" indicates a 5 by 5 block in which
only the diagonal elements are nonzero.
5. The fourth line after the comments contains the objective function
vector c.
6. The remaining lines of the file contain entries in the constraint
matrices, with one entry per line. The format for each line is
<matno> <blkno> <i> <j> <entry>
Here <matno> is the number of the matrix to which this entry belongs,
<blkno> specifies the block within this matrix, <i> and <j> specify a
location within the block, and <entry> gives the value of the entry in
the matrix. Note that since all matrices are assumed to be symmetric,
only entries in the upper triangle of a matrix are given.
'''
d = observables[0].shape[0]
    nconstraints = d**4-d+2 # d^4 - d (TP cond) + (slack variable) + N variable
nblocks = 2 # fixed by algorithm
dim1 = len(data)+1
fp = open(filename, 'w')
fp.write('* File generated by protomoc on %s\n' %
time.strftime("%c", time.localtime(time.time())))
    fp.write('%d\n%d\n%d %d\n' % (nconstraints, nblocks, dim1, 2*d**2))
fp.write('1 '+'0 '*(d**4)+'\n')
gellmann = bases(d)
gellmann_ut = bases(d, triangular=True)
# F0 -------------------------------------------
# block 1
for mn in range(dim1-1):
v = data[mn]
if v != 0:
fp.write('0 1 1 %d %f\n' % (mn+2, -v))
# Ft -------------------------------------------
fp.write('1 1 1 1 1.0\n')
# FN -------------------------------------------
# block 1
for mn in range(dim1-1):
v = weights[mn]*observables[mn].trace()/float(d)
if v != 0:
fp.write('2 1 1 %d %f\n' % (mn+2, -v))
fp.write('2 1 %d %d %f\n' % (mn+2, mn+2, v))
# block 2
for jj in range(0, 2*d**2):
        # to be real symmetric we have [Fr,-Fi;Fi,Fr] and want
# upper diagonal only, since this term is identity we can
# range over 2d**2 instead
fp.write('2 2 %d %d %f\n' % (jj+1, jj+1, 1/float(d**2)))
# Fjj -------------------------------------------
jk = 1 # this is the matrix number
for jj in range(0, d**2):
for kk in range(1, d**2):
if OPT['verbose']:
print("writing matrix %d/%d ..." % (jj*d**2+kk, d**4-d+1))
# block 1
Gj = gellmann[jj]
Gk = gellmann[kk]
for mn in range(dim1-1):
Emn = observables[mn]
Rmn = inputs[mn]
TrEmnGk = 0
TrRmnGj = 0
# only need to do the non-zero terms
                # this is trace(Rmn^T Gj) = vec(Rmn)^T vec(Gj)
for Gjnz in Gj:
TrRmnGj += Rmn[Gjnz[0], Gjnz[1]]*Gjnz[-1]
                # this is trace(Emn Gk) = vec(Emn^T)^T vec(Gk)
for Gknz in Gk:
TrEmnGk += Emn[Gknz[1], Gknz[0]]*Gknz[-1]
# XXX should check imaginary component is small
v = d*TrEmnGk*TrRmnGj*weights[mn]
                # both TrEmnGk and TrRmnGj should be real (Gj, Gk hermitian)
if isinstance(v, complex):
v = v.real
if v == 0:
continue
fp.write('%d 1 1 %d %f\n' % (jk+2, mn+2, -v))
fp.write('%d 1 %d %d %f\n' % (jk+2, mn+2, mn+2, v))
# block 2
for gj in Gj:
for gk in Gk:
# work out tensor product Gj . Gk
gjgk = (gj[0]*d+gk[0], gj[1]*d+gk[1], gj[2]*gk[2])
# need to split this up hermitian -> real symmetric
# we only want the upper triangular of the real part:
if gjgk[1] >= gjgk[0]:
v = complex(gjgk[2]).real
if v != 0:
fp.write('%d 2 %d %d %f\n' %
(jk+2, gjgk[0]+1, gjgk[1]+1, v))
fp.write('%d 2 %d %d %f\n' %
(jk+2, gjgk[0]+1+d**2, gjgk[1]+1+d**2, v))
# Gjj=gellmann_ut[jj]
# for item in Gjj:
# Gr = complex(item[2]).real
# if Gr != 0:
# fp.write('%d 2 %d %d %f\n'%(jj+2, item[0]+1, item[1]+1,Gr))
# fp.write('%d 2 %d %d %f\n'%(jj+2, item[0]+1+d, item[1]+1+d,Gr))
# but all of the imaginary part:
v = complex(gjgk[2]).imag
fp.write('%d 2 %d %d %f\n' %
(jk+2, gjgk[0]+1, gjgk[1]+1+d**2, -v))
# Gjj=gellmann[jj]
# for item in Gjj:
# Gi = complex(item[2]).imag
# if Gi != 0:
# fp.write('%d 2 %d %d %f\n'%(jj+2, item[0]+1, item[1]+1+d,-Gi))
jk += 1
fp.close()
return d
#-------------------------------------------------------------------------
def reconstructrho_state(x, dim, OPT):
'''
reconstruct solution as density matrix rho
'''
# 1st value is the min objective function
# 2nd value is the normalisation N
N = float(x[1])*sqrt(dim)
if OPT['verbose']:
print("Normalisation = ", N)
# get basis matrices
gellmann = bases(dim)
rho = zeros([dim, dim], dtype=complex)
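    # x[0] is the minimised objective value and x[1:] the Gell-Mann coefficients,
    # so the state is rebuilt as rho = sum_mu x[mu+1] * G_mu over the sparse basis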
for ii in range(dim**2):
for item in gellmann[ii]:
rho[item[0], item[1]] += x[ii+1]*item[2]
if OPT['normalised']:
rho = rho/rho.trace()
#rho = eye(dim)/float(dim)
# for ii in range(1,dim**2):
# for item in gellmann[ii]:
# rho[item[0],item[1]] += x[ii+1]*item[2]/N
return rho
def reconstructrho_process(x, dim, OPT):
'''
    reconstruct solution as the d**2 x d**2 process matrix rho
'''
# 1st value is the min objective function
# 2nd value is the normalisation N
N = float(x[1])
if OPT['verbose']:
print("Normalisation = ", N)
# get basis matrices
gellmann = bases(dim)
rho = eye(dim**2, dtype=complex)/float(dim**2)*N
jk = 1 # this is the matrix number
for jj in range(0, dim**2):
for kk in range(1, dim**2):
Gj = gellmann[jj]
Gk = gellmann[kk]
for gj in Gj:
for gk in Gk:
# work out tensor product Gj . Gk
gjgk = (gj[0]*dim+gk[0], gj[1]*dim+gk[1], gj[2]*gk[2])
rho[gjgk[0], gjgk[1]] += x[jk+1]*gjgk[2]
jk += 1
if OPT['normalised']:
rho = rho/rho.trace()
return rho
#-------------------------------------------------------------------------
def tomo_state(data, observables, weights, filebase=None, fixedweight=True, tomo_options={}):
OPT = {'verbose': False, 'prettyprint': False,
'solver': 'csdp', 'normalised': False}
for o, a in list(tomo_options.items()):
OPT[o] = a
    # default filebase to a unique temporary name
if filebase is None:
filebase = 'temp' + str(uuid.uuid4())
fullfilepath = os.path.abspath(globals()['__file__'])
filedir, tmp = os.path.split(fullfilepath)
orig_dir = os.getcwd()
if OPT['verbose']:
print("changing dir to: %s" % filedir)
os.chdir(filedir)
if OPT['verbose']:
        print("writing sdpa control file: %s.sdpa" % filebase)
dim = writesdpa_state(data, observables, weights,
filebase+'.sdpa', fixedweight, OPT)
if OPT['solver'] == 'csdp':
command = 'csdp %s.sdpa %s.sol' % (filebase, filebase)
if OPT['verbose']:
print("running: "+command)
# else:
# status, out = commands.getstatusoutput(command)
os.system(command)
elif OPT['solver'] == 'dsdp5':
# WARNING: dsdp5 arbitrarily truncates -save path!
command = 'dsdp5 %s.sdpa -save %s.sol' % (filebase, filebase)
if OPT['verbose']:
print("running: "+command)
# else:
# status, out = commands.getstatusoutput(command)
os.system(command)
elif OPT['solver'] == 'sdplr':
command = 'sdplr %s.sdpa non_existent_file non_existent_file %s.sol' % (
filebase, filebase)
if OPT['verbose']:
print("running: "+command)
# else:
# status, out = commands.getstatusoutput(command)
os.system(command)
else:
        raise ValueError("unknown solver: %s" % OPT["solver"])
if OPT['verbose']:
print("reading in solution from %s.sol" % filebase)
x = readsol("%s.sol" % filebase, OPT)
# len(x) = d**2+1 [ d^2 - (identity component) + (slack variable) + N
# variable ]
rho = reconstructrho_state(x, dim, OPT)
if OPT['prettyprint']:
pretty_print(rho)
# clean up temporary files
os.remove("%s.sdpa" % filebase)
os.remove("%s.sol" % filebase)
os.chdir(orig_dir)
return rho
def tomo_process(data, inputs, observables, weights, filebase=None, fixedweight=True, tomo_options={}):
OPT = {'verbose': False, 'prettyprint': False,
'solver': 'csdp', 'normalised': False}
for o, a in list(tomo_options.items()):
OPT[o] = a
    # default filebase to a unique temporary name
if filebase is None:
filebase = 'temp' + str(uuid.uuid4())
fullfilepath = os.path.abspath(globals()['__file__'])
filedir, tmp = os.path.split(fullfilepath)
orig_dir = os.getcwd()
if OPT['verbose']:
        print("changing dir to: %s" % filedir)
os.chdir(filedir)
if OPT['verbose']:
        print("writing sdpa control file: %s.sdpa" % filebase)
dim = writesdpa_process(data, inputs, observables,
weights, filebase+'.sdpa', fixedweight, OPT)
if OPT['solver'] == 'csdp':
command = 'csdp %s.sdpa %s.sol' % (filebase, filebase)
if OPT['verbose']:
print("running: "+command)
# else:
# status, out = commands.getstatusoutput(command)
os.system(command)
elif OPT['solver'] == 'dsdp5':
# WARNING: dsdp5 arbitrarily truncates -save path!
command = 'dsdp5 %s.sdpa -save %s.sol' % (filebase, filebase)
if OPT['verbose']:
print("running: "+command)
# else:
# status, out = commands.getstatusoutput(command)
os.system(command)
elif OPT['solver'] == 'sdplr':
command = 'sdplr %s.sdpa non_existent_file non_existent_file %s.sol' % (
filebase, filebase)
if OPT['verbose']:
print("running: "+command)
# else:
# status, out = commands.getstatusoutput(command)
os.system(command)
else:
        raise ValueError("unknown solver: %s" % OPT["solver"])
if OPT['verbose']:
print("reading in solution from %s.sol" % filebase)
x = readsol("%s.sol" % filebase, OPT)
# len(x) = d**4-d+2 [d^4 - d (TP cond) + (slack variable) + N variable ]
rho = reconstructrho_process(x, dim, OPT)
if OPT['prettyprint']:
pretty_print(rho)
# # clean up temporary files
# os.remove("%s.sdpa"%filebase)
# os.remove("%s.sol"%filebase)
os.chdir(orig_dir)
return rho
#-------------------------------------------------------------------------
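# A minimal, hypothetical usage sketch of tomo_state() -- it is never called
# and only illustrates the calling convention.  The observables, data and
# weights below are placeholders for a single qubit; the exact shapes and
# conventions they must follow depend on writesdpa_state()/bases(), and
# running it also requires one of the external SDP solvers
# (csdp/dsdp5/sdplr) to be available on the PATH.
def _example_tomo_state_usage():  # pragma: no cover
    import numpy as np
    observables = [np.array([[0, 1], [1, 0]], dtype=complex),     # X
                   np.array([[0, -1j], [1j, 0]], dtype=complex),  # Y
                   np.array([[1, 0], [0, -1]], dtype=complex)]    # Z
    data = [0.0, 0.0, 1.0]      # hypothetical measured expectation values
    weights = [1.0, 1.0, 1.0]   # equal weighting of the three observables
    return tomo_state(data, observables, weights,
                      tomo_options={'verbose': True, 'normalised': True})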
|
DiCarloLab-Delft/PycQED_py3
|
pycqed/analysis_v2/pytomo.py
|
Python
|
mit
| 32,252
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.create_index('sentry_groupbookmark', ['user_id', 'group_id'])
def backwards(self, orm):
db.delete_index('sentry_groupbookmark', ['user_id', 'group_id'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.affecteduserbygroup': {
'Meta': {'unique_together': "(('project', 'ident', 'group'),)", 'object_name': 'AffectedUserByGroup'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filterkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'FilterKey'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'users_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['auth.User']"})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'sentry.project': {
'Meta': {'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
},
'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'user_added': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['auth.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.team': {
'Meta': {'object_name': 'Team'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['auth.User']"})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
}
}
complete_apps = ['sentry']
|
NickPresta/sentry
|
src/sentry/migrations/0075_add_groupbookmark_index.py
|
Python
|
bsd-3-clause
| 21,478
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Thread.title'
db.add_column('askbot_thread', 'title', self.gf('django.db.models.fields.CharField')(default='', max_length=300), keep_default=False)
def backwards(self, orm):
# Deleting field 'Thread.title'
db.delete_column('askbot_thread', 'title')
models = {
'askbot.activity': {
'Meta': {'object_name': 'Activity', 'db_table': "u'activity'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'activity_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auditted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Question']", 'null': 'True'}),
'receiving_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'received_activity'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'incoming_activity'", 'symmetrical': 'False', 'through': "orm['askbot.ActivityAuditStatus']", 'to': "orm['auth.User']"}),
'summary': ('django.db.models.fields.TextField', [], {'default': "''"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.activityauditstatus': {
'Meta': {'unique_together': "(('user', 'activity'),)", 'object_name': 'ActivityAuditStatus'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Activity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.anonymousanswer': {
'Meta': {'object_name': 'AnonymousAnswer'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anonymous_answers'", 'to': "orm['askbot.Question']"}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.anonymousquestion': {
'Meta': {'object_name': 'AnonymousQuestion'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.answer': {
'Meta': {'object_name': 'Answer', 'db_table': "u'answer'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['askbot.Question']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.award': {
'Meta': {'object_name': 'Award', 'db_table': "u'award'"},
'awarded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_badge'", 'to': "orm['askbot.BadgeData']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_user'", 'to': "orm['auth.User']"})
},
'askbot.badgedata': {
'Meta': {'ordering': "('slug',)", 'object_name': 'BadgeData'},
'awarded_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'awarded_to': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'badges'", 'symmetrical': 'False', 'through': "orm['askbot.Award']", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'askbot.comment': {
'Meta': {'ordering': "('-added_at',)", 'object_name': 'Comment', 'db_table': "u'comment'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'html': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2048'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'offensive_flag_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['auth.User']"})
},
'askbot.emailfeedsetting': {
'Meta': {'object_name': 'EmailFeedSetting'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reported_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notification_subscriptions'", 'to': "orm['auth.User']"})
},
'askbot.favoritequestion': {
'Meta': {'object_name': 'FavoriteQuestion', 'db_table': "u'favorite_question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Thread']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'unused_user_favorite_questions'", 'to': "orm['auth.User']"})
},
'askbot.markedtag': {
'Meta': {'object_name': 'MarkedTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_selections'", 'to': "orm['askbot.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tag_selections'", 'to': "orm['auth.User']"})
},
'askbot.postrevision': {
'Meta': {'ordering': "('-revision',)", 'unique_together': "(('answer', 'revision'), ('question', 'revision'))", 'object_name': 'PostRevision'},
'answer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisions'", 'null': 'True', 'to': "orm['askbot.Answer']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'postrevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisions'", 'null': 'True', 'to': "orm['askbot.Question']"}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'revision_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '125', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '300', 'blank': 'True'})
},
'askbot.question': {
'Meta': {'object_name': 'Question', 'db_table': "u'question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'questions'", 'symmetrical': 'False', 'to': "orm['askbot.Tag']"}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'unique': 'True', 'to': "orm['askbot.Thread']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.questionview': {
'Meta': {'object_name': 'QuestionView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'viewed'", 'to': "orm['askbot.Question']"}),
'when': ('django.db.models.fields.DateTimeField', [], {}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_views'", 'to': "orm['auth.User']"})
},
'askbot.repute': {
'Meta': {'object_name': 'Repute', 'db_table': "u'repute'"},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'negative': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'positive': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Question']", 'null': 'True', 'blank': 'True'}),
'reputation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'reputation_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'reputed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.tag': {
'Meta': {'ordering': "('-used_count', 'name')", 'object_name': 'Tag', 'db_table': "u'tag'"},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_tags'", 'to': "orm['auth.User']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_tags'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'used_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.thread': {
'Meta': {'object_name': 'Thread'},
'accepted_answer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Answer']", 'null': 'True', 'blank': 'True'}),
'answer_accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'answer_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'close_reason': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'closed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'favorited_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'unused_favorite_threads'", 'symmetrical': 'False', 'through': "orm['askbot.FavoriteQuestion']", 'to': "orm['auth.User']"}),
'favourite_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'followed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followed_threads'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_activity_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'unused_last_active_in_threads'", 'to': "orm['auth.User']"}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.vote': {
'Meta': {'unique_together': "(('content_type', 'object_id', 'user'),)", 'object_name': 'Vote', 'db_table': "u'vote'"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['auth.User']"}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {}),
'voted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['askbot']
|
divio/askbot-devel
|
askbot/migrations/0081_transplant_title_1.py
|
Python
|
gpl-3.0
| 26,914
|
import should_be.all # noqa
import unittest
class TestContainerMixin(unittest.TestCase):
def setUp(self):
self.lst = [1, 2, 3]
def test_should_include_iter(self):
err_msg = (r'[a-zA-Z0-9.]+ should have included \[.+?\]'
r', but did not have items .+')
self.assertRaisesRegexp(AssertionError, err_msg,
self.lst.should_include, [4])
self.lst.should_include([1, 2, 3])
def test_should_include_item(self):
err_msg = (r'[a-zA-Z0-9.]+ should have included .+?'
r', but did not')
self.assertRaisesRegexp(AssertionError, err_msg,
self.lst.should_include, 4)
self.lst.should_include(3)
def test_shouldnt_include_iter(self):
err_msg = 'should not have included'
self.assertRaisesRegexp(AssertionError, err_msg,
self.lst.shouldnt_include, [2, 3])
self.lst.shouldnt_include([4, 5])
def test_shouldnt_include_item(self):
err_msg = 'should not have included'
self.assertRaisesRegexp(AssertionError, err_msg,
self.lst.shouldnt_include, 3)
self.lst.shouldnt_include(4)
|
DirectXMan12/should_be
|
should_be/tests/test_container_mixin.py
|
Python
|
isc
| 1,254
|
import json
import os
from flask import Flask, Response
from flask.ext.httpauth import HTTPBasicAuth
from flask.ext.login import current_user, LoginManager
from flask.ext.mongoengine import MongoEngine
from flask.ext.principal import Principal, identity_loaded,\
Permission, RoleNeed, UserNeed
from flask.ext.sqlalchemy import SQLAlchemy
from common.constants import APP_NAME, CONTENT_KEY, DEFAULT_MODE,\
JSON_HEADER, GUEST_ROLE, NOT_AUTHORIZED_MESSAGE
from common.session import RedisSessionInterface
mode = os.environ.get('FLASK_MODE', DEFAULT_MODE)
app = Flask(APP_NAME)
app.config.from_object(mode)
redis_session_host = app.config['REDIS_HOST_SESSION']
redis_session_port = app.config['REDIS_PORT_SESSION']
redis_session_db = app.config['REDIS_DB_SESSION']
redis_host = app.config['REDIS_HOST']
redis_port = app.config['REDIS_PORT']
redis_db = app.config['REDIS_DB']
app.session_interface = RedisSessionInterface(redis_session_host,
redis_session_port,
redis_session_db)
auth = HTTPBasicAuth()
db = SQLAlchemy(app)
db_mongo = MongoEngine(app)
login_manager = LoginManager()
login_manager.setup_app(app)
principals = Principal(app)
admin_permission = Permission(RoleNeed('admin'))
member_permission = Permission(RoleNeed('member'))
guest_permission = Permission(RoleNeed('guest'))
from landing import landing_module
from user import user_module
from blog import blog_module
from role import role_module
from toolchain import toolchain_module
from user.models import User
from user_role.models import UserRole
from article import article_module
from session import session_module
from job_offers import job_offers_module
from search_sites import search_sites_module
app.register_blueprint(landing_module)
app.register_blueprint(user_module)
app.register_blueprint(role_module)
app.register_blueprint(toolchain_module)
app.register_blueprint(article_module)
app.register_blueprint(session_module)
app.register_blueprint(blog_module)
app.register_blueprint(search_sites_module)
app.register_blueprint(job_offers_module)
@login_manager.user_loader
def load_user(userid):
return User.query.get(userid)
@identity_loaded.connect_via(app)
def on_identity_loaded(sender, identity):
# Set the identity user object
identity.user = current_user
# Add the UserNeed to the identity
if hasattr(current_user, 'id'):
identity.provides.add(UserNeed(current_user.id))
# Assuming the User model has a list of roles, update the
# identity with the roles that the user provides
user_role = UserRole().query.filter_by(user_id=current_user.id).all()
if len(user_role) > 0:
for credential in user_role:
identity.provides.add(RoleNeed(credential.role.name))
else:
identity.provides.add(RoleNeed(GUEST_ROLE))
@app.errorhandler(500)
def permission_denied(e):
return Response(json.dumps({CONTENT_KEY: NOT_AUTHORIZED_MESSAGE}),
status=500, content_type=JSON_HEADER)
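# Hypothetical illustration (never called by the application): the Permission
# objects defined above are what view functions would normally assert.  The
# helper below only makes sense inside a request with a loaded identity, and
# the protected logic is left as a placeholder.
def _example_require_admin():
    """Run a block only when the current identity provides the 'admin' role;
    otherwise flask-principal aborts the request with a 403 (illustration
    only)."""
    with admin_permission.require(http_exception=403):
        pass  # protected logic would go here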
|
jvazquez/organization
|
organization/__init__.py
|
Python
|
unlicense
| 3,087
|
from .utils import PyKEArgumentHelpFormatter
from . import kepmsg, kepio, kepkey, kepplot
import re
import numpy as np
from astropy.io import fits as pyfits
from scipy import optimize as opt
from matplotlib import pyplot as plt
from tqdm import tqdm
import random
__all__ = ['keppca']
def keppca(infile, outfile=None, maskfile='ALL', components='1-3', plotpca=False,
nmaps=10, overwrite=False, verbose=False, logfile='keppca.log'):
"""
keppca -- Perform principal component analysis upon a target pixel file
    keppca provides a method to mitigate motion-derived systematic
    artifacts via Principal Component Analysis (PCA). This method was
demonstrated on Kepler light curves by Harrison et al. (2012). It provides
an alternative to cotrending data using basis vectors (kepcotrend) and
    correlating aperture photometry structure with time-series centroid
    measurements (kepsff). PCA will perhaps become a more widespread tool in
    the K2 era, where the magnitude of target motion across the detector over a
    Kepler quarter is experienced by a K2 target over just 6 hours during its
    regular sequence of thruster firings that counteract boresight roll motion.
    Pixel-level PCA employs only those pixels collected around a specific
target and separates photometric trends common to all pixels from trends
localized to individual targets or pixels in a series of principal
component curves.
The user has the option to choose the specific set of pixels to sample in
this analysis. Principal components are plotted by the tool and written out
to an output FITS file in an output extension called PRINCIPAL_COMPONENTS.
The extension contains a 2D table with one row per timestamp recorded in
the input file and one column for every principal component. Summing all
principal components together will reconstruct a normalized version of the
summed pixel within the chosen aperture. The user also has the choice of
which principal components to optimally-subtract from the aperture-derived
light curve in order to remove motion systematics from the time-series
data. The aperture light curve and the corrected light curve are written to
the LIGHTCURVE extension of the output file. The first populates the
SAP_FLUX data column and the second is written to a column called PCA_FLUX.
This output file can be used as input for other PyKE tasks and can be e.g.
inspected using kepdraw.
Parameters
----------
infile : str
The name of a standard format FITS file containing Kepler or K2 target
pixels within the first data extension.
outfile : str
Filename for the output light curves and principal components. This
product will be written to the same FITS format as archived light
curves. Aperture photometry will be stored in the SAP_FLUX column of
the first FITS extension called LIGHTCURVE. A version of this light
curve with principal components subtracted is stored in column PCA_FLUX
and a normalized version is stored in PCA_FLUX_NRM. The individual
principal components are stored within a new FITS extension called
PRINCIPAL_COMPONENTS.
maskfile : str
This string can be one of three options:
* 'ALL' tells the task to calculate principal components from all
pixels within the pixel mask stored in the input file.
* 'APER' tells the task to calculate principal components from only the
pixels within the photometric aperture stored in the input file (e.g.
only those pixels summed by the Kepler pipeline to produce the light
curve archived at MAST (note that no such light curves are currently
          being created for the K2 mission)).
* A filename describing the desired photometric aperture. Such a file
can be constructed using the kepmask or kepffi tools, or can be created
manually using the format described in the documentation for those
          tools. Note that if an aperture provided is not strictly rectangular,
keppca will increase the size of the aperture so that it defines the
smallest possible rectangle that contains all of the specified pixels.
components : str
A list of the principal components to subtract from the aperture light
        curve. The strings '1 2 3 4 5', '1,2,3,4,5' and '1,2,3-5' yield the
same result.
plotpca : bool
If True, keppca will produce plots containing individual principal
components, correlation maps and light curves, both aperture and
        PCA-corrected versions. These will be stored as hardcopies in PNG format.
nmaps : int
The number of correlation maps and principal components to plot as
output. This can be any positive integer up to the number of pixels
within the mask, although note that many hundreds of plots will likely
        become prohibitive and are unlikely to be informative.
overwrite : bool
Overwrite the output file?
verbose : bool
Print informative messages and warnings to the shell and logfile?
logfile : str
        Name of the logfile containing error and warning messages.
Examples
--------
.. code-block:: bash
$ keppca ktwo202073445-c00_lpd-targ.fits.gz --plotpca
.. image:: ../_static/images/api/keppca.png
:align: center
"""
import mdp
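    # Illustration only (not necessarily how this task expands it): the
    # `components` string documented above is a comma/space separated list in
    # which 'a-b' denotes an inclusive range, so '1,2,3-5' maps to
    # [1, 2, 3, 4, 5].  A minimal parser for that grammar could look like
    #     comps = []
    #     for tok in re.split('[ ,]+', components.strip()):
    #         if '-' in tok:
    #             lo, hi = tok.split('-')
    #             comps.extend(range(int(lo), int(hi) + 1))
    #         elif tok:
    #             comps.append(int(tok))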
if outfile is None:
outfile = infile.split('.')[0] + "-{}.fits".format(__all__[0])
# log the call
hashline = '--------------------------------------------------------------'
kepmsg.log(logfile, hashline, verbose)
call = ('KEPPCA -- '
+ ' infile={}'.format(infile)
+ ' maskfile={}'.format(maskfile)
+ ' outfile={}'.format(outfile)
+ ' components={}'.format(components)
+ ' plotpca={}'.format(plotpca)
+ ' nmaps={}'.format(nmaps)
+ ' overwrite={}'.format(overwrite)
+ ' verbose={}'.format(verbose)
+ ' logfile={}'.format(logfile))
kepmsg.log(logfile, call + '\n', verbose)
kepmsg.clock('KEPPCA started at', logfile, verbose)
# overwrite output file
if overwrite:
kepio.overwrite(outfile, logfile, verbose)
if kepio.fileexists(outfile):
errmsg = ('ERROR -- KEPPCA: {} exists. Use overwrite=True'
.format(outfile))
kepmsg.err(logfile, errmsg, verbose)
# Set output file names - text file with data and plot
dataout = np.copy(outfile)
repname = re.sub('.fits', '.png', outfile)
# open input file
instr = pyfits.open(infile, mode='readonly', memmap=True)
tstart, tstop, bjdref, cadence = kepio.timekeys(instr, infile, logfile,
verbose)
# open TPF FITS file
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, barytime = \
kepio.readTPF(infile, 'TIME', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, tcorr = \
kepio.readTPF(infile, 'TIMECORR', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, cadno = \
kepio.readTPF(infile, 'CADENCENO', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, fluxpixels = \
kepio.readTPF(infile, 'FLUX', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, errpixels = \
kepio.readTPF(infile, 'FLUX_ERR', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, flux_bkg = \
kepio.readTPF(infile, 'FLUX_BKG', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, flux_bkg_err = \
kepio.readTPF(infile, 'FLUX_BKG_ERR', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, qual = \
kepio.readTPF(infile, 'QUALITY', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, pcorr1 = \
kepio.readTPF(infile, 'POS_CORR1', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, pcorr2 = \
kepio.readTPF(infile, 'POS_CORR2', logfile, verbose)
# Save original data dimensions, in case of using maskfile
xdimorig = xdim
ydimorig = ydim
# read mask definition file if it has been supplied
if 'aper' not in maskfile.lower() and maskfile.lower() != 'all':
maskx = np.array([], 'int')
masky = np.array([], 'int')
lines = kepio.openascii(maskfile, 'r', logfile, verbose)
for line in lines:
line = line.strip().split('|')
if len(line) == 6:
y0 = int(line[3])
x0 = int(line[4])
line = line[5].split(';')
for items in line:
try:
masky = np.append(masky, y0 + int(items.split(',')[0]))
maskx = np.append(maskx, x0 + int(items.split(',')[1]))
except:
continue
kepio.closeascii(lines, logfile, verbose)
if len(maskx) == 0 or len(masky) == 0:
errmsg = 'ERROR -- KEPPCA: {} contains no pixels.'.format(maskfile)
kepmsg.err(logfile, errmsg, verbose)
xdim = max(maskx) - min(maskx) + 1 # Find largest x dimension of mask
ydim = max(masky) - min(masky) + 1 # Find largest y dimension of mask
# pad mask to ensure it is rectangular
workx = np.array([], 'int')
worky = np.array([], 'int')
for ip in np.arange(min(maskx), max(maskx) + 1):
for jp in np.arange(min(masky), max(masky) + 1):
workx = np.append(workx, ip)
worky = np.append(worky, jp)
maskx = workx
masky = worky
# define new subimage bitmap...
if maskfile.lower() != 'all':
aperx = np.array([], 'int')
apery = np.array([], 'int')
# aperb is an array that contains the pixel numbers in the mask
aperb = maskx - x0 + xdimorig * (masky - y0)
npix = len(aperb)
# ...or use all pixels
if maskfile.lower() == 'all':
npix = xdimorig * ydimorig
aperb = np.array([], 'int')
aperb = np.r_[0: npix]
# legal mask defined?
if len(aperb) == 0:
message = ('ERROR -- KEPPCA: no legal pixels within the subimage are'
' defined.')
kepmsg.err(logfile, message, verbose)
# Identify principal components desired
pcaout = []
txt = components.strip().split(',')
for work1 in txt:
try:
pcaout.append(int(work1.strip()))
except:
work2 = work1.strip().split('-')
try:
for work3 in range(int(work2[0]), int(work2[1]) + 1):
pcaout.append(work3)
except:
errmsg = ('ERROR -- KEPPCA: cannot understand principal'
' component list requested')
kepmsg.err(logfile, errmsg, verbose)
pcaout = set(np.sort(pcaout))
# The list of pca component numbers to be removed
pcarem = np.array(list(pcaout)) - 1
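# Added worked example (not in the original): components='1,2,3-5' expands
# to pcaout = {1, 2, 3, 4, 5}, so pcarem = [0, 1, 2, 3, 4], i.e. zero-based
# indices into the principal-component matrix built below.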
# Initialize arrays and variables, and apply pixel mask to the data
ntim = 0
time = np.array([], dtype='float64')
timecorr = np.array([], dtype='float32')
cadenceno = np.array([], dtype='int')
pixseries = np.array([], dtype='float32')
errseries = np.array([], dtype='float32')
bkgseries = np.array([], dtype='float32')
berseries = np.array([], dtype='float32')
quality = np.array([], dtype='float32')
pos_corr1 = np.array([], dtype='float32')
pos_corr2 = np.array([], dtype='float32')
nrows = np.size(fluxpixels, 0)
# Apply the pixel mask so we are left with only the desired pixels
pixseriesb = fluxpixels[:, aperb]
errseriesb = errpixels[:, aperb]
bkgseriesb = flux_bkg[:, aperb]
berseriesb = flux_bkg_err[:, aperb]
# Read in the data to various arrays
for i in range(nrows):
if (qual[i] < 10000 and np.isfinite(barytime[i])
and np.isfinite(fluxpixels[i, int(ydim * xdim / 2 + 0.5)])
and np.isfinite(fluxpixels[i, 1 + int(ydim * xdim / 2 + 0.5)])):
ntim += 1
time = np.append(time, barytime[i])
timecorr = np.append(timecorr, tcorr[i])
cadenceno = np.append(cadenceno, cadno[i])
pixseries = np.append(pixseries, pixseriesb[i])
errseries = np.append(errseries, errseriesb[i])
bkgseries = np.append(bkgseries, bkgseriesb[i])
berseries = np.append(berseries, berseriesb[i])
quality = np.append(quality, qual[i])
pos_corr1 = np.append(pos_corr1, pcorr1[i])
pos_corr2 = np.append(pos_corr2, pcorr2[i])
pixseries = np.reshape(pixseries, (ntim, npix))
errseries = np.reshape(errseries, (ntim, npix))
bkgseries = np.reshape(bkgseries, (ntim, npix))
berseries = np.reshape(berseries, (ntim, npix))
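# Added note: the per-cadence median across pixels is subtracted next, so the
# PCA below works on relative (common-mode-removed) pixel variations rather
# than absolute flux levels.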
tmp = np.ma.median(np.ma.masked_invalid(pixseries), axis=1)
for i in range(len(tmp)):
pixseries[i] = pixseries[i] - tmp[i]
pixseries = np.ma.masked_invalid(pixseries)
# Figure out which pixels are undefined/nan and remove them.
# Keep track for adding back in later
nanpixels = np.array([], dtype='int')
i = 0
# loop over every original column; npix itself shrinks as NaN columns are found
npixorig = npix
while i < npixorig:
if np.isnan(pixseries[0, i]):
nanpixels = np.append(nanpixels, i)
npix = npix - 1
i = i + 1
pixseries = np.delete(pixseries, nanpixels, 1)
errseries = np.delete(errseries, nanpixels, 1)
pixseries[np.isnan(pixseries)] = random.gauss(100, 10)
errseries[np.isnan(errseries)] = 10
# Compute statistical weights, means, standard deviations
weightseries = (pixseries / errseries) ** 2
pixMean = np.average(pixseries, axis=0, weights=weightseries)
pixStd = np.std(pixseries, axis=0)
# Normalize the input by subtracting the mean and dividing by the standard
# deviation.
# This makes it a correlation-based PCA, which is what we want.
pixseriesnorm = (pixseries - pixMean) / pixStd
# Number of principal components to compute. Setting it equal to the number
# of pixels
nvecin = npix
# Run PCA using the MDP Whitening PCA, which produces normalized PCA
# components (zero mean and unit variance)
pcan = mdp.nodes.WhiteningNode(svd=True)
pcar = pcan.execute(pixseriesnorm)
eigvec = pcan.get_recmatrix()
model = pcar
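# Added note (inferred from how the arrays are used below): each column of
# pcar/model is one principal-component time series of length ntim, while each
# row of eigvec holds the matching pixel weights and is later reshaped into a
# (ydim, xdim) correlation image.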
# Re-insert nan columns as zeros
for i in range(len(nanpixels)):
nanpixels[i] = nanpixels[i] - i
eigvec = np.insert(eigvec, nanpixels, 0, 1)
pixMean = np.insert(pixMean, nanpixels, 0, 0)
# Make output eigenvectors (correlation images) into xpix by ypix images
eigvec = eigvec.reshape(nvecin, ydim, xdim)
# Calculate sum of all pixels to display as raw lightcurve and other quantities
pixseriessum = np.sum(pixseries, axis=1)
# Number of components to remove
nrem = len(pcarem)
# Number of pcas to plot - currently set to plot all components, but could set
# nplot = nrem to plot only as many components as are being removed
nplot = npix
# Subtract components by fitting them to the summed light curve
x0 = np.tile(-1.0, 1)
for k in tqdm(range(nrem)):
def f(x):
fluxcor = pixseriessum
for k in range(len(x)):
fluxcor = fluxcor - x[k]*model[:, pcarem[k]]
return mad(fluxcor)
if k == 0:
x0 = np.array([-1.0])
else:
x0 = np.append(x0, 1.0)
myfit = opt.fmin(f, x0, maxiter=50000, maxfun=50000, disp=False)
x0 = myfit
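# Added note: every pass re-fits the coefficients of all components included
# so far, warm-starting from the previous solution (x0 = myfit) and minimizing
# the mean absolute deviation of the corrected summed light curve; opt.fmin is
# presumably scipy.optimize.fmin, i.e. a Nelder-Mead downhill simplex search.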
# Now that coefficients for all components have been found, subtract them
# to produce a calibrated time-series,
# and then divide by the robust mean to produce a normalized time series
# as well
c = myfit
fluxcor = pixseriessum
for k in range(0, nrem):
fluxcor = fluxcor - c[k] * model[:, pcarem[k]]
normfluxcor = fluxcor / np.nanmean(reject_outliers(fluxcor, 2))
# input file data
cards0 = instr[0].header.cards
cards1 = instr[1].header.cards
cards2 = instr[2].header.cards
table = instr[1].data[:]
maskmap = np.copy(instr[2].data)
# subimage physical WCS data
crpix1p = cards2['CRPIX1P'].value
crpix2p = cards2['CRPIX2P'].value
crval1p = cards2['CRVAL1P'].value
crval2p = cards2['CRVAL2P'].value
cdelt1p = cards2['CDELT1P'].value
cdelt2p = cards2['CDELT2P'].value
# dummy columns for output file
sap_flux_err = np.empty(len(time))
sap_flux_err[:] = np.nan
sap_bkg = np.empty(len(time))
sap_bkg[:] = np.nan
sap_bkg_err = np.empty(len(time))
sap_bkg_err[:] = np.nan
pdc_flux = np.empty(len(time))
pdc_flux[:] = np.nan
pdc_flux_err = np.empty(len(time))
pdc_flux_err[:] = np.nan
psf_centr1 = np.empty(len(time))
psf_centr1[:] = np.nan
psf_centr1_err = np.empty(len(time))
psf_centr1_err[:] = np.nan
psf_centr2 = np.empty(len(time))
psf_centr2[:] = np.nan
psf_centr2_err = np.empty(len(time))
psf_centr2_err[:] = np.nan
mom_centr1 = np.empty(len(time))
mom_centr1[:] = np.nan
mom_centr1_err = np.empty(len(time))
mom_centr1_err[:] = np.nan
mom_centr2 = np.empty(len(time))
mom_centr2[:] = np.nan
mom_centr2_err = np.empty(len(time))
mom_centr2_err[:] = np.nan
# mask bitmap
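# Added note: when a mask definition file was supplied, the loop below encodes
# the output mask image as 0 for pixels outside the TPF mask, 1 for pixels in
# the TPF mask, and 3 for pixels that also fall inside the user aperture.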
if 'aper' not in maskfile.lower() and maskfile.lower() != 'all':
for i in range(maskmap.shape[0]):
for j in range(maskmap.shape[1]):
aperx = np.append(aperx, crval1p + (j + 1 - crpix1p) * cdelt1p)
apery = np.append(apery, crval2p + (i + 1 - crpix2p) * cdelt2p)
if maskmap[i, j] == 0:
pass
else:
maskmap[i, j] = 1
for k in range(len(maskx)):
if aperx[-1] == maskx[k] and apery[-1] == masky[k]:
maskmap[i, j] = 3
# construct output primary extension
hdu0 = pyfits.PrimaryHDU()
for i in range(len(cards0)):
if cards0[i].keyword not in hdu0.header.keys():
hdu0.header[cards0[i].keyword] = (cards0[i].value,
cards0[i].comment)
else:
hdu0.header.cards[cards0[i].keyword].comment = cards0[i].comment
kepkey.history(call, hdu0, outfile, logfile, verbose)
outstr = pyfits.HDUList(hdu0)
# construct output light curve extension
col1 = pyfits.Column(name='TIME', format='D', unit='BJD - 2454833',
array=time)
col2 = pyfits.Column(name='TIMECORR', format='E', unit='d', array=timecorr)
col3 = pyfits.Column(name='CADENCENO', format='J', array=cadenceno)
col4 = pyfits.Column(name='SAP_FLUX', format='E', unit='e-/s',
array=pixseriessum)
col5 = pyfits.Column(name='SAP_FLUX_ERR', format='E', unit='e-/s',
array=sap_flux_err)
col6 = pyfits.Column(name='SAP_BKG', format='E', unit='e-/s',
array=sap_bkg)
col7 = pyfits.Column(name='SAP_BKG_ERR', format='E', unit='e-/s',
array=sap_bkg_err)
col8 = pyfits.Column(name='PDCSAP_FLUX', format='E', unit='e-/s',
array=pdc_flux)
col9 = pyfits.Column(name='PDCSAP_FLUX_ERR', format='E', unit='e-/s',
array=pdc_flux_err)
col10 = pyfits.Column(name='SAP_QUALITY', format='J', array=quality)
col11 = pyfits.Column(name='PSF_CENTR1', format='E', unit='pixel',
array=psf_centr1)
col12 = pyfits.Column(name='PSF_CENTR1_ERR', format='E', unit='pixel',
array=psf_centr1_err)
col13 = pyfits.Column(name='PSF_CENTR2', format='E', unit='pixel',
array=psf_centr2)
col14 = pyfits.Column(name='PSF_CENTR2_ERR', format='E', unit='pixel',
array=psf_centr2_err)
col15 = pyfits.Column(name='MOM_CENTR1', format='E', unit='pixel',
array=mom_centr1)
col16 = pyfits.Column(name='MOM_CENTR1_ERR', format='E', unit='pixel',
array=mom_centr1_err)
col17 = pyfits.Column(name='MOM_CENTR2', format='E', unit='pixel',
array=mom_centr2)
col18 = pyfits.Column(name='MOM_CENTR2_ERR', format='E', unit='pixel',
array=mom_centr2_err)
col19 = pyfits.Column(name='POS_CORR1', format='E', unit='pixel',
array=pos_corr1)
col20 = pyfits.Column(name='POS_CORR2', format='E', unit='pixel',
array=pos_corr2)
col21 = pyfits.Column(name='PCA_FLUX', format='E', unit='e-/s',
array=fluxcor)
col22 = pyfits.Column(name='PCA_FLUX_NRM', format='E', array=normfluxcor)
cols = pyfits.ColDefs([col1, col2, col3, col4, col5, col6, col7, col8,
col9, col10, col11, col12, col13, col14, col15,
col16, col17, col18, col19, col20, col21, col22])
hdu1 = pyfits.BinTableHDU.from_columns(cols)
hdu1.header['TTYPE1'] = ('TIME', 'column title: data time stamps')
hdu1.header['TFORM1'] = ('D', 'data type: float64')
hdu1.header['TUNIT1'] = ('BJD - 2454833',
'column units: barycenter corrected JD')
hdu1.header['TDISP1'] = ('D12.7', 'column display format')
hdu1.header['TTYPE2'] = ('TIMECORR',
'column title: barycentric-timeslice correction')
hdu1.header['TFORM2'] = ('E', 'data type: float32')
hdu1.header['TUNIT2'] = ('d', 'column units: days')
hdu1.header['TTYPE3'] = ('CADENCENO',
'column title: unique cadence number')
hdu1.header['TFORM3'] = ('J', 'column format: signed integer32')
hdu1.header['TTYPE4'] = ('SAP_FLUX',
'column title: aperture photometry flux')
hdu1.header['TFORM4'] = ('E', 'column format: float32')
hdu1.header['TUNIT4'] = ('e-/s', 'column units: electrons per second')
hdu1.header['TTYPE5'] = ('SAP_FLUX_ERR',
'column title: aperture phot. flux error')
hdu1.header['TFORM5'] = ('E', 'column format: float32')
hdu1.header['TUNIT5'] = ('e-/s',
'column units: electrons per second (1-sigma)')
hdu1.header['TTYPE6'] = ('SAP_BKG',
'column title: aperture phot. background flux')
hdu1.header['TFORM6'] = ('E', 'column format: float32')
hdu1.header['TUNIT6'] = ('e-/s', 'column units: electrons per second')
hdu1.header['TTYPE7'] = ('SAP_BKG_ERR',
'column title: ap. phot. background flux error')
hdu1.header['TFORM7'] = ('E', 'column format: float32')
hdu1.header['TUNIT7'] = ('e-/s',
'column units: electrons per second (1-sigma)')
hdu1.header['TTYPE8'] = ('PDCSAP_FLUX',
'column title: PDC photometry flux')
hdu1.header['TFORM8'] = ('E', 'column format: float32')
hdu1.header['TUNIT8'] = ('e-/s', 'column units: electrons per second')
hdu1.header['TTYPE9'] = ('PDCSAP_FLUX_ERR', 'column title: PDC flux error')
hdu1.header['TFORM9'] = ('E', 'column format: float32')
hdu1.header['TUNIT9'] = ('e-/s',
'column units: electrons per second (1-sigma)')
hdu1.header['TTYPE10'] = ('SAP_QUALITY',
'column title: aperture photometry quality flag')
hdu1.header['TFORM10'] = ('J', 'column format: signed integer32')
hdu1.header['TTYPE11'] = ('PSF_CENTR1',
'column title: PSF fitted column centroid')
hdu1.header['TFORM11'] = ('E', 'column format: float32')
hdu1.header['TUNIT11'] = ('pixel', 'column units: pixel')
hdu1.header['TTYPE12'] = ('PSF_CENTR1_ERR',
'column title: PSF fitted column error')
hdu1.header['TFORM12'] = ('E', 'column format: float32')
hdu1.header['TUNIT12'] = ('pixel', 'column units: pixel')
hdu1.header['TTYPE13'] = ('PSF_CENTR2',
'column title: PSF fitted row centroid')
hdu1.header['TFORM13'] = ('E', 'column format: float32')
hdu1.header['TUNIT13'] = ('pixel', 'column units: pixel')
hdu1.header['TTYPE14'] = ('PSF_CENTR2_ERR',
'column title: PSF fitted row error')
hdu1.header['TFORM14'] = ('E', 'column format: float32')
hdu1.header['TUNIT14'] = ('pixel', 'column units: pixel')
hdu1.header['TTYPE15'] = ('MOM_CENTR1',
'column title: moment-derived column centroid')
hdu1.header['TFORM15'] = ('E', 'column format: float32')
hdu1.header['TUNIT15'] = ('pixel', 'column units: pixel')
hdu1.header['TTYPE16'] = ('MOM_CENTR1_ERR',
'column title: moment-derived column error')
hdu1.header['TFORM16'] = ('E', 'column format: float32')
hdu1.header['TUNIT16'] = ('pixel', 'column units: pixel')
hdu1.header['TTYPE17'] = ('MOM_CENTR2',
'column title: moment-derived row centroid')
hdu1.header['TFORM17'] = ('E', 'column format: float32')
hdu1.header['TUNIT17'] = ('pixel', 'column units: pixel')
hdu1.header['TTYPE18'] = ('MOM_CENTR2_ERR',
'column title: moment-derived row error')
hdu1.header['TFORM18'] = ('E', 'column format: float32')
hdu1.header['TUNIT18'] = ('pixel', 'column units: pixel')
hdu1.header['TTYPE19'] = ('POS_CORR1',
'column title: col correction for vel. aberration')
hdu1.header['TFORM19'] = ('E', 'column format: float32')
hdu1.header['TUNIT19'] = ('pixel', 'column units: pixel')
hdu1.header['TTYPE20'] = ('POS_CORR2',
'column title: row correction for vel. aberration')
hdu1.header['TFORM20'] = ('E', 'column format: float32')
hdu1.header['TUNIT20'] = ('pixel', 'column units: pixel')
hdu1.header['TTYPE21'] = ('PCA_FLUX', 'column title: PCA-corrected flux')
hdu1.header['TFORM21'] = ('E', 'column format: float32')
hdu1.header['TUNIT21'] = ('e-/s', 'column units: electrons per second')
hdu1.header['TTYPE22'] = ('PCA_FLUX_NRM',
'column title: normalized PCA-corrected flux')
hdu1.header['TFORM22'] = ('E', 'column format: float32')
hdu1.header['EXTNAME'] = ('LIGHTCURVE', 'name of extension')
for i in range(len(cards1)):
if (cards1[i].keyword not in hdu1.header.keys() and
cards1[i].keyword[:4] not in ['TTYP', 'TFOR', 'TUNI', 'TDIS',
'TDIM', 'WCAX', '1CTY', '2CTY',
'1CRP', '2CRP', '1CRV', '2CRV',
'1CUN', '2CUN', '1CDE', '2CDE',
'1CTY', '2CTY', '1CDL', '2CDL',
'11PC', '12PC', '21PC', '22PC']):
hdu1.header[cards1[i].keyword] = (cards1[i].value, cards1[i].comment)
outstr.append(hdu1)
# construct output mask bitmap extension
hdu2 = pyfits.ImageHDU(maskmap)
for i in range(len(cards2)):
if cards2[i].keyword not in hdu2.header.keys():
hdu2.header[cards2[i].keyword] = (cards2[i].value,
cards2[i].comment)
else:
hdu2.header.cards[cards2[i].keyword].comment = cards2[i].comment
outstr.append(hdu2)
# construct principal component table
cols = [pyfits.Column(name='TIME', format='E', unit='BJD - 2454833',
array=time)]
for i in range(len(pcar[0, :])):
colname = 'PC' + str(i + 1)
col = pyfits.Column(name=colname, format='E', array=pcar[:, i])
cols.append(col)
hdu3 = pyfits.BinTableHDU.from_columns(pyfits.ColDefs(cols))
hdu3.header['EXTNAME'] = ('PRINCIPAL_COMPONENTS', 'name of extension')
hdu3.header['TTYPE1'] = ('TIME', 'column title: data time stamps')
hdu3.header['TFORM1'] = ('D', 'data type: float64')
hdu3.header['TUNIT1'] = ('BJD - 2454833',
'column units: barycenter corrected JD')
hdu3.header['TDISP1'] = ('D12.7', 'column display format')
for i in range(len(pcar[0, :])):
hdu3.header['TTYPE' + str(i + 2)] = ("PC" + str(i + 1),
"column title: principal "
"component number " + str(i + 1))
hdu3.header['TFORM' + str(i + 2)] = ('E', 'column format: float32')
outstr.append(hdu3)
# write output file
print("Writing output file {}...".format(outfile))
outstr.writeto(outfile)
# close input structure
instr.close()
# Create PCA report
if plotpca:
npp = 7 # Number of plots per page
l = 1
repcnt = 1
for k in range(nmaps):
# First plot of every page with the flux image,
# flux and calibrated time series
if (k % (npp - 1) == 0):
plt.figure(figsize=[10, 16])
plt.subplot2grid((npp, 6), (0, 0), colspan=2)
plt.imshow(np.log10(np.flipud(pixMean.reshape(ydim, xdim)) - min(pixMean) + 1),
interpolation="nearest", cmap='RdYlBu')
plt.xticks([])
plt.yticks([])
ax1 = plt.subplot2grid((npp, 6), (0, 2), colspan=4)
px = np.copy(time) + bjdref
py = np.copy(pixseriessum)
px, xlab = kepplot.cleanx(px, logfile, verbose)
py, ylab = kepplot.cleany(py, 1.0, logfile, verbose)
kepplot.RangeOfPlot(px, py, 0.01, False)
kepplot.plot1d(px, py, cadence, '#0000ff', 1.0, '#ffff00', 0.2,
True)
py = np.copy(fluxcor)
py, ylab = kepplot.cleany(py, 1.0, logfile, verbose)
plt.plot(px, py, marker='.', color='r', linestyle='',
markersize=1.0)
kepplot.labels('', re.sub('\)', '', re.sub('Flux \(','', ylab)),
'k', 14)
plt.grid()
plt.setp(ax1.get_xticklabels(), visible=False)
# plot principal components
plt.subplot2grid((npp, 6), (l, 0), colspan=2)
plt.imshow(eigvec[k], interpolation="nearest", cmap='RdYlBu')
plt.xlim(-0.5, xdim-0.5)
plt.ylim(-0.5, ydim-0.5)
plt.xticks([])
plt.yticks([])
# The last plot on the page that should have the xlabel
if (k % (npp - 1) == npp - 2 or k == nvecin - 1):
plt.subplot2grid((npp, 6), (l, 2), colspan=4)
py = np.copy(model[:, k])
kepplot.RangeOfPlot(px, py, 0.01, False)
kepplot.plot1d(px, py, cadence, 'r', 1.0, 'g', 0.2, True)
kepplot.labels(xlab, 'PC ' + str(k + 1), 'k', 14)
plt.grid()
plt.tight_layout()
l = 1
plt.savefig(re.sub('.png', '_%d.png' % repcnt, repname))
repcnt += 1
# The other plots on the page that should have no xlabel
else:
ax2 = plt.subplot2grid((npp, 6), (l, 2), colspan=4)
py = np.copy(model[:, k])
kepplot.RangeOfPlot(px, py, 0.01, False)
kepplot.plot1d(px, py, cadence, 'r', 1.0, 'g', 0.2, True)
kepplot.labels('', 'PC ' + str(k + 1), 'k', 14)
plt.grid()
plt.setp(ax2.get_xticklabels(), visible=False)
plt.tight_layout()
l = l + 1
plt.savefig(re.sub('.png', '_%d.png' % repcnt, repname))
# plot style and size
if plotpca:
plt.figure()
plt.clf()
# plot aperture photometry and PCA corrected data
ax = kepplot.location([0.06, 0.54, 0.93, 0.43])
px = np.copy(time) + bjdref
py = np.copy(pixseriessum)
px, xlab = kepplot.cleanx(px, logfile, verbose)
py, ylab = kepplot.cleany(py, 1.0, logfile, verbose)
kepplot.RangeOfPlot(px, py, 0.01, False)
kepplot.plot1d(px, py, cadence, '#0000ff', 1.0, '#ffff00', 0.2, True)
py = np.copy(fluxcor)
py, ylab = kepplot.cleany(py, 1.0, logfile, verbose)
kepplot.plot1d(px, py, cadence, 'r', 2, '#ffff00', 0.0, True)
plt.setp(plt.gca(), xticklabels=[])
kepplot.labels('', ylab, 'k', 14)
plt.grid()
# plot aperture photometry and PCA corrected data
ax = kepplot.location([0.06, 0.09, 0.93, 0.43])
yr = np.array([], 'float32')
npc = min([6, nrem])
for i in range(npc - 1, -1, -1):
py = pcar[:, i] * c[i]
py, ylab = kepplot.cleany(py, 1.0, logfile, verbose)
cl = float(i) / (float(npc))
kepplot.plot1d(px, py, cadence, [1.0 - cl, 0.0, cl], 2, '#ffff00',
0.0, True)
yr = np.append(yr, py)
y1 = max(yr)
y2 = -min(yr)
kepplot.RangeOfPlot(px, np.array([-y1, y1, -y2, y2]), 0.01, False)
kepplot.labels(xlab, 'Principal Components', 'k', 14)
plt.grid()
# save plot to file
plt.savefig(repname)
# render plot
plt.show()
# stop time
kepmsg.clock('KEPPCA ended at', logfile, verbose)
def reject_outliers(data, m):
"""Outlier rejection for computing robust mean"""
try:
return data[np.abs(data - np.nanmean(data)) < m * np.std(data)]
except:
print("Warning: Could not reject outliers.")
return data
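# Added illustrative example (not from the original source): with m=2,
# reject_outliers(np.array([0.] * 9 + [100.]), 2) keeps the nine zeros and
# drops the 100, because |100 - 10| = 90 exceeds 2 * std = 60.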
def mad(data):
"""
Mean absolute deviation function used for fitting the PCA components to
the data and subtracting them out
"""
return np.nanmean(np.absolute(data - np.nanmean(data)))
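# Added illustrative example (not from the original source):
# mad(np.array([1., 2., 3., 10.])) returns 3.0, since the mean is 4.0 and the
# absolute deviations [3, 2, 1, 6] average to 3.0.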
def keppca_main():
import argparse
parser = argparse.ArgumentParser(
description='Pixel-level principal component analysis of time series',
formatter_class=PyKEArgumentHelpFormatter)
parser.add_argument('infile', help='Name of input target pixel FITS file',
type=str)
parser.add_argument('--outfile',
help=('Name of FITS file to output.'
' If None, outfile is infile-keppca.'),
default=None)
parser.add_argument('--maskfile', help='Name of mask definition ASCII file',
default='ALL', type=str)
parser.add_argument('--components', default='1-3',
help='Principal components to be removed', type=str)
parser.add_argument('--plotpca', action='store_true',
help='Create PCA plots?')
parser.add_argument('--nmaps', default=10,
help='Number of principal components to include in report',
type=int)
parser.add_argument('--overwrite', action='store_true',
help='Overwrite output file?')
parser.add_argument('--verbose', action='store_true',
help='Write to a log file?')
parser.add_argument('--logfile', '-l', help='Name of ascii log file',
default='keppca.log', dest='logfile', type=str)
args = parser.parse_args()
keppca(args.infile, args.outfile, args.maskfile, args.components,
args.plotpca, args.nmaps, args.overwrite, args.verbose, args.logfile)
|
gully/PyKE
|
pyke/keppca.py
|
Python
|
mit
| 36,295
|
#!../bin/python
# -*- coding: utf-8 -*-
import nuka
from nuka.hosts import Cloud
from nuka.tasks import shell
nuka.config['gce'] = {
'driver': '~/.gce/nuka.json',
'user': 'gawel',
'create_node_args': {
'size': 'f1-micro',
'image': 'debian-8-jessie-v20161215',
'location': 'europe-west1-d',
'ex_tags': ['nuka'],
'ex_disk_auto_delete': True,
'ex_service_accounts': [{
'scopes': [
'https://www.googleapis.com/auth/cloud.useraccounts.readonly',
'https://www.googleapis.com/auth/devstorage.read_only',
'https://www.googleapis.com/auth/logging.write',
'https://www.googleapis.com/auth/monitoring.write',
'https://www.googleapis.com/auth/service.management.readonly',
'https://www.googleapis.com/auth/servicecontrol'
],
}]
},
}
nuka.config['ssh'].update({
'extra_options': ['-C', '-oStrictHostKeyChecking=no'],
'keysfile': '~/.ssh/authorized_keys',
})
cloud = Cloud('gce')
host = cloud.get_or_create_node('myhost')
async def my_tasks(host):
await shell.command(['ls'])
nuka.cli.add_argument('--destroy', action='store_true', default=False)
nuka.cli.parse_args()
if nuka.cli.args.destroy:
nuka.run(cloud.destroy())
else:
nuka.run(my_tasks(host))
|
bearstech/nuka
|
examples/gce.py
|
Python
|
gpl-3.0
| 1,359
|
"""The gene_features module generates a DB table for features
like gene length and misc. features from the MutSigCV paper (fig s5).
The MutSigCV paper can be found here:
http://www.nature.com/nature/journal/v499/n7457/full/nature12213.html
The MutSigCV paper suggested that the background mutation rate for genes is important
for identifying statistically significant cancer genes. However, it is
not clear how important those features are for supervised learning on
"known" oncogenes and tsgs.
"""
import src.utils.python.util as _utils
import pandas as pd
import pandas.io.sql as psql
import sqlite3
import string
import os
import logging
logger = logging.getLogger(__name__)
def calc_gene_length(file_path):
"""Read in a FASTA file and calculate sequence length.
Assumes a typical one line header for a FASTA file.
**Parameters**
file_path : str
Path to FASTA file
**Returns**
seq_len : int
length of gene
"""
with open(file_path) as handle:
handle.readline() # skip FASTA header
seq = handle.read() # read file into single string
seq = seq.replace('\n', '') # remove line breaks
seq_len = len(seq)
return seq_len
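# Added illustrative note: for a FASTA file with a one-line header followed by
# 120 residues spread over several lines, calc_gene_length returns 120 (the
# header is skipped and newlines are stripped before counting).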
def recursive_gene_length(fasta_dir):
"""Recursively scans the FASTA directory to calc gene lengths.
NOTE: assumes directories are ['0-9', 'A', .., 'Z']
**Parameters**
fasta_dir : str
path to fasta directory downloaded from COSMIC
**Returns**
gene_length_dict : dict
keys=gene name, values=gene length
"""
logger.info('Recursively calculating length in FASTA directories . . .')
gene_length_dict = {}
mydirs = ['0-9'] + list(string.ascii_uppercase)
for mydir in mydirs:
print(mydir)
dir_path = fasta_dir + mydir + '/'
for file_name in os.listdir(dir_path):
if '_protein' in file_name and '_ENST' not in file_name:
# remove the '_protein.txt' suffix explicitly; str.strip() removes matching
# characters from both ends and can mangle some gene names
gene_name = file_name[:-len('_protein.txt')]
gene_length = calc_gene_length(dir_path + file_name)
gene_length_dict[gene_name] = gene_length
logger.info('Finished counting gene length.')
return gene_length_dict
def save_db(df, genedb_path):
"""Saves the data into the gene_features table.
If the table already exists, the table is droped and then
re-inserted.
**Parameters**
df : pd.DataFrame
data to insert into DB table
genedb_path : str
path to sqlite db
"""
logger.debug('Dropping gene_features table IF EXISTS.')
_utils.drop_table('gene_features', genes_db_path=genedb_path, kind='sqlite') # drop table if exists
logger.debug('After dropping gene_features table IF EXISTS.')
logger.info('Saving gene_features table ...')
conn = sqlite3.connect(genedb_path) # open connection
# save to sqlite3 database
psql.write_frame(df, # pandas dataframe
'gene_features', # table name
con=conn, # connection
flavor='sqlite', # use sqlite
if_exists='replace') # drop table if exists
conn.close()
logger.info('Finished saving gene_features table.')
def main(db_path):
# get config files
in_opts = _utils.get_input_config('input')
db_opts = _utils.get_db_config('2020plus')
# get absolute path for cosmic data
cosmic_path = os.path.join(_utils.proj_dir, in_opts['cosmic_path'])
# get data for gene_features table
logger.info('Processing features for gene_features table ...')
if os.path.isdir(cosmic_path):
gene_length = recursive_gene_length(in_opts['fasta_dir'])
genes, lengths = zip(*gene_length.items())
gene_length_df = pd.DataFrame({'gene': genes, 'gene length': lengths})
else:
gene_length_df = pd.read_csv(cosmic_path, sep='\t')
gene_length_df = gene_length_df[['Gene name', 'Gene CDS length']]
gene_length_df = gene_length_df.rename(columns={'Gene name': 'gene',
'Gene CDS length': 'gene length'})
gene_length_df.drop_duplicates(cols=['gene'], inplace=True)
# merge in data from mutsig and biogrid
mutsigcv_feature_path = os.path.join(_utils.proj_dir, in_opts['mutsigcv_features'])
df = pd.read_csv(mutsigcv_feature_path, sep='\t')
df = pd.merge(gene_length_df, df, how='left', on='gene') # merge the data frames
biogrid_path = os.path.join(_utils.proj_dir, 'data/biogrid_stats.txt')
biogrid_df = pd.read_csv(biogrid_path, sep='\t')
df = pd.merge(df, biogrid_df, how='left', on='gene')
# path to database
db_path = db_path if db_path else db_opts['db']
logger.info('Finished processing features for gene_features table.')
# save database
save_db(df, db_path)
|
KarchinLab/2020plus
|
src/savedb/python/gene_features.py
|
Python
|
apache-2.0
| 4,824
|
from . import meta_selector # noqa
from .pg import PatternGenerator
from .selector import Selector
PatternGenerator('')
Selector('')
|
stack-of-tasks/sot-pattern-generator
|
src/dynamic_graph/sot/pattern_generator/__init__.py
|
Python
|
isc
| 136
|
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from django.utils.translation import ugettext as _
from django.conf import settings
from models import FilerLinkPlugin
class FilerLinkPlugin(CMSPluginBase):
module = 'Filer'
model = FilerLinkPlugin
name = _("Link")
text_enabled = True
render_template = "cmsplugin_filer_link/link.html"
def render(self, context, instance, placeholder):
if instance.file:
link = instance.file.url
elif instance.mailto:
link = u"mailto:%s" % _(instance.mailto)
elif instance.url:
link = _(instance.url)
elif instance.page_link:
link = instance.page_link.get_absolute_url()
else:
link = ""
context.update({
'link': link,
'style': instance.link_style,
'name': instance.name,
'new_window': instance.new_window,
})
return context
def icon_src(self, instance):
return settings.STATIC_URL + u"cms/images/plugins/link.png"
plugin_pool.register_plugin(FilerLinkPlugin)
|
pbs/cmsplugin-filer
|
cmsplugin_filer_link/cms_plugins.py
|
Python
|
bsd-3-clause
| 1,138
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2016, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'outline05.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = ['xl/calcChain.xml', '[Content_Types].xml', 'xl/_rels/workbook.xml.rels']
self.ignore_elements = {}
def test_create_file(self):
"""
Test the creation of outlines in an XlsxWriter file. These tests are
based on the outline programs in the examples directory.
"""
workbook = Workbook(self.got_filename)
worksheet2 = workbook.add_worksheet('Collapsed Rows')
bold = workbook.add_format({'bold': 1})
worksheet2.set_row(1, None, None, {'level': 2, 'hidden': True})
worksheet2.set_row(2, None, None, {'level': 2, 'hidden': True})
worksheet2.set_row(3, None, None, {'level': 2, 'hidden': True})
worksheet2.set_row(4, None, None, {'level': 2, 'hidden': True})
worksheet2.set_row(5, None, None, {'level': 1, 'hidden': True, 'collapsed': True})
worksheet2.set_row(6, None, None, {'level': 2, 'hidden': True})
worksheet2.set_row(7, None, None, {'level': 2, 'hidden': True})
worksheet2.set_row(8, None, None, {'level': 2, 'hidden': True})
worksheet2.set_row(9, None, None, {'level': 2, 'hidden': True})
worksheet2.set_row(10, None, None, {'level': 1, 'hidden': True, 'collapsed': True})
worksheet2.set_row(11, None, None, {'collapsed': True})
worksheet2.set_column('A:A', 20)
worksheet2.set_selection('A14')
worksheet2.write('A1', 'Region', bold)
worksheet2.write('A2', 'North')
worksheet2.write('A3', 'North')
worksheet2.write('A4', 'North')
worksheet2.write('A5', 'North')
worksheet2.write('A6', 'North Total', bold)
worksheet2.write('B1', 'Sales', bold)
worksheet2.write('B2', 1000)
worksheet2.write('B3', 1200)
worksheet2.write('B4', 900)
worksheet2.write('B5', 1200)
worksheet2.write('B6', '=SUBTOTAL(9,B2:B5)', bold, 4300)
worksheet2.write('A7', 'South')
worksheet2.write('A8', 'South')
worksheet2.write('A9', 'South')
worksheet2.write('A10', 'South')
worksheet2.write('A11', 'South Total', bold)
worksheet2.write('B7', 400)
worksheet2.write('B8', 600)
worksheet2.write('B9', 500)
worksheet2.write('B10', 600)
worksheet2.write('B11', '=SUBTOTAL(9,B7:B10)', bold, 2100)
worksheet2.write('A12', 'Grand Total', bold)
worksheet2.write('B12', '=SUBTOTAL(9,B2:B10)', bold, 6400)
workbook.close()
self.assertExcelEqual()
|
jkyeung/XlsxWriter
|
xlsxwriter/test/comparison/test_outline05.py
|
Python
|
bsd-2-clause
| 3,191
|
from r_support import *
from loda_support import *
from test_setups import *
from alad import *
def test_load_model_csv(opts):
opts.set_multi_run_options(1, 1)
modelmanager = ModelManager.get_model_manager("csv")
model = modelmanager.load_model(opts)
print "model loaded..."
def test_load_data(args):
filepath = os.path.join(args.filedir, args.dataset + "_1.csv")
print filepath
data = read_csv(filepath, header=True)
print "data loaded..."
def test_load_samples(opts):
alldata = load_all_samples(opts.dataset, opts.filedir, [1], opts)
#logger.debug(alldata[0].lbls)
#logger.debug(alldata[0].fmat)
print "Loaded samples..."
def test_loda(opts):
alldata = load_all_samples(opts.dataset, opts.filedir, [1], opts)
#logger.debug(alldata[0].lbls)
#logger.debug(alldata[0].fmat)
print "Loaded samples..."
a = alldata[0].fmat
logger.debug(a.shape)
if opts.randseed > 0:
np.random.seed(opts.randseed)
#args.original_dims = True
lodares = loda(a, sparsity=opts.sparsity, mink=opts.mink, maxk=opts.maxk,
keep=opts.keep, exclude=opts.exclude, original_dims=opts.original_dims)
model = generate_model_from_loda_result(lodares, a, alldata[0].lbls)
logger.debug("The neg-log-lik scores in the model will be treated as ensemble scores")
logger.debug("model.nlls.shape: %s;\nThere are %d instances and %d ensemble members (each LODA projection is one member)" %
(str(model.nlls.shape), model.nlls.shape[0], model.nlls.shape[1]))
logger.debug(model.nlls)
print "Completed LODA..."
def pdf_hist_bin(x, h, minpdf=1e-8):
"""Returns the histogram bins for input values.
Used for debugging only...
"""
n = len(x)
pd = np.zeros(n, dtype=int)
for j in range(n):
# use simple index lookup in case the histograms are equal width
# this returns the lower index
i = get_bin_for_equal_hist(h.breaks, x[j])
if i >= len(h.density):
i = len(h.density)-1 # maybe something else should be done here
pd[j] = i+1 # adding 1 to make it like R # max(h.density[i], minpdf)
return pd
# get all bins from individual histograms.
def get_all_hist_pdf_bins(a, w, hists):
"""Returns the histogram bins for input values.
Used for debugging only...
"""
x = a.dot(w)
bins = np.zeros(shape=(len(x), len(hists)), dtype=int)
for i in range(len(hists)):
bins[:, i] = pdf_hist_bin(x[:, i], hists[i])
return bins
def test_show_baseline(opts):
data = load_all_samples(opts.dataset, opts.filedir, range(opts.minfid, opts.maxfid + 1), opts)
logger.debug("data loaded...")
modelmanager = ModelManager.get_model_manager(opts.cachetype)
ensembles = get_loda_alad_ensembles(range(opts.minfid, opts.maxfid + 1),
range(1, opts.reruns + 1), data, opts)
for i, fid in enumerate(range(opts.minfid, opts.maxfid+1)):
fensembles = ensembles[i]
for j, runidx in enumerate(range(1, opts.reruns+1)):
opts.set_multi_run_options(fid, runidx)
model = None
if False:
lodares = modelmanager.load_model(opts)
model = generate_model_from_loda_result(lodares, data[i].fmat, data[i].lbls)
ensemble = LodaEnsemble.ensemble_from_lodares(lodares, data[i].fmat, data[i].lbls)
if False:
hpdfs = get_all_hist_pdfs(data[i].fmat, lodares.pvh.pvh.w, lodares.pvh.pvh.hists)
prefix = opts.model_file_prefix()
fname = os.path.join(opts.cachedir, "%s-hpdfs.csv" % (prefix,))
np.savetxt(fname, hpdfs, fmt='%.15f', delimiter=',')
logger.debug(lodares.pvh.pvh.hists[0].breaks)
logger.debug(lodares.pvh.pvh.hists[0].density)
else:
ensemble = fensembles[j]
#logger.debug("model loaded...")
#nll = np.mean(model.nlls, axis=1, dtype=float)
#logger.debug("#scores: %d" % (len(nll),))
#logger.debug(nll)
#logger.debug("#anoms: %d" % (np.sum(data[0].lbls),))
#logger.debug(data[0].lbls)
if model is not None:
ordered = order(model.anom_score, decreasing=True)
logger.debug("\n%s" % (list(np.cumsum(model.lbls[ordered])[0:opts.budget]),))
#logger.debug(data[0].lbls[ordered])
ordered = order(ensemble.agg_scores, decreasing=True)
logger.debug("\n%s" % (list(np.cumsum(ensemble.labels[ordered])[0:opts.budget]),))
def test_histogram(opts):
data = load_all_samples(opts.dataset, opts.filedir, range(opts.minfid, opts.maxfid + 1), opts)
logger.debug("data loaded...")
logger.debug(ncol(data[0].fmat))
for i in range(ncol(data[0].fmat)):
x = data[0].fmat[:, i]
#logger.debug(x)
hist = histogram_r(x)
#logger.debug(hist.breaks)
#logger.debug(hist.counts)
logger.debug(hist.density)
def test_ensemble_load(opts):
samples = load_samples(opts.datafile, opts)
logger.debug("Loaded samples...")
em = PrecomputedEnsemble(opts)
ensemble = em.load_data(samples.fmat, samples.lbls, opts)
logger.debug("Loaded ensemble...")
def test_alad(opts):
alad_results = alad(opts)
write_sequential_results_to_csv(alad_results, opts)
logger.debug("completed test_alad...")
def run_test(args):
configure_logger(args)
opts = Opts(args)
logger.debug(opts.str_opts())
if args.op == "load":
test_load_data(opts)
elif args.op == "samples":
test_load_samples(opts)
elif args.op == "loda":
test_loda(opts)
elif args.op == "csvmodel":
test_load_model_csv(opts)
elif args.op == "baseline":
test_show_baseline(opts)
elif args.op == "hist":
test_histogram(opts)
elif args.op == "ensemble":
test_ensemble_load(opts)
elif args.op == "alad":
test_alad(opts)
else:
raise ValueError("Invalid operation: %s" % (args.op,))
|
shubhomoydas/pyaad
|
pyalad/unit_tests.py
|
Python
|
mit
| 6,173
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit.image import fake as fake_image
from nova.tests.unit import policy_fixture
class ServersPreSchedulingTestCase(test.TestCase):
"""Tests for the servers API with unscheduled instances.
With cellsv2 an instance is not written to an instance table in the cell
database until it has been scheduled to a cell. This means we need to be
careful to ensure the instance can still be represented before that point.
NOTE(alaski): The above is the desired future state, this test class is
here to confirm that the behavior does not change as the transition is
made.
This test class starts the wsgi stack for the nova api service, and uses
an in memory database for persistence. It does not allow requests to get
past scheduling.
"""
api_major_version = 'v2.1'
def setUp(self):
super(ServersPreSchedulingTestCase, self).setUp()
fake_image.stub_out_image_service(self)
self.useFixture(policy_fixture.RealPolicyFixture())
self.useFixture(nova_fixtures.NoopConductorFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
self.api = api_fixture.api
self.api.microversion = 'latest'
def test_instance_from_buildrequest(self):
self.useFixture(nova_fixtures.AllServicesCurrent())
image_ref = fake_image.get_valid_image_id()
body = {
'server': {
'name': 'foo',
'imageRef': image_ref,
'flavorRef': '1',
'networks': 'none',
}
}
create_resp = self.api.api_post('servers', body)
get_resp = self.api.api_get('servers/%s' %
create_resp.body['server']['id'])
server = get_resp.body['server']
# Validate a few things
self.assertEqual('foo', server['name'])
self.assertEqual(image_ref, server['image']['id'])
self.assertEqual('1', server['flavor']['id'])
self.assertEqual('', server['hostId'])
self.assertIsNone(server['OS-SRV-USG:launched_at'])
self.assertIsNone(server['OS-SRV-USG:terminated_at'])
self.assertFalse(server['locked'])
self.assertEqual([], server['tags'])
self.assertEqual('scheduling', server['OS-EXT-STS:task_state'])
self.assertEqual('building', server['OS-EXT-STS:vm_state'])
self.assertEqual('BUILD', server['status'])
def test_instance_from_buildrequest_old_service(self):
image_ref = fake_image.get_valid_image_id()
body = {
'server': {
'name': 'foo',
'imageRef': image_ref,
'flavorRef': '1',
'networks': 'none',
}
}
create_resp = self.api.api_post('servers', body)
get_resp = self.api.api_get('servers/%s' %
create_resp.body['server']['id'])
server = get_resp.body['server']
# Just validate some basics
self.assertEqual('foo', server['name'])
self.assertEqual(image_ref, server['image']['id'])
self.assertEqual('1', server['flavor']['id'])
self.assertEqual('', server['hostId'])
self.assertIsNone(server['OS-SRV-USG:launched_at'])
self.assertIsNone(server['OS-SRV-USG:terminated_at'])
self.assertFalse(server['locked'])
self.assertEqual([], server['tags'])
self.assertEqual('scheduling', server['OS-EXT-STS:task_state'])
self.assertEqual('building', server['OS-EXT-STS:vm_state'])
self.assertEqual('BUILD', server['status'])
def test_delete_instance_from_buildrequest(self):
self.useFixture(nova_fixtures.AllServicesCurrent())
image_ref = fake_image.get_valid_image_id()
body = {
'server': {
'name': 'foo',
'imageRef': image_ref,
'flavorRef': '1',
'networks': 'none',
}
}
create_resp = self.api.api_post('servers', body)
self.api.api_delete('servers/%s' % create_resp.body['server']['id'])
get_resp = self.api.api_get('servers/%s' %
create_resp.body['server']['id'],
check_response_status=False)
self.assertEqual(404, get_resp.status)
def test_delete_instance_from_buildrequest_old_service(self):
image_ref = fake_image.get_valid_image_id()
body = {
'server': {
'name': 'foo',
'imageRef': image_ref,
'flavorRef': '1',
'networks': 'none',
}
}
create_resp = self.api.api_post('servers', body)
self.api.api_delete('servers/%s' % create_resp.body['server']['id'])
get_resp = self.api.api_get('servers/%s' %
create_resp.body['server']['id'],
check_response_status=False)
self.assertEqual(404, get_resp.status)
def _test_instance_list_from_buildrequests(self):
image_ref = fake_image.get_valid_image_id()
body = {
'server': {
'name': 'foo',
'imageRef': image_ref,
'flavorRef': '1',
'networks': 'none',
}
}
inst1 = self.api.api_post('servers', body)
body['server']['name'] = 'bar'
inst2 = self.api.api_post('servers', body)
list_resp = self.api.get_servers()
# Default sort is created_at desc, so last created is first
self.assertEqual(2, len(list_resp))
self.assertEqual(inst2.body['server']['id'], list_resp[0]['id'])
self.assertEqual('bar', list_resp[0]['name'])
self.assertEqual(inst1.body['server']['id'], list_resp[1]['id'])
self.assertEqual('foo', list_resp[1]['name'])
# Change the sort order
list_resp = self.api.api_get(
'servers/detail?sort_key=created_at&sort_dir=asc')
list_resp = list_resp.body['servers']
self.assertEqual(2, len(list_resp))
self.assertEqual(inst1.body['server']['id'], list_resp[0]['id'])
self.assertEqual('foo', list_resp[0]['name'])
self.assertEqual(inst2.body['server']['id'], list_resp[1]['id'])
self.assertEqual('bar', list_resp[1]['name'])
def test_instance_list_from_buildrequests(self):
self.useFixture(nova_fixtures.AllServicesCurrent())
self._test_instance_list_from_buildrequests()
def test_instance_list_from_buildrequests_old_service(self):
self._test_instance_list_from_buildrequests()
|
sebrandon1/nova
|
nova/tests/functional/wsgi/test_servers.py
|
Python
|
apache-2.0
| 7,356
|
def shell_sort(lst):
"""
shell sort
"""
n = len(lst)
gap = n // 2
while gap:
for i in range(gap, n):
for j in range(i - gap, -1, -gap):
if lst[j + gap] < lst[j]:
lst[j + gap], lst[j] = lst[j], lst[j + gap]
else:
break
gap //= 2
# while gap:
# for i in range(gap, n):
# j = i
# while j > 0:
# if lst[j - gap] > lst[j]:
# lst[j - gap], lst[j] = lst[j], lst[j - gap]
# j -= gap
# else:
# break
# gap //= 2
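# Added note: the gap sequence is n//2, n//4, ..., 1, so for the 9-element
# list below the passes use gaps 4, 2 and 1; the final gap-1 pass is an
# ordinary insertion-style sweep with early termination.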
if __name__ == "__main__":
lst = [5, 4, 7, 1, 12, 13, 23, 0, 4]
print(lst)
shell_sort(lst)
print(lst)
|
luoshao23/ML_algorithm
|
Sorting/shell_sort.py
|
Python
|
mit
| 787
|
"""Stdout module."""
from __future__ import print_function, absolute_import
import logging
import click
from swak.plugin import Output
from swak.formatter import Formatter, StdoutFormatter
from swak.buffer import Buffer
from swak.memorybuffer import MemoryBuffer, DEFAULT_CHUNK_MAX_RECORD,\
DEFAULT_CHUNK_MAX_SIZE, DEFAULT_BUFFER_MAX_CHUNK
class Stdout(Output):
"""Stdout class."""
def __init__(self, formatter=None, abuffer=None):
"""Init.
Args:
formatter (Formatter): Swak formatter for this output.
abuffer (Buffer): Swak buffer for this output.
"""
logging.info("Stdout.__init__")
formatter = formatter if formatter is not None else StdoutFormatter()
super(Stdout, self).__init__(formatter, abuffer)
def _write(self, bulk):
"""Write a bulk.
Args:
bulk (bytearray or list): If the chunk passed in holds binary
data, bulk is a bytearray; otherwise it is a list of
strings.
"""
logging.debug("Stdout._write")
if type(bulk) is list:
for line in bulk:
print(line)
else:
print(bulk)
@click.group(chain=True, invoke_without_command=True,
help="Output to standard output.")
@click.pass_context
def main(ctx):
"""Plugin entry."""
pass
@main.resultcallback()
def process_components(components):
"""Process components and build a Stdout.
Args:
components (list): Formatter and/or Buffer objects returned by the
chained subcommands.
Returns:
Stdout
"""
_formatter = _buffer = None
for com in components:
if isinstance(com, Formatter):
_formatter = com
if isinstance(com, Buffer):
_buffer = com
return Stdout(_formatter, _buffer)
@main.command('f.stdout', help="Stdout formatter for this output.")
@click.option('-z', '--timezone', default=None, show_default=True,
help="Timezone for format.")
def f_stdout(timezone):
"""Formatter entry."""
return StdoutFormatter(timezone=timezone)
@main.command('b.memory', help="Memory buffer for this output.")
@click.option('-f', '--flush-interval', default=None, type=str,
show_default=True, help="Flush interval.")
@click.option('-r', '--chunk-max-record', default=DEFAULT_CHUNK_MAX_RECORD,
type=int, show_default=True, help="Maximum records per chunk.")
@click.option('-s', '--chunk-max-size', default=DEFAULT_CHUNK_MAX_SIZE,
show_default=True, help="Maximum size per chunk.")
@click.option('-c', '--buffer-max-chunk', default=DEFAULT_BUFFER_MAX_CHUNK,
show_default=True, help="Maximum chunks per buffer.")
def b_memory(flush_interval, chunk_max_record, chunk_max_size,
buffer_max_chunk):
"""Formatter entry."""
return MemoryBuffer(None, False, flush_interval=flush_interval,
buffer_max_chunk=buffer_max_chunk,
chunk_max_record=chunk_max_record)
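# Added observation: the --chunk-max-size value is parsed above but not passed
# to MemoryBuffer here; whether that omission is intentional is not clear from
# this module alone.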
if __name__ == '__main__':
main()
|
haje01/swak
|
swak/stdplugins/stdout/o_stdout.py
|
Python
|
mit
| 3,058
|
#!/usr/bin/python
# vim:set ts=4 sw=4 expandtab:
import os
import sys
import shutil
import optparse
import errno
import glob
import re
from distutils.sysconfig import get_python_lib
import napkin.version
parser = optparse.OptionParser(version=napkin.version.__version__)
parser.add_option("--prefix", action="store", dest="prefix", default="/usr/local")
parser.add_option("--sysconfdir", action="store", dest="sysconfdir", default="%(prefix)s/etc")
parser.add_option("--pkgconfdir", action="store", dest="pkgconfdir", default="%(sysconfdir)s/napkin")
parser.add_option("--sbindir", action="store", dest="sbindir", default="%(prefix)s/sbin")
parser.add_option("--localstatedir", action="store", dest="localstatedir", default="%(prefix)s/var")
parser.add_option("--pkgstatedir", action="store", dest="pkgstatedir", default="%(localstatedir)s/lib/napkin")
parser.add_option("--initddir", action="store", dest="initddir", default="%(sysconfdir)s/init.d")
parser.add_option("--destdir", action="store", dest="destdir", default="")
(options, args) = parser.parse_args(sys.argv[1:])
options.pythondir = get_python_lib()
options.srcdir = os.path.dirname(sys.argv[0])
def get_path(name):
p = getattr(options, name)
while '%(' in p:
p = p % options.__dict__
return p
def mkdir_p(name, mode='755'):
try:
os.makedirs(name, int(mode, 8))
except OSError:
if sys.exc_info()[1].errno != errno.EEXIST:
raise
def j(*args):
r = ""
for i in args:
if len(i) == 0:
continue
if i[0] != os.path.sep:
r += os.path.sep
r += i
return r
def install(files, dest, replacements=None, keep=False, mode=None, dbn=None):
if not replacements:
replacements = {}
replacements['^#!\s*\S+python[0-9.]*'] = "#!%s" % sys.executable
reps = {}
for i in replacements:
r = re.compile(i)
reps[r] = replacements[i]
replacements = reps
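# Added note: because of the default replacement registered above, every
# installed script gets its shebang rewritten to point at the Python
# interpreter that is running this setup script.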
files = glob.glob(files)
mkdir_p(dest)
for sn in files:
bn = os.path.basename(sn)
if dbn:
dn = j(dest, dbn)
else:
dn = j(dest, bn)
if keep and os.path.exists(dn):
continue
rf = open(sn, 'r')
wf = open(dn, 'w')
while True:
buf = rf.readline()
if not buf:
break
for i in replacements:
buf = re.sub(i, replacements[i], buf)
wf.write(buf)
wf.close()
rf.close()
sst = os.stat(sn)
os.utime(dn, (sst.st_atime, sst.st_mtime))
if not mode:
m = sst.st_mode
else:
m = int(mode, 8)
os.chmod(dn, m)
def install_lib():
install('napkin/*.py', j(get_path("destdir"), get_path("pythondir"), "napkin"), mode='644')
install('napkin/providers/*.py', j(get_path("destdir"), get_path("pythondir"), "napkin", "providers"), mode='644')
def install_master():
install('master/napkin-master.py', j(get_path("destdir"), get_path("sbindir")), mode='755', dbn='napkin-master')
install('master/napkin-run.py', j(get_path("destdir"), get_path("sbindir")), mode='755', dbn='napkin-run')
install('master/napkin-ca.py', j(get_path("destdir"), get_path("sbindir")), mode='755', dbn='napkin-ca')
mkdir_p(j(get_path("destdir"), get_path("pkgconfdir")), '700')
for i in ['etc/master.conf', 'etc/agent-template.ct', 'etc/master-template.ct']:
install(i, j(get_path("destdir"), get_path("pkgconfdir")), mode='644', keep=True)
install('master/napkin-master.init', j(get_path("destdir"), get_path("initddir")), mode='755', dbn='napkin-master')
def install_agent():
install('agent/napkind.py', j(get_path("destdir"), get_path("sbindir")), mode='755', dbn='napkind')
mkdir_p(j(get_path("destdir"), get_path("pkgconfdir")), '700')
for i in ['etc/logging.conf', 'etc/agent-template.ct']:
install(i, j(get_path("destdir"), get_path("pkgconfdir")), mode='644', keep=True)
install('agent/napkind.init', j(get_path("destdir"), get_path("initddir")), mode='755', dbn='napkind')
mkdir_p(j(get_path("destdir"), get_path("pkgstatedir")), '700')
def dist():
ver = napkin.version.__version__
ret = os.system("git archive --format=tar --prefix=napkin-%s/ HEAD | bzip2 -9 > napkin-%s.tar.bz2" % (ver, ver))
if ret != 0:
raise Exception("creating archive failed with %d" % ret)
def rpm():
dist()
ret = os.system("rpmbuild -tb napkin-%s.tar.bz2" % napkin.version.__version__)
if ret != 0:
raise Exception("creating RPMs failed with %d" % ret)
if args[0] == "install":
install_lib()
install_master()
install_agent()
elif args[0] == "install-lib":
install_lib()
elif args[0] == "install-master":
install_lib()
install_master()
elif args[0] == "install-agent":
install_lib()
install_agent()
elif args[0] == "dist":
dist()
elif args[0] == "rpm":
rpm()
else:
raise Exception("unknown operation %s" % args[0])
|
dhozac/napkin
|
setup.py
|
Python
|
gpl-3.0
| 5,021
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from nova import db
from nova.objects import base
from nova.objects import fields
from nova import utils
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class InstancePCIRequest(base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: Add request_id
VERSION = '1.1'
fields = {
'count': fields.IntegerField(),
'spec': fields.ListOfDictOfNullableStringsField(),
'alias_name': fields.StringField(nullable=True),
# A stashed request related to a resize, not current
'is_new': fields.BooleanField(default=False),
'request_id': fields.UUIDField(nullable=True),
}
def obj_load_attr(self, attr):
setattr(self, attr, None)
# NOTE(danms): The dict that this object replaces uses a key of 'new'
# so we translate it here to our more appropriately-named 'is_new'.
# This is not something that affects the object version, so we could
# remove this later when all dependent code is fixed.
@property
def new(self):
return self.is_new
def obj_make_compatible(self, primitive, target_version):
target_version = utils.convert_version_to_tuple(target_version)
if target_version < (1, 1) and 'request_id' in primitive:
del primitive['request_id']
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class InstancePCIRequests(base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: InstancePCIRequest 1.1
VERSION = '1.1'
fields = {
'instance_uuid': fields.UUIDField(),
'requests': fields.ListOfObjectsField('InstancePCIRequest'),
}
obj_relationships = {
'requests': [('1.0', '1.0'), ('1.1', '1.1')],
}
def obj_make_compatible(self, primitive, target_version):
target_version = utils.convert_version_to_tuple(target_version)
if target_version < (1, 1) and 'requests' in primitive:
for index, request in enumerate(self.requests):
request.obj_make_compatible(
primitive['requests'][index]['nova_object.data'], '1.0')
primitive['requests'][index]['nova_object.version'] = '1.0'
@classmethod
def obj_from_db(cls, context, instance_uuid, db_requests):
self = cls(context=context, requests=[],
instance_uuid=instance_uuid)
if db_requests is not None:
requests = jsonutils.loads(db_requests)
else:
requests = []
for request in requests:
request_obj = InstancePCIRequest(
count=request['count'], spec=request['spec'],
alias_name=request['alias_name'], is_new=request['is_new'],
request_id=request['request_id'])
request_obj.obj_reset_changes()
self.requests.append(request_obj)
self.obj_reset_changes()
return self
@base.remotable_classmethod
def get_by_instance_uuid(cls, context, instance_uuid):
db_pci_requests = db.instance_extra_get_by_instance_uuid(
context, instance_uuid, columns=['pci_requests'])
if db_pci_requests is not None:
db_pci_requests = db_pci_requests['pci_requests']
return cls.obj_from_db(context, instance_uuid, db_pci_requests)
@classmethod
def get_by_instance_uuid_and_newness(cls, context, instance_uuid, is_new):
requests = cls.get_by_instance_uuid(context, instance_uuid)
requests.requests = [x for x in requests.requests
if x.new == is_new]
return requests
@staticmethod
def _load_legacy_requests(sysmeta_value, is_new=False):
if sysmeta_value is None:
return []
requests = []
db_requests = jsonutils.loads(sysmeta_value)
for db_request in db_requests:
request = InstancePCIRequest(
count=db_request['count'], spec=db_request['spec'],
alias_name=db_request['alias_name'], is_new=is_new)
request.obj_reset_changes()
requests.append(request)
return requests
@classmethod
def get_by_instance(cls, context, instance):
# NOTE (baoli): not all callers are passing instance as object yet.
# Therefore, use the dict syntax in this routine
if 'pci_requests' in instance['system_metadata']:
# NOTE(danms): This instance hasn't been converted to use
# instance_extra yet, so extract the data from sysmeta
sysmeta = instance['system_metadata']
_requests = (
cls._load_legacy_requests(sysmeta['pci_requests']) +
cls._load_legacy_requests(sysmeta.get('new_pci_requests'),
is_new=True))
requests = cls(instance_uuid=instance['uuid'], requests=_requests)
requests.obj_reset_changes()
return requests
else:
return cls.get_by_instance_uuid(context, instance['uuid'])
def to_json(self):
blob = [{'count': x.count,
'spec': x.spec,
'alias_name': x.alias_name,
'is_new': x.is_new,
'request_id': x.request_id} for x in self.requests]
return jsonutils.dumps(blob)
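    # NOTE: to_json() above produces a plain JSON list; for a single,
    # purely hypothetical request the resulting blob would look roughly like:
    #   [{"count": 1, "spec": [{"vendor_id": "8086"}], "alias_name": null,
    #     "is_new": false, "request_id": null}]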
@classmethod
def from_request_spec_instance_props(cls, pci_requests):
objs = [InstancePCIRequest(**request)
for request in pci_requests['requests']]
return cls(requests=objs, instance_uuid=pci_requests['instance_uuid'])
|
nikesh-mahalka/nova
|
nova/objects/instance_pci_requests.py
|
Python
|
apache-2.0
| 6,320
|
from setuptools import setup, find_packages
from os.path import join, dirname
setup(
name="fant_sizer",
version="0.7",
author="Rypiuk Oleksandr",
author_email="ripiuk96@gmail.com",
description="fant_sizer command-line file-information",
url="https://github.com/ripiuk/fant_sizer",
keywords="file command-line information size tool recursively",
license="MIT",
classifiers=[
'Topic :: Utilities',
'Environment :: Console',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python :: 3.6'
],
packages=find_packages(),
long_description=open(join(dirname(__file__), "README.rst")).read(),
entry_points={
"console_scripts":
['fant_sizer = fant_sizer.fant_sizer:_main'],
},
)
|
ripiuk/fant_sizer
|
setup.py
|
Python
|
mit
| 1,096
|
# Sample script using YajlParser
# https://github.com/pykler/yajl-py/blob/master/examples/yajl_py_example.py
# Be aware it might take some time to parse large files.
import os
import sys
BASEPATH = os.path.dirname(os.path.realpath(__file__))
sys.path = [BASEPATH, '%s/..' %BASEPATH] + sys.path
from yajl import YajlContentHandler, YajlParser
# Sample callbacks, which output some debug info
# these are examples to show off the yajl parser
class ContentHandler(YajlContentHandler):
def __init__(self):
self.out = sys.stdout
def yajl_null(self, ctx):
self.out.write("null\n" )
def yajl_boolean(self, ctx, boolVal):
self.out.write("bool: %s\n" %('true' if boolVal else 'false'))
def yajl_integer(self, ctx, integerVal):
self.out.write("integer: %s\n" %integerVal)
def yajl_double(self, ctx, doubleVal):
self.out.write("double: %s\n" %doubleVal)
def yajl_number(self, ctx, stringNum):
        ''' Since this is defined, the integer and double callbacks above go unused '''
num = float(stringNum) if '.' in stringNum else int(stringNum)
self.out.write("number: %s\n" %num)
def yajl_string(self, ctx, stringVal):
self.out.write("string: '%s'\n" %stringVal)
def yajl_start_map(self, ctx):
self.out.write("map open '{'\n")
def yajl_map_key(self, ctx, stringVal):
self.out.write("key: '%s'\n" %stringVal)
def yajl_end_map(self, ctx):
self.out.write("map close '}'\n")
def yajl_start_array(self, ctx):
self.out.write("array open '['\n")
def yajl_end_array(self, ctx):
self.out.write("array close ']'\n")
def main(args):
parser = YajlParser(ContentHandler())
parser.allow_multiple_values = True
if args:
for fn in args:
f = open(fn)
parser.parse(f=f)
f.close()
else:
parser.parse()
return 0
if __name__ == "__main__":
raise SystemExit(main(sys.argv[1:]))
|
algarecu/PlottingJSON
|
yajl_py_example.py
|
Python
|
gpl-2.0
| 1,979
|
# Authors: Simo Sorce <ssorce@redhat.com>
#
# Copyright (C) 2007 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import
from __future__ import print_function
import errno
import socket
import getpass
import gssapi
import ldif
import os
import re
import fileinput
import sys
import tempfile
import shutil
import traceback
import textwrap
from contextlib import contextmanager
from dns import resolver, rdatatype
from dns.exception import DNSException
import ldap
import ldapurl
import six
from six.moves.configparser import SafeConfigParser, NoOptionError
import ipaplatform
from ipapython import ipautil, sysrestore, admintool, version
from ipapython.admintool import ScriptError
from ipapython.ipa_log_manager import root_logger
from ipalib.util import validate_hostname
from ipapython import config
from ipalib import api, errors, x509
from ipapython.dn import DN
from ipaserver.install import certs, service, sysupgrade
from ipaplatform import services
from ipaplatform.paths import paths
from ipaplatform.tasks import tasks
if six.PY3:
unicode = str
# Used to determine install status
IPA_MODULES = [
'httpd', 'kadmin', 'dirsrv', 'pki-tomcatd', 'install', 'krb5kdc', 'ntpd',
'named', 'ipa_memcached']
class BadHostError(Exception):
pass
class HostLookupError(BadHostError):
pass
class HostForwardLookupError(HostLookupError):
pass
class HostReverseLookupError(HostLookupError):
pass
class HostnameLocalhost(HostLookupError):
pass
class UpgradeVersionError(Exception):
pass
class UpgradePlatformError(UpgradeVersionError):
pass
class UpgradeDataOlderVersionError(UpgradeVersionError):
pass
class UpgradeDataNewerVersionError(UpgradeVersionError):
pass
class UpgradeMissingVersionError(UpgradeVersionError):
pass
class ReplicaConfig:
def __init__(self, top_dir=None):
self.realm_name = ""
self.domain_name = ""
self.master_host_name = ""
self.dirman_password = ""
self.host_name = ""
self.dir = ""
self.subject_base = None
self.setup_ca = False
self.version = 0
self.top_dir = top_dir
subject_base = ipautil.dn_attribute_property('_subject_base')
def get_fqdn():
fqdn = ""
try:
fqdn = socket.getfqdn()
except Exception:
try:
fqdn = socket.gethostname()
except Exception:
fqdn = ""
return fqdn
def verify_fqdn(host_name, no_host_dns=False, local_hostname=True):
"""
Run fqdn checks for given host:
- test hostname format
- test that hostname is fully qualified
- test forward and reverse hostname DNS lookup
Raises `BadHostError` or derived Exceptions if there is an error
:param host_name: The host name to verify.
:param no_host_dns: If true, skip DNS resolution tests of the host name.
:param local_hostname: If true, run additional checks for local hostnames
"""
if len(host_name.split(".")) < 2 or host_name == "localhost.localdomain":
raise BadHostError("Invalid hostname '%s', must be fully-qualified." % host_name)
if host_name != host_name.lower():
raise BadHostError("Invalid hostname '%s', must be lower-case." % host_name)
if ipautil.valid_ip(host_name):
raise BadHostError("IP address not allowed as a hostname")
try:
# make sure that the host name meets the requirements in ipalib
validate_hostname(host_name)
except ValueError as e:
raise BadHostError("Invalid hostname '%s', %s" % (host_name, unicode(e)))
if local_hostname:
try:
root_logger.debug('Check if %s is a primary hostname for localhost', host_name)
ex_name = socket.gethostbyaddr(host_name)
root_logger.debug('Primary hostname for localhost: %s', ex_name[0])
if host_name != ex_name[0]:
raise HostLookupError("The host name %s does not match the primary host name %s. "\
"Please check /etc/hosts or DNS name resolution" % (host_name, ex_name[0]))
except socket.gaierror:
pass
except socket.error as e:
root_logger.debug(
'socket.gethostbyaddr() error: %d: %s',
e.errno, e.strerror) # pylint: disable=no-member
if no_host_dns:
print("Warning: skipping DNS resolution of host", host_name)
return
try:
root_logger.debug('Search DNS for %s', host_name)
hostaddr = socket.getaddrinfo(host_name, None)
except Exception as e:
root_logger.debug('Search failed: %s', e)
raise HostForwardLookupError("Unable to resolve host name, check /etc/hosts or DNS name resolution")
if len(hostaddr) == 0:
raise HostForwardLookupError("Unable to resolve host name, check /etc/hosts or DNS name resolution")
# Verify this is NOT a CNAME
try:
root_logger.debug('Check if %s is not a CNAME', host_name)
resolver.query(host_name, rdatatype.CNAME)
raise HostReverseLookupError("The IPA Server Hostname cannot be a CNAME, only A and AAAA names are allowed.")
except DNSException:
pass
# list of verified addresses to prevent multiple searches for the same address
verified = set()
for a in hostaddr:
address = a[4][0]
if address in verified:
continue
if address == '127.0.0.1' or address == '::1':
raise HostForwardLookupError("The IPA Server hostname must not resolve to localhost (%s). A routable IP address must be used. Check /etc/hosts to see if %s is an alias for %s" % (address, host_name, address))
try:
root_logger.debug('Check reverse address of %s', address)
revname = socket.gethostbyaddr(address)[0]
except Exception as e:
root_logger.debug('Check failed: %s', e)
root_logger.error(
"Unable to resolve the IP address %s to a host name, "
"check /etc/hosts and DNS name resolution", address)
else:
root_logger.debug('Found reverse name: %s', revname)
if revname != host_name:
root_logger.error(
"The host name %s does not match the value %s obtained "
"by reverse lookup on IP address %s", host_name, revname,
address)
verified.add(address)
def record_in_hosts(ip, host_name=None, conf_file=paths.HOSTS):
"""
Search record in /etc/hosts - static table lookup for hostnames
In case of match, returns a tuple of ip address and a list of
hostname aliases
When no record is matched, None is returned
:param ip: IP address
:param host_name: Optional hostname to search
:param conf_file: Optional path to the lookup table
"""
hosts = open(conf_file, 'r').readlines()
for line in hosts:
line = line.rstrip('\n')
fields = line.partition('#')[0].split()
if len(fields) == 0:
continue
try:
hosts_ip = fields[0]
names = fields[1:]
if hosts_ip != ip:
continue
if host_name is not None:
if host_name in names:
return (hosts_ip, names)
else:
return None
return (hosts_ip, names)
except IndexError:
print("Warning: Erroneous line '%s' in %s" % (line, conf_file))
continue
return None
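# NOTE: a minimal, hypothetical illustration of record_in_hosts() given an
# /etc/hosts line "192.0.2.10  server.example.test server":
#   record_in_hosts('192.0.2.10')
#       -> ('192.0.2.10', ['server.example.test', 'server'])
#   record_in_hosts('192.0.2.10', 'other.example.test')
#       -> None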
def add_record_to_hosts(ip, host_name, conf_file=paths.HOSTS):
hosts_fd = open(conf_file, 'r+')
hosts_fd.seek(0, 2)
hosts_fd.write(ip+'\t'+host_name+' '+host_name.split('.')[0]+'\n')
hosts_fd.close()
def read_ip_addresses():
ips = []
print("Enter the IP address to use, or press Enter to finish.")
while True:
ip = ipautil.user_input("Please provide the IP address to be used for this host name", allow_empty = True)
if not ip:
break
try:
ip_parsed = ipautil.CheckedIPAddress(ip, match_local=True)
except Exception as e:
print("Error: Invalid IP Address %s: %s" % (ip, e))
continue
ips.append(ip_parsed)
return ips
def read_dns_forwarders():
addrs = []
if ipautil.user_input("Do you want to configure DNS forwarders?", True):
print("Following DNS servers are configured in /etc/resolv.conf: %s" %
", ".join(resolver.get_default_resolver().nameservers))
if ipautil.user_input("Do you want to configure these servers as DNS "
"forwarders?", True):
addrs = resolver.default_resolver.nameservers[:]
print("All DNS servers from /etc/resolv.conf were added. You can "
"enter additional addresses now:")
while True:
ip = ipautil.user_input("Enter an IP address for a DNS forwarder, "
"or press Enter to skip", allow_empty=True)
if not ip:
break
try:
ip_parsed = ipautil.CheckedIPAddress(ip, parse_netmask=False)
except Exception as e:
print("Error: Invalid IP Address %s: %s" % (ip, e))
print("DNS forwarder %s not added." % ip)
continue
print("DNS forwarder %s added. You may add another." % ip)
addrs.append(str(ip_parsed))
if not addrs:
print("No DNS forwarders configured")
return addrs
def get_password(prompt):
if os.isatty(sys.stdin.fileno()):
return getpass.getpass(prompt)
else:
sys.stdout.write(prompt)
sys.stdout.flush()
line = sys.stdin.readline()
if not line:
raise EOFError()
return line.rstrip()
def _read_password_default_validator(password):
if len(password) < 8:
raise ValueError("Password must be at least 8 characters long")
def read_password(user, confirm=True, validate=True, retry=True, validator=_read_password_default_validator):
correct = False
pwd = None
try:
while not correct:
if not retry:
correct = True
pwd = get_password(user + " password: ")
if not pwd:
continue
if validate:
try:
validator(pwd)
except ValueError as e:
print(str(e))
pwd = None
continue
if not confirm:
correct = True
continue
pwd_confirm = get_password("Password (confirm): ")
if pwd != pwd_confirm:
print("Password mismatch!")
print("")
pwd = None
else:
correct = True
except EOFError:
return None
finally:
print("")
return pwd
def update_file(filename, orig, subst):
if os.path.exists(filename):
st = os.stat(filename)
pattern = "%s" % re.escape(orig)
p = re.compile(pattern)
for line in fileinput.input(filename, inplace=1):
if not p.search(line):
sys.stdout.write(line)
else:
sys.stdout.write(p.sub(subst, line))
fileinput.close()
os.chown(filename, st.st_uid, st.st_gid) # reset perms
return 0
else:
print("File %s doesn't exist." % filename)
return 1
def set_directive(filename, directive, value, quotes=True, separator=' '):
"""Set a name/value pair directive in a configuration file.
A value of None means to drop the directive.
This has only been tested with nss.conf
"""
valueset = False
st = os.stat(filename)
fd = open(filename)
newfile = []
for line in fd:
if line.lstrip().startswith(directive):
valueset = True
if value is not None:
if quotes:
newfile.append('%s%s"%s"\n' % (directive, separator, value))
else:
newfile.append('%s%s%s\n' % (directive, separator, value))
else:
newfile.append(line)
fd.close()
if not valueset:
if value is not None:
if quotes:
newfile.append('%s%s"%s"\n' % (directive, separator, value))
else:
newfile.append('%s%s%s\n' % (directive, separator, value))
fd = open(filename, "w")
fd.write("".join(newfile))
fd.close()
os.chown(filename, st.st_uid, st.st_gid) # reset perms
def get_directive(filename, directive, separator=' '):
"""
A rather inefficient way to get a configuration directive.
"""
fd = open(filename, "r")
for line in fd:
if line.lstrip().startswith(directive):
line = line.strip()
result = line.split(separator, 1)[1]
result = result.strip('"')
result = result.strip(' ')
fd.close()
return result
fd.close()
return None
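# NOTE: an illustrative (hypothetical path) use of the two helpers above
# against an nss.conf-style file:
#   set_directive('/etc/httpd/conf.d/nss.conf', 'NSSNickname', 'Server-Cert')
#       -> rewrites the line as: NSSNickname "Server-Cert"
#   get_directive('/etc/httpd/conf.d/nss.conf', 'NSSNickname')
#       -> 'Server-Cert'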
def kadmin(command):
ipautil.run(["kadmin.local", "-q", command,
"-x", "ipa-setup-override-restrictions"])
def kadmin_addprinc(principal):
kadmin("addprinc -randkey " + principal)
def kadmin_modprinc(principal, options):
kadmin("modprinc " + options + " " + principal)
def create_keytab(path, principal):
try:
if ipautil.file_exists(path):
os.remove(path)
except os.error:
root_logger.critical("Failed to remove %s." % path)
kadmin("ktadd -k " + path + " " + principal)
def resolve_host(host_name):
try:
addrinfos = socket.getaddrinfo(host_name, None,
socket.AF_UNSPEC, socket.SOCK_STREAM)
ip_list = []
for ai in addrinfos:
ip = ai[4][0]
if ip == "127.0.0.1" or ip == "::1":
raise HostnameLocalhost("The hostname resolves to the localhost address")
ip_list.append(ip)
return ip_list
except socket.error:
return []
def get_host_name(no_host_dns):
"""
Get the current FQDN from the socket and verify that it is valid.
no_host_dns is a boolean that determines whether we enforce that the
hostname is resolvable.
Will raise a RuntimeError on error, returns hostname on success
"""
hostname = get_fqdn()
verify_fqdn(hostname, no_host_dns)
return hostname
def get_server_ip_address(host_name, unattended, setup_dns, ip_addresses):
# Check we have a public IP that is associated with the hostname
try:
hostaddr = resolve_host(host_name)
except HostnameLocalhost:
print("The hostname resolves to the localhost address (127.0.0.1/::1)", file=sys.stderr)
print("Please change your /etc/hosts file so that the hostname", file=sys.stderr)
print("resolves to the ip address of your network interface.", file=sys.stderr)
print("The KDC service does not listen on localhost", file=sys.stderr)
print("", file=sys.stderr)
print("Please fix your /etc/hosts file and restart the setup program", file=sys.stderr)
sys.exit(1)
ips = []
if len(hostaddr):
for ha in hostaddr:
try:
ips.append(ipautil.CheckedIPAddress(ha, match_local=True))
except ValueError as e:
root_logger.warning("Invalid IP address %s for %s: %s", ha, host_name, unicode(e))
if not ips and not ip_addresses:
if not unattended:
ip_addresses = read_ip_addresses()
if ip_addresses:
if setup_dns:
ips = ip_addresses
else:
            # all specified addresses were resolved for this host
if set(ip_addresses) <= set(ips):
ips = ip_addresses
else:
print("Error: the hostname resolves to IP address(es) that are different", file=sys.stderr)
print("from those provided on the command line. Please fix your DNS", file=sys.stderr)
print("or /etc/hosts file and restart the installation.", file=sys.stderr)
print("Provided but not resolved address(es): %s" % \
", ".join(str(ip) for ip in (set(ip_addresses) - set(ips))), file=sys.stderr)
sys.exit(1)
if not ips:
print("No usable IP address provided nor resolved.", file=sys.stderr)
sys.exit(1)
for ip_address in ips:
# check /etc/hosts sanity
hosts_record = record_in_hosts(str(ip_address))
if hosts_record is not None:
primary_host = hosts_record[1][0]
if primary_host != host_name:
print("Error: there is already a record in /etc/hosts for IP address %s:" \
% ip_address, file=sys.stderr)
print(hosts_record[0], " ".join(hosts_record[1]), file=sys.stderr)
print("Chosen hostname %s does not match configured canonical hostname %s" \
% (host_name, primary_host), file=sys.stderr)
print("Please fix your /etc/hosts file and restart the installation.", file=sys.stderr)
sys.exit(1)
return ips
def update_hosts_file(ip_addresses, host_name, fstore):
"""
Update hosts with specified addresses
:param ip_addresses: list of IP addresses
:return:
"""
if not fstore.has_file(paths.HOSTS):
fstore.backup_file(paths.HOSTS)
for ip_address in ip_addresses:
if record_in_hosts(str(ip_address)):
continue
print("Adding [{address!s} {name}] to your /etc/hosts file".format(
address=ip_address, name=host_name))
add_record_to_hosts(str(ip_address), host_name)
def expand_replica_info(filename, password):
"""
Decrypt and expand a replica installation file into a temporary
location. The caller is responsible to remove this directory.
"""
top_dir = tempfile.mkdtemp("ipa")
tarfile = top_dir+"/files.tar"
dir_path = top_dir + "/realm_info"
ipautil.decrypt_file(filename, tarfile, password, top_dir)
ipautil.run(["tar", "xf", tarfile, "-C", top_dir])
os.remove(tarfile)
return top_dir, dir_path
def read_replica_info(dir_path, rconfig):
"""
Read the contents of a replica installation file.
rconfig is a ReplicaConfig object
"""
filename = dir_path + "/realm_info"
fd = open(filename)
config = SafeConfigParser()
config.readfp(fd)
rconfig.realm_name = config.get("realm", "realm_name")
rconfig.master_host_name = config.get("realm", "master_host_name")
rconfig.domain_name = config.get("realm", "domain_name")
rconfig.host_name = config.get("realm", "destination_host")
rconfig.subject_base = config.get("realm", "subject_base")
try:
rconfig.version = int(config.get("realm", "version"))
except NoOptionError:
pass
def read_replica_info_dogtag_port(config_dir):
portfile = config_dir + "/dogtag_directory_port.txt"
default_port = 7389
if not ipautil.file_exists(portfile):
dogtag_master_ds_port = default_port
else:
with open(portfile) as fd:
try:
dogtag_master_ds_port = int(fd.read())
except (ValueError, IOError) as e:
root_logger.debug('Cannot parse dogtag DS port: %s', e)
root_logger.debug('Default to %d', default_port)
dogtag_master_ds_port = default_port
return dogtag_master_ds_port
def create_replica_config(dirman_password, filename, options):
top_dir = None
try:
top_dir, dir = expand_replica_info(filename, dirman_password)
except Exception as e:
root_logger.error("Failed to decrypt or open the replica file.")
print("ERROR: Failed to decrypt or open the replica file.")
print("Verify you entered the correct Directory Manager password.")
sys.exit(1)
config = ReplicaConfig(top_dir)
read_replica_info(dir, config)
root_logger.debug(
'Installing replica file with version %d (0 means no version in prepared file).',
config.version)
if config.version and config.version > version.NUM_VERSION:
root_logger.error(
'A replica file from a newer release (%d) cannot be installed on an older version (%d)',
config.version, version.NUM_VERSION)
sys.exit(1)
config.dirman_password = dirman_password
try:
host = get_host_name(options.no_host_dns)
except BadHostError as e:
root_logger.error(str(e))
sys.exit(1)
if config.host_name != host:
try:
print("This replica was created for '%s' but this machine is named '%s'" % (config.host_name, host))
if not ipautil.user_input("This may cause problems. Continue?", False):
root_logger.debug(
"Replica was created for %s but machine is named %s "
"User chose to exit",
config.host_name, host)
sys.exit(0)
config.host_name = host
print("")
except KeyboardInterrupt:
root_logger.debug("Keyboard Interrupt")
sys.exit(0)
config.dir = dir
config.ca_ds_port = read_replica_info_dogtag_port(config.dir)
return config
def check_server_configuration():
"""
Check if IPA server is configured on the system.
This is done by checking if there are system restore (uninstall) files
present on the system. Note that this check can only be run with root
privileges.
When IPA is not configured, this function raises a RuntimeError exception.
Most convenient use case for the function is in install tools that require
configured IPA for its function.
"""
server_fstore = sysrestore.FileStore(paths.SYSRESTORE)
if not server_fstore.has_files():
raise RuntimeError("IPA is not configured on this system.")
def remove_file(filename):
"""
Remove a file and log any exceptions raised.
"""
try:
if os.path.lexists(filename):
os.unlink(filename)
except Exception as e:
root_logger.error('Error removing %s: %s' % (filename, str(e)))
def rmtree(path):
"""
Remove a directory structure and log any exceptions raised.
"""
try:
if os.path.exists(path):
shutil.rmtree(path)
except Exception as e:
root_logger.error('Error removing %s: %s' % (path, str(e)))
def is_ipa_configured():
"""
Using the state and index install files determine if IPA is already
configured.
"""
installed = False
sstore = sysrestore.StateFile(paths.SYSRESTORE)
fstore = sysrestore.FileStore(paths.SYSRESTORE)
for module in IPA_MODULES:
if sstore.has_state(module):
root_logger.debug('%s is configured' % module)
installed = True
else:
root_logger.debug('%s is not configured' % module)
if fstore.has_files():
root_logger.debug('filestore has files')
installed = True
else:
root_logger.debug('filestore is tracking no files')
return installed
def run_script(main_function, operation_name, log_file_name=None,
fail_message=None):
"""Run the given function as a command-line utility
This function:
- Runs the given function
- Formats any errors
- Exits with the appropriate code
:param main_function: Function to call
:param log_file_name: Name of the log file (displayed on unexpected errors)
:param operation_name: Name of the script
:param fail_message: Optional message displayed on failure
"""
root_logger.info('Starting script: %s', operation_name)
try:
try:
return_value = main_function()
except BaseException as e:
if (
isinstance(e, SystemExit) and
(e.code is None or e.code == 0) # pylint: disable=no-member
):
# Not an error after all
root_logger.info('The %s command was successful',
operation_name)
else:
# Log at the DEBUG level, which is not output to the console
# (unless in debug/verbose mode), but is written to a logfile
# if one is open.
tb = sys.exc_info()[2]
root_logger.debug('\n'.join(traceback.format_tb(tb)))
root_logger.debug('The %s command failed, exception: %s: %s',
operation_name, type(e).__name__, e)
if fail_message and not isinstance(e, SystemExit):
print(fail_message)
raise
else:
if return_value:
root_logger.info('The %s command failed, return value %s',
operation_name, return_value)
else:
root_logger.info('The %s command was successful',
operation_name)
sys.exit(return_value)
except BaseException as error:
message, exitcode = handle_error(error, log_file_name)
if message:
print(message, file=sys.stderr)
sys.exit(exitcode)
def handle_error(error, log_file_name=None):
"""Handle specific errors. Returns a message and return code"""
if isinstance(error, SystemExit):
if isinstance(error.code, int):
return None, error.code
elif error.code is None:
return None, 0
else:
return str(error), 1
if isinstance(error, RuntimeError):
return str(error), 1
if isinstance(error, KeyboardInterrupt):
return "Cancelled.", 1
if isinstance(error, admintool.ScriptError):
return error.msg, error.rval
if isinstance(error, socket.error):
return error, 1
if isinstance(error, errors.ACIError):
return error.message, 1
if isinstance(error, ldap.INVALID_CREDENTIALS):
return "Invalid password", 1
if isinstance(error, ldap.INSUFFICIENT_ACCESS):
return "Insufficient access", 1
if isinstance(error, ldap.LOCAL_ERROR):
return error.args[0].get('info', ''), 1
if isinstance(error, ldap.SERVER_DOWN):
return error.args[0]['desc'], 1
if isinstance(error, ldap.LDAPError):
message = 'LDAP error: %s\n%s\n%s' % (
type(error).__name__,
error.args[0]['desc'].strip(),
error.args[0].get('info', '').strip()
)
return message, 1
if isinstance(error, config.IPAConfigError):
message = "An IPA server to update cannot be found. Has one been configured yet?"
message += "\nThe error was: %s" % error
return message, 1
if isinstance(error, errors.LDAPError):
return "An error occurred while performing operations: %s" % error, 1
if isinstance(error, HostnameLocalhost):
message = textwrap.dedent("""
The hostname resolves to the localhost address (127.0.0.1/::1)
Please change your /etc/hosts file so that the hostname
resolves to the ip address of your network interface.
Please fix your /etc/hosts file and restart the setup program
""").strip()
return message, 1
if log_file_name:
message = "Unexpected error - see %s for details:" % log_file_name
else:
message = "Unexpected error"
message += '\n%s: %s' % (type(error).__name__, error)
return message, 1
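# NOTE: handle_error() maps exceptions to (message, exitcode) pairs; a few
# illustrative results:
#   handle_error(KeyboardInterrupt())  -> ("Cancelled.", 1)
#   handle_error(SystemExit(2))        -> (None, 2)
#   handle_error(RuntimeError("oops")) -> ("oops", 1)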
def load_pkcs12(cert_files, key_password, key_nickname, ca_cert_files,
host_name):
"""
Load and verify server certificate and private key from multiple files
The files are accepted in PEM and DER certificate, PKCS#7 certificate
chain, PKCS#8 and raw private key and PKCS#12 formats.
:param cert_files: Names of server certificate and private key files to
import
:param key_password: Password to decrypt private keys
:param key_nickname: Nickname of the private key to import from PKCS#12
files
:param ca_cert_files: Names of CA certificate files to import
:param host_name: Host name of the server
:returns: Temporary PKCS#12 file with the server certificate, private key
and CA certificate chain, password to unlock the PKCS#12 file and
the CA certificate of the CA that issued the server certificate
"""
with certs.NSSDatabase() as nssdb:
db_password = ipautil.ipa_generate_password()
db_pwdfile = ipautil.write_tmp_file(db_password)
nssdb.create_db(db_pwdfile.name)
try:
nssdb.import_files(cert_files, db_pwdfile.name,
True, key_password, key_nickname)
except RuntimeError as e:
raise ScriptError(str(e))
if ca_cert_files:
try:
nssdb.import_files(ca_cert_files, db_pwdfile.name)
except RuntimeError as e:
raise ScriptError(str(e))
for nickname, trust_flags in nssdb.list_certs():
if 'u' in trust_flags:
key_nickname = nickname
continue
nssdb.trust_root_cert(nickname)
# Check we have the whole cert chain & the CA is in it
trust_chain = list(reversed(nssdb.get_trust_chain(key_nickname)))
ca_cert = None
for nickname in trust_chain[1:]:
cert = nssdb.get_cert(nickname)
if ca_cert is None:
ca_cert = cert
nss_cert = x509.load_certificate(cert, x509.DER)
subject = DN(str(nss_cert.subject))
issuer = DN(str(nss_cert.issuer))
del nss_cert
if subject == issuer:
break
else:
raise ScriptError(
"The full certificate chain is not present in %s" %
(", ".join(cert_files)))
for nickname in trust_chain[1:]:
try:
nssdb.verify_ca_cert_validity(nickname)
except ValueError as e:
raise ScriptError(
"CA certificate %s in %s is not valid: %s" %
(subject, ", ".join(cert_files), e))
# Check server validity
try:
nssdb.verify_server_cert_validity(key_nickname, host_name)
except ValueError as e:
raise ScriptError(
"The server certificate in %s is not valid: %s" %
(", ".join(cert_files), e))
out_file = tempfile.NamedTemporaryFile()
out_password = ipautil.ipa_generate_password()
out_pwdfile = ipautil.write_tmp_file(out_password)
args = [
paths.PK12UTIL,
'-o', out_file.name,
'-n', key_nickname,
'-d', nssdb.secdir,
'-k', db_pwdfile.name,
'-w', out_pwdfile.name,
]
ipautil.run(args)
return out_file, out_password, ca_cert
@contextmanager
def stopped_service(service, instance_name=""):
"""
Ensure that the specified service is stopped while the commands within
this context are executed.
Service is started at the end of the execution.
"""
if instance_name:
log_instance_name = "@{instance}".format(instance=instance_name)
else:
log_instance_name = ""
root_logger.debug('Ensuring that service %s%s is not running while '
'the next set of commands is being executed.', service,
log_instance_name)
service_obj = services.service(service)
# Figure out if the service is running, if not, yield
if not service_obj.is_running(instance_name):
root_logger.debug('Service %s%s is not running, continue.', service,
log_instance_name)
yield
else:
# Stop the service, do the required stuff and start it again
root_logger.debug('Stopping %s%s.', service, log_instance_name)
service_obj.stop(instance_name)
try:
yield
finally:
root_logger.debug('Starting %s%s.', service, log_instance_name)
service_obj.start(instance_name)
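# NOTE: an illustrative (hypothetical instance name) use of the context
# manager above, keeping a directory server instance down while its files
# are edited:
#   with stopped_service('dirsrv', instance_name='EXAMPLE-TEST'):
#       ...  # touch files the running service would otherwise own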
def check_entropy():
"""
Checks if the system has enough entropy, if not, displays warning message
"""
try:
with open(paths.ENTROPY_AVAIL, 'r') as efname:
if int(efname.read()) < 200:
emsg = 'WARNING: Your system is running out of entropy, ' \
'you may experience long delays'
service.print_msg(emsg)
root_logger.debug(emsg)
except IOError as e:
root_logger.debug(
"Could not open %s: %s", paths.ENTROPY_AVAIL, e)
except ValueError as e:
root_logger.debug("Invalid value in %s %s", paths.ENTROPY_AVAIL, e)
def load_external_cert(files, subject_base):
"""
Load and verify external CA certificate chain from multiple files.
The files are accepted in PEM and DER certificate and PKCS#7 certificate
chain formats.
:param files: Names of files to import
:param subject_base: Subject name base for IPA certificates
:returns: Temporary file with the IPA CA certificate and temporary file
with the external CA certificate chain
"""
with certs.NSSDatabase() as nssdb:
db_password = ipautil.ipa_generate_password()
db_pwdfile = ipautil.write_tmp_file(db_password)
nssdb.create_db(db_pwdfile.name)
try:
nssdb.import_files(files, db_pwdfile.name)
except RuntimeError as e:
raise ScriptError(str(e))
ca_subject = DN(('CN', 'Certificate Authority'), subject_base)
ca_nickname = None
cache = {}
for nickname, trust_flags in nssdb.list_certs():
cert = nssdb.get_cert(nickname, pem=True)
nss_cert = x509.load_certificate(cert)
subject = DN(str(nss_cert.subject))
issuer = DN(str(nss_cert.issuer))
del nss_cert
cache[nickname] = (cert, subject, issuer)
if subject == ca_subject:
ca_nickname = nickname
nssdb.trust_root_cert(nickname)
if ca_nickname is None:
raise ScriptError(
"IPA CA certificate not found in %s" % (", ".join(files)))
trust_chain = reversed(nssdb.get_trust_chain(ca_nickname))
ca_cert_chain = []
for nickname in trust_chain:
cert, subject, issuer = cache[nickname]
ca_cert_chain.append(cert)
if subject == issuer:
break
else:
raise ScriptError(
"CA certificate chain in %s is incomplete" %
(", ".join(files)))
for nickname in trust_chain:
try:
nssdb.verify_ca_cert_validity(nickname)
except ValueError as e:
raise ScriptError(
"CA certificate %s in %s is not valid: %s" %
(subject, ", ".join(files), e))
cert_file = tempfile.NamedTemporaryFile()
cert_file.write(ca_cert_chain[0] + '\n')
cert_file.flush()
ca_file = tempfile.NamedTemporaryFile()
ca_file.write('\n'.join(ca_cert_chain[1:]) + '\n')
ca_file.flush()
return cert_file, ca_file
def store_version():
"""Store current data version and platform. This is required for check if
upgrade is required.
"""
sysupgrade.set_upgrade_state('ipa', 'data_version',
version.VENDOR_VERSION)
sysupgrade.set_upgrade_state('ipa', 'platform', ipaplatform.NAME)
def check_version():
"""
:raise UpgradePlatformError: if platform is not the same
:raise UpgradeDataOlderVersionError: if data needs to be upgraded
:raise UpgradeDataNewerVersionError: older version of IPA was detected than data
:raise UpgradeMissingVersionError: if platform or version is missing
"""
platform = sysupgrade.get_upgrade_state('ipa', 'platform')
if platform is not None:
if platform != ipaplatform.NAME:
raise UpgradePlatformError(
"platform mismatch (expected '%s', current '%s')" % (
platform, ipaplatform.NAME)
)
else:
raise UpgradeMissingVersionError("no platform stored")
data_version = sysupgrade.get_upgrade_state('ipa', 'data_version')
if data_version is not None:
parsed_data_ver = tasks.parse_ipa_version(data_version)
parsed_ipa_ver = tasks.parse_ipa_version(version.VENDOR_VERSION)
if parsed_data_ver < parsed_ipa_ver:
raise UpgradeDataOlderVersionError(
"data needs to be upgraded (expected version '%s', current "
"version '%s')" % (version.VENDOR_VERSION, data_version)
)
elif parsed_data_ver > parsed_ipa_ver:
raise UpgradeDataNewerVersionError(
"data are in newer version than IPA (data version '%s', IPA "
"version '%s')" % (data_version, version.VENDOR_VERSION)
)
else:
raise UpgradeMissingVersionError("no data_version stored")
def realm_to_serverid(realm_name):
return "-".join(realm_name.split("."))
def realm_to_ldapi_uri(realm_name):
serverid = realm_to_serverid(realm_name)
socketname = paths.SLAPD_INSTANCE_SOCKET_TEMPLATE % (serverid,)
return 'ldapi://' + ldapurl.ldapUrlEscape(socketname)
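# NOTE: illustrative values for the two helpers above, for a hypothetical
# realm EXAMPLE.TEST:
#   realm_to_serverid('EXAMPLE.TEST')  -> 'EXAMPLE-TEST'
#   realm_to_ldapi_uri('EXAMPLE.TEST') -> 'ldapi://' plus the URL-escaped
#       socket path built from paths.SLAPD_INSTANCE_SOCKET_TEMPLATE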
def install_service_keytab(principal, server, path, force_service_add=False):
try:
api.Backend.rpcclient.connect()
# Create services if none exists (we use the .forward method
# here so that we can control the client version number and avoid
# errors. This is a workaround until the API becomes version
# independent: FIXME
api.Backend.rpcclient.forward(
'service_add',
krbprincipalname=principal,
force=force_service_add,
version=u'2.112' # All the way back to 3.0 servers
)
except errors.DuplicateEntry:
pass
finally:
if api.Backend.rpcclient.isconnected():
api.Backend.rpcclient.disconnect()
args = [paths.IPA_GETKEYTAB, '-k', path, '-p', principal, '-s', server]
ipautil.run(args)
def check_creds(options, realm_name):
# Check if ccache is available
default_cred = None
try:
root_logger.debug('KRB5CCNAME set to %s' %
os.environ.get('KRB5CCNAME', None))
# get default creds, will raise if none found
default_cred = gssapi.creds.Credentials()
principal = str(default_cred.name)
except gssapi.raw.misc.GSSError as e:
root_logger.debug('Failed to find default ccache: %s' % e)
principal = None
# Check if the principal matches the requested one (if any)
if principal is not None and options.principal is not None:
op = options.principal
if op.find('@') == -1:
op = '%s@%s' % (op, realm_name)
if principal != op:
root_logger.debug('Specified principal %s does not match '
'available credentials (%s)' %
(options.principal, principal))
principal = None
if principal is None:
(ccache_fd, ccache_name) = tempfile.mkstemp()
os.close(ccache_fd)
options.created_ccache_file = ccache_name
if options.principal is not None:
principal = options.principal
else:
principal = 'admin'
stdin = None
if principal.find('@') == -1:
principal = '%s@%s' % (principal, realm_name)
if options.admin_password is not None:
stdin = options.admin_password
else:
if not options.unattended:
try:
stdin = getpass.getpass("Password for %s: " % principal)
except EOFError:
stdin = None
if not stdin:
root_logger.error(
"Password must be provided for %s.", principal)
raise ScriptError("Missing password for %s" % principal)
else:
if sys.stdin.isatty():
root_logger.error("Password must be provided in " +
"non-interactive mode.")
root_logger.info("This can be done via " +
"echo password | ipa-client-install " +
"... or with the -w option.")
raise ScriptError("Missing password for %s" % principal)
else:
stdin = sys.stdin.readline()
# set options.admin_password for future use
options.admin_password = stdin
try:
ipautil.kinit_password(principal, stdin, ccache_name)
except RuntimeError as e:
root_logger.error("Kerberos authentication failed: %s" % e)
raise ScriptError("Invalid credentials: %s" % e)
os.environ['KRB5CCNAME'] = ccache_name
class ModifyLDIF(ldif.LDIFParser):
"""
    Allows modification of an LDIF file.
    Operations are applied in the order in which they were specified, per DN.
Warning: only modifications of existing DNs are supported
"""
def __init__(self, input_file, output_file):
"""
:param input_file: an LDIF
:param output_file: an LDIF file
"""
ldif.LDIFParser.__init__(self, input_file)
self.writer = ldif.LDIFWriter(output_file)
self.dn_updated = set()
self.modifications = {} # keep modify operations in original order
def add_value(self, dn, attr, values):
"""
Add value to LDIF.
        :param dn: DN of entry (must exist)
        :param attr: attribute name
        :param values: values to be added
"""
assert isinstance(values, list)
self.modifications.setdefault(dn, []).append(
dict(
op="add",
attr=attr,
values=values,
)
)
def remove_value(self, dn, attr, values=None):
"""
Remove value from LDIF.
:param dn: DN of entry
:param attr: attribute name
        :param values: values to be removed; if None, the whole attribute
            will be removed
"""
assert values is None or isinstance(values, list)
self.modifications.setdefault(dn, []).append(
dict(
op="del",
attr=attr,
values=values,
)
)
def replace_value(self, dn, attr, values):
"""
Replace values in LDIF with new value.
:param dn: DN of entry
:param attr: attribute name
        :param values: new values for the attribute
"""
assert isinstance(values, list)
self.remove_value(dn, attr)
self.add_value(dn, attr, values)
def modifications_from_ldif(self, ldif_file):
"""
        Parse an LDIF file. The default operation is add; only the
        changetypes "add" and "modify" are supported.
:param ldif_file: an opened file for read
:raises: ValueError
"""
parser = ldif.LDIFRecordList(ldif_file)
parser.parse()
last_dn = None
for dn, entry in parser.all_records:
if dn is None:
                # the ldif parser returns None if the record belongs to the previous DN
dn = last_dn
else:
last_dn = dn
if "replace" in entry:
for attr in entry["replace"]:
try:
self.replace_value(dn, attr, entry[attr])
except KeyError:
raise ValueError("replace: {dn}, {attr}: values are "
"missing".format(dn=dn, attr=attr))
elif "delete" in entry:
for attr in entry["delete"]:
self.remove_value(dn, attr, entry.get(attr, None))
elif "add" in entry:
for attr in entry["add"]:
try:
self.replace_value(dn, attr, entry[attr])
except KeyError:
raise ValueError("add: {dn}, {attr}: values are "
"missing".format(dn=dn, attr=attr))
else:
root_logger.error("Ignoring entry: %s : only modifications "
"are allowed (missing \"changetype: "
"modify\")", dn)
def handle(self, dn, entry):
if dn in self.modifications:
self.dn_updated.add(dn)
for mod in self.modifications.get(dn, []):
attr_name = mod["attr"]
values = mod["values"]
if mod["op"] == "del":
# delete
attribute = entry.setdefault(attr_name, [])
if values is None:
attribute = []
else:
attribute = [v for v in attribute if v not in values]
if not attribute: # empty
del entry[attr_name]
elif mod["op"] == "add":
# add
attribute = entry.setdefault(attr_name, [])
attribute.extend([v for v in values if v not in attribute])
else:
assert False, "Unknown operation: %r" % mod["op"]
self.writer.unparse(dn, entry)
def parse(self):
ldif.LDIFParser.parse(self)
# check if there are any remaining modifications
remaining_changes = set(self.modifications.keys()) - self.dn_updated
for dn in remaining_changes:
root_logger.error(
"DN: %s does not exists or haven't been updated", dn)
def remove_keytab(keytab_path):
"""
Remove Kerberos keytab and issue a warning if the procedure fails
:param keytab_path: path to the keytab file
"""
try:
root_logger.debug("Removing service keytab: {}".format(keytab_path))
os.remove(keytab_path)
except OSError as e:
if e.errno != errno.ENOENT:
root_logger.warning("Failed to remove Kerberos keytab '{}': "
"{}".format(keytab_path, e))
root_logger.warning("You may have to remove it manually")
def remove_ccache(ccache_path=None, run_as=None):
"""
remove Kerberos credential cache, essentially a wrapper around kdestroy.
:param ccache_path: path to the ccache file
:param run_as: run kdestroy as this user
"""
root_logger.debug("Removing service credentials cache")
kdestroy_cmd = [paths.KDESTROY]
if ccache_path is not None:
root_logger.debug("Ccache path: '{}'".format(ccache_path))
kdestroy_cmd.extend(['-c', ccache_path])
try:
ipautil.run(kdestroy_cmd, runas=run_as, env={})
except ipautil.CalledProcessError as e:
root_logger.warning(
"Failed to clear Kerberos credentials cache: {}".format(e))
|
tbabej/freeipa
|
ipaserver/install/installutils.py
|
Python
|
gpl-3.0
| 48,113
|
"""
********************************************************************************
Learn Python the Hard Way Third Edition, by
Zed A. Shaw
ISBN: 978-0321884916
********************************************************************************
"""
# A mini-console game; to be completed
import time #module imports
from sys import exit #module imports
debug = "DEBUG: " #print for debugging
testing = False #switch debugging on/off
greedybastard = False #bad final choice
juststarted = True #test if just spawned or not.
#the final room
def theGoldDen():
print """
Woot! You reached the Gold Den well done!
If you want the gold first you must say why:
1) To spend on a ferrari
2) To give it all away to charity
3) To invest in education and good food and give some surplus to charity.
"""
options = [ '1', '2', '3']
next = raw_input(">>> ")
if next == "1":
print "Sure thing man, it's waiting for you at the Bear Pit,",
print " here I'll drop you off!"
time.sleep(3)
greedybastard = True
theBearPit(greedybastard, juststarted)
elif next == "2":
print "You philanthropist!",
print " Well you should keep some for yourself too =)",
print " but here you go, take a bit of the gold!"
died("", "winner")
elif next == "3":
print "A wise choice my friend, go take most of the gold))"
died("", "winner")
elif not next.isdigit():
died(next, "wtf")
else:
reason = chkinput(next, options)
died(next, reason)
def standrews():
print """
Welcome to St Andrews! A real life street art gallery!
Go visit one of the many awesome cafes.
Don't stay too long there is treasure to find!
From here you can go NW, NE or SE.
Please choose a direction)))
"""
options = [ 'NW', 'NE', 'SE']
next = getinput()
if next == "NW":
sainsburys()
elif next == "NE":
thepark()
elif next == "SE":
easton()
else:
reason = chkinput(next, options)
died(next, reason)
def easton():
print """
Welcome to Easton the home of Banksy!
Go check out Clic Clack Boom, the Cat
and Dog or the Masked Gorilla if you dare!
From here you can go W or S.
"""
options = [ 'W', 'S']
next = getinput()
if next == "W":
theBearPit(greedybastard, juststarted)
elif next == "S":
brislington()
else:
reason = chkinput(next, options)
died(next, reason)
def brislington():
print """
You've reached brislington! Once favored settling ground of Henry VII!
So much to see in this deeply rich and historic area.
Go check out one of the many shops in this wonderfully multicultural setting.
Not for long though it's time to move on soon)))
From here you can go W or SW.
"""
options = [ 'W', 'SW']
next = getinput()
if next == "W":
avongorge()
elif next == "SW":
bedminster()
else:
reason = chkinput(next, options)
died(next, reason)
def avongorge():
print """
What a beautiful gorge and castle.
Take a look around at your pleasure
But not enough time to stick around too long!
From here you can go N or S.
"""
options = [ 'N', 'S']
next = getinput()
if next == "N":
clifton()
elif next == "S":
theGoldDen()
else:
reason = chkinput(next, options)
died(next, reason)
def clifton():
print """
Ah the Students Union and the village all in one!
So much to see and do but not enough time to do it =)
From here you can go N or E.
"""
options = [ 'N', 'E']
next = getinput()
if next == "N":
sainsburys()
elif next == "E":
theBearPit(greedybastard, juststarted)
else:
reason = chkinput(next, options)
died(next, reason)
def bedminster():
print """
This place almost feels like a whole new world!
What's that?! A table tennis setup on the roadside.
If only there were more time to explore...
From here you can go S or NW.
"""
options = [ 'S', 'NW']
next = getinput()
if next == "S":
theGoldDen()
elif next == "NW":
avongorge()
else:
reason = chkinput(next, options)
died(next, reason)
def sainsburys():
print """
Ah you reached sainsburys, the fright of the local
people! You're in a homegrown community and this is the big
boy in town. Well don't hang around too long!
From here you can go N or SE))).
"""
options = [ 'N', 'SE']
next = getinput()
if next == "N":
theGoldDen()
elif next == "SE":
standrews()
else:
reason = chkinput(next, options)
died(next, reason)
def thepark():
print """
Empty by Winter and bustling by Summer, welcome to St Andrews
park! If you catch this place at the right time you might just
meet a juggler or two!
From here you can go N or W.
"""
    options = [ 'N', 'W']
    next = getinput()
    if next == "N":
theGoldDen()
elif next == "W":
sainsburys()
else:
reason = chkinput(next, options)
died(next, reason)
#classify an invalid response: quit keyword, cheat keyword, or nonsense
def chkinput(response, ol):
    # chkinput is only reached when the response matched none of the valid
    # directions in ol, so default to "wtf" unless it is a known keyword
    reason = "wtf"
    if response == "QUIT":
        reason = "leave"
    elif response == "IAMCHEATER":
        reason = "cheater"
return reason
#checks in case stdin string was lowercase.
def getinput():
next = raw_input(">>> ")
    next = next.upper()  #normalise case so lowercase input still matches
    if testing:
        print debug + next
return next
def died(s, r):
print debug + "%r" % r
dead = "player (you) = dead"
if r == "wtf":
print "'" + s + "'?!? ",
print "That's not even an option lolz"
time.sleep(1)
print dead
elif r == "leave":
print "No time to stay? :( np. bb!)))"
elif r == "cheater":
print """
Haha, sorry for the troll but you didn't think
it was that easy did you!?!?
"""
print dead
elif r == "winner":
print "YOU WON THE GAME! GOOD JOB)"
time.sleep(1)
thanks()
exit(0)
def thanks():
print "Thanks for playing matey"
time.sleep(1)
print " o o "
time.sleep(1)
print "= ="
time.sleep(1)
print " == . == "
time.sleep(1)
print " === === "
time.sleep(1)
print " ==== ==== "
time.sleep(1)
print " === === "
time.sleep(1)
print " == "
#this is where you start the game and all lvl1 portals
def theBearPit(gb, juststarted):
if not gb and juststarted:
print "From virtual world import hunter"
loading = "."
if not testing and juststarted:
for i in range(0,3):
print loading
loading += "."
time.sleep(1)
juststarted = False
if not gb:
print """
********************************
Welcome to the Bear Pit!
I guess you are here to find treasure?
From here you can go N,E,SE,SW or W.
Please choose a direction)))
You can quit the game at any time by typing QUIT.
You can cheat at any point by typing IAMCHEATER.
********************************
"""
else:
print """
********************************
Welcome to the Bear Pit!
You've been here before haven't you? (smirk)
Well good luck finding the treasure!
From here you can go N,E,SE,SW or W.
Please choose a direction)))
You can quit the game at any time by typing QUIT
You can cheat at any point by typing IAMCHEATER.
********************************
"""
    options = ['N', 'E', 'SE', 'SW', 'W']
next = getinput()
if next == "N":
standrews()
elif next =="E":
easton()
elif next == "SE":
brislington()
elif next =="SW":
avongorge()
elif next == "W":
clifton()
elif next == "CHEAT":
theGoldDen()
else:
reason = chkinput(next, options)
died(next, reason)
#function call to start the game
theBearPit(greedybastard, juststarted)
|
msnorm/projects
|
zspy2/ex36/ex36.py
|
Python
|
mit
| 8,354
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
blast2demPro.py
---------------------
Date : October 2014
Copyright : (C) 2014 by Martin Isenburg
Email : martin near rapidlasso point com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Martin Isenburg'
__date__ = 'October 2014'
__copyright__ = '(C) 2014, Martin Isenburg'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from .LAStoolsUtils import LAStoolsUtils
from .LAStoolsAlgorithm import LAStoolsAlgorithm
from processing.core.parameters import ParameterSelection
from processing.core.parameters import ParameterBoolean
class blast2demPro(LAStoolsAlgorithm):
ATTRIBUTE = "ATTRIBUTE"
PRODUCT = "PRODUCT"
ATTRIBUTES = ["elevation", "slope", "intensity", "rgb"]
PRODUCTS = ["actual values", "hillshade", "gray", "false"]
USE_TILE_BB = "USE_TILE_BB"
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('blast2demPro')
self.group, self.i18n_group = self.trAlgorithm('LAStools Production')
self.addParametersPointInputFolderGUI()
self.addParametersPointInputMergedGUI()
self.addParametersFilter1ReturnClassFlagsGUI()
self.addParametersStepGUI()
self.addParameter(ParameterSelection(blast2demPro.ATTRIBUTE,
self.tr("Attribute"), blast2demPro.ATTRIBUTES, 0))
self.addParameter(ParameterSelection(blast2demPro.PRODUCT,
self.tr("Product"), blast2demPro.PRODUCTS, 0))
self.addParameter(ParameterBoolean(blast2demPro.USE_TILE_BB,
self.tr("Use tile bounding box (after tiling with buffer)"), False))
self.addParametersOutputDirectoryGUI()
self.addParametersOutputAppendixGUI()
self.addParametersRasterOutputFormatGUI()
self.addParametersRasterOutputGUI()
self.addParametersAdditionalGUI()
self.addParametersCoresGUI()
self.addParametersVerboseGUI()
def processAlgorithm(self, progress):
commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "blast2dem")]
self.addParametersVerboseCommands(commands)
self.addParametersPointInputFolderCommands(commands)
self.addParametersPointInputMergedCommands(commands)
self.addParametersFilter1ReturnClassFlagsCommands(commands)
self.addParametersStepCommands(commands)
attribute = self.getParameterValue(blast2demPro.ATTRIBUTE)
if attribute != 0:
commands.append("-" + blast2demPro.ATTRIBUTES[attribute])
product = self.getParameterValue(blast2demPro.PRODUCT)
if product != 0:
commands.append("-" + blast2demPro.PRODUCTS[product])
if (self.getParameterValue(blast2demPro.USE_TILE_BB)):
commands.append("-use_tile_bb")
self.addParametersOutputDirectoryCommands(commands)
self.addParametersOutputAppendixCommands(commands)
self.addParametersRasterOutputFormatCommands(commands)
self.addParametersRasterOutputCommands(commands)
self.addParametersAdditionalCommands(commands)
self.addParametersCoresCommands(commands)
LAStoolsUtils.runLAStools(commands, progress)
|
AsgerPetersen/QGIS
|
python/plugins/processing/algs/lidar/lastools/blast2demPro.py
|
Python
|
gpl-2.0
| 4,005
|
#!/usr/bin/env python
from __future__ import division
import sys
import datetime
import time
import tarfile
start = time.time()
def log(output):
global start
now = datetime.datetime.utcfromtimestamp(time.time() - start)
sys.stderr.write("[%s] %s" % (now.strftime("%H:%M:%S"), output))
sys.stderr.flush()
def unpack(name, folder):
tar = tarfile.open(name)
tar.extractall(folder)
tar.close()
|
ut-planteco/ssu-pipeline
|
console.py
|
Python
|
gpl-3.0
| 404
|
"""
This module contains celery task functions for handling the sending of bulk email
to a course.
"""
import re
import random
import json
from time import sleep
from dogapi import dog_stats_api
from smtplib import SMTPServerDisconnected, SMTPDataError, SMTPConnectError, SMTPException
from boto.ses.exceptions import (
SESAddressNotVerifiedError,
SESIdentityNotVerifiedError,
SESDomainNotConfirmedError,
SESAddressBlacklistedError,
SESDailyQuotaExceededError,
SESMaxSendingRateExceededError,
SESDomainEndsWithDotError,
SESLocalAddressCharacterError,
SESIllegalAddressError,
)
from boto.exception import AWSConnectionError
from celery import task, current_task
from celery.utils.log import get_task_logger
from celery.states import SUCCESS, FAILURE, RETRY
from celery.exceptions import RetryTaskError
from django.conf import settings
from django.contrib.auth.models import User
from django.core.mail import EmailMultiAlternatives, get_connection
from django.core.urlresolvers import reverse
from bulk_email.models import (
CourseEmail, Optout, CourseEmailTemplate,
SEND_TO_MYSELF, SEND_TO_ALL, TO_OPTIONS,
)
from courseware.courses import get_course, course_image_url
from student.roles import CourseStaffRole, CourseInstructorRole
from instructor_task.models import InstructorTask
from instructor_task.subtasks import (
SubtaskStatus,
queue_subtasks_for_query,
check_subtask_is_valid,
update_subtask_status,
)
from xmodule.modulestore import Location
log = get_task_logger(__name__)
# Errors that mean an individual email failed to be sent, and should just
# be treated as a failure for that recipient.
SINGLE_EMAIL_FAILURE_ERRORS = (
SESAddressBlacklistedError, # Recipient's email address has been temporarily blacklisted.
SESDomainEndsWithDotError, # Recipient's email address' domain ends with a period/dot.
SESIllegalAddressError, # Raised when an illegal address is encountered.
SESLocalAddressCharacterError, # An address contained a control or whitespace character.
)
# Exceptions that, if caught, should cause the task to be re-tried.
# These errors will be caught a limited number of times before the task fails.
LIMITED_RETRY_ERRORS = (
SMTPConnectError,
SMTPServerDisconnected,
AWSConnectionError,
)
# Errors that indicate that a mailing task should be retried without limit.
# An example is if email is being sent too quickly, but may succeed if sent
# more slowly. When caught by a task, it triggers an exponential backoff and retry.
# Retries happen continuously until the email is sent.
# Note that the SMTPDataErrors here are only those within the 4xx range.
# Those not in this range (i.e. in the 5xx range) are treated as hard failures
# and thus like SINGLE_EMAIL_FAILURE_ERRORS.
INFINITE_RETRY_ERRORS = (
SESMaxSendingRateExceededError, # Your account's requests/second limit has been exceeded.
SMTPDataError,
)
# Errors that are known to indicate an inability to send any more emails,
# and should therefore not be retried. For example, exceeding a quota for emails.
# Also, any SMTP errors that are not explicitly enumerated above.
BULK_EMAIL_FAILURE_ERRORS = (
SESAddressNotVerifiedError, # Raised when a "Reply-To" address has not been validated in SES yet.
SESIdentityNotVerifiedError, # Raised when an identity has not been verified in SES yet.
SESDomainNotConfirmedError, # Raised when domain ownership is not confirmed for DKIM.
SESDailyQuotaExceededError, # 24-hour allotment of outbound email has been exceeded.
SMTPException,
)
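# NOTE: the three tuples above are intended for isinstance() checks when an
# exception is caught during sending, e.g. (illustrative only):
#   if isinstance(exc, SINGLE_EMAIL_FAILURE_ERRORS):
#       ...  # skip this recipient and keep going
#   elif isinstance(exc, LIMITED_RETRY_ERRORS):
#       ...  # retry the subtask a bounded number of times
#   elif isinstance(exc, INFINITE_RETRY_ERRORS):
#       ...  # retry with exponential backoff until the send succeeds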
def _get_recipient_queryset(user_id, to_option, course_id, course_location):
"""
Returns a query set of email recipients corresponding to the requested to_option category.
`to_option` is either SEND_TO_MYSELF, SEND_TO_STAFF, or SEND_TO_ALL.
Recipients who are in more than one category (e.g. enrolled in the course and are staff or self)
will be properly deduped.
"""
if to_option not in TO_OPTIONS:
log.error("Unexpected bulk email TO_OPTION found: %s", to_option)
raise Exception("Unexpected bulk email TO_OPTION found: {0}".format(to_option))
if to_option == SEND_TO_MYSELF:
recipient_qset = User.objects.filter(id=user_id)
else:
staff_qset = CourseStaffRole(course_location).users_with_role()
instructor_qset = CourseInstructorRole(course_location).users_with_role()
recipient_qset = staff_qset | instructor_qset
if to_option == SEND_TO_ALL:
# We also require students to have activated their accounts to
# provide verification that the provided email address is valid.
enrollment_qset = User.objects.filter(
is_active=True,
courseenrollment__course_id=course_id,
courseenrollment__is_active=True
)
recipient_qset = recipient_qset | enrollment_qset
recipient_qset = recipient_qset.distinct()
recipient_qset = recipient_qset.order_by('pk')
return recipient_qset
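# NOTE: illustrative only -- for to_option == SEND_TO_ALL the queryset built
# above is effectively (staff | instructors | active enrollees), de-duplicated
# and ordered by primary key:
#   _get_recipient_queryset(user_id, SEND_TO_ALL, course_id, course_location)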
def _get_course_email_context(course):
"""
Returns context arguments to apply to all emails, independent of recipient.
"""
course_id = course.id
course_title = course.display_name
course_url = 'https://{}{}'.format(
settings.SITE_NAME,
reverse('course_root', kwargs={'course_id': course_id})
)
image_url = 'https://{}{}'.format(settings.SITE_NAME, course_image_url(course))
email_context = {
'course_title': course_title,
'course_url': course_url,
'course_image_url': image_url,
'account_settings_url': 'https://{}{}'.format(settings.SITE_NAME, reverse('dashboard')),
'platform_name': settings.PLATFORM_NAME,
}
return email_context
def perform_delegate_email_batches(entry_id, course_id, task_input, action_name):
"""
Delegates emails by querying for the list of recipients who should
get the mail, chopping up into batches of no more than settings.BULK_EMAIL_EMAILS_PER_TASK
in size, and queueing up worker jobs.
"""
entry = InstructorTask.objects.get(pk=entry_id)
# Get inputs to use in this task from the entry.
user_id = entry.requester.id
task_id = entry.task_id
# Perfunctory check, since expansion is made for convenience of other task
# code that doesn't need the entry_id.
if course_id != entry.course_id:
format_msg = u"Course id conflict: explicit value {} does not match task value {}"
log.warning("Task %s: %s", task_id, format_msg.format(course_id, entry.course_id))
raise ValueError("Course id conflict: explicit value does not match task value")
# Fetch the CourseEmail.
email_id = task_input['email_id']
try:
email_obj = CourseEmail.objects.get(id=email_id)
except CourseEmail.DoesNotExist:
# The CourseEmail object should be committed in the view function before the task
# is submitted and reaches this point.
log.warning("Task %s: Failed to get CourseEmail with id %s", task_id, email_id)
raise
# Check to see if email batches have already been defined. This seems to
# happen sometimes when there is a loss of connection while a task is being
# queued. When this happens, the same task gets called again, and a whole
# new raft of subtasks gets queued up. We will assume that if subtasks
# have already been defined, there is no need to redefine them below.
# So we just return right away. We don't raise an exception, because we want
# the current task to be marked with whatever it had been marked with before.
if len(entry.subtasks) > 0 and len(entry.task_output) > 0:
log.warning("Task %s has already been processed for email %s! InstructorTask = %s", task_id, email_id, entry)
progress = json.loads(entry.task_output)
return progress
# Sanity check that course for email_obj matches that of the task referencing it.
if course_id != email_obj.course_id:
format_msg = u"Course id conflict: explicit value {} does not match email value {}"
log.warning("Task %s: %s", task_id, format_msg.format(course_id, entry.course_id))
raise ValueError("Course id conflict: explicit value does not match email value")
# Fetch the course object.
try:
course = get_course(course_id)
except ValueError:
log.exception("Task %s: course not found: %s", task_id, course_id)
raise
# Get arguments that will be passed to every subtask.
to_option = email_obj.to_option
global_email_context = _get_course_email_context(course)
def _create_send_email_subtask(to_list, initial_subtask_status):
"""Creates a subtask to send email to a given recipient list."""
subtask_id = initial_subtask_status.task_id
new_subtask = send_course_email.subtask(
(
entry_id,
email_id,
to_list,
global_email_context,
initial_subtask_status.to_dict(),
),
task_id=subtask_id,
routing_key=settings.BULK_EMAIL_ROUTING_KEY,
)
return new_subtask
recipient_qset = _get_recipient_queryset(user_id, to_option, course_id, course.location)
recipient_fields = ['profile__name', 'email']
log.info(u"Task %s: Preparing to queue subtasks for sending emails for course %s, email %s, to_option %s",
task_id, course_id, email_id, to_option)
progress = queue_subtasks_for_query(
entry,
action_name,
_create_send_email_subtask,
recipient_qset,
recipient_fields,
settings.BULK_EMAIL_EMAILS_PER_QUERY,
settings.BULK_EMAIL_EMAILS_PER_TASK
)
# We want to return progress here, as this is what will be stored in the
# AsyncResult for the parent task as its return value.
# The AsyncResult will then be marked as SUCCEEDED, and have this return value as its "result".
# That's okay, for the InstructorTask will have the "real" status, and monitoring code
# should be using that instead.
return progress
@task(default_retry_delay=settings.BULK_EMAIL_DEFAULT_RETRY_DELAY, max_retries=settings.BULK_EMAIL_MAX_RETRIES) # pylint: disable=E1102
def send_course_email(entry_id, email_id, to_list, global_email_context, subtask_status_dict):
"""
Sends an email to a list of recipients.
Inputs are:
* `entry_id`: id of the InstructorTask object to which progress should be recorded.
* `email_id`: id of the CourseEmail model that is to be emailed.
* `to_list`: list of recipients. Each is represented as a dict with the following keys:
- 'profile__name': full name of User.
- 'email': email address of User.
- 'pk': primary key of User model.
* `global_email_context`: dict containing values that are unique for this email but the same
for all recipients of this email. This dict is to be used to fill in slots in email
template. It does not include 'name' and 'email', which will be provided by the to_list.
* `subtask_status_dict` : dict containing values representing current status. Keys are:
'task_id' : id of subtask. This is used to pass task information across retries.
'attempted' : number of attempts -- should equal succeeded plus failed
'succeeded' : number that succeeded in processing
'skipped' : number that were not processed.
'failed' : number that failed during processing
'retried_nomax' : number of times the subtask has been retried for conditions that
should not have a maximum count applied
'retried_withmax' : number of times the subtask has been retried for conditions that
should have a maximum count applied
'state' : celery state of the subtask (e.g. QUEUING, PROGRESS, RETRY, FAILURE, SUCCESS)
Most values will be zero on initial call, but may be different when the task is
invoked as part of a retry.
Sends to all addresses contained in to_list that are not also in the Optout table.
Emails are sent multi-part, in both plain text and html. Updates InstructorTask object
with status information (sends, failures, skips) and updates number of subtasks completed.
"""
subtask_status = SubtaskStatus.from_dict(subtask_status_dict)
current_task_id = subtask_status.task_id
num_to_send = len(to_list)
log.info("Preparing to send email %s to %d recipients as subtask %s for instructor task %d: context = %s, status=%s",
email_id, num_to_send, current_task_id, entry_id, global_email_context, subtask_status)
# Check that the requested subtask is actually known to the current InstructorTask entry.
# If this fails, it throws an exception, which should fail this subtask immediately.
# This can happen when the parent task has been run twice, and results in duplicate
# subtasks being created for the same InstructorTask entry. This can happen when Celery
# loses its connection to its broker, and any current tasks get requeued.
# We hope to catch this condition in perform_delegate_email_batches() when it's the parent
# task that is resubmitted, but just in case we fail to do so there, we check here as well.
# There is also a possibility that this task will be run twice by Celery, for the same reason.
# To deal with that, we need to confirm that the task has not already been completed.
check_subtask_is_valid(entry_id, current_task_id, subtask_status)
send_exception = None
new_subtask_status = None
try:
course_title = global_email_context['course_title']
with dog_stats_api.timer('course_email.single_task.time.overall', tags=[_statsd_tag(course_title)]):
new_subtask_status, send_exception = _send_course_email(
entry_id,
email_id,
to_list,
global_email_context,
subtask_status,
)
except Exception:
# Unexpected exception. Try to write out the failure to the entry before failing.
log.exception("Send-email task %s for email %s: failed unexpectedly!", current_task_id, email_id)
# We got here for really unexpected reasons. Since we don't know how far
# the task got in emailing, we count all recipients as having failed.
# It at least keeps the counts consistent.
subtask_status.increment(failed=num_to_send, state=FAILURE)
update_subtask_status(entry_id, current_task_id, subtask_status)
raise
if send_exception is None:
# Update the InstructorTask object that is storing its progress.
log.info("Send-email task %s for email %s: succeeded", current_task_id, email_id)
update_subtask_status(entry_id, current_task_id, new_subtask_status)
elif isinstance(send_exception, RetryTaskError):
# If retrying, a RetryTaskError needs to be returned to Celery.
        # We assume that the progress made before the retry condition
        # was encountered has already been updated before the retry call was made,
        # so we only log here.
log.warning("Send-email task %s for email %s: being retried", current_task_id, email_id)
raise send_exception # pylint: disable=E0702
else:
log.error("Send-email task %s for email %s: failed: %s", current_task_id, email_id, send_exception)
update_subtask_status(entry_id, current_task_id, new_subtask_status)
raise send_exception # pylint: disable=E0702
# return status in a form that can be serialized by Celery into JSON:
log.info("Send-email task %s for email %s: returning status %s", current_task_id, email_id, new_subtask_status)
return new_subtask_status.to_dict()
def _filter_optouts_from_recipients(to_list, course_id):
"""
Filters a recipient list based on student opt-outs for a given course.
Returns the filtered recipient list, as well as the number of optouts
removed from the list.
"""
optouts = Optout.objects.filter(
course_id=course_id,
user__in=[i['pk'] for i in to_list]
).values_list('user__email', flat=True)
optouts = set(optouts)
# Only count the num_optout for the first time the optouts are calculated.
# We assume that the number will not change on retries, and so we don't need
# to calculate it each time.
num_optout = len(optouts)
to_list = [recipient for recipient in to_list if recipient['email'] not in optouts]
return to_list, num_optout
def _get_source_address(course_id, course_title):
"""
Calculates an email address to be used as the 'from-address' for sent emails.
Makes a unique from name and address for each course, e.g.
"COURSE_TITLE" Course Staff <coursenum-no-reply@courseupdates.edx.org>
"""
course_title_no_quotes = re.sub(r'"', '', course_title)
# The course_id is assumed to be in the form 'org/course_num/run',
# so pull out the course_num. Then make sure that it can be used
    # in an email address, by substituting a '_' anywhere a character other than
    # an alphanumeric, underscore, period, or dash appears.
course_num = Location.parse_course_id(course_id)['course']
invalid_chars = re.compile(r"[^\w.-]")
course_num = invalid_chars.sub('_', course_num)
from_addr = u'"{0}" Course Staff <{1}-{2}>'.format(course_title_no_quotes, course_num, settings.BULK_EMAIL_DEFAULT_FROM_EMAIL)
return from_addr
def _send_course_email(entry_id, email_id, to_list, global_email_context, subtask_status):
"""
Performs the email sending task.
Sends an email to a list of recipients.
Inputs are:
* `entry_id`: id of the InstructorTask object to which progress should be recorded.
* `email_id`: id of the CourseEmail model that is to be emailed.
* `to_list`: list of recipients. Each is represented as a dict with the following keys:
- 'profile__name': full name of User.
- 'email': email address of User.
- 'pk': primary key of User model.
* `global_email_context`: dict containing values that are unique for this email but the same
for all recipients of this email. This dict is to be used to fill in slots in email
template. It does not include 'name' and 'email', which will be provided by the to_list.
* `subtask_status` : object of class SubtaskStatus representing current status.
Sends to all addresses contained in to_list that are not also in the Optout table.
Emails are sent multi-part, in both plain text and html.
Returns a tuple of two values:
* First value is a SubtaskStatus object which represents current progress at the end of this call.
* Second value is an exception returned by the innards of the method, indicating a fatal error.
      In this case, the number of recipients that were not sent to has already been added
      to the 'failed' count above.
"""
# Get information from current task's request:
task_id = subtask_status.task_id
try:
course_email = CourseEmail.objects.get(id=email_id)
except CourseEmail.DoesNotExist as exc:
log.exception("Task %s: could not find email id:%s to send.", task_id, email_id)
raise
# Exclude optouts (if not a retry):
# Note that we don't have to do the optout logic at all if this is a retry,
# because we have presumably already performed the optout logic on the first
# attempt. Anyone on the to_list on a retry has already passed the filter
# that existed at that time, and we don't need to keep checking for changes
# in the Optout list.
if subtask_status.get_retry_count() == 0:
to_list, num_optout = _filter_optouts_from_recipients(to_list, course_email.course_id)
subtask_status.increment(skipped=num_optout)
course_title = global_email_context['course_title']
subject = "[" + course_title + "] " + course_email.subject
from_addr = _get_source_address(course_email.course_id, course_title)
course_email_template = CourseEmailTemplate.get_template()
try:
connection = get_connection()
connection.open()
# Define context values to use in all course emails:
email_context = {'name': '', 'email': ''}
email_context.update(global_email_context)
while to_list:
# Update context with user-specific values from the user at the end of the list.
# At the end of processing this user, they will be popped off of the to_list.
# That way, the to_list will always contain the recipients remaining to be emailed.
# This is convenient for retries, which will need to send to those who haven't
# yet been emailed, but not send to those who have already been sent to.
current_recipient = to_list[-1]
email = current_recipient['email']
email_context['email'] = email
email_context['name'] = current_recipient['profile__name']
# Construct message content using templates and context:
plaintext_msg = course_email_template.render_plaintext(course_email.text_message, email_context)
html_msg = course_email_template.render_htmltext(course_email.html_message, email_context)
# Create email:
email_msg = EmailMultiAlternatives(
subject,
plaintext_msg,
from_addr,
[email],
connection=connection
)
email_msg.attach_alternative(html_msg, 'text/html')
            # Throttle if we have been rate-limited. This is not very high-tech,
            # but if a task has been retried for rate-limiting reasons, then we sleep
            # for a period of time between all emails within this task. The choice of
            # value depends on the number of workers that might be sending email in
            # parallel, and on what the SES throttle rate is.
if subtask_status.retried_nomax > 0:
sleep(settings.BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS)
try:
log.debug('Email with id %s to be sent to %s', email_id, email)
with dog_stats_api.timer('course_email.single_send.time.overall', tags=[_statsd_tag(course_title)]):
connection.send_messages([email_msg])
except SMTPDataError as exc:
# According to SMTP spec, we'll retry error codes in the 4xx range. 5xx range indicates hard failure.
if exc.smtp_code >= 400 and exc.smtp_code < 500:
# This will cause the outer handler to catch the exception and retry the entire task.
raise exc
else:
# This will fall through and not retry the message.
log.warning('Task %s: email with id %s not delivered to %s due to error %s', task_id, email_id, email, exc.smtp_error)
dog_stats_api.increment('course_email.error', tags=[_statsd_tag(course_title)])
subtask_status.increment(failed=1)
except SINGLE_EMAIL_FAILURE_ERRORS as exc:
# This will fall through and not retry the message.
log.warning('Task %s: email with id %s not delivered to %s due to error %s', task_id, email_id, email, exc)
dog_stats_api.increment('course_email.error', tags=[_statsd_tag(course_title)])
subtask_status.increment(failed=1)
else:
dog_stats_api.increment('course_email.sent', tags=[_statsd_tag(course_title)])
if settings.BULK_EMAIL_LOG_SENT_EMAILS:
log.info('Email with id %s sent to %s', email_id, email)
else:
log.debug('Email with id %s sent to %s', email_id, email)
subtask_status.increment(succeeded=1)
# Pop the user that was emailed off the end of the list only once they have
# successfully been processed. (That way, if there were a failure that
# needed to be retried, the user is still on the list.)
to_list.pop()
except INFINITE_RETRY_ERRORS as exc:
dog_stats_api.increment('course_email.infinite_retry', tags=[_statsd_tag(course_title)])
# Increment the "retried_nomax" counter, update other counters with progress to date,
# and set the state to RETRY:
subtask_status.increment(retried_nomax=1, state=RETRY)
return _submit_for_retry(
entry_id, email_id, to_list, global_email_context, exc, subtask_status, skip_retry_max=True
)
except LIMITED_RETRY_ERRORS as exc:
# Errors caught here cause the email to be retried. The entire task is actually retried
# without popping the current recipient off of the existing list.
# Errors caught are those that indicate a temporary condition that might succeed on retry.
dog_stats_api.increment('course_email.limited_retry', tags=[_statsd_tag(course_title)])
# Increment the "retried_withmax" counter, update other counters with progress to date,
# and set the state to RETRY:
subtask_status.increment(retried_withmax=1, state=RETRY)
return _submit_for_retry(
entry_id, email_id, to_list, global_email_context, exc, subtask_status, skip_retry_max=False
)
except BULK_EMAIL_FAILURE_ERRORS as exc:
dog_stats_api.increment('course_email.error', tags=[_statsd_tag(course_title)])
num_pending = len(to_list)
log.exception('Task %s: email with id %d caused send_course_email task to fail with "fatal" exception. %d emails unsent.',
task_id, email_id, num_pending)
# Update counters with progress to date, counting unsent emails as failures,
# and set the state to FAILURE:
subtask_status.increment(failed=num_pending, state=FAILURE)
return subtask_status, exc
except Exception as exc:
# Errors caught here cause the email to be retried. The entire task is actually retried
# without popping the current recipient off of the existing list.
# These are unexpected errors. Since they might be due to a temporary condition that might
# succeed on retry, we give them a retry.
dog_stats_api.increment('course_email.limited_retry', tags=[_statsd_tag(course_title)])
log.exception('Task %s: email with id %d caused send_course_email task to fail with unexpected exception. Generating retry.',
task_id, email_id)
# Increment the "retried_withmax" counter, update other counters with progress to date,
# and set the state to RETRY:
subtask_status.increment(retried_withmax=1, state=RETRY)
return _submit_for_retry(
entry_id, email_id, to_list, global_email_context, exc, subtask_status, skip_retry_max=False
)
else:
# All went well. Update counters with progress to date,
# and set the state to SUCCESS:
subtask_status.increment(state=SUCCESS)
# Successful completion is marked by an exception value of None.
return subtask_status, None
finally:
# Clean up at the end.
connection.close()
def _get_current_task():
"""
Stub to make it easier to test without actually running Celery.
This is a wrapper around celery.current_task, which provides access
to the top of the stack of Celery's tasks. When running tests, however,
it doesn't seem to work to mock current_task directly, so this wrapper
is used to provide a hook to mock in tests, while providing the real
`current_task` in production.
"""
return current_task
def _submit_for_retry(entry_id, email_id, to_list, global_email_context, current_exception, subtask_status, skip_retry_max=False):
"""
Helper function to requeue a task for retry, using the new version of arguments provided.
Inputs are the same as for running a task, plus two extra indicating the state at the time of retry.
These include the `current_exception` that the task encountered that is causing the retry attempt,
and the `subtask_status` that is to be returned. A third extra argument `skip_retry_max`
indicates whether the current retry should be subject to a maximum test.
Returns a tuple of two values:
* First value is a dict which represents current progress. Keys are:
'task_id' : id of subtask. This is used to pass task information across retries.
'attempted' : number of attempts -- should equal succeeded plus failed
'succeeded' : number that succeeded in processing
'skipped' : number that were not processed.
'failed' : number that failed during processing
'retried_nomax' : number of times the subtask has been retried for conditions that
should not have a maximum count applied
'retried_withmax' : number of times the subtask has been retried for conditions that
should have a maximum count applied
'state' : celery state of the subtask (e.g. QUEUING, PROGRESS, RETRY, FAILURE, SUCCESS)
* Second value is an exception returned by the innards of the method. If the retry was
successfully submitted, this value will be the RetryTaskError that retry() returns.
Otherwise, it (ought to be) the current_exception passed in.
"""
task_id = subtask_status.task_id
log.info("Task %s: Successfully sent to %s users; failed to send to %s users (and skipped %s users)",
task_id, subtask_status.succeeded, subtask_status.failed, subtask_status.skipped)
# Calculate time until we retry this task (in seconds):
# The value for max_retries is increased by the number of times an "infinite-retry" exception
# has been retried. We want the regular retries to trigger max-retry checking, but not these
# special retries. So we count them separately.
max_retries = _get_current_task().max_retries + subtask_status.retried_nomax
base_delay = _get_current_task().default_retry_delay
if skip_retry_max:
# once we reach five retries, don't increase the countdown further.
retry_index = min(subtask_status.retried_nomax, 5)
exception_type = 'sending-rate'
# if we have a cap, after all, apply it now:
if hasattr(settings, 'BULK_EMAIL_INFINITE_RETRY_CAP'):
retry_cap = settings.BULK_EMAIL_INFINITE_RETRY_CAP + subtask_status.retried_withmax
max_retries = min(max_retries, retry_cap)
else:
retry_index = subtask_status.retried_withmax
exception_type = 'transient'
# Skew the new countdown value by a random factor, so that not all
# retries are deferred by the same amount.
countdown = ((2 ** retry_index) * base_delay) * random.uniform(.75, 1.25)
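    # Worked example (illustrative numbers only): with a base_delay of 30 seconds
    # and a retry_index of 3, the countdown is (2 ** 3) * 30 = 240 seconds, skewed
    # by a random factor between 0.75 and 1.25, i.e. somewhere between 180 and 300 seconds.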
log.warning('Task %s: email with id %d not delivered due to %s error %s, retrying send to %d recipients in %s seconds (with max_retry=%s)',
task_id, email_id, exception_type, current_exception, len(to_list), countdown, max_retries)
# we make sure that we update the InstructorTask with the current subtask status
# *before* actually calling retry(), to be sure that there is no race
# condition between this update and the update made by the retried task.
update_subtask_status(entry_id, task_id, subtask_status)
# Now attempt the retry. If it succeeds, it returns a RetryTaskError that
# needs to be returned back to Celery. If it fails, we return the existing
# exception.
try:
send_course_email.retry(
args=[
entry_id,
email_id,
to_list,
global_email_context,
subtask_status.to_dict(),
],
exc=current_exception,
countdown=countdown,
max_retries=max_retries,
throw=True,
)
except RetryTaskError as retry_error:
# If the retry call is successful, update with the current progress:
log.exception('Task %s: email with id %d caused send_course_email task to retry.',
task_id, email_id)
return subtask_status, retry_error
except Exception as retry_exc:
# If there are no more retries, because the maximum has been reached,
# we expect the original exception to be raised. We catch it here
# (and put it in retry_exc just in case it's different, but it shouldn't be),
# and update status as if it were any other failure. That means that
# the recipients still in the to_list are counted as failures.
log.exception('Task %s: email with id %d caused send_course_email task to fail to retry. To list: %s',
task_id, email_id, [i['email'] for i in to_list])
num_failed = len(to_list)
        subtask_status.increment(failed=num_failed, state=FAILURE)
return subtask_status, retry_exc
def _statsd_tag(course_title):
"""
Calculate the tag we will use for DataDog.
"""
tag = u"course_email:{0}".format(course_title)
return tag[:200]
|
hkawasaki/kawasaki-aio8-1
|
lms/djangoapps/bulk_email/tasks.py
|
Python
|
agpl-3.0
| 33,434
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Generic libs
import sys
# Framework modules
from bottle import app, run
# Application modules
from src.app.middleware import CustomApp
import src.app.routes
# Run application on port provided from cmd (heroku), use gunicorn server
run(app=CustomApp(app()), server='gunicorn', host='0.0.0.0', port=sys.argv[1])
|
Xifax/suzu-web
|
suzu.py
|
Python
|
bsd-2-clause
| 363
|
# Copyright 2022 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
google-research/prompt-tuning
|
prompt_tuning/train/__init__.py
|
Python
|
apache-2.0
| 572
|
# Copyright (C) 2013 Cisco Systems Inc.
# All rights reserved
#$Id: eor_utils.py,v 1.427 2013/06/24 23:56:03 venksrin Exp $
#ident $Source: /cvsroot/eor/systest/lib/eor_utils.py,v $ $Revision: 1.427 $
# Best Practices for get() functions:
# 1. Use class rex as much as possible for standard regular expressions
# 2. Use underscore in keys wherever white-space appears in the output header
# 3. Add author name, description of function, sample usage examples and return value
# 4. Use python documentation format for #3 above, so that the documentation for all the functions can be pulled out easily
from nxapi_utils import NXAPITransport
import re
import collections
import string
import subprocess
import shlex
import sys, socket
import datetime
import time
MASKS=['0.0.0.0','128.0.0.0','192.0.0.0','224.0.0.0','240.0.0.0','248.0.0.0','252.0.0.0','254.0.0.0','255.0.0.0','255.128.0.0','255.192.0.0','255.224.0.0','255.240.0.0','255.248.0.0','255.252.0.0', '255.254.0.0', '255.255.0.0', '255.255.128.0', '255.255.192.0', '255.255.224.0', '255.255.240.0', '255.255.248.0', '255.255.252.0', '255.255.254.0', '255.255.255.0', '255.255.255.128', '255.255.255.192', '255.255.255.224', '255.255.255.240', '255.255.255.248', '255.255.255.252', '255.255.255.254', '255.255.255.255']
def runNXAPIConf(cmd):
output,code,msg = NXAPITransport.send_cmd_int(cmd, "cli_conf")
return output,msg,code
def runNXAPIShow(cmd):
xml_index = cmd.find("| xml")
if xml_index == -1:
output,code,msg = NXAPITransport.send_cmd_int(cmd, "cli_show_ascii")
else:
cmd = cmd[:xml_index]
output,code,msg = NXAPITransport.send_cmd_int(cmd, "cli_show")
return output
def runVshCmdEx(cmd, _shell = False, _stdout = None):
output,error,status = runNXAPIConf(cmd)
return output,error,status
def cli_ex(cmd):
return runNXAPIShow(cmd)
class rex:
INTERFACE_TYPE="[Ff]ast[Ee]thernet|[Ff][Ee]th|[Gg]igabit[Ee]thernet|[Gg]ig[Ee]|[Ee]thernet|[Ee]th|[Tt]unnel ?|[Ll]oopback ?|[Pp]ort-channel ?|[Oo]verlay ?|[Nn]ull|[Mm]gmt|[Vv]lan ?|[Pp]o ?|[Ll]o ?|[Oo]vl ?|[Vv][Ll]|[Rr]epl|[Rr]eplicator|[Ff]as|[Ss]up-eth"
INTERFACE_NUMBER="[0-9]+/[0-9]+/[0-9]+|[0-9]+/[0-9]+|[0-9]+/[0-9]+\.[0-9]+|[0-9]+\.[0-9]+|[0-9]+|[0-9]+/[0-9]+/[0-9]+"
# INTERFACE_NAME="(?:{0})(?:{1})|[Nn]ull".format(INTERFACE_TYPE,INTERFACE_NUMBER)
INTERFACE_NAME='(?:(?:{0})(?:{1})|(?:[Nn]ull))'.format(INTERFACE_TYPE,INTERFACE_NUMBER)
INTERFACE_RANGE='(?:(?:{0}-[0-9]+|{0}-{0}|{0}),?)+'.format(INTERFACE_NAME)
BCM_FP_INTERFACE='([Xx]e([0-9]+))'
BCM_FP_INTERFACE_RANGE='[Xx]e([0-9]+)-[Xx]e([0-9]+)'
PHYSICAL_INTERFACE_TYPE="[Ff]ast[Ee]thernet|[Ff][Ee]th|[Gg]igabit[Ee]thernet|[Gg]ig[Ee]|[Gg]i|[Ee]thernet|[Ee]th"
PHYSICAL_INTERFACE_NUMBER="[0-9]+/[0-9]+/[0-9]+|[0-9]+/[0-9]+|[0-9]+"
PHYSICAL_INTERFACE_NAME="(?:{0})(?:{1})".format(PHYSICAL_INTERFACE_TYPE,PHYSICAL_INTERFACE_NUMBER)
PHYSICAL_INTERFACE_RANGE='(?:(?:{0}-[0-9]+|{0}-{0}|{0}),?)+'.format(PHYSICAL_INTERFACE_NAME)
DEVICE_TYPE='EOR|sTOR|N7K|N5K|N3K|itgen|fanout|UNKNOWN|NA'
FEX_MODEL='N2148T|N2232P|N2232TM-E|N2248TP-E|N2248T|NB22FJ|NB22HP'
FEX_INTERFACE_TYPE='{0}[0-9][0-9][0-9]/[0-9]+/[0-9]+'.format(PHYSICAL_INTERFACE_TYPE)
SWITCH_NAME = '[0-9A-Za-z_-]+'
#VLAN_RANGE = '[0-9]+(?:\-[0-9]+)?'
HEX="[0-9a-fA-F]+"
HEX_VAL="[x0-9a-fA-F]+"
MACDELIMITER="[\.:\-]"
# Following will match the following combinations
# Aa.Bb.Cc.Dd.Ee.Ff
# Aa-Bb-Cc-Dd-Ee-Ff
# Aa:Bb:Cc:Dd:Ee:Ff
# AaBb.CcDd.EeFf
# AaBb-CcDd-EeFf
# AaBb:CcDd:EeFf
MACADDR=HEX+HEX+MACDELIMITER+HEX+HEX+MACDELIMITER+HEX+HEX+MACDELIMITER+HEX+HEX+MACDELIMITER+HEX+HEX+MACDELIMITER+HEX+HEX+"|"+HEX+HEX+HEX+HEX+MACDELIMITER+HEX+HEX+HEX+HEX+MACDELIMITER+HEX+HEX+HEX+HEX
IPv4_ADDR="[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"
IPv6_ADDR="[0-9A-Fa-f]+:[0-9A-Fa-f:]+"
LINK_LOCAL_IPv6_ADDR="fe80::[0-9A-Fa-f]+:[0-9A-Fa-f]+:[0-9A-Fa-f]+:[0-9A-Fa-f]+"
IP_ADDRESS="(?:(?:{0})|(?:{1}))".format(IPv4_ADDR,IPv6_ADDR)
NETADDR ='{0}/[0-9]+'.format(IPv4_ADDR)
NUM="[0-9]+"
BOOL="[01]"
DECIMAL_NUM="[0-9\.]+"
ALPHA="[a-zA-Z]+"
ALPHAUPPER="[A-Z]+"
ALPHALOWER="[a-z]+"
ALPHASPECIAL="[a-zA-Z_\-\.#/]+"
ALPHANUM="[a-zA-Z0-9]+"
ALPHANUMSPECIAL="[a-zA-Z0-9\-\._/]+"
SYSMGR_SERVICE_NAME = "[a-zA-Z0-9\-\._ ]+"
VRF_NAME="[a-zA-Z0-9_\-#]+"
ALL="?:[.\s]+"
#
# Number and time formats
#
VLAN_RANGE='(?:(?:{0}-[0-9]+|{0}-{0}|{0}),?)+'.format(NUM)
DATE = '[0-9]+\-[0-9]+\-[0-9]+'
U_TIME="[0-9]+\.[0-9]+"
CLOCK_TIME="[0-9]+[0-9]+:[0-9]+[0-9]+:[0-9]+[0-9]+"
HH_MM_SS="[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}"
TIME="(?:$U_TIME|$CLOCK_TIME)"
MONTH="Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec"
YEAR="[12]+[0-9][0-9][0-9]"
UPTIME="(?:\d+[dwmy]\d+[hdwm]|\d+:\d+:\d+|\d+\.\d+)"
XPTIME="(?:\d+:\d+:\d+|\d+\.\d+|never)"
LC_STATUS='(?:pwr-?denied|err-?pwd-?dn|pwr-?cycle?d|upgrading|powered-?up|powered-?dn|failure|initializing|testing|ok)'
LC_MODEL='(?:N7K-F2-?48X[PT]-?\d+[E]*| +|Cortina-Test-LC|N9k-X9636PQ)'
FC_MODEL='(?:N7K-C[0-9]+-FAB-?\d+|N/A| +)'
LC_MODULE_TYPE='(?:[0-9]+/[0-9]+ Gbps (?:BASE-T )?Ethernet Module|Cortina-Test-LC|Snowbird|Seymour)'
FC_MODULE_TYPE='(?:Fabric Module(?: [0-9]+)?|Sierra|Shasta)'
VLAN_STATUS='active|suspended|act.lshut'
#Verify_list defined for stimuli classes
VERIFY_LIST=['none','all','traffic','l2_unicast_pi','l3_unicast_pi','l2_multicast_pi','l3_multicast_pi','l2_unicast_pd','l3_unicast_pd','l2_multicast_pd','l3_multicast_pd','system','exception','vpc_consistency']
TRIGGER_VERIFY_LIST=['traffic','none','all']
# To be deprecated, use strTolist instead
# Usage: strtolist('1,2,3')
#        strtolist('1 2 3')
#        strtolist('1, 2, 3')
# All three will return the list ['1', '2', '3']
def strtolist(inputstr,retainint=False):
inputstr=str(inputstr)
inputstr=inputstr.strip("[]")
splitbycomma=inputstr.split(",")
splitbyspace=inputstr.split()
if len(splitbycomma) >= 2:
returnlist=[]
for elem in splitbycomma:
elem=elem.strip(" '")
elem=elem.strip('"')
if elem.isdigit() and retainint:
returnlist.append(int(elem))
else:
returnlist.append(elem)
return returnlist
returnlist=[]
for elem in splitbyspace:
elem=elem.strip(" '")
elem=elem.strip('"')
if elem.isdigit() and retainint:
returnlist.append(int(elem))
else:
returnlist.append(elem)
return returnlist
def normalizeInterfaceName(log, interface):
in_type=type(interface)
pattern1='[Ee]thernet|[Ee]th|[Ee]t'
pattern2='[Vv]lan|[Vv]l'
pattern3='[Pp]ort-channel|[Pp]ortchannel|[Pp]o'
pattern4='[Ll]oopback|[Ll]oop-back|[Ll]o'
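    # Illustrative behavior of the substitutions below (hypothetical inputs):
    #   normalizeInterfaceName(log, 'ethernet1/1')    -> 'Eth1/1'
    #   normalizeInterfaceName(log, 'port-channel10') -> 'Po10'
    #   normalizeInterfaceName(log, 'vlan100')        -> 'Vlan100'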
if (in_type == str):
interface=re.sub(r'(?:{0})((?:{1}))'.format(pattern1,rex.INTERFACE_NUMBER),r'Eth\1',interface)
interface=re.sub(r'(?:{0})((?:{1}))'.format(pattern2,rex.INTERFACE_NUMBER),r'Vlan\1',interface)
interface=re.sub(r'(?:{0})((?:{1}))'.format(pattern3,rex.INTERFACE_NUMBER),r'Po\1',interface)
interface=re.sub(r'(?:{0})((?:{1}))'.format(pattern4,rex.INTERFACE_NUMBER),r'Lo\1',interface)
if (in_type == list):
for int in interface:
tmp=re.sub(r'(?:{0})((?:{1}))'.format(pattern1,rex.INTERFACE_NUMBER),r'Eth\1',int)
tmp=re.sub(r'(?:{0})((?:{1}))'.format(pattern2,rex.INTERFACE_NUMBER),r'Vlan\1',tmp)
tmp=re.sub(r'(?:{0})((?:{1}))'.format(pattern3,rex.INTERFACE_NUMBER),r'Po\1',tmp)
tmp=re.sub(r'(?:{0})((?:{1}))'.format(pattern4,rex.INTERFACE_NUMBER),r'Lo\1',tmp)
interface[interface.index(int)]=tmp
if (in_type == tuple):
int_list=list(interface)
for int in int_list:
tmp=re.sub(r'(?:{0})((?:{1}))'.format(pattern1,rex.INTERFACE_NUMBER),r'Eth\1',int)
tmp=re.sub(r'(?:{0})((?:{1}))'.format(pattern2,rex.INTERFACE_NUMBER),r'Vlan\1',tmp)
tmp=re.sub(r'(?:{0})((?:{1}))'.format(pattern3,rex.INTERFACE_NUMBER),r'Po\1',tmp)
tmp=re.sub(r'(?:{0})((?:{1}))'.format(pattern4,rex.INTERFACE_NUMBER),r'Lo\1',tmp)
int_list[int_list.index(int)]=tmp
interface=tuple(int_list)
if (in_type == dict):
dct={}
for key in interface.keys():
int=re.sub(r'(?:{0})((?:{1}))'.format(pattern1,rex.INTERFACE_NUMBER),r'Eth\1',key)
int=re.sub(r'(?:{0})((?:{1}))'.format(pattern2,rex.INTERFACE_NUMBER),r'Vlan\1',int)
int=re.sub(r'(?:{0})((?:{1}))'.format(pattern3,rex.INTERFACE_NUMBER),r'Po\1',int)
int=re.sub(r'(?:{0})((?:{1}))'.format(pattern4,rex.INTERFACE_NUMBER),r'Lo\1',int)
tmp={int:interface[key]}
dct.update(tmp)
interface=dct
return interface
def convertListToDict(table,columns=[],keys=None,keytype="tuple"):
    # Returns a dictionary built from the given list and column names.
    # If `table` is a flat list, each column name becomes a key.
    # If it is a list of lists/tuples, the first-level keys come from the passed
    # `keys` argument and the remaining column names become second-level keys.
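    # Illustrative example of the keyed, two-level form (hypothetical rows):
    #   convertListToDict([('Eth1/1', 'up', '1500'), ('Eth1/2', 'down', '9216')],
    #                     columns=['Interface', 'Status', 'MTU'], keys=['Interface'])
    #   -> OrderedDict([('Eth1/1', OrderedDict([('Status', 'up'), ('MTU', '1500')])),
    #                   ('Eth1/2', OrderedDict([('Status', 'down'), ('MTU', '9216')]))])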
returnDict = collections.OrderedDict()
if keys:
keyIndexes = []
if "split" in dir(keys):
keys=keys.split()
for key in keys:
keyIndexes.append(columns.index(key))
valueIndex=-1
if len(columns) - len(keys) == 1:
for i in range(len(columns)):
if not i in keyIndexes:
valueIndex=i
break
for row in table:
key=""
keyitems=[]
initial=True
for keyIndex in keyIndexes:
interface=""
temp=re.match(rex.INTERFACE_NAME,row[keyIndex])
if temp and temp.group(0) == row[keyIndex]:
interface=normalizeInterfaceName("",row[keyIndex])
if initial:
if interface == "":
key = key + row[keyIndex]
else:
key = key + interface
initial=False
else:
if interface == "":
key = key + " " + row[keyIndex]
else:
key = key + " " + interface
if interface == "":
keyitems.append(row[keyIndex])
else:
keyitems.append(interface)
if keytype == "tuple" and len(keys) > 1:
key=tuple(keyitems)
returnDict[key] = collections.OrderedDict()
if valueIndex == -1:
for i in range(len(columns)):
if not i in keyIndexes:
temp=re.match(rex.INTERFACE_NAME,row[i].strip())
if temp and temp.group(0) == row[i].strip():
returnDict[key][columns[i]]=normalizeInterfaceName("",row[i].strip())
else:
returnDict[key][columns[i]] = row[i].strip()
else:
temp=re.match(rex.INTERFACE_NAME,row[valueIndex].strip())
if temp and temp.group(0) == row[valueIndex].strip():
returnDict[key]=normalizeInterfaceName("",row[valueIndex].strip())
else:
returnDict[key] = row[valueIndex]
else:
#Single level dictionary need to handle 6 different use cases
#eor_utils.convertListToDict(['x','y','z'],['a','b','c'])
#eor_utils.convertListToDict([],['a','b','c'])
#eor_utils.convertListToDict(['x','y'],['a','b','c'])
#eor_utils.convertListToDict([('x','y','z')],['a','b','c'])
#eor_utils.convertListToDict([('x','y'),('c','d')],['a','b'])
#eor_utils.convertListToDict([('x','y'),('c','d')])
if len(table):
if len(columns) == len(table) and not re.search('tuple',str(type(table[0]))):
for key in columns:
temp=re.match(rex.INTERFACE_NAME,table[columns.index(key)])
if temp and temp.group(0) == table[columns.index(key)]:
returnDict[key]=normalizeInterfaceName("",table[columns.index(key)])
else:
returnDict[key]=table[columns.index(key)]
elif len(table) == 1 and len(table[0]) == len(columns) and re.search('tuple',str(type(table[0]))):
for key in columns:
temp=re.match(rex.INTERFACE_NAME,table[0][columns.index(key)])
if temp and temp.group(0) == table[0][columns.index(key)]:
returnDict[key]=normalizeInterfaceName("",table[0][columns.index(key)])
else:
returnDict[key]=table[0][columns.index(key)]
elif (len(columns) == 2 or len(columns) == 0)and re.search('tuple',str(type(table[0]))):
for row in table:
if len(row) == 2:
temp=re.match(rex.INTERFACE_NAME,row[1])
if temp and temp.group(0) == row[1]:
returnDict[row[0]]=normalizeInterfaceName("",row[1])
else:
returnDict[row[0]]=row[1]
else:
return collections.OrderedDict()
return returnDict
def getUnwrappedBuffer(buffer,delimiter=" "):
# Returns a string
# If output has wrapped lines as follows (port-channel summary)
# "21 Po21(SU) Eth NONE Eth2/11(P) Eth2/12(D)
# 22 Po22(SU) Eth NONE Eth1/1(P) Eth1/2(P) Eth1/3(P)
# Eth1/4(P)
# 101 Po101(SD) Eth NONE Eth2/1(D) Eth2/2(D)"
# This converts to
# "21 Po21(SU) Eth NONE Eth2/11(P) Eth2/12(D)
# 22 Po22(SU) Eth NONE Eth1/1(P) Eth1/2(P) Eth1/3(P) Eth1/4(P)
# 101 Po101(SD) Eth NONE Eth2/1(D) Eth2/2(D)"
#
    # This helps to write get procedures with every output being a single line,
    # and makes regular expressions seamless, independent of wrapped output.
previousline=""
lines=[]
returnbuffer = ""
buffer=re.sub("\r","",buffer)
for line in buffer.split("\n"):
wrappedline=re.findall("^[ \t]+(.*)",line,flags=re.I)
if len(wrappedline) > 0:
previousline = previousline + delimiter + re.sub("\r\n","",wrappedline[0])
else:
if (previousline != ""):
returnbuffer = returnbuffer + previousline + "\n"
previousline=re.sub("[\r\n]+","",line)
if (previousline != ""):
returnbuffer = returnbuffer + previousline + "\n"
return returnbuffer
def getVlanDict(vlan):
cmd = "show vlan id " + vlan
showoutput=cli_ex(cmd)
vlanmemberlist=re.findall("("+rex.NUM+")[ \t]+("+rex.ALPHANUM+")[ \t]+("+rex.VLAN_STATUS+")[ \t]+(.*)",getUnwrappedBuffer(showoutput,", "),flags=re.I|re.M)
vlanmemberdict=convertListToDict(vlanmemberlist,['VLAN','Name','Status','Ports'],['VLAN'])
return vlanmemberdict
"""This scrpit should not contain any thing other than enums"""
class IfType():
Ethernet = 1
PortChannel = 2
Internal = 3
Cpu = 4
def replace_output(_lines, _find_word, _replace_word):
hw_name = _find_word
new_lines = []
for line in _lines:
x = re.sub(r'\b%s\b'%(hw_name), _replace_word, line)
new_lines.append(x)
return new_lines
class createHwTableObject(object):
""" Class to parse the broadcom table outputs and convert to dictionary format. Expects the
input as 'Index: <Row>' where the <Row> is in key value pairs separated by commas"""
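    # Illustrative input/output (hypothetical dump):
    #   dump = "0: <KEY=0x10,VALID=1>\n1: <KEY=0x20,VALID=1>"
    #   createHwTableObject(dump).table
    #   -> OrderedDict([('0', OrderedDict([('KEY', '0x10'), ('VALID', '1')])),
    #                   ('1', OrderedDict([('KEY', '0x20'), ('VALID', '1')]))])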
def __init__( self, bcm_cmd_dump ):
import re
self.table=collections.OrderedDict()
table_rows=bcm_cmd_dump.split('\n')
for row in table_rows:
if "d chg" in row:
continue
if ":" not in row:
continue
if "Private image version" in row:
continue
            (row_key, row_value)=row.split(': ')
value_row=row_value.rstrip('\r').lstrip('<').rstrip('>')
self.table[row_key]=collections.OrderedDict()
for data_params in value_row.split(','):
if len(data_params) == 0:
continue
(data_key,data_value)=data_params.split('=')
self.table[row_key][data_key]=data_value
#print('Table Data', self.table )
def getSpanningTreeVlanPortStateDict(vlan):
cmd = "show spanning-tree " + vlan
showoutput=cli_ex(cmd)
stplist=re.findall("^([^ \t]+)[ \s]+([^ \t]+)[ \s]+([A-Za-z]+)[ \s]+([0-9]+)[ \s]+\
([^ \t]+)[ \s]+([^ \t]+)[ \s\r\n]+",showoutput,flags=re.I|re.M)
if stplist:
# if vlan port state is found
stpdict=convertListToDict(stplist,['vlan','role','state','cost','prio.nbr','type'])
log.info(" STP state for " + \
parserutils_lib.argsToCommandOptions(args,arggrammar,log,"str") + " is : " + str(stpdict))
return stpdict
def getShowSpanningTreeDict( vlan ):
show_stp_dict=collections.OrderedDict()
# Define the Regexp Patterns to Parse ..
root_params_pat_non_root='\s+Root ID\s+Priority\s+([0-9]+)\r\n\s+Address\s+({0})\r\n\s+Cost\s+([0-9]+)\r\nPort\s+([0-9]+)\s+\(([a-zA-Z0-9\-]+)\)\r\n\s+Hello Time\s+([0-9]+)\s+sec\s+Max\s+Age\s+([0-9]+)\s+sec\s+Forward\s+Delay\s+([0-9]+)\s+sec\r\n'.format(rex.MACADDR)
root_params_pat_root='\s+Root ID\s+Priority\s+([0-9]+)\r\n\s+Address\s+({0})\r\n\s+This bridge is the root\r\n\s+Hello Time\s+([0-9]+)\s+sec\s+Max\s+Age\s+([0-9]+)\s+sec\s+Forward\s+Delay\s+([0-9]+)\s+sec\r\n'.format(rex.MACADDR)
    bridge_params_pat='\s+Bridge ID\s+Priority\s+([0-9]+)\s+\(priority\s+([0-9]+)\s+sys-id-ext ([0-9]+)\)\r\n\s+Address\s+({0})\r\n\s+Hello\s+Time\s+([0-9]+)\s+sec\s+Max\s+Age\s+([0-9]+)\s+sec\s+Forward Delay\s+([0-9]+) sec\r\n'.format(rex.MACADDR)
#interface_params_pat='-------\r\n({0})\s+([a-zA-Z]+)\s+([A-Z]+)\s+([0-9]+)\s+([0-9]+).([0-9]+)\s+([\(\)a-zA-Z0-9\s]+)\r'.format(rex.INTERFACE_NAME)
interface_params_pat='({0})\s+([a-zA-Z]+)\s+([A-Z]+)[\*\s]+([0-9]+)\s+([0-9]+).([0-9]+)\s+'.format(rex.INTERFACE_NAME)
# Build the command to be executed based on the arguments passed ..
cmd = 'show spanning-tree '
cmd = cmd + 'vlan ' + str(vlan)
show_stp=cli_ex(cmd)
# Split the output of STP based on VLAN
show_stp_vlan_split=show_stp.split('VLAN')
# Iterate over every VLAN block and build the show_stp_dict
for stp_vlan in show_stp_vlan_split:
if re.search( '^([0-9]+)', stp_vlan ):
#removed backslash r
match=re.search( '^([0-9]+)\n\s+Spanning tree enabled protocol ([a-z]+)', stp_vlan, re.I )
vlan_id = int(match.group(1))
stp_mode = match.group(2)
show_stp_dict[vlan_id]={}
show_stp_dict[vlan_id]['stp_mode']=stp_mode
if re.search( root_params_pat_root, stp_vlan, re.I ):
root_info=re.findall( root_params_pat_root, stp_vlan, re.I )
show_stp_dict[vlan_id]['root_info']=convertListToDict( root_info, ['Priority','Address', \
'Hello Time','Max Age','Forward Delay'], ['Priority','Address'])
show_stp_dict[vlan_id]['root']=True
else:
root_info=re.findall( root_params_pat_non_root, stp_vlan, re.I )
show_stp_dict[vlan_id]['root_info']=convertListToDict( root_info, ['Priority','Address','Cost', \
'Port','Hello Time','Max Age','Forward Delay'], ['Priority','Address','Cost', 'Port'])
show_stp_dict[vlan_id]['root']=False
bridge_info=re.findall( bridge_params_pat, stp_vlan, re.I )
            show_stp_dict[vlan_id]['bridge_info']=convertListToDict( bridge_info, ['Priority','Address', \
                'Hello Time','Max Age','Forward Delay'], ['Priority','Address'])
intf_info=re.findall( interface_params_pat, stp_vlan, re.I )
show_stp_dict[vlan_id]['Interface_info']=convertListToDict( intf_info, [ 'Interface', 'Role', 'Status', \
'Cost', 'Prio', 'Nbr' ] , [ 'Interface' ] )
# Split the output of STP based on MST
show_stp_mst_split=show_stp.split('MST')
for mst_id in show_stp_mst_split:
if re.search( '^([0-9]+)', mst_id):
#removed backslash r
match=re.search( '^([0-9]+)\n\s+Spanning tree enabled protocol ([a-z]+)', mst_id, re.I )
mst = vlan
stp_mode = match.group(2)
show_stp_dict[mst]={}
show_stp_dict[mst]['stp_mode']=stp_mode
if re.search( root_params_pat_root, mst_id, re.I ):
root_info=re.findall( root_params_pat_root, mst_id, re.I )
show_stp_dict[mst]['root_info']=convertListToDict( root_info, ['Priority','Address', \
'Hello Time','Max Age','Forward Delay'], ['Priority','Address'])
show_stp_dict[mst]['root']=True
else:
root_info=re.findall( root_params_pat_non_root, mst_id, re.I )
show_stp_dict[mst]['root_info']=convertListToDict( root_info, ['Priority','Address','Cost', \
'Port','Hello Time','Max Age','Forward Delay'], ['Priority','Address','Cost', 'Port'])
show_stp_dict[mst]['root']=False
bridge_info=re.findall( bridge_params_pat, mst_id, re.I )
            show_stp_dict[mst]['bridge_info']=convertListToDict( bridge_info, ['Priority','Address', \
                'Hello Time','Max Age','Forward Delay'], ['Priority','Address'])
intf_info=re.findall( interface_params_pat, mst_id, re.I )
show_stp_dict[mst]['Interface_info']=convertListToDict( intf_info, [ 'Interface', 'Role', 'Status', \
'Cost', 'Prio', 'Nbr' ] , [ 'Interface' ] )
return show_stp_dict
def pprint_table(out, table):
"""Prints out a table of data, padded for alignment
@param out: Output stream (file-like object)
@param table: The table to print. A list of lists.
Each row must have the same number of columns. """
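    # Note: relies on helper functions get_max_width() and format_num(), which are
    # assumed to be defined elsewhere in the original module.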
col_paddings = []
for i in range(len(table[0])):
col_paddings.append(get_max_width(table, i))
for row in table:
# left col
print >> out, row[0].ljust(col_paddings[0] + 1),
# rest of the cols
for i in range(1, len(row)):
col = format_num(row[i]).rjust(col_paddings[i] + 2)
print >> out, col,
print >> out
def validateIP(ip):
try:
socket.inet_aton(ip)
return 0
except socket.error:
return 1
def convertIP(ip):
hexIP = []
[hexIP.append(hex(int(x))[2:].zfill(2)) for x in ip.split('.')]
hexIP = "0x" + "".join(hexIP)
return hexIP
class createEventHistoryTableObject(object):
""" Class to parse the event history outputs and convert to dictionary format. Expects the
input as 'Index: <Row>' where the <Row> is in key value pairs separated by commas"""
def __init__( self, event_history_dump ):
import re
time_format = "at %f usecs after %a %b %d %H:%M:%S %Y"
self.table=[]
table_rows=event_history_dump.split('\n')
new = {}
esq_req_rsp = {}
esqs = []
esq_start = []
req_rsp = True
for row in table_rows:
if "FSM" in row:
continue
if ":" not in row:
continue
if "Previous state:" in row:
if req_rsp == False:
esq_start.append(esq_req_rsp)
req_rsp = True
esq_req_rsp = {}
if len(esq_start) > 0:
esqs.append(esq_start)
esq_start = []
continue
if "Triggered event:" in row:
if req_rsp == False:
esq_start.append(esq_req_rsp)
req_rsp = True
esq_req_rsp = {}
if len(esq_start) > 0:
esqs.append(esq_start)
esq_start = []
continue
if "Next state:" in row:
if req_rsp == False:
esq_start.append(esq_req_rsp)
req_rsp = True
esq_req_rsp = {}
if len(esq_start) > 0:
esqs.append(esq_start)
esq_start = []
continue
if "ESQ_START" in row:
if req_rsp == False:
esq_start.append(esq_req_rsp)
req_rsp = True
esq_req_rsp = {}
if len(esq_start) > 0:
esqs.append(esq_start)
esq_start = []
continue
if "ESQ_REQ" in row or "ESQ_RSP" in row:
old = esq_req_rsp
esq_req_rsp = {}
if len(old) > 0:
esq_start.append(old)
req_rsp = True
if "usecs after" in row:
y = row.split(',')[1].strip()
t = datetime.datetime.strptime(y, time_format)
esq_req_rsp['TIME'] = t
esq_req_rsp['TIME_STRING'] = row
kvpairs = row.split(',')
for val in kvpairs:
x = val.strip(' ').strip('\r').split(':')
if len(x) != 2:
continue
(tk, tv)=val.split(':')
row_key = tk.strip(' ')
row_value = tv.strip(' ')
req_rsp = False
esq_req_rsp[row_key]=row_value
if req_rsp == False:
esq_start.append(esq_req_rsp)
esqs.append(esq_start)
self.table = esqs
|
fmichalo/n9k-programmability
|
Python nxapi scripts/utils/nxos_utils.py
|
Python
|
apache-2.0
| 27,716
|
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
def downsample_with_randomforest(rs_variables,target_variable,n_trees_in_forest = 100, min_samples_leaf = 50, n_cores = 1):
print "\t downsampling with random forest regression"
# first of all get rid of nodata values in target data to build random forest
rs = rs_variables[np.isfinite(target_variable),:]
target = target_variable[np.isfinite(target_variable)]
# split the data into calibration and validation datasets
rs_cal, rs_val, target_cal, target_val = train_test_split(rs,target,train_size=0.5)
# build random forest regressor
randomforest = RandomForestRegressor(n_estimators=n_trees_in_forest, min_samples_leaf=min_samples_leaf, bootstrap=True, oob_score=True, n_jobs=n_cores)
randomforest.fit(rs_cal,target_cal)
print "\t\tcalibration score = %.3f" % randomforest.score(rs_cal,target_cal)
print "\t\tvalidation score = %.3f" % randomforest.score(rs_val,target_val)
rs_rf = randomforest.predict(rs_variables)
return rs_rf
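# Illustrative usage with synthetic data (array names and sizes are made up and
# not part of the original module):
#   rs = np.random.rand(1000, 5)        # remote-sensing predictor layers
#   target = np.random.rand(1000)       # target variable with nodata gaps
#   target[::10] = np.nan
#   downscaled = downsample_with_randomforest(rs, target, n_trees_in_forest=10)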
|
DTMilodowski/SPA_tools
|
construct_drivers/downscale_rs_data_random_forests.py
|
Python
|
gpl-3.0
| 1,110
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from dataclasses import dataclass
from typing import Optional
from rfmt.blocks import LineBlock as LB
from rfmt.blocks import TextBlock as TB
from rfmt.blocks import StackBlock as SB
from rfmt.blocks import ChoiceBlock as CB
from rfmt.blocks import IndentBlock as IB
from rfmt.blocks import WrapBlock as WB
from .utils import with_commas
from .node import SQLNodeList
from .node import SQLNode
from .query import SQLQuery
from .types import SQLType
from .expr import SQLExpr
from .expr import SQLBaseExpr
@dataclass(frozen=True)
class SQLBiOp(SQLExpr):
sql_op: str
left: SQLExpr
right: SQLExpr
def sqlf(self, compact):
right_expr_compact = self.right.sqlf(True)
left_expr_compact = self.left.sqlf(True)
compact_sql = LB([
left_expr_compact, TB(' '), TB(self.sql_op),
TB(' '),
right_expr_compact
])
if compact:
return compact_sql
right_expr = self.right.sqlf(False)
left_expr = self.left.sqlf(False)
return CB([
compact_sql, SB([
LB([left_expr, TB(' '), TB(self.sql_op)]),
right_expr
])
])
@staticmethod
def parse(lex):
return SQLBiOp.parse_or_expression(lex)
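    # The parse_* methods below form an operator-precedence (recursive descent)
    # parser: each level parses its own operators and delegates to the next
    # tighter level. From loosest to tightest binding: OR, AND, NOT, comparisons
    # (=, !=, IS, LIKE, BETWEEN, IN, ...), |, ^, &, << and >>, + and -, * and /,
    # then || (string concatenation) and, below that, the unary operators.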
@staticmethod
def parse_or_expression(lex):
expr = SQLBiOp.parse_and_expression(lex)
while lex.consume('OR'):
expr = SQLBiOp('OR', expr, SQLBiOp.parse_and_expression(lex))
return expr
@staticmethod
def parse_and_expression(lex):
expr = SQLBiOp.parse_not_expression(lex)
while lex.consume('AND'):
expr = SQLBiOp('AND', expr, SQLBiOp.parse_not_expression(lex))
return expr
@staticmethod
def parse_not_expression(lex):
# Add while loop here.
if lex.consume('NOT'):
return SQLUniOp('NOT', SQLBiOp.parse_compare_expression(lex))
return SQLBiOp.parse_compare_expression(lex)
@staticmethod
def consume_basic_cmp_operator(lex):
return lex.consume_any(['!=', '<>', '<=>', '<=',
'>=', '=', '<', '>'])
@staticmethod
def consume_is_operator(lex):
if not lex.consume('IS'):
return None
is_op = 'IS '
if lex.consume('NOT'):
is_op += 'NOT '
typ = lex.consume_any(['TRUE', 'FALSE', 'NULL'])
if typ:
return is_op+typ
if lex.consume('DISTINCT'):
lex.expect('FROM')
            return is_op + 'DISTINCT FROM'
lex.error('Expected TRUE, FALSE, NULL, or DISTINCT FROM')
return None
@staticmethod
def parse_compare_expression(lex):
expr = SQLBiOp.parse_bit_or_expression(lex)
while True:
sql_op = SQLBiOp.consume_basic_cmp_operator(lex)
if sql_op:
expr = SQLBiOp(sql_op, expr,
SQLBiOp.parse_bit_or_expression(lex))
continue
is_op = SQLBiOp.consume_is_operator(lex)
if is_op:
expr = SQLUniOp(is_op, expr)
continue
inverted = ''
if lex.consume('NOT'):
inverted = 'NOT '
if lex.consume('LIKE'):
expr = SQLLike(inverted + 'LIKE', expr,
SQLBiOp.parse_bit_or_expression(lex))
elif lex.consume('BETWEEN'):
expr_l = SQLBiOp.parse_bit_or_expression(lex)
lex.expect('AND')
expr_r = SQLBiOp.parse_bit_or_expression(lex)
expr = SQLBetween(inverted + 'BETWEEN',
expr, expr_l, expr_r)
elif lex.consume('IN'):
lex.expect('(')
in_query = SQLQuery.consume(lex)
if in_query:
lex.expect(')')
return SQLINSQL(inverted + 'IN', expr, in_query)
vals = []
while True:
vals.append(SQLBiOp.parse_bit_or_expression(lex))
if not lex.consume(','):
break
lex.expect(')')
return SQLIN(inverted + 'IN', expr, SQLNodeList(vals))
else:
break
return expr
@staticmethod
def parse_bit_or_expression(lex) -> SQLExpr:
expr = SQLBiOp.parse_bit_xor_expression(lex)
while lex.consume('|'):
expr = SQLBiOp('|', expr, SQLBiOp.parse_bit_xor_expression(lex))
return expr
@staticmethod
def parse_bit_xor_expression(lex) -> SQLExpr:
expr = SQLBiOp.parse_bit_and_expression(lex)
while lex.consume('^'):
expr = SQLBiOp('^', expr, SQLBiOp.parse_bit_and_expression(lex))
return expr
@staticmethod
def parse_bit_and_expression(lex):
expr = SQLBiOp.parse_bit_shift_expression(lex)
while lex.consume('&'):
expr = SQLBiOp('&', expr,
SQLBiOp.parse_bit_shift_expression(lex))
return expr
@staticmethod
def parse_bit_shift_expression(lex):
expr = SQLBiOp.parse_add_sub_expression(lex)
while True:
if lex.consume('<<'):
expr = SQLBiOp('<<', expr,
SQLBiOp.parse_add_sub_expression(lex))
elif lex.consume('>>'):
expr = SQLBiOp('>>', expr,
SQLBiOp.parse_add_sub_expression(lex))
else:
break
return expr
@staticmethod
def parse_add_sub_expression(lex):
expr = SQLBiOp.parse_mul_div_expression(lex)
while True:
if lex.consume('+'):
expr = SQLBiOp('+', expr,
SQLBiOp.parse_mul_div_expression(lex))
elif lex.consume('-'):
expr = SQLBiOp('-', expr,
SQLBiOp.parse_mul_div_expression(lex))
else:
break
return expr
@staticmethod
def parse_mul_div_expression(lex):
expr = SQLBiOp.parse_str_expression(lex)
while True:
if lex.consume('*'):
expr = SQLBiOp('*', expr,
SQLBiOp.parse_str_expression(lex))
elif lex.consume('/'):
expr = SQLBiOp('/', expr,
SQLBiOp.parse_str_expression(lex))
else:
break
return expr
@staticmethod
def parse_str_expression(lex):
expr = SQLUniOp.parse(lex)
while True:
if lex.consume('||'):
expr = SQLBiOp('||', expr,
SQLUniOp.parse(lex))
else:
break
return expr
# Pass in priority for expanding on the brackets.
@dataclass(frozen=True)
class SQLUniOp(SQLExpr):
sql_op: str
arg: SQLExpr
def sqlf(self, compact):
if self.sql_op.startswith('IS'):
return LB(
[self.arg.sqlf(compact), TB(' '), TB(self.sql_op)])
if self.sql_op.isalpha():
return LB(
[TB(self.sql_op), TB(' '), self.arg.sqlf(compact)])
return LB([TB(self.sql_op), self.arg.sqlf(compact)])
@staticmethod
def parse(lex):
if lex.consume('-'):
expr = SQLUniOp('-', SQLCase.parse(lex))
elif lex.consume('~'):
expr = SQLUniOp('~', SQLCase.parse(lex))
else:
expr = SQLCase.parse(lex)
return expr
@dataclass(frozen=True)
class SQLBetween(SQLExpr):
sql_op: str
bexpr: SQLExpr
left: SQLExpr
right: SQLExpr
def sqlf(self, compact):
return LB([
self.bexpr.sqlf(compact), TB(' '), TB(self.sql_op),
TB(' '), self.left.sqlf(compact), TB(' '),
TB('AND'), TB(' '), self.right.sqlf(compact)
])
@dataclass(frozen=True)
class SQLLike(SQLExpr):
sql_op: str
lexpr: SQLExpr
match: SQLExpr
def sqlf(self, compact):
return LB([
self.lexpr.sqlf(True), TB(' ' + self.sql_op + ' '),
self.match.sqlf(True)
])
@dataclass(frozen=True)
class SQLINSQL(SQLExpr):
sql_op: str
expr: SQLExpr
sql: SQLNode
def sqlf(self, compact):
sql_op = TB(' ' + self.sql_op + ' (')
compact_sql = LB([self.expr.sqlf(True), sql_op] +
[self.sql.sqlf(True), TB(')')])
if compact:
return compact_sql
return CB([
compact_sql,
SB([
LB([self.expr.sqlf(False), sql_op]),
IB(self.sql.sqlf(True)),
TB(')')
]),
])
@dataclass(frozen=True)
class SQLIN(SQLExpr):
sql_op: str
iexpr: SQLExpr
args: SQLNodeList[SQLExpr]
def sqlf(self, compact):
in_vals = []
for arg in self.args[:-1]:
in_vals.append(LB([arg.sqlf(True), TB(',')]))
in_vals.append(self.args[-1].sqlf(True))
sql_op = TB(' ' + self.sql_op + ' (')
compact_sql = LB([self.iexpr.sqlf(True), sql_op] + in_vals +
[TB(')')])
if compact:
return compact_sql
# TODO(scannell): Bug with separator - incorrect usage
return CB([
compact_sql,
SB([
LB([self.iexpr.sqlf(False), sql_op]),
IB(WB(in_vals, sep=' ')),
TB(')')
]),
])
@dataclass(frozen=True)
class SQLCase(SQLExpr):
base_expr: Optional[SQLExpr]
else_expr: Optional[SQLExpr]
args: SQLNodeList
def sqlf(self, compact):
big_block = []
small_block = []
for if_then in zip(self.args[0::2], self.args[1::2]):
if_expr = if_then[0].sqlf(False)
if_expr_compact = if_then[0].sqlf(True)
then_expr = if_then[1].sqlf(False)
then_expr_compact = if_then[1].sqlf(True)
# Most compact form - one line
small_block.append(
LB([
TB(' WHEN '), if_expr_compact, TB(' THEN '),
then_expr_compact
]))
# Choice
big_block.append(
CB([
SB([
TB('WHEN'),
IB(if_expr),
TB('THEN'),
IB(then_expr)
]),
SB([
LB([TB('WHEN '), if_expr_compact, TB(' THEN')]),
IB(then_expr)
])
])
)
if self.else_expr:
else_expr_compact = LB([TB(' ELSE '), self.else_expr.sqlf(True)])
small_block.append(else_expr_compact)
else_expr_norm = self.else_expr.sqlf(False)
big_block.append(
CB([
SB([TB('ELSE '), IB(else_expr_norm)]),
else_expr_compact
])
)
case_block = TB('CASE ')
case_block_compact = TB('CASE')
if self.base_expr:
case_block_compact = LB([
TB('CASE '), self.base_expr.sqlf(True)
])
case_block = LB([
TB('CASE '), self.base_expr.sqlf(False), TB(' ')
])
compact_sql = LB([case_block_compact] + small_block +
[TB(' END')])
if compact:
return compact_sql
return CB([
compact_sql,
SB([case_block, IB(SB(big_block)), TB('END')])
])
@staticmethod
def parse(lex):
if not lex.consume('CASE'):
return SQLColonCast.parse(lex)
base_expr: Optional[SQLExpr] = None
if not lex.peek('WHEN'):
base_expr = SQLExpr.parse(lex)
else_expr: Optional[SQLExpr] = None
args = []
while True:
if lex.consume('WHEN'):
args.append(SQLExpr.parse(lex))
lex.expect('THEN')
args.append(SQLExpr.parse(lex))
continue
if lex.consume('ELSE'):
else_expr = SQLExpr.parse(lex)
lex.expect('END')
break
if lex.consume('END'):
break
lex.error('Expected WHEN, ELSE, or END')
return SQLCase(base_expr, else_expr, SQLNodeList(args))
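# Illustrative note (the input text is an assumption, not from the original file):
# parsing the text
#     CASE WHEN a THEN b ELSE c END
# yields SQLCase(base_expr=None, else_expr=<c>, args=[<a>, <b>]), and its
# compact sqlf() rendering reads "CASE WHEN a THEN b ELSE c END" again.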
@dataclass(frozen=True)
class SQLColonCast(SQLExpr):
arg: SQLExpr
typ: SQLType
def sqlf(self, compact):
return LB([
self.arg.sqlf(True),
TB('::'),
self.typ.sqlf(True),
])
@staticmethod
def parse(lex) -> SQLExpr:
expr = (SQLBrackets.consume(lex) or
SQLBaseExpr.parse(lex))
if lex.consume('::'):
typ = SQLType.parse(lex)
expr = SQLColonCast(expr, typ)
return expr
@dataclass(frozen=True)
class SQLStruct(SQLExpr):
exprs: SQLNodeList[SQLExpr]
def sqlf(self, compact):
compact_sql = LB([TB('(')] +
with_commas(True, self.exprs, tail=')'))
if compact:
return compact_sql
return CB([
compact_sql,
SB([
TB('('),
WB(with_commas(False, self.exprs, tail=')')),
])
])
@dataclass(frozen=True)
class SQLBrackets(SQLExpr):
query: SQLNode
def sqlf(self, compact):
compact_sql = LB([
TB('('),
self.query.sqlf(True),
TB(')')
])
if compact:
return compact_sql
return CB([
compact_sql,
SB([
TB('('),
IB(self.query.sqlf(compact)),
TB(')')
])
])
@staticmethod
def consume(lex) -> Optional[SQLExpr]:
if not lex.consume('('):
return None
# If peek of SELECT or WITH then a sub-select it is
if lex.peek('WITH') or lex.peek('SELECT'):
query = SQLQuery.parse(lex)
lex.expect(')')
return SQLBrackets(query)
exprs = []
while True:
sub_expr = SQLExpr.parse(lex)
exprs.append(sub_expr)
if not lex.consume(','):
break
lex.expect(')')
if len(exprs) == 1:
return SQLBrackets(exprs[0])
return SQLStruct(SQLNodeList(exprs))
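# Illustrative note (the input texts are assumptions, not from the original file):
# SQLBrackets.consume() dispatches on what follows '(' --
#   "(SELECT 1)"  -> SQLBrackets wrapping the parsed sub-query,
#   "(a)"         -> SQLBrackets wrapping the single expression,
#   "(a, b, c)"   -> SQLStruct holding the comma-separated expressions.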
|
google/sample-sql-translator
|
sql_parser/expr_op.py
|
Python
|
apache-2.0
| 15,201
|
# -*- coding: utf-8 -*-
"""Implementations of feature tracking methods."""
from pysteps.tracking.interface import get_method
|
pySTEPS/pysteps
|
pysteps/tracking/__init__.py
|
Python
|
bsd-3-clause
| 126
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def lowestCommonAncestor(self, root, p, q):
"""
:type root: TreeNode
:type p: TreeNode
:type q: TreeNode
:rtype: TreeNode
"""
while (root.val - p.val) * (root.val - q.val) > 0:
root = root.left if root.val > p.val else root.right
return root
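# --- Illustrative usage, not part of the original solution ---
# A minimal sketch: a tiny stand-in TreeNode (the class above is only given in
# a comment) and a three-node BST where the LCA of 2 and 8 is the root 6.
if __name__ == '__main__':
    class TreeNode(object):
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None
    root = TreeNode(6)
    root.left, root.right = TreeNode(2), TreeNode(8)
    assert Solution().lowestCommonAncestor(root, root.left, root.right).val == 6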
|
zqfan/leetcode
|
algorithms/235. Lowest Common Ancestor of a Binary Search Tree/solution.py
|
Python
|
gpl-3.0
| 517
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...compatibility import StringIO
from ...styles import Styles
class TestWriteCellStyles(unittest.TestCase):
"""
Test the Styles _write_cell_styles() method.
"""
def setUp(self):
self.fh = StringIO()
self.styles = Styles()
self.styles._set_filehandle(self.fh)
def test_write_cell_styles(self):
"""Test the _write_cell_styles() method"""
self.styles._write_cell_styles()
exp = """<cellStyles count="1"><cellStyle name="Normal" xfId="0" builtinId="0"/></cellStyles>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
|
liukaijv/XlsxWriter
|
xlsxwriter/test/styles/test_write_cell_styles.py
|
Python
|
bsd-2-clause
| 804
|
from django.core.urlresolvers import reverse
from rest_framework.test import APITestCase, APIClient
class SnippetTests(APITestCase):
def test_create_invalid_snippet(self):
self.client = APIClient()
self.client.login(username="rodrigo", password="hola123")
url = reverse('snippet-list')
data = {'snippet': '5///5'}
response = self.client.post(url, data, format='json')
msg = response.data['snippet'][0]
self.assertEquals(response.status_code, 400)
self.assertEquals(msg, 'Snippet is not valid python code')
|
rodrigorn/rest_demo
|
restapp/tests.py
|
Python
|
mit
| 577
|
#!/bin/env python
# -*- coding: utf-8 -*-
from __init__ import app
app.run(debug = True)
|
tieugene/uvedomlenie
|
run.py
|
Python
|
gpl-3.0
| 90
|
"""Classes that determine convergence of an algorithm run
based on population stagnation or max raw score reached"""
class Convergence(object):
"""
Base class for all convergence object to be based on.
It is necessary to supply the population instance, to be
able to obtain current and former populations.
"""
def __init__(self, population_instance):
self.pop = population_instance
self.pops = {}
def converged(self):
"""This function is called to find out if the algorithm
run has converged, it should return True or False.
Overwrite this in the inherited class."""
raise NotImplementedError
def populate_pops(self, to_gen):
"""Populate the pops dictionary with how the population
looked after i number of generations."""
for i in range(to_gen):
if i not in self.pops.keys():
self.pops[i] = self.pop.get_population_after_generation(i)
class GenerationRepetitionConvergence(Convergence):
"""Returns True if the latest finished population is stagnated for
number_of_generations.
Parameters:
number_of_generations: int
How many generations need to be equal before convergence.
number_of_individuals: int
How many of the fittest individuals should be included in the
convergence test. Default is -1 meaning all in the population.
max_generations: int
The maximum number of generations the GA is allowed to run.
Default is indefinite.
"""
def __init__(self, population_instance, number_of_generations,
number_of_individuals=-1, max_generations=100000000):
Convergence.__init__(self, population_instance)
self.numgens = number_of_generations
self.numindis = number_of_individuals
self.maxgen = max_generations
def converged(self):
size = self.pop.pop_size
cur_gen_num = self.pop.dc.get_generation_number(size)
if cur_gen_num >= self.maxgen:
return True
if cur_gen_num <= 1:
return False
cur_pop = self.pop.get_current_population()
newest = max([i.info['key_value_pairs']['generation']
for i in cur_pop[:self.numindis]])
if newest + self.numgens > cur_gen_num:
return False
self.populate_pops(cur_gen_num)
duplicate_gens = 1
latest_pop = self.pops[cur_gen_num - 1]
for i in range(cur_gen_num - 2, -1, -1):
test_pop = self.pops[i]
if test_pop[:self.numindis] == latest_pop[:self.numindis]:
duplicate_gens += 1
if duplicate_gens >= self.numgens:
return True
return False
class RawScoreConvergence(Convergence):
"""Returns True if the supplied max_raw_score has been reached"""
def __init__(self, population_instance, max_raw_score, eps=1e-3):
Convergence.__init__(self, population_instance)
self.max_raw_score = max_raw_score
self.eps = eps
def converged(self):
cur_pop = self.pop.get_current_population()
if abs(cur_pop[0].get_raw_score() - self.max_raw_score) <= self.eps:
return True
return False
class NeverConvergence(object):
"""Test class that never converges."""
def __init__(self):
pass
def converged(self):
return False
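# --- Illustrative usage, not part of the original module ---
# A minimal sketch with stand-in objects: RawScoreConvergence only calls
# get_current_population() and get_raw_score(), so simple fakes suffice here.
if __name__ == '__main__':
    class _FakeCandidate(object):
        def __init__(self, raw_score):
            self.raw_score = raw_score
        def get_raw_score(self):
            return self.raw_score
    class _FakePopulation(object):
        def get_current_population(self):
            return [_FakeCandidate(4.9995), _FakeCandidate(3.2)]
    conv = RawScoreConvergence(_FakePopulation(), max_raw_score=5.0)
    assert conv.converged()  # |4.9995 - 5.0| <= 1e-3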
|
suttond/MODOI
|
ase/ga/convergence.py
|
Python
|
lgpl-3.0
| 3,442
|
#!/usr/bin/env python
import re
import string
from collections import Counter, OrderedDict
checksum_pattern = re.compile(r'(?P<name>[a-z\-]+)(?P<id>\d+)\[(?P<checksum>[a-z]+)\]')
def is_real_room(s):
match = checksum_pattern.match(s)
d = match.groupdict()
name = d['name']
name = name.replace('-', '')
counter = Counter(name)
checksum_letters = d['checksum']
for (w, count), chk in zip(sorted(counter.items(), key=lambda x: (-x[1], x[0])), checksum_letters):
if w != chk:
return False
return True
def get_checksum(s):
match = checksum_pattern.match(s)
d = match.groupdict()
return int(d['id'])
def sum_of_sector_ids(lines):
s = 0
for line in lines:
if is_real_room(line):
s += get_checksum(line)
return s
def shift_letter(letter, amount):
lowercase_characters = list(string.ascii_lowercase)
return lowercase_characters[(lowercase_characters.index(letter) + amount) % 26]
def decode_shift_cipher(s):
chk = get_checksum(s)
match = checksum_pattern.match(s)
d = match.groupdict()
name = d['name']
name = name.replace('-', ' ').rstrip()
decoded_name = ''
for c in list(name):
if c != ' ':
decoded_name += shift_letter(c, chk)
else:
decoded_name += ' '
return decoded_name
def main():
with open('input.txt') as f:
lines = f.readlines()
print('Answer #1={}'.format(sum_of_sector_ids(lines)))
with open('input.txt') as f:
lines = f.readlines()
for i in lines:
print('{} -> {}'.format(i.strip(), decode_shift_cipher(i)))
if __name__ == '__main__':
main()
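# Illustrative values (not part of the original script); the encrypted name is
# the well-known Advent of Code 2016 day 4 sample, with a dummy "[zimth]"
# checksum appended only so checksum_pattern matches:
#   is_real_room('aaaaa-bbb-z-y-x-123[abxyz]')            -> True
#   get_checksum('aaaaa-bbb-z-y-x-123[abxyz]')            -> 123
#   decode_shift_cipher('qzmt-zixmtkozy-ivhz-343[zimth]') -> 'very encrypted name'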
|
lnunno/advent-of-code-2016
|
day04/checksum.py
|
Python
|
mit
| 1,697
|
from collections import namedtuple
from blocks.models import Toolbox
from practice.models import StudentModel
from practice.models import StudentTaskInfoModel
from practice.services.statistics_service import percentil as compute_percentil
def get_statistics_for_user(user):
student = StudentModel.objects.get_or_create(user=user)[0]
return get_statistics_for_student(student)
def get_statistics_for_student(student):
StudentStatistics = namedtuple('StudentStatistics',
['blocks', 'finished_tasks'])
statistics = StudentStatistics(
blocks=get_blocks(student),
finished_tasks=get_finished_tasks(student))
return statistics
StudentBlockInfo = namedtuple('StudentBlockInfo',
'identifier level purchased active credits credits_paid')
def get_blocks(student):
# NOTE: current model is not very suitable for this query and should be
# changed (problems: we need to recalculate a lot of information and some
    # extension to toolbox model or credits handling could break the current
# logic)
block_infos = []
for toolbox in Toolbox.objects.all():
for block in toolbox.get_new_blocks():
purchased = (toolbox.level < student.get_level())
active = (toolbox.level == student.get_level())
if purchased:
credits_paid = toolbox.credits
elif active:
credits_paid = student.free_credits
else:
credits_paid = 0
block_info = StudentBlockInfo(
identifier=block.identifier,
level=toolbox.level,
purchased=purchased,
active=active,
credits=toolbox.credits,
credits_paid=credits_paid)
block_infos.append(block_info)
return block_infos
def get_finished_tasks(student):
task_infos = StudentTaskInfoModel.objects.filter(student=student)
finished_tasks_instances = [t_info.last_solved_instance
for t_info in task_infos if t_info.is_solved()]
sorted_finished_tasks_instances = sorted(finished_tasks_instances,
key=lambda instance: (instance.task.get_level(), instance.task.pk))
finished_tasks = [FinishedTask.from_task_instance(task_instance)
for task_instance in sorted_finished_tasks_instances]
return finished_tasks
class FinishedTask(namedtuple('FinishedTaskTuple',
'task_id title credits concepts time percentil flow')):
@staticmethod
def from_task_instance(instance):
task = instance.task
finished_task = FinishedTask(
task_id=task.pk,
title=task.title,
credits=task.get_level(), # hack -> TODO: synchronize with computing credits
concepts=task.get_programming_concepts(),
time=instance.time_spent, # fake -> TODO: compute real time
percentil=compute_percentil(instance),
flow=instance.get_reported_flow_key())
return finished_task
|
MatheusArrudaLab/flocos
|
stats/services/student_statistics.py
|
Python
|
gpl-2.0
| 3,092
|
#!/usr/bin/env python
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fastboot debugging binary.
Call it similar to how you call android's fastboot, but this only accepts usb
paths and no serials.
"""
import sys
import gflags
import progressbar
import common_cli
import fastboot
gflags.ADOPT_module_key_flags(common_cli)
FLAGS = gflags.FLAGS
def KwargHandler(kwargs, argspec):
if 'info_cb' in argspec.args:
# Use an unbuffered version of stdout.
def InfoCb(message):
if not message.message:
return
sys.stdout.write('%s: %s\n' % (message.header, message.message))
sys.stdout.flush()
kwargs['info_cb'] = InfoCb
if 'progress_callback' in argspec.args:
    bar = progressbar.ProgressBar(
widgets=[progressbar.Bar(), progressbar.Percentage()])
bar.start()
def SetProgress(current, total):
bar.update(current / total * 100.0)
if current == total:
bar.finish()
kwargs['progress_callback'] = SetProgress
def main(argv):
common_cli.StartCli(
argv, fastboot.FastbootCommands.ConnectDevice,
list_callback=fastboot.FastbootCommands.Devices,
kwarg_callback=KwargHandler)
if __name__ == '__main__':
main(FLAGS(sys.argv))
|
erock2112/python-adb
|
adb/fastboot_debug.py
|
Python
|
apache-2.0
| 1,825
|
import MDAnalysis
import matplotlib.pyplot as plt
import numpy as np
from MDAnalysis.analysis.align import *
from MDAnalysis.analysis.rms import rmsd
def ligRMSD(u,ref):
"""
    Compute the RMSD of the ligand over the trajectory and plot it.
    :input
    1) trajectory Universe
    2) reference Universe
    :return
    1) matplotlib Axes object
    2) numpy array of RMSD data
"""
RMSD_lig = []
ligand = u.select_atoms("(resid 142:146) and not name H*") ## include selection based on user description
#current = u.select_atoms("segname BGLC and not name H*")
reference = ref.select_atoms("(resid 142:146) and not name H*")
for ts in u.trajectory:
A = ligand.coordinates()
B = reference.coordinates()
C = rmsd(A,B)
RMSD_lig.append((u.trajectory.frame, C))
RMSD_lig = np.array(RMSD_lig)
#print RMSD_lig
ax = plt.subplot(111)
ax.plot(RMSD_lig[:,0], RMSD_lig[:,1], 'r--', lw=2, label=r"$R_G$")
ax.set_xlabel("Frame")
ax.set_ylabel(r"RMSD of ligand ($\AA$)")
#ax.figure.savefig("RMSD_ligand.pdf")
#plt.draw()
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels, loc = 'lower left')
return ax, RMSD_lig
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='This function will plot RMSD for a given universe (trajectory).')
parser.add_argument('-j', '--jobname', help='Enter your job name and it will appear as first coloumn in the result file', default='Test')
parser.add_argument('-trj', '--trajectory', help='Filename of Trajecotry file.', required=True)
parser.add_argument('-top', '--topology', help='Filename of psf/topology file', required=True)
args = parser.parse_args()
u = MDAnalysis.Universe(args.topology, args.trajectory)
ref = MDAnalysis.Universe(args.topology, args.trajectory)
ligandRMSD = []
fig,ligandRMSD = ligRMSD(u,ref)
#print caRMSD
np.savetxt(args.jobname+"_ligRMSD.data", ligandRMSD)
fig.figure.savefig(args.jobname+"_ligRMSD.pdf")
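# Illustrative invocation (file names are placeholders, not from the original):
#   python SimRepAnaligRMSD.py -top system.psf -trj trajectory.dcd -j run1
# which writes run1_ligRMSD.data and saves the plot as run1_ligRMSD.pdf.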
|
mktumbi/SimAnaRep
|
SimRepAnaligRMSD.py
|
Python
|
gpl-2.0
| 2,124
|
from django.conf import settings
from django.test import TestCase
from haystack import connections, connection_router
from haystack.query import SearchQuerySet
from haystack.utils.loading import UnifiedIndex
from discovery.models import Foo
from discovery.search_indexes import FooIndex, BarIndex
class ManualDiscoveryTestCase(TestCase):
def test_discovery(self):
old_ui = connections['default'].get_unified_index()
connections['default']._index = UnifiedIndex()
ui = connections['default'].get_unified_index()
self.assertEqual(len(ui.get_indexed_models()), 2)
ui.build(indexes=[FooIndex()])
self.assertEqual(len(ui.get_indexed_models()), 1)
ui.build(indexes=[])
self.assertEqual(len(ui.get_indexed_models()), 0)
connections['default']._index = old_ui
class AutomaticDiscoveryTestCase(TestCase):
def test_discovery(self):
old_ui = connections['default'].get_unified_index()
connections['default']._index = UnifiedIndex()
ui = connections['default'].get_unified_index()
self.assertEqual(len(ui.get_indexed_models()), 2)
# Test exclusions.
ui.excluded_indexes = ['discovery.search_indexes.BarIndex']
ui.build()
self.assertEqual(len(ui.get_indexed_models()), 1)
ui.excluded_indexes = ['discovery.search_indexes.BarIndex', 'discovery.search_indexes.FooIndex']
ui.build()
self.assertEqual(len(ui.get_indexed_models()), 0)
connections['default']._index = old_ui
def test_signal_setup_handling(self):
foo_1 = Foo.objects.create(
title='chekin sigalz',
body='stuff'
)
fi = connections['default'].get_unified_index().get_index(Foo)
fi.clear()
fi.update()
sqs = SearchQuerySet()
existing_foo = sqs.filter(id='discovery.foo.1')[0]
self.assertEqual(existing_foo.text, u'stuff')
foo_1 = Foo.objects.get(pk=1)
foo_1.title = 'Checking signals'
foo_1.body = 'Stuff.'
# This save should trigger an update.
foo_1.save()
sqs = SearchQuerySet()
new_foo = sqs.filter(id='discovery.foo.1')[0]
self.assertEqual(new_foo.text, u'Stuff.')
|
gregplaysguitar/django-haystack
|
tests/discovery/tests.py
|
Python
|
bsd-3-clause
| 2,353
|
"""Python script to measure temperature, humidity and CO2 concentration with a Raspberry Pi.
An SHT21 sensor and an MCP3424 analog-to-digital converter are connected to the Raspberry Pi's GPIO pins, i.e. to the I2C bus.
The BayEOSGatewayClient class is extended to transfer data to the BayEOSGateway.
The sender runs in a separate process. Origin frames are sent to distinguish CO2 chambers."""
import sys, numpy # apt-get install python-numpy
from scipy import stats # apt-get install python-scipy
from time import sleep, time
from thread import start_new_thread
from bayeosgatewayclient import BayEOSGatewayClient, bayeos_confparser
from gpio import GPIO
from i2c import I2C
from sht21 import SHT21
from mcp3424 import MCP3424
class RaspberryPiClient(BayEOSGatewayClient):
"""Raspberry Pi client class."""
def init_writer(self):
"""Overwrites the init_writer() method of the BayEOSGatewayClient class."""
# gpio pins
ADDR_PINS = [11, 12, 13, 15, 16, 18] # GPIO 17, 18, 27, 22, 23, 24
DATA_PIN = 24 # GPIO 8
EN_PIN = 26 # GPIO 7
self.gpio = GPIO(ADDR_PINS, EN_PIN, DATA_PIN)
self.init_sensors()
self.addr = 1 # current address
def read_data(self):
"""Overwrites the read_data() method of the BayEOSGatewayClient class."""
# address 0 is reserved for flushing with air
self.gpio.set_addr(0) # set flushing address
sleep(.6) # flush for 60 seconds
self.gpio.reset() # stop flushing
self.gpio.set_addr(self.addr) # start measuring wait 60 seconds, 240 measure
measurement_data = self.measure(3)
self.gpio.reset()
return measurement_data
def save_data(self, values=[], origin='CO2_Chambers'):
"""Overwrites the save_data() method of the BayEOSGatewayClient class."""
self.writer.save(values, origin='RaspberryPi-Chamber-' + str(self.addr))
self.writer.flush()
print 'saved data: ' + str(values)
self.addr += 1
if self.addr > 15:
self.addr = 1
def init_sensors(self):
"""Initializes the I2C Bus including the SHT21 and MCP3424 sensors."""
try:
self.i2c = I2C()
self.sht21 = SHT21(1)
self.mcp3424 = MCP3424(self.i2c.get_smbus())
except IOError as err:
sys.stderr.write('I2C Connection Error: ' + str(err) + '. This must be run as root. Did you use the right device number?')
def measure(self, seconds=10):
"""Measures temperature, humidity and CO2 concentration.
@param seconds: how long should be measured
@return statistically calculated parameters
"""
measured_seconds = []
temp = []
hum = []
co2 = []
start_time = time()
for i in range(0, seconds):
start_new_thread(temp.append, (self.sht21.read_temperature(),))
start_new_thread(hum.append, (self.sht21.read_humidity(),))
start_new_thread(co2.append, (self.mcp3424.read_voltage(1),))
measured_seconds.append(time())
sleep(start_time + i - time() + 1) # to keep in time
mean_temp = numpy.mean(temp)
var_temp = numpy.var(temp)
mean_hum = numpy.mean(hum)
var_hum = numpy.var(hum)
lin_model = stats.linregress(measured_seconds, co2)
slope = lin_model[0]
intercept = lin_model[1]
r_squared = lin_model[2]*lin_model[2]
slope_err = lin_model[4]
return [mean_temp, var_temp, mean_hum, var_hum, slope, intercept, r_squared, slope_err]
OPTIONS = bayeos_confparser('../config/bayeosraspberrypi.ini')
client = RaspberryPiClient(OPTIONS['name'], OPTIONS)
client.run(thread=False) # sender runs in a separate process
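# Illustrative sketch with made-up numbers (not from the original script):
# measure() extracts the CO2 trend from scipy's linregress exactly like this --
#   from scipy import stats
#   lin_model = stats.linregress([0, 1, 2, 3], [400.0, 402.1, 403.9, 406.2])
#   slope, intercept = lin_model[0], lin_model[1]
#   r_squared = lin_model[2] * lin_model[2]
#   slope_err = lin_model[4]  # standard error of the slope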
|
kleebaum/bayeosraspberrypi
|
bayeosraspberrypi/bayeosraspberrypiclient.py
|
Python
|
gpl-2.0
| 3,774
|
__all__ = ('AuthenticationError', 'TwoFactorError', 'UnexpectedError')
class AuthenticationError(Exception):
pass
class TwoFactorError(AuthenticationError):
pass
class UnexpectedError(RuntimeError):
pass
|
Tatsh/youtube-unofficial
|
youtube_unofficial/exceptions.py
|
Python
|
mit
| 222
|
# Copyright 2014 Mark Chilenski
# This program is distributed under the terms of the GNU General Public License (GPL).
# Refer to http://www.gnu.org/licenses/gpl.txt
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Contains exceptions specific to the :py:mod:`gptools` package.
"""
from __future__ import division
class GPArgumentError(Exception):
"""Exception class raised when an incorrect combination of keyword arguments is given.
"""
pass
class GPImpossibleParamsError(Exception):
"""Exception class raised when parameters are not possible.
"""
pass
|
markchil/gptools
|
gptools/error_handling.py
|
Python
|
gpl-3.0
| 1,176
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.net.proto import ProtocolBuffer
import array
import dummy_thread as thread
__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
unusednames=printElemNumber,debug_strs no-special"""
if hasattr(ProtocolBuffer, 'ExtendableProtocolMessage'):
_extension_runtime = True
_ExtendableProtocolMessage = ProtocolBuffer.ExtendableProtocolMessage
else:
_extension_runtime = False
_ExtendableProtocolMessage = ProtocolBuffer.ProtocolMessage
from google.appengine.datastore.entity_pb import EntityProto
class SchemaEntry(ProtocolBuffer.ProtocolMessage):
STRING = 1
INT32 = 2
BOOLEAN = 3
DOUBLE = 4
POINT = 5
USER = 6
REFERENCE = 7
_Type_NAMES = {
1: "STRING",
2: "INT32",
3: "BOOLEAN",
4: "DOUBLE",
5: "POINT",
6: "USER",
7: "REFERENCE",
}
def Type_Name(cls, x): return cls._Type_NAMES.get(x, "")
Type_Name = classmethod(Type_Name)
has_name_ = 0
name_ = ""
has_type_ = 0
type_ = 0
has_meaning_ = 0
meaning_ = 0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def name(self): return self.name_
def set_name(self, x):
self.has_name_ = 1
self.name_ = x
def clear_name(self):
if self.has_name_:
self.has_name_ = 0
self.name_ = ""
def has_name(self): return self.has_name_
def type(self): return self.type_
def set_type(self, x):
self.has_type_ = 1
self.type_ = x
def clear_type(self):
if self.has_type_:
self.has_type_ = 0
self.type_ = 0
def has_type(self): return self.has_type_
def meaning(self): return self.meaning_
def set_meaning(self, x):
self.has_meaning_ = 1
self.meaning_ = x
def clear_meaning(self):
if self.has_meaning_:
self.has_meaning_ = 0
self.meaning_ = 0
def has_meaning(self): return self.has_meaning_
def MergeFrom(self, x):
assert x is not self
if (x.has_name()): self.set_name(x.name())
if (x.has_type()): self.set_type(x.type())
if (x.has_meaning()): self.set_meaning(x.meaning())
def Equals(self, x):
if x is self: return 1
if self.has_name_ != x.has_name_: return 0
if self.has_name_ and self.name_ != x.name_: return 0
if self.has_type_ != x.has_type_: return 0
if self.has_type_ and self.type_ != x.type_: return 0
if self.has_meaning_ != x.has_meaning_: return 0
if self.has_meaning_ and self.meaning_ != x.meaning_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: name not set.')
if (not self.has_type_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: type not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.name_))
n += self.lengthVarInt64(self.type_)
if (self.has_meaning_): n += 1 + self.lengthVarInt64(self.meaning_)
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_name_):
n += 1
n += self.lengthString(len(self.name_))
if (self.has_type_):
n += 1
n += self.lengthVarInt64(self.type_)
if (self.has_meaning_): n += 1 + self.lengthVarInt64(self.meaning_)
return n
def Clear(self):
self.clear_name()
self.clear_type()
self.clear_meaning()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.name_)
out.putVarInt32(16)
out.putVarInt32(self.type_)
if (self.has_meaning_):
out.putVarInt32(24)
out.putVarInt32(self.meaning_)
def OutputPartial(self, out):
if (self.has_name_):
out.putVarInt32(10)
out.putPrefixedString(self.name_)
if (self.has_type_):
out.putVarInt32(16)
out.putVarInt32(self.type_)
if (self.has_meaning_):
out.putVarInt32(24)
out.putVarInt32(self.meaning_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_name(d.getPrefixedString())
continue
if tt == 16:
self.set_type(d.getVarInt32())
continue
if tt == 24:
self.set_meaning(d.getVarInt32())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_name_: res+=prefix+("name: %s\n" % self.DebugFormatString(self.name_))
if self.has_type_: res+=prefix+("type: %s\n" % self.DebugFormatInt32(self.type_))
if self.has_meaning_: res+=prefix+("meaning: %s\n" % self.DebugFormatInt32(self.meaning_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kname = 1
ktype = 2
kmeaning = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "name",
2: "type",
3: "meaning",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.NUMERIC,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.prospective_search.SchemaEntry'
class SubscribeRequest(ProtocolBuffer.ProtocolMessage):
has_topic_ = 0
topic_ = ""
has_sub_id_ = 0
sub_id_ = ""
has_lease_duration_sec_ = 0
lease_duration_sec_ = 0.0
has_vanilla_query_ = 0
vanilla_query_ = ""
def __init__(self, contents=None):
self.schema_entry_ = []
if contents is not None: self.MergeFromString(contents)
def topic(self): return self.topic_
def set_topic(self, x):
self.has_topic_ = 1
self.topic_ = x
def clear_topic(self):
if self.has_topic_:
self.has_topic_ = 0
self.topic_ = ""
def has_topic(self): return self.has_topic_
def sub_id(self): return self.sub_id_
def set_sub_id(self, x):
self.has_sub_id_ = 1
self.sub_id_ = x
def clear_sub_id(self):
if self.has_sub_id_:
self.has_sub_id_ = 0
self.sub_id_ = ""
def has_sub_id(self): return self.has_sub_id_
def lease_duration_sec(self): return self.lease_duration_sec_
def set_lease_duration_sec(self, x):
self.has_lease_duration_sec_ = 1
self.lease_duration_sec_ = x
def clear_lease_duration_sec(self):
if self.has_lease_duration_sec_:
self.has_lease_duration_sec_ = 0
self.lease_duration_sec_ = 0.0
def has_lease_duration_sec(self): return self.has_lease_duration_sec_
def vanilla_query(self): return self.vanilla_query_
def set_vanilla_query(self, x):
self.has_vanilla_query_ = 1
self.vanilla_query_ = x
def clear_vanilla_query(self):
if self.has_vanilla_query_:
self.has_vanilla_query_ = 0
self.vanilla_query_ = ""
def has_vanilla_query(self): return self.has_vanilla_query_
def schema_entry_size(self): return len(self.schema_entry_)
def schema_entry_list(self): return self.schema_entry_
def schema_entry(self, i):
return self.schema_entry_[i]
def mutable_schema_entry(self, i):
return self.schema_entry_[i]
def add_schema_entry(self):
x = SchemaEntry()
self.schema_entry_.append(x)
return x
def clear_schema_entry(self):
self.schema_entry_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_topic()): self.set_topic(x.topic())
if (x.has_sub_id()): self.set_sub_id(x.sub_id())
if (x.has_lease_duration_sec()): self.set_lease_duration_sec(x.lease_duration_sec())
if (x.has_vanilla_query()): self.set_vanilla_query(x.vanilla_query())
for i in xrange(x.schema_entry_size()): self.add_schema_entry().CopyFrom(x.schema_entry(i))
def Equals(self, x):
if x is self: return 1
if self.has_topic_ != x.has_topic_: return 0
if self.has_topic_ and self.topic_ != x.topic_: return 0
if self.has_sub_id_ != x.has_sub_id_: return 0
if self.has_sub_id_ and self.sub_id_ != x.sub_id_: return 0
if self.has_lease_duration_sec_ != x.has_lease_duration_sec_: return 0
if self.has_lease_duration_sec_ and self.lease_duration_sec_ != x.lease_duration_sec_: return 0
if self.has_vanilla_query_ != x.has_vanilla_query_: return 0
if self.has_vanilla_query_ and self.vanilla_query_ != x.vanilla_query_: return 0
if len(self.schema_entry_) != len(x.schema_entry_): return 0
for e1, e2 in zip(self.schema_entry_, x.schema_entry_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_topic_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: topic not set.')
if (not self.has_sub_id_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: sub_id not set.')
if (not self.has_lease_duration_sec_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: lease_duration_sec not set.')
if (not self.has_vanilla_query_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: vanilla_query not set.')
for p in self.schema_entry_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.topic_))
n += self.lengthString(len(self.sub_id_))
n += self.lengthString(len(self.vanilla_query_))
n += 1 * len(self.schema_entry_)
for i in xrange(len(self.schema_entry_)): n += self.lengthString(self.schema_entry_[i].ByteSize())
return n + 12
def ByteSizePartial(self):
n = 0
if (self.has_topic_):
n += 1
n += self.lengthString(len(self.topic_))
if (self.has_sub_id_):
n += 1
n += self.lengthString(len(self.sub_id_))
if (self.has_lease_duration_sec_):
n += 9
if (self.has_vanilla_query_):
n += 1
n += self.lengthString(len(self.vanilla_query_))
n += 1 * len(self.schema_entry_)
for i in xrange(len(self.schema_entry_)): n += self.lengthString(self.schema_entry_[i].ByteSizePartial())
return n
def Clear(self):
self.clear_topic()
self.clear_sub_id()
self.clear_lease_duration_sec()
self.clear_vanilla_query()
self.clear_schema_entry()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.topic_)
out.putVarInt32(18)
out.putPrefixedString(self.sub_id_)
out.putVarInt32(25)
out.putDouble(self.lease_duration_sec_)
out.putVarInt32(34)
out.putPrefixedString(self.vanilla_query_)
for i in xrange(len(self.schema_entry_)):
out.putVarInt32(42)
out.putVarInt32(self.schema_entry_[i].ByteSize())
self.schema_entry_[i].OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_topic_):
out.putVarInt32(10)
out.putPrefixedString(self.topic_)
if (self.has_sub_id_):
out.putVarInt32(18)
out.putPrefixedString(self.sub_id_)
if (self.has_lease_duration_sec_):
out.putVarInt32(25)
out.putDouble(self.lease_duration_sec_)
if (self.has_vanilla_query_):
out.putVarInt32(34)
out.putPrefixedString(self.vanilla_query_)
for i in xrange(len(self.schema_entry_)):
out.putVarInt32(42)
out.putVarInt32(self.schema_entry_[i].ByteSizePartial())
self.schema_entry_[i].OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_topic(d.getPrefixedString())
continue
if tt == 18:
self.set_sub_id(d.getPrefixedString())
continue
if tt == 25:
self.set_lease_duration_sec(d.getDouble())
continue
if tt == 34:
self.set_vanilla_query(d.getPrefixedString())
continue
if tt == 42:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_schema_entry().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_topic_: res+=prefix+("topic: %s\n" % self.DebugFormatString(self.topic_))
if self.has_sub_id_: res+=prefix+("sub_id: %s\n" % self.DebugFormatString(self.sub_id_))
if self.has_lease_duration_sec_: res+=prefix+("lease_duration_sec: %s\n" % self.DebugFormat(self.lease_duration_sec_))
if self.has_vanilla_query_: res+=prefix+("vanilla_query: %s\n" % self.DebugFormatString(self.vanilla_query_))
cnt=0
for e in self.schema_entry_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("schema_entry%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
ktopic = 1
ksub_id = 2
klease_duration_sec = 3
kvanilla_query = 4
kschema_entry = 5
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "topic",
2: "sub_id",
3: "lease_duration_sec",
4: "vanilla_query",
5: "schema_entry",
}, 5)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.DOUBLE,
4: ProtocolBuffer.Encoder.STRING,
5: ProtocolBuffer.Encoder.STRING,
}, 5, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.prospective_search.SubscribeRequest'
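# Illustrative sketch (not part of the generated code): building a request with
# the accessors defined above. Encode() is assumed to be inherited from the
# ProtocolBuffer.ProtocolMessage base class.
#   req = SubscribeRequest()
#   req.set_topic('documents')
#   req.set_sub_id('sub-1')
#   req.set_lease_duration_sec(3600.0)
#   req.set_vanilla_query('title:hello')
#   entry = req.add_schema_entry()
#   entry.set_name('title')
#   entry.set_type(SchemaEntry.STRING)
#   assert req.IsInitialized()
#   payload = req.Encode()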
class SubscribeResponse(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
pass
if contents is not None: self.MergeFromString(contents)
def MergeFrom(self, x):
assert x is not self
def Equals(self, x):
if x is self: return 1
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
return n
def ByteSizePartial(self):
n = 0
return n
def Clear(self):
pass
def OutputUnchecked(self, out):
pass
def OutputPartial(self, out):
pass
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
}, 0)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
}, 0, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.prospective_search.SubscribeResponse'
class UnsubscribeRequest(ProtocolBuffer.ProtocolMessage):
has_topic_ = 0
topic_ = ""
has_sub_id_ = 0
sub_id_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def topic(self): return self.topic_
def set_topic(self, x):
self.has_topic_ = 1
self.topic_ = x
def clear_topic(self):
if self.has_topic_:
self.has_topic_ = 0
self.topic_ = ""
def has_topic(self): return self.has_topic_
def sub_id(self): return self.sub_id_
def set_sub_id(self, x):
self.has_sub_id_ = 1
self.sub_id_ = x
def clear_sub_id(self):
if self.has_sub_id_:
self.has_sub_id_ = 0
self.sub_id_ = ""
def has_sub_id(self): return self.has_sub_id_
def MergeFrom(self, x):
assert x is not self
if (x.has_topic()): self.set_topic(x.topic())
if (x.has_sub_id()): self.set_sub_id(x.sub_id())
def Equals(self, x):
if x is self: return 1
if self.has_topic_ != x.has_topic_: return 0
if self.has_topic_ and self.topic_ != x.topic_: return 0
if self.has_sub_id_ != x.has_sub_id_: return 0
if self.has_sub_id_ and self.sub_id_ != x.sub_id_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_topic_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: topic not set.')
if (not self.has_sub_id_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: sub_id not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.topic_))
n += self.lengthString(len(self.sub_id_))
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_topic_):
n += 1
n += self.lengthString(len(self.topic_))
if (self.has_sub_id_):
n += 1
n += self.lengthString(len(self.sub_id_))
return n
def Clear(self):
self.clear_topic()
self.clear_sub_id()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.topic_)
out.putVarInt32(18)
out.putPrefixedString(self.sub_id_)
def OutputPartial(self, out):
if (self.has_topic_):
out.putVarInt32(10)
out.putPrefixedString(self.topic_)
if (self.has_sub_id_):
out.putVarInt32(18)
out.putPrefixedString(self.sub_id_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_topic(d.getPrefixedString())
continue
if tt == 18:
self.set_sub_id(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_topic_: res+=prefix+("topic: %s\n" % self.DebugFormatString(self.topic_))
if self.has_sub_id_: res+=prefix+("sub_id: %s\n" % self.DebugFormatString(self.sub_id_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
ktopic = 1
ksub_id = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "topic",
2: "sub_id",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.prospective_search.UnsubscribeRequest'
class UnsubscribeResponse(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
pass
if contents is not None: self.MergeFromString(contents)
def MergeFrom(self, x):
assert x is not self
def Equals(self, x):
if x is self: return 1
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
return n
def ByteSizePartial(self):
n = 0
return n
def Clear(self):
pass
def OutputUnchecked(self, out):
pass
def OutputPartial(self, out):
pass
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
}, 0)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
}, 0, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.prospective_search.UnsubscribeResponse'
class SubscriptionRecord(ProtocolBuffer.ProtocolMessage):
OK = 0
PENDING = 1
ERROR = 2
_State_NAMES = {
0: "OK",
1: "PENDING",
2: "ERROR",
}
def State_Name(cls, x): return cls._State_NAMES.get(x, "")
State_Name = classmethod(State_Name)
has_id_ = 0
id_ = ""
has_vanilla_query_ = 0
vanilla_query_ = ""
has_expiration_time_sec_ = 0
expiration_time_sec_ = 0.0
has_state_ = 0
state_ = 0
has_error_message_ = 0
error_message_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def id(self): return self.id_
def set_id(self, x):
self.has_id_ = 1
self.id_ = x
def clear_id(self):
if self.has_id_:
self.has_id_ = 0
self.id_ = ""
def has_id(self): return self.has_id_
def vanilla_query(self): return self.vanilla_query_
def set_vanilla_query(self, x):
self.has_vanilla_query_ = 1
self.vanilla_query_ = x
def clear_vanilla_query(self):
if self.has_vanilla_query_:
self.has_vanilla_query_ = 0
self.vanilla_query_ = ""
def has_vanilla_query(self): return self.has_vanilla_query_
def expiration_time_sec(self): return self.expiration_time_sec_
def set_expiration_time_sec(self, x):
self.has_expiration_time_sec_ = 1
self.expiration_time_sec_ = x
def clear_expiration_time_sec(self):
if self.has_expiration_time_sec_:
self.has_expiration_time_sec_ = 0
self.expiration_time_sec_ = 0.0
def has_expiration_time_sec(self): return self.has_expiration_time_sec_
def state(self): return self.state_
def set_state(self, x):
self.has_state_ = 1
self.state_ = x
def clear_state(self):
if self.has_state_:
self.has_state_ = 0
self.state_ = 0
def has_state(self): return self.has_state_
def error_message(self): return self.error_message_
def set_error_message(self, x):
self.has_error_message_ = 1
self.error_message_ = x
def clear_error_message(self):
if self.has_error_message_:
self.has_error_message_ = 0
self.error_message_ = ""
def has_error_message(self): return self.has_error_message_
def MergeFrom(self, x):
assert x is not self
if (x.has_id()): self.set_id(x.id())
if (x.has_vanilla_query()): self.set_vanilla_query(x.vanilla_query())
if (x.has_expiration_time_sec()): self.set_expiration_time_sec(x.expiration_time_sec())
if (x.has_state()): self.set_state(x.state())
if (x.has_error_message()): self.set_error_message(x.error_message())
def Equals(self, x):
if x is self: return 1
if self.has_id_ != x.has_id_: return 0
if self.has_id_ and self.id_ != x.id_: return 0
if self.has_vanilla_query_ != x.has_vanilla_query_: return 0
if self.has_vanilla_query_ and self.vanilla_query_ != x.vanilla_query_: return 0
if self.has_expiration_time_sec_ != x.has_expiration_time_sec_: return 0
if self.has_expiration_time_sec_ and self.expiration_time_sec_ != x.expiration_time_sec_: return 0
if self.has_state_ != x.has_state_: return 0
if self.has_state_ and self.state_ != x.state_: return 0
if self.has_error_message_ != x.has_error_message_: return 0
if self.has_error_message_ and self.error_message_ != x.error_message_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_id_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: id not set.')
if (not self.has_vanilla_query_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: vanilla_query not set.')
if (not self.has_expiration_time_sec_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: expiration_time_sec not set.')
if (not self.has_state_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: state not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.id_))
n += self.lengthString(len(self.vanilla_query_))
n += self.lengthVarInt64(self.state_)
if (self.has_error_message_): n += 1 + self.lengthString(len(self.error_message_))
return n + 12
def ByteSizePartial(self):
n = 0
if (self.has_id_):
n += 1
n += self.lengthString(len(self.id_))
if (self.has_vanilla_query_):
n += 1
n += self.lengthString(len(self.vanilla_query_))
if (self.has_expiration_time_sec_):
n += 9
if (self.has_state_):
n += 1
n += self.lengthVarInt64(self.state_)
if (self.has_error_message_): n += 1 + self.lengthString(len(self.error_message_))
return n
def Clear(self):
self.clear_id()
self.clear_vanilla_query()
self.clear_expiration_time_sec()
self.clear_state()
self.clear_error_message()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.id_)
out.putVarInt32(18)
out.putPrefixedString(self.vanilla_query_)
out.putVarInt32(25)
out.putDouble(self.expiration_time_sec_)
out.putVarInt32(32)
out.putVarInt32(self.state_)
if (self.has_error_message_):
out.putVarInt32(42)
out.putPrefixedString(self.error_message_)
def OutputPartial(self, out):
if (self.has_id_):
out.putVarInt32(10)
out.putPrefixedString(self.id_)
if (self.has_vanilla_query_):
out.putVarInt32(18)
out.putPrefixedString(self.vanilla_query_)
if (self.has_expiration_time_sec_):
out.putVarInt32(25)
out.putDouble(self.expiration_time_sec_)
if (self.has_state_):
out.putVarInt32(32)
out.putVarInt32(self.state_)
if (self.has_error_message_):
out.putVarInt32(42)
out.putPrefixedString(self.error_message_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_id(d.getPrefixedString())
continue
if tt == 18:
self.set_vanilla_query(d.getPrefixedString())
continue
if tt == 25:
self.set_expiration_time_sec(d.getDouble())
continue
if tt == 32:
self.set_state(d.getVarInt32())
continue
if tt == 42:
self.set_error_message(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_id_: res+=prefix+("id: %s\n" % self.DebugFormatString(self.id_))
if self.has_vanilla_query_: res+=prefix+("vanilla_query: %s\n" % self.DebugFormatString(self.vanilla_query_))
if self.has_expiration_time_sec_: res+=prefix+("expiration_time_sec: %s\n" % self.DebugFormat(self.expiration_time_sec_))
if self.has_state_: res+=prefix+("state: %s\n" % self.DebugFormatInt32(self.state_))
if self.has_error_message_: res+=prefix+("error_message: %s\n" % self.DebugFormatString(self.error_message_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kid = 1
kvanilla_query = 2
kexpiration_time_sec = 3
kstate = 4
kerror_message = 5
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "id",
2: "vanilla_query",
3: "expiration_time_sec",
4: "state",
5: "error_message",
}, 5)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.DOUBLE,
4: ProtocolBuffer.Encoder.NUMERIC,
5: ProtocolBuffer.Encoder.STRING,
}, 5, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.prospective_search.SubscriptionRecord'
class ListSubscriptionsRequest(ProtocolBuffer.ProtocolMessage):
has_topic_ = 0
topic_ = ""
has_max_results_ = 0
max_results_ = 1000
has_expires_before_ = 0
expires_before_ = 0
has_subscription_id_start_ = 0
subscription_id_start_ = ""
has_app_id_ = 0
app_id_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def topic(self): return self.topic_
def set_topic(self, x):
self.has_topic_ = 1
self.topic_ = x
def clear_topic(self):
if self.has_topic_:
self.has_topic_ = 0
self.topic_ = ""
def has_topic(self): return self.has_topic_
def max_results(self): return self.max_results_
def set_max_results(self, x):
self.has_max_results_ = 1
self.max_results_ = x
def clear_max_results(self):
if self.has_max_results_:
self.has_max_results_ = 0
self.max_results_ = 1000
def has_max_results(self): return self.has_max_results_
def expires_before(self): return self.expires_before_
def set_expires_before(self, x):
self.has_expires_before_ = 1
self.expires_before_ = x
def clear_expires_before(self):
if self.has_expires_before_:
self.has_expires_before_ = 0
self.expires_before_ = 0
def has_expires_before(self): return self.has_expires_before_
def subscription_id_start(self): return self.subscription_id_start_
def set_subscription_id_start(self, x):
self.has_subscription_id_start_ = 1
self.subscription_id_start_ = x
def clear_subscription_id_start(self):
if self.has_subscription_id_start_:
self.has_subscription_id_start_ = 0
self.subscription_id_start_ = ""
def has_subscription_id_start(self): return self.has_subscription_id_start_
def app_id(self): return self.app_id_
def set_app_id(self, x):
self.has_app_id_ = 1
self.app_id_ = x
def clear_app_id(self):
if self.has_app_id_:
self.has_app_id_ = 0
self.app_id_ = ""
def has_app_id(self): return self.has_app_id_
def MergeFrom(self, x):
assert x is not self
if (x.has_topic()): self.set_topic(x.topic())
if (x.has_max_results()): self.set_max_results(x.max_results())
if (x.has_expires_before()): self.set_expires_before(x.expires_before())
if (x.has_subscription_id_start()): self.set_subscription_id_start(x.subscription_id_start())
if (x.has_app_id()): self.set_app_id(x.app_id())
def Equals(self, x):
if x is self: return 1
if self.has_topic_ != x.has_topic_: return 0
if self.has_topic_ and self.topic_ != x.topic_: return 0
if self.has_max_results_ != x.has_max_results_: return 0
if self.has_max_results_ and self.max_results_ != x.max_results_: return 0
if self.has_expires_before_ != x.has_expires_before_: return 0
if self.has_expires_before_ and self.expires_before_ != x.expires_before_: return 0
if self.has_subscription_id_start_ != x.has_subscription_id_start_: return 0
if self.has_subscription_id_start_ and self.subscription_id_start_ != x.subscription_id_start_: return 0
if self.has_app_id_ != x.has_app_id_: return 0
if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_topic_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: topic not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.topic_))
if (self.has_max_results_): n += 1 + self.lengthVarInt64(self.max_results_)
if (self.has_expires_before_): n += 1 + self.lengthVarInt64(self.expires_before_)
if (self.has_subscription_id_start_): n += 1 + self.lengthString(len(self.subscription_id_start_))
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_topic_):
n += 1
n += self.lengthString(len(self.topic_))
if (self.has_max_results_): n += 1 + self.lengthVarInt64(self.max_results_)
if (self.has_expires_before_): n += 1 + self.lengthVarInt64(self.expires_before_)
if (self.has_subscription_id_start_): n += 1 + self.lengthString(len(self.subscription_id_start_))
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
return n
def Clear(self):
self.clear_topic()
self.clear_max_results()
self.clear_expires_before()
self.clear_subscription_id_start()
self.clear_app_id()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.topic_)
if (self.has_max_results_):
out.putVarInt32(16)
out.putVarInt64(self.max_results_)
if (self.has_expires_before_):
out.putVarInt32(24)
out.putVarInt64(self.expires_before_)
if (self.has_subscription_id_start_):
out.putVarInt32(34)
out.putPrefixedString(self.subscription_id_start_)
if (self.has_app_id_):
out.putVarInt32(42)
out.putPrefixedString(self.app_id_)
def OutputPartial(self, out):
if (self.has_topic_):
out.putVarInt32(10)
out.putPrefixedString(self.topic_)
if (self.has_max_results_):
out.putVarInt32(16)
out.putVarInt64(self.max_results_)
if (self.has_expires_before_):
out.putVarInt32(24)
out.putVarInt64(self.expires_before_)
if (self.has_subscription_id_start_):
out.putVarInt32(34)
out.putPrefixedString(self.subscription_id_start_)
if (self.has_app_id_):
out.putVarInt32(42)
out.putPrefixedString(self.app_id_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_topic(d.getPrefixedString())
continue
if tt == 16:
self.set_max_results(d.getVarInt64())
continue
if tt == 24:
self.set_expires_before(d.getVarInt64())
continue
if tt == 34:
self.set_subscription_id_start(d.getPrefixedString())
continue
if tt == 42:
self.set_app_id(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_topic_: res+=prefix+("topic: %s\n" % self.DebugFormatString(self.topic_))
if self.has_max_results_: res+=prefix+("max_results: %s\n" % self.DebugFormatInt64(self.max_results_))
if self.has_expires_before_: res+=prefix+("expires_before: %s\n" % self.DebugFormatInt64(self.expires_before_))
if self.has_subscription_id_start_: res+=prefix+("subscription_id_start: %s\n" % self.DebugFormatString(self.subscription_id_start_))
if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
ktopic = 1
kmax_results = 2
kexpires_before = 3
ksubscription_id_start = 4
kapp_id = 5
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "topic",
2: "max_results",
3: "expires_before",
4: "subscription_id_start",
5: "app_id",
}, 5)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.NUMERIC,
4: ProtocolBuffer.Encoder.STRING,
5: ProtocolBuffer.Encoder.STRING,
}, 5, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.prospective_search.ListSubscriptionsRequest'
class ListSubscriptionsResponse(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
self.subscription_ = []
if contents is not None: self.MergeFromString(contents)
def subscription_size(self): return len(self.subscription_)
def subscription_list(self): return self.subscription_
def subscription(self, i):
return self.subscription_[i]
def mutable_subscription(self, i):
return self.subscription_[i]
def add_subscription(self):
x = SubscriptionRecord()
self.subscription_.append(x)
return x
def clear_subscription(self):
self.subscription_ = []
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.subscription_size()): self.add_subscription().CopyFrom(x.subscription(i))
def Equals(self, x):
if x is self: return 1
if len(self.subscription_) != len(x.subscription_): return 0
for e1, e2 in zip(self.subscription_, x.subscription_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
for p in self.subscription_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += 1 * len(self.subscription_)
for i in xrange(len(self.subscription_)): n += self.lengthString(self.subscription_[i].ByteSize())
return n
def ByteSizePartial(self):
n = 0
n += 1 * len(self.subscription_)
for i in xrange(len(self.subscription_)): n += self.lengthString(self.subscription_[i].ByteSizePartial())
return n
def Clear(self):
self.clear_subscription()
def OutputUnchecked(self, out):
for i in xrange(len(self.subscription_)):
out.putVarInt32(10)
out.putVarInt32(self.subscription_[i].ByteSize())
self.subscription_[i].OutputUnchecked(out)
def OutputPartial(self, out):
for i in xrange(len(self.subscription_)):
out.putVarInt32(10)
out.putVarInt32(self.subscription_[i].ByteSizePartial())
self.subscription_[i].OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_subscription().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.subscription_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("subscription%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
ksubscription = 1
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "subscription",
}, 1)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
}, 1, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.prospective_search.ListSubscriptionsResponse'
class ListTopicsRequest(ProtocolBuffer.ProtocolMessage):
has_topic_start_ = 0
topic_start_ = ""
has_max_results_ = 0
max_results_ = 1000
has_app_id_ = 0
app_id_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def topic_start(self): return self.topic_start_
def set_topic_start(self, x):
self.has_topic_start_ = 1
self.topic_start_ = x
def clear_topic_start(self):
if self.has_topic_start_:
self.has_topic_start_ = 0
self.topic_start_ = ""
def has_topic_start(self): return self.has_topic_start_
def max_results(self): return self.max_results_
def set_max_results(self, x):
self.has_max_results_ = 1
self.max_results_ = x
def clear_max_results(self):
if self.has_max_results_:
self.has_max_results_ = 0
self.max_results_ = 1000
def has_max_results(self): return self.has_max_results_
def app_id(self): return self.app_id_
def set_app_id(self, x):
self.has_app_id_ = 1
self.app_id_ = x
def clear_app_id(self):
if self.has_app_id_:
self.has_app_id_ = 0
self.app_id_ = ""
def has_app_id(self): return self.has_app_id_
def MergeFrom(self, x):
assert x is not self
if (x.has_topic_start()): self.set_topic_start(x.topic_start())
if (x.has_max_results()): self.set_max_results(x.max_results())
if (x.has_app_id()): self.set_app_id(x.app_id())
def Equals(self, x):
if x is self: return 1
if self.has_topic_start_ != x.has_topic_start_: return 0
if self.has_topic_start_ and self.topic_start_ != x.topic_start_: return 0
if self.has_max_results_ != x.has_max_results_: return 0
if self.has_max_results_ and self.max_results_ != x.max_results_: return 0
if self.has_app_id_ != x.has_app_id_: return 0
if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
if (self.has_topic_start_): n += 1 + self.lengthString(len(self.topic_start_))
if (self.has_max_results_): n += 1 + self.lengthVarInt64(self.max_results_)
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
return n
def ByteSizePartial(self):
n = 0
if (self.has_topic_start_): n += 1 + self.lengthString(len(self.topic_start_))
if (self.has_max_results_): n += 1 + self.lengthVarInt64(self.max_results_)
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
return n
def Clear(self):
self.clear_topic_start()
self.clear_max_results()
self.clear_app_id()
def OutputUnchecked(self, out):
if (self.has_topic_start_):
out.putVarInt32(10)
out.putPrefixedString(self.topic_start_)
if (self.has_max_results_):
out.putVarInt32(16)
out.putVarInt64(self.max_results_)
if (self.has_app_id_):
out.putVarInt32(26)
out.putPrefixedString(self.app_id_)
def OutputPartial(self, out):
if (self.has_topic_start_):
out.putVarInt32(10)
out.putPrefixedString(self.topic_start_)
if (self.has_max_results_):
out.putVarInt32(16)
out.putVarInt64(self.max_results_)
if (self.has_app_id_):
out.putVarInt32(26)
out.putPrefixedString(self.app_id_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_topic_start(d.getPrefixedString())
continue
if tt == 16:
self.set_max_results(d.getVarInt64())
continue
if tt == 26:
self.set_app_id(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_topic_start_: res+=prefix+("topic_start: %s\n" % self.DebugFormatString(self.topic_start_))
if self.has_max_results_: res+=prefix+("max_results: %s\n" % self.DebugFormatInt64(self.max_results_))
if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
ktopic_start = 1
kmax_results = 2
kapp_id = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "topic_start",
2: "max_results",
3: "app_id",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.STRING,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.prospective_search.ListTopicsRequest'
class ListTopicsResponse(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
self.topic_ = []
if contents is not None: self.MergeFromString(contents)
def topic_size(self): return len(self.topic_)
def topic_list(self): return self.topic_
def topic(self, i):
return self.topic_[i]
def set_topic(self, i, x):
self.topic_[i] = x
def add_topic(self, x):
self.topic_.append(x)
def clear_topic(self):
self.topic_ = []
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.topic_size()): self.add_topic(x.topic(i))
def Equals(self, x):
if x is self: return 1
if len(self.topic_) != len(x.topic_): return 0
for e1, e2 in zip(self.topic_, x.topic_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
n += 1 * len(self.topic_)
for i in xrange(len(self.topic_)): n += self.lengthString(len(self.topic_[i]))
return n
def ByteSizePartial(self):
n = 0
n += 1 * len(self.topic_)
for i in xrange(len(self.topic_)): n += self.lengthString(len(self.topic_[i]))
return n
def Clear(self):
self.clear_topic()
def OutputUnchecked(self, out):
for i in xrange(len(self.topic_)):
out.putVarInt32(10)
out.putPrefixedString(self.topic_[i])
def OutputPartial(self, out):
for i in xrange(len(self.topic_)):
out.putVarInt32(10)
out.putPrefixedString(self.topic_[i])
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.add_topic(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.topic_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("topic%s: %s\n" % (elm, self.DebugFormatString(e)))
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
ktopic = 1
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "topic",
}, 1)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
}, 1, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.prospective_search.ListTopicsResponse'
class MatchRequest(ProtocolBuffer.ProtocolMessage):
ENTITY = 1
MODEL = 2
_PythonDocumentClass_NAMES = {
1: "ENTITY",
2: "MODEL",
}
def PythonDocumentClass_Name(cls, x): return cls._PythonDocumentClass_NAMES.get(x, "")
PythonDocumentClass_Name = classmethod(PythonDocumentClass_Name)
has_topic_ = 0
topic_ = ""
has_document_ = 0
has_result_batch_size_ = 0
result_batch_size_ = 0
has_result_task_queue_ = 0
result_task_queue_ = ""
has_result_relative_url_ = 0
result_relative_url_ = ""
has_result_key_ = 0
result_key_ = ""
has_result_python_document_class_ = 0
result_python_document_class_ = 0
def __init__(self, contents=None):
self.document_ = EntityProto()
if contents is not None: self.MergeFromString(contents)
def topic(self): return self.topic_
def set_topic(self, x):
self.has_topic_ = 1
self.topic_ = x
def clear_topic(self):
if self.has_topic_:
self.has_topic_ = 0
self.topic_ = ""
def has_topic(self): return self.has_topic_
def document(self): return self.document_
def mutable_document(self): self.has_document_ = 1; return self.document_
def clear_document(self):self.has_document_ = 0; self.document_.Clear()
def has_document(self): return self.has_document_
def result_batch_size(self): return self.result_batch_size_
def set_result_batch_size(self, x):
self.has_result_batch_size_ = 1
self.result_batch_size_ = x
def clear_result_batch_size(self):
if self.has_result_batch_size_:
self.has_result_batch_size_ = 0
self.result_batch_size_ = 0
def has_result_batch_size(self): return self.has_result_batch_size_
def result_task_queue(self): return self.result_task_queue_
def set_result_task_queue(self, x):
self.has_result_task_queue_ = 1
self.result_task_queue_ = x
def clear_result_task_queue(self):
if self.has_result_task_queue_:
self.has_result_task_queue_ = 0
self.result_task_queue_ = ""
def has_result_task_queue(self): return self.has_result_task_queue_
def result_relative_url(self): return self.result_relative_url_
def set_result_relative_url(self, x):
self.has_result_relative_url_ = 1
self.result_relative_url_ = x
def clear_result_relative_url(self):
if self.has_result_relative_url_:
self.has_result_relative_url_ = 0
self.result_relative_url_ = ""
def has_result_relative_url(self): return self.has_result_relative_url_
def result_key(self): return self.result_key_
def set_result_key(self, x):
self.has_result_key_ = 1
self.result_key_ = x
def clear_result_key(self):
if self.has_result_key_:
self.has_result_key_ = 0
self.result_key_ = ""
def has_result_key(self): return self.has_result_key_
def result_python_document_class(self): return self.result_python_document_class_
def set_result_python_document_class(self, x):
self.has_result_python_document_class_ = 1
self.result_python_document_class_ = x
def clear_result_python_document_class(self):
if self.has_result_python_document_class_:
self.has_result_python_document_class_ = 0
self.result_python_document_class_ = 0
def has_result_python_document_class(self): return self.has_result_python_document_class_
def MergeFrom(self, x):
assert x is not self
if (x.has_topic()): self.set_topic(x.topic())
if (x.has_document()): self.mutable_document().MergeFrom(x.document())
if (x.has_result_batch_size()): self.set_result_batch_size(x.result_batch_size())
if (x.has_result_task_queue()): self.set_result_task_queue(x.result_task_queue())
if (x.has_result_relative_url()): self.set_result_relative_url(x.result_relative_url())
if (x.has_result_key()): self.set_result_key(x.result_key())
if (x.has_result_python_document_class()): self.set_result_python_document_class(x.result_python_document_class())
def Equals(self, x):
if x is self: return 1
if self.has_topic_ != x.has_topic_: return 0
if self.has_topic_ and self.topic_ != x.topic_: return 0
if self.has_document_ != x.has_document_: return 0
if self.has_document_ and self.document_ != x.document_: return 0
if self.has_result_batch_size_ != x.has_result_batch_size_: return 0
if self.has_result_batch_size_ and self.result_batch_size_ != x.result_batch_size_: return 0
if self.has_result_task_queue_ != x.has_result_task_queue_: return 0
if self.has_result_task_queue_ and self.result_task_queue_ != x.result_task_queue_: return 0
if self.has_result_relative_url_ != x.has_result_relative_url_: return 0
if self.has_result_relative_url_ and self.result_relative_url_ != x.result_relative_url_: return 0
if self.has_result_key_ != x.has_result_key_: return 0
if self.has_result_key_ and self.result_key_ != x.result_key_: return 0
if self.has_result_python_document_class_ != x.has_result_python_document_class_: return 0
if self.has_result_python_document_class_ and self.result_python_document_class_ != x.result_python_document_class_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_topic_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: topic not set.')
if (not self.has_document_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: document not set.')
elif not self.document_.IsInitialized(debug_strs): initialized = 0
if (not self.has_result_batch_size_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: result_batch_size not set.')
if (not self.has_result_task_queue_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: result_task_queue not set.')
if (not self.has_result_relative_url_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: result_relative_url not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.topic_))
n += self.lengthString(self.document_.ByteSize())
n += self.lengthVarInt64(self.result_batch_size_)
n += self.lengthString(len(self.result_task_queue_))
n += self.lengthString(len(self.result_relative_url_))
if (self.has_result_key_): n += 1 + self.lengthString(len(self.result_key_))
if (self.has_result_python_document_class_): n += 1 + self.lengthVarInt64(self.result_python_document_class_)
return n + 5
def ByteSizePartial(self):
n = 0
if (self.has_topic_):
n += 1
n += self.lengthString(len(self.topic_))
if (self.has_document_):
n += 1
n += self.lengthString(self.document_.ByteSizePartial())
if (self.has_result_batch_size_):
n += 1
n += self.lengthVarInt64(self.result_batch_size_)
if (self.has_result_task_queue_):
n += 1
n += self.lengthString(len(self.result_task_queue_))
if (self.has_result_relative_url_):
n += 1
n += self.lengthString(len(self.result_relative_url_))
if (self.has_result_key_): n += 1 + self.lengthString(len(self.result_key_))
if (self.has_result_python_document_class_): n += 1 + self.lengthVarInt64(self.result_python_document_class_)
return n
def Clear(self):
self.clear_topic()
self.clear_document()
self.clear_result_batch_size()
self.clear_result_task_queue()
self.clear_result_relative_url()
self.clear_result_key()
self.clear_result_python_document_class()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.topic_)
out.putVarInt32(18)
out.putVarInt32(self.document_.ByteSize())
self.document_.OutputUnchecked(out)
out.putVarInt32(24)
out.putVarInt32(self.result_batch_size_)
out.putVarInt32(34)
out.putPrefixedString(self.result_task_queue_)
out.putVarInt32(42)
out.putPrefixedString(self.result_relative_url_)
if (self.has_result_key_):
out.putVarInt32(50)
out.putPrefixedString(self.result_key_)
if (self.has_result_python_document_class_):
out.putVarInt32(56)
out.putVarInt32(self.result_python_document_class_)
def OutputPartial(self, out):
if (self.has_topic_):
out.putVarInt32(10)
out.putPrefixedString(self.topic_)
if (self.has_document_):
out.putVarInt32(18)
out.putVarInt32(self.document_.ByteSizePartial())
self.document_.OutputPartial(out)
if (self.has_result_batch_size_):
out.putVarInt32(24)
out.putVarInt32(self.result_batch_size_)
if (self.has_result_task_queue_):
out.putVarInt32(34)
out.putPrefixedString(self.result_task_queue_)
if (self.has_result_relative_url_):
out.putVarInt32(42)
out.putPrefixedString(self.result_relative_url_)
if (self.has_result_key_):
out.putVarInt32(50)
out.putPrefixedString(self.result_key_)
if (self.has_result_python_document_class_):
out.putVarInt32(56)
out.putVarInt32(self.result_python_document_class_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_topic(d.getPrefixedString())
continue
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_document().TryMerge(tmp)
continue
if tt == 24:
self.set_result_batch_size(d.getVarInt32())
continue
if tt == 34:
self.set_result_task_queue(d.getPrefixedString())
continue
if tt == 42:
self.set_result_relative_url(d.getPrefixedString())
continue
if tt == 50:
self.set_result_key(d.getPrefixedString())
continue
if tt == 56:
self.set_result_python_document_class(d.getVarInt32())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_topic_: res+=prefix+("topic: %s\n" % self.DebugFormatString(self.topic_))
if self.has_document_:
res+=prefix+"document <\n"
res+=self.document_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_result_batch_size_: res+=prefix+("result_batch_size: %s\n" % self.DebugFormatInt32(self.result_batch_size_))
if self.has_result_task_queue_: res+=prefix+("result_task_queue: %s\n" % self.DebugFormatString(self.result_task_queue_))
if self.has_result_relative_url_: res+=prefix+("result_relative_url: %s\n" % self.DebugFormatString(self.result_relative_url_))
if self.has_result_key_: res+=prefix+("result_key: %s\n" % self.DebugFormatString(self.result_key_))
if self.has_result_python_document_class_: res+=prefix+("result_python_document_class: %s\n" % self.DebugFormatInt32(self.result_python_document_class_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
ktopic = 1
kdocument = 2
kresult_batch_size = 3
kresult_task_queue = 4
kresult_relative_url = 5
kresult_key = 6
kresult_python_document_class = 7
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "topic",
2: "document",
3: "result_batch_size",
4: "result_task_queue",
5: "result_relative_url",
6: "result_key",
7: "result_python_document_class",
}, 7)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.NUMERIC,
4: ProtocolBuffer.Encoder.STRING,
5: ProtocolBuffer.Encoder.STRING,
6: ProtocolBuffer.Encoder.STRING,
7: ProtocolBuffer.Encoder.NUMERIC,
}, 7, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.prospective_search.MatchRequest'
class MatchResponse(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
pass
if contents is not None: self.MergeFromString(contents)
def MergeFrom(self, x):
assert x is not self
def Equals(self, x):
if x is self: return 1
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
return n
def ByteSizePartial(self):
n = 0
return n
def Clear(self):
pass
def OutputUnchecked(self, out):
pass
def OutputPartial(self, out):
pass
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
}, 0)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
}, 0, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.prospective_search.MatchResponse'
if _extension_runtime:
pass
__all__ = ['SchemaEntry','SubscribeRequest','SubscribeResponse','UnsubscribeRequest','UnsubscribeResponse','SubscriptionRecord','ListSubscriptionsRequest','ListSubscriptionsResponse','ListTopicsRequest','ListTopicsResponse','MatchRequest','MatchResponse']
|
adviti/melange
|
thirdparty/google_appengine/google/appengine/api/prospective_search/prospective_search_pb.py
|
Python
|
apache-2.0
| 60,260
|
#!/usr/bin/env python3
# python setup.py sdist --format=zip,gztar
import os
import sys
import platform
import importlib.util
import argparse
import subprocess
from setuptools import setup, find_packages
from setuptools.command.install import install
MIN_PYTHON_VERSION = "3.6.1"
_min_python_version_tuple = tuple(map(int, (MIN_PYTHON_VERSION.split("."))))
if sys.version_info[:3] < _min_python_version_tuple:
sys.exit("Error: Electrum requires Python version >= %s..." % MIN_PYTHON_VERSION)
with open('contrib/requirements/requirements.txt') as f:
requirements = f.read().splitlines()
with open('contrib/requirements/requirements-hw.txt') as f:
requirements_hw = f.read().splitlines()
# load version.py; needlessly complicated alternative to "imp.load_source":
version_spec = importlib.util.spec_from_file_location('version', 'electrum/version.py')
version_module = version = importlib.util.module_from_spec(version_spec)
version_spec.loader.exec_module(version_module)
data_files = []
if platform.system() in ['Linux', 'FreeBSD', 'DragonFly']:
parser = argparse.ArgumentParser()
parser.add_argument('--root=', dest='root_path', metavar='dir', default='/')
opts, _ = parser.parse_known_args(sys.argv[1:])
usr_share = os.path.join(sys.prefix, "share")
icons_dirname = 'pixmaps'
if not os.access(opts.root_path + usr_share, os.W_OK) and \
not os.access(opts.root_path, os.W_OK):
icons_dirname = 'icons'
if 'XDG_DATA_HOME' in os.environ.keys():
usr_share = os.environ['XDG_DATA_HOME']
else:
usr_share = os.path.expanduser('~/.local/share')
data_files += [
(os.path.join(usr_share, 'applications/'), ['electrum.desktop']),
(os.path.join(usr_share, icons_dirname), ['electrum/gui/icons/electrum.png']),
]
extras_require = {
'hardware': requirements_hw,
'fast': ['pycryptodomex'],
'gui': ['pyqt5'],
}
extras_require['full'] = [pkg for sublist in list(extras_require.values()) for pkg in sublist]
setup(
name="Electrum",
version=version.ELECTRUM_VERSION,
python_requires='>={}'.format(MIN_PYTHON_VERSION),
install_requires=requirements,
extras_require=extras_require,
packages=[
'electrum',
'electrum.gui',
'electrum.gui.qt',
'electrum.plugins',
] + [('electrum.plugins.'+pkg) for pkg in find_packages('electrum/plugins')],
package_dir={
'electrum': 'electrum'
},
package_data={
'': ['*.txt', '*.json', '*.ttf', '*.otf'],
'electrum': [
'wordlist/*.txt',
'locale/*/LC_MESSAGES/electrum.mo',
],
'electrum.gui': [
'icons/*',
],
},
scripts=['electrum/electrum'],
data_files=data_files,
description="Lightweight Fujicoin Wallet",
author="Thomas Voegtlin",
author_email="thomasv@electrum.org",
license="MIT Licence",
url="https://www.fujicoin.org",
long_description="""Lightweight Fujicoin Wallet""",
)
|
fujicoin/electrum-fjc
|
setup.py
|
Python
|
mit
| 3,023
|
from __future__ import print_function, division
import warnings
from itertools import permutations
import hmmlearn.hmm
import numpy as np
from msmbuilder.example_datasets import AlanineDipeptide
from msmbuilder.featurizer import SuperposeFeaturizer
from msmbuilder.hmm import GaussianHMM
rs = np.random.RandomState(42)
def test_ala2():
# creates a 4-state HMM on the ALA2 data. Nothing fancy, just makes
# sure the code runs without erroring out
trajectories = AlanineDipeptide().get_cached().trajectories
topology = trajectories[0].topology
indices = topology.select('symbol C or symbol O or symbol N')
featurizer = SuperposeFeaturizer(indices, trajectories[0][0])
sequences = featurizer.transform(trajectories)
hmm = GaussianHMM(n_states=4, n_init=3, random_state=rs)
hmm.fit(sequences)
    assert len(hmm.timescales_) == 3
assert np.any(hmm.timescales_ > 50)
def create_timeseries(means, vars, transmat):
"""Construct a random timeseries based on a specified Markov model."""
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
model = hmmlearn.hmm.GaussianHMM(n_components=len(means),
random_state=rs)
model.means_ = means
model.covars_ = vars
model.transmat_ = transmat
X, Y = model.sample(1000)
return X
def validate_timeseries(means, vars, transmat, model,
valuetol=1e-3, transmattol=1e-3):
"""Whether our model matches the one used to create the timeseries."""
numStates = len(means)
assert len(model.means_) == numStates
assert (model.transmat_ >= 0.0).all()
assert (model.transmat_ <= 1.0).all()
totalProbability = sum(model.transmat_.T)
assert (abs(totalProbability - 1.0) < 1e-5).all()
# The states may have come out in a different order,
# so we need to test all possible permutations.
for order in permutations(range(len(means))):
match = True
for i in range(numStates):
if abs(means[i] - model.means_[order[i]]) > valuetol:
match = False
break
if abs(vars[i] - model.vars_[order[i]]) > valuetol:
match = False
break
for j in range(numStates):
diff = transmat[i, j] - model.transmat_[order[i], order[j]]
if abs(diff) > transmattol:
match = False
break
if match:
# It matches.
return
# No permutation matched.
assert False
def test_2_state():
transmat = np.array([[0.7, 0.3], [0.4, 0.6]])
means = np.array([[0.0], [5.0]])
vars = np.array([[1.0], [1.0]])
X = [create_timeseries(means, vars, transmat) for i in range(10)]
# For each value of various options,
# create a 2 state HMM and see if it is correct.
class two_state_tester(object):
def __init__(self, init_algo, reversible_type):
self.init_algo = init_algo
self.reversible_type = reversible_type
self.description = ("{}.test_3_state_{}_{}"
.format(__name__, init_algo, reversible_type))
def __call__(self, *args, **kwargs):
model = GaussianHMM(n_states=2, init_algo=self.init_algo,
reversible_type=self.reversible_type,
thresh=1e-4, n_iter=30, random_state=rs)
model.fit(X)
validate_timeseries(means, vars, transmat, model, 0.1, 0.05)
assert abs(model.fit_logprob_[-1] - model.score(X)) < 0.5
for init_algo in ('kmeans', 'GMM'):
for reversible_type in ('mle', 'transpose'):
yield two_state_tester(init_algo, reversible_type)
def test_3_state():
transmat = np.array([[0.2, 0.3, 0.5], [0.4, 0.4, 0.2], [0.8, 0.2, 0.0]])
means = np.array([[0.0], [10.0], [5.0]])
vars = np.array([[1.0], [2.0], [0.3]])
X = [create_timeseries(means, vars, transmat) for i in range(20)]
# For each value of various options,
# create a 3 state HMM and see if it is correct.
class three_state_tester(object):
def __init__(self, init_algo, reversible_type):
self.init_algo = init_algo
self.reversible_type = reversible_type
self.description = ("{}.test_2_state_{}_{}"
.format(__name__, init_algo, reversible_type))
def __call__(self, *args, **kwargs):
model = GaussianHMM(n_states=3, init_algo=self.init_algo,
reversible_type=self.reversible_type,
thresh=1e-4, n_iter=30, random_state=rs)
model.fit(X)
validate_timeseries(means, vars, transmat, model, 0.1, 0.1)
assert abs(model.fit_logprob_[-1] - model.score(X)) < 0.5
for init_algo in ('kmeans', 'GMM'):
for reversible_type in ('mle', 'transpose'):
yield three_state_tester(init_algo, reversible_type)
|
mpharrigan/mixtape
|
msmbuilder/tests/test_ghmm.py
|
Python
|
lgpl-2.1
| 5,095
|
import warnings
from django.conf import settings
from django.template import Context, Template
from django.template.loader import render_to_string
from django.utils import six
from django.utils.html import conditional_escape
from crispy_forms.utils import render_field, flatatt
TEMPLATE_PACK = getattr(settings, 'CRISPY_TEMPLATE_PACK', 'bootstrap')
class LayoutObject(object):
def __getitem__(self, slice):
return self.fields[slice]
def __setitem__(self, slice, value):
self.fields[slice] = value
def __delitem__(self, slice):
del self.fields[slice]
def __len__(self):
return len(self.fields)
def __getattr__(self, name):
"""
This allows us to access self.fields list methods like append or insert, without
        having to declare them one by one
"""
# Check necessary for unpickling, see #107
if 'fields' in self.__dict__ and hasattr(self.fields, name):
return getattr(self.fields, name)
else:
return object.__getattribute__(self, name)
def get_field_names(self, index=None):
"""
        Returns a list of lists; each inner list is a named pointer whose first
        element is the position of the field and whose second element is the field name. Example::
[
[[0,1,2], 'field_name1'],
[[0,3], 'field_name2']
]
"""
return self.get_layout_objects(six.string_types, greedy=True)
def get_layout_objects(self, *LayoutClasses, **kwargs):
"""
Returns a list of lists pointing to layout objects of any type matching
`LayoutClasses`::
[
[[0,1,2], 'div'],
[[0,3], 'field_name']
]
:param max_level: An integer that indicates max level depth to reach when
traversing a layout.
:param greedy: Boolean that indicates whether to be greedy. If set, max_level
is skipped.
"""
index = kwargs.pop('index', None)
max_level = kwargs.pop('max_level', 0)
greedy = kwargs.pop('greedy', False)
pointers = []
if index is not None and not isinstance(index, list):
index = [index]
elif index is None:
index = []
for i, layout_object in enumerate(self.fields):
if isinstance(layout_object, LayoutClasses):
if len(LayoutClasses) == 1 and LayoutClasses[0] == six.string_types:
pointers.append([index + [i], layout_object])
else:
pointers.append([index + [i], layout_object.__class__.__name__.lower()])
# If it's a layout object and we haven't reached the max depth limit or greedy
# we recursive call
if hasattr(layout_object, 'get_field_names') and (len(index) < max_level or greedy):
new_kwargs = {'index': index + [i], 'max_level': max_level, 'greedy': greedy}
pointers = pointers + layout_object.get_layout_objects(*LayoutClasses, **new_kwargs)
return pointers
class Layout(LayoutObject):
"""
Form Layout. It is conformed by Layout objects: `Fieldset`, `Row`, `Column`, `MultiField`,
`HTML`, `ButtonHolder`, `Button`, `Hidden`, `Reset`, `Submit` and fields. Form fields
have to be strings.
Layout objects `Fieldset`, `Row`, `Column`, `MultiField` and `ButtonHolder` can hold other
Layout objects within. Though `ButtonHolder` should only hold `HTML` and BaseInput
inherited classes: `Button`, `Hidden`, `Reset` and `Submit`.
Example::
helper.layout = Layout(
Fieldset('Company data',
'is_company'
),
Fieldset(_('Contact details'),
'email',
Row('password1', 'password2'),
'first_name',
'last_name',
HTML('<img src="/media/somepicture.jpg"/>'),
'company'
),
ButtonHolder(
Submit('Save', 'Save', css_class='button white'),
),
)
"""
def __init__(self, *fields):
self.fields = list(fields)
def render(self, form, form_style, context, template_pack=TEMPLATE_PACK, **kwargs):
html = ""
for field in self.fields:
html += render_field(
field,
form,
form_style,
context,
template_pack=template_pack,
**kwargs
)
return html
class ButtonHolder(LayoutObject):
"""
Layout object. It wraps fields in a <div class="buttonHolder">
This is where you should put Layout objects that render to form buttons like Submit.
It should only hold `HTML` and `BaseInput` inherited objects.
Example::
ButtonHolder(
            HTML('<span style="display: hidden;">Information Saved</span>'),
Submit('Save', 'Save')
)
"""
template = "%s/layout/buttonholder.html"
def __init__(self, *fields, **kwargs):
self.fields = list(fields)
self.css_class = kwargs.get('css_class', None)
self.css_id = kwargs.get('css_id', None)
self.template = kwargs.get('template', self.template)
def render(self, form, form_style, context, template_pack=TEMPLATE_PACK, **kwargs):
html = u''
for field in self.fields:
html += render_field(
field, form, form_style, context, template_pack=template_pack, **kwargs
)
template = self.template % template_pack
return render_to_string(
template,
{'buttonholder': self, 'fields_output': html},
context
)
class BaseInput(object):
"""
A base class to reduce the amount of code in the Input classes.
"""
template = "%s/layout/baseinput.html"
def __init__(self, name, value, **kwargs):
self.name = name
self.value = value
self.id = kwargs.pop('css_id', '')
self.attrs = {}
if 'css_class' in kwargs:
self.field_classes += ' %s' % kwargs.pop('css_class')
self.template = kwargs.pop('template', self.template)
self.flat_attrs = flatatt(kwargs)
def render(self, form, form_style, context, template_pack=TEMPLATE_PACK, **kwargs):
"""
Renders an `<input />` if container is used as a Layout object.
Input button value can be a variable in context.
"""
self.value = Template(six.text_type(self.value)).render(context)
template = self.template % template_pack
return render_to_string(template, {'input': self}, context)
class Submit(BaseInput):
"""
Used to create a Submit button descriptor for the {% crispy %} template tag::
submit = Submit('Search the Site', 'search this site')
.. note:: The first argument is also slugified and turned into the id for the submit button.
"""
input_type = 'submit'
field_classes = 'submit submitButton' if TEMPLATE_PACK == 'uni_form' else 'btn btn-primary'
class Button(BaseInput):
"""
    Used to create a Button input descriptor for the {% crispy %} template tag::
button = Button('Button 1', 'Press Me!')
.. note:: The first argument is also slugified and turned into the id for the button.
"""
input_type = 'button'
field_classes = 'button' if TEMPLATE_PACK == 'uni_form' else 'btn'
class Hidden(BaseInput):
"""
Used to create a Hidden input descriptor for the {% crispy %} template tag.
"""
input_type = 'hidden'
field_classes = 'hidden'
class Reset(BaseInput):
"""
Used to create a Reset button input descriptor for the {% crispy %} template tag::
reset = Reset('Reset This Form', 'Revert Me!')
.. note:: The first argument is also slugified and turned into the id for the reset.
"""
input_type = 'reset'
field_classes = 'reset resetButton' if TEMPLATE_PACK == 'uni_form' else 'btn btn-inverse'
class Fieldset(LayoutObject):
"""
Layout object. It wraps fields in a <fieldset>
Example::
Fieldset("Text for the legend",
'form_field_1',
'form_field_2'
)
The first parameter is the text for the fieldset legend. This text is context aware,
so you can do things like::
Fieldset("Data for {{ user.username }}",
'form_field_1',
'form_field_2'
)
"""
template = "%s/layout/fieldset.html"
def __init__(self, legend, *fields, **kwargs):
self.fields = list(fields)
self.legend = legend
self.css_class = kwargs.pop('css_class', '')
self.css_id = kwargs.pop('css_id', None)
self.template = kwargs.pop('template', self.template)
self.flat_attrs = flatatt(kwargs)
def render(self, form, form_style, context, template_pack=TEMPLATE_PACK, **kwargs):
fields = ''
for field in self.fields:
fields += render_field(
field, form, form_style, context, template_pack=template_pack, **kwargs
)
legend = ''
if self.legend:
legend = u'%s' % Template(six.text_type(self.legend)).render(context)
template = self.template % template_pack
return render_to_string(
template,
{'fieldset': self, 'legend': legend, 'fields': fields, 'form_style': form_style}
)
class MultiField(LayoutObject):
""" MultiField container. Renders to a MultiField <div> """
template = "%s/layout/multifield.html"
field_template = "%s/multifield.html"
def __init__(self, label, *fields, **kwargs):
self.fields = list(fields)
self.label_html = label
self.label_class = kwargs.pop('label_class', u'blockLabel')
self.css_class = kwargs.pop('css_class', u'ctrlHolder')
self.css_id = kwargs.pop('css_id', None)
self.template = kwargs.pop('template', self.template)
self.field_template = kwargs.pop('field_template', self.field_template)
self.flat_attrs = flatatt(kwargs)
def render(self, form, form_style, context, template_pack=TEMPLATE_PACK, **kwargs):
# If a field within MultiField contains errors
if context['form_show_errors']:
for field in map(lambda pointer: pointer[1], self.get_field_names()):
if field in form.errors:
self.css_class += " error"
fields_output = u''
field_template = self.field_template % template_pack
for field in self.fields:
fields_output += render_field(
field, form, form_style, context,
field_template, self.label_class, layout_object=self,
template_pack=template_pack, **kwargs
)
extra_context = {
'multifield': self,
'fields_output': fields_output
}
template = self.template % template_pack
return render_to_string(template, extra_context, context)
class Div(LayoutObject):
"""
Layout object. It wraps fields in a <div>
You can set `css_id` for a DOM id and `css_class` for a DOM class. Example::
Div('form_field_1', 'form_field_2', css_id='div-example', css_class='divs')
"""
template = "%s/layout/div.html"
def __init__(self, *fields, **kwargs):
self.fields = list(fields)
if hasattr(self, 'css_class') and 'css_class' in kwargs:
self.css_class += ' %s' % kwargs.pop('css_class')
if not hasattr(self, 'css_class'):
self.css_class = kwargs.pop('css_class', None)
self.css_id = kwargs.pop('css_id', '')
self.template = kwargs.pop('template', self.template)
self.flat_attrs = flatatt(kwargs)
def render(self, form, form_style, context, template_pack=TEMPLATE_PACK, **kwargs):
fields = ''
for field in self.fields:
fields += render_field(
field, form, form_style, context, template_pack=template_pack, **kwargs
)
template = self.template % template_pack
return render_to_string(template, {'div': self, 'fields': fields})
class Row(Div):
"""
Layout object. It wraps fields in a div whose default class is "formRow". Example::
Row('form_field_1', 'form_field_2', 'form_field_3')
"""
css_class = 'formRow' if TEMPLATE_PACK == 'uni_form' else 'row'
class Column(Div):
"""
Layout object. It wraps fields in a div whose default class is "formColumn". Example::
Column('form_field_1', 'form_field_2')
"""
css_class = 'formColumn'
class HTML(object):
"""
Layout object. It can contain pure HTML and it has access to the whole
context of the page where the form is being rendered.
Examples::
HTML("{% if saved %}Data saved{% endif %}")
HTML('<input type="hidden" name="{{ step_field }}" value="{{ step0 }}" />')
"""
def __init__(self, html):
self.html = html
def render(self, form, form_style, context, template_pack=TEMPLATE_PACK, **kwargs):
return Template(six.text_type(self.html)).render(context)
class Field(LayoutObject):
"""
    Layout object. It contains one field name, and you can add attributes to it easily.
For setting class attributes, you need to use `css_class`, as `class` is a Python keyword.
Example::
Field('field_name', style="color: #333;", css_class="whatever", id="field_name")
"""
template = "%s/field.html"
def __init__(self, *args, **kwargs):
self.fields = list(args)
if not hasattr(self, 'attrs'):
self.attrs = {}
if 'css_class' in kwargs:
if 'class' in self.attrs:
self.attrs['class'] += " %s" % kwargs.pop('css_class')
else:
self.attrs['class'] = kwargs.pop('css_class')
self.wrapper_class = kwargs.pop('wrapper_class', None)
self.template = kwargs.pop('template', self.template)
# We use kwargs as HTML attributes, turning data_id='test' into data-id='test'
self.attrs.update(dict([(k.replace('_', '-'), conditional_escape(v)) for k, v in kwargs.items()]))
def render(self, form, form_style, context, template_pack=TEMPLATE_PACK, extra_context=None, **kwargs):
if extra_context is None:
extra_context = {}
if hasattr(self, 'wrapper_class'):
extra_context['wrapper_class'] = self.wrapper_class
html = ''
template = self.template % template_pack
for field in self.fields:
html += render_field(
field, form, form_style, context,
template=template, attrs=self.attrs, template_pack=template_pack,
extra_context=extra_context, **kwargs
)
return html
class MultiWidgetField(Field):
"""
Layout object. For fields with :class:`~django.forms.MultiWidget` as `widget`, you can pass
additional attributes to each widget.
Example::
MultiWidgetField(
'multiwidget_field_name',
attrs=(
{'style': 'width: 30px;'},
{'class': 'second_widget_class'}
),
)
.. note:: To override widget's css class use ``class`` not ``css_class``.
"""
def __init__(self, *args, **kwargs):
self.fields = list(args)
self.attrs = kwargs.pop('attrs', {})
self.template = kwargs.pop('template', self.template)
|
uranusjr/django-crispy-forms-ng
|
crispy_forms/layout.py
|
Python
|
mit
| 15,616
|
# -*- coding: utf-8 -*-
# Copyright (C) 2016 KMEE (http://www.kmee.com.br)
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import models, fields, api
class AccountMove(models.Model):
_inherit = "account.move"
payslip_id = fields.Many2one(
comodel_name="hr.payslip",
string="Payslip",
ondelete='cascade',
)
payslip_run_id = fields.Many2one(
comodel_name="hr.payslip.run",
string="Payslip Run",
ondelete='cascade',
)
@api.multi
def name_get(self):
res = []
for record in self:
if record.payslip_id or record.payslip_run_id:
name = record.name
res.append((record.id, name))
if res:
return res
else:
return super(AccountMove, self).name_get()
class AccountMoveLine(models.Model):
_inherit = "account.move.line"
payslip_id = fields.Many2one(
comodel_name="hr.payslip",
string="Payslip",
)
payslip_run_id = fields.Many2one(
comodel_name="hr.payslip.run",
string="Payslip Run",
)
|
kmee/odoo-brazil-hr
|
l10n_br_hr_payroll_account/models/account_move.py
|
Python
|
agpl-3.0
| 1,158
|
"""nose tests"""
from keypool import KeypoolDict, Keypool
from operator import delitem
import random
MAX_ITER = 10
def assert_unique(keys):
assert all([x == y for x,y in zip(sorted(set(keys)), sorted(keys))])
def test_init():
pool = Keypool(start=0)
dict = KeypoolDict()
assert dict == {}
del pool
del dict
dict = KeypoolDict(hello='world')
assert dict == {'hello' : 'world'}
del dict
dict = KeypoolDict([('start', 'world')], start=100)
assert dict == {'start' : 'world'}
assert dict._pool.start == 100
def test_keygen():
pool = Keypool(start=0)
dict = KeypoolDict()
for i in xrange(0, MAX_ITER):
k1 = pool.next()
k2 = dict.next()
print k1
print k2
def test_quick_allocate():
items = KeypoolDict()
min = 2
max = 10
for i in xrange(0, MAX_ITER):
rand = random.randint(min, max)
keys = [items.setitem(i) for i in xrange(0, rand)]
assert_unique(keys)
def test_reuse(min_items=2, max_items=1000):
items = KeypoolDict()
for i in xrange(0, MAX_ITER):
rand = random.randint(min_items, max_items)
keys = [items.setitem(i) for i in xrange(0, rand)]
# No keys are identical
assert_unique(keys)
# Delete all the items
[delitem(items, key) for key in items.keys()]
# The old keys are now reused
keys2 = [items.setitem(i) for i in xrange(0, rand)]
assert keys == keys2
assert_unique(keys)
[delitem(items, key) for key in items.keys()]
def test_contains(min_items=2, max_items=1000):
items = KeypoolDict()
for i in xrange(0, MAX_ITER):
rand = random.randint(min_items, max_items)
keys = [items.setitem(i) for i in xrange(0, rand)]
assert all([key in items for key in keys])
def test_no_intermediate_assignment():
items = KeypoolDict()
items[items.next()] = 'a'
items[items.next()] = 'b'
items[items.next()] = 'c'
del items[0]
assert 0 not in items
assert 1 in items
assert 2 in items
items[items.next()] = 'd'
assert 0 in items
assert items[0] == 'd'
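# Illustrative sketch (editor's addition, not part of the original test suite):
# a minimal demonstration of the key-reuse behaviour exercised by the tests
# above. The stored values ('alpha', 'beta', 'gamma') are arbitrary.
if __name__ == '__main__':
    demo = KeypoolDict()
    first = demo.setitem('alpha')       # allocates the lowest free key
    second = demo.setitem('beta')
    print('allocated keys: %s and %s' % (first, second))
    del demo[first]                     # a deleted key returns to the pool...
    third = demo.setitem('gamma')       # ...and is handed out again
    print('reused key: %s (same as %s)' % (third, first))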
|
tyrannosaur/keypool
|
tests.py
|
Python
|
mit
| 2,191
|
"""
General purpose XML library for CPython and IronPython
"""
#from distutils.core import setup
from distutils.command.install import INSTALL_SCHEMES
from setuptools import setup
for scheme in INSTALL_SCHEMES.values():
scheme['data'] = scheme['purelib']
setup(name = "bridge",
version = '0.4.0',
description = "General purpose XML library for CPython and IronPython",
maintainer = "Sylvain Hellegouarch",
maintainer_email = "sh@defuze.org",
url = "http://trac.defuze.org/wiki/bridge",
download_url = "http://www.defuze.org/oss/bridge/",
packages = ["bridge", "bridge.parser", "bridge.lib",
"bridge.filter", "bridge.validator"],
platforms = ["any"],
license = 'BSD',
long_description = "",
)
|
Lawouach/bridge
|
setup.py
|
Python
|
bsd-3-clause
| 792
|
#!/usr/bin/python
#
# DumbQ - A lightweight job scheduler - Metrics Library
# Copyright (C) 2014-2015 Ioannis Charalampidis, PH-SFT, CERN
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
import fcntl
import json
import sys
import os
#: The path to the database
_db_path = "/var/www/html/metrics.json"
#: The database file descriptor
_db_fd = None
#: The database contents
_db = None
#: If we should automatically commit changes
_autocommit = True
##########################################################
# Configuration Function
##########################################################
def configure(database=None, autocommit=None):
"""
Global function to configure module
"""
global _db_path
global _autocommit
# Update database if specified
if not database is None:
_db_path = database
# Update autocommit flag if specified
if not autocommit is None:
_autocommit = autocommit
##########################################################
# Low level database operations
##########################################################
def load():
"""
Load database from disk
"""
global _db
global _db_fd
# If we have an open file descriptor, save
if not _db_fd is None:
commit()
# Load database from file
_db = { }
# Open file
isnew = False
try:
# Open file
if os.path.exists(_db_path):
_db_fd = open(_db_path, 'r+')
else:
_db_fd = open(_db_path, 'w+')
isnew = True
# Get exclusive lock on the entire file
fcntl.lockf(_db_fd, fcntl.LOCK_EX)
except Exception as e:
raise IOError("ERROR: Unable to open database file %s for reading! (%s)\n" % (_db_path, str(e)))
# Try to read file
if not isnew:
try:
_db_fd.seek(0)
_db = json.loads(_db_fd.read())
except IOError as e:
# Close database
_db_fd.close()
_db_fd = None
raise IOError("ERROR: Unable to read database file %s (%s)!\n" % (_db_path, str(e)))
except ValueError as e:
_db = { }
sys.stderr.write("WARNING: Invalid contents of database %s!\n" % _db_path)
def commit():
"""
Save database to disk
"""
global _db
global _db_fd
# If _db is none, replace with {}
if _db is None:
_db = { }
# If we have a missing _db_fd, open file now
if _db_fd is None:
try:
# Open file
_db_fd = open(_db_path, 'w')
# Get exclusive lock on the entire file
fcntl.lockf(_db_fd, fcntl.LOCK_EX)
except Exception as e:
raise IOError("ERROR: Unable to open database file %s for writing!\n" % _db_path)
# Update database
succeed = True
try:
# Replace file contents
_db_fd.seek(0)
_db_fd.write(json.dumps(_db))
# And if new object is smaller, truncate
# remaining file size
_db_fd.truncate()
except Exception as e:
# Close FDs
_db_fd.close()
_db_fd = None
raise IOError("ERROR: Unable to update database file %s! (%s)\n" % (_db_path, str(e)))
# Release lock and close
_db_fd.close()
_db_fd = None
def getKey(key, default=None):
"""
    Return the value stored at a key in the database
"""
# Get path components
path = key.split("/")
# Walk through
cdict = _db
while len(path) > 0:
p = path.pop(0)
if not (p in cdict) or (not isinstance(cdict[p], dict) and (len(path)>0)):
return default
cdict = cdict[p]
# Return value
return cdict
def setKey(key, value):
"""
Set a value to a key in the database
"""
global _db
# Get path components
path = key.split("/")
# Walk through
cdict = _db
while len(path) > 0:
p = path.pop(0)
if len(path) == 0:
# Reached the leaf
cdict[p] = value
else:
# Walk and allocate missing paths and destroy non-dicts
if not (p in cdict) or not isinstance(cdict[p], dict):
cdict[p] = { }
cdict = cdict[p]
def hasKey(key):
"""
Check if key exists in the database
"""
# Get path components
path = key.split("/")
# Walk through
cdict = _db
while len(path) > 0:
p = path.pop(0)
if not (p in cdict) or (not isinstance(cdict[p], dict) and (len(path)>0)):
return False
cdict = cdict[p]
# Return true
return True
def delKey(key):
"""
Delete a particular key
"""
global _db
# Get path components
path = key.split("/")
# Walk through
cdict = _db
while len(path) > 0:
p = path.pop(0)
if len(path) == 0:
# Reached the leaf
if p in cdict:
del cdict[p]
else:
# Walk and allocate missing paths and destroy non-dicts
if not (p in cdict) or not isinstance(cdict[p], dict):
cdict[p] = { }
cdict = cdict[p]
##########################################################
# High level interface functions
##########################################################
def set(key, value):
"""
Set a property to a value
"""
# Load database if missing
if (_db is None) or (_autocommit):
load()
# Update database
setKey(key, value)
# Commit database if autocommit
if _autocommit:
commit()
def delete(key):
"""
Delete a property in the database
"""
# Load database if missing
if (_db is None) or (_autocommit):
load()
# Delete key
delKey(key)
# Commit database if autocommit
if _autocommit:
commit()
def add(key, value):
"""
Add value to the specified key
"""
# Load database if missing
if (_db is None) or (_autocommit):
load()
# Update database
if '.' in value:
if not hasKey(key):
setKey( key, float(value) )
else:
setKey( key, float(getKey(key)) + float(value) )
else:
if not hasKey(key):
setKey( key, int(value) )
else:
setKey( key, int(getKey(key)) + int(value) )
# Commit database if autocommit
if _autocommit:
commit()
def multiply(key, value):
"""
    Multiply the stored database value by the given value
"""
# Load database if missing
if (_db is None) or (_autocommit):
load()
# Update database
if '.' in value:
if not hasKey(key):
setKey( key, float(value) )
else:
setKey( key, float(getKey(key)) * float(value) )
else:
if not hasKey(key):
setKey( key, int(value) )
else:
setKey( key, int(getKey(key)) * int(value) )
# Commit database if autocommit
if _autocommit:
commit()
def average(key, value, ring=20):
"""
Average values in the database, using up to 'ring' values stored in it
"""
global _db
# Load database if missing
if (_db is None) or (_autocommit):
load()
# Operate on float or int
if '.' in value:
value = float(value)
else:
value = int(value)
# Append and rotate values
vals = getKey('%s_values' % key, default=[])
vals.append( value )
setKey( '%s_values' % key, vals )
# Trim ring
while len(vals) > ring:
del vals[0]
# Store values & Update average
setKey( key, sum( vals ) / float(len( vals )) )
# Commit database if autocommit
if _autocommit:
commit()
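# Illustrative usage sketch (editor's addition, not part of the original DumbQ
# module): it only exercises the public helpers defined above. The temporary
# database path and the "jobs/..." key names are made up for the example.
if __name__ == "__main__":
    import tempfile
    # Point the module at a throw-away database instead of /var/www/html/metrics.json
    configure(database=os.path.join(tempfile.gettempdir(), "metrics-demo.json"))
    # Keys are slash-separated paths into nested dictionaries
    set("jobs/completed", 0)
    add("jobs/completed", "1")        # add()/multiply()/average() take string values
    add("jobs/completed", "1")
    average("jobs/duration", "12.5", ring=5)
    average("jobs/duration", "7.5", ring=5)
    # Reload from disk and read the values back
    load()
    print("completed: %s" % getKey("jobs/completed"))   # -> 2
    print("duration : %s" % getKey("jobs/duration"))    # -> 10.0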
|
jvican/dumbq
|
client/utils/dumbq/metrics.py
|
Python
|
gpl-2.0
| 7,235
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2011 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
import doctest
import unittest
from babel.messages import plurals
def suite():
suite = unittest.TestSuite()
suite.addTest(doctest.DocTestSuite(plurals))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
patrys/babel
|
babel/messages/tests/plurals.py
|
Python
|
bsd-3-clause
| 742
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for image preprocessing layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.layers.preprocessing import image_preprocessing
from tensorflow.python.keras.utils.generic_utils import CustomObjectScope
from tensorflow.python.ops import gen_stateful_random_ops
from tensorflow.python.ops import image_ops_impl as image_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class ResizingTest(keras_parameterized.TestCase):
def _run_test(self, kwargs, expected_height, expected_width):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
kwargs.update({'height': expected_height, 'width': expected_width})
with tf_test_util.use_gpu():
testing_utils.layer_test(
image_preprocessing.Resizing,
kwargs=kwargs,
input_shape=(num_samples, orig_height, orig_width, channels),
expected_output_shape=(None, expected_height, expected_width,
channels))
@parameterized.named_parameters(
('down_sample_bilinear_2_by_2', {'interpolation': 'bilinear'}, 2, 2),
('down_sample_bilinear_3_by_2', {'interpolation': 'bilinear'}, 3, 2),
('down_sample_nearest_2_by_2', {'interpolation': 'nearest'}, 2, 2),
('down_sample_nearest_3_by_2', {'interpolation': 'nearest'}, 3, 2),
('down_sample_area_2_by_2', {'interpolation': 'area'}, 2, 2),
('down_sample_area_3_by_2', {'interpolation': 'area'}, 3, 2))
def test_down_sampling(self, kwargs, expected_height, expected_width):
with CustomObjectScope({'Resizing': image_preprocessing.Resizing}):
self._run_test(kwargs, expected_height, expected_width)
@parameterized.named_parameters(
('up_sample_bilinear_10_by_12', {'interpolation': 'bilinear'}, 10, 12),
('up_sample_bilinear_12_by_12', {'interpolation': 'bilinear'}, 12, 12),
('up_sample_nearest_10_by_12', {'interpolation': 'nearest'}, 10, 12),
('up_sample_nearest_12_by_12', {'interpolation': 'nearest'}, 12, 12),
('up_sample_area_10_by_12', {'interpolation': 'area'}, 10, 12),
('up_sample_area_12_by_12', {'interpolation': 'area'}, 12, 12))
def test_up_sampling(self, kwargs, expected_height, expected_width):
with CustomObjectScope({'Resizing': image_preprocessing.Resizing}):
self._run_test(kwargs, expected_height, expected_width)
@parameterized.named_parameters(
('reshape_bilinear_10_by_4', {'interpolation': 'bilinear'}, 10, 4))
def test_reshaping(self, kwargs, expected_height, expected_width):
with CustomObjectScope({'Resizing': image_preprocessing.Resizing}):
self._run_test(kwargs, expected_height, expected_width)
def test_invalid_interpolation(self):
with self.assertRaises(NotImplementedError):
image_preprocessing.Resizing(5, 5, 'invalid_interpolation')
def test_config_with_custom_name(self):
layer = image_preprocessing.Resizing(5, 5, name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.Resizing.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def get_numpy_center_crop(images, expected_height, expected_width):
orig_height = images.shape[1]
orig_width = images.shape[2]
height_start = int((orig_height - expected_height) / 2)
width_start = int((orig_width - expected_width) / 2)
height_end = height_start + expected_height
width_end = width_start + expected_width
return images[:, height_start:height_end, width_start:width_end, :]
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class CenterCropTest(keras_parameterized.TestCase):
def _run_test(self, expected_height, expected_width):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
kwargs = {'height': expected_height, 'width': expected_width}
input_images = np.random.random(
(num_samples, orig_height, orig_width, channels)).astype(np.float32)
expected_output = get_numpy_center_crop(
input_images, expected_height, expected_width)
with tf_test_util.use_gpu():
testing_utils.layer_test(
image_preprocessing.CenterCrop,
kwargs=kwargs,
input_shape=(num_samples, orig_height, orig_width, channels),
input_data=input_images,
expected_output=expected_output,
expected_output_shape=(None, expected_height, expected_width,
channels))
@parameterized.named_parameters(
('center_crop_3_by_4', 3, 4),
('center_crop_3_by_2', 3, 2))
def test_center_crop_aligned(self, expected_height, expected_width):
with CustomObjectScope({'CenterCrop': image_preprocessing.CenterCrop}):
self._run_test(expected_height, expected_width)
@parameterized.named_parameters(
('center_crop_4_by_5', 4, 5),
('center_crop_4_by_3', 4, 3))
def test_center_crop_mis_aligned(self, expected_height, expected_width):
with CustomObjectScope({'CenterCrop': image_preprocessing.CenterCrop}):
self._run_test(expected_height, expected_width)
@parameterized.named_parameters(
('center_crop_4_by_6', 4, 6),
('center_crop_3_by_2', 3, 2))
def test_center_crop_half_mis_aligned(self, expected_height, expected_width):
with CustomObjectScope({'CenterCrop': image_preprocessing.CenterCrop}):
self._run_test(expected_height, expected_width)
@parameterized.named_parameters(
('center_crop_5_by_12', 5, 12),
('center_crop_10_by_8', 10, 8),
('center_crop_10_by_12', 10, 12))
def test_invalid_center_crop(self, expected_height, expected_width):
with self.assertRaisesRegexp(errors.InvalidArgumentError,
r'assertion failed'):
self._run_test(expected_height, expected_width)
def test_config_with_custom_name(self):
layer = image_preprocessing.CenterCrop(5, 5, name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.CenterCrop.from_config(config)
self.assertEqual(layer_1.name, layer.name)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomCropTest(keras_parameterized.TestCase):
def _run_test(self, expected_height, expected_width):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
kwargs = {'height': expected_height, 'width': expected_width}
with tf_test_util.use_gpu():
testing_utils.layer_test(
image_preprocessing.RandomCrop,
kwargs=kwargs,
input_shape=(num_samples, orig_height, orig_width, channels),
expected_output_shape=(None, expected_height, expected_width,
channels))
@parameterized.named_parameters(
('random_crop_5_by_12', 5, 12),
('random_crop_10_by_8', 10, 8),
('random_crop_10_by_12', 10, 12))
def test_invalid_random_crop(self, expected_height, expected_width):
with self.assertRaises(errors.InvalidArgumentError):
with CustomObjectScope({'RandomCrop': image_preprocessing.RandomCrop}):
self._run_test(expected_height, expected_width)
def test_training_with_mock(self):
if test.is_built_with_rocm():
# TODO(rocm):
# re-enable this test once ROCm adds support for
# the StatefulUniformFullInt Op (on the GPU)
self.skipTest('Feature not supported on ROCm')
np.random.seed(1337)
height, width = 3, 4
height_offset = np.random.randint(low=0, high=3)
width_offset = np.random.randint(low=0, high=5)
mock_offset = [0, height_offset, width_offset, 0]
with test.mock.patch.object(
stateless_random_ops, 'stateless_random_uniform',
return_value=mock_offset):
with tf_test_util.use_gpu():
layer = image_preprocessing.RandomCrop(height, width)
inp = np.random.random((12, 5, 8, 3))
actual_output = layer(inp, training=1)
expected_output = inp[:, height_offset:(height_offset + height),
width_offset:(width_offset + width), :]
self.assertAllClose(expected_output, actual_output)
@parameterized.named_parameters(
('random_crop_4_by_6', 4, 6),
('random_crop_3_by_2', 3, 2))
def test_random_crop_output_shape(self, expected_height, expected_width):
if test.is_built_with_rocm():
# TODO(rocm):
# re-enable this test once ROCm adds support for
# the StatefulUniformFullInt Op (on the GPU)
self.skipTest('Feature not supported on ROCm')
with CustomObjectScope({'RandomCrop': image_preprocessing.RandomCrop}):
self._run_test(expected_height, expected_width)
def test_predicting_with_mock_longer_height(self):
np.random.seed(1337)
height, width = 3, 3
inp = np.random.random((12, 10, 6, 3))
with tf_test_util.use_gpu():
layer = image_preprocessing.RandomCrop(height, width)
actual_output = layer(inp, training=0)
resized_inp = image_ops.resize_images_v2(
inp, size=[5, 3])
expected_output = resized_inp[:, 1:4, :, :]
self.assertAllClose(expected_output, actual_output)
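  # Note on the expected values above: the 10 x 6 input is resized to 5 x 3
  # (the same 10:6 aspect ratio, with the width matching the 3 x 3 target),
  # after which rows 1:4 form the centered 3 x 3 crop. The next test mirrors
  # this for an input that is wider than it is tall.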
def test_predicting_with_mock_longer_width(self):
np.random.seed(1337)
height, width = 4, 6
inp = np.random.random((12, 8, 16, 3))
with tf_test_util.use_gpu():
layer = image_preprocessing.RandomCrop(height, width)
actual_output = layer(inp, training=0)
resized_inp = image_ops.resize_images_v2(
inp, size=[4, 8])
expected_output = resized_inp[:, :, 1:7, :]
self.assertAllClose(expected_output, actual_output)
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomCrop(5, 5, name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.RandomCrop.from_config(config)
self.assertEqual(layer_1.name, layer.name)
class RescalingTest(keras_parameterized.TestCase):
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_rescaling_base(self):
kwargs = {'scale': 0.004}
testing_utils.layer_test(
image_preprocessing.Rescaling,
kwargs=kwargs,
input_shape=(2, 5, 6, 3),
expected_output_shape=(None, 5, 6, 3))
@tf_test_util.run_v2_only
def test_rescaling_correctness_float(self):
layer = image_preprocessing.Rescaling(0.004)
inputs = random_ops.random_uniform((2, 4, 5, 3))
outputs = layer(inputs)
self.assertAllClose(outputs.numpy(), inputs.numpy() * 0.004)
@tf_test_util.run_v2_only
def test_rescaling_correctness_int(self):
layer = image_preprocessing.Rescaling(0.004)
inputs = random_ops.random_uniform((2, 4, 5, 3), 0, 100, dtype='int32')
outputs = layer(inputs)
self.assertEqual(outputs.dtype.name, 'float32')
self.assertAllClose(outputs.numpy(), inputs.numpy() * 0.004)
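  # Minimal usage sketch (hypothetical scale, kept as a comment so the module
  # is unchanged): the two correctness tests above show Rescaling is a pure
  # elementwise multiply by `scale`, casting integer inputs to float32:
  #
  #     layer = image_preprocessing.Rescaling(1. / 255.)
  #     outputs = layer(np.random.randint(0, 256, size=(2, 4, 5, 3)))
  #     # outputs.dtype == float32 and outputs == inputs / 255.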
def test_config_with_custom_name(self):
layer = image_preprocessing.Rescaling(0.5, name='rescaling')
config = layer.get_config()
layer_1 = image_preprocessing.Rescaling.from_config(config)
self.assertEqual(layer_1.name, layer.name)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomFlipTest(keras_parameterized.TestCase):
def _run_test(self,
flip_horizontal,
flip_vertical,
expected_output=None,
mock_random=None):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
if mock_random is None:
mock_random = [1 for _ in range(num_samples)]
mock_random = np.reshape(mock_random, [2, 1, 1, 1])
inp = np.random.random((num_samples, orig_height, orig_width, channels))
if expected_output is None:
expected_output = inp
if flip_horizontal:
expected_output = np.flip(expected_output, axis=1)
if flip_vertical:
expected_output = np.flip(expected_output, axis=2)
with test.mock.patch.object(
random_ops, 'random_uniform', return_value=mock_random):
with tf_test_util.use_gpu():
layer = image_preprocessing.RandomFlip(flip_horizontal, flip_vertical)
actual_output = layer(inp, training=1)
self.assertAllClose(expected_output, actual_output)
@parameterized.named_parameters(('random_flip_horizontal', True, False),
('random_flip_vertical', False, True),
('random_flip_both', True, True),
('random_flip_neither', False, False))
def test_random_flip(self, flip_horizontal, flip_vertical):
with CustomObjectScope({'RandomFlip': image_preprocessing.RandomFlip}):
self._run_test(flip_horizontal, flip_vertical)
def test_random_flip_horizontal_half(self):
with CustomObjectScope({'RandomFlip': image_preprocessing.RandomFlip}):
np.random.seed(1337)
mock_random = [1, 0]
mock_random = np.reshape(mock_random, [2, 1, 1, 1])
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images.copy()
expected_output[0, :, :, :] = np.flip(input_images[0, :, :, :], axis=0)
self._run_test(True, False, expected_output, mock_random)
def test_random_flip_vertical_half(self):
with CustomObjectScope({'RandomFlip': image_preprocessing.RandomFlip}):
np.random.seed(1337)
mock_random = [1, 0]
mock_random = np.reshape(mock_random, [2, 1, 1, 1])
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images.copy()
expected_output[0, :, :, :] = np.flip(input_images[0, :, :, :], axis=1)
self._run_test(False, True, expected_output, mock_random)
def test_random_flip_inference(self):
with CustomObjectScope({'RandomFlip': image_preprocessing.RandomFlip}):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
with tf_test_util.use_gpu():
layer = image_preprocessing.RandomFlip(True, True)
actual_output = layer(input_images, training=0)
self.assertAllClose(expected_output, actual_output)
def test_random_flip_default(self):
with CustomObjectScope({'RandomFlip': image_preprocessing.RandomFlip}):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = np.flip(np.flip(input_images, axis=1), axis=2)
mock_random = [1, 1]
mock_random = np.reshape(mock_random, [2, 1, 1, 1])
with test.mock.patch.object(
random_ops, 'random_uniform', return_value=mock_random):
with self.cached_session(use_gpu=True):
layer = image_preprocessing.RandomFlip()
actual_output = layer(input_images, training=1)
self.assertAllClose(expected_output, actual_output)
@tf_test_util.run_v2_only
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomFlip(5, 5, name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.RandomFlip.from_config(config)
self.assertEqual(layer_1.name, layer.name)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomContrastTest(keras_parameterized.TestCase):
def _run_test(self,
lower,
upper,
expected_output=None,
mock_random=None):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
if mock_random is None:
mock_random = 0.2
inp = np.random.random((num_samples, orig_height, orig_width, channels))
if expected_output is None:
# reduce mean on height.
inp_mean = np.mean(inp, axis=1, keepdims=True)
# reduce mean on width.
inp_mean = np.mean(inp_mean, axis=2, keepdims=True)
expected_output = (inp - inp_mean) * mock_random + inp_mean
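    # The reference computation above is the usual contrast adjustment,
    # out = mean + factor * (x - mean), with the per-image mean taken over
    # height and width and the mocked factor standing in for the random draw
    # from (lower, upper).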
with test.mock.patch.object(
random_ops, 'random_uniform', return_value=mock_random):
with tf_test_util.use_gpu():
layer = image_preprocessing.RandomContrast((lower, upper))
actual_output = layer(inp, training=True)
self.assertAllClose(expected_output, actual_output)
@parameterized.named_parameters(
('random_contrast_2_by_5', 0.2, 0.5),
('random_contrast_2_by_13', 0.2, 1.3),
('random_contrast_5_by_2', 0.5, 0.2))
def test_random_contrast(self, lower, upper):
with CustomObjectScope(
{'RandomContrast': image_preprocessing.RandomContrast}):
self._run_test(lower, upper)
@parameterized.named_parameters(
('random_contrast_amplitude_2', 0.2),
('random_contrast_amplitude_5', 0.5))
def test_random_contrast_amplitude(self, amplitude):
with CustomObjectScope(
{'RandomContrast': image_preprocessing.RandomContrast}):
input_images = np.random.random((2, 5, 8, 3))
with tf_test_util.use_gpu():
layer = image_preprocessing.RandomContrast(amplitude)
layer(input_images)
def test_random_contrast_inference(self):
with CustomObjectScope(
{'RandomContrast': image_preprocessing.RandomContrast}):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
with tf_test_util.use_gpu():
layer = image_preprocessing.RandomContrast((0.1, 0.2))
actual_output = layer(input_images, training=False)
self.assertAllClose(expected_output, actual_output)
def test_random_contrast_int_dtype(self):
with CustomObjectScope(
{'RandomContrast': image_preprocessing.RandomContrast}):
input_images = np.random.randint(low=0, high=255, size=(2, 5, 8, 3))
with tf_test_util.use_gpu():
layer = image_preprocessing.RandomContrast((0.1, 0.2))
layer(input_images)
def test_random_contrast_invalid_bounds(self):
with self.assertRaises(ValueError):
image_preprocessing.RandomContrast((-0.1, .5))
with self.assertRaises(ValueError):
image_preprocessing.RandomContrast((1.1, .5))
with self.assertRaises(ValueError):
image_preprocessing.RandomContrast((0.1, -0.2))
@tf_test_util.run_v2_only
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomContrast((.5, .6), name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.RandomContrast.from_config(config)
self.assertEqual(layer_1.name, layer.name)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomTranslationTest(keras_parameterized.TestCase):
def _run_test(self, height_factor, width_factor):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
kwargs = {'height_factor': height_factor, 'width_factor': width_factor}
with tf_test_util.use_gpu():
testing_utils.layer_test(
image_preprocessing.RandomTranslation,
kwargs=kwargs,
input_shape=(num_samples, orig_height, orig_width, channels),
expected_output_shape=(None, orig_height, orig_width, channels))
@parameterized.named_parameters(
('random_translate_4_by_6', .4, .6), ('random_translate_3_by_2', .3, .2),
('random_translate_tuple_factor', (.5, .4), (.2, .3)))
def test_random_translation(self, height_factor, width_factor):
self._run_test(height_factor, width_factor)
def test_random_translation_negative_lower(self):
mock_offset = np.random.random((12, 1))
with test.mock.patch.object(
gen_stateful_random_ops, 'stateful_uniform', return_value=mock_offset):
with self.cached_session(use_gpu=True):
layer = image_preprocessing.RandomTranslation((-0.2, .3), .4)
layer_2 = image_preprocessing.RandomTranslation((0.2, .3), .4)
inp = np.random.random((12, 5, 8, 3)).astype(np.float32)
actual_output = layer(inp, training=1)
actual_output_2 = layer_2(inp, training=1)
self.assertAllClose(actual_output, actual_output_2)
def test_random_translation_inference(self):
with CustomObjectScope(
{'RandomTranslation': image_preprocessing.RandomTranslation}):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
with tf_test_util.use_gpu():
layer = image_preprocessing.RandomTranslation(.5, .5)
actual_output = layer(input_images, training=0)
self.assertAllClose(expected_output, actual_output)
@tf_test_util.run_v2_only
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomTranslation(.5, .6, name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.RandomTranslation.from_config(config)
self.assertEqual(layer_1.name, layer.name)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomRotationTest(keras_parameterized.TestCase):
def _run_test(self, factor):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
kwargs = {'factor': factor}
with tf_test_util.use_gpu():
testing_utils.layer_test(
image_preprocessing.RandomRotation,
kwargs=kwargs,
input_shape=(num_samples, orig_height, orig_width, channels),
expected_output_shape=(None, orig_height, orig_width, channels))
@parameterized.named_parameters(('random_rotate_4', .4),
('random_rotate_3', .3),
('random_rotate_tuple_factor', (.5, .4)))
def test_random_rotation(self, factor):
self._run_test(factor)
def test_random_rotation_inference(self):
with CustomObjectScope(
{'RandomTranslation': image_preprocessing.RandomRotation}):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
with tf_test_util.use_gpu():
layer = image_preprocessing.RandomRotation(.5)
actual_output = layer(input_images, training=0)
self.assertAllClose(expected_output, actual_output)
@tf_test_util.run_v2_only
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomRotation(.5, name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.RandomRotation.from_config(config)
self.assertEqual(layer_1.name, layer.name)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomZoomTest(keras_parameterized.TestCase):
def _run_test(self, height_factor, width_factor):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
kwargs = {'height_factor': height_factor, 'width_factor': width_factor}
with tf_test_util.use_gpu():
testing_utils.layer_test(
image_preprocessing.RandomZoom,
kwargs=kwargs,
input_shape=(num_samples, orig_height, orig_width, channels),
expected_output_shape=(None, orig_height, orig_width, channels))
@parameterized.named_parameters(
('random_zoom_4_by_6', .4, .6), ('random_zoom_2_by_3', .2, .3),
('random_zoom_tuple_factor', (.4, .5), (.2, .3)))
def test_random_zoom_in(self, height_factor, width_factor):
self._run_test(height_factor, width_factor)
@parameterized.named_parameters(
('random_zoom_4_by_6', 1.4, 1.6), ('random_zoom_2_by_3', 1.2, 1.3),
('random_zoom_tuple_factor', (1.4, 1.5), (1.2, 1.3)))
def test_random_zoom_out(self, height_factor, width_factor):
self._run_test(height_factor, width_factor)
def test_random_zoom_invalid_factor(self):
with self.assertRaises(ValueError):
image_preprocessing.RandomZoom((.5, .4), .2)
with self.assertRaises(ValueError):
image_preprocessing.RandomZoom(.2, (.5, .4))
def test_random_zoom_inference(self):
with CustomObjectScope(
{'RandomZoom': image_preprocessing.RandomZoom}):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
with tf_test_util.use_gpu():
layer = image_preprocessing.RandomZoom(.5, .5)
actual_output = layer(input_images, training=0)
self.assertAllClose(expected_output, actual_output)
@tf_test_util.run_v2_only
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomZoom(.5, .6, name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.RandomZoom.from_config(config)
self.assertEqual(layer_1.name, layer.name)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomHeightTest(keras_parameterized.TestCase):
def _run_test(self, factor):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
with tf_test_util.use_gpu():
img = np.random.random((num_samples, orig_height, orig_width, channels))
layer = image_preprocessing.RandomHeight(factor)
img_out = layer(img, training=True)
self.assertEqual(img_out.shape[0], 2)
self.assertEqual(img_out.shape[2], 8)
self.assertEqual(img_out.shape[3], 3)
@parameterized.named_parameters(('random_height_4_by_6', (.4, .6)),
('random_height_3_by_2', (.3, 1.2)),
('random_height_3', .3))
def test_random_height_basic(self, factor):
self._run_test(factor)
def test_valid_random_height(self):
    # With factor=.4 the layer samples a height factor from [minval, maxval];
    # mocking the uniform draw as 0 pins the sample at the lower bound, i.e.
    # (maxval - minval) * rnd + minval = minval = 0.6, so the output height
    # becomes 5 * 0.6 = 3 (asserted below).
    mock_factor = 0
with test.mock.patch.object(
gen_stateful_random_ops, 'stateful_uniform', return_value=mock_factor):
with tf_test_util.use_gpu():
img = np.random.random((12, 5, 8, 3))
layer = image_preprocessing.RandomHeight(.4)
img_out = layer(img, training=True)
self.assertEqual(img_out.shape[1], 3)
def test_random_height_invalid_factor(self):
with self.assertRaises(ValueError):
image_preprocessing.RandomHeight((-1.5, .4))
def test_random_height_inference(self):
with CustomObjectScope({'RandomHeight': image_preprocessing.RandomHeight}):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
with tf_test_util.use_gpu():
layer = image_preprocessing.RandomHeight(.5)
actual_output = layer(input_images, training=0)
self.assertAllClose(expected_output, actual_output)
@tf_test_util.run_v2_only
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomHeight(.5, name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.RandomHeight.from_config(config)
self.assertEqual(layer_1.name, layer.name)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomWidthTest(keras_parameterized.TestCase):
def _run_test(self, factor):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
with tf_test_util.use_gpu():
img = np.random.random((num_samples, orig_height, orig_width, channels))
layer = image_preprocessing.RandomWidth(factor)
img_out = layer(img, training=True)
self.assertEqual(img_out.shape[0], 2)
self.assertEqual(img_out.shape[1], 5)
self.assertEqual(img_out.shape[3], 3)
@parameterized.named_parameters(('random_width_4_by_6', (.4, .6)),
('random_width_3_by_2', (.3, 1.2)),
('random_width_3', .3))
def test_random_width_basic(self, factor):
self._run_test(factor)
def test_valid_random_width(self):
    # With factor=.4 the layer samples a width factor from [minval, maxval];
    # mocking the uniform draw as 0 pins the sample at the lower bound, i.e.
    # (maxval - minval) * rnd + minval = minval = 0.6, so the output width
    # becomes 5 * 0.6 = 3 (asserted below).
    mock_factor = 0
with test.mock.patch.object(
gen_stateful_random_ops, 'stateful_uniform', return_value=mock_factor):
with tf_test_util.use_gpu():
img = np.random.random((12, 8, 5, 3))
layer = image_preprocessing.RandomWidth(.4)
img_out = layer(img, training=True)
self.assertEqual(img_out.shape[2], 3)
def test_random_width_invalid_factor(self):
with self.assertRaises(ValueError):
image_preprocessing.RandomWidth((-1.5, .4))
def test_random_width_inference(self):
with CustomObjectScope({'RandomWidth': image_preprocessing.RandomWidth}):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
with tf_test_util.use_gpu():
layer = image_preprocessing.RandomWidth(.5)
actual_output = layer(input_images, training=0)
self.assertAllClose(expected_output, actual_output)
@tf_test_util.run_v2_only
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomWidth(.5, name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.RandomWidth.from_config(config)
self.assertEqual(layer_1.name, layer.name)
if __name__ == '__main__':
test.main()
|
renyi533/tensorflow
|
tensorflow/python/keras/layers/preprocessing/image_preprocessing_test.py
|
Python
|
apache-2.0
| 29,932
|
"""
WSGI config for Practice_Referral project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Practice_Referral.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
Heteroskedastic/Dr-referral-tracker
|
Practice_Referral/wsgi.py
|
Python
|
mit
| 409
|
"""
==============================================
The :mod:`mpi_array.globale_ufunc_test` Module
==============================================
Module defining :mod:`mpi_array.globale` unit-tests.
Execute as::
python -m mpi_array.globale_ufunc_test
and with parallelism::
mpirun -n 2 python -m mpi_array.globale_ufunc_test
mpirun -n 4 python -m mpi_array.globale_ufunc_test
mpirun -n 27 python -m mpi_array.globale_ufunc_test
Classes
=======
.. autosummary::
:toctree: generated/
:template: autosummary/inherits_TestCase_class.rst
UfuncResultTypeTest - Tests for :func:`mpi_array.globale_ufunc.ufunc_result_type` function.
BroadcastShapeTest - Tests for :func:`mpi_array.globale_ufunc.broadcast_shape` function.
GndarrayUfuncTest - Tests for :func:`mpi_array.globale_ufunc.gndarray_array_ufunc` function.
   ToGndarrayConverter - Base class for converting :obj:`numpy.ndarray` to :obj:`mpi_array.globale.gndarray`.
"""
from __future__ import absolute_import
import numpy as _np
from .license import license as _license, copyright as _copyright, version as _version
from . import unittest as _unittest
from . import logging as _logging # noqa: E402,F401
from .comms import LT_NODE, LT_PROCESS, DT_CLONED, DT_SINGLE_LOCALE, DT_BLOCK # , DT_SLAB
from . import comms as _comms
from . import distribution as _distribution
from .globale_ufunc import broadcast_shape, ufunc_result_type, get_extents
from .globale_ufunc import check_equivalent_inter_locale_comms
from .globale import gndarray as _gndarray
from .globale_creation import ones as _ones, zeros as _zeros, asarray as _asarray
from .globale_creation import empty as _empty
from .globale import copyto as _copyto
__author__ = "Shane J. Latham"
__license__ = _license()
__copyright__ = _copyright()
__version__ = _version()
class UfuncResultTypeTest(_unittest.TestCase):
"""
:obj:`unittest.TestCase` for :func:`mpi_array.globale_ufunc.ufunc_result_type`.
"""
def test_single_output(self):
"""
:obj:`unittest.TestCase` for :func:`mpi_array.globale_ufunc.ufunc_result_type`,
with single output array.
"""
rank_logger = _logging.get_rank_logger(self.id())
uft = ['ff->f', 'll->l', 'cc->c', 'fl->b', 'dd->d']
inputs = (_np.array([1, 2], dtype='l'), _np.array([3, 4], dtype='l'))
outputs = None
dtypes = ufunc_result_type(uft, inputs, outputs)
rank_logger.debug("dtypes=%s", dtypes)
self.assertSequenceEqual((_np.dtype('l'),), dtypes)
inputs = (_np.array([1, 2], dtype='f'), _np.array([3, 4], dtype='f'))
outputs = (_np.array([0, 0], dtype='f'),)
dtypes = ufunc_result_type(uft, inputs, outputs)
self.assertSequenceEqual((_np.dtype('f'),), dtypes)
outputs = (_np.array([0, 0], dtype='d'),)
dtypes = ufunc_result_type(uft, inputs, outputs)
self.assertSequenceEqual((_np.dtype('d'),), dtypes)
outputs = (_np.array([0, 0], dtype='b'),)
self.assertRaises(ValueError, ufunc_result_type, uft, inputs, outputs)
inputs = (_np.array([1, 2], dtype='f'), _np.array([3, 4], dtype='l'))
outputs = None
dtypes = ufunc_result_type(uft, inputs, outputs)
self.assertSequenceEqual((_np.dtype('b'),), dtypes)
inputs = (_np.array([1, 2], dtype='f'), 5.0)
outputs = None
dtypes = ufunc_result_type(uft, inputs, outputs)
rank_logger.debug("dtypes=%s", dtypes)
self.assertSequenceEqual((_np.dtype('f'),), dtypes)
inputs = (_np.array([1, 2], dtype='f'), 5.0e150)
outputs = None
dtypes = ufunc_result_type(uft, inputs, outputs)
rank_logger.debug("dtypes=%s", dtypes)
self.assertSequenceEqual((_np.dtype('d'),), dtypes)
inputs = (_np.array([1, 2], dtype='complex128'), 5.0e150)
outputs = None
self.assertRaises(
ValueError,
ufunc_result_type, uft, inputs, outputs
)
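    # Reading of the cases above: a scalar operand only forces an upcast when
    # it cannot be represented at the array operand's precision -- a plain
    # Python float leaves 'f' inputs at float32, 5.0e150 (outside the float32
    # range) promotes the result to float64 via 'dd->d', and the complex128
    # input has no loop in `uft` that can hold it, so ValueError is raised.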
def test_tuple_input(self):
"""
:obj:`unittest.TestCase` for :func:`mpi_array.globale_ufunc.ufunc_result_type`,
        with a tuple (non-ndarray array-like) input.
"""
rank_logger = _logging.get_rank_logger(self.id())
uft = ['ff->f', 'll->l', 'cc->c', 'fl->b', 'dd->d']
inputs = (_np.array((1, 2, 3, 4), dtype='int32'), (1, 2, 3, 4))
outputs = None
dtypes = ufunc_result_type(uft, inputs, outputs)
rank_logger.debug("dtypes=%s", dtypes)
self.assertSequenceEqual((_np.dtype('l'),), dtypes)
def test_multiple_output(self):
"""
:obj:`unittest.TestCase` for :func:`mpi_array.globale_ufunc.ufunc_result_type`,
with multiple output arrays.
"""
rank_logger = _logging.get_rank_logger(self.id())
uft = ['eee->eBl', 'fff->fBl', 'ddd->dBl', ]
inputs = (_np.array([1, 2], dtype='e'), _np.array([3, 4], dtype='f'), 4.0)
outputs = None
dtypes = ufunc_result_type(uft, inputs, outputs)
rank_logger.debug("dtypes=%s", dtypes)
self.assertSequenceEqual((_np.dtype('f'), _np.dtype('B'), _np.dtype('l')), dtypes)
inputs = (_np.array([1, 2], dtype='e'), _np.array([3, 4], dtype='f'), 4.0)
outputs = (_np.array([1, 2], dtype='f'),)
dtypes = ufunc_result_type(uft, inputs, outputs)
rank_logger.debug("dtypes=%s", dtypes)
self.assertSequenceEqual((_np.dtype('f'), _np.dtype('B'), _np.dtype('l')), dtypes)
inputs = (_np.array([1, 2], dtype='e'), _np.array([3, 4], dtype='f'), 4.0)
outputs = (_np.array([1, 2], dtype='d'), _np.array([1, 2], dtype='i'))
dtypes = ufunc_result_type(uft, inputs, outputs)
rank_logger.debug("dtypes=%s", dtypes)
self.assertSequenceEqual((_np.dtype('d'), _np.dtype('i'), _np.dtype('l')), dtypes)
inputs = (_np.array([1, 2], dtype='e'), _np.array([3, 4], dtype='f'), 4.0)
outputs = (_np.array([1, 2], dtype='d'), _np.array([1, 2], dtype='b'))
self.assertRaises(
ValueError,
ufunc_result_type, uft, inputs, outputs
)
inputs = (_np.array([1, 2], dtype='e'), _np.array([3, 4], dtype='f'), 4.0)
outputs = \
(
_np.array([1, 2], dtype='d'),
_np.array([1, 2], dtype='i'),
_np.array([1, 2], dtype='uint16')
)
self.assertRaises(
ValueError,
ufunc_result_type, uft, inputs, outputs
)
def test_example(self):
import numpy as np
import mpi_array as mpia
try:
inp = (
np.zeros((10, 10, 10), dtype='float16'),
16.0,
mpia.zeros((10, 10, 10), dtype='float32'),
)
dtypes = ufunc_result_type(['eee->e?', 'fff->f?', 'ddd->d?'], inputs=inp)
self.assertSequenceEqual((_np.dtype('float32'), _np.dtype('bool')), dtypes)
out = (mpia.zeros((10, 10, 10), dtype="float64"),)
dtypes = ufunc_result_type(['eee->e?', 'fff->f?', 'ddd->d?'], inputs=inp, outputs=out)
self.assertSequenceEqual((_np.dtype('float64'), _np.dtype('bool')), dtypes)
out += (mpia.zeros((10, 10, 10), dtype="uint16"),)
dtypes = ufunc_result_type(['eee->e?', 'fff->f?', 'ddd->d?'], inputs=inp, outputs=out)
self.assertSequenceEqual((_np.dtype('float64'), _np.dtype('uint16')), dtypes)
finally:
inp[2].free()
out[0].free()
out[1].free()
class BroadcastShapeTest(_unittest.TestCase):
"""
:obj:`unittest.TestCase` for :func:`mpi_array.globale_ufunc.broadcast_shape`.
"""
def test_non_broadcastable(self):
"""
Test that :func:`mpi_array.globale_ufunc.broadcast_shape` raises
a :obj:`ValueError` if shapes are not broadcastable.
"""
self.assertRaises(
ValueError,
broadcast_shape,
(4,),
(5,)
)
self.assertRaises(
ValueError,
broadcast_shape,
(5,),
(6,)
)
self.assertRaises(
ValueError,
broadcast_shape,
(5, 5),
(6, 5)
)
self.assertRaises(
ValueError,
broadcast_shape,
(5, 6),
(5, 5)
)
self.assertRaises(
ValueError,
broadcast_shape,
(5, 6, 7),
(5, 1, 1),
(5, 6, 1),
(1, 1, 7),
(1, 1, 6)
)
def test_broadcastable(self):
"""
Asserts for variety of broadcastable shapes.
"""
self.assertSequenceEqual((), broadcast_shape(()))
self.assertSequenceEqual((), broadcast_shape((), ()))
self.assertSequenceEqual((), broadcast_shape((), (), ()))
self.assertSequenceEqual((1,), broadcast_shape((), (), (1, )))
self.assertSequenceEqual((4, ), broadcast_shape((4, ), (), (1, )))
self.assertSequenceEqual((1, 4), broadcast_shape((4, ), (1, 1), (1, )))
self.assertSequenceEqual((4, 5), broadcast_shape((5, ), (1, 5), (4, 1)))
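# Illustrative cross-check (hypothetical, kept as a comment): the shapes
# asserted above follow NumPy's broadcasting rule, so the same expectation can
# be reproduced with plain NumPy, e.g.
#
#     import numpy as np
#     np.broadcast(np.empty((5,)), np.empty((1, 5)), np.empty((4, 1))).shape
#     # -> (4, 5)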
class ToGndarrayConverter(object):
"""
Base class for converting :obj:`numpy.ndarray` objects
to :obj:`mpi_array.globale.gndarray` objects.
"""
def __init__(self, **kwargs):
"""
The :samp:`kwargs` are passed directly to the :func:`mpi_array.globale_creation.asarray`
function in :meth:`__call__`.
"""
self.kwargs = kwargs
def __call__(self, npy_ary):
"""
Converts the :samp:`{npy_ary}` to a :obj:`mpi_array.globale.gndarray` instance.
:type npy_ary: :obj:`numpy.ndarray`
:param npy_ary: Array converted to :obj:`mpi_array.globale.gndarray`.
This array is assumed to be identical on all peer-rank MPI processes.
:rtype: :obj:`mpi_array.globale.gndarray`
:return: The :samp:`{npy_ary}` converted to a :obj:`mpi_array.globale.gndarray` instance.
"""
if "halo" not in self.kwargs.keys():
halo = _np.random.randint(low=1, high=4, size=(npy_ary.ndim, 2))
gnd_ary = _asarray(npy_ary, halo=halo, **self.kwargs)
else:
gnd_ary = _asarray(npy_ary, **self.kwargs)
return gnd_ary
class GetExtentsTest(_unittest.TestCase):
"""
:obj:`unittest.TestCase` for :func:`mpi_array.globale_ufunc.get_extents`.
"""
def setUp(self):
"""
"""
locale_comms, inter_locale_rank_to_peer_rank, this_locale = _comms.create_locale_comms()
self.comms = locale_comms
self.locale_info = this_locale
del inter_locale_rank_to_peer_rank
def test_scalar(self):
"""
Test :func:`mpi_array.globale_ufunc.get_extents` with scalar input.
"""
l, g = get_extents(5.0, self.locale_info)
self.assertTrue(isinstance(l, _distribution.ScalarLocaleExtent))
self.assertTrue(isinstance(g, _distribution.ScalarGlobaleExtent))
class GndarrayUfuncTest(_unittest.TestCase):
"""
:obj:`unittest.TestCase` for :obj:`mpi_array.globale_ufunc`.
"""
def setUp(self):
"""
Initialise :func:`numpy.random.seed`.
"""
_np.random.seed(1531796312)
self._rank_logger = _logging.get_rank_logger(self.id())
with _asarray(_np.zeros((100,))) as gary:
self.num_node_locales = gary.num_locales
@property
def rank_logger(self):
"""
A :obj:`logging.Logger` object.
"""
return self._rank_logger
def compare_results(self, mpi_cln_npy_result_ary, mpi_result_ary):
"""
Asserts that all elements of
the :obj:`mpi_array.globale.gndarray` :samp:`{mpi_cln_npy_result_ary}`
equal all elements of the :obj:`mpi_array.globale.gndarray` :samp:`{mpi_result_ary}`.
:type mpi_cln_npy_result_ary: :obj:`mpi_array.globale.gndarray`
:param mpi_cln_npy_result_ary: The result returned by :samp:`{func}(*{func_args})`
converted to a cloned-distribution :obj:`mpi_array.globale.gndarray`.
:type mpi_result_ary: :obj:`mpi_array.globale.gndarray`
:param mpi_result_ary: The result array
from :meth:`mpi_array.globale.gndarray.__array_ufunc__` execution.
"""
with \
_zeros(
shape=mpi_result_ary.shape,
dtype=mpi_result_ary.dtype,
locale_type=LT_NODE,
distrib_type=DT_CLONED
) as mpi_cln_mpi_result_ary:
_copyto(dst=mpi_cln_mpi_result_ary, src=mpi_result_ary)
self.assertSequenceEqual(
tuple(mpi_cln_npy_result_ary.shape),
tuple(mpi_cln_mpi_result_ary.shape)
)
self.assertTrue(
_np.all(
mpi_cln_npy_result_ary.lndarray_proxy.view_n
==
mpi_cln_mpi_result_ary.lndarray_proxy.view_n
)
)
def convert_func_args_to_gndarrays(self, converter, func_args):
"""
:type converter: :obj:`ToGndarrayConverter`
:param converter: Used to convert the :obj:`numpy.ndarray` instances
of :samp:`{func_args}` to :obj:`mpi_array.globale.gndarray` instances.
:type func_args: sequence of :obj:`numpy.ndarray` or array-like objects
:param func_args: Sequence of array-like objects.
           Can be comprised of a mixture of :obj:`numpy.ndarray` instances, scalars
           or (broadcastable) sequence (e.g. tuple of scalars) elements.
:rtype: :obj:`list`
:return: The :samp:`{func_args}` list with :obj:`numpy.ndarray` instances
converted to :obj:`mpi_array.globale.gndarray` instances.
"""
return [converter(arg) if isinstance(arg, _np.ndarray) else arg for arg in func_args]
def do_convert_execute_and_compare(self, mpi_cln_npy_result_ary, converter, func, *func_args):
"""
Compares the result of :samp:`{func}` called
with :samp:`self.convert_func_args_to_gndarrays({converter}, {func_args})`
converted arguments with the :samp:`{mpi_cln_npy_result_ary}` array (which should
have been produced by
calling :samp:`mpi_array.globale_creation.asarray({func}(*func_args))`).
:type mpi_cln_npy_result_ary: :obj:`mpi_array.globale.gndarray`
:param mpi_cln_npy_result_ary: The result returned by :samp:`{func}(*{func_args})`
converted to a cloned-distribution :obj:`mpi_array.globale.gndarray`.
:type converter: :obj:`ToGndarrayConverter`
:param converter: Used to convert the :obj:`numpy.ndarray` instances
of :samp:`{func_args}` to :samp:`mpi_array.globale.gndarray` instances.
:type func: callable
:param func: Function which computes a new array from the :samp:`*{func_args}`
arguments and for arguments converted with :samp:`{converter}`.
:type func_args: sequence of :obj:`numpy.ndarray` or array-like objects
:param func_args: The arguments for the :samp:`{func}` function.
Can be comprised of :obj:`numpy.ndarray`, scalars or broadcastable
sequence (e.g. tuple of scalars) elements.
.. seealso: :meth:`convert_func_args_to_gndarrays`
"""
mpi_func_args = self.convert_func_args_to_gndarrays(converter, func_args)
with func(*mpi_func_args) as mpi_result_ary:
if mpi_cln_npy_result_ary is None:
mpi_cln_npy_result_ary = func(*func_args)
self.compare_results(mpi_cln_npy_result_ary, mpi_result_ary)
for arg in mpi_func_args:
if hasattr(arg, "free"):
arg.free()
def do_cloned_distribution_test(self, mpi_cln_npy_result_ary, func, *func_args):
"""
Converts :obj:`numpy.ndarray` elements of :samp:`func_args`
to :obj:`mpi_array.globale.gndarray` instances distributed as
the :attr:`mpi_array.comms.DT_CLONED` distribution type.
:type mpi_cln_npy_result_ary: :obj:`mpi_array.globale.gndarray`
:param mpi_cln_npy_result_ary: The result returned by :samp:`{func}(*{func_args})`
converted to a cloned-distribution :obj:`mpi_array.globale.gndarray`.
:type func: callable
:param func: Function which computes a new array from the :samp:`*{func_args}`
arguments.
:type func_args: sequence of :obj:`numpy.ndarray` or array-like objects
:param func_args: The arguments for the :samp:`{func}` function.
Can be comprised of :obj:`numpy.ndarray`, scalars or broadcastable
sequence (e.g. tuple of scalars) elements.
.. seealso: :meth:`do_convert_execute_and_compare`, :meth:`compare_results`
"""
converter = ToGndarrayConverter(locale_type=LT_PROCESS, distrib_type=DT_CLONED)
self.do_convert_execute_and_compare(mpi_cln_npy_result_ary, converter, func, *func_args)
converter = ToGndarrayConverter(locale_type=LT_NODE, distrib_type=DT_CLONED, halo=0)
self.do_convert_execute_and_compare(mpi_cln_npy_result_ary, converter, func, *func_args)
def do_single_locale_distribution_test(self, mpi_cln_npy_result_ary, func, *func_args):
"""
Converts :obj:`numpy.ndarray` elements of :samp:`func_args`
to :obj:`mpi_array.globale.gndarray` instances distributed as
the :attr:`mpi_array.comms.DT_SINGLE_LOCALE` distribution type.
:type mpi_cln_npy_result_ary: :obj:`mpi_array.globale.gndarray`
:param mpi_cln_npy_result_ary: The result returned by :samp:`{func}(*{func_args})`
converted to a cloned-distribution :obj:`mpi_array.globale.gndarray`.
:type func: callable
:param func: Function which computes a new array from the :samp:`*{func_args}`
arguments.
:type func_args: sequence of :obj:`numpy.ndarray` or array-like objects
:param func_args: The arguments for the :samp:`{func}` function.
Can be comprised of :obj:`numpy.ndarray`, scalars or broadcastable
sequence (e.g. tuple of scalars) elements.
.. seealso: :meth:`do_convert_execute_and_compare`
"""
class Converter(ToGndarrayConverter):
def __call__(self, np_ary):
gndary = ToGndarrayConverter.__call__(self, np_ary)
num_locales = gndary.locale_comms.num_locales
self.kwargs["inter_locale_rank"] = \
((self.kwargs["inter_locale_rank"] + 1) % num_locales)
return gndary
converter = \
Converter(locale_type=LT_PROCESS, distrib_type=DT_SINGLE_LOCALE, inter_locale_rank=0)
self.do_convert_execute_and_compare(mpi_cln_npy_result_ary, converter, func, *func_args)
converter = \
Converter(locale_type=LT_NODE, distrib_type=DT_SINGLE_LOCALE, inter_locale_rank=0)
self.do_convert_execute_and_compare(mpi_cln_npy_result_ary, converter, func, *func_args)
def do_block_distribution_test(self, mpi_cln_npy_result_ary, func, *func_args):
"""
Converts :obj:`numpy.ndarray` elements of :samp:`func_args`
to :obj:`mpi_array.globale.gndarray` instances distributed as
the :attr:`mpi_array.comms.DT_BLOCK` distribution type.
:type mpi_cln_npy_result_ary: :obj:`mpi_array.globale.gndarray`
:param mpi_cln_npy_result_ary: The result returned by :samp:`{func}(*{func_args})`
converted to a cloned-distribution :obj:`mpi_array.globale.gndarray`.
:type func: callable
:param func: Function which computes a new array from the :samp:`*{func_args}`
arguments.
:type func_args: sequence of :obj:`numpy.ndarray` or array-like objects
:param func_args: The arguments for the :samp:`{func}` function.
Can be comprised of :obj:`numpy.ndarray`, scalars or broadcastable
sequence (e.g. tuple of scalars) elements.
.. seealso: :meth:`do_convert_execute_and_compare`
"""
class Converter(ToGndarrayConverter):
def __init__(self, **kwargs):
ToGndarrayConverter.__init__(self, **kwargs)
self.dims = None
self.axis = 1
def __call__(self, np_ary):
if self.dims is None:
self.kwargs["dims"] = tuple(_np.zeros((np_ary.ndim,), dtype="int64"))
gndary = ToGndarrayConverter.__call__(self, np_ary)
self.axis = min([self.axis, np_ary.ndim])
self.kwargs["dims"] = _np.ones((np_ary.ndim,), dtype="int64")
self.kwargs["dims"][self.axis] = 0
self.kwargs["dims"] = tuple(self.kwargs["dims"])
self.dims = self.kwargs["dims"]
self.axis = ((self.axis + 1) % np_ary.ndim)
return gndary
converter = Converter(locale_type=LT_PROCESS, distrib_type=DT_BLOCK)
self.do_convert_execute_and_compare(mpi_cln_npy_result_ary, converter, func, *func_args)
converter = Converter(locale_type=LT_NODE, distrib_type=DT_BLOCK)
self.do_convert_execute_and_compare(mpi_cln_npy_result_ary, converter, func, *func_args)
def do_multi_distribution_tests(self, func, *func_args):
"""
Compares result of :samp:`{func}` called with :obj:`numpy.ndarray`
arguments and result of :samp:`{func}` called with :obj:`mpi_array.globale.gndarray`
arguments.
Executes :samp:`{func}(*{func_args})` and compares the result
with :samp:`{func}(*self.convert_func_args_to_gndarrays(converter, {func_args}))`,
        where multiple :samp:`converter` instances are used to generate
different distributions for the :obj:`mpi_array.globale.gndarray` :samp:`{func}`
arguments.
:type func: callable
:param func: Function which computes a new array from the :samp:`*{func_args}`
arguments.
:type func_args: sequence of :obj:`numpy.ndarray` or array-like objects
:param func_args: The arguments for the :samp:`{func}` function.
Can be comprised of :obj:`numpy.ndarray`, scalars or broadcastable
sequence (e.g. tuple of scalars) elements.
.. seealso: :meth:`do_cloned_distribution_test`, :meth:`do_single_locale_distribution_test`
, :meth:`do_block_distribution_test` and :meth:`do_convert_execute_and_compare`
"""
with _asarray(func(*func_args)) as mpi_cln_npy_result_ary:
self.do_cloned_distribution_test(mpi_cln_npy_result_ary, func, *func_args)
self.do_single_locale_distribution_test(mpi_cln_npy_result_ary, func, *func_args)
self.do_block_distribution_test(mpi_cln_npy_result_ary, func, *func_args)
def test_umath_multiply(self):
"""
Asserts that binary ufunc multiplication (:obj:`numpy.multiply`) computation
for :obj:`mpi_array.globale.gndarray` arguments produces same results as
for :obj:`numpy.ndarray` arguments. Tries various argument combinations
and different distribution types for the :obj:`mpi_array.globale.gndarray`
arguments.
"""
per_axis_size_factor = int(_np.floor(_np.sqrt(float(self.num_node_locales))))
gshape0 = (41 * per_axis_size_factor + 1, 43 * per_axis_size_factor + 3, 5)
npy_ary0 = _np.random.uniform(low=0.5, high=1.75, size=gshape0)
with _asarray(npy_ary0) as cln_ary:
self.assertTrue(_np.all(npy_ary0 == cln_ary.lndarray_proxy.lndarray))
def multiply(ary0, ary1):
return ary0 * ary1
self.do_multi_distribution_tests(multiply, npy_ary0, 1.0 / 3.0)
self.do_multi_distribution_tests(multiply, npy_ary0, (0.1, 0.3, 0.5, 0.7, 1.9))
self.do_multi_distribution_tests(multiply, npy_ary0, npy_ary0)
gshape1 = gshape0[0:2] + (1,)
npy_ary1 = _np.random.uniform(low=-0.5, high=2.9, size=gshape1)
self.do_multi_distribution_tests(multiply, npy_ary0, npy_ary1)
self.do_multi_distribution_tests(multiply, npy_ary1, npy_ary0)
npy_ary0 = _np.random.uniform(low=-0.5, high=2.9, size=(gshape0[0], 1, gshape0[2]))
npy_ary1 = _np.random.uniform(low=-0.5, high=2.9, size=(1, gshape0[1], gshape0[2]))
self.do_multi_distribution_tests(multiply, npy_ary1, npy_ary0)
def do_test_umath(self, halo=0, gshape=(32, 48)):
"""
Test binary op for a :obj:`mpi_array.globale.gndarray` object
and a scalar.
"""
with _ones(gshape, dtype="int32", locale_type=_comms.LT_PROCESS, halo=halo) as c:
# if True:
# c = _ones(gshape, dtype="int32", locale_type=_comms.LT_PROCESS, halo=halo)
c_orig_halo = c.distribution.halo
self.assertTrue(isinstance(c, _gndarray))
self.assertTrue((c == 1).all())
c *= 2
self.assertTrue((c == 2).all())
self.assertTrue(_np.all(c.distribution.halo == c_orig_halo))
with (c + 2) as d:
self.assertTrue(isinstance(d, _gndarray))
self.assertEqual(c.dtype, d.dtype)
self.assertTrue((d == 4).all())
self.assertTrue(_np.all(d.distribution.halo == c_orig_halo))
def test_umath_no_halo(self):
"""
Test binary op for a :obj:`mpi_array.globale.gndarray` object
and a scalar.
"""
self.do_test_umath(halo=0)
def test_umath_halo(self):
"""
Test binary op for a :obj:`mpi_array.globale.gndarray` object
and a scalar, test halo is preserved.
"""
self.do_test_umath(halo=[[1, 2], [3, 4]])
def do_test_umath_broadcast(self, halo=0, dims=(0, 0, 0)):
"""
        Test binary op for a :obj:`mpi_array.globale.gndarray` object
        and an *array-like* object which requires broadcasting to the result shape.
"""
with \
_ones(
(61, 55, 3),
dtype="int32",
locale_type=_comms.LT_PROCESS,
distrib_type=_comms.DT_BLOCK,
dims=dims,
halo=halo
) as c:
c_orig_halo = c.distribution.halo
with (c * (2, 2, 2)) as d:
self.assertTrue(isinstance(d, _gndarray))
self.assertEqual(_np.asarray((2, 2, 2)).dtype, d.dtype)
self.assertSequenceEqual(tuple(c.shape), tuple(d.shape))
self.assertSequenceEqual(d.distribution.halo.tolist(), c_orig_halo.tolist())
self.assertTrue((d.view_n == 2).all())
self.assertTrue((d == 2).all())
def test_umath_broadcast_no_halo(self):
"""
        Test binary op for a :obj:`mpi_array.globale.gndarray` object
        and an *array-like* object which requires broadcasting to the result shape.
"""
self.do_test_umath_broadcast(halo=0, dims=(0, 0, 0))
self.do_test_umath_broadcast(halo=0, dims=(1, 1, 0))
def test_umath_broadcast_halo(self):
"""
        Test binary op for a :obj:`mpi_array.globale.gndarray` object
        and an *array-like* object which requires broadcasting to the result shape.
"""
self.do_test_umath_broadcast(halo=[[1, 2], [3, 4], [2, 1]], dims=(0, 0, 0))
self.do_test_umath_broadcast(halo=[[1, 2], [3, 4], [2, 1]], dims=(1, 1, 0))
def do_test_umath_broadcast_upsized_result(
self,
halo_a=0,
halo_b=0,
dims_a=(0, 0),
dims_b=(0, 0, 0)
):
"""
Test binary op for two :obj:`mpi_array.globale.gndarray` objects
with the resulting :obj:`mpi_array.globale.gndarray` object having
different (larger) shape than that of both inputs.
"""
with \
_ones(
(19, 3),
dtype="int32",
locale_type=_comms.LT_PROCESS,
distrib_type=_comms.DT_BLOCK,
dims=dims_a,
halo=halo_a
) as a, \
_ones(
(23, 1, 3),
dtype="int32",
locale_type=_comms.LT_PROCESS,
distrib_type=_comms.DT_BLOCK,
dims=dims_b,
halo=halo_b
) as b:
with (a + b) as d:
self.assertTrue(isinstance(d, _gndarray))
self.assertSequenceEqual((b.shape[0], a.shape[0], 3), tuple(d.shape))
self.assertTrue((d == 2).all())
def test_umath_broadcast_upsized_result(self):
"""
Test binary op for two :obj:`mpi_array.globale.gndarray` objects
with the resulting :obj:`mpi_array.globale.gndarray` object having
different (larger) shape than that of both inputs.
"""
self.do_test_umath_broadcast_upsized_result(
halo_a=0,
halo_b=0,
dims_a=(0, 0),
dims_b=(0, 0, 0)
)
self.do_test_umath_broadcast_upsized_result(
halo_a=0,
halo_b=0,
dims_a=(0, 1),
dims_b=(0, 0, 1)
)
self.do_test_umath_broadcast_upsized_result(
halo_a=[[1, 2], [2, 1]],
halo_b=0,
dims_a=(0, 1),
dims_b=(0, 0, 1)
)
self.do_test_umath_broadcast_upsized_result(
halo_a=0,
halo_b=[[1, 2], [3, 4], [2, 1]],
dims_a=(0, 1),
dims_b=(0, 0, 1)
)
self.do_test_umath_broadcast_upsized_result(
halo_a=[[1, 2], [2, 1]],
halo_b=[[1, 2], [3, 4], [2, 1]],
dims_a=(0, 1),
dims_b=(0, 0, 1)
)
def do_test_umath_distributed_broadcast(self, halo_a=0, halo_b=0):
"""
Test binary op for two :obj:`mpi_array.globale.gndarray` objects
which requires remote fetch of data when broadcasting to result shape.
"""
with \
_ones((61, 53, 5), dtype="int32", locale_type=_comms.LT_PROCESS, halo=halo_a) as a,\
_ones(a.shape, dtype="int32", locale_type=_comms.LT_PROCESS, halo=halo_b) as b:
a_orig_halo = a.distribution.halo
b_orig_halo = b.distribution.halo
with (a + b) as c:
self.assertTrue(isinstance(c, _gndarray))
self.assertTrue((c == 2).all())
self.assertSequenceEqual(c.distribution.halo.tolist(), a_orig_halo.tolist())
with \
_ones(
tuple(a.shape[1:]),
dtype=c.dtype,
locale_type=_comms.LT_PROCESS,
dims=(0, 1),
halo=b_orig_halo[1:]
) as twos:
twos.fill_h(2)
with (a * twos) as d:
self.assertTrue(isinstance(d, _gndarray))
self.assertSequenceEqual(tuple(a.shape), tuple(d.shape))
self.assertTrue((d == 2).all())
def test_umath_distributed_broadcast_no_halo(self):
"""
Test binary op for two :obj:`mpi_array.globale.gndarray` objects
which requires remote fetch of data when broadcasting to result shape.
"""
self.do_test_umath_distributed_broadcast(halo_a=0, halo_b=0)
def test_umath_distributed_broadcast_halo(self):
"""
Test binary op for two :obj:`mpi_array.globale.gndarray` objects
which requires remote fetch of data when broadcasting to result shape.
Ghost elements added to arrays.
"""
self.do_test_umath_distributed_broadcast(halo_a=[[1, 2], [3, 4], [2, 1]], halo_b=0)
self.do_test_umath_distributed_broadcast(halo_a=0, halo_b=[[1, 2], [3, 4], [2, 1]])
self.do_test_umath_distributed_broadcast(
halo_a=[[2, 1], [4, 3], [1, 2]],
halo_b=[[1, 2], [3, 4], [2, 1]]
)
def test_ufunc_casting_arg(self):
"""
Test ufunc with casting argument.
"""
self.rank_logger.info("Calling ufunc with 'casting' kwarg.")
with \
_ones(
(64, 32, 100),
dtype="float32",
locale_type=_comms.LT_PROCESS,
dims=(0, 0, 0),
halo=2
) as a:
_np.add(a, 1, casting="same_kind", out=a)
self.assertTrue((a == 2).all())
def test_check_equivalent_inter_locale_comms(self):
with \
_empty((50, 50, 50), locale_type=LT_NODE) as gary0,\
_empty((50, 50, 50), locale_type=LT_PROCESS) as gary1:
if gary1.distribution.num_locales > 1:
self.assertRaises(
ValueError,
check_equivalent_inter_locale_comms,
(gary0, gary1)
)
def test_not_implemented(self):
uf = _np.add
with _empty((50, 50, 50), locale_type=LT_NODE) as gary0:
for method in ["reduce", "accumulate", "reduceat", "at", "outer"]:
self.assertRaises(
TypeError,
getattr(uf, method),
gary0,
5.0
)
_unittest.main(__name__)
__all__ = [s for s in dir() if not s.startswith('_')]
|
mpi-array/mpi_array
|
mpi_array/globale_ufunc_test.py
|
Python
|
mit
| 33,737
|
import atexit
from concurrent import futures
from dataclasses import dataclass
import grpc
import logging
from itertools import chain
import json
import psutil
import socket
import sys
from threading import Lock, Thread, RLock
import time
import traceback
from typing import Any, Callable, Dict, List, Optional, Tuple
import ray
from ray.cloudpickle.compat import pickle
from ray.job_config import JobConfig
import ray.core.generated.ray_client_pb2 as ray_client_pb2
import ray.core.generated.ray_client_pb2_grpc as ray_client_pb2_grpc
from ray.util.client.common import (ClientServerHandle,
CLIENT_SERVER_MAX_THREADS, GRPC_OPTIONS)
from ray._private.parameter import RayParams
from ray._private.services import ProcessInfo, start_ray_client_server
from ray._private.utils import detect_fate_sharing_support
logger = logging.getLogger(__name__)
CHECK_PROCESS_INTERVAL_S = 30
MIN_SPECIFIC_SERVER_PORT = 23000
MAX_SPECIFIC_SERVER_PORT = 24000
CHECK_CHANNEL_TIMEOUT_S = 10
LOGSTREAM_RETRIES = 5
LOGSTREAM_RETRY_INTERVAL_SEC = 2
def _get_client_id_from_context(context: Any) -> str:
"""
Get `client_id` from gRPC metadata. If the `client_id` is not present,
this function logs an error and sets the status_code.
"""
metadata = {k: v for k, v in context.invocation_metadata()}
client_id = metadata.get("client_id") or ""
if client_id == "":
logger.error("Client connecting with no client_id")
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
return client_id
@dataclass
class SpecificServer:
port: int
process_handle_future: futures.Future
channel: "grpc._channel.Channel"
def wait_ready(self, timeout: Optional[float] = None) -> None:
"""
Wait for the server to actually start up.
"""
res = self.process_handle_future.result(timeout=timeout)
if res is None:
# This is only set to none when server creation specifically fails.
raise RuntimeError("Server startup failed.")
def poll(self) -> Optional[int]:
"""Check if the process has exited."""
try:
proc = self.process_handle_future.result(timeout=0.1)
if proc is not None:
return proc.process.poll()
except futures.TimeoutError:
return
def kill(self) -> None:
"""Try to send a KILL signal to the process."""
try:
proc = self.process_handle_future.result(timeout=0.1)
if proc is not None:
proc.process.kill()
except futures.TimeoutError:
# Server has not been started yet.
pass
def set_result(self, proc: Optional[ProcessInfo]) -> None:
"""Set the result of the internal future if it is currently unset."""
if not self.process_handle_future.done():
self.process_handle_future.set_result(proc)
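# Lifecycle note for SpecificServer above: `process_handle_future` is the
# single synchronization point -- wait_ready() blocks on it, poll() and kill()
# only act once it has resolved, and set_result(None) marks a failed startup
# so that waiters raise instead of hanging.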
def _match_running_client_server(command: List[str]) -> bool:
"""
Detects if the main process in the given command is the RayClient Server.
    This works by ensuring that the first three arguments are similar to:
<python> -m ray.util.client.server
"""
flattened = " ".join(command)
rejoined = flattened.split()
if len(rejoined) < 3:
return False
return rejoined[1:3] == ["-m", "ray.util.client.server"]
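# Illustrative example (hypothetical command lines): a cmdline such as
#     ["/usr/bin/python", "-m", "ray.util.client.server"]
# matches, while a two-element ["/usr/bin/python", "worker.py"] does not.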
class ProxyManager():
def __init__(self,
redis_address: Optional[str],
session_dir: Optional[str] = None):
self.servers: Dict[str, SpecificServer] = dict()
self.server_lock = RLock()
self._redis_address = redis_address
self._free_ports: List[int] = list(
range(MIN_SPECIFIC_SERVER_PORT, MAX_SPECIFIC_SERVER_PORT))
self._check_thread = Thread(target=self._check_processes, daemon=True)
self._check_thread.start()
self.fate_share = bool(detect_fate_sharing_support())
self._node: Optional[ray.node.Node] = None
atexit.register(self._cleanup)
def _get_unused_port(self) -> int:
"""
Search for a port in _free_ports that is unused.
"""
with self.server_lock:
num_ports = len(self._free_ports)
for _ in range(num_ports):
port = self._free_ports.pop(0)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(("", port))
except OSError:
self._free_ports.append(port)
continue
finally:
s.close()
return port
raise RuntimeError("Unable to succeed in selecting a random port.")
@property
def redis_address(self) -> str:
"""
Returns the provided Ray Redis address, or creates a new cluster.
"""
if self._redis_address:
return self._redis_address
# Start a new, locally scoped cluster.
connection_tuple = ray.init()
self._redis_address = connection_tuple["redis_address"]
self._session_dir = connection_tuple["session_dir"]
return self._redis_address
@property
def node(self) -> ray.node.Node:
"""Gets a 'ray.Node' object for this node (the head node).
If it does not already exist, one is created using the redis_address.
"""
if self._node:
return self._node
self._node = ray.node.Node(
RayParams(redis_address=self.redis_address),
head=False,
shutdown_at_exit=False,
spawn_reaper=False,
connect_only=True)
return self._node
def create_specific_server(self, client_id: str) -> SpecificServer:
"""
        Create, but do not start, a SpecificServer for a given client. This
method must be called once per client.
"""
with self.server_lock:
assert self.servers.get(client_id) is None, (
f"Server already created for Client: {client_id}")
port = self._get_unused_port()
server = SpecificServer(
port=port,
process_handle_future=futures.Future(),
channel=grpc.insecure_channel(
f"localhost:{port}", options=GRPC_OPTIONS))
self.servers[client_id] = server
return server
def start_specific_server(self, client_id: str,
job_config: JobConfig) -> bool:
"""
Start up a RayClient Server for an incoming client to
communicate with. Returns whether creation was successful.
"""
specific_server = self._get_server_for_client(client_id)
assert specific_server, f"Server has not been created for: {client_id}"
serialized_runtime_env = job_config.get_serialized_runtime_env()
output, error = self.node.get_log_file_handles(
f"ray_client_server_{specific_server.port}", unique=True)
proc = start_ray_client_server(
self.redis_address,
specific_server.port,
stdout_file=output,
stderr_file=error,
fate_share=self.fate_share,
server_type="specific-server",
serialized_runtime_env=serialized_runtime_env,
session_dir=self.node.get_session_dir_path())
        # Wait until the process being run transitions from the shim process
# to the actual RayClient Server.
pid = proc.process.pid
if sys.platform != "win32":
psutil_proc = psutil.Process(pid)
else:
psutil_proc = None
# Don't use `psutil` on Win32
while psutil_proc is not None:
if proc.process.poll() is not None:
logger.error(
f"SpecificServer startup failed for client: {client_id}")
break
cmd = psutil_proc.cmdline()
if _match_running_client_server(cmd):
break
logger.debug(
"Waiting for Process to reach the actual client server.")
time.sleep(0.5)
specific_server.set_result(proc)
logger.info(f"SpecificServer started on port: {specific_server.port} "
f"with PID: {pid} for client: {client_id}")
return proc.process.poll() is None
def _get_server_for_client(self,
client_id: str) -> Optional[SpecificServer]:
with self.server_lock:
client = self.servers.get(client_id)
if client is None:
logger.error(f"Unable to find channel for client: {client_id}")
return client
def get_channel(
self,
client_id: str,
) -> Optional["grpc._channel.Channel"]:
"""
Find the gRPC Channel for the given client_id. This will block until
the server process has started.
"""
server = self._get_server_for_client(client_id)
if server is None:
return None
# Wait for the SpecificServer to become ready.
server.wait_ready()
try:
grpc.channel_ready_future(
server.channel).result(timeout=CHECK_CHANNEL_TIMEOUT_S)
return server.channel
except grpc.FutureTimeoutError:
logger.exception(f"Timeout waiting for channel for {client_id}")
return None
def _check_processes(self):
"""
Keeps the internal servers dictionary up-to-date with running servers.
"""
while True:
with self.server_lock:
for client_id, specific_server in list(self.servers.items()):
if specific_server.poll() is not None:
del self.servers[client_id]
# Port is available to use again.
self._free_ports.append(specific_server.port)
time.sleep(CHECK_PROCESS_INTERVAL_S)
def _cleanup(self) -> None:
"""
Forcibly kill all spawned RayClient Servers. This ensures cleanup
for platforms where fate sharing is not supported.
"""
for server in self.servers.values():
server.kill()
class RayletServicerProxy(ray_client_pb2_grpc.RayletDriverServicer):
def __init__(self, ray_connect_handler: Callable,
proxy_manager: ProxyManager):
self.proxy_manager = proxy_manager
self.ray_connect_handler = ray_connect_handler
def _call_inner_function(
self, request, context,
method: str) -> Optional[ray_client_pb2_grpc.RayletDriverStub]:
client_id = _get_client_id_from_context(context)
chan = self.proxy_manager.get_channel(client_id)
if not chan:
logger.error(f"Channel for Client: {client_id} not found!")
context.set_code(grpc.StatusCode.NOT_FOUND)
return None
stub = ray_client_pb2_grpc.RayletDriverStub(chan)
try:
return getattr(stub, method)(
request, metadata=[("client_id", client_id)])
except Exception:
logger.exception(f"Proxying call to {method} failed!")
def Init(self, request, context=None) -> ray_client_pb2.InitResponse:
return self._call_inner_function(request, context, "Init")
def PrepRuntimeEnv(self, request,
context=None) -> ray_client_pb2.PrepRuntimeEnvResponse:
return self._call_inner_function(request, context, "PrepRuntimeEnv")
def KVPut(self, request, context=None) -> ray_client_pb2.KVPutResponse:
return self._call_inner_function(request, context, "KVPut")
def KVGet(self, request, context=None) -> ray_client_pb2.KVGetResponse:
return self._call_inner_function(request, context, "KVGet")
def KVDel(self, request, context=None) -> ray_client_pb2.KVDelResponse:
return self._call_inner_function(request, context, "KVGet")
def KVList(self, request, context=None) -> ray_client_pb2.KVListResponse:
return self._call_inner_function(request, context, "KVList")
def KVExists(self, request,
context=None) -> ray_client_pb2.KVExistsResponse:
return self._call_inner_function(request, context, "KVExists")
def ClusterInfo(self, request,
context=None) -> ray_client_pb2.ClusterInfoResponse:
# NOTE: We need to respond to the PING request here to allow the client
# to continue with connecting.
if request.type == ray_client_pb2.ClusterInfoType.PING:
resp = ray_client_pb2.ClusterInfoResponse(json=json.dumps({}))
return resp
return self._call_inner_function(request, context, "ClusterInfo")
def Terminate(self, req, context=None):
return self._call_inner_function(req, context, "Terminate")
def GetObject(self, request, context=None):
return self._call_inner_function(request, context, "GetObject")
def PutObject(self, request: ray_client_pb2.PutRequest,
context=None) -> ray_client_pb2.PutResponse:
return self._call_inner_function(request, context, "PutObject")
def WaitObject(self, request, context=None) -> ray_client_pb2.WaitResponse:
return self._call_inner_function(request, context, "WaitObject")
def Schedule(self, task, context=None) -> ray_client_pb2.ClientTaskTicket:
return self._call_inner_function(task, context, "Schedule")
def ray_client_server_env_prep(job_config: JobConfig) -> JobConfig:
return job_config
def prepare_runtime_init_req(init_request: ray_client_pb2.DataRequest
) -> Tuple[ray_client_pb2.DataRequest, JobConfig]:
"""
Extract JobConfig and possibly mutate InitRequest before it is passed to
the specific RayClient Server.
"""
init_type = init_request.WhichOneof("type")
assert init_type == "init", ("Received initial message of type "
f"{init_type}, not 'init'.")
req = init_request.init
job_config = JobConfig()
if req.job_config:
job_config = pickle.loads(req.job_config)
new_job_config = ray_client_server_env_prep(job_config)
modified_init_req = ray_client_pb2.InitRequest(
job_config=pickle.dumps(new_job_config))
init_request.init.CopyFrom(modified_init_req)
return (init_request, new_job_config)
class DataServicerProxy(ray_client_pb2_grpc.RayletDataStreamerServicer):
def __init__(self, proxy_manager: ProxyManager):
self.num_clients = 0
self.clients_lock = Lock()
self.proxy_manager = proxy_manager
def modify_connection_info_resp(self,
init_resp: ray_client_pb2.DataResponse
) -> ray_client_pb2.DataResponse:
"""
        Modify the `num_clients` returned in the ConnectionInfoResponse,
        because individual SpecificServers only have **one** client.
"""
init_type = init_resp.WhichOneof("type")
if init_type != "connection_info":
return init_resp
modified_resp = ray_client_pb2.DataResponse()
modified_resp.CopyFrom(init_resp)
with self.clients_lock:
modified_resp.connection_info.num_clients = self.num_clients
return modified_resp
def Datapath(self, request_iterator, context):
client_id = _get_client_id_from_context(context)
if client_id == "":
return
# Create Placeholder *before* reading the first request.
server = self.proxy_manager.create_specific_server(client_id)
try:
with self.clients_lock:
self.num_clients += 1
logger.info(f"New data connection from client {client_id}: ")
init_req = next(request_iterator)
try:
modified_init_req, job_config = prepare_runtime_init_req(
init_req)
if not self.proxy_manager.start_specific_server(
client_id, job_config):
logger.error(
f"Server startup failed for client: {client_id}, "
f"using JobConfig: {job_config}!")
raise RuntimeError(
"Starting up Server Failed! Check "
"`ray_client_server.err` on the cluster.")
channel = self.proxy_manager.get_channel(client_id)
if channel is None:
logger.error(f"Channel not found for {client_id}")
raise RuntimeError(
"Proxy failed to Connect to backend! Check "
"`ray_client_server.err` on the cluster.")
stub = ray_client_pb2_grpc.RayletDataStreamerStub(channel)
except Exception:
init_resp = ray_client_pb2.DataResponse(
init=ray_client_pb2.InitResponse(
ok=False, msg=traceback.format_exc()))
init_resp.req_id = init_req.req_id
yield init_resp
return None
new_iter = chain([modified_init_req], request_iterator)
resp_stream = stub.Datapath(
new_iter, metadata=[("client_id", client_id)])
for resp in resp_stream:
yield self.modify_connection_info_resp(resp)
except Exception:
logger.exception("Proxying Datapath failed!")
finally:
server.set_result(None)
with self.clients_lock:
logger.debug(f"Client detached: {client_id}")
self.num_clients -= 1
class LogstreamServicerProxy(ray_client_pb2_grpc.RayletLogStreamerServicer):
def __init__(self, proxy_manager: ProxyManager):
super().__init__()
self.proxy_manager = proxy_manager
def Logstream(self, request_iterator, context):
client_id = _get_client_id_from_context(context)
if client_id == "":
return
logger.debug(f"New logstream connection from client {client_id}: ")
channel = None
# We need to retry a few times because the LogClient *may* connect
        # before the DataClient has finished connecting.
for i in range(LOGSTREAM_RETRIES):
channel = self.proxy_manager.get_channel(client_id)
if channel is not None:
break
logger.warning(
f"Retrying Logstream connection. {i+1} attempts failed.")
time.sleep(LOGSTREAM_RETRY_INTERVAL_SEC)
if channel is None:
context.set_code(grpc.StatusCode.UNAVAILABLE)
return None
stub = ray_client_pb2_grpc.RayletLogStreamerStub(channel)
resp_stream = stub.Logstream(
request_iterator, metadata=[("client_id", client_id)])
try:
for resp in resp_stream:
yield resp
except Exception:
logger.exception("Proxying Logstream failed!")
def serve_proxier(connection_str: str,
redis_address: str,
session_dir: Optional[str] = None):
server = grpc.server(
futures.ThreadPoolExecutor(max_workers=CLIENT_SERVER_MAX_THREADS),
options=GRPC_OPTIONS)
proxy_manager = ProxyManager(redis_address, session_dir)
task_servicer = RayletServicerProxy(None, proxy_manager)
data_servicer = DataServicerProxy(proxy_manager)
logs_servicer = LogstreamServicerProxy(proxy_manager)
ray_client_pb2_grpc.add_RayletDriverServicer_to_server(
task_servicer, server)
ray_client_pb2_grpc.add_RayletDataStreamerServicer_to_server(
data_servicer, server)
ray_client_pb2_grpc.add_RayletLogStreamerServicer_to_server(
logs_servicer, server)
server.add_insecure_port(connection_str)
server.start()
return ClientServerHandle(
task_servicer=task_servicer,
data_servicer=data_servicer,
logs_servicer=logs_servicer,
grpc_server=server,
)
|
pcmoritz/ray-1
|
python/ray/util/client/server/proxier.py
|
Python
|
apache-2.0
| 20,149
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Structured error classes in TVM.
Each error class takes an error message as its input.
See the example sections for suggested message conventions.
To make the code more readable, we recommend that developers
copy the examples and raise errors with the same message convention.
.. note::
Please also refer to :ref:`error-handling-guide`.
"""
from tvm._ffi.base import register_error, TVMError
@register_error
class InternalError(TVMError):
"""Internal error in the system.
Examples
--------
.. code :: c++
// Example code C++
LOG(FATAL) << "InternalError: internal error detail.";
.. code :: python
# Example code in python
raise InternalError("internal error detail")
"""
def __init__(self, msg):
# Patch up additional hint message.
if "TVM hint:" not in msg:
msg += (
"\nTVM hint: You hit an internal error. "
+ "Please open a thread on https://discuss.tvm.apache.org/ to report it."
)
super(InternalError, self).__init__(msg)
register_error("ValueError", ValueError)
register_error("TypeError", TypeError)
register_error("AttributeError", AttributeError)
register_error("KeyError", KeyError)
register_error("IndexError", IndexError)
@register_error
class RPCError(TVMError):
"""Error thrown by the remote server handling the RPC call."""
@register_error
class OpError(TVMError):
"""Base class of all operator errors in frontends."""
@register_error
class OpNotImplemented(OpError, NotImplementedError):
"""Operator is not implemented.
Example
-------
.. code:: python
raise OpNotImplemented(
"Operator {} is not supported in {} frontend".format(
missing_op, frontend_name))
"""
@register_error
class OpAttributeRequired(OpError, AttributeError):
"""Required attribute is not found.
Example
-------
.. code:: python
raise OpAttributeRequired(
"Required attribute {} not found in operator {}".format(
attr_name, op_name))
"""
@register_error
class OpAttributeInvalid(OpError, AttributeError):
"""Attribute value is invalid when taking in a frontend operator.
Example
-------
.. code:: python
raise OpAttributeInvalid(
"Value {} in attribute {} of operator {} is not valid".format(
value, attr_name, op_name))
"""
@register_error
class OpAttributeUnImplemented(OpError, NotImplementedError):
"""Attribute is not supported in a certain frontend.
Example
-------
.. code:: python
raise OpAttributeUnImplemented(
"Attribute {} is not supported in operator {}".format(
attr_name, op_name))
"""
@register_error
class DiagnosticError(TVMError):
"""Error diagnostics were reported during the execution of a pass.
See the configured diagnostic renderer for detailed error information.
"""
|
Laurawly/tvm-1
|
python/tvm/error.py
|
Python
|
apache-2.0
| 3,795
|
from ansiblelint import AnsibleLintRule
class ShellHasCreates(AnsibleLintRule):
id = 'E511'
shortdesc = 'Shell/command module must contain creates or removes'
description = ''
tags = ['shell']
def matchtask(self, file, task):
if task['action']['__ansible_module__'] not in ['shell', 'command']:
return False
if 'creates' in task['action'] or 'removes' in task['action']:
return False
if 'register' in task:
return False
return True
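# Illustrative example (not part of the rule definition): a task such as
#   - command: /usr/bin/make_database.sh arg1 arg2
# would be flagged by E511, while adding `creates: /path/to/database` (or
# `removes:`), or registering the result with `register:`, makes it pass.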
|
tsukinowasha/ansible-lint-rules
|
rules/ShellHasCreates.py
|
Python
|
mit
| 522
|
# Copyright 2009 Google Inc. Released under the GPL v2
"""
This module defines the base classes for the Host hierarchy.
Implementation details:
You should import the "hosts" package instead of importing each type of host.
Host: a machine on which you can run programs
"""
__author__ = """
mbligh@google.com (Martin J. Bligh),
poirier@google.com (Benjamin Poirier),
stutsman@google.com (Ryan Stutsman)
"""
import cPickle, cStringIO, logging, os, re, time
from autotest_lib.client.common_lib import global_config, error, utils
from autotest_lib.client.common_lib import host_protections
from autotest_lib.client.bin import partition
class Host(object):
"""
This class represents a machine on which you can run programs.
It may be a local machine, the one autoserv is running on, a remote
machine or a virtual machine.
Implementation details:
This is an abstract class, leaf subclasses must implement the methods
listed here. You must not instantiate this class but should
instantiate one of those leaf subclasses.
When overriding methods that raise NotImplementedError, the leaf class
is fully responsible for the implementation and should not chain calls
to super. When overriding methods that are a NOP in Host, the subclass
should chain calls to super(). The criteria for fitting a new method into
one category or the other should be:
1. If two separate generic implementations could reasonably be
concatenated, then the abstract implementation should pass and
subclasses should chain calls to super.
2. If only one class could reasonably perform the stated function
(e.g. two separate run() implementations cannot both be executed)
then the method should raise NotImplementedError in Host, and
the implementor should NOT chain calls to super, to ensure that
only one implementation ever gets executed.
"""
job = None
DEFAULT_REBOOT_TIMEOUT = 1800
WAIT_DOWN_REBOOT_TIMEOUT = 840
WAIT_DOWN_REBOOT_WARNING = 540
HOURS_TO_WAIT_FOR_RECOVERY = 2.5
# the number of hardware repair requests that need to happen before we
# actually send machines to hardware repair
HARDWARE_REPAIR_REQUEST_THRESHOLD = 4
def __init__(self, *args, **dargs):
self._initialize(*args, **dargs)
def _initialize(self, *args, **dargs):
self._already_repaired = []
self._removed_files = False
def close(self):
pass
def setup(self):
pass
def run(self, command, timeout=3600, ignore_status=False,
stdout_tee=utils.TEE_TO_LOGS, stderr_tee=utils.TEE_TO_LOGS,
stdin=None, args=()):
"""
Run a command on this host.
@param command: the command line string
@param timeout: time limit in seconds before attempting to
kill the running process. The run() function
will take a few seconds longer than 'timeout'
to complete if it has to kill the process.
@param ignore_status: do not raise an exception, no matter
what the exit code of the command is.
@param stdout_tee/stderr_tee: where to tee the stdout/stderr
@param stdin: stdin to pass (a string) to the executed command
@param args: sequence of strings to pass as arguments to command by
quoting them in " and escaping their contents if necessary
@return a utils.CmdResult object
@raises AutotestHostRunError: the exit code of the command execution
was not 0 and ignore_status was not enabled
"""
raise NotImplementedError('Run not implemented!')
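    # Illustrative usage against a concrete subclass that implements run()
    # (hypothetical host object):
    #   result = host.run('uname -r', timeout=60, ignore_status=True)
    #   logging.info('kernel: %s (exit %d)', result.stdout.strip(),
    #                result.exit_status)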
def run_output(self, command, *args, **dargs):
return self.run(command, *args, **dargs).stdout.rstrip()
def reboot(self):
raise NotImplementedError('Reboot not implemented!')
def sysrq_reboot(self):
raise NotImplementedError('Sysrq reboot not implemented!')
def reboot_setup(self, *args, **dargs):
pass
def reboot_followup(self, *args, **dargs):
pass
def get_file(self, source, dest, delete_dest=False):
raise NotImplementedError('Get file not implemented!')
def send_file(self, source, dest, delete_dest=False):
raise NotImplementedError('Send file not implemented!')
def get_tmp_dir(self):
raise NotImplementedError('Get temp dir not implemented!')
def is_up(self):
raise NotImplementedError('Is up not implemented!')
def is_shutting_down(self):
""" Indicates is a machine is currently shutting down. """
# runlevel() may not be available, so wrap it in try block.
try:
runlevel = int(self.run("runlevel").stdout.strip().split()[1])
return runlevel in (0, 6)
except:
return False
def get_wait_up_processes(self):
""" Gets the list of local processes to wait for in wait_up. """
get_config = global_config.global_config.get_config_value
proc_list = get_config("HOSTS", "wait_up_processes",
default="").strip()
processes = set(p.strip() for p in proc_list.split(","))
processes.discard("")
return processes
def get_boot_id(self, timeout=60):
""" Get a unique ID associated with the current boot.
Should return a string with the semantics such that two separate
calls to Host.get_boot_id() return the same string if the host did
not reboot between the two calls, and two different strings if it
has rebooted at least once between the two calls.
@param timeout The number of seconds to wait before timing out.
@return A string unique to this boot or None if not available."""
BOOT_ID_FILE = '/proc/sys/kernel/random/boot_id'
NO_ID_MSG = 'no boot_id available'
cmd = 'if [ -f %r ]; then cat %r; else echo %r; fi' % (
BOOT_ID_FILE, BOOT_ID_FILE, NO_ID_MSG)
boot_id = self.run(cmd, timeout=timeout).stdout.strip()
if boot_id == NO_ID_MSG:
return None
return boot_id
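    # Illustrative reboot-detection pattern built on get_boot_id():
    #   old_id = host.get_boot_id()
    #   host.reboot()
    #   rebooted = (host.get_boot_id() != old_id)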
def wait_up(self, timeout=None):
raise NotImplementedError('Wait up not implemented!')
def wait_down(self, timeout=None, warning_timer=None, old_boot_id=None):
raise NotImplementedError('Wait down not implemented!')
def wait_for_restart(self, timeout=DEFAULT_REBOOT_TIMEOUT,
log_failure=True, old_boot_id=None, **dargs):
""" Wait for the host to come back from a reboot. This is a generic
implementation based entirely on wait_up and wait_down. """
if not self.wait_down(timeout=self.WAIT_DOWN_REBOOT_TIMEOUT,
warning_timer=self.WAIT_DOWN_REBOOT_WARNING,
old_boot_id=old_boot_id):
if log_failure:
self.record("ABORT", None, "reboot.verify", "shut down failed")
raise error.AutoservShutdownError("Host did not shut down")
self.wait_up(timeout)
time.sleep(2) # this is needed for complete reliability
if self.wait_up(timeout):
self.record("GOOD", None, "reboot.verify")
self.reboot_followup(**dargs)
else:
self.record("ABORT", None, "reboot.verify",
"Host did not return from reboot")
raise error.AutoservRebootError("Host did not return from reboot")
def verify(self):
self.verify_hardware()
self.verify_connectivity()
self.verify_software()
def verify_hardware(self):
pass
def verify_connectivity(self):
pass
def verify_software(self):
pass
def check_diskspace(self, path, gb):
"""Raises an error if path does not have at least gb GB free.
@param path The path to check for free disk space.
@param gb A floating point number to compare with a granularity
of 1 MB.
1000 based SI units are used.
@raises AutoservDiskFullHostError if path has less than gb GB free.
"""
one_mb = 10**6 # Bytes (SI unit).
mb_per_gb = 1000.0
logging.info('Checking for >= %s GB of space under %s on machine %s',
gb, path, self.hostname)
df = self.run('df -PB %d %s | tail -1' % (one_mb, path)).stdout.split()
free_space_gb = int(df[3])/mb_per_gb
if free_space_gb < gb:
raise error.AutoservDiskFullHostError(path, gb, free_space_gb)
else:
logging.info('Found %s GB >= %s GB of space under %s on machine %s',
free_space_gb, gb, path, self.hostname)
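    # Illustrative usage: raises AutoservDiskFullHostError unless /var/tmp has
    # at least 2.0 GB free (1000-based SI units):
    #   host.check_diskspace('/var/tmp', 2.0)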
def get_open_func(self, use_cache=True):
"""
Defines and returns a function that may be used instead of built-in
open() to open and read files. The returned function is implemented
by using self.run('cat <file>') and may cache the results for the same
filename.
@param use_cache Cache results of self.run('cat <filename>') for the
same filename
@return a function that can be used instead of built-in open()
"""
cached_files = {}
def open_func(filename):
if not use_cache or filename not in cached_files:
output = self.run('cat \'%s\'' % filename,
stdout_tee=open('/dev/null', 'w')).stdout
fd = cStringIO.StringIO(output)
if not use_cache:
return fd
cached_files[filename] = fd
else:
cached_files[filename].seek(0)
return cached_files[filename]
return open_func
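    # Illustrative usage (reads the remote file via `cat` and caches the
    # result for repeated opens of the same filename):
    #   open_func = host.get_open_func()
    #   meminfo = open_func('/proc/meminfo').read()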
def check_partitions(self, root_part, filter_func=None):
""" Compare the contents of /proc/partitions with those of
/proc/mounts and raise exception in case unmounted partitions are found
root_part: in Linux /proc/mounts will never directly mention the root
partition as being mounted on / instead it will say that /dev/root is
mounted on /. Thus require this argument to filter out the root_part
from the ones checked to be mounted
filter_func: unnary predicate for additional filtering out of
partitions required to be mounted
Raise: error.AutoservHostError if unfiltered unmounted partition found
"""
        logging.info('Checking if non-swap partitions are mounted...')
unmounted = partition.get_unmounted_partition_list(root_part,
filter_func=filter_func, open_func=self.get_open_func())
if unmounted:
raise error.AutoservNotMountedHostError(
'Found unmounted partitions: %s' %
[part.device for part in unmounted])
def _repair_wait_for_reboot(self):
TIMEOUT = int(self.HOURS_TO_WAIT_FOR_RECOVERY * 3600)
if self.is_shutting_down():
logging.info('Host is shutting down, waiting for a restart')
self.wait_for_restart(TIMEOUT)
else:
self.wait_up(TIMEOUT)
def _get_mountpoint(self, path):
"""Given a "path" get the mount point of the filesystem containing
that path."""
code = ('import os\n'
# sanitize the path and resolve symlinks
'path = os.path.realpath(%r)\n'
"while path != '/' and not os.path.ismount(path):\n"
' path, _ = os.path.split(path)\n'
'print path\n') % path
return self.run('python -c "%s"' % code,
stdout_tee=open(os.devnull, 'w')).stdout.rstrip()
def erase_dir_contents(self, path, ignore_status=True, timeout=3600):
"""Empty a given directory path contents."""
rm_cmd = 'find "%s" -mindepth 1 -maxdepth 1 -print0 | xargs -0 rm -rf'
self.run(rm_cmd % path, ignore_status=ignore_status, timeout=timeout)
self._removed_files = True
def repair_full_disk(self, mountpoint):
# it's safe to remove /tmp and /var/tmp, site specific overrides may
# want to remove some other places too
if mountpoint == self._get_mountpoint('/tmp'):
self.erase_dir_contents('/tmp')
if mountpoint == self._get_mountpoint('/var/tmp'):
self.erase_dir_contents('/var/tmp')
def _call_repair_func(self, err, func, *args, **dargs):
for old_call in self._already_repaired:
if old_call == (func, args, dargs):
# re-raising the original exception because surrounding
# error handling may want to try other ways to fix it
logging.warn('Already done this (%s) repair procedure, '
're-raising the original exception.', func)
raise err
try:
func(*args, **dargs)
except (error.AutoservHardwareRepairRequestedError,
error.AutoservHardwareRepairRequiredError):
# let these special exceptions propagate
raise
except error.AutoservError:
logging.exception('Repair failed but continuing in case it managed'
' to repair enough')
self._already_repaired.append((func, args, dargs))
def repair_filesystem_only(self):
"""perform file system repairs only"""
while True:
# try to repair specific problems
try:
logging.info('Running verify to find failures to repair...')
self.verify()
if self._removed_files:
logging.info('Removed files, rebooting to release the'
' inodes')
self.reboot()
return # verify succeeded, then repair succeeded
except error.AutoservHostIsShuttingDownError, err:
logging.exception('verify failed')
self._call_repair_func(err, self._repair_wait_for_reboot)
except error.AutoservDiskFullHostError, err:
logging.exception('verify failed')
self._call_repair_func(err, self.repair_full_disk,
self._get_mountpoint(err.path))
def repair_software_only(self):
"""perform software repairs only"""
while True:
try:
self.repair_filesystem_only()
break
except (error.AutoservSshPingHostError, error.AutoservSSHTimeout,
error.AutoservSshPermissionDeniedError,
error.AutoservDiskFullHostError), err:
logging.exception('verify failed')
logging.info('Trying to reinstall the machine')
self._call_repair_func(err, self.machine_install)
def repair_full(self):
hardware_repair_requests = 0
while True:
try:
self.repair_software_only()
break
except error.AutoservHardwareRepairRequiredError, err:
logging.exception('software repair failed, '
'hardware repair requested')
hardware_repair_requests += 1
try_hardware_repair = (hardware_repair_requests >=
self.HARDWARE_REPAIR_REQUEST_THRESHOLD)
if try_hardware_repair:
logging.info('hardware repair requested %d times, '
'trying hardware repair',
hardware_repair_requests)
self._call_repair_func(err, self.request_hardware_repair)
else:
logging.info('hardware repair requested %d times, '
'trying software repair again',
hardware_repair_requests)
except error.AutoservHardwareHostError, err:
logging.exception('verify failed')
# software repair failed, try hardware repair
logging.info('Hardware problem found, '
'requesting hardware repairs')
self._call_repair_func(err, self.request_hardware_repair)
def repair_with_protection(self, protection_level):
"""Perform the maximal amount of repair within the specified
protection level.
@param protection_level: the protection level to use for limiting
repairs, a host_protections.Protection
"""
protection = host_protections.Protection
if protection_level == protection.DO_NOT_REPAIR:
logging.info('Protection is "Do not repair" so just verifying')
self.verify()
elif protection_level == protection.REPAIR_FILESYSTEM_ONLY:
logging.info('Attempting filesystem-only repair')
self.repair_filesystem_only()
elif protection_level == protection.REPAIR_SOFTWARE_ONLY:
logging.info('Attempting software repair only')
self.repair_software_only()
elif protection_level == protection.NO_PROTECTION:
logging.info('Attempting full repair')
self.repair_full()
else:
raise NotImplementedError('Unknown host protection level %s'
% protection_level)
def disable_ipfilters(self):
"""Allow all network packets in and out of the host."""
self.run('iptables-save > /tmp/iptable-rules')
self.run('iptables -P INPUT ACCEPT')
self.run('iptables -P FORWARD ACCEPT')
self.run('iptables -P OUTPUT ACCEPT')
def enable_ipfilters(self):
"""Re-enable the IP filters disabled from disable_ipfilters()"""
if self.path_exists('/tmp/iptable-rules'):
self.run('iptables-restore < /tmp/iptable-rules')
def cleanup(self):
pass
def machine_install(self):
raise NotImplementedError('Machine install not implemented!')
def install(self, installableObject):
installableObject.install(self)
def get_autodir(self):
raise NotImplementedError('Get autodir not implemented!')
def set_autodir(self):
raise NotImplementedError('Set autodir not implemented!')
def start_loggers(self):
""" Called to start continuous host logging. """
pass
def stop_loggers(self):
""" Called to stop continuous host logging. """
pass
# some extra methods simplify the retrieval of information about the
# Host machine, with generic implementations based on run(). subclasses
# should feel free to override these if they can provide better
# implementations for their specific Host types
def get_num_cpu(self):
""" Get the number of CPUs in the host according to /proc/cpuinfo. """
proc_cpuinfo = self.run('cat /proc/cpuinfo',
stdout_tee=open(os.devnull, 'w')).stdout
cpus = 0
for line in proc_cpuinfo.splitlines():
if line.startswith('processor'):
cpus += 1
return cpus
def get_arch(self):
""" Get the hardware architecture of the remote machine. """
arch = self.run('/bin/uname -m').stdout.rstrip()
if re.match(r'i\d86$', arch):
arch = 'i386'
return arch
def get_kernel_ver(self):
""" Get the kernel version of the remote machine. """
return self.run('/bin/uname -r').stdout.rstrip()
def get_cmdline(self):
""" Get the kernel command line of the remote machine. """
return self.run('cat /proc/cmdline').stdout.rstrip()
def get_meminfo(self):
""" Get the kernel memory info (/proc/meminfo) of the remote machine
and return a dictionary mapping the various statistics. """
meminfo_dict = {}
meminfo = self.run('cat /proc/meminfo').stdout.splitlines()
for key, val in (line.split(':', 1) for line in meminfo):
meminfo_dict[key.strip()] = val.strip()
return meminfo_dict
def path_exists(self, path):
""" Determine if path exists on the remote machine. """
result = self.run('ls "%s" > /dev/null' % utils.sh_escape(path),
ignore_status=True)
return result.exit_status == 0
# some extra helpers for doing job-related operations
def record(self, *args, **dargs):
""" Helper method for recording status logs against Host.job that
silently becomes a NOP if Host.job is not available. The args and
dargs are passed on to Host.job.record unchanged. """
if self.job:
self.job.record(*args, **dargs)
def log_kernel(self):
""" Helper method for logging kernel information into the status logs.
Intended for cases where the "current" kernel is not really defined
and we want to explicitly log it. Does nothing if this host isn't
actually associated with a job. """
if self.job:
kernel = self.get_kernel_ver()
self.job.record("INFO", None, None,
optional_fields={"kernel": kernel})
def log_reboot(self, reboot_func):
""" Decorator for wrapping a reboot in a group for status
logging purposes. The reboot_func parameter should be an actual
function that carries out the reboot.
"""
if self.job and not hasattr(self, "RUNNING_LOG_REBOOT"):
self.RUNNING_LOG_REBOOT = True
try:
self.job.run_reboot(reboot_func, self.get_kernel_ver)
finally:
del self.RUNNING_LOG_REBOOT
else:
reboot_func()
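    # Illustrative usage (hypothetical reboot function):
    #   host.log_reboot(lambda: host.run('reboot'))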
def request_hardware_repair(self):
""" Should somehow request (send a mail?) for hardware repairs on
this machine. The implementation can either return by raising the
special error.AutoservHardwareRepairRequestedError exception or can
try to wait until the machine is repaired and then return normally.
"""
raise NotImplementedError("request_hardware_repair not implemented")
def list_files_glob(self, glob):
"""
Get a list of files on a remote host given a glob pattern path.
"""
SCRIPT = ("python -c 'import cPickle, glob, sys;"
"cPickle.dump(glob.glob(sys.argv[1]), sys.stdout, 0)'")
output = self.run(SCRIPT, args=(glob,), stdout_tee=None,
timeout=60).stdout
return cPickle.loads(output)
def symlink_closure(self, paths):
"""
Given a sequence of path strings, return the set of all paths that
can be reached from the initial set by following symlinks.
@param paths: sequence of path strings.
@return: a sequence of path strings that are all the unique paths that
can be reached from the given ones after following symlinks.
"""
SCRIPT = ("python -c 'import cPickle, os, sys\n"
"paths = cPickle.load(sys.stdin)\n"
"closure = {}\n"
"while paths:\n"
" path = paths.keys()[0]\n"
" del paths[path]\n"
" if not os.path.exists(path):\n"
" continue\n"
" closure[path] = None\n"
" if os.path.islink(path):\n"
" link_to = os.path.join(os.path.dirname(path),\n"
" os.readlink(path))\n"
" if link_to not in closure.keys():\n"
" paths[link_to] = None\n"
"cPickle.dump(closure.keys(), sys.stdout, 0)'")
input_data = cPickle.dumps(dict((path, None) for path in paths), 0)
output = self.run(SCRIPT, stdout_tee=None, stdin=input_data,
timeout=60).stdout
return cPickle.loads(output)
def cleanup_kernels(self, boot_dir='/boot'):
"""
Remove any kernel image and associated files (vmlinux, system.map,
modules) for any image found in the boot directory that is not
referenced by entries in the bootloader configuration.
@param boot_dir: boot directory path string, default '/boot'
"""
# find all the vmlinuz images referenced by the bootloader
vmlinuz_prefix = os.path.join(boot_dir, 'vmlinuz-')
boot_info = self.bootloader.get_entries()
used_kernver = [boot['kernel'][len(vmlinuz_prefix):]
for boot in boot_info.itervalues()]
# find all the unused vmlinuz images in /boot
all_vmlinuz = self.list_files_glob(vmlinuz_prefix + '*')
used_vmlinuz = self.symlink_closure(vmlinuz_prefix + kernver
for kernver in used_kernver)
unused_vmlinuz = set(all_vmlinuz) - set(used_vmlinuz)
# find all the unused vmlinux images in /boot
vmlinux_prefix = os.path.join(boot_dir, 'vmlinux-')
all_vmlinux = self.list_files_glob(vmlinux_prefix + '*')
used_vmlinux = self.symlink_closure(vmlinux_prefix + kernver
for kernver in used_kernver)
unused_vmlinux = set(all_vmlinux) - set(used_vmlinux)
# find all the unused System.map files in /boot
systemmap_prefix = os.path.join(boot_dir, 'System.map-')
all_system_map = self.list_files_glob(systemmap_prefix + '*')
used_system_map = self.symlink_closure(
systemmap_prefix + kernver for kernver in used_kernver)
unused_system_map = set(all_system_map) - set(used_system_map)
# find all the module directories associated with unused kernels
modules_prefix = '/lib/modules/'
all_moddirs = [dir for dir in self.list_files_glob(modules_prefix + '*')
if re.match(modules_prefix + r'\d+\.\d+\.\d+.*', dir)]
used_moddirs = self.symlink_closure(modules_prefix + kernver
for kernver in used_kernver)
unused_moddirs = set(all_moddirs) - set(used_moddirs)
# remove all the vmlinuz files we don't use
# TODO: if needed this should become package manager agnostic
for vmlinuz in unused_vmlinuz:
# try and get an rpm package name
rpm = self.run('rpm -qf', args=(vmlinuz,),
ignore_status=True, timeout=120)
if rpm.exit_status == 0:
packages = set(line.strip() for line in
rpm.stdout.splitlines())
# if we found some package names, try to remove them
for package in packages:
self.run('rpm -e', args=(package,),
ignore_status=True, timeout=120)
# remove the image files anyway, even if rpm didn't
self.run('rm -f', args=(vmlinuz,),
ignore_status=True, timeout=120)
# remove all the vmlinux and System.map files left over
for f in (unused_vmlinux | unused_system_map):
self.run('rm -f', args=(f,),
ignore_status=True, timeout=120)
# remove all unused module directories
# the regex match should keep us safe from removing the wrong files
for moddir in unused_moddirs:
self.run('rm -fr', args=(moddir,), ignore_status=True)
|
wuzhy/autotest
|
client/common_lib/hosts/base_classes.py
|
Python
|
gpl-2.0
| 27,724
|
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Compute API that proxies via Cells Service."""
from nova import availability_zones
from nova import block_device
from nova.cells import rpcapi as cells_rpcapi
from nova.cells import utils as cells_utils
from nova.compute import api as compute_api
from nova.compute import instance_types
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import vm_states
from nova import exception
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
check_instance_state = compute_api.check_instance_state
wrap_check_policy = compute_api.wrap_check_policy
check_policy = compute_api.check_policy
check_instance_lock = compute_api.check_instance_lock
def validate_cell(fn):
def _wrapped(self, context, instance, *args, **kwargs):
self._validate_cell(instance, fn.__name__)
return fn(self, context, instance, *args, **kwargs)
_wrapped.__name__ = fn.__name__
return _wrapped
class ComputeRPCAPINoOp(object):
def __getattr__(self, key):
def _noop_rpc_wrapper(*args, **kwargs):
return None
return _noop_rpc_wrapper
class SchedulerRPCAPIRedirect(object):
def __init__(self, cells_rpcapi_obj):
self.cells_rpcapi = cells_rpcapi_obj
def __getattr__(self, key):
def _noop_rpc_wrapper(*args, **kwargs):
return None
return _noop_rpc_wrapper
def run_instance(self, context, **kwargs):
self.cells_rpcapi.schedule_run_instance(context, **kwargs)
class ComputeRPCProxyAPI(compute_rpcapi.ComputeAPI):
"""Class used to substitute Compute RPC API that will proxy
via the cells manager to a compute manager in a child cell.
"""
def __init__(self, *args, **kwargs):
super(ComputeRPCProxyAPI, self).__init__(*args, **kwargs)
self.cells_rpcapi = cells_rpcapi.CellsAPI()
def cast(self, ctxt, msg, topic=None, version=None):
self._set_version(msg, version)
topic = self._get_topic(topic)
self.cells_rpcapi.proxy_rpc_to_manager(ctxt, msg, topic)
def call(self, ctxt, msg, topic=None, version=None, timeout=None):
self._set_version(msg, version)
topic = self._get_topic(topic)
return self.cells_rpcapi.proxy_rpc_to_manager(ctxt, msg, topic,
call=True,
timeout=timeout)
class ComputeCellsAPI(compute_api.API):
def __init__(self, *args, **kwargs):
super(ComputeCellsAPI, self).__init__(*args, **kwargs)
self.cells_rpcapi = cells_rpcapi.CellsAPI()
# Avoid casts/calls directly to compute
self.compute_rpcapi = ComputeRPCAPINoOp()
# Redirect scheduler run_instance to cells.
self.scheduler_rpcapi = SchedulerRPCAPIRedirect(self.cells_rpcapi)
def _cell_read_only(self, cell_name):
"""Is the target cell in a read-only mode?"""
# FIXME(comstud): Add support for this.
return False
def _validate_cell(self, instance, method):
cell_name = instance['cell_name']
if not cell_name:
raise exception.InstanceUnknownCell(
instance_uuid=instance['uuid'])
if self._cell_read_only(cell_name):
raise exception.InstanceInvalidState(
attr="vm_state",
instance_uuid=instance['uuid'],
state="temporary_readonly",
method=method)
def _cast_to_cells(self, context, instance, method, *args, **kwargs):
instance_uuid = instance['uuid']
cell_name = instance['cell_name']
if not cell_name:
raise exception.InstanceUnknownCell(instance_uuid=instance_uuid)
self.cells_rpcapi.cast_compute_api_method(context, cell_name,
method, instance_uuid, *args, **kwargs)
def _call_to_cells(self, context, instance, method, *args, **kwargs):
instance_uuid = instance['uuid']
cell_name = instance['cell_name']
if not cell_name:
raise exception.InstanceUnknownCell(instance_uuid=instance_uuid)
return self.cells_rpcapi.call_compute_api_method(context, cell_name,
method, instance_uuid, *args, **kwargs)
def _check_requested_networks(self, context, requested_networks):
"""Override compute API's checking of this. It'll happen in
        the child cell.
"""
return
def _validate_image_href(self, context, image_href):
"""Override compute API's checking of this. It'll happen in
        the child cell.
"""
return
def backup(self, context, instance, name, backup_type, rotation,
extra_properties=None, image_id=None):
"""Backup the given instance."""
image_meta = super(ComputeCellsAPI, self).backup(context,
instance, name, backup_type, rotation,
extra_properties=extra_properties, image_id=image_id)
image_id = image_meta['id']
self._cast_to_cells(context, instance, 'backup', name,
backup_type=backup_type, rotation=rotation,
extra_properties=extra_properties, image_id=image_id)
return image_meta
def snapshot(self, context, instance, name, extra_properties=None,
image_id=None):
"""Snapshot the given instance."""
image_meta = super(ComputeCellsAPI, self).snapshot(context,
instance, name, extra_properties=extra_properties,
image_id=image_id)
image_id = image_meta['id']
self._cast_to_cells(context, instance, 'snapshot',
name, extra_properties=extra_properties, image_id=image_id)
return image_meta
def create(self, *args, **kwargs):
"""We can use the base functionality, but I left this here just
for completeness.
"""
return super(ComputeCellsAPI, self).create(*args, **kwargs)
def update_state(self, context, instance, new_state):
"""Updates the state of a compute instance.
For example to 'active' or 'error'.
Also sets 'task_state' to None.
Used by admin_actions api
:param context: The security context
:param instance: The instance to update
:param new_state: A member of vm_state to change
the instance's state to,
eg. 'active'
"""
self.update(context, instance,
pass_on_state_change=True,
vm_state=new_state,
task_state=None)
def update(self, context, instance, pass_on_state_change=False, **kwargs):
"""
Update an instance.
:param pass_on_state_change: if true, the state change will be passed
on to child cells
"""
cell_name = instance['cell_name']
if cell_name and self._cell_read_only(cell_name):
raise exception.InstanceInvalidState(
attr="vm_state",
instance_uuid=instance['uuid'],
state="temporary_readonly",
method='update')
rv = super(ComputeCellsAPI, self).update(context,
instance, **kwargs)
kwargs_copy = kwargs.copy()
if not pass_on_state_change:
# We need to skip vm_state/task_state updates... those will
# happen via a _cast_to_cells when running a different
# compute api method
kwargs_copy.pop('vm_state', None)
kwargs_copy.pop('task_state', None)
if kwargs_copy:
try:
self._cast_to_cells(context, instance, 'update',
**kwargs_copy)
except exception.InstanceUnknownCell:
pass
return rv
def _local_delete(self, context, instance, bdms):
# This will get called for every delete in the API cell
# because _delete() in compute/api.py will not find a
# service when checking if it's up.
# We need to only take action if there's no cell_name. Our
# overrides of delete() and soft_delete() will take care of
# the rest.
cell_name = instance['cell_name']
if not cell_name:
return super(ComputeCellsAPI, self)._local_delete(context,
instance, bdms)
def soft_delete(self, context, instance):
self._handle_cell_delete(context, instance,
super(ComputeCellsAPI, self).soft_delete, 'soft_delete')
def delete(self, context, instance):
self._handle_cell_delete(context, instance,
super(ComputeCellsAPI, self).delete, 'delete')
def _handle_cell_delete(self, context, instance, method, method_name):
"""Terminate an instance."""
# We can't use the decorator because we have special logic in the
# case we don't know the cell_name...
cell_name = instance['cell_name']
if cell_name and self._cell_read_only(cell_name):
raise exception.InstanceInvalidState(
attr="vm_state",
instance_uuid=instance['uuid'],
state="temporary_readonly",
method=method_name)
method(context, instance)
try:
self._cast_to_cells(context, instance, method_name)
except exception.InstanceUnknownCell:
# If there's no cell, there's also no host... which means
# the instance was destroyed from the DB here. Let's just
# broadcast a message down to all cells and hope this ends
# up resolving itself... Worse case.. the instance will
# show back up again here.
            delete_type = method_name == 'soft_delete' and 'soft' or 'hard'
self.cells_rpcapi.instance_delete_everywhere(context,
instance['uuid'], delete_type)
@validate_cell
def restore(self, context, instance):
"""Restore a previously deleted (but not reclaimed) instance."""
super(ComputeCellsAPI, self).restore(context, instance)
self._cast_to_cells(context, instance, 'restore')
@validate_cell
def force_delete(self, context, instance):
"""Force delete a previously deleted (but not reclaimed) instance."""
super(ComputeCellsAPI, self).force_delete(context, instance)
self._cast_to_cells(context, instance, 'force_delete')
@validate_cell
def stop(self, context, instance, do_cast=True):
"""Stop an instance."""
super(ComputeCellsAPI, self).stop(context, instance)
if do_cast:
self._cast_to_cells(context, instance, 'stop', do_cast=True)
else:
return self._call_to_cells(context, instance, 'stop',
do_cast=False)
@validate_cell
def start(self, context, instance):
"""Start an instance."""
super(ComputeCellsAPI, self).start(context, instance)
self._cast_to_cells(context, instance, 'start')
@validate_cell
def reboot(self, context, instance, *args, **kwargs):
"""Reboot the given instance."""
super(ComputeCellsAPI, self).reboot(context, instance,
*args, **kwargs)
self._cast_to_cells(context, instance, 'reboot', *args,
**kwargs)
@validate_cell
def rebuild(self, context, instance, *args, **kwargs):
"""Rebuild the given instance with the provided attributes."""
super(ComputeCellsAPI, self).rebuild(context, instance, *args,
**kwargs)
self._cast_to_cells(context, instance, 'rebuild', *args, **kwargs)
@validate_cell
def evacuate(self, context, instance, *args, **kwargs):
"""Evacuate the given instance with the provided attributes."""
super(ComputeCellsAPI, self).evacuate(context, instance, *args,
**kwargs)
self._cast_to_cells(context, instance, 'evacuate', *args, **kwargs)
@check_instance_state(vm_state=[vm_states.RESIZED])
@validate_cell
def revert_resize(self, context, instance):
"""Reverts a resize, deleting the 'new' instance in the process."""
super(ComputeCellsAPI, self).revert_resize(context, instance)
self._cast_to_cells(context, instance, 'revert_resize')
@check_instance_state(vm_state=[vm_states.RESIZED])
@validate_cell
def confirm_resize(self, context, instance):
"""Confirms a migration/resize and deletes the 'old' instance."""
super(ComputeCellsAPI, self).confirm_resize(context, instance)
self._cast_to_cells(context, instance, 'confirm_resize')
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
task_state=[None])
@validate_cell
def resize(self, context, instance, *args, **kwargs):
"""Resize (ie, migrate) a running instance.
If flavor_id is None, the process is considered a migration, keeping
the original flavor_id. If flavor_id is not None, the instance should
be migrated to a new host and resized to the new flavor_id.
"""
super(ComputeCellsAPI, self).resize(context, instance, *args, **kwargs)
# NOTE(johannes): If we get to this point, then we know the
# specified flavor_id is valid and exists. We'll need to load
# it again, but that should be safe.
old_instance_type = instance_types.extract_instance_type(instance)
flavor_id = kwargs.get('flavor_id')
if not flavor_id:
new_instance_type = old_instance_type
else:
new_instance_type = instance_types.extract_instance_type(instance,
'new_')
# NOTE(johannes): Later, when the resize is confirmed or reverted,
# the superclass implementations of those methods will need access
# to a local migration record for quota reasons. We don't need
# source and/or destination information, just the old and new
# instance_types. Status is set to 'finished' since nothing else
# will update the status along the way.
self.db.migration_create(context.elevated(),
{'instance_uuid': instance['uuid'],
'old_instance_type_id': old_instance_type['id'],
'new_instance_type_id': new_instance_type['id'],
'status': 'finished'})
# FIXME(comstud): pass new instance_type object down to a method
# that'll unfold it
self._cast_to_cells(context, instance, 'resize', *args, **kwargs)
@validate_cell
def add_fixed_ip(self, context, instance, *args, **kwargs):
"""Add fixed_ip from specified network to given instance."""
super(ComputeCellsAPI, self).add_fixed_ip(context, instance,
*args, **kwargs)
self._cast_to_cells(context, instance, 'add_fixed_ip',
*args, **kwargs)
@validate_cell
def remove_fixed_ip(self, context, instance, *args, **kwargs):
"""Remove fixed_ip from specified network to given instance."""
super(ComputeCellsAPI, self).remove_fixed_ip(context, instance,
*args, **kwargs)
self._cast_to_cells(context, instance, 'remove_fixed_ip',
*args, **kwargs)
@validate_cell
def pause(self, context, instance):
"""Pause the given instance."""
super(ComputeCellsAPI, self).pause(context, instance)
self._cast_to_cells(context, instance, 'pause')
@validate_cell
def unpause(self, context, instance):
"""Unpause the given instance."""
super(ComputeCellsAPI, self).unpause(context, instance)
self._cast_to_cells(context, instance, 'unpause')
def get_diagnostics(self, context, instance):
"""Retrieve diagnostics for the given instance."""
# FIXME(comstud): Cache this?
# Also: only calling super() to get state/policy checking
super(ComputeCellsAPI, self).get_diagnostics(context, instance)
return self._call_to_cells(context, instance, 'get_diagnostics')
@validate_cell
def suspend(self, context, instance):
"""Suspend the given instance."""
super(ComputeCellsAPI, self).suspend(context, instance)
self._cast_to_cells(context, instance, 'suspend')
@validate_cell
def resume(self, context, instance):
"""Resume the given instance."""
super(ComputeCellsAPI, self).resume(context, instance)
self._cast_to_cells(context, instance, 'resume')
@validate_cell
def rescue(self, context, instance, rescue_password=None):
"""Rescue the given instance."""
super(ComputeCellsAPI, self).rescue(context, instance,
rescue_password=rescue_password)
self._cast_to_cells(context, instance, 'rescue',
rescue_password=rescue_password)
@validate_cell
def unrescue(self, context, instance):
"""Unrescue the given instance."""
super(ComputeCellsAPI, self).unrescue(context, instance)
self._cast_to_cells(context, instance, 'unrescue')
@validate_cell
def set_admin_password(self, context, instance, password=None):
"""Set the root/admin password for the given instance."""
super(ComputeCellsAPI, self).set_admin_password(context, instance,
password=password)
self._cast_to_cells(context, instance, 'set_admin_password',
password=password)
@validate_cell
def inject_file(self, context, instance, *args, **kwargs):
"""Write a file to the given instance."""
super(ComputeCellsAPI, self).inject_file(context, instance, *args,
**kwargs)
self._cast_to_cells(context, instance, 'inject_file', *args, **kwargs)
@wrap_check_policy
@validate_cell
def get_vnc_console(self, context, instance, console_type):
"""Get a url to a VNC Console."""
if not instance['host']:
raise exception.InstanceNotReady(instance_id=instance['uuid'])
connect_info = self._call_to_cells(context, instance,
'get_vnc_connect_info', console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type, connect_info['host'],
connect_info['port'], connect_info['internal_access_path'],
instance_uuid=instance['uuid'])
return {'url': connect_info['access_url']}
@wrap_check_policy
@validate_cell
def get_spice_console(self, context, instance, console_type):
"""Get a url to a SPICE Console."""
if not instance['host']:
raise exception.InstanceNotReady(instance_id=instance['uuid'])
connect_info = self._call_to_cells(context, instance,
'get_spice_connect_info', console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type, connect_info['host'],
connect_info['port'], connect_info['internal_access_path'],
instance_uuid=instance['uuid'])
return {'url': connect_info['access_url']}
@validate_cell
def get_console_output(self, context, instance, *args, **kwargs):
"""Get console output for an an instance."""
# NOTE(comstud): Calling super() just to get policy check
super(ComputeCellsAPI, self).get_console_output(context, instance,
*args, **kwargs)
return self._call_to_cells(context, instance, 'get_console_output',
*args, **kwargs)
def lock(self, context, instance):
"""Lock the given instance."""
super(ComputeCellsAPI, self).lock(context, instance)
self._cast_to_cells(context, instance, 'lock')
def unlock(self, context, instance):
"""Unlock the given instance."""
        super(ComputeCellsAPI, self).unlock(context, instance)
self._cast_to_cells(context, instance, 'unlock')
@validate_cell
def reset_network(self, context, instance):
"""Reset networking on the instance."""
super(ComputeCellsAPI, self).reset_network(context, instance)
self._cast_to_cells(context, instance, 'reset_network')
@validate_cell
def inject_network_info(self, context, instance):
"""Inject network info for the instance."""
super(ComputeCellsAPI, self).inject_network_info(context, instance)
self._cast_to_cells(context, instance, 'inject_network_info')
@wrap_check_policy
@validate_cell
def attach_volume(self, context, instance, volume_id, device=None):
"""Attach an existing volume to an existing instance."""
if device and not block_device.match_device(device):
raise exception.InvalidDevicePath(path=device)
device = self.compute_rpcapi.reserve_block_device_name(
context, device=device, instance=instance, volume_id=volume_id)
try:
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_attach(context, volume, instance=instance)
except Exception:
with excutils.save_and_reraise_exception():
self.db.block_device_mapping_destroy_by_instance_and_device(
context, instance['uuid'], device)
self._cast_to_cells(context, instance, 'attach_volume',
volume_id, device)
@check_instance_lock
@validate_cell
def _detach_volume(self, context, instance, volume_id):
"""Detach a volume from an instance."""
check_policy(context, 'detach_volume', instance)
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_detach(context, volume)
self._cast_to_cells(context, instance, 'detach_volume',
volume_id)
@wrap_check_policy
@validate_cell
def associate_floating_ip(self, context, instance, address):
"""Makes calls to network_api to associate_floating_ip.
:param address: is a string floating ip address
"""
self._cast_to_cells(context, instance, 'associate_floating_ip',
address)
@validate_cell
def delete_instance_metadata(self, context, instance, key):
"""Delete the given metadata item from an instance."""
super(ComputeCellsAPI, self).delete_instance_metadata(context,
instance, key)
self._cast_to_cells(context, instance, 'delete_instance_metadata',
key)
@wrap_check_policy
@validate_cell
def update_instance_metadata(self, context, instance,
metadata, delete=False):
rv = super(ComputeCellsAPI, self).update_instance_metadata(context,
instance, metadata, delete=delete)
try:
self._cast_to_cells(context, instance,
'update_instance_metadata',
metadata, delete=delete)
except exception.InstanceUnknownCell:
pass
return rv
class HostAPI(compute_api.HostAPI):
"""HostAPI() class for cells.
Implements host management related operations. Works by setting the
RPC API used by the base class to proxy via the cells manager to the
compute manager in the correct cell. Hosts specified with cells will
need to be of the format 'path!to!cell@host'.
DB methods in the base class are also overridden to proxy via the
cells manager.
"""
def __init__(self):
super(HostAPI, self).__init__(rpcapi=ComputeRPCProxyAPI())
self.cells_rpcapi = cells_rpcapi.CellsAPI()
def _assert_host_exists(self, context, host_name):
"""Cannot check this in API cell. This will be checked in the
target child cell.
"""
pass
def service_get_all(self, context, filters=None, set_zones=False):
if filters is None:
filters = {}
if 'availability_zone' in filters:
zone_filter = filters.pop('availability_zone')
set_zones = True
else:
zone_filter = None
services = self.cells_rpcapi.service_get_all(context,
filters=filters)
if set_zones:
services = availability_zones.set_availability_zones(context,
services)
if zone_filter is not None:
services = [s for s in services
if s['availability_zone'] == zone_filter]
return services
def service_get_by_compute_host(self, context, host_name):
return self.cells_rpcapi.service_get_by_compute_host(context,
host_name)
def instance_get_all_by_host(self, context, host_name):
"""Get all instances by host. Host might have a cell prepended
to it, so we'll need to strip it out. We don't need to proxy
this call to cells, as we have instance information here in
the API cell.
"""
try:
cell_name, host_name = cells_utils.split_cell_and_item(host_name)
except ValueError:
cell_name = None
instances = super(HostAPI, self).instance_get_all_by_host(context,
host_name)
if cell_name:
instances = [i for i in instances
if i['cell_name'] == cell_name]
return instances
def task_log_get_all(self, context, task_name, beginning, ending,
host=None, state=None):
"""Return the task logs within a given range from cells,
optionally filtering by the host and/or state. For cells, the
host should be a path like 'path!to!cell@host'. If no @host
is given, only task logs from a particular cell will be returned.
"""
return self.cells_rpcapi.task_log_get_all(context,
task_name,
beginning,
ending,
host=host,
state=state)
def compute_node_get(self, context, compute_id):
"""Get a compute node from a particular cell by its integer ID.
compute_id should be in the format of 'path!to!cell@ID'.
"""
return self.cells_rpcapi.compute_node_get(context, compute_id)
def compute_node_get_all(self, context):
return self.cells_rpcapi.compute_node_get_all(context)
def compute_node_search_by_hypervisor(self, context, hypervisor_match):
return self.cells_rpcapi.compute_node_get_all(context,
hypervisor_match=hypervisor_match)
def compute_node_statistics(self, context):
return self.cells_rpcapi.compute_node_stats(context)
class InstanceActionAPI(compute_api.InstanceActionAPI):
"""InstanceActionAPI() class for cells."""
def __init__(self):
super(InstanceActionAPI, self).__init__()
self.cells_rpcapi = cells_rpcapi.CellsAPI()
def actions_get(self, context, instance):
return self.cells_rpcapi.actions_get(context, instance)
def action_get_by_request_id(self, context, instance, request_id):
return self.cells_rpcapi.action_get_by_request_id(context, instance,
request_id)
def action_events_get(self, context, instance, action_id):
return self.cells_rpcapi.action_events_get(context, instance,
action_id)
|
dstroppa/openstack-smartos-nova-grizzly
|
nova/compute/cells_api.py
|
Python
|
apache-2.0
| 28,581
|
# -*- coding: utf-8 -*-
import os
import httplib
import logging
import functools
from modularodm.exceptions import ValidationValueError
from framework.exceptions import HTTPError
from framework.analytics import update_counter
from website.addons.osfstorage import settings
logger = logging.getLogger(__name__)
LOCATION_KEYS = ['service', settings.WATERBUTLER_RESOURCE, 'object']
def update_analytics(node, file_id, version_idx):
"""
:param Node node: Root node to update
:param str file_id: The _id field of a filenode
:param int version_idx: Zero-based version index
"""
update_counter(u'download:{0}:{1}'.format(node._id, file_id))
update_counter(u'download:{0}:{1}:{2}'.format(node._id, file_id, version_idx))
def serialize_revision(node, record, version, index, anon=False):
"""Serialize revision for use in revisions table.
:param Node node: Root node
:param FileRecord record: Root file record
:param FileVersion version: The version to serialize
:param int index: One-based index of version
"""
if anon:
user = None
else:
user = {
'name': version.creator.fullname,
'url': version.creator.url,
}
return {
'user': user,
'index': index + 1,
'date': version.date_created.isoformat(),
'downloads': record.get_download_count(version=index),
'md5': version.metadata.get('md5'),
'sha256': version.metadata.get('sha256'),
}
SIGNED_REQUEST_ERROR = HTTPError(
httplib.SERVICE_UNAVAILABLE,
data={
'message_short': 'Upload service unavailable',
'message_long': (
'Upload service is not available; please retry '
'your upload in a moment'
),
},
)
def get_filename(version_idx, file_version, file_record):
"""Build name for downloaded file, appending version date if not latest.
:param int version_idx: One-based version index
:param FileVersion file_version: Version to name
:param FileRecord file_record: Root file object
"""
if version_idx == len(file_record.versions):
return file_record.name
name, ext = os.path.splitext(file_record.name)
return u'{name}-{date}{ext}'.format(
name=name,
date=file_version.date_created.isoformat(),
ext=ext,
)
def validate_location(value):
for key in LOCATION_KEYS:
if key not in value:
raise ValidationValueError
def must_be(_type):
"""A small decorator factory for OsfStorageFileNode. Acts as a poor mans
polymorphic inheritance, ensures that the given instance is of "kind" folder or file
"""
def _must_be(func):
@functools.wraps(func)
def wrapped(self, *args, **kwargs):
if not self.kind == _type:
raise ValueError('This instance is not a {}'.format(_type))
return func(self, *args, **kwargs)
return wrapped
return _must_be
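# Illustrative usage (hypothetical method on OsfStorageFileNode):
#   @must_be('folder')
#   def append_file(self, name):
#       ...  # raises ValueError unless self.kind == 'folder'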
def copy_files(src, target_settings, parent=None, name=None):
"""Copy the files from src to the target nodesettings
:param OsfStorageFileNode src: The source to copy children from
:param OsfStorageNodeSettings target_settings: The node settings of the project to copy files to
    :param OsfStorageFileNode parent: The parent to attach the clone of src to, if applicable
"""
cloned = src.clone()
cloned.parent = parent
cloned.name = name or cloned.name
cloned.node_settings = target_settings
if src.is_file:
cloned.versions = src.versions
cloned.save()
if src.is_folder:
for child in src.children:
copy_files(child, target_settings, parent=cloned)
return cloned
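# Editor's note: a hedged usage sketch of copy_files; `src_node` and
# `dest_settings` are hypothetical names for an OsfStorageFileNode and the
# OsfStorageNodeSettings of a destination project.
#
#     cloned_root = copy_files(src_node, dest_settings)
#
# Folders are copied recursively, and file versions are shared by reference
# (`cloned.versions = src.versions`) rather than duplicated.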
|
ticklemepierce/osf.io
|
website/addons/osfstorage/utils.py
|
Python
|
apache-2.0
| 3,726
|
#!/usr/bin/python2.5
"""
MasterChess library - Mac OS X QuickLook generator
Copyright (C) 2013 Jake Hartz
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys, cgi, datetime
from MasterChess import open_database
def generate_html(mc, use_big_letters=False):
tbl = mc.get_grand_table(full_names="db")
notempty = bool(len(tbl.rows) > 1 and len(tbl.rows[0]) > 0)
returning = """<!DOCTYPE html>
<html>
<head>
<style>
h1 {
text-align: center;
margin: auto;
}"""
if notempty:
returning += """
table {
border-collapse: collapse;
}
table th, table td {
border: 1px solid black;
font-size: 11pt;
padding: 6px;
text-align: center;
}"""
if use_big_letters:
returning += """
p {
font-size: 62pt;
position: absolute;
bottom: 0;
right: 4%;
text-align: right;
opacity: .8;
color: red;
text-shadow: #6374AB 7px 7px 3px;
font-weight: bold;
}
p.bigger {
font-size: 85pt;
}"""
returning += "\n</style>\n</head>\n<body>\n"
if notempty:
returning += "<table><thead><tr><th> </th>"
for header in tbl.column_headers:
if isinstance(header, tuple):
header = header[1]
returning += "<th>" + cgi.escape(header, True) + "</th>"
returning += "</tr></thead>"
returning += "<tbody>"
for index, row in enumerate(tbl.rows):
header = tbl.row_headers[index]
if isinstance(header, tuple):
header = header[1]
returning += "<tr><th>" + cgi.escape(header, True) + "</th>"
for other_index, val in enumerate(row):
try:
if val == int(val):
val = int(val)
except:
pass
if val == None: val = " "
if index == len(tbl.row_headers) - 1 or other_index == len(tbl.column_headers) - 1:
val = "<i>" + str(val) + "</i>"
returning += "<td>" + str(val) + "</td>"
returning += "</tr>"
returning += "</tbody></table>\n"
matches = mc.get_matches()
returning += "<p>"
if use_big_letters: returning += "Matches: "
else: returning += "Total matches: "
returning += str(len(matches))
if len(matches) > 0:
latest_match = datetime.date.fromtimestamp(matches[-1].timestamp)
if use_big_letters: returning += "<br><span>Latest: "
else: returning += " <span style=\"float: right;\">Latest match: "
returning += cgi.escape(latest_match.strftime("%m/%d/%y"), True) + "</span>"
returning += "</p>\n"
else:
if use_big_letters:
returning += "<p class=\"bigger\">Empty<br>database</p>\n"
else:
returning += "<h1>Empty MasterChess database</h1>\n"
returning += "</body>\n</html>"
return returning
if __name__ == "__main__":
if len(sys.argv) > 1:
path = sys.argv[1]
smallsize = False
try:
if sys.argv[2] == "SMALLER":
smallsize = True
except:
pass
mc_instance = open_database(path)
if mc_instance:
print generate_html(mc_instance, smallsize)
else:
print "<html><body><h1>Error</h1><p><code>An error occurred while attempting to open the database.</code></p></body></html>"
|
jhartz/masterchess
|
QuickLook.py
|
Python
|
gpl-3.0
| 4,057
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This file is part of XBMC Mega Pack Addon.
Copyright (C) 2014 Wolverine (xbmcmegapack@gmail.com)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see http://www.gnu.org/licenses/gpl-3.0.html
"""
class Languages_Walloon():
'''Class that manages this specific menu context.'''
def open(self, plugin, menu):
menu.add_xplugins(plugin.get_xplugins(dictionaries=["Channels",
"Events", "Live", "Movies", "Sports", "TVShows"],
languages=["Walloon"]))
|
xbmcmegapack/plugin.video.megapack.dev
|
resources/lib/menus/home_languages_walloon.py
|
Python
|
gpl-3.0
| 1,111
|
from mediawords.db import connect_to_db
from webapp.auth.register import add_user
from webapp.auth.reset_password import send_password_reset_token
from webapp.auth.user import NewUser
from webapp.test.dummy_emails import TestDoNotSendEmails
class TestResetPassword(TestDoNotSendEmails):
def test_send_password_reset_token(self):
db = connect_to_db()
email = 'test@user.login'
password = 'userlogin123'
password_reset_link = 'http://password-reset.com/'
add_user(
db=db,
new_user=NewUser(
email=email,
full_name='Test user login',
has_consented=True,
notes='Test test test',
role_ids=[1],
active=True,
password=password,
password_repeat=password,
activation_url='', # user is active, no need for activation URL
),
)
# Existing user
send_password_reset_token(db=db, email=email, password_reset_link=password_reset_link)
        # Nonexistent user (the call shouldn't fail because we don't want to reveal which users are in
        # the system, so we pretend that we've sent the email)
send_password_reset_token(db=db, email='does@not.exist', password_reset_link=password_reset_link)
|
berkmancenter/mediacloud
|
apps/webapp-api/tests/python/auth/reset_password/test_send_password_reset_token.py
|
Python
|
agpl-3.0
| 1,341
|
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
# Usage: python high_level_mnist_export.py {dest dir}
import os
import sys
import numpy.random as npr
from examples import datasets
from iree.compiler import (
tools as iree_tools,)
import jax.core
import jax.numpy as jnp
from jax import jit, grad, random
from jax.example_libraries import optimizers
from jax.example_libraries import stax
from jax.example_libraries.stax import Dense, Relu, LogSoftmax
from jax.interpreters.xla import abstractify
from jax.tree_util import (tree_map, tree_flatten, tree_unflatten,
register_pytree_node)
from iree.jax2.staging_api import *
from iree.jax2.builtins import *
def main(args):
output_dir = args[0]
os.makedirs(output_dir, exist_ok=True)
jax.config.update("jax_enable_mlir", True)
staged_module = build_model()
print("Saving mlir...")
with open(os.path.join(output_dir, "mnist_train.mlir"), "wb") as f:
get_mlir_module(staged_module).operation.print(f, binary=True)
print("Compiling...")
compiled_module = staged_module()
print("Saving binary...")
with open(os.path.join(output_dir, "mnist_train.vmfb"), "wb") as f:
f.write(get_compiled_binary(compiled_module))
def build_model():
init_random_params, predict = stax.serial(
Dense(1024),
Relu,
Dense(1024),
Relu,
Dense(10),
LogSoftmax,
)
def loss(params, batch):
inputs, targets = batch
preds = predict(params, inputs)
return -jnp.mean(jnp.sum(preds * targets, axis=1))
rng = random.PRNGKey(0)
_, init_params = init_random_params(rng, (-1, 28 * 28))
opt_init, opt_update, opt_get_params = optimizers.momentum(0.001, mass=0.9)
opt_state = opt_init(init_params)
example_batch = get_example_batch()
# Putting together the class which extends StagedModule implicitly assembles
# the corresponding MLIR module.
class MnistModule(StagedModule):
_params = export_global(init_params)
_opt_state = export_global(opt_state)
@export_kernel
def _initialize_optimizer(_, rng):
_, init_params = init_random_params(rng, (-1, 28 * 28))
return opt_init(init_params)
@export_kernel
def _update_step(_, batch, opt_state):
params = opt_get_params(opt_state)
# TODO: It appears that since the iteration count isn't used in this
# computation, it gets elided from the function signature.
# Just setting the first arg to None for this demo.
# It seems likely that we want to store the iteration count as a global
# anyway and tie it.
# Note that this may be a bug in the MLIR lowerings: the XLA lowering
# does some special things to preserve dead arguments.
return opt_update(None, grad(loss)(params, batch), opt_state)
@export_kernel
def _predict_target_class(mdl, params, inputs):
# TODO: An issue with argmax (https://github.com/google/iree/issues/7748).
#predicted_class = jnp.argmax(predict(params, inputs), axis=1)
#return predicted_class
prediction = predict(params, inputs)
return prediction
@export_traced_proc
def get_params(mdl):
return mdl._params
@export_traced_proc
def get_opt_state(mdl):
return mdl._opt_state
@export_traced_proc(signature=[opt_state])
def set_opt_state(mdl, new_opt_state):
store_global(mdl._opt_state, new_opt_state)
@export_traced_proc(signature=[rng])
def initialize(mdl, rng):
store_global(mdl._opt_state, mdl._initialize_optimizer(rng))
@export_traced_proc(signature=[example_batch])
def update(mdl, batch):
new_opt_state = mdl._update_step(batch, mdl._opt_state)
store_global(mdl._opt_state, new_opt_state)
@export_traced_proc(signature=[example_batch[0]])
def predict(mdl, inputs):
return mdl._predict_target_class(mdl._params, inputs)
return MnistModule
def get_example_batch():
batch_size = 128
train_images, train_labels, test_images, test_labels = datasets.mnist()
num_train = train_images.shape[0]
num_complete_batches, leftover = divmod(num_train, batch_size)
num_batches = num_complete_batches + bool(leftover)
def data_stream():
rng = npr.RandomState(0)
while True:
perm = rng.permutation(num_train)
for i in range(num_batches):
batch_idx = perm[i * batch_size:(i + 1) * batch_size]
yield train_images[batch_idx], train_labels[batch_idx]
batches = data_stream()
return next(batches)
main(sys.argv[1:])
|
google/iree-samples
|
iree-jax/examples/staged_mnist_export.py
|
Python
|
apache-2.0
| 4,638
|
#!/usr/bin/env python
from sys import exit
from os import environ
environ['KERAS_BACKEND'] = 'theano'
import numpy as np
from functools import partial
from tqdm import tqdm
from utils import *
from keras.layers import Input, Dense, Dropout, Activation, concatenate
from keras.models import Model
from keras.callbacks import ModelCheckpoint
from keras.optimizers import Adam
from keras.utils import np_utils
from keras.losses import categorical_crossentropy
### HELPERS ###
def reform(arr, train_frac, val_frac, fields, weight, label, extra_fields=[]):
n = arr.shape[0]
ns = {}
ns['train'] = (0,int(train_frac*n))
ns['val'] = (ns['train'][1],ns['train'][1]+int(val_frac*n))
ns['test'] = (ns['val'][1],n)
print 'label=%i, n_train=%i, n_val=%i, n_test=%i'%(label,ns['train'][1],ns['val'][1]-ns['val'][0],ns['test'][1]-ns['test'][0])
weight_norm = 100. / np.sum(arr[weight])
x = {}; y = {}; w = {}; extras = {}
for subset in ['train','val','test']:
n_ = ns[subset]
x[subset] = arr[fields].view(np.float64).reshape(arr[fields].shape+(-1,))[n_[0]:n_[1]]
w[subset] = arr[weight][n_[0]:n_[1]] * weight_norm
y[subset] = (label * np.ones(n_[1]-n_[0])).astype(int)
for e in extra_fields:
extras[subset+'_'+e] = arr[e][n_[0]:n_[1]]
return {'x':x,'y':y,'w':w,'extras':extras}
def load_data(train_frac,val_frac,fields):
# arr_bkg = np.load('../data/Background_selected.npy')
# arr_sig = np.load('../data/Top_selected.npy')
arr_bkg = np.load('../data/QCD_goodjets.npy')
arr_sig = np.load('../data/ZpTT_goodjets.npy')
np.random.shuffle(arr_bkg)
np.random.shuffle(arr_sig)
bkg = reform(arr_bkg,train_frac,val_frac,fields,'weight',0,['top_ecf_bdt','msd'])
sig = reform(arr_sig,train_frac,val_frac,fields,'weight',1,['top_ecf_bdt','msd'])
x = {}; y = {}; w = {}; bdt = {}; mass = {}
for subset in ['train','val','test']:
x[subset] = np.concatenate((bkg['x'][subset],sig['x'][subset]), axis=0)
w[subset] = np.concatenate((bkg['w'][subset],sig['w'][subset]), axis=0)
bdt[subset] = np.concatenate((bkg['extras'][subset+'_top_ecf_bdt'],
sig['extras'][subset+'_top_ecf_bdt']), axis=0)
mass[subset] = np.concatenate((bkg['extras'][subset+'_msd'],
sig['extras'][subset+'_msd']), axis=0)
y_vec = np.concatenate((bkg['y'][subset],sig['y'][subset]), axis=0)
y[subset] = np_utils.to_categorical(y_vec, 2)
mass[subset] = mass[subset].reshape((mass[subset].shape[0],1))
bdt[subset] = bdt[subset].reshape((bdt[subset].shape[0],1))
return x,y,w,bdt,mass
### ACQUIRE DATA ###
fields = ['tau32sd','frec'] + ['ecf%i'%i for i in xrange(11)]
x,y,w,bdt,mass = load_data(0.5,0.25,fields)
for subset in bdt:
bdt[subset] = bdt[subset].reshape((bdt[subset].shape[0],))
dim = x['train'].shape[1]
y_gan = {subset:np.concatenate([y[subset],mass[subset]], axis=1)
for subset in y}
### BUILD THE MODELS ###
# Discrimination model
d_input = Input(shape=(dim,), name='hlf')
l = Dense(64, activation='relu')(d_input)
l = Dense(64, activation='relu')(l)
l = Dense(32, activation='relu')(l)
d_output = Dense(2, activation='softmax', name='hlf_disc')(l)
d_model = Model(inputs=d_input, outputs=d_output)
d_model.compile(optimizer=Adam(),
loss='categorical_crossentropy')
d_model.summary()
# Generation model
g_input = Input(shape=(2,),name='disc')
# l = GradientReversalLayer(hp_lambda=100, name='reversal')(g_input)
l = Dense(32, activation='relu')(g_input)
l = Dense(32, activation='sigmoid')(l)
g_output = Dense(1, activation='linear', name='hlf_gen')(l)
g_model = Model(inputs=g_input, outputs=g_output)
g_model.compile(optimizer=Adam(lr=1.),
loss='mse')
g_model.summary()
# Add the models
gan_input = Input(shape=(dim,), name='hlf_gan')
gan_d = d_model(gan_input)
gan_reverse_1 = GradientReversalLayer(hp_lambda=1, name='reversal_1')(gan_d)
gan_g = g_model(gan_reverse_1)
gan_reverse_2 = GradientReversalLayer(hp_lambda=1, name='reversal_2')(gan_g)
gan_output = concatenate([gan_d,gan_reverse_2],axis=1)
my_adversarial_loss = partial(adversarial_loss, g_weight=100.)
my_adversarial_loss.__name__ = "my_adversarial_loss"  # functools.partial objects don't get a __name__, so set it explicitly
gan_model = Model(inputs=gan_input, outputs=gan_output)
gan_model.compile(optimizer=Adam(lr=0.001),
loss=my_adversarial_loss)
### PRE-TRAIN DISCRIMINATOR ###
d_model.fit(x['train'], y['train'], sample_weight=w['train'],
batch_size=500, epochs=1, verbose=1,
shuffle=True)
y_pred_v0 = d_model.predict(x['test'])
### PRE-TRAIN GENERATOR ###
y_pred = d_model.predict(x['train'])
bkg_mask = y['train'][:,0]==1
g_model.fit(y_pred[bkg_mask], mass['train'][bkg_mask],
sample_weight=w['train'][bkg_mask],
batch_size=32, epochs=1, verbose=1,
shuffle=True)
### TRAIN THE ADVERSARIAL STACK ###
n_test_fast = 20
test_idx = np.random.random_integers(low=0,high=x['test'].shape[0],size=n_test_fast)
# y_pred = gan_model.predict(x['test'][test_idx])
# for i in range(n_test_fast):
# print 'tag: %i -> %4.3f, mass: %6.3f -> %6.3f'%(y_gan['test'][test_idx[i]][1],
# y_pred[i][1],
# y_gan['test'][test_idx[i]][2],
# y_pred[i][2],)
# checkpoint = ModelCheckpoint(filepath='simple_disc.h5', save_best_only=True)
for big_epoch in range(1):
batch_size = 500
n_train = x['train'].shape[0]
n_batch = n_train / batch_size
order = range(n_train)
np.random.shuffle(order)
for batch in tqdm(range(n_batch)):
idxs = order[batch*batch_size : (batch+1)*batch_size]
w_ = w['train'][idxs]
x_ = x['train'][idxs]
y_ = y['train'][idxs]
y_gan_ = y_gan['train'][idxs]
mass_ = mass['train'][idxs]
bkg_mask = y_[:,0]==1
# # now train the stack
# make_trainable(g_model,False)
gan_loss = gan_model.train_on_batch(x_, y_gan_, sample_weight=w_)
# make_trainable(g_model,True)
# run the discriminator
y_pred = d_model.predict(x_)
d_loss = d_model.evaluate(x_, y_,
verbose=0, sample_weight=w_)
# train the generator
g_loss = g_model.train_on_batch(y_pred[bkg_mask], mass_[bkg_mask],
sample_weight=w_[bkg_mask])
# if batch%1000==0:
# print d_loss, g_loss, gan_loss
# y_pred = d_model.predict(x['val'])
# print d_model.evaluate(x['val'],y['val'],
# verbose=1, sample_weight=w['val'])
# print g_model.evaluate(y_pred,mass['val'],
# verbose=1, sample_weight=w['val'])
# print gan_model.evaluate(x['val'],y_gan['val'],
# verbose=1, sample_weight=w['val'])
y_pred_v1 = gan_model.predict(x['test'])
dnn_v0_t = Tagger(y_pred_v0[:,1], 'DNN v0', 0, 1, False)
dnn_v1_t = Tagger(y_pred_v1[:,1], 'DNN v1', 0, 1, False)
bdt_t = Tagger(bdt['test'], 'BDT', -1, 1, False)
create_roc([dnn_v0_t,dnn_v1_t,bdt_t],
np.argmax(y['test'],axis=1),
w['test'],'gan')
mask = np.logical_and(110<mass['test'], mass['test']<210).reshape((y['test'].shape[0],))
dnn_v0_t_mass = Tagger(y_pred_v0[:,1][mask], 'DNN v0', 0, 1, False)
dnn_v1_t_mass = Tagger(y_pred_v1[:,1][mask], 'DNN v1', 0, 1, False)
bdt_t_mass = Tagger(bdt['test'][mask], 'BDT', -1, 1, False)
wps = create_roc([dnn_v0_t_mass, dnn_v1_t_mass, bdt_t_mass],
np.argmax(y['test'][mask],axis=1),
w['test'][mask],'gan_mass')
print wps
mask_v0 = np.logical_and(y_pred_v0[:,1]>wps[0], y['test'][:,0]==1)
mask_v1 = np.logical_and(y_pred_v1[:,1]>wps[1], y['test'][:,0]==1)
mask_bdt = np.logical_and(bdt['test']>wps[2], y['test'][:,0]==1)
mask_bkg = y['test'][:,0]==1
mass_test = mass['test'].reshape((mass['test'].shape[0],))
props = {'xlabel' : '$m_{SD}$ [GeV]',
'bins' : np.arange(0,500,20),
'output' : 'sculpt'}
h_inc = {'vals':mass_test[mask_bkg],
'weights':w['test'][mask_bkg],
'color':'b', 'label':'Inclusive'}
h_v0 = {'vals':mass_test[mask_v0],
'weights':w['test'][mask_v0],
        'color':'k', 'label':'DNN v0'}
h_v1 = {'vals':mass_test[mask_v1],
'weights':w['test'][mask_v1],
        'color':'r', 'label':'DNN v1'}
h_bdt = {'vals':mass_test[mask_bdt],
'weights':w['test'][mask_bdt],
'color':'g', 'label':'BDT'}
plot_hists(props, [h_inc, h_v0, h_v1])
|
sidnarayanan/BAdNet
|
train/hlf/train_gan.py
|
Python
|
mit
| 8,736
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper code for the iSCSI volume driver.
"""
import os
from cinder import exception
from cinder import flags
from cinder.openstack.common import cfg
from cinder.openstack.common import log as logging
from cinder import utils
LOG = logging.getLogger(__name__)
iscsi_helper_opt = [
cfg.StrOpt('iscsi_helper',
default='tgtadm',
help='iscsi target user-land tool to use'),
cfg.StrOpt('volumes_dir',
default='$state_path/volumes',
help='Volume configuration file storage directory'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(iscsi_helper_opt)
class TargetAdmin(object):
"""iSCSI target administration.
Base class for iSCSI target admin helpers.
"""
def __init__(self, cmd, execute):
self._cmd = cmd
self.set_execute(execute)
def set_execute(self, execute):
"""Set the function to be used to execute commands."""
self._execute = execute
def _run(self, *args, **kwargs):
self._execute(self._cmd, *args, run_as_root=True, **kwargs)
def create_iscsi_target(self, name, tid, lun, path, **kwargs):
"""Create a iSCSI target and logical unit"""
raise NotImplementedError()
def remove_iscsi_target(self, tid, lun, vol_id, **kwargs):
"""Remove a iSCSI target and logical unit"""
raise NotImplementedError()
def _new_target(self, name, tid, **kwargs):
"""Create a new iSCSI target."""
raise NotImplementedError()
def _delete_target(self, tid, **kwargs):
"""Delete a target."""
raise NotImplementedError()
def show_target(self, tid, iqn=None, **kwargs):
"""Query the given target ID."""
raise NotImplementedError()
def _new_logicalunit(self, tid, lun, path, **kwargs):
"""Create a new LUN on a target using the supplied path."""
raise NotImplementedError()
def _delete_logicalunit(self, tid, lun, **kwargs):
"""Delete a logical unit from a target."""
raise NotImplementedError()
class TgtAdm(TargetAdmin):
"""iSCSI target administration using tgtadm."""
def __init__(self, execute=utils.execute):
super(TgtAdm, self).__init__('tgtadm', execute)
def _get_target(self, iqn):
(out, err) = self._execute('tgt-admin', '--show', run_as_root=True)
lines = out.split('\n')
for line in lines:
if iqn in line:
parsed = line.split()
tid = parsed[1]
return tid[:-1]
return None
def create_iscsi_target(self, name, tid, lun, path, **kwargs):
# Note(jdg) tid and lun aren't used by TgtAdm but remain for
# compatibility
utils.ensure_tree(FLAGS.volumes_dir)
vol_id = name.split(':')[1]
volume_conf = """
<target %s>
backing-store %s
</target>
""" % (name, path)
LOG.info(_('Creating volume: %s') % vol_id)
volumes_dir = FLAGS.volumes_dir
volume_path = os.path.join(volumes_dir, vol_id)
f = open(volume_path, 'w+')
f.write(volume_conf)
f.close()
try:
(out, err) = self._execute('tgt-admin',
'--update',
name,
run_as_root=True)
except exception.ProcessExecutionError, e:
LOG.error(_("Failed to create iscsi target for volume "
"id:%(vol_id)s.") % locals())
#Don't forget to remove the persistent file we created
os.unlink(volume_path)
raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
iqn = '%s%s' % (FLAGS.iscsi_target_prefix, vol_id)
tid = self._get_target(iqn)
if tid is None:
LOG.error(_("Failed to create iscsi target for volume "
"id:%(vol_id)s. Please ensure your tgtd config file "
"contains 'include %(volumes_dir)s/*'") % locals())
raise exception.NotFound()
return tid
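    # Editor's note (illustrative, not from the original source): the tgtd
    # configuration referred to in the error message above is typically a
    # single include line in the target daemon's config file, e.g.
    #
    #     include /var/lib/cinder/volumes/*
    #
    # where the path must match the configured FLAGS.volumes_dir.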
def remove_iscsi_target(self, tid, lun, vol_id, **kwargs):
LOG.info(_('Removing volume: %s') % vol_id)
vol_uuid_file = 'volume-%s' % vol_id
volume_path = os.path.join(FLAGS.volumes_dir, vol_uuid_file)
if os.path.isfile(volume_path):
iqn = '%s%s' % (FLAGS.iscsi_target_prefix,
vol_uuid_file)
else:
raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
try:
self._execute('tgt-admin',
'--delete',
iqn,
run_as_root=True)
except exception.ProcessExecutionError, e:
LOG.error(_("Failed to create iscsi target for volume "
"id:%(volume_id)s.") % locals())
raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
os.unlink(volume_path)
def show_target(self, tid, iqn=None, **kwargs):
if iqn is None:
raise exception.InvalidParameterValue(
err=_('valid iqn needed for show_target'))
tid = self._get_target(iqn)
if tid is None:
raise exception.NotFound()
class IetAdm(TargetAdmin):
"""iSCSI target administration using ietadm."""
def __init__(self, execute=utils.execute):
super(IetAdm, self).__init__('ietadm', execute)
def create_iscsi_target(self, name, tid, lun, path, **kwargs):
self._new_target(name, tid, **kwargs)
self._new_logicalunit(tid, lun, path, **kwargs)
return tid
def remove_iscsi_target(self, tid, lun, vol_id, **kwargs):
LOG.info(_('Removing volume: %s') % vol_id)
self._delete_logicalunit(tid, lun, **kwargs)
self._delete_target(tid, **kwargs)
def _new_target(self, name, tid, **kwargs):
self._run('--op', 'new',
'--tid=%s' % tid,
'--params', 'Name=%s' % name,
**kwargs)
def _delete_target(self, tid, **kwargs):
self._run('--op', 'delete',
'--tid=%s' % tid,
**kwargs)
def show_target(self, tid, iqn=None, **kwargs):
self._run('--op', 'show',
'--tid=%s' % tid,
**kwargs)
def _new_logicalunit(self, tid, lun, path, **kwargs):
self._run('--op', 'new',
'--tid=%s' % tid,
'--lun=%d' % lun,
'--params', 'Path=%s,Type=fileio' % path,
**kwargs)
def _delete_logicalunit(self, tid, lun, **kwargs):
self._run('--op', 'delete',
'--tid=%s' % tid,
'--lun=%d' % lun,
**kwargs)
def get_target_admin():
if FLAGS.iscsi_helper == 'tgtadm':
return TgtAdm()
else:
return IetAdm()
|
tylertian/Openstack
|
openstack F/cinder/cinder/volume/iscsi.py
|
Python
|
apache-2.0
| 7,764
|
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class SED_6_1_2(HarnessCase):
role = HarnessCase.ROLE_SED
case = '6 1 2'
golden_devices_required = 2
def on_dialog(self, dialog, title):
pass
if __name__ == '__main__':
unittest.main()
|
turon/openthread
|
tools/harness-automation/cases/sed_6_1_2.py
|
Python
|
bsd-3-clause
| 1,872
|
import os
import argparse
import os.path as op
import shutil
from collections import namedtuple
import pysam
try:
from seqcluster import prepare_data as prepare
from seqcluster import make_clusters as main_cluster
from seqcluster.libs.inputs import parse_ma_file
from seqcluster.libs import parse
except ImportError:
pass
from bcbio.utils import file_exists, safe_makedir
from bcbio.provenance import do
from bcbio.distributed.transaction import tx_tmpdir, file_transaction
from bcbio.log import logger
from bcbio.pipeline import datadict as dd
from bcbio.pipeline.sample import process_alignment
def run_prepare(*data):
"""
Run seqcluster prepare to merge all samples in one file
"""
out_dir = os.path.join(dd.get_work_dir(data[0][0]), "seqcluster", "prepare")
out_dir = os.path.abspath(safe_makedir(out_dir))
prepare_dir = os.path.join(out_dir, "prepare")
fn = []
for sample in data:
name = sample[0]["rgnames"]['sample']
fn.append("%s\t%s" % (sample[0]['collapse'], name))
args = namedtuple('args', 'debug print_debug minc minl maxl out')
args = args(False, False, 1, 17, 40, out_dir)
ma_out = op.join(out_dir, "seqs.ma")
seq_out = op.join(out_dir, "seqs.fastq")
min_shared = max(int(len(fn) / 10.0), 1)
if not file_exists(ma_out):
seq_l, sample_l = prepare._read_fastq_files(fn, args)
with file_transaction(ma_out) as ma_tx:
with open(ma_tx, 'w') as ma_handle:
with open(seq_out, 'w') as seq_handle:
prepare._create_matrix_uniq_seq(sample_l, seq_l, ma_handle, seq_handle, min_shared)
return data
def run_align(*data):
"""
Prepare data to run alignment step, only once for each project
"""
work_dir = dd.get_work_dir(data[0][0])
out_dir = os.path.join(work_dir, "seqcluster", "prepare")
seq_out = op.join(out_dir, "seqs.fastq")
bam_dir = os.path.join(work_dir, "align")
new_bam_file = op.join(bam_dir, "seqs.bam")
if not file_exists(new_bam_file):
sample = process_alignment(data[0][0], [seq_out, None])
# data = data[0][0]
bam_file = dd.get_work_bam(sample[0][0])
shutil.move(bam_file, new_bam_file)
shutil.move(bam_file + ".bai", new_bam_file + ".bai")
shutil.rmtree(op.join(bam_dir, sample[0][0]["rgnames"]['sample']))
return data
def run_cluster(*data):
"""
Run seqcluster cluster to detect smallRNA clusters
"""
work_dir = dd.get_work_dir(data[0][0])
out_dir = os.path.join(work_dir, "seqcluster", "cluster")
out_dir = os.path.abspath(safe_makedir(out_dir))
out_file = os.path.join(out_dir, "seqcluster.json")
prepare_dir = op.join(work_dir, "seqcluster", "prepare")
bam_file = op.join(work_dir, "align", "seqs.bam")
cluster_dir = _cluster(bam_file, prepare_dir, out_dir, dd.get_ref_file(data[0][0]), dd.get_srna_gtf_file(data[0][0]))
for sample in data:
sample[0]["seqcluster"] = out_dir
return data
def _get_arguments(cl):
p = argparse.ArgumentParser()
sbp = p.add_subparsers()
parse.add_subparser_cluster(sbp)
args = p.parse_args(cl)
return args
def _cluster(bam_file, prepare_dir, out_dir, reference, annotation_file=None):
"""
Connect to seqcluster to run cluster with python directly
"""
ma_file = op.join(prepare_dir, "seqs.ma")
cl = ["cluster", "-o", out_dir, "-m", ma_file, "-a", bam_file, "-r", reference]
if annotation_file:
cl = cl + ["-g", annotation_file]
args = _get_arguments(cl)
if not file_exists(op.join(out_dir, "counts.tsv")):
main_cluster.cluster(args)
return out_dir
|
fw1121/bcbio-nextgen
|
bcbio/srna/group.py
|
Python
|
mit
| 3,686
|
from django.db import models
from django import forms
class TemplateFormField(forms.CharField):
def clean(self, value):
"""
Validates that the input can be compiled as a template.
"""
value = super(TemplateFormField, self).clean(value)
from django.template import Template, TemplateSyntaxError
try:
Template(value)
except TemplateSyntaxError as error:
raise forms.ValidationError(error)
return value
class TemplateCharField(models.CharField):
def formfield(self, **kwargs):
# This is a fairly standard way to set up some defaults
# while letting the caller override them.
defaults = {'form_class': TemplateFormField}
defaults.update(kwargs)
return super(TemplateCharField, self).formfield(**defaults)
class TemplateTextField(models.TextField):
def formfield(self, **kwargs):
# This is a fairly standard way to set up some defaults
# while letting the caller override them.
defaults = {'form_class': TemplateFormField}
defaults.update(kwargs)
return super(TemplateTextField, self).formfield(**defaults)
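# Editor's note: a minimal, hypothetical usage sketch. In a model these fields
# behave like CharField/TextField, but the TemplateFormField above rejects any
# value that does not compile as a Django template.
#
#     class EmailTemplate(models.Model):
#         subject = TemplateCharField(max_length=200)
#         body = TemplateTextField()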
|
praekelt/django-form-designer
|
form_designer/template_field.py
|
Python
|
bsd-3-clause
| 1,188
|
'''
Created on 14/05/2013
@author: diogo
'''
from model.cache import Cache
from model.file import File
from model.frequency import Frequency
from model.log import Log
from random import choice, uniform
from copy import deepcopy
class Main(object):
'''
Principal controller of the application
'''
def __init__(self,params):
'''
get application params
'''
self.number = params.number_of_files
self.frequency = params.frequency
self.epsilon = params.epsilon
self.strategy = params.cache_strategy
self.size = params.cache_size
self.file_list = list()
self.list_of_frequencies = params.list_of_frequencies
self.timeslot = params.timeslot
self.hits = params.hits
self.log = Log(params)
self.increase_epsilon = params.increase_epsilon
self.decrease_epsilon = params.decrease_epsilon
self.z = params.zipf_exponent
def run(self):
cache = Cache(self.size,self.strategy)
self.gen_frequencies()
self.gen_file_list()
for timeslot in range(self.timeslot):
self.go_time()
self.apply_epsilon()
for hits in range(self.hits):
choice = self.select_file()
response = cache.append_item(choice)
if(response):
self.log.insert_log(response,timeslot+1,'hit',cache.get_file_status())
else:
self.log.insert_log(choice,timeslot+1,'miss',cache.get_file_status())
self.log.end_write()
def gen_frequencies(self):
if not self.list_of_frequencies and self.frequency == 'PERSONAL':
print "You need a list of preferences"
elif self.frequency != 'PERSONAL' and not not self.list_of_frequencies:
print "You need to set preference to PERSONAL"
else:
if not self.list_of_frequencies:
freq = Frequency(self.number,self.frequency,self.z)
else:
freq = Frequency(self.number,self.frequency,self.z,self.list_of_frequencies)
freq.gen_frequency()
self.list_of_frequencies = freq.frequency_
def gen_file_list(self):
for i in range(self.number):
self.file_list.append(File(i,self.list_of_frequencies[i]))
def select_file(self):
limit = uniform(0,1)
acum = 0
helper_list = deepcopy(self.file_list)
while acum <= limit:
item = choice(helper_list)
acum = item.frequency + acum
helper_list.remove(item)
for file_ in self.file_list:
if file_.name == item.name:
return file_
def go_time(self):
for file_ in self.file_list:
file_.usage = file_.usage + 1
def apply_epsilon(self):
if not not self.epsilon:
if len(self.epsilon) > 1:
if self.is_not_negative_list():
for i in range(len(self.file_list)):
self.file_list[i].frequency = self.file_list[i].frequency + self.epsilon[i]
elif not not self.increase_epsilon and not not self.decrease_epsilon:
if self.is_not_negative_increase():
for file_ in self.file_list:
if file_.name in self.increase_epsilon:
file_.frequency = file_.frequency + self.epsilon[0]
else:
file_.frequency = file_.frequency - self.epsilon[0]
else:
length = len(self.file_list)
if length % 2 == 0:
if self.is_not_negative_dual(self.epsilon[0],self.epsilon[0],length/2):
for file_ in self.file_list[:length/2]:
file_.frequency = file_.frequency - self.epsilon[0]
for file_ in self.file_list[length/2:]:
file_.frequency = file_.frequency + self.epsilon[0]
else:
majority = int(length / 2) + 1
minority = majority - 1
divided = (self.epsilon[0]*minority)/majority
if self.is_not_negative_dual(divided,self.epsilon[0],majority):
for file_ in self.file_list[:majority]:
file_.frequency = file_.frequency - divided
for file_ in self.file_list[majority:]:
file_.frequency = file_.frequency + self.epsilon[0]
def is_not_negative_list(self):
counter = 0
for i in range(len(self.file_list)):
counter = counter + self.file_list[i].frequency + self.epsilon[i]
if self.file_list[i].frequency + self.epsilon[i] < 0:
return False
if counter == 1:
return True
else:
return False
def is_not_negative_dual(self,minus,max_,cut):
counter = 0
for file_ in self.file_list[:cut]:
counter = counter + file_.frequency - minus
if file_.frequency - minus < 0:
return False
for file_ in self.file_list[cut:]:
counter = counter + file_.frequency + max_
if file_.frequency + max_ < 0:
return False
if counter == 1:
return True
else:
return False
def is_not_negative_increase(self):
counter = 0
for file_ in self.file_list:
if file_.name in self.increase_epsilon:
counter = counter + file_.frequency + self.epsilon[0]
if file_.frequency + self.epsilon[0] < 0:
return False
else:
counter = counter + file_.frequency - self.epsilon[0]
if file_.frequency - self.epsilon[0] < 0:
return False
if counter == 1:
return True
else:
return False
|
dcc-ufrj/SimuCache
|
simucache/controller/main.py
|
Python
|
gpl-3.0
| 6,144
|
# Copyright 2016, 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Merges several pronunciation dictionaries.
This is a priority-union in which an entry read from a lexicon later on the
command line supersedes any entry with an identical key read earlier.
Can be used to patch lexicons by merging a large base dictionary with a small
list of corrections.
"""
from __future__ import unicode_literals
import sys
from utils import utf8
STDIN = utf8.stdin
STDOUT = utf8.stdout
def ReadTsvLexicon(reader):
"""Reads a lexicon in TSV format. Only the first 2 columns are used."""
lex = {}
for line in reader:
line = line.rstrip('\n')
if not line or line.strip()[0] == "#":
continue
fields = line.split('\t')
assert len(fields) >= 2
orth, pron = fields[:2]
if orth not in lex:
lex[orth] = []
lex[orth].append(pron)
return lex
def main(argv):
if len(argv) == 1:
STDOUT.write('Usage: %s LEXICON...\n' % argv[0])
sys.exit(2)
lex = {}
for path in argv[1:]:
if path == '-':
lex.update(ReadTsvLexicon(STDIN))
else:
with utf8.open(path) as reader:
lex.update(ReadTsvLexicon(reader))
for orth in sorted(lex):
for pron in lex[orth]:
STDOUT.write('%s\t%s\n' % (orth, pron))
return
if __name__ == '__main__':
main(sys.argv)
|
googlei18n/language-resources
|
utils/merge_lexicons.py
|
Python
|
apache-2.0
| 1,863
|
import datetime
from io import StringIO
from unittest import mock
from contexts.plugins.reporting import cli
class WhenPrintingFinalCountsForAnEmptyRun:
def context(self):
self.stringio = StringIO()
self.reporter = cli.FinalCountsReporter(self.stringio)
def because_a_test_run_ends(self):
self.reporter.test_run_ended()
def it_should_output_zeroes(self):
assert self.stringio.getvalue() == ("""\
----------------------------------------------------------------------
PASSED!
0 contexts, 0 assertions
""")
class WhenPrintingFinalCountsForASuccessfulRun:
def in_the_context_of_a_successful_run(self):
self.stringio = StringIO()
self.reporter = cli.FinalCountsReporter(self.stringio)
ctx1 = type('', (), {})
ctx2 = type('', (), {})
self.reporter.context_started(ctx1, '')
self.reporter.assertion_started(lambda: None)
self.reporter.assertion_passed(lambda: None)
self.reporter.assertion_started(lambda: None)
self.reporter.assertion_passed(lambda: None)
self.reporter.context_ended(ctx1, '')
self.reporter.context_started(ctx2, '')
self.reporter.assertion_started(lambda: None)
self.reporter.assertion_passed(lambda: None)
self.reporter.context_ended(ctx2, '')
def because_the_test_run_ends(self):
self.reporter.test_run_ended()
def it_should_output_the_correct_numbers(self):
assert self.stringio.getvalue() == ("""\
----------------------------------------------------------------------
PASSED!
2 contexts, 3 assertions
""")
class WhenPrintingFinalCountsAfterAnAssertionFails:
def establish_that_an_assertion_has_failed(self):
self.stringio = StringIO()
self.reporter = cli.FinalCountsReporter(self.stringio)
self.reporter.assertion_failed(lambda: None, Exception())
def because_the_test_run_ends(self):
self.reporter.test_run_ended()
def it_should_count_one_failure(self):
assert self.stringio.getvalue() == ("""\
----------------------------------------------------------------------
FAILED!
0 contexts, 0 assertions: 1 failed, 0 errors
""")
class WhenPrintingFinalCountsAfterAnAssertionErrors:
def establish_that_a_test_has_failed(self):
self.stringio = StringIO()
self.reporter = cli.FinalCountsReporter(self.stringio)
self.reporter.assertion_errored(lambda: None, Exception())
def because_the_test_run_ends(self):
self.reporter.test_run_ended()
def it_should_count_one_error(self):
assert self.stringio.getvalue() == ("""\
----------------------------------------------------------------------
FAILED!
0 contexts, 0 assertions: 0 failed, 1 error
""")
class WhenPrintingFinalCountsAfterAContextErrors:
def establish_that_a_test_has_failed(self):
self.stringio = StringIO()
self.reporter = cli.FinalCountsReporter(self.stringio)
self.reporter.context_errored("", '', Exception())
def because_the_test_run_ends(self):
self.reporter.test_run_ended()
def it_should_count_one_error(self):
assert self.stringio.getvalue() == ("""\
----------------------------------------------------------------------
FAILED!
0 contexts, 0 assertions: 0 failed, 1 error
""")
class WhenPrintingFinalCountsAfterATestClassErrors:
def establish_that_a_test_has_failed(self):
self.stringio = StringIO()
self.reporter = cli.FinalCountsReporter(self.stringio)
self.reporter.test_class_errored("", Exception())
def because_the_test_run_ends(self):
self.reporter.test_run_ended()
def it_should_count_one_error(self):
assert self.stringio.getvalue() == ("""\
----------------------------------------------------------------------
FAILED!
0 contexts, 0 assertions: 0 failed, 1 error
""")
class WhenTimingATestRun:
def context(self):
self.fake_now = datetime.datetime(2013, 10, 22, 13, 41, 0)
self.fake_soon = datetime.timedelta(seconds=10, milliseconds=490)
class FakeDateTime(datetime.datetime):
now = mock.Mock(return_value=self.fake_now)
self.FakeDateTime = FakeDateTime
self.stringio = StringIO()
self.reporter = cli.TimedReporter(self.stringio)
def because_we_run_a_test_run(self):
with mock.patch('datetime.datetime', self.FakeDateTime):
self.reporter.test_run_started()
datetime.datetime.now.return_value += self.fake_soon
self.reporter.test_run_ended()
def it_should_report_the_total_time_for_the_test_run(self):
assert self.stringio.getvalue() == "(10.5 seconds)\n"
|
benjamin-hodgson/Contexts
|
test/plugin_tests/reporting_tests/cli_tests.py
|
Python
|
mit
| 4,698
|
from hyo2.soundspeedsettings import gui
gui.gui()
|
hydroffice/hyo_soundspeed
|
hyo2/soundspeedsettings/__main__.py
|
Python
|
lgpl-2.1
| 51
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "labs_django.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
heldergg/labs
|
labs_django/manage.py
|
Python
|
gpl-3.0
| 254
|
#!/usr/bin/env python
#
# Copyright (c) 2008-2009 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
"""
Grinder Setup Script
"""
from setuptools import setup, find_packages
setup(
name="grinder",
version='0.1.12',
description='A tool for synching RPM based content.',
author='Mike McCune, John Matthews, Pradeep Kilambi, Jeff Ortel',
author_email='jmatthews@redhat.com',
url='http://git.fedorahosted.org/git/?p=grinder.git',
license='GPLv2+',
package_dir={
'': 'src',
},
packages = find_packages('src'),
include_package_data = True,
data_files = [("../etc/grinder", ["etc/grinder/grinder.yml"])],
# non-python scripts go here
scripts = [
'bin/grinder',
],
classifiers = [
'License :: OSI Approved :: GNU General Public License (GPL)',
'Development Status :: 2 - Pre-Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Programming Language :: Python'
],
# test_suite = 'nose.collector',
)
# XXX: this will also print on non-install targets
print("grinder target is complete")
|
jessegonzalez/grinder
|
setup.py
|
Python
|
gpl-2.0
| 1,714
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains Google BigQuery to BigQuery operator.
"""
import warnings
from typing import Dict, List, Optional, Union
from airflow.gcp.hooks.bigquery import BigQueryHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class BigQueryToBigQueryOperator(BaseOperator):
"""
Copies data from one BigQuery table to another.
.. seealso::
For more details about these parameters:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy
:param source_project_dataset_tables: One or more
dotted ``(project:|project.)<dataset>.<table>`` BigQuery tables to use as the
source data. If ``<project>`` is not included, project will be the
project defined in the connection json. Use a list if there are multiple
source tables. (templated)
:type source_project_dataset_tables: list|string
:param destination_project_dataset_table: The destination BigQuery
table. Format is: ``(project:|project.)<dataset>.<table>`` (templated)
:type destination_project_dataset_table: str
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: str
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: str
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
:param bigquery_conn_id: (Deprecated) The connection ID used to connect to Google Cloud Platform.
This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
:type bigquery_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have domain-wide
delegation enabled.
:type delegate_to: str
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
:param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).
**Example**: ::
encryption_configuration = {
"kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key"
}
:type encryption_configuration: dict
:param location: The location used for the operation.
:type location: str
"""
template_fields = ('source_project_dataset_tables',
'destination_project_dataset_table', 'labels')
template_ext = ('.sql',)
ui_color = '#e6f0e4'
@apply_defaults
def __init__(self, # pylint: disable=too-many-arguments
source_project_dataset_tables: Union[List[str], str],
destination_project_dataset_table: str,
write_disposition: str = 'WRITE_EMPTY',
create_disposition: str = 'CREATE_IF_NEEDED',
gcp_conn_id: str = 'google_cloud_default',
bigquery_conn_id: Optional[str] = None,
delegate_to: Optional[str] = None,
labels: Optional[Dict] = None,
encryption_configuration: Optional[Dict] = None,
location: Optional[str] = None,
*args,
**kwargs) -> None:
super().__init__(*args, **kwargs)
if bigquery_conn_id:
warnings.warn(
"The bigquery_conn_id parameter has been deprecated. You should pass "
"the gcp_conn_id parameter.", DeprecationWarning, stacklevel=3)
gcp_conn_id = bigquery_conn_id
self.source_project_dataset_tables = source_project_dataset_tables
self.destination_project_dataset_table = destination_project_dataset_table
self.write_disposition = write_disposition
self.create_disposition = create_disposition
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.labels = labels
self.encryption_configuration = encryption_configuration
self.location = location
def execute(self, context):
self.log.info(
'Executing copy of %s into: %s',
self.source_project_dataset_tables, self.destination_project_dataset_table
)
hook = BigQueryHook(bigquery_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
location=self.location)
conn = hook.get_conn()
cursor = conn.cursor()
cursor.run_copy(
source_project_dataset_tables=self.source_project_dataset_tables,
destination_project_dataset_table=self.destination_project_dataset_table,
write_disposition=self.write_disposition,
create_disposition=self.create_disposition,
labels=self.labels,
encryption_configuration=self.encryption_configuration)
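    # Editor's note: a minimal usage sketch based only on the parameters
    # documented in the class docstring above; the task id, DAG and table
    # names are hypothetical placeholders.
    #
    #     copy_task = BigQueryToBigQueryOperator(
    #         task_id='copy_table',
    #         source_project_dataset_tables='my-project.src_dataset.src_table',
    #         destination_project_dataset_table='my-project.dst_dataset.dst_table',
    #         write_disposition='WRITE_TRUNCATE',
    #         gcp_conn_id='google_cloud_default',
    #         dag=dag,
    #     )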
|
Fokko/incubator-airflow
|
airflow/operators/bigquery_to_bigquery.py
|
Python
|
apache-2.0
| 5,756
|
#!/usr/bin/env python
#
# backend for serial IO for POSIX compatible systems, like Linux, OSX
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C) 2001-2016 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
#
# parts based on code from Grant B. Edwards <grante@visi.com>:
# ftp://ftp.visi.com/users/grante/python/PosixSerial.py
#
# references: http://www.easysw.com/~mike/serial/serial.html
# Collection of port names (was previously used by number_to_device which was
# removed).
# - Linux /dev/ttyS%d (confirmed)
# - cygwin/win32 /dev/com%d (confirmed)
# - openbsd (OpenBSD) /dev/cua%02d
# - bsd*, freebsd* /dev/cuad%d
# - darwin (OS X) /dev/cuad%d
# - netbsd /dev/dty%02d (NetBSD 1.6 testing by Erk)
# - irix (IRIX) /dev/ttyf%d (partially tested) names depending on flow control
# - hp (HP-UX) /dev/tty%dp0 (not tested)
# - sunos (Solaris/SunOS) /dev/tty%c (letters, 'a'..'z') (confirmed)
# - aix (AIX) /dev/tty%d
# pylint: disable=abstract-method
import errno
import fcntl
import os
import select
import struct
import sys
import termios
from .serialutil import *
from .serialutil import SerialBase, SerialException, to_bytes, \
portNotOpenError, writeTimeoutError, Timeout
class PlatformSpecificBase(object):
BAUDRATE_CONSTANTS = {}
def _set_special_baudrate(self, baudrate):
raise NotImplementedError('non-standard baudrates are not supported on this platform')
def _set_rs485_mode(self, rs485_settings):
raise NotImplementedError('RS485 not supported on this platform')
# some systems support an extra flag to enable the two parity settings
# unsupported in POSIX: MARK and SPACE
CMSPAR = 0 # default, for unsupported platforms, override below
# try to detect the OS so that a device can be selected...
# this code block should supply a device() and set_special_baudrate() function
# for the platform
plat = sys.platform.lower()
if plat[:5] == 'linux': # Linux (confirmed) # noqa
import array
# extra termios flags
CMSPAR = 0o10000000000 # Use "stick" (mark/space) parity
# baudrate ioctls
TCGETS2 = 0x802C542A
TCSETS2 = 0x402C542B
BOTHER = 0o010000
# RS485 ioctls
TIOCGRS485 = 0x542E
TIOCSRS485 = 0x542F
SER_RS485_ENABLED = 0b00000001
SER_RS485_RTS_ON_SEND = 0b00000010
SER_RS485_RTS_AFTER_SEND = 0b00000100
SER_RS485_RX_DURING_TX = 0b00010000
class PlatformSpecific(PlatformSpecificBase):
BAUDRATE_CONSTANTS = {
0: 0o000000, # hang up
50: 0o000001,
75: 0o000002,
110: 0o000003,
134: 0o000004,
150: 0o000005,
200: 0o000006,
300: 0o000007,
600: 0o000010,
1200: 0o000011,
1800: 0o000012,
2400: 0o000013,
4800: 0o000014,
9600: 0o000015,
19200: 0o000016,
38400: 0o000017,
57600: 0o010001,
115200: 0o010002,
230400: 0o010003,
460800: 0o010004,
500000: 0o010005,
576000: 0o010006,
921600: 0o010007,
1000000: 0o010010,
1152000: 0o010011,
1500000: 0o010012,
2000000: 0o010013,
2500000: 0o010014,
3000000: 0o010015,
3500000: 0o010016,
4000000: 0o010017
}
def _set_special_baudrate(self, baudrate):
# right size is 44 on x86_64, allow for some growth
buf = array.array('i', [0] * 64)
try:
# get serial_struct
fcntl.ioctl(self.fd, TCGETS2, buf)
# set custom speed
buf[2] &= ~termios.CBAUD
buf[2] |= BOTHER
buf[9] = buf[10] = baudrate
# set serial_struct
fcntl.ioctl(self.fd, TCSETS2, buf)
except IOError as e:
raise ValueError('Failed to set custom baud rate ({}): {}'.format(baudrate, e))
def _set_rs485_mode(self, rs485_settings):
buf = array.array('i', [0] * 8) # flags, delaytx, delayrx, padding
try:
fcntl.ioctl(self.fd, TIOCGRS485, buf)
buf[0] |= SER_RS485_ENABLED
if rs485_settings is not None:
if rs485_settings.loopback:
buf[0] |= SER_RS485_RX_DURING_TX
else:
buf[0] &= ~SER_RS485_RX_DURING_TX
if rs485_settings.rts_level_for_tx:
buf[0] |= SER_RS485_RTS_ON_SEND
else:
buf[0] &= ~SER_RS485_RTS_ON_SEND
if rs485_settings.rts_level_for_rx:
buf[0] |= SER_RS485_RTS_AFTER_SEND
else:
buf[0] &= ~SER_RS485_RTS_AFTER_SEND
if rs485_settings.delay_before_tx is not None:
buf[1] = int(rs485_settings.delay_before_tx * 1000)
if rs485_settings.delay_before_rx is not None:
buf[2] = int(rs485_settings.delay_before_rx * 1000)
else:
buf[0] = 0 # clear SER_RS485_ENABLED
fcntl.ioctl(self.fd, TIOCSRS485, buf)
except IOError as e:
raise ValueError('Failed to set RS485 mode: {}'.format(e))
elif plat == 'cygwin': # cygwin/win32 (confirmed)
class PlatformSpecific(PlatformSpecificBase):
BAUDRATE_CONSTANTS = {
128000: 0x01003,
256000: 0x01005,
500000: 0x01007,
576000: 0x01008,
921600: 0x01009,
1000000: 0x0100a,
1152000: 0x0100b,
1500000: 0x0100c,
2000000: 0x0100d,
2500000: 0x0100e,
3000000: 0x0100f
}
elif plat[:6] == 'darwin': # OS X
import array
IOSSIOSPEED = 0x80045402 # _IOW('T', 2, speed_t)
class PlatformSpecific(PlatformSpecificBase):
osx_version = os.uname()[2].split('.')
# Tiger or above can support arbitrary serial speeds
if int(osx_version[0]) >= 8:
def _set_special_baudrate(self, baudrate):
# use IOKit-specific call to set up high speeds
buf = array.array('i', [baudrate])
fcntl.ioctl(self.fd, IOSSIOSPEED, buf, 1)
elif plat[:3] == 'bsd' or \
plat[:7] == 'freebsd' or \
plat[:6] == 'netbsd' or \
plat[:7] == 'openbsd':
class ReturnBaudrate(object):
def __getitem__(self, key):
return key
class PlatformSpecific(PlatformSpecificBase):
# Only tested on FreeBSD:
# The baud rate may be passed in as
# a literal value.
BAUDRATE_CONSTANTS = ReturnBaudrate()
else:
class PlatformSpecific(PlatformSpecificBase):
pass
# load some constants for later use.
# try to use values from termios, use defaults from linux otherwise
TIOCMGET = getattr(termios, 'TIOCMGET', 0x5415)
TIOCMBIS = getattr(termios, 'TIOCMBIS', 0x5416)
TIOCMBIC = getattr(termios, 'TIOCMBIC', 0x5417)
TIOCMSET = getattr(termios, 'TIOCMSET', 0x5418)
# TIOCM_LE = getattr(termios, 'TIOCM_LE', 0x001)
TIOCM_DTR = getattr(termios, 'TIOCM_DTR', 0x002)
TIOCM_RTS = getattr(termios, 'TIOCM_RTS', 0x004)
# TIOCM_ST = getattr(termios, 'TIOCM_ST', 0x008)
# TIOCM_SR = getattr(termios, 'TIOCM_SR', 0x010)
TIOCM_CTS = getattr(termios, 'TIOCM_CTS', 0x020)
TIOCM_CAR = getattr(termios, 'TIOCM_CAR', 0x040)
TIOCM_RNG = getattr(termios, 'TIOCM_RNG', 0x080)
TIOCM_DSR = getattr(termios, 'TIOCM_DSR', 0x100)
TIOCM_CD = getattr(termios, 'TIOCM_CD', TIOCM_CAR)
TIOCM_RI = getattr(termios, 'TIOCM_RI', TIOCM_RNG)
# TIOCM_OUT1 = getattr(termios, 'TIOCM_OUT1', 0x2000)
# TIOCM_OUT2 = getattr(termios, 'TIOCM_OUT2', 0x4000)
if hasattr(termios, 'TIOCINQ'):
TIOCINQ = termios.TIOCINQ
else:
TIOCINQ = getattr(termios, 'FIONREAD', 0x541B)
TIOCOUTQ = getattr(termios, 'TIOCOUTQ', 0x5411)
TIOCM_zero_str = struct.pack('I', 0)
TIOCM_RTS_str = struct.pack('I', TIOCM_RTS)
TIOCM_DTR_str = struct.pack('I', TIOCM_DTR)
TIOCSBRK = getattr(termios, 'TIOCSBRK', 0x5427)
TIOCCBRK = getattr(termios, 'TIOCCBRK', 0x5428)
class Serial(SerialBase, PlatformSpecific):
"""\
Serial port class POSIX implementation. Serial port configuration is
done with termios and fcntl. Runs on Linux and many other Un*x like
systems.
"""
def open(self):
"""\
Open port with current settings. This may throw a SerialException
if the port cannot be opened."""
if self._port is None:
raise SerialException("Port must be configured before it can be used.")
if self.is_open:
raise SerialException("Port is already open.")
self.fd = None
# open
try:
self.fd = os.open(self.portstr, os.O_RDWR | os.O_NOCTTY | os.O_NONBLOCK)
except OSError as msg:
self.fd = None
raise SerialException(msg.errno, "could not open port {}: {}".format(self._port, msg))
#~ fcntl.fcntl(self.fd, fcntl.F_SETFL, 0) # set blocking
try:
self._reconfigure_port(force_update=True)
except:
try:
os.close(self.fd)
except:
# ignore any exception when closing the port
# also to keep original exception that happened when setting up
pass
self.fd = None
raise
else:
self.is_open = True
try:
if not self._dsrdtr:
self._update_dtr_state()
if not self._rtscts:
self._update_rts_state()
except IOError as e:
if e.errno in (errno.EINVAL, errno.ENOTTY):
# ignore Invalid argument and Inappropriate ioctl
pass
else:
raise
self.reset_input_buffer()
self.pipe_abort_read_r, self.pipe_abort_read_w = os.pipe()
self.pipe_abort_write_r, self.pipe_abort_write_w = os.pipe()
fcntl.fcntl(self.pipe_abort_read_r, fcntl.F_SETFL, os.O_NONBLOCK)
fcntl.fcntl(self.pipe_abort_write_r, fcntl.F_SETFL, os.O_NONBLOCK)
def _reconfigure_port(self, force_update=False):
"""Set communication parameters on opened port."""
if self.fd is None:
raise SerialException("Can only operate on a valid file descriptor")
# if exclusive lock is requested, create it before we modify anything else
if self._exclusive is not None:
if self._exclusive:
try:
fcntl.flock(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError as msg:
raise SerialException(msg.errno, "Could not exclusively lock port {}: {}".format(self._port, msg))
else:
fcntl.flock(self.fd, fcntl.LOCK_UN)
custom_baud = None
vmin = vtime = 0 # timeout is done via select
if self._inter_byte_timeout is not None:
vmin = 1
vtime = int(self._inter_byte_timeout * 10)
try:
orig_attr = termios.tcgetattr(self.fd)
iflag, oflag, cflag, lflag, ispeed, ospeed, cc = orig_attr
except termios.error as msg: # if a port is nonexistent but has a /dev file, it'll fail here
raise SerialException("Could not configure port: {}".format(msg))
# set up raw mode / no echo / binary
cflag |= (termios.CLOCAL | termios.CREAD)
lflag &= ~(termios.ICANON | termios.ECHO | termios.ECHOE |
termios.ECHOK | termios.ECHONL |
termios.ISIG | termios.IEXTEN) # |termios.ECHOPRT
for flag in ('ECHOCTL', 'ECHOKE'): # netbsd workaround for Erk
if hasattr(termios, flag):
lflag &= ~getattr(termios, flag)
oflag &= ~(termios.OPOST | termios.ONLCR | termios.OCRNL)
iflag &= ~(termios.INLCR | termios.IGNCR | termios.ICRNL | termios.IGNBRK)
if hasattr(termios, 'IUCLC'):
iflag &= ~termios.IUCLC
if hasattr(termios, 'PARMRK'):
iflag &= ~termios.PARMRK
# setup baud rate
try:
ispeed = ospeed = getattr(termios, 'B{}'.format(self._baudrate))
except AttributeError:
try:
ispeed = ospeed = self.BAUDRATE_CONSTANTS[self._baudrate]
except KeyError:
#~ raise ValueError('Invalid baud rate: %r' % self._baudrate)
# may need custom baud rate, it isn't in our list.
ispeed = ospeed = getattr(termios, 'B38400')
try:
custom_baud = int(self._baudrate) # store for later
except ValueError:
raise ValueError('Invalid baud rate: {!r}'.format(self._baudrate))
else:
if custom_baud < 0:
raise ValueError('Invalid baud rate: {!r}'.format(self._baudrate))
# setup char len
cflag &= ~termios.CSIZE
if self._bytesize == 8:
cflag |= termios.CS8
elif self._bytesize == 7:
cflag |= termios.CS7
elif self._bytesize == 6:
cflag |= termios.CS6
elif self._bytesize == 5:
cflag |= termios.CS5
else:
raise ValueError('Invalid char len: {!r}'.format(self._bytesize))
# setup stop bits
if self._stopbits == STOPBITS_ONE:
cflag &= ~(termios.CSTOPB)
elif self._stopbits == STOPBITS_ONE_POINT_FIVE:
cflag |= (termios.CSTOPB) # XXX same as TWO.. there is no POSIX support for 1.5
elif self._stopbits == STOPBITS_TWO:
cflag |= (termios.CSTOPB)
else:
raise ValueError('Invalid stop bit specification: {!r}'.format(self._stopbits))
# setup parity
iflag &= ~(termios.INPCK | termios.ISTRIP)
if self._parity == PARITY_NONE:
cflag &= ~(termios.PARENB | termios.PARODD | CMSPAR)
elif self._parity == PARITY_EVEN:
cflag &= ~(termios.PARODD | CMSPAR)
cflag |= (termios.PARENB)
elif self._parity == PARITY_ODD:
cflag &= ~CMSPAR
cflag |= (termios.PARENB | termios.PARODD)
elif self._parity == PARITY_MARK and CMSPAR:
cflag |= (termios.PARENB | CMSPAR | termios.PARODD)
elif self._parity == PARITY_SPACE and CMSPAR:
cflag |= (termios.PARENB | CMSPAR)
cflag &= ~(termios.PARODD)
else:
raise ValueError('Invalid parity: {!r}'.format(self._parity))
# setup flow control
# xonxoff
if hasattr(termios, 'IXANY'):
if self._xonxoff:
iflag |= (termios.IXON | termios.IXOFF) # |termios.IXANY)
else:
iflag &= ~(termios.IXON | termios.IXOFF | termios.IXANY)
else:
if self._xonxoff:
iflag |= (termios.IXON | termios.IXOFF)
else:
iflag &= ~(termios.IXON | termios.IXOFF)
# rtscts
if hasattr(termios, 'CRTSCTS'):
if self._rtscts:
cflag |= (termios.CRTSCTS)
else:
cflag &= ~(termios.CRTSCTS)
elif hasattr(termios, 'CNEW_RTSCTS'): # try it with alternate constant name
if self._rtscts:
cflag |= (termios.CNEW_RTSCTS)
else:
cflag &= ~(termios.CNEW_RTSCTS)
# XXX should there be a warning if setting up rtscts (and xonxoff etc) fails??
# buffer
# vmin "minimal number of characters to be read. 0 for non blocking"
if vmin < 0 or vmin > 255:
raise ValueError('Invalid vmin: {!r}'.format(vmin))
cc[termios.VMIN] = vmin
# vtime
if vtime < 0 or vtime > 255:
raise ValueError('Invalid vtime: {!r}'.format(vtime))
cc[termios.VTIME] = vtime
# activate settings
if force_update or [iflag, oflag, cflag, lflag, ispeed, ospeed, cc] != orig_attr:
termios.tcsetattr(
self.fd,
termios.TCSANOW,
[iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
# apply custom baud rate, if any
if custom_baud is not None:
self._set_special_baudrate(custom_baud)
if self._rs485_mode is not None:
self._set_rs485_mode(self._rs485_mode)
def close(self):
"""Close port"""
if self.is_open:
if self.fd is not None:
os.close(self.fd)
self.fd = None
os.close(self.pipe_abort_read_w)
os.close(self.pipe_abort_read_r)
os.close(self.pipe_abort_write_w)
os.close(self.pipe_abort_write_r)
self.pipe_abort_read_r, self.pipe_abort_read_w = None, None
self.pipe_abort_write_r, self.pipe_abort_write_w = None, None
self.is_open = False
# - - - - - - - - - - - - - - - - - - - - - - - -
@property
def in_waiting(self):
"""Return the number of bytes currently in the input buffer."""
#~ s = fcntl.ioctl(self.fd, termios.FIONREAD, TIOCM_zero_str)
s = fcntl.ioctl(self.fd, TIOCINQ, TIOCM_zero_str)
return struct.unpack('I', s)[0]
    # select-based implementation, proven to work on many systems
def read(self, size=1):
"""\
        Read size bytes from the serial port. If a timeout is set it may
        return fewer characters than requested. With no timeout it will block
        until the requested number of bytes is read.
"""
if not self.is_open:
raise portNotOpenError
read = bytearray()
timeout = Timeout(self._timeout)
while len(read) < size:
try:
ready, _, _ = select.select([self.fd, self.pipe_abort_read_r], [], [], timeout.time_left())
if self.pipe_abort_read_r in ready:
os.read(self.pipe_abort_read_r, 1000)
break
# If select was used with a timeout, and the timeout occurs, it
# returns with empty lists -> thus abort read operation.
# For timeout == 0 (non-blocking operation) also abort when
# there is nothing to read.
if not ready:
break # timeout
buf = os.read(self.fd, size - len(read))
# read should always return some data as select reported it was
# ready to read when we get to this point.
if not buf:
# Disconnected devices, at least on Linux, show the
# behavior that they are always ready to read immediately
# but reading returns nothing.
raise SerialException(
'device reports readiness to read but returned no data '
'(device disconnected or multiple access on port?)')
read.extend(buf)
except OSError as e:
                # this is for Python 3.x, where select.error is a subclass of
                # OSError: BlockingIOError and EINTR are ignored, other errors
                # are re-raised as SerialException.
                # https://www.python.org/dev/peps/pep-0475/
if e.errno not in (errno.EAGAIN, errno.EALREADY, errno.EWOULDBLOCK, errno.EINPROGRESS, errno.EINTR):
raise SerialException('read failed: {}'.format(e))
except select.error as e:
                # this is for Python 2.x, where select.error is not an OSError:
                # EAGAIN/EWOULDBLOCK/EINTR are ignored, other errors are
                # re-raised as SerialException.
                # see also http://www.python.org/dev/peps/pep-3151/#select
if e[0] not in (errno.EAGAIN, errno.EALREADY, errno.EWOULDBLOCK, errno.EINPROGRESS, errno.EINTR):
raise SerialException('read failed: {}'.format(e))
if timeout.expired():
break
return bytes(read)
def cancel_read(self):
if self.is_open:
os.write(self.pipe_abort_read_w, b"x")
def cancel_write(self):
if self.is_open:
os.write(self.pipe_abort_write_w, b"x")
def write(self, data):
"""Output the given byte string over the serial port."""
if not self.is_open:
raise portNotOpenError
d = to_bytes(data)
tx_len = length = len(d)
timeout = Timeout(self._write_timeout)
while tx_len > 0:
try:
n = os.write(self.fd, d)
if timeout.is_non_blocking:
# Zero timeout indicates non-blocking - simply return the
# number of bytes of data actually written
return n
elif not timeout.is_infinite:
# when timeout is set, use select to wait for being ready
# with the time left as timeout
if timeout.expired():
raise writeTimeoutError
abort, ready, _ = select.select([self.pipe_abort_write_r], [self.fd], [], timeout.time_left())
if abort:
os.read(self.pipe_abort_write_r, 1000)
break
if not ready:
raise writeTimeoutError
else:
assert timeout.time_left() is None
# wait for write operation
abort, ready, _ = select.select([self.pipe_abort_write_r], [self.fd], [], None)
if abort:
os.read(self.pipe_abort_write_r, 1)
break
if not ready:
raise SerialException('write failed (select)')
d = d[n:]
tx_len -= n
except SerialException:
raise
except OSError as e:
                # this is for Python 3.x, where select.error is a subclass of
                # OSError: BlockingIOError and EINTR are ignored, other errors
                # are re-raised as SerialException.
                # https://www.python.org/dev/peps/pep-0475/
if e.errno not in (errno.EAGAIN, errno.EALREADY, errno.EWOULDBLOCK, errno.EINPROGRESS, errno.EINTR):
raise SerialException('write failed: {}'.format(e))
except select.error as e:
                # this is for Python 2.x, where select.error is not an OSError:
                # EAGAIN/EWOULDBLOCK/EINTR are ignored, other errors are
                # re-raised as SerialException.
                # see also http://www.python.org/dev/peps/pep-3151/#select
if e[0] not in (errno.EAGAIN, errno.EALREADY, errno.EWOULDBLOCK, errno.EINPROGRESS, errno.EINTR):
raise SerialException('write failed: {}'.format(e))
if not timeout.is_non_blocking and timeout.expired():
raise writeTimeoutError
return length - len(d)
def flush(self):
"""\
        Flush of file-like objects. In this case, wait until all data
is written.
"""
if not self.is_open:
raise portNotOpenError
termios.tcdrain(self.fd)
def reset_input_buffer(self):
"""Clear input buffer, discarding all that is in the buffer."""
if not self.is_open:
raise portNotOpenError
termios.tcflush(self.fd, termios.TCIFLUSH)
def reset_output_buffer(self):
"""\
Clear output buffer, aborting the current output and discarding all
that is in the buffer.
"""
if not self.is_open:
raise portNotOpenError
termios.tcflush(self.fd, termios.TCOFLUSH)
def send_break(self, duration=0.25):
"""\
Send break condition. Timed, returns to idle state after given
duration.
"""
if not self.is_open:
raise portNotOpenError
termios.tcsendbreak(self.fd, int(duration / 0.25))
def _update_break_state(self):
"""\
Set break: Controls TXD. When active, no transmitting is possible.
"""
if self._break_state:
fcntl.ioctl(self.fd, TIOCSBRK)
else:
fcntl.ioctl(self.fd, TIOCCBRK)
def _update_rts_state(self):
"""Set terminal status line: Request To Send"""
if self._rts_state:
fcntl.ioctl(self.fd, TIOCMBIS, TIOCM_RTS_str)
else:
fcntl.ioctl(self.fd, TIOCMBIC, TIOCM_RTS_str)
def _update_dtr_state(self):
"""Set terminal status line: Data Terminal Ready"""
if self._dtr_state:
fcntl.ioctl(self.fd, TIOCMBIS, TIOCM_DTR_str)
else:
fcntl.ioctl(self.fd, TIOCMBIC, TIOCM_DTR_str)
@property
def cts(self):
"""Read terminal status line: Clear To Send"""
if not self.is_open:
raise portNotOpenError
s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
return struct.unpack('I', s)[0] & TIOCM_CTS != 0
@property
def dsr(self):
"""Read terminal status line: Data Set Ready"""
if not self.is_open:
raise portNotOpenError
s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
return struct.unpack('I', s)[0] & TIOCM_DSR != 0
@property
def ri(self):
"""Read terminal status line: Ring Indicator"""
if not self.is_open:
raise portNotOpenError
s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
return struct.unpack('I', s)[0] & TIOCM_RI != 0
@property
def cd(self):
"""Read terminal status line: Carrier Detect"""
if not self.is_open:
raise portNotOpenError
s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
return struct.unpack('I', s)[0] & TIOCM_CD != 0
# - - platform specific - - - -
@property
def out_waiting(self):
"""Return the number of bytes currently in the output buffer."""
#~ s = fcntl.ioctl(self.fd, termios.FIONREAD, TIOCM_zero_str)
s = fcntl.ioctl(self.fd, TIOCOUTQ, TIOCM_zero_str)
return struct.unpack('I', s)[0]
def fileno(self):
"""\
For easier use of the serial port instance with select.
WARNING: this function is not portable to different platforms!
"""
if not self.is_open:
raise portNotOpenError
return self.fd
def set_input_flow_control(self, enable=True):
"""\
Manually control flow - when software flow control is enabled.
This will send XON (true) or XOFF (false) to the other device.
WARNING: this function is not portable to different platforms!
"""
if not self.is_open:
raise portNotOpenError
if enable:
termios.tcflow(self.fd, termios.TCION)
else:
termios.tcflow(self.fd, termios.TCIOFF)
def set_output_flow_control(self, enable=True):
"""\
Manually control flow of outgoing data - when hardware or software flow
control is enabled.
WARNING: this function is not portable to different platforms!
"""
if not self.is_open:
raise portNotOpenError
if enable:
termios.tcflow(self.fd, termios.TCOON)
else:
termios.tcflow(self.fd, termios.TCOOFF)
def nonblocking(self):
"""DEPRECATED - has no use"""
import warnings
warnings.warn("nonblocking() has no effect, already nonblocking", DeprecationWarning)
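# Illustrative usage sketches, not part of the original pyserial sources: a
# minimal round trip through the Serial class defined above, plus cancelling a
# blocking read from another thread via cancel_read(). The device path, baud
# rate and the payload are assumptions; applications normally construct the
# port via serial.Serial from the package's top level rather than this module.
def _example_roundtrip(port='/dev/ttyUSB0', baudrate=115200):
    """Open a port, write one line and read back up to 64 bytes."""
    s = Serial(port, baudrate=baudrate, timeout=2)   # opening happens in __init__
    try:
        s.write(b'hello\n')    # blocks until all bytes are handed to the driver
        return s.read(64)      # returns early once the 2 second timeout expires
    finally:
        s.close()              # also tears down the abort pipes created in open()

def _example_cancel_read(port='/dev/ttyUSB0'):
    """Abort a blocking read after one second using the abort pipe."""
    import threading
    s = Serial(port, baudrate=9600, timeout=None)    # timeout=None blocks in read()
    timer = threading.Timer(1.0, s.cancel_read)      # cancel_read() writes to the pipe
    timer.start()
    try:
        return s.read(1024)    # returns what was collected when the abort fires
    finally:
        timer.cancel()
        s.close()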
class PosixPollSerial(Serial):
"""\
    Poll-based read implementation. Not all systems support poll properly.
However this one has better handling of errors, such as a device
disconnecting while it's in use (e.g. USB-serial unplugged).
"""
def read(self, size=1):
"""\
        Read size bytes from the serial port. If a timeout is set it may
        return fewer characters than requested. With no timeout it will block
        until the requested number of bytes is read.
"""
if not self.is_open:
raise portNotOpenError
read = bytearray()
timeout = Timeout(self._timeout)
poll = select.poll()
poll.register(self.fd, select.POLLIN | select.POLLERR | select.POLLHUP | select.POLLNVAL)
poll.register(self.pipe_abort_read_r, select.POLLIN | select.POLLERR | select.POLLHUP | select.POLLNVAL)
if size > 0:
while len(read) < size:
# print "\tread(): size",size, "have", len(read) #debug
# wait until device becomes ready to read (or something fails)
for fd, event in poll.poll(None if timeout.is_infinite else (timeout.time_left() * 1000)):
if fd == self.pipe_abort_read_r:
break
if event & (select.POLLERR | select.POLLHUP | select.POLLNVAL):
raise SerialException('device reports error (poll)')
# we don't care if it is select.POLLIN or timeout, that's
# handled below
if fd == self.pipe_abort_read_r:
os.read(self.pipe_abort_read_r, 1000)
break
buf = os.read(self.fd, size - len(read))
read.extend(buf)
if timeout.expired() \
or (self._inter_byte_timeout is not None and self._inter_byte_timeout > 0) and not buf:
break # early abort on timeout
return bytes(read)
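# Illustrative sketch, not part of the original pyserial sources: reading
# through PosixPollSerial defined above. Because poll() reports
# POLLERR/POLLHUP/POLLNVAL, an unplugged USB-serial adapter surfaces as a
# SerialException instead of silent empty reads. The device path is an
# assumption.
def _example_poll_read(port='/dev/ttyUSB0'):
    """Read up to 32 bytes with a one second timeout via the poll() backend."""
    s = PosixPollSerial(port, baudrate=9600, timeout=1)
    try:
        return s.read(32)      # raises SerialException if the device drops out
    finally:
        s.close()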
class VTIMESerial(Serial):
"""\
    Implement timeouts using the tty device's VMIN/VTIME settings instead of
    select(). This means that no inter-character timeout can be specified and
    that error handling is degraded.
    The overall timeout is disabled when an inter-character timeout is used.
"""
def _reconfigure_port(self, force_update=True):
"""Set communication parameters on opened port."""
super(VTIMESerial, self)._reconfigure_port()
fcntl.fcntl(self.fd, fcntl.F_SETFL, 0) # clear O_NONBLOCK
if self._inter_byte_timeout is not None:
vmin = 1
vtime = int(self._inter_byte_timeout * 10)
elif self._timeout is None:
vmin = 1
vtime = 0
else:
vmin = 0
vtime = int(self._timeout * 10)
try:
orig_attr = termios.tcgetattr(self.fd)
iflag, oflag, cflag, lflag, ispeed, ospeed, cc = orig_attr
except termios.error as msg: # if a port is nonexistent but has a /dev file, it'll fail here
            raise SerialException("Could not configure port: {}".format(msg))
if vtime < 0 or vtime > 255:
raise ValueError('Invalid vtime: {!r}'.format(vtime))
cc[termios.VTIME] = vtime
cc[termios.VMIN] = vmin
termios.tcsetattr(
self.fd,
termios.TCSANOW,
[iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
def read(self, size=1):
"""\
        Read size bytes from the serial port. If a timeout is set it may
        return fewer characters than requested. With no timeout it will block
        until the requested number of bytes is read.
"""
if not self.is_open:
raise portNotOpenError
read = bytearray()
while len(read) < size:
buf = os.read(self.fd, size - len(read))
if not buf:
break
read.extend(buf)
return bytes(read)
# hack to make hasattr return false
cancel_read = property()
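# Illustrative sketch, not part of the original pyserial sources: VTIMESerial
# maps the read timeout onto the tty driver's VMIN/VTIME fields instead of
# using select(), so a 0.5 s timeout becomes VTIME=5 with VMIN=0. It provides
# no cancel_read() and cannot combine an overall timeout with an
# inter-character timeout. The device path is an assumption.
def _example_vtime_read(port='/dev/ttyS0'):
    """Read up to 16 bytes, returning after a 0.5 s gap with no data."""
    s = VTIMESerial(port, baudrate=9600, timeout=0.5)
    try:
        return s.read(16)
    finally:
        s.close()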
|
gepd/uPiotMicroPythonTool
|
tools/pyserial/serialposix.py
|
Python
|
mit
| 31982
|
from datetime import datetime, timedelta
from django.contrib.auth.models import User
from django.core.cache import cache
from django.test import TestCase
from django_browserid.auth import default_username_algo
from commons.urlresolvers import reverse
from projects.models import Project
from topics.models import Topic
from users.cron import delete_abandoned_profiles
from users.models import Link, Profile
class CronTests(TestCase):
def test_deleting_abandoned_profiles(self):
"""Test the cmd to delete profiles that never agreed to the TOS."""
two_days_ago = datetime.now() - timedelta(days=2)
u1 = User.objects.create(
username=u'testaccount',
password=u'password1',
email=u'test@test.com',
is_active=True,
date_joined=two_days_ago,
)
Profile.objects.create(user=u1)
u2 = User.objects.create(
username=default_username_algo(u'test2@test.com'),
password=u'pass',
email=u'test2@test.com',
is_active=True,
date_joined=two_days_ago,
)
Profile.objects.create(user=u2)
self.assertEqual(Profile.objects.count(), 2)
self.assertEqual(User.objects.count(), 2)
delete_abandoned_profiles()
self.assertEqual(Profile.objects.count(), 1)
self.assertEqual(User.objects.count(), 1)
with self.assertRaises(User.DoesNotExist):
User.objects.get(pk=u2.pk)
def test_new_profiles_not_deleted(self):
"""Test that the profile deletion cmd doesn't delete new profiles."""
u1 = User.objects.create(
username=default_username_algo(u'test@test.com'),
password=u'password1',
email=u'test@test.com',
is_active=True,
)
Profile.objects.create(user=u1)
u2 = User.objects.create(
username=default_username_algo(u'test2@test.com'),
password=u'pass',
email=u'test2@test.com',
is_active=True,
date_joined=datetime.now() - timedelta(days=2),
)
Profile.objects.create(user=u2)
self.assertEqual(Profile.objects.count(), 2)
self.assertEqual(User.objects.count(), 2)
delete_abandoned_profiles()
self.assertEqual(Profile.objects.count(), 1)
self.assertEqual(User.objects.count(), 1)
with self.assertRaises(User.DoesNotExist):
User.objects.get(pk=u2.pk)
class ProfileData(TestCase):
def setUp(self):
cache.clear()
        self.user = User.objects.create(
username=u'testaccount',
password=u'password1',
is_active=True
)
self.profile = Profile.objects.create(
            user=self.user
)
def test_social_links(self):
user_slug = '/en-US/profile/%s/' % self.profile.user.username
response = self.client.get(user_slug)
self.assertEqual(len(response.context['social_links']), 0)
Link.objects.create(
name=u'Test',
url=u'http://www.mozilla.org',
profile=self.profile
)
response = self.client.get(user_slug)
self.assertEqual(len(response.context['social_links']), 1)
def test_project_links(self):
user_slug = '/en-US/profile/%s/' % self.profile.user.username
response = self.client.get(user_slug)
self.assertEqual(response.context['projects'], False)
p = Project.objects.create(
name=u'Shipment of Fail',
slug=u'shipment-of-fail',
description=u'Blah',
long_description=u'Blah blah'
)
p.team_members.add(self.profile)
response = self.client.get(user_slug)
self.assertNotEqual(response.context['projects'], False)
class ProfileViewTests(TestCase):
def setUp(self):
"""Create user and a project with a topic."""
self.password = 'lovezrugz'
self.user = User.objects.create_user(
username='TheDude',
password=self.password,
email='duder@aol.com'
)
self.profile = Profile.objects.create(user=self.user)
self.owner_password = 'TheBumsLost'
self.owner = User.objects.create_user(
username='jlebowski',
password=self.owner_password,
email='jlebowski@aol.com',
)
self.owner_profile = Profile.objects.create(user=self.owner)
self.topic = Topic.objects.create(
name='Bowling',
slug='bowling',
description='League play.',
)
self.project = Project.objects.create(
name='Get Rug Back',
slug='rug-back',
description='This aggression will not stand, man!',
long_description='Not into the whole, brevity thing.',
)
self.project.topics.add(self.topic)
self.project.team_members.add(self.profile)
self.project.owners.add(self.owner_profile)
def test_can_delete_profile(self):
self.assertTrue(self.client.login(
username=self.user.username,
password=self.password
))
resp = self.client.get(reverse('users_delete'), follow=True)
self.assertContains(resp, 'Delete Profile')
self.assertTrue(User.objects.get(username=self.user.username))
self.assertTrue(Profile.objects.get(pk=self.profile.pk))
resp = self.client.post(reverse('users_delete'), follow=True)
self.assertTrue(resp.redirect_chain[-1][0].endswith('/en-US/'))
with self.assertRaises(User.DoesNotExist):
User.objects.get(username=self.user.username)
with self.assertRaises(Profile.DoesNotExist):
Profile.objects.get(pk=self.profile.pk)
def test_delete_owner_not_delete_project(self):
self.assertTrue(self.client.login(
username=self.owner.username,
password=self.owner_password
))
self.assertEqual(self.project.owners.count(), 1)
self.client.post(reverse('users_delete'), follow=True)
with self.assertRaises(User.DoesNotExist):
User.objects.get(username=self.owner.username)
with self.assertRaises(Profile.DoesNotExist):
Profile.objects.get(pk=self.owner_profile.pk)
self.assertEqual(self.project, Project.objects.get(pk=self.project.pk))
self.assertEqual(self.project.owners.count(), 0)
def test_only_owner_not_allowed_to_delete(self):
"""Test a sole project owner is not shown delete form."""
self.assertTrue(self.client.login(
username=self.owner.username,
password=self.owner_password
))
self.assertEqual(self.project.owners.count(), 1)
resp = self.client.get(reverse('users_delete'), follow=True)
self.assertTrue(resp.context['problem_projects'])
self.assertContains(resp, 'Oops!')
def test_non_solo_owner_allowed_to_delete(self):
"""Test a non-sole project owner is shown delete form."""
self.project.owners.add(self.profile)
self.assertTrue(self.client.login(
username=self.owner.username,
password=self.owner_password
))
self.assertEqual(self.project.owners.count(), 2)
resp = self.client.get(reverse('users_delete'), follow=True)
self.assertFalse(resp.context['problem_projects'])
self.assertNotContains(resp, 'Oops!')
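# Illustrative sketch, not the real users.cron implementation: the behaviour
# that the tests above assert for delete_abandoned_profiles(). A profile counts
# as abandoned when the account still carries the auto-generated BrowserID
# username (the sign-up was never completed) and is older than the cutoff.
# The one-day cutoff below is an assumption taken from the two-days-ago fixtures.
def _sketch_delete_abandoned_profiles():
    cutoff = datetime.now() - timedelta(days=1)
    for user in User.objects.filter(date_joined__lt=cutoff):
        if user.username == default_username_algo(user.email):
            user.delete()   # the related Profile row is removed by the cascade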
|
mozilla/betafarm
|
apps/users/tests/test_profiles.py
|
Python
|
bsd-3-clause
| 7511
|