| repo_name (string, lengths 5–92) | path (string, lengths 4–221) | copies (string, 19 classes) | size (string, lengths 4–6) | content (string, lengths 766–896k) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 32–997) | alpha_frac (float64, 0.25–0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5–13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
alphagov/notifications-admin
|
tests/app/main/views/test_find_services.py
|
1
|
3171
|
from flask import url_for
from tests import service_json
def test_find_services_by_name_page_loads_correctly(client_request, platform_admin_user):
client_request.login(platform_admin_user)
document = client_request.get('main.find_services_by_name')
assert document.h1.text.strip() == 'Find services by name'
assert len(document.find_all('input', {'type': 'search'})) > 0
def test_find_services_by_name_displays_services_found(
client_request,
platform_admin_user,
mocker
):
client_request.login(platform_admin_user)
get_services = mocker.patch(
'app.service_api_client.find_services_by_name',
return_value={"data": [service_json()]}
)
document = client_request.post(
'main.find_services_by_name',
_data={"search": "Test Service"},
_expected_status=200
)
get_services.assert_called_once_with(service_name="Test Service")
result = document.select_one('.browse-list-item a')
assert result.text.strip() == 'Test Service'
assert result.attrs["href"] == "/services/1234"
def test_find_services_by_name_displays_multiple_services(
client_request,
platform_admin_user,
mocker
):
client_request.login(platform_admin_user)
mocker.patch(
'app.service_api_client.find_services_by_name',
return_value={"data": [service_json(name="Tadfield Police"), service_json(name="Tadfield Air Base")]}
)
document = client_request.post('main.find_services_by_name', _data={"search": "Tadfield"}, _expected_status=200)
results = document.find_all('li', {'class': 'browse-list-item'})
assert len(results) == 2
assert sorted([result.text.strip() for result in results]) == ["Tadfield Air Base", "Tadfield Police"]
def test_find_services_by_name_displays_message_if_no_services_found(
client_request,
platform_admin_user,
mocker
):
client_request.login(platform_admin_user)
mocker.patch('app.service_api_client.find_services_by_name', return_value={"data": []})
document = client_request.post(
'main.find_services_by_name', _data={"search": "Nabuchodonosorian Empire"}, _expected_status=200
)
assert document.find('p', {'class': 'browse-list-hint'}).text.strip() == 'No services found.'
def test_find_services_by_name_validates_against_empty_search_submission(
client_request,
platform_admin_user,
mocker
):
client_request.login(platform_admin_user)
document = client_request.post('main.find_services_by_name', _data={"search": ""}, _expected_status=200)
expected_message = "Error: You need to enter full or partial name to search by."
assert document.find('span', {'class': 'govuk-error-message'}).text.strip() == expected_message
def test_find_services_by_name_redirects_for_uuid(
client_request,
platform_admin_user,
mocker,
fake_uuid
):
client_request.login(platform_admin_user)
client_request.post(
'main.find_services_by_name',
_data={"search": fake_uuid},
_expected_redirect=url_for(
'main.service_dashboard',
service_id=fake_uuid,
_external=True,
),
)
|
mit
| 1,560,063,213,959,604,200
| 33.096774
| 116
| 0.668243
| false
| 3.431818
| true
| false
| false
|
rwl/PyCIM
|
CIM14/IEC61968/Metering/DynamicDemand.py
|
1
|
2894
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.Element import Element
class DynamicDemand(Element):
"""Dynamic demand description. The formula by which demand is measured is an important underlying definition to the measurement. Generally speaking, all of the meters in a given utility will be configured to measure demand the same way. Nevertheless, it must be defined. An 'interval' of 60, 30, 15, 10, or 5 minutes must be defined to describe the interval of time over which usage is measured. When demand is defined to be DemandKind.rollingBlock, both an 'interval' and a 'subinterval' must be defined, where the 'subinterval' must be a multiple of the 'interval' which contains it. A common setting is '15-minute rolling block with 5-minute subintervals.'
"""
def __init__(self, kind="logarithmic", interval=0.0, subInterval=0.0, *args, **kw_args):
"""Initialises a new 'DynamicDemand' instance.
@param kind: Kind of demand. Values are: "logarithmic", "fixedBlock", "rollingBlock"
@param interval: Demand interval.
        @param subInterval: (if 'kind'=rollingBlock) Subinterval, must evenly divide the 'interval' that contains it.
"""
#: Kind of demand. Values are: "logarithmic", "fixedBlock", "rollingBlock"
self.kind = kind
#: Demand interval.
self.interval = interval
        #: (if 'kind'=rollingBlock) Subinterval, must evenly divide the 'interval' that contains it.
self.subInterval = subInterval
super(DynamicDemand, self).__init__(*args, **kw_args)
_attrs = ["kind", "interval", "subInterval"]
_attr_types = {"kind": str, "interval": float, "subInterval": float}
_defaults = {"kind": "logarithmic", "interval": 0.0, "subInterval": 0.0}
_enums = {"kind": "DemandKind"}
_refs = []
_many_refs = []
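# A minimal usage sketch (not part of the original file): the common
# '15-minute rolling block with 5-minute subintervals' from the class
# docstring above, assuming Element() needs no extra arguments.
if __name__ == '__main__':
    demand = DynamicDemand(kind="rollingBlock", interval=15.0, subInterval=5.0)
    assert demand.interval % demand.subInterval == 0.0
    print("kind=%s interval=%s subInterval=%s"
          % (demand.kind, demand.interval, demand.subInterval))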
|
mit
| -8,450,176,945,924,258,000
| 55.745098
| 663
| 0.719074
| false
| 4.087571
| false
| false
| false
|
cmrust/VineyardMonitor-ATT
|
tem.py
|
1
|
1668
|
import serial
import threading
from datetime import datetime
from m2x.client import M2XClient
# instantiate our M2X API client
client = M2XClient(key='#REMOVED#')
# instantiate our serial connection to the Arduino
arduino = serial.Serial('/dev/ttyUSB0', 9600)
# instantiate our global variables
temp = 0
light = 0
now = datetime.utcnow()
def pollArduino():
# update these globally
global temp
global light
global now
# poll time (m2x is only UTC currently)
now = datetime.utcnow()
    # data from the serial port arrives comma-separated as:
# $temp,$light
# poll temp/light values (rstrip() removes the \n's)
values = arduino.readline().rstrip().split(',')
# if our array is not empty
if len(values) > 1:
temp = values[0]
light = values[1]
# print values to the console
print "tempF: " + temp
print "light: " + light
print
# clear the serial input buffer
# this keeps it from building up a backlog and causing delays
arduino.flushInput()
def pushM2X():
# iterate through any feeds from blueprints
for feed in client.feeds.search(type='blueprint'):
        # iterate through streams in feeds
for stream in feed.streams:
# upload the current values for each stream
if stream.name == 'temperature':
stream.values.add_value(temp, now)
if stream.name == 'light':
stream.values.add_value(light, now)
m2x_thread = None
while True:
    pollArduino()
    # m2x calls were proving slow, so we've threaded it here so
    # that the arduino doesn't get backed up while we're waiting
    # only spawn a new m2x thread once the previous one has finished
    if m2x_thread is None or not m2x_thread.is_alive():
        m2x_thread = threading.Thread(target=pushM2X)
        m2x_thread.start()
|
mit
| 6,485,409,181,414,895,000
| 23.173913
| 62
| 0.716427
| false
| 3.189293
| false
| false
| false
|
mscuthbert/abjad
|
abjad/tools/abjadbooktools/test/test_LaTeXDocumentHandler_hide.py
|
1
|
2580
|
# -*- encoding: utf-8 -*-
import platform
import unittest
from abjad.tools import abjadbooktools
from abjad.tools import systemtools
@unittest.skipIf(
platform.python_implementation() != 'CPython',
'Only for CPython',
)
class TestLaTeXDocumentHandler(unittest.TestCase):
def test_hide_1(self):
input_file_contents = [
'\\begin{comment}',
'<abjad>[hide=true]',
'def do_something(expr):',
" print('before')",
' print(expr + 1)',
" print('after')",
'',
'</abjad>',
'\\end{comment}',
'',
'\\begin{comment}',
'<abjad>',
'do_something(23)',
'</abjad>',
'\\end{comment}',
]
document_handler = abjadbooktools.LaTeXDocumentHandler()
input_blocks = document_handler.collect_input_blocks(input_file_contents)
input_blocks = tuple(input_blocks.values())
assert input_blocks[0].code_block_specifier is not None
assert input_blocks[0].code_block_specifier.hide
assert input_blocks[1].code_block_specifier is None
def test_hide_2(self):
input_file_contents = [
'\\begin{comment}',
'<abjad>[hide=true]',
'def do_something(expr):',
" print('before')",
' print(expr + 1)',
" print('after')",
'',
'</abjad>',
'\\end{comment}',
'',
'\\begin{comment}',
'<abjad>',
'do_something(23)',
'</abjad>',
'\\end{comment}',
]
document_handler = abjadbooktools.LaTeXDocumentHandler(
input_file_contents=input_file_contents,
)
rebuilt_source = document_handler(return_source=True)
assert rebuilt_source == systemtools.TestManager.clean_string(
"""
\\begin{comment}
<abjad>[hide=true]
def do_something(expr):
print('before')
print(expr + 1)
print('after')
</abjad>
\\end{comment}
\\begin{comment}
<abjad>
do_something(23)
</abjad>
\\end{comment}
%%% ABJADBOOK START %%%
\\begin{lstlisting}
>>> do_something(23)
before
24
after
\\end{lstlisting}
%%% ABJADBOOK END %%%
""",
)
|
gpl-3.0
| 6,283,707,193,494,840,000
| 28.329545
| 81
| 0.468992
| false
| 4.243421
| true
| false
| false
|
dcherian/pyroms
|
pyroms/pyroms/remapping/remap.py
|
1
|
5885
|
import numpy as np
try:
import netCDF4 as netCDF
except ImportError:
import netCDF3 as netCDF
import pyroms
def remap(src_array, remap_file, src_grad1=None, src_grad2=None, \
src_grad3=None, spval=1e37, verbose=False):
'''
remap based on addresses and weights computed in a setup phase
'''
# get info from remap_file
data = netCDF.Dataset(remap_file, 'r')
title = data.title
map_method = data.map_method
normalization = data.normalization
src_grid_name = data.source_grid
dst_grid_name = data.dest_grid
src_grid_size = len(data.dimensions['src_grid_size'])
dst_grid_size = len(data.dimensions['dst_grid_size'])
num_links = len(data.dimensions['num_links'])
src_grid_dims = data.variables['src_grid_dims']
dst_grid_dims = data.variables['dst_grid_dims']
# get weights and addresses from remap_file
map_wts = data.variables['remap_matrix'][:]
dst_add = data.variables['dst_address'][:]
src_add = data.variables['src_address'][:]
# get destination mask
dst_mask = data.variables['dst_grid_imask'][:]
# remap from src grid to dst grid
if src_grad1 is not None:
iorder = 2
else:
iorder = 1
if verbose is True:
print 'Reading remapping: ', title
print 'From file: ', remap_file
print ' '
print 'Remapping between:'
print src_grid_name
print 'and'
print dst_grid_name
print 'Remapping method: ', map_method
ndim = len(src_array.squeeze().shape)
if (ndim == 2):
tmp_dst_array = np.zeros((dst_grid_size))
tmp_src_array = src_array.flatten()
if iorder == 1:
# first order remapping
            # ensure that map_wts is a (num_links,4) array
tmp_map_wts = np.zeros((num_links,4))
tmp_map_wts[:,0] = map_wts[:,0].copy()
map_wts = tmp_map_wts
pyroms.remapping.scrip.remap(tmp_dst_array, map_wts, \
dst_add, src_add, tmp_src_array)
if iorder == 2:
# second order remapping
if map_method == 'conservative':
                # ensure that map_wts is a (num_links,4) array
tmp_map_wts = np.zeros((num_links,4))
tmp_map_wts[:,0:2] = map_wts[:,0:2].copy()
map_wts = tmp_map_wts
tmp_src_grad1 = src_grad1.flatten()
tmp_src_grad2 = src_grad2.flatten()
pyroms.remapping.scrip.remap(tmp_dst_array, map_wts, \
dst_add, src_add, tmp_src_array, \
tmp_src_grad1, tmp_src_grad2)
elif map_method == 'bicubic':
tmp_src_grad1 = src_grad1.flatten()
tmp_src_grad2 = src_grad2.flatten()
tmp_src_grad3 = src_grad3.flatten()
pyroms.remapping.scrip.remap(tmp_dst_array, map_wts, \
dst_add, src_add, tmp_src_array, \
tmp_src_grad1, tmp_src_grad2, \
tmp_src_grad3)
else:
            raise ValueError, 'Unknown method'
# mask dst_array
idx = np.where(dst_mask == 0)
tmp_dst_array[idx] = spval
tmp_dst_array = np.ma.masked_values(tmp_dst_array, spval)
# reshape
dst_array = np.reshape(tmp_dst_array, (dst_grid_dims[1], \
dst_grid_dims[0]))
elif (ndim == 3):
nlev = src_array.shape[0]
dst_array = np.zeros((nlev, dst_grid_dims[1], dst_grid_dims[0]))
# loop over vertical level
for k in range(nlev):
tmp_src_array = src_array[k,:,:].flatten()
tmp_dst_array = np.zeros((dst_grid_size))
if iorder == 1:
# first order remapping
                # ensure that map_wts is a (num_links,4) array
tmp_map_wts = np.zeros((num_links,4))
tmp_map_wts[:,0] = map_wts[:,0].copy()
map_wts = tmp_map_wts
pyroms.remapping.scrip.remap(tmp_dst_array, map_wts, \
dst_add, src_add, tmp_src_array)
if iorder == 2:
# second order remapping
if map_method == 'conservative':
tmp_src_grad1 = src_grad1.flatten()
tmp_src_grad2 = src_grad2.flatten()
pyroms.remapping.scrip.remap(tmp_dst_array, map_wts, \
dst_add, src_add, tmp_src_array, \
tmp_src_grad1, tmp_src_grad2)
elif map_method == 'bicubic':
tmp_src_grad1 = src_grad1.flatten()
tmp_src_grad2 = src_grad2.flatten()
tmp_src_grad3 = src_grad3.flatten()
pyroms.remapping.scrip.remap(tmp_dst_array, map_wts, \
dst_add, src_add, tmp_src_array, \
tmp_src_grad1, tmp_src_grad2, \
tmp_src_grad3)
else:
                raise ValueError, 'Unknown method'
# mask dst_array
idx = np.where(dst_mask == 0)
tmp_dst_array[idx] = spval
tmp_dst_array = np.ma.masked_values(tmp_dst_array, spval)
# reshape
dst_array[k,:,:] = np.reshape(tmp_dst_array, (dst_grid_dims[1], \
dst_grid_dims[0]))
else:
raise ValueError, 'src_array must have two or three dimensions'
# close data file
data.close()
return dst_array
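# A minimal usage sketch (not part of the original module): the weights file
# name below is hypothetical; a real one comes from a SCRIP setup phase.
if __name__ == '__main__':
    src_field = np.zeros((64, 128))  # 2D field on the source grid
    dst_field = remap(src_field, 'remap_weights_src_to_dst.nc', verbose=True)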
|
bsd-3-clause
| 3,416,160,935,299,170,300
| 36.967742
| 83
| 0.492438
| false
| 3.671241
| false
| false
| false
|
joshzarrabi/e-mission-server
|
emission/net/api/cfc_webapp.py
|
1
|
21790
|
# Standard imports
import json
from random import randrange
from bottle import route, post, get, run, template, static_file, request, app, HTTPError, abort, BaseRequest, JSONPlugin
import bottle as bt
# To support dynamic loading of client-specific libraries
import sys
import os
import logging
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(thread)d:%(message)s',
filename='webserver_debug.log', level=logging.DEBUG)
logging.debug("This should go to the log file")
from datetime import datetime
import time
# So that we can set the socket timeout
import socket
# For decoding JWTs using the google decode URL
import urllib
import requests
# For decoding JWTs on the client side
import oauth2client.client
from oauth2client.crypt import AppIdentityError
import traceback
import xmltodict
import urllib2
import bson.json_util
# Our imports
import modeshare, zipcode, distance, tripManager, \
Berkeley, visualize, stats, usercache, timeline
import emission.net.ext_service.moves.register as auth
import emission.analysis.result.carbon as carbon
import emission.analysis.classification.inference.commute as commute
import emission.analysis.modelling.work_time as work_time
import emission.analysis.result.userclient as userclient
import emission.core.common as common
from emission.core.wrapper.client import Client
from emission.core.wrapper.user import User
from emission.core.get_database import get_uuid_db, get_mode_db
import emission.core.wrapper.motionactivity as ecwm
config_file = open('conf/net/api/webserver.conf')
config_data = json.load(config_file)
static_path = config_data["paths"]["static_path"]
python_path = config_data["paths"]["python_path"]
server_host = config_data["server"]["host"]
server_port = config_data["server"]["port"]
socket_timeout = config_data["server"]["timeout"]
log_base_dir = config_data["paths"]["log_base_dir"]
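# A hypothetical conf/net/api/webserver.conf, inferred from the keys read
# above (values are illustrative only):
# {
#     "paths": {"static_path": "webapp/www", "python_path": "main",
#               "log_base_dir": "/var/log/emission"},
#     "server": {"host": "0.0.0.0", "port": "8080", "timeout": "3600"}
# }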
key_file = open('conf/net/keys.json')
key_data = json.load(key_file)
ssl_cert = key_data["ssl_certificate"]
private_key = key_data["private_key"]
client_key = key_data["client_key"]
client_key_old = key_data["client_key_old"]
ios_client_key = key_data["ios_client_key"]
BaseRequest.MEMFILE_MAX = 1024 * 1024 * 1024 # Allow the request size to be 1G
# to accommodate large section sizes
skipAuth = False
print "Finished configuring logging for %s" % logging.getLogger()
app = app()
# On MacOS, the current working directory is always in the python path. However,
# on ubuntu, it looks like the script directory (api in our case) is in the
# python path, but the pwd is not. This means that "main" is not seen even if
# we run from the CFC_WebApp directory. Let's make sure to manually add it to
# the python path so that we can keep our separation between the main code and
# the webapp layer
#Simple path that serves up a static landing page with javascript in it
@route('/')
def index():
return static_file("server/index.html", static_path)
# Bunch of static pages that constitute our website
# Should we have gone for something like django instead after all?
# If this gets to be too much, we should definitely consider that
@route("/<filename>")
def doc(filename):
if filename != "privacy" and filename != "support" and filename != "about" and filename != "consent":
return HTTPError(404, "Don't try to hack me, you evil spammer")
else:
return static_file("%s.html" % filename, "%s/docs/" % static_path)
# Serve up javascript and css files properly
@route('/front/<filename:path>')
def server_static(filename):
logging.debug("static filename = %s" % filename)
return static_file(filename, static_path)
@route('/clients/<clientname>/front/<filename>')
def server_static(clientname, filename):
logging.debug("returning file %s from client %s " % (filename, clientname))
return static_file(filename, "clients/%s/%s" % (clientname, static_path))
# Returns the proportion of survey takers who use each mode
@route('/result/commute.modeshare.distance')
def getCommuteModeShare():
fromTs = request.query.from_ts
toTs = request.query.to_ts
logging.debug("Filtering values for range %s -> %s" % (fromTs, toTs))
return modeshare.get_Alluser_mode_share_by_distance("commute",
datetime.fromtimestamp(float(fromTs)/1000), datetime.fromtimestamp(float(toTs)/1000))
# return modeshare.getModeShare()
@route('/result/internal.modeshare.distance')
def getBerkeleyModeShare():
fromTs = request.query.from_ts
toTs = request.query.to_ts
logging.debug("Filtering values for range %s -> %s" % (fromTs, toTs))
return Berkeley.get_berkeley_mode_share_by_distance(
datetime.fromtimestamp(float(fromTs)/1000), datetime.fromtimestamp(float(toTs)/1000))
# return modeshare.getModeShare()
# Returns the modeshare by zipcode
@route('/result/commute.modeshare/zipcode/<zc>')
def getCommuteModeShare(zc):
fromTs = request.query.from_ts
toTs = request.query.to_ts
logging.debug("Filtering values for range %s -> %s" % (fromTs, toTs))
return zipcode.get_mode_share_by_Zipcode(zc, "commute",
datetime.fromtimestamp(float(fromTs)/1000), datetime.fromtimestamp(float(toTs)/1000))
# Returns the proportion of survey takers from different zip codes
@route('/result/home.zipcode')
def getZipcode():
return zipcode.getZipcode()
# Returns the proportion of commute distances
@route('/result/commute.distance.to')
def getDistance():
fromTs = request.query.from_ts
toTs = request.query.to_ts
logging.debug("Filtering values for range %s -> %s" % (fromTs, toTs))
distances = distance.get_morning_commute_distance_pie(
datetime.fromtimestamp(float(fromTs)/1000), datetime.fromtimestamp(float(toTs)/1000))
# logging.debug("Returning distances = %s" % distances)
return distances
@route('/result/commute.distance.from')
def getDistance():
fromTs = request.query.from_ts
toTs = request.query.to_ts
logging.debug("Filtering values for range %s -> %s" % (fromTs, toTs))
distances = distance.get_evening_commute_distance_pie(
datetime.fromtimestamp(float(fromTs)/1000), datetime.fromtimestamp(float(toTs)/1000))
# logging.debug("Returning distances = %s" % distances)
return distances
# Returns the distribution of commute arrival and departure times
@route('/result/commute.arrivalTime')
def getArrivalTime():
fromTs = request.query.from_ts
toTs = request.query.to_ts
logging.debug("Filtering values for range %s -> %s" % (fromTs, toTs))
retVal = work_time.get_Alluser_work_start_time_pie(
datetime.fromtimestamp(float(fromTs)/1000), datetime.fromtimestamp(float(toTs)/1000))
# retVal = common.generateRandomResult(['00-04', '04-08', '08-10'])
# logging.debug("In getArrivalTime, retVal is %s" % retVal)
return retVal
@route('/result/commute.departureTime')
def getDepartureTime():
fromTs = request.query.from_ts
toTs = request.query.to_ts
logging.debug("Filtering values for range %s -> %s" % (fromTs, toTs))
retVal = work_time.get_Alluser_work_end_time_pie(
datetime.fromtimestamp(float(fromTs)/1000), datetime.fromtimestamp(float(toTs)/1000))
# retVal = common.generateRandomResult(['00-04', '04-08', '08-10'])
# logging.debug("In getDepartureTime, retVal is %s" % retVal)
return retVal
@route("/result/heatmap/carbon")
def getCarbonHeatmap():
fromTs = request.query.from_ts
toTs = request.query.to_ts
logging.debug("Filtering values for range %s -> %s" % (fromTs, toTs))
retVal = visualize.carbon_by_zip(
datetime.fromtimestamp(float(fromTs)/1000), datetime.fromtimestamp(float(toTs)/1000))
# retVal = common.generateRandomResult(['00-04', '04-08', '08-10'])
# logging.debug("In getCarbonHeatmap, retVal is %s" % retVal)
return retVal
@route("/result/heatmap/pop.route/cal")
def getCalPopRoute():
fromTs = request.query.from_ts
toTs = request.query.to_ts
logging.debug("Filtering values for range %s -> %s" % (fromTs, toTs))
retVal = visualize.Berkeley_pop_route(
datetime.fromtimestamp(float(fromTs)/1000), datetime.fromtimestamp(float(toTs)/1000))
# retVal = common.generateRandomResult(['00-04', '04-08', '08-10'])
# logging.debug("In getCalPopRoute, retVal is %s" % retVal)
return retVal
@route("/result/heatmap/pop.route/commute/<selMode>")
def getCommutePopRoute(selMode):
map_mode = {"motorized" : "MotionTypes.IN_VEHICLE", "walking" : "MotionTypes.ON_FOOT", "cycling" : "MotionTypes.BICYCLING"}
fromTs = request.query.from_ts
toTs = request.query.to_ts
mode = map_mode[selMode]
logging.debug("Filtering values for range %s -> %s" % (fromTs, toTs))
retVal = visualize.Commute_pop_route(mode,
datetime.fromtimestamp(float(fromTs)/1000), datetime.fromtimestamp(float(toTs)/1000))
# retVal = common.generateRandomResult(['00-04', '04-08', '08-10'])
# logging.debug("In getCalPopRoute, retVal is %s" % retVal)
return retVal
@get('/result/carbon/all/summary')
def carbonSummaryAllTrips():
fromTs = request.query.from_ts
toTs = request.query.to_ts
logging.debug("Filtering values for range %s -> %s" % (fromTs, toTs))
return carbon.getSummaryAllTrips(
datetime.fromtimestamp(float(fromTs)/1000), datetime.fromtimestamp(float(toTs)/1000))
@get('/tripManager/getModeOptions')
def getModeOptions():
return tripManager.getModeOptions()
@post('/tripManager/getUnclassifiedSections')
def getUnclassifiedSections():
user_uuid=getUUID(request)
return tripManager.getUnclassifiedSections(user_uuid)
@post('/tripManager/setSectionClassification')
def setSectionClassification():
user_uuid=getUUID(request)
updates = request.json['updates']
return tripManager.setSectionClassification(user_uuid, updates)
@post('/tripManager/storeSensedTrips')
def storeSensedTrips():
logging.debug("Called storeSensedTrips")
user_uuid=getUUID(request)
print "user_uuid %s" % user_uuid
logging.debug("user_uuid %s" % user_uuid)
sections = request.json['sections']
return tripManager.storeSensedTrips(user_uuid, sections)
@post('/usercache/get')
def getFromCache():
logging.debug("Called userCache.get")
user_uuid=getUUID(request)
logging.debug("user_uuid %s" % user_uuid)
to_phone = usercache.sync_server_to_phone(user_uuid)
return {'server_to_phone': to_phone}
@post('/usercache/put')
def putIntoCache():
logging.debug("Called userCache.put")
user_uuid=getUUID(request)
logging.debug("user_uuid %s" % user_uuid)
from_phone = request.json['phone_to_server']
return usercache.sync_phone_to_server(user_uuid, from_phone)
@post('/timeline/getTrips/<day>')
def getTrips(day):
logging.debug("Called timeline.getTrips/%s" % day)
user_uuid=getUUID(request)
force_refresh = request.query.get('refresh', False)
logging.debug("user_uuid %s" % user_uuid)
ret_geojson = timeline.get_trips_for_day(user_uuid, day, force_refresh)
logging.debug("type(ret_geojson) = %s" % type(ret_geojson))
ret_dict = {"timeline": ret_geojson}
logging.debug("type(ret_dict) = %s" % type(ret_dict))
return ret_dict
@post('/profile/create')
def createUserProfile():
logging.debug("Called createUserProfile")
userToken = request.json['user']
# This is the only place we should use the email, since we may not have a
# UUID yet. All others should only use the UUID.
if skipAuth:
userEmail = userToken
else:
userEmail = verifyUserToken(userToken)
logging.debug("userEmail = %s" % userEmail)
user = User.register(userEmail)
logging.debug("Looked up user = %s" % user)
logging.debug("Returning result %s" % {'uuid': str(user.uuid)})
return {'uuid': str(user.uuid)}
@post('/profile/update')
def updateUserProfile():
logging.debug("Called updateUserProfile")
user_uuid = getUUID(request)
user = User.fromUUID(user_uuid)
mpg_array = request.json['mpg_array']
return user.setMpgArray(mpg_array)
@post('/profile/consent')
def setConsentInProfile():
user_uuid = getUUID(request)
version = request.json['version']
print "Setting accepted version to %s for user %s" % (version, user_uuid)
logging.debug("Setting accepted version to %s for user %s" % (version, user_uuid))
return None
@post('/profile/settings')
def getCustomizationForProfile():
user_uuid = getUUID(request)
user = User.fromUUID(user_uuid)
logging.debug("Returning settings for user %s" % user_uuid)
return user.getSettings()
@post('/stats/set')
def setStats():
user_uuid=getUUID(request)
inStats = request.json['stats']
stats.setClientMeasurements(user_uuid, inStats)
@post('/compare')
def postCarbonCompare():
from clients.data import data
from clients.choice import choice
if not skipAuth:
if request.json == None:
return "Waiting for user data to become available..."
if 'user' not in request.json:
return "Waiting for user data to be become available.."
user_uuid = getUUID(request)
clientResult = userclient.getClientSpecificResult(user_uuid)
if clientResult != None:
logging.debug("Found overriding client result for user %s, returning it" % user_uuid)
return clientResult
else:
logging.debug("No overriding client result for user %s, returning choice " % user_uuid)
return choice.getResult(user_uuid)
@get('/compare')
def getCarbonCompare():
for key, val in request.headers.items():
print(" %s: %s" % (key, val))
from clients.data import data
if not skipAuth:
if 'User' not in request.headers or request.headers.get('User') == '':
return "Waiting for user data to become available..."
from clients.choice import choice
user_uuid = getUUID(request, inHeader=True)
print ('UUID', user_uuid)
clientResult = userclient.getClientSpecificResult(user_uuid)
if clientResult != None:
logging.debug("Found overriding client result for user %s, returning it" % user_uuid)
return clientResult
else:
logging.debug("No overriding client result for user %s, returning choice" % user_uuid)
return choice.getResult(user_uuid)
# Client related code START
@post("/client/<clientname>/<method>")
def callStudy(clientname, method):
user_uuid = getUUID(request)
request['user'] = user_uuid
return Client(clientname).callMethod(method, request)
@get('/client/pre-register')
def registeredForStudy():
userEmail = request.query.email
client = request.query.client
client_key = request.query.client_key
logging.debug("request = %s" % (request))
logging.debug("userEmail = %s, client = %s, client_key = %s" % (userEmail, client, client_key))
# try:
newSignupCount = Client(client).preRegister(client_key, userEmail)
# except Exception as e:
# abort(e.code, e.msg)
return {'email': userEmail, 'client': client, 'signup_count': newSignupCount }
@get('/client/<clientName>/<method>')
def javascriptCallback(clientName, method):
from clients.choice import choice
client = Client(clientName)
client_key = request.query.client_key
client.callJavascriptCallback(client_key, method, request.params)
return {'status': 'ok'}
# proxy used to request and process XML from an external API, then convert it to JSON
# original URL should be encoded in UTF-8
@get("/asJSON/<originalXMLWebserviceURL>")
def xmlProxy(originalXMLWebserviceURL):
decodedURL = urllib2.unquote(originalXMLWebserviceURL)
f = urllib2.urlopen(decodedURL)
xml = f.read()
parsedXML = xmltodict.parse(xml)
return json.dumps(parsedXML)
# Client related code END
# Data source integration START
@post('/movesCallback')
def movesCallback():
logging.debug("Request from user = %s" % request)
logging.debug("Request.json from user = %s" % request.json)
user_uuid = getUUID(request)
if user_uuid is None:
# Hack to support older clients that don't call register before calling movesCallback
# Remove by Dec 31, 2014
createUserProfile()
user_uuid = getUUID(request)
assert(user_uuid is not None)
code = request.json['code']
state = request.json['state']
return auth.movesCallback(code, state, user_uuid)
# Data source integration END
@app.hook('before_request')
def before_request():
print("START %s %s %s" % (datetime.now(), request.method, request.path))
request.params.start_ts = time.time()
logging.debug("START %s %s" % (request.method, request.path))
@app.hook('after_request')
def after_request():
msTimeNow = time.time()
duration = msTimeNow - request.params.start_ts
print("END %s %s %s %s %s " % (datetime.now(), request.method, request.path, request.params.user_uuid, duration))
logging.debug("END %s %s %s %s " % (request.method, request.path, request.params.user_uuid, duration))
# Keep track of the time and duration for each call
stats.storeServerEntry(request.params.user_uuid, "%s %s" % (request.method, request.path),
msTimeNow, duration)
# Auth helpers BEGIN
# This should only be used by createUserProfile since we may not have a UUID
# yet. All others should use the UUID.
def verifyUserToken(token):
try:
# attempt to validate token on the client-side
logging.debug("Using OAuth2Client to verify id token of length %d from android phones" % len(token))
tokenFields = oauth2client.client.verify_id_token(token,client_key)
logging.debug(tokenFields)
except AppIdentityError as androidExp:
try:
logging.debug("Using OAuth2Client to verify id token of length %d from android phones using old token" % len(token))
tokenFields = oauth2client.client.verify_id_token(token,client_key_old)
logging.debug(tokenFields)
except AppIdentityError as androidExpOld:
try:
logging.debug("Using OAuth2Client to verify id token from iOS phones")
tokenFields = oauth2client.client.verify_id_token(token, ios_client_key)
logging.debug(tokenFields)
except AppIdentityError as iOSExp:
traceback.print_exc()
logging.debug("OAuth failed to verify id token, falling back to constructedURL")
#fallback to verifying using Google API
constructedURL = ("https://www.googleapis.com/oauth2/v1/tokeninfo?id_token=%s" % token)
r = requests.get(constructedURL)
tokenFields = json.loads(r.content)
in_client_key = tokenFields['audience']
if (in_client_key != client_key):
if (in_client_key != ios_client_key):
abort(401, "Invalid client key %s" % in_client_key)
logging.debug("Found user email %s" % tokenFields['email'])
return tokenFields['email']
def getUUIDFromToken(token):
userEmail = verifyUserToken(token)
return __getUUIDFromEmail__(userEmail)
# This should not be used for general API calls
def __getUUIDFromEmail__(userEmail):
user=User.fromEmail(userEmail)
if user is None:
return None
user_uuid=user.uuid
return user_uuid
def __getToken__(request, inHeader):
if inHeader:
userHeaderSplitList = request.headers.get('User').split()
if len(userHeaderSplitList) == 1:
userToken = userHeaderSplitList[0]
else:
userToken = userHeaderSplitList[1]
else:
userToken = request.json['user']
return userToken
def getUUID(request, inHeader=False):
retUUID = None
if skipAuth:
if 'User' in request.headers or 'user' in request.json:
# skipAuth = true, so the email will be sent in plaintext
userEmail = __getToken__(request, inHeader)
retUUID = __getUUIDFromEmail__(userEmail)
logging.debug("skipAuth = %s, returning UUID directly from email %s" % (skipAuth, retUUID))
else:
# Return a random user to make it easy to experiment without having to specify a user
# TODO: Remove this if it is not actually used
            # get_uuid_db is already imported at module level from
            # emission.core.get_database
            user_uuid = get_uuid_db().find_one()['uuid']
retUUID = user_uuid
logging.debug("skipAuth = %s, returning arbitrary UUID %s" % (skipAuth, retUUID))
if Client("choice").getClientKey() is None:
Client("choice").update(createKey = True)
else:
userToken = __getToken__(request, inHeader)
retUUID = getUUIDFromToken(userToken)
if retUUID is None:
raise HTTPError(403, "token is valid, but no account found for user")
request.params.user_uuid = retUUID
return retUUID
# Auth helpers END
# We have seen the sockets hang in practice. Let's set the socket timeout = 1
# hour to be on the safe side, and see if it is hit.
socket.setdefaulttimeout(float(socket_timeout))
for plugin in app.plugins:
if isinstance(plugin, JSONPlugin):
print("Replaced json_dumps in plugin with the one from bson")
plugin.json_dumps = bson.json_util.dumps
print("Changing bt.json_loads from %s to %s" % (bt.json_loads, bson.json_util.loads))
bt.json_loads = bson.json_util.loads
# The selection of SSL versus non-SSL should really be done through a config
# option and not through editing source code, so let's make this keyed off the
# port number
if server_port == "443":
# We support SSL and want to use it
run(host=server_host, port=server_port, server='cherrypy', debug=True,
certfile=ssl_cert, keyfile=private_key, ssl_module='builtin')
else:
# Non SSL option for testing on localhost
# We can theoretically use a separate skipAuth flag specified in the config file,
# but then we have to define the behavior if SSL is true and we are not
# running on localhost but still want to run without authentication. That is
# not really an important use case now, and it makes people have to change
# two values and increases the chance of bugs. So let's key the auth skipping from this as well.
skipAuth = True
print "Running with HTTPS turned OFF, skipAuth = True"
run(host=server_host, port=server_port, server='cherrypy', debug=True)
# run(host="0.0.0.0", port=server_port, server='cherrypy', debug=True)
|
bsd-3-clause
| -3,426,859,423,235,193,300
| 38.403255
| 128
| 0.716613
| false
| 3.488633
| true
| false
| false
|
igmhub/pyLyA
|
py/picca/data.py
|
1
|
43998
|
"""This module defines data structure to deal with line of sight data.
This module provides with three classes (QSO, Forest, Delta)
to manage the line-of-sight data.
See the respective docstrings for more details
"""
import numpy as np
import iminuit
import fitsio
from picca import constants
from picca.utils import userprint, unred
from picca.dla import DLA
class QSO(object):
"""Class to represent quasar objects.
Attributes:
ra: float
Right-ascension of the quasar (in radians).
dec: float
Declination of the quasar (in radians).
z_qso: float
Redshift of the quasar.
plate: integer
Plate number of the observation.
fiberid: integer
Fiberid of the observation.
mjd: integer
Modified Julian Date of the observation.
thingid: integer
Thingid of the observation.
x_cart: float
The x coordinate when representing ra, dec in a cartesian
coordinate system.
y_cart: float
The y coordinate when representing ra, dec in a cartesian
coordinate system.
z_cart: float
The z coordinate when representing ra, dec in a cartesian
coordinate system.
cos_dec: float
Cosine of the declination angle.
        weight: float or None
            Weight assigned to the object
r_comov: float or None
Comoving distance to the object
dist_m: float or None
Angular diameter distance to object
log_lambda: float or None
Wavelength associated with the quasar redshift
Note that plate-fiberid-mjd is a unique identifier
for the quasar.
Methods:
__init__: Initialize class instance.
get_angle_between: Computes the angular separation between two quasars.
"""
def __init__(self, thingid, ra, dec, z_qso, plate, mjd, fiberid):
"""Initializes class instance.
Args:
thingid: integer
Thingid of the observation.
ra: float
Right-ascension of the quasar (in radians).
dec: float
Declination of the quasar (in radians).
z_qso: float
Redshift of the quasar.
plate: integer
Plate number of the observation.
mjd: integer
Modified Julian Date of the observation.
fiberid: integer
Fiberid of the observation.
"""
self.ra = ra
self.dec = dec
self.plate = plate
self.mjd = mjd
self.fiberid = fiberid
## cartesian coordinates
self.x_cart = np.cos(ra) * np.cos(dec)
self.y_cart = np.sin(ra) * np.cos(dec)
self.z_cart = np.sin(dec)
self.cos_dec = np.cos(dec)
self.z_qso = z_qso
self.thingid = thingid
# variables computed in function io.read_objects
self.weight = None
self.r_comov = None
self.dist_m = None
# variables computed in modules bin.picca_xcf_angl and bin.picca_xcf1d
self.log_lambda = None
def get_angle_between(self, data):
"""Computes the angular separation between two quasars.
Args:
data: QSO or list of QSO
Objects with which the angular separation will
be computed.
Returns
A float or an array (depending on input data) with the angular
separation between this quasar and the object(s) in data.
"""
# case 1: data is list-like
try:
x_cart = np.array([d.x_cart for d in data])
y_cart = np.array([d.y_cart for d in data])
z_cart = np.array([d.z_cart for d in data])
ra = np.array([d.ra for d in data])
dec = np.array([d.dec for d in data])
cos = x_cart * self.x_cart + y_cart * self.y_cart + z_cart * self.z_cart
w = cos >= 1.
if w.sum() != 0:
userprint('WARNING: {} pairs have cos>=1.'.format(w.sum()))
cos[w] = 1.
w = cos <= -1.
if w.sum() != 0:
userprint('WARNING: {} pairs have cos<=-1.'.format(w.sum()))
cos[w] = -1.
angl = np.arccos(cos)
w = ((np.absolute(ra - self.ra) < constants.SMALL_ANGLE_CUT_OFF) &
(np.absolute(dec - self.dec) < constants.SMALL_ANGLE_CUT_OFF))
if w.sum() != 0:
angl[w] = np.sqrt((dec[w] - self.dec)**2 +
(self.cos_dec * (ra[w] - self.ra))**2)
# case 2: data is a QSO
except TypeError:
x_cart = data.x_cart
y_cart = data.y_cart
z_cart = data.z_cart
ra = data.ra
dec = data.dec
cos = x_cart * self.x_cart + y_cart * self.y_cart + z_cart * self.z_cart
if cos >= 1.:
                userprint('WARNING: 1 pair has cosine>=1.')
cos = 1.
elif cos <= -1.:
                userprint('WARNING: 1 pair has cosine<=-1.')
cos = -1.
angl = np.arccos(cos)
if ((np.absolute(ra - self.ra) < constants.SMALL_ANGLE_CUT_OFF) &
(np.absolute(dec - self.dec) < constants.SMALL_ANGLE_CUT_OFF)):
angl = np.sqrt((dec - self.dec)**2 + (self.cos_dec *
(ra - self.ra))**2)
return angl
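# A minimal usage sketch (not part of the original module; coordinates in
# radians are illustrative). get_angle_between accepts a single QSO or a
# list of QSO:
#   qso_a = QSO(1, 0.50, 0.10, 2.3, 1000, 55000, 1)
#   qso_b = QSO(2, 0.51, 0.11, 2.5, 1000, 55000, 2)
#   angle = qso_a.get_angle_between(qso_b)      # float, in radians
#   angles = qso_a.get_angle_between([qso_b])   # numpy array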
class Forest(QSO):
"""Class to represent a Lyman alpha (or other absorption) forest
This class stores the information of an absorption forest.
This includes the information required to extract the delta
field from it: flux correction, inverse variance corrections,
dlas, absorbers, ...
Attributes:
## Inherits from QSO ##
log_lambda : array of floats
Array containing the logarithm of the wavelengths (in Angs)
flux : array of floats
Array containing the flux associated to each wavelength
ivar: array of floats
Array containing the inverse variance associated to each flux
mean_optical_depth: array of floats or None
Mean optical depth at the redshift of each pixel in the forest
dla_transmission: array of floats or None
Decrease of the transmitted flux due to the presence of a Damped
Lyman alpha absorbers
mean_expected_flux_frac: array of floats or None
Mean expected flux fraction using the mock continuum
order: 0 or 1
Order of the log10(lambda) polynomial for the continuum fit
exposures_diff: array of floats or None
Difference between exposures
reso: array of floats or None
Resolution of the forest
mean_snr: float or None
Mean signal-to-noise ratio in the forest
mean_reso: float or None
Mean resolution of the forest
mean_z: float or None
Mean redshift of the forest
cont: array of floats or None
Quasar continuum
p0: float or None
Zero point of the linear function (flux mean)
p1: float or None
Slope of the linear function (evolution of the flux)
bad_cont: string or None
Reason as to why the continuum fit is not acceptable
abs_igm: string
Name of the absorption line in picca.constants defining the
redshift of the forest pixels
Class attributes:
log_lambda_max: float
Logarithm of the maximum wavelength (in Angs) to be considered in a
forest.
log_lambda_min: float
Logarithm of the minimum wavelength (in Angs) to be considered in a
forest.
log_lambda_max_rest_frame: float
As log_lambda_max but for rest-frame wavelength.
log_lambda_min_rest_frame: float
As log_lambda_min but for rest-frame wavelength.
rebin: integer
Rebin wavelength grid by combining this number of adjacent pixels
(inverse variance weighting).
delta_log_lambda: float
Variation of the logarithm of the wavelength (in Angs) between two
pixels.
extinction_bv_map: dict
B-V extinction due to dust. Maps thingids (integers) to the dust
correction (array).
absorber_mask_width: float
Mask width on each side of the absorber central observed wavelength
in units of 1e4*dlog10(lambda/Angs).
dla_mask_limit: float
Lower limit on the DLA transmission. Transmissions below this
number are masked.
Methods:
__init__: Initializes class instances.
__add__: Adds the information of another forest.
correct_flux: Corrects for multiplicative errors in pipeline flux
calibration.
correct_ivar: Corrects for multiplicative errors in pipeline inverse
variance calibration.
get_var_lss: Interpolates the pixel variance due to the Large Scale
            Structure on the wavelength array.
get_eta: Interpolates the correction factor to the contribution of the
pipeline estimate of the instrumental noise to the variance on the
wavelength array.
get_fudge: Interpolates the fudge contribution to the variance on the
wavelength array.
get_mean_cont: Interpolates the mean quasar continuum over the whole
sample on the wavelength array.
mask: Applies wavelength masking.
add_optical_depth: Adds the contribution of a given species to the mean
optical depth.
        add_dla: Adds DLA to forest. Masks it by removing the affected pixels.
add_absorber: Adds absorber to forest. Masks it by removing the
            affected pixels.
cont_fit: Computes the forest continuum.
"""
log_lambda_min = None
log_lambda_max = None
log_lambda_min_rest_frame = None
log_lambda_max_rest_frame = None
rebin = None
delta_log_lambda = None
@classmethod
def correct_flux(cls, log_lambda):
"""Corrects for multiplicative errors in pipeline flux calibration.
Empty function to be loaded at run-time.
Args:
log_lambda: array of float
Array containing the logarithm of the wavelengths (in Angs)
Returns:
An array with the correction
Raises:
NotImplementedError: Function was not specified
"""
raise NotImplementedError("Function should be specified at run-time")
@classmethod
def correct_ivar(cls, log_lambda):
"""Corrects for multiplicative errors in pipeline inverse variance
calibration.
Empty function to be loaded at run-time.
Args:
log_lambda: array of float
Array containing the logarithm of the wavelengths (in Angs)
Returns:
An array with the correction
Raises:
NotImplementedError: Function was not specified
"""
raise NotImplementedError("Function should be specified at run-time")
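    # A hypothetical run-time override (not part of the original module):
    # calling scripts are expected to replace these placeholders, e.g. with
    # an interpolation built from calibration data:
    #   import scipy.interpolate
    #   interp = scipy.interpolate.interp1d(calib_log_lambda, calib_corr,
    #                                       fill_value='extrapolate')
    #   Forest.correct_flux = interp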
    # map of B-V extinction to thingids for dust correction
extinction_bv_map = None
# absorber pixel mask limit
absorber_mask_width = None
    ## minimum dla transmission
dla_mask_limit = None
@classmethod
def get_var_lss(cls, log_lambda):
"""Interpolates the pixel variance due to the Large Scale Strucure on
the wavelength array.
Empty function to be loaded at run-time.
Args:
log_lambda: array of float
Array containing the logarithm of the wavelengths (in Angs)
Returns:
An array with the correction
Raises:
NotImplementedError: Function was not specified
"""
raise NotImplementedError("Function should be specified at run-time")
@classmethod
def get_eta(cls, log_lambda):
"""Interpolates the correction factor to the contribution of the
pipeline estimate of the instrumental noise to the variance on the
wavelength array.
See equation 4 of du Mas des Bourboux et al. 2020 for details.
Empty function to be loaded at run-time.
Args:
log_lambda: array of float
Array containing the logarithm of the wavelengths (in Angs)
Returns:
An array with the correction
Raises:
NotImplementedError: Function was not specified
"""
raise NotImplementedError("Function should be specified at run-time")
@classmethod
def get_mean_cont(cls, log_lambda):
"""Interpolates the mean quasar continuum over the whole
sample on the wavelength array.
See equation 2 of du Mas des Bourboux et al. 2020 for details.
Empty function to be loaded at run-time.
Args:
log_lambda: array of float
Array containing the logarithm of the wavelengths (in Angs)
Returns:
An array with the correction
Raises:
NotImplementedError: Function was not specified
"""
raise NotImplementedError("Function should be specified at run-time")
@classmethod
def get_fudge(cls, log_lambda):
"""Interpolates the fudge contribution to the variance on the
wavelength array.
See function epsilon in equation 4 of du Mas des Bourboux et al.
2020 for details.
Args:
log_lambda: array of float
Array containing the logarithm of the wavelengths (in Angs)
Returns:
An array with the correction
Raises:
NotImplementedError: Function was not specified
"""
raise NotImplementedError("Function should be specified at run-time")
def __init__(self,
log_lambda,
flux,
ivar,
thingid,
ra,
dec,
z_qso,
plate,
mjd,
fiberid,
exposures_diff=None,
reso=None,
mean_expected_flux_frac=None,
abs_igm="LYA"):
"""Initializes class instances.
Args:
log_lambda : array of floats
Array containing the logarithm of the wavelengths (in Angs).
flux : array of floats
Array containing the flux associated to each wavelength.
ivar : array of floats
Array containing the inverse variance associated to each flux.
            thingid : integer
ThingID of the observation.
ra: float
Right-ascension of the quasar (in radians).
dec: float
Declination of the quasar (in radians).
z_qso: float
Redshift of the quasar.
plate: integer
Plate number of the observation.
mjd: integer
Modified Julian Date of the observation.
fiberid: integer
Fiberid of the observation.
exposures_diff: array of floats or None - default: None
Difference between exposures.
reso: array of floats or None - default: None
Resolution of the forest.
mean_expected_flux_frac: array of floats or None - default: None
Mean expected flux fraction using the mock continuum
abs_igm: string - default: "LYA"
Name of the absorption in picca.constants defining the
redshift of the forest pixels
"""
QSO.__init__(self, thingid, ra, dec, z_qso, plate, mjd, fiberid)
# apply dust extinction correction
if Forest.extinction_bv_map is not None:
corr = unred(10**log_lambda, Forest.extinction_bv_map[thingid])
flux /= corr
ivar *= corr**2
            if exposures_diff is not None:
exposures_diff /= corr
## cut to specified range
bins = (np.floor((log_lambda - Forest.log_lambda_min) /
Forest.delta_log_lambda + 0.5).astype(int))
log_lambda = Forest.log_lambda_min + bins * Forest.delta_log_lambda
w = (log_lambda >= Forest.log_lambda_min)
w = w & (log_lambda < Forest.log_lambda_max)
w = w & (log_lambda - np.log10(1. + self.z_qso) >
Forest.log_lambda_min_rest_frame)
w = w & (log_lambda - np.log10(1. + self.z_qso) <
Forest.log_lambda_max_rest_frame)
w = w & (ivar > 0.)
if w.sum() == 0:
return
bins = bins[w]
log_lambda = log_lambda[w]
flux = flux[w]
ivar = ivar[w]
if mean_expected_flux_frac is not None:
mean_expected_flux_frac = mean_expected_flux_frac[w]
if exposures_diff is not None:
exposures_diff = exposures_diff[w]
if reso is not None:
reso = reso[w]
# rebin arrays
rebin_log_lambda = (Forest.log_lambda_min +
np.arange(bins.max() + 1) * Forest.delta_log_lambda)
rebin_flux = np.zeros(bins.max() + 1)
rebin_ivar = np.zeros(bins.max() + 1)
if mean_expected_flux_frac is not None:
rebin_mean_expected_flux_frac = np.zeros(bins.max() + 1)
rebin_flux_aux = np.bincount(bins, weights=ivar * flux)
rebin_ivar_aux = np.bincount(bins, weights=ivar)
if mean_expected_flux_frac is not None:
rebin_mean_expected_flux_frac_aux = np.bincount(
bins, weights=ivar * mean_expected_flux_frac)
if exposures_diff is not None:
rebin_exposures_diff = np.bincount(bins,
weights=ivar * exposures_diff)
if reso is not None:
rebin_reso = np.bincount(bins, weights=ivar * reso)
rebin_flux[:len(rebin_flux_aux)] += rebin_flux_aux
rebin_ivar[:len(rebin_ivar_aux)] += rebin_ivar_aux
if mean_expected_flux_frac is not None:
rebin_mean_expected_flux_frac[:len(
rebin_mean_expected_flux_frac_aux
)] += rebin_mean_expected_flux_frac_aux
w = (rebin_ivar > 0.)
if w.sum() == 0:
return
log_lambda = rebin_log_lambda[w]
flux = rebin_flux[w] / rebin_ivar[w]
ivar = rebin_ivar[w]
if mean_expected_flux_frac is not None:
mean_expected_flux_frac = (rebin_mean_expected_flux_frac[w] /
rebin_ivar[w])
if exposures_diff is not None:
exposures_diff = rebin_exposures_diff[w] / rebin_ivar[w]
if reso is not None:
reso = rebin_reso[w] / rebin_ivar[w]
# Flux calibration correction
try:
correction = Forest.correct_flux(log_lambda)
flux /= correction
ivar *= correction**2
except NotImplementedError:
pass
# Inverse variance correction
try:
correction = Forest.correct_ivar(log_lambda)
ivar /= correction
except NotImplementedError:
pass
# keep the results so far in this instance
self.mean_optical_depth = None
self.dla_transmission = None
self.log_lambda = log_lambda
self.flux = flux
self.ivar = ivar
self.mean_expected_flux_frac = mean_expected_flux_frac
self.exposures_diff = exposures_diff
self.reso = reso
self.abs_igm = abs_igm
# compute mean quality variables
if reso is not None:
self.mean_reso = reso.mean()
else:
self.mean_reso = None
error = 1.0 / np.sqrt(ivar)
snr = flux / error
self.mean_snr = sum(snr) / float(len(snr))
lambda_abs_igm = constants.ABSORBER_IGM[self.abs_igm]
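        # mean_z is the redshift of the forest's mid-point wavelength:
        # (lambda_first + lambda_last) / 2 / lambda_absorber - 1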
self.mean_z = ((np.power(10., log_lambda[len(log_lambda) - 1]) +
np.power(10., log_lambda[0])) / 2. / lambda_abs_igm -
1.0)
# continuum-related variables
self.cont = None
self.p0 = None
self.p1 = None
self.bad_cont = None
self.order = None
def coadd(self, other):
"""Coadds the information of another forest.
Forests are coadded by using inverse variance weighting.
Args:
other: Forest
The forest instance to be coadded. If other does not have the
attribute log_lambda, then the method returns without doing
anything.
Returns:
The coadded forest.
"""
if self.log_lambda is None or other.log_lambda is None:
return self
# this should contain all quantities that are to be coadded using
# ivar weighting
ivar_coadd_data = {}
log_lambda = np.append(self.log_lambda, other.log_lambda)
ivar_coadd_data['flux'] = np.append(self.flux, other.flux)
ivar = np.append(self.ivar, other.ivar)
if self.mean_expected_flux_frac is not None:
mean_expected_flux_frac = np.append(self.mean_expected_flux_frac,
other.mean_expected_flux_frac)
ivar_coadd_data['mean_expected_flux_frac'] = mean_expected_flux_frac
if self.exposures_diff is not None:
ivar_coadd_data['exposures_diff'] = np.append(
self.exposures_diff, other.exposures_diff)
if self.reso is not None:
ivar_coadd_data['reso'] = np.append(self.reso, other.reso)
# coadd the deltas by rebinning
bins = np.floor((log_lambda - Forest.log_lambda_min) /
Forest.delta_log_lambda + 0.5).astype(int)
rebin_log_lambda = Forest.log_lambda_min + (np.arange(bins.max() + 1) *
Forest.delta_log_lambda)
rebin_ivar = np.zeros(bins.max() + 1)
rebin_ivar_aux = np.bincount(bins, weights=ivar)
rebin_ivar[:len(rebin_ivar_aux)] += rebin_ivar_aux
w = (rebin_ivar > 0.)
self.log_lambda = rebin_log_lambda[w]
self.ivar = rebin_ivar[w]
# rebin using inverse variance weighting
for key, value in ivar_coadd_data.items():
rebin_value = np.zeros(bins.max() + 1)
rebin_value_aux = np.bincount(bins, weights=ivar * value)
rebin_value[:len(rebin_value_aux)] += rebin_value_aux
setattr(self, key, rebin_value[w] / rebin_ivar[w])
# recompute means of quality variables
if self.reso is not None:
self.mean_reso = self.reso.mean()
error = 1. / np.sqrt(self.ivar)
snr = self.flux / error
self.mean_snr = snr.mean()
lambda_abs_igm = constants.ABSORBER_IGM[self.abs_igm]
self.mean_z = ((np.power(10., log_lambda[len(log_lambda) - 1]) +
np.power(10., log_lambda[0])) / 2. / lambda_abs_igm -
1.0)
return self
def mask(self, mask_table):
"""Applies wavelength masking.
Pixels are masked according to a set of lines both in observed frame
and in the rest-frame. Masking is done by simply removing the pixels
from the arrays. Does nothing if the forest doesn't have the attribute
log_lambda set.
Args:
mask_table: astropy table
                Table containing minimum and maximum wavelengths of absorption
lines to mask (in both rest frame and observed frame)
"""
if len(mask_table)==0:
return
select_rest_frame_mask = mask_table['frame'] == 'RF'
select_obs_mask = mask_table['frame'] == 'OBS'
mask_rest_frame = mask_table[select_rest_frame_mask]
mask_obs_frame = mask_table[select_obs_mask]
if len(mask_rest_frame)+len(mask_obs_frame)==0:
return
if self.log_lambda is None:
return
w = np.ones(self.log_lambda.size, dtype=bool)
for mask_range in mask_obs_frame:
w &= ((self.log_lambda < mask_range['log_wave_min']) |
(self.log_lambda > mask_range['log_wave_max']))
for mask_range in mask_rest_frame:
rest_frame_log_lambda = self.log_lambda - np.log10(1. + self.z_qso)
w &= ((rest_frame_log_lambda < mask_range['log_wave_min']) |
(rest_frame_log_lambda > mask_range['log_wave_max']))
parameters = [
'ivar', 'log_lambda', 'flux', 'dla_transmission',
'mean_optical_depth', 'mean_expected_flux_frac', 'exposures_diff',
'reso'
]
for param in parameters:
if hasattr(self, param) and (getattr(self, param) is not None):
setattr(self, param, getattr(self, param)[w])
return
def add_optical_depth(self, tau, gamma, lambda_rest_frame):
"""Adds the contribution of a given species to the mean optical depth.
Flux will be corrected by the mean optical depth. This correction is
governed by the optical depth-flux relation:
        `F = exp(-tau*(1+z)^gamma)`
Args:
tau: float
Mean optical depth
gamma: float
Optical depth redshift evolution. Optical depth evolves as
`(1+z)^gamma`
lambda_rest_frame: float
Restframe wavelength of the element responsible for the absorption.
In Angstroms
"""
if self.log_lambda is None:
return
if self.mean_optical_depth is None:
self.mean_optical_depth = np.ones(self.log_lambda.size)
w = 10.**self.log_lambda / (1. + self.z_qso) <= lambda_rest_frame
z = 10.**self.log_lambda / lambda_rest_frame - 1.
self.mean_optical_depth[w] *= np.exp(-tau * (1. + z[w])**gamma)
return
def add_dla(self, z_abs, nhi, mask_table=None):
"""Adds DLA to forest. Masks it by removing the afffected pixels.
Args:
z_abs: float
Redshift of the DLA absorption
nhi : float
DLA column density in log10(cm^-2)
mask_table : astropy table for masking
Wavelengths to be masked in DLA rest-frame wavelength
"""
if self.log_lambda is None:
return
if self.dla_transmission is None:
self.dla_transmission = np.ones(len(self.log_lambda))
self.dla_transmission *= DLA(self, z_abs, nhi).transmission
w = self.dla_transmission > Forest.dla_mask_limit
        if mask_table is not None and len(mask_table) > 0:
select_dla_mask = mask_table['frame'] == 'RF_DLA'
mask = mask_table[select_dla_mask]
if len(mask)>0:
for mask_range in mask:
w &= ((self.log_lambda - np.log10(1. + z_abs) < mask_range['log_wave_min']) |
(self.log_lambda - np.log10(1. + z_abs) > mask_range['log_wave_max']))
# do the actual masking
parameters = [
'ivar', 'log_lambda', 'flux', 'dla_transmission',
'mean_optical_depth', 'mean_expected_flux_frac', 'exposures_diff',
'reso'
]
for param in parameters:
if hasattr(self, param) and (getattr(self, param) is not None):
setattr(self, param, getattr(self, param)[w])
return
def add_absorber(self, lambda_absorber):
"""Adds absorber to forest. Masks it by removing the afffected pixels.
Args:
lambda_absorber: float
Wavelength of the absorber
"""
if self.log_lambda is None:
return
w = np.ones(self.log_lambda.size, dtype=bool)
w &= (np.fabs(1.e4 * (self.log_lambda - np.log10(lambda_absorber))) >
Forest.absorber_mask_width)
parameters = [
'ivar', 'log_lambda', 'flux', 'dla_transmission',
'mean_optical_depth', 'mean_expected_flux_frac', 'exposures_diff',
'reso'
]
for param in parameters:
if hasattr(self, param) and (getattr(self, param) is not None):
setattr(self, param, getattr(self, param)[w])
return
def cont_fit(self):
"""Computes the forest continuum.
        Fits a model based on the mean quasar continuum and a linear function
        (see equation 2 of du Mas des Bourboux et al. 2020).
Flags the forest with bad_cont if the computation fails.
"""
log_lambda_max = (Forest.log_lambda_max_rest_frame +
np.log10(1 + self.z_qso))
log_lambda_min = (Forest.log_lambda_min_rest_frame +
np.log10(1 + self.z_qso))
# get mean continuum
try:
mean_cont = Forest.get_mean_cont(self.log_lambda -
np.log10(1 + self.z_qso))
except ValueError:
raise Exception("Problem found when loading get_mean_cont")
# add the optical depth correction
# (previously computed using method add_optical_depth)
        if self.mean_optical_depth is not None:
mean_cont *= self.mean_optical_depth
# add the dla transmission correction
# (previously computed using method add_dla)
        if self.dla_transmission is not None:
mean_cont *= self.dla_transmission
        # pixel variance due to the Large Scale Structure
var_lss = Forest.get_var_lss(self.log_lambda)
# correction factor to the contribution of the pipeline
# estimate of the instrumental noise to the variance.
eta = Forest.get_eta(self.log_lambda)
# fudge contribution to the variance
fudge = Forest.get_fudge(self.log_lambda)
def get_cont_model(p0, p1):
"""Models the flux continuum by multiplying the mean_continuum
by a linear function
Args:
p0: float
Zero point of the linear function (flux mean)
p1: float
Slope of the linear function (evolution of the flux)
Global args (defined only in the scope of function cont_fit)
log_lambda_min: float
Minimum logarithm of the wavelength (in Angs)
                log_lambda_max: float
                    Maximum logarithm of the wavelength (in Angs)
mean_cont: array of floats
Mean continuum
"""
line = (p1 * (self.log_lambda - log_lambda_min) /
(log_lambda_max - log_lambda_min) + p0)
return line * mean_cont
        def chi2(p0, p1):
            """Computes the chi2 of a given model (see get_cont_model above).
            Args:
                p0: float
                    Zero point of the linear function (see get_cont_model)
                p1: float
                    Slope of the linear function (see get_cont_model)
Global args (defined only in the scope of function cont_fit)
eta: array of floats
Correction factor to the contribution of the pipeline
estimate of the instrumental noise to the variance.
Returns:
The obtained chi2
"""
cont_model = get_cont_model(p0, p1)
var_pipe = 1. / self.ivar / cont_model**2
## prep_del.variance is the variance of delta
## we want here the weights = ivar(flux)
variance = eta * var_pipe + var_lss + fudge / var_pipe
weights = 1.0 / cont_model**2 / variance
# force weights=1 when use-constant-weight
# TODO: make this condition clearer, maybe pass an option
# use_constant_weights?
if (eta == 0).all():
weights = np.ones(len(weights))
chi2_contribution = (self.flux - cont_model)**2 * weights
return chi2_contribution.sum() - np.log(weights).sum()
p0 = (self.flux * self.ivar).sum() / self.ivar.sum()
p1 = 0.0
minimizer = iminuit.Minuit(chi2,
p0=p0,
p1=p1,
error_p0=p0 / 2.,
error_p1=p0 / 2.,
errordef=1.,
print_level=0,
fix_p1=(self.order == 0))
minimizer_result, _ = minimizer.migrad()
self.cont = get_cont_model(minimizer.values["p0"],
minimizer.values["p1"])
self.p0 = minimizer.values["p0"]
self.p1 = minimizer.values["p1"]
self.bad_cont = None
if not minimizer_result.is_valid:
self.bad_cont = "minuit didn't converge"
if np.any(self.cont <= 0):
self.bad_cont = "negative continuum"
## if the continuum is negative, then set it to a very small number
## so that this forest is ignored
if self.bad_cont is not None:
self.cont = self.cont * 0 + 1e-10
self.p0 = 0.
self.p1 = 0.
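# Hedged worked example (illustrative numbers, not from the paper): the
# weight model inside chi2 above is
#     var_pipe = 1 / (ivar * cont_model**2)
#     weights  = 1 / (cont_model**2 * (eta*var_pipe + var_lss + fudge/var_pipe))
# For eta = 1, var_lss = 0.1, fudge = 0, ivar = 4 and cont_model = 1 this
# gives var_pipe = 0.25 and weights = 1/0.35, about 2.86; the extra
# -sum(log(weights)) term in chi2 keeps the fit from inflating the errors.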
class Delta(QSO):
"""Class to represent the mean transimission fluctuation field (delta)
This class stores the information for the deltas for a given line of sight
Attributes:
## Inherits from QSO ##
log_lambda : array of floats
Array containing the logarithm of the wavelengths (in Angs)
weights : array of floats
Weights associated to pixel. Overloaded from parent class
cont: array of floats
Quasar continuum
delta: array of floats
Mean transmission fluctuation (delta field)
order: 0 or 1
Order of the log10(lambda) polynomial for the continuum fit
ivar: array of floats
Inverse variance associated to each flux
exposures_diff: array of floats
Difference between exposures
mean_snr: float
Mean signal-to-noise ratio in the forest
mean_reso: float
Mean resolution of the forest
mean_z: float
Mean redshift of the forest
delta_log_lambda: float
Variation of the logarithm of the wavelength between two pixels
z: array of floats or None
            Redshift of the absorption
r_comov: array of floats or None
Comoving distance to the object. Overloaded from parent class
dist_m: array of floats or None
Angular diameter distance to object. Overloaded from parent
class
neighbours: list of Delta or QSO or None
Neighbouring deltas/quasars
fname: string or None
String identifying Delta as part of a group
Methods:
__init__: Initializes class instances.
from_fitsio: Initialize instance from a fits file.
from_ascii: Initialize instance from an ascii file.
        from_image: Initialize instances from a fits image file.
project: Project the delta field.
"""
def __init__(self, thingid, ra, dec, z_qso, plate, mjd, fiberid, log_lambda,
weights, cont, delta, order, ivar, exposures_diff, mean_snr,
mean_reso, mean_z, delta_log_lambda):
"""Initializes class instances.
Args:
thingid: integer
Thingid of the observation.
ra: float
Right-ascension of the quasar (in radians).
dec: float
Declination of the quasar (in radians).
z_qso: float
Redshift of the quasar.
plate: integer
Plate number of the observation.
mjd: integer
Modified Julian Date of the observation.
fiberid: integer
Fiberid of the observation.
log_lambda: array of floats
Logarithm of the wavelengths (in Angs)
weights: array of floats
Pixel weights
cont: array of floats
Quasar continuum
delta: array of floats
Mean transmission fluctuation (delta field)
order: 0 or 1
Order of the log10(lambda) polynomial for the continuum fit
ivar: array of floats
Inverse variance associated to each flux
exposures_diff: array of floats
Difference between exposures
mean_snr: float
Mean signal-to-noise ratio in the forest
mean_reso: float
Mean resolution of the forest
mean_z: float
Mean redshift of the forest
delta_log_lambda: float
Variation of the logarithm of the wavelength between two pixels
"""
QSO.__init__(self, thingid, ra, dec, z_qso, plate, mjd, fiberid)
self.log_lambda = log_lambda
self.weights = weights
self.cont = cont
self.delta = delta
self.order = order
self.ivar = ivar
self.exposures_diff = exposures_diff
self.mean_snr = mean_snr
self.mean_reso = mean_reso
self.mean_z = mean_z
self.delta_log_lambda = delta_log_lambda
# variables computed in function io.read_deltas
self.z = None
self.r_comov = None
self.dist_m = None
# variables computed in function cf.fill_neighs or xcf.fill_neighs
self.neighbours = None
# variables used in function cf.compute_wick_terms and
# main from bin.picca_wick
self.fname = None
@classmethod
def from_fitsio(cls, hdu, pk1d_type=False):
"""Initialize instance from a fits file.
Args:
hdu: fitsio.hdu.table.TableHDU
A Header Data Unit opened with fitsio
pk1d_type: bool - default: False
Specifies if the fits file is formatted for the 1D Power
Spectrum analysis
Returns:
a Delta instance
"""
header = hdu.read_header()
delta = hdu['DELTA'][:].astype(float)
log_lambda = hdu['LOGLAM'][:].astype(float)
if pk1d_type:
ivar = hdu['IVAR'][:].astype(float)
exposures_diff = hdu['DIFF'][:].astype(float)
mean_snr = header['MEANSNR']
mean_reso = header['MEANRESO']
mean_z = header['MEANZ']
delta_log_lambda = header['DLL']
weights = None
cont = None
else:
ivar = None
exposures_diff = None
mean_snr = None
mean_reso = None
delta_log_lambda = None
mean_z = None
weights = hdu['WEIGHT'][:].astype(float)
cont = hdu['CONT'][:].astype(float)
thingid = header['THING_ID']
ra = header['RA']
dec = header['DEC']
z_qso = header['Z']
plate = header['PLATE']
mjd = header['MJD']
fiberid = header['FIBERID']
try:
order = header['ORDER']
except KeyError:
order = 1
return cls(thingid, ra, dec, z_qso, plate, mjd, fiberid, log_lambda,
weights, cont, delta, order, ivar, exposures_diff, mean_snr,
mean_reso, mean_z, delta_log_lambda)
@classmethod
def from_ascii(cls, line):
"""Initialize instance from an ascii file.
Args:
line: string
A line of the ascii file containing information from a line
of sight
Returns:
a Delta instance
"""
cols = line.split()
plate = int(cols[0])
mjd = int(cols[1])
fiberid = int(cols[2])
ra = float(cols[3])
dec = float(cols[4])
z_qso = float(cols[5])
mean_z = float(cols[6])
mean_snr = float(cols[7])
mean_reso = float(cols[8])
delta_log_lambda = float(cols[9])
num_pixels = int(cols[10])
delta = np.array(cols[11:11 + num_pixels]).astype(float)
log_lambda = np.array(cols[11 + num_pixels:11 +
2 * num_pixels]).astype(float)
ivar = np.array(cols[11 + 2 * num_pixels:11 +
3 * num_pixels]).astype(float)
exposures_diff = np.array(cols[11 + 3 * num_pixels:11 +
4 * num_pixels]).astype(float)
thingid = 0
order = 0
weights = None
cont = None
return cls(thingid, ra, dec, z_qso, plate, mjd, fiberid, log_lambda,
weights, cont, delta, order, ivar, exposures_diff, mean_snr,
mean_reso, mean_z, delta_log_lambda)
@staticmethod
def from_image(file):
"""Initialize instance from an ascii file.
Args:
file: string
Name of the fits file containing the image data
Returns:
a list of Delta instances
"""
hdu = fitsio.FITS(file)
deltas_image = hdu[0].read().astype(float)
ivar_image = hdu[1].read().astype(float)
log_lambda_image = hdu[2].read().astype(float)
ra = hdu[3]["RA"][:].astype(np.float64) * np.pi / 180.
dec = hdu[3]["DEC"][:].astype(np.float64) * np.pi / 180.
z = hdu[3]["Z"][:].astype(np.float64)
plate = hdu[3]["PLATE"][:]
mjd = hdu[3]["MJD"][:]
fiberid = hdu[3]["FIBER"]
thingid = hdu[3]["THING_ID"][:]
        nspec = deltas_image.shape[1]
deltas = []
for index in range(nspec):
if index % 100 == 0:
userprint("\rreading deltas {} of {}".format(index, nspec),
end="")
delta = deltas_image[:, index]
ivar = ivar_image[:, index]
w = ivar > 0
delta = delta[w]
aux_ivar = ivar[w]
log_lambda = log_lambda_image[w]
order = 1
exposures_diff = None
mean_snr = None
mean_reso = None
delta_log_lambda = None
mean_z = None
            # pass the masked inverse variance so every per-pixel array has
            # the same length as the masked delta and log_lambda
            deltas.append(
                Delta(thingid[index], ra[index], dec[index], z[index],
                      plate[index], mjd[index], fiberid[index], log_lambda,
                      aux_ivar, None, delta, order, aux_ivar, exposures_diff,
                      mean_snr, mean_reso, mean_z, delta_log_lambda))
hdu.close()
return deltas
def project(self):
"""Project the delta field.
        The projection gets rid of the distortion caused by the continuum
        fitting. See equations 5 and 6 of du Mas des Bourboux et al. 2020
"""
# 2nd term in equation 6
mean_delta = np.average(self.delta, weights=self.weights)
# 3rd term in equation 6
res = 0
if (self.order == 1) and self.delta.shape[0] > 1:
mean_log_lambda = np.average(self.log_lambda, weights=self.weights)
meanless_log_lambda = self.log_lambda - mean_log_lambda
mean_delta_log_lambda = (
np.sum(self.weights * self.delta * meanless_log_lambda) /
np.sum(self.weights * meanless_log_lambda**2))
res = mean_delta_log_lambda * meanless_log_lambda
elif self.order == 1:
res = self.delta
self.delta -= mean_delta + res
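# Hedged toy check (not part of picca): project() removes the weighted mean
# (and, at order 1, the weighted linear trend in log_lambda), so a constant
# delta field must project to zero.
if __name__ == '__main__':
    _toy_delta = np.full(5, 0.3)
    _toy_weights = np.ones(5)
    assert np.allclose(
        _toy_delta - np.average(_toy_delta, weights=_toy_weights), 0.0)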
|
gpl-3.0
| 5,412,465,735,005,068
| 36.097808
| 97
| 0.557275
| false
| 4.019551
| false
| false
| false
|
FylmTM/edX-code
|
MITx_6.00.1x/final_exam/problem_7.py
|
1
|
1961
|
class Frob(object):
def __init__(self, name):
self.name = name
self.before = None
self.after = None
def setBefore(self, before):
# example: a.setBefore(b) sets b before a
self.before = before
def setAfter(self, after):
# example: a.setAfter(b) sets b after a
self.after = after
def getBefore(self):
return self.before
def getAfter(self):
return self.after
def myName(self):
return self.name
def insert(atMe, newFrob):
def get_latest(node):
while node.getAfter() is not None:
node = node.getAfter()
return node
def innerInsert(innerAtMe, innerNewFrob):
if innerAtMe.myName() > innerNewFrob.myName():
if innerAtMe.getBefore() is None:
innerAtMe.setBefore(innerNewFrob)
innerNewFrob.setAfter(innerAtMe)
else:
innerInsert(innerAtMe.getBefore(), innerNewFrob)
else:
temp = innerAtMe.getAfter()
if temp is None:
innerAtMe.setAfter(innerNewFrob)
innerNewFrob.setBefore(innerAtMe)
else:
innerAtMe.setAfter(innerNewFrob)
innerNewFrob.setBefore(innerAtMe)
innerNewFrob.setAfter(temp)
temp.setBefore(innerNewFrob)
innerInsert(get_latest(atMe), newFrob)
def print_frobs(start):
if start.getAfter() is not None:
return start.myName() + " - " + print_frobs(start.getAfter())
else:
return start.myName()
eric = Frob('eric')
andrew = Frob('andrew')
ruth = Frob('ruth')
fred = Frob('fred')
martha = Frob('martha')
insert(eric, andrew)
print print_frobs(andrew)
print
insert(eric, ruth)
print print_frobs(andrew)
print
insert(eric, fred)
print print_frobs(andrew)
print
insert(ruth, martha)
print print_frobs(andrew)
print
insert(eric, Frob('martha'))
print print_frobs(andrew)
print
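# Hand-traced expected output (hedged; run under Python 2, which this file
# targets): insert() keeps the chain alphabetically sorted and duplicates
# are kept side by side, so the successive prints from andrew read
#   andrew - eric
#   andrew - eric - ruth
#   andrew - eric - fred - ruth
#   andrew - eric - fred - martha - ruth
#   andrew - eric - fred - martha - martha - ruth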
|
mit
| 4,611,521,960,080,144,400
| 26.236111
| 69
| 0.609893
| false
| 3.340716
| false
| false
| false
|
Razbit/razttthon
|
src/player.py
|
1
|
2608
|
# Razttthon, a python-implemented Tic-tac-toe game.
# Copyright Eetu 'Razbit' Pesonen, 2014
#
# This file is a part of Razttthon, which is free software: you can redistribute
# it and/or modify it under the terms of the GNU General Public License
# version 3 as published by the Free Software Foundation.
#
# Razttthon is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
#This file contains the player class
from mainc import cMain
class cPlayerHandler(cMain):
def __init__(self):
pass
def getPlayers(self): return cMain.nPlayers
def getName(self, id): return cMain.playerlist[id][0]
def getGames(self, id): return cMain.playerlist[id][1]
def getWins(self, id): return cMain.playerlist[id][2]
def getLosses(self, id): return cMain.playerlist[id][3]
def getQuits(self, id): return cMain.playerlist[id][4]
def getData(self, id):
return [self.getName(id), self.getGames(id), self.getWins(id), self.getLosses(id), self.getQuits(id)]
def addPlayer(self, name):
#Adds a new player with name 'name' to the cMain's list
cMain.playerlist.append([name, 0, 0, 0, 0]) #Add player
cMain.nPlayers += 1
return self.getPlayers()-1 #Return PID for the newly created player
def addGame(self, id):
try:
cMain.playerlist[id][1] += 1
return True
except IndexError:
return False
def addWin(self, id):
try:
cMain.playerlist[id][2] += 1
return True
except IndexError:
return False
def addLose(self, id):
try:
cMain.playerlist[id][3] += 1
return True
except IndexError:
return False
def addQuit(self, id):
try:
cMain.playerlist[id][4] += 1
return True
except IndexError:
return False
def getPID(self, name):
#Search the playerlist, return index where player 'name' was found
for index in range(len(cMain.playerlist)):
if cMain.playerlist[index][0].upper() == name.upper():
return index
return -1 #If item isn't found, return -1
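# Hedged usage sketch (illustrative; cMain comes from mainc.py, which is not
# shown here and must define `playerlist` and `nPlayers`):
#   handler = cPlayerHandler()
#   pid = handler.addPlayer('Razbit')   # returns the new player id
#   handler.addGame(pid); handler.addWin(pid)
#   handler.getPID('razbit')            # lookup is case-insensitive -> pid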
|
gpl-3.0
| -5,719,532,197,032,230,000
| 32.435897
| 109
| 0.610813
| false
| 3.812865
| false
| false
| false
|
borjam/exabgp
|
src/exabgp/bgp/neighbor.py
|
2
|
17189
|
# encoding: utf-8
"""
neighbor.py
Created by Thomas Mangin on 2009-11-05.
Copyright (c) 2009-2017 Exa Networks. All rights reserved.
License: 3-clause BSD. (See the COPYRIGHT file)
"""
from collections import deque
from collections import Counter
from exabgp.protocol.family import AFI
from exabgp.util.dns import host, domain
from exabgp.bgp.message import Message
from exabgp.bgp.message.open.capability import AddPath
from exabgp.bgp.message.open.holdtime import HoldTime
from exabgp.rib import RIB
# class Section(dict):
# name = ''
# key = ''
# sub = ['capability']
# def string(self, level=0):
# prefix = ' ' * level
# key_name = self.get(key,'')
# returned = f'{prefix} {key_name} {\n'
# prefix = ' ' * (level+1)
# for k, v in self.items():
# if k == prefix:
# continue
# if k in sub:
# returned += self[k].string(level+1)
# returned += f'{k} {v};\n'
# return returned
# The definition of a neighbor (from reading the configuration)
class Neighbor(dict):
class Capability(dict):
defaults = {
'asn4': True,
'extended-message': True,
'graceful-restart': False,
'multi-session': False,
'operational': False,
'add-path': 0,
'route-refresh': 0,
'nexthop': None,
'aigp': None,
}
defaults = {
# Those are the field from the configuration
'description': '',
'router-id': None,
'local-address': None,
'peer-address': None,
'local-as': None,
'peer-as': None,
        # passive indicates that we do not establish outgoing connections
        'passive': False,
        # the port to listen on (zero means that we do not listen)
'listen': 0,
# the port to connect to
'connect': 0,
'hold-time': HoldTime(180),
'rate-limit': 0,
'host-name': host(),
'domain-name': domain(),
'group-updates': True,
'auto-flush': True,
'adj-rib-in': True,
'adj-rib-out': True,
'manual-eor': False,
# XXX: this should be under an MD5 sub-dict/object ?
'md5-password': None,
'md5-base64': False,
'md5-ip': None,
'outgoing-ttl': None,
'incoming-ttl': None,
}
_GLOBAL = {'uid': 1}
def __init__(self):
# super init
self.update(self.defaults)
# Those are subconf
self.api = None # XXX: not scriptable - is replaced outside the class
# internal or calculated field
self['capability'] = self.Capability.defaults.copy()
# local_address uses auto discovery
self.auto_discovery = False
self.range_size = 1
# was this Neighbor generated from a range
self.generated = False
self._families = []
self._nexthop = []
self._addpath = []
self.rib = None
# The routes we have parsed from the configuration
self.changes = []
# On signal update, the previous routes so we can compare what changed
self.backup_changes = []
self.eor = deque()
self.asm = dict()
self.messages = deque()
self.refresh = deque()
self.counter = Counter()
        # It is possible to:
        # - have multiple exabgp processes toward one peer on the same host (use of pid)
        # - have more than one connection toward a peer
        # - each connection has its own neighbor (hence why the identifier is not in Protocol)
self.uid = '%d' % self._GLOBAL['uid']
self._GLOBAL['uid'] += 1
def missing(self):
        if self['local-address'] is None:
            return 'incomplete neighbor, missing local-address'
if self['local-as'] is None:
return 'incomplete neighbor, missing local-as'
if self['peer-as'] is None:
return 'incomplete neighbor, missing peer-as'
return ''
def infer(self):
if self['md5-ip'] is None:
self['md5-ip'] = self['local-address']
if self['capability']['graceful-restart'] == 0:
self['capability']['graceful-restart'] = int(self['hold-time'])
def id(self):
return 'neighbor-%s' % self.uid
    # This set must be unique between peers, not the full draft-ietf-idr-bgp-multisession-07
def index(self):
if self['listen'] != 0:
return 'peer-ip %s listen %d' % (self['peer-address'], self['listen'])
return self.name()
def make_rib(self):
self.rib = RIB(self.name(), self['adj-rib-in'], self['adj-rib-out'], self._families)
# will resend all the routes once we reconnect
def reset_rib(self):
self.rib.reset()
self.messages = deque()
self.refresh = deque()
# back to square one, all the routes are removed
def clear_rib(self):
self.rib.clear()
self.messages = deque()
self.refresh = deque()
def name(self):
if self['capability']['multi-session']:
session = '/'.join("%s-%s" % (afi.name(), safi.name()) for (afi, safi) in self.families())
else:
session = 'in-open'
return "neighbor %s local-ip %s local-as %s peer-as %s router-id %s family-allowed %s" % (
self['peer-address'],
self['local-address'] if self['peer-address'] is not None else 'auto',
self['local-as'] if self['local-as'] is not None else 'auto',
self['peer-as'] if self['peer-as'] is not None else 'auto',
self['router-id'],
session,
)
def families(self):
# this list() is important .. as we use the function to modify self._families
return list(self._families)
def nexthops(self):
# this list() is important .. as we use the function to modify self._nexthop
return list(self._nexthop)
def addpaths(self):
# this list() is important .. as we use the function to modify self._add_path
return list(self._addpath)
def add_family(self, family):
# the families MUST be sorted for neighbor indexing name to be predictable for API users
# this list() is important .. as we use the function to modify self._families
if family not in self.families():
afi, safi = family
d = dict()
d[afi] = [
safi,
]
for afi, safi in self._families:
d.setdefault(afi, []).append(safi)
self._families = [(afi, safi) for afi in sorted(d) for safi in sorted(d[afi])]
def add_nexthop(self, afi, safi, nhafi):
if (afi, safi, nhafi) not in self._nexthop:
self._nexthop.append((afi, safi, nhafi))
def add_addpath(self, family):
# the families MUST be sorted for neighbor indexing name to be predictable for API users
# this list() is important .. as we use the function to modify self._add_path
if family not in self.addpaths():
afi, safi = family
d = dict()
d[afi] = [
safi,
]
for afi, safi in self._addpath:
d.setdefault(afi, []).append(safi)
self._addpath = [(afi, safi) for afi in sorted(d) for safi in sorted(d[afi])]
def remove_family(self, family):
if family in self.families():
self._families.remove(family)
def remove_nexthop(self, afi, safi, nhafi):
if (afi, safi, nhafi) in self.nexthops():
self._nexthop.remove((afi, safi, nhafi))
def remove_addpath(self, family):
if family in self.addpaths():
self._addpath.remove(family)
def missing(self):
if self['local-address'] is None and not self.auto_discovery:
return 'local-address'
if self['listen'] > 0 and self.auto_discovery:
return 'local-address'
if self['peer-address'] is None:
return 'peer-address'
if self.auto_discovery and not self['router-id']:
return 'router-id'
if self['peer-address'].afi == AFI.ipv6 and not self['router-id']:
return 'router-id'
return ''
# This function only compares the neighbor BUT NOT ITS ROUTES
def __eq__(self, other):
# Comparing local_address is skipped in the case where either
# peer is configured to auto discover its local address. In
# this case it can happen that one local_address is None and
        # the other one will be set to the auto discovered IP address.
auto_discovery = self.auto_discovery or other.auto_discovery
return (
self['router-id'] == other['router-id']
and self['local-as'] == other['local-as']
and self['peer-address'] == other['peer-address']
and self['peer-as'] == other['peer-as']
and self['passive'] == other['passive']
and self['listen'] == other['listen']
and self['connect'] == other['connect']
and self['hold-time'] == other['hold-time']
and self['rate-limit'] == other['rate-limit']
and self['host-name'] == other['host-name']
and self['domain-name'] == other['domain-name']
and self['md5-password'] == other['md5-password']
and self['md5-ip'] == other['md5-ip']
and self['incoming-ttl'] == other['incoming-ttl']
and self['outgoing-ttl'] == other['outgoing-ttl']
and self['group-updates'] == other['group-updates']
and self['auto-flush'] == other['auto-flush']
and self['adj-rib-in'] == other['adj-rib-in']
and self['adj-rib-out'] == other['adj-rib-out']
and (auto_discovery or self['local-address'] == other['local-address'])
and self['capability'] == other['capability']
and self.auto_discovery == other.auto_discovery
and self.families() == other.families()
)
def __ne__(self, other):
return not self.__eq__(other)
def string(self, with_changes=True):
changes = ''
if with_changes:
changes += '\nstatic { '
for change in self.rib.outgoing.queued_changes():
changes += '\n\t\t%s' % change.extensive()
changes += '\n}'
families = ''
for afi, safi in self.families():
families += '\n\t\t%s %s;' % (afi.name(), safi.name())
nexthops = ''
for afi, safi, nexthop in self.nexthops():
nexthops += '\n\t\t%s %s %s;' % (afi.name(), safi.name(), nexthop.name())
addpaths = ''
for afi, safi in self.addpaths():
addpaths += '\n\t\t%s %s;' % (afi.name(), safi.name())
codes = Message.CODE
_extension_global = {
'neighbor-changes': 'neighbor-changes',
'negotiated': 'negotiated',
'fsm': 'fsm',
'signal': 'signal',
}
_extension_receive = {
'receive-packets': 'packets',
'receive-parsed': 'parsed',
'receive-consolidate': 'consolidate',
'receive-%s' % codes.NOTIFICATION.SHORT: 'notification',
'receive-%s' % codes.OPEN.SHORT: 'open',
'receive-%s' % codes.KEEPALIVE.SHORT: 'keepalive',
'receive-%s' % codes.UPDATE.SHORT: 'update',
'receive-%s' % codes.ROUTE_REFRESH.SHORT: 'refresh',
'receive-%s' % codes.OPERATIONAL.SHORT: 'operational',
}
_extension_send = {
'send-packets': 'packets',
'send-parsed': 'parsed',
'send-consolidate': 'consolidate',
'send-%s' % codes.NOTIFICATION.SHORT: 'notification',
'send-%s' % codes.OPEN.SHORT: 'open',
'send-%s' % codes.KEEPALIVE.SHORT: 'keepalive',
'send-%s' % codes.UPDATE.SHORT: 'update',
'send-%s' % codes.ROUTE_REFRESH.SHORT: 'refresh',
'send-%s' % codes.OPERATIONAL.SHORT: 'operational',
}
apis = ''
for process in self.api.get('processes', []):
_global = []
_receive = []
_send = []
for api, name in _extension_global.items():
_global.extend(
[
'\t\t%s;\n' % name,
]
if process in self.api[api]
else []
)
for api, name in _extension_receive.items():
_receive.extend(
[
'\t\t\t%s;\n' % name,
]
if process in self.api[api]
else []
)
for api, name in _extension_send.items():
_send.extend(
[
'\t\t\t%s;\n' % name,
]
if process in self.api[api]
else []
)
_api = '\tapi {\n'
_api += '\t\tprocesses [ %s ];\n' % process
_api += ''.join(_global)
if _receive:
_api += '\t\treceive {\n'
_api += ''.join(_receive)
_api += '\t\t}\n'
if _send:
_api += '\t\tsend {\n'
_api += ''.join(_send)
_api += '\t\t}\n'
_api += '\t}\n'
apis += _api
returned = (
'neighbor %s {\n'
'\tdescription "%s";\n'
'\trouter-id %s;\n'
'\thost-name %s;\n'
'\tdomain-name %s;\n'
'\tlocal-address %s;\n'
'\tlocal-as %s;\n'
'\tpeer-as %s;\n'
'\thold-time %s;\n'
'\trate-limit %s;\n'
'\tmanual-eor %s;\n'
'%s%s%s%s%s%s%s%s%s%s%s\n'
'\tcapability {\n'
'%s%s%s%s%s%s%s%s%s\t}\n'
'\tfamily {%s\n'
'\t}\n'
'\tnexthop {%s\n'
'\t}\n'
'\tadd-path {%s\n'
'\t}\n'
'%s'
'%s'
'}'
% (
self['peer-address'],
self['description'],
self['router-id'],
self['host-name'],
self['domain-name'],
self['local-address'] if not self.auto_discovery else 'auto',
self['local-as'],
self['peer-as'],
self['hold-time'],
'disable' if self['rate-limit'] == 0 else self['rate-limit'],
'true' if self['manual-eor'] else 'false',
'\n\tpassive %s;\n' % ('true' if self['passive'] else 'false'),
'\n\tlisten %d;\n' % self['listen'] if self['listen'] else '',
'\n\tconnect %d;\n' % self['connect'] if self['connect'] else '',
'\tgroup-updates %s;\n' % ('true' if self['group-updates'] else 'false'),
'\tauto-flush %s;\n' % ('true' if self['auto-flush'] else 'false'),
'\tadj-rib-in %s;\n' % ('true' if self['adj-rib-in'] else 'false'),
'\tadj-rib-out %s;\n' % ('true' if self['adj-rib-out'] else 'false'),
'\tmd5-password "%s";\n' % self['md5-password'] if self['md5-password'] else '',
'\tmd5-base64 %s;\n'
% ('true' if self['md5-base64'] is True else 'false' if self['md5-base64'] is False else 'auto'),
'\tmd5-ip "%s";\n' % self['md5-ip'] if not self.auto_discovery else '',
'\toutgoing-ttl %s;\n' % self['outgoing-ttl'] if self['outgoing-ttl'] else '',
'\tincoming-ttl %s;\n' % self['incoming-ttl'] if self['incoming-ttl'] else '',
'\t\tasn4 %s;\n' % ('enable' if self['capability']['asn4'] else 'disable'),
'\t\troute-refresh %s;\n' % ('enable' if self['capability']['route-refresh'] else 'disable'),
'\t\tgraceful-restart %s;\n'
% (self['capability']['graceful-restart'] if self['capability']['graceful-restart'] else 'disable'),
'\t\tnexthop %s;\n' % ('enable' if self['capability']['nexthop'] else 'disable'),
'\t\tadd-path %s;\n'
% (AddPath.string[self['capability']['add-path']] if self['capability']['add-path'] else 'disable'),
'\t\tmulti-session %s;\n' % ('enable' if self['capability']['multi-session'] else 'disable'),
'\t\toperational %s;\n' % ('enable' if self['capability']['operational'] else 'disable'),
'\t\taigp %s;\n' % ('enable' if self['capability']['aigp'] else 'disable'),
families,
nexthops,
addpaths,
apis,
changes,
)
)
# '\t\treceive {\n%s\t\t}\n' % receive if receive else '',
# '\t\tsend {\n%s\t\t}\n' % send if send else '',
return returned.replace('\t', ' ')
def __str__(self):
return self.string(False)
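# Hedged usage sketch (not part of upstream exabgp; plain strings stand in
# for exabgp's IP/ASN types, which is enough for name() but not for a real
# configuration):
if __name__ == '__main__':
    n = Neighbor()
    n['peer-address'] = '192.0.2.1'
    n['local-address'] = '192.0.2.2'
    n['local-as'] = 65000
    n['peer-as'] = 65001
    print(n.name())
    print(n.index())  # falls back to name() because 'listen' is 0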
|
bsd-3-clause
| -2,980,760,986,235,840,500
| 35.965591
| 116
| 0.507127
| false
| 3.774484
| false
| false
| false
|
malisal/bfdpie
|
setup.py
|
1
|
1563
|
import os
import distutils
from setuptools import setup, Extension, Command
from distutils.command import build as build_module
from distutils.command.install import install
BINUTILS_VERSION = "binutils-2.26"
module = Extension(
name = "bfdpie._bfdpie",
sources = ["bfdpie.c"],
# Include dir is our own binutils
include_dirs= ["tmp/install/include/"],
# Link against what?
library_dirs=["tmp/install/lib/"],
libraries=["bfd", "opcodes", "iberty", "z"],
)
class BuildCommand(distutils.command.build.build):
def run(self):
# Download and compile binutils first
os.system("./bfdpie_build.sh %s" % (BINUTILS_VERSION))
build_module.build.run(self)
setup(
name = "bfdpie",
version = "0.1.14",
description = "A tiny interface around a subset of libBFD. Code based on https://github.com/Groundworkstech/pybfd",
author = "Luka Malisa",
author_email = "luka.malisha@gmail.com",
url = "https://github.com/malisal/bfdpie",
keywords = ["binary", "libbfd"],
platforms=["any"],
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
],
packages=["bfdpie"],
package_dir={"bfdpie": "bfdpie"},
ext_modules = [module],
test_suite = "tests",
install_requires = [
"wheel>=0.29.0",
],
package_data = {
"bfdpie" : ["bin/dummy.elf"],
},
cmdclass={
"build": BuildCommand,
}
)
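# Hedged usage note (not from the upstream README): the custom build step
# shells out to ./bfdpie_build.sh to fetch and compile binutils-2.26 into
# tmp/install before the extension links against it, so a C toolchain and
# network access are assumed:
#   python setup.py build
#   python setup.py install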
|
mit
| -7,680,653,282,204,861,000
| 22.681818
| 118
| 0.633397
| false
| 3.488839
| false
| false
| false
|
schreiberx/sweet
|
scripts/pp_plot_lonlat_csv.py
|
1
|
2786
|
#! /usr/bin/python3
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import sys
first = True
s = 2e-5
eta_contour_levels = np.append(np.arange(-1e-4, 0, s), np.arange(s, 1e-4, s))
hs = 5
h_contour_levels = np.append(np.arange(900, 1000-hs, hs), np.arange(1000+hs, 1100, hs))
# zoom the latitude range only when plotting eta fields
zoom_lat = 'eta' in sys.argv[1]
fontsize=8
figsize=(9, 3)
for filename in sys.argv[1:]:
print(filename)
data = np.loadtxt(filename, skiprows=3)
labelsx = data[0,1:]
labelsy = data[1:,0]
data = data[1:,1:]
if zoom_lat:
while labelsy[1] < 10:
labelsy = labelsy[1:]
data = data[1:]
while labelsy[-2] > 80:
labelsy = labelsy[0:-2]
data = data[0:-2]
# while labelsx[1] < 90:
# tmplabelsx = labelsx[0]
# labelsx[0:-1] = labelsx[1:]
# labelsx[-1] = tmplabelsx
#
# tmpdata = data[:,0]
# data[:,0:-1] = data[:,1:]
# data[:,-1] = tmpdata
if first:
lon_min = labelsx[0]
lon_max = labelsx[-1]
lat_min = labelsy[0]
lat_max = labelsy[-1]
new_labelsx = np.linspace(lon_min, lon_max, 7)
new_labelsy = np.linspace(lat_min, lat_max, 7)
labelsx = np.interp(new_labelsx, labelsx, labelsx)
labelsy = np.interp(new_labelsy, labelsy, labelsy)
if first:
cmin = np.amin(data)
cmax = np.amax(data)
        if 'eta' in filename:
            cmin = -1e-4
            cmax = 1e-4
#cmin *= 1.2
#cmax *= 1.2
extent = (labelsx[0], labelsx[-1], labelsy[0], labelsy[-1])
plt.figure(figsize=figsize)
plt.imshow(data, interpolation='nearest', extent=extent, origin='lower', aspect='auto')
plt.clim(cmin, cmax)
cbar = plt.colorbar()
cbar.ax.tick_params(labelsize=fontsize)
plt.title(filename, fontsize=fontsize)
if 'prog_eta' in filename:
plt.contour(data, colors="black", origin='lower', extent=extent, vmin=cmin, vmax=cmax, levels=eta_contour_levels, linewidths=0.5)
elif 'prog_h' in filename:
plt.contour(data, colors="black", origin='lower', extent=extent, vmin=cmin, vmax=cmax, levels=h_contour_levels, linewidths=0.5)
# elif '_u' in filename:
# hs = 0.001
# h_contour_levels = np.append(np.arange(-0.1, 0-hs, hs), np.arange(hs, 0.1, hs))
# plt.contour(data, colors="black", origin='lower', extent=extent, vmin=cmin, vmax=cmax, levels=h_contour_levels, linewidths=0.5)
else:
if cmin != cmax:
pass
#plt.contour(data, colors="black", origin='lower', extent=extent, vmin=cmin, vmax=cmax, linewidths=0.5)
ax = plt.gca()
ax.xaxis.set_label_coords(0.5, -0.075)
plt.xticks(labelsx, fontsize=fontsize)
plt.xlabel("Longitude", fontsize=fontsize)
plt.yticks(labelsy, fontsize=fontsize)
plt.ylabel("Latitude", fontsize=fontsize)
#plt.show()
outfilename = filename.replace('.csv', '.png')
print(outfilename)
plt.savefig(outfilename, dpi=200)
plt.close()
first = False
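# Hedged usage sketch (file names are illustrative): pass one or more CSV
# dumps on the command line; each is rendered as a PNG next to itself, and
# the colour limits frozen on the first file keep a time series comparable.
#   ./pp_plot_lonlat_csv.py output_prog_h_00000.csv output_prog_h_00010.csv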
|
mit
| 2,323,065,064,202,803,000
| 21.836066
| 131
| 0.660804
| false
| 2.483066
| false
| false
| false
|
skosukhin/spack
|
var/spack/repos/builtin/packages/aspell/package.py
|
1
|
3695
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
from llnl.util.link_tree import LinkTree
import spack.store
from spack.package import ExtensionError, ExtensionConflictError
# See also: AspellDictPackage
class Aspell(AutotoolsPackage):
"""GNU Aspell is a Free and Open Source spell checker designed to
eventually replace Ispell."""
homepage = "http://aspell.net/"
url = "https://ftpmirror.gnu.org/aspell/aspell-0.60.6.1.tar.gz"
extendable = True # support activating dictionaries
version('0.60.6.1', 'e66a9c9af6a60dc46134fdacf6ce97d7')
# The dictionaries install all their bits into their prefix.lib dir,
# we want to link them into aspell's dict-dir.
# These are identical to what's in spack/package.py except
# for using:
# - extension.prefix.lib instead of extension.prefix in LinkTree()
# - dest_dir instead of self.prefix in tree.(find_conflict|merge)()
def activate(self, extension, **kwargs):
extensions_layout = kwargs.get("extensions_layout",
spack.store.extensions)
if extensions_layout is not spack.store.extensions:
raise ExtensionError(
'aspell does not support non-global extensions')
aspell = which(self.prefix.bin.aspell)
dest_dir = aspell('dump', 'config', 'dict-dir', output=str).strip()
tree = LinkTree(extension.prefix.lib)
def ignore(filename):
return (filename in spack.store.layout.hidden_file_paths or
kwargs.get('ignore', lambda f: False)(filename))
conflict = tree.find_conflict(dest_dir, ignore=ignore)
if conflict:
raise ExtensionConflictError(conflict)
tree.merge(dest_dir, ignore=ignore)
def deactivate(self, extension, **kwargs):
extensions_layout = kwargs.get("extensions_layout",
spack.store.extensions)
if extensions_layout is not spack.store.extensions:
raise ExtensionError(
'aspell does not support non-global extensions')
aspell = which(self.prefix.bin.aspell)
dest_dir = aspell('dump', 'config', 'dict-dir', output=str).strip()
def ignore(filename):
return (filename in spack.store.layout.hidden_file_paths or
kwargs.get('ignore', lambda f: False)(filename))
tree = LinkTree(extension.prefix.lib)
tree.unmerge(dest_dir, ignore=ignore)
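# Hedged usage sketch (standard Spack workflow; the dictionary package name
# is illustrative):
#   spack install aspell
#   spack install aspell6-en    # an AspellDictPackage providing a dictionary
#   spack activate aspell6-en   # merges its prefix.lib into aspell's dict-dir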
|
lgpl-2.1
| -2,902,301,946,385,594,400
| 42.470588
| 78
| 0.651421
| false
| 4.038251
| false
| false
| false
|
rwalk333/pyquadprog
|
test/test.py
|
1
|
2107
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Test routines for the quadprog package. Excepted where noted, all examples are drawn
from the R quadprog package documentation and test suite.
"""
import numpy as np
import quadprog
import unittest
class TestQuadprog(unittest.TestCase):
def setUp(self):
print(dir(quadprog))
pass
def test_solveQP_basic(self):
'''Solve a simple quadratic program.
Minimize in x: -(0 5 0) %*% x + 1/2 x^T x
        Subject to: A^T x >= b0
with b0 = (-8,2,0)^T
and (-4 2 0)
A = (-3 1 -2)
( 0 0 1)
'''
expected = [0.4761905, 1.0476190, 2.0952381]
Dmat = np.identity(3)
dvec = np.array([0,5,0])
Amat = np.array([[-4, 2, 0],[-3, 1, -2], [0, 0, 1]])
bvec = np.array([-8,2,0])
sol = quadprog.solveQP(Dmat,dvec,Amat,bvec)
print(self.test_solveQP_basic.__doc__ + '\nExpected: ' + expected.__str__())
np.testing.assert_almost_equal(sol.solution, np.array(expected))
def test_solveCompactFormQP_basic(self):
'''Solve a simple quadratic progam using the compact storage format for the constraint data.
Minimize in x: -(0 5 0) %*% x + 1/2 x^T x
        Subject to: A^T x >= b0
with b0 = (-8,2,0)^T
and (-4 2 0)
A = (-3 1 -2)
( 0 0 1)
using a compact form of A.
'''
expected = [0.4761905, 1.0476190, 2.0952381]
Dmat = np.identity(3)
dvec = np.array([0,5,0])
Aind = np.array([[2,2,2], [1,1,2], [2,2,3]])
Amat = np.array([[-4,2,-2],[-3,1,1]])
bvec = np.array([-8,2,0])
sol = quadprog.solveCompactFormQP(Dmat, dvec, Amat, Aind, bvec)
print(self.test_solveCompactFormQP_basic.__doc__+ '\nExpected: ' + expected.__str__())
np.testing.assert_almost_equal(sol.solution, np.array(expected))
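# Hedged verification sketch (not part of the original suite): the expected
# optimum must at least satisfy the inequality constraints A^T x >= b0, so
# we check feasibility directly with numpy.
def _check_feasible(Amat, bvec, x, tol=1e-6):
    """Return True if x satisfies A^T x >= b0 up to tol."""
    return bool(np.all(Amat.T.dot(x) >= bvec - tol))

assert _check_feasible(np.array([[-4, 2, 0], [-3, 1, -2], [0, 0, 1]]),
                       np.array([-8, 2, 0]),
                       np.array([0.4761905, 1.0476190, 2.0952381]))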
if __name__ == "__main__":
unittest.main()
|
lgpl-2.1
| -8,512,066,933,382,506,000
| 30.924242
| 100
| 0.505458
| false
| 3.116864
| true
| false
| false
|
crustycrab/Risk-Prototype
|
graphics.py
|
1
|
1788
|
import pygame
import os
import random
import res
class Camera:
def __init__(self):
self.x = self.y = 0
self.speed = 500
def update(self, dt, key_state):
speed = self.speed * dt
if key_state[0]:
self.x = min(self.x + speed, 0)
if key_state[1]:
self.x = max(self.x - speed, res.WIN_WIDTH - res.MAP_WIDTH)
if key_state[2]:
self.y = min(self.y + speed, 0)
if key_state[3]:
self.y = max(self.y - speed, res.WIN_HEIGHT - res.MAP_HEIGHT)
def convert_pos(self, pos):
return (pos[0] - self.x, pos[1] - self.y)
def get_pos(self):
return (self.x, self.y)
def set_pos(self, pos):
self.x, self.y = pos
class Hud:
def __init__(self):
pass
class Stars:
def __init__(self, num_stars=256):
self.num_stars = num_stars
self.stars = []
self.gen_stars()
def draw(self, surface):
for star in self.stars:
pygame.draw.rect(surface, star['color'], star['rect'], 1)
def update(self, dt):
for i, star in enumerate(self.stars):
speed = star['speed'] * dt
x, y = star['rect'].topleft
x -= speed
if x < 0:
x, y = (res.MAP_WIDTH + x, random.randint(0, res.MAP_HEIGHT))
self.stars[i]['rect'].topleft = (int(x), y)
def gen_stars(self):
for _ in range(self.num_stars):
            x, y = self.get_random_coords()
star = {'speed': random.randint(1, 100),
'rect': pygame.Rect((x, y), (random.randint(2, 4),) * 2),
'color': (random.randint(153, 204), random.randint(153, 204), random.randint(178, 229))}
self.stars.append(star)
    def get_random_coords(self):
        return (random.randint(0, res.MAP_WIDTH - 1), random.randint(0, res.MAP_HEIGHT - 1))
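# Hedged usage sketch (illustrative; `res` must define the WIN_*/MAP_*
# dimensions and a display is required, so this is comments only):
#   pygame.init()
#   screen = pygame.display.set_mode((res.WIN_WIDTH, res.WIN_HEIGHT))
#   stars, camera = Stars(), Camera()
#   stars.update(1 / 60.0)                  # advance one 60 fps tick
#   stars.draw(screen)
#   camera.update(1 / 60.0, (False, True, False, False))  # scroll the map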
|
mit
| 574,214,615,529,656,260
| 24.913043
| 108
| 0.553691
| false
| 2.912052
| false
| false
| false
|
tonybeltramelli/Deep-Lyrics
|
gather.py
|
1
|
3064
|
#!/usr/bin/env python
__author__ = 'Tony Beltramelli www.tonybeltramelli.com - 09/07/2016'
import argparse
import os
import urllib2
import re
import codecs
from threading import Thread
from HTMLParser import HTMLParser
DOMAIN = "songmeanings.com/"
ARTIST_PATH = 'artist/view/songs/'
def start_new_thread(task, arg):
thread = Thread(target=task, args=(arg,))
thread.start()
def write_to_file(path, data):
output_file = codecs.open(path, 'a', 'utf_8')
output_file.write(data.encode('utf-8'))
output_file.write("\n")
output_file.close()
def get_url(path, arg = ""):
return 'http://' + DOMAIN + path + arg
def get_page_content(url):
response = urllib2.urlopen(url)
return response.read()
class SongPageParser(HTMLParser):
record = False
lyrics = ""
output_path = ""
def handle_starttag(self, tag, attrs):
for attr in attrs:
if attr[0] == "class" and attr[1].find('lyric-box') != -1:
self.record = True
if attr[0] == "id" and attr[1].find('lyrics-edit') != -1:
self.record = False
write_to_file(self.output_path, self.lyrics)
self.lyrics = ""
def handle_data(self, data):
if self.record:
self.lyrics += re.sub(r'[^\x00-\x7F]+', '\'', data.lstrip()) + "\n"
class ArtistPageParser(HTMLParser):
match = 0
url = ""
title = ""
output_path = ""
def handle_starttag(self, tag, attrs):
href = None
for attr in attrs:
if attr[0] == "id" and attr[1].find('lyric-') != -1:
self.match += 1
if attr[0] == "href" and attr[1].find(DOMAIN) != -1:
self.match += 1
href = attr[1]
if self.match > 1 and href is not None:
self.url = href[href.find(DOMAIN) + len(DOMAIN):]
def handle_endtag(self, tag):
self.match = 0
def handle_data(self, data):
if self.match > 1:
self.title = data
html = get_page_content(get_url(self.url))
song_parser = SongPageParser()
song_parser.output_path = self.output_path
start_new_thread(song_parser.feed, html)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--output_file', type=str, required=True)
parser.add_argument('--artists', type=str, required=True)
args = parser.parse_args()
output_file = args.output_file
artists = args.artists.replace(' ', '').split(',')
try:
os.remove(output_file)
except OSError:
print "The output file doesn't exist, creating it"
print "Gathering lyrics..."
for i, artist in enumerate(artists):
html = get_page_content(get_url(ARTIST_PATH, artist))
artist_parser = ArtistPageParser()
artist_parser.output_path = output_file
artist_parser.feed(html)
print "Progress: {}%".format(((i + 1) * 100) / len(artists))
print "Lyrics saved in {}".format(output_file)
if __name__ == "__main__":
main()
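# Hedged usage sketch (the artist ids are illustrative; songmeanings.com
# expects the numeric ids from its /artist/view/songs/<id> URLs):
#   python gather.py --output_file lyrics.txt --artists 200,1310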
|
mit
| 6,959,599,421,735,938,000
| 26.357143
| 79
| 0.579308
| false
| 3.462147
| false
| false
| false
|
Naeka/vosae-app
|
www/notification/api/resources/invoicing_notifications/make_invoice.py
|
1
|
3176
|
# -*- coding:Utf-8 -*-
from tastypie_mongoengine import fields
from notification.api.resources.base import NotificationBaseResource
from notification.api.doc import HELP_TEXT
from notification.models import invoicing_notifications
__all__ = (
'QuotationMakeInvoiceResource',
'QuotationMakeDownPaymentInvoiceResource',
'PurchaseOrderMakeInvoiceResource',
'PurchaseOrderMakeDownPaymentInvoiceResource',
)
class QuotationMakeInvoiceResource(NotificationBaseResource):
quotation = fields.ReferenceField(
to='invoicing.api.resources.DownPaymentInvoiceResource',
attribute='quotation',
help_text=HELP_TEXT['quotation_make_invoice']['quotation']
)
invoice = fields.ReferenceField(
to='invoicing.api.resources.InvoiceResource',
attribute='invoice',
help_text=HELP_TEXT['quotation_make_invoice']['invoice']
)
class Meta(NotificationBaseResource.Meta):
resource_name = 'quotation_make_invoice'
object_class = invoicing_notifications.QuotationMakeInvoice
class QuotationMakeDownPaymentInvoiceResource(NotificationBaseResource):
quotation = fields.ReferenceField(
to='invoicing.api.resources.DownPaymentInvoiceResource',
attribute='quotation',
help_text=HELP_TEXT['quotation_make_invoice']['quotation']
)
down_payment_invoice = fields.ReferenceField(
to='invoicing.api.resources.DownPaymentInvoiceResource',
attribute='down_payment_invoice',
help_text=HELP_TEXT['quotation_make_invoice']['down_payment_invoice']
)
class Meta(NotificationBaseResource.Meta):
resource_name = 'quotation_make_down_payment_invoice'
object_class = invoicing_notifications.QuotationMakeDownPaymentInvoice
class PurchaseOrderMakeInvoiceResource(NotificationBaseResource):
purchase_order = fields.ReferenceField(
to='invoicing.api.resources.DownPaymentInvoiceResource',
attribute='purchase_order',
help_text=HELP_TEXT['purchase_order_make_invoice']['purchase_order']
)
invoice = fields.ReferenceField(
to='invoicing.api.resources.InvoiceResource',
attribute='invoice',
help_text=HELP_TEXT['purchase_order_make_invoice']['invoice']
)
class Meta(NotificationBaseResource.Meta):
resource_name = 'purchase_order_make_invoice'
object_class = invoicing_notifications.PurchaseOrderMakeInvoice
class PurchaseOrderMakeDownPaymentInvoiceResource(NotificationBaseResource):
purchase_order = fields.ReferenceField(
to='invoicing.api.resources.DownPaymentInvoiceResource',
attribute='purchase_order',
help_text=HELP_TEXT['purchase_order_make_invoice']['purchase_order']
)
down_payment_invoice = fields.ReferenceField(
to='invoicing.api.resources.DownPaymentInvoiceResource',
attribute='down_payment_invoice',
help_text=HELP_TEXT['purchase_order_make_invoice']['down_payment_invoice']
)
class Meta(NotificationBaseResource.Meta):
        resource_name = 'purchase_order_make_down_payment_invoice'
object_class = invoicing_notifications.PurchaseOrderMakeDownPaymentInvoice
|
agpl-3.0
| 2,691,955,120,545,300,000
| 37.26506
| 82
| 0.732997
| false
| 4.229028
| false
| false
| false
|
talapus/Ophidian
|
Flask_fu/shell.py
|
1
|
1369
|
import sys
from datetime import datetime
import logging
from logging.handlers import RotatingFileHandler
from flask import Flask
from os import popen
app = Flask(__name__)
@app.route('/')
def main_form():
# app.logger.warning('A warning occurred (%d apples)', 42)
# app.logger.error('An error occurred')
# print('main_form', file=sys.stderr)
app.logger.info('main_form')
return '<form action="submit" id="textform" method="post"><textarea name="text">Hi</textarea><input type="submit" value="Submit"></form>'
'''
@app.route('/submit', methods=['POST'])
def submit_textarea():
# print('submit_textarea', file=sys.stderr)
# app.logger.info('submit_textarea')
app.logger.info('{} Submitted: {}'.format(datetime.now(), request.form["text"]))
return '{}'.format(request.form["text"])
def write_notes():
# print('write_notes', file=sys.stderr)
app.logger.info('{} write_notes'.format(datetime.now()))
with open ('notes.txt', 'w') as notes:
notes.write(submit_textarea())
'''
@app.route('/sh/<input>')
def bones(input):
data = popen('{}'.format(input)).read()
return ('<tt>{}</tt>'.format(data.replace('\n', '<br>')))
if __name__ == '__main__':
handler = RotatingFileHandler('foo.log', maxBytes=10000, backupCount=1)
handler.setLevel(logging.INFO)
app.logger.addHandler(handler)
app.run()
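# Hedged usage note: /sh/<input> pipes the URL path segment straight into a
# shell via os.popen, e.g. http://127.0.0.1:5000/sh/ls renders a directory
# listing; this is deliberately dangerous anywhere outside an isolated lab box.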
|
bsd-3-clause
| 404,746,768,136,102,460
| 30.136364
| 141
| 0.652301
| false
| 3.492347
| false
| false
| false
|
PurpleMyst/porcupine
|
porcupine/_logs.py
|
1
|
2465
|
import itertools
import logging
import os
import platform
import sys
if platform.system() == 'Windows': # noqa
import msvcrt
else: # noqa
import fcntl
from porcupine import dirs
def _lock(fileno):
"""Try to lock a file. Return True on success."""
# closing the file unlocks it, so we don't need to unlock here
if platform.system() == 'Windows':
try:
msvcrt.locking(fileno, msvcrt.LK_NBLCK, 10)
return True
except PermissionError:
return False
else:
try:
fcntl.lockf(fileno, fcntl.LOCK_EX | fcntl.LOCK_NB)
return True
# the docs recommend catching both of these
except (BlockingIOError, PermissionError):
return False
def _open_log_file():
"""Open a Porcupine log file.
Usually this opens and overwrites log.txt. If another Porcupine
process has it currently opened, this opens log1.txt instead, then
log2.txt and so on.
"""
# create an iterator 'log.txt', 'log2.txt', 'log3.txt', ...
filenames = itertools.chain(
['log.txt'],
map('log{}.txt'.format, itertools.count(start=2)),
)
for filename in filenames:
path = os.path.join(dirs.cachedir, filename)
# unfortunately there's not a mode that would open in write but
# not truncate like 'w' or seek to end like 'a'
fileno = os.open(path, os.O_WRONLY | os.O_CREAT, 0o644)
if _lock(fileno):
# now we can delete the old content, can't use os.truncate
# here because it doesn't exist on windows
file = open(fileno, 'w')
file.truncate(0)
return file
else:
os.close(fileno)
# FileHandler doesn't take already opened files and StreamHandler
# doesn't close the file :(
class _ClosingStreamHandler(logging.StreamHandler):
def close(self):
self.stream.close()
def setup(file=None):
if file is None:
handler = _ClosingStreamHandler(_open_log_file())
elif file in (sys.stdout, sys.stderr):
# somehow closing these files just feels wrong
handler = logging.StreamHandler(file)
else:
handler = _ClosingStreamHandler(file)
handler.setLevel(logging.DEBUG)
handler.setFormatter(logging.Formatter(
"[PID {} %(levelname)s] %(name)s: %(message)s".format(os.getpid())
))
logging.basicConfig(level=logging.DEBUG, handlers=[handler])
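# Hedged usage sketch (not from upstream): call setup() once at startup and
# then log through the stdlib as usual; passing sys.stderr keeps output on
# the console instead of a locked log file in the cache dir.
#   from porcupine import _logs
#   _logs.setup(sys.stderr)
#   logging.getLogger(__name__).info("porcupine started")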
|
mit
| -8,319,217,412,416,285,000
| 28.698795
| 74
| 0.625963
| false
| 3.975806
| false
| false
| false
|
malirod/pylua
|
pylua/validator.py
|
1
|
2300
|
# -*- coding: utf-8 -*-
import xml.etree.ElementTree as etree
from json import JSONDecoder
class Validator:
_errors = [(0, 'Ok'), (1, "Function not found"), (2, "Validation error")]
_error_index_ok = 0
_error_index_not_found = 1
_error_index_error = 2
def __init__(self):
self._schema = None
def load_schema_from_string(self, xml_string):
assert xml_string is not None
try:
self._schema = etree.fromstring(xml_string)
except etree.ParseError:
return False
return True
@staticmethod
def _validate_param(schema_param_name, schema_param_is_mandatory,
schema_param_type, params):
assert schema_param_name is not None
assert schema_param_is_mandatory is not None
assert schema_param_type is not None
params_obj = JSONDecoder().decode(params)
if params_obj.get(schema_param_name) is None:
return False
return True
def _validate(self, function_item, params):
# This is very simple validation, will work only with test data
schema_params = function_item.findall('param')
is_schema_params_empty = len(schema_params) == 0
if not is_schema_params_empty and params is None:
return self._errors[self._error_index_error]
if is_schema_params_empty and params is None:
return self._errors[self._error_index_ok]
for param in schema_params:
validated = self._validate_param(
param.get('name'),
param.get('mandatory'),
param.get('type'),
params)
if not validated:
return self._errors[self._error_index_error]
return self._errors[self._error_index_ok]
def validate(self, function_id, function_type, params=None):
assert function_id is not None
assert function_type is not None
assert self._schema is not None
for function_item in self._schema.findall('function'):
if (function_id == function_item.get('id')
and function_type == function_item.get('type')):
return self._validate(function_item, params)
return self._errors[self._error_index_not_found]
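# Hedged usage sketch (the schema layout is inferred from the parsing code
# above; tag names and ids are illustrative):
if __name__ == '__main__':
    v = Validator()
    assert v.load_schema_from_string(
        '<schema>'
        '<function id="echo" type="request">'
        '<param name="text" mandatory="true" type="string"/>'
        '</function>'
        '</schema>')
    assert v.validate('echo', 'request', '{"text": "hi"}') == (0, 'Ok')
    assert v.validate('nope', 'request') == (1, 'Function not found')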
|
mit
| -6,456,706,902,619,544,000
| 35.507937
| 77
| 0.600435
| false
| 4.174229
| false
| false
| false
|
gjover/Lima_subtree
|
applications/tango/camera/Maxipix.py
|
1
|
25664
|
############################################################################
# This file is part of LImA, a Library for Image Acquisition
#
# Copyright (C) : 2009-2011
# European Synchrotron Radiation Facility
# BP 220, Grenoble 38043
# FRANCE
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
############################################################################
#=============================================================================
#
# file : Maxipix.py
#
# description : Python source for the Maxipix and its commands.
# The class is derived from Device. It represents the
# CORBA servant object which will be accessed from the
# network. All commands which can be executed on the
# Pilatus are implemented in this file.
#
# project : TANGO Device Server
#
# copyleft : European Synchrotron Radiation Facility
# BP 220, Grenoble 38043
# FRANCE
#
#=============================================================================
# (c) - Bliss - ESRF
#=============================================================================
#
import PyTango
import sys, types, os, time
from Lima import Core
from Lima.Maxipix.MpxCommon import MpxError
class Maxipix(PyTango.Device_4Impl):
Core.DEB_CLASS(Core.DebModApplication, 'LimaCCDs')
#------------------------------------------------------------------
# Device constructor
#------------------------------------------------------------------
def __init__(self,*args) :
PyTango.Device_4Impl.__init__(self,*args)
self.init_device()
#------------------------------------------------------------------
# Device destructor
#------------------------------------------------------------------
def delete_device(self):
pass
#------------------------------------------------------------------
# Device initialization
#------------------------------------------------------------------
@Core.DEB_MEMBER_FUNCT
def init_device(self):
self.set_state(PyTango.DevState.ON)
self.get_device_properties(self.get_device_class())
_PriamAcq = _MaxipixAcq.getPriamAcq()
self.__SignalLevel = {'LOW_FALL': _PriamAcq.LOW_FALL,\
'HIGH_RISE': _PriamAcq.HIGH_RISE}
self.__ReadyMode = {'EXPOSURE': _PriamAcq.EXPOSURE,\
'EXPOSURE_READOUT': _PriamAcq.EXPOSURE_READOUT}
self.__GateMode = {'INACTIVE': _PriamAcq.INACTIVE,\
'ACTIVE': _PriamAcq.ACTIVE}
self.__FillMode = _MaxipixAcq.mpxFillModes
self.__dacname = "thl"
#Init default Path
if self.config_path:
try:
_MaxipixAcq.setPath(self.config_path)
except MpxError as error:
PyTango.Except.throw_exception('DevFailed',\
'MpxError: %s'%(error),\
'Maxipix Class')
#Load default config
if self.config_name:
try:
_MaxipixAcq.loadConfig(self.config_name)
except MpxError as error:
PyTango.Except.throw_exception('DevFailed',\
'MpxError: %s'%(error),\
'Maxipix Class')
#set the priamAcq attributes with properties if any
for attName in ['fill_mode','ready_mode','ready_level','gate_mode','gate_level','shutter_level','trigger_level'] :
self.__setMaxipixAttr(attName,None)
#==================================================================
#
# Some Utils
#
#==================================================================
def __getDictKey(self,dict, value):
try:
ind = dict.values().index(value)
except ValueError:
return None
return dict.keys()[ind]
def __getDictValue(self,dict, key):
try:
value = dict[key.upper()]
except KeyError:
return None
return value
def __getMaxipixAttr(self,attr_name):
_PriamAcq = _MaxipixAcq.getPriamAcq()
name = ''.join([name.capitalize() for name in attr_name.split('_')])
attr = getattr(self,attr_name)
if attr_name.count('level'):
dictInstance = self.__SignalLevel
else:
dictInstance = getattr(self,'_Maxipix__%s' % name)
if attr_name.count('fill_mode'): getMethod = getattr(_MaxipixAcq,'get%s' % name)
else: getMethod = getattr(_PriamAcq,'get%s' % name)
setattr(self,attr_name, self.__getDictKey(dictInstance,getMethod()))
return getattr(self,attr_name)
def __getValueList(self, attr_name):
name = ''.join([name.capitalize() for name in attr_name.split('_')])
if attr_name.count('level'):
valueList = self.__SignalLevel.keys()
elif attr_name.count('mode'):
valueList = getattr(self,'_Maxipix__%s' % name).keys()
elif attr_name.count('config_name'):
valueList = self.__getConfigNameList()
else:
valueList = []
return valueList
def __setMaxipixAttr(self,attr_name, key=None):
_PriamAcq = _MaxipixAcq.getPriamAcq()
name = ''.join([name.capitalize() for name in attr_name.split('_')])
attr = getattr(self,attr_name)
if attr_name.count('level'):
dictInstance = self.__SignalLevel
else:
dictInstance = getattr(self,'_Maxipix__%s' % name)
if attr_name.count('fill_mode'):
getMethod = getattr(_MaxipixAcq,'get%s' % name)
setMethod = getattr(_MaxipixAcq,'set%s' % name)
else:
getMethod = getattr(_PriamAcq,'get%s' % name)
setMethod = getattr(_PriamAcq,'set%s' % name)
if key != None:
# just set a new value for this attribute
attrValue = self.__getDictValue(dictInstance,key)
if attrValue == None:
PyTango.Except.throw_exception('DevFailed',\
'Wrong value %s: %s'%(attr_name,key),\
'Maxipix Class')
else:
setMethod(attrValue)
attrNewKey = key
else:
# here set attribute from the property value
# if the property is missing (=[]) then initialize the attribute by reading the hardware
if attr == []:
attrNewKey = self.__getDictKey(dictInstance,getMethod())
elif type(attr) is not types.StringType:
PyTango.Except.throw_exception('WrongData',\
'Wrong value %s: %s'%(attr_name,attr),\
'Maxipix Class')
else:
attrValue = self.__getDictValue(dictInstance,attr)
                if attrValue is None:
PyTango.Except.throw_exception('WrongData',\
'Wrong value %s: %s'%(attr_name,attr),\
'Maxipix Class')
else:
setMethod(attrValue)
attrNewKey = attr
# set the new attribute value as upper string
setattr(self,attr_name, attrNewKey.upper())
def __getConfigNameList(self):
spath= os.path.normpath(self.config_path)
if not os.path.isdir(spath):
PyTango.Except.throw_exception('WrongData',\
'Invalid path: %s'%(self.config_path),\
'Maxipix Class')
else:
dirList = os.listdir(spath)
fileDict={}
fileList=[]
            for file in dirList:
                if file.endswith('.cfg'):
                    filePath = spath + '/' + file
                    fileStat = os.stat(filePath)
                    modifiedTime = fileStat.st_mtime
                    # slice off the '.cfg' extension (str.strip('.cfg')
                    # would remove any leading/trailing '.', 'c', 'f' or
                    # 'g' characters and mangle names such as 'config.cfg')
                    fileDict[modifiedTime] = file[:-len('.cfg')]
if fileDict:
                timeList = sorted(fileDict.keys())
for mTime in timeList:
fileList.append(fileDict[mTime])
#fileList.append(time.ctime(mTime))
return fileList
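    # Note (illustrative, not in the original source): the list above holds
    # the '.cfg' basenames ordered by modification time, oldest first, so a
    # directory with a.cfg (old) and b.cfg (new) yields ['a', 'b'].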
#==================================================================
#
# Maxipix read/write attribute methods
#
#==================================================================
## @brief Read the current dac name
#
def read_dac_name(self,attr) :
attr.set_value(self.__dacname)
## @brief Write dac name
#
def write_dac_name(self,attr) :
data = attr.get_write_value()
dacs = _MaxipixAcq.mpxDacs
if data not in dacs.getListKeys():
PyTango.Except.throw_exception('WrongData',\
'Wrong value %s: %s'%('dac_name',data),\
'Maxipix Class')
        # store the full dac name
        self.__dacname = data
## @brief Read the possible dac names
#
def read_dac_possible(self,attr) :
dacs = _MaxipixAcq.mpxDacs
data = dacs.getListKeys()
attr.set_value(data)
# Read the chip dac value, named by the dac_name attribute
# For multichips only a unique DAC is valid for all the chips
def read_dac_value(self,attr) :
data = 0
dacs = _MaxipixAcq.mpxDacs
data = dacs.getOneDac(0,self.__dacname)
        # if all the chips do not share the same dac value, None is
        # returned; typically this is the case for thl
        if data is None:
            data = -1
attr.set_value(data)
## @brief Write a DAC value of the named dac_name attribute
#
def write_dac_value(self,attr) :
data = attr.get_write_value()
dacs = _MaxipixAcq.mpxDacs
dacs.setOneDac(0,self.__dacname, data)
dacs.applyChipDacs(0)
## @brief Read threshold noise of a maxipix chips
#
def read_threshold_noise(self,attr) :
dac = _MaxipixAcq.mpxDacs
thlNoises = dac.getThlNoise(0)
attr.set_value(thlNoises,len(thlNoises))
## @brief Write threshold noise of a maxipix chips
#
def write_threshold_noise(self,attr) :
data = attr.get_write_value()
dacs = _MaxipixAcq.mpxDacs
dacs.setThlNoise(0,data)
dacs.applyChipDacs(0)
## @brief Read the global threshold
#
def read_threshold(self,attr) :
dacs = _MaxipixAcq.mpxDacs
thl = dacs.getThl()
if thl is None: thl = -1
attr.set_value(thl)
## @brief Write the global threshold
#
def write_threshold(self,attr) :
data = attr.get_write_value()
dacs = _MaxipixAcq.mpxDacs
dacs.setThl(data)
dacs.applyChipDacs(0)
## @brief Read the energy step
#
# energy step is the coef which link the global threshold with energy
# threshold
#
def read_energy_calibration(self,attr) :
dacs = _MaxipixAcq.mpxDacs
        values = dacs.getECalibration()
attr.set_value(values,len(values))
## @brief Write the energy step
#
def write_energy_calibration(self,attr) :
data = attr.get_write_value()
dacs = _MaxipixAcq.mpxDacs
dacs.setECalibration(data)
## @brief Read the energy threshold
#
# energy_threshold = energy_step * threshold (global)
def read_energy_threshold(self,attr) :
dacs= _MaxipixAcq.mpxDacs
value = dacs.getEThl()
if value is None: value = -1
attr.set_value(value)
## @brief Write the energy threshold
#
def write_energy_threshold(self,attr) :
data = attr.get_write_value()
dacs = _MaxipixAcq.mpxDacs
dacs.setEThl(data)
dacs.applyChipDacs(0)
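    # Worked example (assumed numbers, using the relation documented in the
    # attribute list below: ethl = (thl - e0thl) * estep): with
    # energy_calibration = [6000, 0.05], a global threshold thl = 6200
    # gives ethl = (6200 - 6000) * 0.05 = 10.0 keV, and writing
    # energy_threshold = 10.0 maps back to thl = 6200.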
## @brief read the config name
#
def read_config_name(self,attr) :
cfg_name = ""
if self.config_name:
cfg_name = self.config_name
attr.set_value(cfg_name)
## @brief Write the config name and load it
#
def write_config_name(self,attr) :
data = attr.get_write_value()
_MaxipixAcq.loadConfig(data)
self.config_name = data
## @brief read the config path
#
def read_config_path(self,attr) :
cfg_path = ""
if self.config_path:
cfg_path = self.config_path
attr.set_value(cfg_path)
## @brief Write the config path
#
def write_config_path(self,attr) :
data = attr.get_write_value()
_MaxipixAcq.setPath(data)
self.config_path = data
## @brief read the fill mode
#
def read_fill_mode(self,attr) :
fill_mode = self.__getMaxipixAttr('fill_mode')
attr.set_value(fill_mode)
## @brief Write the gap fill mode
#
def write_fill_mode(self,attr) :
data = attr.get_write_value()
self.__setMaxipixAttr('fill_mode',data)
## @brief read the board id
#
def read_espia_dev_nb(self,attr) :
espia_dev_nb = 0
if self.espia_dev_nb:
espia_dev_nb = self.espia_dev_nb
attr.set_value(espia_dev_nb)
## @brief read the ready_mode
# EXPOSURE-0, EXPOSURE_READOUT-1
def read_ready_mode(self,attr) :
ready_mode = self.__getMaxipixAttr('ready_mode')
attr.set_value(ready_mode)
## @brief Write the ready_mode
# EXPOSURE-0, EXPOSURE_READOUT-1
def write_ready_mode(self,attr) :
data = attr.get_write_value()
self.__setMaxipixAttr('ready_mode',data)
## @brief read the ready_level
# LOW_FALL-0, HIGH_RISE-1
def read_ready_level(self,attr) :
ready_level = self.__getMaxipixAttr('ready_level')
attr.set_value(ready_level)
## @brief Write the ready_level
# LOW_FALL-0, HIGH_RISE-1
def write_ready_level(self,attr) :
data = attr.get_write_value()
self.__setMaxipixAttr('ready_level',data)
## @brief read the shutter_level
# LOW_FALL-0, HIGH_RISE-1
def read_shutter_level(self,attr) :
shutter_level = self.__getMaxipixAttr('shutter_level')
attr.set_value(shutter_level)
## @brief Write the shutter_level
# LOW_FALL-0, HIGH_RISE-1
def write_shutter_level(self,attr) :
data = attr.get_write_value()
self.__setMaxipixAttr('shutter_level',data)
## @brief read the gate_mode
# FRAME-0, SEQUENCE-1
def read_gate_mode(self,attr) :
gate_mode = self.__getMaxipixAttr('gate_mode')
attr.set_value(gate_mode)
## @brief Write the gate_mode
# FRAME-0, SEQUENCE-1
def write_gate_mode(self,attr) :
data = attr.get_write_value()
self.__setMaxipixAttr('gate_mode',data)
## @brief read the gate_level
# LOW_FALL-0, HIGH_RISE-1
def read_gate_level(self,attr) :
gate_level = self.__getMaxipixAttr('gate_level')
attr.set_value(gate_level)
## @brief Write the gate_level
# LOW_FALL-0, HIGH_RISE-1
def write_gate_level(self,attr) :
data = attr.get_write_value()
self.__setMaxipixAttr('gate_level',data)
## @brief read the trigger_level
# LOW_FALL-0, HIGH_RISE-1
def read_trigger_level(self,attr) :
trigger_level = self.__getMaxipixAttr('trigger_level')
attr.set_value(trigger_level)
## @brief Write the trigger_level
# LOW_FALL-0, HIGH_RISE-1
def write_trigger_level(self,attr) :
data = attr.get_write_value()
self.__setMaxipixAttr('trigger_level',data)
#==================================================================
#
# Maxipix command methods
#
#==================================================================
#------------------------------------------------------------------
# getAttrStringValueList command:
#
# Description: return a list of authorized values if any
# argout: DevVarStringArray
#------------------------------------------------------------------
@Core.DEB_MEMBER_FUNCT
def getAttrStringValueList(self, attr_name):
valueList = self.__getValueList(attr_name)
return valueList
#------------------------------------------------------------------
# setDebugFlags command:
#
    # Description: Set the debug flags
    # argin: DevULong   debug flags in HEX format
#------------------------------------------------------------------
@Core.DEB_MEMBER_FUNCT
def setDebugFlags(self, deb_flags):
deb_flags &= 0xffffffff
deb.Param('Setting debug flags: 0x%08x' % deb_flags)
Core.DebParams.setTypeFlags((deb_flags >> 16) & 0xff)
Core.DebParams.setModuleFlags((deb_flags >> 0) & 0xffff)
deb.Trace('FormatFlags: %s' % Core.DebParams.getFormatFlagsNameList())
deb.Trace('TypeFlags: %s' % Core.DebParams.getTypeFlagsNameList())
deb.Trace('ModuleFlags: %s' % Core.DebParams.getModuleFlagsNameList())
#------------------------------------------------------------------
# getDebugFlags command:
#
    # Description: Get the debug flags
    # argout: DevULong   debug flags in HEX format
#------------------------------------------------------------------
@Core.DEB_MEMBER_FUNCT
def getDebugFlags(self):
deb.Trace('FormatFlags: %s' % Core.DebParams.getFormatFlagsNameList())
deb.Trace('TypeFlags: %s' % Core.DebParams.getTypeFlagsNameList())
deb.Trace('ModuleFlags: %s' % Core.DebParams.getModuleFlagsNameList())
deb_flags = (((Core.DebParams.getTypeFlags() & 0xff) << 16) |
((Core.DebParams.getModuleFlags() & 0xffff) << 0))
deb_flags &= 0xffffffff
deb.Return('Getting debug flags: 0x%08x' % deb_flags)
return deb_flags
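# Minimal sketch (not part of the original server) of the bit layout used by
# get/setDebugFlags above: Core type flags occupy bits 16-23 and module flags
# occupy bits 0-15 of the 32-bit word.
def _pack_debug_flags(type_flags, module_flags):
    # hypothetical helper, equivalent to the packing in getDebugFlags
    return (((type_flags & 0xff) << 16) |
            ((module_flags & 0xffff) << 0)) & 0xffffffff
# e.g. _pack_debug_flags(0x03, 0x0102) == 0x00030102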
class MaxipixClass(PyTango.DeviceClass):
class_property_list = {}
device_property_list = {
'espia_dev_nb':
[PyTango.DevShort,
"Espia board device number",[]],
'config_path':
[PyTango.DevString,
"Path where configuration files are",[]],
'config_name':
[PyTango.DevString,
"The default configuration loaded",[]],
'fill_mode':
[PyTango.DevString,
"The default configuration loaded",[]],
'ready_level':
[PyTango.DevString,
"The ready output signal level",[]],
'gate_level':
[PyTango.DevString,
"The gate output signal level",[]],
'shutter_level':
[PyTango.DevString,
"The shutter output signal level",[]],
'trigger_level':
[PyTango.DevString,
"The trigger output signal level",[]],
'ready_mode':
[PyTango.DevString,
"The ready output signal level",[]],
'gate_mode':
[PyTango.DevString,
"The gate output signal level",[]],
}
cmd_list = {
'getAttrStringValueList':
[[PyTango.DevString, "Attribute name"],
[PyTango.DevVarStringArray, "Authorized String value list"]],
'getDebugFlags':
[[PyTango.DevVoid, ""],
[PyTango.DevULong, "Debug flag in HEX format"]],
'setDebugFlags':
[[PyTango.DevULong, "Debug flag in HEX format"],
[PyTango.DevVoid, ""]],
}
attr_list = {
'threshold_noise':
[[PyTango.DevLong,
PyTango.SPECTRUM,
PyTango.READ_WRITE,5],
{
'label':"Threshold (thlow) noise of chips",
'unit':"N/A",
'format':"%6d",
'description':"Threshold (thlow) noise of the chip(s)",
}],
'threshold':
[[PyTango.DevLong,
PyTango.SCALAR,
PyTango.READ_WRITE],
{
'label':"Global Threshold ",
'unit':"N/A",
'format':"%6d",
'description':"The global threshold, apply the same offset on all the chips",
}],
'energy_calibration':
[[PyTango.DevDouble,
PyTango.SPECTRUM,
PyTango.READ_WRITE,2],
{
'label':"Energy calibration",
'unit':"N/A",
'format':"%5.2f",
'description':"[0] = e0thl, [1] = estep: ethl=(thl-e0thl)*estep",
}],
'energy_threshold':
[[PyTango.DevDouble,
PyTango.SCALAR,
PyTango.READ_WRITE],
{
'label':"Energy thresholds",
'unit':"keV",
'format':"%5.2f",
'description':"Threshold in energy (keV)",
}],
'config_name':
[[PyTango.DevString,
PyTango.SCALAR,
PyTango.READ_WRITE],
{
'label':"Configuration name",
'unit':"N/A",
'format':"",
'description':"root name of the configuration files",
}],
'config_path':
[[PyTango.DevString,
PyTango.SCALAR,
PyTango.READ_WRITE],
{
'label':"Configuration directory path",
'unit':"N/A",
'format':"",
'description':"Path of the configuration directory",
}],
'fill_mode':
[[PyTango.DevString,
PyTango.SCALAR,
PyTango.READ_WRITE],
{
'label':"Fill mode",
'unit':"enum.",
'format':"",
'description':"Between chip filling mode",
}],
'espia_dev_nb':
[[PyTango.DevShort,
PyTango.SCALAR,
PyTango.READ],
{
'label':"Espia board number",
'unit':"number",
'format':"",
'description':"The Espia board device number",
}],
'ready_mode':
[[PyTango.DevString,
PyTango.SCALAR,
PyTango.READ_WRITE],
{
'label':"Ready output mode",
'unit':"enum.",
'format':"",
'description':"Mode of the Ready output",
}],
'ready_level':
[[PyTango.DevString,
PyTango.SCALAR,
PyTango.READ_WRITE],
{
'label':"Ready output level",
'unit':"enum.",
'format':"",
'description':"The level logic of the Ready output",
}],
'shutter_level':
[[PyTango.DevString,
PyTango.SCALAR,
PyTango.READ_WRITE],
{
'label':"Shutter output level",
'unit':"enum.",
'format':"",
'description':"The level logic of the Shutter output",
}],
'gate_mode':
[[PyTango.DevString,
PyTango.SCALAR,
PyTango.READ_WRITE],
{
'label':"The Gate input mode",
'unit':"enum.",
'format':"",
'description':"",
}],
'gate_level':
[[PyTango.DevString,
PyTango.SCALAR,
PyTango.READ_WRITE],
{
'label':"",
'unit':"",
'format':"",
'description':"",
}],
'trigger_level':
[[PyTango.DevString,
PyTango.SCALAR,
PyTango.READ_WRITE],
{
'label':"",
'unit':"",
'format':"",
'description':"",
}],
'dac_possible':
[[PyTango.DevString,
PyTango.SPECTRUM,
PyTango.READ,17],
{
'label':"",
'unit':"",
'format':"",
'description':"",
}],
'dac_name':
[[PyTango.DevString,
PyTango.SCALAR,
PyTango.READ_WRITE],
{
'label':"",
'unit':"",
'format':"",
'description':"",
}],
'dac_value':
[[PyTango.DevLong,
PyTango.SCALAR,
PyTango.READ_WRITE],
{
'label':"",
'unit':"",
'format':"%xd",
'description':"",
}],
}
def __init__(self,name) :
PyTango.DeviceClass.__init__(self,name)
self.set_type(name)
#----------------------------------------------------------------------------
# Plugins
#----------------------------------------------------------------------------
from Lima.Maxipix.MpxAcq import MpxAcq
_MaxipixAcq = None
def get_control(espia_dev_nb = '0',**keys) :
#properties are passed here as string
global _MaxipixAcq
if _MaxipixAcq is None:
_MaxipixAcq = MpxAcq(int(espia_dev_nb))
return _MaxipixAcq.getControl()
def close_interface() :
global _MaxipixAcq
_MaxipixAcq = None
def get_tango_specific_class_n_device():
return MaxipixClass,Maxipix
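# Usage sketch (assumed Lima Tango launcher context, calls illustrative):
#   control = get_control(espia_dev_nb='0')   # builds the shared MpxAcq once
#   klass, device = get_tango_specific_class_n_device()
# The generic Lima server is expected to register (klass, device) itself and
# to call close_interface() on shutdown.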
|
gpl-3.0
| -6,498,890,244,258,648,000
| 31.944801
| 122
| 0.501364
| false
| 3.85519
| true
| false
| false
|
earwig/earwigbot
|
earwigbot/commands/help.py
|
1
|
3230
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2015 Ben Kurtovic <ben.kurtovic@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from platform import python_version
import re
from earwigbot import __version__
from earwigbot.commands import Command
class Help(Command):
"""Displays information about the bot."""
name = "help"
commands = ["help", "version"]
def check(self, data):
if data.is_command:
if data.command in self.commands:
return True
if not data.command and data.trigger == data.my_nick:
return True
return False
def process(self, data):
if not data.command:
self.do_hello(data)
elif data.command == "version":
self.do_version(data)
elif data.args:
self.do_command_help(data)
else:
self.do_main_help(data)
def do_main_help(self, data):
"""Give the user a general help message with a list of all commands."""
msg = "Hi, I'm a bot! I have {0} commands loaded: {1}. You can get help for any command with '!help <command>'."
cmnds = sorted([cmnd.name for cmnd in self.bot.commands])
msg = msg.format(len(cmnds), ', '.join(cmnds))
self.reply(data, msg)
def do_command_help(self, data):
"""Give the user help for a specific command."""
target = data.args[0]
for command in self.bot.commands:
if command.name == target or target in command.commands:
if command.__doc__:
doc = command.__doc__.replace("\n", "")
doc = re.sub(r"\s\s+", " ", doc)
msg = 'Help for command \x0303{0}\x0F: "{1}"'
self.reply(data, msg.format(target, doc))
return
msg = "Sorry, no help for \x0303{0}\x0F.".format(target)
self.reply(data, msg)
def do_hello(self, data):
self.say(data.chan, "Yes, {0}?".format(data.nick))
def do_version(self, data):
vers = "EarwigBot v{bot} on Python {python}: https://github.com/earwig/earwigbot"
self.reply(data, vers.format(bot=__version__, python=python_version()))
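# Illustrative sketch (not from the original file): how do_command_help()
# flattens a multi-line command docstring into a single help line:
#   doc = "Displays information\n    about the bot."
#   re.sub(r"\s\s+", " ", doc.replace("\n", ""))
#   -> 'Displays information about the bot.'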
|
mit
| -3,154,985,194,490,449,000
| 39.375
| 120
| 0.635913
| false
| 3.882212
| false
| false
| false
|
qPCR4vir/orange3
|
Orange/widgets/classify/owclassificationtree.py
|
1
|
2938
|
from collections import OrderedDict
from Orange.data import Table
from Orange.classification.tree import TreeLearner
from Orange.widgets import gui
from Orange.widgets.settings import Setting
from Orange.widgets.utils.owlearnerwidget import OWBaseLearner
class OWClassificationTree(OWBaseLearner):
name = "Classification Tree"
icon = "icons/ClassificationTree.svg"
description = "Classification tree algorithm with forward pruning."
priority = 30
LEARNER = TreeLearner
attribute_score = Setting(0)
limit_min_leaf = Setting(True)
min_leaf = Setting(2)
limit_min_internal = Setting(True)
min_internal = Setting(5)
limit_depth = Setting(True)
max_depth = Setting(100)
scores = (("Entropy", "entropy"), ("Gini Index", "gini"))
def add_main_layout(self):
gui.comboBox(self.controlArea, self, "attribute_score",
box='Feature Selection',
items=[name for name, _ in self.scores],
callback=self.settings_changed)
box = gui.vBox(self.controlArea, 'Pruning')
gui.spin(box, self, "min_leaf", 1, 1000,
label="Min. instances in leaves: ", checked="limit_min_leaf",
callback=self.settings_changed)
gui.spin(box, self, "min_internal", 1, 1000,
label="Stop splitting nodes with less instances than: ",
checked="limit_min_internal",
callback=self.settings_changed)
gui.spin(box, self, "max_depth", 1, 1000,
label="Limit the depth to: ", checked="limit_depth",
callback=self.settings_changed)
def create_learner(self):
return self.LEARNER(
criterion=self.scores[self.attribute_score][1],
max_depth=self.max_depth if self.limit_depth else None,
min_samples_split=(self.min_internal if self.limit_min_internal
else 2),
min_samples_leaf=(self.min_leaf if self.limit_min_leaf else 1),
preprocessors=self.preprocessors
)
def get_learner_parameters(self):
from Orange.canvas.report import plural_w
items = OrderedDict()
items["Split selection"] = self.scores[self.attribute_score][0]
items["Pruning"] = ", ".join(s for s, c in (
(plural_w("at least {number} instance{s} in leaves", self.min_leaf),
self.limit_min_leaf),
(plural_w("at least {number} instance{s} in internal nodes", self.min_internal),
self.limit_min_internal),
("maximum depth {}".format(self.max_depth), self.limit_depth)) if c) or "None"
return items
if __name__ == "__main__":
import sys
from PyQt4.QtGui import QApplication
a = QApplication(sys.argv)
ow = OWClassificationTree()
d = Table('iris')
ow.set_data(d)
ow.show()
a.exec_()
ow.saveSettings()
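# A minimal sketch (assumption: used outside the GUI) of the learner this
# widget builds with its default settings, mirroring create_learner() above:
#   learner = TreeLearner(criterion="entropy", max_depth=100,
#                         min_samples_split=5, min_samples_leaf=2)
#   model = learner(Table("iris"))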
|
bsd-2-clause
| 3,757,405,505,065,211,000
| 36.666667
| 92
| 0.614364
| false
| 3.917333
| false
| false
| false
|
saltastro/pysalt
|
saltspec/InterIdentify.py
|
1
|
46552
|
# Copyright (c) 2009, South African Astronomical Observatory (SAAO) #
# All rights reserved. See LICENSE for more details #
"""INTERIDENTIFY provides an interactive method for identifying
lines in an arc image. The tasks displays the full image, a
line extracted from the image, and residuals to the fit of that line.
The task will display the total image so the user can extract the lines
to be fit. Or the user can automate the process so only certain lines are
fit by the user. On the next tab, the task displays the arc line
and the fit to the line including what lines have been detected and
are being used for the fit. Finally the task displays the residual in
the fit and the user can select different options to be displayed.
Author Version Date
-----------------------------------------------
S. M. Crawford (SAAO) 1.0 10 Oct 2009
TODO
----
LIMITATIONS
-----------
"""
# Ensure Python 2.5 compatibility
from __future__ import with_statement
# General imports
import os
import sys
import copy
import numpy as np
import pyfits
from pyraf import iraf
from pyraf.iraf import pysalt
# Gui library imports
from PyQt4 import QtGui, QtCore
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT
# Salt imports
import saltsafeio
from saltgui import ImageDisplay, MplCanvas
from salterror import SaltIOError
from PySpectrograph.Spectra import Spectrum, apext
import WavelengthSolution
import spectools as st
import AutoIdentify as ai
from spectools import SALTSpecError
class InterIdentifyWindow(QtGui.QMainWindow):
"""Main application window."""
def __init__(self, xarr, specarr, slines, sfluxes, ws, hmin=150, wmin=400, mdiff=20,
filename=None, res=2.0, dres=0.1, dc=20, ndstep=20, sigma=5, smooth=0, niter=5, istart=None,
nrows=1, rstep=100, method='Zeropoint', ivar=None, cmap='gray', scale='zscale', contrast=1.0,
subback=0, textcolor='green', preprocess=False, log=None, verbose=True):
"""Default constructor."""
# set up the variables
if istart is None:
self.y1 = int(0.5 * len(specarr))
else:
self.y1 = istart
self.y2 = self.y1 + nrows
self.specarr = specarr
self.xarr = xarr
self.ivar = ivar
self.slines = slines
self.sfluxes = sfluxes
self.hmin = hmin
self.wmin = wmin
self.ws = ws
self.res = res
self.dres = dres
self.mdiff = mdiff
self.sigma = sigma
self.niter = int(niter)
self.nrows = nrows
self.rstep = rstep
self.dc = dc
self.ndstep = ndstep
self.method = method
self.cmap = cmap
self.scale = scale
self.contrast = contrast
self.smooth = smooth
self.subback = subback
self.filename = filename
self.ImageSolution = {}
self.textcolor = textcolor
self.preprocess = preprocess
self.log = log
self.verbose = verbose
# Setup widget
QtGui.QMainWindow.__init__(self)
# Set main widget
self.main = QtGui.QWidget(self)
# Set window title
self.setWindowTitle("InterIdentify")
# create the Image page
self.imagePage = imageWidget(self.specarr, y1=self.y1, y2=self.y2, hmin=self.hmin, wmin=self.wmin, cmap=self.cmap,
rstep=self.rstep, name=self.filename, scale=self.scale, contrast=self.contrast, log=self.log)
# set up the arc page
self.farr = apext.makeflat(self.specarr, self.y1, self.y2)
self.farr = st.flatspectrum(self.xarr, self.farr, order=self.subback)
# set up variables
self.arcdisplay = ArcDisplay(xarr, self.farr, slines, sfluxes, self.ws, specarr=self.specarr,
res=self.res, dres=self.dres, dc=self.dc, ndstep=self.ndstep, xp=[], wp=[],
method=self.method, smooth=self.smooth, niter=self.niter, mdiff=self.mdiff,
sigma=self.sigma, textcolor=self.textcolor, preprocess=self.preprocess,
log=self.log, verbose=self.verbose)
self.arcPage = arcWidget(
self.arcdisplay,
hmin=hmin,
wmin=wmin,
y1=self.y1,
y2=self.y2,
name=self.filename)
# set up the residual page
self.errPage = errWidget(self.arcdisplay, hmin=hmin, wmin=wmin)
# create the tabs
self.tabWidget = QtGui.QTabWidget()
self.tabWidget.addTab(self.imagePage, 'Image')
self.tabWidget.addTab(self.arcPage, 'Arc')
self.tabWidget.addTab(self.errPage, 'Residual')
# layout the widgets
mainLayout = QtGui.QVBoxLayout(self.main)
mainLayout.addWidget(self.tabWidget)
# self.setLayout(mainLayout)
# Set focus to main widget
# self.main.setFocus()
# Set the main widget as the central widget
self.setCentralWidget(self.main)
# Destroy widget on close
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
# Close when config dialog is closed
# self.connect(self.conf, QtCore.SIGNAL('destroyed()'),
# self, QtCore.SLOT('close()'))
self.connect(self.tabWidget, QtCore.SIGNAL('currentChanged(int)'),
self.currentChanged)
self.connect(self.imagePage, QtCore.SIGNAL('regionChange(int,int)'),
self.regionChange)
self.connect(self.imagePage, QtCore.SIGNAL('runauto(int, int, int)'),
self.runauto)
self.connect(self.arcPage, QtCore.SIGNAL('savews()'), self.saveWS)
self.connect(self.arcdisplay, QtCore.SIGNAL('quit()'), self.close)
    def keyPressEvent(self, event):
        # print "Key Pressed:", event.key()
        # QKeyEvent.key() returns an int, so compare against Qt.Key_Q
        if event.key() == QtCore.Qt.Key_Q:
            self.close()
def currentChanged(self, event):
# print event
pass
def regionChange(self, y1, y2):
self.y1 = y1
self.y2 = y2
self.farr = apext.makeflat(self.specarr, self.y1, self.y2)
self.farr = st.flatspectrum(self.xarr, self.farr, order=self.subback)
# set up variables
self.ws = self.newWS(0.5 * (self.y1 + self.y2))
self.arcdisplay = ArcDisplay(
self.xarr,
self.farr,
self.slines,
self.sfluxes,
self.ws,
specarr=self.specarr,
res=self.res,
dres=self.dres,
smooth=self.smooth,
niter=self.niter,
sigma=self.sigma,
xp=[],
wp=[],
textcolor=self.textcolor,
preprocess=self.preprocess,
log=self.log,
verbose=self.verbose)
self.arcPage = arcWidget(
self.arcdisplay,
hmin=self.hmin,
wmin=self.wmin,
y1=self.y1,
y2=self.y2)
self.connect(self.arcPage, QtCore.SIGNAL('savews()'), self.saveWS)
# set up the residual page
self.errPage = errWidget(
self.arcdisplay,
hmin=self.hmin,
wmin=self.wmin)
# reset the pages
self.tabWidget.removeTab(2)
self.tabWidget.removeTab(1)
self.tabWidget.insertTab(1, self.arcPage, 'Arc')
self.tabWidget.insertTab(2, self.errPage, 'Residual')
def saveWS(self):
self.ws = self.arcdisplay.ws
value = 0.0
k = 0.5 * (self.y1 + self.y2)
xp = np.array(self.arcdisplay.xp)
wp = np.array(self.arcdisplay.wp)
        if len(xp) > 0:
w = self.arcdisplay.ws.value(xp)
value = (wp - w).std()
if self.log is not None:
msg = 'Saving WS value for row %i with rms=%f for %i lines' % (
k, value, len(self.arcdisplay.wp))
self.log.message(msg)
# create a new wavelength solution
nws = copy.deepcopy(self.ws)
        if len(xp) > 0:
nws = WavelengthSolution.WavelengthSolution(
self.ws.x_arr,
self.ws.w_arr,
order=self.ws.order,
function=self.ws.function)
nws.func.func.domain = self.ws.func.func.domain
try:
nws.fit()
except Exception as e:
if self.log is not None:
self.log.warning(
"Unable to save wavelength solution because %s" %
e)
return
self.ImageSolution[k] = nws
# for k in self.ImageSolution: print k,self.ImageSolution[k].coef
def newWS(self, y):
"""Determine the WS closest to the values given by y1 and y2"""
keys = np.array(self.ImageSolution.keys())
try:
i = abs(keys - y).argmin()
ws = self.ImageSolution[keys[i]]
nws = WavelengthSolution.WavelengthSolution(
ws.x_arr,
ws.w_arr,
order=ws.order,
function=ws.function)
            nws.func.func.domain = ws.func.func.domain
nws.fit()
return nws
        except Exception:
return self.ws
def runauto(self, istart, nrows, rstep):
""" Autoidentify the rest of the lines and produce the image solution"""
self.ImageSolution = self.arcdisplay.autoidentify(
istart=istart,
nrows=nrows,
rstep=rstep,
oneline=False)
class imageWidget(QtGui.QWidget):
def __init__(self, imarr, y1=None, y2=None, nrows=1, rstep=100, hmin=150, wmin=400,
name=None, cmap='Gray', scale='zscale', contrast=0.1, log=None, parent=None):
super(imageWidget, self).__init__(parent)
self.y1 = y1
self.y2 = y2
self.x1 = 0
self.x2 = len(imarr[0])
self.nrows = nrows
self.rstep = rstep
self.log = log
# Add FITS display widget with mouse interaction and overplotting
self.imdisplay = ImageDisplay()
self.imdisplay.setMinimumHeight(hmin)
self.imdisplay.setMinimumWidth(wmin)
# Set colormap
self.imdisplay.setColormap(cmap)
# Set scale mode for dynamic range
self.imdisplay.scale = scale
self.imdisplay.contrast = contrast
self.imdisplay.aspect = 'auto'
self.imdisplay.loadImage(imarr)
self.imdisplay.drawImage()
self.y1line, = self.imdisplay.axes.plot(
[self.x1, self.x2], [self.y1, self.y1], ls='-', color='#00FF00')
self.y2line, = self.imdisplay.axes.plot(
[self.x1, self.x2], [self.y2, self.y2], ls='-', color='#00FF00')
# Add navigation toolbars for each widget to enable zooming
self.toolbar = NavigationToolbar2QT(self.imdisplay, self)
# set up the information panel
self.infopanel = QtGui.QWidget()
# add the name of the file
self.NameLabel = QtGui.QLabel("Filename:")
self.NameLabel.setFrameStyle(QtGui.QFrame.Panel | QtGui.QFrame.Raised)
self.NameValueLabel = QtGui.QLabel("%s" % name)
self.NameValueLabel.setFrameStyle(
QtGui.QFrame.Panel | QtGui.QFrame.Sunken)
# add the rows that are extracted
self.y1Label = QtGui.QLabel("Y1:")
self.y1Label.setFrameStyle(QtGui.QFrame.Panel | QtGui.QFrame.Raised)
self.y1ValueEdit = QtGui.QLineEdit("%6i" % self.y1)
self.y2Label = QtGui.QLabel("Y2:")
self.y2Label.setFrameStyle(QtGui.QFrame.Panel | QtGui.QFrame.Raised)
self.y2ValueEdit = QtGui.QLineEdit("%6i" % self.y2)
self.updateButton = QtGui.QPushButton("Update")
self.updateButton.clicked.connect(self.updatesection)
# add the update for automatically updating it
self.nrLabel = QtGui.QLabel("nrows:")
self.nrLabel.setFrameStyle(QtGui.QFrame.Panel | QtGui.QFrame.Raised)
self.nrValueEdit = QtGui.QLineEdit("%5i" % self.nrows)
self.nsLabel = QtGui.QLabel("rstep:")
self.nsLabel.setFrameStyle(QtGui.QFrame.Panel | QtGui.QFrame.Raised)
self.nsValueEdit = QtGui.QLineEdit("%6i" % self.rstep)
self.nextButton = QtGui.QPushButton("Next")
self.nextButton.clicked.connect(self.nextsection)
self.autoButton = QtGui.QPushButton("Auto-Identify")
self.autoButton.clicked.connect(self.runauto)
# set up the info panel layout
infoLayout = QtGui.QGridLayout(self.infopanel)
infoLayout.addWidget(self.NameLabel, 0, 0, 1, 1)
infoLayout.addWidget(self.NameValueLabel, 0, 1, 1, 5)
infoLayout.addWidget(self.y1Label, 1, 0, 1, 1)
infoLayout.addWidget(self.y1ValueEdit, 1, 1, 1, 1)
infoLayout.addWidget(self.y2Label, 1, 2, 1, 1)
infoLayout.addWidget(self.y2ValueEdit, 1, 3, 1, 1)
infoLayout.addWidget(self.updateButton, 1, 4, 1, 1)
infoLayout.addWidget(self.nrLabel, 2, 0, 1, 1)
infoLayout.addWidget(self.nrValueEdit, 2, 1, 1, 1)
infoLayout.addWidget(self.nsLabel, 2, 2, 1, 1)
infoLayout.addWidget(self.nsValueEdit, 2, 3, 1, 1)
infoLayout.addWidget(self.nextButton, 2, 4, 1, 1)
infoLayout.addWidget(self.autoButton, 3, 0, 1, 1)
# Set up the layout
mainLayout = QtGui.QVBoxLayout()
mainLayout.addWidget(self.imdisplay)
mainLayout.addWidget(self.toolbar)
mainLayout.addWidget(self.infopanel)
self.setLayout(mainLayout)
def updatesection(self):
self.y1 = int(self.y1ValueEdit.text())
self.y2 = int(self.y2ValueEdit.text())
self.nrows = int(self.nrValueEdit.text())
self.rstep = int(self.nsValueEdit.text())
if abs(self.y1 - self.y2) != self.nrows:
if self.log:
self.log.warning(
"Warning: Update y2 to increase the row sampling")
self.y1line.set_ydata([self.y1, self.y1])
self.y2line.set_ydata([self.y2, self.y2])
self.imdisplay.draw()
self.emit(QtCore.SIGNAL("regionChange(int,int)"), self.y1, self.y2)
def nextsection(self):
self.nrows = int(self.nrValueEdit.text())
self.rstep = int(self.nsValueEdit.text())
self.y1 = self.y1 + self.rstep
self.y2 = self.y1 + self.nrows
self.y1ValueEdit.setText('%6i' % self.y1)
self.y2ValueEdit.setText('%6i' % self.y2)
self.updatesection()
def runauto(self):
if self.log is not None:
self.log.message("Running Auto")
self.emit(
QtCore.SIGNAL("runauto(int, int, int)"),
self.y1,
self.nrows,
self.rstep)
class arcWidget(QtGui.QWidget):
def __init__(self, arcdisplay, hmin=150, wmin=450, name=None,
x1=0, w1=0, y1=None, y2=None, parent=None):
super(arcWidget, self).__init__(parent)
# Add FITS display widget with mouse interaction and overplotting
self.arcdisplay = arcdisplay
self.arcdisplay.arcfigure.setMinimumHeight(hmin)
self.arcdisplay.arcfigure.setMinimumWidth(wmin)
self.arcdisplay.plotArc()
# Add navigation toolbars for each widget to enable zooming
self.toolbar = NavigationToolbar2QT(self.arcdisplay.arcfigure, self)
# set up the information panel
self.infopanel = QtGui.QWidget()
# add the name of the file
self.NameLabel = QtGui.QLabel("Filename:")
self.NameLabel.setFrameStyle(QtGui.QFrame.Panel | QtGui.QFrame.Raised)
self.NameValueLabel = QtGui.QLabel("%s" % name)
self.NameValueLabel.setFrameStyle(
QtGui.QFrame.Panel | QtGui.QFrame.Sunken)
# add the rows that are extracted
self.y1Label = QtGui.QLabel("Y1:")
self.y1Label.setFrameStyle(QtGui.QFrame.Panel | QtGui.QFrame.Raised)
self.y1ValueLabel = QtGui.QLabel("%6i" % y1)
self.y1ValueLabel.setFrameStyle(
QtGui.QFrame.Panel | QtGui.QFrame.Sunken)
self.y2Label = QtGui.QLabel("Y2:")
self.y2Label.setFrameStyle(QtGui.QFrame.Panel | QtGui.QFrame.Raised)
self.y2ValueLabel = QtGui.QLabel("%6i" % y2)
self.y2ValueLabel.setFrameStyle(
QtGui.QFrame.Panel | QtGui.QFrame.Sunken)
# add in what the value is for a x and w position
self.x1Label = QtGui.QLabel("X1:")
self.x1Label.setFrameStyle(QtGui.QFrame.Panel | QtGui.QFrame.Raised)
self.w1Label = QtGui.QLabel("w1:")
self.w1Label.setFrameStyle(QtGui.QFrame.Panel | QtGui.QFrame.Raised)
self.x1ValueLabel = QtGui.QLabel("%6.2f" % x1)
self.x1ValueLabel.setFrameStyle(
QtGui.QFrame.Panel | QtGui.QFrame.Sunken)
w1 = self.arcdisplay.ws.value(x1)
self.w1ValueEdit = QtGui.QLineEdit("%6i" % w1)
self.addButton = QtGui.QPushButton("Add")
self.addButton.clicked.connect(self.addpoints)
# add in radio buttons for pixel or wavelength
self.pixelradio = QtGui.QRadioButton("Pixel")
self.wavelengthradio = QtGui.QRadioButton("Wavelength")
self.pixelradio.setChecked(True)
# add in information about the order and type of solution
self.funcLabel = QtGui.QLabel("Function:")
self.funcLabel.setFrameStyle(QtGui.QFrame.Panel | QtGui.QFrame.Raised)
self.funcComboBox = QtGui.QComboBox()
self.funcComboBox.addItems(self.arcdisplay.ws.func_options)
self.funcComboBox.setCurrentIndex(
self.arcdisplay.ws.func_options.index(
self.arcdisplay.ws.function))
# self.funcComboBox."%s" % self.arcdisplay.ws.function)
self.orderLabel = QtGui.QLabel("Order:")
self.orderLabel.setFrameStyle(QtGui.QFrame.Panel | QtGui.QFrame.Raised)
self.orderValueEdit = QtGui.QLineEdit("%2i" % self.arcdisplay.ws.order)
self.updateButton = QtGui.QPushButton("Update")
self.updateButton.clicked.connect(self.updatefunction)
# provide a method for automatically fitting the line
self.methodComboBox = QtGui.QComboBox()
self.methodComboBox.addItems(ai.autoidentify_options)
self.methodComboBox.setCurrentIndex(
ai.autoidentify_options.index(
self.arcdisplay.method))
self.runButton = QtGui.QPushButton("Run")
self.runButton.clicked.connect(self.runauto)
self.saveButton = QtGui.QPushButton("Save")
self.saveButton.clicked.connect(self.savews)
# provide the full layout of the information panel
infoLayout = QtGui.QGridLayout(self.infopanel)
infoLayout.addWidget(self.NameLabel, 0, 0, 1, 1)
infoLayout.addWidget(self.NameValueLabel, 0, 1, 1, 5)
infoLayout.addWidget(self.y1Label, 1, 0, 1, 1)
infoLayout.addWidget(self.y1ValueLabel, 1, 1, 1, 1)
infoLayout.addWidget(self.y2Label, 1, 2, 1, 1)
infoLayout.addWidget(self.y2ValueLabel, 1, 3, 1, 1)
infoLayout.addWidget(self.x1Label, 2, 0, 1, 1)
infoLayout.addWidget(self.x1ValueLabel, 2, 1, 1, 1)
infoLayout.addWidget(self.w1Label, 2, 2, 1, 1)
infoLayout.addWidget(self.w1ValueEdit, 2, 3)
infoLayout.addWidget(self.addButton, 2, 4, 1, 1)
infoLayout.addWidget(self.funcLabel, 3, 0, 1, 1)
infoLayout.addWidget(self.funcComboBox, 3, 1, 1, 1)
infoLayout.addWidget(self.orderLabel, 3, 2, 1, 1)
infoLayout.addWidget(self.orderValueEdit, 3, 3, 1, 1)
infoLayout.addWidget(self.updateButton, 3, 4, 1, 1)
infoLayout.addWidget(self.methodComboBox, 4, 0, 1, 1)
infoLayout.addWidget(self.runButton, 4, 1, 1, 1)
infoLayout.addWidget(self.saveButton, 4, 4, 1, 1)
# infoLayout.addWidget(self.pixelradio, 3, 0, 1, 2)
# infoLayout.addWidget(self.wavelengthradio, 3, 2, 1, 2)
# Set up the layout
mainLayout = QtGui.QVBoxLayout()
mainLayout.addWidget(self.arcdisplay.arcfigure)
mainLayout.addWidget(self.toolbar)
mainLayout.addWidget(self.infopanel)
self.setLayout(mainLayout)
self.connect(
self.arcdisplay,
QtCore.SIGNAL('keyPressEvent'),
self.keyPressEvent)
self.connect(
self.arcdisplay,
QtCore.SIGNAL('updatex(float)'),
self.updatexlabel)
self.connect(
self.funcComboBox,
QtCore.SIGNAL('activated(QString)'),
self.updatefunction)
self.connect(
self.methodComboBox,
QtCore.SIGNAL('activated(QString)'),
self.updatemethod)
def keyPressEvent(self, event):
pass
# print "Arc Widget, keyPress:", event
def updatexlabel(self, value):
try:
self.x1ValueLabel.setText("%6.2f" % value)
self.w1ValueEdit.setText("%6.2f" % self.arcdisplay.ws.value(value))
except TypeError:
pass
def addpoints(self):
"""Add the x and w points to the list of matched points"""
x = float(self.x1ValueLabel.text())
w = float(self.w1ValueEdit.text())
# x=[1904.5, 1687.22, 3124.349, 632.5705]
# w=[4671.225, 4624.275, 4916.512, 4383.901]
self.arcdisplay.addpoints(x, w)
def updatefunction(self):
"""Update the values for the function"""
self.arcdisplay.ws.order = int(self.orderValueEdit.text())
self.arcdisplay.ws.function = self.funcComboBox.currentText()
self.arcdisplay.ws.set_func()
self.arcdisplay.findfit()
def updatemethod(self):
"""Update the values for the method for autoidenitfy"""
self.arcdisplay.method = self.methodComboBox.currentText()
def runauto(self):
"""Run autoidenity on one line"""
self.arcdisplay.dc = 0.5 * self.arcdisplay.rms * self.arcdisplay.ndstep
self.arcdisplay.autoidentify()
def savews(self):
"""Save the wcs to the """
self.emit(QtCore.SIGNAL("savews()"))
class errWidget(QtGui.QWidget):
def __init__(self, arcdisplay, hmin=150, wmin=450, name=None, parent=None):
super(errWidget, self).__init__(parent)
# Add FITS display widget with mouse interaction and overplotting
self.arcdisplay = arcdisplay
self.arcdisplay.errfigure.setMinimumHeight(hmin)
self.arcdisplay.errfigure.setMinimumWidth(wmin)
self.arcdisplay.plotErr()
# Add navigation toolbars for each widget to enable zooming
self.toolbar = NavigationToolbar2QT(self.arcdisplay.errfigure, self)
# set up the information panel
self.infopanel = QtGui.QWidget()
# add the name of the file
self.NameLabel = QtGui.QLabel("Filename:")
self.NameLabel.setFrameStyle(QtGui.QFrame.Panel | QtGui.QFrame.Raised)
self.NameValueLabel = QtGui.QLabel("%s" % name)
self.NameValueLabel.setFrameStyle(
QtGui.QFrame.Panel | QtGui.QFrame.Sunken)
# add in the rejection parameters
self.sigmaLabel = QtGui.QLabel("Sigma:")
self.sigmaLabel.setFrameStyle(QtGui.QFrame.Panel | QtGui.QFrame.Raised)
self.sigmaValueEdit = QtGui.QLineEdit(
"%2.1f" %
self.arcdisplay.ws.thresh)
self.niterLabel = QtGui.QLabel("Niter:")
self.niterLabel.setFrameStyle(QtGui.QFrame.Panel | QtGui.QFrame.Raised)
self.niterValueEdit = QtGui.QLineEdit("%i" % self.arcdisplay.ws.niter)
self.rejectButton = QtGui.QPushButton("Reject")
self.rejectButton.clicked.connect(self.rejectpoints)
# add the labels for the results
self.aveLabel = QtGui.QLabel("Average:")
self.aveLabel.setFrameStyle(QtGui.QFrame.Panel | QtGui.QFrame.Raised)
self.aveValueLabel = QtGui.QLabel("")
self.aveValueLabel.setFrameStyle(
QtGui.QFrame.Panel | QtGui.QFrame.Sunken)
self.stdLabel = QtGui.QLabel("Std(A):")
self.stdLabel.setFrameStyle(QtGui.QFrame.Panel | QtGui.QFrame.Raised)
self.stdValueLabel = QtGui.QLabel("")
self.stdValueLabel.setFrameStyle(
QtGui.QFrame.Panel | QtGui.QFrame.Sunken)
# provide the full layout of the information panel
infoLayout = QtGui.QGridLayout(self.infopanel)
infoLayout.addWidget(self.NameLabel, 0, 0, 1, 1)
infoLayout.addWidget(self.NameValueLabel, 0, 1, 1, 5)
infoLayout.addWidget(self.aveLabel, 1, 0)
infoLayout.addWidget(self.aveValueLabel, 1, 1)
infoLayout.addWidget(self.stdLabel, 1, 2)
infoLayout.addWidget(self.stdValueLabel, 1, 3)
infoLayout.addWidget(self.sigmaLabel, 2, 0)
infoLayout.addWidget(self.sigmaValueEdit, 2, 1)
infoLayout.addWidget(self.niterLabel, 2, 2)
infoLayout.addWidget(self.niterValueEdit, 2, 3)
infoLayout.addWidget(self.rejectButton, 2, 4)
# Set up the layout
mainLayout = QtGui.QVBoxLayout()
mainLayout.addWidget(self.arcdisplay.errfigure)
mainLayout.addWidget(self.toolbar)
mainLayout.addWidget(self.infopanel)
self.setLayout(mainLayout)
self.connect(
self.arcdisplay,
QtCore.SIGNAL('fitUpdate()'),
self.fitUpdate)
def fitUpdate(self):
if len(self.arcdisplay.xp) <= 1:
return
try:
xp = np.array(self.arcdisplay.xp)
wp = np.array(self.arcdisplay.wp)
w = self.arcdisplay.ws.value(xp)
value = (wp - w).mean()
self.aveValueLabel.setText("%4.2g" % value)
value = (wp - w).std()
self.stdValueLabel.setText("%4.2g" % value)
except Exception as e:
if self.arcdisplay.log is not None:
self.arcdisplay.log.message(e)
pass
def rejectpoints(self):
self.arcdisplay.ws.set_thresh(float(self.sigmaValueEdit.text()))
self.arcdisplay.ws.set_niter(int(self.niterValueEdit.text()))
self.arcdisplay.findfit()
class ArcDisplay(QtGui.QWidget):
"""Class for displaying Arc Spectra using matplotlib and embedded in a Qt 4 GUI.
"""
def __init__(self, xarr, farr, slines, sfluxes, ws, xp=[], wp=[], mdiff=20, specarr=None,
res=2.0, dres=0.1, dc=20, ndstep=20, sigma=5, smooth=0, niter=5, method='MatchZero',
textcolor='green', preprocess=False, log=None, verbose=True):
"""Default constructor."""
QtGui.QWidget.__init__(self)
# Initialize base class
self.arcfigure = MplCanvas()
self.errfigure = MplCanvas()
# Add central axes instance
self.axes = self.arcfigure.figure.add_subplot(111)
self.erraxes = self.errfigure.figure.add_subplot(111)
# Connect mouse events
self.arcfigure.connectMatplotlibMouseMotion()
self.arcfigure.mpl_connect('button_press_event', self.onButtonPress)
self.arcfigure.mpl_connect('key_press_event', self.onKeyPress)
self.errfigure.connectMatplotlibMouseMotion()
self.errfigure.mpl_connect('button_press_event', self.onButtonPress)
self.errfigure.mpl_connect('key_press_event', self.onKeyPress)
# load the data
self.xarr = xarr
self.farr = farr
self.slines = slines
self.sfluxes = sfluxes
self.ws = ws
self.orig_ws = copy.deepcopy(ws)
self.specarr = specarr
self.mdiff = mdiff
self.sigma = sigma
self.niter = int(niter)
self.smooth = int(smooth)
self.res = res
self.dres = dres
self.dc = dc
self.sections = 6
self.ndstep = ndstep
self.method = method
self.textcolor = textcolor
self.preprocess = preprocess
self.log = log
        self.verbose = verbose
# if asked, smooth the data
if self.smooth > 0:
self.farr = st.smooth_spectra(
self.xarr,
self.farr,
sigma=self.smooth)
self.xp = xp
self.wp = wp
self.rms = res
# set up the artificial spectra
self.spectrum = Spectrum.Spectrum(
self.slines,
self.sfluxes,
dw=self.dres,
stype='line',
sigma=self.res)
self.swarr = self.spectrum.wavelength
self.sfarr = self.spectrum.flux * \
self.farr.max() / self.spectrum.flux.max()
# set up the wavelength solution
if self.ws.function == 'line':
self.ws.set_xarr(self.xarr)
self.ws.farr = self.farr
self.ws.spectrum = self.spectrum
# set up the list of deleted points
self.dxp = []
self.dwp = []
# set up other variables
self.isArt = False
self.isFeature = False
# Set display parameters
self.xmin = self.xarr.min()
self.xmax = self.xarr.max()
self.ymin = self.farr.min()
self.ymax = self.farr.max()
#preprocess if asked
if self.preprocess:
self.log.message("Preprocessing Spectra", with_header=False)
self.findzpd()
self.findfeatures()
self.findfit()
self.isFeature = True
self.emit(QtCore.SIGNAL("fitUpdate()"))
def help(self):
helpoutput = """
? - Print this file q - Quit the program
c - centroid on line x - print the current position
a - Display spectrum l - display features
b - identify features f - fit solution
p - print features P - print solution
z - zeropoint fit Z - find zeropoint and dispersion
r - redraw spectrum R - reset values
e - add closest line L - show detected peaks
d - delete feature u - undelete feature
X - fit full X-cor
"""
print helpoutput
def onKeyPress(self, event):
"""Emit signal on key press"""
if event.key == '?':
# return the help file
self.help()
elif event.key == 'q':
# exit the task
self.emit(QtCore.SIGNAL("quit()"))
elif event.key == 'c':
# return the centroid
if event.xdata:
self.log.message(str(event.xdata), with_header=False)
cx = st.mcentroid(
self.xarr,
self.farr,
xc=event.xdata,
xdiff=self.mdiff)
self.emit(QtCore.SIGNAL("updatex(float)"), cx)
elif event.key == 'x':
# return the x position
if event.xdata:
self.log.message(str(event.xdata), with_header=False)
self.emit(QtCore.SIGNAL("updatex(float)"), event.xdata)
elif event.key == 'R':
# reset the fit
self.reset()
elif event.key == 'f':
# find the fit
self.findfit()
self.emit(QtCore.SIGNAL("fitUpdate()"))
elif event.key == 'b':
            # auto-identify features
self.isFeature = True
self.findfeatures()
elif event.key == 'z':
            # Assume the solution is correct and find the zeropoint
            # that best matches it from cross correlation
self.findzp()
elif event.key == 'Z':
            # Assume the solution is correct and find the zeropoint
            # that best matches it from cross correlation
self.findzpd()
elif event.key == 'X':
# Assume the solution is almost correct
# Fit the full solution using the cross correlation coefficient
self.findxcorfit()
elif event.key == 'e':
# find closest feature from existing fit and line list
# and match it
self.addclosestline(event.xdata)
elif event.key == 'i':
# reset identified features
pass
elif event.key == 't':
# reset identified features
self.isFeature = True
self.testfeatures()
elif event.key == 'l':
# plot the features from existing list
if self.isFeature:
self.isFeature = False
self.redraw_canvas()
else:
self.isFeature = True
self.plotFeatures()
self.redraw_canvas()
elif event.key == 'L':
# plot the sources that are detected
self.plotDetections()
elif event.key == 'p':
# print information about features
for i in range(len(self.xp)):
print self.xp[i], self.wp[i]
elif event.key == 'P':
# print information about features
print self.ws.coef
elif event.key == 'r':
# redraw graph
self.redraw_canvas()
elif event.key == 'a':
# draw artificial spectrum
self.isArt = not self.isArt
self.redraw_canvas()
elif event.key == 'd':
# Delete feature
save = False
y = None
if event.canvas == self.errfigure:
y = event.ydata
save = True
self.deletepoints(event.xdata, y=y, save=save)
self.redraw_canvas(keepzoom=True)
elif event.key == 'u':
# undelete
self.undeletepoints(event.xdata, y=event.ydata)
self.redraw_canvas(keepzoom=True)
elif event.key:
self.emit(QtCore.SIGNAL("keyPressEvent(string)"), event.key)
def onButtonPress(self, event):
"""Emit signal on selecting valid image position."""
if event.xdata and event.ydata:
self.emit(QtCore.SIGNAL("positionSelected(float, float)"),
float(event.xdata), float(event.ydata))
def plotArc(self):
"""Draw image to canvas."""
# plot the spectra
self.spcurve, = self.axes.plot(
self.xarr, self.farr, linewidth=0.5, linestyle='-', marker='None', color='b')
def plotArt(self):
"""Plot the artificial spectrum"""
self.isArt = True
warr = self.ws.value(self.xarr)
asfarr = st.interpolate(
warr,
self.swarr,
self.sfarr,
left=0.0,
right=0.0)
asfarr = asfarr * self.farr.max() / asfarr.max()
self.fpcurve, = self.axes.plot(self.xarr, asfarr, linewidth=0.5, linestyle='-',
marker='None', color='r')
def plotDetections(self):
"""Plot the lines that are detected"""
xp, xf = st.findpoints(
self.xarr, self.farr, self.sigma, self.niter, sections=self.sections)
print xp
self.axes.plot(xp, xf, ls='', marker='|', ms=20, color='#000000')
def plotFeatures(self):
"""Plot features identified in the line list"""
fl = np.array(self.xp) * 0.0 + 0.25 * self.farr.max()
self.splines = self.axes.plot(
self.xp,
fl,
ls='',
marker='|',
ms=20,
color=self.textcolor)
# set up the text position
tsize = 0.83
self.ymin, self.ymax = self.axes.get_ylim()
ppp = (self.ymax - self.ymin) / (self.arcfigure.figure.get_figheight()
* self.arcfigure.figure.get_dpi())
f = self.ymax - 10 * tsize * ppp
for x, w in zip(self.xp, self.wp):
w = '%6.2f' % float(w)
self.axes.text(
x,
f,
w,
size='small',
rotation='vertical',
color=self.textcolor)
def plotErr(self):
"""Draw image to canvas."""
if self.xp and self.wp:
# plot the spectra
w = self.ws.value(np.array(self.xp))
self.errcurve, = self.erraxes.plot(
self.xp, self.wp - w, linewidth=0.5, linestyle='', marker='o', color='b')
if self.dxp and self.dwp:
# plot the spectra
dw = self.ws.value(np.array(self.dxp))
self.delerrcurve, = self.erraxes.plot(
self.dxp, self.dwp - dw, linewidth=0.5, linestyle='', marker='x', color='b')
def set_wdiff(self):
"""Derive a value for wdiff"""
try:
self.wdiff = self.mdiff * self.ws.coef[1]
        except Exception:
self.wdiff = self.mdiff
def testfeatures(self):
"""Run the test matching algorithm"""
self.set_wdiff()
res = max(self.res * 0.25, 2)
xp, wp = st.crosslinematch(self.xarr, self.farr, self.slines, self.sfluxes, self.ws,
res=res, mdiff=self.mdiff, wdiff=20, sigma=self.sigma,
niter=self.niter, sections=self.sections)
for x, w in zip(xp, wp):
if w not in self.wp and w > -1:
self.xp.append(x)
self.wp.append(w)
self.plotFeatures()
self.redraw_canvas()
def findfeatures(self):
"""Given a set of features, find other features that might
correspond to those features
"""
#self.set_wdiff()
# xp, wp=st.findfeatures(self.xarr, self.farr, self.slines, self.sfluxes,
# self.ws, mdiff=self.mdiff, wdiff=self.wdiff, sigma=self.sigma,
# niter=self.niter, sections=3)
xp, wp = st.crosslinematch(self.xarr, self.farr, self.slines, self.sfluxes, self.ws,
res=max(self.sigma*self.res, 3), mdiff=self.mdiff, wdiff=10,
sections=self.sections, sigma=self.sigma, niter=self.niter)
for x, w in zip(xp, wp):
if w not in self.wp and w > -1:
self.xp.append(x)
self.wp.append(w)
# for i in range(len(self.xp)): print self.xp[i], self.wp[i]
# print
self.plotFeatures()
self.redraw_canvas()
def addclosestline(self, x):
"""Find the closes line to the centroided position and
add it
"""
cx = st.mcentroid(self.xarr, self.farr, xc=x, xdiff=self.mdiff)
w = self.ws.value(cx)
d = abs(self.slines - w)
w = self.slines[d.argmin()]
self.xp.append(x)
self.wp.append(w)
self.plotFeatures()
self.redraw_canvas()
def findzp(self):
"""Find the zeropoint for the source and plot of the new value
"""
dc = 0.5 * self.rms * self.ndstep
self.ws = st.findzeropoint(self.xarr, self.farr, self.swarr, self.sfarr,
self.ws, dc=dc, ndstep=self.ndstep, inttype='interp')
self.plotArt()
self.redraw_canvas()
def findzpd(self):
"""Find the zeropoint and dispersion for the source and plot of the new value
"""
dc = 0.5 * self.rms * self.ndstep
# fixed at 0.1 of the dispersion
dd = 0.1 * self.ws.coef[1]
        # set up the dcoef values
dcoef = self.ws.coef * 0.0
dcoef[0] = dc
dcoef[1] = dd
self.ws = st.findxcor(self.xarr, self.farr, self.swarr, self.sfarr, self.ws,
dcoef=dcoef, ndstep=self.ndstep, best=False, inttype='interp')
self.plotArt()
self.redraw_canvas()
def findxcorfit(self):
"""Maximize the normalized correlation coefficient using the full wavelength solution.
"""
self.ws = st.fitxcor(
self.xarr,
self.farr,
self.swarr,
self.sfarr,
self.ws,
interptype='interp')
self.plotArt()
self.redraw_canvas()
def findfit(self):
        if len(self.xp) < self.ws.order:
            raise SALTSpecError(
                "Insufficient number of points for the fit")
try:
self.ws = st.findfit(
np.array(
self.xp), np.array(
self.wp), ws=self.ws, thresh=self.ws.thresh)
except SALTSpecError as e:
self.log.warning(e)
return
del_list = []
for i in range(len(self.ws.func.mask)):
if self.ws.func.mask[i] == 0:
self.deletepoints(self.ws.func.x[i], w=self.ws.func.y[i],
save=True)
self.rms = self.ws.sigma(self.ws.x_arr, self.ws.w_arr)
self.redraw_canvas()
def autoidentify(self, rstep=1, istart=None, nrows=1, oneline=True):
"""Run the autoidentify method for the current line"""
# update the line list such that it is only the line list of selected
# lines
if self.wp:
slines = np.array(self.wp)
sfluxes = self.farr[np.array(self.xp, dtype=int)]
# sfluxes=np.zeros(len(slines))
# for i in range(len(slines)):
# try:
# sfluxes[i]=self.sfluxes[self.slines==slines[i]][0]
# except:
# if sfluxes.mean()==0:
# sfluxes[i]=1
# else:
# sfluxes[i]=sfluxes.mean()
else:
slines = self.slines
sfluxes = self.sfluxes
iws = ai.AutoIdentify(self.xarr, self.specarr, slines, sfluxes, self.ws, farr=self.farr,
method=self.method, rstep=rstep, istart=istart, nrows=nrows,
res=self.res, dres=self.dres, mdiff=self.mdiff, sigma=self.sigma,
smooth=self.smooth, niter=self.niter, dc=self.dc, ndstep=self.ndstep,
oneline=oneline, log=self.log, verbose=self.verbose)
if oneline:
self.ws = iws
else:
return iws
def addpoints(self, x, w):
"""Add points to the line list
"""
if isinstance(x, list) and isinstance(w, list):
self.xp.extend(x)
self.wp.extend(w)
else:
self.xp.append(x)
self.wp.append(w)
def deletepoints(self, x, y=None, w=None, save=False):
""" Delete points from the line list
"""
dist = (np.array(self.xp) - x) ** 2
# assumes you are using the error plot
if y is not None:
w = self.ws.value(np.array(self.xp))
norm = self.xarr.max() / abs(self.wp - w).max()
dist += norm * (self.wp - w - y) ** 2
# print y, norm, dist.min()
# print y, dist.min()
        elif w is not None:
            # wrap the plain list in an array so the arithmetic broadcasts
            norm = self.xarr.max() / abs(np.array(self.wp) - w).max()
            dist += norm * (np.array(self.wp) - w) ** 2
in_minw = dist.argmin()
if save:
self.dxp.append(self.xp[in_minw])
self.dwp.append(self.wp[in_minw])
self.xp.__delitem__(in_minw)
self.wp.__delitem__(in_minw)
def undeletepoints(self, x, y=None):
""" Delete points from the line list
"""
if len(self.dxp) < 1:
return
if len(self.dxp) == 1:
self.xp.append(self.dxp[0])
self.wp.append(self.dwp[0])
self.dxp.__delitem__(0)
self.dwp.__delitem__(0)
return
        dist = (np.array(self.dxp) - x) ** 2
if y is not None:
w = self.ws.value(np.array(self.dxp))
# dist += (self.dwp-w-y)**2
in_minw = dist.argmin()
self.xp.append(self.dxp[in_minw])
self.wp.append(self.dwp[in_minw])
self.dxp.__delitem__(in_minw)
self.dwp.__delitem__(in_minw)
return
def reset(self):
self.ws = copy.deepcopy(self.orig_ws)
self.redraw_canvas()
def redraw_canvas(self, keepzoom=False):
if keepzoom:
# Store current zoom level
xmin, xmax = self.axes.get_xlim()
ymin, ymax = self.axes.get_ylim()
# Clear plot
self.axes.clear()
# Draw image
self.plotArc()
# if necessary, redraw the features
if self.isFeature:
self.plotFeatures()
# if necessary, draw the artificial spectrum
if self.isArt:
self.plotArt()
# Restore zoom level
if keepzoom:
self.axes.set_xlim((self.xmin, self.xmax))
self.axes.set_ylim((self.ymin, self.ymax))
# Force redraw
self.arcfigure.draw()
self.err_redraw_canvas()
def err_redraw_canvas(self, keepzoom=False):
if keepzoom:
# Store current zoom level
xmin, xmax = self.erraxes.get_xlim()
ymin, ymax = self.erraxes.get_ylim()
else:
self.xmin, self.xmax = self.axes.get_xlim()
# Clear plot
self.erraxes.clear()
# Draw image
self.plotErr()
# Restore zoom level
if keepzoom:
self.erraxes.set_xlim((xmin, xmax))
self.erraxes.set_ylim((ymin, ymax))
else:
self.erraxes.set_xlim((self.xmin, self.xmax))
self.errfigure.draw()
self.emit(QtCore.SIGNAL("fitUpdate()"))
def InterIdentify(xarr, specarr, slines, sfluxes, ws, mdiff=20, rstep=1, filename=None,
function='poly', order=3, sigma=3, smooth=0, niter=5, res=2, dres=0.1, dc=20, ndstep=20,
istart=None, method='Zeropoint', scale='zscale', cmap='gray', contrast=1.0,
subback=0, textcolor='green', preprocess=False, log=None, verbose=True):
# Create GUI
global App
App = QtGui.QApplication.instance()
if App is None:
App = QtGui.QApplication(sys.argv)
aw = InterIdentifyWindow(xarr, specarr, slines, sfluxes, ws, rstep=rstep, mdiff=mdiff, sigma=sigma, niter=niter,
res=res, dres=dres, dc=dc, ndstep=ndstep, istart=istart, method=method, smooth=smooth,subback=subback,
cmap=cmap, scale=scale, contrast=contrast, filename=filename, textcolor=textcolor, preprocess=preprocess,
log=log)
aw.show()
# Start application event loop
exit = App.exec_()
imsol = aw.ImageSolution.copy()
    # Check if the GUI exited successfully
if exit != 0:
raise SALTSpecError(
'InterIdentify GUI has unexpected exit status ' +
str(exit))
del aw
return imsol
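# Usage sketch (caller-supplied data, values illustrative):
#   imsol = InterIdentify(xarr, specarr, slines, sfluxes, ws,
#                         function='poly', order=3, filename='arc.fits')
# 'imsol' maps the central row of each fitted section to the
# WavelengthSolution accumulated by InterIdentifyWindow.saveWS().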
|
bsd-3-clause
| -5,777,721,175,345,980,000
| 36.034208
| 135
| 0.576237
| false
| 3.639719
| false
| false
| false
|
boytm/transparent_proxy
|
tcp_proxy.py
|
1
|
5604
|
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
import sys
import socket
import struct
import tornado.ioloop
import tornado.tcpserver
import tornado.tcpclient
#import tornado.web
from tornado import gen
import functools
class TCPProxyHandler(tornado.tcpserver.TCPServer):
@gen.coroutine
def handle_stream(self, stream, address):
factory = tornado.tcpclient.TCPClient()
if stream.socket.family == socket.AF_INET:
#print stream.socket.getsockopt(socket.SOL_IP, socket.SO_ORIGINAL_DST, 16)
dst = stream.socket.getsockopt(socket.SOL_IP, 80, 16)
srv_port, srv_ip = struct.unpack('!2xH4s8x', dst)
srv_ip = socket.inet_ntoa(srv_ip)
if cmp((srv_ip, srv_port), stream.socket.getsockname()) == 0:
print "ignore not nated stream"
stream.close()
return
try:
remote = yield factory.connect(srv_ip, srv_port)
Relay(stream, remote)
            except Exception:
print 'connect error'
stream.close()
return
else:
print 'Unsupported protocol family'
return
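# Note (sketch, not in the original file): option 80 on SOL_IP is
# SO_ORIGINAL_DST from linux/netfilter_ipv4.h; the kernel returns a packed
# sockaddr_in, decoded above with struct format '!2xH4s8x':
#   2x skips sin_family, H is the port (network order), 4s the IPv4 bytes,
#   8x the trailing padding.
SO_ORIGINAL_DST = 80  # named constant mirroring the literal used above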
class Relay(object):
def __init__(self, local, remote):
self.local = local
self.remote = remote
self.local.set_nodelay(True)
self.remote.set_nodelay(True)
self.local.set_close_callback(self.on_local_close)
self.remote.set_close_callback(self.on_remote_close)
self.local.read_bytes(65536, callback=self.on_local_read, partial=True)
self.remote.read_bytes(65536, callback=self.on_remote_read, partial=True)
def on_local_close(self):
print 'detect local close'
if self.local.error:
print self.local.error
if not self.remote.writing():
self.remote.close()
def on_remote_close(self):
print 'detect remote close'
if self.remote.error:
print self.remote.error
if not self.local.writing():
self.local.close()
def on_local_read(self, data):
self.remote.write(data, callback=self.on_remote_write)
def on_local_write(self):
#if shouldclose:
# self.local.close()
#else:
if self.remote.closed():
print 'remote closed, cancel relay'
return
self.remote.read_bytes(65536, callback=self.on_remote_read, partial=True)
def on_remote_read(self, data):
if self.remote.closed():
print 'remote read %d, but should close' % len(data)
self.local.write(data, callback=self.on_local_write)
def on_remote_write(self):
if self.local.closed():
print 'local closed, cancel relay'
return
self.local.read_bytes(65536, callback=self.on_local_read, partial=True)
class TCPProxyHandler2(tornado.tcpserver.TCPServer):
#@gen.coroutine
def handle_stream(self, stream, address):
factory = tornado.tcpclient.TCPClient()
if stream.socket.family == socket.AF_INET:
# SO_ORIGINAL_DST (value 80) is not exposed by Python's socket module
dst = stream.socket.getsockopt(socket.SOL_IP, 80, 16)
print struct.unpack('!2xH4s8x', dst)
srv_port, srv_ip = struct.unpack('!2xH4s8x', dst)
srv_ip = socket.inet_ntoa(srv_ip)
if cmp((srv_ip, srv_port), stream.socket.getsockname()) == 0:
print "error connect itself"
stream.close()
return
#remote = yield factory.connect(srv_ip, srv_port)
#Relay2(local, remote)
factory.connect(srv_ip, srv_port, callback=functools.partial(self.on_connect, stream))
else:
return
def on_connect(self, local, remote):
Relay2(local, remote)
class Relay2(object):
def __init__(self, local, remote):
self.local = local
self.remote = remote
self.quit = False
self.local.set_nodelay(True)
self.remote.set_nodelay(True)
self.local.set_close_callback(self.on_local_close)
self.remote.set_close_callback(self.on_remote_close)
self.read_and_write(local, remote)
self.read_and_write(remote, local)
def on_local_close(self):
print 'detect local close'
self.quit = True
if self.local.error:
print self.local.error
if not self.remote.writing():
self.remote.close()
def on_remote_close(self):
print 'detect remote close'
self.quit = True
if self.remote.error:
print self.remote.error
if not self.local.writing():
self.local.close()
@gen.coroutine
def read_and_write(self, read_from, to):
while not self.quit:
try:
data = yield read_from.read_bytes(65536, partial=True)
yield to.write(data)
except Exception as e:
print "error %s, quit relay" % str(e)
break
def main():
#tornado.netutil.Resolver.configure('tornado.netutil.ThreadedResolver')
#tornado.netutil.Resolver.configure('tornado.platform.caresresolver.CaresResolver')
server = TCPProxyHandler()
#server.listen(8888, address='127.0.0.1') # iptables can't DNAT to 127.0.0.1:8888
server.listen(8888)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
if sys.platform == 'linux2':
import os, pwd
os.setuid(pwd.getpwnam('nobody').pw_uid)
main()
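# Deployment note (added; the rule below is an example only): the
# SO_ORIGINAL_DST lookup in handle_stream() only yields a useful address
# when netfilter redirects traffic to this proxy, e.g.:
#
#   iptables -t nat -A PREROUTING -p tcp --dport 80 -j REDIRECT --to-ports 8888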
|
apache-2.0
| 2,496,638,393,646,806,000
| 29.791209
| 98
| 0.597252
| false
| 3.738492
| false
| false
| false
|
codingvirtual/fullstack-p4-conference
|
constants.py
|
1
|
3218
|
__author__ = 'Greg'
from protorpc import messages
from protorpc import message_types
from models import *
""" - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - """
""" Default values for a new conference. Used only if the user creating
the conference doesn't supply values for a given field and only fields
left empty pick up the default (in other words, if the user supplies
a value for one of the fields below, but not the others, the one they
supplied a value for will retain that value and only the others that
were left empty will inherit the default values)"""
DEFAULTS = {
"city": "Default City",
"maxAttendees": 0,
"seatsAvailable": 0,
"topics": [ "Default", "Topic" ]
}
""" As above, defaults for a new session when there are fields left empty"""
SESSION_DEFAULTS = {
"speaker": "Unknown",
"duration": 60,
"typeOfSession": "Keynote",
}
""" Comparison operators used for filter and query operations"""
OPERATORS = {
'EQ': '=',
'GT': '>',
'GTEQ': '>=',
'LT': '<',
'LTEQ': '<=',
'NE': '!='
}
""" Fields present for a conference """
FIELDS = {
'CITY': 'city',
'TOPIC': 'topics',
'MONTH': 'month',
'MAX_ATTENDEES': 'maxAttendees',
}
""" The following list of elements each define a specific request or response
container that is specific to a particular Model in the overall data
scheme. A "websafe" key is a key that has been URL-encoded to preserve
integrity of the key for transmission across the web. Google code
can use this websafe key to get back to the "real" key in order to
access Datastore """
CONF_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeConferenceKey=messages.StringField(1),
)
CONF_POST_REQUEST = endpoints.ResourceContainer(
ConferenceForm,
websafeConferenceKey=messages.StringField(1),
)
SESSIONS_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
conferenceKey=messages.StringField(1),
sessionKey=messages.StringField(2)
)
SESSIONS_POST_REQUEST = endpoints.ResourceContainer(
SessionForm,
conferenceKey=messages.StringField(1),
)
WISHLIST_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
sessionKey=messages.StringField(1, required=True),
)
SPEAKER_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
speaker=messages.StringField(1, required=True),
)
QUERY_POST_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
startTime=messages.StringField(1),
typeOfSession=messages.StringField(2),
)
SESSION_BY_CONF_POST_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
conferenceKey=messages.StringField(1),
)
SESSION_BY_TYPE_POST_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
conferenceKey=messages.StringField(1),
typeOfSession=messages.StringField(2),
)
SESSION_BY_SPEAKER_POST_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
speaker=messages.StringField(1),
)
GET_FEATURED_SPEAKER_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
conf_key=messages.StringField(1, required=True)
)
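# Illustrative sketch (not part of the original file): a container such as
# CONF_GET_REQUEST is normally consumed by an @endpoints.method decorator in
# the API service class; ConferenceApi/getConference are assumed names here.
#
#   @endpoints.method(CONF_GET_REQUEST, ConferenceForm,
#                     path='conference/{websafeConferenceKey}',
#                     http_method='GET', name='getConference')
#   def getConference(self, request):
#       ...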
|
apache-2.0
| -6,473,297,852,493,102,000
| 28.53211
| 77
| 0.699503
| false
| 3.619798
| false
| false
| false
|
CoDaS-Lab/image_analysis
|
demo/demo_features.py
|
1
|
1262
|
# Copyright 2017 Codas Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np  # required by ArgMaxPixel.extract below
import skimage.color
from image_analysis.pipeline.feature import Feature
class RGBToGray(Feature):
def __init__(self):
Feature.__init__(self, 'grayscale', frame_op=True)
def extract(self, RGB_frame):
return skimage.color.rgb2gray(RGB_frame)
class BatchOP(Feature):
def __init__(self):
Feature.__init__(self, 'batch_length', batch_op=True)
def extract(self, batch):
return len(batch)
class ArgMaxPixel(Feature):
def __init__(self):
Feature.__init__(self, 'max_pixel', frame_op=True)
def extract(self, frame):
return np.max(frame)
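# Illustrative usage (added; assumes a frame is an RGB float ndarray):
#
#   frame = np.random.rand(4, 4, 3)
#   gray = RGBToGray().extract(frame)  # 2-D grayscale array
#   peak = ArgMaxPixel().extract(gray)  # brightest pixel value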
|
apache-2.0
| -6,162,282,703,088,598,000
| 29.047619
| 80
| 0.654517
| false
| 4.019108
| false
| false
| false
|
edocappelli/crystalpy
|
crystalpy/diffraction/GeometryType.py
|
1
|
2379
|
"""
Represents geometry types/setups: Bragg diffraction, BraggTransmission, Laue diffraction, Laue transmission.
"""
class GeometryType(object):
def __init__(self, description):
"""
Constructor.
:param description: Description of the geometry type, e.g. "Bragg transmission"
"""
self._description = description
def description(self):
"""
Returns the description of this geometry type.
:return: Description of this geometry type.
"""
return self._description
def __eq__(self, candidate):
"""
Determines if two instances are equal.
:param candidate: Instance to compare to.
:return: True if both instances are equal. Otherwise False.
"""
return self.description() == candidate.description()
def __ne__(self, candidate):
"""
Determines if two instances are not equal.
:param candidate: Instance to compare to.
:return: True if both instances are not equal. Otherwise False.
"""
return not self == candidate
def __hash__(self):
"""
Returns the hash value of this instance.
:return: Hash value of this instance.
"""
# As hash value just use the hash of the description.
return hash(self._description)
@staticmethod
def allGeometryTypes():
"""
Returns all possible geometry types.
:return: All possible geometry types.
"""
return [BraggDiffraction(),
LaueDiffraction(),
BraggTransmission(),
LaueTransmission()]
class LaueDiffraction(GeometryType):
"""
Represents Laue diffraction.
"""
def __init__(self):
super(LaueDiffraction, self).__init__("Laue diffraction")
class BraggDiffraction(GeometryType):
"""
Represents Bragg diffraction.
"""
def __init__(self):
super(BraggDiffraction, self).__init__("Bragg diffraction")
class LaueTransmission(GeometryType):
"""
Represents Laue transmission.
"""
def __init__(self):
super(LaueTransmission, self).__init__("Laue transmission")
class BraggTransmission(GeometryType):
"""
Represents Bragg transmission.
"""
def __init__(self):
super(BraggTransmission, self).__init__("Bragg transmission")
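# Illustrative behaviour sketch (added): equality and hashing are based
# purely on the description string, so instances compare by value and can
# be used as dict/set keys.
#
#   assert BraggDiffraction() == BraggDiffraction()
#   assert BraggDiffraction() != LaueDiffraction()
#   assert len(set(GeometryType.allGeometryTypes())) == 4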
|
mit
| -4,599,577,668,534,421,500
| 26.662791
| 108
| 0.605717
| false
| 4.548757
| false
| false
| false
|
klahnakoski/TestFailures
|
pyLibrary/debugs/exceptions.py
|
1
|
7358
|
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import sys
from collections import Mapping
from pyLibrary.dot import Dict, listwrap, unwraplist, set_default, Null
from pyLibrary.jsons.encoder import json_encoder
from pyLibrary.strings import indent, expand_template
FATAL = "FATAL"
ERROR = "ERROR"
WARNING = "WARNING"
ALARM = "ALARM"
UNEXPECTED = "UNEXPECTED"
NOTE = "NOTE"
class Except(Exception):
@staticmethod
def new_instance(desc):
return Except(
desc.type,
desc.template,
desc.params,
[Except.new_instance(c) for c in listwrap(desc.cause)],
desc.trace
)
def __init__(self, type=ERROR, template=Null, params=Null, cause=Null, trace=Null, **kwargs):
Exception.__init__(self)
self.type = type
self.template = template
self.params = set_default(kwargs, params)
self.cause = cause
if not trace:
self.trace = extract_stack(2)
else:
self.trace = trace
@classmethod
def wrap(cls, e, stack_depth=0):
if e == None:
return Null
elif isinstance(e, (list, Except)):
return e
elif isinstance(e, Mapping):
e.cause = unwraplist([Except.wrap(c) for c in listwrap(e.cause)])
return Except(**e)
else:
if hasattr(e, "message") and e.message:
cause = Except(ERROR, unicode(e.message), trace=_extract_traceback(0))
else:
cause = Except(ERROR, unicode(e), trace=_extract_traceback(0))
trace = extract_stack(stack_depth + 2)  # +2 removes the caller and its call to Except.wrap()
cause.trace.extend(trace)
return cause
@property
def message(self):
return expand_template(self.template, self.params)
def __contains__(self, value):
if isinstance(value, basestring):
if self.template.find(value) >= 0 or self.message.find(value) >= 0:
return True
if self.type == value:
return True
for c in listwrap(self.cause):
if value in c:
return True
return False
def __unicode__(self):
output = self.type + ": " + self.template + "\n"
if self.params:
output = expand_template(output, self.params)
if self.trace:
output += indent(format_trace(self.trace))
if self.cause:
cause_strings = []
for c in listwrap(self.cause):
with suppress_exception:
cause_strings.append(unicode(c))
output += "caused by\n\t" + "and caused by\n\t".join(cause_strings)
return output
def __str__(self):
return self.__unicode__().encode('latin1', 'replace')
def as_dict(self):
return Dict(
type=self.type,
template=self.template,
params=self.params,
cause=self.cause,
trace=self.trace
)
def __json__(self):
return json_encoder(self.as_dict())
def extract_stack(start=0):
"""
SNAGGED FROM traceback.py
Extract the raw traceback from the current stack frame.
Each item in the returned list is a dict with keys "depth", "line",
"file" and "method", and the entries are in order
from newest to oldest
"""
try:
raise ZeroDivisionError
except ZeroDivisionError:
trace = sys.exc_info()[2]
f = trace.tb_frame.f_back
for i in range(start):
f = f.f_back
stack = []
n = 0
while f is not None:
stack.append({
"depth": n,
"line": f.f_lineno,
"file": f.f_code.co_filename,
"method": f.f_code.co_name
})
f = f.f_back
n += 1
return stack
def _extract_traceback(start):
"""
SNAGGED FROM traceback.py
RETURN list OF dicts DESCRIBING THE STACK TRACE
"""
tb = sys.exc_info()[2]
for i in range(start):
tb = tb.tb_next
trace = []
n = 0
while tb is not None:
f = tb.tb_frame
trace.append({
"depth": n,
"file": f.f_code.co_filename,
"line": tb.tb_lineno,
"method": f.f_code.co_name
})
tb = tb.tb_next
n += 1
trace.reverse()
return trace
def format_trace(tbs, start=0):
trace = []
for d in tbs[start:]:
item = expand_template('File "{{file}}", line {{line}}, in {{method}}\n', d)
trace.append(item)
return "".join(trace)
class Suppress(object):
"""
IGNORE EXCEPTIONS
"""
def __init__(self, exception_type):
self.type = exception_type
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if not exc_val or isinstance(exc_val, self.type):
return True
suppress_exception = Suppress(Exception)
class Explanation(object):
"""
EXPLAIN THE ACTION BEING TAKEN
IF THERE IS AN EXCEPTION WRAP IT WITH THE EXPLANATION
CHAIN EXCEPTION AND RE-RAISE
"""
def __init__(
self,
template, # human readable template
**more_params
):
self.template = template
self.more_params = more_params
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if isinstance(exc_val, Exception):
from pyLibrary.debugs.logs import Log
Log.error(
template="Failure in " + self.template,
default_params=self.more_params,
cause=exc_val,
stack_depth=1
)
return True
class WarnOnException(object):
"""
EXPLAIN THE ACTION BEING TAKEN
IF THERE IS AN EXCEPTION, ISSUE A WARNING (DO NOT RE-RAISE)
"""
def __init__(
self,
template, # human readable template
**more_params
):
self.template = template
self.more_params = more_params
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if isinstance(exc_val, Exception):
from pyLibrary.debugs.logs import Log
Log.warning(
template="Ignored failure while " + self.template,
default_params=self.more_params,
cause=exc_val,
stack_depth=1
)
return True
class AssertNoException(object):
"""
EXPECT NO EXCEPTION IN THIS BLOCK
"""
def __init__(self):
pass
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if isinstance(exc_val, Exception):
from pyLibrary.debugs.logs import Log
Log.error(
template="Not expected to fail",
cause=exc_val,
stack_depth=1
)
return True
assert_no_exception = AssertNoException()
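# Illustrative usage sketch (added; names and template params are examples):
#
#   with suppress_exception:
#       risky_cleanup()  # any Exception raised here is swallowed
#
#   with Explanation("copying {{num}} files", num=3):
#       copy_files()  # a failure is re-raised, wrapped with the explanation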
|
mpl-2.0
| 7,010,921,535,276,321,000
| 23.858108
| 117
| 0.554499
| false
| 3.889006
| false
| false
| false
|
vermouth1992/Leetcode
|
python/576.out-of-boundary-paths.py
|
1
|
2536
|
#
# @lc app=leetcode id=576 lang=python3
#
# [576] Out of Boundary Paths
#
# https://leetcode.com/problems/out-of-boundary-paths/description/
#
# algorithms
# Medium (36.32%)
# Total Accepted: 37.6K
# Total Submissions: 103.6K
# Testcase Example: '2\n2\n2\n0\n0'
#
# There is an m x n grid with a ball. The ball is initially at the position
# [startRow, startColumn]. You are allowed to move the ball to one of the four
# adjacent cells in the grid (possibly out of the grid crossing the grid
# boundary). You can apply at most maxMove moves to the ball.
#
# Given the five integers m, n, maxMove, startRow, startColumn, return the
# number of paths to move the ball out of the grid boundary. Since the answer
# can be very large, return it modulo 10^9 + 7.
#
#
# Example 1:
#
#
# Input: m = 2, n = 2, maxMove = 2, startRow = 0, startColumn = 0
# Output: 6
#
#
# Example 2:
#
#
# Input: m = 1, n = 3, maxMove = 3, startRow = 0, startColumn = 1
# Output: 12
#
#
#
# Constraints:
#
#
# 1 <= m, n <= 50
# 0 <= maxMove <= 50
# 0 <= startRow < m
# 0 <= startColumn < n
#
#
#
class Solution:
def findPaths(self, m: int, n: int, maxMove: int, startRow: int, startColumn: int) -> int:
if maxMove == 0:
return 0
table = []
for move in range(maxMove):
table.append([])
for row in range(m):
table[move].append([])
for col in range(n):
table[move][row].append(0)
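# table[move][row][col] counts the paths that leave the grid in exactly
# (move + 1) steps when starting from cell (row, col)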
# base case: seed one-move exits from every boundary-adjacent cell (corner cells are counted once per adjacent edge)
for row in range(m):
table[0][row][0] += 1
table[0][row][n - 1] += 1
for col in range(n):
table[0][0][col] += 1
table[0][m - 1][col] += 1
for move in range(1, maxMove):
for row in range(m):
for col in range(n):
if row > 0:
table[move][row][col] += table[move - 1][row - 1][col]
if row < m - 1:
table[move][row][col] += table[move - 1][row + 1][col]
if col > 0:
table[move][row][col] += table[move - 1][row][col - 1]
if col < n - 1:
table[move][row][col] += table[move - 1][row][col + 1]
result = 0
for move in range(maxMove):
result += table[move][startRow][startColumn]
return result % 1000000007
if __name__ == '__main__':
print(Solution().findPaths(1, 3, 3, 0, 1))
|
mit
| -6,378,149,178,022,609,000
| 26.879121
| 94
| 0.521293
| false
| 3.230573
| false
| false
| false
|
richardliaw/ray
|
rllib/utils/exploration/ornstein_uhlenbeck_noise.py
|
1
|
9037
|
import numpy as np
from typing import Optional, Union
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.utils.annotations import override
from ray.rllib.utils.exploration.gaussian_noise import GaussianNoise
from ray.rllib.utils.framework import try_import_tf, try_import_torch, \
get_variable, TensorType
from ray.rllib.utils.schedules import Schedule
tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()
class OrnsteinUhlenbeckNoise(GaussianNoise):
"""An exploration that adds Ornstein-Uhlenbeck noise to continuous actions.
If explore=True, returns sampled actions plus a noise term X,
which changes according to this formula:
Xt+1 = -theta*Xt + sigma*N[0,stddev], where theta, sigma and stddev are
constants. Also, some completely random period is possible at the
beginning.
If explore=False, returns the deterministic action.
"""
def __init__(self,
action_space,
*,
framework: str,
ou_theta: float = 0.15,
ou_sigma: float = 0.2,
ou_base_scale: float = 0.1,
random_timesteps: int = 1000,
initial_scale: float = 1.0,
final_scale: float = 0.02,
scale_timesteps: int = 10000,
scale_schedule: Optional[Schedule] = None,
**kwargs):
"""Initializes an Ornstein-Uhlenbeck Exploration object.
Args:
action_space (Space): The gym action space used by the environment.
ou_theta (float): The theta parameter of the Ornstein-Uhlenbeck
process.
ou_sigma (float): The sigma parameter of the Ornstein-Uhlenbeck
process.
ou_base_scale (float): A fixed scaling factor, by which all OU-
noise is multiplied. NOTE: This is on top of the parent
GaussianNoise's scaling.
random_timesteps (int): The number of timesteps for which to act
completely randomly. Only after this number of timesteps, the
`self.scale` annealing process will start (see below).
initial_scale (float): The initial scaling weight to multiply
the noise with.
final_scale (float): The final scaling weight to multiply
the noise with.
scale_timesteps (int): The timesteps over which to linearly anneal
the scaling factor (after(!) having used random actions for
`random_timesteps` steps).
scale_schedule (Optional[Schedule]): An optional Schedule object
to use (instead of constructing one from the given parameters).
framework (Optional[str]): One of None, "tf", "torch".
"""
super().__init__(
action_space,
framework=framework,
random_timesteps=random_timesteps,
initial_scale=initial_scale,
final_scale=final_scale,
scale_timesteps=scale_timesteps,
scale_schedule=scale_schedule,
stddev=1.0, # Force `self.stddev` to 1.0.
**kwargs)
self.ou_theta = ou_theta
self.ou_sigma = ou_sigma
self.ou_base_scale = ou_base_scale
# The current OU-state value (gets updated each time an exploration
# action is computed).
self.ou_state = get_variable(
np.array(self.action_space.low.size * [.0], dtype=np.float32),
framework=self.framework,
tf_name="ou_state",
torch_tensor=True,
device=self.device)
@override(GaussianNoise)
def _get_tf_exploration_action_op(self, action_dist: ActionDistribution,
explore: Union[bool, TensorType],
timestep: Union[int, TensorType]):
ts = timestep if timestep is not None else self.last_timestep
scale = self.scale_schedule(ts)
# The deterministic actions (if explore=False).
deterministic_actions = action_dist.deterministic_sample()
# Apply base-scaled and time-annealed scaled OU-noise to
# deterministic actions.
gaussian_sample = tf.random.normal(
shape=[self.action_space.low.size], stddev=self.stddev)
ou_new = self.ou_theta * -self.ou_state + \
self.ou_sigma * gaussian_sample
if self.framework in ["tf2", "tfe"]:
self.ou_state.assign_add(ou_new)
ou_state_new = self.ou_state
else:
ou_state_new = tf1.assign_add(self.ou_state, ou_new)
high_m_low = self.action_space.high - self.action_space.low
high_m_low = tf.where(
tf.math.is_inf(high_m_low), tf.ones_like(high_m_low), high_m_low)
noise = scale * self.ou_base_scale * ou_state_new * high_m_low
stochastic_actions = tf.clip_by_value(
deterministic_actions + noise,
self.action_space.low * tf.ones_like(deterministic_actions),
self.action_space.high * tf.ones_like(deterministic_actions))
# Stochastic actions could either be: random OR action + noise.
random_actions, _ = \
self.random_exploration.get_tf_exploration_action_op(
action_dist, explore)
exploration_actions = tf.cond(
pred=tf.convert_to_tensor(ts < self.random_timesteps),
true_fn=lambda: random_actions,
false_fn=lambda: stochastic_actions)
# Choose by `explore` (main exploration switch).
action = tf.cond(
pred=tf.constant(explore, dtype=tf.bool)
if isinstance(explore, bool) else explore,
true_fn=lambda: exploration_actions,
false_fn=lambda: deterministic_actions)
# Logp=always zero.
batch_size = tf.shape(deterministic_actions)[0]
logp = tf.zeros(shape=(batch_size, ), dtype=tf.float32)
# Increment `last_timestep` by 1 (or set to `timestep`).
if self.framework in ["tf2", "tfe"]:
if timestep is None:
self.last_timestep.assign_add(1)
else:
self.last_timestep.assign(timestep)
return action, logp
else:
assign_op = (tf1.assign_add(self.last_timestep, 1)
if timestep is None else tf1.assign(
self.last_timestep, timestep))
with tf1.control_dependencies([assign_op, ou_state_new]):
return action, logp
@override(GaussianNoise)
def _get_torch_exploration_action(self, action_dist: ActionDistribution,
explore: bool,
timestep: Union[int, TensorType]):
# Set last timestep or (if not given) increase by one.
self.last_timestep = timestep if timestep is not None else \
self.last_timestep + 1
# Apply exploration.
if explore:
# Random exploration phase.
if self.last_timestep < self.random_timesteps:
action, _ = \
self.random_exploration.get_torch_exploration_action(
action_dist, explore=True)
# Apply base-scaled and time-annealed scaled OU-noise to
# deterministic actions.
else:
det_actions = action_dist.deterministic_sample()
scale = self.scale_schedule(self.last_timestep)
gaussian_sample = scale * torch.normal(
mean=torch.zeros(self.ou_state.size()), std=1.0) \
.to(self.device)
ou_new = self.ou_theta * -self.ou_state + \
self.ou_sigma * gaussian_sample
self.ou_state += ou_new
high_m_low = torch.from_numpy(
self.action_space.high - self.action_space.low). \
to(self.device)
high_m_low = torch.where(
torch.isinf(high_m_low),
torch.ones_like(high_m_low).to(self.device), high_m_low)
noise = scale * self.ou_base_scale * self.ou_state * high_m_low
action = torch.min(
torch.max(
det_actions + noise,
torch.tensor(
self.action_space.low,
dtype=torch.float32,
device=self.device)),
torch.tensor(
self.action_space.high,
dtype=torch.float32,
device=self.device))
# No exploration -> Return deterministic actions.
else:
action = action_dist.deterministic_sample()
# Logp=always zero.
logp = torch.zeros(
(action.size()[0], ), dtype=torch.float32, device=self.device)
return action, logp
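# Minimal standalone sketch (added for illustration) of the OU recurrence the
# class implements; pure numpy, with example theta/sigma values:
#
#   import numpy as np
#   x, theta, sigma = np.zeros(2), 0.15, 0.2
#   for _ in range(1000):
#       x = x - theta * x + sigma * np.random.normal(size=2)
#   # x is now a sample of the (zero-mean) stationary OU state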
|
apache-2.0
| -3,410,992,502,494,740,500
| 43.29902
| 79
| 0.565674
| false
| 4.158767
| false
| false
| false
|
zejn/babbage
|
tests/test_model.py
|
1
|
2196
|
from .util import TestCase, load_json_fixture
from babbage.model import Model
class ModelTestCase(TestCase):
def setUp(self):
super(ModelTestCase, self).setUp()
self.simple_model_data = load_json_fixture('models/simple_model.json')
self.simple_model = Model(self.simple_model_data)
def test_model_concepts(self):
concepts = list(self.simple_model.concepts)
assert len(concepts) == 7, len(concepts)
def test_model_match(self):
concepts = list(self.simple_model.match('foo'))
assert len(concepts) == 1, len(concepts)
def test_model_match_invalid(self):
concepts = list(self.simple_model.match('fooxx'))
assert len(concepts) == 0, len(concepts)
def test_model_aggregates(self):
aggregates = list(self.simple_model.aggregates)
assert len(aggregates) == 2, aggregates
def test_model_fact_table(self):
assert self.simple_model.fact_table_name == 'simple'
assert 'simple' in repr(self.simple_model), repr(self.simple_model)
def test_deref(self):
assert self.simple_model['foo'].name == 'foo'
assert self.simple_model['foo.key'].name == 'key'
assert self.simple_model['amount'].name == 'amount'
assert 'amount' in self.simple_model
assert 'amount.sum' in self.simple_model
assert '_count' in self.simple_model
assert 'yabba' not in self.simple_model
assert 'foo.key' in self.simple_model
def test_repr(self):
assert 'amount' in repr(self.simple_model['amount'])
assert 'amount.sum' in repr(self.simple_model['amount.sum'])
assert 'foo.key' in repr(self.simple_model['foo.key'])
assert 'foo' in repr(self.simple_model['foo'])
assert 'foo' in unicode(self.simple_model['foo'])
assert self.simple_model['foo'] == 'foo'
def test_to_dict(self):
data = self.simple_model.to_dict()
assert 'measures' in data
assert 'amount' in data['measures']
assert 'amount.sum' in data['aggregates']
assert 'ref' in data['measures']['amount']
assert 'dimensions' in data
assert 'foo' in data['dimensions']
|
mit
| 2,243,494,680,258,463,000
| 37.526316
| 78
| 0.636612
| false
| 3.623762
| true
| false
| false
|
sebastianlan/wedfairy-api
|
rsvp/migrations/0001_initial.py
|
1
|
1612
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Attendance',
fields=[
('id', models.AutoField(serialize=False, verbose_name=b'id', primary_key=True)),
('user_pic_url', models.TextField()),
('name', models.TextField()),
('people', models.IntegerField()),
('create_date', models.DateField()),
],
options={
'db_table': 'attendance',
},
),
migrations.CreateModel(
name='Rsvp',
fields=[
('id', models.AutoField(serialize=False, verbose_name=b'id', primary_key=True)),
('message', models.TextField()),
('deadline', models.DateField()),
],
options={
'db_table': 'rsvp',
},
),
migrations.CreateModel(
name='UserRsvp',
fields=[
('id', models.AutoField(serialize=False, verbose_name=b'id', primary_key=True)),
('user', models.IntegerField()),
('rsvp', models.ForeignKey(to='rsvp.Rsvp')),
],
options={
'db_table': 'user_rsvp',
},
),
migrations.AddField(
model_name='attendance',
name='rsvp',
field=models.ForeignKey(to='rsvp.Rsvp'),
),
]
|
mit
| 7,234,744,672,930,323,000
| 29.415094
| 96
| 0.466501
| false
| 4.645533
| false
| false
| false
|
lgfausak/sqlbridge
|
sqlbridge/scripts/cli.py
|
1
|
4335
|
#!/usr/bin/env python
###############################################################################
##
## Copyright (C) 2014 Greg Fausak
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
import sys, os, argparse, six
import twisted
from twisted.python import log
from autobahn.twisted.wamp import ApplicationRunner
from autobahn.wamp import types
from autobahn import util
from sqlbridge.twisted.dbengine import DB
# http://stackoverflow.com/questions/3853722/python-argparse-how-to-insert-newline-the-help-text
class SmartFormatter(argparse.HelpFormatter):
def _split_lines(self, text, width):
# this is the RawTextHelpFormatter._split_lines
if text.startswith('R|'):
return text[2:].splitlines()
return argparse.HelpFormatter._split_lines(self, text, width)
def run():
prog = os.path.basename(__file__)
def_wsocket = 'ws://127.0.0.1:8080/ws'
def_user = 'db'
def_secret = 'dbsecret'
def_realm = 'realm1'
def_topic_base = 'com.db'
# http://stackoverflow.com/questions/3853722/python-argparse-how-to-insert-newline-the-help-text
p = argparse.ArgumentParser(description="db admin manager for autobahn", formatter_class=SmartFormatter)
p.add_argument('-w', '--websocket', action='store', dest='wsocket', default=def_wsocket,
help='web socket definition, default is: '+def_wsocket)
p.add_argument('-r', '--realm', action='store', dest='realm', default=def_realm,
help='connect to websocket using realm, default is: '+def_realm)
p.add_argument('-v', '--verbose', action='store_true', dest='verbose',
default=False, help='Verbose logging for debugging')
p.add_argument('-u', '--user', action='store', dest='user', default=def_user,
help='connect to websocket as user, default is: '+def_user)
p.add_argument('-s', '--secret', action='store', dest='password', default=def_secret,
help='users "secret" password')
p.add_argument('-e', '--engine', action='store', dest='engine', default=None,
help='if specified, a database engine will be attached.' +
' Note engine is rooted on --topic.' +
' Valid engine options are PG, MYSQL or SQLITE')
p.add_argument('-d', '--dsn', action='store', dest='dsn', default=None,
help='R|if specified the database in dsn will be connected and ready.\n' +
'dsns are unique to the engine being used. Valid examples:' +
'\n-----------' +
'\nPG: dbname=autobahn host=192.168.200.230 user=autouser password=testpass' +
'\nMYSQL: database=autobahn user=autouser password=passtest' +
'\nSQLITE: Z')
p.add_argument('-t', '--topic', action='store', dest='topic_base', default=def_topic_base,
help='if you specify --dsn then you will need a topic to root it on, the default ' + def_topic_base + ' is fine.')
args = p.parse_args()
if args.verbose:
log.startLogging(sys.stdout)
component_config = types.ComponentConfig(realm=args.realm)
ai = {
'auth_type':'wampcra',
'auth_user':args.user,
'auth_password':args.password
}
mdb = DB(config=component_config,
authinfo=ai, engine=args.engine, topic_base=args.topic_base, dsn=args.dsn, debug=args.verbose)
runner = ApplicationRunner(args.wsocket, args.realm)
runner.run(lambda _: mdb)
if __name__ == '__main__':
run()
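# Example invocation (illustrative; the console-script name "sqlbridge" and
# all values are assumptions, and the dsn syntax depends on the engine):
#
#   sqlbridge -w ws://127.0.0.1:8080/ws -u db -s dbsecret -e PG \
#       -d "dbname=autobahn host=192.168.200.230 user=autouser password=testpass" \
#       -t com.db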
|
apache-2.0
| 7,169,285,442,915,159,000
| 42.787879
| 138
| 0.599539
| false
| 3.980716
| false
| false
| false
|
bjura/pisak2
|
pisak/resources.py
|
1
|
1489
|
import os.path
from PyQt5.QtCore import QObject, QStandardPaths, QDir, pyqtSlot, pyqtProperty
from .res import getRes
class Resources(QObject):
appDataDir = QStandardPaths.writableLocation(QStandardPaths.AppDataLocation)
soundFileExt = '.wav'
iconFileExt = '.svg'
homeAppDir = os.path.join(QStandardPaths.standardLocations(QStandardPaths.HomeLocation)[0], '.pisak')
@pyqtSlot(str, result=str)
def getSoundPath(self, soundName):
soundFile = soundName + self.soundFileExt
path = os.path.join(self.appDataDir, soundFile)
if not os.path.exists(path):
path = getRes(os.path.join('sounds', soundFile))
return path
@pyqtSlot(str, result=str)
def getIconPath(self, iconName):
iconFile = iconName + self.iconFileExt
return getRes(os.path.join('icons', iconFile))
@pyqtSlot(str, result=str)
def getResource(self, item):
return getRes(item)
@pyqtProperty(str, constant=True)
def symbolsFolder(self):
return getRes('symbols')
@pyqtProperty(str, constant=True)
def moviesDir(self):
return QStandardPaths.standardLocations(QStandardPaths.MoviesLocation)[0]
@pyqtProperty(str, constant=True)
def musicDir(self):
return QStandardPaths.standardLocations(QStandardPaths.MusicLocation)[0]
@pyqtProperty(str, constant=True)
def photosDir(self):
return QStandardPaths.standardLocations(QStandardPaths.PicturesLocation)[0]
|
gpl-3.0
| 8,239,106,199,094,781,000
| 29.387755
| 105
| 0.703156
| false
| 3.631707
| false
| false
| false
|
nitehawck/dem
|
dem/dependency/url.py
|
1
|
1463
|
import os
import wget
from dem.dependency.archive import ArchiveInstaller
from dem.project.reader import Config
class UrlInstaller:
def __init__(self, project, packages, cache):
self._packages = packages
self._project = project
self._download_directory = os.path.join('.devenv', project, 'downloads')
self._config = Config({'remote-locations': [self._download_directory]})
self._cache = cache
def install_packages(self):
installed_packages = []
for p in self._packages:
if 'url' in p:
file_extension = UrlInstaller._get_ext(p['url'])
file_name = '{}-{}{}'.format(p['name'], p['version'], file_extension)
local_file = os.path.join(self._download_directory, file_name)
if not os.path.exists(local_file) and not self._cache.is_package_installed(p['name'], p['version']):
print('Fetching {}'.format(p['url']))
wget.download(p['url'], out=local_file)
print()
installed_packages.append(p)
local_installer = ArchiveInstaller(self._project, self._config, installed_packages, self._cache)
return local_installer.install_packages()
@staticmethod
def _get_ext(url):
root, ext = os.path.splitext(url.split('/')[-1])
if ext in ['.gz', '.bz2']:
ext = os.path.splitext(root)[1] + ext
return ext
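# Illustrative shape of the packages list consumed above (values invented):
#
#   packages = [{
#       'name': 'toolchain',
#       'version': '1.2.0',
#       'url': 'http://example.com/toolchain-1.2.0.tar.gz',
#   }]
#   # UrlInstaller('myproject', packages, cache).install_packages()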
|
mit
| 6,745,438,510,528,294,000
| 37.5
| 116
| 0.583049
| false
| 4.063889
| false
| false
| false
|
edx/edx-val
|
edxval/management/commands/verify_pact.py
|
1
|
1671
|
"""
Management command to verify VEM pact.
"""
import logging
from django.conf import settings
from django.core.management.base import BaseCommand
from pact import Verifier
log = logging.getLogger(__name__)
class Command(BaseCommand):
"""
Management command to verify VAL provider pacts.
Example Usage: python manage.py verify_pact --settings=edxval.settings.test
It should be run explicitly with test or test-only settings because the pact verification requires
some database operations that should not occur in a production-related database.
"""
help = "Verify the VAL provider pacts"
default_opts = {
'broker_url': getattr(settings, 'PACT_BROKER_BASE_URL', None),
'publish_version': '1',
'publish_verification_results': getattr(settings, 'PUBLISH_VERIFICATION_RESULTS', False)
}
def verify_pact(self):
"""
Verify the pacts with Pact-verifier.
"""
verifier = Verifier(
provider='VAL',
provider_base_url=settings.PROVIDER_BASE_URL
)
if self.default_opts['broker_url']:
verifier.verify_with_broker(
**self.default_opts,
verbose=False,
provider_states_setup_url=settings.PROVIDER_STATES_URL,
)
else:
verifier.verify_pacts(
'edxval/pacts/vem-val.json',
provider_states_setup_url=settings.PROVIDER_STATES_URL,
)
def handle(self, *args, **options):
log.info("Starting pact verification")
self.verify_pact()
log.info('Pact verification completed')
|
agpl-3.0
| 2,924,957,101,037,660,700
| 29.381818
| 102
| 0.622382
| false
| 4.23038
| false
| false
| false
|
rcmorano/gecosws-config-assistant
|
firstboot/validation.py
|
1
|
1642
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# This file is part of Guadalinex
#
# This software is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this package; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
__author__ = "Antonio Hernández <ahernandez@emergya.com>"
__copyright__ = "Copyright (C) 2011, Junta de Andalucía <devmaster@guadalinex.org>"
__license__ = "GPL-2"
import re
def is_empty(value):
ret = not(len(value) > 0)
#print '> %s :: %s' % (ret, value)
return ret
def is_qname(value):
m = re.search(r'^[a-zA-Z]([\w-]|\.)+$', value)
#print '> %s :: %s' % (m != None, value)
return m is not None
def is_domain(value):
m = re.search(r'[a-zA-Z0-9]{3,}\.[a-z]{2,3}$', value)
return m is not None
def is_url(value):
m = re.search(r'^(http|https|ftp|ftps|file|ldap|ldaps)://(.+)', value)
#print '> %s :: %s' % (m != None, value)
return m is not None
def is_auth_type(value):
return value == 'ldap' or value == 'ad'
def is_password(value):
""" Maybe not necesary """
return True
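# Doctest-style examples (added for illustration; each matches the regexes above):
#
#   is_qname('node-01.local')  # True
#   is_domain('guadalinex.org')  # True
#   is_url('ldaps://ldap.example.com')  # True
#   is_auth_type('ad')  # True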
|
gpl-2.0
| 2,564,201,980,378,884,000
| 31.156863
| 83
| 0.656098
| false
| 3.111954
| false
| false
| false
|
Micronaet/micronaet-product
|
inventory_field/inventory.py
|
1
|
3807
|
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import logging
import openerp
import openerp.netsvc as netsvc
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp import SUPERUSER_ID, api
from openerp import tools
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
class ProductProductInventoryCategory(orm.Model):
""" Model name: ProductProductInventoryCategory
"""
_name = 'product.product.inventory.category'
_description = 'Inventory category'
def force_no_code_category(self, cr, uid, ids, context=None):
''' Assign every product that has no default code to this category
'''
product_pool = self.pool.get('product.product')
current_proxy = self.browse(cr, uid, ids, context=context)[0]
product_ids = product_pool.search(cr, uid, [
('default_code', '=', False)], context=context)
product_pool.write(cr, uid, product_ids, {
'inventory_category_id': current_proxy.id,
}, context=context)
return True
def force_code_category(self, cr, uid, ids, context=None):
''' Assign this category to every product whose default code is listed in the code text field
'''
product_pool = self.pool.get('product.product')
current_proxy = self.browse(cr, uid, ids, context=context)[0]
code = current_proxy.code
code_list = code.split('\n')
product_ids = product_pool.search(cr, uid, [
('default_code', 'in', code_list)], context=context)
product_pool.write(cr, uid, product_ids, {
'inventory_category_id': current_proxy.id,
}, context=context)
return True
_columns = {
'name': fields.char(
'Name', size=64, required=True),
'note': fields.text('Note'),
'code': fields.text('Force code'),
}
class ProductProduct(orm.Model):
''' Link product to inventory purchase order
'''
_inherit = 'product.product'
_columns = {
# TODO No more use:
'inventory_start': fields.float(
'Inventory start', digits=(16, 3)),
'inventory_delta': fields.float(
'Inventory delta', digits=(16, 3),
help='Delta inventory for post correction retroactive'),
'inventory_date': fields.date('Inventory date'),
# XXX Inventory report (keep in isolated module?)
'inventory_category_id': fields.many2one(
'product.product.inventory.category', 'Inventory category'),
'inventory_excluded': fields.boolean('Inventory excluded'),
}
|
agpl-3.0
| 1,827,273,622,032,613,400
| 35.961165
| 79
| 0.628054
| false
| 4.20663
| false
| false
| false
|
powderblock/PyBad-Translator
|
translate.py
|
1
|
3795
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# <terry.yinzhe@gmail.com> wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return to Terry Yin.
#
# The idea of this is borrowed from <mort.yao@gmail.com>'s brilliant work
# https://github.com/soimort/google-translate-cli
# He uses "THE BEER-WARE LICENSE". That's why I use it too. So you can buy him a
# beer too.
# ----------------------------------------------------------------------------
'''
This is a simple, yet powerful command line translator with google translate
behind it. You can also use it as a Python module in your code.
'''
import re
import json
from textwrap import wrap
try:
import urllib2 as request
from urllib import quote
except:
from urllib import request
from urllib.parse import quote
class Translator:
string_pattern = r"\"(([^\"\\]|\\.)*)\""
match_string = re.compile(
r"\,?\["
+ string_pattern + r"\,"
+ string_pattern + r"\,"
+ string_pattern + r"\,"
+ string_pattern
+r"\]")
def __init__(self, to_lang, from_lang='auto'):
self.from_lang = from_lang
self.to_lang = to_lang
def translate(self, source):
self.source_list = wrap(source, 1000, replace_whitespace=False)
return ' '.join(self._get_translation_from_google(s) for s in self.source_list)
def _get_translation_from_google(self, source):
json5 = self._get_json5_from_google(source)
return self._unescape(self._get_translation_from_json5(json5))
def _get_translation_from_json5(self, content):
result = ""
pos = 2
while True:
m = self.match_string.match(content, pos)
if not m:
break
result += m.group(1)
pos = m.end()
return result
def _get_json5_from_google(self, source):
escaped_source = quote(source, '')
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.168 Safari/535.19'}
req = request.Request(
url="http://translate.google.com/translate_a/t?client=t&ie=UTF-8&oe=UTF-8"
+"&sl=%s&tl=%s&text=%s" % (self.from_lang, self.to_lang, escaped_source)
, headers=headers)
r = request.urlopen(req)
return r.read().decode('utf-8')
def _unescape(self, text):
return json.loads('"%s"' % text)
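# Illustrative module usage (added; requires network access; Python 2 like
# the rest of this file):
#
#   t = Translator(to_lang='en', from_lang='es')
#   print t.translate('hola amigos')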
def main():
import argparse
import sys
import locale
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('texts', metavar='text', nargs='+',
help='a string to translate(use "" when it\'s a sentence)')
parser.add_argument('-t', '--to', dest='to_lang', type=str, default='zh',
help='To language (e.g. zh, zh-TW, en, ja, ko). Default is zh.')
parser.add_argument('-f', '--from', dest='from_lang', type=str, default='auto',
help='From language (e.g. zh, zh-TW, en, ja, ko). Default is auto.')
args = parser.parse_args()
translator = Translator(from_lang=args.from_lang, to_lang=args.to_lang)
for text in args.texts:
translation = translator.translate(text)
if sys.version_info.major == 2:
translation = translation.encode(locale.getpreferredencoding())
sys.stdout.write(translation)
sys.stdout.write("\n")
if __name__ == "__main__":
main()
|
mit
| -4,403,544,834,307,570,700
| 39.37234
| 156
| 0.566535
| false
| 3.673766
| false
| false
| false
|
SNET-Entrance/Entrance-UM
|
src/cm/models.py
|
1
|
9801
|
import uuid
from bootstrap import db, all_attr, policy_mode
from bootstrap.models import Ext, AttrAuth
from um.models import Contact, Attribute
class Container(db.Model, Ext):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.Text, nullable=False) # name of the container
path = db.Column(db.Text, nullable=False) # path to the container file
type = db.Column(db.Integer, nullable=False) # used by the policy enforcement strategies
files = db.relationship('File', backref='container', cascade='all, delete-orphan', lazy='joined')
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
def __init__(self, name, path, type, user_id):
self.name = name
self.path = path
self.type = type
self.user_id = user_id
# reencrypts a container considering the new attribute sets of contacts
# called whenever a contact is modified
def reencrypt(self, user):
container = Container(self.name, self.path, self.type, user.id)
container.files = self.files
for f in container.files:
f.policy = Policy.generate(f.policy_text, user)
db.session.add(f)
db.session.delete(self)
db.session.add(container)
db.session.commit()
out = container.dict()
out['files'] = list()
for f in container.files:
out['files'].append(f.dict())
aa_param = dict()
aa_param['files'] = list()
for f in out['files']:
aa_param['files'].append({
"path": f['path'],
"type": f['type'],
"policy": f['policy']
})
aa_param['outfile'] = container.path
aa_param['overwriteOutfile'] = True
aa_response = AttrAuth.encrypt_container(container, aa_param)
if aa_response is None:
return None
return True
class File(db.Model, Ext):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
path = db.Column(db.Text, nullable=False) # path to the file
type = db.Column(db.Text, nullable=False) # always PABE14
policy = db.Column(db.Text, nullable=False) # the actual ABE policy
policy_text = db.Column(db.Text, nullable=False) # the specified policy of the user
container_id = db.Column(db.Integer, db.ForeignKey('container.id'))
def __init__(self, path, type, policy, policy_text, container_id):
self.path = path
self.type = type
self.policy = policy
self.policy_text = policy_text
self.container_id = container_id
class Policy(object):
def __init__(self):
pass
@staticmethod
def evaluate(policy, user):
operators = ['<', '>', '=', '<=', '>='] # list of operators to identify and evaluate numeric attributes
users = Contact.query.filter_by(user_id=user.id).all() # load all contacts of a user
literals = [x.split(':') for x in policy.split(',')] # parse submitted policy
excludes = set(
[x[0].replace('NOT ', '') for x in literals if x[0].startswith('NOT ')]) # identify explicit excludes
allowed_users = [] # initialize the authorized set of contacts
for contact in users: # iterate over the user's contacts
attrs = set([a.display_name for a in contact.attributes]) # extract attributes
for literal in literals: # for each user iterate through literals
if any(any(x in s for s in literal) for x in operators): # if any literal has an operator
condition = True
for l in literal:
operator = ''
for o in operators:
if o in l:
operator = o
if operator == '':
if l not in attrs:
condition = False
continue
else:
continue
attr, cond = l.split(operator)
present = False
for a in attrs:
if attr in a:
present = True
value = a.split('=')[1]
if not eval(
value + operator + cond): # check if the literal is met by the contact's attribute value
condition = False
if not present:
condition = False
if condition: # if condition is met check if user is in exclude list
if len(excludes.intersection(attrs)) == 0:
allowed_users.append(contact)
else: # if no numeric attribute is used in literals
if set(literal).issubset(attrs): # simply check if attributes set of contact is subset of literals
if len(excludes.intersection(
attrs)) == 0: # and ensure again that contact is not in exclude list
allowed_users.append(contact)
return list(set(allowed_users)) # return a distinct set of authorized contacts
@staticmethod
def convert(policy):
# convert a policy into an actual ABE policy
return ' OR '.join(['(' + ' AND '.join(l) + ')' for l in [x.split(':') for x in policy.split(',')]])
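# Worked example (illustrative attribute names): the policy string
# 'student:cs,faculty' converts to '(student AND cs) OR (faculty)'.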
@staticmethod
def generate(policy, current_user):
# generate a policy based on a user-specified policy dependend on the policy_mode
if policy == all_attr: # if policy is the default policy simply use it
return policy
else:
# otherwise ...
users = Policy.evaluate(policy, current_user) # compute the authorized set of contacts
if policy_mode == 0:
if 'NOT' not in policy: # normal ABE only work if no excludes have been used
return Policy.convert(policy)
# TODO: else case - what to do if excludes have been used
elif policy_mode == 1: # case: static ciphertext strategy
uuid_attr = 'AAAAA' + str(uuid.uuid4()).replace('-', '') # generate a unique attribute
attr = Attribute(uuid_attr, True, current_user.id) # store this attribute permanently
db.session.add(attr)
db.session.commit()
# and assign it to the authorized contacts
for user in users:
user.attributes.append(attr)
db.session.add(user)
aa_response = AttrAuth.add_attr(user, attr, current_user) # AA communication
if aa_response is None:
db.session.rollback()
db.session.commit()
return uuid_attr
elif policy_mode == 2: # case: static secret key strategy
return ' OR '.join([c.identity for c in
users]) # generate disjunction of identity attribute of authorized contacts
@staticmethod
def check_for(contact, user):
# check_for() is used to determine ciphertexts that have to be updated after a contact has been modified
container = Container.query.filter_by(user_id=user.id)
for c in container: # iterate over all container of a user
if c.type == 0: # case: no strategy used - do nothing
pass
elif c.type == 1: # case: static ciphertext strategy used
for f in c.files: # iterate over all files - for each file
allowed_users = Policy.evaluate(f.policy_text, user) # evaluate the policy of the file
uuid = Attribute.query.filter_by(name=f.policy).first()
if contact not in allowed_users and uuid in contact.attributes: # if contact is not in set of allowed_users after modification
contact.attributes.remove(uuid) # remove uuid attribute from the contact
db.session.add(contact)
db.session.commit()
aa_response = AttrAuth.delete_attr(contact, uuid, user) # inform AA
if aa_response is None:
db.session.rollback()
elif contact in allowed_users and uuid not in contact.attributes: # if contact is in set of allowed_users but has not the corresponding attribute
contact.attributes.append(uuid) # assign attribute to the contact
db.session.add(contact)
db.session.commit()
aa_response = AttrAuth.add_attr(contact, uuid, user) # inform AA
if aa_response is None:
db.session.rollback()
elif c.type == 2: # case: static secret key strategy used
for f in c.files: # iterate through files again
allowed_users = Policy.evaluate(f.policy_text, user) # compute authorized users
if contact not in allowed_users and contact.identity in f.policy: # if user is not intended to have access to the resource after modification
c.reencrypt(user) # reencrypt
if contact in allowed_users and contact.identity not in f.policy: # if user is intended to have access to the resource after the modification
c.reencrypt(user) # reencrypt
# TODO: make this easier
|
apache-2.0
| 6,430,938,400,690,601,000
| 49.261538
| 166
| 0.550352
| false
| 4.638429
| false
| false
| false
|
PXke/invenio
|
invenio/ext/assets/extensions.py
|
1
|
8258
|
# -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
invenio.ext.assets.extensions
-----------------------------
This module contains custom `Jinja2` extensions.
"""
from operator import itemgetter
from jinja2 import nodes
from jinja2.ext import Extension
ENV_PREFIX = '_collected_'
def prepare_tag_bundle(cls, tag):
"""
Construct function that returns collected data specified
in jinja2 template like `{% <tag> <value> %}` in correct
order.
Here is an example that shows the final order when template
inheritance is used::
example.html
------------
{%\ extends 'page.html' %}
{%\ css 'template2.css' %}
{%\ css 'template3.css' %}
page.html
---------
{%\ css 'template1.css' %}
{{ get_css_bundle() }}
Output:
-------
[template1.css, template2.css, template3.css]
"""
def get_bundle(key=None, iterate=False):
def _get_data_by_key(data_, key_):
return map(itemgetter(1), filter(lambda (k, v): k == key_, data_))
data = getattr(cls.environment, ENV_PREFIX+tag)
if iterate:
bundles = sorted(set(map(itemgetter(0), data)))
def _generate_bundles():
for bundle in bundles:
cls._reset(tag, bundle)
yield cls.environment.new_bundle(tag,
_get_data_by_key(data,
bundle),
bundle)
return _generate_bundles()
else:
if key is not None:
data = _get_data_by_key(data, key)
else:
bundles = sorted(set(map(itemgetter(0), data)))
data = [f for bundle in bundles
for f in _get_data_by_key(data, bundle)]
cls._reset(tag, key)
return cls.environment.new_bundle(tag, data, key)
return get_bundle
class CollectionExtension(Extension):
"""
CollectionExtension adds new tags `css` and `js` and functions
``get_css_bundle`` and ``get_js_bundle`` for jinja2 templates.
The ``new_bundle`` method is used to create bundle from
list of file names collected using `css` and `js` tags.
Example: simple case
{% css 'css/invenio.css' %}
{% js 'js/jquery.js' %}
{% js 'js/invenio.js' %}
...
{% assets get_css_bundle() %}
<link rel="stylesheet" type="text/css" href="{{ ASSET_URL }}"></link>
{% endassets %}
{% assets get_js_bundle() %}
In template, use {{ ASSETS_URL }} for printing file URL.
{% endassets %}
Example: named bundles
record.html:
{% extend 'page.html' %}
{% css 'css/may-vary.css' %}
# default bundle name can be changed in application factory
# app.jinja_env.extend(default_bundle_name='90-default')
{% css 'css/record.css', '10-record' %}
{% css 'css/form.css', '10-record' %}
page.html:
{% css 'css/bootstrap.css', '00-base' %}
{% css 'css/invenio.css', '00-base' %}
...
{% for bundle in get_css_bundle(iterate=True) %}
{% assets bundle %}
<link rel="stylesheet" type="text/css" href="{{ ASSET_URL }}"></link>
{% endassets %}
{% endfor %}
Output:
<link rel="stylesheet" type="text/css" href="/css/00-base.css"></link>
<link rel="stylesheet" type="text/css" href="/css/10-record.css"></link>
<link rel="stylesheet" type="text/css" href="/css/90-default.css"></link>
Note:
If you decide not to use assets bundle but directly print
stylesheet and script html tags, you MUST define:
```
_app.jinja_env.extend(
use_bundle = False,
collection_templates = {
'css': '<link rel="stylesheet" type="text/css" href="/%s"></link>',
'js': '<script type="text/javascript" src="/%s"></script>'
})
```
Both callable and string with '%s' are allowed in
``collection_templates``.
"""
tags = set(['css', 'js'])
def __init__(self, environment):
super(CollectionExtension, self).__init__(environment)
ext = dict(('get_%s_bundle' % tag, prepare_tag_bundle(self, tag))
for tag in self.tags)
environment.extend(
default_bundle_name='10-default',
use_bundle=True,
collection_templates=dict((tag, lambda x: x) for tag in self.tags),
new_bundle=lambda tag, collection, name: collection,
**ext)
for tag in self.tags:
self._reset(tag)
def _reset(self, tag, key=None):
"""
Empty list of used scripts.
"""
if key is None:
setattr(self.environment, ENV_PREFIX+tag, [])
else:
data = filter(lambda (k, v): k != key,
getattr(self.environment, ENV_PREFIX+tag))
setattr(self.environment, ENV_PREFIX+tag, data)
def _update(self, tag, value, key, caller=None):
"""
Update list of used scripts.
"""
try:
values = getattr(self.environment, ENV_PREFIX+tag)
values.append((key, value))
        except AttributeError:
            # first use of this tag: the environment attribute does not exist yet
values = [(key, value)]
setattr(self.environment, ENV_PREFIX+tag, values)
return ''
def parse(self, parser):
"""
Parse Jinja statement tag defined in `self.tags` (default: css, js).
        This actually tries to build the corresponding html script tag
        or collect the script file name in a jinja2 environment variable.
If you use bundles it is important to call ``get_css_bundle``
or ``get_js_bundle`` in template after all occurrences of
script tags (e.g. {% css ... %}, {% js ...%}).
"""
tag = parser.stream.current.value
lineno = next(parser.stream).lineno
        default_bundle_name = u"%s" % (self.environment.default_bundle_name)
        bundle_name = nodes.Const(default_bundle_name)
        # parse filename
if parser.stream.current.type != 'block_end':
value = parser.parse_expression()
# get first optional argument: bundle_name
if parser.stream.skip_if('comma'):
bundle_name = parser.parse_expression()
if isinstance(bundle_name, nodes.Name):
bundle_name = nodes.Name(bundle_name.name, 'load')
else:
value = parser.parse_tuple()
args = [nodes.Const(tag), value, bundle_name]
# Return html tag with link to corresponding script file.
if self.environment.use_bundle is False:
value = value.value
if callable(self.environment.collection_templates[tag]):
node = self.environment.collection_templates[tag](value)
else:
node = self.environment.collection_templates[tag] % value
return nodes.Output([nodes.MarkSafeIfAutoescape(nodes.Const(node))])
# Call :meth:`_update` to collect names of used scripts.
return nodes.CallBlock(self.call_method('_update', args=args,
lineno=lineno),
[], [], '')
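# Minimal wiring sketch for the extension above (assumption: a Flask app
# object named `app`; CollectionExtension registers like any other Jinja2
# extension and its defaults come from __init__):
#
# app.jinja_env.add_extension(
#     'invenio.ext.assets.extensions.CollectionExtension')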
|
gpl-2.0
| -8,043,150,594,907,580,000
| 35.061135
| 85
| 0.553039
| false
| 4.191878
| false
| false
| false
|
gr33ndata/dysl
|
dysl/corpora/corpuslib/train.py
|
1
|
3488
|
import os
import codecs
import time
from datetime import datetime
class Train:
def __init__(self, root=''):
# Setting root directory for training data
if root:
self.root = root
self.using_builtin_training = False
else:
#self.root = 'corpora/corpus-esaren'
self.root = __file__.rsplit('/',2)[0] + '/corpus-esaren'
self.using_builtin_training = True
#print self.root
self.root_depth = len(self.root.split('/'))
# Set of languages
self.lang_set = set()
# Temp Training Samples
# These are sample adding in run-time
# self.temp_train_data = {
# 'en': ['hello world', 'this is sparta'],
# 'es': ['hasta la vista', 'hola amigos']
# }
self.temp_train_data = {}
def get_corpus(self):
self.corpus = []
self.load()
return self.corpus
def get_corpus_path(self):
return self.root
def get_lang_set(self):
return list(self.lang_set)
def add(self, text=u'', lang=''):
if self.using_builtin_training:
print "Warning: Cannot add training samples to builtin training-set."
return
elif not text or not lang:
raise Exception("Error: No input text given!")
if not lang in self.temp_train_data:
self.temp_train_data[lang] = [text]
else:
self.temp_train_data[lang].append(text)
def save(self, domain='', filename=''):
if self.using_builtin_training:
raise Exception("Failed to save data, use custom training-set instead.")
if not domain:
timestamp = datetime.now().strftime("%y%m%d%H%M%S")
folder_path = self.root + '/batchTS' + timestamp
else:
folder_path = self.root + '/' + domain
try:
os.mkdir(folder_path)
        except OSError:
            # directory already exists
            pass
for lang in self.temp_train_data:
lang_folder_path = folder_path + '/' + lang
try:
os.mkdir(lang_folder_path)
            except OSError:
                # directory already exists
                pass
if not filename:
filename_and_path = lang_folder_path + '/file.txt'
else:
filename_and_path = lang_folder_path + '/' + filename
f = codecs.open(filename_and_path, mode='w', encoding='utf-8')
for sample in self.temp_train_data[lang]:
text = sample + u'\n'
f.write(text)
f.close()
def get_last_modified(self):
# Get corpus last modified timestamp
if self.using_builtin_training:
return 0
else:
return os.path.getmtime(self.root)
def visit(self, arg, dirname, names):
#print dirname
path = dirname.split('/')
#print 'path:', path, len(path)
if len(path) == self.root_depth + 2:
lang = path[-1]
# Update Language Set
self.lang_set.add(lang)
# Ignore hidden files
names = [name for name in names if not name.startswith('.')]
for name in names:
self.corpus.append((lang, dirname + '/' + name))
#print lang, path, dirname + '/' + name
    def load(self):
        # os.path.walk is Python 2 only; os.walk is its Python 3 replacement
        os.path.walk(self.root, self.visit, '')
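# Usage sketch for the class above (assumption: /tmp is writable; the corpus
# layout <root>/<domain>/<lang>/<file> is created by save()):
#
# trainer = Train(root='/tmp/dysl-demo-corpus')
# trainer.add(text=u'hola amigos', lang='es')
# trainer.save(domain='demo', filename='greetings.txt')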
|
mit
| -8,860,003,400,784,678,000
| 28.820513
| 84
| 0.509461
| false
| 4.192308
| false
| false
| false
|
ondrokrc/gramps
|
gramps/gen/filters/rules/_hasldsbase.py
|
1
|
2527
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2008 Jerome Rapinat
# Copyright (C) 2008 Benny Malengier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# gen.filters.rules/_HasLDSBase.py
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ...const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from . import Rule
#-------------------------------------------------------------------------
#
# HasLDSBase
#
#-------------------------------------------------------------------------
class HasLDSBase(Rule):
"""Rule that checks for object with a LDS event"""
labels = [ _('Number of instances:'), _('Number must be:')]
name = 'Objects with LDS events'
description = "Matches objects with LDS events"
category = _('General filters')
def prepare(self, db):
# things we want to do just once, not for every handle
if self.list[1] == 'less than':
self.count_type = 0
elif self.list[1] == 'greater than':
self.count_type = 2
else:
self.count_type = 1 # "equal to"
self.userSelectedCount = int(self.list[0])
def apply(self, db, obj):
count = len( obj.get_lds_ord_list())
if self.count_type == 0: # "less than"
return count < self.userSelectedCount
elif self.count_type == 2: # "greater than"
return count > self.userSelectedCount
# "equal to"
return count == self.userSelectedCount
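# Usage sketch (assumptions: `db` is an open Gramps database, `person` a
# person object, and concrete rules are constructed with their option list):
#
# rule = HasLDSBase(['2', 'greater than'])
# rule.prepare(db)
# matches = rule.apply(db, person)  # True if person has more than 2 LDS events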
|
gpl-2.0
| -4,215,482,488,620,058,600
| 34.591549
| 79
| 0.540166
| false
| 4.402439
| false
| false
| false
|
att-comdev/drydock
|
drydock_provisioner/control/health.py
|
1
|
4369
|
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import falcon
import json
from drydock_provisioner.control.base import StatefulResource
from drydock_provisioner.drivers.node.maasdriver.actions.node import ValidateNodeServices
from drydock_provisioner.objects.healthcheck import HealthCheck
from drydock_provisioner.objects.healthcheck import HealthCheckMessage
from drydock_provisioner.objects.fields import ActionResult
import drydock_provisioner.objects.fields as hd_fields
import drydock_provisioner.policy as policy
class HealthResource(StatefulResource):
"""
    Returns an empty response body indicating whether Drydock is healthy.
"""
def __init__(self, orchestrator=None, **kwargs):
"""Object initializer.
:param orchestrator: instance of Drydock orchestrator
"""
super().__init__(**kwargs)
self.orchestrator = orchestrator
def on_get(self, req, resp):
"""
Returns 204 on healthy, otherwise 503, without response body.
"""
hc = HealthCheckCombined(
state_manager=self.state_manager,
orchestrator=self.orchestrator,
extended=False)
return hc.get(req, resp)
class HealthExtendedResource(StatefulResource):
"""
    Returns a response body describing whether Drydock is healthy.
"""
def __init__(self, orchestrator=None, **kwargs):
"""Object initializer.
:param orchestrator: instance of Drydock orchestrator
"""
super().__init__(**kwargs)
self.orchestrator = orchestrator
@policy.ApiEnforcer('physical_provisioner:health_data')
def on_get(self, req, resp):
"""
Returns 200 on success, otherwise 503, with a response body.
"""
hc = HealthCheckCombined(
state_manager=self.state_manager,
orchestrator=self.orchestrator,
extended=True)
return hc.get(req, resp)
class HealthCheckCombined(object):
"""
Returns Drydock health check status.
"""
def __init__(self, state_manager=None, orchestrator=None, extended=False):
"""Object initializer.
:param orchestrator: instance of Drydock orchestrator
"""
self.state_manager = state_manager
self.orchestrator = orchestrator
self.extended = extended
def get(self, req, resp):
"""
Returns updated response with body if extended.
"""
health_check = HealthCheck()
# Test database connection
try:
now = self.state_manager.get_now()
if now is None:
raise Exception('None received from database for now()')
except Exception as ex:
hcm = HealthCheckMessage(
msg='Unable to connect to database', error=True)
health_check.add_detail_msg(msg=hcm)
# Test MaaS connection
try:
task = self.orchestrator.create_task(
action=hd_fields.OrchestratorAction.Noop)
maas_validation = ValidateNodeServices(task, self.orchestrator,
self.state_manager)
maas_validation.start()
if maas_validation.task.get_status() == ActionResult.Failure:
raise Exception('MaaS task failure')
except Exception as ex:
hcm = HealthCheckMessage(
msg='Unable to connect to MaaS', error=True)
health_check.add_detail_msg(msg=hcm)
if self.extended:
resp.body = json.dumps(health_check.to_dict())
if health_check.is_healthy() and self.extended:
resp.status = falcon.HTTP_200
elif health_check.is_healthy():
resp.status = falcon.HTTP_204
else:
resp.status = falcon.HTTP_503
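# Wiring sketch (assumptions: a falcon.API instance plus Drydock's state
# manager and orchestrator objects; the route paths are illustrative):
#
# api = falcon.API()
# api.add_route('/api/v1.0/health',
#               HealthResource(state_manager=state, orchestrator=orch))
# api.add_route('/api/v1.0/health/extended',
#               HealthExtendedResource(state_manager=state, orchestrator=orch))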
|
apache-2.0
| 5,482,039,274,584,251,000
| 33.674603
| 89
| 0.642939
| false
| 4.098499
| false
| false
| false
|
thedemz/python-gems
|
scrypto.py
|
1
|
5153
|
#https://bitbucket.org/mhallin/py-scrypt/src
#pip3.4 install scrypt
#pip3.4 install pycrypto
import struct
from binascii import b2a_base64 as e64
from binascii import a2b_base64 as d64
import scrypt
import Crypto.Random
random = Crypto.Random.new().read
#from passlib.utils import consteq
def consteq(left, right):
"""Check two strings/bytes for equality.
This is functionally equivalent to ``left == right``,
but attempts to take constant time relative to the size of the righthand input.
The purpose of this function is to help prevent timing attacks
during digest comparisons: the standard ``==`` operator aborts
    after the first mismatched character, causing its runtime to be
proportional to the longest prefix shared by the two inputs.
If an attacker is able to predict and control one of the two
inputs, repeated queries can be leveraged to reveal information about
the content of the second argument. To minimize this risk, :func:`!consteq`
is designed to take ``THETA(len(right))`` time, regardless
of the contents of the two strings.
It is recommended that the attacker-controlled input
be passed in as the left-hand value.
.. warning::
This function is *not* perfect. Various VM-dependant issues
(e.g. the VM's integer object instantiation algorithm, internal unicode representation, etc),
may still cause the function's run time to be affected by the inputs,
though in a less predictable manner.
*To minimize such risks, this function should not be passed* :class:`unicode`
*inputs that might contain non-* ``ASCII`` *characters*.
.. versionadded:: 1.6
"""
# NOTE:
# resources & discussions considered in the design of this function:
# hmac timing attack --
# http://rdist.root.org/2009/05/28/timing-attack-in-google-keyczar-library/
# python developer discussion surrounding similar function --
# http://bugs.python.org/issue15061
# http://bugs.python.org/issue14955
# validate types
if isinstance(left, str):
        if not isinstance(right, str):
raise TypeError("inputs must be both unicode or both bytes")
is_py3_bytes = False
elif isinstance(left, bytes):
if not isinstance(right, bytes):
raise TypeError("inputs must be both unicode or both bytes")
is_py3_bytes = True #Python3
else:
raise TypeError("inputs must be both unicode or both bytes")
# do size comparison.
# NOTE: the double-if construction below is done deliberately, to ensure
# the same number of operations (including branches) is performed regardless
# of whether left & right are the same size.
same_size = (len(left) == len(right))
if same_size:
# if sizes are the same, setup loop to perform actual check of contents.
tmp = left
result = 0
if not same_size:
# if sizes aren't the same, set 'result' so equality will fail regardless
# of contents. then, to ensure we do exactly 'len(right)' iterations
# of the loop, just compare 'right' against itself.
tmp = right
result = 1
    # run constant-time string comparison
# TODO: use izip instead (but first verify it's faster than zip for this case)
if is_py3_bytes:
for l,r in zip(tmp, right):
result |= l ^ r
else:
for l,r in zip(tmp, right):
result |= ord(l) ^ ord(r)
return result == 0
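# Example of the intended call pattern (digests are illustrative; pass the
# attacker-controlled value as the left-hand argument, per the docstring):
#
# assert consteq(b'deadbeef', b'deadbeef')
# assert not consteq(b'deadbeef', b'deadbeee')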
_PARAMS = struct.Struct("!BBBB")  # four standard-size unsigned chars (1 byte each)
def pack_verifier( logN,r,p, salt, hash):
"""
    Return a bytes object containing logN, r, p and the salt length packed
    as four unsigned bytes (format '!BBBB'), followed by the raw salt and hash.
"""
packed = _PARAMS.pack(logN,r,p,len(salt)) + salt + hash
return packed
def unpack_verifier(verifier):
logN,r,p,salt_bytes = _PARAMS.unpack_from(verifier)
i = _PARAMS.size+salt_bytes
salt = verifier[_PARAMS.size:i]
hash = verifier[i:]
return logN,r,p,salt,hash
def make_verifier( password, logN=14, r=8, p=1, salt_bytes=16,hash_bytes=16):
"""
    Factory function: hashes the password with a fresh random salt and
    returns the packed verifier bytes.
"""
salt = random(salt_bytes)
hash = scrypt.hash(password,salt,1<<logN,r,p,hash_bytes)
return pack_verifier(logN,r,p,salt,hash)
def verify_password( password, verifier ):
logN,r,p,salt,hash = unpack_verifier(verifier)
newhash = scrypt.hash(password,salt,1<<logN,r,p,len(hash))
return consteq(newhash,hash)
def ev( verifier ):
"""
Create a ev string from a verifier.
"""
return e64(verifier).strip()
def get_verifier( ev ):
"""
Create a verifier from a ev string.
"""
return d64(ev)
if __name__=="__main__":
v = make_verifier( "password" )
print(verify_password( "password", v))#True
print(verify_password( "Password", v))#False
ev = e64(v).strip()
print(ev)#b'DggBECLLfyJNB/HlbT9m6nByPq0334rbufeNV191YNNWOImZ'
# store ev in database
print( verify_password("password",d64(ev)))#True
|
mit
| 2,875,160,194,208,963,600
| 34.784722
| 116
| 0.668737
| false
| 3.720578
| false
| false
| false
|
forScie/RAAED
|
RAAEDServer.py
|
1
|
5949
|
#!/usr/bin/env python3
# RAAED Server software: v1.0
# A GUI RAAED Server
# Detects a reverse SSH connection bound to port 22 from an RAAED Client.
#
# DESCRIPTION
# The server is designed to continually check for the presence of a reverse SSH session on port 22.
# The GUI will then reflect the presence of the reverse SSH session.
# A Shell in the context of the reverse SSH session can be launched through clicking a button.
#
# SSH REQUIREMENTS
# This script requires an SSH service to be active and running locally.
# /etc/ssh/sshd_config should be configured to allow public key authentication, and operate on port 443.
# a valid private RSA key for the RAAED Client should be placed in ~/.ssh (id_rsa)
# a valid public key with an associated private key on the RAAED Client should be located in ~/.ssh (id_rsa.pub)
#
# THIRD PARTY DEPENDENCIES
# pip3 install psutil
# pip3 install appjar
#
# AUTHOR: forScience (james@forscience.xyz)
#
# INDENT: TABS
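# Example sshd_config fragment matching the requirements above (assumption:
# stock OpenSSH; adjust paths and ports to your environment):
#
#   Port 443
#   PubkeyAuthentication yes
#   PasswordAuthentication no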
import sys
import os
import threading
import subprocess
import psutil
import time
from appJar import gui
# Checks if port 22 is listening on localhost.
# Called in a thread at launch. Runs in the background.
# If the the port is open then update GUI to reflect change
def connection_check():
    # loop infinitely (in background)
while True:
time.sleep(2)
# retrieve tuples of all local IPv4 connections (in form of [IP, Port])
local = psutil.net_connections('inet4')
        connect = False  # reset the flag at the start of each iteration
        # iterate through local IPv4 tuples
for address in local:
(ip, port) = address.laddr # assign each tuple to local variables
# check each IP for localhost and Port for 22
if ip == '127.0.0.1' and port == 22:
connect = True # set flag to indicate connection
# if flag has been set then connection exists
if connect:
# only update GUI if port 22 on localhost is found
gui_update("connected")
else:
# otherwise GUI continues to indicate disconnection
gui_update("disconnected")
# Updates GUI to show client connection state
# Called by connection_check() depending on local port activity
# Updates indicator and text to reflect state
def gui_update(update):
if update == "connected":
# update gui to reflect connection
# update indicator
app.setLabel("indicator", "Connected") # update GUI indicator text
app.setLabelBg("indicator", "green") # update GUI indicator colour
# update text
app.setLabelFg("text", "green") # update GUI text colour
text = "Connected to client" # create explanation string
app.setLabel("text", text) # update GUI with explanation string
elif update == "disconnected":
# update gui to reflect disconnection
# update indicator
app.setLabel("indicator", "Disconnected") # update GUI indicator text
app.setLabelBg("indicator", "red") # update GUI indicator colour
# update text
app.setLabelFg("text", "red") # update GUI text colour
text = "No connection from client" # create explanation string
app.setLabel("text", text) # update GUI with explanation string
elif update == "list targets":
# update gui with targets from client
# open retrieved network list file
with open('/root/Desktop/network.list', 'r') as file:
            iplist = file.read()  # read the whole network list into a string
# display targets in gui
app.setMessage('enumeration', iplist)
# Spawns an SSH session in a new shell
# gnome-terminal only works within the GNOME DE
def spawn_shell(btn):
# terminal remains open after command issued with '-x'
subprocess.call(['gnome-terminal', '-x', 'ssh', 'localhost'])
# Connects via scp to RAAED Client and retrieves a list of
# IPs enumerated on the Clients local network.
# The list is displayed in the GUI
def get_enum(btn):
# define local and remote list locations
localdest = "/root/Desktop/network.list"
remotedest = "/root/Desktop/network.list"
# retrieve enumeration txt files from client
sshcopy = "scp root@localhost:" + remotedest + " " + localdest # build ssh copy command
copyresult = subprocess.call(sshcopy, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) # execute scp command
# if scp was successful
if copyresult == 0:
# update gui and delete localdest file
gui_update('list targets')
delfile = "rm " + localdest # build command to delete local network.list file
subprocess.call(delfile, shell=True) # delete file
# Entry
if __name__ == "__main__":
# put connection_check() in a thread and background
thread = threading.Thread(target=connection_check, args=())
thread.daemon = True # daemonised for clean closure, ok to kill with main
thread.start() # start daemon thread
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<
# GUI ELEMENTS
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>
# create the GUI & set a title
app = gui("RAAED Server")
app.setBg("white")
app.setFont(12, font="Arial")
app.setSticky("nesw")
app.setResizable(canResize=False)
# RAAED CONNECTION STATUS
app.startLabelFrame("Connection Status")
app.setLabelFramePadding("Connection Status", 4, 8)
# connection indicator
app.addLabel("indicator", "Disconnected", 0, 0)
app.setLabelBg("indicator", "red")
app.setLabelFg("indicator", "white")
app.setLabelPadding("indicator", 2, 5)
# explanation text
app.addLabel("text", "No connection from client", 0, 1)
app.setLabelFg("text", "red")
app.setLabelPadding("text", 4, 8)
# end frame
app.stopLabelFrame()
# SPAWN SHELL AND RETRIEVE ENUM BUTTONS
app.startLabelFrame("")
app.setLabelFramePadding("", 4, 8)
# spawn shell button
app.addButton("Spawn Shell", spawn_shell, 0, 0)
# retrieve enumeration button
app.addButton("Show Remote Hosts", get_enum, 0, 1)
# end bottom frame
app.stopLabelFrame()
# REMOTE TARGET LIST
app.startLabelFrame("Remote Network Hosts")
app.setLabelFramePadding("Remote Network Hosts", 4, 8)
# spawn shell button
app.addEmptyMessage("enumeration")
# end bottom frame
app.stopLabelFrame()
# start GUI
app.go()
|
gpl-3.0
| -6,022,877,912,289,613,000
| 31.872928
| 126
| 0.723651
| false
| 3.476914
| false
| false
| false
|
nel215/py-sae
|
dataset.py
|
1
|
1536
|
#coding: utf-8
import requests
import os.path
import pickle
def get_binary_dataset():
# 0-1 dataset
dataset = requests.get('https://archive.ics.uci.edu/ml/machine-learning-databases/spect/SPECT.train').text
dataset = map(lambda row: row.split(','), dataset.split('\n'))
titles = dataset[0]
dataset = dataset[1:]
dataset = filter(lambda data: len(data) > 1, dataset)
features = map(lambda data: map(float, data[:-1]), dataset)
labels = map(lambda data: map(float, data[-1:]), dataset)
return (features, labels)
def get_mushroom_dataset():
filename = './tmp/mushroom.dat'
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
if os.path.isfile(filename):
        # return the cached dataset; the context manager closes the file
        with open(filename, 'r') as f:
            return pickle.load(f)
dataset = requests.get('http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/mushrooms').text
num_feature = 112
features = []
labels = []
dataset = filter(lambda data: len(data)>1, dataset.split('\n'))
for data in dataset:
data = data.split(' ')
labels.append([1] if data[0] == '2' else [0])
feature = [0 for f in xrange(num_feature)]
for [bin, _] in map(lambda d: d.split(':'), filter(lambda d: len(d)>1, data[1:])):
feature[int(bin)-1] = 1
features.append(feature)
result = (features, labels)
    with open(filename, 'w') as f:
        pickle.dump(result, f)
return result
if __name__=='__main__':
get_mushroom_dataset()
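# Usage sketch (network access is needed on the first call; the result is
# cached in ./tmp/mushroom.dat afterwards):
#
# features, labels = get_mushroom_dataset()
# print len(features), len(features[0])  # 8124 samples x 112 binary features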
|
mit
| -6,360,485,346,207,833,000
| 31.680851
| 110
| 0.617188
| false
| 3.289079
| false
| false
| false
|
MGautier/security-sensor
|
trunk/version-1-0/webapp/secproject/secproject/settings.py
|
1
|
3617
|
"""
Django settings for secproject project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3m&&ig6ksy_fy=sc4n8)foq&*-%ug*la@5d@8m*u1s%fcs2rsz'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'gunicorn',
'react',
'rest_framework',
'secapp.apps.SecappConfig',
]
REACT = {
'RENDER': not DEBUG,
'RENDER_URL': 'http://127.0.0.1:8001/render',
}
# REST_FRAMEWORK = {
# 'DEFAULT_RENDERER_CLASSES': (
# 'rest_framework.renderers.JSONRenderer',
# ),
# 'DEFAULT_PARSER_CLASSES': (
# 'rest_framework.parsers.JSONParser',
# )
# }
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'secproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'secproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'database.db'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'es-ES'
TIME_ZONE = 'Europe/Madrid'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "secapp/static/")
|
mit
| -4,357,623,697,613,626,400
| 25.021583
| 91
| 0.679292
| false
| 3.421949
| false
| false
| false
|
jwacalex/MULTEX-EAST-PoS-Tagger
|
MTEDownloader.py
|
1
|
3734
|
"""
Downloader for the MULTEXT-East corpus.
"""
import os
from os.path import expanduser, abspath
import sys
try:
    import urllib.request as urlrequest  # Python 3
except ImportError:
    import urllib as urlrequest  # Python 2
import zipfile
import nltk.data
isCustomPath = False
def main():
download()
def download():
try:
__download__()
except KeyboardInterrupt:
print("\nDiscarded download due to keyboard interrupt.\n")
def __getFilePath__():
global isCustomPath
paths = list(zip(range(len(nltk.data.path)+1), nltk.data.path + ["custom"]))
pathStr = ""
try:
pathStr = raw_input("Where should the corpus be saved?" + str(paths) + " [%s]: " % 0)
except:
pathStr = input("Where should the corpus be saved?" + str(paths) + " [%s]: " % 0)
pathNum = None
if pathStr:
pathNum = int(pathStr)
else:
pathNum = 0
if (pathNum == len(nltk.data.path)):
isCustomPath = True
try:
return abspath(raw_input(
"Please input the directory where you want the files to be saved (NO backslash at the end): ")) + "/"
except:
return abspath(input(
"Please input the directory where you want the files to be saved (NO backslash at the end): ")) + "/"
else:
return abspath(nltk.data.path[pathNum]) + "/corpora/"
def __download__():
filePath = __getFilePath__()
finished = False
try:
if not os.path.exists(filePath):
os.makedirs(filePath)
except EnvironmentError:
print("Could not create or write to file")
else:
# download zip archive
with open(filePath + "mte_teip5.zip", "wb") as f:
url = "https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1043/MTE1984-ana.zip"
            request = urlrequest.urlopen(url)
chunk_read_write(f, request, report_hook=chunk_report)
print("Download finished")
# handle "invalid" zip format from clarin.si
with open(filePath + "mte_teip5.zip", "r+b") as f:
content = f.read()
pos = content.rfind(
b'\x50\x4b\x05\x06') # reverse find: this string of bytes is the end of the zip's central directory.
if pos > 0:
                f.seek(pos + 20)  # +20: offset of the comment-length field in the end-of-central-directory record
f.truncate()
f.write(b'\x00\x00') # Zip file comment length: 0 byte length; tell zip applications to stop reading.
f.seek(0)
# extract zip archive
print("Extracting files...")
with zipfile.ZipFile(filePath + "mte_teip5.zip", "r") as z:
z.extractall(filePath)
os.rename(filePath + "MTE1984-ana", filePath + "mte_teip5")
print("Done")
def chunk_report(bytes_so_far, chunk_size, total_size):
percent = float(bytes_so_far) / total_size
percent = round(percent * 100, 2)
sys.stdout.write("Downloaded %d of %d bytes (%0.2f%%)\r" %
(bytes_so_far, total_size, percent))
if bytes_so_far >= total_size:
sys.stdout.write('\n')
def chunk_read_write(fileHandle, response, chunk_size=8192, report_hook=None):
try:
total_size = response.info().getheader('Content-Length').strip()
    except AttributeError:
        # Python 3: headers are read straight off the response object
total_size = response.getheader('Content-Length').strip()
total_size = int(total_size)
bytes_so_far = 0
while 1:
chunk = response.read(chunk_size)
fileHandle.write(chunk)
bytes_so_far += len(chunk)
if not chunk:
break
if report_hook:
report_hook(bytes_so_far, chunk_size, total_size)
return bytes_so_far
if __name__ == "__main__":
main()
|
lgpl-3.0
| 4,770,773,840,565,016,000
| 28.171875
| 118
| 0.580343
| false
| 3.65362
| false
| false
| false
|
bak1an/django
|
django/utils/timesince.py
|
1
|
2806
|
import calendar
import datetime
from django.utils.html import avoid_wrapping
from django.utils.timezone import is_aware, utc
from django.utils.translation import gettext, ngettext_lazy
TIMESINCE_CHUNKS = (
(60 * 60 * 24 * 365, ngettext_lazy('%d year', '%d years')),
(60 * 60 * 24 * 30, ngettext_lazy('%d month', '%d months')),
(60 * 60 * 24 * 7, ngettext_lazy('%d week', '%d weeks')),
(60 * 60 * 24, ngettext_lazy('%d day', '%d days')),
(60 * 60, ngettext_lazy('%d hour', '%d hours')),
(60, ngettext_lazy('%d minute', '%d minutes'))
)
def timesince(d, now=None, reversed=False):
"""
Takes two datetime objects and returns the time between d and now
as a nicely formatted string, e.g. "10 minutes". If d occurs after now,
then "0 minutes" is returned.
Units used are years, months, weeks, days, hours, and minutes.
Seconds and microseconds are ignored. Up to two adjacent units will be
displayed. For example, "2 weeks, 3 days" and "1 year, 3 months" are
possible outputs, but "2 weeks, 3 hours" and "1 year, 5 days" are not.
Adapted from
http://web.archive.org/web/20060617175230/http://blog.natbat.co.uk/archive/2003/Jun/14/time_since
"""
# Convert datetime.date to datetime.datetime for comparison.
if not isinstance(d, datetime.datetime):
d = datetime.datetime(d.year, d.month, d.day)
if now and not isinstance(now, datetime.datetime):
now = datetime.datetime(now.year, now.month, now.day)
if not now:
now = datetime.datetime.now(utc if is_aware(d) else None)
if reversed:
d, now = now, d
delta = now - d
    # Deal with leap years by subtracting the number of leap days
leapdays = calendar.leapdays(d.year, now.year)
if leapdays != 0:
if calendar.isleap(d.year):
leapdays -= 1
elif calendar.isleap(now.year):
leapdays += 1
delta -= datetime.timedelta(leapdays)
# ignore microseconds
since = delta.days * 24 * 60 * 60 + delta.seconds
if since <= 0:
# d is in the future compared to now, stop processing.
return avoid_wrapping(gettext('0 minutes'))
for i, (seconds, name) in enumerate(TIMESINCE_CHUNKS):
count = since // seconds
if count != 0:
break
result = avoid_wrapping(name % count)
if i + 1 < len(TIMESINCE_CHUNKS):
# Now get the second item
seconds2, name2 = TIMESINCE_CHUNKS[i + 1]
count2 = (since - (seconds * count)) // seconds2
if count2 != 0:
result += gettext(', ') + avoid_wrapping(name2 % count2)
return result
def timeuntil(d, now=None):
"""
Like timesince, but returns a string measuring the time until
the given time.
"""
return timesince(d, now, reversed=True)
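# Usage sketch (values are illustrative; avoid_wrapping replaces the spaces
# in the result with non-breaking spaces):
#
# from datetime import datetime
# timesince(datetime(2017, 1, 1), datetime(2017, 1, 15))  # '2 weeks'
# timeuntil(datetime(2017, 1, 15), datetime(2017, 1, 1))  # '2 weeks'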
|
bsd-3-clause
| -6,810,944,789,384,965,000
| 34.974359
| 101
| 0.629722
| false
| 3.542929
| false
| false
| false
|
seece/cbpp
|
minifyoperation.py
|
1
|
1252
|
import re
from util import *
from operation import Operation, OperationResult
class Replacement:
def __init__(self, regex, substitution):
self.regex = regex
self.substitution = substitution
class MinifyOperation(Operation):
def __init__(self):
self.inMultilineComment = False
pass
def apply(self, line, state):
result = OperationResult(line, False)
if not state.args.minify:
return result
l = stripComments(line)
strings = scanForStrings(l)
commentStart = len(l)
stringRegex = r'(("[^"]+")|(|[^"]*?)([^\s]*?))?'
comments = r'(?P<comment>(|(\'|//)*$))'
def string(s):
if not s:
return ""
return s
def replace(m, group):
if checkIfInsideString(m.start(group), strings):
return string(m.group(0))
return string(m.group(1)) + string(m.group(group))
ops = []
ops.append(Replacement(re.compile(r'' + stringRegex + '\s*(?P<op>[=+\-*/\><,\^]{1,2})\s*'), lambda m: replace(m, "op")))
ops.append(Replacement(re.compile(r'' + stringRegex + r'(?<=\D)(0)(?P<digit>\.\d+)'), lambda m: replace(m, "digit") ))
#l = l.lstrip("\t")
for o in ops:
l = o.regex.sub(o.substitution, l)
l = l.rstrip("\r\n")
result.line = strInsert(result.line, 0, commentStart-1, l)
return result
|
mit
| -1,057,761,309,854,611,000
| 23.54902
| 122
| 0.620607
| false
| 2.884793
| false
| false
| false
|
SummerLW/Perf-Insight-Report
|
dashboard/dashboard/bisect_stats.py
|
1
|
3742
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""URL endpoints to show bisect stats."""
import datetime
import json
from dashboard import layered_cache
from dashboard import request_handler
from dashboard import utils
_BISECT_STATS_CACHE_KEY = 'bisect_stats'
_NUM_POINTS_TO_DISPLAY = 52
_BISECT_STAT_SERIES_NAME = ['win', 'linux', 'mac', 'android']
class BisectStatsHandler(request_handler.RequestHandler):
"""URL endpoint to get stats about bisect runs."""
def get(self):
"""Renders the UI with charts."""
bisect_stats = layered_cache.GetExternal(_BISECT_STATS_CACHE_KEY)
if not bisect_stats:
bisect_stats = {
'failed': [],
'completed': []
}
series_data = {
'failed': bisect_stats['failed'],
'completed': bisect_stats['completed']
}
total_series_data = {
'failed': self._GetTotalBisectRunSeries(bisect_stats['failed']),
'completed': self._GetTotalBisectRunSeries(bisect_stats['completed'])
}
self.RenderHtml('bisect_stats.html', {
'series_data': json.dumps(series_data),
'total_series_data': json.dumps(total_series_data),
})
def _GetTotalBisectRunSeries(self, series_map):
"""Sums up failed and completed bisect run series.
Args:
series_map: Dictionary of series names to list of data series.
Returns:
A list of data series.
"""
cropped_series_list = []
for key in series_map:
series = series_map[key]
cropped_series_list.append(series[len(series) - _NUM_POINTS_TO_DISPLAY:])
# Sum up series.
series_map = {}
for series in cropped_series_list:
for x_value, y_value in series:
if x_value not in series_map:
series_map[x_value] = y_value
else:
series_map[x_value] += y_value
result_list = []
for key in sorted(series_map):
result_list.append([key, series_map[key]])
return result_list
def UpdateBisectStats(bot_name, status):
"""Updates bisect run stat by bot name and status.
Bisect stats stored in a layered_cache entity have the form below. Each
tick is one week and count is the sum of failed or completed bisect runs.
{
'failed': {
bot_name: [[week_timestamp, count], [week_timestamp, count]],
},
'completed': {
bot_name: [[week_timestamp, count], [week_timestamp, count]],
}
}
Args:
bot_name: Name of the bisect bot.
status: Bisect status. Either 'failed' or 'completed'.
"""
  # TODO(chrisphan): Add stats for stalled bisects.
if status not in ['failed', 'completed']:
return
series_name = _GetSeriesNameFromBotName(bot_name)
week_timestamp = _GetLastMondayTimestamp()
bisect_stats = layered_cache.GetExternal(_BISECT_STATS_CACHE_KEY)
if not bisect_stats:
bisect_stats = {
'failed': {},
'completed': {},
}
series_map = bisect_stats[status]
if series_name not in series_map:
series_map[series_name] = [[week_timestamp, 1]]
else:
series = series_map[series_name]
if week_timestamp == series[-1][0]:
series[-1][1] += 1
else:
series.append([week_timestamp, 1])
layered_cache.SetExternal(_BISECT_STATS_CACHE_KEY, bisect_stats)
def _GetLastMondayTimestamp():
"""Get timestamp of 00:00 last Monday in milliseconds as an integer."""
today = datetime.date.today()
monday = today - datetime.timedelta(days=today.weekday())
return utils.TimestampMilliseconds(monday)
def _GetSeriesNameFromBotName(bot_name):
for series_name in _BISECT_STAT_SERIES_NAME:
if series_name in bot_name:
return series_name
return 'other'
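# Usage sketch (bot names are illustrative; any status other than 'failed'
# or 'completed' is ignored):
#
# UpdateBisectStats('linux_perf_bisect', 'completed')
# UpdateBisectStats('win_perf_bisect', 'failed')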
|
bsd-3-clause
| -2,050,621,821,403,550,200
| 27.564885
| 79
| 0.656868
| false
| 3.574021
| false
| false
| false
|
pytorch/fairseq
|
fairseq/modules/layer_norm.py
|
1
|
1500
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
try:
from apex.normalization import FusedLayerNorm as _FusedLayerNorm
has_fused_layernorm = True
class FusedLayerNorm(_FusedLayerNorm):
@torch.jit.unused
def forward(self, x):
if not x.is_cuda:
return super().forward(x)
else:
with torch.cuda.device(x.device):
return super().forward(x)
except ImportError:
has_fused_layernorm = False
def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):
if torch.jit.is_scripting():
export = True
if not export and torch.cuda.is_available() and has_fused_layernorm:
return FusedLayerNorm(normalized_shape, eps, elementwise_affine)
return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)
class Fp32LayerNorm(nn.LayerNorm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, input):
output = F.layer_norm(
input.float(),
self.normalized_shape,
self.weight.float() if self.weight is not None else None,
self.bias.float() if self.bias is not None else None,
self.eps,
)
return output.type_as(input)
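# Usage sketch (shapes are illustrative):
#
# ln = LayerNorm(512)             # fused apex kernel on CUDA when available
# y = ln(torch.randn(8, 16, 512))
# fp32_ln = Fp32LayerNorm(512)    # normalizes in fp32, returns the input dtype
# y_half = fp32_ln(torch.randn(8, 16, 512).half())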
|
mit
| 4,775,746,740,642,711,000
| 29
| 81
| 0.639333
| false
| 3.778338
| false
| false
| false
|
sadig/DC2
|
components/dc2-admincenter/dc2/admincenter/apps/main.py
|
1
|
3768
|
# -*- coding: utf-8 -*-
#
# (DC)² - DataCenter Deployment Control
# Copyright (C) 2010, 2011, 2012, 2013, 2014 Stephan Adig <sh@sourcecode.de>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import sys
try:
import web
except ImportError as e:
print(e)
print("You need to install web.py")
sys.exit(1)
try:
from dc2.admincenter.globals import CSS_FILES
from dc2.admincenter.globals import JS_LIBS
from dc2.admincenter.globals import logger
except ImportError as e:
print(e)
print("You are missing the necessary DC2 modules")
sys.exit(1)
try:
from jinja2 import Environment, FileSystemLoader
except ImportError as e:
print(e)
print("You didn't install jinja2 templating engine")
sys.exit(1)
try:
from dc2.lib.web.pages import Page
from dc2.lib.web.csrf import csrf_protected
from dc2.lib.decorators.logger import Logger
except ImportError as e:
print(e)
print("You are missing the necessary DC2 modules")
sys.exit(1)
try:
from settings import TEMPLATE_DIR
from settings import KERBEROS_AUTH_ENABLED
except ImportError as e:
print(e)
print("You don't have a settings file")
sys.exit(1)
try:
from dc2.admincenter.lib.auth import do_kinit
from dc2.admincenter.lib.auth import KerberosAuthError
except ImportError as e:
print(e)
print("There are dc2.admincenter modules missing")
sys.exit(1)
tmpl_env = Environment(loader=FileSystemLoader(TEMPLATE_DIR))
class Home(object):
@Logger(logger=logger)
def GET(self):
page = Page('index.tmpl', tmpl_env, web.ctx)
page.set_title('DC2-AdminCenter - Index')
page.set_cssfiles(CSS_FILES)
page.set_jslibs(JS_LIBS)
if ('authenticated' in web.ctx.session and
web.ctx.session.authenticated):
user_info = {}
user_info['username'] = web.ctx.session.username
user_info['realname'] = web.ctx.session.realname
user_info['is_dc2admin'] = web.ctx.session.is_dc2admin
page.add_page_data({'user': user_info})
return page.render()
class Login(object):
@csrf_protected
@Logger(logger=logger)
def POST(self):
params = web.input()
if 'error' in web.ctx.session:
del web.ctx.session.error
del web.ctx.session.errorno
del web.ctx.session.errormsg
if KERBEROS_AUTH_ENABLED:
try:
do_kinit(params.username, params.password)
web.ctx.session.authenticated = True
web.ctx.session.username = params.username
raise web.seeother('/')
            except KerberosAuthError as e:
web.ctx.session.authenticated = False
web.ctx.session.error = True
web.ctx.session.errorno = 1020
web.ctx.session.errormsg = e
raise web.seeother('/')
# TODO: Standard Auth
else:
web.ctx.session.authenticated = True
web.ctx.session.username = params.username
raise web.seeother('/')
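# Routing sketch (assumption: the URL map lives in the application factory
# rather than in this module; the standard web.py pattern is):
#
# urls = ('/', 'Home', '/login', 'Login')
# app = web.application(urls, globals())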
|
gpl-2.0
| -5,261,619,635,792,575,000
| 31.196581
| 76
| 0.655694
| false
| 3.812753
| false
| false
| false
|
usc-isi-i2/etk
|
etk/utilities.py
|
1
|
2077
|
import datetime
import hashlib
import json
from typing import Dict
import uuid
import warnings
class Utility(object):
@staticmethod
def make_json_serializable(doc: Dict):
"""
Make the document JSON serializable. This is a poor man's implementation that handles dates and nothing else.
This method modifies the given document in place.
Args:
doc: A Python Dictionary, typically a CDR object.
Returns: None
"""
for k, v in doc.items():
if isinstance(v, datetime.date):
doc[k] = v.strftime("%Y-%m-%d")
elif isinstance(v, datetime.datetime):
doc[k] = v.isoformat()
@staticmethod
def create_doc_id_from_json(doc) -> str:
"""
Docs with identical contents get the same ID.
Args:
doc:
Returns: a string with the hash of the given document.
"""
return hashlib.sha256(json.dumps(doc, sort_keys=True).encode('utf-8')).hexdigest()
@staticmethod
def create_doc_id_string(any_string):
"""
Creates sha256 has of a string
:param any_string: input string
:return: sha256 hash of any_string
"""
try:
return hashlib.sha256(any_string).hexdigest()
        except (TypeError, UnicodeEncodeError):
            # the input was text rather than bytes; encode it before hashing
return hashlib.sha256(any_string.encode('utf-8')).hexdigest()
@staticmethod
def create_uuid():
return str(uuid.uuid4())
@staticmethod
def create_description_from_json(doc_json):
        description = '{'  # opening brace matched by the '}' appended below
for key in doc_json:
description += '"' + key + '":"' + str(doc_json[key]) + '", <br/>'
description += '}'
return description
def deprecated(msg=''):
def deprecated_decorator(func):
def deprecated_func(*args, **kwargs):
warnings.warn("{}: this function is deprecated. {}".format(func.__name__, msg))
return func(*args, **kwargs)
return deprecated_func
return deprecated_decorator
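# Usage sketch for the decorator above:
#
# @deprecated('use create_doc_id_from_json instead')
# def old_doc_id(doc):
#     return Utility.create_doc_id_from_json(doc)
#
# Calling old_doc_id({'a': 1}) emits a warning via warnings.warn and then
# delegates to the wrapped function.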
|
mit
| 3,378,224,767,070,204,000
| 27.847222
| 117
| 0.585941
| false
| 4.372632
| false
| false
| false
|
postlund/home-assistant
|
homeassistant/components/rflink/switch.py
|
1
|
2332
|
"""Support for Rflink switches."""
import logging
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchDevice
from homeassistant.const import CONF_NAME
import homeassistant.helpers.config_validation as cv
from . import (
CONF_ALIASES,
CONF_DEVICE_DEFAULTS,
CONF_DEVICES,
CONF_FIRE_EVENT,
CONF_GROUP,
CONF_GROUP_ALIASES,
CONF_NOGROUP_ALIASES,
CONF_SIGNAL_REPETITIONS,
DEVICE_DEFAULTS_SCHEMA,
SwitchableRflinkDevice,
)
_LOGGER = logging.getLogger(__name__)
PARALLEL_UPDATES = 0
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(
CONF_DEVICE_DEFAULTS, default=DEVICE_DEFAULTS_SCHEMA({})
): DEVICE_DEFAULTS_SCHEMA,
vol.Optional(CONF_DEVICES, default={}): {
cv.string: vol.Schema(
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_ALIASES, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(CONF_GROUP_ALIASES, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(CONF_NOGROUP_ALIASES, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(CONF_FIRE_EVENT): cv.boolean,
vol.Optional(CONF_SIGNAL_REPETITIONS): vol.Coerce(int),
vol.Optional(CONF_GROUP, default=True): cv.boolean,
}
)
},
},
extra=vol.ALLOW_EXTRA,
)
def devices_from_config(domain_config):
"""Parse configuration and add Rflink switch devices."""
devices = []
for device_id, config in domain_config[CONF_DEVICES].items():
device_config = dict(domain_config[CONF_DEVICE_DEFAULTS], **config)
device = RflinkSwitch(device_id, **device_config)
devices.append(device)
return devices
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Rflink platform."""
async_add_entities(devices_from_config(config))
# pylint: disable=too-many-ancestors
class RflinkSwitch(SwitchableRflinkDevice, SwitchDevice):
"""Representation of a Rflink switch."""
pass
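# Example configuration.yaml entry for this platform (device IDs and names
# are illustrative; see the Rflink integration docs for the full schema):
#
# switch:
#   - platform: rflink
#     devices:
#       newkaku_0000c6c2_1:
#         name: bedroom_switch
#         signal_repetitions: 3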
|
apache-2.0
| 2,837,758,458,445,601,000
| 29.684211
| 86
| 0.602487
| false
| 4
| true
| false
| false
|
fmaguire/ete
|
ete3/tools/phylobuild_lib/task/phyml.py
|
1
|
4152
|
from __future__ import absolute_import
# #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://etetoolkit.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2015).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
#   ETE: a python Environment for Tree Exploration. BMC
#   Bioinformatics 2010, 11:24. doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit may be available in the documentation.
#
# More info at http://etetoolkit.org. Contact: huerta@embl.de
#
#
# #END_LICENSE#############################################################
import os
import shutil
import sys
import re
import logging
log = logging.getLogger("main")
from ete3.tools.phylobuild_lib.master_task import TreeTask
from ete3.tools.phylobuild_lib.master_job import Job
from ete3.tools.phylobuild_lib.utils import basename, PhyloTree, OrderedDict, GLOBALS, PHYML_CITE, DATATYPES
from ete3.tools.phylobuild_lib import db
__all__ = ["Phyml"]
class Phyml(TreeTask):
def __init__(self, nodeid, alg_phylip_file, constrain_id, model,
seqtype, conf, confname, parts_id=None):
GLOBALS["citator"].add(PHYML_CITE)
base_args = OrderedDict({
"--model": "",
"--no_memory_check": "",
"--quiet": "",
"--constraint_tree": ""})
self.confname = confname
self.conf = conf
self.constrain_tree = None
if constrain_id:
self.constrain_tree = db.get_dataid(constrain_id, DATATYPES.constrain_tree)
self.alg_phylip_file = alg_phylip_file
TreeTask.__init__(self, nodeid, "tree", "Phyml",
base_args, conf[confname])
if seqtype == "aa":
self.model = model or conf[confname]["_aa_model"]
elif seqtype == "nt":
self.model = model or conf[confname]["_nt_model"]
self.seqtype = seqtype
self.lk = None
self.init()
def load_jobs(self):
appname = self.conf[self.confname]["_app"]
args = OrderedDict(self.args)
args["--model"] = self.model
args["--datatype"] = self.seqtype
args["--input"] = self.alg_phylip_file
if self.constrain_tree:
args["--constraint_tree"] = self.constrain_tree
args["-u"] = self.constrain_tree
else:
del args["--constraint_tree"]
job = Job(self.conf["app"][appname], args, parent_ids=[self.nodeid])
job.add_input_file(self.alg_phylip_file, job.jobdir)
if self.constrain_tree:
job.add_input_file(self.constrain_tree, job.jobdir)
job.jobname += "-"+self.model
self.jobs.append(job)
def finish(self):
lks = []
j = self.jobs[0]
tree_file = os.path.join(j.jobdir,
self.alg_phylip_file+"_phyml_tree.txt")
stats_file = os.path.join(j.jobdir,
self.alg_phylip_file+"_phyml_stats.txt")
        m = re.search(r'Log-likelihood:\s+(-?\d+\.\d+)',
                      open(stats_file).read())
lk = float(m.groups()[0])
stats = {"lk": lk}
tree = PhyloTree(tree_file)
TreeTask.store_data(self, tree.write(), stats)
|
gpl-3.0
| 8,228,641,529,093,788,000
| 34.186441
| 108
| 0.593931
| false
| 3.545687
| false
| false
| false
|
Mkebede/OmicsIntegrator
|
scripts/motif_regression.py
|
1
|
11666
|
'''
File to handle motif/expression regression
'''
__author__='Anthony Soltis'
__email__='asoltis@mit.edu'
import sys,os,pickle,re
from optparse import OptionParser
import numpy as np
from scipy import stats
import fileinput
import matplotlib
matplotlib.use('pdf')
from matplotlib import pyplot as plt
def load_tgm(tgm_fn):
'''
Load tgm file and produce output matrix.
Output is transposed numpy array object.
'''
print 'Loading tgm file...'
tgm = []
for line in fileinput.input(tgm_fn):
l = line.strip('\n').split()
tgm.append(l)
# display results, return array
s = np.asarray(tgm).T.shape
print 'TGM file loaded with %d genes by %d motifs.'%(s[0],s[1])
return np.asarray(tgm).T
def load_ids(ids_fn):
'''
Load ids filename and store as list.
'''
ids = []
for line in fileinput.input(ids_fn):
l = line.strip('\n')
ids.append(l)
return ids
def load_response(data_fn):
'''
Load ydata and return numpy vector.
Input file should have one value per-row.
'''
r_data = []
r_genes = []
for line in fileinput.input(data_fn):
row=line.strip('\n').split('\t')
if len(row)>1:
r_genes.append(row[0])
r_data.append(float(row[1]))
else:
r_data.append(float(row[0]))
# r_data.append(float(line.strip('\n')))
print 'Response data file loaded with %d values.'%(len(r_data))
return np.asarray(r_data),r_genes
def map_data(Xdata,Xnames,Ydata,Ynames):
'''
Map X (predictor) data to Y (response) data using X and Y data ids (i.e. gene names).
'''
# Intersect two gene lists
Xinds = []
Yinds = []
#yn = []
for i,Xgene in enumerate(Xnames):
for j,Ygene in enumerate(Ynames):
if Xgene == Ygene:
Xinds.append(i)
Yinds.append(j)
# yn.append(Ygene)
Xdata_out = Xdata[Xinds,:]
Ydata_out = Ydata[Yinds]
print 'Found %d genes that have binding data and are in the expression output'%(len(Yinds))
#yn.sort()
#print ','.join(yn[0:20])
return Xdata_out,Ydata_out
def perform_regression(X,Y,motif_ids,norm,outdir,plot):
    '''
    Regress the (optionally log-normalized) response Y on each motif affinity
    column of X; returns (motif, slope, p-value, column index) tuples sorted
    by p-value.
    '''
reg_results = []
for i in range(0,X.shape[1]):
# Set up data
x = np.array(X[:,i],dtype=float)
if norm != None:
if norm == 'log2':
y = np.log2(Y+.1)
elif norm == 'log10':
y = np.log10(Y+.1)
else: y = Y
# Perform regression
slope,intercept,r_val,p_val,std_err = stats.linregress(x,y)
reg_results.append(([motif_ids[i],slope,p_val,i]))
#regression plot
if plot:
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.plot(x,y,'bo',x,intercept+slope*x,'k')
ax1.set_title(motif_ids[i])
ax1.set_xlabel('Estimated transcription factor affinity')
ax1.set_ylabel('Expression log fold change')
#checking if a subdirectory is present to save plots
plotdir = os.path.join(os.path.split(outdir)[0],'regression_plots')
if not os.path.isdir(plotdir):
os.makedirs(plotdir)
#cleaning all motif ids to have all alphanumeric name
if not re.match(r'^[A-Za-z0-9.]*$', motif_ids[i]):
motif_ids[i] = "".join(c for c in motif_ids[i] if c not in ('!','$','@','!','%','*','\\','/','_','-'))
#file name must be within max characters supported by os
if len(motif_ids[i])>162:
st = motif_ids[i]
motif_ids[i] = st[0:160]
plotfile = os.path.join(plotdir,motif_ids[i]+'.pdf')
fig.savefig(open(plotfile,'w'),dpi=300)
plt.close()
return sorted(reg_results,key=lambda x: x[2])
def fdr_correction(results):
'''
Compute FDR corrected p-values based on Benjamini-Hochberg procedure.
'''
new_results = []
num_tests = len([r for r in results if str(r[1])!='nan'])
print 'Correcting for '+str(num_tests)+' numeric values'
for i in range(0,num_tests):
tup = results[i]
pval = tup[2]
fdr = num_tests*pval/(i+1)
if fdr > 1.0: fdr = 1.0
tup+=(fdr,)
new_results.append(tup)
return new_results
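# Worked example of the Benjamini-Hochberg step above: with num_tests = 4 and
# sorted p-values [0.01, 0.02, 0.03, 0.5], the corrected values are
# 4*0.01/1 = 0.04, 4*0.02/2 = 0.04, 4*0.03/3 = 0.04 and min(4*0.5/4, 1) = 0.5.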
def main():
usage = "%prog [options] <scores.tgm or scores.tgm.pkl> <response_values.tab>"
description = "Script that takes a predicted TF-Gene matrix and uses a linear regression to identify which TFs have binding scores correlated with gene expression changes."
parser = OptionParser(usage=usage,description=description)
##get program directory
progdir=os.path.dirname(os.path.abspath(sys.argv[0]))
# Options
parser.add_option('--outdir','--out',dest="outdir",default='./test_out.txt',
help='Choose output file name. Default is %default.')
parser.add_option('--motif-ids','--motif-ids',dest='motif_ids',default=None,
help='OPTIONAL: If input file is in text format (.tgm), provide motif ids corresponding to tgm file motifs.')
parser.add_option('--tgm-genes',dest='tgm_genes',default=None,
help='OPTIONAL: If input file is in text format (.tgm), provide gene ids corresponding to tgm file genes.')
parser.add_option('--response-genes',dest='response_genes',default=None,
help='OPTIONAL: If two-column file is not provided, add in gene ids corresponding to response values.')
parser.add_option('--norm-type',dest='norm_type',default=None,
help='Choose normalization type for response data. Choices are: "log2", "log10".\
Default is %default.')
parser.add_option('--use-qval',dest='use_qval',action='store_true',default=False,help='If set this the Forest input file will contain -log(qval) instead of -log(pval) and threshold the output using qval. Default:%default')
parser.add_option('--thresh',dest='thresh',type='string',default='0.9',help='P/Q-Value threshold to illustrate results. Default:%default')
parser.add_option('--gifdir',dest='motifs',default=os.path.join(progdir,'../data/matrix_files/gifs'),
help='Directory containing motif GIFs to illustrate results. Default is %default')
parser.add_option('--plot',dest='plot',action='store_true',default=False,help='Enable plot generation for regression results. Default:%default')
# get options, arguments
(opts,args) = parser.parse_args()
# Handle arguments
tgm_fn = args[0]
response_data_fn = args[1]
# Load in Y-vector data (gene expression, fold-changes, etc.)
response_data,response_genes = load_response(response_data_fn)
print 'Trying to get file type...'
ext=tgm_fn.split('.')[-1]
if ext.lower()=='pkl':
print '...found PKL file'
pkl=True
else:
print '...found text file, looking for additional data files in options'
pkl=False
# Handle options
outdir = opts.outdir
motif_ids = opts.motif_ids
if motif_ids == None and not pkl:
print 'Must provide motif ids file or use pickled dictionary. Exiting.'
sys.exit()
tgm_genes = opts.tgm_genes
if tgm_genes == None and not pkl:
print 'Must provide gene ids for motifs file or use pickled dictionary. Exiting.'
sys.exit()
# response_genes = opts.response_genes
if opts.response_genes == None and len(response_genes)==0:
print 'Must provide gene ids for response data or have a two-column data file. Exiting.'
sys.exit()
norm_type = opts.norm_type
valid_norm_types = ['log2','log10']
if norm_type != None:
if norm_type not in valid_norm_types:
print 'Normalization type not valid. Exiting.'
sys.exit()
if pkl:
#load in values from dictionary
tgmdict=pickle.load(open(tgm_fn,'rU'))
tgm_data=tgmdict['matrix'].T
motif_ids=tgmdict['tfs']
tgm_genes=tgmdict['genes']
delim=tgmdict['delim']
else:
# Load in transcription factor affinity matrix and IDs
tgm_data = load_tgm(tgm_fn)
motif_ids = load_ids(motif_ids)
tgm_genes = load_ids(tgm_genes)
delim='.'
#now load response_genes if they're not loaded yet
if len(response_genes)==0:
response_genes = load_ids(opts.response_genes)
# Map predictor data to response data
X,Y=map_data(tgm_data,tgm_genes,response_data,response_genes)
# Perform regression
reg_results=perform_regression(X,Y,motif_ids,norm_type,outdir,opts.plot)
# FDR correction
new_results = fdr_correction(reg_results)
    dn=os.path.dirname(outdir)
    if dn!='' and dn!='./' and not os.path.exists(dn):
        os.makedirs(dn)  # safer than shelling out to 'mkdir'; also creates parent dirs
# Write to TEXT file complete results
of = open(outdir,'w')
of.writelines('\t'.join(['Motif','Slope','p-val','q-val'])+'\n')
for res in new_results:
if str(res[1])=='nan':
continue
ostr = '\t'.join([res[0],str(res[1]),str(res[2]),str(res[4])]) + '\n'
of.writelines(ostr)
of.close()
##now create HTML writeup
threshold = float(opts.thresh)
    # swap the trailing extension for .html (anchored so only the extension changes)
    of = open(re.sub(outdir.split('.')[-1]+'$', 'html', outdir), 'w')
of.writelines("""<html>
<title>GARNET Results</title>
<h3>GARNET regression results</h3>
<p>This table includes the results for GARNET TF-motif discovery and regression. This Table includes the non-zero results of the linear regression</p>
<table width="90%">
<tr><th style="width:25%">Motif Cluster</th><th style="width:12%">Slope</th><th style="width:12%">P-value</th><th style="width:12%">Q-value</th><th style="width:35%">LOGO</th></tr>
""")
for res in new_results:
if str(res[1])=='nan':
continue
        # skip rows that exceed the q-value or p-value threshold
if (opts.use_qval and res[4]<=threshold) or ((not opts.use_qval) and res[2]<=threshold):
motifgif=os.path.join(opts.motifs,'motif'+str(res[3])+'.gif')
ostr = "<tr><td>"+' '.join(res[0].split('.'))+"</td><td>"+str(res[1])+'</td><td>'+str(res[2])+"</td><td>"+str(res[4])+"</td><td><img src=\""+motifgif+"\" scale=80%></td></tr>\n"
of.writelines(ostr)
of.writelines("</table></html>")
of.close()
##now write to Forest-friendly input file
##collect dictionary of all individual tf names and their regression p-values
##or q-values
regdict={}
for row in new_results:
tfs=[t for t in row[0].split(delim) if t!='' and ' ' not in t]
#print row
if str(row[1])=='nan':
continue
        # skip rows that exceed the q-value or p-value threshold
if opts.use_qval:
if row[4]>threshold:
continue
elif row[2]>threshold:
continue
for tf in tfs:
if row[2]==1:
continue
if opts.use_qval:
lpv=-1.0*np.log2(float(row[4]))#calculate neg log2 qvalue
else:
lpv=-1.0*np.log2(float(row[2]))#calculate neg log2 pvalue
try:
cpv=regdict[tf]
except KeyError:
cpv=0.0
if lpv>cpv:
regdict[tf]=lpv
    print 'Found '+str(len(regdict))+' TF scores for '+str(len(new_results))+' motif results'
    of = open(re.sub(r'\.tsv$', '_FOREST_INPUT.tsv', outdir), 'w')  # escape the dot: '.tsv' would match any character followed by 'tsv'
for tf in sorted(regdict.keys()):
val=regdict[tf]
of.write(tf+'\t'+str(val)+'\n')
of.close()
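    # The *_FOREST_INPUT.tsv written above is a two-column, tab-separated file
    # mapping TF name -> best (largest) -log2 p/q-value over its motif
    # clusters, e.g. (illustrative values only):
    #   FOXA1   6.64
    #   GATA2   4.32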
if __name__ == '__main__': main()
|
bsd-2-clause
| 4,813,545,824,401,240,000
| 36.271565
| 226
| 0.59232
| false
| 3.383411
| false
| false
| false
|
jodonnell/Minesweeper-
|
minesweeper/views.py
|
1
|
4681
|
# Create your views here.
from django.shortcuts import render_to_response
from django import forms
from django import http
from minesweeper.classes.create_board import CreateBoard
from minesweeper.classes.board import Board
from pymongo import Connection, DESCENDING, ASCENDING
import cPickle
import json
import datetime
ROWS = 8
COLUMNS = 8
TOTAL_MINES = 10
connection = Connection('localhost', 27017)
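# NOTE: pymongo's Connection class was removed in pymongo 3.x; on newer
# installs the equivalent would be MongoClient('localhost', 27017).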
class EmailForm(forms.Form):
email = forms.EmailField(required = True)
def _get_minesweeper_db():
return connection.minesweeper
def index(request):
if 'email' not in request.COOKIES:
return _get_email(request)
email = request.COOKIES['email']
db = _get_minesweeper_db()
game_query = db.minesweeper.find_one({'email':email})
board = _create_new_board()
new_record = {"email": email, "board":cPickle.dumps(board), 'new_game':True}
if game_query is None:
db.minesweeper.insert(new_record)
else:
db.minesweeper.update({"email": email}, new_record)
return render_to_response('index.html', {'num_flags':TOTAL_MINES, 'rows':ROWS, 'columns':COLUMNS})
def clear(request):
"User is attempting to clear a square"
row, column, email = _get_row_column_email_params(request)
board = _get_board(email)
_update_board(email, board)
if board.is_mined(row, column):
return http.HttpResponse(json.dumps({'lost':True}))
num_surronding_mines = board.get_num_surronding_mines(row, column)
if num_surronding_mines:
return http.HttpResponse(json.dumps({'num_surronding_mines':num_surronding_mines}))
clear_area = board.get_clear_area(row, column, [])
return http.HttpResponse(json.dumps({'clear_area':clear_area}))
def _update_board(email, board):
update_row = {"email": email, "board":cPickle.dumps(board), "new_game":False}
db = _get_minesweeper_db()
query = db.minesweeper.find_one({'email':email})
if 'new_game' in query and query['new_game']:
update_row['time'] = datetime.datetime.now()
else:
update_row['time'] = query['time']
db.minesweeper.update({"email": email}, update_row)
def flag(request):
row, column, email = _get_row_column_email_params(request)
board = _get_board(email)
board.place_flag(row, column)
_update_board(email, board)
response = {}
if board.has_won():
high_score = _check_high_score(email)
response = {'won':True, 'high_score': high_score}
return http.HttpResponse(json.dumps(response))
def _get_row_column_email_params(request):
row = int(request.GET['row'])
column = int(request.GET['column'])
email = request.COOKIES['email']
return (row, column, email)
def _check_high_score(email):
db = _get_minesweeper_db()
game = db.minesweeper.find_one({'email':email})
    # high scores are the 10 fastest completion times; sorting DESCENDING puts
    # the slowest stored time first so it can be compared against and evicted
    high_scores_query = db.high_scores.find()
    high_scores_query.sort('time', DESCENDING)
    time_diff = datetime.datetime.now() - game['time']
    # total_seconds() gives proper float seconds; concatenating str(seconds)
    # and str(microseconds) would misplace the decimal point (e.g. 1s 50us -> "1.50")
    game_time = time_diff.total_seconds()
high_score = 0
if high_scores_query.count() >= 10 and game_time < high_scores_query[0]['time']:
db.high_scores.remove(high_scores_query[0]['_id'])
db.high_scores.insert({'email':game['email'], 'time':game_time})
high_score = game_time
elif high_scores_query.count() < 10:
db.high_scores.insert({'email':game['email'], 'time':game_time})
high_score = game_time
return high_score
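    # Note: the find/compare/insert sequence above is not atomic; two players
    # finishing at the same moment could briefly leave more than 10 rows.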
def reset(request):
email = request.COOKIES['email']
board = _create_new_board()
db = _get_minesweeper_db()
db.minesweeper.update({"email": email}, {"email": email, "board":cPickle.dumps(board), 'new_game':True})
return http.HttpResponse(json.dumps([]))
def _create_new_board():
create_board = CreateBoard(ROWS, COLUMNS, TOTAL_MINES)
return Board(create_board)
def view_high_scores(request):
db = _get_minesweeper_db()
high_scores_query = db.high_scores.find()
high_scores_query.sort('time', ASCENDING)
return render_to_response('view_high_scores.html', { 'high_scores': high_scores_query })
def _get_board(email):
db = _get_minesweeper_db()
query = db.minesweeper.find_one({'email':email})
return cPickle.loads(str(query['board']))
def _get_email(request):
if request.method == 'POST':
form = EmailForm(request.POST)
if form.is_valid():
redirect = http.HttpResponseRedirect('/')
redirect.set_cookie('email', form.cleaned_data['email'])
return redirect
else:
form = EmailForm()
return render_to_response('get_email.html', { 'form': form })
|
unlicense
| -8,034,674,039,468,431,000
| 30.206667
| 108
| 0.657979
| false
| 3.289529
| false
| false
| false
|
ActiveState/code
|
recipes/Python/491280_BackgroundCall_Threading_like/recipe-491280.py
|
1
|
2587
|
def example_BackgroundCall():
import urllib,time
def work():
return urllib.urlopen('http://www.python.org/').read()
bkcall=BackgroundCall(work)
print 'work() executing in background ...'
while not bkcall.is_done():
print '.',
time.sleep(0.010)
print 'done.'
print bkcall.get_return()[:500]
import sys
from time import time as _time, sleep as _sleep
class Full(Exception):pass
class Empty(Exception):pass
class BackgroundCall:
"""BackgroundCall
Example:
bkcall=BackgroundCall( time_consuming_function )
...
if bkcall.is_done():
print "got", bkcall.get_return()
"""
id=None
done=0 #1=returned; 2=exception raised
def __init__(self, func, args=(), kwargs={}):
import thread
def thread_bkcall():
try:
self.ret=func(*args, **kwargs)
self.done=1
except:
self.exc=sys.exc_info()
self.done=2
self.id=thread.start_new(thread_bkcall, ())
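        # NOTE: threads started via the low-level 'thread' module are daemonic;
        # they are killed abruptly when the main program exits.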
def is_done(self):
return self.done
def get_return(self, wait=1, timeout=None, raise_exception=1, alt_return=None):
"""delivers the return value or (by default) echoes the exception of
the call job
wait: 0=no waiting; Attribute error raised if no
1=waits for return value or exception
callable -> waits and wait()-call's while waiting for return
"""
if not self.done and wait:
starttime=_time()
delay=0.0005
while not self.done:
if timeout:
remaining = starttime + timeout - _time()
if remaining <= 0: #time is over
if raise_exception:
raise Empty, "return timed out"
else:
return alt_return
delay = min(delay * 2, remaining, .05)
else:
delay = min(delay * 2, .05)
if callable(wait): wait()
_sleep(delay) #reduce CPU usage by using a sleep
if self.done==2: #we had an exception
exc=self.exc
del self.exc
if raise_exception & 1: #by default exception is raised
raise exc[0],exc[1],exc[2]
else:
return alt_return
return self.ret
def get_exception(self):
return self.exc
if __name__=='__main__':
example_BackgroundCall()
|
mit
| 7,555,456,405,313,564,000
| 32.166667
| 83
| 0.521067
| false
| 4.261944
| false
| false
| false
|
area3001/ColliScanner
|
barcode.py
|
1
|
1301
|
import io
from threading import Thread
import picamera
from PIL import Image
import zbar
class BarcodeScanner(Thread):
def __init__(self, resolutionX=800, resolutionY=600, callback=None):
self.callback = callback
self.scanner = zbar.ImageScanner()
self.scanner.parse_config("enable")
self.stream = io.BytesIO()
        self.camera = picamera.PiCamera()
        self.resolution = (resolutionX, resolutionY)  # kept so run() can recreate the camera
        self.camera.resolution = self.resolution
self.quit = False
Thread.__init__(self)
def setCallback(self, callback):
self.callback = callback
    def run(self):
        self.quit = False
        if self.camera.closed:
            # PiCamera has no open(); once closed, a fresh instance is needed
            self.camera = picamera.PiCamera()
            self.camera.resolution = self.resolution
        self.scan()
def terminate(self):
self.quit = True
if not self.camera.closed:
self.camera.close()
def scan(self):
while not self.quit and not self.camera.closed:
self.stream = io.BytesIO()
self.camera.capture(self.stream, format="jpeg")
# "Rewind" the stream to the beginning so we can read its content
self.stream.seek(0)
pil = Image.open(self.stream)
# create a reader
pil = pil.convert("L")
width, height = pil.size
raw = pil.tobytes()
# wrap image data
image = zbar.Image(width, height, "Y800", raw)
# scan the image for barcodes
self.scanner.scan(image)
if any(True for _ in image):
self.callback(image)
self.quit = True
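# Minimal usage sketch (hypothetical callback; assumes a Pi camera module and
# the zbar bindings are installed):
#
#   def on_barcode(image):
#       for symbol in image:          # zbar.Image iterates over decoded symbols
#           print symbol.type, symbol.data
#
#   scanner = BarcodeScanner(callback=on_barcode)
#   scanner.start()                   # Thread.start() runs scan() in the background
#   ...
#   scanner.terminate()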
|
lgpl-3.0
| 2,577,924,286,906,595,000
| 22.25
| 69
| 0.691007
| false
| 3.119904
| false
| false
| false
|
eammx/proyectosWeb
|
proyectoPython/env/lib/python3.6/site-packages/werkzeug/debug/tbtools.py
|
2
|
20363
|
# -*- coding: utf-8 -*-
"""
werkzeug.debug.tbtools
~~~~~~~~~~~~~~~~~~~~~~
This module provides various traceback related utility functions.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import codecs
import inspect
import json
import os
import re
import sys
import sysconfig
import traceback
from tokenize import TokenError
from .._compat import PY2
from .._compat import range_type
from .._compat import reraise
from .._compat import string_types
from .._compat import text_type
from .._compat import to_native
from .._compat import to_unicode
from ..filesystem import get_filesystem_encoding
from ..utils import cached_property
from ..utils import escape
from .console import Console
_coding_re = re.compile(br"coding[:=]\s*([-\w.]+)")
_line_re = re.compile(br"^(.*?)$", re.MULTILINE)
_funcdef_re = re.compile(r"^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)")
UTF8_COOKIE = b"\xef\xbb\xbf"
system_exceptions = (SystemExit, KeyboardInterrupt)
try:
system_exceptions += (GeneratorExit,)
except NameError:
pass
HEADER = u"""\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<title>%(title)s // Werkzeug Debugger</title>
<link rel="stylesheet" href="?__debugger__=yes&cmd=resource&f=style.css"
type="text/css">
<!-- We need to make sure this has a favicon so that the debugger does
not by accident trigger a request to /favicon.ico which might
change the application state. -->
<link rel="shortcut icon"
href="?__debugger__=yes&cmd=resource&f=console.png">
<script src="?__debugger__=yes&cmd=resource&f=jquery.js"></script>
<script src="?__debugger__=yes&cmd=resource&f=debugger.js"></script>
<script type="text/javascript">
var TRACEBACK = %(traceback_id)d,
CONSOLE_MODE = %(console)s,
EVALEX = %(evalex)s,
EVALEX_TRUSTED = %(evalex_trusted)s,
SECRET = "%(secret)s";
</script>
</head>
<body style="background-color: #fff">
<div class="debugger">
"""
FOOTER = u"""\
<div class="footer">
Brought to you by <strong class="arthur">DON'T PANIC</strong>, your
friendly Werkzeug powered traceback interpreter.
</div>
</div>
<div class="pin-prompt">
<div class="inner">
<h3>Console Locked</h3>
<p>
The console is locked and needs to be unlocked by entering the PIN.
You can find the PIN printed out on the standard output of your
shell that runs the server.
<form>
<p>PIN:
<input type=text name=pin size=14>
<input type=submit name=btn value="Confirm Pin">
</form>
</div>
</div>
</body>
</html>
"""
PAGE_HTML = (
HEADER
+ u"""\
<h1>%(exception_type)s</h1>
<div class="detail">
<p class="errormsg">%(exception)s</p>
</div>
<h2 class="traceback">Traceback <em>(most recent call last)</em></h2>
%(summary)s
<div class="plain">
<form action="/?__debugger__=yes&cmd=paste" method="post">
<p>
<input type="hidden" name="language" value="pytb">
This is the Copy/Paste friendly version of the traceback. <span
class="pastemessage">You can also paste this traceback into
a <a href="https://gist.github.com/">gist</a>:
<input type="submit" value="create paste"></span>
</p>
<textarea cols="50" rows="10" name="code" readonly>%(plaintext)s</textarea>
</form>
</div>
<div class="explanation">
The debugger caught an exception in your WSGI application. You can now
look at the traceback which led to the error. <span class="nojavascript">
If you enable JavaScript you can also use additional features such as code
execution (if the evalex feature is enabled), automatic pasting of the
exceptions and much more.</span>
</div>
"""
+ FOOTER
+ """
<!--
%(plaintext_cs)s
-->
"""
)
CONSOLE_HTML = (
HEADER
+ u"""\
<h1>Interactive Console</h1>
<div class="explanation">
In this console you can execute Python expressions in the context of the
application. The initial namespace was created by the debugger automatically.
</div>
<div class="console"><div class="inner">The Console requires JavaScript.</div></div>
"""
+ FOOTER
)
SUMMARY_HTML = u"""\
<div class="%(classes)s">
%(title)s
<ul>%(frames)s</ul>
%(description)s
</div>
"""
FRAME_HTML = u"""\
<div class="frame" id="frame-%(id)d">
<h4>File <cite class="filename">"%(filename)s"</cite>,
line <em class="line">%(lineno)s</em>,
in <code class="function">%(function_name)s</code></h4>
<div class="source %(library)s">%(lines)s</div>
</div>
"""
SOURCE_LINE_HTML = u"""\
<tr class="%(classes)s">
<td class=lineno>%(lineno)s</td>
<td>%(code)s</td>
</tr>
"""
def render_console_html(secret, evalex_trusted=True):
return CONSOLE_HTML % {
"evalex": "true",
"evalex_trusted": "true" if evalex_trusted else "false",
"console": "true",
"title": "Console",
"secret": secret,
"traceback_id": -1,
}
def get_current_traceback(
ignore_system_exceptions=False, show_hidden_frames=False, skip=0
):
"""Get the current exception info as `Traceback` object. Per default
calling this method will reraise system exceptions such as generator exit,
system exit or others. This behavior can be disabled by passing `False`
to the function as first parameter.
"""
exc_type, exc_value, tb = sys.exc_info()
if ignore_system_exceptions and exc_type in system_exceptions:
reraise(exc_type, exc_value, tb)
for _ in range_type(skip):
if tb.tb_next is None:
break
tb = tb.tb_next
tb = Traceback(exc_type, exc_value, tb)
if not show_hidden_frames:
tb.filter_hidden_frames()
return tb
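# Typical (hypothetical) use inside an except block:
#
#   try:
#       do_something_risky()
#   except Exception:
#       tb = get_current_traceback(skip=1)
#       tb.log(sys.stderr)
#       html = tb.render_full(evalex=False)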
class Line(object):
"""Helper for the source renderer."""
__slots__ = ("lineno", "code", "in_frame", "current")
def __init__(self, lineno, code):
self.lineno = lineno
self.code = code
self.in_frame = False
self.current = False
@property
def classes(self):
rv = ["line"]
if self.in_frame:
rv.append("in-frame")
if self.current:
rv.append("current")
return rv
def render(self):
return SOURCE_LINE_HTML % {
"classes": u" ".join(self.classes),
"lineno": self.lineno,
"code": escape(self.code),
}
class Traceback(object):
"""Wraps a traceback."""
def __init__(self, exc_type, exc_value, tb):
self.exc_type = exc_type
self.exc_value = exc_value
self.tb = tb
exception_type = exc_type.__name__
if exc_type.__module__ not in {"builtins", "__builtin__", "exceptions"}:
exception_type = exc_type.__module__ + "." + exception_type
self.exception_type = exception_type
self.groups = []
memo = set()
while True:
self.groups.append(Group(exc_type, exc_value, tb))
memo.add(id(exc_value))
if PY2:
break
exc_value = exc_value.__cause__ or exc_value.__context__
if exc_value is None or id(exc_value) in memo:
break
exc_type = type(exc_value)
tb = exc_value.__traceback__
self.groups.reverse()
self.frames = [frame for group in self.groups for frame in group.frames]
def filter_hidden_frames(self):
"""Remove the frames according to the paste spec."""
for group in self.groups:
group.filter_hidden_frames()
self.frames[:] = [frame for group in self.groups for frame in group.frames]
@property
def is_syntax_error(self):
"""Is it a syntax error?"""
return isinstance(self.exc_value, SyntaxError)
@property
def exception(self):
"""String representation of the final exception."""
return self.groups[-1].exception
def log(self, logfile=None):
"""Log the ASCII traceback into a file object."""
if logfile is None:
logfile = sys.stderr
tb = self.plaintext.rstrip() + u"\n"
logfile.write(to_native(tb, "utf-8", "replace"))
def paste(self):
"""Create a paste and return the paste id."""
data = json.dumps(
{
"description": "Werkzeug Internal Server Error",
"public": False,
"files": {"traceback.txt": {"content": self.plaintext}},
}
).encode("utf-8")
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
rv = urlopen("https://api.github.com/gists", data=data)
resp = json.loads(rv.read().decode("utf-8"))
rv.close()
return {"url": resp["html_url"], "id": resp["id"]}
def render_summary(self, include_title=True):
"""Render the traceback for the interactive console."""
title = ""
classes = ["traceback"]
if not self.frames:
classes.append("noframe-traceback")
frames = []
else:
library_frames = sum(frame.is_library for frame in self.frames)
mark_lib = 0 < library_frames < len(self.frames)
frames = [group.render(mark_lib=mark_lib) for group in self.groups]
if include_title:
if self.is_syntax_error:
title = u"Syntax Error"
else:
title = u"Traceback <em>(most recent call last)</em>:"
if self.is_syntax_error:
description_wrapper = u"<pre class=syntaxerror>%s</pre>"
else:
description_wrapper = u"<blockquote>%s</blockquote>"
return SUMMARY_HTML % {
"classes": u" ".join(classes),
"title": u"<h3>%s</h3>" % title if title else u"",
"frames": u"\n".join(frames),
"description": description_wrapper % escape(self.exception),
}
def render_full(self, evalex=False, secret=None, evalex_trusted=True):
"""Render the Full HTML page with the traceback info."""
exc = escape(self.exception)
return PAGE_HTML % {
"evalex": "true" if evalex else "false",
"evalex_trusted": "true" if evalex_trusted else "false",
"console": "false",
"title": exc,
"exception": exc,
"exception_type": escape(self.exception_type),
"summary": self.render_summary(include_title=False),
"plaintext": escape(self.plaintext),
"plaintext_cs": re.sub("-{2,}", "-", self.plaintext),
"traceback_id": self.id,
"secret": secret,
}
@cached_property
def plaintext(self):
return u"\n".join([group.render_text() for group in self.groups])
@property
def id(self):
return id(self)
class Group(object):
"""A group of frames for an exception in a traceback. On Python 3,
if the exception has a ``__cause__`` or ``__context__``, there are
multiple exception groups.
"""
def __init__(self, exc_type, exc_value, tb):
self.exc_type = exc_type
self.exc_value = exc_value
self.info = None
if not PY2:
if exc_value.__cause__ is not None:
self.info = (
u"The above exception was the direct cause of the"
u" following exception"
)
elif exc_value.__context__ is not None:
self.info = (
u"During handling of the above exception, another"
u" exception occurred"
)
self.frames = []
while tb is not None:
self.frames.append(Frame(exc_type, exc_value, tb))
tb = tb.tb_next
def filter_hidden_frames(self):
new_frames = []
hidden = False
for frame in self.frames:
hide = frame.hide
if hide in ("before", "before_and_this"):
new_frames = []
hidden = False
if hide == "before_and_this":
continue
elif hide in ("reset", "reset_and_this"):
hidden = False
if hide == "reset_and_this":
continue
elif hide in ("after", "after_and_this"):
hidden = True
if hide == "after_and_this":
continue
elif hide or hidden:
continue
new_frames.append(frame)
# if we only have one frame and that frame is from the codeop
# module, remove it.
if len(new_frames) == 1 and self.frames[0].module == "codeop":
del self.frames[:]
        # if the last frame is missing, something went terribly wrong :(
elif self.frames[-1] in new_frames:
self.frames[:] = new_frames
@property
def exception(self):
"""String representation of the exception."""
buf = traceback.format_exception_only(self.exc_type, self.exc_value)
rv = "".join(buf).strip()
return to_unicode(rv, "utf-8", "replace")
def render(self, mark_lib=True):
out = []
if self.info is not None:
out.append(u'<li><div class="exc-divider">%s:</div>' % self.info)
for frame in self.frames:
out.append(
u"<li%s>%s"
% (
u' title="%s"' % escape(frame.info) if frame.info else u"",
frame.render(mark_lib=mark_lib),
)
)
return u"\n".join(out)
def render_text(self):
out = []
if self.info is not None:
out.append(u"\n%s:\n" % self.info)
out.append(u"Traceback (most recent call last):")
for frame in self.frames:
out.append(frame.render_text())
out.append(self.exception)
return u"\n".join(out)
class Frame(object):
"""A single frame in a traceback."""
def __init__(self, exc_type, exc_value, tb):
self.lineno = tb.tb_lineno
self.function_name = tb.tb_frame.f_code.co_name
self.locals = tb.tb_frame.f_locals
self.globals = tb.tb_frame.f_globals
fn = inspect.getsourcefile(tb) or inspect.getfile(tb)
if fn[-4:] in (".pyo", ".pyc"):
fn = fn[:-1]
# if it's a file on the file system resolve the real filename.
if os.path.isfile(fn):
fn = os.path.realpath(fn)
self.filename = to_unicode(fn, get_filesystem_encoding())
self.module = self.globals.get("__name__", self.locals.get("__name__"))
self.loader = self.globals.get("__loader__", self.locals.get("__loader__"))
self.code = tb.tb_frame.f_code
# support for paste's traceback extensions
self.hide = self.locals.get("__traceback_hide__", False)
info = self.locals.get("__traceback_info__")
if info is not None:
info = to_unicode(info, "utf-8", "replace")
self.info = info
def render(self, mark_lib=True):
"""Render a single frame in a traceback."""
return FRAME_HTML % {
"id": self.id,
"filename": escape(self.filename),
"lineno": self.lineno,
"function_name": escape(self.function_name),
"lines": self.render_line_context(),
"library": "library" if mark_lib and self.is_library else "",
}
@cached_property
def is_library(self):
return any(
self.filename.startswith(path) for path in sysconfig.get_paths().values()
)
def render_text(self):
return u' File "%s", line %s, in %s\n %s' % (
self.filename,
self.lineno,
self.function_name,
self.current_line.strip(),
)
def render_line_context(self):
before, current, after = self.get_context_lines()
rv = []
def render_line(line, cls):
line = line.expandtabs().rstrip()
stripped_line = line.strip()
prefix = len(line) - len(stripped_line)
rv.append(
'<pre class="line %s"><span class="ws">%s</span>%s</pre>'
% (cls, " " * prefix, escape(stripped_line) or " ")
)
for line in before:
render_line(line, "before")
render_line(current, "current")
for line in after:
render_line(line, "after")
return "\n".join(rv)
def get_annotated_lines(self):
"""Helper function that returns lines with extra information."""
lines = [Line(idx + 1, x) for idx, x in enumerate(self.sourcelines)]
# find function definition and mark lines
if hasattr(self.code, "co_firstlineno"):
lineno = self.code.co_firstlineno - 1
while lineno > 0:
if _funcdef_re.match(lines[lineno].code):
break
lineno -= 1
try:
offset = len(inspect.getblock([x.code + "\n" for x in lines[lineno:]]))
except TokenError:
offset = 0
for line in lines[lineno : lineno + offset]:
line.in_frame = True
# mark current line
try:
lines[self.lineno - 1].current = True
except IndexError:
pass
return lines
def eval(self, code, mode="single"):
"""Evaluate code in the context of the frame."""
if isinstance(code, string_types):
if PY2 and isinstance(code, text_type): # noqa
code = UTF8_COOKIE + code.encode("utf-8")
code = compile(code, "<interactive>", mode)
return eval(code, self.globals, self.locals)
@cached_property
def sourcelines(self):
"""The sourcecode of the file as list of unicode strings."""
# get sourcecode from loader or file
source = None
if self.loader is not None:
try:
if hasattr(self.loader, "get_source"):
source = self.loader.get_source(self.module)
elif hasattr(self.loader, "get_source_by_code"):
source = self.loader.get_source_by_code(self.code)
except Exception:
# we munch the exception so that we don't cause troubles
# if the loader is broken.
pass
if source is None:
try:
with open(
to_native(self.filename, get_filesystem_encoding()), mode="rb"
) as f:
source = f.read()
except IOError:
return []
# already unicode? return right away
if isinstance(source, text_type):
return source.splitlines()
        # the source should be ascii, but we don't want to reject too many
        # characters in the debugger if something breaks
charset = "utf-8"
if source.startswith(UTF8_COOKIE):
source = source[3:]
        else:
            # PEP 263: an encoding cookie is only honored on the first two lines
            for idx, match in enumerate(_line_re.finditer(source)):
match = _coding_re.search(match.group())
if match is not None:
charset = match.group(1)
break
if idx > 1:
break
# on broken cookies we fall back to utf-8 too
charset = to_native(charset)
try:
codecs.lookup(charset)
except LookupError:
charset = "utf-8"
return source.decode(charset, "replace").splitlines()
def get_context_lines(self, context=5):
before = self.sourcelines[self.lineno - context - 1 : self.lineno - 1]
past = self.sourcelines[self.lineno : self.lineno + context]
return (before, self.current_line, past)
@property
def current_line(self):
try:
return self.sourcelines[self.lineno - 1]
except IndexError:
return u""
@cached_property
def console(self):
return Console(self.globals, self.locals)
@property
def id(self):
return id(self)
|
mit
| 8,006,801,681,762,134,000
| 31.425159
| 87
| 0.559201
| false
| 3.914456
| false
| false
| false
|
mit-dig/punya
|
appinventor/misc/emulator-support/aiWinStarter.py
|
1
|
5833
|
#!/usr/bin/python
from bottle import run,route,app,request,response,template,default_app,Bottle,debug,abort
import sys
import os
import platform
import subprocess
import re
#from flup.server.fcgi import WSGIServer
#from cStringIO import StringIO
#import memcache
app = Bottle()
default_app.push(app)
VERSION = "2.2"
platforms = platform.uname()[0]
print "Platform = %s" % platforms
if platforms == 'Windows': # Windows
PLATDIR = os.environ["ProgramFiles"]
PLATDIR = '"' + PLATDIR + '"'
print "AppInventor tools located here: %s" % PLATDIR
else:
sys.exit(1)
@route('/ping/')
def ping():
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Headers'] = 'origin, content-type'
response.headers['Content-Type'] = 'application/json'
return '{ "status" : "OK", "version" : "%s" }' % VERSION
@route('/utest/')
def utest():
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Headers'] = 'origin, content-type'
response.headers['Content-Type'] = 'application/json'
device = checkrunning(False)
if device:
return '{ "status" : "OK", "device" : "%s", "version" : "%s" }' % (device, VERSION)
else:
return '{ "status" : "NO", "version" : "%s" }' % VERSION
@route('/start/')
def start():
subprocess.call(PLATDIR + "\\AppInventor\\commands-for-Appinventor\\run-emulator ", shell=True)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Headers'] = 'origin, content-type'
return ''
@route('/emulatorreset/')
def emulatorreset():
subprocess.call(PLATDIR + "\\AppInventor\\commands-for-Appinventor\\reset-emulator ", shell=True)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Headers'] = 'origin, content-type'
return ''
@route('/echeck/')
def echeck():
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Headers'] = 'origin, content-type'
response.headers['Content-Type'] = 'application/json'
device = checkrunning(True)
if device:
return '{ "status" : "OK", "device" : "%s", "version" : "%s"}' % (device, VERSION)
else:
return '{ "status" : "NO", "version" : "%s" }' % VERSION
@route('/ucheck/')
def ucheck():
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Headers'] = 'origin, content-type'
response.headers['Content-Type'] = 'application/json'
device = checkrunning(False)
if device:
return '{ "status" : "OK", "device" : "%s", "version" : "%s"}' % (device, VERSION)
else:
return '{ "status" : "NO", "version" : "%s" }' % VERSION
@route('/reset/')
def reset():
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Headers'] = 'origin, content-type'
response.headers['Content-Type'] = 'application/json'
killadb()
killemulator()
return '{ "status" : "OK", "version" : "%s" }' % VERSION
@route('/replstart/:device')
def replstart(device=None):
print "Device = %s" % device
try:
subprocess.check_output((PLATDIR + "\\AppInventor\\commands-for-Appinventor\\adb -s %s forward tcp:8001 tcp:8001") % device, shell=True)
if re.match('.*emulat.*', device): # Only fake the menu key for the emulator
subprocess.check_output((PLATDIR + "\\AppInventor\\commands-for-Appinventor\\adb -s %s shell input keyevent 82") % device, shell=True)
subprocess.check_output((PLATDIR + "\\AppInventor\\commands-for-Appinventor\\adb -s %s shell am start -a android.intent.action.VIEW -n edu.mit.appinventor.punya.aicompanion3/.Screen1 --ez rundirect true") % device, shell=True)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Headers'] = 'origin, content-type'
return ''
except subprocess.CalledProcessError as e:
print "Problem starting companion app : status %i\n" % e.returncode
return ''
def checkrunning(emulator):
    try:
        result = subprocess.check_output(PLATDIR + "\\AppInventor\\commands-for-Appinventor\\adb devices", shell=True)
        # "adb devices" output looks like:
        #   List of devices attached
        #   emulator-5554<TAB>device
        lines = result.split('\n')
        m = None                         # guard against an empty device list
        for line in lines[1:]:
            if emulator:
                m = re.search('^(.*emulator-[0-9]+)\t+device.*', line)
            else:
                if re.search('^(.*emulator-[0-9]+)\t+device.*', line): # We are an emulator
                    continue # Skip it
                # [A-Za-z...]: the old [A-z] range also matched [ \ ] ^ _ and backquote
                m = re.search('^([A-Za-z0-9.:]+.*?)\t+device.*', line)
            if m:
                break
        if m:
            return m.group(1)
        return False
except subprocess.CalledProcessError as e:
print "Problem checking for devices : status %i\n" % e.returncode
return False
def killadb():
try:
subprocess.check_output(PLATDIR + "\\AppInventor\\commands-for-Appinventor\\adb kill-server", shell=True)
print "Killed adb\n"
except subprocess.CalledProcessError as e:
print "Problem stopping adb : status %i\n" % e.returncode
return ''
def killemulator():
try:
subprocess.check_output(PLATDIR + "\\AppInventor\\commands-for-Appinventor\\kill-emulator", shell=True)
print "Killed emulator\n"
except subprocess.CalledProcessError as e:
print "Problem stopping emulator : status %i\n" % e.returncode
return ''
def shutdown():
try: # Be quiet...
killadb()
killemulator()
except:
pass
if __name__ == '__main__':
import atexit
atexit.register(shutdown)
run(host='127.0.0.1', port=8004)
##WSGIServer(app).run()
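# The helper serves JSON over plain HTTP on 127.0.0.1:8004; the App Inventor
# web client polls it to find the emulator, e.g.:
#   GET http://127.0.0.1:8004/ping/   ->  { "status" : "OK", "version" : "2.2" }
#   GET http://127.0.0.1:8004/utest/  ->  adds a "device" field when one is attached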
|
apache-2.0
| -4,360,912,690,102,491,000
| 37.124183
| 234
| 0.621464
| false
| 3.522343
| false
| false
| false
|
JeffRoy/mi-dataset
|
mi/dataset/driver/nutnr_b/dcl_conc/nutnr_b_dcl_conc_telemetered_driver.py
|
1
|
1490
|
#!/usr/local/bin/python2.7
##
# OOIPLACEHOLDER
#
# Copyright 2014 Raytheon Co.
##
__author__ = 'kustert,mworden'
import os
from mi.logging import config
from mi.core.log import get_logger
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.dataset_driver import DataSetDriver
from mi.dataset.parser.nutnr_b_dcl_conc import NutnrBDclConcTelemeteredParser
from mi.core.versioning import version
@version("15.7.0")
def parse(basePythonCodePath, sourceFilePath, particleDataHdlrObj):
config.add_configuration(os.path.join(basePythonCodePath, 'res', 'config', 'mi-logging.yml'))
log = get_logger()
parser_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.nutnr_b_particles',
DataSetDriverConfigKeys.PARTICLE_CLASS: None
}
def exception_callback(exception):
log.debug("ERROR: %r", exception)
particleDataHdlrObj.setParticleDataCaptureFailure()
with open(sourceFilePath, 'r') as stream_handle:
parser = NutnrBDclConcTelemeteredParser(parser_config,
stream_handle,
lambda state, ingested: None,
lambda data: None,
exception_callback)
driver = DataSetDriver(parser, particleDataHdlrObj)
driver.processFileStream()
return particleDataHdlrObj
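# parse() follows the usual mi-dataset driver pattern: build a parser config,
# wrap the source file in the telemetered parser, and let DataSetDriver's
# processFileStream() publish particles through particleDataHdlrObj.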
|
bsd-2-clause
| -7,912,399,062,643,848,000
| 32.111111
| 97
| 0.639597
| false
| 3.890339
| true
| false
| false
|
ReubenAbrams/Chrubix
|
src/setbright.py
|
1
|
3204
|
#!/usr/local/bin/python3
'''simple brightness controller for Chrubix
'''
import sys
import os
# import hashlib
from chrubix.utils import logme, read_oneliner_file
# from chrubix import save_distro_record, load_distro_record
try:
from PyQt4.QtCore import QString
except ImportError:
QString = str
TIME_BETWEEN_CHECKS = 200 # .2 seconds
DELAY_BEFORE_HIDING = 3000 # 3 seconds
from PyQt4.QtCore import pyqtSignature, Qt, QTimer
# from PyQt4.Qt import QLineEdit, QPixmap
from PyQt4 import QtGui # , uic
# from PyQt4 import QtCore
# import resources_rc
from ui.ui_BrightnessControl import Ui_BrightnessControlWidget
class BrightnessControlWidget( QtGui.QDialog, Ui_BrightnessControlWidget ):
def __init__( self ):
# self._password = None
self.cycles = 99
self.brightnow_fname = '%s/.brightnow' % ( os.path.expanduser( "~" ) )
super( BrightnessControlWidget, self ).__init__()
self.setupUi( self )
self.setWindowFlags( Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint | Qt.ToolTip ) # QtCore.Qt.Tool )
self.show()
self.raise_()
self.setAttribute( Qt.WA_ShowWithoutActivating )
# self.setBrightness( 0 )
self.hide()
self.old_brightness = None
# self.speaker_width = self.speakeron.width()
# self.speaker_height = self.speakeron.height()
QTimer.singleShot( TIME_BETWEEN_CHECKS, self.monitor )
def monitor( self ):
noof_checks = DELAY_BEFORE_HIDING / TIME_BETWEEN_CHECKS
if self.cycles > noof_checks:
self.hide()
# print( 'hiding again' )
else:
self.cycles += 1
# print( 'checking' )
if os.path.exists( self.brightnow_fname ):
try:
new_brightness = int( read_oneliner_file( self.brightnow_fname ) )
# logme( 'curr bri = %d' % ( new_brightness ) )
if new_brightness != self.old_brightness:
self.setBrightness( new_brightness )
self.old_brightness = new_brightness
# logme( 'Updating brightness to %d' % ( new_brightness ) )
except ValueError:
logme( 'Bad entry for %s' % ( self.brightnow_fname ) )
# else:
# print( 'Waiting for .brightnow to appear' )
QTimer.singleShot( TIME_BETWEEN_CHECKS, self.monitor )
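        # Re-arming a singleShot timer at the end of each pass yields a simple
        # cooperative polling loop on the Qt event thread, with no QThread and
        # no recurring QTimer to manage.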
def setBrightness( self, brightness ):
# logme( 'setBrightness(%d)' % ( brightness ) )
self.cycles = 0
self.show()
self.progressBar.setValue( brightness )
self.update()
self.repaint()
# self.raise_()
@pyqtSignature( "" )
def closeEvent( self, event ):
event.accept()
sys.exit()
#------------------------------------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
app = QtGui.QApplication( sys.argv )
window = BrightnessControlWidget()
screen = QtGui.QDesktopWidget().screenGeometry()
window.setGeometry( screen.width() - window.width() * 2 - 2, screen.height() - 49, window.width(), window.height() )
sys.exit( app.exec_() )
|
gpl-3.0
| 2,982,620,355,699,736,600
| 31.693878
| 133
| 0.586454
| false
| 3.778302
| false
| false
| false
|
jelly/calibre
|
src/calibre/ebooks/rtf2xml/preamble_div.py
|
2
|
22954
|
#########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import sys, os
from calibre.ebooks.rtf2xml import copy, override_table, list_table
from calibre.ptempfile import better_mktemp
class PreambleDiv:
"""
Break the preamble into divisions.
"""
def __init__(self, in_file,
bug_handler,
copy=None,
no_namespace=None,
run_level=1,
):
"""
Required:
'file'
Optional:
'copy'-- whether to make a copy of result for debugging
'temp_dir' --where to output temporary results (default is
directory from which the script is run.)
Returns:
nothing
"""
self.__file = in_file
self.__bug_handler = bug_handler
self.__copy = copy
self.__no_namespace = no_namespace
self.__write_to = better_mktemp()
self.__run_level = run_level
def __initiate_values(self):
"""
Set values, including those for the dictionary.
"""
self.__all_lists = {}
self.__page = {
'margin-top' : 72,
'margin-bottom' : 72,
'margin-left' : 90,
'margin-right' : 90,
'gutter' : 0,
}
self.__cb_count = ''
self.__ob_count = ''
self.__state = 'preamble'
self.__rtf_final = ''
self.__close_group_count = ''
self.__found_font_table = 0
self.__list_table_final = ''
self.__override_table_final = ''
self.__revision_table_final = ''
self.__doc_info_table_final = ''
self.__state_dict = {
'default' : self.__default_func,
'rtf_header' : self.__rtf_head_func,
'preamble' : self.__preamble_func,
'font_table' : self.__font_table_func,
'color_table' : self.__color_table_func,
'style_sheet' : self.__style_sheet_func,
'list_table' : self.__list_table_func,
'override_table' : self.__override_table_func,
'revision_table' : self.__revision_table_func,
'doc_info' : self.__doc_info_func,
'body' : self.__body_func,
'ignore' : self.__ignore_func,
'cw<ri<rtf_______' : self.__found_rtf_head_func,
'cw<pf<par-def___' : self.__para_def_func,
'tx<nu<__________' : self.__text_func,
'cw<tb<row-def___' : self.__row_def_func,
'cw<sc<section___' : self.__new_section_func,
'cw<sc<sect-defin' : self.__new_section_func,
'cw<it<font-table' : self.__found_font_table_func,
'cw<it<colr-table' : self.__found_color_table_func,
'cw<ss<style-shet' : self.__found_style_sheet_func,
'cw<it<listtable_' : self.__found_list_table_func,
'cw<it<lovr-table' : self.__found_override_table_func,
'cw<it<revi-table' : self.__found_revision_table_func,
'cw<di<doc-info__' : self.__found_doc_info_func,
'cw<pa<margin-lef' : self.__margin_func,
'cw<pa<margin-rig' : self.__margin_func,
'cw<pa<margin-top' : self.__margin_func,
'cw<pa<margin-bot' : self.__margin_func,
'cw<pa<gutter____' : self.__margin_func,
'cw<pa<paper-widt' : self.__margin_func,
'cw<pa<paper-hght' : self.__margin_func,
# 'cw<tb<columns___' : self.__section_func,
}
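        # Keys are the first 16 characters of each tokenized line (the "token
        # info"), e.g. 'cw<pa<margin-lef' from 'cw<pa<margin-lef<nu<1728'.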
self.__margin_dict = {
'margin-lef' : 'margin-left',
'margin-rig' : 'margin-right',
'margin-top' : 'margin-top',
'margin-bot' : 'margin-bottom',
'gutter____' : 'gutter',
'paper-widt' : 'paper-width',
'paper-hght' : 'paper-height',
}
self.__translate_sec = {
'columns___' : 'column',
}
self.__section = {}
# self.__write_obj.write(self.__color_table_final)
self.__color_table_final = ''
self.__style_sheet_final = ''
self.__individual_font = 0
self.__old_font = 0
self.__ob_group = 0 # depth of group
self.__font_table_final = 0
self.__list_table_obj = list_table.ListTable(
run_level=self.__run_level,
bug_handler=self.__bug_handler,
)
def __ignore_func(self, line):
"""
Ignore all lines, until the bracket is found that marks the end of
the group.
"""
if self.__ignore_num == self.__cb_count:
self.__state = self.__previous_state
def __found_rtf_head_func(self, line):
self.__state = 'rtf_header'
def __rtf_head_func(self, line):
if self.__ob_count == '0002':
self.__rtf_final = (
'mi<mk<rtfhed-beg\n' +
self.__rtf_final +
'mi<mk<rtfhed-end\n'
)
self.__state = 'preamble'
elif self.__token_info == 'tx<nu<__________' or \
self.__token_info == 'cw<pf<par-def___':
self.__state = 'body'
self.__rtf_final = (
'mi<mk<rtfhed-beg\n' +
self.__rtf_final +
'mi<mk<rtfhed-end\n'
)
self.__make_default_font_table()
self.__write_preamble()
self.__write_obj.write(line)
else:
self.__rtf_final = self.__rtf_final + line
def __make_default_font_table(self):
"""
        If no font table is found, a default one needs to be written out.
"""
self.__font_table_final = 'mi<tg<open______<font-table\n'
self.__font_table_final += 'mi<mk<fonttb-beg\n'
self.__font_table_final += 'mi<mk<fontit-beg\n'
self.__font_table_final += 'cw<ci<font-style<nu<0\n'
self.__font_table_final += 'tx<nu<__________<Times;\n'
self.__font_table_final += 'mi<mk<fontit-end\n'
self.__font_table_final += 'mi<mk<fonttb-end\n'
self.__font_table_final += 'mi<tg<close_____<font-table\n'
def __make_default_color_table(self):
"""
If no color table is found, write a string for a default one
"""
self.__color_table_final = 'mi<tg<open______<color-table\n'
self.__color_table_final += 'mi<mk<clrtbl-beg\n'
self.__color_table_final += 'cw<ci<red_______<nu<00\n'
self.__color_table_final += 'cw<ci<green_____<nu<00\n'
self.__color_table_final += 'cw<ci<blue______<en<00\n'
self.__color_table_final += 'mi<mk<clrtbl-end\n'
self.__color_table_final += 'mi<tg<close_____<color-table\n'
def __make_default_style_table(self):
"""
        If no style sheet is found, make a string for a default one.
"""
"""
self.__style_sheet_final = 'mi<tg<open______<style-table\n'
self.__style_sheet_final +=
self.__style_sheet_final +=
self.__style_sheet_final +=
self.__style_sheet_final +=
self.__style_sheet_final +=
self.__style_sheet_final += 'mi<tg<close_____<style-table\n'
"""
self.__style_sheet_final = """mi<tg<open______<style-table
mi<mk<styles-beg
mi<mk<stylei-beg
cw<ci<font-style<nu<0
tx<nu<__________<Normal;
mi<mk<stylei-end
mi<mk<stylei-beg
cw<ss<char-style<nu<0
tx<nu<__________<Default Paragraph Font;
mi<mk<stylei-end
mi<mk<styles-end
mi<tg<close_____<style-table
"""
def __found_font_table_func(self, line):
if self.__found_font_table:
self.__state = 'ignore'
else:
self.__state = 'font_table'
self.__font_table_final = ''
self.__close_group_count = self.__ob_count
self.__cb_count = 0
self.__found_font_table = 1
def __font_table_func(self, line):
"""
Keep adding to the self.__individual_font string until end of group
found. If a bracket is found, check that it is only one bracket deep.
If it is, then set the marker for an individual font. If it is not,
then ignore all data in this group.
cw<ci<font-style<nu<0
"""
if self.__cb_count == self.__close_group_count:
self.__state = 'preamble'
self.__font_table_final = 'mi<tg<open______<font-table\n' + \
'mi<mk<fonttb-beg\n' + self.__font_table_final
self.__font_table_final += \
'mi<mk<fonttb-end\n' + 'mi<tg<close_____<font-table\n'
elif self.__token_info == 'ob<nu<open-brack':
if int(self.__ob_count) == int(self.__close_group_count) + 1:
self.__font_table_final += \
'mi<mk<fontit-beg\n'
self.__individual_font = 1
else:
# ignore
self.__previous_state = 'font_table'
self.__state = 'ignore'
self.__ignore_num = self.__ob_count
elif self.__token_info == 'cb<nu<clos-brack':
if int(self.__cb_count) == int(self.__close_group_count) + 1:
self.__individual_font = 0
self.__font_table_final += \
'mi<mk<fontit-end\n'
elif self.__individual_font:
if self.__old_font and self.__token_info == 'tx<nu<__________':
if ';' in line:
self.__font_table_final += line
self.__font_table_final += 'mi<mk<fontit-end\n'
self.__individual_font = 0
else:
self.__font_table_final += line
elif self.__token_info == 'cw<ci<font-style':
self.__old_font = 1
self.__individual_font = 1
self.__font_table_final += 'mi<mk<fontit-beg\n'
self.__font_table_final += line
def __old_font_func(self, line):
"""
Required:
line --line to parse
Returns:
nothing
Logic:
used for older forms of RTF:
\f3\fswiss\fcharset77 Helvetica-Oblique;\f4\fnil\fcharset77 Geneva;}
Note how each font is not divided by a bracket
"""
def __found_color_table_func(self, line):
"""
all functions that start with __found operate the same. They set the
state, initiate a string, determine the self.__close_group_count, and
set self.__cb_count to zero.
"""
self.__state = 'color_table'
self.__color_table_final = ''
self.__close_group_count = self.__ob_count
self.__cb_count = 0
def __color_table_func(self, line):
if int(self.__cb_count) == int(self.__close_group_count):
self.__state = 'preamble'
self.__color_table_final = 'mi<tg<open______<color-table\n' + \
'mi<mk<clrtbl-beg\n' + self.__color_table_final
self.__color_table_final += \
'mi<mk<clrtbl-end\n' + 'mi<tg<close_____<color-table\n'
else:
self.__color_table_final += line
def __found_style_sheet_func(self, line):
self.__state = 'style_sheet'
self.__style_sheet_final = ''
self.__close_group_count = self.__ob_count
self.__cb_count = 0
def __style_sheet_func(self, line):
"""
Same logic as the font_table_func.
"""
if self.__cb_count == self.__close_group_count:
self.__state = 'preamble'
self.__style_sheet_final = 'mi<tg<open______<style-table\n' + \
'mi<mk<styles-beg\n' + self.__style_sheet_final
self.__style_sheet_final += \
'mi<mk<styles-end\n' + 'mi<tg<close_____<style-table\n'
elif self.__token_info == 'ob<nu<open-brack':
if int(self.__ob_count) == int(self.__close_group_count) + 1:
self.__style_sheet_final += \
'mi<mk<stylei-beg\n'
elif self.__token_info == 'cb<nu<clos-brack':
if int(self.__cb_count) == int(self.__close_group_count) + 1:
self.__style_sheet_final += \
'mi<mk<stylei-end\n'
else:
self.__style_sheet_final += line
def __found_list_table_func(self, line):
self.__state = 'list_table'
self.__list_table_final = ''
self.__close_group_count = self.__ob_count
self.__cb_count = 0
def __list_table_func(self, line):
if self.__cb_count == self.__close_group_count:
self.__state = 'preamble'
self.__list_table_final, self.__all_lists =\
self.__list_table_obj.parse_list_table(
self.__list_table_final)
# sys.stderr.write(repr(all_lists))
elif self.__token_info == '':
pass
else:
self.__list_table_final += line
pass
def __found_override_table_func(self, line):
self.__override_table_obj = override_table.OverrideTable(
run_level=self.__run_level,
list_of_lists=self.__all_lists,
)
self.__state = 'override_table'
self.__override_table_final = ''
self.__close_group_count = self.__ob_count
self.__cb_count = 0
# cw<it<lovr-table
def __override_table_func(self, line):
if self.__cb_count == self.__close_group_count:
self.__state = 'preamble'
self.__override_table_final, self.__all_lists =\
self.__override_table_obj.parse_override_table(self.__override_table_final)
elif self.__token_info == '':
pass
else:
self.__override_table_final += line
def __found_revision_table_func(self, line):
self.__state = 'revision_table'
self.__revision_table_final = ''
self.__close_group_count = self.__ob_count
self.__cb_count = 0
def __revision_table_func(self, line):
if int(self.__cb_count) == int(self.__close_group_count):
self.__state = 'preamble'
self.__revision_table_final = 'mi<tg<open______<revision-table\n' + \
'mi<mk<revtbl-beg\n' + self.__revision_table_final
self.__revision_table_final += \
'mi<mk<revtbl-end\n' + 'mi<tg<close_____<revision-table\n'
else:
self.__revision_table_final += line
def __found_doc_info_func(self, line):
self.__state = 'doc_info'
self.__doc_info_table_final = ''
self.__close_group_count = self.__ob_count
self.__cb_count = 0
def __doc_info_func(self, line):
if self.__cb_count == self.__close_group_count:
self.__state = 'preamble'
self.__doc_info_table_final = 'mi<tg<open______<doc-information\n' + \
'mi<mk<doc-in-beg\n' + self.__doc_info_table_final
self.__doc_info_table_final += \
'mi<mk<doc-in-end\n' + 'mi<tg<close_____<doc-information\n'
elif self.__token_info == 'ob<nu<open-brack':
if int(self.__ob_count) == int(self.__close_group_count) + 1:
self.__doc_info_table_final += \
'mi<mk<docinf-beg\n'
elif self.__token_info == 'cb<nu<clos-brack':
if int(self.__cb_count) == int(self.__close_group_count) + 1:
self.__doc_info_table_final += \
'mi<mk<docinf-end\n'
else:
self.__doc_info_table_final += line
def __margin_func(self, line):
"""
        Handles lines that describe page info. Add the appropriate info from
        the token to the self.__page dictionary.
"""
info = line[6:16]
changed = self.__margin_dict.get(info)
if changed is None:
print 'woops!'
else:
self.__page[changed] = line[20:-1]
# cw<pa<margin-lef<nu<1728
def __print_page_info(self):
self.__write_obj.write('mi<tg<empty-att_<page-definition')
for key in self.__page.keys():
self.__write_obj.write(
'<%s>%s' % (key, self.__page[key])
)
self.__write_obj.write('\n')
# mi<tg<open-att__<footn
def __print_sec_info(self):
"""
Check if there is any section info. If so, print it out.
If not, print out an empty tag to satisfy the dtd.
"""
if len(self.__section.keys()) == 0:
self.__write_obj.write(
'mi<tg<open______<section-definition\n'
)
else:
self.__write_obj.write(
'mi<tg<open-att__<section-definition')
keys = self.__section.keys()
for key in keys:
self.__write_obj.write(
'<%s>%s' % (key, self.__section[key])
)
self.__write_obj.write('\n')
def __section_func(self, line):
"""
Add info pertaining to section to the self.__section dictionary, to be
printed out later.
"""
info = self.__translate_sec.get(line[6:16])
if info is None:
sys.stderr.write('woops!\n')
else:
self.__section[info] = 'true'
def __body_func(self, line):
self.__write_obj.write(line)
def __default_func(self, line):
# either in preamble or in body
pass
def __para_def_func(self, line):
# if self.__ob_group == 1
# this tells dept of group
if self.__cb_count == '0002':
self.__state = 'body'
self.__write_preamble()
self.__write_obj.write(line)
def __text_func(self, line):
"""
If the cb_count is less than 1, you have hit the body
For older RTF
Newer RTF should never have to use this function
"""
if self.__cb_count == '':
cb_count = '0002'
else:
cb_count = self.__cb_count
# ignore previous lines
# should be
# if self.__ob_group == 1
# this tells dept of group
if cb_count == '0002':
self.__state = 'body'
self.__write_preamble()
self.__write_obj.write(line)
def __row_def_func(self, line):
# if self.__ob_group == 1
# this tells dept of group
if self.__cb_count == '0002':
self.__state = 'body'
self.__write_preamble()
self.__write_obj.write(line)
def __new_section_func(self, line):
"""
This is new. The start of a section marks the end of the preamble
"""
if self.__cb_count == '0002':
self.__state = 'body'
self.__write_preamble()
else:
sys.stderr.write('module is preamble_div\n')
sys.stderr.write('method is __new_section_func\n')
sys.stderr.write('bracket count should be 2?\n')
self.__write_obj.write(line)
def __write_preamble(self):
"""
Write all the strings, which represent all the data in the preamble.
Write a body and section beginning.
"""
if self.__no_namespace:
self.__write_obj.write(
'mi<tg<open______<doc\n'
)
else:
self.__write_obj.write(
'mi<tg<open-att__<doc<xmlns>http://rtf2xml.sourceforge.net/\n')
self.__write_obj.write('mi<tg<open______<preamble\n')
self.__write_obj.write(self.__rtf_final)
if not self.__color_table_final:
self.__make_default_color_table()
if not self.__font_table_final:
self.__make_default_font_table()
self.__write_obj.write(self.__font_table_final)
self.__write_obj.write(self.__color_table_final)
if not self.__style_sheet_final:
self.__make_default_style_table()
self.__write_obj.write(self.__style_sheet_final)
self.__write_obj.write(self.__list_table_final)
self.__write_obj.write(self.__override_table_final)
self.__write_obj.write(self.__revision_table_final)
self.__write_obj.write(self.__doc_info_table_final)
self.__print_page_info()
self.__write_obj.write('ob<nu<open-brack<0001\n')
self.__write_obj.write('ob<nu<open-brack<0002\n')
self.__write_obj.write('cb<nu<clos-brack<0002\n')
self.__write_obj.write('mi<tg<close_____<preamble\n')
self.__write_obj.write('mi<tg<open______<body\n')
# self.__write_obj.write('mi<tg<open-att__<section<num>1\n')
# self.__print_sec_info()
# self.__write_obj.write('mi<tg<open______<headers-and-footers\n')
# self.__write_obj.write('mi<mk<head_foot_<\n')
# self.__write_obj.write('mi<tg<close_____<headers-and-footers\n')
self.__write_obj.write('mi<mk<body-open_\n')
def __preamble_func(self, line):
"""
Check if the token info belongs to the dictionary. If so, take the
appropriate action.
"""
action = self.__state_dict.get(self.__token_info)
if action:
action(line)
def make_preamble_divisions(self):
self.__initiate_values()
read_obj = open(self.__file, 'r')
self.__write_obj = open(self.__write_to, 'w')
line_to_read = 1
while line_to_read:
line_to_read = read_obj.readline()
line = line_to_read
self.__token_info = line[:16]
if self.__token_info == 'ob<nu<open-brack':
self.__ob_count = line[-5:-1]
self.__ob_group += 1
if self.__token_info == 'cb<nu<clos-brack':
self.__cb_count = line[-5:-1]
self.__ob_group -= 1
action = self.__state_dict.get(self.__state)
if action is None:
print self.__state
action(line)
read_obj.close()
self.__write_obj.close()
copy_obj = copy.Copy(bug_handler=self.__bug_handler)
if self.__copy:
copy_obj.copy_file(self.__write_to, "preamble_div.data")
copy_obj.rename(self.__write_to, self.__file)
os.remove(self.__write_to)
return self.__all_lists
|
gpl-3.0
| -456,268,684,472,462,900
| 38.037415
| 91
| 0.500218
| false
| 3.565947
| false
| false
| false
|
bigent/sinemalar-python
|
sinemalar/api.py
|
1
|
13305
|
import datetime, json
import requests
from .core import CallObject
from . import str2bool
class Artist(object):
def __init__(self, artist):
if type(artist) is not dict:
raise TypeError("Type of 'artist' must be 'dict'.")
self.id = int(artist['id'])
self.nameSurname = artist['nameSurname']
self.characterName = str2bool(artist['characterName'], True)
self.image = artist['image']
class Comment(object):
def __init__(self, comment):
if type(comment) is not dict:
raise TypeError("Type of 'comment' must be 'dict'.")
self.id = int(comment['id'])
self.username = comment['username']
self.comment = comment['comment']
self.addDate = datetime.datetime.strptime(comment['addDate'], '%Y-%m-%d %H:%M:%S')
class Movie(CallObject):
def __init__(self, movie_id=0, display_artists=False, display_comments=False, movie=None, to_gallery=False):
if type(movie_id) is not int:
raise TypeError("Type of 'movie_id' must be 'int'.")
if movie and movie_id:
raise ValueError("Only one can set a value.")
if not movie and not movie_id:
raise ValueError("You should set a value to 'movie_id' or 'movie'.")
CallObject.__init__(self)
self.to_gallery = to_gallery
self._path_name = "movie"
self.movie_id = movie_id
if movie:
if type(movie) is not dict:
raise TypeError("Type of 'movie' must be 'dict'.")
self.id = movie['id']
self.name = movie['name']
self.orgName = movie['orgName']
            # optional fields -- not every movie payload includes them
            try:
self.image = movie['image']
self.rating = float(movie['rating'])
except:
pass
try:
self.type = movie['type']
self.seances = []
for i in movie['seances']:
self.seances.append(datetime.datetime.strptime(i, '%H:%M'))
self.selected = int(movie['selected'])
except:
pass
try:
self.director = movie['director']
except:
pass
elif not to_gallery:
if type(display_artists) is not bool:
raise TypeError("Type of 'display_artist' must be 'boolean'.")
if type(display_comments) is not bool:
raise TypeError("Type of 'display_comments' must be 'boolean'.")
self.display_artists = display_artists
self.display_comments = display_comments
            # Fetch the API response once; every call to show() issues a
            # new HTTP request.
            data = self.show()
            movie_data = data[self._path_name]
            if str2bool(movie_data['id'], True):
                self.id = int(movie_data['id'])
            else:
                raise ValueError("Not found any movie of this ID.")
            self.name = movie_data['name']
            self.orgName = movie_data['orgName']
            self.image = movie_data['image']
            self.rating = float(movie_data['rating'])
            self.type = movie_data['type']
            self.director = movie_data['director']
            self.summary = str2bool(movie_data['summary'], True)
            self.duration = str2bool(movie_data['duration'], True)
            self.produceYear = int(movie_data['produceYear'])
            self.week = str2bool(movie_data['week'], True)
            self.pubDate = str2bool(movie_data['pubDate'], True)
            self.embedId = str2bool(movie_data['embedId'], True)
            self.embedTitle = str2bool(movie_data['embedTitle'], True)
            self.trailerUrl = movie_data['trailerUrl']
            # artists
            if display_artists:
                self.artists = []
                for i in data['artists']:
                    self.artists.append(Artist(i))
            # comments
            if display_comments:
                self.comments = []
                for i in data['comments']:
                    self.comments.append(Comment(i))
else:
if type(to_gallery) is not bool:
raise TypeError("Type of 'to_gallery' must be 'boolean'.")
self.gallery = []
if str2bool(self.show(), True):
for i in self.show():
self.gallery.append(i)
else:
raise ValueError("Not found any movie of this ID.")
def show(self):
if self.to_gallery:
return self.GET(
'gallery',
self._path_name,
self.movie_id,
)
else:
return self.GET(
self._path_name,
self.movie_id,
self.is_True(self.display_artists),
self.is_True(self.display_comments)
)
class Theatre(object):
def __init__(self, theatre):
if type(theatre) is not dict:
raise TypeError("Type of 'theatre' must be 'dict'.")
self.id = int(theatre['id'])
self.name = theatre['name']
try:
self.seances = []
for i in theatre['seances'][0]:
self.seances.append(datetime.datetime.strptime(i, '%H:%M'))
self.selected = theatre['selected']
except:
pass
try:
self.city = theatre['city']
self.latitude = float(theatre['latitude'])
self.longitude = float(theatre['longitude'])
self.phone = theatre['phone']
self.address = theatre['address']
except:
pass
try:
self.ad = theatre['ad']
#seances
self.movies = []
for i in theatre['movies']:
                self.movies.append(Movie(movie=i))
except:
pass
try:
self.town = theatre['town']
self.distance = theatre['distance']
except:
pass
try:
self.cityId = int(theatre['cityId'])
except:
pass
class Theatres(CallObject):
def __init__(self, theatre_id=0, city_id=0, city_count=1000):
if type(theatre_id) is not int:
raise TypeError("Type of 'theatre_id' must be 'int'.")
if type(city_id) is not int:
raise TypeError("Type of 'city_id' must be 'int'.")
if type(city_count) is not int:
raise TypeError("Type of 'city_count' must be 'int'.")
        if theatre_id and city_id:
            raise ValueError("Only one of 'theatre_id' and 'city_id' may be set.")
        if not theatre_id and not city_id:
            raise ValueError("Either 'theatre_id' or 'city_id' must be set.")
CallObject.__init__(self)
self._path_name = "theatre"
self.theatre_id = theatre_id
self.city_id = city_id
self.city_count = city_count
if city_id:
if str2bool(self.show()[0]['id'], True):
self.theatres = []
for i in self.show():
self.theatres.append(Theatre(i))
else:
raise ValueError("Not found any city of this ID.")
        else:
            # show() already returns the single theatre dict in this branch,
            # so index into it directly rather than with [0].
            if str2bool(self.show()['id'], True):
                self.theatre = Theatre(self.show())
            else:
                raise ValueError("Not found any theatre of this ID.")
def show(self):
if self.city_id:
return self.GET(
self._path_name,
0,
1,
self.city_id,
self.city_count
)
else:
return self.GET(
self._path_name,
self.theatre_id,
)[0]
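# Hypothetical usage sketch (not part of the original module; the city ID
# below is illustrative):
# theatres = Theatres(city_id=34)
# for t in theatres.theatres:
#     print(t.name)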
class NearTheatre(CallObject):
def __init__(self, lat=41.0, lng=30.0):
if type(lat) is not float:
if type(lat) is not int:
raise TypeError("Type of 'lat' must be 'float' or 'int'.")
if type(lng) is not float:
if type(lng) is not int:
raise TypeError("Type of 'lng' must be 'float' or 'int'.")
CallObject.__init__(self)
self._path_name = "nearTheatre"
self._latitude = str(lat)
self._longitude = str(lng)
try:
self.show()
except:
raise ValueError("Not found any near theatre in this latitude and longitude.")
        # Collect theatres from both distance buckets instead of letting the
        # second branch overwrite the first.
        self.theatres = []
        if str2bool(self.show()['tenPlus'], True) is not False:
            for i in self.show()['tenPlus']:
                self.theatres.append(Theatre(i))
        if str2bool(self.show()['five'], True) is not False:
            for i in self.show()['five']:
                self.theatres.append(Theatre(i))
def show(self):
return self.GET(
"gps",
self._path_name,
self._latitude,
self._longitude
)
class City(object):
def __init__(self, city):
if type(city) is not dict:
raise TypeError("Type of 'city' must be 'dict'.")
self.id = int(city['id'])
self.name = city['name']
class Cities(CallObject):
def __init__(self):
CallObject.__init__(self)
self._path_name = "cities"
#cities
self.cities = []
for i in self.show():
self.cities.append(City(city=i))
def show(self):
return self.GET(
self._path_name,
"0",
)
class PlayingMovies(CallObject):
def __init__(self):
CallObject.__init__(self)
self.api_domain = "www.sinemalar.com"
self._path_name = "playingMovies"
self.sections = []
for i in self.show()['sections']:
self.sections.append(i)
self.movies = []
for i in self.show()['movies']:
for z in i:
self.movies.append(Movie(movie=z))
def show(self):
return self.GET(
self._path_name,
)
class PlayingMoviesRemain(PlayingMovies):
def __init__(self):
PlayingMovies.__init__(self)
self._path_name = "playingMoviesRemain"
class ComingSoon(PlayingMovies):
def __init__(self):
PlayingMovies.__init__(self)
self._path_name = "comingSoon"
class NearestSeances(CallObject):
def __init__(self, movie_id, lat=41.0, lng=30.0):
if type(movie_id) is not int:
raise TypeError("Type of 'movie_id' must be 'int'.")
if type(lat) is not float:
if type(lat) is not int:
raise TypeError("Type of 'lat' must be 'float' or 'int'.")
if type(lng) is not float:
if type(lng) is not int:
raise TypeError("Type of 'lng' must be 'float' or 'int'.")
CallObject.__init__(self)
self._path_name = "seance"
self.movie_id = movie_id
self._latitude = str(lat)
self._longitude = str(lng)
try:
self.show()
except:
raise ValueError("Not found the nearest seance of the movie in this latitude and longitude.")
self.seances = []
for i in self.show()['seances']:
self.seances.append(datetime.datetime.strptime(i, '%H:%M'))
self.selected = self.show()['selected']
self.cinema = Theatre(self.show()['cinema'])
def show(self):
return self.GET(
"gps",
self._path_name,
self._latitude,
self._longitude,
self.movie_id
)
class TheatreSeance(CallObject):
def __init__(self, city_id, movie_id):
if type(city_id) is not int:
raise TypeError("Type of 'city_id' must be 'int'.")
if type(movie_id) is not int:
raise TypeError("Type of 'movie_id' must be 'int'.")
CallObject.__init__(self)
self._path_name = "theatreSeance"
self.city_id = city_id
self.movie_id = movie_id
if not str2bool(self.show()['movie']['id'], True):
raise ValueError("Not found any movie of this ID.")
self.movie = Movie(movie=self.show()['movie'])
self.theatres = []
for i in self.show()['theatre']:
self.theatres.append(Theatre(i))
def show(self):
return self.GET(
self._path_name,
self.city_id,
self.movie_id
)
class ArtistGallery(CallObject):
def __init__(self, artist_id):
if type(artist_id) is not int:
raise TypeError("Type of 'artist_id' must be 'int'.")
CallObject.__init__(self)
self._path_name = "artist"
self.artist_id = artist_id
if not str2bool(self.show(), True):
raise ValueError("Not found any artist of this ID.")
self.gallery = []
for i in self.show():
self.gallery.append(i)
def show(self):
return self.GET(
"gallery",
self._path_name,
self.artist_id,
)
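# Hypothetical usage sketch (not part of the original module; the movie ID
# is illustrative and must exist on the service):
# movie = Movie(movie_id=4567, display_artists=True)
# print(movie.name, movie.rating)
# for artist in movie.artists:
#     print(artist.nameSurname)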
|
mit
| 7,511,655,941,144,440,000
| 29.798611
| 112
| 0.514543
| false
| 3.949243
| false
| false
| false
|
sammosummo/sammosummo.github.io
|
assets/_scripts/variable-precision.py
|
1
|
4851
|
# Approximate the sum of two von Mises distributions with a single von Mises.
import numpy as np
import matplotlib.pyplot as plt  # required by sim(plot=True)
from scipy.stats import vonmises
def sim(a, b, plot=False, n=int(1e8)):
    unwrapped = vonmises.rvs(a, size=n) + vonmises.rvs(b, size=n)
wrapped = (unwrapped + np.pi) % (2 * np.pi) - np.pi
kappa, _, _ = vonmises.fit(wrapped, floc=0, fscale=1)
if plot is True:
        plt.hist(wrapped, density=True, bins=100)
x = np.linspace(-np.pi, np.pi)
y = vonmises.pdf(x, kappa)
plt.plot(x, y)
return kappa
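# Example call (hypothetical parameters; n reduced so the simulation
# finishes quickly):
# kappa_eq = sim(4.0, 4.0, n=int(1e6))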
# import numpy as np
# import pymc3 as pm
# import matplotlib.pyplot as plt
# import theano.tensor as tt
# from scipy.stats import norm, vonmises
# from scipy.integrate import quad
#
#
# n = 10000
# mu = 3
# sigma = 3
#
# k = np.exp(norm.rvs(mu, sigma, size=n))
# x = vonmises.rvs(kappa=k, size=n)
#
# with pm.Model():
#
# mu = pm.Normal(name="mu", mu=0, sigma=10)
# sigma = pm.HalfCauchy(name="sigma", beta=1)
# delta = pm.Normal(name="delta", mu=0, sigma=1, shape=n)
# kappa = tt.exp(mu + delta * sigma) # IMPORTANT! Use non-centered parameterization
# pm.VonMises(name="obs", mu=0, kappa=kappa, observed=x)
# trace = pm.sample(10000, tune=5000, chains=2)
# pm.traceplot(trace, compact=True, var_names=["mu", "sigma"])
# plt.savefig("tmp.png")
#
# # hist(x, bins=100, normed=True)
# #
# # x = np.linspace(-np.pi, np.pi, 100)
# #
# # def pdf(x, mu, sigma, a):
# # g = 1
# # v = vonmises.pdf(x, kappa=mu)
# # def f(k, x):
# # g = gamma.pdf(k, mu**2 / sigma**2, scale=1. / (mu / sigma**2))
# # v = vonmises.pdf(x, kappa=k)
# # return g * v
# # return [quad(f, 0, a, _x)[0] for _x in x]
# #
# # def logpdf(x, mu, sigma, a):
# # g = 1
# # v = vonmises.pdf(x, kappa=mu)
# # def f(k, x):
# # g = gamma.logpdf(k, mu**2 / sigma**2, scale=1. / (mu / sigma**2))
# # v = vonmises.logpdf(x, kappa=k)
# # return g * v
# # return [quad(f, 0, a, _x)[0] for _x in x]
# #
# # [plot(x, pdf(x, mu, sigma, a)) for a in [500]]
# #
# #
# # plot(x, np.log(pdf(x, mu, sigma)))
#
#
#
#
#
#
# # from scipy.integrate import quad
# # import theano
# # import theano.tensor as tt
# # import numpy as np
# # import pymc3 as pm
# #
# #
# # class Integrate(theano.Op):
# # def __init__(self, expr, var, *extra_vars):
# # super().__init__()
# # self._expr = expr
# # self._var = var
# # self._extra_vars = extra_vars
# # self._func = theano.function(
# # [var] + list(extra_vars),
# # self._expr,
# # on_unused_input='ignore')
# #
# # def make_node(self, start, stop, *extra_vars):
# # self._extra_vars_node = extra_vars
# # assert len(self._extra_vars) == len(extra_vars)
# # self._start = start
# # self._stop = stop
# # vars = [start, stop] + list(extra_vars)
# # # vars = list(extra_vars)
# # return theano.Apply(self, vars, [tt.dscalar().type()])
# #
# # def perform(self, node, inputs, out):
# # start, stop, *args = inputs
# # val = quad(self._func, start, stop, args=tuple(args))[0]
# # out[0][0] = np.array(val)
# #
# # def grad(self, inputs, grads):
# # start, stop, *args = inputs
# # out, = grads
# # replace = dict(zip(self._extra_vars, args))
# #
# # replace_ = replace.copy()
# # replace_[self._var] = start
# # dstart = out * theano.clone(-self._expr, replace=replace_)
# #
# # replace_ = replace.copy()
# # replace_[self._var] = stop
# # dstop = out * theano.clone(self._expr, replace=replace_)
# #
# # grads = tt.grad(self._expr, self._extra_vars)
# # dargs = []
# # for grad in grads:
# # integrate = Integrate(grad, self._var, *self._extra_vars)
# # darg = out * integrate(start, stop, *args)
# # dargs.append(darg)
# #
# # return [dstart, dstop] + dargs
# #
# #
# # y_obs = 8.3
# #
# # start = theano.shared(1.)
# # stop = theano.shared(2.)
# # with pm.Model() as basic_model:
# # a = pm.Uniform('a', 1.5, 3.5)
# # b = pm.Uniform('b', 4., 6.)
# #
# # # Define the function to integrate in plain theano
# # t = tt.dscalar('t')
# # t.tag.test_value = np.zeros(())
# # a_ = tt.dscalar('a_')
# # a_.tag.test_value = np.ones(())*2.
# # b_ = tt.dscalar('b_')
# # b_.tag.test_value = np.ones(())*5.
# # func = t**a_ + b_
# # integrate = Integrate(func, t, a_, b_)
# #
# # # Now we plug in the values from the model.
# # # The `a_` and `b_` from above corresponds to the `a` and `b` here.
# # mu = integrate(start, stop, a, b)
# # y = pm.Normal('y', mu=mu, sd=0.4, observed=y_obs)
# # trace = pm.sample(1500, tune=500, cores=2, chains=2)
|
mit
| 1,727,707,249,573,825,000
| 30.506494
| 88
| 0.527726
| false
| 2.622162
| false
| false
| false
|
googleapis/googleapis-gen
|
google/cloud/retail/v2alpha/retail-v2alpha-py/google/cloud/retail_v2alpha/types/product.py
|
1
|
11878
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.retail_v2alpha.types import common
from google.protobuf import timestamp_pb2 # type: ignore
from google.protobuf import wrappers_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.cloud.retail.v2alpha',
manifest={
'Product',
},
)
class Product(proto.Message):
r"""Product captures all metadata information of items to be
recommended or searched.
Attributes:
name (str):
Immutable. Full resource name of the product, such as
"projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/product_id".
The branch ID must be "default_branch".
id (str):
Immutable. [Product][google.cloud.retail.v2alpha.Product]
identifier, which is the final component of
[name][google.cloud.retail.v2alpha.Product.name]. For
example, this field is "id_1", if
[name][google.cloud.retail.v2alpha.Product.name] is
"projects/*/locations/global/catalogs/default_catalog/branches/default_branch/products/id_1".
This field must be a UTF-8 encoded string with a length
limit of 128 characters. Otherwise, an INVALID_ARGUMENT
error is returned.
Google Merchant Center property
`id <https://support.google.com/merchants/answer/6324405>`__.
Schema.org Property
`Product.sku <https://schema.org/sku>`__.
type_ (google.cloud.retail_v2alpha.types.Product.Type):
Immutable. The type of the product. This
field is output-only.
primary_product_id (str):
Variant group identifier. Must be an
[id][google.cloud.retail.v2alpha.Product.id], with the same
parent branch with this product. Otherwise, an error is
thrown.
For
[Type.PRIMARY][google.cloud.retail.v2alpha.Product.Type.PRIMARY]
[Product][google.cloud.retail.v2alpha.Product]s, this field
can only be empty or set to the same value as
[id][google.cloud.retail.v2alpha.Product.id].
For VARIANT [Product][google.cloud.retail.v2alpha.Product]s,
this field cannot be empty. A maximum of 2,000 products are
allowed to share the same
[Type.PRIMARY][google.cloud.retail.v2alpha.Product.Type.PRIMARY]
[Product][google.cloud.retail.v2alpha.Product]. Otherwise,
an INVALID_ARGUMENT error is returned.
Google Merchant Center Property
`item_group_id <https://support.google.com/merchants/answer/6324507>`__.
Schema.org Property
`Product.inProductGroupWithID <https://schema.org/inProductGroupWithID>`__.
This field must be enabled before it can be used. `Learn
more </recommendations-ai/docs/catalog#item-group-id>`__.
categories (Sequence[str]):
Product categories. This field is repeated for supporting
one product belonging to several parallel categories.
            Using the full path is strongly recommended for better
            search / recommendation quality.
To represent full path of category, use '>' sign to separate
different hierarchies. If '>' is part of the category name,
please replace it with other character(s).
For example, if a shoes product belongs to both ["Shoes &
Accessories" -> "Shoes"] and ["Sports & Fitness" ->
"Athletic Clothing" -> "Shoes"], it could be represented as:
::
"categories": [
"Shoes & Accessories > Shoes",
"Sports & Fitness > Athletic Clothing > Shoes"
]
Must be set for
[Type.PRIMARY][google.cloud.retail.v2alpha.Product.Type.PRIMARY]
[Product][google.cloud.retail.v2alpha.Product] otherwise an
INVALID_ARGUMENT error is returned.
At most 250 values are allowed per
[Product][google.cloud.retail.v2alpha.Product]. Empty values
are not allowed. Each value must be a UTF-8 encoded string
with a length limit of 5,000 characters. Otherwise, an
INVALID_ARGUMENT error is returned.
Google Merchant Center property
`google_product_category <https://support.google.com/merchants/answer/6324436>`__.
Schema.org property [Product.category]
(https://schema.org/category).
title (str):
Required. Product title.
This field must be a UTF-8 encoded string with a length
limit of 128 characters. Otherwise, an INVALID_ARGUMENT
error is returned.
Google Merchant Center property
`title <https://support.google.com/merchants/answer/6324415>`__.
Schema.org property
`Product.name <https://schema.org/name>`__.
description (str):
Product description.
This field must be a UTF-8 encoded string with a length
limit of 5,000 characters. Otherwise, an INVALID_ARGUMENT
error is returned.
Google Merchant Center property
`description <https://support.google.com/merchants/answer/6324468>`__.
schema.org property
`Product.description <https://schema.org/description>`__.
attributes (Sequence[google.cloud.retail_v2alpha.types.Product.AttributesEntry]):
Highly encouraged. Extra product attributes to be included.
For example, for products, this could include the store
name, vendor, style, color, etc. These are very strong
signals for recommendation model, thus we highly recommend
providing the attributes here.
            Features that can take on one of a limited number of
            possible values. Two types of features can be set:
            textual features, for example the brand/maker of a
            product or the country of a customer; and numerical
            features, for example the height/weight of a product
            or the age of a customer.
For example:
``{ "vendor": {"text": ["vendor123", "vendor456"]}, "lengths_cm": {"numbers":[2.3, 15.4]}, "heights_cm": {"numbers":[8.1, 6.4]} }``.
A maximum of 150 attributes are allowed. Otherwise, an
INVALID_ARGUMENT error is returned.
The key must be a UTF-8 encoded string with a length limit
of 5,000 characters. Otherwise, an INVALID_ARGUMENT error is
returned.
tags (Sequence[str]):
Custom tags associated with the product.
At most 250 values are allowed per
[Product][google.cloud.retail.v2alpha.Product]. This value
must be a UTF-8 encoded string with a length limit of 1,000
characters. Otherwise, an INVALID_ARGUMENT error is
returned.
This tag can be used for filtering recommendation results by
passing the tag as part of the
[PredictRequest.filter][google.cloud.retail.v2alpha.PredictRequest.filter].
Google Merchant Center property
`custom_label_0–4 <https://support.google.com/merchants/answer/6324473>`__.
price_info (google.cloud.retail_v2alpha.types.PriceInfo):
Product price and cost information.
Google Merchant Center property
`price <https://support.google.com/merchants/answer/6324371>`__.
available_time (google.protobuf.timestamp_pb2.Timestamp):
The timestamp when this
[Product][google.cloud.retail.v2alpha.Product] becomes
            available for recommendation and search.
availability (google.cloud.retail_v2alpha.types.Product.Availability):
The online availability of the
[Product][google.cloud.retail.v2alpha.Product]. Default to
[Availability.IN_STOCK][google.cloud.retail.v2alpha.Product.Availability.IN_STOCK].
Google Merchant Center Property
`availability <https://support.google.com/merchants/answer/6324448>`__.
Schema.org Property
`Offer.availability <https://schema.org/availability>`__.
available_quantity (google.protobuf.wrappers_pb2.Int32Value):
The available quantity of the item.
uri (str):
Canonical URL directly linking to the product detail page.
This field must be a UTF-8 encoded string with a length
limit of 5,000 characters. Otherwise, an INVALID_ARGUMENT
error is returned.
Google Merchant Center property
`link <https://support.google.com/merchants/answer/6324416>`__.
Schema.org property `Offer.url <https://schema.org/url>`__.
images (Sequence[google.cloud.retail_v2alpha.types.Image]):
Product images for the product.
A maximum of 300 images are allowed.
Google Merchant Center property
`image_link <https://support.google.com/merchants/answer/6324350>`__.
Schema.org property
`Product.image <https://schema.org/image>`__.
"""
class Type(proto.Enum):
r"""The type of this product."""
TYPE_UNSPECIFIED = 0
PRIMARY = 1
VARIANT = 2
COLLECTION = 3
class Availability(proto.Enum):
r"""Product availability. If this field is unspecified, the
product is assumed to be in stock.
"""
AVAILABILITY_UNSPECIFIED = 0
IN_STOCK = 1
OUT_OF_STOCK = 2
PREORDER = 3
BACKORDER = 4
name = proto.Field(
proto.STRING,
number=1,
)
id = proto.Field(
proto.STRING,
number=2,
)
type_ = proto.Field(
proto.ENUM,
number=3,
enum=Type,
)
primary_product_id = proto.Field(
proto.STRING,
number=4,
)
categories = proto.RepeatedField(
proto.STRING,
number=7,
)
title = proto.Field(
proto.STRING,
number=8,
)
description = proto.Field(
proto.STRING,
number=10,
)
attributes = proto.MapField(
proto.STRING,
proto.MESSAGE,
number=12,
message=common.CustomAttribute,
)
tags = proto.RepeatedField(
proto.STRING,
number=13,
)
price_info = proto.Field(
proto.MESSAGE,
number=14,
message=common.PriceInfo,
)
available_time = proto.Field(
proto.MESSAGE,
number=18,
message=timestamp_pb2.Timestamp,
)
availability = proto.Field(
proto.ENUM,
number=19,
enum=Availability,
)
available_quantity = proto.Field(
proto.MESSAGE,
number=20,
message=wrappers_pb2.Int32Value,
)
uri = proto.Field(
proto.STRING,
number=22,
)
images = proto.RepeatedField(
proto.MESSAGE,
number=23,
message=common.Image,
)
__all__ = tuple(sorted(__protobuf__.manifest))
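# Minimal construction sketch (not part of the generated file; field values
# are illustrative only). proto-plus messages accept keyword arguments:
# product = Product(
#     title="Classic sneaker",
#     type_=Product.Type.PRIMARY,
#     categories=["Shoes & Accessories > Shoes"],
# )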
|
apache-2.0
| 2,006,343,473,132,579,000
| 37.309677
| 144
| 0.616706
| false
| 4.323262
| false
| false
| false
|
gkonstantyno/construct
|
construct/debug.py
|
1
|
4154
|
"""
Debugging utilities for constructs
"""
import sys
import traceback
import pdb
import inspect
from construct.core import Construct, Subconstruct
from construct.lib import HexString, Container, ListContainer
class Probe(Construct):
"""
A probe: dumps the context, stack frames, and stream content to the screen
to aid the debugging process.
.. seealso:: :class:`Debugger`.
:param name: the display name
:param show_stream: whether or not to show stream contents. default is True. the stream must be seekable.
:param show_context: whether or not to show the context. default is True.
:param show_stack: whether or not to show the upper stack frames. default is True.
    :param stream_lookahead: the number of bytes to dump when show_stream is set. default is 100.
Example::
Struct("foo",
UBInt8("a"),
Probe("between a and b"),
UBInt8("b"),
)
"""
__slots__ = [
"printname", "show_stream", "show_context", "show_stack",
"stream_lookahead"
]
counter = 0
def __init__(self, name = None, show_stream = True,
show_context = True, show_stack = True,
stream_lookahead = 100):
super(Probe, self).__init__(None)
if name is None:
Probe.counter += 1
name = "<unnamed %d>" % (Probe.counter,)
self.printname = name
self.show_stream = show_stream
self.show_context = show_context
self.show_stack = show_stack
self.stream_lookahead = stream_lookahead
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.printname)
def _parse(self, stream, context):
self.printout(stream, context)
def _build(self, obj, stream, context):
self.printout(stream, context)
def _sizeof(self, context):
return 0
def printout(self, stream, context):
obj = Container()
if self.show_stream:
obj.stream_position = stream.tell()
follows = stream.read(self.stream_lookahead)
if not follows:
obj.following_stream_data = "EOF reached"
else:
stream.seek(-len(follows), 1)
obj.following_stream_data = HexString(follows)
print("")
if self.show_context:
obj.context = context
if self.show_stack:
obj.stack = ListContainer()
frames = [s[0] for s in inspect.stack()][1:-1]
frames.reverse()
for f in frames:
a = Container()
a.__update__(f.f_locals)
obj.stack.append(a)
print("=" * 80)
print("Probe %s" % (self.printname,))
print(obj)
print("=" * 80)
class Debugger(Subconstruct):
"""
A pdb-based debugger. When an exception occurs in the subcon, a debugger
will appear and allow you to debug the error (and even fix on-the-fly).
:param subcon: the subcon to debug
Example::
Debugger(
Enum(UBInt8("foo"),
a = 1,
b = 2,
c = 3
)
)
"""
__slots__ = ["retval"]
def _parse(self, stream, context):
try:
return self.subcon._parse(stream, context)
except Exception:
self.retval = NotImplemented
self.handle_exc("(you can set the value of 'self.retval', "
"which will be returned)")
if self.retval is NotImplemented:
raise
else:
return self.retval
def _build(self, obj, stream, context):
try:
self.subcon._build(obj, stream, context)
except Exception:
self.handle_exc()
def handle_exc(self, msg = None):
print("=" * 80)
print("Debugging exception of %s:" % (self.subcon,))
print("".join(traceback.format_exception(*sys.exc_info())[1:]))
if msg:
print(msg)
pdb.post_mortem(sys.exc_info()[2])
print("=" * 80)
|
mit
| -8,919,516,380,459,991,000
| 30.233083
| 109
| 0.545258
| false
| 4.158158
| false
| false
| false
|
liuenyan/django-blog
|
blog/views.py
|
1
|
8259
|
"""
View functions for the blog application.
"""
import json
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, InvalidPage
from django.contrib import messages
from django.conf import settings
from django.http import HttpResponse
from django.views.decorators.http import require_POST
from blog.models import Post, Comment, Tag, Category
from blog.forms import PostForm, CommentForm, EditProfileForm, CategoryForm, TagForm
from blog.tools import clean_html_tags, convert_to_html
# Create your views here.
def index(request):
"""首页的视图函数"""
post_list = Post.objects.all().order_by('-id')
paginator = Paginator(post_list, 10)
page = request.GET.get('page')
try:
posts = paginator.page(page)
except InvalidPage:
posts = paginator.page(1)
return render(request, "index.html", context={'posts': posts})
def post_detail(request, slug):
"""文章页面的视图函数"""
post = get_object_or_404(Post, slug=slug)
context = {
'comments_provider': settings.DEFAULT_COMMENTS_PROVIDER,
'post': post,
}
if settings.DEFAULT_COMMENTS_PROVIDER == 'default':
if request.method == 'POST':
form = CommentForm(request.POST)
if form.is_valid():
comment = Comment(
name=form.cleaned_data['name'],
url=form.cleaned_data['url'],
email=form.cleaned_data['email'],
comment=clean_html_tags(form.cleaned_data['comment']),
post=post
)
comment.save()
return redirect('post', slug)
else:
messages.add_message(request, messages.ERROR, form.errors)
form = CommentForm()
comments = Comment.objects.filter(post=post)
context['form'] = form
context['comments'] = comments
return render(request, 'post.html', context)
@login_required
def edit_post(request, slug):
"""文章编辑页面的视图函数"""
post = get_object_or_404(Post, slug=slug)
if request.user.id != post.author.id:
return redirect('post', slug)
if request.method == 'POST':
post_form = PostForm(request.POST, instance=post)
if post_form.is_valid():
post.body_html = convert_to_html(post_form.cleaned_data['body_markdown'])
post_form.save()
messages.add_message(request, messages.SUCCESS, '文章已更新')
return redirect('post', post.slug)
else:
messages.add_message(request, messages.ERROR, post_form.errors)
context = {
'post_form': post_form,
'category_form': CategoryForm(),
'tag_form': TagForm(),
}
return render(request, 'edit_post.html', context)
context = {
'post_form': PostForm(instance=post),
'category_form': CategoryForm(),
'tag_form': TagForm(),
}
return render(request, 'edit_post.html', context)
@login_required
def new_post(request):
"""文章新建页面的视图函数"""
if request.method == 'POST':
post_form = PostForm(request.POST)
if post_form.is_valid():
post = post_form.save(commit=False)
post.body_html = convert_to_html(post_form.cleaned_data['body_markdown'])
post.author = request.user
post.save()
post_form.save_m2m()
messages.add_message(request, messages.SUCCESS, '文章已发布')
return redirect('post', post.slug)
else:
messages.add_message(request, messages.ERROR, post_form.errors)
context = {
'post_form': post_form,
'category_form': CategoryForm(),
'tag_form': TagForm(),
}
return render(request, 'edit_post.html', context)
context = {
'post_form': PostForm(),
'category_form': CategoryForm(),
'tag_form': TagForm(),
}
return render(request, 'edit_post.html', context)
@login_required
def delete_post(request, slug):
"""文章删除的视图函数"""
    post = get_object_or_404(Post, slug=slug)
if request.user.id != post.author.id:
return redirect('post', slug)
post.delete()
return redirect('index')
def category_posts(request, category_name):
"""分类页面的视图函数"""
category_object = get_object_or_404(Category, category=category_name)
post_list = category_object.post_set.order_by('-id')
paginator = Paginator(post_list, 10)
page = request.GET.get('page')
try:
posts = paginator.page(page)
except InvalidPage:
posts = paginator.page(1)
title = '分类为{0}的文章'.format(category_name)
return render(request, 'index.html', context={'title': title, 'posts': posts})
@login_required
@require_POST
def new_category(request):
"""新建分类的处理函数"""
form = CategoryForm(request.POST)
if form.is_valid():
category = form.save()
result = {
'status': 'success',
'category': {
'id': category.id,
'category': category.category,
},
}
return HttpResponse(json.dumps(result), content_type="text/json")
    else:
        result = {
            'status': 'fail',
            'errors': form.errors,
        }
        return HttpResponse(json.dumps(result), content_type="text/json")
@login_required
@require_POST
def new_tag(request):
"""新建标签的处理函数"""
form = TagForm(request.POST)
if form.is_valid():
tag = form.save()
result = {
'status': 'success',
'tag': {
'id': tag.id,
'tag': tag.tag,
}
}
return HttpResponse(json.dumps(result), content_type="text/json")
else:
result = {
'status': 'fail',
'errors': form.errors,
}
        return HttpResponse(json.dumps(result), content_type="text/json")
def tag_posts(request, tagname):
"""标签页面的视图函数"""
tag_object = get_object_or_404(Tag, tag=tagname)
post_list = tag_object.post_set.order_by('-id')
paginator = Paginator(post_list, 10)
page = request.GET.get('page')
try:
posts = paginator.page(page)
except InvalidPage:
posts = paginator.page(1)
title = '标签为{0}的文章'.format(tagname)
return render(request, 'index.html', context={'title': title, 'posts': posts})
def archive(request, year, month):
"""归档页面的视图函数"""
post_list = Post.objects.filter(
creation_time__year=year,
creation_time__month=month
).order_by('-id')
paginator = Paginator(post_list, 10)
page = request.GET.get('page')
try:
posts = paginator.page(page)
except InvalidPage:
posts = paginator.page(1)
title = '{0}年{1}月的归档'.format(year, month)
return render(request, 'index.html', context={'title': title, 'posts': posts})
@login_required
def profile(request):
"""个人资料页面的视图函数"""
return render(request, 'profile.html')
@login_required
def change_profile(request):
"""修改个人资料的视图函数"""
current_user = request.user
if request.method == 'POST':
form = EditProfileForm(request.POST)
if form.is_valid():
current_user.first_name = form.cleaned_data['first_name']
current_user.last_name = form.cleaned_data['last_name']
current_user.email = form.cleaned_data['email']
current_user.save()
messages.add_message(request, messages.SUCCESS, '个人资料已更新')
return redirect('profile')
else:
messages.add_message(request, messages.ERROR, form.errors)
data = {
'first_name': current_user.first_name,
'last_name': current_user.last_name,
'email': current_user.email
}
form = EditProfileForm(data)
return render(request, 'change_profile.html', context={'form': form})
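# Hypothetical urls.py wiring for these views (not part of this file; the
# route names match the redirect() calls above, everything else is an
# assumption and presumes Django >= 2.0):
# from django.urls import path
# from blog import views
# urlpatterns = [
#     path('', views.index, name='index'),
#     path('post/<slug:slug>/', views.post_detail, name='post'),
#     path('profile/', views.profile, name='profile'),
# ]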
|
mit
| 8,883,726,643,625,573,000
| 31.687243
| 85
| 0.591716
| false
| 3.509943
| false
| false
| false
|
alephu5/Soundbyte
|
environment/lib/python3.3/site-packages/sympy/utilities/lambdify.py
|
1
|
18165
|
"""
This module provides convenient functions to transform sympy expressions to
lambda functions which can be used to calculate numerical values very fast.
"""
from __future__ import print_function, division
from sympy.external import import_module
from sympy.core.compatibility import exec_, is_sequence, iterable, string_types
import inspect
# These are the namespaces the lambda functions will use.
MATH = {}
MPMATH = {}
NUMPY = {}
SYMPY = {}
# Default namespaces, letting us define translations that can't be defined
# by simple variable maps, like I => 1j
# These are separate from the names above because the above names are modified
# throughout this file, whereas these should remain unmodified.
MATH_DEFAULT = {}
MPMATH_DEFAULT = {}
NUMPY_DEFAULT = {"I": 1j}
SYMPY_DEFAULT = {}
# Mappings between sympy and other modules function names.
MATH_TRANSLATIONS = {
"Abs": "fabs",
"ceiling": "ceil",
"E": "e",
"ln": "log",
}
MPMATH_TRANSLATIONS = {
"Abs": "fabs",
"elliptic_k": "ellipk",
"elliptic_f": "ellipf",
"elliptic_e": "ellipe",
"elliptic_pi": "ellippi",
"ceiling": "ceil",
"chebyshevt": "chebyt",
"chebyshevu": "chebyu",
"E": "e",
"I": "j",
"ln": "log",
#"lowergamma":"lower_gamma",
"oo": "inf",
#"uppergamma":"upper_gamma",
"LambertW": "lambertw",
"Matrix": "matrix",
"MutableDenseMatrix": "matrix",
"ImmutableMatrix": "matrix",
"conjugate": "conj",
"dirichlet_eta": "altzeta",
"Ei": "ei",
"Shi": "shi",
"Chi": "chi",
"Si": "si",
"Ci": "ci"
}
NUMPY_TRANSLATIONS = {
"Abs": "abs",
"acos": "arccos",
"acosh": "arccosh",
"arg": "angle",
"asin": "arcsin",
"asinh": "arcsinh",
"atan": "arctan",
"atan2": "arctan2",
"atanh": "arctanh",
"ceiling": "ceil",
"E": "e",
"im": "imag",
"ln": "log",
"Matrix": "matrix",
"MutableDenseMatrix": "matrix",
"ImmutableMatrix": "matrix",
"Max": "amax",
"Min": "amin",
"oo": "inf",
"re": "real",
}
# Available modules:
MODULES = {
"math": (MATH, MATH_DEFAULT, MATH_TRANSLATIONS, ("from math import *",)),
"mpmath": (MPMATH, MPMATH_DEFAULT, MPMATH_TRANSLATIONS, ("from sympy.mpmath import *",)),
"numpy": (NUMPY, NUMPY_DEFAULT, NUMPY_TRANSLATIONS, ("import_module('numpy')",)),
"sympy": (SYMPY, SYMPY_DEFAULT, {}, (
"from sympy.functions import *",
"from sympy.matrices import *",
"from sympy import Integral, pi, oo, nan, zoo, E, I",)),
}
def _import(module, reload=False):
"""
Creates a global translation dictionary for module.
The argument module has to be one of the following strings: "math",
"mpmath", "numpy", "sympy".
These dictionaries map names of python functions to their equivalent in
other modules.
"""
try:
namespace, namespace_default, translations, import_commands = MODULES[
module]
except KeyError:
raise NameError(
"'%s' module can't be used for lambdification" % module)
# Clear namespace or exit
if namespace != namespace_default:
# The namespace was already generated, don't do it again if not forced.
if reload:
namespace.clear()
namespace.update(namespace_default)
else:
return
for import_command in import_commands:
if import_command.startswith('import_module'):
module = eval(import_command)
if module is not None:
namespace.update(module.__dict__)
continue
else:
try:
exec_(import_command, {}, namespace)
continue
except ImportError:
pass
raise ImportError(
"can't import '%s' with '%s' command" % (module, import_command))
# Add translated names to namespace
for sympyname, translation in translations.items():
namespace[sympyname] = namespace[translation]
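# Illustrative effect (not executed here): after ``_import("math")`` the
# MATH namespace maps names to math-module callables, e.g. MATH["sin"] is
# math.sin and the translated MATH["ln"] is math.log.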
def lambdify(args, expr, modules=None, printer=None, use_imps=True, dummify=True):
"""
Returns a lambda function for fast calculation of numerical values.
If not specified differently by the user, SymPy functions are replaced as
far as possible by either python-math, numpy (if available) or mpmath
functions - exactly in this order. To change this behavior, the "modules"
argument can be used. It accepts:
- the strings "math", "mpmath", "numpy", "sympy"
- any modules (e.g. math)
- dictionaries that map names of sympy functions to arbitrary functions
- lists that contain a mix of the arguments above, with higher priority
given to entries appearing first.
The default behavior is to substitute all arguments in the provided
expression with dummy symbols. This allows for applied functions (e.g.
f(t)) to be supplied as arguments. Call the function with dummify=False if
dummy substitution is unwanted (and `args` is not a string). If you want
to view the lambdified function or provide "sympy" as the module, you
should probably set dummify=False.
Usage
=====
(1) Use one of the provided modules:
>>> from sympy import lambdify, sin, gamma
>>> from sympy.utilities.lambdify import lambdastr
>>> from sympy.abc import x
>>> f = lambdify(x, sin(x), "math")
Attention: Functions that are not in the math module will throw a name
error when the lambda function is evaluated! So this would
be better:
>>> f = lambdify(x, sin(x)*gamma(x), ("math", "mpmath", "sympy"))
(2) Use some other module:
>> import numpy
>> f = lambdify((x,y), tan(x*y), numpy)
Attention: There are naming differences between numpy and sympy. So if
you simply take the numpy module, e.g. sympy.atan will not be
translated to numpy.arctan. Use the modified module instead
by passing the string "numpy":
>> f = lambdify((x,y), tan(x*y), "numpy")
>> f(1, 2)
-2.18503986326
>> from numpy import array
>> f(array([1, 2, 3]), array([2, 3, 5]))
[-2.18503986 -0.29100619 -0.8559934 ]
(3) Use a dictionary defining custom functions:
>>> def my_cool_function(x): return 'sin(%s) is cool' % x
>>> myfuncs = {"sin" : my_cool_function}
>>> f = lambdify(x, sin(x), myfuncs); f(1)
'sin(1) is cool'
Examples
========
>>> from sympy.utilities.lambdify import implemented_function, lambdify
>>> from sympy import sqrt, sin, Matrix
>>> from sympy import Function
>>> from sympy.abc import w, x, y, z
>>> f = lambdify(x, x**2)
>>> f(2)
4
>>> f = lambdify((x, y, z), [z, y, x])
>>> f(1,2,3)
[3, 2, 1]
>>> f = lambdify(x, sqrt(x))
>>> f(4)
2.0
>>> f = lambdify((x, y), sin(x*y)**2)
>>> f(0, 5)
0.0
>>> row = lambdify((x, y), Matrix((x, x + y)).T, modules='sympy')
>>> row(1, 2)
Matrix([[1, 3]])
Tuple arguments are handled and the lambdified function should
be called with the same type of arguments as were used to create
the function.:
>>> f = lambdify((x, (y, z)), x + y)
>>> f(1, (2, 4))
3
A more robust way of handling this is to always work with flattened
arguments:
>>> from sympy.utilities.iterables import flatten
>>> args = w, (x, (y, z))
>>> vals = 1, (2, (3, 4))
>>> f = lambdify(flatten(args), w + x + y + z)
>>> f(*flatten(vals))
10
Functions present in `expr` can also carry their own numerical
implementations, in a callable attached to the ``_imp_``
attribute. Usually you attach this using the
``implemented_function`` factory:
>>> f = implemented_function(Function('f'), lambda x: x+1)
>>> func = lambdify(x, f(x))
>>> func(4)
5
``lambdify`` always prefers ``_imp_`` implementations to implementations
in other namespaces, unless the ``use_imps`` input parameter is False.
"""
from sympy.core.symbol import Symbol
from sympy.utilities.iterables import flatten
# If the user hasn't specified any modules, use what is available.
module_provided = True
if modules is None:
module_provided = False
# Use either numpy (if available) or python.math where possible.
# XXX: This leads to different behaviour on different systems and
# might be the reason for irreproducible errors.
modules = ["math", "mpmath", "sympy"]
try:
_import("numpy")
except ImportError:
pass
else:
modules.insert(1, "numpy")
# Get the needed namespaces.
namespaces = []
# First find any function implementations
if use_imps:
namespaces.append(_imp_namespace(expr))
# Check for dict before iterating
if isinstance(modules, (dict, str)) or not hasattr(modules, '__iter__'):
namespaces.append(modules)
else:
namespaces += list(modules)
# fill namespace with first having highest priority
namespace = {}
for m in namespaces[::-1]:
buf = _get_namespace(m)
namespace.update(buf)
if hasattr(expr, "atoms"):
#Try if you can extract symbols from the expression.
#Move on if expr.atoms in not implemented.
syms = expr.atoms(Symbol)
for term in syms:
namespace.update({str(term): term})
# Create lambda function.
lstr = lambdastr(args, expr, printer=printer, dummify=dummify)
flat = '__flatten_args__'
    if flat in lstr:
        namespace.update({flat: flatten})
return eval(lstr, namespace)
def _get_namespace(m):
"""
This is used by _lambdify to parse its arguments.
"""
if isinstance(m, str):
_import(m)
return MODULES[m][0]
elif isinstance(m, dict):
return m
elif hasattr(m, "__dict__"):
return m.__dict__
else:
raise TypeError("Argument must be either a string, dict or module but it is: %s" % m)
def lambdastr(args, expr, printer=None, dummify=False):
"""
Returns a string that can be evaluated to a lambda function.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy.utilities.lambdify import lambdastr
>>> lambdastr(x, x**2)
'lambda x: (x**2)'
>>> lambdastr((x,y,z), [z,y,x])
'lambda x,y,z: ([z, y, x])'
Although tuples may not appear as arguments to lambda in Python 3,
lambdastr will create a lambda function that will unpack the original
arguments so that nested arguments can be handled:
>>> lambdastr((x, (y, z)), x + y)
'lambda _0,_1: (lambda x,y,z: (x + y))(*list(__flatten_args__([_0,_1])))'
"""
# Transforming everything to strings.
from sympy.matrices import DeferredVector
from sympy import Dummy, sympify, Symbol, Function, flatten
if printer is not None:
if inspect.isfunction(printer):
lambdarepr = printer
else:
if inspect.isclass(printer):
lambdarepr = lambda expr: printer().doprint(expr)
else:
lambdarepr = lambda expr: printer.doprint(expr)
else:
#XXX: This has to be done here because of circular imports
from sympy.printing.lambdarepr import lambdarepr
def sub_args(args, dummies_dict):
if isinstance(args, str):
return args
elif isinstance(args, DeferredVector):
return str(args)
elif iterable(args):
dummies = flatten([sub_args(a, dummies_dict) for a in args])
return ",".join(str(a) for a in dummies)
else:
if isinstance(args, Function):
dummies = Dummy()
dummies_dict.update({args : dummies})
return str(dummies)
else:
return str(args)
def sub_expr(expr, dummies_dict):
try:
expr = sympify(expr).xreplace(dummies_dict)
        except Exception:
if isinstance(expr, DeferredVector):
pass
elif isinstance(expr, dict):
k = [sub_expr(sympify(a), dummies_dict) for a in expr.keys()]
v = [sub_expr(sympify(a), dummies_dict) for a in expr.values()]
expr = dict(zip(k, v))
elif isinstance(expr, tuple):
expr = tuple(sub_expr(sympify(a), dummies_dict) for a in expr)
elif isinstance(expr, list):
expr = [sub_expr(sympify(a), dummies_dict) for a in expr]
return expr
# Transform args
def isiter(l):
return iterable(l, exclude=(str, DeferredVector))
if isiter(args) and any(isiter(i) for i in args):
from sympy.utilities.iterables import flatten
import re
dum_args = [str(Dummy(str(i))) for i in range(len(args))]
        iter_args = ','.join(dum_args)
lstr = lambdastr(flatten(args), expr, printer=printer, dummify=dummify)
flat = '__flatten_args__'
rv = 'lambda %s: (%s)(*list(%s([%s])))' % (
','.join(dum_args), lstr, flat, iter_args)
if len(re.findall(r'\b%s\b' % flat, rv)) > 1:
raise ValueError('the name %s is reserved by lambdastr' % flat)
return rv
dummies_dict = {}
if dummify:
args = sub_args(args, dummies_dict)
else:
if isinstance(args, str):
pass
elif iterable(args, exclude=DeferredVector):
args = ",".join(str(a) for a in args)
# Transform expr
if dummify:
if isinstance(expr, str):
pass
else:
expr = sub_expr(expr, dummies_dict)
expr = lambdarepr(expr)
return "lambda %s: (%s)" % (args, expr)
def _imp_namespace(expr, namespace=None):
""" Return namespace dict with function implementations
We need to search for functions in anything that can be thrown at
us - that is - anything that could be passed as `expr`. Examples
include sympy expressions, as well as tuples, lists and dicts that may
contain sympy expressions.
Parameters
----------
expr : object
Something passed to lambdify, that will generate valid code from
``str(expr)``.
namespace : None or mapping
Namespace to fill. None results in new empty dict
Returns
-------
namespace : dict
dict with keys of implemented function names within `expr` and
corresponding values being the numerical implementation of
function
Examples
--------
>>> from sympy.abc import x
>>> from sympy.utilities.lambdify import implemented_function, _imp_namespace
>>> from sympy import Function
>>> f = implemented_function(Function('f'), lambda x: x+1)
>>> g = implemented_function(Function('g'), lambda x: x*10)
>>> namespace = _imp_namespace(f(g(x)))
>>> sorted(namespace.keys())
['f', 'g']
"""
# Delayed import to avoid circular imports
from sympy.core.function import FunctionClass
if namespace is None:
namespace = {}
# tuples, lists, dicts are valid expressions
if is_sequence(expr):
for arg in expr:
_imp_namespace(arg, namespace)
return namespace
elif isinstance(expr, dict):
for key, val in expr.items():
# functions can be in dictionary keys
_imp_namespace(key, namespace)
_imp_namespace(val, namespace)
return namespace
# sympy expressions may be Functions themselves
func = getattr(expr, 'func', None)
if isinstance(func, FunctionClass):
imp = getattr(func, '_imp_', None)
if imp is not None:
name = expr.func.__name__
if name in namespace and namespace[name] != imp:
raise ValueError('We found more than one '
'implementation with name '
'"%s"' % name)
namespace[name] = imp
# and / or they may take Functions as arguments
if hasattr(expr, 'args'):
for arg in expr.args:
_imp_namespace(arg, namespace)
return namespace
def implemented_function(symfunc, implementation):
""" Add numerical ``implementation`` to function ``symfunc``.
``symfunc`` can be an ``UndefinedFunction`` instance, or a name string.
In the latter case we create an ``UndefinedFunction`` instance with that
name.
Be aware that this is a quick workaround, not a general method to create
special symbolic functions. If you want to create a symbolic function to be
used by all the machinery of sympy you should subclass the ``Function``
class.
Parameters
----------
symfunc : ``str`` or ``UndefinedFunction`` instance
If ``str``, then create new ``UndefinedFunction`` with this as
name. If `symfunc` is a sympy function, attach implementation to it.
implementation : callable
numerical implementation to be called by ``evalf()`` or ``lambdify``
Returns
-------
afunc : sympy.FunctionClass instance
function with attached implementation
Examples
--------
>>> from sympy.abc import x
>>> from sympy.utilities.lambdify import lambdify, implemented_function
>>> from sympy import Function
>>> f = implemented_function(Function('f'), lambda x: x+1)
>>> lam_f = lambdify(x, f(x))
>>> lam_f(4)
5
"""
# Delayed import to avoid circular imports
from sympy.core.function import UndefinedFunction
# if name, create function to hold implementation
if isinstance(symfunc, string_types):
symfunc = UndefinedFunction(symfunc)
elif not isinstance(symfunc, UndefinedFunction):
raise ValueError('symfunc should be either a string or'
' an UndefinedFunction instance.')
# We need to attach as a method because symfunc will be a class
symfunc._imp_ = staticmethod(implementation)
return symfunc
|
gpl-3.0
| 8,490,795,049,944,381,000
| 32.14781
| 93
| 0.600606
| false
| 3.929267
| false
| false
| false
|
daevaorn/kombu
|
kombu/transport/zmq.py
|
1
|
8458
|
"""
kombu.transport.zmq
===================
ZeroMQ transport.
"""
from __future__ import absolute_import
import errno
import os
import socket
try:
import zmq
from zmq import ZMQError
except ImportError:
zmq = ZMQError = None # noqa
from kombu.five import Empty
from kombu.log import get_logger
from kombu.serialization import pickle
from kombu.utils import cached_property
from kombu.utils.eventio import poll, READ
from . import virtual
logger = get_logger('kombu.transport.zmq')
DEFAULT_PORT = 5555
DEFAULT_HWM = 128
DEFAULT_INCR = 1
dumps, loads = pickle.dumps, pickle.loads
class MultiChannelPoller(object):
eventflags = READ
def __init__(self):
# active channels
self._channels = set()
# file descriptor -> channel map
self._fd_to_chan = {}
# poll implementation (epoll/kqueue/select)
self.poller = poll()
def close(self):
for fd in self._fd_to_chan:
try:
self.poller.unregister(fd)
except KeyError:
pass
self._channels.clear()
self._fd_to_chan.clear()
self.poller = None
def add(self, channel):
self._channels.add(channel)
def discard(self, channel):
self._channels.discard(channel)
self._fd_to_chan.pop(channel.client.connection.fd, None)
def _register(self, channel):
conn = channel.client.connection
self._fd_to_chan[conn.fd] = channel
self.poller.register(conn.fd, self.eventflags)
def on_poll_start(self):
for channel in self._channels:
self._register(channel)
def on_readable(self, fileno):
chan = self._fd_to_chan[fileno]
return chan.drain_events(), chan
def get(self, timeout=None):
self.on_poll_start()
events = self.poller.poll(timeout)
for fileno, _ in events or []:
return self.on_readable(fileno)
raise Empty()
@property
def fds(self):
return self._fd_to_chan
class Client(object):
def __init__(self, uri='tcp://127.0.0.1', port=DEFAULT_PORT,
hwm=DEFAULT_HWM, swap_size=None, enable_sink=True,
context=None):
try:
scheme, parts = uri.split('://')
except ValueError:
scheme = 'tcp'
parts = uri
endpoints = parts.split(';')
self.port = port
if scheme != 'tcp':
raise NotImplementedError('Currently only TCP can be used')
self.context = context or zmq.Context.instance()
if enable_sink:
self.sink = self.context.socket(zmq.PULL)
self.sink.bind('tcp://*:{0.port}'.format(self))
else:
self.sink = None
self.vent = self.context.socket(zmq.PUSH)
if hasattr(zmq, 'SNDHWM'):
self.vent.setsockopt(zmq.SNDHWM, hwm)
else:
self.vent.setsockopt(zmq.HWM, hwm)
if swap_size:
self.vent.setsockopt(zmq.SWAP, swap_size)
for endpoint in endpoints:
if scheme == 'tcp' and ':' not in endpoint:
endpoint += ':' + str(DEFAULT_PORT)
endpoint = ''.join([scheme, '://', endpoint])
self.connect(endpoint)
def connect(self, endpoint):
self.vent.connect(endpoint)
def get(self, queue=None, timeout=None):
sink = self.sink
try:
if timeout is not None:
prev_timeout, sink.RCVTIMEO = sink.RCVTIMEO, timeout
try:
return sink.recv()
finally:
sink.RCVTIMEO = prev_timeout
else:
return sink.recv()
except ZMQError as exc:
if exc.errno == zmq.EAGAIN:
raise socket.error(errno.EAGAIN, exc.strerror)
else:
raise
def put(self, queue, message, **kwargs):
return self.vent.send(message)
def close(self):
if self.sink and not self.sink.closed:
self.sink.close()
if not self.vent.closed:
self.vent.close()
@property
def connection(self):
if self.sink:
return self.sink
return self.vent
class Channel(virtual.Channel):
Client = Client
hwm = DEFAULT_HWM
swap_size = None
enable_sink = True
port_incr = DEFAULT_INCR
from_transport_options = (
virtual.Channel.from_transport_options +
('hwm', 'swap_size', 'enable_sink', 'port_incr')
)
def __init__(self, *args, **kwargs):
super_ = super(Channel, self)
super_.__init__(*args, **kwargs)
        # Touch the lazy ``client`` property so its socket is created
        # eagerly and connection errors surface here.
        self.client.connection.closed
self.connection.cycle.add(self)
self.connection_errors = self.connection.connection_errors
def _get(self, queue, timeout=None):
try:
return loads(self.client.get(queue, timeout))
except socket.error as exc:
if exc.errno == errno.EAGAIN and timeout != 0:
raise Empty()
else:
raise
def _put(self, queue, message, **kwargs):
self.client.put(queue, dumps(message, -1), **kwargs)
def _purge(self, queue):
return 0
def _poll(self, cycle, timeout=None):
return cycle.get(timeout=timeout)
def close(self):
if not self.closed:
self.connection.cycle.discard(self)
try:
self.__dict__['client'].close()
except KeyError:
pass
super(Channel, self).close()
def _prepare_port(self, port):
return (port + self.channel_id - 1) * self.port_incr
def _create_client(self):
conninfo = self.connection.client
port = self._prepare_port(conninfo.port or DEFAULT_PORT)
return self.Client(uri=conninfo.hostname or 'tcp://127.0.0.1',
port=port,
hwm=self.hwm,
swap_size=self.swap_size,
enable_sink=self.enable_sink,
context=self.connection.context)
@cached_property
def client(self):
return self._create_client()
class Transport(virtual.Transport):
Channel = Channel
can_parse_url = True
default_port = DEFAULT_PORT
driver_type = 'zeromq'
driver_name = 'zmq'
connection_errors = virtual.Transport.connection_errors + (ZMQError,)
    implements = virtual.Transport.implements.extend(
        async=True,  # note: ``async`` became a reserved keyword in Python 3.7
    )
polling_interval = None
def __init__(self, *args, **kwargs):
if zmq is None:
raise ImportError('The zmq library is not installed')
super(Transport, self).__init__(*args, **kwargs)
self.cycle = MultiChannelPoller()
def driver_version(self):
return zmq.__version__
def register_with_event_loop(self, connection, loop):
cycle = self.cycle
cycle.poller = loop.poller
add_reader = loop.add_reader
on_readable = self.on_readable
cycle_poll_start = cycle.on_poll_start
def on_poll_start():
cycle_poll_start()
            for fd in cycle.fds:
                add_reader(fd, on_readable, fd)
loop.on_tick.add(on_poll_start)
def on_readable(self, fileno):
self._handle_event(self.cycle.on_readable(fileno))
def drain_events(self, connection, timeout=None):
more_to_read = False
for channel in connection.channels:
try:
evt = channel.cycle.get(timeout=timeout)
except socket.error as exc:
if exc.errno == errno.EAGAIN:
continue
raise
else:
connection._handle_event((evt, channel))
more_to_read = True
if not more_to_read:
raise socket.error(errno.EAGAIN, os.strerror(errno.EAGAIN))
def _handle_event(self, evt):
item, channel = evt
self._deliver(*item)
def establish_connection(self):
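        # Touch the lazy ``context`` property so the 0MQ context is created
        # before the virtual transport connects.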
self.context.closed
return super(Transport, self).establish_connection()
def close_connection(self, connection):
super(Transport, self).close_connection(connection)
try:
connection.__dict__['context'].term()
except KeyError:
pass
@cached_property
def context(self):
return zmq.Context(1)
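# Hypothetical round-trip sketch (not part of the original module): with the
# defaults, Client binds its PULL sink and connects its PUSH vent on the same
# port, so a message sent can be read back in-process:
# client = Client()
# client.put('ignored-queue-name', b'payload')
# print(client.get(timeout=1000))  # RCVTIMEO is in milliseconds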
|
bsd-3-clause
| -779,977,540,349,261,000
| 26.196141
| 73
| 0.567155
| false
| 4.012334
| false
| false
| false
|
daviddaub/pyssllabs
|
pyssllabs.py
|
1
|
21480
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import urllib2
import json
import time
from datetime import datetime
import sys
SSLLABS_API_ENTRYPOINT = 'https://api.ssllabs.com/api/v2/'
_FMT = '%Y-%m-%d %H:%M:%S'
hasColorama = False
def _c(c):
return c if hasColorama else ''
def _parse_args():
ap = argparse.ArgumentParser(description='SSL Server Test with the ssllabs.com API')
meg = ap.add_mutually_exclusive_group(required=True)
meg.add_argument('-i', '--info', action='store_true', help='Info')
meg.add_argument('-H', '--host', dest='host', type=str, metavar='<host>',
help='Test a single host e.g. www.example.com')
meg.add_argument('-S', '--statuscodes', action='store_true',
                     help='Show available status codes and their details')
meg.add_argument('-file', '--file', action='store_true',
                     help='Show available status codes and their details')
ap.add_argument('-n', '--nocolor', action='store_true',
help='Omit colorized output')
ap.add_argument('-g', '--grade', action='store_true',
help='Output the grade in the form <fqdn>:<grade>')
ap.add_argument('-s', '--startnew', action='store_true',
help='Start new scan. Don\'t deliver cached results.')
return ap
def _format_timestamp(t):
return time.strftime(_FMT, time.localtime(t / 1000))
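# Hypothetical use of the API wrapper defined below (the host is
# illustrative):
# labs = SSLLabs()
# report = labs.analyze(host='www.example.com', fromCache='on')
# if report.status == 'READY':
#     for ep in report.endpoints:
#         print('%s: %s' % (ep.ipAddress, ep.grade))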
class Info(object):
version = None
criteriaVersion = None
maxAssessments = None
currentAssessments = None
messages = None
clientMaxAssessments = None
class Host(object):
host = None
port = None
protocol = None
isPublic = None
status = None
statusMessage = None
startTime = None
testTime = None
engineVersion = None
criteriaVersion = None
cacheExpiryTime = None
endpoints = []
certHostnames = []
class EndPoint(object):
ipAddress = None
serverName = None
statusMessage = None
statusDetails = None
statusDetailsMessage = None
grade = None
hasWarnings = None
isExceptional = None
progress = None
duration = None
eta = None
delegation = None
details = None
class Key(object):
size = None
strength = None
alg = None
debianFlaw = None
q = None
class Cert(object):
subject = None
commonNames = []
altNames = []
notBefore = None
notAfter = None
issuerSubject = None
sigAlg = None
revocationInfo = None
crlURIs = []
ocspURIs = []
revocationStatus = None
sgc = None
validationType = None
issues = None
class Chain(object):
certs = []
issues = None
class Suites(object):
_list = []
preference = None
class SimDetails(object):
results = []
class EndpointDetails(object):
hostStartTime = None
key = Key()
cert = Cert()
chain = Chain()
protocols = []
suites = Suites()
serverSignature = None
prefixDelegation = None
nonPrefixDelegation = None
vulnBeast = None
renegSupport = None
stsResponseHeader = None
stsMaxAge = None
stsSubdomains = None
pkpResponseHeader = None
sessionResumption = None
compressionMethods = None
supportsNpn = None
npnProtocols = None
sessionTickets = None
ocspStapling = None
sniRequired = None
httpStatusCode = None
httpForwarding = None
supportsRc4 = None
forwardSecrecy = None
rc4WithModern = None
sims = SimDetails()
heartbleed = None
heartbeat = None
openSslCcs = None
poodleTls = None
fallbackScsv = None
freak = None
class ChainCert(object):
subject = None
label = None
notBefore = None
notAfter = None
issuerSubject = None
issuerLabel = None
sigAlg = None
issues = None
keyAlg = None
keySize = None
keyStrength = None
raw = None
class Protocol(object):
_id = None
name = None
version = None
v2SuitesDisabled = None
q = None
class SimClient(object):
_id = None
name = None
platform = None
version = None
isReference = None
class Simulation(object):
client = None
errorCode = None
attempts = None
protocolId = None
suiteId = None
class Suite(object):
_id = None
name = None
cipherStrength = None
dhStrength = None
dhP = None
    dhG = None
dhYs = None
ecdhBits = None
ecdhStrength = None
q = None
class StatusCodes(object):
statusDetails = None
class SSLLabs(object):
def info(self):
f = urllib2.urlopen(SSLLABS_API_ENTRYPOINT + 'info')
jsn = json.loads(f.read())
f.close()
i = Info()
i.version = jsn.get('engineVersion')
i.criteriaVersion = jsn.get('criteriaVersion')
i.maxAssessments = jsn.get('maxAssessments')
i.currentAssessments = jsn.get('currentAssessments')
i.messages = jsn.get('messages')
i.clientMaxAssessments = jsn.get('clientMaxAssessments')
return i
def analyze(self, host='www.ssllabs.com', publish='off', startNew='off',
fromCache='off', maxAge='1', _all='on', ignoreMismatch='off'):
# TODO: catch HTTP errors
f = urllib2.urlopen(SSLLABS_API_ENTRYPOINT + 'analyze?' +
'host=' + host + '&' +
'publish=' + publish + '&' +
'startNew=' + startNew + '&' +
'fromCache=' + fromCache + '&' +
'maxAge=' + maxAge + '&' +
'all=' + _all + '&' +
'ignoreMismatch=' + ignoreMismatch)
jsn = json.loads(f.read())
f.close()
        h = Host()
        # Host.endpoints is a class-level list; give each instance its own
        # list so repeated analyze() calls do not accumulate stale endpoints.
        h.endpoints = []
h.host = jsn.get('host')
h.port = jsn.get('port')
h.protocol = jsn.get('protocol')
h.isPublic = jsn.get('isPublic')
h.status = jsn.get('status')
h.statusMessage = jsn.get('statusMessage')
h.startTime = jsn.get('startTime')
h.testTime = jsn.get('testTime')
h.engineVersion = jsn.get('engineVersion')
h.criteriaVersion = jsn.get('criteriaVersion')
h.cacheExpiryTime = jsn.get('cacheExpiryTime')
if h.status != 'READY':
return h
for e in jsn.get('endpoints'):
endpoint = EndPoint()
endpoint.ipAddress = e.get('ipAddress')
endpoint.serverName = e.get('serverName')
endpoint.statusMessage = e.get('statusMessage')
endpoint.statusDetails = e.get('statusDetails')
endpoint.statusDetailsMessage = e.get('statusDetailsMessage')
endpoint.grade = e.get('grade')
endpoint.hasWarnings = e.get('hasWarnings')
endpoint.isExceptional = e.get('isExceptional')
endpoint.progress = e.get('progress')
endpoint.duration = e.get('duration')
endpoint.eta = e.get('eta')
endpoint.delegation = e.get('delegation')
if _all == 'on':
endpoint.details = EndpointDetails()
endpoint.details.hostStartTime = e.get('details').get('hostStartTime')
endpoint.details.key = Key()
endpoint.details.key.size = e.get('details').get('key').get('size')
endpoint.details.key.strength = e.get('details').get('key').get('strength')
endpoint.details.key.alg = e.get('details').get('key').get('alg')
endpoint.details.key.debianFlaw = e.get('details').get('key').get('debianFlaw')
endpoint.details.key.q = e.get('details').get('key').get('q')
endpoint.details.cert = Cert()
endpoint.details.cert.subject = e.get('details').get('cert').get('subject')
endpoint.details.cert.commonNames = e.get('details').get('cert').get('commonNames')
endpoint.details.cert.altNames = e.get('details').get('cert').get('altNames')
                endpoint.details.cert.notBefore = e.get('details').get('cert').get('notBefore')
                endpoint.details.cert.notAfter = e.get('details').get('cert').get('notAfter')
endpoint.details.cert.issuerSubject = e.get('details').get('cert').get('issuerSubject')
endpoint.details.cert.sigAlg = e.get('details').get('cert').get('sigAlg')
endpoint.details.cert.issuerLabel = e.get('details').get('cert').get('issuerLabel')
endpoint.details.cert.revocationInfo = e.get('details').get('cert').get('revocationInfo')
endpoint.details.cert.crlURIs = e.get('details').get('cert').get('crlURIs')
endpoint.details.cert.ocspURIs = e.get('details').get('cert').get('ocspURIs')
endpoint.details.cert.revocationStatus = e.get('details').get('cert').get('revocationStatus')
endpoint.details.cert.sgc = e.get('details').get('cert').get('sgc')
endpoint.details.cert.validationType = e.get('details').get('cert').get('validationType')
endpoint.details.cert.issues = e.get('details').get('cert').get('issues')
endpoint.details.chain = Chain()
endpoint.details.chain.certs = []
for c in e.get('details').get('chain').get('certs'):
cc = ChainCert()
cc.subject = c.get('subject')
cc.label = c.get('label')
cc.notBefore = c.get('notBefore')
cc.notAfter = c.get('notAfter')
cc.issuerSubject = c.get('issuerSubject')
cc.issuerLabel = c.get('issuerLabel')
cc.sigAlg = c.get('sigAlg')
cc.issues = c.get('issues')
cc.keyAlg = c.get('keyAlg')
cc.keySize = c.get('keySize')
cc.raw = c.get('raw')
endpoint.details.chain.certs.append(cc)
endpoint.details.chain.issues = e.get('details').get('chain').get('issues')
endpoint.details.protocols = []
for i in e.get('details').get('protocols'):
p = Protocol()
p._id = i.get('id')
p.name = i.get('name')
p.version = i.get('version')
p.v2SuitesDisabled = i.get('v2SuitesDisabled')
p.q = i.get('q')
endpoint.details.protocols.append(p)
endpoint.details.suites = Suites()
endpoint.details.suites._list = []
for i in e.get('details').get('suites').get('list'):
s = Suite()
s._id = i.get('id')
s.name = i.get('name')
s.cipherStrength = i.get('cipherStrength')
s.dhStrength = i.get('dhStrength')
s.dhP = i.get('dhP')
s.dhG = i.get('dhG')
s.dhYs = i.get('dhYs')
s.ecdhBits = i.get('ecdhBits')
s.ecdhStrength = i.get('ecdhStrength')
s.q = i.get('q')
endpoint.details.suites._list.append(s)
endpoint.details.serverSignature = e.get('details').get('serverSignature')
endpoint.details.prefixDelegation = e.get('details').get('prefixDelegation')
endpoint.details.nonPrefixDelegation = e.get('details').get('nonPrefixDelegation')
endpoint.details.vulnBeast = e.get('details').get('vulnBeast')
endpoint.details.renegSupport = e.get('details').get('renegSupport')
endpoint.details.stsResponseHeader = e.get('details').get('stsResponseHeader')
endpoint.details.stsMaxAge = e.get('details').get('stsMaxAge')
endpoint.details.stsSubdomains = e.get('details').get('stsSubdomains')
endpoint.details.pkpResponseHeader = e.get('details').get('pkpResponseHeader')
endpoint.details.sessionResumption = e.get('details').get('sessionResumption')
endpoint.details.compressionMethods = e.get('details').get('compressionMethods')
endpoint.details.supportsNpn = e.get('details').get('supportsNpn')
endpoint.details.npnProtocols = e.get('details').get('npnProtocols')
endpoint.details.sessionTickets = e.get('details').get('sessionTickets')
endpoint.details.ocspStapling = e.get('details').get('ocspStapling')
endpoint.details.sniRequired = e.get('details').get('sniRequired')
endpoint.details.httpStatusCode = e.get('details').get('httpStatusCode')
endpoint.details.httpForwarding = e.get('details').get('httpForwarding')
endpoint.details.supportsRc4 = e.get('details').get('supportsRc4')
endpoint.details.forwardSecrecy = e.get('details').get('forwardSecrecy')
endpoint.details.rc4WithModern = e.get('details').get('rc4WithModern')
endpoint.details.sims = SimDetails()
endpoint.details.sims.results = []
for i in e.get('details').get('sims').get('results'):
s = Simulation()
s.client = SimClient()
s.client._id = i.get('client').get('id')
s.client.name = i.get('client').get('text')
s.client.platform = i.get('client').get('platform')
s.client.version = i.get('client').get('version')
s.client.isReference = i.get('client').get('isReference')
s._id = i.get('id')
s.errorCode = i.get('errorCode')
s.attempts = i.get('attempts')
s.protocolId = i.get('protocolId')
s.suiteId = i.get('suiteId')
endpoint.details.sims.results.append(s)
endpoint.details.heartbleed = e.get('details').get('heartbleed')
endpoint.details.heartbeat = e.get('details').get('heartbeat')
endpoint.details.openSslCcs = e.get('details').get('openSslCcs')
endpoint.details.poodleTls = e.get('details').get('poodleTls')
endpoint.details.fallbackScsv = e.get('details').get('fallbackScsv')
endpoint.details.freak = e.get('details').get('freak')
h.endpoints.append(endpoint)
return h
def getStatusCodes(self):
f = urllib2.urlopen(SSLLABS_API_ENTRYPOINT + 'getStatusCodes')
jsn = json.loads(f.read())
f.close()
s = StatusCodes()
s.statusDetails = jsn
return s
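# A minimal usage sketch (the hostname is illustrative; assumes network access
# to the ssllabs.com API entry point configured above):
#
#     scanner = SSLLabs()
#     h = scanner.analyze('www.example.com')
#     if h.status == 'READY':
#         for ep in h.endpoints:
#             print(ep.ipAddress + ': ' + ep.grade)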
if __name__ == '__main__':
args = _parse_args().parse_args()
try:
from colorama import Fore, Style, init
init(autoreset=True)
hasColorama = True
    except ImportError:
        hasColorama = False
        print('No color support. Falling back to normal output.')
        args.nocolor = True
if args.info:
s = SSLLabs()
i = s.info()
if args.nocolor:
hasColorama = False
print(_c(Fore.WHITE) + i.messages[0] + '\n')
print(_c(Fore.BLUE) + 'Criteria Version: ' + '\t' +
_c(Fore.CYAN) + i.criteriaVersion)
print(_c(Fore.BLUE) + 'Maximum Assessments: ' + '\t' +
_c(Fore.CYAN) + str(i.maxAssessments))
print(_c(Fore.BLUE) + 'Current Assessments: ' + '\t' +
_c(Fore.CYAN) + str(i.currentAssessments))
        print(_c(Fore.BLUE) + 'Engine Version: ' + '\t' +
_c(Fore.CYAN) + str(i.version))
elif args.statuscodes:
s = SSLLabs()
c = s.getStatusCodes()
for key, value in c.statusDetails['statusDetails'].iteritems():
print(_c(Fore.BLUE) + key + ': ' + _c(Fore.YELLOW) + value)
elif args.host:
s = SSLLabs()
        h = s.analyze(args.host, startNew='on' if args.startnew else 'off')
if args.nocolor:
hasColorama = False
if h.status == 'READY':
for endpoint in h.endpoints:
if not args.grade:
msg = endpoint.serverName + ' (' + endpoint.ipAddress + ')' + ':'
print(_c(Style.BRIGHT) + _c(Fore.WHITE) + msg)
print(len(msg) * '-')
c = None
if endpoint.grade in [ 'A+', 'A', 'A-' ]:
c = Fore.GREEN
elif endpoint.grade in [ 'B', 'C', 'D', 'E' ]:
c = Fore.YELLOW
elif endpoint.grade in [ 'F', 'T', 'M' ]:
c = Fore.RED
if args.grade:
print(_c(Fore.WHITE) + endpoint.serverName + ': ' + _c(c) + endpoint.grade)
break
if endpoint.grade == 'T':
print(_c(Fore.BLUE) + 'Rating: ' + '\t\t' + _c(c) +
_c(Style.BRIGHT) + endpoint.grade + ' (no trust)')
elif endpoint.grade == 'M':
print(_c(Fore.BLUE) + 'Rating: ' + '\t\t' + _c(c) +
_c(Style.BRIGHT) +
endpoint.grade + ' (certificate name mismatch)')
elif endpoint.grade == 'F':
print(_c(Fore.BLUE) + 'Rating: ' + '\t\t' + _c(c) +
_c(Style.BRIGHT) + endpoint.grade)
else:
print(_c(Fore.BLUE) + 'Rating: ' + '\t\t' + _c(c) +
endpoint.grade)
print('')
if endpoint.details.supportsRc4:
print(_c(Fore.BLUE) + 'RC4: ' + '\t\t\t' +
_c(Fore.RED) + 'supported')
else:
print(_c(Fore.BLUE) + 'RC4: ' + '\t\t\t' +
_c(Fore.GREEN) + 'not supported')
if endpoint.details.heartbleed:
print(_c(Fore.BLUE) + 'Heartbleed: ' + '\t\t' +
_c(Fore.RED) + 'vulnerable')
else:
print(_c(Fore.BLUE) + 'Heartbleed: ' + '\t\t' +
_c(Fore.GREEN) + 'not vulnerable')
if endpoint.details.poodleTls == -1:
print(_c(Fore.BLUE) + 'POODLE: ' + '\t\t' +
_c(Fore.YELLOW) + 'test failed')
                elif endpoint.details.poodleTls == 0:
print(_c(Fore.BLUE) + 'POODLE: ' + '\t\t' +
_c(Fore.YELLOW) + 'unknown')
elif endpoint.details.poodleTls == 1:
print(_c(Fore.BLUE) + 'POODLE: ' + '\t\t' +
_c(Fore.GREEN) + 'not vulnerable')
elif endpoint.details.poodleTls == 2:
print(_c(Fore.BLUE) + 'POODLE: ' + '\t\t' +
_c(Fore.RED) + 'vulnerable')
if endpoint.details.freak:
print(_c(Fore.BLUE) + 'FREAK: ' + '\t\t\t' +
_c(Fore.RED) + 'vulnerable')
else:
print(_c(Fore.BLUE) + 'FREAK: ' + '\t\t\t' +
_c(Fore.GREEN) + 'not vulnerable')
print('')
if not args.grade:
print(_c(Fore.BLUE) + 'Test starting time: ' + '\t' +
_c(Fore.CYAN) + _format_timestamp(h.startTime))
print(_c(Fore.BLUE) + 'Test completion time: ' + '\t' +
_c(Fore.CYAN) + _format_timestamp(h.testTime))
print(_c(Fore.BLUE) + 'Test duration: ' + '\t\t' +
_c(Fore.CYAN) +
str(datetime.strptime(_format_timestamp(h.testTime), _FMT) -
datetime.strptime(_format_timestamp(h.startTime), _FMT)))
if h.cacheExpiryTime:
print(_c(Fore.BLUE) + 'Cache expiry time: ' + '\t' +
_c(Fore.CYAN) + _format_timestamp(h.cacheExpiryTime))
sys.exit(0)
elif h.status == 'ERROR':
print(_c(Fore.RED) + h.statusMessage)
sys.exit(1)
elif h.status == 'DNS':
            print(_c(Fore.CYAN) + h.statusMessage + '. ' +
'Please try again in a few minutes.')
sys.exit(2)
elif h.status == 'IN_PROGRESS':
            msg = 'Assessment is in progress. Please try again in a few minutes.'
print(_c(Fore.WHITE) + msg)
print('')
print(_c(Fore.BLUE) + 'Test starting time: ' + '\t' +
_c(Fore.CYAN) + _format_timestamp(h.startTime))
sys.exit(3)
else:
msg = 'Unknown Status'
print(_c(Fore.RED) + msg)
sys.exit(255)
|
unlicense
| 5,589,052,208,164,433,000
| 36.552448
| 113
| 0.506657
| false
| 3.947804
| true
| false
| false
|
geraldinepascal/FROGS
|
assessment/bin/treeSampling.py
|
1
|
11792
|
#!/usr/bin/env python2.7
#
# Copyright (C) 2016 INRA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__author__ = 'Frederic Escudie - Plateforme bioinformatique Toulouse'
__copyright__ = 'Copyright (C) 2016 INRA'
__license__ = 'GNU General Public License'
__version__ = '0.2.0'
__email__ = 'frogs-support@inrae.fr'
__status__ = 'dev'
import sys
import random
import argparse
import numpy as np
from frogsNode import Node
from frogsSequenceIO import FastaIO
##################################################################################################################################################
#
# FUNCTIONS
#
##################################################################################################################################################
def write_subset( in_path, out_path, selected ):
FH_in = FastaIO(in_path)
FH_out = FastaIO(out_path, "w")
for record in FH_in:
if record.id in selected:
FH_out.write(record)
FH_in.close()
FH_out.close()
def print_summary( tree ):
max_depth = databank_tree.get_leaves()[0].get_depth()
print "##########################################################\n" + \
"# Representation\n" + \
"#\n"
print "Rank_depth\tNb_taxa\tNb_selected"
for idx in range(max_depth):
depth = idx + 1
rank_nodes = databank_tree.get_descendants(depth)
nb_node_selected = 0
for node in rank_nodes:
for leaf in node.get_leaves():
if leaf.metadata["selected"]:
nb_node_selected += 1
break
        print str(depth) + "\t" + str(len(rank_nodes)) + "\t" + str(nb_node_selected)
print ""
print "##########################################################\n" + \
"# Distribution\n" + \
"#\n"
distrib_data = dict()
for idx in range(max_depth -1):
depth = idx + 1
nb_selected = list()
rank_nodes = databank_tree.get_descendants(depth)
for node in rank_nodes:
nb_children_selected = 0
for child in node.get_children():
for leaf in child.get_leaves():
if leaf.metadata["selected"]:
nb_children_selected += 1
break
nb_selected.append(nb_children_selected)
distrib_data[str(depth)] = {
"all": distrib(nb_selected),
"non-zero": distrib(filter(lambda a: a != 0, nb_selected))
}
print "Distribution in all nodes:"
print "\t" + "\t".join(sorted(distrib_data))
for field in ["min", "10/100", "25/100", "50/100", "50/100", "75/100", "90/100", "max"]:
print field + ":\t" + "\t".join([str(distrib_data[depth]["all"][field]) for depth in sorted(distrib_data)])
print "\nDistribution in represented nodes:"
print "\t" + "\t".join(sorted(distrib_data))
for field in ["min", "10/100", "25/100", "50/100", "50/100", "75/100", "90/100", "max"]:
print field + ":\t" + "\t".join([str(distrib_data[depth]["non-zero"][field]) for depth in sorted(distrib_data)])
def distrib( data ):
return {
"min": min(data),
"10/100": np.percentile(data, 10),
"25/100": np.percentile(data, 25),
"50/100": np.percentile(data, 50),
"75/100": np.percentile(data, 75),
"90/100": np.percentile(data, 90),
"max": max(data)
}
def ascending_walk(node, max_selection):
selected_leaf = list()
if max_selection > 0:
if random.randint(1, args.climb_prob) != 1:
log[-1].append("ascending")
parent = node.get_parent()
if parent is not None: # Node is not root
brothers = parent.get_children()
if len(brothers) > 1: # Node has brother(s)
if random.randint(1, args.neighbor_prob) != 1: # Brother recruitment
neighbors_leaves = list()
for brother in brothers:
if brother is not node:
for leaf in brother.get_leaves():
if not leaf.metadata["selected"]:
neighbors_leaves.append( leaf )
if len(neighbors_leaves) > 0:
selected_idx = random.randint(1, len(neighbors_leaves)) -1
log[-1].append("neighbor_selection: " + neighbors_leaves[selected_idx].metadata["retained_seq_id"])
selected_leaf.append( neighbors_leaves[selected_idx] )
max_selection -= 1
# Go to parent
selected_leaf.extend( ascending_walk(parent, max_selection) )
return selected_leaf
def descending_walk( node ):
selected_leaf = None
if not node.has_child():
selected_leaf = node
else:
accessible_children = list()
for child in node.get_children():
for leaf in child.get_leaves():
if not leaf.metadata["selected"]:
accessible_children.append(child)
break
selected_idx = random.randint(1, len(accessible_children)) -1
selected_leaf = descending_walk( accessible_children[selected_idx] )
return selected_leaf
def rank_sampling( tree, rank ):
selected_leaf = None
accessible_leaves = list()
for rank_node in tree.get_descendants(rank):
        rank_is_accessible = True
for leaf in rank_node.get_leaves():
if leaf.metadata["selected"]:
rank_is_accessible = False
break
if rank_is_accessible:
accessible_leaves.extend( rank_node.get_leaves() )
selected_idx = random.randint(1, len(accessible_leaves)) -1
selected_leaf = accessible_leaves[selected_idx]
return selected_leaf
def get_tree_from_fasta( in_fasta ):
"""
@warning: The root node must be present
"""
databank_tree = None
FH_databank = FastaIO(in_fasta)
for record in FH_databank:
if record.description.endswith(";"):
record.description = record.description[:-1]
taxonomy = record.description.split(";")
if databank_tree is None:
databank_tree = Node(taxonomy[0])
parent = databank_tree
for rank_depth, taxa in enumerate(taxonomy[1:]):
if not parent.has_child( taxa ):
taxa_node = Node(taxa, parent)
if (rank_depth+1) == (len(taxonomy)-1): # Current node is leaf
taxa_node.metadata["seq_ids"] = [record.id]
else:
if (rank_depth+1) == (len(taxonomy)-1): # Current node is leaf
taxa_node = parent.get_child(taxa)
taxa_node.metadata["seq_ids"].append(record.id)
parent = parent.get_child(taxa)
FH_databank.close()
return databank_tree
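# Illustrative input record for get_tree_from_fasta (taxa names are invented;
# the description is split on ';', with the first field taken as the root):
#
#     >SEQ_0001 Root;Bacteria;Proteobacteria;Escherichia coli
#     ACGTACGT...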
##################################################################################################################################################
#
# MAIN
#
##################################################################################################################################################
if __name__ == "__main__":
# Manage parameters
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=('''Produce a random subset of species (with near and distant species) from a databank.''')
)
    parser.add_argument( '-c', '--climb-prob', default=4, type=int, help='Probability, after selecting a node, of climbing to the parent node (to eventually select a neighbor node) [DEFAULT: 4]. Example: -c 4 gives a 1/4 chance to stop the ascent; -c 5 gives a 1/5 chance to stop the ascent.' )
    parser.add_argument( '-n', '--neighbor-prob', default=4, type=int, help='Probability, after climbing to a parent, of selecting a neighbor node among its children [DEFAULT: 4]. Example: -n 4 gives a 1/4 chance to skip selection at this level; -n 5 gives a 1/5 chance to skip selection at this level.' )
parser.add_argument( '-e', '--expected-nb', default=100, type=int, help='Number of selected sequences.' )
parser.add_argument( '-f', '--select-from', default="bottom", choices=['top', 'bottom', 'mix'], help='Select in "top", "bottom" or "mix". With top the less divided branch is favored ; with bottom the most divided branch is favored ; with mix the less divided and most divided are favored. [Default: bottom]' )
parser.add_argument( '-v', '--version', action='version', version=__version__ )
group_input = parser.add_argument_group( 'Inputs' ) # Inputs
    group_input.add_argument( '-d', '--databank', required=True, help='The reference databank (format : FASTA). Each sequence must have the same number of taxonomy levels and the header must have this format: "ID<TAB>TAX_LVL1;TAX_LVL2".' )
group_output = parser.add_argument_group( 'Outputs' ) # Outputs
group_output.add_argument( '-o', '--output', required=True, help='The selected sequences (format : FASTA).' )
args = parser.parse_args()
log = list()
# Build tree
databank_tree = get_tree_from_fasta(args.databank)
# Select one sequence ID by leaf
for leaf in databank_tree.get_leaves():
nb_seq_ids = len(leaf.metadata["seq_ids"])
leaf.metadata["selected"] = False
leaf.metadata["retained_seq_id"] = leaf.metadata["seq_ids"][0]
if nb_seq_ids > 1:
leaf.metadata["retained_seq_id"] = leaf.metadata["seq_ids"][random.randint(0, nb_seq_ids-1)]
# Select leaves
current_nb = 0
next_from_top = (args.select_from == "top")
nb_asc = 0
selected_leaves_id = list()
while args.expected_nb > current_nb:
# Random selection
current_selected_leaf = None
if next_from_top:
current_selected_leaf = descending_walk(databank_tree)
log.append(["from_top_selection: " + current_selected_leaf.metadata["retained_seq_id"]])
else:
            current_selected_leaf = rank_sampling( databank_tree, 6 )  # TODO: make the sampling rank a parameter
log.append(["from_bottom_selection: " + current_selected_leaf.metadata["retained_seq_id"]])
nb_asc += 1
if args.select_from == "mix":
if nb_asc == 2:
nb_asc = 0
next_from_top = True
else:
next_from_top = False
current_selected_leaf.metadata["selected"] = True
selected_leaves_id.append( current_selected_leaf.metadata["retained_seq_id"] )
current_nb += 1
# Neighbor selection
current_selected_leaves = ascending_walk(current_selected_leaf, (args.expected_nb-current_nb))
for leaf in current_selected_leaves:
leaf.metadata["selected"] = True
selected_leaves_id.append( leaf.metadata["retained_seq_id"] )
current_nb += 1
# Write selection
write_subset(args.databank, args.output, selected_leaves_id)
# Log
for action in log:
print action
# Summary
print_summary(databank_tree)
|
gpl-3.0
| -7,714,959,664,225,086,000
| 43.164794
| 313
| 0.556988
| false
| 3.924126
| false
| false
| false
|
symarroun/FSND-movie-trailers
|
fav_movies.py
|
1
|
4818
|
import media
import fav_movies_web
# Instances of my favorite movies:
# Deadpool movie: movie title, storyline, poster image and movie trailer
deadpool = media.Movie("Deadpool",
""" Wade Wilson (Ryan Reynolds) is a former Special Forces
operative who now works as a mercenary. His world comes
crashing down when evil scientist Ajax (Ed Skrein)
tortures, disfigures and transforms him into Deadpool.
The rogue experiment leaves Deadpool with accelerated
healing powers and a twisted sense of humor. With help
from mutant allies Colossus and Negasonic Teenage
Warhead (Brianna Hildebrand), Deadpool uses his new
skills to hunt down the man who nearly destroyed
his life""",
"https://www.flickeringmyth.com/wp-content/uploads/2016/01/Deadpool-poster-1.jpg", # NOQA
"https://www.youtube.com/watch?v=Xithigfg7dA"
) # NOQA
# Focus movie: movie title, storyline, poster image and movie trailer
focus = media.Movie("Focus",
"""Nicky (Will Smith), a veteran con artist, takes a
                    novice named Jess (Margot Robbie) under his wing. While
Nicky teaches Jess the tricks of the trade, the pair
become romantically involved; but, when Jess gets
uncomfortably close, Nicky ends their relationship.""",
"http://static.rogerebert.com/uploads/movie/movie_poster/focus-2015/large_focus_ver2.jpg", # NOQA
"https://www.youtube.com/watch?v=MxCRgtdAuBo"
) # NOQA
# Mechanic: Resurrection movie: movie title, storyline, poster image and
# movie trailer
mechanic = media.Movie("Mechanic: Resurrection",
"""Living under cover in Brazil, master assassin Arthur
Bishop(Jason Statham) springs back into action after an
old enemySam Hazeldine) kidnaps the woman (Jessica Alba)
he loves. To saveher life, Bishop must kill an
imprisoned African warlord, a humantrafficker (Toby
Eddington) and an arms dealer (Tommy Lee Jones),all
while making the deaths look like accidents. When things
don't goexactly as planned, Bishop turns the tables on
the people who forcedhim out of retirement.""",
"https://images-na.ssl-images-amazon.com/images/M/MV5BMjYwODExNzUwMV5BMl5BanBnXkFtZTgwNTgwNjUyOTE@._V1_UY1200_CR90,0,630,1200_AL_.jpg", # NOQA
"https://www.youtube.com/watch?v=G-P3f_wDXvs"
) # NOQA
# Enemy movie: movie title, storyline, poster image and movie trailer
enemy = media.Movie("Enemy",
"""A mild-mannered college professor (Jake Gyllenhaal)
discovers a look-alike actor and delves into the other
man's private affairs.""",
"http://www.impawards.com/intl/misc/2014/posters/enemy.jpg", # NOQA
"https://www.youtube.com/watch?v=FJuaAWrgoUY"
) # NOQA
# Wonder Woman movie: movie title, storyline, poster image and movie trailer
wonder_woman = media.Movie("Wonder Woman",
"""Before she was Wonder Woman (Gal Gadot), she was
Diana, princess ofthe Amazons, trained to be an
unconquerable warrior. Raised on asheltered island
paradise, Diana meets an American pilot (Chris Pine)
who tells her about the massive conflict that's
raging in the outsideworld. Convinced that she can
stop the threat, Diana leaves herhome for the first
time. Fighting alongside men in a war to end
allwars, she finally discovers her full powers and
true destiny""",
"http://cdn2-www.comingsoon.net/assets/uploads/gallery/wonder-woman/wwposter5.jpg", # NOQA
"https://www.youtube.com/watch?v=1Q8fG0TtVAY"
) # NOQA
# Ghost in the Shell movie: movie title, storyline, poster image and movie
# trailer
ghost_in_the_shell = media.Movie("Ghost in the Shell",
"""In the near future, Major is the first of
herkind: a human who iscyber-enhanced to be a
perfect soldier devoted to stopping theworld's
most dangerous criminals. When terrorism
reaches a newlevel that includes the ability
to hack into people's minds and control them,
Major is uniquely qualified to stop it. As
sheprepares to face a new enemy, Major
discovers that her life was stoleninstead of
saved. Now, she will stop at nothing to
recover her pastwhile punishing those who did
this to her.""",
"http://cdn2-www.comingsoon.net/assets/uploads/gallery/ghost-in-the-shell/ghostinshellposter.jpg", # NOQA
"https://www.youtube.com/watch?v=G4VmJcZR0Yg"
) # NOQA
# All instances grouped together in a list
# The list is the set of movies that will be passed to fav_movies_web
movies = [
deadpool,
focus,
mechanic,
    enemy,
    wonder_woman,
ghost_in_the_shell
]
# Open the HTML file in a webbrowser via the fav_movies_web.py
fav_movies_web.open_movies_page(movies) # the array/list (argument)
|
mit
| -5,561,107,779,786,455,000
| 45.326923
| 149
| 0.706932
| false
| 3.155206
| false
| false
| false
|
statsmaths/stat665
|
psets/pset01/pset01_starter.py
|
1
|
1819
|
""" Problem Set 01 starter code
Please make sure your code runs on Python version 3.5.0
Due date: 2016-02-05 13:00
"""
import numpy as np
from scipy import spatial
from scipy.stats import norm
def my_knn(X, y, k=1):
""" Basic k-nearest neighbor functionality
k-nearest neighbor regression for a numeric test
matrix. Prediction are returned for the same data matrix
used for training. For each row of the input, the k
closest rows (using the l2 distance) in the training
set are identified. The mean of the observations y
is used for the predicted value of a new observation.
Args:
X: an n by p numpy array; the data matrix of predictors
y: a length n numpy array; the observed response
k: integer giving the number of neighbors to include
Returns:
a 1d numpy array of predicted responses for each row of the input matrix X
"""
    # Pairwise distances between all rows of X, expanded to an n x n matrix.
    distmat = spatial.distance.squareform(spatial.distance.pdist(X))
    # Indices of the k closest rows for each observation (a row counts as its
    # own nearest neighbor, since predictions are made on the training matrix).
    neighbors = np.argsort(distmat, axis=1)[:, :k]
    # Predict with the mean response of the k nearest neighbors.
    return y[neighbors].mean(axis=1)
def my_ksmooth(X, y, sigma=1.0):
""" Kernel smoothing function
kernel smoother for a numeric test matrix with a Gaussian
kernel. Prediction are returned for the same data matrix
used for training. For each row of the input, a weighted
average of the input y is used for prediction. The weights
are given by the density of the normal distribution for
the distance of a training point to the input.
Args:
X: an n by p numpy array; the data matrix of predictors
y: a length n numpy vector; the observed response
sigma: the standard deviation of the normal density function
used for the weighting scheme
Returns:
a 1d numpy array of predicted responses for each row of the input matrix X
"""
    # Pairwise distances between all rows of X, expanded to an n x n matrix.
    distmat = spatial.distance.squareform(spatial.distance.pdist(X))
    # Weight every training point by the normal density of its distance.
    weights = norm(scale=sigma).pdf(distmat)
    # Weighted average of the observed responses for each row of X.
    return weights.dot(y) / weights.sum(axis=1)
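# Quick smoke test of both functions (illustrative random data, not part of
# the assignment):
#
#     X = np.random.randn(20, 3)
#     y = np.random.randn(20)
#     print(my_knn(X, y, k=3))
#     print(my_ksmooth(X, y, sigma=0.5))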
|
gpl-2.0
| 2,707,504,578,652,846,000
| 32.072727
| 80
| 0.706432
| false
| 4.200924
| false
| false
| false
|
vhavlena/appreal
|
netbench/pattern_match/parser.py
|
1
|
6267
|
###############################################################################
# parser.py: Module for PATTERN MATCH, mataclass wrapping any parser based
# on nfa_parser base class.
# Copyright (C) 2011 Brno University of Technology, ANT @ FIT
# Author(s): Vlastimil Kosar <ikosar@fit.vutbr.cz>
###############################################################################
#
# LICENSE TERMS
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. All advertising materials mentioning features or use of this software
# or firmware must display the following acknowledgement:
#
# This product includes software developed by the University of
# Technology, Faculty of Information Technology, Brno and its
# contributors.
#
# 4. Neither the name of the Company nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# This software or firmware is provided ``as is'', and any express or implied
# warranties, including, but not limited to, the implied warranties of
# merchantability and fitness for a particular purpose are disclaimed.
# In no event shall the company or contributors be liable for any
# direct, indirect, incidental, special, exemplary, or consequential
# damages (including, but not limited to, procurement of substitute
# goods or services; loss of use, data, or profits; or business
# interruption) however caused and on any theory of liability, whether
# in contract, strict liability, or tort (including negligence or
# otherwise) arising in any way out of the use of this software, even
# if advised of the possibility of such damage.
#
# $Id$
import sys
import copy
from nfa_parser import nfa_parser
import pcre_parser
import pattern_exceptions
class parser(nfa_parser):
"""
    A meta class wrapping, under a single interface, any class for parsing of \
    regular expressions based on the base class nfa_parser.
:param selected_parser: Which class is used for parsing of regular \
expressions. Defaults to "pcre_parser". This parameter can be either \
name of parser class (eg. pcre_parser) or object of class based on \
nfa_parser class.
:type selected_parser: string or nfa_parser
    :param args: any parser parameters. NOTE: The caller is supposed to assign \
    correct parameters of correct types. If parameters exceed the number of \
accepted parameters, then they are discarded.
:type args: list(Any type)
"""
def __init__(self, selected_parser = "pcre_parser", *args):
"""
Class constructor
"""
self.parser = None
if isinstance(selected_parser, str):
if selected_parser == "msfm_parser":
sys.stderr.write("ERROR: The class msfm_parser and coresponding \
RE parser was removed as deprecated. Use the class pcre_parser.\
\n")
exit()
elif selected_parser == "pcre_parser":
self.parser = pcre_parser.pcre_parser(*args)
else:
raise pattern_exceptions.unknown_parser(selected_parser)
else:
if isinstance(selected_parser, nfa_parser):
self.parser = selected_parser
else:
raise pattern_exceptions.unknown_parser(repr(selected_parser))
def load_file(self, filename):
"""
        This function is used to specify the input file and load the whole file into the input text attribute.
:param filename: Name of file.
:type filename: string
"""
self.parser.load_file(filename)
def set_text(self, input_text):
"""
Set text to parse - can have multiple text lines
:param input_text: Regular expressions.
:type input_text: string
"""
self.parser.set_text(input_text)
def get_nfa(self):
"""
        Parses the current line and returns the parsed NFA.
:returns: Created automaton in nfa_data format. Returns None if failure happens.
:rtype: nfa_data or None
"""
return self.parser.get_nfa()
def next_line(self):
"""
Move to the next line (next regular expression)
        :returns: True if the move was performed, otherwise False is returned.
:rtype: boolean
"""
return self.parser.next_line()
def move_to_line(self, line):
"""
Move to the specified line
:param line: Line number.
:type line: int
        :returns: True if the move was performed, otherwise False is returned.
:rtype: boolean
"""
return self.parser.move_to_line(line)
def num_lines(self):
"""
Returns number of lines.
        :returns: Number of lines. Each line corresponds to a single regular expression.
:rtype: int
"""
return self.parser.num_lines()
def reset(self):
"""
        Reset the position counter to 0. Parsing will continue from the beginning.
"""
return self.parser.reset()
def get_position(self):
"""
Returns position in ruleset.
:returns: Position in ruleset.
:rtype: int
"""
return self.parser.get_position()
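# A hypothetical usage sketch of the wrapper (the regular expression is
# illustrative):
#
#     p = parser("pcre_parser")
#     p.set_text("/abc+/")
#     nfa = p.get_nfa()
#     while p.next_line():
#         nfa = p.get_nfa()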
###############################################################################
# End of File parser.py #
###############################################################################
|
gpl-2.0
| -6,552,689,261,439,421,000
| 37.219512
| 109
| 0.588001
| false
| 4.628508
| false
| false
| false
|
ReneHollander/rep0st
|
rep0st/index/post.py
|
1
|
2758
|
from typing import Iterable, NamedTuple
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Date, Document, InnerDoc, Integer, Keyword, Nested
from injector import Module, inject
from rep0st.analyze.feature_vector_analyzer import TYPE_NAME as FEATURE_VECTOR_TYPE
from rep0st.config.rep0st_elasticsearch import Rep0stElasticsearchModule
from rep0st.db.post import Post as DBPost
from rep0st.framework.data.elasticsearch import ElastiknnDenseFloatVectorL2LSHMapping, Index
class PostIndexModule(Module):
def configure(self, binder):
binder.install(Rep0stElasticsearchModule)
binder.bind(PostIndex)
class Frame(InnerDoc):
id = Integer()
feature_vector = ElastiknnDenseFloatVectorL2LSHMapping(108, 180, 5, 3)
class Post(Document):
created = Date()
flags = Keyword()
type = Keyword()
tags = Keyword()
frames = Nested(Frame)
# TODO: Figure out how to disable dynamic mappings.
# dynamic = False
class Index:
name = 'posts'
settings = {
'number_of_shards': 6,
'elastiknn': True,
}
class SearchResult(NamedTuple):
score: float
id: int
class PostIndex(Index[Post]):
@inject
def __init__(self, elasticsearch: Elasticsearch):
super().__init__(Post, elasticsearch=elasticsearch)
def _index_post_from_post(self, post: DBPost) -> Post:
index_post = Post()
index_post.meta.id = post.id
index_post.created = post.created
index_post.type = post.type.value
index_post.flags = [flag.value for flag in post.get_flags()]
index_post.tags = [tag.tag for tag in post.tags]
index_post.frames = [
Frame(
id=feature.id,
feature_vector=[float(n / 255.0)
for n in feature.data])
for feature in post.features
if feature.type == FEATURE_VECTOR_TYPE
]
return index_post
def add_posts(self, posts: Iterable[Post]):
def _it():
for post in posts:
yield self._index_post_from_post(post)
self.save_all(_it())
def find_posts(self, feature_vector):
response = self.search().update_from_dict({
'size': 50,
'fields': [],
'_source': False,
'min_score': 0.3,
'query': {
'nested': {
'path': 'frames',
'query': {
'elastiknn_nearest_neighbors': {
'field': 'frames.feature_vector',
'vec': feature_vector,
'model': 'lsh',
'similarity': 'l2',
'candidates': 500
},
},
},
},
}).execute()
for post in response:
yield SearchResult(post.meta.score, post.meta.id)
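# Hypothetical usage sketch (framework injector wiring omitted; the zero
# vector is only illustrative and matches the 108-dimensional mapping
# declared on Frame above):
#
#     for score, post_id in post_index.find_posts([0.0] * 108):
#         print(post_id, score)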
|
mit
| -2,390,979,145,973,501,000
| 25.776699
| 92
| 0.603336
| false
| 3.793673
| false
| false
| false
|
TACC/tacc_stats
|
tacc_stats/site/machine/update_db.py
|
1
|
7979
|
#!/usr/bin/env python
import os,sys, pwd
from datetime import timedelta, datetime
from dateutil.parser import parse
from fcntl import flock, LOCK_EX, LOCK_NB
os.environ['DJANGO_SETTINGS_MODULE']='tacc_stats.site.tacc_stats_site.settings'
import django
django.setup()
from tacc_stats.site.machine.models import Job, Host, Libraries
from tacc_stats.site.xalt.models import run, join_run_object, lib
from tacc_stats.analysis.metrics import metrics
import tacc_stats.cfg as cfg
from tacc_stats.progress import progress
from tacc_stats.daterange import daterange
import pytz, calendar
import pickle as p
import traceback
import csv
import hostlist
def update_acct(date, rerun = False):
ftr = [3600,60,1]
tz = pytz.timezone('US/Central')
ctr = 0
with open(os.path.join(cfg.acct_path, date.strftime("%Y-%m-%d") + '.txt'), encoding = "latin1") as fd:
nrecords = sum(1 for record in csv.DictReader(fd))
fd.seek(0)
for job in csv.DictReader(fd, delimiter = '|'):
if '+' in job['JobID']:
jid, rid = job['JobID'].split('+')
                # Keep JobID a string so the '_' check below still works.
                job['JobID'] = str(int(jid) + int(rid))
if '_' in job['JobID']:
job['JobID'] = job['JobID'].split('_')[0]
if rerun:
pass
elif Job.objects.filter(id = job['JobID']).exists():
ctr += 1
continue
json = {}
json['id'] = job['JobID']
json['project'] = job['Account']
json['start_time'] = tz.localize(parse(job['Start']))
json['end_time'] = tz.localize(parse(job['End']))
json['start_epoch'] = calendar.timegm(json['start_time'].utctimetuple())
json['end_epoch'] = calendar.timegm(json['end_time'].utctimetuple())
json['run_time'] = json['end_epoch'] - json['start_epoch']
try:
if '-' in job['Timelimit']:
days, time = job['Timelimit'].split('-')
else:
time = job['Timelimit']
days = 0
json['requested_time'] = (int(days) * 86400 +
sum([a*b for a,b in zip(ftr, [int(i) for i in time.split(":")])]))/60
except: pass
json['queue_time'] = int(parse(job['Submit']).strftime('%s'))
try:
json['queue'] = job['Partition']
json['name'] = job['JobName'][0:128]
json['status'] = job['State'].split()[0]
json['nodes'] = int(job['NNodes'])
json['cores'] = int(job['ReqCPUS'])
json['wayness'] = json['cores']/json['nodes']
json['date'] = json['end_time'].date()
json['user'] = job['User']
except:
print(job)
continue
if "user" in json:
try:
json['uid'] = int(pwd.getpwnam(json['user']).pw_uid)
except: pass
host_list = hostlist.expand_hostlist(job['NodeList'])
del job['NodeList']
Job.objects.filter(id=json['id']).delete()
try:
obj, created = Job.objects.update_or_create(**json)
except:
continue
### If xalt is available add data to the DB
xd = None
try:
#xd = run.objects.using('xalt').filter(job_id = json['id'])[0]
for r in run.objects.using('xalt').filter(job_id = json['id']):
if "usr" in r.exec_path.split('/'): continue
xd = r
except: pass
if xd:
obj.exe = xd.exec_path.split('/')[-1][0:128]
obj.exec_path = xd.exec_path
obj.cwd = xd.cwd[0:128]
obj.threads = xd.num_threads
obj.save()
for join in join_run_object.objects.using('xalt').filter(run_id = xd.run_id):
object_path = lib.objects.using('xalt').get(obj_id = join.obj_id).object_path
module_name = lib.objects.using('xalt').get(obj_id = join.obj_id).module_name
if not module_name: module_name = 'none'
library = Libraries(object_path = object_path, module_name = module_name)
library.save()
library.jobs.add(obj)
### Build host table
for host_name in host_list:
h = Host(name=host_name)
h.save()
h.jobs.add(obj)
ctr += 1
progress(ctr, nrecords, date)
try:
with open(os.path.join(cfg.pickles_dir, date.strftime("%Y-%m-%d"), "validated")) as fd:
for line in fd.readlines():
Job.objects.filter(id = int(line)).update(validated = True)
except: pass
def update_metrics(date, pickles_dir, processes, rerun = False):
min_time = 60
metric_names = [
"avg_ethbw", "avg_cpi", "avg_freq", "avg_loads", "avg_l1loadhits",
"avg_l2loadhits", "avg_llcloadhits", "avg_sf_evictrate", "max_sf_evictrate",
"avg_mbw", "avg_page_hitrate", "time_imbalance",
"mem_hwm", "max_packetrate", "avg_packetsize", "node_imbalance",
"avg_flops_32b", "avg_flops_64b", "avg_vector_width_32b", "vecpercent_32b", "avg_vector_width_64b", "vecpercent_64b",
"avg_cpuusage", "max_mds", "avg_lnetmsgs", "avg_lnetbw", "max_lnetbw", "avg_fabricbw",
"max_fabricbw", "avg_mdcreqs", "avg_mdcwait", "avg_oscreqs",
"avg_oscwait", "avg_openclose", "avg_mcdrambw", "avg_blockbw", "max_load15", "avg_gpuutil"
]
aud = metrics.Metrics(metric_names, processes = processes)
print("Run the following tests for:",date)
for name in aud.metric_list:
print(name)
jobs_list = Job.objects.filter(date = date).exclude(run_time__lt = min_time)
#jobs_list = Job.objects.filter(date = date, queue__in = ['rtx', 'rtx-dev']).exclude(run_time__lt = min_time)
# Use avg_cpuusage to see if job was tested. It will always exist
if not rerun:
jobs_list = jobs_list.filter(avg_cpuusage = None)
paths = []
for job in jobs_list:
paths.append(os.path.join(pickles_dir,
job.date.strftime("%Y-%m-%d"),
str(job.id)))
num_jobs = jobs_list.count()
print("# Jobs to be tested:",num_jobs)
if num_jobs == 0 : return
for jobid, metric_dict in aud.run(paths):
try:
if metric_dict: jobs_list.filter(id = jobid).update(**metric_dict)
except: pass
if __name__ == "__main__":
import argparse
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), "update_db_lock"), "w") as fd:
try:
flock(fd, LOCK_EX | LOCK_NB)
except IOError:
print("update_db is already running")
sys.exit()
parser = argparse.ArgumentParser(description='Run database update')
parser.add_argument('start', type = parse, nargs='?', default = datetime.now(),
help = 'Start (YYYY-mm-dd)')
parser.add_argument('end', type = parse, nargs='?', default = False,
help = 'End (YYYY-mm-dd)')
parser.add_argument('-p', '--processes', type = int, default = 1,
help = 'number of processes')
parser.add_argument('-d', '--directory', type = str,
help='Directory to read data', default = cfg.pickles_dir)
args = parser.parse_args()
start = args.start
end = args.end
if not end: end = start
for date in daterange(start, end):
update_acct(date, rerun = False)
update_metrics(date, args.directory, args.processes, rerun = False)
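# Example invocation (dates and process count are illustrative):
#
#     python update_db.py 2016-01-01 2016-01-31 -p 4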
|
lgpl-2.1
| -1,956,235,082,091,073,300
| 40.557292
| 126
| 0.523499
| false
| 3.650046
| false
| false
| false
|
andrewlrogers/srvy
|
collection/setup_db.py
|
1
|
1042
|
#!/usr/bin/python
""" checks to see if srvy.db exists in ../archive. If not it creates the db and appropriate table """
import sqlite3
from sqlite3 import Error
def create_connection(db_file):
try:
conn = sqlite3.connect(db_file)
print(sqlite3.version)
except Error as e:
print(e)
finally:
conn.close()
def create_table(db_file, create_table_sql):
try:
conn = sqlite3.connect(db_file)
c = conn.cursor()
c.execute(create_table_sql)
except Error as e:
print(e)
finally:
        conn.close()
def main():
database = "../archive/srvy.db"
    create_connection(database)
create_srvy_table = """ CREATE TABLE IF NOT EXISTS responses (response_key INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
pythonDateTime TEXT NOT NULL,
unixTime REAL NOT NULL,
question TEXT NOT NULL,
opinion INTEGER NOT NULL
);"""
create_table(database, create_srvy_table)
if __name__ == "__main__":
    main()
|
mit
| -6,962,049,424,461,966,000
| 27.162162
| 122
| 0.596929
| false
| 3.88806
| false
| false
| false
|
anurag-ks/eden
|
modules/s3db/cap.py
|
1
|
140541
|
# -*- coding: utf-8 -*-
""" Sahana Eden Common Alerting Protocol (CAP) Model
@copyright: 2009-2015 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3CAPModel",
"S3CAPAreaNameModel",
"cap_info_labels",
"cap_alert_is_template",
"cap_rheader",
"cap_alert_list_layout",
"add_area_from_template",
"cap_AssignArea",
"cap_AreaRepresent",
#"cap_gis_location_xml_post_parse",
#"cap_gis_location_xml_post_render",
)
import datetime
import urllib2 # Needed for quoting & error handling on fetch
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
from gluon import *
from gluon.storage import Storage
from gluon.tools import fetch
from ..s3 import *
# =============================================================================
class S3CAPModel(S3Model):
"""
CAP: Common Alerting Protocol
- this module is a non-functional stub
http://eden.sahanafoundation.org/wiki/BluePrint/Messaging#CAP
"""
names = ("cap_alert",
"cap_alert_represent",
"cap_alert_approve",
"cap_warning_priority",
"cap_info",
"cap_info_represent",
"cap_resource",
"cap_area",
"cap_area_id",
"cap_area_represent",
"cap_area_location",
"cap_area_tag",
"cap_info_category_opts",
"cap_template_represent",
)
def model(self):
T = current.T
db = current.db
settings = current.deployment_settings
add_components = self.add_components
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
UNKNOWN_OPT = current.messages.UNKNOWN_OPT
# ---------------------------------------------------------------------
# List of Incident Categories -- copied from irs module <--
# @ToDo: Switch to using event_incident_type
#
# The keys are based on the Canadian ems.incident hierarchy, with a
# few extra general versions added to 'other'
# The values are meant for end-users, so can be customised as-required
# NB It is important that the meaning of these entries is not changed
# as otherwise this hurts our ability to do synchronisation
# Entries can be hidden from user view in the controller.
# Additional sets of 'translations' can be added to the tuples.
cap_incident_type_opts = {
"animalHealth.animalDieOff": T("Animal Die Off"),
"animalHealth.animalFeed": T("Animal Feed"),
"aviation.aircraftCrash": T("Aircraft Crash"),
"aviation.aircraftHijacking": T("Aircraft Hijacking"),
"aviation.airportClosure": T("Airport Closure"),
"aviation.airspaceClosure": T("Airspace Closure"),
"aviation.noticeToAirmen": T("Notice to Airmen"),
"aviation.spaceDebris": T("Space Debris"),
"civil.demonstrations": T("Demonstrations"),
"civil.dignitaryVisit": T("Dignitary Visit"),
"civil.displacedPopulations": T("Displaced Populations"),
"civil.emergency": T("Civil Emergency"),
"civil.looting": T("Looting"),
"civil.publicEvent": T("Public Event"),
"civil.riot": T("Riot"),
"civil.volunteerRequest": T("Volunteer Request"),
"crime": T("Crime"),
"crime.bomb": T("Bomb"),
"crime.bombExplosion": T("Bomb Explosion"),
"crime.bombThreat": T("Bomb Threat"),
"crime.dangerousPerson": T("Dangerous Person"),
"crime.drugs": T("Drugs"),
"crime.homeCrime": T("Home Crime"),
"crime.illegalImmigrant": T("Illegal Immigrant"),
"crime.industrialCrime": T("Industrial Crime"),
"crime.poisoning": T("Poisoning"),
"crime.retailCrime": T("Retail Crime"),
"crime.shooting": T("Shooting"),
"crime.stowaway": T("Stowaway"),
"crime.terrorism": T("Terrorism"),
"crime.vehicleCrime": T("Vehicle Crime"),
"fire": T("Fire"),
"fire.forestFire": T("Forest Fire"),
"fire.hotSpot": T("Hot Spot"),
"fire.industryFire": T("Industry Fire"),
"fire.smoke": T("Smoke"),
"fire.urbanFire": T("Urban Fire"),
"fire.wildFire": T("Wild Fire"),
"flood": T("Flood"),
"flood.damOverflow": T("Dam Overflow"),
"flood.flashFlood": T("Flash Flood"),
"flood.highWater": T("High Water"),
"flood.overlandFlowFlood": T("Overland Flow Flood"),
"flood.tsunami": T("Tsunami"),
"geophysical.avalanche": T("Avalanche"),
"geophysical.earthquake": T("Earthquake"),
"geophysical.lahar": T("Lahar"),
"geophysical.landslide": T("Landslide"),
"geophysical.magneticStorm": T("Magnetic Storm"),
"geophysical.meteorite": T("Meteorite"),
"geophysical.pyroclasticFlow": T("Pyroclastic Flow"),
"geophysical.pyroclasticSurge": T("Pyroclastic Surge"),
"geophysical.volcanicAshCloud": T("Volcanic Ash Cloud"),
"geophysical.volcanicEvent": T("Volcanic Event"),
"hazardousMaterial": T("Hazardous Material"),
"hazardousMaterial.biologicalHazard": T("Biological Hazard"),
"hazardousMaterial.chemicalHazard": T("Chemical Hazard"),
"hazardousMaterial.explosiveHazard": T("Explosive Hazard"),
"hazardousMaterial.fallingObjectHazard": T("Falling Object Hazard"),
"hazardousMaterial.infectiousDisease": T("Infectious Disease (Hazardous Material)"),
"hazardousMaterial.poisonousGas": T("Poisonous Gas"),
"hazardousMaterial.radiologicalHazard": T("Radiological Hazard"),
"health.infectiousDisease": T("Infectious Disease"),
"health.infestation": T("Infestation"),
"ice.iceberg": T("Iceberg"),
"ice.icePressure": T("Ice Pressure"),
"ice.rapidCloseLead": T("Rapid Close Lead"),
"ice.specialIce": T("Special Ice"),
"marine.marineSecurity": T("Marine Security"),
"marine.nauticalAccident": T("Nautical Accident"),
"marine.nauticalHijacking": T("Nautical Hijacking"),
"marine.portClosure": T("Port Closure"),
"marine.specialMarine": T("Special Marine"),
"meteorological.blizzard": T("Blizzard"),
"meteorological.blowingSnow": T("Blowing Snow"),
"meteorological.drought": T("Drought"),
"meteorological.dustStorm": T("Dust Storm"),
"meteorological.fog": T("Fog"),
"meteorological.freezingDrizzle": T("Freezing Drizzle"),
"meteorological.freezingRain": T("Freezing Rain"),
"meteorological.freezingSpray": T("Freezing Spray"),
"meteorological.hail": T("Hail"),
"meteorological.hurricane": T("Hurricane"),
"meteorological.rainFall": T("Rain Fall"),
"meteorological.snowFall": T("Snow Fall"),
"meteorological.snowSquall": T("Snow Squall"),
"meteorological.squall": T("Squall"),
"meteorological.stormSurge": T("Storm Surge"),
"meteorological.thunderstorm": T("Thunderstorm"),
"meteorological.tornado": T("Tornado"),
"meteorological.tropicalStorm": T("Tropical Storm"),
"meteorological.waterspout": T("Waterspout"),
"meteorological.winterStorm": T("Winter Storm"),
"missingPerson": T("Missing Person"),
# http://en.wikipedia.org/wiki/Amber_Alert
"missingPerson.amberAlert": T("Child Abduction Emergency"),
"missingPerson.missingVulnerablePerson": T("Missing Vulnerable Person"),
# http://en.wikipedia.org/wiki/Silver_Alert
"missingPerson.silver": T("Missing Senior Citizen"),
"publicService.emergencySupportFacility": T("Emergency Support Facility"),
"publicService.emergencySupportService": T("Emergency Support Service"),
"publicService.schoolClosure": T("School Closure"),
"publicService.schoolLockdown": T("School Lockdown"),
"publicService.serviceOrFacility": T("Service or Facility"),
"publicService.transit": T("Transit"),
"railway.railwayAccident": T("Railway Accident"),
"railway.railwayHijacking": T("Railway Hijacking"),
"roadway.bridgeClosure": T("Bridge Closed"),
"roadway.hazardousRoadConditions": T("Hazardous Road Conditions"),
"roadway.roadwayAccident": T("Road Accident"),
"roadway.roadwayClosure": T("Road Closed"),
"roadway.roadwayDelay": T("Road Delay"),
"roadway.roadwayHijacking": T("Road Hijacking"),
"roadway.roadwayUsageCondition": T("Road Usage Condition"),
"roadway.trafficReport": T("Traffic Report"),
"temperature.arcticOutflow": T("Arctic Outflow"),
"temperature.coldWave": T("Cold Wave"),
"temperature.flashFreeze": T("Flash Freeze"),
"temperature.frost": T("Frost"),
"temperature.heatAndHumidity": T("Heat and Humidity"),
"temperature.heatWave": T("Heat Wave"),
"temperature.windChill": T("Wind Chill"),
"wind.galeWind": T("Gale Wind"),
"wind.hurricaneForceWind": T("Hurricane Force Wind"),
"wind.stormForceWind": T("Storm Force Wind"),
"wind.strongWind": T("Strong Wind"),
"other.buildingCollapsed": T("Building Collapsed"),
"other.peopleTrapped": T("People Trapped"),
"other.powerFailure": T("Power Failure"),
}
# ---------------------------------------------------------------------
# CAP alerts
#
# CAP alert Status Code (status)
cap_alert_status_code_opts = OrderedDict([
("Actual", T("Actual - actionable by all targeted recipients")),
("Exercise", T("Exercise - only for designated participants (decribed in note)")),
("System", T("System - for internal functions")),
("Test", T("Test - testing, all recipients disregard")),
("Draft", T("Draft - not actionable in its current form")),
])
# CAP alert message type (msgType)
cap_alert_msgType_code_opts = OrderedDict([
("Alert", T("Alert: Initial information requiring attention by targeted recipients")),
("Update", T("Update: Update and supercede earlier message(s)")),
("Cancel", T("Cancel: Cancel earlier message(s)")),
("Ack", T("Ack: Acknowledge receipt and acceptance of the message(s)")),
("Error", T("Error: Indicate rejection of the message(s)")),
])
# CAP alert scope
cap_alert_scope_code_opts = OrderedDict([
("Public", T("Public - unrestricted audiences")),
("Restricted", T("Restricted - to users with a known operational requirement (described in restriction)")),
("Private", T("Private - only to specified addresses (mentioned as recipients)"))
])
# CAP info categories
cap_info_category_opts = OrderedDict([
("Geo", T("Geo - Geophysical (inc. landslide)")),
("Met", T("Met - Meteorological (inc. flood)")),
("Safety", T("Safety - General emergency and public safety")),
("Security", T("Security - Law enforcement, military, homeland and local/private security")),
("Rescue", T("Rescue - Rescue and recovery")),
("Fire", T("Fire - Fire suppression and rescue")),
("Health", T("Health - Medical and public health")),
("Env", T("Env - Pollution and other environmental")),
("Transport", T("Transport - Public and private transportation")),
("Infra", T("Infra - Utility, telecommunication, other non-transport infrastructure")),
("CBRNE", T("CBRNE - Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack")),
("Other", T("Other - Other events")),
])
tablename = "cap_alert"
define_table(tablename,
Field("is_template", "boolean",
readable = False,
writable = True,
),
Field("template_id", "reference cap_alert",
label = T("Template"),
ondelete = "RESTRICT",
represent = self.cap_template_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cap_alert.id",
self.cap_template_represent,
filterby="is_template",
filter_opts=(True,)
)),
comment = T("Apply a template"),
),
Field("template_title",
label = T("Template Title"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Template Title"),
T("Title for the template, to indicate to which event this template is related to"))),
),
Field("template_settings", "text",
default = "{}",
readable = False,
),
Field("identifier", unique=True, length=128,
default = self.generate_identifier,
label = T("Identifier"),
requires = IS_MATCH('^[^,<&\s]+$',
error_message=current.T("Cannot be empty and Must not include spaces, commas, or restricted characters (< and &).")),
# Dont Allow to change the identifier
readable = True,
writable = False,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("A unique identifier of the alert message"),
T("A number or string uniquely identifying this message, assigned by the sender. Must notnclude spaces, commas or restricted characters (< and &)."))),
),
Field("sender",
label = T("Sender"),
default = self.generate_sender,
requires = IS_MATCH('^[^,<&\s]+$',
error_message=current.T("Cannot be empty and Must not include spaces, commas, or restricted characters (< and &).")),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The identifier of the sender of the alert message"),
T("This is guaranteed by assigner to be unique globally; e.g., may be based on an Internet domain name. Must not include spaces, commas or restricted characters (< and &)."))),
),
s3_datetime("sent",
default = "now",
writable = False,
),
Field("status",
default = "Draft",
label = T("Status"),
represent = lambda opt: \
cap_alert_status_code_opts.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(cap_alert_status_code_opts),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Denotes the appropriate handling of the alert message"),
T("See options."))),
),
Field("msg_type",
label = T("Message Type"),
default = "Alert",
represent = lambda opt: \
cap_alert_msgType_code_opts.get(opt, UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(cap_alert_msgType_code_opts)
),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The nature of the alert message"),
T("See options."))),
),
Field("source",
label = T("Source"),
default = self.generate_source,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The text identifying the source of the alert message"),
T("The particular source of this alert; e.g., an operator or a specific device."))),
),
Field("scope",
label = T("Scope"),
represent = lambda opt: \
cap_alert_scope_code_opts.get(opt, UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(cap_alert_scope_code_opts)
),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Denotes the intended distribution of the alert message"),
T("Who is this alert for?"))),
),
# Text describing the restriction for scope=restricted
Field("restriction", "text",
label = T("Restriction"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The text describing the rule for limiting distribution of the restricted alert message"),
T("Used when scope is 'Restricted'."))),
),
Field("addresses", "list:string",
label = T("Recipients"),
represent = self.list_string_represent,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The group listing of intended recipients of the alert message"),
T("Required when scope is 'Private', optional when scope is 'Public' or 'Restricted'. Each recipient shall be identified by an identifier or an address."))),
#@ToDo: provide a better way to add multiple addresses,
# do not ask the user to delimit it themselves
# this should eventually use the CAP contacts
#widget = S3CAPAddressesWidget,
),
Field("codes", "list:string",
default = settings.get_cap_codes(),
label = T("Codes"),
represent = self.list_string_represent,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Codes for special handling of the message"),
T("Any user-defined flags or special codes used to flag the alert message for special handling."))),
),
Field("note", "text",
label = T("Note"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The text describing the purpose or significance of the alert message"),
T("The message note is primarily intended for use with status 'Exercise' and message type 'Error'"))),
),
Field("reference", "list:reference cap_alert",
label = T("Reference"),
represent = S3Represent(lookup = tablename,
fields = ["msg_type", "sent", "sender"],
field_sep = " - ",
multiple = True,
),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The group listing identifying earlier message(s) referenced by the alert message"),
T("The extended message identifier(s) (in the form sender,identifier,sent) of an earlier CAP message or messages referenced by this one."))),
# @ToDo: This should not be manually entered,
# needs a widget
#widget = S3ReferenceWidget(table,
# one_to_many=True,
# allow_create=False),
),
# @ToDo: Switch to using event_incident_type_id
Field("incidents", "list:string",
label = T("Incidents"),
represent = S3Represent(options = cap_incident_type_opts,
multiple = True),
requires = IS_EMPTY_OR(
IS_IN_SET(cap_incident_type_opts,
multiple = True,
sort = True,
)),
widget = S3MultiSelectWidget(selectedList = 10),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("A list of incident(s) referenced by the alert message"),
T("Used to collate multiple messages referring to different aspects of the same incident. If multiple incident identifiers are referenced, they SHALL be separated by whitespace. Incident names including whitespace SHALL be surrounded by double-quotes."))),
),
# approved_on field for recording when the alert was approved
s3_datetime("approved_on",
readable = False,
writable = False,
),
*s3_meta_fields())
list_fields = [(T("Sent"), "sent"),
"scope",
"info.priority",
"info.event_type_id",
"info.sender_name",
"area.name",
]
notify_fields = [(T("Identifier"), "identifier"),
(T("Date"), "sent"),
(T("Status"), "status"),
(T("Message Type"), "msg_type"),
(T("Source"), "source"),
(T("Scope"), "scope"),
(T("Restriction"), "restriction"),
(T("Category"), "info.category"),
(T("Event"), "info.event_type_id"),
(T("Response type"), "info.response_type"),
(T("Priority"), "info.priority"),
(T("Urgency"), "info.urgency"),
(T("Severity"), "info.severity"),
(T("Certainty"), "info.certainty"),
(T("Effective"), "info.effective"),
(T("Expires at"), "info.expires"),
(T("Sender's name"), "info.sender_name"),
(T("Headline"), "info.headline"),
(T("Description"), "info.description"),
(T("Instruction"), "info.instruction"),
(T("Contact information"), "info.contact"),
(T("URL"), "info.web"),
(T("Area Description"), "area.name"),
]
filter_widgets = [
S3TextFilter(["identifier",
"sender",
"incidents",
"cap_info.headline",
"cap_info.event",
],
label = T("Search"),
comment = T("Search for an Alert by sender, incident, headline or event."),
),
S3OptionsFilter("info.category",
label = T("Category"),
options = cap_info_category_opts,
),
S3OptionsFilter("info.event_type_id",
),
S3OptionsFilter("info.priority",
),
S3LocationFilter("location.location_id",
label = T("Location(s)"),
# options = gis.get_countries().keys(),
),
S3OptionsFilter("info.language",
label = T("Language"),
),
]
configure(tablename,
context = {"location": "location.location_id",
},
create_onaccept = self.cap_alert_create_onaccept,
filter_widgets = filter_widgets,
list_fields = list_fields,
list_layout = cap_alert_list_layout,
list_orderby = "cap_info.expires desc",
notify_fields = notify_fields,
onapprove = self.cap_alert_approve,
onvalidation = self.cap_alert_onvalidation,
orderby = "cap_info.expires desc",
)
# Components
add_components(tablename,
cap_area = "alert_id",
cap_area_location = {"name": "location",
"joinby": "alert_id",
},
cap_area_tag = {"name": "tag",
"joinby": "alert_id",
},
cap_info = "alert_id",
cap_resource = "alert_id",
)
self.set_method("cap", "alert",
method = "import_feed",
action = CAPImportFeed())
self.set_method("cap", "alert",
method = "assign",
action = self.cap_AssignArea())
if crud_strings["cap_template"]:
crud_strings[tablename] = crud_strings["cap_template"]
else:
crud_strings[tablename] = Storage(
label_create = T("Create Alert"),
title_display = T("Alert Details"),
title_list = T("Alerts"),
# If already-published, this should create a new "Update"
# alert instead of modifying the original
title_update = T("Edit Alert"),
title_upload = T("Import Alerts"),
label_list_button = T("List Alerts"),
label_delete_button = T("Delete Alert"),
msg_record_created = T("Alert created"),
msg_record_modified = T("Alert modified"),
msg_record_deleted = T("Alert deleted"),
msg_list_empty = T("No alerts to show"))
alert_represent = S3Represent(lookup = tablename,
fields = ["msg_type", "sent", "sender"],
field_sep = " - ")
alert_id = S3ReusableField("alert_id", "reference %s" % tablename,
comment = T("The alert message containing this information"),
label = T("Alert"),
ondelete = "CASCADE",
represent = alert_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cap_alert.id",
alert_represent)),
)
# ---------------------------------------------------------------------
# CAP info segments
#
cap_info_responseType_opts = OrderedDict([
("Shelter", T("Shelter - Take shelter in place or per instruction")),
("Evacuate", T("Evacuate - Relocate as instructed in the instruction")),
("Prepare", T("Prepare - Make preparations per the instruction")),
("Execute", T("Execute - Execute a pre-planned activity identified in instruction")),
("Avoid", T("Avoid - Avoid the subject event as per the instruction")),
("Monitor", T("Monitor - Attend to information sources as described in instruction")),
("Assess", T("Assess - Evaluate the information in this message.")),
("AllClear", T("AllClear - The subject event no longer poses a threat")),
("None", T("None - No action recommended")),
])
cap_info_urgency_opts = OrderedDict([
("Immediate", T("Response action should be taken immediately")),
("Expected", T("Response action should be taken soon (within next hour)")),
("Future", T("Responsive action should be taken in the near future")),
("Past", T("Responsive action is no longer required")),
("Unknown", T("Unknown")),
])
cap_info_severity_opts = OrderedDict([
("Extreme", T("Extraordinary threat to life or property")),
("Severe", T("Significant threat to life or property")),
("Moderate", T("Possible threat to life or property")),
("Minor", T("Minimal to no known threat to life or property")),
("Unknown", T("Severity unknown")),
])
cap_info_certainty_opts = OrderedDict([
("Observed", T("Observed: determined to have occurred or to be ongoing")),
("Likely", T("Likely (p > ~50%)")),
("Possible", T("Possible but not likely (p <= ~50%)")),
("Unlikely", T("Not expected to occur (p ~ 0)")),
("Unknown", T("Certainty unknown")),
])
# ---------------------------------------------------------------------
# Warning Priorities for CAP
tablename = "cap_warning_priority"
define_table(tablename,
Field("priority_rank", "integer",
label = T("Priority Rank"),
length = 2,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Priority Rank"),
T("The Priority Rank is basically to give it a ranking 1, 2, ..., n. That way we know 1 is the most important of the chain and n is lowest element. For eg. (1, Signal 1), (2, Signal 2)..., (5, Signal 5) to enumerate the priority for cyclone."))),
),
Field("event_code",
label = T("Event Code"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Event Code"),
T("Code (key) for the event like for eg. (2001, Typhoon), (2002, Flood)"))),
),
Field("name", notnull=True, length=64,
label = T("Name"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Name"),
T("The actual name for the warning priority, for eg. Typhoons in Philippines have five priority name (PSWS# 1, PSWS# 2, PSWS# 3, PSWS# 4 and PSWS# 5)"))),
),
Field("event_type",
label = T("Event Type"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Event Type"),
T("The Event to which this priority is targeted for. The 'Event Type' is the name of the standard Eden Event Type . These are available at /eden/event/event_type (The 'Event Type' should be exactly same as in /eden/event/event_type - case sensitive). For those events which are not in /eden/event/event_type but having the warning priority, you can create the event type using /eden/event/event_type/create and they will appear in this list."))),
),
Field("urgency",
label = T("Urgency"),
requires = IS_IN_SET(cap_info_urgency_opts),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Denotes the urgency of the subject event of the alert message"),
T("The urgency, severity, and certainty of the information collectively distinguish less emphatic from more emphatic messages." +
"'Immediate' - Responsive action should be taken immediately" +
"'Expected' - Responsive action should be taken soon (within next hour)" +
"'Future' - Responsive action should be taken in the near future" +
"'Past' - Responsive action is no longer required" +
"'Unknown' - Urgency not known"))),
),
Field("severity",
label = T("Severity"),
requires = IS_IN_SET(cap_info_severity_opts),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Denotes the severity of the subject event of the alert message"),
T("The urgency, severity, and certainty elements collectively distinguish less emphatic from more emphatic messages." +
"'Extreme' - Extraordinary threat to life or property" +
"'Severe' - Significant threat to life or property" +
"'Moderate' - Possible threat to life or property" +
"'Minor' - Minimal to no known threat to life or property" +
"'Unknown' - Severity unknown"))),
),
Field("certainty",
label = T("Certainty"),
requires = IS_IN_SET(cap_info_certainty_opts),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Denotes the certainty of the subject event of the alert message"),
T("The urgency, severity, and certainty elements collectively distinguish less emphatic from more emphatic messages." +
"'Observed' - Determined to have occurred or to be ongoing" +
"'Likely' - Likely (p > ~50%)" +
"'Possible' - Possible but not likely (p <= ~50%)" +
"'Unlikely' - Not expected to occur (p ~ 0)" +
"'Unknown' - Certainty unknown"))),
),
Field("color_code",
label = T("Color Code"),
widget = S3ColorPickerWidget(),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The color code for this priority"),
T("Pick from the color widget the color that is associated to this priority of the event. The color code is in hex format"))),
),
*s3_meta_fields())
priority_represent = S3Represent(lookup=tablename)
crud_strings[tablename] = Storage(
label_create = T("Create Warning Priority"),
title_display = T("Warning Priority Details"),
title_list = T("Warning Priorities"),
title_update = T("Edit Warning Priority"),
title_upload = T("Import Warning Priorities"),
label_list_button = T("List Warning Priorities"),
label_delete_button = T("Delete Warning Priority"),
msg_record_created = T("Warning Priority added"),
msg_record_modified = T("Warning Priority updated"),
msg_record_deleted = T("Warning Priority removed"),
msg_list_empty = T("No Warning Priorities currently registered")
)
configure(tablename,
deduplicate = S3Duplicate(primary=("event_type", "name")),
)
# ---------------------------------------------------------------------
        # CAP Info segments
# @ToDo: i18n: Need label=T("")
languages = settings.get_cap_languages()
tablename = "cap_info"
define_table(tablename,
alert_id(),
Field("is_template", "boolean",
default = False,
readable = False,
writable = False,
),
Field("template_info_id", "reference cap_info",
ondelete = "RESTRICT",
readable = False,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cap_info.id",
self.cap_template_represent,
filterby="is_template",
filter_opts=(True,)
)),
widget = S3HiddenWidget(),
),
Field("template_settings", "text",
readable = False,
),
Field("language",
default = "en-US",
represent = lambda opt: languages.get(opt,
UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(languages)
),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Denotes the language of the information"),
T("Code Values: Natural language identifier per [RFC 3066]. If not present, an implicit default value of 'en-US' will be assumed. Edit settings.cap.languages in 000_config.py to add more languages. See <a href=\"%s\">here</a> for a full list.") % "http://www.i18nguy.com/unicode/language-identifiers.html")),
),
Field("category", "list:string", # 1 or more allowed
represent = S3Represent(options = cap_info_category_opts,
multiple = True,
),
requires = IS_IN_SET(cap_info_category_opts,
multiple = True,
),
widget = S3MultiSelectWidget(selectedList = 10),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Denotes the category of the subject event of the alert message"),
T("You may select multiple categories by holding down control and then selecting the items."))),
),
Field("event",
label = T("Event"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The text denoting the type of the subject event of the alert message"),
T("If not specified, will the same as the Event Type."))),
),
self.event_type_id(empty = False,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Event Type of the alert message"),
T("Event field above is more general. And this Event Type is classification of event. For eg. Event can be 'Terrorist Attack' and Event Type can be either 'Terrorist Bomb Explosion' or 'Terrorist Chemical Waefare Attack'. If not specified, will the same as the Event Type."))),
script = '''
$.filterOptionsS3({
'trigger':'event_type_id',
'target':'priority',
'lookupURL':S3.Ap.concat('/cap/priority_get/'),
'lookupResource':'event_type'
})'''
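                           # filterOptionsS3 above narrows the Priority dropdown to the
                           # priorities defined for the selected Event Type (via the
                           # /cap/priority_get/ lookup)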
),
Field("response_type", "list:string", # 0 or more allowed
represent = S3Represent(options = cap_info_responseType_opts,
multiple = True,
),
requires = IS_IN_SET(cap_info_responseType_opts,
multiple = True),
widget = S3MultiSelectWidget(selectedList = 10),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Denotes the type of action recommended for the target audience"),
T("Multiple response types can be selected by holding down control and then selecting the items"))),
),
Field("priority", "reference cap_warning_priority",
represent = priority_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cap_warning_priority.id",
priority_represent
),
),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Priority of the alert message"),
T("Defines the priority of the alert message. Selection of the priority automatically sets the value for 'Urgency', 'Severity' and 'Certainty'"))),
),
Field("urgency",
represent = lambda opt: \
cap_info_urgency_opts.get(opt, UNKNOWN_OPT),
# Empty For Template, checked onvalidation hook
requires = IS_EMPTY_OR(
IS_IN_SET(cap_info_urgency_opts)),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Denotes the urgency of the subject event of the alert message"),
T("The urgency, severity, and certainty of the information collectively distinguish less emphatic from more emphatic messages." +
"'Immediate' - Responsive action should be taken immediately" +
"'Expected' - Responsive action should be taken soon (within next hour)" +
"'Future' - Responsive action should be taken in the near future" +
"'Past' - Responsive action is no longer required" +
"'Unknown' - Urgency not known"))),
),
Field("severity",
represent = lambda opt: \
cap_info_severity_opts.get(opt, UNKNOWN_OPT),
# Empty For Template, checked onvalidation hook
requires = IS_EMPTY_OR(
IS_IN_SET(cap_info_severity_opts)),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Denotes the severity of the subject event of the alert message"),
T("The urgency, severity, and certainty elements collectively distinguish less emphatic from more emphatic messages." +
"'Extreme' - Extraordinary threat to life or property" +
"'Severe' - Significant threat to life or property" +
"'Moderate' - Possible threat to life or property" +
"'Minor' - Minimal to no known threat to life or property" +
"'Unknown' - Severity unknown"))),
),
Field("certainty",
represent = lambda opt: \
cap_info_certainty_opts.get(opt, UNKNOWN_OPT),
# Empty For Template, checked onvalidation hook
requires = IS_EMPTY_OR(
IS_IN_SET(cap_info_certainty_opts)),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Denotes the certainty of the subject event of the alert message"),
T("The urgency, severity, and certainty elements collectively distinguish less emphatic from more emphatic messages." +
"'Observed' - Determined to have occurred or to be ongoing" +
"'Likely' - Likely (p > ~50%)" +
"'Possible' - Possible but not likely (p <= ~50%)" +
"'Unlikely' - Not expected to occur (p ~ 0)" +
"'Unknown' - Certainty unknown"))),
),
Field("audience", "text",
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Audience"),
T("The intended audience of the alert message"))),
),
Field("event_code", "text",
default = settings.get_cap_event_codes(),
represent = S3KeyValueWidget.represent,
widget = S3KeyValueWidget(),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("A system-specific code identifying the event type of the alert message"),
T("Any system-specific code for events, in the form of key-value pairs. (e.g., SAME, FIPS, ZIP)."))),
),
s3_datetime("effective",
default = "now",
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The effective time of the information of the alert message"),
T("If not specified, the effective time shall be assumed to be the same the time the alert was sent."))),
),
s3_datetime("onset",
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Onset"),
T("The expected time of the beginning of the subject event of the alert message"))),
),
s3_datetime("expires",
past = 0,
default = self.get_expirydate,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The expiry time of the information of the alert message"),
T("If this item is not provided, each recipient is free to enforce its own policy as to when the message is no longer in effect."))),
),
Field("sender_name",
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The text naming the originator of the alert message"),
T("The human-readable name of the agency or authority issuing this alert."))),
),
Field("headline",
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The text headline of the alert message"),
T("A brief human-readable headline. Note that some displays (for example, short messaging service devices) may only present this headline; it should be made as direct and actionable as possible while remaining short. 160 characters may be a useful target limit for headline length."))),
),
Field("description", "text",
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The subject event of the alert message"),
T("An extended human readable description of the hazard or event that occasioned this message."))),
),
Field("instruction", "text",
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The recommended action to be taken by recipients of the alert message"),
T("An extended human readable instruction to targeted recipients. If different instructions are intended for different recipients, they should be represented by use of multiple information blocks. You can use a different information block also to specify this information in a different language."))),
),
Field("contact", "text",
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Contact"),
T("The contact for follow-up and confirmation of the alert message"))),
),
Field("web",
requires = IS_EMPTY_OR(IS_URL()),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("A URL associating additional information with the alert message"),
T("A full, absolute URI for an HTML page or other text resource with additional or reference information regarding this alert."))),
),
Field("parameter", "text",
default = settings.get_cap_parameters(),
label = T("Parameters"),
represent = S3KeyValueWidget.represent,
widget = S3KeyValueWidget(),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("A system-specific additional parameter associated with the alert message"),
T("Any system-specific datum, in the form of key-value pairs."))),
),
*s3_meta_fields())
info_labels = cap_info_labels()
for field in info_labels:
db.cap_info[field].label = info_labels[field]
if crud_strings["cap_template_info"]:
crud_strings[tablename] = crud_strings["cap_template_info"]
else:
ADD_INFO = T("Add alert information")
crud_strings[tablename] = Storage(
label_create = ADD_INFO,
title_display = T("Alert information"),
title_list = T("Information entries"),
title_update = T("Update alert information"), # this will create a new "Update" alert?
title_upload = T("Import alert information"),
subtitle_list = T("Listing of alert information items"),
label_list_button = T("List information entries"),
label_delete_button = T("Delete Information"),
msg_record_created = T("Alert information created"),
msg_record_modified = T("Alert information modified"),
msg_record_deleted = T("Alert information deleted"),
msg_list_empty = T("No alert information to show"))
info_represent = S3Represent(lookup = tablename,
fields = ["language", "headline"],
field_sep = " - ")
info_id = S3ReusableField("info_id", "reference %s" % tablename,
label = T("Information Segment"),
ondelete = "CASCADE",
represent = info_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cap_info.id",
info_represent)
),
sortby = "identifier",
)
configure(tablename,
#create_next = URL(f="info", args=["[id]", "area"]),
# Required Fields
mark_required = ("urgency", "severity", "certainty",),
onaccept = self.cap_info_onaccept,
onvalidation = self.cap_info_onvalidation,
)
# Components
add_components(tablename,
cap_resource = "info_id",
cap_area = "info_id",
)
# ---------------------------------------------------------------------
# CAP Resource segments
#
# Resource elements sit inside the Info segment of the export XML
# - however in most cases these would be common across all Infos, so in
# our internal UI we link these primarily to the Alert but still
# allow the option to differentiate by Info
#
tablename = "cap_resource"
define_table(tablename,
alert_id(writable = False,
),
info_id(),
Field("is_template", "boolean",
default = False,
readable = False,
writable = False,
),
self.super_link("doc_id", "doc_entity"),
Field("resource_desc",
requires = IS_NOT_EMPTY(),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The type and content of the resource file"),
T("The human-readable text describing the type and content, such as 'map' or 'photo', of the resource file."))),
),
Field("mime_type",
requires = IS_NOT_EMPTY(),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The identifier of the MIME content type and sub-type describing the resource file"),
T("MIME content type and sub-type as described in [RFC 2046]. (As of this document, the current IANA registered MIME types are listed at http://www.iana.org/assignments/media-types/)"))),
),
Field("size", "integer",
writable = False,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The integer indicating the size of the resource file"),
T("Approximate size of the resource file in bytes."))),
),
Field("uri",
# needs a special validation
writable = False,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The identifier of the hyperlink for the resource file"),
T("A full absolute URI, typically a Uniform Resource Locator that can be used to retrieve the resource over the Internet."))),
),
#Field("file", "upload"),
Field("deref_uri", "text",
readable = False,
writable = False,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Deref URI"),
T("The base-64 encoded data content of the resource file"))),
),
Field("digest",
writable = False,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The code representing the digital digest ('hash') computed from the resource file"),
T("Calculated using the Secure Hash Algorithm (SHA-1)."))),
),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Resource"),
title_display = T("Alert Resource"),
title_list = T("Resources"),
title_update = T("Edit Resource"),
subtitle_list = T("List Resources"),
label_list_button = T("List Resources"),
label_delete_button = T("Delete Resource"),
msg_record_created = T("Resource added"),
msg_record_modified = T("Resource updated"),
msg_record_deleted = T("Resource deleted"),
msg_list_empty = T("No resources currently defined for this alert"))
# @todo: complete custom form
crud_form = S3SQLCustomForm("alert_id",
"info_id",
"is_template",
"resource_desc",
S3SQLInlineComponent("image",
label = T("Image"),
fields = ["file",
],
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Image"),
T("Attach an image that provides extra information about the event."))),
),
S3SQLInlineComponent("document",
label = T("Document"),
fields = ["file",
],
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Document"),
T("Attach document that provides extra information about the event."))),
),
)
configure(tablename,
# Shouldn't be required if all UI actions go through alert controller & XSLT configured appropriately
create_onaccept = update_alert_id(tablename),
crud_form = crud_form,
super_entity = "doc_entity",
)
# ---------------------------------------------------------------------
# CAP Area segments
#
# Area elements sit inside the Info segment of the export XML
# - however in most cases these would be common across all Infos, so in
# our internal UI we link these primarily to the Alert but still
# allow the option to differentiate by Info
#
# Each <area> can have multiple elements which are one of <polygon>,
# <circle>, or <geocode>.
# <polygon> and <circle> are explicit geometry elements.
# <geocode> is a key-value pair in which the key is a standard
# geocoding system like SAME, FIPS, ZIP, and the value is a defined
# value in that system. The region described by the <area> is the
# union of the areas described by the individual elements, but the
# CAP spec advises that, if geocodes are included, the concrete
# geometry elements should outline the area specified by the geocodes,
# as not all recipients will have access to the meanings of the
# geocodes. However, since geocodes are a compact way to describe an
# area, it may be that they will be used without accompanying geometry,
# so we should not count on having <polygon> or <circle>.
#
# Geometry elements are each represented by a gis_location record, and
# linked to the cap_area record via the cap_area_location link table.
# For the moment, <circle> objects are stored with the center in the
# gis_location's lat, lon, and radius (in km) as a tag "radius" and
# value. ToDo: Later, we will add CIRCLESTRING WKT.
#
# Geocode elements are currently stored as key value pairs in the
# cap_area record.
#
# <area> can also specify a minimum altitude and maximum altitude
# ("ceiling"). These are stored in explicit fields for now, but could
# be replaced by key value pairs, if it is found that they are rarely
# used.
#
# (An alternative would be to have cap_area link to a gis_location_group
# record. In that case, the geocode tags could be stored in the
# gis_location_group's overall gis_location element's tags. The altitude
# could be stored in the overall gis_location's elevation, with ceiling
# stored in a tag. We could consider adding a maximum elevation field.)
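        # Example (illustrative): a CAP <circle> "26.1,92.7 30" - lat,lon
        # followed by a radius in km - would be stored as a gis_location with
        # lat=26.1, lon=92.7 plus a "radius" tag with value "30", linked to
        # the cap_area via cap_area_location.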
tablename = "cap_area"
define_table(tablename,
alert_id(),
info_id(comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Information segment for this Area segment"),
T("To which Information segment is this Area segment related. Note an Information segment can have multiple Area segments."))),
),
Field("is_template", "boolean",
default = False,
readable = False,
writable = False,
),
Field("name",
label = T("Area Description"),
required = True,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The affected area of the alert message"),
T("A text description of the affected area."))),
),
Field("altitude", "integer", # Feet above Sea-level in WGS84 (Specific or Minimum is using a range)
label = T("Altitude"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The specific or minimum altitude of the affected area"),
T("If used with the ceiling element this value is the lower limit of a range. Otherwise, this value specifies a specific altitude. The altitude measure is in feet above mean sea level."))),
),
Field("ceiling", "integer", # Feet above Sea-level in WGS84 (Maximum)
label = T("Ceiling"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The maximum altitude of the affected area"),
T("must not be used except in combination with the 'altitude' element. The ceiling measure is in feet above mean sea level."))),
),
# Only used for Templates
self.event_type_id(comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Event Type of this predefined alert area"),
T("Event Type relating to this predefined area."))),
script = '''
$.filterOptionsS3({
'trigger':'event_type_id',
'target':'priority',
'lookupURL':S3.Ap.concat('/cap/priority_get/'),
'lookupResource':'event_type'
})'''
),
# Only used for Templates
Field("priority", "reference cap_warning_priority",
label = T("Priority"),
represent = priority_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(
db, "cap_warning_priority.id",
priority_represent
),
),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Priority of the Event Type"),
T("Defines the priority of the Event Type for this predefined area."))),
),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Area"),
title_display = T("Alert Area"),
title_list = T("Areas"),
title_update = T("Edit Area"),
subtitle_list = T("List Areas"),
label_list_button = T("List Areas"),
label_delete_button = T("Delete Area"),
msg_record_created = T("Area added"),
msg_record_modified = T("Area updated"),
msg_record_deleted = T("Area deleted"),
msg_list_empty = T("No areas currently defined for this alert"))
crud_form = S3SQLCustomForm("alert_id",
"info_id",
"is_template",
"name",
"info_id",
S3SQLInlineComponent("location",
name = "location",
label = "",
multiple = False,
fields = [("", "location_id")],
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Geolocation"),
T("The paired values of points defining a polygon that delineates the affected area of the alert message"))),
),
S3SQLInlineComponent("tag",
name = "tag",
label = "",
fields = ["tag",
"value",
],
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("The geographic code delineating the affected area"),
T("Any geographically-based code to describe a message target area, in the form. The key is a user-assigned string designating the domain of the code, and the content of value is a string (which may represent a number) denoting the value itself (e.g., name='ZIP' and value='54321'). This should be used in concert with an equivalent description in the more universally understood polygon and circle forms whenever possible."))),
),
"altitude",
"ceiling",
"event_type_id",
"priority",
)
area_represent = cap_AreaRepresent(show_link=True)
configure(tablename,
#create_next = URL(f="area", args=["[id]", "location"]),
# Old: Shouldn't be required if all UI actions go through alert controller & XSLT configured appropriately
onvalidation = self.cap_area_onvalidation,
crud_form = crud_form,
)
# Components
add_components(tablename,
cap_area_location = {"name": "location",
"joinby": "area_id",
},
cap_area_tag = {"name": "tag",
"joinby": "area_id",
},
# Names
cap_area_name = {"name": "name",
"joinby": "area_id",
},
)
area_id = S3ReusableField("area_id", "reference %s" % tablename,
label = T("Area"),
ondelete = "CASCADE",
represent = area_represent,
requires = IS_ONE_OF(db, "cap_area.id",
area_represent),
)
# ToDo: Use a widget tailored to entering <polygon> and <circle>.
# Want to be able to enter them by drawing on the map.
# Also want to allow selecting existing locations that have
# geometry, maybe with some filtering so the list isn't cluttered
# with irrelevant locations.
tablename = "cap_area_location"
define_table(tablename,
alert_id(readable = False,
writable = False,
),
area_id(),
self.gis_location_id(
widget = S3LocationSelector(points = False,
polygons = True,
show_map = True,
catalog_layers = True,
show_address = False,
show_postcode = False,
),
),
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Location"),
title_display = T("Alert Location"),
title_list = T("Locations"),
title_update = T("Edit Location"),
subtitle_list = T("List Locations"),
label_list_button = T("List Locations"),
label_delete_button = T("Delete Location"),
msg_record_created = T("Location added"),
msg_record_modified = T("Location updated"),
msg_record_deleted = T("Location deleted"),
msg_list_empty = T("No locations currently defined for this alert"))
configure(tablename,
# Shouldn't be required if all UI actions go through alert controller & XSLT configured appropriately
create_onaccept = update_alert_id(tablename),
)
# ---------------------------------------------------------------------
# Area Tags
# - Key-Value extensions
# - Used to hold for geocodes: key is the geocode system name, and
# value is the specific value for this area.
# - Could store other values here as well, to avoid dedicated fields
# in cap_area for rarely-used items like altitude and ceiling, but
# would have to distinguish those from geocodes.
#
# ToDo: Provide a mechanism for pre-loading geocodes that are not tied
# to individual areas.
# ToDo: Allow sharing the key-value pairs. Cf. Ruby on Rails tagging
# systems such as acts-as-taggable-on, which has a single table of tags
# used by all classes. Each tag record has the class and field that the
# tag belongs to, as well as the tag string. We'd want tag and value,
# but the idea is the same: There would be a table with tag / value
# pairs, and individual cap_area, event_event, org_whatever records
# would link to records in the tag table. So we actually would not have
# duplicate tag value records as we do now.
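        # Example (illustrative values): tag="SAME", value="006073" would
        # mark the area with the SAME geocode 006073.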
tablename = "cap_area_tag"
define_table(tablename,
alert_id(readable = False,
writable = False,
),
area_id(),
# ToDo: Allow selecting from a dropdown list of pre-defined
# geocode system names.
Field("tag",
label = T("Geocode Name"),
),
# ToDo: Once the geocode system is selected, fetch a list
# of current values for that geocode system. Allow adding
# new values, e.g. with combo box menu.
Field("value",
label = T("Value"),
),
s3_comments(),
*s3_meta_fields())
configure(tablename,
create_onaccept = update_alert_id(tablename),
# deduplicate = self.cap_area_tag_deduplicate,
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
return dict(cap_alert_id = alert_id,
cap_alert_represent = alert_represent,
cap_alert_approve = self.cap_alert_approve,
cap_area_id = area_id,
cap_area_represent = area_represent,
cap_info_represent = info_represent,
cap_info_category_opts = cap_info_category_opts,
cap_template_represent = self.cap_template_represent,
)
# -------------------------------------------------------------------------
@staticmethod
def generate_identifier():
"""
Generate an identifier for a new form
"""
db = current.db
table = db.cap_alert
r = db().select(table.id,
limitby=(0, 1),
orderby=~table.id).first()
_time = datetime.datetime.strftime(datetime.datetime.utcnow(), "%Y%m%d")
if r:
next_id = int(r.id) + 1
else:
next_id = 1
        # Format: prefix-oid-time-sequence-suffix
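        # e.g. with (hypothetical) settings prefix="myorg", oid="2.49.0.0.1"
        # and suffix="xx", an identifier generated on 4 Feb 2016 would be:
        #     "myorg-2.49.0.0.1-20160204-001-xx"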
settings = current.deployment_settings
prefix = settings.get_cap_identifier_prefix() or current.xml.domain
oid = settings.get_cap_identifier_oid()
suffix = settings.get_cap_identifier_suffix()
return "%s-%s-%s-%03d-%s" % \
(prefix, oid, _time, next_id, suffix)
# -------------------------------------------------------------------------
@staticmethod
def generate_sender():
"""
Generate a sender for a new form
"""
try:
user_id = current.auth.user.id
except AttributeError:
return ""
return "%s/%d" % (current.xml.domain, user_id)
# -------------------------------------------------------------------------
@staticmethod
def generate_source():
"""
Generate a source for CAP alert
"""
return "%s@%s" % (current.xml.domain,
current.deployment_settings.get_base_public_url())
# -------------------------------------------------------------------------
@staticmethod
def get_expirydate():
"""
Default Expiry date based on the expire offset
"""
return current.request.utcnow + \
datetime.timedelta(days = current.deployment_settings.\
get_cap_expire_offset())
# -------------------------------------------------------------------------
@staticmethod
def cap_template_represent(id, row=None):
"""
Represent an alert template concisely
"""
if row:
id = row.id
elif not id:
return current.messages["NONE"]
else:
db = current.db
table = db.cap_alert
row = db(table.id == id).select(table.is_template,
table.template_title,
# left = table.on(table.id == table.parent_item_category_id), Doesn't work
limitby=(0, 1)).first()
try:
# @ToDo: Should get headline from "info"?
if row.is_template:
return row.template_title
else:
                return current.s3db.cap_alert_represent(id)
except:
return current.messages.UNKNOWN_OPT
# -------------------------------------------------------------------------
@staticmethod
def list_string_represent(string, fmt=lambda v: v):
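        # list:string values come back from the DB pipe-delimited
        # (e.g. "|a|b|c|"), hence the string[1:-1].split("|") below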
try:
if isinstance(string, list):
return ", ".join([fmt(i) for i in string])
elif isinstance(string, basestring):
return ", ".join([fmt(i) for i in string[1:-1].split("|")])
except IndexError:
return current.messages.UNKNOWN_OPT
return ""
# -------------------------------------------------------------------------
@staticmethod
def cap_alert_create_onaccept(form):
"""
Auto-approve Templates
"""
form_vars = form.vars
if form_vars.get("is_template"):
user = current.auth.user
if user:
current.db(current.s3db.cap_alert.id == form_vars.id).update(
approved_by = user.id)
# -------------------------------------------------------------------------
@staticmethod
def cap_alert_onvalidation(form):
"""
Custom Form Validation:
multi-field level
"""
form_vars = form.vars
if form_vars.get("scope") == "Private" and not form_vars.get("addresses"):
form.errors["addresses"] = \
current.T("'Recipients' field mandatory in case of 'Private' scope")
# -------------------------------------------------------------------------
@staticmethod
def cap_info_onaccept(form):
"""
After DB I/O
"""
if "vars" in form:
form_vars = form.vars
elif "id" in form:
form_vars = form
elif hasattr(form, "vars"):
form_vars = form.vars
else:
form_vars = form
info_id = form_vars.id
if not info_id:
return
db = current.db
atable = db.cap_alert
itable = db.cap_info
info = db(itable.id == info_id).select(itable.alert_id,
itable.event,
itable.event_type_id,
limitby=(0, 1)).first()
if info:
alert_id = info.alert_id
set_ = db(itable.id == info_id)
if alert_id and cap_alert_is_template(alert_id):
set_.update(is_template = True)
if not info.event:
                set_.update(event = itable.event_type_id.represent(info.event_type_id))
# -------------------------------------------------------------------------
@staticmethod
def cap_info_onvalidation(form):
"""
Custom Form Validation:
used for import from CSV
"""
form_record = form.record
        if form_record and form_record.is_template is False:
form_vars = form.vars
if not form_vars.get("urgency"):
form.errors["urgency"] = \
current.T("'Urgency' field is mandatory")
if not form_vars.get("severity"):
form.errors["severity"] = \
current.T("'Severity' field is mandatory")
if not form_vars.get("certainty"):
form.errors["certainty"] = \
current.T("'Certainty' field is mandatory")
# -------------------------------------------------------------------------
@staticmethod
def cap_alert_approve(record=None):
"""
Update the approved_on field when alert gets approved
"""
if not record:
return
alert_id = record["id"]
# Update approved_on at the time the alert is approved
if alert_id:
db = current.db
db(db.cap_alert.id == alert_id).update(approved_on = current.request.utcnow)
# -------------------------------------------------------------------------
@staticmethod
def cap_area_onvalidation(form):
"""
Custom Form Validation
"""
form_vars = form.vars
if form_vars.get("ceiling") and not form_vars.get("altitude"):
form.errors["altitude"] = \
current.T("'Altitude' field is mandatory if using 'Ceiling' field.")
# =============================================================================
class S3CAPAreaNameModel(S3Model):
"""
CAP Name Model:
            - local names for CAP Areas
"""
names = ("cap_area_name",
)
def model(self):
T = current.T
l10n_languages = current.deployment_settings.get_L10n_languages()
# ---------------------------------------------------------------------
# Local Names
#
tablename = "cap_area_name"
self.define_table(tablename,
self.cap_area_id(empty = False,
ondelete = "CASCADE",
),
Field("language",
label = T("Language"),
represent = lambda opt: l10n_languages.get(opt,
current.messages.UNKNOWN_OPT),
requires = IS_ISO639_2_LANGUAGE_CODE(),
),
Field("name_l10n",
label = T("Local Name"),
),
s3_comments(),
*s3_meta_fields())
self.configure(tablename,
deduplicate = S3Duplicate(primary=("area_id", "language")),
)
# Pass names back to global scope (s3.*)
return {}
# =============================================================================
def cap_info_labels():
"""
Labels for CAP info segments
"""
T = current.T
return dict(language=T("Language"),
category=T("Category"),
event=T("Event"),
response_type=T("Response type"),
urgency=T("Urgency"),
severity=T("Severity"),
certainty=T("Certainty"),
audience=T("Audience"),
event_code=T("Event code"),
effective=T("Effective"),
onset=T("Onset"),
expires=T("Expires at"),
sender_name=T("Sender's name"),
headline=T("Headline"),
description=T("Description"),
instruction=T("Instruction"),
web=T("URL"),
contact=T("Contact information"),
parameter=T("Parameters")
)
# =============================================================================
def cap_alert_is_template(alert_id):
"""
Tell whether an alert entry is a template
"""
if not alert_id:
return False
table = current.s3db.cap_alert
query = (table.id == alert_id)
    r = current.db(query).select(table.is_template,
                                 limitby=(0, 1)).first()
    return bool(r and r.is_template)
# =============================================================================
def cap_rheader(r):
""" Resource Header for CAP module """
rheader = None
if r.representation == "html":
record = r.record
if record:
T = current.T
s3db = current.s3db
tablename = r.tablename
if tablename == "cap_alert":
alert_id = record.id
itable = s3db.cap_info
row = current.db(itable.alert_id == alert_id).\
select(itable.id,
limitby=(0, 1)).first()
if record.is_template:
if not (row and row.id):
error = DIV(T("An alert needs to contain at least one info item."),
_class="error")
else:
error = ""
tabs = [(T("Alert Details"), None),
(T("Information"), "info"),
#(T("Area"), "area"),
(T("Resource Files"), "resource"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % T("Template")),
TD(A(s3db.cap_template_represent(alert_id, record),
_href=URL(c="cap", f="template",
args=[alert_id, "update"]))),
),
),
rheader_tabs,
error
)
else:
if not (row and row.id):
error = DIV(T("You need to create at least one alert information item in order to be able to broadcast this alert!"),
_class="error")
export_btn = ""
submit_btn = None
else:
error = ""
export_btn = A(DIV(_class="export_cap_large"),
_href=URL(c="cap", f="alert", args=["%s.cap" % alert_id]),
_target="_blank",
)
# Display 'Submit for Approval' based on permission
# and deployment settings
if not current.request.get_vars.get("_next") and \
not r.record.approved_by and \
current.deployment_settings.get_cap_authorisation() and \
current.auth.s3_has_permission("update", "cap_alert",
record_id=alert_id):
# Get the user ids for the role alert_approver
db = current.db
agtable = db.auth_group
group_rows = db(agtable.role == "Alert Approver").\
select(agtable.id)
if group_rows:
group_members = current.auth.s3_group_members
user_pe_id = current.auth.s3_user_pe_id
for group_row in group_rows:
group_id = group_row.id
user_ids = group_members(group_id) # List of user_ids
pe_ids = [] # List of pe_ids
pe_append = pe_ids.append
for user_id in user_ids:
pe_append(user_pe_id(int(user_id)))
submit_btn = A(T("Submit for Approval"),
_href = URL(f = "compose",
vars = {"cap_alert.id": record.id,
"pe_ids": pe_ids,
},
),
_class = "action-btn confirm-btn"
)
current.response.s3.jquery_ready.append(
'''S3.confirmClick('.confirm-btn','%s')''' % T("Do you want to submit the alert for approval?"))
else:
submit_btn = None
else:
submit_btn = None
tabs = [(T("Alert Details"), None),
(T("Information"), "info"),
(T("Area"), "area"),
(T("Resource Files"), "resource"),
]
if r.representation == "html" and \
current.auth.s3_has_permission("update", "cap_alert",
record_id=alert_id):
# Check to see if 'Predefined Areas' tab need to be added
artable = s3db.cap_area
query = (artable.is_template == True) & \
(artable.deleted == False)
                        template_area_rows = current.db(query).select(artable.id,
                                                                      limitby=(0, 1))
if template_area_rows:
tabs.insert(2, (T("Predefined Areas"), "assign"))
# Display "Copy" Button to copy record from the opened info
if r.component_name == "info" and \
r.component_id:
copy_btn = A(T("Copy"),
_href = URL(f = "alert",
args = [r.id, "info", "create",],
vars = {"from_record": r.component_id,
},
),
_class = "action-btn"
)
else:
copy_btn = None
else:
copy_btn = None
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % T("Alert")),
TD(A(s3db.cap_alert_represent(alert_id, record),
_href=URL(c="cap", f="alert",
args=[alert_id, "update"]))),
),
TR(export_btn)
),
rheader_tabs,
error
)
if copy_btn:
rheader.insert(1, TR(TD(copy_btn)))
if submit_btn:
rheader.insert(1, TR(TD(submit_btn)))
elif tablename == "cap_area":
# Used only for Area Templates
tabs = [(T("Area"), None),
]
if current.deployment_settings.get_L10n_translate_cap_area():
tabs.insert(1, (T("Local Names"), "name"))
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % T("Alert")),
TD(A(s3db.cap_alert_represent(record.alert_id),
_href=URL(c="cap", f="alert",
args=[record.alert_id, "update"])))
),
TR(TH("%s: " % T("Information")),
TD(A(s3db.cap_info_represent(record.info_id),
_href=URL(c="cap", f="info",
args=[record.info_id, "update"]))),
),
TR(TH("%s: " % T("Area")),
TD(A(s3db.cap_area_represent(record.id, record),
_href=URL(c="cap", f="area",
args=[record.id, "update"]))),
),
),
rheader_tabs
)
elif tablename == "cap_info":
# Shouldn't ever be called
tabs = [(T("Information"), None),
(T("Resource Files"), "resource"),
]
if cap_alert_is_template(record.alert_id):
rheader_tabs = s3_rheader_tabs(r, tabs)
table = r.table
rheader = DIV(TABLE(TR(TH("%s: " % T("Template")),
TD(A(s3db.cap_template_represent(record.alert_id),
_href=URL(c="cap", f="template",
args=[record.alert_id, "update"]))),
),
TR(TH("%s: " % T("Info template")),
TD(A(s3db.cap_info_represent(record.id, record),
_href=URL(c="cap", f="info",
args=[record.id, "update"]))),
)
),
rheader_tabs,
_class="cap_info_template_form"
)
current.response.s3.js_global.append('''i18n.cap_locked="%s"''' % T("Locked"))
else:
tabs.insert(1, (T("Areas"), "area"))
rheader_tabs = s3_rheader_tabs(r, tabs)
table = r.table
rheader = DIV(TABLE(TR(TH("%s: " % T("Alert")),
TD(A(s3db.cap_alert_represent(record.alert_id),
_href=URL(c="cap", f="alert",
args=[record.alert_id, "update"]))),
),
TR(TH("%s: " % T("Information")),
TD(A(s3db.cap_info_represent(record.id, record),
_href=URL(c="cap", f="info",
args=[record.id, "update"]))),
)
),
rheader_tabs
)
return rheader
# =============================================================================
def update_alert_id(tablename):
""" On-accept for area and resource records """
def func(form):
if "vars" in form:
form_vars = form.vars
elif "id" in form:
form_vars = form
elif hasattr(form, "vars"):
form_vars = form.vars
else:
form_vars = form
if form_vars.get("alert_id", None):
# Nothing to do
return
# Look up from the info/area
_id = form_vars.id
if not _id:
return
db = current.db
table = db[tablename]
if tablename == "cap_area_location" or tablename == "cap_area_tag":
area_id = form_vars.get("area_id", None)
if not area_id:
# Get the full record
item = db(table.id == _id).select(table.alert_id,
table.area_id,
limitby=(0, 1)).first()
try:
alert_id = item.alert_id
area_id = item.area_id
except:
# Nothing we can do
return
if alert_id:
# Nothing to do
return
atable = db.cap_area
area = db(atable.id == area_id).select(atable.alert_id,
limitby=(0, 1)).first()
try:
alert_id = area.alert_id
except:
# Nothing we can do
return
else:
# cap_area or cap_resource
info_id = form_vars.get("info_id", None)
if not info_id:
# Get the full record
item = db(table.id == _id).select(table.alert_id,
table.info_id,
limitby=(0, 1)).first()
try:
alert_id = item.alert_id
info_id = item.info_id
except:
# Nothing we can do
return
if alert_id:
# Nothing to do
return
itable = db.cap_info
info = db(itable.id == info_id).select(itable.alert_id,
limitby=(0, 1)).first()
try:
alert_id = info.alert_id
except:
# Nothing we can do
return
if alert_id:
db(table.id == _id).update(alert_id = alert_id)
return func
# =============================================================================
def cap_gis_location_xml_post_parse(element, record):
"""
UNUSED - done in XSLT
Convert CAP polygon representation to WKT; extract circle lat lon.
Latitude and longitude in CAP are expressed as signed decimal values in
coordinate pairs:
latitude,longitude
The circle text consists of:
latitude,longitude radius
where the radius is in km.
Polygon text consists of a space separated sequence of at least 4
coordinate pairs where the first and last are the same.
lat1,lon1 lat2,lon2 lat3,lon3 ... lat1,lon1
"""
# @ToDo: Extract altitude and ceiling from the enclosing <area>, and
# compute an elevation value to apply to all enclosed gis_locations.
cap_polygons = element.xpath("cap_polygon")
if cap_polygons:
cap_polygon_text = cap_polygons[0].text
# CAP polygons and WKT have opposite separator conventions:
# CAP has spaces between coordinate pairs and within pairs the
# coordinates are separated by comma, and vice versa for WKT.
# Unfortunately, CAP and WKT (as we use it) also have opposite
# orders of lat and lon. CAP has lat lon, WKT has lon lat.
# Both close the polygon by repeating the first point.
cap_points_text = cap_polygon_text.split()
cap_points = [cpoint.split(",") for cpoint in cap_points_text]
# @ToDo: Should we try interpreting all the points as decimal numbers,
# and failing validation if they're wrong?
wkt_points = ["%s %s" % (cpoint[1], cpoint[0]) for cpoint in cap_points]
wkt_polygon_text = "POLYGON ((%s))" % ", ".join(wkt_points)
record.wkt = wkt_polygon_text
return
cap_circle_values = element.xpath("resource[@name='gis_location_tag']/data[@field='tag' and text()='cap_circle']/../data[@field='value']")
if cap_circle_values:
cap_circle_text = cap_circle_values[0].text
coords, radius = cap_circle_text.split()
lat, lon = coords.split(",")
try:
# If any of these fail to interpret as numbers, the circle was
# badly formatted. For now, we don't try to fail validation,
# but just don't set the lat, lon.
lat = float(lat)
lon = float(lon)
radius = float(radius)
except ValueError:
return
record.lat = lat
record.lon = lon
# Add a bounding box for the given radius, if it is not zero.
if radius > 0.0:
bbox = current.gis.get_bounds_from_radius(lat, lon, radius)
record.lat_min = bbox["lat_min"]
record.lon_min = bbox["lon_min"]
record.lat_max = bbox["lat_max"]
record.lon_max = bbox["lon_max"]
# =============================================================================
def cap_gis_location_xml_post_render(element, record):
"""
UNUSED - done in XSLT
Convert Eden WKT polygon (and eventually circle) representation to
CAP format and provide them in the rendered s3xml.
Not all internal formats have a parallel in CAP, but an effort is made
        to provide a reasonable substitute:
Polygons are supported.
Circles that were read in from CAP (and thus carry the original CAP
circle data) are supported.
Multipolygons are currently rendered as their bounding box.
Points are rendered as zero radius circles.
Latitude and longitude in CAP are expressed as signed decimal values in
coordinate pairs:
latitude,longitude
The circle text consists of:
latitude,longitude radius
where the radius is in km.
Polygon text consists of a space separated sequence of at least 4
coordinate pairs where the first and last are the same.
lat1,lon1 lat2,lon2 lat3,lon3 ... lat1,lon1
"""
# @ToDo: Can we rely on gis_feature_type == 3 to tell if the location is a
# polygon, or is it better to look for POLYGON in the wkt? For now, check
# both.
# @ToDo: CAP does not support multipolygons. Do we want to extract their
# outer polygon if passed MULTIPOLYGON wkt? For now, these are exported
# with their bounding box as the polygon.
# @ToDo: What if a point (gis_feature_type == 1) that is not a CAP circle
# has a non-point bounding box? Should it be rendered as a polygon for
# the bounding box?
try:
from lxml import etree
except:
# This won't fail, since we're in the middle of processing xml.
return
SubElement = etree.SubElement
s3xml = current.xml
TAG = s3xml.TAG
RESOURCE = TAG["resource"]
DATA = TAG["data"]
ATTRIBUTE = s3xml.ATTRIBUTE
NAME = ATTRIBUTE["name"]
FIELD = ATTRIBUTE["field"]
VALUE = ATTRIBUTE["value"]
loc_tablename = "gis_location"
tag_tablename = "gis_location_tag"
tag_fieldname = "tag"
val_fieldname = "value"
polygon_tag = "cap_polygon"
circle_tag = "cap_circle"
fallback_polygon_tag = "cap_polygon_fallback"
fallback_circle_tag = "cap_circle_fallback"
def __cap_gis_location_add_polygon(element, cap_polygon_text, fallback=False):
"""
Helper for cap_gis_location_xml_post_render that adds the CAP polygon
data to the current element in a gis_location_tag element.
"""
# Make a gis_location_tag.
tag_resource = SubElement(element, RESOURCE)
tag_resource.set(NAME, tag_tablename)
tag_field = SubElement(tag_resource, DATA)
# Add tag and value children.
tag_field.set(FIELD, tag_fieldname)
if fallback:
tag_field.text = fallback_polygon_tag
else:
tag_field.text = polygon_tag
val_field = SubElement(tag_resource, DATA)
val_field.set(FIELD, val_fieldname)
val_field.text = cap_polygon_text
def __cap_gis_location_add_circle(element, lat, lon, radius, fallback=False):
"""
Helper for cap_gis_location_xml_post_render that adds CAP circle
data to the current element in a gis_location_tag element.
"""
# Make a gis_location_tag.
tag_resource = SubElement(element, RESOURCE)
tag_resource.set(NAME, tag_tablename)
tag_field = SubElement(tag_resource, DATA)
# Add tag and value children.
tag_field.set(FIELD, tag_fieldname)
if fallback:
tag_field.text = fallback_circle_tag
else:
tag_field.text = circle_tag
val_field = SubElement(tag_resource, DATA)
val_field.set(FIELD, val_fieldname)
# Construct a CAP circle string: latitude,longitude radius
cap_circle_text = "%s,%s %s" % (lat, lon, radius)
val_field.text = cap_circle_text
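        # e.g. lat=26.1, lon=92.7, radius=30 => "26.1,92.7 30"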
# Sort out the geometry case by wkt, CAP tags, gis_feature_type, bounds,...
# Check the two cases for CAP-specific locations first, as those will have
# definite export values. For others, we'll attempt to produce either a
# circle or polygon: Locations with a bounding box will get a box polygon,
# points will get a zero-radius circle.
# Currently wkt is stripped out of gis_location records right here:
# https://github.com/flavour/eden/blob/master/modules/s3/s3resource.py#L1332
# https://github.com/flavour/eden/blob/master/modules/s3/s3resource.py#L1426
# https://github.com/flavour/eden/blob/master/modules/s3/s3resource.py#L3152
# Until we provide a way to configure that choice, this will not work for
# polygons.
wkt = record.get("wkt", None)
# WKT POLYGON: Although there is no WKT spec, according to every reference
# that deals with nested polygons, the outer, enclosing, polygon must be
# listed first. Hence, we extract only the first polygon, as CAP has no
# provision for nesting.
if wkt and wkt.startswith("POLYGON"):
# ToDo: Is it sufficient to test for adjacent (( to find the start of
# the polygon, or might there be whitespace between them?
start = wkt.find("((")
end = wkt.find(")")
if start >= 0 and end >= 0:
polygon_text = wkt[start + 2 : end]
points_text = polygon_text.split(",")
points = [p.split() for p in points_text]
cap_points_text = ["%s,%s" % (point[1], point[0]) for point in points]
cap_polygon_text = " ".join(cap_points_text)
__cap_gis_location_add_polygon(element, cap_polygon_text)
return
# Fall through if the wkt string was mal-formed.
# CAP circle stored in a gis_location_tag with tag = cap_circle.
# If there is a cap_circle tag, we don't need to do anything further, as
# export.xsl will use it. However, we don't know if there is a cap_circle
# tag...
#
# @ToDo: The export calls xml_post_render after processing a resource's
# fields, but before its components are added as children in the xml tree.
# If this were delayed til after the components were added, we could look
# there for the cap_circle gis_location_tag record. Since xml_post_parse
# isn't in use yet (except for this), maybe we could look at moving it til
# after the components?
#
# For now, with the xml_post_render before components: We could do a db
# query to check for a real cap_circle tag record, and not bother with
# creating fallbacks from bounding box or point...but we don't have to.
# Instead, just go ahead and add the fallbacks under different tag names,
# and let the export.xsl sort them out. This only wastes a little time
# compared to a db query.
# ToDo: MULTIPOLYGON -- Can stitch together the outer polygons in the
# multipolygon, but would need to assure all were the same handedness.
# The remaining cases are for locations that don't have either polygon wkt
# or a cap_circle tag.
# Bounding box: Make a four-vertex polygon from the bounding box.
# This is a fallback, as if there is a circle tag, we'll use that.
lon_min = record.get("lon_min", None)
lon_max = record.get("lon_max", None)
lat_min = record.get("lat_min", None)
lat_max = record.get("lat_max", None)
# Zero is a valid coordinate, so test explicitly against None.
if lon_min is not None and lon_max is not None and \
lat_min is not None and lat_max is not None and \
(lon_min != lon_max) and (lat_min != lat_max):
# Although there is no WKT requirement, arrange the points in
# counterclockwise order. Recall format is:
# lat1,lon1 lat2,lon2 ... latN,lonN lat1,lon1
cap_polygon_text = \
"%(lat_min)s,%(lon_min)s %(lat_min)s,%(lon_max)s %(lat_max)s,%(lon_max)s %(lat_max)s,%(lon_min)s %(lat_min)s,%(lon_min)s" \
% {"lon_min": lon_min,
"lon_max": lon_max,
"lat_min": lat_min,
"lat_max": lat_max}
__cap_gis_location_add_polygon(element, cap_polygon_text, fallback=True)
return
# WKT POINT or location with lat, lon: This can be rendered as a
# zero-radius circle.
# Q: Do we put bounding boxes around POINT locations, and are they
# meaningful?
lat = record.get("lat", None)
lon = record.get("lon", None)
# Zero is a valid coordinate, so test explicitly against None.
if lat is None or lon is None:
# Look for POINT.
if wkt and wkt.startswith("POINT"):
start = wkt.find("(")
end = wkt.find(")")
if start >= 0 and end >= 0:
# POINT has a single opening paren (unlike POLYGON's "(("),
# so the coordinates begin one character after it.
point_text = wkt[start + 1 : end]
point = point_text.split()
try:
lon = float(point[0])
lat = float(point[1])
except ValueError:
pass
if lat is not None and lon is not None:
# Add a (fallback) circle with zero radius.
__cap_gis_location_add_circle(element, lat, lon, 0, True)
return
# ToDo: Other WKT.
# Did not find anything to use. Presumably the area has a text description.
return
# =============================================================================
def cap_alert_list_layout(list_id, item_id, resource, rfields, record):
"""
Default dataList item renderer for CAP Alerts on the Home page.
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["cap_alert.id"]
item_class = "thumbnail"
T = current.T
#raw = record._row
# @ToDo: handle the case where we have multiple info segments &/or areas
headline = record["cap_info.headline"]
location = record["cap_area.name"]
priority = record["cap_info.priority"]
status = record["cap_alert.status"]
scope = record["cap_alert.scope"]
event = record["cap_info.event_type_id"]
if current.auth.s3_logged_in():
_href = URL(c="cap", f="alert", args=[record_id, "profile"])
else:
_href = URL(c="cap", f="public", args=[record_id, "profile"])
priority_row = None
if priority and priority != "-":
# Give the priority color to headline
db = current.db
wptable = db.cap_warning_priority
priority_row = db(wptable.name == priority).select(wptable.color_code,
limitby=(0, 1)).first()
more = A(T("Full Alert"),
_href = _href,
_target = "_blank",
)
if list_id == "map_popup":
itable = current.s3db.cap_info
# Map popup
event = itable.event_type_id.represent(event)
if priority is None:
priority = T("Unknown")
else:
priority = itable.priority.represent(priority)
description = record["cap_info.description"]
response_type = record["cap_info.response_type"]
sender = record["cap_info.sender_name"]
last = TAG[""](BR(),
description,
BR(),
", ".join(response_type),
BR(),
sender,
BR(),
)
details = "%s %s %s" % (priority, status, scope)
headline_ = A(headline,
_href = _href,
_target = "_blank",
)
if priority_row:
headline_["_style"] = "color: #%s" % (priority_row.color_code)
item = DIV(headline_,
BR(),
location,
BR(),
details,
BR(),
event,
last,
more,
_class=item_class,
_id=item_id,
)
else:
if priority == current.messages["NONE"]:
priority = T("Unknown")
certainty = record["cap_info.certainty"]
severity = record["cap_info.severity"]
urgency = record["cap_info.urgency"]
msg_type = record["cap_alert.msg_type"]
sender_name = record["cap_info.sender_name"]
sent = record["cap_alert.sent"]
headline = "%s; %s, %s" % (msg_type, headline, location)
sub_heading = "%s %s" % (priority, event)
sub_headline = A(sub_heading,
_href = _href,
_target = "_blank",
)
if priority_row:
sub_headline["_style"] = "color: #%s" % (priority_row.color_code)
para = T("It is %(certainty)s and %(urgency)s with %(severity)s threat to life and property.") \
% dict(certainty=certainty, urgency=urgency, severity=severity)
issuer = "%s: %s" % (T("Issued by"), sender_name)
issue_date = "%s: %s" % (T("Issued on"), sent)
item = DIV(headline,
BR(),
sub_headline,
BR(),
para,
BR(),
issuer,
BR(),
issue_date,
BR(),
more,
_class=item_class,
_id=item_id,
)
return item
# =============================================================================
def add_area_from_template(area_id, alert_id):
"""
Add an area from a Template along with its components Location and Tag
"""
afieldnames = ("name",
"altitude",
"ceiling",
)
lfieldnames = ("location_id",
)
tfieldnames = ("tag",
"value",
"comments",
)
db = current.db
s3db = current.s3db
atable = s3db.cap_area
itable = s3db.cap_info
ltable = s3db.cap_area_location
ttable = s3db.cap_area_tag
# Create Area Record from Template
atemplate = db(atable.id == area_id).select(*afieldnames,
limitby=(0, 1)).first()
rows = db(itable.alert_id == alert_id).select(itable.id)
area_ids = []
for row in rows:
# @ToDo set_record_owner, update_super and/or onaccept
# Currently not required by SAMBRO template
adata = {"is_template": False,
"alert_id": alert_id,
"info_id": row.id,
}
for field in afieldnames:
adata[field] = atemplate[field]
aid = atable.insert(**adata)
# Add Area Location Components of Template
ltemplate = db(ltable.area_id == area_id).select(*lfieldnames)
for lrow in ltemplate:
ldata = {"area_id": aid,
"alert_id": alert_id,
}
for field in lfieldnames:
ldata[field] = lrow[field]
ltable.insert(**ldata)
# Add Area Tag Components of Template
ttemplate = db(ttable.area_id == area_id).select(*tfieldnames)
for row in ttemplate:
tdata = {"area_id": aid,
"alert_id": alert_id,
}
for field in tfieldnames:
tdata[field] = row[field]
ttable.insert(**tdata)
area_ids.append(aid)
return area_ids
# =============================================================================
class CAPImportFeed(S3Method):
"""
Import CAP alerts from a URL
"""
# -------------------------------------------------------------------------
@staticmethod
def apply_method(r, **attr):
"""
Apply method.
@param r: the S3Request
@param attr: controller options for this request
"""
if r.representation == "html":
T = current.T
request = current.request
response = current.response
title = T("Import from Feed URL")
# @ToDo: use Formstyle
form = FORM(
TABLE(
TR(TD(DIV(B("%s:" % T("URL")),
SPAN(" *", _class="req"))),
TD(INPUT(_type="text", _name="url",
_id="url", _value="")),
TD(),
),
TR(TD(B("%s: " % T("User"))),
TD(INPUT(_type="text", _name="user",
_id="user", _value="")),
TD(),
),
TR(TD(B("%s: " % T("Password"))),
TD(INPUT(_type="text", _name="password",
_id="password", _value="")),
TD(),
),
TR(TD(B("%s: " % T("Ignore Errors?"))),
TD(INPUT(_type="checkbox", _name="ignore_errors",
_id="ignore_errors")),
TD(),
),
TR(TD(),
TD(INPUT(_type="submit", _value=T("Import"))),
TD(),
)
)
)
response.view = "create.html"
output = dict(title=title,
form=form)
if form.accepts(request.vars, current.session):
form_vars = form.vars
url = form_vars.get("url", None)
if not url:
response.error = T("URL is required")
return output
# @ToDo: pass these credentials to the fetch below (currently unused)
username = form_vars.get("user", None)
password = form_vars.get("password", None)
try:
content = fetch(url)
except urllib2.URLError:
response.error = str(sys.exc_info()[1])
return output
except urllib2.HTTPError:
response.error = str(sys.exc_info()[1])
return output
File = StringIO(content)
stylesheet = os.path.join(request.folder, "static", "formats",
"cap", "import.xsl")
xml = current.xml
tree = xml.parse(File)
resource = current.s3db.resource("cap_alert")
s3xml = xml.transform(tree, stylesheet_path=stylesheet,
name=resource.name)
try:
resource.import_xml(s3xml,
ignore_errors=form_vars.get("ignore_errors", None))
except:
response.error = str(sys.exc_info()[1])
else:
import_count = resource.import_count
if import_count:
response.confirmation = "%s %s" % \
(import_count,
T("Alerts successfully imported."))
else:
response.information = T("No Alerts available.")
return output
else:
raise HTTP(501, current.ERROR.BAD_METHOD)
# -----------------------------------------------------------------------------
class cap_AssignArea(S3Method):
"""
Assign CAP area to an alert, allows (multi-)selection of Predefined areas
"""
def apply_method(self, r, **attr):
"""
Apply method.
@param r: the S3Request
@param attr: controller options for this request
"""
if not r.record:
# Must be called for a particular alert
r.error(404, current.ERROR.BAD_RECORD)
# The record ID of the alert the method is called for
alert_id = r.id
# Requires permission to update this alert
authorised = current.auth.s3_has_permission("update", "cap_alert",
record_id=alert_id)
if not authorised:
r.unauthorised()
T = current.T
s3db = current.s3db
response = current.response
# Filter to limit the selection of areas
area_filter = (FS("is_template") == True)
if r.http == "POST":
# Template areas have been selected
added = 0
post_vars = r.post_vars
if all([n in post_vars for n in ("assign", "selected", "mode")]):
selected = post_vars.selected
if selected:
selected = selected.split(",")
else:
selected = []
# Handle exclusion filter
if post_vars.mode == "Exclusive":
# URL filters
if "filterURL" in post_vars:
filters = S3URLQuery.parse_url(post_vars.filterURL)
else:
filters = None
query = area_filter & (~(FS("id").belongs(selected)))
aresource = s3db.resource("cap_area",
filter = query,
vars = filters)
rows = aresource.select(["id"], as_rows=True)
selected = [str(row.id) for row in rows]
for area_id in selected:
area_id = int(area_id.strip())
add_area_from_template(area_id, alert_id)
added += 1
current.session.confirmation = T("%(number)s assigned") % \
{"number": added}
if added > 0:
# Redirect to the list of areas of this alert
redirect(URL(args=[r.id, "area"], vars={}))
else:
# Return to the "assign" page
redirect(URL(args=r.args, vars={}))
elif r.http == "GET":
# Filter widgets (@todo: lookup from cap_area resource config?)
filter_widgets = []
# List fields
list_fields = ["id",
"name",
"event_type_id",
"priority",
]
# Data table
aresource = s3db.resource("cap_area", filter=area_filter)
totalrows = aresource.count()
get_vars = r.get_vars
if "pageLength" in get_vars:
display_length = get_vars["pageLength"]
if display_length == "None":
display_length = None
else:
display_length = int(display_length)
else:
display_length = 25
if display_length:
limit = 4 * display_length
else:
limit = None
# Datatable filter and sorting
query, orderby, left = aresource.datatable_filter(list_fields,
get_vars,
)
aresource.add_filter(query)
# Extract the data
data = aresource.select(list_fields,
start = 0,
limit = limit,
orderby = orderby,
left = left,
count = True,
represent = True,
)
filteredrows = data.numrows
# Instantiate the datatable
dt = S3DataTable(data.rfields, data.rows)
dt_id = "datatable"
# Bulk actions
dt_bulk_actions = [(T("Assign"), "assign")]
if r.representation == "html":
# Page load
# Disallow deletion from this table, and link all open-buttons
# to the respective area read page
aresource.configure(deletable = False)
profile_url = URL(c = "cap",
f = "area",
args = ["[id]", "read"],
)
S3CRUD.action_buttons(r,
deletable = False,
read_url = profile_url,
update_url = profile_url,
)
# Hide export icons
response.s3.no_formats = True
# Render the datatable (will be "items" in the output dict)
items = dt.html(totalrows,
filteredrows,
dt_id,
dt_ajax_url = URL(args = r.args,
extension="aadata",
vars={},
),
dt_bulk_actions = dt_bulk_actions,
dt_pageLength = display_length,
dt_pagination = "true",
dt_searching = "false",
)
# Filter form
if filter_widgets:
# Where to retrieve filtered data from:
get_vars = aresource.crud._remove_filters(r.get_vars)
filter_submit_url = r.url(vars=get_vars)
# Where to retrieve updated filter options from:
filter_ajax_url = URL(f="cap_area",
args=["filter.options"],
vars={},
)
get_config = aresource.get_config
filter_clear = get_config("filter_clear", True)
filter_formstyle = get_config("filter_formstyle", None)
filter_submit = get_config("filter_submit", True)
filter_form = S3FilterForm(filter_widgets,
clear = filter_clear,
formstyle = filter_formstyle,
submit = filter_submit,
ajax = True,
url = filter_submit_url,
ajaxurl = filter_ajax_url,
_class = "filter-form",
_id = "datatable-filter-form",
)
fresource = s3db.resource("cap_area")
ff = filter_form.html(fresource,
r.get_vars,
target = "datatable",
)
else:
ff = ""
output = {"items": items, # the datatable
"title": T("Add Areas"),
"list_filter_form": ff,
}
response.view = "list_filter.html"
return output
elif r.representation == "aadata":
# Ajax refresh
if "draw" in get_vars:
echo = int(get_vars.draw)
else:
echo = None
items = dt.json(totalrows,
filteredrows,
dt_id,
echo,
dt_bulk_actions=dt_bulk_actions,
)
response.headers["Content-Type"] = "application/json"
return items
else:
r.error(501, current.ERROR.BAD_FORMAT)
else:
r.error(405, current.ERROR.BAD_METHOD)
# -----------------------------------------------------------------------------
class cap_AreaRepresent(S3Represent):
""" Representation of CAP Area """
def __init__(self,
show_link=False,
multiple=False):
settings = current.deployment_settings
# Translation using cap_area_name & not T()
translate = settings.get_L10n_translate_cap_area()
if translate:
language = current.session.s3.language
if language == settings.get_L10n_default_language():
translate = False
super(cap_AreaRepresent,
self).__init__(lookup="cap_area",
show_link=show_link,
translate=translate,
multiple=multiple
)
# -------------------------------------------------------------------------
def lookup_rows(self, key, values, fields=None):
"""
Custom lookup method for CAP Area rows. The key and fields
parameters are not used, but are kept for API compatibility
reasons.
@param values: the cap_area IDs
"""
db = current.db
s3db = current.s3db
artable = s3db.cap_area
count = len(values)
if count == 1:
query = (artable.id == values[0])
else:
query = (artable.id.belongs(values))
fields = [artable.id,
artable.name,
]
if self.translate:
ltable = s3db.cap_area_name
fields += [ltable.name_l10n,
]
left = [ltable.on((ltable.area_id == artable.id) & \
(ltable.language == current.session.s3.language)),
]
else:
left = None
rows = db(query).select(left = left,
limitby = (0, count),
*fields)
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a single Row
@param row: the cap_area Row
"""
if self.translate:
name = row["cap_area_name.name_l10n"] or row["cap_area.name"]
else:
name = row["cap_area.name"]
if not name:
return self.default
return s3_unicode(name)
# END =========================================================================
|
mit
| 3,098,138,409,540,321,300
| 48.837234
| 517
| 0.431575
| false
| 5.156522
| false
| false
| false
|
maxamillion/product-definition-center
|
pdc/apps/component/serializers.py
|
1
|
23374
|
#
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
import json
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.utils import six
from django.utils.text import capfirst
from rest_framework import serializers
from rest_framework.validators import UniqueTogetherValidator
from pdc.apps.contact.models import Contact, ContactRole
from pdc.apps.contact.serializers import RoleContactSerializer
from pdc.apps.common.serializers import DynamicFieldsSerializerMixin, LabelSerializer, StrictSerializerMixin
from pdc.apps.common.fields import ChoiceSlugField
from pdc.apps.release.models import Release
from pdc.apps.common.hacks import convert_str_to_int
from .models import (GlobalComponent,
RoleContact,
ReleaseComponent,
Upstream,
BugzillaComponent,
ReleaseComponentGroup,
GroupType,
ReleaseComponentType,
ReleaseComponentRelationshipType,
ReleaseComponentRelationship)
from . import signals
__all__ = (
'GlobalComponentSerializer',
'ReleaseComponentSerializer',
'HackedContactSerializer',
'UpstreamSerializer',
'BugzillaComponentSerializer',
'GroupSerializer',
'GroupTypeSerializer'
)
def reverse_url(request, view_name, **kwargs):
return request.build_absolute_uri(reverse(viewname=view_name,
kwargs=kwargs))
class HackedContactSerializer(RoleContactSerializer):
"""
Can be used as a view-level serializer to encode/decode the contact data, or
as a field in the global/release component serializers.
Automatically replaces the url with /[global|release]-components/<instance_pk>/contacts/<pk>.
Automatically sets inherited = True when serializing a release component.
"""
def __init__(self, *args, **kwargs):
self.inherited = kwargs.pop('inherited', False)
self.view_name = kwargs.pop('view_name', 'globalcomponentcontact-detail')
context = kwargs.get('context', None)
self.instance_pk = None
self.view = None
# Set view/instance_pk when uses the class as a serializer.
if context:
self.view = context.get('view', None)
extra_kwargs = context.get('extra_kwargs', None)
if extra_kwargs:
self.instance_pk = extra_kwargs.get('instance_pk', None)
super(HackedContactSerializer, self).__init__(*args, **kwargs)
def to_representation(self, obj):
ret = super(HackedContactSerializer, self).to_representation(obj)
request = self.context.get('request', None)
url_kwargs = self.context.get('extra_kwargs', {})
# NOTE(xchu): The `instance_pk` is needed for building a valid url,
# so if not provided, we should raise `KeyError`.
instance_pk = url_kwargs['instance_pk']
ret['url'] = reverse_url(request, self.view_name, **{
'instance_pk': instance_pk,
'pk': obj.pk
})
if self.inherited and self.view_name == 'globalcomponentcontact-detail':
ret['inherited'] = True
return ret
def to_internal_value(self, data):
# Run StrictSerializerMixin's to_internal_value() to check if extra field exists.
super(HackedContactSerializer, self).to_internal_value(data)
request = self.context.get('request', None)
serializer = RoleContactSerializer(data=data,
many=not isinstance(data, dict),
context={'request': request})
kwargs = {}
kwargs['contact_role'] = data.get('contact_role')
kwargs.update(data.get('contact'))
try:
contact = RoleContact.specific_objects.get(**kwargs)
except (RoleContact.DoesNotExist, Contact.DoesNotExist, ContactRole.DoesNotExist):
# If we can't get RoleContact in database, validate the input data and create the RoleContact.
if serializer.is_valid(raise_exception=True):
contact = RoleContact.specific_objects.create(**kwargs)
if request and request.changeset:
model_name = ContentType.objects.get_for_model(contact).model
request.changeset.add(model_name,
contact.id,
'null',
json.dumps(contact.export()))
component_class = self.view.model
if component_class.objects.get(pk=self.instance_pk).contacts.filter(pk=contact.pk).exists():
model_name = six.text_type(capfirst(component_class._meta.verbose_name))
raise serializers.ValidationError({"detail": "%s contact with this %s and Contact already exists."
% (model_name, model_name)})
else:
return contact
def save(self, **kwargs):
"""
Save the deserialized object and return it.
"""
instance_pk = self.context['extra_kwargs']['instance_pk']
component_class = self.context['view'].model
component = component_class.objects.get(pk=instance_pk)
existed_contacts = component.contacts.all()
if isinstance(self.validated_data, list):
contacts = [self.get_object_from_db(item) for item in self.validated_data if item not in existed_contacts]
component.contacts.add(*contacts)
if self.validated_data['_deleted']:
for item in self.validated_data['_deleted']:
self.delete_object(item)
else:
contacts = self.get_object_from_db(self.validated_data)
component.contacts.add(contacts)
return contacts
def get_object_from_db(self, item):
contact = RoleContact.objects.get(**{
'contact_role_id': item.contact_role_id,
'contact_id': item.contact_id
})
return contact
class Meta:
model = RoleContact
fields = ('url', 'contact_role', 'contact')
# In order not to run parent's validators, set validators to []
validators = []
class HackedContactField(serializers.Field):
"""
HackedContactField is used in GlobalComponentSerializer/ReleaseComponentSerializer instead of HackedContactSerializer.
It has the ability to get_attribute() from GlobalComponentSerializer/ReleaseComponentSerializer.
"""
def __init__(self, view_name, *args, **kwargs):
self.view_name = view_name
super(HackedContactField, self).__init__(*args, **kwargs)
def to_representation(self, value):
serializer = HackedContactSerializer(value, many=True, context=self.context, view_name=self.view_name)
return serializer.data
def get_attribute(self, obj):
"""
Get attribute from the serializer which uses this field.
@param obj: The model object related to the serializer.
"""
# NOTE(xchu): The `instance_pk` is needed for building a valid url,
# it's not provided when used as a field, so we should inject one.
if 'extra_kwargs' not in self.context or 'instance_pk' not in self.context['extra_kwargs']:
self.context['extra_kwargs'] = {'instance_pk': obj.pk}
return obj.contacts.all()
class UpstreamSerializer(StrictSerializerMixin, serializers.ModelSerializer):
class Meta:
model = Upstream
fields = ('homepage', 'scm_type', 'scm_url')
class UpstreamRelatedField(serializers.RelatedField):
def to_representation(self, value):
serializer = UpstreamSerializer(value)
return serializer.data
def to_internal_value(self, value):
request = self.context.get('request', None)
if isinstance(value, dict):
try:
upstream = Upstream.objects.get(**value)
except Upstream.DoesNotExist:
serializer = UpstreamSerializer(data=value, many=False, context={'request': request})
if serializer.is_valid(raise_exception=True):
upstream = serializer.save()
model_name = ContentType.objects.get_for_model(upstream).model
if request and request.changeset:
request.changeset.add(model_name,
upstream.id,
'null',
json.dumps(upstream.export()))
return upstream
else:
self._errors = serializer._errors
except Exception as err:
raise serializers.ValidationError("Can not get or create Upstream with the input(%s): %s." % (value, err))
else:
return upstream
else:
raise serializers.ValidationError("Unsupported upstream input.")
class GlobalComponentSerializer(DynamicFieldsSerializerMixin,
StrictSerializerMixin,
serializers.HyperlinkedModelSerializer):
contacts = HackedContactField(required=False, read_only=False, view_name='globalcomponentcontact-detail')
name = serializers.CharField(required=True,
max_length=100)
dist_git_path = serializers.CharField(required=False,
max_length=200,
allow_blank=True)
dist_git_web_url = serializers.URLField(required=False,
max_length=200)
labels = LabelSerializer(many=True, required=False, read_only=True)
upstream = UpstreamRelatedField(read_only=False, required=False, queryset=Upstream.objects.all())
class Meta:
model = GlobalComponent
fields = ('id', 'name', 'dist_git_path', 'dist_git_web_url', 'contacts', 'labels', 'upstream')
class TreeForeignKeyField(serializers.Field):
def to_representation(self, value):
request = self.context.get("request", None)
serializer = BugzillaComponentSerializer(value, context={'request': request, 'top_level': False})
return serializer.data
def to_internal_value(self, data):
if data.strip() == "":
raise serializers.ValidationError({'bugzilla_component': 'This field is required.'})
else:
components = data.strip("/").split("/")
len_components = len(components)
bc = None
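# Illustrative inputs (component names assumed): "kernel" looks up a
# top-level component with no parent, while "kernel/net/wifi" walks the
# chain kernel -> net -> wifi one level at a time, as below.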
# Only Bugzilla component name exists, parent component name will be considered as None.
if len_components == 1:
try:
bc = BugzillaComponent.objects.get(name=components[0], parent_component=None)
except BugzillaComponent.DoesNotExist:
raise serializers.ValidationError({'bugzilla_component': ("Bugzilla component with name %s does not exist."
% data)})
# Not only bugzilla Component, but also its ancestors exist.
if len_components > 1:
z = zip(components, components[1:])
root_bc_name, bc_name = z[0]
qs = BugzillaComponent.objects.filter(name=bc_name, parent_component__name=root_bc_name)
for _, bc_name in z[1:]:
qs = BugzillaComponent.objects.filter(name=bc_name, parent_component__in=qs)
if not qs:
raise serializers.ValidationError({'bugzilla_component': ("Bugzilla component with name %s does not exist."
% data)})
if len(qs) > 1:
raise serializers.ValidationError({'bugzilla_component': ("Duplicate Bugzilla component with name %s exists."
% data)})
if qs:
bc = qs[0]
return bc
class BugzillaComponentSerializer(DynamicFieldsSerializerMixin,
StrictSerializerMixin,
serializers.HyperlinkedModelSerializer):
"""
Bugzilla Component serializer.
"""
parent_component = serializers.CharField(required=False, max_length=200)
subcomponents = serializers.SerializerMethodField()
extra_fields = ['parent_pk']
def get_subcomponents(self, obj):
"""[string]"""
return obj.get_subcomponents()
class Meta:
model = BugzillaComponent
fields = ('id', 'name', 'parent_component', 'subcomponents')
class ReleaseField(serializers.SlugRelatedField):
def __init__(self, **kwargs):
super(ReleaseField, self).__init__(slug_field='release_id',
queryset=Release.objects.all(),
**kwargs)
def to_representation(self, value):
return {
'release_id': value.release_id,
'active': value.active
}
class ReleaseComponentTypeSerializer(StrictSerializerMixin, serializers.ModelSerializer):
class Meta:
model = ReleaseComponentType
fields = ('name',)
class ReleaseComponentSerializer(DynamicFieldsSerializerMixin,
StrictSerializerMixin,
serializers.HyperlinkedModelSerializer):
"""
ReleaseComponent Serializer
"""
release = ReleaseField(read_only=False)
global_component = serializers.SlugRelatedField(slug_field='name', read_only=False, queryset=GlobalComponent.objects.all())
contacts = HackedContactField(required=False, read_only=False, view_name='releasecomponentcontact-detail')
dist_git_branch = serializers.CharField(source='inherited_dist_git_branch', required=False)
dist_git_web_url = serializers.URLField(required=False, max_length=200, read_only=True)
bugzilla_component = TreeForeignKeyField(read_only=False, required=False, allow_null=True)
brew_package = serializers.CharField(required=False)
active = serializers.BooleanField(required=False, default=True)
type = ChoiceSlugField(slug_field='name', queryset=ReleaseComponentType.objects.all(), required=False,
allow_null=True)
def update(self, instance, validated_data):
signals.releasecomponent_serializer_extract_data.send(sender=self, validated_data=validated_data)
instance = super(ReleaseComponentSerializer, self).update(instance, validated_data)
signals.releasecomponent_serializer_post_update.send(sender=self, release_component=instance)
if hasattr(instance, 'pk'):
# reload to make sure changes in mapping are reflected
instance = ReleaseComponent.objects.get(pk=instance.pk)
# from view's doc, for ReleaseComponent,
# PUT and PATCH update works the same as each other except `name` is required when PUT update,
# so there will be not setattr here.
return instance
def create(self, validated_data):
signals.releasecomponent_serializer_extract_data.send(sender=self, validated_data=validated_data)
instance = super(ReleaseComponentSerializer, self).create(validated_data)
signals.releasecomponent_serializer_post_create.send(sender=self, release_component=instance)
return instance
def to_representation(self, instance):
ret = super(ReleaseComponentSerializer, self).to_representation(instance)
request = self.context.get("request", None)
# Include global component contacts - PDC-184
gcs = GlobalComponentSerializer(
instance=instance.global_component,
context={'request': request})
# Exclude global component contacts whose contact_role are already in release component contacts
gcc = gcs.data.get('contacts', [])
contacts = ret.get('contacts', [])
contact_role_lists = [contact['contact_role'] for contact in contacts]
for contact in gcc:
if contact['contact_role'] in contact_role_lists:
continue
contact['inherited'] = True
contacts.append(contact)
return ret
def to_internal_value(self, data):
# Raise an error explicitly when release and global_component are given.
if self.instance:
allowed_keys = self.get_allowed_keys() - set(['release', 'global_component'])
extra_fields = set(data.keys()) - allowed_keys
self.maybe_raise_error(extra_fields)
data['release'] = self.instance.release
data['global_component'] = self.instance.global_component
return super(ReleaseComponentSerializer, self).to_internal_value(data)
def validate_release(self, value):
if not isinstance(value, Release):
if isinstance(value, dict):
release_id = value['release_id']
else:
release_id = value
if release_id is None or release_id.strip() == "":
self._errors = {'release': 'This field is required.'}
return
release = get_object_or_404(Release, release_id=release_id)
if not release.is_active():
self._errors = {'release': 'Can not create a release component with an inactive release.'}
return
value = release
return value
def validate_global_component(self, value):
if not isinstance(value, GlobalComponent):
global_component_name = value
if global_component_name is None or global_component_name.strip() == "":
self._errors = {'global_component': 'This field is required.'}
return
gc = get_object_or_404(GlobalComponent, name=global_component_name)
value = gc
return value
def validate_name(self, value):
if value.strip() == "":
self._errors = {'name': 'This field is required.'}
return value
def validate_type(self, value):
if not isinstance(value, ReleaseComponentType):
if value is not None and value.strip() != "":
value = get_object_or_404(ReleaseComponentType, name=value.strip())
else:
raise serializers.ValidationError("This field can't be set to null.")
return value
class Meta:
model = ReleaseComponent
fields = ('id', 'release', 'bugzilla_component', 'brew_package', 'global_component',
'name', 'dist_git_branch', 'dist_git_web_url', 'active',
'contacts', 'type')
validators = [UniqueTogetherValidator(
queryset=ReleaseComponent.objects.all(),
fields=('name', 'release', 'global_component')
)]
class GroupTypeSerializer(StrictSerializerMixin, serializers.ModelSerializer):
description = serializers.CharField(required=False)
class Meta:
model = GroupType
fields = ('id', 'name', 'description')
class ReleaseComponentRelatedField(serializers.RelatedField):
doc_format = '{"id": "int", "name": "string"}'
def to_representation(self, value):
result = dict()
if value:
result['id'] = value.id
result['name'] = value.name
return result
def to_internal_value(self, data):
if not isinstance(data, dict):
raise serializers.ValidationError({'detail': "Input [%s] for ReleaseComponent must be a dict." % data})
if set(data.keys()) not in [set(['id']), set(['release', 'global_component', 'name'])]:
raise serializers.ValidationError(
{'detail': "Only accept ['id'] or ['release', 'global_component', 'name']"})
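# Illustrative accepted inputs (values assumed):
#     {"id": "42"}
#     {"release": "release-1.0", "global_component": "kernel",
#      "name": "kernel"}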
kwargs = dict()
if 'id' in data:
kwargs['id'] = convert_str_to_int(data.get('id'))
else:
kwargs['release__release_id'] = data.get('release')
kwargs['global_component__name'] = data.get('global_component')
kwargs['name'] = data.get('name')
try:
rc = ReleaseComponent.objects.get(**kwargs)
except ReleaseComponent.DoesNotExist:
raise serializers.ValidationError({'detail': "ReleaseComponent [%s] doesn't exist" % data})
return rc
class GroupSerializer(StrictSerializerMixin, serializers.ModelSerializer):
group_type = serializers.SlugRelatedField(
queryset=GroupType.objects.all(),
slug_field='name',
required=True
)
release = serializers.SlugRelatedField(
queryset=Release.objects.all(),
slug_field='release_id',
required=True
)
description = serializers.CharField(required=True)
components = ReleaseComponentRelatedField(
required=False,
many=True,
queryset=ReleaseComponent.objects.all()
)
def validate(self, value):
# # POST
if not self.instance:
components = value.get('components', [])
release = value.get('release')
# PUT or PATCH
else:
components = value.get('components', self.instance.components.all())
release = value.get('release', self.instance.release)
for component in components:
if component.release != release:
raise serializers.ValidationError({
'detail': 'Not allowed to group release_component[%s] <release[%s]> with another release[%s].'
% (component.name, component.release.release_id, release.release_id)})
return value
class Meta:
model = ReleaseComponentGroup
fields = ('id', 'group_type', 'description', 'release', 'components')
class RCRelationshipTypeSerializer(StrictSerializerMixin, serializers.ModelSerializer):
class Meta:
model = ReleaseComponentRelationshipType
fields = ('name',)
class RCForRelationshipRelatedField(ReleaseComponentRelatedField):
doc_format = '{"id": "int", "name": "string", "release": "Release.release_id"}'
def to_representation(self, value):
result = dict()
if value:
result['id'] = value.id
result['name'] = value.name
result['release'] = value.release.release_id
return result
class ReleaseComponentRelationshipSerializer(StrictSerializerMixin, serializers.ModelSerializer):
type = ChoiceSlugField(
queryset=ReleaseComponentRelationshipType.objects.all(),
slug_field='name',
required=True,
source='relation_type'
)
from_component = RCForRelationshipRelatedField(
required=True,
queryset=ReleaseComponent.objects.all()
)
to_component = RCForRelationshipRelatedField(
required=True,
queryset=ReleaseComponent.objects.all()
)
class Meta:
model = ReleaseComponentRelationship
fields = ('id', 'type', 'from_component', 'to_component')
|
mit
| 9,017,138,999,367,044,000
| 41.115315
| 129
| 0.609438
| false
| 4.636778
| false
| false
| false
|
iddl/git-events
|
messages.py
|
1
|
1641
|
import sys
from termcolor import colored
class Messages():
LOGFILE = "git-events.log"
#Status and operations
RUNNING = 'Successfully started gitevents'
WAS_RUNNING = 'Gitevents is already running'
NOT_RUNNING = 'Git-events is not running'
STOPPED = 'Successfully stopped gitevents'
#Errors
INCOMPATIBLE_OS = 'Your OS is not compatible with Git events'
GITHUB_API_ERROR = 'I\'m unable to access your GitHub account, please check your internet connection and GitHub access token'
GITHUB_LOGIN_ERROR = 'Unable to login. Wrong username/password ?'
CONFIGURATION_ERROR = 'Please configure cfg.ini before starting'
#Success
ACCESS_TOKEN_SET = 'Successfully set access token'
INTERVAL_SET = 'Successfully set polling interval'
#Setup
INPUT_USERNAME = 'Please type your Github account name: '
INPUT_PASSWORD = 'Please type your Github account password: '
SETUP_FAIL = 'Failed to create Github access token'
SETUP_SUCCESS = 'Successfully saved access token. You are all set.'
def abort(self, message=""):
print(colored(message, 'red'))
sys.exit(1)
def print_success(self, message=""):
print(colored(message, 'green'))
def log(self, message=""):
print(message)
def use_logfile(self):
# Open the log file once; opening it twice in 'w' mode would give
# two handles that truncate and overwrite each other.
logfile = open(self.LOGFILE, 'w')
sys.stdout = logfile
sys.stderr = logfile
class MessagesProvider():
def __init__(self):
self.instance = None
def get(self):
if self.instance is None:
self.instance = Messages()
return self.instance
messages_provider = MessagesProvider()
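# Typical usage (sketch, assuming this module is importable as `messages`):
#     from messages import messages_provider
#     m = messages_provider.get()
#     m.print_success(m.RUNNING)
#     m.abort(m.NOT_RUNNING)   # prints in red and exits with status 1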
|
apache-2.0
| -2,440,621,822,528,702,500
| 29.388889
| 129
| 0.669714
| false
| 3.992701
| false
| false
| false
|
carmenfdezb/osmscout-server
|
scripts/import/valhalla_country_pack.py
|
1
|
1633
|
import glob
from poly import parse_poly
from shapely.geometry import Polygon
# directories used for searching for packages
valhalla_meta_dir = 'valhalla/packages_meta'
valhalla_packages_dir = 'valhalla/packages'
valhalla_tiles_timestamp = "valhalla/tiles/timestamp"
version = "1"
def getsize(sname):
with open(sname, 'r') as f:
return int(f.read().split()[0])
def gettimestamp():
# All tiles share a single timestamp file, so no per-package argument is needed.
with open(valhalla_tiles_timestamp, 'r') as f:
return f.read().split()[0]
# call with the name of POLY filename
def country_pack(country_poly_fname):
country = parse_poly(country_poly_fname)
packs = []
size_compressed = 0
size = 0
ts = None
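# Each .bbox file is assumed to hold four whitespace-separated numbers
# giving two opposite corners of the tile's bounding rectangle; the
# rectangle is rebuilt below and tested for intersection with the
# country polygon.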
for bbox in glob.glob(valhalla_meta_dir + "/*.bbox"):
coors = []
for i in open(bbox, 'r'):
for k in i.split():
coors.append(float(k))
poly = Polygon( ( (coors[0], coors[1]), (coors[0], coors[3]),
(coors[2], coors[3]), (coors[2], coors[1]) ) )
if country.intersects(poly):
pname = bbox[len(valhalla_meta_dir)+1:-len(".bbox")]
packs.append(pname)
pdata = valhalla_packages_dir + "/" + bbox[len(valhalla_meta_dir)+1:-len(".bbox")] + ".tar"
size_compressed += getsize(pdata + '.size-compressed')
size += getsize(pdata + '.size')
ts = gettimestamp()
return { "packages": packs,
"timestamp": ts,
"version": version,
"size": str(size),
"size-compressed": str(size_compressed) }
if __name__ == '__main__':
print country_pack('hierarchy/europe/estonia/poly')
|
gpl-3.0
| -4,252,452,534,935,834,600
| 31.66
| 103
| 0.581751
| false
| 3.325866
| false
| false
| false
|
aglitke/vdsm
|
client/vdsClient.py
|
1
|
102506
|
# Copyright 2011 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
import sys
import ast
import getopt
import traceback
import xmlrpclib
import re
import socket
import pprint as pp
from vdsm import vdscli
try:
import vdsClientGluster as ge
_glusterEnabled = True
except ImportError:
_glusterEnabled = False
BLANK_UUID = '00000000-0000-0000-0000-000000000000'
STATUS_ERROR = {'status': {'code': 100, 'message': "ERROR"}}
# Storage Domain Types
UNKNOWN_DOMAIN = 0
NFS_DOMAIN = 1
FCP_DOMAIN = 2
ISCSI_DOMAIN = 3
LOCALFS_DOMAIN = 4
CIFS_DOMAIN = 5
# Volume Types
UNKNOWN_VOL = 0
PREALLOCATED_VOL = 1
SPARSE_VOL = 2
# Volume Format
UNKNOWN_FORMAT = 3
COW_FORMAT = 4
RAW_FORMAT = 5
# Volume Role
SHARED_VOL = 6
INTERNAL_VOL = 7
LEAF_VOL = 8
def validateArgTypes(args, conv, requiredArgsNumber=0):
if len(args) > len(conv) or len(args) < requiredArgsNumber:
raise ValueError("Wrong number of arguments provided, "
"expecting %d (%d required) got %d"
% (len(conv), requiredArgsNumber, len(args)))
for i in range(len(args)):
args[i] = conv[i](args[i])
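# Illustrative call (values assumed): validateArgTypes(['5', 'x'], [int, str], 1)
# coerces the list in place to [5, 'x']; a missing required argument or a
# surplus one raises ValueError.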
def fmt3(num):
for x in ['', 'KB', 'MB', 'GB', 'TB']:
if num < 1024:
return "%3.1f%s" % (num, x)
num /= 1024
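# e.g. fmt3(512) -> "512.0", fmt3(2048) -> "2.0KB" (note: Python 2
# integer division, so fmt3(1536) -> "1.0KB").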
def usage(cmd, full=True):
print "Usage: vdsClient [OPTIONS] <server> <command> [Command parameters]"
print "\nOptions"
print "-h\tDisplay this help"
print "-m\tList supported methods and their params (Short help)"
print "-s [--truststore path]\tConnect to server with SSL."
print "-o, --oneliner\tShow the key-val information in one line."
print "\tIf truststore path is not specified, use defaults."
print "\nCommands"
verbs = cmd.keys()
verbs.sort()
for entry in verbs:
if full:
print entry
for line in cmd[entry][1]:
print '\t' + line
else:
print entry + '\t' + cmd[entry][1][0]
def printConf(conf):
try:
print "\n" + conf['vmId']
print "\tStatus = " + conf['status']
except KeyError:
pass
for element in conf.keys():
if element not in ('vmId', 'status'):
print "\t%s = %s" % (element, conf[element])
def printDict(d, pretty=True):
keys = d.keys()
keys.sort()
for element in keys:
if pretty:
representation = pp.pformat(d[element]).replace(
'\n', '\n\t' + ' ' * len(element + ' = '))
else:
representation = d[element]
print "\t%s = %s" % (element, representation)
def printStats(statsList):
for conf in statsList:
printConf(conf)
class service:
def __init__(self):
self.useSSL = False
self.truststore = None
self.pretty = True
def do_connect(self, hostPort):
self.s = vdscli.connect(hostPort, self.useSSL, self.truststore)
def ExecAndExit(self, response, parameterName='none'):
if response['status']['code'] != 0:
print response['status']['message']
else:
if 'vmList' in response:
printConf(response['vmList'])
elif 'statsList' in response:
if parameterName != 'none':
print response['statsList'][0][parameterName]
else:
printStats(response['statsList'])
elif 'info' in response:
printDict(response['info'], self.pretty)
else:
printDict(response['status'], self.pretty)
sys.exit(response['status']['code'])
def do_create(self, args):
params = {}
drives = []
devices = []
cpuPinning = {}
confLines = []
confFile = open(args[0])
for line in confFile.readlines():
line = re.sub("\s+", '', line)
line = re.sub("\#.*", '', line)
if line:
confLines.append(line)
if len(args) > 1:
confLines.extend(args[1:])
for line in confLines:
if '=' in line:
param, value = line.split("=", 1)
if param == 'devices':
devices.append(self._parseDriveSpec(value))
elif param == 'drive':
drives.append(self._parseDriveSpec(value))
elif param == 'cpuPinning':
cpuPinning, rStr = self._parseNestedSpec(value)
elif param.startswith('custom_'):
if 'custom' not in params:
params['custom'] = {}
params['custom'][param[7:]] = value
else:
if param in ('cdrom', 'floppy'):
value = self._parseDriveSpec(value)
params[param] = value
else:
params[line.strip()] = ''
if cpuPinning:
params['cpuPinning'] = cpuPinning
if drives:
params['drives'] = drives
if devices:
params['devices'] = devices
# Backward compatibility for vdsClient users
if 'vt' in params:
params['kvmEnable'] = params['vt']
if 'imageFile' in params:
params['hda'] = params['imageFile']
drives = ['hdd', 'hdc', 'hdb']
if 'moreImages' in params:
for image in params['moreImages'].split(','):
params[drives.pop()] = image
if 'sysprepInf' in params:
infFile = open(params['sysprepInf'], 'rb')
try:
params['sysprepInf'] = xmlrpclib.Binary(infFile.read())
finally:
infFile.close()
return self.ExecAndExit(self.s.create(params))
def vmUpdateDevice(self, args):
params = self._eqSplit(args[1:])
if 'portMirroring' in params:
params['portMirroring'] = [net for net in params['portMirroring']
.split(',') if net != '']
return self.ExecAndExit(self.s.vmUpdateDevice(args[0], params))
def hotplugNic(self, args):
nic = self._parseDriveSpec(args[1])
nic['type'] = 'interface'
params = {'vmId': args[0], 'nic': nic}
return self.ExecAndExit(self.s.hotplugNic(params))
def hotunplugNic(self, args):
nic = self._parseDriveSpec(args[1])
nic['type'] = 'interface'
params = {'vmId': args[0], 'nic': nic}
return self.ExecAndExit(self.s.hotunplugNic(params))
def hotplugDisk(self, args):
drive = self._parseDriveSpec(args[1])
drive['type'] = 'disk'
drive['device'] = 'disk'
params = {'vmId': args[0], 'drive': drive}
return self.ExecAndExit(self.s.hotplugDisk(params))
def hotunplugDisk(self, args):
drive = self._parseDriveSpec(args[1])
drive['type'] = 'disk'
drive['device'] = 'disk'
params = {'vmId': args[0], 'drive': drive}
return self.ExecAndExit(self.s.hotunplugDisk(params))
def do_changeCD(self, args):
vmId = args[0]
file = self._parseDriveSpec(args[1])
return self.ExecAndExit(self.s.changeCD(vmId, file))
def do_changeFloppy(self, args):
vmId = args[0]
file = self._parseDriveSpec(args[1])
return self.ExecAndExit(self.s.changeFloppy(vmId, file))
def do_list(self, args):
"""
Usage: vdsClient 0 list [table/long/ids] [vms:vmId1,vmId2]
"""
def _vmsParser(vmsParam):
vmsList = vmsParam.split(':')[1].strip()
if vmsList:
vmsList = [vm.strip() for vm in vmsList.split(',')]
else:
raise ValueError('Empty VMs list.')
return vmsList
vmListViews = ('table', 'long', 'ids')
view = 'long' # Default view
vms = []
if args:
if args[0].startswith('vms:'):
vms = _vmsParser(args[0])
else:
view = args[0]
if len(args) > 1 and args[1].startswith('vms:'):
vms = _vmsParser(args[1])
if view not in vmListViews:
raise ValueError('Invalid argument "%s".' % view)
if view == 'table':
allStats = {}
response = self.s.getAllVmStats()
if response['status']['code']:
return (response['status']['code'],
response['status']['message'])
for res in response['statsList']:
if not vms or res['vmId'] in vms:
allStats[res['vmId']] = res
response = self.s.list(True, vms)
if response['status']['code']:
return response['status']['code'], response['status']['message']
for conf in response['vmList']:
if view == 'long':
if 'sysprepInf' in conf:
conf['sysprepInf'] = '<<exists>>'
printConf(conf)
elif view == 'table':
vmId = conf['vmId']
if vmId not in allStats: # Avoid race.
continue
status = conf['status']
if allStats[vmId].get('monitorResponse') == '-1':
status += '*'
print ("%-36s %6s %-20s %-20s %-20s" %
(vmId, conf.get('pid', 'none'),
conf.get('vmName', '<< NO NAME >>'),
status, allStats[vmId].get('guestIPs', '')))
elif view == 'ids':
print conf['vmId']
sys.exit(response['status']['code'])
def do_destroy(self, args):
vmId = args[0]
response = self.s.destroy(vmId)
print response['status']['message']
sys.exit(response['status']['code'])
def do_pause(self, args):
vmId = args[0]
return self.ExecAndExit(self.s.pause(vmId))
def do_continue(self, args):
vmId = args[0]
response = self.s.cont(vmId)
return self.ExecAndExit(response)
def do_shutdown(self, args):
vmId, timeout, message = args
response = self.s.shutdown(vmId, timeout, message)
print response['status']['message']
sys.exit(response['status']['code'])
def do_setVmTicket(self, args):
if len(args) == 3:
vmId, otp64, secs = args[:3]
connAct = 'disconnect'
params = {}
else:
vmId, otp64, secs, connAct = args[:4]
params = {}
if (len(args) > 4):
params = self._parseDriveSpec(args[4])
return self.ExecAndExit(self.s.setVmTicket(vmId, otp64, secs, connAct,
params))
def do_reset(self, args):
vmId = args[0]
return self.ExecAndExit(self.s.reset(vmId))
def monitorCommand(self, args):
vmId = args[0]
cmd = args[1]
response = self.s.monitorCommand(vmId, cmd)
if response['status']['code']:
print response['status']['message']
else:
for line in response['output']:
print line
sys.exit(response['status']['code'])
def do_newDisk(self, args):
file, size = args
response = self.s.newDisk(file, size)
print response['status']['message']
sys.exit(response['status']['code'])
def do_sendkeys(self, args):
vmId = args[0]
return self.ExecAndExit(self.s.sendkeys(vmId, args[1:]))
def hibernate(self, args):
vmId, hiberVolHandle = args[0], args[1]
response = self.s.hibernate(vmId, hiberVolHandle)
print response['status']['message']
sys.exit(response['status']['code'])
def do_migrate(self, args):
params = {}
if len(args) > 0:
for line in args:
param, value = line.split("=")
params[param] = value
else:
raise Exception("Not enough parameters")
response = self.s.migrate(params)
print response['status']['message']
sys.exit(response['status']['code'])
def do_mStat(self, args):
vmId = args[0]
response = self.s.migrateStatus(vmId)
if not response['status']['code']:
print (response['status']['message'] +
' ' + str(response['progress']) + '%')
else:
print response['status']['message']
sys.exit(response['status']['code'])
def do_mCancel(self, args):
vmId = args[0]
response = self.s.migrateCancel(vmId)
print response['status']['message']
sys.exit(response['status']['code'])
def do_getCap(self, args):
return self.ExecAndExit(self.s.getVdsCapabilities())
def do_getHardware(self, args):
return self.ExecAndExit(self.s.getVdsHardwareInfo())
def do_getVdsStats(self, args):
return self.ExecAndExit(self.s.getVdsStats())
def do_getVmStats(self, args):
vmId = args[0]
if len(args) > 1:
return self.ExecAndExit(self.s.getVmStats(vmId), args[1])
else:
return self.ExecAndExit(self.s.getVmStats(vmId))
def do_getAllVmStats(self, args):
return self.ExecAndExit(self.s.getAllVmStats())
def desktopLogin(self, args):
vmId, domain, user, password = tuple(args)
response = self.s.desktopLogin(vmId, domain, user, password)
print response['status']['message']
sys.exit(response['status']['code'])
def desktopLock(self, args):
vmId = args[0]
response = self.s.desktopLock(vmId)
print response['status']['message']
sys.exit(response['status']['code'])
def desktopLogoff(self, args):
vmId, force = tuple(args)
response = self.s.desktopLogoff(vmId, force)
print response['status']['message']
sys.exit(response['status']['code'])
def sendHcCmd(self, args):
vmId, message = tuple(args)
response = self.s.sendHcCmdToDesktop(vmId, message)
print response['status']['message']
sys.exit(response['status']['code'])
def getDiskAlignment(self, args):
driveSpecs = {}
driveSpecs['device'] = 'disk'
vmId = BLANK_UUID if args[0] == '0' else args[0]
if len(args) > 2:
driveSpecs['poolID'] = args[1]
driveSpecs['domainID'] = args[2]
driveSpecs['imageID'] = args[3]
driveSpecs['volumeID'] = args[4]
else:
driveSpecs['GUID'] = args[1]
res = self.s.getDiskAlignment(vmId, driveSpecs)
if res['status']['code'] == 0:
for pName, aligned in res['alignment'].items():
print "\t%s = %s" % (pName, aligned)
else:
print "Error in scan disk alignment"
sys.exit(0)
######## IRS methods ####################
def createStorageDomain(self, args):
validateArgTypes(args, [int, str, str, str, int, int])
dom = self.s.createStorageDomain(*args)
if dom['status']['code']:
return dom['status']['code'], dom['status']['message']
return 0, ''
def setStorageDomainDescription(self, args):
sdUUID = args[0]
descr = args[1]
dom = self.s.setStorageDomainDescription(sdUUID, descr)
if dom['status']['code']:
return dom['status']['code'], dom['status']['message']
return 0, ''
def validateStorageDomain(self, args):
sdUUID = args[0]
dom = self.s.validateStorageDomain(sdUUID)
if dom['status']['code']:
return dom['status']['code'], dom['status']['message']
return 0, ''
def activateStorageDomain(self, args):
sdUUID = args[0]
spUUID = args[1]
dom = self.s.activateStorageDomain(sdUUID, spUUID)
if dom['status']['code']:
return dom['status']['code'], dom['status']['message']
return 0, ''
def deactivateStorageDomain(self, args):
sdUUID = args[0]
spUUID = args[1]
msdUUID = args[2]
mVer = int(args[3])
dom = self.s.deactivateStorageDomain(sdUUID, spUUID, msdUUID, mVer)
if dom['status']['code']:
return dom['status']['code'], dom['status']['message']
return 0, ''
def attachStorageDomain(self, args):
sdUUID = args[0]
spUUID = args[1]
dom = self.s.attachStorageDomain(sdUUID, spUUID)
if dom['status']['code']:
return dom['status']['code'], dom['status']['message']
return 0, ''
def detachStorageDomain(self, args):
sdUUID = args[0]
spUUID = args[1]
msdUUID = args[2]
mVer = int(args[3])
dom = self.s.detachStorageDomain(sdUUID, spUUID, msdUUID, mVer)
if dom['status']['code']:
return dom['status']['code'], dom['status']['message']
return 0, ''
def forcedDetachStorageDomain(self, args):
sdUUID = args[0]
spUUID = args[1]
dom = self.s.forcedDetachStorageDomain(sdUUID, spUUID)
if dom['status']['code']:
return dom['status']['code'], dom['status']['message']
return 0, ''
def formatStorageDomain(self, args):
sdUUID = args[0]
if len(args) > 1:
autoDetach = args[1]
else:
autoDetach = 'False'
dom = self.s.formatStorageDomain(sdUUID, autoDetach)
if dom['status']['code']:
return dom['status']['code'], dom['status']['message']
return 0, ''
def getStorageDomainInfo(self, args):
sdUUID = args[0]
info = self.s.getStorageDomainInfo(sdUUID)
if info['status']['code']:
return info['status']['code'], info['status']['message']
for element in info['info'].keys():
print "\t%s = %s" % (element, info['info'][element])
return 0, ''
def getStorageDomainStats(self, args):
sdUUID = args[0]
stats = self.s.getStorageDomainStats(sdUUID)
if stats['status']['code']:
return stats['status']['code'], stats['status']['message']
dt = stats['stats']['disktotal']
df = stats['stats']['diskfree']
print "\tdisktotal = %s (%s)" % (dt, fmt3(int(dt)))
print "\tdiskfree = %s (%s)" % (df, fmt3(int(df)))
return 0, ''
def getStorageDomainsList(self, args):
if len(args) > 0:
spUUID = args[0]
else:
spUUID = BLANK_UUID
domains = self.s.getStorageDomainsList(spUUID)
if domains['status']['code']:
return domains['status']['code'], domains['status']['message']
for entry in domains['domlist']:
print entry
return 0, ''
def getDeviceList(self, args):
devices = self.s.getDeviceList(*args)
if devices['status']['code']:
return devices['status']['code'], devices['status']['message']
pp.pprint(devices['devList'])
return 0, ''
def getDevicesVisibility(self, args):
devList = args[0].split(',')
res = self.s.getDevicesVisibility(devList, {})
if res['status']['code']:
return res['status']['code'], res['status']['message']
for guid, visible in res['visible'].iteritems():
print '\t%s = %s' % (guid, visible)
return 0, ''
def getVGList(self, args):
if len(args) > 0:
storageType = int(args[0])
vgs = self.s.getVGList(storageType)
else:
vgs = self.s.getVGList()
if vgs['status']['code']:
return vgs['status']['code'], vgs['status']['message']
for entry in vgs['vglist']:
print '============================'
for element in entry.keys():
print "%s = %s " % (element, entry[element])
return 0, ''
def getVGInfo(self, args):
vgUUID = args[0]
info = self.s.getVGInfo(vgUUID)
if info['status']['code']:
return info['status']['code'], info['status']['message']
#print info['info']
for entry in info['info'].keys():
print '============================'
if entry != 'pvlist':
print "%s = %s " % (entry, info['info'][entry])
else:
print 'pvlist:'
for item in info['info'][entry]:
for i in item.keys():
print "%s = %s " % (i, item[i]),
print
return 0, ''
def createVG(self, args):
sdUUID = args[0]
devList = args[1].split(',')
force = args[2].capitalize() == "True" if len(args) > 2 else False
dom = self.s.createVG(sdUUID, devList, force)
if dom['status']['code']:
return dom['status']['code'], dom['status']['message']
return 0, dom['uuid']
def removeVG(self, args):
vgUUID = args[0]
dom = self.s.removeVG(vgUUID)
if dom['status']['code']:
return dom['status']['code'], dom['status']['message']
return 0, ''
def extendStorageDomain(self, args):
sdUUID = args[0]
spUUID = args[1]
devList = args[2].split(',')
dom = self.s.extendStorageDomain(sdUUID, spUUID, devList)
if dom['status']['code']:
return dom['status']['code'], dom['status']['message']
return 0, ''
def discoverST(self, args):
portal = args[0].split(":")
ip = portal[0]
port = "3260"
if len(portal) > 1:
port = portal[1]
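# e.g. "192.168.0.10:3261" splits into ip "192.168.0.10" and port "3261";
# a bare "192.168.0.10" keeps the default port "3260".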
if len(args) == 1:
username = password = ""
else:
username = args[1]
password = args[2]
con = dict(id="", connection=ip, port=port, iqn="", portal="",
user=username, password=password)
targets = self.s.discoverSendTargets(con)
if targets['status']['code']:
return targets['status']['code'], targets['status']['message']
print "---- fullTargets"
for target in targets['fullTargets']:
print target
print "---- targets"
for target in targets['targets']:
print target
return 0, ''
def cleanupUnusedConnections(self, args):
res = self.s.cleanupUnusedConnections()
return res['status']['code'], res['status']['message']
def connectStorageServer(self, args):
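        # args[2] is a single comma-separated key=value list describing
        # one connection, e.g. (illustrative):
        #   'id=...,connection=server:/export,port=3260,iqn=...,user=,password='
        # The pairs are folded into one connection dict below.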
serverType = int(args[0])
spUUID = args[1]
params = args[2].split(',')
conList = []
con = {}
        for item in params:
            key, value = item.split('=')
            con[key] = value
        conList.append(con)
res = self.s.connectStorageServer(serverType, spUUID, conList)
if res['status']['code']:
return res['status']['code'], res['status']['message']
return 0, ''
def validateStorageServerConnection(self, args):
serverType = int(args[0])
spUUID = args[1]
params = args[2].split(',')
conList = []
con = {}
        for item in params:
            key, value = item.split('=')
            con[key] = value
        conList.append(con)
res = self.s.validateStorageServerConnection(serverType,
spUUID, conList)
if res['status']['code']:
return res['status']['code'], res['status']['message']
else:
for i in res['statuslist']:
print "Connection id %s - status %s" % (i['id'], i['status'])
return 0, ''
def disconnectStorageServer(self, args):
serverType = int(args[0])
spUUID = args[1]
params = args[2].split(',')
conList = []
con = {}
        for item in params:
            key, value = item.split('=')
            con[key] = value
        conList.append(con)
res = self.s.disconnectStorageServer(serverType, spUUID, conList)
if res['status']['code']:
return res['status']['code'], res['status']['message']
return 0, ''
def spmStart(self, args):
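        # Positional args (per the command table below): spUUID, prevID,
        # prevLVER, recoveryMode, scsiFencing [, maxHostID, version];
        # only the first five are mandatory, as validateArgTypes enforces.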
validateArgTypes(args, [str, int, int, int, str, int, int],
requiredArgsNumber=5)
status = self.s.spmStart(*args)
if status['status']['code']:
return status['status']['code'], status['status']['message']
return 0, status['uuid']
def spmStop(self, args):
spUUID = args[0]
status = self.s.spmStop(spUUID)
if status['status']['code']:
return status['status']['code'], status['status']['message']
return 0, ''
def getSpmStatus(self, args):
spUUID = args[0]
status = self.s.getSpmStatus(spUUID)
if status['status']['code']:
return status['status']['code'], status['status']['message']
for element in status['spm_st'].keys():
print "\t%s = %s" % (element, status['spm_st'][element])
return 0, ''
def fenceSpmStorage(self, args):
spUUID = args[0]
prevID = int(args[1])
prevLVER = int(args[2])
status = self.s.fenceSpmStorage(spUUID, prevID, prevLVER)
if status['status']['code']:
return status['status']['code'], status['status']['message']
for element in status['spm_st'].keys():
print "\t%s = %s" % (element, status['spm_st'][element])
return 0, ''
def updateVM(self, args):
spUUID = args[0]
params = args[1].split(',')
if len(args) >= 3:
sdUUID = args[2]
else:
sdUUID = BLANK_UUID
vmList = []
vm = {}
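        # 'imglist' values use '+' between image UUIDs because ',' already
        # separates the top-level key=value pairs; translate it back here.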
        for item in params:
            key, value = item.split('=')
            if key == 'imglist':
                value = value.replace('+', ',')
            vm[key] = value
        vmList.append(vm)
res = self.s.updateVM(spUUID, vmList, sdUUID)
if res['status']['code']:
return res['status']['code'], res['status']['message']
return 0, ''
def upgradeStoragePool(self, args):
validateArgTypes(args, [str, int], True)
status = self.s.upgradeStoragePool(*args)
if status['status']['code']:
return status['status']['code'], status['status']['message']
return 0, status['upgradeStatus']
def removeVM(self, args):
spUUID = args[0]
vmUUID = args[1]
if len(args) >= 3:
sdUUID = args[2]
else:
sdUUID = BLANK_UUID
res = self.s.removeVM(spUUID, vmUUID, sdUUID)
if res['status']['code']:
return res['status']['code'], res['status']['message']
return 0, ''
def reconstructMaster(self, args):
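        # args[3] maps domain UUIDs to their status as one comma-separated
        # list, e.g. (illustrative): 'sdUUID1=Active,sdUUID2=Attached'.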
spUUID = args[0]
poolName = args[1]
masterDom = args[2]
domList = args[3].split(",")
domDict = {}
for item in domList:
key, value = item.split('=')
domDict[key] = value
mVer = int(args[4])
if len(args) > 5:
st = self.s.reconstructMaster(spUUID, poolName, masterDom, domDict,
mVer, *map(int, args[5:]))
else:
st = self.s.reconstructMaster(spUUID, poolName, masterDom, domDict,
mVer)
if st['status']['code']:
return st['status']['code'], st['status']['message']
return 0, ''
def createStoragePool(self, args):
poolType = int(args[0])
spUUID = args[1]
poolName = args[2]
masterDom = args[3]
domList = args[4].split(",")
mVer = int(args[5])
pool = None
if len(args) > 6:
pool = self.s.createStoragePool(poolType, spUUID,
poolName, masterDom,
domList, mVer, *args[6:])
else:
pool = self.s.createStoragePool(poolType, spUUID,
poolName, masterDom,
domList, mVer)
if pool['status']['code']:
return pool['status']['code'], pool['status']['message']
return 0, ''
def destroyStoragePool(self, args):
spUUID = args[0]
ID = int(args[1])
scsi_key = args[2]
pool = self.s.destroyStoragePool(spUUID, ID, scsi_key)
if pool['status']['code']:
return pool['status']['code'], pool['status']['message']
return 0, ''
def connectStoragePool(self, args):
spUUID = args[0]
ID = int(args[1])
scsi_key = args[2]
if len(args) > 3:
master = args[3]
else:
master = BLANK_UUID
if len(args) > 4:
master_ver = int(args[4])
else:
master_ver = -1
pool = self.s.connectStoragePool(spUUID, ID, scsi_key,
master, master_ver)
if pool['status']['code']:
return pool['status']['code'], pool['status']['message']
return 0, ''
def disconnectStoragePool(self, args):
spUUID = args[0]
ID = int(args[1])
scsi_key = args[2]
pool = self.s.disconnectStoragePool(spUUID, ID, scsi_key)
if pool['status']['code']:
return pool['status']['code'], pool['status']['message']
return 0, ''
def refreshStoragePool(self, args):
spUUID = args[0]
msdUUID = args[1]
masterVersion = int(args[2])
pool = self.s.refreshStoragePool(spUUID, msdUUID, masterVersion)
if pool['status']['code']:
return pool['status']['code'], pool['status']['message']
return 0, ''
def setStoragePoolDescription(self, args):
spUUID = args[0]
descr = args[1]
dom = self.s.setStoragePoolDescription(spUUID, descr)
if dom['status']['code']:
return dom['status']['code'], dom['status']['message']
return 0, ''
def getStoragePoolInfo(self, args):
spUUID = args[0]
info = self.s.getStoragePoolInfo(spUUID)
if info['status']['code']:
return info['status']['code'], info['status']['message']
for element in info['info'].keys():
print "\t%s = %s" % (element, info['info'][element])
for element in info['dominfo'].keys():
print "\t%s = %s" % (element, info['dominfo'][element])
return 0, ''
def createVolume(self, args):
sdUUID = args[0]
spUUID = args[1]
imgUUID = args[2]
diskSize = int(args[3])
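        # 2097152 = 2**21: this presumably converts a size given in GiB
        # to 512-byte sectors (1 GiB = 2**21 sectors); an inference from
        # the factor, not stated in this file.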
convertFactor = 2097152
size = diskSize * convertFactor
volFormat = int(args[4])
preallocate = int(args[5])
diskType = int(args[6])
newVol = args[7]
descr = args[8]
if len(args) > 9:
srcImgUUID = args[9]
else:
srcImgUUID = BLANK_UUID
if len(args) > 10:
srcVolUUID = args[10]
else:
srcVolUUID = BLANK_UUID
image = self.s.createVolume(sdUUID, spUUID, imgUUID, size,
volFormat, preallocate,
diskType, newVol, descr,
srcImgUUID, srcVolUUID)
if image['status']['code']:
return image['status']['code'], image['status']['message']
return 0, image['uuid']
def getVolumeInfo(self, args):
sdUUID = args[0]
spUUID = args[1]
imgUUID = args[2]
volUUID = args[3]
info = self.s.getVolumeInfo(sdUUID, spUUID, imgUUID, volUUID)
if info['status']['code']:
return info['status']['code'], info['status']['message']
for element in info['info'].keys():
print "\t%s = %s" % (element, info['info'][element])
return 0, ''
def getVolumePath(self, args):
sdUUID = args[0]
spUUID = args[1]
imgUUID = args[2]
uuid = args[3]
info = self.s.getVolumePath(sdUUID, spUUID, imgUUID, uuid)
if info['status']['code']:
return info['status']['code'], info['status']['message']
return 0, info['path']
def getVolumeSize(self, args):
sdUUID = args[0]
spUUID = args[1]
imgUUID = args[2]
uuid = args[3]
size = self.s.getVolumeSize(sdUUID, spUUID, imgUUID, uuid)
if size['status']['code']:
return size['status']['code'], size['status']['message']
del size['status']
printDict(size, self.pretty)
return 0, ''
def extendVolumeSize(self, args):
spUUID, sdUUID, imgUUID, volUUID, newSize = args
status = self.s.extendVolumeSize(
spUUID, sdUUID, imgUUID, volUUID, newSize)
if status['status']['code']:
return status['status']['code'], status['status']['message']
return 0, ''
def setVolumeDescription(self, args):
sdUUID = args[0]
spUUID = args[1]
imgUUID = args[2]
volUUID = args[3]
descr = args[4]
status = self.s.setVolumeDescription(sdUUID, spUUID, imgUUID,
volUUID, descr)
if status['status']['code']:
return status['status']['code'], status['status']['message']
return 0, ''
def setVolumeLegality(self, args):
sdUUID = args[0]
spUUID = args[1]
imgUUID = args[2]
volUUID = args[3]
legality = args[4]
image = self.s.setVolumeLegality(sdUUID, spUUID, imgUUID,
volUUID, legality)
return image['status']['code'], image['status']['message']
def deleteVolume(self, args):
sdUUID = args[0]
spUUID = args[1]
imgUUID = args[2]
volUUID = args[3].split(',')
if len(args) > 4:
postZero = args[4]
else:
postZero = 'False'
if len(args) > 5:
force = args[5]
else:
force = 'False'
status = self.s.deleteVolume(sdUUID, spUUID, imgUUID,
volUUID, postZero, force)
if status['status']['code']:
return status['status']['code'], status['status']['message']
return 0, status['uuid']
def deleteVolumeByDescr(self, args):
sdUUID = args[1]
spUUID = args[2]
imgUUID = args[3]
volumes = self.s.getVolumesList(sdUUID, spUUID, imgUUID)
todelete = []
if volumes['status']['code']:
return volumes['status']['code'], volumes['status']['message']
print "Images to delete:"
for entry in volumes['uuidlist']:
info = self.s.getVolumeInfo(sdUUID, spUUID, imgUUID, entry)['info']
if info['description']:
if args[0] in info['description']:
print "\t" + entry + " : " + info['description']
todelete.append(entry)
        if not todelete:
return 0, 'Nothing to delete'
        var = raw_input("Are you sure? yes/no [no]: ")
if var == "yes":
print self.s.deleteVolume(sdUUID, spUUID, imgUUID,
todelete, 'false')
return 0, ''
def getVolumesList(self, args):
sdUUID = args[0]
spUUID = args[1]
if len(args) > 2:
images = [args[2]]
else:
            imgs = self.s.getImagesList(sdUUID)
            if imgs['status']['code'] == 0:
                images = imgs['imageslist']
            else:
                return imgs['status']['code'], imgs['status']['message']
for imgUUID in images:
volumes = self.s.getVolumesList(sdUUID, spUUID, imgUUID)
if volumes['status']['code']:
return volumes['status']['code'], volumes['status']['message']
for entry in volumes['uuidlist']:
message = entry + ' : '
res = self.s.getVolumeInfo(sdUUID, spUUID, imgUUID, entry)
                if 'info' not in res:
print 'ERROR:', entry, ':', res
continue
info = res['info']
if info['description']:
message += info['description'] + '. '
if BLANK_UUID not in info['parent']:
message += 'Parent is ' + info['parent']
print message
return 0, ''
def getFileStats(self, args):
assert args
validateArgTypes(args, [str, str])
response = self.s.getFileStats(*args)
if response['status']['code']:
return response['status']['code'], response['status']['message']
for key, value in response['fileStats'].iteritems():
print 'file: ', key, 'stats: ', value
return 0, ''
def getIsoList(self, args):
spUUID = args[0]
isos = self.s.getIsoList(spUUID)
if isos['status']['code']:
return isos['status']['code'], isos['status']['message']
print '------ ISO list with proper permissions only -------'
for entry in isos['isolist']:
print entry
return 0, ''
def getFloppyList(self, args):
spUUID = args[0]
floppies = self.s.getFloppyList(spUUID)
if floppies['status']['code']:
return floppies['status']['code'], floppies['status']['message']
for entry in floppies['isolist']:
print entry
return 0, ''
def getImagesList(self, args):
sdUUID = args[0]
images = self.s.getImagesList(sdUUID)
if images['status']['code']:
return images['status']['code'], images['status']['message']
for entry in images['imageslist']:
print entry
return 0, ''
def getImageDomainsList(self, args):
spUUID = args[0]
imgUUID = args[1]
domains = self.s.getImageDomainsList(spUUID, imgUUID)
if domains['status']['code']:
return domains['status']['code'], domains['status']['message']
for entry in domains['domainslist']:
print entry
return 0, ''
def getConnectedStoragePoolsList(self, args):
pools = self.s.getConnectedStoragePoolsList()
if pools['status']['code']:
return pools['status']['code'], pools['status']['message']
for entry in pools['poollist']:
print entry
return 0, ''
def getTaskInfo(self, args):
taskID = args[0]
status = self.s.getTaskInfo(taskID)
if status['status']['code']:
return status['status']['code'], status['status']['message']
for k, v in status['TaskInfo'].iteritems():
print '\t', k, '=', v
return 0, ''
def getAllTasksInfo(self, args):
status = self.s.getAllTasksInfo()
if status['status']['code']:
return status['status']['code'], status['status']['message']
for t, inf in status['allTasksInfo'].iteritems():
print t, ':'
for k, v in inf.iteritems():
print '\t', k, '=', v
return 0, ''
def getTaskStatus(self, args):
taskID = args[0]
status = self.s.getTaskStatus(taskID)
if status['status']['code']:
return status['status']['code'], status['status']['message']
print "TASK: %s STATUS: %s RESULT: %s MESSAGE: '%s'" % (
taskID,
status["taskStatus"]["taskState"],
status["taskStatus"]["taskResult"],
status["taskStatus"]["message"])
print "%s" % status # TODO
return 0, ''
def getAllTasksStatuses(self, args):
status = self.s.getAllTasksStatuses()
if status['status']['code']:
return status['status']['code'], status['status']['message']
print status # TODO
return 0, ''
def getAllTasks(self, args):
keys = []
if len(args) > 0:
keys = [x.strip() for x in args[0].split(',')]
status = self.s.getAllTasks(keys)
if status['status']['code']:
return status['status']['code'], status['status']['message']
for t, inf in status['tasks'].iteritems():
print t, ':'
for k, v in inf.iteritems():
print '\t', k, '=', v
return 0, ''
def stopTask(self, args):
taskID = args[0]
status = self.s.stopTask(taskID)
if status['status']['code']:
return status['status']['code'], status['status']['message']
print status # TODO
return 0, ''
def clearTask(self, args):
taskID = args[0]
status = self.s.clearTask(taskID)
if status['status']['code']:
return status['status']['code'], status['status']['message']
print status # TODO
return 0, ''
def revertTask(self, args):
taskID = args[0]
status = self.s.revertTask(taskID)
if status['status']['code']:
return status['status']['code'], status['status']['message']
print status # TODO
return 0, ''
def getParent(self, args):
sdUUID = args[0]
spUUID = args[1]
imgUUID = args[2]
uuid = args[3]
image = self.s.getVolumeInfo(sdUUID, spUUID, imgUUID, uuid)
if image['status']['code']:
return image['status']['code'], image['status']['message']
        if BLANK_UUID in image['info']['parent']:
return 1, 'No parent available'
return 0, image['info']['parent']
def deleteImage(self, args):
sdUUID = args[0]
spUUID = args[1]
imgUUID = args[2]
if len(args) > 3:
postZero = args[3]
else:
postZero = 'False'
if len(args) > 4:
force = args[4]
else:
force = 'False'
image = self.s.deleteImage(sdUUID, spUUID, imgUUID, postZero, force)
if image['status']['code']:
return image['status']['code'], image['status']['message']
return 0, image['uuid']
def moveImage(self, args):
spUUID = args[0]
srcDomUUID = args[1]
dstDomUUID = args[2]
imgUUID = args[3]
vmUUID = args[4]
op = int(args[5])
if len(args) > 6:
postZero = args[6]
else:
postZero = 'False'
if len(args) > 7:
force = args[7]
else:
force = 'False'
image = self.s.moveImage(spUUID, srcDomUUID, dstDomUUID,
imgUUID, vmUUID, op, postZero, force)
if image['status']['code']:
return image['status']['code'], image['status']['message']
return 0, image['uuid']
def cloneImageStructure(self, args):
spUUID, sdUUID, imgUUID, dstSdUUID = args
image = self.s.cloneImageStructure(spUUID, sdUUID, imgUUID, dstSdUUID)
if image['status']['code']:
return image['status']['code'], image['status']['message']
return 0, image['uuid']
def syncImageData(self, args):
spUUID, sdUUID, imgUUID, dstSdUUID, syncType = args
image = self.s.syncImageData(spUUID, sdUUID, imgUUID, dstSdUUID,
syncType)
if image['status']['code']:
return image['status']['code'], image['status']['message']
return 0, image['uuid']
def downloadImage(self, args):
methodArgs, spUUID, sdUUID, imgUUID, volUUID = args
methodArgsValue = ast.literal_eval(methodArgs)
image = self.s.downloadImage(
methodArgsValue, spUUID, sdUUID, imgUUID, volUUID)
if image['status']['code']:
return image['status']['code'], image['status']['message']
return 0, image['uuid']
def uploadImage(self, args):
methodArgs, spUUID, sdUUID, imgUUID, volUUID = args
methodArgsValue = ast.literal_eval(methodArgs)
image = self.s.uploadImage(
methodArgsValue, spUUID, sdUUID, imgUUID, volUUID)
if image['status']['code']:
return image['status']['code'], image['status']['message']
return 0, image['uuid']
def moveMultiImage(self, args):
spUUID = args[0]
srcDomUUID = args[1]
dstDomUUID = args[2]
imgList = args[3].split(",")
imgDict = {}
for item in imgList:
key, value = item.split('=')
imgDict[key] = value
vmUUID = args[4]
if len(args) > 5:
force = args[5]
else:
force = 'False'
image = self.s.moveMultipleImages(spUUID, srcDomUUID, dstDomUUID,
imgDict, vmUUID, force)
if image['status']['code']:
return image['status']['code'], image['status']['message']
return 0, image['uuid']
def copyImage(self, args):
sdUUID = args[0]
spUUID = args[1]
vmUUID = args[2]
srcImgUUID = args[3]
srcVolUUID = args[4]
dstImgUUID = args[5]
dstVolUUID = args[6]
descr = args[7]
if len(args) > 8:
dstSdUUID = args[8]
else:
dstSdUUID = BLANK_UUID
if len(args) > 9:
volType = int(args[9])
else:
volType = SHARED_VOL
if len(args) > 10:
volFormat = int(args[10])
else:
volFormat = UNKNOWN_VOL
if len(args) > 11:
preallocate = int(args[11])
else:
preallocate = UNKNOWN_VOL
if len(args) > 12:
postZero = args[12]
else:
postZero = 'False'
if len(args) > 13:
force = args[13]
else:
force = 'False'
image = self.s.copyImage(sdUUID, spUUID, vmUUID, srcImgUUID,
srcVolUUID, dstImgUUID, dstVolUUID,
descr, dstSdUUID, volType, volFormat,
preallocate, postZero, force)
if image['status']['code']:
return image['status']['code'], image['status']['message']
return 0, image['uuid']
def mergeSnapshots(self, args):
sdUUID = args[0]
spUUID = args[1]
vmUUID = args[2]
imgUUID = args[3]
ancestor = args[4]
successor = args[5]
if len(args) > 6:
postZero = args[6]
else:
postZero = 'False'
image = self.s.mergeSnapshots(sdUUID, spUUID, vmUUID, imgUUID,
ancestor, successor, postZero)
if image['status']['code']:
return image['status']['code'], image['status']['message']
return 0, image['uuid']
def acquireDomainLock(self, args):
spUUID = args[0]
sdUUID = args[1]
image = self.s.acquireDomainLock(spUUID, sdUUID)
if image['status']['code']:
return image['status']['code'], image['status']['message']
return 0, ''
def releaseDomainLock(self, args):
spUUID = args[0]
sdUUID = args[1]
image = self.s.releaseDomainLock(spUUID, sdUUID)
if image['status']['code']:
return image['status']['code'], image['status']['message']
return 0, ''
def prepareForShutdown(self, args):
stats = self.s.prepareForShutdown()
if stats['status']['code']:
return stats['status']['code'], stats['status']['message']
return 0, ''
def do_setLogLevel(self, args):
        assert len(args) == 1
        level = int(args[0])
stats = self.s.setLogLevel(level)
if stats['status']['code']:
return stats['status']['code'], stats['status']['message']
return 0, ''
def do_setMOMPolicy(self, policyFile):
stats = self.s.setMOMPolicy(policyFile)
if stats['status']['code']:
return stats['status']['code'], stats['status']['message']
return 0, ''
def do_setMOMPolicyParameters(self, args):
# convert arguments in the form of key=value to a dictionary
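        # e.g. (illustrative) ['balloonEnabled=True'] -> {'balloonEnabled': True};
        # note the values are eval()'d, so only trusted input is safe here.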
expand = lambda pair: (pair[0], eval(pair[1]))
key_value_store = dict([expand(arg.split("=", 1))
for arg in args
if "=" in arg])
stats = self.s.setMOMPolicyParameters(key_value_store)
if stats['status']['code']:
return stats['status']['code'], stats['status']['message']
return 0, ''
def do_getVmsInfo(self, args):
spUUID = args[0]
if len(args) >= 2:
sdUUID = args[1]
else:
sdUUID = BLANK_UUID
if len(args) >= 3:
vmList = args[2].split(",")
else:
vmList = []
infos = self.s.getVmsInfo(spUUID, sdUUID, vmList)
if infos['status']['code'] != 0:
return infos['status']['code'], infos['status']['message']
else:
message = ''
for entry in infos['vmlist']:
message += '\n' + '================================' + '\n'
message += entry + '=' + infos['vmlist'][entry]
if not message:
message = 'No VMs found.'
if isinstance(message, unicode):
print message.encode('utf-8')
else:
print message
return 0, ''
def do_getVmsList(self, args):
spUUID = args[0]
if len(args) >= 2:
sdUUID = args[1]
else:
sdUUID = BLANK_UUID
vms = self.s.getVmsList(spUUID, sdUUID)
if vms['status']['code'] != 0:
return vms['status']['code'], vms['status']['message']
else:
message = ''
for entry in vms['vmlist']:
message += '\n' + '================================' + '\n'
message += entry
if not message:
message = 'No VMs found.'
print message
return 0, ''
def _eqSplit(self, args):
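        # Illustrative example: ['a=1', 'b=2'] -> {'a': '1', 'b': '2'}.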
d = {}
for arg in args:
kv = arg.split('=', 1)
if len(kv) != 2:
raise ValueError("Invalid argument: %s" % arg)
k, v = kv
d[k] = v
return d
def _splitDriveSpecItems(self, item):
"""
BC is BC.
"""
key, value = item.split(":", 1)
if key in ("domain", "pool", "image", "volume"):
key = "%sID" % key
return key, value
def _parseNestedSpec(self, spec):
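        # Illustrative example: '{a:1,b:{c:2}}' parses to
        # ({'a': '1', 'b': {'c': '2'}}, ''), i.e. the dict plus any
        # unconsumed remainder of the spec.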
d = dict()
if spec[0] != '{':
raise Exception("_parseNestedSpec called with "
"non nested spec: '%s'" % spec)
spec = spec[1:]
while True:
            if not spec or '}' not in spec:
raise Exception("nested spec not terminated "
"with '}' in '%s'" % spec)
if spec[0] == '}':
return d, spec[1:]
# Split into first name + the rest
            if ':' not in spec:
raise Exception("missing name value separator "
"':' in '%s'" % spec)
name, spec = spec.split(":", 1)
# Determine the value
if spec[0] == '{':
val, spec = self._parseNestedSpec(spec)
d[name] = val
else:
# The value ends either with a ',' meaning it is followed by
# another name:value pair, or with a '}' ending the spec
i = 0
while spec[i] != ',' and spec[i] != '}':
i = i + 1
val = spec[:i]
spec = spec[i:]
d[name] = val
# If there is a comma behind the value remove it before continuing
if spec and spec[0] == ',':
spec = spec[1:]
def _parseDriveSpec(self, spec):
"""
'{' or ',' means dict. (!)
"""
if spec[0] == '{':
val, spec = self._parseNestedSpec(spec)
if spec:
raise Exception("Trailing garbage after spec: '%s'" % spec)
return val
if ',' in spec:
return dict(self._splitDriveSpecItems(item)
for item in spec.split(',') if item)
return spec
def do_setupNetworks(self, args):
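        # Illustrative invocation (shapes taken from the help text in the
        # command table below):
        #   setupNetworks networks='{net1:{nic:eth0,vlan:100}}' \
        #       bondings='{bond0:{nics:eth1+eth2}}' connectivityCheck=True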
params = self._eqSplit(args)
networks = self._parseDriveSpec(params.get('networks', '{}'))
bondings = self._parseDriveSpec(params.get('bondings', '{}'))
for k in ('networks', 'bondings'):
if k in params:
del params[k]
params['connectivityCheck'] = params.get('connectivityCheck', 'False')
for bond in bondings:
if 'nics' in bondings[bond]:
bondings[bond]['nics'] = bondings[bond]['nics'].split("+")
status = self.s.setupNetworks(networks, bondings, params)
return status['status']['code'], status['status']['message']
def do_addNetwork(self, args):
params = self._eqSplit(args)
        try:
            nics = filter(None, params['nics'].split(','))
        except KeyError:
            raise ValueError("missing required parameter 'nics'")
bridge = params.get('bridge', '')
vlan = params.get('vlan', '')
bond = params.get('bond', '')
for k in ['bridge', 'vlan', 'bond', 'nics']:
if k in params:
del params[k]
status = self.s.addNetwork(bridge, vlan, bond, nics, params)
return status['status']['code'], status['status']['message']
def do_editNetwork(self, args):
params = self._eqSplit(args)
        try:
            nics = params['nics'].split(',')
        except KeyError:
            raise ValueError("missing required parameter 'nics'")
oldBridge = params.get('oldBridge', '')
newBridge = params.get('newBridge', '')
vlan = params.get('vlan', '')
bond = params.get('bond', '')
for k in ['oldBridge', 'newBridge', 'vlan', 'bond', 'nics']:
if k in params:
del params[k]
status = self.s.editNetwork(oldBridge, newBridge, vlan, bond,
nics, params)
return status['status']['code'], status['status']['message']
def do_delNetwork(self, args):
params = self._eqSplit(args)
        try:
            nics = params['nics'].split(',')
        except KeyError:
            raise ValueError("missing required parameter 'nics'")
bridge = params.get('bridge', '')
vlan = params.get('vlan', '')
bond = params.get('bond', '')
for k in ['bridge', 'vlan', 'bond', 'nics']:
if k in params:
del params[k]
status = self.s.delNetwork(bridge, vlan, bond, nics, params)
return status['status']['code'], status['status']['message']
def do_setSafeNetworkConfig(self, args):
status = self.s.setSafeNetworkConfig()
return status['status']['code'], status['status']['message']
def do_fenceNode(self, args):
addr, port, agent, user, passwd, action = args[:6]
status = self.s.fenceNode(addr, port, agent, user, passwd, action,
*args[6:])
if action == 'status' and 'power' in status:
return status['status']['code'], status['power']
return status['status']['code'], status['status']['message']
def __image_status(self, imgUUID, res):
if "imagestatus" in res and "message" in res:
status = "OK"
if res["imagestatus"]:
status = "ERROR"
print ("Image %s status %s: %s (%s)" %
(imgUUID, status, res["message"], res["imagestatus"]))
if "badvols" in res:
for v, err in res["badvols"].iteritems():
print "\tVolume %s is bad: %s" % (v, err)
def __domain_status(self, sdUUID, res):
if "domainstatus" in res and "message" in res:
status = "OK"
if res["domainstatus"]:
status = "ERROR"
print ("Domain %s status %s: %s (%s)" %
(sdUUID, status, res["message"], res["domainstatus"]))
if "badimages" in res:
for i in res["badimages"]:
print "\tImage %s is bad" % (i)
self.__image_status(i, res["badimages"][i])
def __pool_status(self, spUUID, res):
if "poolstatus" in res and "message" in res:
status = "OK"
if res["poolstatus"]:
status = "ERROR"
print ("Pool %s status %s: %s (%s)" %
(spUUID, status, res["message"], res["poolstatus"]))
if "masterdomain":
print "\tMaster domain is %s" % res["masterdomain"]
if "spmhost":
print "\tThe SPM host id is %s" % res["spmhost"]
if "baddomains" in res:
for d in res["baddomains"]:
print "\tDomain %s is bad:" % (d)
self.__domain_status(d, res["baddomains"][d])
def repoStats(self, args):
stats = self.s.repoStats()
if stats['status']['code']:
print "count not get repo stats"
return int(stats['status']['code'])
for d in stats:
if d == "status":
continue
print 'Domain %s %s' % (d, str(stats[d]))
return 0, ''
def startMonitoringDomain(self, args):
sdUUID, hostID = args
status = self.s.startMonitoringDomain(sdUUID, hostID)
return status['status']['code'], status['status']['message']
def stopMonitoringDomain(self, args):
sdUUID, = args
status = self.s.stopMonitoringDomain(sdUUID)
return status['status']['code'], status['status']['message']
def snapshot(self, args):
vmUUID, sdUUID, imgUUID, baseVolUUID, volUUID = args
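        # The verb takes a single drive; the API expects a list of drive
        # dicts, so the one spec is wrapped in a list below.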
status = self.s.snapshot(vmUUID, [
{'domainID': sdUUID,
'imageID': imgUUID,
'baseVolumeID': baseVolUUID,
'volumeID': volUUID},
])
return status['status']['code'], status['status']['message']
def setBalloonTarget(self, args):
vmId = args[0]
target = int(args[1])
response = self.s.setBalloonTarget(vmId, target)
return response['status']['code'], response['status']['message']
def diskReplicateStart(self, args):
vmUUID, spUUID, sdUUID, imgUUID, volUUID, dstSdUUID = args
status = self.s.diskReplicateStart(
vmUUID,
{'poolID': spUUID, 'domainID': sdUUID, 'imageID': imgUUID,
'volumeID': volUUID},
{'poolID': spUUID, 'domainID': dstSdUUID, 'imageID': imgUUID,
'volumeID': volUUID})
return status['status']['code'], status['status']['message']
def diskReplicateFinish(self, args):
vmUUID, spUUID, sdUUID, imgUUID, volUUID, dstSdUUID = args
status = self.s.diskReplicateFinish(
vmUUID,
{'poolID': spUUID, 'domainID': sdUUID, 'imageID': imgUUID,
'volumeID': volUUID},
{'poolID': spUUID, 'domainID': dstSdUUID, 'imageID': imgUUID,
'volumeID': volUUID})
return status['status']['code'], status['status']['message']
def diskSizeExtend(self, args):
vmUUID, spUUID, sdUUID, imgUUID, volUUID, newSize = args
status = self.s.diskSizeExtend(
vmUUID, {
'poolID': spUUID, 'domainID': sdUUID, 'imageID': imgUUID,
'volumeID': volUUID, 'device': 'disk'
}, newSize)
if status['status']['code'] == 0:
print "New disk size:", status.get('size', None)
return status['status']['code'], status['status']['message']
if __name__ == '__main__':
if _glusterEnabled:
serv = ge.GlusterService()
else:
serv = service()
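    # Each entry below maps a CLI verb to a (handler, help_lines) tuple;
    # handlers return an (exit_code, message) pair.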
commands = {
'create': (serv.do_create,
('<configFile> [parameter=value, parameter=value, ......]',
                    'Creates new machine with the parameters given in the'
' command line overriding the ones in the config file',
'Example with config file: vdsClient someServer create'
' myVmConfigFile',
'Example with no file : vdsClient someServer create'
' /dev/null vmId=<uuid> memSize=256 '
'imageFile=someImage display=<vnc|qxl|qxlnc>',
'Parameters list: r=required, o=optional',
'r vmId=<uuid> : Unique identification for the '
'created VM. Any additional operation on the VM must '
'refer to this ID',
'o vmType=<qemu/kvm> : Virtual machine technology - '
'if not given kvm is default',
'o kvmEnable=<true/false> : run in KVM enabled mode '
'or full emulation - default is according to the VDS '
'capabilities',
'r memSize=<int> : Memory to allocate for this '
'machine',
'r macAddr=<aa:bb:cc:dd:ee:ff> : MAC address of the '
'machine',
'r display=<vnc|qxl|qxlnc> : send the machine '
'display to vnc, spice, or spice with no '
'image compression',
'o drive=pool:poolID,domain:domainID,image:imageID,'
'volume:volumeID[,boot:true,format:cow] : disk image '
'by UUIDs',
'o (deprecated) hda/b/c/d=<path> : Disk drive '
'images',
'o floppy=<image> : Mount the specified Image as '
'floppy',
'o cdrom=<path> : ISO image file to be mounted as '
'the powerup cdrom',
'o boot=<c/d/n> : boot device - drive C or cdrom or '
'network',
'o sysprepInf=/path/to/file: Launch with the '
'specified file as sysprep.inf in floppy',
#'o any parmeter=<any value> : parameter that is '
#'not familiar is passed as is to the VM',
#' and displayed with '
#'all other parameter. They can be used for '
#'additional',
#' information the user '
#'want to reserve with the machine'
'o acpiEnable : If present will remove the default '
'-no-acpi switch',
'o qgaEnable : use qemu-ga as guest agent',
'o tdf : If present will add the -rtc-td-hack '
'switch',
'o irqChip : If false, add the -no-kvm-irqchip '
'switch',
'o spiceSecureChannels : comma-separated list of '
                    'spice channels that will be encrypted',
'o spiceMonitors : number of emulated screen heads',
'o soundDevice : emulated sound device',
'o launchPaused : If "true", start qemu paused',
'o vmName : human-readable name of new VM',
'o tabletEnable : If "true", enable tablet input',
'o timeOffset : guest\'s start date, relative to '
'host\'s time, in seconds',
'o smp : number of vcpus',
'o smpCoresPerSocket, smpThreadsPerCore : vcpu '
'topology',
'o keyboardLayout : language code of client '
'keyboard',
'o cpuType : emulated cpu (with optional flags)',
'o emulatedMachine : passed as qemu\'s -M',
'o devices={name:val[, name:val, name:{name:val, '
'name:val}]} : add a fully specified device',
'o cpuPinning={vcpuid:pinning} cpu pinning in '
'libvirt-like format. see '
'http://libvirt.org/formatdomain.html#elementsCPUTuning'
)),
'vmUpdateDevice': (serv.vmUpdateDevice,
('<vmId> <devicespec>',
'Update a VM\'s device',
'Example: vmUpdateDevice xxxx deviceType=interface'
' alias=net0 linkActive=false',
'devicespec list: r=required, '
'o=optional',
'r devicetype: interface',
                           'o network: network name - No change if not '
'specified. Dummy bridge and link inactive if '
'empty string',
'o linkActive: bool - No change if not '
'specified',
'r alias: libvirt\'s vnic alias',
'o portMirroring: net[,net] - Only networks to '
                           'mirror. No change if not specified, no mirroring '
'if empty list.'
)),
'hotplugNic': (serv.hotplugNic,
('<vmId> <nicspec>',
'Hotplug NIC to existing VM',
'nicspec parameters list: r=required, o=optional',
'r device: bridge|sriov|vnlink|bridgeless.',
'r network: network name',
'r macAddr: mac address',
'r nicModel: pv|rtl8139|e1000',
'o bootOrder: <int> - global boot order across '
'all bootable devices'
)),
'hotunplugNic': (serv.hotunplugNic,
('<vmId> <nicspec>',
'Hotunplug NIC from existing VM',
'nicspec parameters list: r=required, o=optional',
'r device: bridge|sriov|vnlink|bridgeless.',
'r network: network name',
'r macAddr: mac address',
'r nicModel: pv|rtl8139|e1000',
'o bootOrder: <int> - global boot order across '
'all bootable devices'
)),
'hotplugDisk': (serv.hotplugDisk,
('<vmId> <drivespec>',
'Hotplug disk to existing VM',
'drivespec parameters list: r=required, o=optional',
                        'r iface:<ide|virtio> - disk interface (bus) type.',
'r index:<int> - disk index unique per interface '
'virtio|ide',
'r [pool:UUID,domain:UUID,image:UUID,volume:UUID]|'
'[GUID:guid]|[UUID:uuid]',
'r format: cow|raw',
'r readonly: True|False - default is False',
'r propagateErrors: off|on - default is off',
'o bootOrder: <int> - global boot order across '
'all bootable devices',
'o shared: exclusive|shared|none',
'o optional: True|False'
)),
'hotunplugDisk': (serv.hotunplugDisk,
                          ('<vmId> <drivespec>',
'Hotunplug disk from existing VM',
'drivespec parameters list: r=required, o=optional',
                           'r iface:<ide|virtio> - disk interface (bus) type.',
'r index:<int> - disk index unique per interface '
'virtio|ide',
'r [pool:UUID,domain:UUID,image:UUID,volume:UUID]|'
'[GUID:guid]|[UUID:uuid]',
'r format: cow|raw',
'r readonly: True|False - default is False',
'r propagateErrors: off|on - default is off',
'o bootOrder: <int> - global boot order across '
'all bootable devices',
'o shared: exclusive|shared|none',
'o optional: True|False'
)),
'changeCD': (serv.do_changeCD,
('<vmId> <fileName|drivespec>',
'Changes the iso image of the cdrom'
)),
'changeFloppy': (serv.do_changeFloppy,
('<vmId> <fileName|drivespec>',
'Changes the image of the floppy drive'
)),
'destroy': (serv.do_destroy,
('<vmId>',
'Stops the emulation and destroys the virtual machine.'
' This is not a shutdown.'
)),
'shutdown': (serv.do_shutdown,
('<vmId> <timeout> <message>',
'Stops the emulation and graceful shutdown the virtual'
' machine.'
)),
'list': (serv.do_list,
('[view] [vms:vmId1,vmId2]',
'Lists all available machines on the specified server.',
"Optional vms list, should start with 'vms:' and follow with"
" 'vmId1,vmId2,...'",
'Optional views:',
' "long" all available configuration info (Default).',
' "table" table output with the fields: vmId, vmName, '
'Status and IP.',
' "ids" all vmIds.'
)),
'pause': (serv.do_pause,
('<vmId>',
'Pauses the execution of the virtual machine without '
'termination'
)),
'continue': (serv.do_continue,
('<vmId>',
                      'Continues execution of a paused machine'
)),
'reset': (serv.do_reset,
('<vmId>',
'Sends reset signal to the vm'
)),
'setVmTicket': (serv.do_setVmTicket,
('<vmId> <password> <sec> [disconnect|keep|fail], '
'[params={}]',
'Set the password to the vm display for the next '
'<sec> seconds.',
'Optional argument instructs spice regarding '
'currently-connected client.',
'Optional additional parameters in dictionary format,'
' name:value,name:value'
)),
'migrate': (serv.do_migrate,
('vmId=<id> method=<offline|online> src=<host[:port]> '
'dst=<host[:port]> dstqemu=<host>',
'Migrate a desktop from src machine to dst host using '
'the specified ports'
)),
'migrateStatus': (serv.do_mStat,
('<vmId>',
'Check the progress of current outgoing migration'
)),
'migrateCancel': (serv.do_mCancel,
('<vmId>',
'(not implemented) cancel machine migration'
)),
'sendkeys': (serv.do_sendkeys,
('<vmId> <key1> ...... <keyN>',
'Send the key sequence to the vm'
)),
'getVdsCapabilities': (serv.do_getCap,
('',
'Get Capabilities info of the VDS'
)),
'getVdsCaps': (serv.do_getCap,
('',
'Get Capabilities info of the VDS'
)),
'getVdsHardwareInfo': (serv.do_getHardware,
('',
'Get hardware info of the VDS'
)),
'getVdsStats': (serv.do_getVdsStats,
('',
'Get Statistics info on the VDS'
)),
'getVmStats': (serv.do_getVmStats,
('<vmId>',
'Get Statistics info on the VM'
)),
'getAllVmStats': (serv.do_getAllVmStats,
('',
'Get Statistics info for all existing VMs'
)),
'getVGList': (serv.getVGList,
('storageType',
'List of all VGs.'
)),
'getDeviceList': (serv.getDeviceList,
('[storageType]',
'List of all block devices (optionally - matching '
'storageType).'
)),
'getDevicesVisibility': (serv.getDevicesVisibility,
('<devlist>',
'Get visibility of each device listed'
)),
'getDiskAlignment': (serv.getDiskAlignment,
('[<vmId> <poolId> <domId> <imgId> <volId>]',
'[<vmId> <GUID>]',
'Get alignment of each partition on the device'
)),
'getVGInfo': (serv.getVGInfo,
('<vgUUID>',
'Get info of VG'
)),
'createVG': (serv.createVG,
('<sdUUID> <devlist> [force]',
'Create a new VG from devices devlist (list of dev '
'GUIDs)'
)),
'removeVG': (serv.removeVG,
('<vgUUID>',
'remove the VG identified by its UUID'
)),
'extendStorageDomain': (serv.extendStorageDomain,
('<sdUUID> <spUUID> <devlist>',
'Extend the Storage Domain by adding devices'
' devlist (list of dev GUIDs)'
)),
'discoverST': (serv.discoverST,
('ip[:port] [username password]',
'Discover the available iSCSI targetnames on a '
'specified iSCSI portal'
)),
'cleanupUnusedConnections': (serv.cleanupUnusedConnections,
('',
'Clean up unused iSCSI storage '
'connections'
)),
'connectStorageServer': (serv.connectStorageServer,
('<server type> <spUUID> <conList (id=...,'
'connection=server:/export_path,portal=...,'
'port=...,iqn=...,user=...,password=...'
'[,initiatorName=...])>',
'Connect to a storage low level entity '
'(server)'
)),
'validateStorageServerConnection':
(serv.validateStorageServerConnection,
('<server type> <spUUID> <conList (id=...,'
'connection=server:/export_path,portal=...,port=...,iqn=...,'
'user=...,password=...[,initiatorName=...])>',
'Validate that we can connect to a storage server'
)),
'disconnectStorageServer': (serv.disconnectStorageServer,
('<server type> <spUUID> <conList (id=...,'
'connection=server:/export_path,'
'portal=...,port=...,iqn=...,user=...,'
'password=...[,initiatorName=...])>',
'Disconnect from a storage low level '
'entity (server)'
)),
'spmStart': (serv.spmStart,
('<spUUID> <prevID> <prevLVER> <recoveryMode> '
'<scsiFencing> <maxHostID> <version>',
'Start SPM functionality'
)),
'spmStop': (serv.spmStop,
('<spUUID>',
'Stop SPM functionality'
)),
'getSpmStatus': (serv.getSpmStatus,
('<spUUID>',
'Get SPM status'
)),
'acquireDomainLock': (serv.acquireDomainLock,
('<spUUID> <sdUUID>',
'acquire storage domain lock'
)),
'releaseDomainLock': (serv.releaseDomainLock,
('<spUUID> <sdUUID>',
'release storage domain lock'
)),
'fenceSpmStorage': (serv.fenceSpmStorage,
('<spUUID> <prevID> <prevLVER> ',
'fence SPM storage state'
)),
'updateVM': (serv.updateVM,
("<spUUID> <vmList> ('vm'=vmUUID,'ovf'='...','"
"imglist'='imgUUID1+imgUUID2+...') [sdUUID]",
'Update VM on pool or Backup domain'
)),
'upgradeStoragePool': (serv.upgradeStoragePool,
("<spUUID> <targetVersion>",
'Upgrade a pool to a new version (Requires a '
'running SPM)'
)),
'removeVM': (serv.removeVM,
('<spUUID> <vmUUID> [sdUUID]',
'Remove VM from pool or Backup domain'
)),
'reconstructMaster': (serv.reconstructMaster,
('<spUUID> <poolName> <masterDom> '
'<domDict>({sdUUID1=status,sdUUID2=status,...})'
' <masterVersion>, [<lockPolicy> '
'<lockRenewalIntervalSec> <leaseTimeSec> '
'<ioOpTimeoutSec> <leaseRetries>]',
'Reconstruct master domain'
)),
'createStoragePool': (serv.createStoragePool,
('<storage type> <spUUID> <poolName> <masterDom>'
' <domList>(sdUUID1,sdUUID2,...) '
'<masterVersion>, [<lockPolicy> '
'<lockRenewalIntervalSec> <leaseTimeSec> '
'<ioOpTimeoutSec> <leaseRetries>]',
'Create new storage pool with single/multiple '
'image data domain'
)),
'destroyStoragePool': (serv.destroyStoragePool,
('<spUUID> <id> <scsi-key>',
'Destroy storage pool'
)),
'connectStoragePool': (serv.connectStoragePool,
('<spUUID> <id> <scsi-key> [masterUUID] '
'[masterVer]',
'Connect a Host to specific storage pool'
)),
'disconnectStoragePool': (serv.disconnectStoragePool,
('<spUUID> <id> <scsi-key>',
'Disconnect a Host from the specific '
'storage pool'
)),
'refreshStoragePool': (serv.refreshStoragePool,
('<spUUID> <masterDom> <masterVersion>',
'Refresh storage pool'
)),
'setStoragePoolDescription': (serv.setStoragePoolDescription,
('<spUUID> <descr>',
'Set storage pool description'
)),
'getStoragePoolInfo': (serv.getStoragePoolInfo,
('<spUUID>',
'Get storage pool info'
)),
'createStorageDomain': (serv.createStorageDomain,
('<storage type> <domain UUID> <domain name> '
'<param> <domType> <version>',
'Creates new storage domain'
)),
'setStorageDomainDescription': (serv.setStorageDomainDescription,
('<domain UUID> <descr>',
'Set storage domain description'
)),
'validateStorageDomain': (serv.validateStorageDomain,
('<domain UUID>',
'Validate storage domain'
)),
'activateStorageDomain': (serv.activateStorageDomain,
('<domain UUID> <pool UUID>',
'Activate a storage domain that is already '
'a member in a storage pool.'
)),
'deactivateStorageDomain': (serv.deactivateStorageDomain,
('<domain UUID> <pool UUID> <new master '
'domain UUID> <masterVer>',
'Deactivate a storage domain. '
)),
'attachStorageDomain': (serv.attachStorageDomain,
('<domain UUID> <pool UUID>',
'Attach a storage domain to a storage pool.'
)),
'detachStorageDomain': (serv.detachStorageDomain,
('<domain UUID> <pool UUID> <new master domain'
' UUID> <masterVer>',
'Detach a storage domain from a storage pool.'
)),
'forcedDetachStorageDomain': (serv.forcedDetachStorageDomain,
('<domain UUID> <pool UUID>',
'Forced detach a storage domain from a '
'storage pool.'
)),
'formatStorageDomain': (serv.formatStorageDomain,
('<domain UUID> [<autoDetach>]',
'Format detached storage domain.'
)),
'getStorageDomainInfo': (serv.getStorageDomainInfo,
('<domain UUID>',
'Get storage domain info.'
)),
'getStorageDomainStats': (serv.getStorageDomainStats,
('<domain UUID>',
'Get storage domain statistics.'
)),
'getStorageDomainsList': (serv.getStorageDomainsList,
('<pool UUID>',
'Get storage domains list of pool or all '
'domains if pool omitted.'
)),
'createVolume': (serv.createVolume,
('<sdUUID> <spUUID> <imgUUID> <size> <volFormat> '
'<preallocate> <diskType> <newVolUUID> <descr> '
'<srcImgUUID> <srcVolUUID>',
'Creates new volume or snapshot'
)),
'extendVolumeSize': (serv.extendVolumeSize, (
'<spUUID> <sdUUID> <imgUUID> <volUUID> <newSize>',
'Extend the volume size (virtual disk size seen by the guest).',
)),
'getVolumePath': (serv.getVolumePath,
('<sdUUID> <spUUID> <imgUUID> <volume uuid>',
'Returns the path to the requested uuid'
)),
'setVolumeDescription': (serv.setVolumeDescription,
('<sdUUID> <spUUID> <imgUUID> <volUUID> '
'<Description>',
'Sets a new description to the volume'
)),
'setVolumeLegality': (serv.setVolumeLegality,
('<sdUUID> <spUUID> <imgUUID> <volUUID> '
'<Legality>',
'Set volume legality (ILLEGAL/LEGAL).'
)),
'deleteVolume': (serv.deleteVolume,
('<sdUUID> <spUUID> <imgUUID> <volUUID>,...,<volUUID>'
' <postZero> [<force>]',
                      'Deletes a volume if it is a leaf, otherwise returns an error'
)),
'deleteVolumeByDescr': (serv.deleteVolumeByDescr,
('<part of description> <sdUUID> <spUUID> '
'<imgUUID>',
'Deletes list of volumes(only leafs) '
'according to their description'
)),
'getVolumeInfo': (serv.getVolumeInfo,
('<sdUUID> <spUUID> <imgUUID> <volUUID>',
'Returns all the volume details'
)),
'getParent': (serv.getParent,
('<sdUUID> <spUUID> <imgUUID> <Disk Image uuid>',
'Returns the parent of the volume. Error if no parent'
' exists'
)),
'getVolumesList': (serv.getVolumesList,
('<sdUUID> <spUUID> [imgUUID]',
'Returns list of volumes of imgUUID or sdUUID if '
'imgUUID absent'
)),
'getVolumeSize': (serv.getVolumeSize,
('<sdUUID> <spUUID> <imgUUID> <volUUID>',
'Returns the apparent size and the true size of the'
' volume (in bytes)'
)),
'getFileStats': (serv.getFileStats,
('<sdUUID> [pattern][caseSensitive]',
'Returns files statistics from ISO domain'
)),
'getIsoList': (serv.getIsoList,
('<spUUID>',
'Returns list of all .iso images in ISO domain'
)),
'getFloppyList': (serv.getFloppyList,
('<spUUID>',
'Returns list of all .vfd images in ISO domain'
)),
'getImagesList': (serv.getImagesList,
('<sdUUID>',
'Get list of all images of specific domain'
)),
'getImageDomainsList': (serv.getImageDomainsList,
('<spUUID> <imgUUID> [datadomain=True]',
'Get list of all data domains in the pool '
'that contains imgUUID'
)),
'getConnectedStoragePoolsList': (serv.getConnectedStoragePoolsList,
('',
'Get storage pools list'
)),
'getTaskInfo': (serv.getTaskInfo,
('<TaskID>',
'get async task info'
)),
'getAllTasksInfo': (serv.getAllTasksInfo,
('',
'get info of all async tasks'
)),
'getTaskStatus': (serv.getTaskStatus,
('<TaskID>',
'get task status'
)),
'getAllTasksStatuses': (serv.getAllTasksStatuses,
('',
'list statuses of all async tasks'
)),
'getAllTasks': (serv.getAllTasks,
('[tags=\'\']',
'get status and information for all async tasks'
)),
'stopTask': (serv.stopTask,
('<TaskID>',
'stop async task'
)),
'clearTask': (serv.clearTask,
('<TaskID>',
'clear async task'
)),
'revertTask': (serv.revertTask,
('<TaskID>',
'revert async task'
)),
'prepareForShutdown': (serv.prepareForShutdown,
('', '')),
'setLogLevel': (serv.do_setLogLevel,
('<level> [logName][,logName]...', 'set log verbosity'
                                                       ' level (10=DEBUG, 50=CRITICAL)'
)),
'setMOMPolicy': (serv.do_setMOMPolicy,
('<policyfile>', 'set MOM policy')),
'setMOMPolicyParameters': (serv.do_setMOMPolicyParameters,
('key=python_code [key=python_code] ...',
'set variables for MOM policy fine '
'tuning')),
'deleteImage': (serv.deleteImage,
('<sdUUID> <spUUID> <imgUUID> [<postZero>] [<force>]',
'Delete Image folder with all volumes.',
)),
'moveImage': (serv.moveImage,
('<spUUID> <srcDomUUID> <dstDomUUID> <imgUUID> <vmUUID>'
                       ' <op = COPY_OP/MOVE_OP> [<postZero>] [<force>]',
'Move/Copy image between storage domains within same '
'storage pool'
)),
'cloneImageStructure': (serv.cloneImageStructure,
('<spUUID> <sdUUID> <imgUUID> <dstSdUUID>',
'Clone an image structure from a source '
'domain to a destination domain within the '
'same pool.'
)),
'syncImageData': (serv.syncImageData,
('<spUUID> <sdUUID> <imgUUID> <dstSdUUID> '
'<syncType>',
'Synchronize image data between storage domains '
'within same pool.'
)),
'uploadImage': (serv.uploadImage, (
'<methodArgs> <spUUID> <sdUUID> <imgUUID> [<volUUID>]',
        'Upload an image to a remote endpoint using the specified '
'methodArgs.'
)),
'downloadImage': (serv.downloadImage, (
'<methodArgs> <spUUID> <sdUUID> <imgUUID> [<volUUID>]',
        'Download an image from a remote endpoint using the specified '
        'methodArgs.'
)),
'moveMultiImage': (serv.moveMultiImage,
('<spUUID> <srcDomUUID> <dstDomUUID> '
'<imgList>({imgUUID=postzero,'
'imgUUID=postzero,...}) <vmUUID> [<force>]',
'Move multiple images between storage domains '
'within same storage pool'
)),
'copyImage': (serv.copyImage,
('<sdUUID> <spUUID> <vmUUID> <srcImgUUID> <srcVolUUID> '
'<dstImgUUID> <dstVolUUID> <dstDescr> <dstSdUUID> '
'<volType> <volFormat> <preallocate> [<postZero>] '
'[<force>]',
'Create new template/volume from VM.',
                       'It does this by collapsing and copying the whole '
                       'chain (baseVolUUID->srcVolUUID)'
)),
'mergeSnapshots': (serv.mergeSnapshots,
('<sdUUID> <spUUID> <vmUUID> <imgUUID> <Ancestor '
'Image uuid> <Successor Image uuid> [<postZero>]',
'Merge images from successor to ancestor.',
                       'The result is an image named like the successor '
                       'image, containing the data of the whole '
                       'successor->ancestor chain'
)),
'desktopLogin': (serv.desktopLogin,
('<vmId> <domain> <user> <password>',
'Login to vmId desktop using the supplied '
'credentials'
)),
'desktopLogoff': (serv.desktopLogoff,
('<vmId> <force>',
                       'Log off the current user. force should be set to '
'true/false'
)),
'desktopLock': (serv.desktopLock,
('<vmId>',
                     'Lock the current user session'
)),
'sendHcCmd': (serv.sendHcCmd,
('<vmId> <message>',
'Sends a message to a specific VM through Hypercall '
'channel'
)),
'hibernate': (serv.hibernate,
('<vmId> <hiberVolHandle>',
'Hibernates the desktop'
)),
'monitorCommand': (serv.monitorCommand,
('<vmId> <string>',
'Send a string containing monitor command to the '
'desktop'
)),
'getVmsInfo': (serv.do_getVmsInfo,
('<spUUID> [<sdUUID> [vmList](vmId1,vmId2,...)]',
'Return info of VMs from the pool or a backup domain '
'if its sdUUID is given. If vmList is also given, '
'return info for these VMs only.'
)),
'getVmsList': (serv.do_getVmsList,
('<spUUID> [sdUUID]',
'Get list of VMs from the pool or domain if sdUUID '
'given. Run only from the SPM.'
)),
'setupNetworks': (serv.do_setupNetworks,
('[connectivityCheck=False(default)|True] '
'[connectivityTimeout=<seconds>] '
'[<option>=<value>] '
'[networks=\'{<bridge>:{nic:<nic>,vlan:<number>,'
'bonding:<bond>,...}}\'] '
'[bondings=\'{<bond>:{nics:<nic>[+<nic>],..}}\']',
'Setup new configuration of multiple networks and '
'bonds.'
)),
'addNetwork': (serv.do_addNetwork,
('bridge=<bridge> [vlan=<number>] [bond=<bond>] '
'nics=nic[,nic]',
'Add a new network to this vds.'
)),
'delNetwork': (serv.do_delNetwork,
('bridge=<bridge> [vlan=<number>] [bond=<bond>] '
'nics=nic[,nic]',
'Remove a network (and parts thereof) from this vds.'
)),
'editNetwork': (serv.do_editNetwork,
('oldBridge=<bridge> newBridge=<bridge> [vlan=<number>]'
' [bond=<bond>] nics=nic[,nic]',
'Replace a network with a new one.'
)),
'setSafeNetworkConfig': (serv.do_setSafeNetworkConfig,
('',
'declare current network configuration as '
'"safe"'
)),
'fenceNode': (serv.do_fenceNode,
('<addr> <port> <agent> <user> <passwd> <action> '
'[<secure> [<options>]] \n\t<action> is one of '
'(status, on, off, reboot),\n\t<agent> is one of '
'(rsa, ilo, ipmilan, drac5, etc)\n\t<secure> '
'(true|false) may be passed to some agents',
'send a fencing command to a remote node'
)),
'repoStats': (serv.repoStats,
('',
'Get the health status of the monitored domains'
)),
'startMonitoringDomain': (serv.startMonitoringDomain,
('<sdUUID> <hostID>',
'Start SD: sdUUID monitoring with hostID'
)),
'stopMonitoringDomain': (serv.stopMonitoringDomain,
('<sdUUID>',
'Stop monitoring SD: sdUUID'
)),
'snapshot': (serv.snapshot,
('<vmId> <sdUUID> <imgUUID> <baseVolUUID> <volUUID>',
'Take a live snapshot'
)),
'setBalloonTarget': (serv.setBalloonTarget,
('<vmId> <target>',
"Set VM's balloon target"
)),
'diskReplicateStart': (serv.diskReplicateStart,
('<vmId> <spUUID> <sdUUID> <imgUUID> <volUUID> '
'<dstSdUUID>',
'Start live replication to the destination '
'domain'
)),
'diskReplicateFinish': (serv.diskReplicateFinish,
('<vmId> <spUUID> <sdUUID> <imgUUID> <volUUID>'
' <dstSdUUID>',
'Finish live replication to the destination '
'domain'
)),
'diskSizeExtend': (
serv.diskSizeExtend, (
'<vmId> <spUUID> <sdUUID> <imgUUID> <volUUID> <newSize>',
'Extends the virtual size of a disk'
)),
}
if _glusterEnabled:
commands.update(ge.getGlusterCmdDict(serv))
try:
opts, args = getopt.getopt(sys.argv[1:], "hmso", ["help", "methods",
"SSL", "truststore=",
"oneliner"])
for o, v in opts:
if o == "-h" or o == "--help":
usage(commands)
sys.exit(0)
if o == "-m" or o == "--methods":
usage(commands, False)
sys.exit(0)
if o == "-s" or o == "--SSL":
serv.useSSL = True
if o == "--truststore":
serv.truststore = v
if o == '-o' or o == '--oneliner':
serv.pretty = False
if len(args) < 2:
raise Exception("Need at least two arguments")
server, command = args[0:2]
if command not in commands:
raise Exception("Unknown command")
hostPort = vdscli.cannonizeHostPort(server)
except SystemExit as status:
sys.exit(status)
except Exception as e:
print "ERROR - %s" % (e)
usage(commands)
sys.exit(-1)
try:
serv.do_connect(hostPort)
        commandArgs = args[2:]
code, message = commands[command][0](commandArgs)
if code != 0:
code = 1
print message
sys.exit(code)
except (TypeError, IndexError, ValueError, AssertionError) as e:
print "Error using command:", e, "\n"
print command
for line in commands[command][1]:
print '\t' + line
sys.exit(-1)
except SystemExit as status:
sys.exit(status)
except socket.error as e:
if e[0] == 111:
print "Connection to %s refused" % hostPort
else:
traceback.print_exc()
sys.exit(-1)
except:
traceback.print_exc()
sys.exit(-1)
|
gpl-2.0
| -6,174,053,681,563,520,000
| 39.057054
| 79
| 0.468451
| false
| 4.414747
| false
| false
| false
|
mahabs/nitro
|
nssrc/com/citrix/netscaler/nitro/resource/config/ns/nsvariable.py
|
1
|
16571
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class nsvariable(base_resource) :
""" Configuration for variable resource. """
def __init__(self) :
self._name = ""
self._type = ""
self._scope = ""
self._iffull = ""
self._ifvaluetoobig = ""
self._ifnovalue = ""
self._init = ""
self._expires = 0
self._comment = ""
self._referencecount = 0
self.___count = 0
@property
def name(self) :
"""Variable name. This follows the same syntax rules as other default syntax expression entity names:
It must begin with an alpha character (A-Z or a-z) or an underscore (_).
The rest of the characters must be alpha, numeric (0-9) or underscores.
It cannot be re or xp (reserved for regular and XPath expressions).
It cannot be a default syntax expression reserved word (e.g. SYS or HTTP).
It cannot be used for an existing default syntax expression object (HTTP callout, patset, dataset, stringmap, or named expression).<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Variable name. This follows the same syntax rules as other default syntax expression entity names:
It must begin with an alpha character (A-Z or a-z) or an underscore (_).
The rest of the characters must be alpha, numeric (0-9) or underscores.
It cannot be re or xp (reserved for regular and XPath expressions).
It cannot be a default syntax expression reserved word (e.g. SYS or HTTP).
It cannot be used for an existing default syntax expression object (HTTP callout, patset, dataset, stringmap, or named expression).<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def type(self) :
"""Specification of the variable type; one of the following:
ulong - singleton variable with an unsigned 64-bit value.
text(value-max-size) - singleton variable with a text string value.
map(text(key-max-size),ulong,max-entries) - map of text string keys to unsigned 64-bit values.
map(text(key-max-size),text(value-max-size),max-entries) - map of text string keys to text string values.
where
value-max-size is a positive integer that is the maximum number of bytes in a text string value.
key-max-size is a positive integer that is the maximum number of bytes in a text string key.
max-entries is a positive integer that is the maximum number of entries in a map variable.
For a global singleton text variable, value-max-size <= 64000.
For a global map with ulong values, key-max-size <= 64000.
For a global map with text values, key-max-size + value-max-size <= 64000.
max-entries is a positive integer that is the maximum number of entries in a map variable. This has a theoretical maximum of 2^64-1, but in actual use will be much smaller, considering the memory available for use by the map.
Example:
map(text(10),text(20),100) specifies a map of text string keys (max size 10 bytes) to text string values (max size 20 bytes), with 100 max entries.<br/>Minimum length = 1.
"""
try :
return self._type
except Exception as e:
raise e
@type.setter
def type(self, type) :
"""Specification of the variable type; one of the following:
ulong - singleton variable with an unsigned 64-bit value.
text(value-max-size) - singleton variable with a text string value.
map(text(key-max-size),ulong,max-entries) - map of text string keys to unsigned 64-bit values.
map(text(key-max-size),text(value-max-size),max-entries) - map of text string keys to text string values.
where
value-max-size is a positive integer that is the maximum number of bytes in a text string value.
key-max-size is a positive integer that is the maximum number of bytes in a text string key.
max-entries is a positive integer that is the maximum number of entries in a map variable.
For a global singleton text variable, value-max-size <= 64000.
For a global map with ulong values, key-max-size <= 64000.
For a global map with text values, key-max-size + value-max-size <= 64000.
max-entries is a positive integer that is the maximum number of entries in a map variable. This has a theoretical maximum of 2^64-1, but in actual use will be much smaller, considering the memory available for use by the map.
Example:
map(text(10),text(20),100) specifies a map of text string keys (max size 10 bytes) to text string values (max size 20 bytes), with 100 max entries.<br/>Minimum length = 1
"""
try :
self._type = type
except Exception as e:
raise e
@property
def scope(self) :
"""Scope of the variable:
global - (default) one set of values visible across all Packet Engines and, in a cluster, all nodes.<br/>Default value: global<br/>Possible values = global.
"""
try :
return self._scope
except Exception as e:
raise e
@scope.setter
def scope(self, scope) :
"""Scope of the variable:
global - (default) one set of values visible across all Packet Engines and, in a cluster, all nodes.<br/>Default value: global<br/>Possible values = global
"""
try :
self._scope = scope
except Exception as e:
raise e
@property
def iffull(self) :
"""Action to perform if an assignment to a map exceeds its configured max-entries:
lru - (default) reuse the least recently used entry in the map.
undef - force the assignment to return an undefined (Undef) result to the policy executing the assignment.<br/>Default value: lru<br/>Possible values = undef, lru.
"""
try :
return self._iffull
except Exception as e:
raise e
@iffull.setter
def iffull(self, iffull) :
"""Action to perform if an assignment to a map exceeds its configured max-entries:
lru - (default) reuse the least recently used entry in the map.
undef - force the assignment to return an undefined (Undef) result to the policy executing the assignment.<br/>Default value: lru<br/>Possible values = undef, lru
"""
try :
self._iffull = iffull
except Exception as e:
raise e
@property
def ifvaluetoobig(self) :
"""Action to perform if an value is assigned to a text variable that exceeds its configured max-size,
or if a key is used that exceeds its configured max-size:
truncate - (default) truncate the text string to the first max-size bytes and proceed.
undef - force the assignment or expression evaluation to return an undefined (Undef) result to the policy executing the assignment or expression.<br/>Default value: truncate<br/>Possible values = undef, truncate.
"""
try :
return self._ifvaluetoobig
except Exception as e:
raise e
@ifvaluetoobig.setter
def ifvaluetoobig(self, ifvaluetoobig) :
"""Action to perform if an value is assigned to a text variable that exceeds its configured max-size,
or if a key is used that exceeds its configured max-size:
truncate - (default) truncate the text string to the first max-size bytes and proceed.
undef - force the assignment or expression evaluation to return an undefined (Undef) result to the policy executing the assignment or expression.<br/>Default value: truncate<br/>Possible values = undef, truncate
"""
try :
self._ifvaluetoobig = ifvaluetoobig
except Exception as e:
raise e
@property
def ifnovalue(self) :
"""Action to perform if on a variable reference in an expression if the variable is single-valued and uninitialized
or if the variable is a map and there is no value for the specified key:
init - (default) initialize the single-value variable, or create a map entry for the key and the initial value,
using the -init value or its default.
undef - force the expression evaluation to return an undefined (Undef) result to the policy executing the expression.<br/>Default value: init<br/>Possible values = undef, init.
"""
try :
return self._ifnovalue
except Exception as e:
raise e
@ifnovalue.setter
def ifnovalue(self, ifnovalue) :
"""Action to perform if on a variable reference in an expression if the variable is single-valued and uninitialized
or if the variable is a map and there is no value for the specified key:
init - (default) initialize the single-value variable, or create a map entry for the key and the initial value,
using the -init value or its default.
undef - force the expression evaluation to return an undefined (Undef) result to the policy executing the expression.<br/>Default value: init<br/>Possible values = undef, init
"""
try :
self._ifnovalue = ifnovalue
except Exception as e:
raise e
@property
def init(self) :
"""Initialization value for values in this variable. Default: 0 for ulong, NULL for text.
"""
try :
return self._init
except Exception as e:
raise e
@init.setter
def init(self, init) :
"""Initialization value for values in this variable. Default: 0 for ulong, NULL for text.
"""
try :
self._init = init
except Exception as e:
raise e
@property
def expires(self) :
"""Value expiration in seconds. If the value is not referenced within the expiration period it will be deleted. 0 (the default) means no expiration.<br/>Maximum length = 31622400.
"""
try :
return self._expires
except Exception as e:
raise e
@expires.setter
def expires(self, expires) :
"""Value expiration in seconds. If the value is not referenced within the expiration period it will be deleted. 0 (the default) means no expiration.<br/>Maximum length = 31622400
"""
try :
self._expires = expires
except Exception as e:
raise e
@property
def comment(self) :
"""Comments associated with this variable.
"""
try :
return self._comment
except Exception as e:
raise e
@comment.setter
def comment(self, comment) :
"""Comments associated with this variable.
"""
try :
self._comment = comment
except Exception as e:
raise e
@property
def referencecount(self) :
"""The number of references to the variable in expressions and assignments.
"""
try :
return self._referencecount
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(nsvariable_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.nsvariable
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
""" Use this API to add nsvariable.
"""
try :
if type(resource) is not list :
addresource = nsvariable()
addresource.name = resource.name
addresource.type = resource.type
addresource.scope = resource.scope
addresource.iffull = resource.iffull
addresource.ifvaluetoobig = resource.ifvaluetoobig
addresource.ifnovalue = resource.ifnovalue
addresource.init = resource.init
addresource.expires = resource.expires
addresource.comment = resource.comment
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ nsvariable() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].name = resource[i].name
addresources[i].type = resource[i].type
addresources[i].scope = resource[i].scope
addresources[i].iffull = resource[i].iffull
addresources[i].ifvaluetoobig = resource[i].ifvaluetoobig
addresources[i].ifnovalue = resource[i].ifnovalue
addresources[i].init = resource[i].init
addresources[i].expires = resource[i].expires
addresources[i].comment = resource[i].comment
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
""" Use this API to delete nsvariable.
"""
try :
if type(resource) is not list :
deleteresource = nsvariable()
if type(resource) != type(deleteresource):
deleteresource.name = resource
else :
deleteresource.name = resource.name
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ nsvariable() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ nsvariable() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
""" Use this API to fetch all the nsvariable resources that are configured on netscaler.
"""
try :
if not name :
obj = nsvariable()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = nsvariable()
obj.name = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [nsvariable() for _ in range(len(name))]
obj = [nsvariable() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = nsvariable()
obj[i].name = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
""" Use this API to fetch filtered set of nsvariable resources.
Filter string should be in JSON format, eg: "port:80,servicetype:HTTP".
"""
try :
obj = nsvariable()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
""" Use this API to count the nsvariable resources configured on NetScaler.
"""
try :
obj = nsvariable()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
""" Use this API to count filtered the set of nsvariable resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = nsvariable()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Iffull:
undef = "undef"
lru = "lru"
class Scope:
GLOBAL = "global"
class Ifvaluetoobig:
undef = "undef"
truncate = "truncate"
class Ifnovalue:
undef = "undef"
init = "init"
class nsvariable_response(base_response) :
def __init__(self, length=1) :
self.nsvariable = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.nsvariable = [nsvariable() for _ in range(length)]
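# --- Hedged usage sketch (illustrative addition, not part of the original SDK file) ---
# A minimal example of driving the nsvariable classmethods defined above. It
# assumes a reachable NetScaler appliance and the nitro_service client shipped
# with this SDK; the address, credentials and variable name are placeholders.
if __name__ == '__main__':
    from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
    client = nitro_service("10.0.0.1", "http")  # hypothetical NSIP and protocol
    client.login("nsroot", "nsroot")  # hypothetical credentials
    var = nsvariable()
    var.name = "my_counter"  # must follow the naming rules documented above
    var.type = "map(text(10),ulong,100)"  # text keys (max 10 bytes) -> ulong values, 100 entries max
    var.iffull = nsvariable.Iffull.lru  # reuse the least recently used entry when full
    nsvariable.add(client, var)  # create the variable on the appliance
    print(nsvariable.count(client))  # count configured nsvariable resources
    nsvariable.delete(client, "my_counter")  # clean up
    client.logout()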
|
apache-2.0
| 4,863,755,548,847,731,000
| 35.181223
| 227
| 0.704001
| false
| 3.525745
| true
| false
| false
|
rickiepark/openbidder
|
protobuf/protobuf-2.6.1/python/ez_setup.py
|
1
|
10431
|
#!python
# This file was obtained from:
# http://peak.telecommunity.com/dist/ez_setup.py
# on 2011/1/21.
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import sys
DEFAULT_VERSION = "0.6c11"
DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]
md5_data = {
'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
'setuptools-0.6c10-py2.3.egg': 'ce1e2ab5d3a0256456d9fc13800a7090',
'setuptools-0.6c10-py2.4.egg': '57d6d9d6e9b80772c59a53a8433a5dd4',
'setuptools-0.6c10-py2.5.egg': 'de46ac8b1c97c895572e5e8596aeb8c7',
'setuptools-0.6c10-py2.6.egg': '58ea40aef06da02ce641495523a0b7f5',
'setuptools-0.6c11-py2.3.egg': '2baeac6e13d414a9d28e7ba5b5a596de',
'setuptools-0.6c11-py2.4.egg': 'bd639f9b0eac4c42497034dec2ec0c2b',
'setuptools-0.6c11-py2.5.egg': '64c94f3bf7a72a13ec83e0b24f2749b2',
'setuptools-0.6c11-py2.6.egg': 'bfa92100bd772d5a213eedd356d64086',
'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20',
'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab',
'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53',
'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03',
'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a',
'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6',
'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a',
}
import sys, os
try: from hashlib import md5
except ImportError: from md5 import md5
def _validate_md5(egg_name, data):
if egg_name in md5_data:
digest = md5(data).hexdigest()
if digest != md5_data[egg_name]:
print((
"md5 validation of %s failed! (Possible download problem?)"
% egg_name
), file=sys.stderr)
sys.exit(2)
return data
def use_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
download_delay=15
):
"""Automatically find/download setuptools and make it available on sys.path
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end with
a '/'). `to_dir` is the directory where setuptools will be downloaded, if
it is not already available. If `download_delay` is specified, it should
be the number of seconds that will be paused before initiating a download,
should one be required. If an older version of setuptools is installed,
this routine will print a message to ``sys.stderr`` and raise SystemExit in
an attempt to abort the calling script.
"""
was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
def do_download():
egg = download_setuptools(version, download_base, to_dir, download_delay)
sys.path.insert(0, egg)
import setuptools; setuptools.bootstrap_install_from = egg
try:
import pkg_resources
except ImportError:
return do_download()
try:
pkg_resources.require("setuptools>="+version); return
except pkg_resources.VersionConflict as e:
if was_imported:
print((
"The required version of setuptools (>=%s) is not available, and\n"
"can't be installed while this script is running. Please install\n"
" a more recent version first, using 'easy_install -U setuptools'."
"\n\n(Currently using %r)"
) % (version, e.args[0]), file=sys.stderr)
sys.exit(2)
except pkg_resources.DistributionNotFound:
pass
del pkg_resources, sys.modules['pkg_resources'] # reload ok
return do_download()
def download_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
delay = 15
):
"""Download setuptools from a specified location and return its filename
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download attempt.
"""
import urllib.request, urllib.error, urllib.parse, shutil
egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
url = download_base + egg_name
saveto = os.path.join(to_dir, egg_name)
src = dst = None
if not os.path.exists(saveto): # Avoid repeated downloads
try:
from distutils import log
if delay:
log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
version, download_base, delay, url
); from time import sleep; sleep(delay)
log.warn("Downloading %s", url)
src = urllib.request.urlopen(url)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
data = _validate_md5(egg_name, src.read())
dst = open(saveto,"wb"); dst.write(data)
finally:
if src: src.close()
if dst: dst.close()
return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
"""Install or upgrade setuptools and EasyInstall"""
try:
import setuptools
except ImportError:
egg = None
try:
egg = download_setuptools(version, delay=0)
sys.path.insert(0,egg)
from setuptools.command.easy_install import main
return main(list(argv)+[egg]) # we're done here
finally:
if egg and os.path.exists(egg):
os.unlink(egg)
else:
if setuptools.__version__ == '0.0.1':
print((
"You have an obsolete version of setuptools installed. Please\n"
"remove it from your system entirely before rerunning this script."
), file=sys.stderr)
sys.exit(2)
req = "setuptools>="+version
import pkg_resources
try:
pkg_resources.require(req)
except pkg_resources.VersionConflict:
try:
from setuptools.command.easy_install import main
except ImportError:
from easy_install import main
main(list(argv)+[download_setuptools(delay=0)])
sys.exit(0) # try to force an exit
else:
if argv:
from setuptools.command.easy_install import main
main(argv)
else:
print("Setuptools version",version,"or greater has been installed.")
print('(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)')
def update_md5(filenames):
"""Update our built-in md5 registry"""
import re
for name in filenames:
base = os.path.basename(name)
f = open(name,'rb')
md5_data[base] = md5(f.read()).hexdigest()
f.close()
data = [" %r: %r,\n" % it for it in list(md5_data.items())]
data.sort()
repl = "".join(data)
import inspect
srcfile = inspect.getsourcefile(sys.modules[__name__])
f = open(srcfile, 'r'); src = f.read(); f.close()  # text mode, so the regex below matches a str
match = re.search("\nmd5_data = {\n([^}]+)}", src)
if not match:
print("Internal error!", file=sys.stderr)
sys.exit(2)
src = src[:match.start(1)] + repl + src[match.end(1):]
f = open(srcfile,'w')
f.write(src)
f.close()
if __name__=='__main__':
if len(sys.argv)>2 and sys.argv[1]=='--md5update':
update_md5(sys.argv[2:])
else:
main(sys.argv[1:])
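# --- Hedged usage sketch (illustrative addition, not part of the original script) ---
# A typical setup.py that bootstraps setuptools through this module, as the
# module docstring describes. The package name, version and package directory
# below are placeholders.
#
#     from ez_setup import use_setuptools
#     use_setuptools()
#
#     from setuptools import setup
#     setup(name="example-pkg",            # hypothetical distribution name
#           version="0.1",                 # hypothetical version
#           packages=["example_pkg"])      # hypothetical package directory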
|
mit
| 4,900,200,301,619,704,000
| 35.728873
| 86
| 0.654683
| false
| 2.990539
| false
| false
| false
|
ryfeus/lambda-packs
|
HDF4_H5_NETCDF/source2.7/pyhdf/VS.py
|
1
|
95700
|
# $Id: VS.py,v 1.4 2005-07-14 01:36:41 gosselin_a Exp $
# $Log: not supported by cvs2svn $
# Revision 1.3 2004/08/02 17:06:20 gosselin
# pyhdf-0.7.2
#
# Revision 1.2 2004/08/02 15:36:04 gosselin
# pyhdf-0.7-1
#
# Author: Andre Gosselin
# Maurice-Lamontagne Institute
# gosselina@dfo-mpo.gc.ca
"""
VS (Vdata table) API (:mod:`pyhdf.VS`)
======================================
A module of the pyhdf package implementing the VS (Vdata table)
API of the NCSA HDF4 library.
(see: hdf.ncsa.uiuc.edu)
Introduction
------------
VS is one of the modules composing pyhdf, a python package implementing
the NCSA HDF library and letting one manage HDF files from within a python
program. Two versions of the HDF library currently exist, version 4 and
version 5. pyhdf only implements version 4 of the library. Many
different APIs are to be found inside the HDF4 specification.
Currently, pyhdf implements just a few of those: the SD, VS and V APIs.
Other APIs should be added in the future (GR, AN, etc).
VS allows the definition of structured data tables inside an HDF file.
Those tables are designated as "vdatas" (the name has to do with data
associated with the "vertices" of geometrical models, the storage of which
the API was originally designed for). A vdata is composed of a fixed
number of columns (also called fields), where a column can store a fixed
number of data values, all of the same type. The number of values allowed
inside a field is called the "order" of the field. A table is composed of a
varying number of rows (also called records), a record representing the
sequence of values stored in each field of the vdata.
A vdata is associated with a descriptive name, and likewise each field of
the vdata. A vdata can also be tagged with a "class" to further describe the
vdata purpose. Records and fields are identified by a zero-based index.
An arbitrary number of attributes of different types can be attached to
a vdata as a whole, or to its individual fields. An attribute is a
(name, value) pair, where "value" can be of many types, and be either
single or multi-valued. The number of values stored in an attribute is
called the "order" of the attribute.
The following example illustrates a simple vdata that could be stored
inside an HDF file. See section "Programming models" for an example
program implementing this vdata.
INVENTORY (experimental status)
====== =========== === ======== ========
partid description qty wght(lb) price($)
====== =========== === ======== ========
Q1234 bolt 12 0.01 0.05
B5432 brush 10 0.4 4.25
S7613 scissor 2 0.2 3.75
====== =========== === ======== ========
The vdata is composed of 5 fields. 3 records are shown (of course, a vdata
can store much more than that). "INVENTORY" would be the vdata name, and
"partid", "description", etc, would be the field names. The data type varies
between fields. "partid" and "description" would be of "multicharacter" type
(aka "string"), "qty" would be a integer, and "wght" and "price" would be
floats. The text in parentheses could be stored as attributes. A "status"
attribute could be defined for the table as a whole, and given the
value "experimental". Likewise, a "unit" attribute could be associated
with fields "wght" and "price", and given the values "lb" and "$", resp.
The VS API allows one to create, locate and open a vdata inside an
HDF file, update and append records inside it, read records randomly
or sequentially, and access and update the vdata and field attributes.
Attributes can be read and written using the familiar python "dot
notation", and records can be read and written by indexing and slicing the
vdata as if it were a python sequence.
VS module key features
----------------------
VS key features are as follows.
- pyhdf implements almost every routine of the original VS API.
Only a few have been ignored, most of them being of a rare use:
- VSgetblocksize() / VSsetblocksize()
- VSsetnumblocks()
- VSlone
- It is quite straightforward to go from a C version to a python version
of a program accessing the VS API, and to learn VS usage by referring to
the C API documentation.
- A few high-level python methods have been developed to ease the
programmer's task. Of greatest interest are the following:
- Access to attributes through the familiar "dot notation".
- Indexing and slicing a vdata to read and write its records,
similarly to a python sequence.
- Easy retrieval of info on a vdata and its fields.
- Easy creation of vdatas.
Accessing the VS module
-----------------------
To access the VS module a python program can say one of:
>>> import pyhdf.VS # must prefix names with "pyhdf.VS."
>>> from pyhdf import VS # must prefix names with "VS."
>>> from pyhdf.VS import * # names need no prefix
This document assumes the last import style is used.
VS is not self-contained, and needs functionality provided by another
pyhdf module, namely the HDF module. This module must thus be imported
also:
>>> from .HDF import *
Package components
------------------
pyhdf is a proper Python package, eg a collection of modules stored under
a directory whose name is that of the package and which stores an
__init__.py file. Following the normal installation procedure, this
directory will be '<python-lib>/site-packages/pyhdf', where <python-lib>
stands for the python installation directory.
For each HDF API exists a corresponding set of modules.
The following modules are related to the VS API.
_hdfext
C extension module responsible for wrapping the HDF
C library for all python modules
hdfext
python module implementing some utility functions
complementing the _hdfext extension module
error
defines the HDF4Error exception
HDF
python module providing support to the VS module
VS
python module wrapping the VS API routines inside
an OOP framework
_hdfext and hdfext were generated using the SWIG preprocessor.
SWIG is however *not* needed to run the package. Those two modules
are meant to do their work in the background, and should never be called
directly. Only HDF and VS should be imported by the user program.
Prerequisites
-------------
The following software must be installed in order for VS to
work.
HDF (v4) library
pyhdf does *not* include the HDF4 library, which must
be installed separately.
HDF is available at:
"http://hdf.ncsa.uiuc.edu/obtain.html".
Numeric is also needed by the SD module. See the SD module documentation.
Documentation
-------------
pyhdf has been written so as to stick as closely as possible to
the naming conventions and calling sequences documented inside the
"HDF User s Guide" manual. Even if pyhdf gives an OOP twist
to the C API, the manual can be easily used as a documentary source
for pyhdf, once the class to which a function belongs has been
identified, and of course once requirements imposed by the Python
language have been taken into account. Consequently, this documentation
will not attempt to provide an exhaustive coverage of the HDF VS
API. For this, the user is referred to the above manual.
The documentation of each pyhdf method will indicate the name
of the equivalent routine as it is found inside the C API.
This document (in both its text and html versions) has been completely
produced using "pydoc", the Python documentation generator (which
made its debut in the 2.1 Python release). pydoc can also be used
as an on-line help tool. For example, to know everything about
the VS.VD class, say:
>>> from pydoc import help
>>> from pyhdf.VS import *
>>> help(VD)
To be more specific and get help only for the read() method of the
VD class:
>>> help(VD.read)
pydoc can also be called from the command line, as in::
% pydoc pyhdf.VS.VD # doc for the whole VD class
% pydoc pyhdf.VS.VD.read # doc for the VD.read method
Summary of differences between the pyhdf and C VS API
-----------------------------------------------------
Most of the differences between the pyhdf and C VS API can
be summarized as follows.
- In the C API, every function returns an integer status code, and values
computed by the function are returned through one or more pointers
passed as arguments.
- In pyhdf, error statuses are returned through the Python exception
mechanism, and values are returned as the method result. When the
C API specifies that multiple values are returned, pyhdf returns a
sequence of values, which are ordered similarly to the pointers in the
C function argument list.
Error handling
--------------
All errors reported by the C VS API with a SUCCESS/FAIL error code
are reported by pyhdf using the Python exception mechanism.
When the C library reports a FAIL status, pyhdf raises an HDF4Error
exception (a subclass of Exception) with a descriptive message.
Unfortunately, the C library is rarely informative about the cause of
the error. pyhdf does its best to try to document the error, but most
of the time cannot do more than saying "execution error".
VS needs support from the HDF module
------------------------------------
The VS module is not self-contained (contrary to the SD module).
It requires help from the HDF module, namely:
- the HDF.HDF class to open and close the HDF file, and initialize the
VS interface
- the HDF.HC class to provide different sorts of constants (opening modes,
data types, etc).
A program wanting to access HDF vdatas will almost always need to execute
the following minimal set of calls:
>>> from pyhdf.HDF import *
>>> from pyhdf.VS import *
>>> hdfFile = HDF(name, HC.xxx)# open HDF file
>>> vs = hdfFile.vstart() # initialize VS interface on HDF file
>>> ... # manipulate vdatas through "vs"
>>> vs.end() # terminate VS interface
>>> hdfFile.close() # close HDF file
Classes summary
---------------
pyhdf wraps the VS API using different python classes::
VS HDF VS interface
VD vdata
VDField vdata field
VDattr attribute (either at the vdata or field level)
In more detail::
VS The VS class implements the VS (Vdata) interface applied to an
HDF file. This class encapsulates the hdf instance, and all
the top-level functions of the VS API.
To create a VS instance, call the vstart() method of an
HDF instance.
methods:
constructors:
attach() open an existing vdata given its name or
reference number, or create a new one,
returning a VD instance
create() create a new vdata and define its structure,
returning a VD instance
creating and initializing a simple vdata
storedata() create a single-field vdata and initialize
its values
closing the interface
end() close the VS interface on the HDF file
searching
find() get a vdata reference number given its name
next() get the reference number of the vdata following
a given one
inquiry
vdatainfo() return info about all the vdatas in the
HDF file
VD The VD class describes a vdata. It encapsulates
the VS instance to which the vdata belongs, and the vdata
identifier.
To instantiate a VD class, call the attach() or create()
method of a VS class instance.
methods:
constructors
attr() create a VDAttr instance representing a
vdata attribute; "dot notation" can also be
used to access a vdata attribute
field() return a VDField instance representing a given
field of the vdata
closing vdata
detach() end access to the vdata
defining fields
fdefine() define the name, type and order of a new field
setfields() define the field names and field order for
the read() and write() methods; also used to
initialize the structure of a vdata previously
created with the VS.attach() method
reading and writing
note: a vdata can be indexed and sliced like a
python sequence
read() return the values of a number of records
starting at the current record position
seek() reset the current record position
seekend() seek past the last record
tell() return the current record position
write() write a number of records starting at the
current record position
inquiry
attrinfo() return info about all the vdata attributes
fexist() check if a vdata contains a given set of fields
fieldinfo() return info about all the vdata fields
findattr() locate an attribute, returning a VDAttr instance
if found
inquire() return info about the vdata
sizeof() return the size in bytes of one or more fields
VDField The VDField class represents a vdata field. It encapsulates
the VD instance to which the field belongs, and the field
index number.
To instantiate a VDField, call the field() method of a VD class
instance.
methods:
constructors:
attr() return a VDAttr instance representing an
attribute of the field; "dot notation"
can also be used to get/set an attribute.
inquiry
attrinfo() return info about all the field attributes
find() locate an attribute, returning a VDAttr
instance if found
VDAttr The VDAttr class encapsulates methods used to set and query
attributes defined at the level either of the vdata or the
vdata field.
To create an instance of this class, call the attr() or
findattr() methods of a VD instance (for vdata attributes),
or call the attr() or find() methods of a VDField instance
(for field attributes).
methods:
get / set
get() get the attribute value
set() set the attribute value
info
info() retrieve info about the attribute
Data types
----------
Data types come into play when first defining vdata fields and attributes,
and later when querying the definition of those fields and attributes.
Data types are specified using the symbolic constants defined inside the
HC class of the HDF module.
- CHAR and CHAR8 (equivalent): an 8-bit character.
- UCHAR, UCHAR8 and UINT8 (equivalent): unsigned 8-bit values (0 to 255)
- INT8: signed 8-bit values (-128 to 127)
- INT16: signed 16-bit values
- UINT16: unsigned 16 bit values
- INT32: signed 32 bit values
- UINT32: unsigned 32 bit values
- FLOAT32: 32 bit floating point values (C floats)
- FLOAT64: 64 bit floating point values (C doubles)
There is no explicit "string" type. To simulate a string, set the field or
attribute type to CHAR, and set the field or attribute "order" to
a value of 'n' > 1. This creates an "array of characters", close
to a string (except that strings will always be of length 'n', right-padded
with spaces if necessary).
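For example, to define a field holding strings of up to 8 characters
(a hedged sketch; 'name' is an illustrative field name):

>>> vd.fdefine('name', HC.CHAR8, 8) # an "8-character string" field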
Attribute access: low and high level
------------------------------------
The VS API allows setting attributes on vdatas and vdata fields. Attributes
can be of many types (int, float, char) of different bit lengths (8, 16, 32,
64 bits), and can be single or multi-valued. Values of a multi-valued
attribute must all be of the same type.
Attributes can be set and queried in two different ways. First, given a
VD instance (describing a vdata object) or a VDField instance (describing a
vdata field), the attr() method of that instance is called to create a
VDAttr instance representing the wanted attribute (possibly non existent).
The set() method of this VDAttr instance is then called to define the
attribute value, creating it if it does not already exist. The get() method
returns the current attribute value. Here is an example.
>>> from pyhdf.HDF import *
>>> from pyhdf.VS import *
>>> f = HDF('test.hdf', HC.WRITE) # Open file 'test.hdf' in write mode
>>> vs = f.vstart() # init vdata interface
>>> vd = vs.attach('vtest', 1) # attach vdata 'vtest' in write mode
>>> attr = vd.attr('version') # prepare to define the 'version' attribute
# on the vdata
>>> attr.set(HC.CHAR8,'1.0') # set attribute 'version' to string '1.0'
>>> print(attr.get()) # get and print attribute value
>>> fld = vd.field('fld1') # obtain a field instance for field 'fld1'
>>> attr = fld.attr('range') # prepare to define attribute 'range' on
# this field
>>> attr.set(HC.INT32,(-10, 15)) # set attribute 'range' to a pair of ints
>>> print(attr.get()) # get and print attribute value
>>> vd.detach() # "close" the vdata
>>> vs.end() # terminate the vdata interface
>>> f.close() # close the HDF file
The second way consists of setting/querying an attribute as if it were a
normal python class attribute, using the usual dot notation. Above example
then becomes:
>>> from pyhdf.HDF import *
>>> from pyhdf.VS import *
>>> f = HDF('test.hdf', HC.WRITE) # Open file 'test.hdf' in write mode
>>> vs = f.vstart() # init vdata interface
>>> vd = vs.attach('vtest', 1) # attach vdata 'vtest' in write mode
>>> vd.version = '1.0' # create vdata attribute 'version',
# setting it to string '1.0'
>>> print(vd.version) # print attribute value
>>> fld = vd.field('fld1') # obtain a field instance for field 'fld1'
>>> fld.range = (-10, 15) # create field attribute 'range', setting
# it to the pair of ints (-10, 15)
>>> print(fld.range) # print attribute value
>>> vd.detach() # "close" the vdata
>>> vs.end() # terminate the vdata interface
>>> f.close() # close the HDF file
Note how the dot notation greatly simplifies and clarifies the code.
Some latitude is however lost by manipulating attributes in that way,
because the pyhdf package, not the programmer, is then responsible for
setting the attribute type. The attribute type is chosen to be one of:
=========== ====================================
HC.CHAR8 if the attribute value is a string
HC.INT32 if all attribute values are integers
HC.FLOAT64 otherwise
=========== ====================================
The first way of handling attribute values must be used if one wants to
define an attribute of any other type (for ex. 8 or 16 bit integers,
signed or unsigned). Also, only a VDAttr instance gives access to attribute
info, through its info() method.
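For example, to store an attribute as a 16-bit integer, something the dot
notation cannot express (a hedged sketch; 'version_code' is illustrative):

>>> attr = vd.attr('version_code') # prepare the attribute
>>> attr.set(HC.INT16, 42)         # force the 16-bit integer type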
However, accessing HDF attributes as if they were python attributes raises
an important issue. There must exist a way to assign generic attributes
to the python objects without requiring those attributes to be converted
to HDF attributes. pyhdf uses the following rule: an attribute whose name
starts with an underscore ('_') is either a "predefined" attribute
(see below) or a standard python attribute. Otherwise, the attribute
is handled as an HDF attribute. Also, HDF attributes are not stored inside
the object dictionary: the python dir() function will not list them.
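For example (a hedged sketch):

>>> vd.units = 'cm' # no leading '_': handled as an HDF attribute
>>> n = vd._nrecs   # leading '_': a predefined python-level attribute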
Attribute values can be updated, but it is illegal to try to change the
value type, or the attribute order (number of values). This is important
for attributes holding string values. An attribute initialized with an
'n' character string is simply a character attribute of order 'n' (eg a
character array of length 'n'). If 'vd' is a vdata and we initialize its
'a1' attribute as 'vd.a1 = "abcdef"', then a subsequent update attempt
like 'vd.a1 = "12"' will fail, because we then try to change the order
of the attribute (from 6 to 2). It is mandatory to keep the length of string
attributes constant. Examples below show simple ways how this can be done.
Predefined attributes
---------------------
The VD and VDField classes support predefined attributes to get (and
occasionally set) attribute values easily, without having to call a
class method. The names of predefined attributes all start with an
underscore ('_').
In the following tables, the RW column holds an X if the attribute
is read/write. See the HDF User's Guide for details about more
"exotic" topics like "class", "faked vdata" and "tag".
VD predefined attributes
=========== == ========================== =============================
name RW description C library routine
=========== == ========================== =============================
_class X class name VSgetclass/VSsetclass
_fields list of field names VSgetfields
_interlace X interlace mode VSgetinterlace/VSsetinterlace
_isattr true if vdata is "faked" VSisattr
by HDF to hold attributes
_name X name of the vdata VSgetname/VSsetname
_nattrs number of attributes VSfnattrs
_nfields number of fields VFnfields
_nrecs number of records VSelts
_recsize record size (bytes) VSQueryvsize
_refnum reference number VSQueryref
_tag vdata tag VSQuerytag
_tnattrs total number of vdata and VSnattrs
field attributes
=========== == ========================== =============================
VDField predefined attributes
=========== == ========================== =============================
name RW description C library routine
=========== == ========================== =============================
_esize external size (bytes) VFfieldesize
_index index number VSfindex
_isize internal size (bytes) VFfieldisize
_name name VFfieldname
_nattrs number of attributes VSfnattrs
_order order (number of values) VFfieldorder
_type field type (HC.xxx) VFfieldtype
=========== == ========================== =============================
Record access: low and high level
---------------------------------
vdata records can be read and written in two different ways. The first one
consists of calling the basic I/O methods of the vdata:
- seek() to set the current record position, if necessary;
- read() to retrieve a given number of records from that position;
- write() to write a given number of records starting at
that position
A second, higher level way, lets one see a vdata similarly to a python
sequence, and access its contents using the familiar indexing and slicing
notation in square brackets. Reading and writing a vdata as if it were a
python sequence may often look simpler, and improve code legibility.
Here are some examples of how a vdata 'vd' holding 3 fields could be read.
>>> print(vd[0]) # print record 0
>>> print(vd[-1]) # print last record
>>> print(vd[2:]) # print records 2 and those that follow
>>> print(vd[:]) # print all records
>>> print(vd[:,0]) # print field 0 of all records
>>> print(vd[:3,:2]) # print first 2 fields of first 3 records
As the above examples show, the usual python rules are obeyed regarding
the interpretation of indexing and slicing values. Note that the vdata
fields can be indexed and sliced, not only the records. The setfields()
method can also be used to select a subset of the vdata fields
(setfields() also let you reorder the fields). When the vdata is
indexed (as opposed to being sliced), a single record is returned as a list
of values. When the vdata is sliced, a list of records is
always returned (thus a 2-level list), even if the slice contains only
one record.
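For example (a hedged sketch, field values being placeholders):

>>> vd[0]   # one record, as a list of values: [v0, v1, v2]
>>> vd[0:1] # a slice: a list of records, here [[v0, v1, v2]]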
A vdata can also be written similarly to a python sequence. When indexing
the vdata (as opposed to slicing it), a single record must be assigned,
and the record must be given as a sequence of values. It is legal to use
as an index the current number of records in the vdata: the record is then
appended to the vdata. When slicing the vdata, the records assigned to the
slice must always be given as a list of records, even
if only one record is assigned. Also, the number of records assigned must
always match the width of the slice, except if the slice includes or goes
past the last record of the vdata. In that case, the number of records
assigned can exceed the width of the slice, and the extra records are
appended to the vdata. So, to append records to vdata 'vd', simply
assign records to the slice 'vd[vd._nrecs:]'. Note that, even if the
'field' dimension can be specified in the left-hand side expression,
there is no real interest in doing so, since all fields must
be specified when assigning a record to the vdata: it is an error to
try to assign just a few of the fields.
For example, given a vdata 'vd' holding 5 records, and lists 'reca',
'recb', etc, holding record values::
vd[0] = reca # updates record 0
vd[0,:] = reca # specifying fields is OK, but useless
vd[0,1:] = reca[1:] # error: all fields must be assigned
vd[1] = [recb, recc] # error: only one record allowed
vd[5] = recc # append one record
vd[1:3] = [reca,recb] # updates second and third record
vd[1:4] = [reca, recb] # error: 3 records needed
vd[5:] = [reca,recb] # appends 2 records to the vdata
vd[4:] = [reca, recb] # updates last record, append one
Programming models
------------------
Creating and initializing a new vdata
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The following code can serve as a model for the creation and
initialization of a new vdata. It implements the INVENTORY example
described in the "Introduction" section::
from pyhdf.HDF import *
from pyhdf.VS import *
# Open HDF file and initialize the VS interface
f = HDF('inventory.hdf', # Open file 'inventory.hdf' in write mode
HC.WRITE|HC.CREATE) # creating it if it does not exist
vs = f.vstart() # init vdata interface
# Create vdata and define its structure
vd = vs.create( # create a new vdata
'INVENTORY', # name of the vdata
# fields of the vdata follow
(('partid',HC.CHAR8, 5), # 5 char string
('description',HC.CHAR8, 10), # 10 char string field
('qty',HC.INT16, 1), # 1 16 bit int field
('wght',HC.FLOAT32, 1), # 1 32 bit float
('price',HC.FLOAT32,1) # 1 32 bit float
)) # 5 fields allocated in the vdata
# Set attributes on the vdata and its fields
vd.field('wght').unit = 'lb'
vd.field('price').unit = '$'
# In order to be able to update a string attribute, it must
# always be set to the same length. This sets 'status' to a 20
# char long, left-justified string, padded with spaces on the right.
vd.status = "%-20s" % 'phase 1 done'
# Store records
vd.write(( # write 3 records
('Q1234', 'bolt',12, 0.01, 0.05), # record 1
('B5432', 'brush', 10, 0.4, 4.25), # record 2
('S7613', 'scissor', 2, 0.2, 3.75) # record 3
))
vd.detach() # "close" the vdata
vs.end() # terminate the vdata interface
f.close() # close the HDF file
Note that it is mandatory to always write whole records to the vdata.
Note also the comments about the initialization of the 'status'
vdata attribute. We want to be able update this attribute (see
following examples). However, the VS API prohibits changing an attribute
type when updating its value. Since the length (order) of an attribute
is part of its type, we make sure to set the attribute to a length
long enough to accommodate the longest possible string we might want to
assign to the attribute.
Appending records to a vdata
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Appending records requires first seeking to the end of the vdata, to avoid
overwriting existing records. The following code can serve as a model. The
INVENTORY vdata created before is used::
from pyhdf.HDF import *
from pyhdf.VS import *
f = HDF('inventory.hdf', # Open 'inventory.hdf' in write mode
HC.WRITE|HC.CREATE) # creating it if it does not exist
vs = f.vstart() # init vdata interface
vd = vs.attach('INVENTORY', 1) # attach 'INVENTORY' in write mode
# Update the `status' vdata attribute. The attribute length must not
# change. We call the attribute info() method, which returns a list
# where number of values (eg string length) is stored at index 2.
# We then assign a left justified string of exactly that length.
len = vd.attr('status').info()[2]
vd.status = '%-*s' % (len, 'phase 2 done')
vd[vd._nrecs:] = ( # append 2 records
('A4321', 'axe', 5, 1.5, 25), # first record
('C3214', 'cup', 100, 0.1, 3.25) # second record
)
vd.detach() # "close" the vdata
vs.end() # terminate the vdata interface
f.close() # close the HDF file
Note how, when updating the value of the 'status' vdata attribute,
we take care of assigning a value of the same length as that of the
original value. Otherwise, the assignment would raise an exception.
Records are written by assigning the vdata through a slicing
expression, like a python sequence. By specifying the number of records
as the start of the slice, the records are appended to the vdata.
Updating records in a vdata
^^^^^^^^^^^^^^^^^^^^^^^^^^^
Updating requires seeking to the record to update before writing the new
records. New data will overwrite this record and all records that follow,
until a new seek is performed or the vdata is closed. Note that record
numbering starts at 0.
The following code can serve as a model. The INVENTORY vdata created
before is used::
from pyhdf.HDF import *
from pyhdf.VS import *
f = HDF('inventory.hdf', # Open 'inventory.hdf' in write mode
HC.WRITE|HC.CREATE) # creating it if it does not exist
vs = f.vstart() # init vdata interface
vd = vs.attach('INVENTORY', 1) # attach 'INVENTORY' in write mode
# Update the `status' vdata attribute. The attribute length must not
# change. We call the attribute info() method, which returns a list
# where number of values (eg string length) is stored at index 2.
# We then assign a left justified string of exactly that length.
len = vd.attr('status').info()[2]
vd.status = '%-*s' % (len, 'phase 3 done')
# Update record at index 1 (second record)
vd[1] = ('Z4367', 'surprise', 10, 3.1, 44.5)
# Update record at index 4, and all those that follow
vd[4:] = (
('QR231', 'toy', 12, 2.5, 45),
('R3389', 'robot', 3, 45, 2000)
)
vd.detach() # "close" the vdata
vs.end() # terminate the vdata interface
f.close() # close the HDF file
Reading a vdata
^^^^^^^^^^^^^^^
The following example shows how to read the vdata attributes and sequentially
maneuver through its records. Note how we use the exception mechanism
to break out of the reading loop when we reach the end of the vdata::
from pyhdf.HDF import *
from pyhdf.VS import *
f = HDF('inventory.hdf') # open 'inventory.hdf' in read mode
vs = f.vstart() # init vdata interface
vd = vs.attach('INVENTORY') # attach 'INVENTORY' in read mode
# Display some vdata attributes
print "status:", vd.status
print "vdata: ", vd._name # predefined attribute: vdata name
print "nrecs: ", vd._nrecs # predefined attribute: num records
# Display value of attribute 'unit' for all fields on which
# this attribute is set
print "units: ",
for fieldName in vd._fields: # loop over all field names
try:
# instantiate field and obtain value of attribute 'unit'
v = vd.field(fieldName).unit
print "%s: %s" % (fieldName, v),
except: # no 'unit' attribute: ignore
pass
print ""
print ""
# Display table header.
header = "%-7s %-12s %3s %4s %8s" % tuple(vd._fields)
print "-" * len(header)
print header
print "-" * len(header)
# Loop over the vdata records, displaying each record as a table row.
# Current record position is 0 after attaching the vdata.
while 1:
try:
rec = vd.read() # read next record
# equivalent to:
# rec = vd[vd.tell()]
print "%-7s %-12s %3d %4.1f %8.2f" % tuple(rec[0])
except HDF4Error: # end of vdata reached
break
vd.detach() # "close" the vdata
vs.end() # terminate the vdata interface
f.close() # close the HDF file
In the previous example, the reading/displaying loop can be greatly
simplified by rewriting it as follows::
from pyhdf.HDF import *
from pyhdf.VS import *
f = HDF('inventory.hdf') # open 'inventory.hdf' in read mode
vs = f.vstart() # init vdata interface
vd = vs.attach('INVENTORY') # attach 'INVENTORY' in read mode
....
# Read all records at once, and loop over the sequence.
for rec in vd[:]:
print "%-7s %-12s %3d %4.1f %8.2f" % tuple(rec)
vd.detach() # "close" the vdata
...
The indexing expression 'vd[:]' returns the complete set of records,
which can then be looped over using a 'for' statement. This style of loop
is quite clean, and should look very familiar to python adepts.
"""
import os, sys, types
from . import hdfext as _C
from . import six
from .six.moves import xrange
from .HC import HC
from .error import HDF4Error, _checkErr
# List of names we want to be imported by an "from pyhdf.VS import *"
# statement
__all__ = ['VS', 'VD', 'VDField', 'VDAttr']
class VS(object):
"""The VS class implements the VS (Vdata) interface applied to an
HDF file.
To instantiate a VS class, call the vstart() method of an
HDF instance. """
def __init__(self, hinst):
# Not to be called directly by the user.
# A VS object is instantiated using the vstart()
# method of an HDF instance.
# Args:
# hinst HDF instance
# Returns:
# A VS instance
#
# C library equivalent : Vstart (rather: Vinitialize)
# Private attributes:
# _hdf_inst: HDF instance
# Note: Vstart is just a macro; use 'Vinitialize' instead
status = _C.Vinitialize(hinst._id)
_checkErr('VS', status, "cannot initialize VS interface")
self._hdf_inst = hinst
def __del__(self):
"""Delete the instance, first calling the end() method
if not already done. """
try:
if self._hdf_inst:
self.end()
except:
pass
def end(self):
"""Close the VS interface.
Args::
No argument
Returns::
None
C library equivalent : Vend
"""
# Note: Vend is just a macro; use 'Vfinish' instead
_checkErr('end', _C.Vfinish(self._hdf_inst._id),
"cannot terminate VS interface")
self._hdf_inst = None
vend = end # For backward compatibility
def attach(self, num_name, write=0):
"""Locate an existing vdata or create a new vdata in the HDF file,
returning a VD instance.
Args::
num_name Name or reference number of the vdata. An existing vdata
can be specified either through its reference number or
its name. Use -1 to create a new vdata.
Note that uniqueness is not imposed on vdatas names,
whereas refnums are guaranteed to be unique. Thus
knowledge of its reference number may be the only way
to get at a wanted vdata.
write Set to 0 to open the vdata in read-only mode,
set to 1 to open it in write mode
Returns::
VD instance representing the vdata
C library equivalent : VSattach
After creating a new vdata (num_name == -1), fields must be
defined using method fdefine() of the VD instance, and those
fields must be allocated to the vdata with method setfields().
Same results can be achieved, but more simply, by calling the
create() method of the VS instance.
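Example (a hedged sketch; 'INVENTORY' is an illustrative vdata name)::

vd = vs.attach('INVENTORY', 1) # open an existing vdata in write mode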
"""
mode = write and 'w' or 'r'
if isinstance(num_name, str):
num = self.find(num_name)
else:
num = num_name
vd = _C.VSattach(self._hdf_inst._id, num, mode)
if vd < 0:
_checkErr('attach', vd, 'cannot attach vdata')
return VD(self, vd)
def create(self, name, fields):
"""Create a new vdata, setting its name and allocating
its fields.
Args::
name Name to assign to the vdata
fields Sequence of field definitions. Each field definition
is a sequence with the following elements in order:
- field name
- field type (one of HC.xxx constants)
- field order (number of values)
Fields are allocated to the vdata in the given order
Returns::
VD instance representing the created vdata
Calling the create() method is equivalent to the following calls:
- vd = attach(-1,1), to create a new vdata and open it in
write mode
- vd._name = name, to set the vdata name
- vd.fdefine(...), to define the name, type and order of
each field
- vd.setfields(...), to allocate fields to the vdata
C library equivalent : no equivalent
"""
try:
# Create new vdata (-1), open in write mode (1)
vd = self.attach(-1, 1)
# Set vdata name
vd._name = name
# Define fields
allNames = []
for name, type, order in fields:
vd.fdefine(name, type, order)
allNames.append(name)
# Allocate fields to the vdata
vd.setfields(*allNames)
return vd
except HDF4Error as msg:
raise HDF4Error("error creating vdata (%s)" % msg)
def find(self, vName):
"""Get the reference number of a vdata given its name.
The vdata can then be opened (attached) by passing this
reference number to the attach() method.
Args::
vName Name of the vdata for which the reference number
is needed. vdatas names are not guaranteed to be
unique. When more than one vdata bear the same name,
find() will return the refnum of the first one found.
Returns::
vdata reference number. 0 is returned if the vdata does not exist.
C library equivalent : VSfind
"""
refNum = _C.VSfind(self._hdf_inst._id, vName)
_checkErr("find", refNum, "cannot find vdata %s" % vName)
return refNum
def next(self, vRef):
"""Get the reference number of the vdata following a given
vdata.
Args::
vRef Reference number of the vdata preceding the one
we require. Set to -1 to get the first vdata in
the HDF file. Knowing its reference number,
the vdata can then be opened (attached) by passing this
reference number to the attach() method.
Returns::
Reference number of the vdata following the one given
by argument vref
An exception is raised if no vdata follows the one given by vRef.
C library equivalent : VSgetid
"""
num = _C.VSgetid(self._hdf_inst._id, vRef)
_checkErr('next', num, 'cannot get next vdata')
return num
def vdatainfo(self, listAttr=0):
"""Return info about all the file vdatas.
Args::
listAttr Set to 0 to ignore vdatas used to store attribute
values, 1 to list them (see the VD._isattr readonly
attribute)
Returns::
List of vdata descriptions. Each vdata is described as
a 9-element tuple, composed of the following:
- vdata name
- vdata class
- vdata reference number
- vdata number of records
- vdata number of fields
- vdata number of attributes
- vdata record size in bytes
- vdata tag number
- vdata interlace mode
C library equivalent : no equivalent
"""
lst = []
ref = -1 # start at beginning
while True:
try:
nxtRef = self.next(ref)
except HDF4Error: # no vdata left
break
# Attach the vdata and check for an "attribute" vdata.
ref = nxtRef
vdObj = self.attach(ref)
if listAttr or not vdObj._isattr:
# Append a list of vdata properties.
lst.append((vdObj._name,
vdObj._class,
vdObj._refnum,
vdObj._nrecs,
vdObj._nfields,
vdObj._nattrs,
vdObj._recsize,
vdObj._tag,
vdObj._interlace))
vdObj.detach()
return lst
def storedata(self, fieldName, values, data_type, vName, vClass):
"""Create and initialize a single field vdata, returning
the vdata reference number.
Args::
fieldName Name of the single field in the vadata to create
values Sequence of values to store in the field. Each value can
itself be a sequence, in which case the field will be
multivalued (all second-level sequences must be of
the same length)
data_type Values type (one of HC.xxx constants). All values
must be of the same type
vName Name of the vdata to create
vClass Vdata class (string)
Returns::
vdata reference number
C library equivalent : VHstoredata / VHstoredatam
"""
# See if the field is multi-valued.
nrecs = len(values)
if type(values[0]) in [list, tuple]:
order = len(values[0])
# Replace input list with a flattened list.
newValues = []
for el in values:
for e in el:
newValues.append(e)
values = newValues
else:
order = 1
n_values = nrecs * order
if data_type == HC.CHAR8:
buf = _C.array_byte(n_values)
# Allow values to be passed as a string.
# Noop if a list is passed.
values = list(values)
            for n in range(n_values):
                if not isinstance(values[n], int):
                    values[n] = ord(values[n])
elif data_type in [HC.UCHAR8, HC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == HC.INT8:
# SWIG refuses negative values here. We found that if we
# pass them as byte values, it will work.
buf = _C.array_int8(n_values)
values = list(values)
for n in range(n_values):
v = values[n]
if v >= 0:
v &= 0x7f
else:
v = abs(v) & 0x7f
if v:
v = 256 - v
else:
v = 128 # -128 in 2s complement
values[n] = v
elif data_type == HC.INT16:
buf = _C.array_int16(n_values)
elif data_type == HC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == HC.INT32:
buf = _C.array_int32(n_values)
elif data_type == HC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == HC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == HC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("storedata: illegal or unimplemented data_type")
for n in range(n_values):
buf[n] = values[n]
if order == 1:
vd = _C.VHstoredata(self._hdf_inst._id, fieldName, buf,
nrecs, data_type, vName, vClass)
else:
vd = _C.VHstoredatam(self._hdf_inst._id, fieldName, buf,
nrecs, data_type, vName, vClass, order)
_checkErr('storedata', vd, 'cannot create vdata')
return vd
class VD(object):
"""The VD class encapsulates the functionnality of a vdata.
To instantiate a VD class, call the attach() or the create()
method of a VS class instance."""
def __init__(self, vsinst, id):
        # This constructor is not intended to be called directly
# by the user program. The attach() method of an
# VS class instance should be called instead.
# Arg:
# vsinst VS instance from which the call is made
# id vdata reference number
# Private attributes:
# _vs_inst VS instance to which the vdata belongs
# _id vdata identifier
# _offset current record offset
# _setfields last arg to setfields()
self._vs_inst = vsinst
self._id = id
self._offset = 0
self._setfields = None
def __getattr__(self, name):
"""Some vdata properties can be queried/set through the following
attributes. Their names all start with an "_" to avoid
clashes with user-defined attributes. Most are read-only.
        Only the _class, _interlace and _name attributes can be modified
        (_interlace can only be set once); _fields must be set through
        the setfields() method.
Name RO Description C library routine
----- -- ----------------- -----------------
_class class name VSgetclass
_fields X field names VSgetfields
_interlace interlace mode VSgetinterlace
_isattr X attribute vs real vdata VSisattr
_name name VSgetname
_nattrs X number of attributes VSfnattrs
_nfields X number of fields VFnfields
_nrecs X number of records VSelts
_recsize X record size VSQueryvsize
_refnum X reference number VSQueryref
_tag X tag VSQuerytag
_tnattrs X total number of attr. VSnattrs
"""
# Check for a user defined attribute first.
att = self.attr(name)
if att._index is not None: # Then the attribute exists
return att.get()
# Check for a predefined attribute
elif name == "_class":
status, nm = _C.VSgetclass(self._id)
_checkErr('_class', status, 'cannot get vdata class')
return nm
elif name == "_fields":
n, fields = _C.VSgetfields(self._id)
_checkErr('_fields', n, "cannot get vdata field names")
return fields.split(',')
elif name == "_interlace":
mode = _C.VSgetinterlace(self._id)
_checkErr('_interlace', mode, "cannot get vdata interlace mode")
return mode
elif name == "_isattr":
return _C.VSisattr(self._id)
elif name == "_name":
status, nm = _C.VSgetname(self._id)
_checkErr('_name', status, 'cannot get vdata name')
return nm
elif name == "_nattrs":
n = _C.VSfnattrs(self._id, -1) # -1: vdata attributes
_checkErr("_nfields", n, "cannot retrieve number of attributes")
return n
elif name == "_nfields":
n = _C.VFnfields(self._id)
_checkErr("_nfields", n, "cannot retrieve number of fields")
return n
elif name == "_nrecs":
n = _C.VSelts(self._id)
_checkErr('_nrecs', n, 'cannot get vdata number of records')
return n
elif name == "_recsize":
return self.inquire()[3]
elif name == "_refnum":
n = _C.VSQueryref(self._id)
            _checkErr('_refnum', n, 'cannot get reference number')
return n
elif name == "_tag":
n = _C.VSQuerytag(self._id)
_checkErr('_tag', n, 'cannot get tag')
return n
elif name == "_tnattrs":
n = _C.VSnattrs(self._id)
_checkErr('_tnattrs', n, 'execution error')
return n
raise AttributeError
def __setattr__(self, name, value):
# A name starting with an underscore will be treated as
# a standard python attribute, and as an HDF attribute
# otherwise.
# Forbid assigning to our predefined attributes
if name in ["_fields", "_isattr", "_nattrs", "_nfields",
"_nrecs", "_recsize", "_refnum", "_tag", "_tnattrs"]:
raise AttributeError("%s: read-only attribute" % name)
# Handle the 3 VS attributes: _class, _interlace
# and _name. _interlace can only be set once.
elif name == "_class":
_checkErr(name, _C.VSsetclass(self._id, value),
'cannot set _class property')
elif name == "_interlace":
_checkErr(name, _C.VSsetinterlace(self._id, value),
'cannot set _interlace property')
elif name == "_name":
_checkErr(name, _C.VSsetname(self._id, value),
'cannot set _name property')
# Try to set the attribute.
else:
_setattr(self, name, value)
def __getitem__(self, elem):
# This method is called when the vdata is read
# like a Python sequence.
# Parse the indexing expression.
start, count = self.__buildStartCount(elem)
# Reset current position if necessary.
if self._offset != start[0]:
self.seek(start[0])
# Get records. A negative count means that an index was used.
recs = self.read(abs(count[0]))
# See if all the fields must be returned.
f0 = start[1]
if f0 == 0 and count[1] == self._nfields:
out = recs
else:
# Return only a subset of the vdata fields.
out = []
f1 = f0 + count[1]
for r in recs:
out.append(r[f0:f1])
# If an index was used (not a slice), return the record as
# a list, instead of returning it inside a 2-level list,
if count[0] < 0:
return out[0]
return out
def __setitem__(self, elem, data):
# This method is called when the vdata is written
# like a Python sequence.
#
# When indexing the vdata, 'data' must specify exactly
        # one record, which must be specified as a sequence. If the index is
# equal to the current number of records, the record
# is appended to the vdata.
#
# When slicing the vdata, 'data' must specify a list of records.
# The number of records in the top level-list must match the width
# of the slice, except if the slice extends past the end of the
# vdata. In that case, extra records can be specified in the list,
# which will be appended to the vdata. In other words,
# to append records to vdata 'vd', assign records to
# the slice 'vd[vd._nrecs:]'.
#
# For ex., given a vdata 'vd' holding 5 records, and lists
# 'reca', 'recb', etc holding record values:
# vd[0] = reca # updates record 0
# vd[1] = [recb, recc] # error: only one record allowed
# vd[1:3] = [reca,recb] # updates second and third record
# vd[1:4] = [reca, recb] # error: 3 records needed
# vd[5:] = [reca,recb] # appends 2 records to the vdata
# Check that arg is a list.
if not type(data) in [tuple, list]:
raise HDF4Error("record(s) must be specified as a list")
start, count = self.__buildStartCount(elem, setitem=1)
# Records cannot be partially written.
if start[1] != 0 or count[1] != self._nfields:
raise HDF4Error("each vdata field must be written")
# If an index (as opposed to a slice) was applied to the
# vdata, a single record must be passed. Since write() requires
# a 2-level list, wrap this record inside a list.
if count[0] < 0:
if len(data) != self._nfields:
raise HDF4Error("record does not specify all fields")
data = [data]
# A slice was used. The slice length must match the number of
# records, except if the end of the slice equals the number
        # of records. Then, extra records can be specified, which will
# be appended to the vdata.
else:
if count[0] != len(data):
if start[0] + count[0] != self._nrecs:
raise HDF4Error("illegal number of records")
# Reset current record position if necessary.
if self._offset != start[0]:
self.seek(start[0])
# Write records.
        self.write(data)
def __del__(self):
"""Delete the instance, first calling the detach() method
if not already done. """
try:
if self._id:
self.detach()
except:
pass
def detach(self):
"""Terminate access to the vdata.
Args::
no argument
Returns::
None
C library equivalent : VSdetach
"""
_checkErr('detach', _C.VSdetach(self._id), "cannot detach vdata")
self._id = None
def fdefine(self, name, type, order):
"""Define a field. To initialize a newly created vdata with
fields created with fdefine(), assign a tuple of field names
to the _fields attribute or call the setfields() method.
Args::
name field name
type field data type (one of HC.xxx)
order field order (number of values in the field)
Returns::
None
C library equivalent : VSfdefine
"""
_checkErr('fdefine', _C.VSfdefine(self._id, name, type, order),
'cannot define field')
def setfields(self, *fldNames):
"""Define the name and order of the fields to access
with the read() and write() methods.
Args::
fldNames variable length argument specifying one or more
vdata field names
Returns::
None
C library equivalent : VSsetfields
setfields() indicates how to perform the matching between the vdata
fields and the values passed to the write() method or returned
by the read() method.
For example, if the vdata contains fields 'a', 'b' and 'c' and
a "setfields('c','a')" call is made, read() will thereafter return
for each record the values of field 'c' and 'a', in that order.
Field 'b' will be ignored.
When writing to a vdata, setfields() has a second usage. It is used
to initialize the structure of the vdata, that is, the name and order
of the fields that it will contain. The fields must have been
previously defined by calls to the fdefine() method.
Following that first call, setfields() can be called again to
change the order in which the record values will be passed
to the write() method. However, since it is mandatory to write
whole records, subsequent calls to setfields() must specify every
field name: only the field order can be changed.
"""
_checkErr('setfields', _C.VSsetfields(self._id, ','.join(fldNames)),
'cannot execute')
self._setfields = fldNames # remember for read/write routines
def field(self, name_index):
"""Get a VDField instance representing a field of the vdata.
Args::
name_index name or index number of the field
Returns::
VDfield instance representing the field
C library equivalent : no equivalent
"""
# Transform a name to an index number
if isinstance(name_index, str):
status, index = _C.VSfindex(self._id, name_index)
_checkErr('field', status, "illegal field name: %s" % name_index)
else:
n = _C.VFnfields(self._id)
_checkErr('field', n, 'cannot execute')
index = name_index
if index >= n:
raise HDF4Error("field: illegal index number")
return VDField(self, index)
def seek(self, recIndex):
"""Seek to the beginning of the record identified by its
record index. A succeeding read will load this record in
memory.
Args::
recIndex index of the record in the vdata; numbering
starts at 0. Legal values range from 0
(start of vdata) to the current number of
records (at end of vdata).
Returns::
record index
An exception is raised if an attempt is made to seek beyond the
last record.
The C API prohibits seeking past the next-to-last record,
forcing one to read the last record to advance to the end
of the vdata. The python API removes this limitation.
Seeking to the end of the vdata can also be done by calling
method ``seekend()``.
C library equivalent : VSseek
"""
if recIndex > self._nrecs - 1:
if recIndex == self._nrecs:
return self.seekend()
else:
raise HDF4Error("attempt to seek past last record")
n = _C.VSseek(self._id, recIndex)
_checkErr('seek', n, 'cannot seek')
self._offset = n
return n
def seekend(self):
"""Set the current record position past the last vdata record.
Subsequent write() calls will append records to the vdata.
Args::
no argument
Returns::
index of the last record plus 1
C library equivalent : no equivalent
"""
try:
            # Seek to the last record position
n = self.seek(self._nrecs - 1) # updates _offset
# Read last record, ignoring values
self.read(1) # updates _offset
return self._nrecs
except HDF4Error:
raise HDF4Error("seekend: cannot execute")
def tell(self):
"""Return current record position in the vdata.
Args::
no argument
Returns::
current record position; 0 is at start of vdata.
C library equivalent : no equivalent
"""
return self._offset
def read(self, nRec=1):
"""Retrieve the values of a number of records, starting
at the current record position. The current record position
is advanced by the number of records read. Current position
is 0 after "opening" the vdata with the attach() method.
Args::
nRec number of records to read
Returns::
2-level list. First level is a sequence of records,
second level gives the sequence of values for each record.
The values returned for each record are those of the fields
specified in the last call to method setfields(), in that
order. The complete vdata field set is returned if
setfields() has not been called.
An exception is raised if the current record position is
already at the end of the vdata when read() is called. This
exception can be caught as an "end of vdata" indication to
exit a loop which scans each record of the vdata. Otherwise,
the number of records to be read is lowered to the number of
records remaining in the vdata, if that number is less than
the number asked for by parameter 'nRec'. Setting 'nRec' to
an arbitrarily large value can thus be used to retrieve the
remaining records in the vdata.
C library equivalent : VSread
"""
# Validate number of records to read vs the current offset.
# Return "end of vdata" exception if already at end of vdata
# otherwise "clip" the number of records if it exceeds the
# number of remaining records in the vdata.
n = self._nrecs
if self._offset == n:
raise HDF4Error("end of vdata reached")
        if self._offset + nRec > n:
            nRec = n - self._offset
fields = self._setfields or self._fields
nFields = len(fields)
fieldList = ','.join(fields)
_checkErr('read', _C.VSsetfields(self._id, fieldList),
'error defining fields to read')
# Allocate a buffer to store the packed records.
bufSize = self.sizeof(fields) * nRec
bigBuf = _C.array_byte(bufSize)
# Read records
nRead = _C.VSread(self._id, bigBuf, nRec, 0) # 0: FULL_INTERLACE
_checkErr('read', nRead, 'read error')
self._offset += nRec
# Allocate an array to store a pointer to the field buffer.
fldArr = _C.new_array_voidp(1)
# Initialize return value
values = []
for numRec in range(nRead):
v = []
for numFld in range(nFields):
v.append(None)
values.append(v)
# Unpack each field in turn.
for numFld in range(nFields):
fld = self.field(fields[numFld])
data_type = fld._type
order = fld._order
n_values = order * nRead
# Allocate a buffer to store the field values.
if data_type in [HC.CHAR8, HC.UCHAR8, HC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == HC.INT8:
buf = _C.array_int8(n_values)
elif data_type == HC.INT16:
buf = _C.array_int16(n_values)
elif data_type == HC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == HC.INT32:
buf = _C.array_int32(n_values)
elif data_type == HC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == HC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == HC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("read: illegal or unupported type %d" % \
data_type)
# Unpack the field values.
_C.array_voidp_setitem(fldArr, 0, buf)
_checkErr('read',
_C.VSfpack(self._id, 1, fieldList, bigBuf, bufSize,
nRead, fld._name, fldArr),
"cannot execute")
# Extract values from the field buffer.
k = 0
for numRec in range(nRead):
if order == 1:
values[numRec][numFld] = buf[k]
k += 1
else:
# Handle strings specially
if data_type == HC.CHAR8:
s = ''
for i in range(order):
v = buf[k]
if v != 0:
s += chr(v)
k += 1
values[numRec][numFld] = s
# Return field values as a list
else:
values[numRec][numFld] = []
for i in range(order):
values[numRec][numFld].append(buf[k])
k += 1
del buf
return values
def write(self, values):
"""Write records to the vdata. Writing starts at the current
record position, which is advanced by the number of records
written.
Args::
          values: 2-level sequence. The first level is a sequence of
                  records; the second level gives the sequence of values
                  for each record.
                  It is mandatory to always write whole records. Thus
                  every record field must appear at the second level.
                  The record values are ordered according to the list of
                  field names set in the last call to the setfields()
                  method. The order of the complete vdata field set is
                  used if setfields() has not been called.
Returns::
number of records written
To append to a vdata already holding 'n' records, it is necessary
to first move the current record position to 'n-1' with a call to
method seek(), then to call method read() for the side effect
of advancing the current record position past this last record.
Method seekend() does just that.
C library equivalent : VSwrite
"""
nFields = self._nfields
# Fields give the order the record values, as defined in the
# last call to setfields()
fields = self._setfields or self._fields
# We must pack values using the effective field order in the vdata
fieldList = ','.join(self._fields)
# Validate the values argument.
if nFields != len(fields):
raise HDF4Error("write: must write whole records")
if type(values) not in [list, tuple]:
raise HDF4Error("write: values must be a sequence")
nRec = len(values)
for n in range(nRec):
rec = values[n]
if type(rec) not in [list, tuple]:
raise HDF4Error("write: records must be given as sequences")
# Make sure each record is complete.
if len(rec) != nFields:
raise HDF4Error("write: records must specify every field")
# Allocate a buffer to store the packed records.
bufSize = self._recsize * nRec
bigBuf = _C.array_byte(bufSize)
# Allocate an array to store a pointer to the field buffer.
fldArr = _C.new_array_voidp(1)
# Pack each field in turn.
for numFld in range(nFields):
fld = self.field(fields[numFld])
data_type = fld._type
order = fld._order
n_values = order * nRec
# Allocate a buffer to store the field values.
if data_type in [HC.CHAR8, HC.UCHAR8, HC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == HC.INT8:
buf = _C.array_int8(n_values)
elif data_type == HC.INT16:
buf = _C.array_int16(n_values)
elif data_type == HC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == HC.INT32:
buf = _C.array_int32(n_values)
elif data_type == HC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == HC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == HC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("write: illegal or unupported type %d" % \
data_type)
# Load the field buffer with values.
k = 0
for numRec in range(nRec):
val = values[numRec][numFld]
# Single-valued field
if order == 1:
buf[k] = val
k += 1
# Multi-valued field
else:
# Handle strings specially.
if data_type == HC.CHAR8:
if not isinstance(val, str):
raise HDF4Error("char fields must be set with strings")
n = len(val)
for i in range(order):
buf[k] = i < n and ord(val[i]) or 0
k += 1
# Should validate field elements ...
elif type(val) not in [list, tuple]:
raise HDF4Error("multi-values fields must be given as sequences")
else:
for i in range(order):
buf[k] = val[i]
k += 1
# Store address of the field buffer in first position
# of the field array. Pack the field values.
_C.array_voidp_setitem(fldArr, 0, buf) # fldArr[0] = buf
_checkErr('write',
_C.VSfpack(self._id, 0, fieldList, bigBuf, bufSize,
nRec, fld._name, fldArr),
"cannot execute")
del buf
# Write the packed records.
n = _C.VSwrite(self._id, bigBuf, nRec, 0) # 0: FULL_INTERLACE
_checkErr('write', n, 'cannot execute')
self._offset += nRec
return n
def inquire(self):
"""Retrieve info about the vdata.
Args::
no argument
Returns::
5-element tuple with the following elements:
-number of records in the vdata
-interlace mode
-list of vdata field names
-size in bytes of the vdata record
-name of the vdata
C library equivalent : VSinquire
"""
status, nRecs, interlace, fldNames, size, vName = \
_C.VSinquire(self._id)
_checkErr('inquire', status, "cannot query vdata info")
return nRecs, interlace, fldNames.split(','), size, vName
def fieldinfo(self):
"""Retrieve info about all vdata fields.
Args::
no argument
Returns::
list where each element describes a field of the vdata;
each field is described by an 7-element tuple containing
the following elements:
- field name
- field data type (one of HC.xxx constants)
- field order
- number of attributes attached to the field
- field index number
- field external size
- field internal size
C library equivalent : no equivalent
"""
lst = []
for n in range(self._nfields):
fld = self.field(n)
lst.append((fld._name,
fld._type,
fld._order,
fld._nattrs,
fld._index,
fld._esize,
fld._isize))
return lst
def sizeof(self, fields):
"""Retrieve the size in bytes of the given fields.
Args::
fields sequence of field names to query
Returns::
total size of the fields in bytes
C library equivalent : VSsizeof
"""
        if type(fields) in [tuple, list]:
            fldList = ','.join(fields)
        else:
            fldList = fields
        n = _C.VSsizeof(self._id, fldList)
_checkErr('sizeof', n, "cannot retrieve field sizes")
return n
def fexist(self, fields):
"""Check if a vdata contains a given set of fields.
Args::
fields sequence of field names whose presence in the
vdata must be checked
Returns::
true (1) if the given fields are present
false (0) otherwise
C library equivalent : VSfexist
"""
        if type(fields) in [tuple, list]:
            fldList = ','.join(fields)
        else:
            fldList = fields
        ret = _C.VSfexist(self._id, fldList)
if ret < 0:
return 0
else:
return 1
def attr(self, name_or_index):
"""Create a VDAttr instance representing a vdata attribute.
Args::
name_or_index attribute name or index number; if a name is
given, the attribute may not exist; in that
                          case, it will be created when the VDAttr
                          instance set() method is called
        Returns::
          VDAttr instance for the attribute. Call the methods of this
class to query, read or set the attribute.
C library equivalent : no equivalent
"""
return VDAttr(self, name_or_index, -1) # -1: vdata attribute
def findattr(self, name):
"""Search the vdata for a given attribute.
Args::
name attribute name
Returns::
if found, VDAttr instance describing the attribute
None otherwise
C library equivalent : VSfindattr
"""
try:
att = self.attr(name)
if att._index is None:
att = None
except HDF4Error:
att = None
return att
def attrinfo(self):
"""Return info about all the vdata attributes.
Args::
no argument
Returns::
          dictionary describing each vdata attribute; for each attribute
a (name,data) pair is added to the dictionary, where 'data' is
a tuple holding:
- attribute data type (one of HC.xxx constants)
- attribute order
- attribute value
- attribute size in bytes
C library equivalent : no equivalent
"""
dic = {}
for n in range(self._nattrs):
att = self.attr(n)
name, type, order, size = att.info()
dic[name] = (type, order, att.get(), size)
return dic
def __buildStartCount(self, elem, setitem=0):
# Called by __getitem__() and __setitem__() methods
# to parse the expression used inside square brackets to
# index/slice a vdata.
# If 'setitem' is set, the call comes from __setitem__()
# We then allow the start value to be past the last record
# so as to be able to append to the vdata.
#
# Return a 2-element tuple:
# - tuple of the start indices along the vdata dimensions
# - tuple of the count values along the vdata dimensions
# a count of -1 indicates that an index, not a slice
        #   was applied on the corresponding dimension.
# Make sure the indexing expression does not exceed the
# vdata number of dimensions (2).
if isinstance(elem, tuple):
if len(elem) > 2:
raise HDF4Error("illegal indexing expression")
else: # Convert single index to sequence
elem = [elem]
start = []
count = []
shape = [self._nrecs, self._nfields]
n = -1
for e in elem:
n += 1
# Simple index
if isinstance(e, int):
is_slice = False
if e < 0:
e += shape[n]
if e < 0 or e >= shape[n]:
if e == shape[n] and setitem:
pass
else:
raise HDF4Error("index out of range")
beg = e
end = e + 1
# Slice index
elif isinstance(e, slice):
is_slice = True
# None or 0 means not specified
if e.start:
beg = e.start
if beg < 0:
beg += shape[n]
else:
beg = 0
# None or maxint means not specified
if e.stop and e.stop != sys.maxsize:
end = e.stop
if end < 0:
end += shape[n]
else:
end = shape[n]
# Bug
else:
raise ValueError("invalid indexing expression")
# Clip end index and compute number of elements to get
if end > shape[n]:
end = shape[n]
if beg > end:
beg = end
if is_slice:
cnt = end - beg
else:
cnt = -1
start.append(beg)
count.append(cnt)
if n == 0:
start.append(0)
count.append(shape[1])
return start, count
class VDField(object):
"""The VDField class represents a vdata field.
To create a VDField instance, call the field() method of a
VD class instance. """
def __init__(self, vdinst, fIndex):
# This method should not be called directly by the user program.
# To create a VDField instance, obtain a VD class instance and
# call its field() method.
# Args:
# vdinst VD instance to which the field belongs
# fIndex field index
#
# Private attributes:
# _vd_inst VD instance to which the field belongs
# _idx field index
self._vd_inst = vdinst
self._idx = fIndex
def __getattr__(self, name):
"""Some field properties can be queried through the following
read-only attributes. Their names all start with an "_" to avoid
clashes with user-defined attributes.
Name Description C library routine
----- ------------------- -----------------
_esize field external size VFfieldesize
_index field index number VSfindex
_isize field internal size VFfieldisize
_name field name VFfieldname
_nattrs number of attributes VSfnattrs
_order field order VFfieldorder
_type field type VFfieldtype
"""
# Check for a user defined attribute first.
att = self.attr(name)
if att._index is not None: # Then the attribute exists
return att.get()
# Check for a predefined attribute.
elif name == "_esize":
n = _C.VFfieldesize(self._vd_inst._id, self._idx)
_checkErr('_esize', n, "execution error")
return n
elif name == "_index":
return self._idx
elif name == "_isize":
n = _C.VFfieldisize(self._vd_inst._id, self._idx)
_checkErr('_isize', n, "execution error")
return n
elif name == "_name":
n = _C.VFfieldname(self._vd_inst._id, self._idx)
_checkErr('_name', n, "execution error")
return n
elif name == "_nattrs":
n = _C.VSfnattrs(self._vd_inst._id, self._idx)
_checkErr('_nattrs', n, "execution error")
return n
elif name == "_order":
n = _C.VFfieldorder(self._vd_inst._id, self._idx)
_checkErr('_order', n, "execution error")
return n
elif name == "_type":
type = _C.VFfieldtype(self._vd_inst._id, self._idx)
_checkErr('_type', type, 'cannot retrieve field type')
return type
raise AttributeError
def __setattr__(self, name, value):
# Forbid assigning to our predefined attributes
if name in ["_esize", "_index", "_isize", "_name",
"_nattrs", "_order", "_type"]:
raise AttributeError("%s: read-only attribute" % name)
# Try to set the attribute.
else:
_setattr(self, name, value)
def attr(self, name_or_index):
"""Create a VDAttr instance representing a field attribute.
Args::
name_or_index attribute name or index number; if a name is
specified, the attribute may not exist; in that
case, it will be created when the VDAttr
instance set() method is called; if an
index number is specified, the attribute
must exist
Returns::
          VDAttr instance for the attribute. Call the methods of this
class to query, read or set the attribute.
C library equivalent : no equivalent
"""
return VDAttr(self, name_or_index, self._idx)
def find(self, name):
"""Search the field for a given attribute.
Args::
name attribute name
Returns::
if found, VDAttr instance describing the attribute
None otherwise
C library equivalent : VSfindattr
"""
try:
att = self.attr(name)
if att._index is None:
att = None
except HDF4Error:
att = None
return att
def attrinfo(self):
"""Return info about all the field attributes.
Args::
no argument
Returns::
          dictionary describing each field attribute; for each attribute
a (name,data) pair is added to the dictionary, where 'data' is
a tuple holding:
- attribute data type (one of HC.xxx constants)
- attribute order
- attribute value
- attribute size in bytes
C library equivalent : no equivalent
"""
dic = {}
for n in range(self._nattrs):
att = self.attr(n)
name, type, order, size = att.info()
dic[name] = (type, order, att.get(), size)
return dic
class VDAttr(object):
"""The VDAttr class encapsulates methods used to set and query attributes
defined at the level either of the vdata or of the vdata field.
To create an instance of this class, call the attr() method of a VD
(vdata) or VDField (vdata field) instance. """
def __init__(self, obj, name_or_index, fIndex):
# This constructor should not be called directly by the user
# program. The attr() method of a VD (vdata) or VDField
# (vdata field) must be called to instantiate this class.
# Args:
# obj object instance (VD or VDField) to which the
# attribute belongs
# name_or_index name or index of the attribute; if a name is
# given, an attribute with that name will be
# searched, if not found, a new index number will
# be generated
# fIndex field index, or -1 if the attribute belongs
# to the vdata
# Private attributes:
# _vd_inst VD instance
# _vdf_inst VDField instance or None
# _index attribute index or None
# _name attribute name or None
        #  _fIndex      field index, or -1 if obj is a VD instance
if isinstance(obj, VD):
self._vd_inst = obj
            self._vdf_inst = None
self._fIndex = -1
else:
self._vd_inst = obj._vd_inst
self._vdf_inst = obj
self._fIndex = fIndex
# Name is given. Attribute may exist or not.
        if isinstance(name_or_index, str):
self._name = name_or_index
self._index = _C.VSfindattr(self._vd_inst._id, self._fIndex,
                                        self._name)
if self._index < 0:
self._index = None
# Index is given. Attribute Must exist.
else:
self._index = name_or_index
status, self._name, data_type, n_values, size = \
_C.VSattrinfo(self._vd_inst._id, self._fIndex,
self._index)
_checkErr('attr', status, 'non-existent attribute')
def get(self):
"""Retrieve the attribute value.
Args::
no argument
Returns::
attribute value(s); a list is returned if the attribute
is made up of more than one value, except in the case of a
string-valued attribute (data type HC.CHAR8) where the
values are returned as a string
C library equivalent : VSgetattr
"""
        # Make sure the attribute exists.
if self._index is None:
raise HDF4Error("non existent attribute")
# Obtain attribute type and the number of values.
status, aName, data_type, n_values, size = \
_C.VSattrinfo(self._vd_inst._id, self._fIndex,
self._index)
_checkErr('get', status, 'illegal parameters')
# Get attribute value.
convert = _array_to_ret
if data_type == HC.CHAR8:
buf = _C.array_byte(n_values)
convert = _array_to_str
elif data_type in [HC.UCHAR8, HC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == HC.INT8:
buf = _C.array_int8(n_values)
elif data_type == HC.INT16:
buf = _C.array_int16(n_values)
elif data_type == HC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == HC.INT32:
buf = _C.array_int32(n_values)
elif data_type == HC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == HC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == HC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("get: attribute index %d has an "\
"illegal or unupported type %d" % \
(self._index, data_type))
status = _C.VSgetattr(self._vd_inst._id, self._fIndex,
self._index, buf)
        _checkErr('get', status, 'illegal attribute')
return convert(buf, n_values)
def set(self, data_type, values):
"""Set the attribute value.
Args::
data_type : attribute data type (see constants HC.xxx)
values : attribute value(s); specify a list to create
a multi-valued attribute; a string valued
attribute can be created by setting 'data_type'
to HC.CHAR8 and 'values' to the corresponding
string
If the attribute already exists, it will be
updated. However, it is illegal to try to change
its data type or its order (number of values).
Returns::
None
C library equivalent : VSsetattr
"""
        try:
            n_values = len(values)
        except TypeError:       # scalar value
            values = [values]
            n_values = 1
if data_type == HC.CHAR8:
buf = _C.array_byte(n_values)
# Allow values to be passed as a string.
# Noop if a list is passed.
values = list(values)
for n in range(n_values):
if not isinstance(values[n], int):
values[n] = ord(values[n])
elif data_type in [HC.UCHAR8, HC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == HC.INT8:
# SWIG refuses negative values here. We found that if we
# pass them as byte values, it will work.
buf = _C.array_int8(n_values)
values = list(values)
for n in range(n_values):
v = values[n]
if v >= 0:
v &= 0x7f
else:
v = abs(v) & 0x7f
if v:
v = 256 - v
else:
v = 128 # -128 in 2s complement
values[n] = v
elif data_type == HC.INT16:
buf = _C.array_int16(n_values)
elif data_type == HC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == HC.INT32:
buf = _C.array_int32(n_values)
elif data_type == HC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == HC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == HC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("set: illegal or unimplemented data_type")
for n in range(n_values):
buf[n] = values[n]
status = _C.VSsetattr(self._vd_inst._id, self._fIndex, self._name,
data_type, n_values, buf)
        _checkErr('set', status, 'cannot execute')
# Update the attribute index
self._index = _C.VSfindattr(self._vd_inst._id, self._fIndex,
                                    self._name)
if self._index < 0:
raise HDF4Error("set: error retrieving attribute index")
def info(self):
"""Retrieve info about the attribute.
Args::
no argument
Returns::
4-element tuple with the following components:
-attribute name
-attribute data type (one of HC.xxx constants)
-attribute order (number of values)
-attribute size in bytes
C library equivalent : VSattrinfo
"""
# Make sure the attribute exists.
if self._index is None:
raise HDF4Error("non existent attribute")
status, name, type, order, size = \
_C.VSattrinfo(self._vd_inst._id, self._fIndex, self._index)
_checkErr('info', status, "execution error")
return name, type, order, size
###########################
# Support functions
###########################
def _setattr(obj, name, value):
# Called by the __setattr__ method of the VD and VDField objects.
#
# obj instance on which the attribute is set
# name attribute name
# value attribute value
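    # Hedged illustration of the type inference below (assumed names):
    #   vd.units = 'mm'        -> HC.CHAR8 attribute
    #   vd.scale = 2           -> HC.INT32 attribute
    #   vd.offsets = [0.5, 1]  -> HC.FLOAT64 (float "wins" over int)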
if isinstance(value, six.string_types):
value = value.encode('utf8')
    # Treat a name starting with an underscore as that of a
# standard python instance attribute.
if name[0] == '_':
obj.__dict__[name] = value
return
# Treat everything else as an HDF attribute.
if type(value) not in [list, tuple]:
value = [value]
typeList = []
for v in value:
t = type(v)
# Prohibit mixing numeric types and strings.
        if t in [int, float] and \
           bytes not in typeList:
if t not in typeList:
typeList.append(t)
# Prohibit sequence of strings or a mix of numbers and string.
elif t == bytes and not typeList:
typeList.append(t)
else:
typeList = []
break
if bytes in typeList:
xtype = HC.CHAR8
value = value[0]
# double is "stronger" than int
elif float in typeList:
xtype = HC.FLOAT64
elif int in typeList:
xtype = HC.INT32
else:
raise HDF4Error("Illegal attribute value")
# Assign value
try:
a = obj.attr(name)
a.set(xtype, value)
except HDF4Error as msg:
raise HDF4Error("cannot set attribute: %s" % msg)
def _array_to_ret(buf, nValues):
# Convert array 'buf' to a scalar or a list.
if nValues == 1:
ret = buf[0]
else:
ret = []
        for i in range(nValues):
ret.append(buf[i])
return ret
def _array_to_str(buf, nValues):
# Convert array of bytes 'buf' to a string.
# Return empty string if there is no value.
if nValues == 0:
return ""
# When there is just one value, _array_to_ret returns a scalar
# over which we cannot iterate.
if nValues == 1:
chrs = [chr(buf[0])]
else:
chrs = [chr(b) for b in _array_to_ret(buf, nValues)]
# Strip NULL at end
if chrs[-1] == '\0':
del chrs[-1]
return ''.join(chrs)
|
mit
| -6,665,755,709,785,605,000
| 35.694785
| 89
| 0.558892
| false
| 4.318202
| false
| false
| false
|
OpenProvenance/python-bitcoinlib-scripting
|
03-CTxIn.py
|
1
|
2407
|
### Open Provenance February 2016 - https://myveryown.org
### Bitcoin Blockchain Information using python-bitcoinlib
### CTxIn & COutPoint Objects and Properties
### Donate to Open Provenance: 1opDUZQ9nsL1LJALBdV1dvqSMtcvNj9EC
## Import the modules required and setup a connection to bitcoin
import bitcoin
## Create a proxy object and connect to the bitcoin.rpc
import bitcoin.rpc
myproxy = bitcoin.rpc.Proxy()
## Get the latest CBlock data from bitcoin rpc proxy
block_info = myproxy.getblock(myproxy.getblockhash(myproxy.getblockcount()))
## From the CBlock object we are able to get the transactions
vtx = block_info.vtx
## Print the details to the screen.
print "----------------------------------------------------------------"
print "Bitcoin CTxIn Object Information: Block Height ", myproxy.getblockcount()
print "----------------------------------------------------------------"
## We need a non coinbase transaction for this demo as coinbase transactions have no inputs.
## in this example we will show the second transaction or first non "coinbase" transaction details.
if len(vtx) >= 2 :
for x in range (1, 2) :
## Each Transaction is a CTransaction Object
thetx = vtx[x]
## Now we have the object we can get info from it
print "Is Coinbase: ", thetx.is_coinbase()
print "nVersion: ", thetx.nVersion
print "nLockTime: ", thetx.nLockTime
print "TX: ", bitcoin.core.b2lx(thetx.GetHash())
## From the CTransaction Object we get the CTxIn Objects
vin = thetx.vin
## There could be more than one IN so we loop
if len(vin) >= 1 :
for i in range (0, len(vin)) :
## vi is a CTxIn Object
vi = vin[i]
print " "
## From this Object we can get info
print "is_final: ", vi.is_final()
print "nSequence : ", vi.nSequence
## the CTxIn Object also contains a COutPoint Object
vip = vi.prevout
print "COutPoint Hash: "
print bitcoin.core.b2lx(vip.hash)
print "COutPoint n: ", vip.n
print "COutPoint is_null: ", vip.is_null()
## and finally it includes a signature
print "scriptSig : "
print bitcoin.core.b2lx(vi.scriptSig)
print '----------'
print "Dump of RAW CTxIn Object:"
print vi
print " "
print "Dump of RAW COutPoint Object:"
print vip
print '----------'
else :
print "Sorry this block only has a coinbase transaction."
print "----------------------------------------------------------------"
print " "
exit()
|
mit
| -396,867,115,745,804,740
| 32.901408
| 99
| 0.645617
| false
| 3.473304
| false
| false
| false
|
lironsc/ORange
|
ORange1_LoadBalancers/Project1/Controller/Split/Elcp0Table.py
|
1
|
1035
|
import Flow, Range
from ryu.ofproto import ofproto_v1_3
# This file contains all the logic for populating the fourth table, used for the balancing of traffic
# Creates a flow for the table, one for each range, representing the start of a range
def createThirdTableFlow(flowRange, datapath):
ofproto=ofproto_v1_3
match = datapath.ofproto_parser.OFPMatch(eth_type=0x800,ipv4_src=flowRange.getZeroELCP())
    # If a match is found, send to the last table which will send the packet to the chosen server
inst = [datapath.ofproto_parser.OFPInstructionGotoTable(4),
datapath.ofproto_parser.OFPInstructionWriteMetadata(Range.fromBinary(Range.toBinary(int(flowRange.ID)) +flowRange.end), Flow.getMetaDataMask(), type_=None, len_=None)]
return Flow.createFlow(datapath,int(flowRange.ID),3,100-Range.starsInString(flowRange.zeroELCP),match,inst)
# Install all flows in table
def prepareELCP0Table(dp,ranges):
for i in range(0, len(ranges)):
dp.send_msg(createThirdTableFlow(ranges[i], dp))
|
apache-2.0
| -6,491,032,691,780,397,000
| 56.5
| 187
| 0.752657
| false
| 3.532423
| false
| false
| false
|
jet-code/multivariable-control-systems
|
cp2/cp2_method0.py
|
1
|
3758
|
# coding: utf-8
# In[1]:
# Alexander Hebert
# ECE 6390
# Computer Project #2
# In[2]:
# Tested using Python v3.4 and IPython v2
##### Import libraries
# In[3]:
import numpy as np
# In[4]:
import scipy
# In[5]:
import sympy
# In[6]:
from IPython.display import display
# In[7]:
from sympy.interactive import printing
# In[8]:
np.set_printoptions(precision=6)
# In[9]:
#np.set_printoptions(suppress=True)
##### Original system:
# In[10]:
A = np.loadtxt('A_ex1.txt')
# In[11]:
A
# In[12]:
n,nc = A.shape
# In[13]:
B = np.loadtxt('B_ex1.txt')
# In[14]:
B
# In[15]:
nr,m = B.shape
##### Compute eigenvalues/poles of A to determine system stability:
# In[16]:
A_eigvals, M = np.linalg.eig(A)
# In[17]:
A_eigvals
# In[18]:
# Two poles lie in the RHP and are unstable.
# In[19]:
A_eigvals_desired = np.array([-0.2,-0.5,A_eigvals[2],A_eigvals[3]])
# In[20]:
A_eigvals_desired
# In[21]:
Lambda = np.diag(A_eigvals_desired)
# In[22]:
Lambda
##### Pole Assignment Algorithm from journal paper
# In[23]:
# Step A: Decomposition of B using SVD
# B = U*S*V.H
# In[24]:
U, s, VH = np.linalg.svd(B)
# In[25]:
U
# In[26]:
s
# In[27]:
S = np.zeros((4, 2))
S[:2, :2] = np.diag(s)
# In[28]:
S
# In[29]:
VH
# In[30]:
# Extract U_0 and U_1 from matrix U = [U_0,U_1]
# In[31]:
U_0 = U[:n,:m]
# In[32]:
U_0
# In[33]:
U_1 = U[:n,m:]
# In[34]:
U_1
# In[35]:
# B = [U_0,U_1][Z,0].T
# Compute Z from SVD of B
# In[36]:
Z = np.diag(s).dot(VH)
# In[37]:
Z
# In[38]:
# Compute the nullspace of U_1.T *(A - lambda_j*I)
# for initial eigenvectors in X
X = np.zeros((n,n))
for j in range(len(A_eigvals_desired)):
lambda_j = A_eigvals_desired[j]
# M_j is a temp matrix
exec("M_%d = np.dot(U_1.T,(A - lambda_j*np.identity(n)))" %(j+1))
# U_1.T *(A - lambda_j*I) = T_j *[Gamma_j,0]*[S_j_hat,S_j].T
exec("T_%d, gamma_%d, SH_%d = np.linalg.svd(M_%d)" %(j+1,j+1,j+1,j+1))
exec("X[:,j] = SH_%d[-2,:]" %(j+1))
# no transpose in SH_j due to 1-d vector
exec("S_hat_%d = SH_%d[:m,:].T" %(j+1,j+1))
exec("S_%d = SH_%d[m:,:].T" %(j+1,j+1))
# In[39]:
# Initial eigenvectors in X
X
# In[40]:
# Test X for full rank
X_rank = np.linalg.matrix_rank(X)
# In[41]:
all((X_rank,n))
# In[42]:
# Step X with Method 0
maxiter = 2
v2current = 0
v2prev = np.linalg.cond(X)
eps = 10e-5
flag = 0
X_j = np.zeros((n,n-1))
cond_num = np.zeros((n,1))
for r in range(maxiter):
for j in range(n):
X_j = np.delete(X,j,1)
Q,R = np.linalg.qr(X_j,mode='complete')
        y_j = Q[:,-1].reshape((n,1))
exec("S_j = S_%d" %(j+1))
x_j = (S_j.dot(S_j.T).dot(y_j) / np.linalg.norm(np.dot(S_j.T,y_j)))
X[:,j] = x_j[:,0]
cond_num[j,0] = 1 / np.abs(np.dot(y_j.T,x_j))
v2current = np.linalg.cond(X)
if ((v2current - v2prev) < eps):
print("Tolerance met")
print("v2 = %.3f" %v2current)
flag = 1
else:
v2prev = v2current
if (flag == 0):
print("Tolerance not met")
print("v2 = %.3f" %v2current)
# In[43]:
X
# In[44]:
np.linalg.matrix_rank(X)
# In[45]:
X_inv = np.linalg.inv(X)
# In[46]:
X_inv
# In[47]:
# M defined as A + BF
M = X.dot(Lambda).dot(X_inv)
# In[48]:
M
# In[49]:
# Eigenvalues of controlled system
M_eigvals, H = np.linalg.eig(M)
M_eigvals
# In[50]:
# Compute feedback matrix F
F = np.dot(np.linalg.inv(Z),np.dot(U_0.T,(M - A)))
# In[51]:
F
# In[52]:
np.linalg.norm(F)
# In[53]:
# Compute condition number norms
# In[54]:
# Inf norm
np.linalg.norm(cond_num,np.inf)
# In[55]:
# 2 norm
np.linalg.norm(cond_num)
# In[55]:
|
mit
| -1,500,753,311,782,675,700
| 9.438889
| 75
| 0.530601
| false
| 2.182346
| false
| false
| false
|
openstack/tacker
|
tacker/tests/unit/test_wsgi.py
|
1
|
25815
|
# Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import socket
import testtools
from unittest import mock
from urllib import request as urllibrequest
import webob
import webob.exc
from oslo_config import cfg
import oslo_i18n
from tacker.common import exceptions as exception
from tacker.tests import base
from tacker import wsgi
CONF = cfg.CONF
TEST_VAR_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', 'var'))
class TestWSGIServer(base.BaseTestCase):
"""WSGI server tests."""
def test_start_random_port(self):
server = wsgi.Server("test_random_port")
server.start(None, 0, host="127.0.0.1")
self.assertNotEqual(0, server.port)
server.stop()
server.wait()
def test_start_random_port_with_ipv6(self):
server = wsgi.Server("test_random_port")
server.start(None, 0, host="::1")
self.assertEqual("::1", server.host)
self.assertNotEqual(0, server.port)
server.stop()
server.wait()
def test_ipv6_listen_called_with_scope(self):
self.skipTest("Not ready yet")
server = wsgi.Server("test_app")
with mock.patch.object(wsgi.eventlet, 'listen') as mock_listen:
with mock.patch.object(socket, 'getaddrinfo') as mock_get_addr:
mock_get_addr.return_value = [
(socket.AF_INET6,
socket.SOCK_STREAM,
socket.IPPROTO_TCP,
'',
('fe80::204:acff:fe96:da87%eth0', 1234, 0, 2))
]
with mock.patch.object(server, 'pool') as mock_pool:
server.start(None,
1234,
host="fe80::204:acff:fe96:da87%eth0")
mock_get_addr.assert_called_once_with(
"fe80::204:acff:fe96:da87%eth0",
1234,
socket.AF_UNSPEC,
socket.SOCK_STREAM
)
mock_listen.assert_called_once_with(
('fe80::204:acff:fe96:da87%eth0', 1234, 0, 2),
family=socket.AF_INET6,
backlog=cfg.CONF.backlog
)
mock_pool.spawn.assert_has_calls([
mock.call(
server._run,
None,
mock_listen.return_value)
])
def test_app(self):
self.skipTest("Not ready yet")
greetings = 'Hello, World!!!'
def hello_world(env, start_response):
if env['PATH_INFO'] != '/':
start_response('404 Not Found',
[('Content-Type', 'text/plain')])
return ['Not Found\r\n']
start_response('200 OK', [('Content-Type', 'text/plain')])
return [greetings]
server = wsgi.Server("test_app")
server.start(hello_world, 0, host="127.0.0.1")
response = urllibrequest.urlopen('http://127.0.0.1:%d/' % server.port)
self.assertEqual(greetings, response.read())
server.stop()
class SerializerTest(base.BaseTestCase):
def test_serialize_unknown_content_type(self):
"""Verify that exception InvalidContentType is raised."""
input_dict = {'servers': {'test': 'pass'}}
content_type = 'application/unknown'
serializer = wsgi.Serializer()
self.assertRaises(
exception.InvalidContentType, serializer.serialize,
input_dict, content_type)
def test_get_deserialize_handler_unknown_content_type(self):
"""Verify that exception InvalidContentType is raised."""
content_type = 'application/unknown'
serializer = wsgi.Serializer()
self.assertRaises(
exception.InvalidContentType,
serializer.get_deserialize_handler, content_type)
def test_serialize_content_type_json(self):
"""Test serialize with content type json."""
input_data = {'servers': ['test=pass']}
content_type = 'application/json'
serializer = wsgi.Serializer()
result = serializer.serialize(input_data, content_type)
self.assertEqual(b'{"servers": ["test=pass"]}', result)
def test_deserialize_raise_bad_request(self):
"""Test serialize verifies that exception is raises."""
content_type = 'application/unknown'
data_string = 'test'
serializer = wsgi.Serializer()
self.assertRaises(
webob.exc.HTTPBadRequest,
serializer.deserialize, data_string, content_type)
def test_deserialize_json_content_type(self):
"""Test Serializer.deserialize with content type json."""
content_type = 'application/json'
data_string = '{"servers": ["test=pass"]}'
serializer = wsgi.Serializer()
result = serializer.deserialize(data_string, content_type)
self.assertEqual({'body': {'servers': ['test=pass']}}, result)
class RequestDeserializerTest(testtools.TestCase):
def setUp(self):
super(RequestDeserializerTest, self).setUp()
class JSONDeserializer(object):
def deserialize(self, data, action='default'):
return 'pew_json'
self.body_deserializers = {'application/json': JSONDeserializer()}
self.deserializer = wsgi.RequestDeserializer(self.body_deserializers)
def test_get_deserializer(self):
"""Test RequestDeserializer.get_body_deserializer."""
expected_json_serializer = self.deserializer.get_body_deserializer(
'application/json')
self.assertEqual(
expected_json_serializer,
self.body_deserializers['application/json'])
def test_get_expected_content_type(self):
"""Test RequestDeserializer.get_expected_content_type."""
request = wsgi.Request.blank('/')
request.headers['Accept'] = 'application/json'
self.assertEqual('application/json',
self.deserializer.get_expected_content_type(request))
def test_get_action_args(self):
"""Test RequestDeserializer.get_action_args."""
env = {
'wsgiorg.routing_args': [None, {
'controller': None,
'format': None,
'action': 'update',
'id': 12}]}
expected = {'action': 'update', 'id': 12}
self.assertEqual(expected,
self.deserializer.get_action_args(env))
def test_deserialize(self):
"""Test RequestDeserializer.deserialize."""
with mock.patch.object(
self.deserializer, 'get_action_args') as mock_method:
mock_method.return_value = {'action': 'create'}
request = wsgi.Request.blank('/')
request.headers['Accept'] = 'application/json'
deserialized = self.deserializer.deserialize(request)
expected = ('create', {}, 'application/json')
self.assertEqual(expected, deserialized)
def test_get_body_deserializer_unknown_content_type(self):
"""Verify that exception InvalidContentType is raised."""
content_type = 'application/unknown'
deserializer = wsgi.RequestDeserializer()
self.assertRaises(
exception.InvalidContentType,
deserializer.get_body_deserializer, content_type)
class ResponseSerializerTest(testtools.TestCase):
def setUp(self):
super(ResponseSerializerTest, self).setUp()
class JSONSerializer(object):
def serialize(self, data, action='default'):
return b'pew_json'
class HeadersSerializer(object):
def serialize(self, response, data, action):
response.status_int = 404
self.body_serializers = {'application/json': JSONSerializer()}
self.serializer = wsgi.ResponseSerializer(
self.body_serializers, HeadersSerializer())
def test_serialize_unknown_content_type(self):
"""Verify that exception InvalidContentType is raised."""
self.assertRaises(
exception.InvalidContentType,
self.serializer.serialize,
{}, 'application/unknown')
def test_get_body_serializer(self):
"""Verify that exception InvalidContentType is raised."""
self.assertRaises(
exception.InvalidContentType,
self.serializer.get_body_serializer, 'application/unknown')
def test_get_serializer(self):
"""Test ResponseSerializer.get_body_serializer."""
content_type = 'application/json'
self.assertEqual(self.body_serializers[content_type],
self.serializer.get_body_serializer(content_type))
def test_serialize_json_response(self):
response = self.serializer.serialize({}, 'application/json')
self.assertEqual('application/json', response.headers['Content-Type'])
self.assertEqual(b'pew_json', response.body)
self.assertEqual(404, response.status_int)
def test_serialize_response_None(self):
response = self.serializer.serialize(
None, 'application/json')
self.assertEqual('application/json', response.headers['Content-Type'])
self.assertEqual(b'', response.body)
self.assertEqual(404, response.status_int)
class RequestTest(base.BaseTestCase):
def test_content_type_missing(self):
request = wsgi.Request.blank('/tests/123', method='POST')
request.body = b"<body />"
self.assertIsNone(request.get_content_type())
def test_content_type_unsupported(self):
request = wsgi.Request.blank('/tests/123', method='POST')
request.headers["Content-Type"] = "text/html"
request.body = b"fake<br />"
self.assertIsNone(request.get_content_type())
def test_content_type_with_charset(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Content-Type"] = "application/json; charset=UTF-8"
result = request.get_content_type()
self.assertEqual("application/json", result)
def test_content_type_with_given_content_types(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Content-Type"] = "application/new-type;"
self.assertIsNone(request.get_content_type())
def test_content_type_from_accept(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/json"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/json"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/json"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = ("application/json; q=0.3, ")
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_content_type_from_query_extension(self):
request = wsgi.Request.blank('/tests/123.json')
result = request.best_match_content_type()
self.assertEqual("application/json", result)
request = wsgi.Request.blank('/tests/123.invalid')
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_content_type_accept_and_query_extension(self):
request = wsgi.Request.blank('/tests/123.json')
request.headers["Accept"] = "application/json"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_content_type_accept_default(self):
request = wsgi.Request.blank('/tests/123.unsupported')
request.headers["Accept"] = "application/unsupported1"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_content_type_accept_with_given_content_types(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/new_type"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_best_match_language(self):
        # Test that we are actually invoking language negotiation by webob
request = wsgi.Request.blank('/')
oslo_i18n.get_available_languages = mock.MagicMock()
oslo_i18n.get_available_languages.return_value = [
'known-language', 'es', 'zh']
request.headers['Accept-Language'] = 'known-language'
language = request.best_match_language()
self.assertEqual('known-language', language)
        # If the Accept-Language header is an unknown language, missing or
        # empty, the best match locale should be None
request.headers['Accept-Language'] = 'unknown-language'
language = request.best_match_language()
self.assertIsNone(language)
request.headers['Accept-Language'] = ''
language = request.best_match_language()
self.assertIsNone(language)
request.headers.pop('Accept-Language')
language = request.best_match_language()
self.assertIsNone(language)
class ActionDispatcherTest(base.BaseTestCase):
def test_dispatch(self):
"""Test ActionDispatcher.dispatch."""
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: x
self.assertEqual('pants',
serializer.dispatch('pants', action='create'))
def test_dispatch_action_None(self):
"""Test ActionDispatcher.dispatch with none action."""
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: x + ' pants'
serializer.default = lambda x: x + ' trousers'
self.assertEqual('Two trousers',
serializer.dispatch('Two', action=None))
def test_dispatch_default(self):
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: x + ' pants'
serializer.default = lambda x: x + ' trousers'
self.assertEqual('Two trousers',
serializer.dispatch('Two', action='update'))
class ResponseHeadersSerializerTest(base.BaseTestCase):
def test_default(self):
serializer = wsgi.ResponseHeaderSerializer()
response = webob.Response()
serializer.serialize(response, {'v': '123'}, 'fake')
self.assertEqual(200, response.status_int)
def test_custom(self):
class Serializer(wsgi.ResponseHeaderSerializer):
def update(self, response, data):
response.status_int = 404
response.headers['X-Custom-Header'] = data['v']
serializer = Serializer()
response = webob.Response()
serializer.serialize(response, {'v': '123'}, 'update')
self.assertEqual(404, response.status_int)
self.assertEqual('123', response.headers['X-Custom-Header'])
class DictSerializerTest(base.BaseTestCase):
def test_dispatch_default(self):
serializer = wsgi.DictSerializer()
self.assertEqual('',
serializer.serialize({}, 'NonExistentAction'))
class JSONDictSerializerTest(base.BaseTestCase):
def test_json(self):
input_dict = dict(servers=dict(a=(2, 3)))
expected_json = b'{"servers":{"a":[2,3]}}'
serializer = wsgi.JSONDictSerializer()
result = serializer.serialize(input_dict)
result = result.replace(b'\n', b'').replace(b' ', b'')
self.assertEqual(expected_json, result)
def test_json_with_unicode(self):
input_dict = dict(servers=dict(a=(2, '\u7f51\u7edc')))
expected_json = b'{"servers":{"a":[2,"\\u7f51\\u7edc"]}}'
serializer = wsgi.JSONDictSerializer()
result = serializer.serialize(input_dict)
result = result.replace(b'\n', b'').replace(b' ', b'')
self.assertEqual(expected_json, result)
class TextDeserializerTest(base.BaseTestCase):
def test_dispatch_default(self):
deserializer = wsgi.TextDeserializer()
self.assertEqual({},
deserializer.deserialize({}, 'update'))
class JSONDeserializerTest(base.BaseTestCase):
def test_json(self):
data = """{"a": {
"a1": "1",
"a2": "2",
"bs": ["1", "2", "3", {"c": {"c1": "1"}}],
"d": {"e": "1"},
"f": "1"}}"""
as_dict = {
'body': {
'a': {
'a1': '1',
'a2': '2',
'bs': ['1', '2', '3', {'c': {'c1': '1'}}],
'd': {'e': '1'},
'f': '1'}}}
deserializer = wsgi.JSONDeserializer()
self.assertEqual(as_dict,
deserializer.deserialize(data))
def test_default_raise_Malformed_Exception(self):
"""Test JsonDeserializer.default.
Test verifies JsonDeserializer.default raises exception
MalformedRequestBody correctly.
"""
data_string = ""
deserializer = wsgi.JSONDeserializer()
self.assertRaises(
exception.MalformedRequestBody, deserializer.default, data_string)
def test_json_with_utf8(self):
data = b'{"a": "\xe7\xbd\x91\xe7\xbb\x9c"}'
as_dict = {'body': {'a': '\u7f51\u7edc'}}
deserializer = wsgi.JSONDeserializer()
self.assertEqual(as_dict,
deserializer.deserialize(data))
def test_json_with_unicode(self):
data = b'{"a": "\u7f51\u7edc"}'
as_dict = {'body': {'a': '\u7f51\u7edc'}}
deserializer = wsgi.JSONDeserializer()
self.assertEqual(as_dict,
deserializer.deserialize(data))
class RequestHeadersDeserializerTest(base.BaseTestCase):
def test_default(self):
deserializer = wsgi.RequestHeadersDeserializer()
req = wsgi.Request.blank('/')
self.assertEqual({},
deserializer.deserialize(req, 'nonExistent'))
def test_custom(self):
class Deserializer(wsgi.RequestHeadersDeserializer):
def update(self, request):
return {'a': request.headers['X-Custom-Header']}
deserializer = Deserializer()
req = wsgi.Request.blank('/')
req.headers['X-Custom-Header'] = 'b'
self.assertEqual({'a': 'b'},
deserializer.deserialize(req, 'update'))
class ResourceTest(base.BaseTestCase):
@staticmethod
def my_fault_body_function():
return 'off'
class Controller(object):
def index(self, request, index=None):
return index
def test_dispatch(self):
resource = wsgi.Resource(self.Controller())
req = wsgi.Request.blank('/')
actual = resource.dispatch(
req, 'index', action_args={'index': 'off'})
expected = 'off'
self.assertEqual(expected, actual)
def test_dispatch_unknown_controller_action(self):
resource = wsgi.Resource(self.Controller(),
self.my_fault_body_function)
self.assertRaises(
AttributeError, resource.dispatch,
resource.controller, 'create', {})
def test_malformed_request_body_throws_bad_request(self):
resource = wsgi.Resource(None)
request = wsgi.Request.blank(
"/", body=b"{mal:formed", method='POST',
headers={'Content-Type': "application/json"})
response = resource(request)
self.assertEqual(400, response.status_int)
def test_wrong_content_type_throws_unsupported_media_type_error(self):
resource = wsgi.Resource(None)
request = wsgi.Request.blank(
"/", body=b"{some:json}", method='POST',
headers={'Content-Type': "xxx"})
response = resource(request)
self.assertEqual(400, response.status_int)
def test_wrong_content_type_bad_request_error(self):
resource = wsgi.Resource(self.Controller())
request = wsgi.Request.blank(
"/", method='POST', headers={'Content-Type': "unknow"})
response = resource(request)
self.assertEqual(400, response.status_int)
def test_call_resource_class_bad_request(self):
class FakeRequest(object):
def __init__(self):
self.url = 'http://where.no'
self.environ = 'environ'
self.body = 'body'
def method(self):
pass
def best_match_content_type(self):
return 'best_match_content_type'
resource = wsgi.Resource(self.Controller())
request = FakeRequest()
result = resource(request)
self.assertEqual(415, result.status_int)
def test_type_error(self):
resource = wsgi.Resource(self.Controller())
request = wsgi.Request.blank(
"/", method='GET', headers={'Content-Type': "json"})
response = resource(request)
self.assertEqual(400, response.status_int)
def test_call_resource_class_bad_request_error(self):
class FakeRequest(object):
def __init__(self):
self.url = 'http://where.no'
self.environ = 'environ'
self.body = '{"Content-Type": "json"}'
def method(self):
pass
def best_match_content_type(self):
return 'application/json'
resource = wsgi.Resource(self.Controller())
request = FakeRequest()
result = resource(request)
self.assertEqual(400, result.status_int)
class MiddlewareTest(base.BaseTestCase):
def test_process_response(self):
def application(environ, start_response):
response = 'Success'
return response
response = application('test', 'fake')
result = wsgi.Middleware(application).process_response(response)
self.assertEqual('Success', result)
class FaultTest(base.BaseTestCase):
def test_call_fault(self):
class MyException(object):
code = 415
explanation = 'test'
my_exception = MyException()
converted_exp = exception.ConvertedException(code=my_exception.code,
explanation=my_exception.explanation)
my_fault = wsgi.Fault(converted_exp)
req = wsgi.Request.blank("/", method='POST',
headers={'Content-Type': "unknow"})
response = my_fault(req)
self.assertEqual(415, response.status_int)
class TestWSGIServerWithSSL(base.BaseTestCase):
"""WSGI server tests."""
def setUp(self):
super(TestWSGIServerWithSSL, self).setUp()
self.skip("Not ready yet")
def test_app_using_ssl(self):
CONF.set_default('use_ssl', True)
CONF.set_default("ssl_cert_file",
os.path.join(TEST_VAR_DIR, 'certificate.crt'))
CONF.set_default("ssl_key_file",
os.path.join(TEST_VAR_DIR, 'privatekey.key'))
greetings = 'Hello, World!!!'
@webob.dec.wsgify
def hello_world(req):
return greetings
server = wsgi.Server("test_app")
server.start(hello_world, 0, host="127.0.0.1")
response = urllibrequest.urlopen('https://127.0.0.1:%d/' % server.port)
self.assertEqual(greetings, response.read())
server.stop()
def test_app_using_ssl_combined_cert_and_key(self):
CONF.set_default('use_ssl', True)
CONF.set_default("ssl_cert_file",
os.path.join(TEST_VAR_DIR, 'certandkey.pem'))
greetings = 'Hello, World!!!'
@webob.dec.wsgify
def hello_world(req):
return greetings
server = wsgi.Server("test_app")
server.start(hello_world, 0, host="127.0.0.1")
response = urllibrequest.urlopen('https://127.0.0.1:%d/' % server.port)
self.assertEqual(greetings, response.read())
server.stop()
def test_app_using_ipv6_and_ssl(self):
CONF.set_default('use_ssl', True)
CONF.set_default("ssl_cert_file",
os.path.join(TEST_VAR_DIR, 'certificate.crt'))
CONF.set_default("ssl_key_file",
os.path.join(TEST_VAR_DIR, 'privatekey.key'))
greetings = 'Hello, World!!!'
@webob.dec.wsgify
def hello_world(req):
return greetings
server = wsgi.Server("test_app")
server.start(hello_world, 0, host="::1")
response = urllibrequest.urlopen('https://[::1]:%d/' % server.port)
self.assertEqual(greetings, response.read())
server.stop()
|
apache-2.0
| 9,851,767,278,263,516
| 34.557851
| 79
| 0.595197
| false
| 4.205082
| true
| false
| false
|
freeipa/freeipa-pr-ci
|
tasks/test_tasks.py
|
1
|
2171
|
import os
import pytest
from .ansible import AnsiblePlaybook
from .common import PopenTask, TimeoutException, TaskException
from .vagrant import VagrantBoxDownload
def test_timeout():
PopenTask(['sleep', '0.1'])()
PopenTask(['sleep', '0.1'], timeout=None)()
PopenTask(['sleep', '0.1'], timeout=0.2)()
task = PopenTask(['sleep', '0.1'], timeout=0.01)
with pytest.raises(TimeoutException) as exc_info:
task()
assert exc_info.value.task == task
def test_fallible_task():
task = PopenTask(['ls', '/tmp/ag34feqfdafasdf'])
with pytest.raises(TaskException) as exc_info:
task()
assert exc_info.value.task == task
assert task.returncode != 0
task = PopenTask(['ls', '/tmp/ag34feqfdafasdf'], raise_on_err=False)
task()
assert task.returncode != 0
def test_popen():
task = PopenTask(['ls', '/tmp'])
task()
assert task.returncode == 0
task = PopenTask(['ls', '/tmp/adsdasafgsag'], raise_on_err=False)
task()
assert task.returncode == 2
PopenTask('for i in `seq 3`; do echo $i; done', shell=True)()
task = PopenTask('ls /tmp/$DIR', shell=True, raise_on_err=False)
task()
assert task.returncode == 0
env = dict(DIR='gfdsgsdfgsfd')
task = PopenTask('ls /tmp/$DIR', shell=True, env=env, raise_on_err=False)
task()
assert task.returncode == 2
def test_vagrant_box_download():
path = os.path.dirname(os.path.realpath(__file__))
task = VagrantBoxDownload(
vagrantfile='Vagrantfile.mock',
path=path)
vagrantfile = task.get_vagrantfile()
assert vagrantfile.vm.box == 'freeipa/ci-master-f25'
assert vagrantfile.vm.box_version == '0.2.5'
def test_ansible_playbook():
assert ' '.join(
AnsiblePlaybook(playbook='a.yml', inventory='hosts.test').cmd
) == 'ansible-playbook -i hosts.test a.yml'
assert ' '.join(
AnsiblePlaybook(playbook='a.yml', inventory='hosts.test',
extra_vars={'a': 1, 'b': 'xyz'}, verbosity='vvv').cmd
) == 'ansible-playbook -i hosts.test -e b=xyz -e a=1 a.yml -vvv'
with pytest.raises(TaskException):
AnsiblePlaybook()
|
gpl-3.0
| -5,545,033,217,916,845,000
| 27.946667
| 77
| 0.628743
| false
| 3.289394
| true
| false
| false
|
Ebag333/Pyfa
|
gui/builtinStatsViews/rechargeViewFull.py
|
1
|
5430
|
# =============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of pyfa.
#
# pyfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyfa. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
import wx
from gui.statsView import StatsView
from gui.bitmapLoader import BitmapLoader
from gui.utils.numberFormatter import formatAmount
import gui.mainFrame
import gui.builtinStatsViews.resistancesViewFull as rvf
from service.fit import Fit
class RechargeViewFull(StatsView):
name = "rechargeViewFull"
def __init__(self, parent):
StatsView.__init__(self)
self.parent = parent
self.mainFrame = gui.mainFrame.MainFrame.getInstance()
self.mainFrame.Bind(rvf.EFFECTIVE_HP_TOGGLED, self.toggleEffective)
self.effective = True
def getHeaderText(self, fit):
return "Recharge rates"
def getTextExtentW(self, text):
width, height = self.parent.GetTextExtent(text)
return width
def toggleEffective(self, event):
self.effective = event.effective
sFit = Fit.getInstance()
self.refreshPanel(sFit.getFit(self.mainFrame.getActiveFit()))
event.Skip()
def populatePanel(self, contentPanel, headerPanel):
contentSizer = contentPanel.GetSizer()
self.panel = contentPanel
self.headerPanel = headerPanel
sizerTankStats = wx.FlexGridSizer(3, 5)
for i in range(4):
sizerTankStats.AddGrowableCol(i + 1)
contentSizer.Add(sizerTankStats, 0, wx.EXPAND, 0)
# Add an empty label first for correct alignment.
sizerTankStats.Add(wx.StaticText(contentPanel, wx.ID_ANY, ""), 0)
toolTipText = {"shieldPassive": "Passive shield recharge", "shieldActive": "Active shield boost",
"armorActive": "Armor repair amount", "hullActive": "Hull repair amount"}
for tankType in ("shieldPassive", "shieldActive", "armorActive", "hullActive"):
bitmap = BitmapLoader.getStaticBitmap("%s_big" % tankType, contentPanel, "gui")
tooltip = wx.ToolTip(toolTipText[tankType])
bitmap.SetToolTip(tooltip)
sizerTankStats.Add(bitmap, 0, wx.ALIGN_CENTER)
toolTipText = {"reinforced": "Reinforced", "sustained": "Sustained"}
for stability in ("reinforced", "sustained"):
bitmap = BitmapLoader.getStaticBitmap("regen%s_big" % stability.capitalize(), contentPanel, "gui")
tooltip = wx.ToolTip(toolTipText[stability])
bitmap.SetToolTip(tooltip)
sizerTankStats.Add(bitmap, 0, wx.ALIGN_CENTER)
for tankType in ("shieldPassive", "shieldActive", "armorActive", "hullActive"):
if stability == "reinforced" and tankType == "shieldPassive":
sizerTankStats.Add(wx.StaticText(contentPanel, wx.ID_ANY, ""))
continue
tankTypeCap = tankType[0].capitalize() + tankType[1:]
lbl = wx.StaticText(contentPanel, wx.ID_ANY, "0.0", style=wx.ALIGN_RIGHT)
setattr(self, "labelTank%s%s" % (stability.capitalize(), tankTypeCap), lbl)
box = wx.BoxSizer(wx.HORIZONTAL)
box.Add(lbl, 0, wx.EXPAND)
box.Add(wx.StaticText(contentPanel, wx.ID_ANY, " HP/s"), 0, wx.EXPAND)
sizerTankStats.Add(box, 0, wx.ALIGN_CENTRE)
contentPanel.Layout()
def refreshPanel(self, fit):
        # If we did anything interesting, we'd update our labels to reflect the new fit's stats here
for stability in ("reinforced", "sustained"):
if stability == "reinforced" and fit is not None:
tank = fit.effectiveTank if self.effective else fit.tank
elif stability == "sustained" and fit is not None:
tank = fit.effectiveSustainableTank if self.effective else fit.sustainableTank
else:
tank = None
for name in ("shield", "armor", "hull"):
lbl = getattr(self, "labelTank%s%sActive" % (stability.capitalize(), name.capitalize()))
if tank is not None:
lbl.SetLabel("%.1f" % tank["%sRepair" % name])
else:
lbl.SetLabel("0.0")
if fit is not None:
label = getattr(self, "labelTankSustainedShieldPassive")
value = fit.effectiveTank["passiveShield"] if self.effective else fit.tank["passiveShield"]
label.SetLabel(formatAmount(value, 3, 0, 9))
else:
value = 0
label = getattr(self, "labelTankSustainedShieldPassive")
label.SetLabel("0")
label.SetToolTip(wx.ToolTip("%.3f" % value))
self.panel.Layout()
self.headerPanel.Layout()
RechargeViewFull.register()
|
gpl-3.0
| -1,832,182,773,703,005,200
| 41.093023
| 110
| 0.618785
| false
| 3.851064
| false
| false
| false
|
juju/juju-gui-charm
|
server/runserver.py
|
1
|
1408
|
# This file is part of the Juju GUI, which lets users view and manage Juju
# environments within a graphical interface (https://launchpad.net/juju-gui).
# Copyright (C) 2013 Canonical Ltd.
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License version 3, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranties of MERCHANTABILITY,
# SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Juju GUI server entry point.
Arguments example:
--apiurl="wss://ec2-75-101-177-185.compute-1.example.com:17070"
--apiversion="go"
--sslpath="/etc/ssl/juju-gui"
--tests_root="/var/lib/juju/agents/unit-juju-gui-0/charm/juju-gui/test/"
--insecure
--sandbox
--logging=debug|info|warning|error
--charmworldurl="https://manage.jujucharms.com/"
The --sslpath option is ignored if --insecure is set.
The --apiurl and --apiversion options are ignored if --sandbox is set.
"""
from guiserver import manage
if __name__ == '__main__':
manage.setup()
manage.run()
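
# Invocation sketch (illustrative; the flag values are taken from the
# examples listed in the module docstring above):
#
#     $ python runserver.py --sandbox --logging=debug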
|
agpl-3.0
| 2,942,294,371,774,037,500
| 36.052632
| 79
| 0.724432
| false
| 3.459459
| false
| false
| false
|
snehil/CNN_Text_Classification_Tensorflow
|
cnn/text_cnn.py
|
1
|
4202
|
import tensorflow as tf
import numpy as np
class TextCNN(object):
"""
A CNN for text classification.
Uses an embedding layer, followed by a convolutional, max-pooling and softmax layer.
"""
def __init__(
self, sequence_length, num_classes, vocab_size,
embedding_size, filter_sizes, num_filters, l2_reg_lambda = 0.0):
# Placeholders for input, output and dropout
self.input_x = tf.placeholder(tf.int32 , [None, sequence_length], name = "input_x")
self.input_y = tf.placeholder(tf.float32, [None, num_classes] , name = "input_y")
self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
# Keeping track of l2 regularization loss (optional)
l2_loss = tf.constant(0.0)
# Embedding layer (Pre-trained + learnt from training data!)
with tf.device('/cpu:0'), tf.name_scope("embedding"):
self.W = tf.Variable(tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0), name = "W")
self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)
self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)
# Create a convolution + maxpool layer for each filter size
pooled_outputs = []
for i, filter_size in enumerate(filter_sizes):
with tf.name_scope("conv-maxpool-%s" % filter_size):
# Convolution Layer
filter_shape = [filter_size, embedding_size, 1, num_filters]
W = tf.Variable(tf.truncated_normal(filter_shape, stddev = 0.1), name = "W")
b = tf.Variable(tf.constant(0.1, shape = [num_filters]), name = "b")
conv = tf.nn.conv2d(
self.embedded_chars_expanded,
W,
strides = [1, 1, 1, 1],
padding = "VALID",
name = "conv")
# Apply nonlinearity
h = tf.nn.relu(tf.nn.bias_add(conv, b), name = "relu")
# Maxpooling over the outputs
pooled = tf.nn.max_pool(
h,
ksize = [1, sequence_length - filter_size + 1, 1, 1],
strides = [1, 1, 1, 1],
padding = 'VALID',
name = "pool")
pooled_outputs.append(pooled)
# Combine all the pooled features
num_filters_total = num_filters * len(filter_sizes)
        # NOTE: pre-1.0 TensorFlow argument order (axis first); in TF >= 1.0
        # this call would be tf.concat(pooled_outputs, 3).
        self.h_pool = tf.concat(3, pooled_outputs)
self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])
# Add dropout
with tf.name_scope("dropout"):
self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)
# Final (unnormalized) scores and predictions
with tf.name_scope("output"):
W = tf.get_variable(
"W",
shape = [num_filters_total, num_classes],
initializer = tf.contrib.layers.xavier_initializer())
b = tf.Variable(tf.constant(0.1, shape = [num_classes]), name = "b")
l2_loss += tf.nn.l2_loss(W)
l2_loss += tf.nn.l2_loss(b)
self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name = "scores")
self.softmaxScores = tf.nn.softmax(self.scores, name = "softmaxScores")
self.predictions = tf.argmax(self.softmaxScores, 1, name = "predictions")
self.topKPreds = tf.nn.top_k(self.softmaxScores, k = 1, sorted = True, name = "topKPreds")
        # Calculate mean cross-entropy loss
        with tf.name_scope("loss"):
            # NOTE: pre-1.0 positional call; TF >= 1.0 requires keyword
            # arguments (logits=self.scores, labels=self.input_y).
            losses = tf.nn.softmax_cross_entropy_with_logits(self.scores, self.input_y)
self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss
# Accuracy
with tf.name_scope("accuracy"):
correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name = "accuracy")
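
# A minimal instantiation sketch (illustrative only: the hyperparameter
# values below are placeholders, and the pre-1.0 TensorFlow API used by
# this class is assumed):
#
#     cnn = TextCNN(
#         sequence_length=56, num_classes=2, vocab_size=18000,
#         embedding_size=128, filter_sizes=[3, 4, 5], num_filters=128,
#         l2_reg_lambda=0.1)
#     # A feed_dict would then map cnn.input_x, cnn.input_y and
#     # cnn.dropout_keep_prob to training batches.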
|
apache-2.0
| -3,228,817,458,301,248,500
| 47.860465
| 126
| 0.540219
| false
| 3.673077
| false
| false
| false
|
sadikovi/pulsar
|
analytics/selector/selector.py
|
1
|
9133
|
#!/usr/bin/env python
# import libs
from types import StringType, ListType
import warnings
# import classes
import analytics.utils.queryengine as q
import analytics.utils.misc as misc
from analytics.algorithms.algorithmsmap import AlgorithmsMap
from analytics.core.map.clustermap import ClusterMap
from analytics.core.map.elementmap import ElementMap
from analytics.core.map.pulsemap import PulseMap
from analytics.core.pulse import StaticPulse, DynamicPulse
# some of the tables to use for filtering
CLUSTERS = "CLUSTERS"
ELEMENTS = "ELEMENTS"
PULSES = "PULSES"
ALGORITHMS = "ALGORITHMS"
class FilterBlock(object):
"""
Simple class to update maps in batch.
Attributes:
_alg (AlgorithmsMap): map of algorithms
_pul (PulseMap): map of pulses
_clu (ClusterMap): map of clusters
_ele (ElementMap): map of elements
_isFiltered (bool): flag to show that filter block is filtered
"""
def __init__(self, algorithmsmap, pulsemap, clustermap, elementmap):
self._alg = algorithmsmap
self._pul = pulsemap
self._clu = clustermap
self._ele = elementmap
self._isFiltered = False
# [Public]
def filterWithBlock(queryset, flrblock):
"""
    Recommended method for filtering maps with a queryset. Takes care of
    the filtering order and the overall process.
Args:
queryset (str): query set
flrblock (FilterBlock): filter block with maps
"""
# check if filter block has already been filtered
if flrblock._isFiltered:
return flrblock
# extract query blocks
blocks = parseQueryset(queryset, q.QueryEngine())
if not blocks:
return flrblock
# filter blocks to match maps
ablock = None; pblock = None; cblock = None
for block in blocks:
if block._statement._table.upper() == ALGORITHMS:
ablock = block
elif block._statement._table.upper() == PULSES:
pblock = block
elif block._statement._table.upper() == CLUSTERS:
cblock = block
# use each block to parse map
flrblock._alg = filterAlgorithms(ablock, flrblock._alg)
flrblock._pul = filterPulses(pblock, flrblock._pul)
flrblock._clu = filterClusters(cblock, flrblock._clu)
flrblock._ele = filterElements(flrblock._ele, flrblock._clu, flrblock._pul)
# finished filtering
flrblock._isFiltered = True
return flrblock
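
# Usage sketch (illustrative; the four maps and the queryset string are
# assumed to be built elsewhere in the application):
#
#     flr = FilterBlock(algorithmsmap, pulsemap, clustermap, elementmap)
#     flr = filterWithBlock(queryset, flr)
#     elements = flr._ele  # elements surviving cluster and pulse filters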
# [Public]
def parseQueryset(queryset=None, engine=None):
"""
    Parses a query set. If the query set is None or not a string, it is
    reset to an empty string. If the query set is invalid, an exception is thrown.
Args:
queryset (str): query set
engine (QueryEngine): query engine to parse queryset
Returns:
list<QueryBlock>: list of query blocks
"""
if queryset is None:
queryset = ""
elif type(queryset) is not StringType:
msg = "Queryset is not a string and will be reset to empty"
warnings.warn(msg, UserWarning)
queryset = ""
else:
queryset = queryset.strip()
# query blocks
blocks = []
# check if queryset is empty, and in this case return empty list
if queryset == "":
blocks = []
else:
# return query blocks
engine = engine if type(engine) is q.QueryEngine else q.QueryEngine()
blocks = engine.parse(queryset)
return blocks
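
# Behaviour sketch, following directly from the checks above:
#
#     parseQueryset(None)   # -> []  (None is reset to an empty string)
#     parseQueryset(123)    # -> []  (warns, then reset to an empty string)
#     parseQueryset("")     # -> []
#     # any other string is handed to QueryEngine.parse()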
# [Public]
def filterAlgorithms(queryblock, algorithmsmap):
"""
Filters algorithms.
Args:
queryblock (QueryBlock): query block for algorithms
algorithmsmap (AlgorithmsMap): map of algorithms
Returns:
AlgorithmsMap: reference to updated algorithms map
"""
# if queryblock is None then do not filter at all
if queryblock is None:
return algorithmsmap
misc.checkTypeAgainst(type(queryblock), q.QueryBlock, __file__)
misc.checkTypeAgainst(type(algorithmsmap), AlgorithmsMap, __file__)
# get predicates
predicates = queryblock._predicates
# algorithm keys
akeys = []
for predicate in predicates:
ptype = predicate._type
parameter = predicate._parameter
# check only equal predicates with parameter "id"
if ptype == q._PREDICATE_TYPES.EQUAL and parameter.upper() == "ID":
values = predicate._values
            akeys.append(values[0])
# remove keys that are not selected
for key in algorithmsmap.keys():
if key not in akeys:
algorithmsmap.remove(key)
return algorithmsmap
# [Public]
def filterPulses(queryblock, pulsemap):
"""
Filters pulses.
Args:
queryblock (QueryBlock): query block for pulses
pulsemap (PulseMap): map of pulses
Returns:
PulseMap: reference to updated pulses map
"""
# if queryblock is None then do not filter at all
if queryblock is None:
return pulsemap
misc.checkTypeAgainst(type(queryblock), q.QueryBlock, __file__)
misc.checkTypeAgainst(type(pulsemap), PulseMap, __file__)
# get predicates
predicates = queryblock._predicates
# check assign predicates first
for predicate in predicates:
ptype = predicate._type
if ptype == q._PREDICATE_TYPES.ASSIGN:
values = predicate._values
pulse = pulsemap.get(predicate._parameter)
if pulse is not None and type(pulse) is DynamicPulse:
                pulse.setStatic(values[0].upper() != "DYNAMIC")
# check equal predicate
for predicate in predicates:
ptype = predicate._type
# check equal predicate
if ptype == q._PREDICATE_TYPES.EQUAL:
pulse = pulsemap.get(predicate._parameter)
if pulse is not None:
values = predicate._values
_passed = pulse.setDefaultValue(values[0])
# 30.03.2015 ivan.sadikov: added issue#27 fix
# reporting warning, if value is incorrect
if not _passed:
_n = pulse.name(); _v = str(values[0])
msg = "Pulse %s cannot set value %s as default" %(_n, _v)
warnings.warn(msg, UserWarning)
# return updated pulsemap
return pulsemap
# [Public]
def filterClusters(queryblock, clustermap):
"""
Filters clusters.
Args:
queryblock (QueryBlock): query block for clusters
clustermap (ClusterMap): map of clusters
Returns:
ClusterMap: reference to updated clusters map
"""
# if queryblock is None then do not filter at all
if queryblock is None:
return clustermap
misc.checkTypeAgainst(type(queryblock), q.QueryBlock, __file__)
misc.checkTypeAgainst(type(clustermap), ClusterMap, __file__)
# storing clusters
clusters = []
# get predicates
predicates = queryblock._predicates
for predicate in predicates:
ptype = predicate._type
parameter = predicate._parameter
if ptype == q._PREDICATE_TYPES.EQUAL and parameter.upper() == "ID":
values = predicate._values
if clustermap.has(values[0]):
clusters.append(values[0])
# filter clusters
updatedmap = ClusterMap()
for key in clusters:
if not updatedmap.has(key):
updatedmap.add(clustermap.get(key))
# return updated cluster map
return updatedmap
# [Public]
def filterElements(elementmap, clustermap, pulsemap):
"""
Filters elements using cluster map and pulse map.
Args:
elementmap (ElementMap): map of elements
clustermap (ClusterMap): filtered map of clusters
pulsemap (PulseMap): filtered map of pulses
Returns:
ElementMap: reference to updated element map
"""
misc.checkTypeAgainst(type(elementmap), ElementMap, __file__)
misc.checkTypeAgainst(type(clustermap), ClusterMap, __file__)
misc.checkTypeAgainst(type(pulsemap), PulseMap, __file__)
# filter by clusters
elements = elementmap._map.values()
for element in elements:
parent = element.cluster()
if parent is None or not clustermap.has(parent.id()):
elementmap.remove(element.id())
# filter by pulses
elements = elementmap._map.values()
    # "is selectable" closure: a pulse participates in filtering
    # only when it has a default value set
    def isselectable(x):
        if type(x) is DynamicPulse and x.static() is True:
            return x.default() is not None
        elif type(x) is StaticPulse:
            return x.default() is not None
        else:
            return False
pulses = [x for x in pulsemap._map.values() if isselectable(x)]
for element in elements:
toRemove = False
for pulse in pulses:
feature = element._features[pulse.id()]
if feature is None or feature.value() != pulse.default():
toRemove = True
if toRemove:
elementmap.remove(element.id())
# return element map
return elementmap
|
apache-2.0
| -2,103,489,210,357,787,600
| 33.464151
| 79
| 0.630461
| false
| 4.197151
| false
| false
| false
|
mikf/gallery-dl
|
gallery_dl/extractor/bcy.py
|
1
|
6802
|
# -*- coding: utf-8 -*-
# Copyright 2020-2021 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Extractors for https://bcy.net/"""
from .common import Extractor, Message
from .. import text
import json
import re
class BcyExtractor(Extractor):
"""Base class for bcy extractors"""
category = "bcy"
directory_fmt = ("{category}", "{user[id]} {user[name]}")
filename_fmt = "{post[id]} {id}.{extension}"
archive_fmt = "{post[id]}_{id}"
root = "https://bcy.net"
def __init__(self, match):
Extractor.__init__(self, match)
self.item_id = match.group(1)
def items(self):
sub = re.compile(r"^https?://p\d+-bcy\.byteimg\.com/img/banciyuan").sub
iroot = "https://img-bcy-qn.pstatp.com"
noop = self.config("noop")
for post in self.posts():
if not post["image_list"]:
continue
multi = None
tags = post.get("post_tags") or ()
data = {
"user": {
"id" : post["uid"],
"name" : post["uname"],
"avatar" : sub(iroot, post["avatar"].partition("~")[0]),
},
"post": {
"id" : text.parse_int(post["item_id"]),
"tags" : [t["tag_name"] for t in tags],
"date" : text.parse_timestamp(post["ctime"]),
"parody" : post["work"],
"content": post["plain"],
"likes" : post["like_count"],
"shares" : post["share_count"],
"replies": post["reply_count"],
},
}
yield Message.Directory, data
for data["num"], image in enumerate(post["image_list"], 1):
data["id"] = image["mid"]
data["width"] = image["w"]
data["height"] = image["h"]
url = image["path"].partition("~")[0]
text.nameext_from_url(url, data)
if data["extension"]:
if not url.startswith(iroot):
url = sub(iroot, url)
data["filter"] = ""
yield Message.Url, url, data
else:
if not multi:
if len(post["multi"]) < len(post["image_list"]):
multi = self._data_from_post(post["item_id"])
multi = multi["post_data"]["multi"]
else:
multi = post["multi"]
image = multi[data["num"] - 1]
if image["origin"]:
data["filter"] = "watermark"
yield Message.Url, image["origin"], data
if noop:
data["extension"] = ""
data["filter"] = "noop"
yield Message.Url, image["original_path"], data
def posts(self):
"""Returns an iterable with all relevant 'post' objects"""
def _data_from_post(self, post_id):
url = "{}/item/detail/{}".format(self.root, post_id)
page = self.request(url).text
return json.loads(
text.extract(page, 'JSON.parse("', '");')[0]
.replace('\\\\u002F', '/')
.replace('\\"', '"')
)["detail"]
class BcyUserExtractor(BcyExtractor):
"""Extractor for user timelines"""
subcategory = "user"
pattern = r"(?:https?://)?bcy\.net/u/(\d+)"
test = (
("https://bcy.net/u/1933712", {
"pattern": r"https://img-bcy-qn.pstatp.com/\w+/\d+/post/\w+/.+jpg",
"count": ">= 20",
}),
("https://bcy.net/u/109282764041", {
"pattern": r"https://p\d-bcy.byteimg.com/img/banciyuan/[0-9a-f]+"
r"~tplv-banciyuan-logo-v3:.+\.image",
"range": "1-25",
"count": 25,
}),
)
def posts(self):
url = self.root + "/apiv3/user/selfPosts"
params = {"uid": self.item_id, "since": None}
while True:
data = self.request(url, params=params).json()
try:
items = data["data"]["items"]
except KeyError:
return
if not items:
return
for item in items:
yield item["item_detail"]
params["since"] = item["since"]
class BcyPostExtractor(BcyExtractor):
"""Extractor for individual posts"""
subcategory = "post"
pattern = r"(?:https?://)?bcy\.net/item/detail/(\d+)"
test = (
("https://bcy.net/item/detail/6355835481002893070", {
"url": "301202375e61fd6e0e2e35de6c3ac9f74885dec3",
"count": 1,
"keyword": {
"user": {
"id" : 1933712,
"name" : "wukloo",
"avatar" : "re:https://img-bcy-qn.pstatp.com/Public/",
},
"post": {
"id" : 6355835481002893070,
"tags" : list,
"date" : "dt:2016-11-22 08:47:46",
"parody" : "东方PROJECT",
"content": "re:根据微博的建议稍微做了点修改",
"likes" : int,
"shares" : int,
"replies": int,
},
"id": 8330182,
"num": 1,
"width" : 3000,
"height": 1687,
"filename": "712e0780b09011e696f973c3d1568337",
"extension": "jpg",
},
}),
# only watermarked images available
("https://bcy.net/item/detail/6950136331708144648", {
"pattern": r"https://p\d-bcy.byteimg.com/img/banciyuan/[0-9a-f]+"
r"~tplv-banciyuan-logo-v3:.+\.image",
"count": 8,
"keyword": {"filter": "watermark"},
}),
# deleted
("https://bcy.net/item/detail/6780546160802143236", {
"count": 0,
}),
# only visible to logged in users
("https://bcy.net/item/detail/6747523535150783495", {
"count": 0,
}),
)
def posts(self):
try:
data = self._data_from_post(self.item_id)
except KeyError:
return ()
post = data["post_data"]
post["image_list"] = post["multi"]
post["plain"] = text.parse_unicode_escapes(post["plain"])
post.update(data["detail_user"])
return (post,)
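
# Usage sketch: these extractors are normally driven by the gallery-dl CLI,
# e.g. (URL taken from the test cases above):
#
#     $ gallery-dl "https://bcy.net/item/detail/6355835481002893070"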
|
gpl-2.0
| -6,658,767,170,279,108,000
| 33.186869
| 79
| 0.448663
| false
| 3.804947
| false
| false
| false
|