| code (string, 3-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 3-1.05M) |
|---|---|---|---|---|---|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Authors : Roberto Majadas <roberto.majadas@openshine.com>
# Cesar Garcia Tapia <tapia@openshine.com>
#
# Copyright (c) 2003-2012, Telefonica Móviles España S.A.U.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this library; if not, write to the Free
# Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
import os
import gobject
import sqlite3
import re
import xml.etree.ElementTree
from xml.etree.ElementTree import ElementTree
import tgcm
import Config
import Singleton
from tgcm.ui.MSD.MSDUtils import error_dialog
months = [_("January"), _("February"), _("March"), _("April"), _("May"), _("June"),
_("July"), _("August") ,_("September"), _("October"), _("November"), _("December")]
#User-defined REGEXP operator
def regexp(expr, item):
r = re.compile(expr)
return r.match(item) is not None
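# Illustrative usage (a sketch, not part of the original module): once the
# function is registered with sqlite3, REGEXP becomes usable in SQL, e.g.
#   db.create_function("regexp", 2, regexp)
#   db.execute("select * from hotspots where name REGEXP '^Starbucks'")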
class QueryError(Exception):
pass
class HotSpotsService (gobject.GObject):
__metaclass__ = Singleton.Singleton
__gsignals__ = {
'hotspots-updated' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
}
VALUE_ALL = _('All')
class _InitCheck():
def __init__(self, retval):
self.__retval = retval
def __call__(self, func):
default_return_value = self.__retval
def newf(self, *args, **kwargs):
if self.hotspots_db is None:
return default_return_value
return func(self, *args, **kwargs)
return newf
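    # Illustrative note: the data-access methods below are wrapped with this
    # decorator so they degrade gracefully when no database is available, e.g.
    #   @_InitCheck([])
    #   def get_provinces(self, ...):   # returns [] when hotspots_db is None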
def __init__(self):
gobject.GObject.__init__(self)
self.hotspots_db = None
self.hotspots_db_file = os.path.join(tgcm.config_dir, "hotspot-%s.db" % tgcm.country_support)
if os.path.exists(self.hotspots_db_file):
self.hotspots_db = sqlite3.connect(self.hotspots_db_file)
self.hotspots_db.create_function("regexp", 2, regexp)
else:
regional_hotspot_file = os.path.join(tgcm.regional_info_dir, tgcm.country_support, "hotspot-list.xml")
if os.path.exists(regional_hotspot_file):
self.register_new_hotspot_file(regional_hotspot_file, self.hotspots_db_file)
self.hotspots_db = sqlite3.connect(self.hotspots_db_file)
self.hotspots_db.create_function("regexp", 2, regexp)
def register_new_hotspot_file(self, in_file, out_file=None):
out_file = self.hotspots_db_file if (out_file is None) else out_file
try:
tree = ElementTree()
tree.parse(in_file)
except (xml.etree.ElementTree.ParseError, sqlite3.OperationalError), err:
# -- This failure appears when importing an empty file. In that case ignore the failure
# -- but don't forget to create the database!
tgcm.warning("@WARNING: Importing hotspots file '%s', %s" % (in_file, err))
return False
except Exception, err:
config = Config.Config()
srv_name = config.get_wifi_service_name()
error_dialog(_("The hotspots list of %s will not be available due an import error.") % srv_name, \
markup = _("Unexpected error reading hotspots"), \
threaded = False)
return False
finally:
self.hotspots_db = self.__create_db_table(out_file)
# -- Start processing the input data
root = tree.getroot()
c = self.hotspots_db.cursor()
c.execute('''insert into metadata values ("date", "%s")''' % root.attrib["date"])
for node in root :
if node.tag == "country" :
country = node.attrib["name"]
for node_s in node :
if node_s.tag == "state" :
state = node_s.attrib["name"]
for node_c in node_s :
city = node_c.attrib["name"]
if node_c.tag == "city" :
for node_h in node_c :
if node_h.tag == "hotspot" :
self.__hotspot_to_sqlite(c, country, state, city, node_h)
self.hotspots_db.commit()
self.emit("hotspots-updated")
return True
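    # Illustrative shape of the XML consumed above (inferred from the
    # traversal; the root tag name is an assumption):
    #   <hotspots date="YYYY-MM-DD">
    #     <country name="..."><state name="..."><city name="...">
    #       <hotspot><provider/><name/><address/><zipcode/><type/></hotspot>
    #     </city></state></country>
    #   </hotspots>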
def get_states_list(self):
        if self.hotspots_db is None:
return []
c = self.hotspots_db.cursor()
c.execute('select distinct state from hotspots order by state')
l = [self.VALUE_ALL]
for row in c :
l.append(row[0])
return l
#@staticmethod
def __create_query_condition(self, province=None, city=None, zipcode=None):
query = [ ]
if (province is not None) and (province != self.VALUE_ALL):
query.append("state = '%s'" % province)
if (city is not None) and (city != self.VALUE_ALL):
query.append("city = '%s'" % city)
if (zipcode is not None) and (zipcode != self.VALUE_ALL):
query.append("zipcode = '%s'" % zipcode)
if len(query) == 0:
raise QueryError
return ' and '.join(query)
#@staticmethod
def __create_return_list(self, cursor, all_first=True):
retval = [ ]
for row in cursor:
retval.append(row[0])
retval.sort()
if all_first is True:
retval = [self.VALUE_ALL] + retval
return retval
def get_cities_list(self):
        if self.hotspots_db is None:
return []
c = self.hotspots_db.cursor()
c.execute('select distinct city from hotspots order by city')
l = [self.VALUE_ALL]
for row in c :
l.append(row[0])
return l
def get_types_list(self):
        if self.hotspots_db is None:
return []
c = self.hotspots_db.cursor()
c.execute('select distinct type from hotspots order by type')
l = [self.VALUE_ALL]
for row in c :
l.append(row[0])
return l
def __create_query_select(self, column):
return "select DISTINCT %s FROM hotspots" % column
@_InitCheck([ ])
def get_provinces(self, city=None, zipcode=None):
try:
cmd = self.__create_query_select('state')
query = "%s where (%s)" % (cmd, self.__create_query_condition(None, city, zipcode))
except QueryError:
query = cmd
cursor = self.hotspots_db.cursor()
cursor.execute(query)
return self.__create_return_list(cursor, all_first=True)
def get_provinces_of_city(self, city):
return self.get_provinces(city=city)
def get_provinces_of_zipcode(self, zipcode):
return self.get_provinces(zipcode=zipcode)
@_InitCheck([ ])
def get_cities(self, province=None, zipcode=None):
try:
cmd = self.__create_query_select('city')
query = "%s where (%s)" % (cmd, self.__create_query_condition(province, None, zipcode))
except QueryError:
query = cmd
cursor = self.hotspots_db.cursor()
cursor.execute(query)
return self.__create_return_list(cursor, all_first=True)
def get_cities_of_province(self, province):
return self.get_cities(province=province)
def get_cities_of_zipcode(self, zipcode):
return self.get_cities(zipcode=zipcode)
@_InitCheck([ ])
def get_zipcodes(self, province=None, city=None):
try:
cmd = self.__create_query_select('zipcode')
query = "%s where (%s)" % (cmd, self.__create_query_condition(province, city, None))
except QueryError:
query = cmd
cursor = self.hotspots_db.cursor()
cursor.execute(query)
return self.__create_return_list(cursor, all_first=True)
def get_zipcodes_of_province(self, province):
return self.get_zipcodes(province=province)
def get_zipcodes_of_city(self, city):
return self.get_zipcodes(city=city)
def get_zipcodes_list(self):
        if self.hotspots_db is None:
return []
c = self.hotspots_db.cursor()
c.execute('select distinct zipcode from hotspots order by zipcode')
l = [self.VALUE_ALL]
for row in c :
l.append(row[0])
return l
def get_update_date(self):
        if self.hotspots_db is None:
return "--"
c = self.hotspots_db.cursor()
c.execute('select value from metadata where key = "date"')
for row in c :
y = row[0].split("-")[0]
#m = months[int(row[0].split("-")[1]) - 1]
m = row[0].split("-")[1]
d = row[0].split("-")[2]
return '%s/%s/%s' % (d, m, y)
return None
def search_hotspots(self, state=None, city=None, t=None, zipcode=None, location=None):
c = self.hotspots_db.cursor()
sql = "select * from hotspots"
        if state is not None or city is not None or t is not None or zipcode is not None or location is not None:
sql += " where "
count = 0
        if state is not None:
if count > 0:
sql += " and "
s_state = state + "%"
sql += "state LIKE '%s'" % s_state
count += 1
        if city is not None:
if count > 0:
sql += " and "
s_city = city + "%"
sql += "city LIKE '%s'" % s_city
count += 1
        if t is not None:
if count > 0:
sql += " and "
s_type = "%%%s%%" % t
sql += "type LIKE '%s'" % s_type
count += 1
        if zipcode is not None:
if count > 0:
sql += " and "
s_zipcode = "%%%s%%" % zipcode
sql += "zipcode LIKE '%s'" % s_zipcode
count += 1
        if location is not None:
if count > 0:
sql += " and "
s_location = "%%%s%%" % location
sql += "name LIKE '%s'" % s_location
count += 1
c.execute(sql)
#print sql
ret_list = []
for row in c :
name = "<b>%s</b>\n<small>%s</small>\n<small>%s</small>\n<small>%s - %s - %s</small>" % (row[4],
row[3],
row[5],
row[6],
row[2],
row[1])
ret_list.append(name)
return ret_list
def __create_db_table(self, destination):
# -- If the file already exists, remove it first
if os.path.exists(destination):
os.unlink(destination)
# -- Create the connection to the new file
conn = sqlite3.connect(destination)
#-- Now create the table with the default columns
c = conn.cursor()
c.execute('''create table hotspots (country text, state text, city text, provider text, name text, address text, zipcode text, type text)''')
c.execute('''create table metadata (key text, value text)''')
return conn
def __hotspot_to_sqlite(self, cursor, country, state, city, node_h):
d = {'provider' : '',
'name' : '',
'address' : '',
'zipcode' : '',
'type' : ''
}
for node in node_h :
d[node.tag] = node.text
cursor.execute('''insert into hotspots
values ("%s","%s","%s","%s","%s","%s","%s","%s")''' % (country, state, city,
d["provider"],
d["name"],
d["address"],
d["zipcode"],
d["type"]))
gobject.type_register(HotSpotsService)
if __name__ == '__main__':
x = HotSpotsService()
x.register_new_hotspot_file("/usr/share/tgcm/regional-info/uk/hotspot-list.xml", "/tmp/h.db")
| tgcmteam/tgcmlinux | src/tgcm/core/HotSpotsService.py | Python | gpl-2.0 | 13,115 |
# -*- coding: UTF-8 -*-
# ..#######.########.#######.##....#..######..######.########....###...########.#######.########..######.
# .##.....#.##.....#.##......###...#.##....#.##....#.##.....#...##.##..##.....#.##......##.....#.##....##
# .##.....#.##.....#.##......####..#.##......##......##.....#..##...##.##.....#.##......##.....#.##......
# .##.....#.########.######..##.##.#..######.##......########.##.....#.########.######..########..######.
# .##.....#.##.......##......##..###.......#.##......##...##..########.##.......##......##...##........##
# .##.....#.##.......##......##...##.##....#.##....#.##....##.##.....#.##.......##......##....##.##....##
# ..#######.##.......#######.##....#..######..######.##.....#.##.....#.##.......#######.##.....#..######.
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @Daddy_Blamo wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: Mr.Blamo
import re
import urllib
import urlparse
from openscrapers.modules import cleantitle
from openscrapers.modules import client
from openscrapers.modules import dom_parser
from openscrapers.modules import source_utils
class source:
def __init__(self):
self.priority = 1
self.language = ['de']
self.domains = ['movie2z.to']
self.base_link = 'https://www.movie2z.to/de/'
self.search_link = 'search-%s.html'
self.get_link = 'redirect.php?a=m&id=%s'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = self.__search([localtitle] + source_utils.aliases_to_array(aliases))
if not url and title != localtitle: url = self.__search([title] + source_utils.aliases_to_array(aliases))
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = self.__search([localtvshowtitle] + source_utils.aliases_to_array(aliases))
if not url and tvshowtitle != localtvshowtitle: url = self.__search(
[tvshowtitle] + source_utils.aliases_to_array(aliases))
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if not url:
return
url = url[:-1] if url.endswith('/') else url
url += '/%d/%d/' % (int(season), int(episode))
return url
except:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if not url:
return sources
query = urlparse.urljoin(self.base_link, url)
r = client.request(query)
r = dom_parser.parse_dom(r, 'ul', attrs={'id': 'mainmenu'})
r = dom_parser.parse_dom(r, 'li')
for i in r:
i = dom_parser.parse_dom(i, 'a')
i = i[0][0]['href']
i = client.request(i)
i = dom_parser.parse_dom(i, 'select', attrs={'id': 'selecthost'})
i = dom_parser.parse_dom(i, 'option')
for x in i:
hoster = re.search('^\S*', x[1]).group().lower()
url = x[0]['value']
valid, hoster = source_utils.is_host_valid(hoster, hostDict)
if not valid: continue
sources.append({'source': hoster, 'quality': 'SD', 'language': 'de', 'url': url, 'direct': False,
'debridonly': False})
return sources
except:
return sources
def resolve(self, url):
url = url.replace('amp;', '')
url = client.request(url, output='geturl')
return url
def __search(self, titles):
try:
query = self.search_link % (urllib.quote_plus(urllib.quote_plus(cleantitle.query(titles[0]))))
query = urlparse.urljoin(self.base_link, query)
t = [cleantitle.get(i) for i in set(titles) if i]
post = urllib.urlencode({'movlang_de': '1', 'movlang': ''})
r = client.request(query, post=post)
r = dom_parser.parse_dom(r, 'table', attrs={'class': 'table'})
r = dom_parser.parse_dom(r, 'a', attrs={'class': 'PreviewImage'})
for x in r:
title = cleantitle.get(x[1])
if title in t:
return source_utils.strip_domain(x[0]['href'])
return
except:
return
| repotvsupertuga/tvsupertuga.repository | script.module.openscrapers/lib/openscrapers/sources_openscrapers/de/movie2z.py | Python | gpl-2.0 | 5,033 |
# Copyright [2015] Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from trove.common import cfg
from trove.common import exception
from trove.common.remote import create_guest_client
from trove.common import utils
from trove.db import get_db_api
from trove.guestagent.db import models as guest_models
from trove.instance import models as base_models
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def load_and_verify(context, instance_id):
    # Load InstanceServiceStatus to verify that it is running
instance = base_models.Instance.load(context, instance_id)
if not instance.is_datastore_running:
raise exception.UnprocessableEntity(
"Instance %s is not ready." % instance.id)
else:
return instance
class Root(object):
@classmethod
def load(cls, context, instance_id):
load_and_verify(context, instance_id)
# TODO(pdmars): remove the is_root_enabled call from the guest agent,
# just check the database for this information.
# If the root history returns null or raises an exception, the root
# user hasn't been enabled.
try:
root_history = RootHistory.load(context, instance_id)
except exception.NotFound:
return False
if not root_history:
return False
return True
@classmethod
def create(cls, context, instance_id, user, root_password,
cluster_instances_list=None):
load_and_verify(context, instance_id)
if root_password:
root = create_guest_client(context,
instance_id).enable_root_with_password(
root_password)
else:
root = create_guest_client(context, instance_id).enable_root()
root_user = guest_models.RootUser()
root_user.deserialize(root)
        # If cluster_instances_list is None, root create was called for a
        # single instance, so add a RootHistory entry for the instance_id
if cluster_instances_list is None:
RootHistory.create(context, instance_id, user)
return root_user
class RootHistory(object):
_auto_generated_attrs = ['id']
_data_fields = ['instance_id', 'user', 'created']
_table_name = 'root_enabled_history'
def __init__(self, instance_id, user):
self.id = instance_id
self.user = user
self.created = utils.utcnow()
def save(self):
LOG.debug("Saving %(name)s: %(dict)s" %
{'name': self.__class__.__name__, 'dict': self.__dict__})
return get_db_api().save(self)
@classmethod
def load(cls, context, instance_id):
history = get_db_api().find_by(cls, id=instance_id)
return history
@classmethod
def create(cls, context, instance_id, user):
history = cls.load(context, instance_id)
if history is not None:
return history
history = RootHistory(instance_id, user)
return history.save()
| fabian4/trove | trove/extensions/common/models.py | Python | apache-2.0 | 3,639 |
#!/usr/bin/python
#------------------------------------------------------------------------------
# DP Solution
#------------------------------------------------------------------------------
class Solution:
def isMatch(self, s, p):
"""
:type s: str
:type p: str
:rtype: bool
"""
# 2D Cache to store the results
# Each row is a char of the pattern
# Each col is a char of the string
table = [[False] * (len(s) + 1) for _ in range(len(p) + 1)]
# Update the corner case of matching two empty strings.
table[0][0] = True
        # Update the corner case where s is an empty string but p is not.
        # Each "x*" pair can match zero occurrences, eliminating itself.
for i in range(2, len(p) + 1):
table[i][0] = table[i - 2][0] and p[i - 1] == '*'
for i in range(1, len(p) + 1):
for j in range(1, len(s) + 1):
if p[i - 1] != "*":
                    # Update the table by referring to the diagonal element.
table[i][j] = table[i - 1][j - 1] and \
(p[i - 1] == s[j - 1] or p[i - 1] == '.')
else:
# For the pattern:
                    # If the pattern char is a *, it will be true if:
                    # Case 1: the previous char matched (1+ occurrences of the starred char)
                    # Case 2: the char before the previous matched (0 occurrences of the starred char)
table[i][j] = table[i - 2][j] or table[i - 1][j]
# For the string:
# Case 1: * char is equal to the current string char
# Case 2: the * char is a . so accept everything
if p[i - 2] == s[j - 1] or p[i - 2] == '.':
table[i][j] = table[i][j] or table[i][j - 1]
return table[-1][-1]
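# Illustrative sanity checks for the DP solution (classic examples, not part of
# the original file):
#   Solution().isMatch("aa", "a") -> False
#   Solution().isMatch("aab", "c*a*b") -> True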
#------------------------------------------------------------------------------
# My cheaty one liner
# re.match matches a regular expression to a string. The first parameter is the pattern,
# the second is the string, and the third is any special rules used. I concatenate '^'
# and '$' to the pattern because those signify the start and ending of the line. I then
# use re.M as the rule because it enforces the ^ and $ match for the line. It returns
# the match object on success and None on failure, so I can just convert this to a boolean
# because we only want a true/false value.
#------------------------------------------------------------------------------
import re
class Solution:
def isMatch(self, s, p):
"""
:type s: str
:type p: str
:rtype: bool
"""
# Simple RegEx match enforcing the match to be the entire line
return bool(re.match('^'+p+'$', s, re.M))
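# Illustrative: the added anchors force a whole-string match, so e.g.
#   Solution().isMatch("mississippi", "mis*is*p*.") -> False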
#------------------------------------------------------------------------------
#Testing | kyle8998/Practice-Coding-Questions | leetcode/10-Hard-Regular-Expression-Matching/answer.py | Python | unlicense | 2,948 |
#!/usr/bin/env python
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Barbican Keystone notification listener server.
"""
import eventlet
import os
import sys
# Oslo messaging notification server uses eventlet.
#
# To enable remote debugging, the thread module needs to be disabled:
# eventlet.monkey_patch(thread=False)
eventlet.monkey_patch()
# 'Borrowed' from the Glance project:
# If ../barbican/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'barbican', '__init__.py')):
sys.path.insert(0, possible_topdir)
from barbican.common import config
from barbican.openstack.common import log
from barbican.openstack.common import service
from barbican import queue
from barbican.queue import keystone_listener
from oslo_config import cfg
def fail(returncode, e):
sys.stderr.write("ERROR: {0}\n".format(e))
sys.exit(returncode)
if __name__ == '__main__':
try:
config.parse_args()
config.setup_remote_pydev_debug()
# Import and configure logging.
log.setup('barbican')
LOG = log.getLogger(__name__)
LOG.info("Booting up Barbican Keystone listener node...")
# Queuing initialization
CONF = cfg.CONF
queue.init(CONF)
if getattr(getattr(CONF, queue.KS_NOTIFICATIONS_GRP_NAME), 'enable'):
service.launch(
keystone_listener.MessageServer(CONF)
).wait()
else:
LOG.info("Exiting as Barbican Keystone listener is not enabled...")
except RuntimeError as e:
fail(1, e)
| jmvrbanac/barbican | bin/barbican-keystone-listener.py | Python | apache-2.0 | 2,403 |
# Definition for a point.
# class Point:
# def __init__(self, a=0, b=0):
# self.x = a
# self.y = b
class Solution:
def gcd(self,x,y):
if y==0 :
return x
else:
return self.gcd(y, x%y)
def helper(self,a,b):
if a.x-b.x==0:
return (a.x,'max')
else:
if a.y-b.y==0:
k=(0,0)
else:
tmp=self.gcd(a.y-b.y,a.x-b.x)
k=(int((a.y-b.y)/tmp),int((a.x-b.x)/tmp))
if k[1]<0:
k=(-k[0],-k[1])
if a.y*b.x-b.y*a.x==0:
b=(0,0)
else:
tmp=self.gcd(a.y*b.x-b.y*a.x,b.x-a.x)
b=(int((a.y*b.x-b.y*a.x)/tmp),int((b.x-a.x)/tmp))
if b[1]<0:
b=(-b[0],-b[1])
return k+b
def maxPoints(self, points):
"""
:type points: List[Point]
:rtype: int
"""
num_dict={}
i=0
while i<len(points):
if (points[i].x,points[i].y) in num_dict:
num_dict[(points[i].x,points[i].y)]+=1
points=points[:i]+points[i+1:]
else:
num_dict[(points[i].x,points[i].y)]=1
i+=1
if len(points)==0:
return 0
if len(points)==1:
return num_dict[(points[0].x,points[0].y)]
pointsdict={}
countdict={}
for i in range(len(points)):
for j in range(i+1,len(points)):
tmp=self.helper(points[i],points[j])
if tmp in pointsdict:
if (points[i].x,points[i].y) not in pointsdict[tmp]:
pointsdict[tmp].add((points[i].x,points[i].y))
countdict[tmp]+=num_dict[(points[i].x,points[i].y)]
if (points[j].x,points[j].y) not in pointsdict[tmp]:
pointsdict[tmp].add((points[j].x,points[j].y))
countdict[tmp]+=num_dict[(points[j].x,points[j].y)]
else:
pointsdict[tmp]=set([(points[i].x,points[i].y),(points[j].x,points[j].y)])
countdict[tmp]=num_dict[(points[i].x,points[i].y)]+num_dict[(points[j].x,points[j].y)]
#print(num_dict,pointsdict,countdict)
res=1
for i in countdict:
res=max(res,countdict[i])
return res
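# Illustrative check (with hypothetical Point(a, b) instances): the three
# collinear points (1,1), (2,2) and (3,3) give maxPoints(...) == 3.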
| Hehwang/Leetcode-Python | code/149 Max Points on a Line.py | Python | mit | 2,490 |
a = u'LNsdsdsdfSA'
if a[0:2].lower() == u'ln':
print 'dfdfdf'
fn = open(u'1.txt')
num = 0
ln = 0
lx = 0
lm = 0
for line in fn.readlines():
if line[0:2].lower() == u'ln':
ln = ln + 1
if line[0:2].lower() == u'lx':
lx = lx + 1
if line[0:2].lower() == u'lm':
lm = lm + 1
print line[0:2].lower()
print len(line[0:2])
print u'ln: %d'%(ln)
print u'lx: %d'%(lx)
print u'lm: %d'%(lm)
print ln
print lx
print lm | softtyphoon/tz | tools/read_file.py | Python | gpl-2.0 | 432 |
import tinctest
from mpp.models import SQLTestCase
class SampleSQLMockTest(SQLTestCase):
"""
@gpdiff No
"""
sql_dir = 'sql/'
pass
| cjcjameson/gpdb | src/test/tinc/tincmmgr/test/e2e/sample/sample_tincmm_sql_tests.py | Python | apache-2.0 | 152 |
# Copyright (c) 2010-2014 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: see AUTHORS file
# Python stdlib imports
from datetime import time, datetime
from tempfile import NamedTemporaryFile
import os
import os.path
import shutil
# 3rd party imports
from nose.tools import eq_, raises
from openpyxl.workbook import Workbook
from openpyxl.writer import dump_worksheet
from openpyxl.cell import get_column_letter
from openpyxl.reader.excel import load_workbook
from openpyxl.writer.strings import StringTableBuilder
from openpyxl.shared.compat import xrange
from openpyxl.shared.exc import WorkbookAlreadySaved
def _get_test_filename():
test_file = NamedTemporaryFile(mode='w', prefix='openpyxl.', suffix='.xlsx', delete=False)
test_file.close()
return test_file.name
def test_dump_sheet_title():
test_filename = _get_test_filename()
wb = Workbook(optimized_write=True)
ws = wb.create_sheet(title='Test1')
wb.save(test_filename)
wb2 = load_workbook(test_filename)
ws = wb2.get_sheet_by_name('Test1')
eq_('Test1', ws.title)
def test_dump_sheet():
test_filename = _get_test_filename()
wb = Workbook(optimized_write=True)
ws = wb.create_sheet()
letters = [get_column_letter(x + 1) for x in xrange(20)]
expected_rows = []
for row in xrange(20):
expected_rows.append(['%s%d' % (letter, row + 1) for letter in letters])
for row in xrange(20):
expected_rows.append([(row + 1) for letter in letters])
for row in xrange(10):
expected_rows.append([datetime(2010, ((x % 12) + 1), row + 1) for x in range(len(letters))])
for row in xrange(20):
expected_rows.append(['=%s%d' % (letter, row + 1) for letter in letters])
for row in expected_rows:
ws.append(row)
wb.save(test_filename)
wb2 = load_workbook(test_filename)
ws = wb2.worksheets[0]
for ex_row, ws_row in zip(expected_rows[:-20], ws.rows):
for ex_cell, ws_cell in zip(ex_row, ws_row):
eq_(ex_cell, ws_cell.value)
os.remove(test_filename)
def test_table_builder():
sb = StringTableBuilder()
result = {'a':0, 'b':1, 'c':2, 'd':3}
for letter in sorted(result.keys()):
for x in range(5):
sb.add(letter)
table = dict(sb.get_table())
for key, idx in result.items():
eq_(idx, table[key])
def test_open_too_many_files():
test_filename = _get_test_filename()
wb = Workbook(optimized_write=True)
for i in range(200): # over 200 worksheets should raise an OSError ('too many open files')
wb.create_sheet()
wb.save(test_filename)
os.remove(test_filename)
def test_create_temp_file():
f = dump_worksheet.create_temporary_file()
if not os.path.isfile(f):
raise Exception("The file %s does not exist" % f)
@raises(WorkbookAlreadySaved)
def test_dump_twice():
test_filename = _get_test_filename()
wb = Workbook(optimized_write=True)
ws = wb.create_sheet()
ws.append(['hello'])
wb.save(test_filename)
os.remove(test_filename)
wb.save(test_filename)
@raises(WorkbookAlreadySaved)
def test_append_after_save():
test_filename = _get_test_filename()
wb = Workbook(optimized_write=True)
ws = wb.create_sheet()
ws.append(['hello'])
wb.save(test_filename)
os.remove(test_filename)
ws.append(['hello'])
def test_equal_string():
test_filename = _get_test_filename()
wb = Workbook(optimized_write=True)
ws = wb.create_sheet()
ws.append(['', '', None, '='])
wb.save(test_filename)
wb2 = load_workbook(test_filename, True)
last_cell = list(wb2.worksheets[0].iter_rows())[0][-1]
assert last_cell.data_type == 's'
| quisas/albus | cli_tools/openpyxl/tests/test_dump.py | Python | agpl-3.0 | 4,837 |
# Copyright (C) 2010-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# This case corresponds to: /visu/DeformedShape/A7 case
# Create Deformed Shape for all data of the given MED file
import sys
from paravistest import datadir, pictureext, get_picture_dir
from presentations import CreatePrsForFile, PrsTypeEnum
import pvserver as paravis
# Create presentations
myParavis = paravis.myParavis
# Directory for saving snapshots
picturedir = get_picture_dir("DeformedShape/A7")
file = datadir + "Tetra4.med"
print " --------------------------------- "
print "file ", file
print " --------------------------------- "
print "CreatePrsForFile..."
CreatePrsForFile(myParavis, file, [PrsTypeEnum.DEFORMEDSHAPE], picturedir, pictureext)
| FedoraScientific/salome-paravis | test/VisuPrs/DeformedShape/A7.py | Python | lgpl-2.1 | 1,519 |
# Copyright 2019 Tecnativa - Pedro M. Baeza
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import exceptions
from odoo.tests import common
class TestStockLockLot(common.SavepointCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.category = cls.env["product.category"].create(
{"name": "Test category", "lot_default_locked": True}
)
cls.product = cls.env["product.product"].create(
{"name": "Test product", "categ_id": cls.category.id}
)
def _get_lot_default_vals(self):
return {
"name": "Test lot",
"product_id": self.product.id,
"company_id": self.env.user.company_id.id,
}
def test_new_lot_unlocked(self):
self.category.lot_default_locked = False
lot = self.env["stock.production.lot"].create(self._get_lot_default_vals())
self.assertFalse(lot.locked)
def test_new_lot_locked(self):
lot = self.env["stock.production.lot"].create(self._get_lot_default_vals())
self.assertTrue(lot.locked)
def test_lot_onchange_product(self):
lot = self.env["stock.production.lot"].new(self._get_lot_default_vals())
lot._onchange_product_id()
self.assertTrue(lot.locked)
def test_lock_permissions(self):
self.env.user.groups_id -= self.env.ref("stock_lock_lot.group_lock_lot")
# This should work correctly
lot = self.env["stock.production.lot"].create(self._get_lot_default_vals())
with self.assertRaises(exceptions.AccessError):
lot.locked = False
| OCA/stock-logistics-workflow | stock_lock_lot/tests/test_stock_lock_lot.py | Python | agpl-3.0 | 1,639 |
from useintest.modules.irods.executables import IrodsBaseExecutablesController, Irods4_1_10ExecutablesController, \
IrodsExecutablesController, irods_executables_controllers_and_versions, irods_executables_controllers
from useintest.modules.irods.helpers import AccessLevel, IrodsSetupHelper
from useintest.modules.irods.models import IrodsResource, IrodsUser, IrodsDockerisedService
from useintest.modules.irods.setup_irods import setup_irods
from useintest.modules.irods.services import IrodsBaseServiceController, Irods4ServiceController, \
Irods4_1_10ServiceController, IrodsServiceController, irods_service_controllers | wtsi-hgi/startfortest | useintest/modules/irods/__init__.py | Python | mit | 631 |
import sys
from views import *
from config import config
# Load the local or the production config file depending on the LOCAL flag
if config.LOCAL:
app.config.from_pyfile('../local.cfg')
else:
app.config.from_pyfile('../phenopolis.cfg')
if __name__ == "__main__":
# use ssl
    # add some common urls. It would be good if we could generate the url in real time
home = ''
#from OpenSSL import SSL
    # alternative
#context = SSL.Context(SSL.SSLv23_METHOD)
#context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
#context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
# this is now handled by Apache
#app.run(host='0.0.0.0',port=8000,threaded=True,debug=True)
app.run(host='0.0.0.0',port=8000,threaded=True)
# threaded
#app.run(threaded=True)
#app.run(host='127.0.0.1',port=8000, debug = True, ssl_context=context)
#app.run(host='0.0.0.0', port=8000, ssl_context=context)
#app.run(host='0.0.0.0', port=8000, debug=True, ssl_context='adhoc')
#app.run(host='0.0.0.0', port=8000, debug=True)
#app.run(host='127.0.0.1', port=8000, debug=True)
#toolbar=DebugToolbarExtension(app)
#runner = Runner(app) # adds Flask command line options for setting host, port, etc.
#runner.run()
| Withington/phenopolis | runserver.py | Python | mit | 1,238 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Saves and restore variables inside traced @tf.functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_io_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.training.saving import checkpoint_options
from tensorflow.python.training.saving import saveable_hook
from tensorflow.python.training.saving import saveable_object
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.util import nest
class _SingleDeviceSaver(object):
"""Saves and restores checkpoints from the current device."""
__slots__ = ["_saveable_objects"]
def __init__(self, saveable_objects):
"""Specify a list of `SaveableObject`s to save and restore.
Args:
saveable_objects: A list of `SaveableObject`s.
"""
saveable_objects = list(saveable_objects)
for saveable in saveable_objects:
if not isinstance(saveable, saveable_object.SaveableObject):
raise ValueError(
"Expected a list of SaveableObjects, got %s." % (saveable,))
self._saveable_objects = saveable_objects
def save(self, file_prefix, options=None):
"""Save the saveable objects to a checkpoint with `file_prefix`.
Args:
file_prefix: A string or scalar string Tensor containing the prefix to
save under.
options: Optional `CheckpointOptions` object.
Returns:
An `Operation`, or None when executing eagerly.
"""
options = options or checkpoint_options.CheckpointOptions()
tensor_names = []
tensors = []
tensor_slices = []
for saveable in self._saveable_objects:
for spec in saveable.specs:
tensor = spec.tensor
# A tensor value of `None` indicates that this SaveableObject gets
# recorded in the object graph, but that no value is saved in the
# checkpoint.
if tensor is not None:
tensor_names.append(spec.name)
tensors.append(tensor)
tensor_slices.append(spec.slice_spec)
save_device = options.experimental_io_device or "cpu:0"
with ops.device(save_device):
return io_ops.save_v2(file_prefix, tensor_names, tensor_slices, tensors)
def restore(self, file_prefix, options=None):
"""Restore the saveable objects from a checkpoint with `file_prefix`.
Args:
file_prefix: A string or scalar string Tensor containing the prefix for
files to read from.
options: Optional `CheckpointOptions` object.
Returns:
A dictionary mapping from SaveableObject names to restore operations.
"""
options = options or checkpoint_options.CheckpointOptions()
restore_specs = []
tensor_structure = []
for saveable in self._saveable_objects:
saveable_tensor_structure = []
tensor_structure.append(saveable_tensor_structure)
for spec in saveable.specs:
saveable_tensor_structure.append(spec.name)
restore_specs.append((spec.name, spec.slice_spec, spec.dtype))
tensor_names, tensor_slices, tensor_dtypes = zip(*restore_specs)
restore_device = options.experimental_io_device or "cpu:0"
with ops.device(restore_device):
restored_tensors = io_ops.restore_v2(
file_prefix, tensor_names, tensor_slices, tensor_dtypes)
structured_restored_tensors = nest.pack_sequence_as(
tensor_structure, restored_tensors)
restore_ops = {}
for saveable, restored_tensors in zip(self._saveable_objects,
structured_restored_tensors):
restore_ops[saveable.name] = saveable.restore(
restored_tensors, restored_shapes=None)
return restore_ops
def sharded_filename(filename_tensor, shard, num_shards):
"""Append sharding information to a filename.
Args:
filename_tensor: A string tensor.
shard: Integer. The shard for the filename.
num_shards: An int Tensor for the number of shards.
Returns:
A string tensor.
"""
return gen_io_ops.sharded_filename(filename_tensor, shard, num_shards)
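# Illustrative (an assumption about the op's formatting, not verified here):
# for shard 0 of 2, the result is a string tensor like b"prefix-00000-of-00002".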
class MultiDeviceSaver(object):
"""Saves checkpoints directly from multiple devices.
Note that this is a low-level utility which stores Tensors in the keys
specified by `SaveableObject`s. Higher-level utilities for object-based
checkpointing are built on top of it.
"""
def __init__(self, saveable_objects):
"""Specify a list of `SaveableObject`s to save and restore.
Args:
saveable_objects: A list of `SaveableObject`s.
Objects extending `SaveableObject` will be saved and restored, and
objects extending `SaveableHook` will be called into at save and
restore time.
"""
self._before_save_callbacks = []
self._after_restore_callbacks = []
saveable_objects = list(saveable_objects)
saveables_by_device = {}
for saveable in saveable_objects:
is_saveable = isinstance(saveable, saveable_object.SaveableObject)
is_hook = isinstance(saveable, saveable_hook.SaveableHook)
if not is_saveable and not is_hook:
raise ValueError(
"Expected a dictionary of SaveableObjects, got {}."
.format(saveable))
if is_hook:
self._before_save_callbacks.append(saveable.before_save)
self._after_restore_callbacks.append(saveable.after_restore)
if is_saveable:
host_device = saveable_object_util.set_cpu0(saveable.device)
saveables_by_device.setdefault(host_device, []).append(saveable)
self._single_device_savers = {
device: _SingleDeviceSaver(saveables)
for device, saveables in saveables_by_device.items()}
def to_proto(self):
"""Serializes to a SaverDef referencing the current graph."""
filename_tensor = array_ops.placeholder(
shape=[], dtype=dtypes.string, name="saver_filename")
save_tensor = self._traced_save(filename_tensor)
restore_op = self._traced_restore(filename_tensor).op
return saver_pb2.SaverDef(
filename_tensor_name=filename_tensor.name,
save_tensor_name=save_tensor.name,
restore_op_name=restore_op.name,
version=saver_pb2.SaverDef.V2)
@def_function.function(
input_signature=(tensor_spec.TensorSpec(shape=(), dtype=dtypes.string),),
autograph=False)
def _traced_save(self, file_prefix):
save_op = self.save(file_prefix)
with ops.device("cpu:0"):
with ops.control_dependencies([save_op]):
return array_ops.identity(file_prefix)
@def_function.function(
input_signature=(tensor_spec.TensorSpec(shape=(), dtype=dtypes.string),),
autograph=False)
def _traced_restore(self, file_prefix):
restore_ops = self.restore(file_prefix)
with ops.device("cpu:0"):
with ops.control_dependencies(restore_ops.values()):
return array_ops.identity(file_prefix)
def save(self, file_prefix, options=None):
"""Save the saveable objects to a checkpoint with `file_prefix`.
Args:
file_prefix: A string or scalar string Tensor containing the prefix to
save under.
options: Optional `CheckpointOptions` object.
Returns:
An `Operation`, or None when executing eagerly.
"""
options = options or checkpoint_options.CheckpointOptions()
for callback in self._before_save_callbacks:
callback()
# IMPLEMENTATION DETAILS: most clients should skip.
#
# Suffix for any well-formed "checkpoint_prefix", when sharded.
# Transformations:
# * Users pass in "save_path" in save() and restore(). Say "myckpt".
# * checkpoint_prefix gets fed <save_path><sharded_suffix>.
#
# Example:
# During runtime, a temporary directory is first created, which contains
# files
#
# <train dir>/myckpt_temp/
# part-?????-of-?????{.index, .data-00000-of-00001}
#
# Before .save() finishes, they will be (hopefully, atomically) renamed to
#
# <train dir>/
# myckpt{.index, .data-?????-of-?????}
#
# Filesystems with eventual consistency (such as S3), don't need a
# temporary location. Using a temporary directory in those cases might
# cause situations where files are not available during copy.
#
# Users only need to interact with the user-specified prefix, which is
# "<train dir>/myckpt" in this case. Save() and Restore() work with the
# prefix directly, instead of any physical pathname. (On failure and
# subsequent restore, an outdated and orphaned temporary directory can be
# safely removed.)
with ops.device("CPU"):
sharded_suffix = array_ops.where(
string_ops.regex_full_match(file_prefix, "^s3://.*"),
constant_op.constant(".part"),
constant_op.constant("_temp/part"))
tmp_checkpoint_prefix = string_ops.string_join(
[file_prefix, sharded_suffix])
def save_fn():
num_shards = len(self._single_device_savers)
sharded_saves = []
sharded_prefixes = []
num_shards_tensor = constant_op.constant(num_shards, name="num_shards")
last_device = None
for shard, (device, saver) in enumerate(
sorted(self._single_device_savers.items())):
last_device = device
with ops.device(saveable_object_util.set_cpu0(device)):
shard_prefix = sharded_filename(tmp_checkpoint_prefix, shard,
num_shards_tensor)
sharded_prefixes.append(shard_prefix)
with ops.device(device):
# _SingleDeviceSaver will use the CPU device when necessary, but
# initial read operations should be placed on the SaveableObject's
# device.
sharded_saves.append(saver.save(shard_prefix, options))
with ops.control_dependencies(sharded_saves):
# Merge on the io_device if specified, otherwise co-locates the merge op
# with the last device used.
merge_device = (
options.experimental_io_device or
saveable_object_util.set_cpu0(last_device))
with ops.device(merge_device):
# V2 format write path consists of a metadata merge step. Once
# merged, attempts to delete the temporary directory,
# "<user-fed prefix>_temp".
return gen_io_ops.merge_v2_checkpoints(
sharded_prefixes, file_prefix, delete_old_dirs=True)
# Since this will causes a function re-trace on each save, limit this to the
# cases where it is needed: eager and when there are multiple tasks/single
# device savers. Note that the retrace is needed to ensure we pickup the
# latest values of options like experimental_io_device.
if context.executing_eagerly() and len(self._single_device_savers) > 1:
# Explicitly place the identity op on the first device.
@def_function.function(jit_compile=False)
def tf_function_save():
save_fn()
tf_function_save()
else:
return save_fn()
def restore(self, file_prefix, options=None):
"""Restore the saveable objects from a checkpoint with `file_prefix`.
Args:
file_prefix: A string or scalar string Tensor containing the prefix for
files to read from.
options: Optional `CheckpointOptions` object.
Returns:
When not run eagerly or when saving on a single device, returns a
dictionary mapping from SaveableObject names to restore operations;
otherwise, returns an empty dict.
"""
options = options or checkpoint_options.CheckpointOptions()
def restore_fn():
restore_ops = {}
# Sort by device name to avoid propagating non-deterministic dictionary
# ordering in some Python versions.
for device, saver in sorted(self._single_device_savers.items()):
with ops.device(device):
restore_ops.update(saver.restore(file_prefix, options))
return restore_ops
# Since this will causes a function re-trace on each restore, limit this to
# cases where it is needed: eager and when there are multiple tasks/single
# device savers. Note that the retrace is needed to ensure we pickup the
# latest values of options like experimental_io_device.
if context.executing_eagerly() and len(self._single_device_savers) > 1:
@def_function.function(jit_compile=False)
def tf_function_restore():
restore_fn()
return {}
restore_ops = tf_function_restore()
else:
restore_ops = restore_fn()
for callback in self._after_restore_callbacks:
callback()
return restore_ops
| petewarden/tensorflow | tensorflow/python/training/saving/functional_saver.py | Python | apache-2.0 | 13,689 |
from django import template
from django.template.defaultfilters import stringfilter
from pastebin.utils import languageformat
register = template.Library()
@register.filter
@stringfilter
def truncatechars(value, index):
result = value[:index]
    if len(value) > index:
        result += '...'
return result
@register.filter(name='languageformat')
def do_languageformat(file_list):
return languageformat(file_list)
@register.filter
def fileformat(file_list):
filenames = []
unnamed = 0
for file in file_list:
if file.name:
filenames.append(file.name)
else:
unnamed += 1
if not filenames:
if unnamed == 1:
return 'Untitled'
return '{0} untitled file{1}'.format(
unnamed,
's' if unnamed != 1 else ''
)
result = ', '.join(filenames)
if unnamed:
        result += ' and {0} untitled file{1}'.format(
unnamed,
's' if unnamed != 1 else ''
)
return result
@register.filter
@stringfilter
def linecount(value):
return '%d' % len(value.split('\n'))
@register.filter
@stringfilter
def sloc(value):
return len([x for x in value.split('\n') if x.strip()])
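# Illustrative template usage (assumes the library is loaded under its module
# name, pastebin_tags):
#   {% load pastebin_tags %}
#   {{ paste.content|linecount }} lines ({{ paste.content|sloc }} SLOC)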
| DrMegahertz/codenotes | applications/pastebin/templatetags/pastebin_tags.py | Python | bsd-3-clause | 1,243 |
import json
import mimetypes
import os
from itertools import chain
from zipfile import ZipFile, BadZipfile
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.forms import ModelForm, formset_factory, HiddenInput, NumberInput, Select, BaseModelFormSet
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django.shortcuts import get_object_or_404, render
from django.utils.html import escape, format_html
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.views.generic import DetailView
from judge.highlight_code import highlight_code
from judge.models import ProblemData, Problem, ProblemTestCase, problem_data_storage
from judge.utils.problem_data import ProblemDataCompiler
from judge.utils.views import TitleMixin
from judge.views.problem import ProblemMixin
mimetypes.init()
mimetypes.add_type('application/x-yaml', '.yml')
def checker_args_cleaner(self):
data = self.cleaned_data['checker_args']
if not data or data.isspace():
return ''
try:
if not isinstance(json.loads(data), dict):
raise ValidationError(_('Checker arguments must be a JSON object'))
except ValueError:
        raise ValidationError(_('Checker arguments are invalid JSON'))
return data
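# Illustrative: clean_checker_args accepts a blank value or a JSON object
# literal such as '{"precision": 6}' (the key is a hypothetical example, not a
# documented schema).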
class ProblemDataForm(ModelForm):
def clean_zipfile(self):
if hasattr(self, 'zip_valid') and not self.zip_valid:
raise ValidationError(_('Your zip file is invalid!'))
return self.cleaned_data['zipfile']
clean_checker_args = checker_args_cleaner
class Meta:
model = ProblemData
fields = ['zipfile', 'generator', 'output_limit', 'output_prefix', 'checker', 'checker_args']
widgets = {
'checker_args': HiddenInput,
}
class ProblemCaseForm(ModelForm):
clean_checker_args = checker_args_cleaner
class Meta:
model = ProblemTestCase
fields = ('order', 'type', 'input_file', 'output_file', 'points',
'is_pretest', 'output_limit', 'output_prefix', 'checker', 'checker_args', 'generator_args')
widgets = {
'generator_args': HiddenInput,
'type': Select(attrs={'style': 'width: 100%'}),
'points': NumberInput(attrs={'style': 'width: 4em'}),
'output_prefix': NumberInput(attrs={'style': 'width: 4.5em'}),
'output_limit': NumberInput(attrs={'style': 'width: 6em'}),
'checker_args': HiddenInput,
}
class ProblemCaseFormSet(formset_factory(ProblemCaseForm, formset=BaseModelFormSet, extra=1, max_num=1, can_delete=True)):
model = ProblemTestCase
def __init__(self, *args, **kwargs):
self.valid_files = kwargs.pop('valid_files', None)
super(ProblemCaseFormSet, self).__init__(*args, **kwargs)
def _construct_form(self, i, **kwargs):
form = super(ProblemCaseFormSet, self)._construct_form(i, **kwargs)
form.valid_files = self.valid_files
return form
class ProblemDataView(LoginRequiredMixin, TitleMixin, ProblemMixin, DetailView):
template_name = 'problem/data.jade'
def get_title(self):
return _('Editing data for %s') % self.object.name
def get_content_title(self):
return mark_safe(escape(_('Editing data for %s')) % (
format_html(u'<a href="{1}">{0}</a>', self.object.name,
reverse('problem_detail', args=[self.object.code]))))
def get_object(self, queryset=None):
problem = super(ProblemDataView, self).get_object(queryset)
if self.request.user.is_superuser or problem.is_editable_by(self.request.user):
return problem
raise Http404()
def get_data_form(self, post=False):
return ProblemDataForm(data=self.request.POST if post else None, prefix='problem-data',
files=self.request.FILES if post else None,
instance=ProblemData.objects.get_or_create(problem=self.object)[0])
def get_case_formset(self, files, post=False):
return ProblemCaseFormSet(data=self.request.POST if post else None, prefix='cases', valid_files=files,
queryset=ProblemTestCase.objects.filter(dataset_id=self.object.pk).order_by('order'))
def get_valid_files(self, data, post=False):
try:
if post and 'problem-data-zipfile-clear' in self.request.POST:
return []
elif post and 'problem-data-zipfile' in self.request.FILES:
return ZipFile(self.request.FILES['problem-data-zipfile']).namelist()
elif data.zipfile:
return ZipFile(data.zipfile.path).namelist()
except BadZipfile:
return []
return []
def get_context_data(self, **kwargs):
context = super(ProblemDataView, self).get_context_data(**kwargs)
if 'data_form' not in context:
context['data_form'] = self.get_data_form()
valid_files = context['valid_files'] = self.get_valid_files(context['data_form'].instance)
context['data_form'].zip_valid = valid_files is not False
context['cases_formset'] = self.get_case_formset(valid_files)
context['valid_files_json'] = mark_safe(json.dumps(context['valid_files']))
context['valid_files'] = set(context['valid_files'])
context['all_case_forms'] = chain(context['cases_formset'], [context['cases_formset'].empty_form])
return context
def post(self, request, *args, **kwargs):
self.object = problem = self.get_object()
data_form = self.get_data_form(post=True)
valid_files = self.get_valid_files(data_form.instance, post=True)
data_form.zip_valid = valid_files is not False
cases_formset = self.get_case_formset(valid_files, post=True)
if data_form.is_valid() and cases_formset.is_valid():
data = data_form.save()
for case in cases_formset.save(commit=False):
case.dataset_id = problem.id
case.save()
for case in cases_formset.deleted_objects:
case.delete()
ProblemDataCompiler.generate(problem, data, problem.cases.order_by('order'), valid_files)
return HttpResponseRedirect(request.get_full_path())
return self.render_to_response(self.get_context_data(data_form=data_form, cases_formset=cases_formset,
valid_files=valid_files))
put = post
@login_required
def problem_data_file(request, problem, path):
object = get_object_or_404(Problem, code=problem)
if not object.is_editable_by(request.user):
raise Http404()
response = HttpResponse()
if hasattr(settings, 'PROBLEM_DATA_INTERNAL') and request.META.get('SERVER_SOFTWARE', '').startswith('nginx/'):
response['X-Accel-Redirect'] = '%s/%s/%s' % (settings.PROBLEM_DATA_INTERNAL, problem, path)
else:
try:
with problem_data_storage.open(os.path.join(problem, path), 'rb') as f:
response.content = f.read()
except IOError:
raise Http404()
type, encoding = mimetypes.guess_type(path)
response['Content-Type'] = type or 'application/octet-stream'
if encoding is not None:
response['Content-Encoding'] = encoding
return response
@login_required
def problem_init_view(request, problem):
problem = get_object_or_404(Problem, code=problem)
if not request.user.is_superuser and not problem.is_editable_by(request.user):
raise Http404()
try:
with problem_data_storage.open(os.path.join(problem.code, 'init.yml')) as f:
data = f.read().rstrip('\n')
except IOError:
raise Http404()
return render(request, 'problem/yaml.jade', {
'raw_source': data, 'highlighted_source': highlight_code(data, 'yaml'),
'title': _('Generated init.yml for %s') % problem.name,
'content_title': mark_safe(escape(_('Generated init.yml for %s')) % (
format_html(u'<a href="{1}">{0}</a>', problem.name,
reverse('problem_detail', args=[problem.code]))))
})
| Minkov/site | judge/views/problem_data.py | Python | agpl-3.0 | 8,427 |
#! /usr/bin/env python
from zplot import *
# populate zplot table from data file
t = table('verticalbars.data')
# create the postscript file we'll use as our canvas
canvas = postscript('verticalbars.eps')
# on the x-axis, we want categories, not numbers. Thus, we
# determine the number of categories by checking the max
# "rownumber" (a field automatically added by zplot). We want a
# half bar width (0.5) to the left and right of the bar locations
# so we don't overflow the drawable.
d = drawable(canvas, xrange=[-0.5,t.getmax('rownumber')+0.5], yrange=[0,80])
# xmanual is a list of the form [(label1,x1), (label2,x2), ...].
# We want to use the "op" field from the data file as our labels
# and use "rownumber" as our x coordinate.
axis(d, xtitle='Operation', xmanual=t.query(select='op,rownumber'),
ytitle='Latency (ms)', yauto=[0,80,20])
# we are going to create several bars with similar arguments. One
# easy way to do this is to put all the arguments in a dict, and
# use Python's special syntax ("**") for using the dict as named
# args. Then we can tweak the args between each call to
# verticalbars.
#
# yfield determines the bar height, and stackfields determines
# where the bottom of a bar starts. This is useful for showing
# several bar sections to indicate a breakdown. After the first
# bar, we append the previous yfield to stackfields to stack the bars.
p = plotter()
L = legend()
barargs = {'drawable':d, 'table':t, 'xfield':'rownumber',
'linewidth':0, 'fill':True, 'barwidth':0.8,
'legend':L, 'stackfields':[]}
# compute bar
barargs['yfield'] = 'compute'
barargs['legendtext'] = 'CPU'
barargs['fillcolor'] = 'red'
p.verticalbars(**barargs)
# network bar
barargs['stackfields'].append(barargs['yfield'])
barargs['yfield'] = 'network'
barargs['legendtext'] = 'Net'
barargs['fillcolor'] = 'green'
p.verticalbars(**barargs)
# storage bar
barargs['stackfields'].append(barargs['yfield'])
barargs['yfield'] = 'storage'
barargs['legendtext'] = 'Disk'
barargs['fillcolor'] = 'blue'
p.verticalbars(**barargs)
# we want legend entries to be all on one line. Thus, we use
# skipnext=1 to get one row. We specify the horizontal space
# between legend symbols (not considering text) with skipspace.
L.draw(canvas, coord=[d.left()+30, d.top()-5], skipnext=1, skipspace=40)
canvas.render()
| z-plot/z-plot | examples/basics/verticalbars.py | Python | bsd-3-clause | 2,348 |
# Copyright (C) 2009-2016 CS-SI. All Rights Reserved.
# Author: Yoann Vandoorselaere <yoann.v@prelude-ids.com>
# Author: Wes Young <wes@barely3am.com>
#
# This file is part of the Prelude-Correlator program.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from preludecorrelator import require, log, download
from preludecorrelator.pluginmanager import Plugin, PluginDependenciesError
from preludecorrelator.context import Context
try:
# Note:
# Versions 0.7.10 to 0.7.15 (inclusive) are known to be very slow
# due to a bug in python-netaddr.
# See https://github.com/drkjam/netaddr/issues/94 for more information
import netaddr
except ImportError:
raise PluginDependenciesError("missing netaddr module, https://pypi.python.org/pypi/netaddr")
logger = log.getLogger(__name__)
if tuple(int(x) for x in netaddr.__version__.split(".")) >= (0, 7):
from netaddr import IPAddress, IPNetwork, IPSet
else:
from netaddr import IP as IPAddress
from netaddr import CIDR as IPNetwork
class IPSet(list):
def __contains__(self, y):
for i in iter(self):
if y in i:
return True
return False
def add(self, obj):
self.append(obj)
class SpamhausDownload(download.HTTPDownloadCache):
def __init__(self, filename, uri, timeout, reload):
download.HTTPDownloadCache.__init__(self, "SpamhausDrop", filename, uri, timeout, reload, logger)
def parse(self, data):
mynets = IPSet()
for line in data.split("\n"):
if not line or line[0] == ';':
continue
ip, sbl = line.split(';')
ip = IPNetwork(ip.strip())
mynets.add(ip)
return mynets
class SpamhausDropPlugin(Plugin):
RELOAD = 7 * 24 * 60 * 60
URI = "http://www.spamhaus.org/drop/drop.txt"
TIMEOUT = 10
def __init__(self, env):
Plugin.__init__(self, env)
reload = self.getConfigValue("reload", self.RELOAD, type=int)
filename = self.getConfigValue("filename", require.get_data_filename("spamhaus_drop.dat", module=__name__, profile=env.profile))
uri = self.getConfigValue("uri", self.URI)
timeout = self.getConfigValue("timeout", self.TIMEOUT, type=float)
self.__data = SpamhausDownload(filename, uri, timeout, reload)
def run(self, idmef):
for source in idmef.get("alert.source(*).node.address(*).address"):
try:
addr = IPAddress(source)
except:
continue
if addr in self.__data.get():
ca = Context(("SPAMHAUS", source), { "expire": 300, "alert_on_expire": True }, update = True, idmef = idmef)
if ca.getUpdateCount() == 0:
ca.set("alert.classification.text", "IP source matching Spamhaus DROP dataset")
ca.set("alert.correlation_alert.name", "IP source matching Spamhaus DROP dataset")
ca.set("alert.assessment.impact.description", "Spamhaus gathered this IP address in their DROP list - %s" % (source))
ca.set("alert.assessment.impact.severity", "medium")
| agil3b3ast/CNITCorrelator | rules/SpamhausDropPlugin.py | Python | gpl-2.0 | 3,842 |
'''
not sure if bug ... or brilliant
'''
from AI import AI
@AI.Team
class Ditto(AI):
def __init__(self):
try:
self.other = [team for team in AI.team if team != self.__class__][0]()
except:
self.other = None
def Update(self, ai_input):
if self.other:
return self.other.Update(ai_input)
return AI.Update(self, ai_input)
def GetPlayerStats(self, player_number):
if self.other:
return self.other.GetPlayerStats(player_number)
return AI.GetPlayerStats(self, player_number) | jedislight/Shockball | Team/Ditto.py | Python | mit | 585 |
import logging
from django.utils import six
from django import template
from django.conf import settings
from ella.photos.models import Photo, Format, FormatedPhoto
from ella.core.cache.utils import get_cached_object
log = logging.getLogger('ella.photos')
register = template.Library()
class ImageTag(template.Node):
#{% image <photo_variable> in "format" as foobar %}
def __init__(self, format, photo, var_name):
self.format, self.photo, self.var_name = format, photo, var_name
def render(self, context):
if isinstance(self.format, template.Variable):
try:
format = self.format.resolve(context)
if isinstance(format, six.string_types):
format = Format.objects.get_for_name(format)
except (template.VariableDoesNotExist, Format.DoesNotExist):
context[self.var_name] = None
return ''
else:
format = self.format
try:
# try retrieving just the ID first to avoid DB lookup
photo = template.Variable(self.photo + '_id').resolve(context)
except template.VariableDoesNotExist:
try:
photo = template.Variable(self.photo).resolve(context)
except template.VariableDoesNotExist:
context[self.var_name] = None
return ''
formated_photo = FormatedPhoto.objects.get_photo_in_format(photo, format)
context[self.var_name] = formated_photo
return ''
def _parse_image(bits):
if len(bits) != 6 or bits[2] != 'in' or bits[4] != 'as':
raise template.TemplateSyntaxError('{% image <photo_variable> in "format" as foobar %}')
format = template.Variable(bits[3])
if format.literal is not None:
try:
format = Format.objects.get_for_name(format.literal)
except Format.DoesNotExist:
logmsg = "Format with name %r does not exist (for site id %d)" % (format.literal, settings.SITE_ID)
log.error(logmsg)
if not settings.TEMPLATE_DEBUG:
return template.Node()
raise template.TemplateSyntaxError(logmsg)
return ImageTag(format, bits[1], bits[5])
@register.tag
def image(parser, token):
"""
Generates thumbnails for ``Photo`` instances.
syntax::
{% image <photo> in <format> as <var_name> %}
examples::
{% image article.photo in "thumbnail" as thumb %}
{% image article.photo in thumb_format as thumb %}
"""
bits = token.split_contents()
return _parse_image(bits)
class ImgTag(template.Node):
def __init__(self, photo, format, var_name):
self.photo, self.format, self.var_name = photo, format, var_name
def render(self, context):
if isinstance(self.photo, six.string_types):
try:
# try retrieving just the ID first to avoid DB lookup
photo = template.Variable(self.photo + '_id').resolve(context)
except template.VariableDoesNotExist:
try:
photo = template.Variable(self.photo).resolve(context)
except template.VariableDoesNotExist:
context[self.var_name] = None
return ''
if not photo:
context[self.var_name] = None
return ''
formated_photo = FormatedPhoto.objects.get_photo_in_format(photo, self.format)
else:
formated_photo = self.photo
context[self.var_name] = formated_photo
return ''
@register.tag
def img(parser, token):
"""
Deprecated, use {% image %} instead. Generates thumbnails for ``Photo`` instances.
syntax::
{% img <format> for <var> as <var_name> %}
{% img <format> with <field_value> as <var_name> %}
examples::
{% img category_listing for object.photo as thumb %}
{% img category_listing with pk 1150 as thumb %}
"""
log.warning('You are using the deprecated {% img %} tag. please upgrade to {% image %}.')
bits = token.split_contents()
return _parse_img(bits)
def _parse_img(bits, legacy=True):
if len(bits) < 2 or bits[-2] != 'as':
raise template.TemplateSyntaxError("{% img FORMAT for VAR as VAR_NAME %} or {% img FORMAT with FIELD VALUE as VAR_NAME %}")
try:
format = Format.objects.get_for_name(bits[1])
except Format.DoesNotExist:
logmsg = "Format with name %r does not exist (for site id %d)" % (bits[1], settings.SITE_ID)
log.error(logmsg)
if not settings.TEMPLATE_DEBUG:
return template.Node()
raise template.TemplateSyntaxError(logmsg)
if len(bits) == 6:
# img FORMAT for VAR_NAME
if bits[2] != 'for':
raise template.TemplateSyntaxError("{% img FORMAT for VAR as VAR_NAME %}")
formated_photo = bits[3]
elif len(bits) == 7:
# img FORMAT with FIELD VALUE
if bits[2] != 'with':
raise template.TemplateSyntaxError("{% img FORMAT with FIELD VALUE as VAR_NAME %}")
try:
photo = get_cached_object(Photo, **{str(bits[3]) : bits[4]})
        except Photo.DoesNotExist:
raise template.TemplateSyntaxError("Photo with %r of %r does not exist" % (bits[3], bits[4]))
formated_photo = FormatedPhoto.objects.get_photo_in_format(photo, format)
else:
raise template.TemplateSyntaxError("{% img FORMAT for VAR as VAR_NAME %} or {% img FORMAT with FIELD VALUE as VAR_NAME %}")
return ImgTag(formated_photo, format, bits[-1])
| MichalMaM/ella | ella/photos/templatetags/photos.py | Python | bsd-3-clause | 5,630 |
from rest_framework import serializers
from .models import Fund,FundNet,FundRealTimeNet
class FundSerializer(serializers.ModelSerializer):
class Meta:
model = Fund
fields = ('id', 'fund_code', 'fund_name','created_at','updated_at')
class FundNetSerializer(serializers.ModelSerializer):
class Meta:
model = FundNet
fields = ('id', 'fund_code',
'date','net_asset_value','accumulated_net','created_at','updated_at')
class FundRealTimeNetSerializer(serializers.ModelSerializer):
class Meta:
model = FundRealTimeNet
fields = ('id', 'fund_code', 'date','net_asset_value','delta','created_at','updated_at')
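# Illustrative only: for a saved Fund row (hypothetical values), serializing
# would look roughly like
#   FundSerializer(fund).data
#   # -> {'id': 1, 'fund_code': '000001', 'fund_name': '...',
#   #     'created_at': ..., 'updated_at': ...}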
| ashione/bamslips | bamslipsrest/bamfund/serializers.py | Python | gpl-3.0 | 682 |
import pyintersim.corelib as corelib
import ctypes
### Load Library
_core = corelib.LoadCoreLibrary()
### State wrapper class to be used in 'with' statement
class State:
"""Wrapper Class for the State"""
def __init__(self):
self.p_state = setup()
def __enter__(self):
return self.p_state
def __exit__(self, exc_type, exc_value, traceback):
delete(self.p_state)
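# Usage sketch for the wrapper above; any work done with p_state is assumed
# to go through the other pyintersim modules:
#
#   with State() as p_state:
#       pass  # pass p_state to core library calls here
#   # delete(p_state) runs automatically when the block exits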
### Setup State
_State_Setup = _core.State_Setup
_State_Setup.argtypes = None
_State_Setup.restype = ctypes.c_void_p
def setup():
return _State_Setup()
### Delete State
_State_Delete = _core.State_Delete
_State_Delete.argtypes = [ctypes.c_void_p]
_State_Delete.restype = None
def delete(p_state):
return _State_Delete(p_state) | GPMueller/intersim | core/pyintersim/state.py | Python | mit | 743 |
#!/usr/bin/python3
#
# Copyright: Conor O'Callghan 2016
# Version: v1.1.3
#
# Please feel free to fork this project, modify the code and improve
# it on the github repo https://github.com/brioscaibriste/iarnrod
#
# Powered by TfL Open Data
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import json
import sys
import tempfile
import time
import os
from urllib.request import urlopen
'''
ParseArgs
A simple function to parse the command line arguments passed to the function.
The function does very little sanitisation on the input variables. The
argument passed is then returned from the function.
'''
def ParseArgs():
# Parse our command line argument for the line name
parser = argparse.ArgumentParser()
parser.add_argument('--line',dest='LineName',help='Specify the London line you want to report on')
args = parser.parse_args()
# Check if the value is blank
Line = (args.LineName)
if not Line:
print ("\nError, you must specify a line name! e.g. --line district\n")
sys.exit(1)
# Convert the line name to lower case for easy comparison
Line = Line.lower()
# If the line isn't in the line list, fail badly
if Line not in ('district','circle','victoria','central','northern',
'bakerloo','hammersmith-city','jubilee','metropolitan',
'piccadilly','waterloo-city','dlr',):
print ("\nError, you have specified " + Line + " as your line. You must specify one of the following: "
"\n\tDistrict"
"\n\tCircle"
"\n\tVictora"
"\n\tCentral"
"\n\tNorthern"
"\n\tPiccadilly"
"\n\tBakerloo"
"\n\thammersmith-city"
"\n\twaterloo-city"
"\n\tDLR"
"\n\tMetropolitan"
"\n\tJubilee\n")
sys.exit(1)
# Convert the tube line back to upper case for nice display
Line = Line.upper()
return Line
'''
RetrieveTFLData
Inputs:
Line - Which line to retrieve information on
Run - Should the data retrieval be run or should the cache file be used
SFileName - The file in which to store the line status cache
This function takes the Line variable (a name of a Transport For London line
name) and polls the TFL API. The function then returns the current line
status for the specified line.
'''
def RetrieveTFLData(Line,Run,SFileName):
# TFL Unified API URL
TFLDataURL = "https://api.tfl.gov.uk/Line/" + Line + ("/Status?detail=False"
"&app_id=&app_key=")
if Run:
# Read all the information from JSON at the specified URL, can be re-done with requests?
        try:
            RawData = urlopen(TFLDataURL).read().decode('utf8')
        except IOError:
            sys.exit("Error, failed to retrieve the data from the TFL website")
TFLData = json.loads(RawData)
# Sanitize the data to get the line status
Scratch = (TFLData[0]['lineStatuses'])
LineStatusData = (Scratch[0]['statusSeverityDescription'])
# Cache the staus in a file
with open(SFileName, 'w+') as SFile:
SFile.write(LineStatusData)
SFile.closed
else:
with open(SFileName, 'r+') as SFile:
LineStatusData = SFile.read()
SFile.closed
return LineStatusData
'''
Throttle
Inputs
PollIntervalMinutes - Polling interval in minutes
    Throttling - Should we throttle the connection or not?
TFileName - The file where the timestamp for throttling usage is stored
This function is used to determine whether or not the next run of the retrieval of data should run.
It retrieves the previously run time from a file in /tmp if it exists, if the file does not exist
the run status will return as 1 and the current time stamp will be written into a new file.
If throttling is disabled, the file will be removed from /tmp and run will be set to 1.
'''
def Throttle(PollIntervalMinutes,Throttling,TFileName):
if Throttling == "True":
# Current epoch time
# CurrentStamp = str(time.time()).split('.')[0]
CurrentStamp = int(time.time())
# Does the temporary file exist or not
if os.path.isfile(TFileName):
# Open the temp file and read the time stamp
with open(TFileName, 'r+') as TFile:
TimeFile = TFile.read()
Remainder = CurrentStamp - int(TimeFile)
else:
# Get the current time stamp and write it to the temp file
with open(TFileName, 'w') as TFile:
TFile.write(str(CurrentStamp))
# Set the Remainder high to force the next run
Remainder = 1000000
# If the remainder is less than the poll interval don't run the command, if it isn't run the command
if ( Remainder < (PollIntervalMinutes * 60) ):
Run = 0
else:
Run = 1
# Set the command to run and re-write the poll time to file
with open(TFileName, 'w') as TFile:
TFile.write(str(CurrentStamp))
return Run
else:
# Remove the time file if it exists
try:
os.remove(TFileName)
except OSError:
pass
Run = 1
return Run
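# Illustrative call (hypothetical path): poll at most once every 3 minutes,
# caching the previous run time in a temp file.
#
#   Run = Throttle(3, "True", os.path.join(tempfile.gettempdir(), "iarnrod-time"))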
| brioscaibriste/iarnrod | coire.py | Python | gpl-3.0 | 6,003 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('rules', '0029_auto_20150102_1212'),
]
operations = [
migrations.AlterField(
model_name='category',
name='created_date',
field=models.DateTimeField(default=datetime.datetime(2015, 1, 3, 11, 36, 49, 971523), verbose_name=b'date created'),
preserve_default=True,
),
migrations.AlterField(
model_name='sourceatversion',
name='updated_date',
field=models.DateTimeField(default=datetime.datetime(2015, 1, 3, 11, 36, 49, 970816), verbose_name=b'date updated', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='sourceupdate',
name='created_date',
field=models.DateTimeField(default=datetime.datetime(2015, 1, 3, 11, 36, 49, 971165), verbose_name=b'date of update', blank=True),
preserve_default=True,
),
]
| CyberTaoFlow/scirius | rules/migrations/0030_auto_20150103_1136.py | Python | gpl-3.0 | 1,122 |
#!/usr/bin/env python
import yaml
import argparse
import subprocess
import os
import signal
import time
import pwd
import jinja2
import codecs
import sys
__author__ = "Anoop P Alias"
__copyright__ = "Copyright Anoop P Alias"
__license__ = "GPL"
__email__ = "anoopalias01@gmail.com"
installation_path = "/opt/nDeploy" # Absolute Installation Path
backend_config_file = installation_path+"/conf/backends.yaml"
php_fpm_config = installation_path+"/conf/php-fpm.conf"
# Function defs
# Define a function to silently remove files
def silentremove(filename):
try:
os.remove(filename)
except OSError:
pass
def control_php_fpm(trigger):
if "PHP" in backend_data_yaml_parsed:
php_backends_dict = backend_data_yaml_parsed["PHP"]
if trigger == "autofix":
conf_list = os.listdir("/var/cpanel/users")
for user in conf_list:
try:
pwd.getpwnam(user)
except KeyError:
silentremove("/opt/nDeploy/php-fpm.d/"+user+".conf")
silentremove("/opt/nDeploy/secure-php-fpm.d/"+user+".conf")
if user != 'nobody' and user != 'system':
user_home = pwd.getpwnam(user).pw_dir
user_shell = pwd.getpwnam(user).pw_shell
if user_shell == '/usr/local/cpanel/bin/noshell':
print('Please set Jailed shell for user: '+user)
else:
print('VirtfsJailFix:: '+user)
subprocess.call('su - '+user+' -c "touch '+user_home+'/public_html"', shell=True)
elif trigger == "start":
subprocess.call("sysctl -q -w net.core.somaxconn=4096", shell=True)
subprocess.call("sysctl -q -w vm.max_map_count=131070", shell=True)
for path in list(php_backends_dict.values()):
if os.path.isfile(path+"/sbin/php-fpm"):
php_fpm_bin = path+"/sbin/php-fpm"
else:
php_fpm_bin = path+"/usr/sbin/php-fpm"
subprocess.call(php_fpm_bin+" --prefix "+path+" --fpm-config "+php_fpm_config, shell=True)
elif trigger == "stop":
for path in list(php_backends_dict.values()):
php_fpm_pid = path+"/var/run/php-fpm.pid"
if os.path.isfile(php_fpm_pid):
with open(php_fpm_pid) as f:
mypid = f.read()
f.close()
try:
os.kill(int(mypid), signal.SIGQUIT)
time.sleep(3) # Give enough time for all child process to exit
except OSError:
break
elif trigger == "reload":
for path in list(php_backends_dict.values()):
php_fpm_pid = path+"/var/run/php-fpm.pid"
if os.path.isfile(path+"/sbin/php-fpm"):
php_fpm_bin = path+"/sbin/php-fpm"
else:
php_fpm_bin = path+"/usr/sbin/php-fpm"
if os.path.isfile(php_fpm_pid):
with open(php_fpm_pid) as f:
mypid = f.read()
try:
os.kill(int(mypid), signal.SIGUSR2)
except OSError:
subprocess.call(php_fpm_bin+" --prefix "+path+" --fpm-config "+php_fpm_config, shell=True)
time.sleep(3)
try:
with open(path + "/var/run/php-fpm.pid") as f:
newpid = f.read()
except IOError:
subprocess.call(php_fpm_bin+" --prefix "+path+" --fpm-config "+php_fpm_config, shell=True)
try:
os.kill(int(newpid), 0)
except OSError:
subprocess.call(php_fpm_bin+" --prefix "+path+" --fpm-config "+php_fpm_config, shell=True)
else:
subprocess.call(php_fpm_bin+" --prefix "+path+" --fpm-config "+php_fpm_config, shell=True)
elif trigger == "secure-php":
try:
subprocess.call(['systemctl', '--version'])
except OSError:
print('secure-php needs systemd . upgrade your cPanel system to CentOS7 ')
sys.exit(1)
else:
for backend_name in list(php_backends_dict.keys()):
systemd_socket_file = "/etc/systemd/system/"+backend_name+"@.socket"
systemd_service_file = "/etc/systemd/system/"+backend_name+"@.service"
templateLoader = jinja2.FileSystemLoader(installation_path + "/conf/")
templateEnv = jinja2.Environment(loader=templateLoader)
socket_template = templateEnv.get_template('secure-php-fpm.socket.j2')
templateVars = {"PHP_ROOT_PATH": php_backends_dict.get(backend_name)}
socket_generated_config = socket_template.render(templateVars)
with codecs.open(systemd_socket_file, "w", 'utf-8') as confout:
confout.write(socket_generated_config)
service_template = templateEnv.get_template('secure-php-fpm.service.j2')
service_generated_config = service_template.render(templateVars)
with codecs.open(systemd_service_file, "w", 'utf-8') as confout:
confout.write(service_generated_config)
subprocess.call(['systemctl', 'daemon-reload'])
print('Disabling root owned php-fpm master process:')
subprocess.call(['systemctl', 'stop', 'ndeploy_backends.service'])
subprocess.call(['systemctl', 'disable', 'ndeploy_backends.service'])
if not os.path.isfile(installation_path+"/conf/secure-php-enabled"):
os.mknod(installation_path+"/conf/secure-php-enabled")
elif trigger == "disable-secure-php":
conf_list = os.listdir("/opt/nDeploy/secure-php-fpm.d")
for filename in conf_list:
user, extension = filename.split('.')
for backend_name in list(php_backends_dict.keys()):
subprocess.call(['systemctl', 'stop', backend_name+'@'+user+'.socket'])
subprocess.call(['systemctl', 'disable', backend_name+'@'+user+'.socket'])
subprocess.call(['killall', '-SIGKILL', 'php-fpm'])
silentremove(installation_path+"/conf/secure-php-enabled")
subprocess.call(['systemctl', 'enable', 'ndeploy_backends.service'])
subprocess.call(['systemctl', 'restart', 'ndeploy_backends.service'])
# Following is provided to remove legacy Apache PHPFPM selector plugin
elif trigger == 'httpd-php-uninstall':
silentremove('/var/cpanel/templates/apache2_4/vhost.local')
silentremove('/var/cpanel/templates/apache2_4/ssl_vhost.local')
silentremove(installation_path+'/conf/PHPFPM_SELECTOR_ENABLED')
subprocess.call(['/scripts/rebuildhttpdconf'], shell=True)
subprocess.call(['/scripts/restartsrv_httpd'], shell=True)
elif trigger == 'jailphpfpm':
if os.path.isfile('/opt/nDeploy/conf/secure-php-enabled'):
print('php-fpm can be chrooted only if master process is run as root')
print('Disable secure-php to setup chrooted php-fpm')
else:
if os.path.isdir('/opt/nDeploy/conf/nDeploy-cluster'):
subprocess.call(['/usr/local/cpanel/bin/whmapi1 set_tweaksetting key=jaildefaultshell value=1'], shell=True)
subprocess.call(['/usr/local/cpanel/bin/whmapi1 set_tweaksetting key=jailapache value=1'], shell=True)
if not os.path.isdir('/var/cpanel/feature_toggles'):
os.mkdir('/var/cpanel/feature_toggles')
subprocess.call(['touch /var/cpanel/feature_toggles/apachefpmjail'], shell=True)
elif trigger == 'disable-jailphpfpm':
silentremove('/var/cpanel/feature_toggles/apachefpmjail')
else:
return
backend_data_yaml = open(backend_config_file, 'r')
backend_data_yaml_parsed = yaml.safe_load(backend_data_yaml)
backend_data_yaml.close()
parser = argparse.ArgumentParser(description="Start/Stop various nDeploy backends")
parser.add_argument("control_command")
args = parser.parse_args()
trigger = args.control_command
control_php_fpm(trigger)
| AnoopAlias/XtendWeb | scripts/init_backends.py | Python | gpl-3.0 | 8,703 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for _conversion.py."""
from __future__ import absolute_import
import unittest
import numpy
from openfermion.hamiltonians import fermi_hubbard
from openfermion.ops import (BosonOperator,
DiagonalCoulombHamiltonian,
FermionOperator,
QuadOperator,
QubitOperator)
from openfermion.transforms import jordan_wigner
from openfermion.utils import is_hermitian, normal_ordered
from openfermion.ops._interaction_operator import InteractionOperatorError
from openfermion.ops._quadratic_hamiltonian import QuadraticHamiltonianError
from openfermion.utils._testing_utils import (
random_hermitian_matrix,
random_quadratic_hamiltonian)
from openfermion.transforms._conversion import (
get_boson_operator,
get_diagonal_coulomb_hamiltonian,
get_fermion_operator,
get_interaction_operator,
get_quad_operator,
get_quadratic_hamiltonian,
get_sparse_operator)
class GetInteractionOperatorTest(unittest.TestCase):
def test_get_molecular_operator(self):
coefficient = 3.
operators = ((2, 1), (3, 0), (0, 0), (3, 1))
op = FermionOperator(operators, coefficient)
molecular_operator = get_interaction_operator(op)
fermion_operator = get_fermion_operator(molecular_operator)
fermion_operator = normal_ordered(fermion_operator)
self.assertTrue(normal_ordered(op) == fermion_operator)
def test_get_interaction_operator_bad_input(self):
with self.assertRaises(TypeError):
get_interaction_operator('3')
def test_get_interaction_operator_too_few_qubits(self):
with self.assertRaises(ValueError):
get_interaction_operator(FermionOperator('3^ 2^ 1 0'), 3)
def test_get_interaction_operator_bad_1body_term(self):
with self.assertRaises(InteractionOperatorError):
get_interaction_operator(FermionOperator('1^ 0^'))
def test_get_interaction_operator_bad_2body_term(self):
with self.assertRaises(InteractionOperatorError):
get_interaction_operator(FermionOperator('3^ 2 1 0'))
def test_get_interaction_operator_nonmolecular_term(self):
with self.assertRaises(InteractionOperatorError):
get_interaction_operator(FermionOperator('3^ 2 1'))
def test_get_molecular_data(self):
"""Test conversion to MolecularData from InteractionOperator"""
class GetQuadraticHamiltonianTest(unittest.TestCase):
def setUp(self):
self.hermitian_op = FermionOperator((), 1.)
self.hermitian_op += FermionOperator('1^ 1', 3.)
self.hermitian_op += FermionOperator('1^ 2', 3. + 4.j)
self.hermitian_op += FermionOperator('2^ 1', 3. - 4.j)
self.hermitian_op += FermionOperator('3^ 4^', 2. + 5.j)
self.hermitian_op += FermionOperator('4 3', 2. - 5.j)
self.hermitian_op_pc = FermionOperator((), 1.)
self.hermitian_op_pc += FermionOperator('1^ 1', 3.)
self.hermitian_op_pc += FermionOperator('1^ 2', 3. + 4.j)
self.hermitian_op_pc += FermionOperator('2^ 1', 3. - 4.j)
self.hermitian_op_pc += FermionOperator('3^ 4', 2. + 5.j)
self.hermitian_op_pc += FermionOperator('4^ 3', 2. - 5.j)
self.hermitian_op_bad_term = FermionOperator('1^ 1 2', 3.)
self.hermitian_op_bad_term += FermionOperator('2^ 1^ 1', 3.)
self.not_hermitian_1 = FermionOperator('2^ 0^')
self.not_hermitian_2 = FermionOperator('3^ 0^')
self.not_hermitian_2 += FermionOperator('3 0', 3.)
self.not_hermitian_3 = FermionOperator('2 0')
self.not_hermitian_4 = FermionOperator('4 0')
self.not_hermitian_4 += FermionOperator('4^ 0^', 3.)
self.not_hermitian_5 = FermionOperator('2^ 3', 3.)
self.not_hermitian_5 += FermionOperator('3^ 2', 2.)
def test_get_quadratic_hamiltonian_hermitian(self):
"""Test properly formed quadratic Hamiltonians."""
# Non-particle-number-conserving without chemical potential
quadratic_op = get_quadratic_hamiltonian(self.hermitian_op)
fermion_operator = get_fermion_operator(quadratic_op)
fermion_operator = normal_ordered(fermion_operator)
self.assertTrue(
normal_ordered(self.hermitian_op) == fermion_operator)
# Non-particle-number-conserving chemical potential
quadratic_op = get_quadratic_hamiltonian(self.hermitian_op,
chemical_potential=3.)
fermion_operator = get_fermion_operator(quadratic_op)
fermion_operator = normal_ordered(fermion_operator)
self.assertTrue(
normal_ordered(self.hermitian_op) == fermion_operator)
# Particle-number-conserving
quadratic_op = get_quadratic_hamiltonian(self.hermitian_op_pc)
fermion_operator = get_fermion_operator(quadratic_op)
fermion_operator = normal_ordered(fermion_operator)
self.assertTrue(
normal_ordered(self.hermitian_op_pc) == fermion_operator)
def test_get_quadratic_hamiltonian_hermitian_bad_term(self):
"""Test an operator with non-quadratic terms."""
with self.assertRaises(QuadraticHamiltonianError):
get_quadratic_hamiltonian(self.hermitian_op_bad_term)
def test_get_quadratic_hamiltonian_not_hermitian(self):
"""Test non-Hermitian operators."""
with self.assertRaises(QuadraticHamiltonianError):
get_quadratic_hamiltonian(self.not_hermitian_1)
with self.assertRaises(QuadraticHamiltonianError):
get_quadratic_hamiltonian(self.not_hermitian_2)
with self.assertRaises(QuadraticHamiltonianError):
get_quadratic_hamiltonian(self.not_hermitian_3)
with self.assertRaises(QuadraticHamiltonianError):
get_quadratic_hamiltonian(self.not_hermitian_4)
with self.assertRaises(QuadraticHamiltonianError):
get_quadratic_hamiltonian(self.not_hermitian_5)
def test_get_quadratic_hamiltonian_bad_input(self):
"""Test improper input."""
with self.assertRaises(TypeError):
get_quadratic_hamiltonian('3')
def test_get_quadratic_hamiltonian_too_few_qubits(self):
"""Test asking for too few qubits."""
with self.assertRaises(ValueError):
get_quadratic_hamiltonian(FermionOperator('3^ 2^'), n_qubits=3)
def test_ignore_incompatible_terms(self):
ferm_op = (FermionOperator('0^ 2') + FermionOperator('2^ 0') +
FermionOperator('1^ 0^ 2') + FermionOperator('1^ 0^ 2 1') +
FermionOperator('0^ 0 1^ 1') + FermionOperator('1^ 2^ 1 2'))
converted_op = get_quadratic_hamiltonian(
ferm_op,
ignore_incompatible_terms=True)
self.assertTrue(numpy.allclose(converted_op.hermitian_part,
numpy.array([[0, 0, 1],
[0, 0, 0],
[1, 0, 0]])))
class GetDiagonalCoulombHamiltonianTest(unittest.TestCase):
def test_hubbard(self):
x_dim = 4
y_dim = 5
tunneling = 2.
coulomb = 3.
chemical_potential = 7.
magnetic_field = 11.
periodic = False
hubbard_model = fermi_hubbard(x_dim, y_dim, tunneling, coulomb,
chemical_potential, magnetic_field,
periodic)
self.assertTrue(
normal_ordered(hubbard_model) ==
normal_ordered(
get_fermion_operator(
get_diagonal_coulomb_hamiltonian(hubbard_model))))
def test_random_quadratic(self):
n_qubits = 5
quad_ham = random_quadratic_hamiltonian(n_qubits, True)
ferm_op = get_fermion_operator(quad_ham)
self.assertTrue(
normal_ordered(ferm_op) ==
normal_ordered(
get_fermion_operator(
get_diagonal_coulomb_hamiltonian(ferm_op))))
def test_ignore_incompatible_terms(self):
ferm_op = (FermionOperator('0^ 2') + FermionOperator('2^ 0') +
FermionOperator('1^ 0^ 2') + FermionOperator('1^ 0^ 2 1') +
FermionOperator('0^ 0 1^ 1') + FermionOperator('1^ 2^ 1 2'))
converted_op = get_diagonal_coulomb_hamiltonian(
ferm_op,
ignore_incompatible_terms=True)
self.assertTrue(numpy.allclose(converted_op.one_body,
numpy.array([[0, 0, 1],
[0, 0, 0],
[1, 0, 0]])))
self.assertTrue(numpy.allclose(converted_op.two_body,
numpy.array([[0, 0.5, 0],
[0.5, 0, -0.5],
[0, -0.5, 0]])))
def test_exceptions(self):
op1 = QubitOperator()
op2 = FermionOperator('0^ 3') + FermionOperator('3^ 0')
op3 = FermionOperator('0^ 1^')
op4 = FermionOperator('0^ 1^ 2^ 3')
op5 = FermionOperator('0^ 3')
op6 = FermionOperator('0^ 0 1^ 1', 1.j)
with self.assertRaises(TypeError):
_ = get_diagonal_coulomb_hamiltonian(op1)
with self.assertRaises(ValueError):
_ = get_diagonal_coulomb_hamiltonian(op2, n_qubits=2)
with self.assertRaises(ValueError):
_ = get_diagonal_coulomb_hamiltonian(op3)
with self.assertRaises(ValueError):
_ = get_diagonal_coulomb_hamiltonian(op4)
with self.assertRaises(ValueError):
_ = get_diagonal_coulomb_hamiltonian(op5)
with self.assertRaises(ValueError):
_ = get_diagonal_coulomb_hamiltonian(op6)
class GetSparseOperatorQubitTest(unittest.TestCase):
def test_sparse_matrix_Y(self):
term = QubitOperator(((0, 'Y'),))
sparse_operator = get_sparse_operator(term)
self.assertEqual(list(sparse_operator.data), [1j, -1j])
self.assertEqual(list(sparse_operator.indices), [1, 0])
self.assertTrue(is_hermitian(sparse_operator))
def test_sparse_matrix_ZX(self):
coefficient = 2.
operators = ((0, 'Z'), (1, 'X'))
term = QubitOperator(operators, coefficient)
sparse_operator = get_sparse_operator(term)
self.assertEqual(list(sparse_operator.data), [2., 2., -2., -2.])
self.assertEqual(list(sparse_operator.indices), [1, 0, 3, 2])
self.assertTrue(is_hermitian(sparse_operator))
def test_sparse_matrix_ZIZ(self):
operators = ((0, 'Z'), (2, 'Z'))
term = QubitOperator(operators)
sparse_operator = get_sparse_operator(term)
self.assertEqual(list(sparse_operator.data),
[1, -1, 1, -1, -1, 1, -1, 1])
self.assertEqual(list(sparse_operator.indices), list(range(8)))
self.assertTrue(is_hermitian(sparse_operator))
def test_sparse_matrix_combo(self):
qop = (QubitOperator(((0, 'Y'), (1, 'X')), -0.1j) +
QubitOperator(((0, 'X'), (1, 'Z')), 3. + 2.j))
sparse_operator = get_sparse_operator(qop)
self.assertEqual(list(sparse_operator.data),
[3 + 2j, 0.1, 0.1, -3 - 2j,
3 + 2j, -0.1, -0.1, -3 - 2j])
self.assertEqual(list(sparse_operator.indices),
[2, 3, 2, 3, 0, 1, 0, 1])
def test_sparse_matrix_zero_1qubit(self):
sparse_operator = get_sparse_operator(QubitOperator((), 0.0), 1)
sparse_operator.eliminate_zeros()
self.assertEqual(len(list(sparse_operator.data)), 0)
self.assertEqual(sparse_operator.shape, (2, 2))
def test_sparse_matrix_zero_5qubit(self):
sparse_operator = get_sparse_operator(QubitOperator((), 0.0), 5)
sparse_operator.eliminate_zeros()
self.assertEqual(len(list(sparse_operator.data)), 0)
self.assertEqual(sparse_operator.shape, (32, 32))
def test_sparse_matrix_identity_1qubit(self):
sparse_operator = get_sparse_operator(QubitOperator(()), 1)
self.assertEqual(list(sparse_operator.data), [1] * 2)
self.assertEqual(sparse_operator.shape, (2, 2))
def test_sparse_matrix_identity_5qubit(self):
sparse_operator = get_sparse_operator(QubitOperator(()), 5)
self.assertEqual(list(sparse_operator.data), [1] * 32)
self.assertEqual(sparse_operator.shape, (32, 32))
def test_sparse_matrix_linearity(self):
identity = QubitOperator(())
zzzz = QubitOperator(tuple((i, 'Z') for i in range(4)), 1.0)
sparse1 = get_sparse_operator(identity + zzzz)
sparse2 = get_sparse_operator(identity, 4) + get_sparse_operator(zzzz)
self.assertEqual(list(sparse1.data), [2] * 8)
self.assertEqual(list(sparse1.indices),
[0, 3, 5, 6, 9, 10, 12, 15])
self.assertEqual(list(sparse2.data), [2] * 8)
self.assertEqual(list(sparse2.indices),
[0, 3, 5, 6, 9, 10, 12, 15])
class GetSparseOperatorFermionTest(unittest.TestCase):
def test_sparse_matrix_zero_n_qubit(self):
sparse_operator = get_sparse_operator(FermionOperator.zero(), 4)
sparse_operator.eliminate_zeros()
self.assertEqual(len(list(sparse_operator.data)), 0)
self.assertEqual(sparse_operator.shape, (16, 16))
class GetSparseOperatorBosonTest(unittest.TestCase):
def setUp(self):
self.hbar = 1.
self.d = 4
self.b = numpy.diag(numpy.sqrt(numpy.arange(1, self.d)), 1)
self.bd = self.b.conj().T
self.q = numpy.sqrt(self.hbar/2)*(self.b + self.bd)
def test_sparse_matrix_ladder(self):
sparse_operator = get_sparse_operator(BosonOperator('0'), trunc=self.d)
self.assertTrue(numpy.allclose(sparse_operator.toarray(), self.b))
self.assertEqual(sparse_operator.shape, (self.d, self.d))
def test_sparse_matrix_quad(self):
sparse_operator = get_sparse_operator(QuadOperator('q0'), trunc=self.d)
self.assertTrue(numpy.allclose(sparse_operator.toarray(), self.q))
self.assertEqual(sparse_operator.shape, (self.d, self.d))
def test_sparse_matrix_error(self):
with self.assertRaises(TypeError):
_ = get_sparse_operator(1)
class GetSparseOperatorDiagonalCoulombHamiltonianTest(unittest.TestCase):
def test_diagonal_coulomb_hamiltonian(self):
n_qubits = 5
one_body = random_hermitian_matrix(n_qubits, real=False)
two_body = random_hermitian_matrix(n_qubits, real=True)
constant = numpy.random.randn()
op = DiagonalCoulombHamiltonian(one_body, two_body, constant)
op1 = get_sparse_operator(op)
op2 = get_sparse_operator(jordan_wigner(get_fermion_operator(op)))
diff = op1 - op2
discrepancy = 0.
if diff.nnz:
discrepancy = max(abs(diff.data))
self.assertAlmostEqual(discrepancy, 0.)
class GetQuadOperatorTest(unittest.TestCase):
def setUp(self):
self.hbar = 0.5
def test_invalid_op(self):
op = QuadOperator()
with self.assertRaises(TypeError):
_ = get_quad_operator(op)
def test_zero(self):
b = BosonOperator()
q = get_quad_operator(b)
self.assertTrue(q == QuadOperator.zero())
def test_identity(self):
b = BosonOperator('')
q = get_quad_operator(b)
self.assertTrue(q == QuadOperator.identity())
def test_creation(self):
b = BosonOperator('0^')
q = get_quad_operator(b, hbar=self.hbar)
expected = QuadOperator('q0') - 1j*QuadOperator('p0')
expected /= numpy.sqrt(2*self.hbar)
self.assertTrue(q == expected)
def test_annihilation(self):
b = BosonOperator('0')
q = get_quad_operator(b, hbar=self.hbar)
expected = QuadOperator('q0') + 1j*QuadOperator('p0')
expected /= numpy.sqrt(2*self.hbar)
self.assertTrue(q == expected)
def test_two_mode(self):
b = BosonOperator('0^ 2')
q = get_quad_operator(b, hbar=self.hbar)
expected = QuadOperator('q0') - 1j*QuadOperator('p0')
expected *= (QuadOperator('q2') + 1j*QuadOperator('p2'))
expected /= 2*self.hbar
self.assertTrue(q == expected)
def test_two_term(self):
b = BosonOperator('0^ 0') + BosonOperator('0 0^')
q = get_quad_operator(b, hbar=self.hbar)
expected = (QuadOperator('q0') - 1j*QuadOperator('p0')) \
* (QuadOperator('q0') + 1j*QuadOperator('p0')) \
+ (QuadOperator('q0') + 1j*QuadOperator('p0')) \
* (QuadOperator('q0') - 1j*QuadOperator('p0'))
expected /= 2*self.hbar
self.assertTrue(q == expected)
def test_q_squared(self):
b = self.hbar*(BosonOperator('0^ 0^') + BosonOperator('0 0')
+ BosonOperator('') + 2*BosonOperator('0^ 0'))/2
q = normal_ordered(
get_quad_operator(b, hbar=self.hbar), hbar=self.hbar)
expected = QuadOperator('q0 q0')
self.assertTrue(q == expected)
def test_p_squared(self):
b = self.hbar*(-BosonOperator('1^ 1^') - BosonOperator('1 1')
+ BosonOperator('') + 2*BosonOperator('1^ 1'))/2
q = normal_ordered(
get_quad_operator(b, hbar=self.hbar), hbar=self.hbar)
expected = QuadOperator('p1 p1')
self.assertTrue(q == expected)
class GetBosonOperatorTest(unittest.TestCase):
def setUp(self):
self.hbar = 0.5
def test_invalid_op(self):
op = BosonOperator()
with self.assertRaises(TypeError):
_ = get_boson_operator(op)
def test_zero(self):
q = QuadOperator()
b = get_boson_operator(q)
self.assertTrue(b == BosonOperator.zero())
def test_identity(self):
q = QuadOperator('')
b = get_boson_operator(q)
self.assertTrue(b == BosonOperator.identity())
def test_x(self):
q = QuadOperator('q0')
b = get_boson_operator(q, hbar=self.hbar)
expected = BosonOperator('0') + BosonOperator('0^')
expected *= numpy.sqrt(self.hbar/2)
self.assertTrue(b == expected)
def test_p(self):
q = QuadOperator('p2')
b = get_boson_operator(q, hbar=self.hbar)
expected = BosonOperator('2') - BosonOperator('2^')
expected *= -1j*numpy.sqrt(self.hbar/2)
self.assertTrue(b == expected)
def test_two_mode(self):
q = QuadOperator('p2 q0')
b = get_boson_operator(q, hbar=self.hbar)
expected = -1j*self.hbar/2 \
* (BosonOperator('0') + BosonOperator('0^')) \
* (BosonOperator('2') - BosonOperator('2^'))
self.assertTrue(b == expected)
def test_two_term(self):
q = QuadOperator('p0 q0') + QuadOperator('q0 p0')
b = get_boson_operator(q, hbar=self.hbar)
expected = -1j*self.hbar/2 \
* ((BosonOperator('0') + BosonOperator('0^'))
* (BosonOperator('0') - BosonOperator('0^'))
+ (BosonOperator('0') - BosonOperator('0^'))
* (BosonOperator('0') + BosonOperator('0^')))
self.assertTrue(b == expected)
| jarrodmcc/OpenFermion | src/openfermion/transforms/_conversion_test.py | Python | apache-2.0 | 19,956 |
import logging
log = logging.getLogger('zen.CloudFoundryLoader')
from zope.interface import implements
from Products.Zuul import getFacade
from Products.ZenModel.interfaces import IDeviceLoader
class CloudFoundryLoader(object):
"""
Loader for the CloudFoundry ZenPack.
"""
implements(IDeviceLoader)
def load_device(self, dmd, target, email, password, collector):
return getFacade('cloudfoundry', dmd).addEndpoint(
target, email, password, collector)
| zenoss/ZenPacks.zenoss.CloudFoundry | ZenPacks/zenoss/CloudFoundry/deviceloaders.py | Python | gpl-2.0 | 495 |
# -*- coding: utf-8 -*-
"""
A library of useful functions used throughout the *fyrd* package.
These include functions to handle data, format outputs, handle file opening,
run commands, check file extensions, get user input, and search and format
imports.
These functions are not intended to be accessed directly.
"""
from __future__ import print_function
from __future__ import with_statement
import os as _os
import re as _re
import sys as _sys
import inspect as _inspect
import argparse as _argparse
import bz2
import gzip
from subprocess import Popen
from subprocess import PIPE
from time import sleep
from six import reraise
from six.moves import input as _get_input
from . import logme as _logme
###############################################################################
# Useful Classes #
###############################################################################
class CustomFormatter(_argparse.ArgumentDefaultsHelpFormatter,
_argparse.RawDescriptionHelpFormatter):
"""Custom argparse formatting."""
pass
class CommandError(Exception):
"""A custom exception."""
pass
###############################################################################
# Misc Functions #
###############################################################################
def listify(iterable):
"""Try to force any iterable into a list sensibly."""
if isinstance(iterable, list):
return iterable
if isinstance(iterable, (str, int, float)):
return [iterable]
if not iterable:
return []
try:
iterable = list(iterable)
except TypeError:
iterable = [iterable]
return iterable
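# A few quick examples of listify's behaviour:
#   listify('abc')   -> ['abc']
#   listify((1, 2))  -> [1, 2]
#   listify(None)    -> []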
def merge_lists(lists):
"""Turn a list of lists into a single list."""
outlist = []
for lst in listify(lists):
outlist += lst
return outlist
def write_iterable(iterable, outfile):
"""Write all elements of iterable to outfile."""
with open_zipped(outfile, 'w') as fout:
fout.write('\n'.join(iterable))
def indent(string, prefix=' '):
"""Replicate python3's textwrap.indent for python2.
Args:
string (str): Any string.
prefix (str): What to indent with.
Returns:
str: Indented string
"""
out = ''
for i in string.split('\n'):
out += '{}{}\n'.format(prefix, i)
return out
def is_exc(x):
"""Check if x is the output of sys.exc_info().
Returns:
bool: True if matched the output of sys.exc_info().
"""
return bool(isinstance(x, tuple)
and len(x) == 3
                and issubclass(x[0], BaseException))
###############################################################################
# File Management #
###############################################################################
def open_zipped(infile, mode='r'):
"""Open a regular, gzipped, or bz2 file.
If infile is a file handle or text device, it is returned without
changes.
Returns:
text mode file handle.
"""
mode = mode[0] + 't'
if hasattr(infile, 'write'):
return infile
if isinstance(infile, str):
if infile.endswith('.gz'):
return gzip.open(infile, mode)
if infile.endswith('.bz2'):
if hasattr(bz2, 'open'):
return bz2.open(infile, mode)
else:
return bz2.BZ2File(infile, mode)
return open(infile, mode)
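# Usage sketch (hypothetical file name): compression is inferred from the
# extension, so gzipped, bz2, and plain files all read the same way.
#
#   with open_zipped('results.txt.gz') as fin:
#       header = fin.readline()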
def block_read(files, size=65536):
"""Iterate through a file by blocks."""
while True:
b = files.read(size)
if not b:
break
yield b
def count_lines(infile, force_blocks=False):
"""Return the line count of a file as quickly as possible.
Uses `wc` if avaialable, otherwise does a rapid read.
"""
if which('wc') and not force_blocks:
_logme.log('Using wc', 'debug')
if infile.endswith('.gz'):
cat = 'zcat'
elif infile.endswith('.bz2'):
cat = 'bzcat'
else:
cat = 'cat'
command = "{cat} {infile} | wc -l | awk '{{print $1}}'".format(
cat=cat, infile=infile
)
return int(cmd(command)[1])
else:
_logme.log('Using block read', 'debug')
with open_zipped(infile) as fin:
return sum(bl.count("\n") for bl in block_read(fin))
def split_file(infile, parts, outpath='', keep_header=False):
"""Split a file in parts and return a list of paths.
NOTE: Linux specific (uses wc).
    **Note**: If keep_header is True, the top line is stripped off the infile
    prior to splitting and assumed to be the header.
    Args:
        infile: The file to split.
        parts: The number of parts to split the file into.
        outpath: The directory to save the split files.
        keep_header: Add the header line to the top of every file.
Returns:
list: Paths to split files.
"""
# Determine how many reads will be in each split sam file.
_logme.log('Getting line count', 'debug')
num_lines = int(count_lines(infile)/int(parts)) + 1
# Subset the file into X number of jobs, maintain extension
cnt = 0
currjob = 1
suffix = '.split_' + str(currjob).zfill(4) + '.' + infile.split('.')[-1]
file_name = _os.path.basename(infile)
run_file = _os.path.join(outpath, file_name + suffix)
outfiles = [run_file]
# Actually split the file
_logme.log('Splitting file', 'debug')
with open_zipped(infile) as fin:
header = fin.readline() if keep_header else ''
sfile = open_zipped(run_file, 'w')
sfile.write(header)
for line in fin:
cnt += 1
if cnt < num_lines:
sfile.write(line)
elif cnt == num_lines:
sfile.write(line)
sfile.close()
currjob += 1
suffix = '.split_' + str(currjob).zfill(4) + '.' + \
infile.split('.')[-1]
run_file = _os.path.join(outpath, file_name + suffix)
sfile = open_zipped(run_file, 'w')
outfiles.append(run_file)
sfile.write(header)
cnt = 0
sfile.close()
_logme.log('Split files: {}'.format(outfiles), 'debug')
return tuple(outfiles)
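# Usage sketch (hypothetical file): split a table into four parts, repeating
# the header at the top of each part.
#
#   parts = split_file('table.tsv', 4, outpath='/tmp', keep_header=True)
#   # -> ('/tmp/table.tsv.split_0001.tsv', ..., '/tmp/table.tsv.split_0004.tsv')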
def is_exe(fpath):
"""Return True is fpath is executable."""
return _os.path.isfile(fpath) and _os.access(fpath, _os.X_OK)
def file_type(infile):
"""Return file type after stripping gz or bz2."""
name_parts = infile.split('.')
if name_parts[-1] == 'gz' or name_parts[-1] == 'bz2':
name_parts.pop()
return name_parts[-1]
def is_file_type(infile, types):
"""Return True if infile is one of types.
Args:
infile: Any file name
types: String or list/tuple of strings (e.g ['bed', 'gtf'])
Returns:
True or False
"""
if hasattr(infile, 'write'):
infile = infile.name
types = listify(types)
for typ in types:
if file_type(infile) == typ:
return True
return False
###############################################################################
# Running Commands #
###############################################################################
def cmd(command, args=None, stdout=None, stderr=None, tries=1):
"""Run command and return status, output, stderr.
Args:
command (str): Path to executable.
args (tuple): Tuple of arguments.
stdout (str): File or open file like object to write STDOUT to.
stderr (str): File or open file like object to write STDERR to.
tries (int): Number of times to try to execute. 1+
Returns:
tuple: exit_code, STDOUT, STDERR
"""
tries = int(tries)
assert tries > 0
count = 1
if isinstance(command, (list, tuple)):
if args:
raise ValueError('Cannot submit list/tuple command as ' +
'well as args argument')
command = ' '.join(command)
assert isinstance(command, str)
if args:
if isinstance(args, (list, tuple)):
args = ' '.join(args)
        args = command + ' ' + args
else:
args = command
_logme.log('Running {} as {}'.format(command, args), 'verbose')
while True:
try:
pp = Popen(args, shell=True, universal_newlines=True,
stdout=PIPE, stderr=PIPE)
        except OSError:
_logme.log('{} does not exist'.format(command), 'critical')
raise
out, err = pp.communicate()
code = pp.returncode
if code == 0 or count == tries:
break
_logme.log('Command {} failed with code {}, retrying.'
.format(command, code), 'warn')
sleep(1)
count += 1
_logme.log('{} completed with code {}'.format(command, code), 'debug')
if stdout:
with open_zipped(stdout, 'w') as fout:
fout.write(out)
if stderr:
with open_zipped(stderr, 'w') as fout:
fout.write(err)
return code, out.rstrip(), err.rstrip()
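# Usage sketch: run a command, retrying once on failure, and capture output.
#
#   code, out, err = cmd('echo hello', tries=2)
#   # code == 0, out == 'hello', err == ''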
def export_run(function, args, kwargs):
"""Execute a function after first exporting all imports."""
kwargs['imports'] = export_imports(function, kwargs)
    _logme.log('Imports: {}'.format(kwargs['imports']), 'debug')
return function(*args, **kwargs)
def which(program):
"""Replicate the UNIX which command.
Taken verbatim from:
stackoverflow.com/questions/377017/test-if-executable-exists-in-python
Args:
program: Name of executable to test.
Returns:
        Path to the program or None on failure.
"""
fpath, program = _os.path.split(program)
if fpath:
if is_exe(program):
return _os.path.abspath(program)
else:
for path in _os.environ["PATH"].split(_os.pathsep):
path = path.strip('"')
exe_file = _os.path.join(path, program)
if is_exe(exe_file):
return _os.path.abspath(exe_file)
return None
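# Quick checks (the resolved path varies by system):
#   which('ls')            # -> e.g. '/bin/ls' or '/usr/bin/ls'
#   which('no-such-tool')  # -> None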
def check_pid(pid):
"""Check For the existence of a unix pid."""
try:
_os.kill(pid, 0)
except OSError:
return False
else:
return True
###############################################################################
# Option and Argument Management #
###############################################################################
def replace_argument(args, find_string, replace_string, error=True):
"""Replace find_string with replace string in a tuple or dict.
If dict, the values are replaced, not the keys.
Note: args can also be a list, in which case the first item is assumed
to be a tuple, and the second a dictionary
Args:
args (list/tuple/dict): Tuple or dict of args
find_string (str): A string to search for
replace_string (str): A string to replace with
error (bool): Raise ValueError if replacement fails
Returns:
The same object as was passed, with alterations made.
"""
double = False
if isinstance(args, list):
args, kwargs = args
double = True
elif isinstance(args, tuple):
kwargs = None
elif isinstance(args, dict):
kwargs = args.copy()
args = None
else:
raise ValueError('args must be list/tuple/dict, is {}\nval: {}'
.format(type(args), args))
if not args and not kwargs:
msg = 'No arguments or keyword arguments found'
if error:
raise ValueError(msg)
else:
_logme.log(msg, 'warn')
if double:
return None, None
else:
return None
found = False
newargs = tuple()
if args:
for arg in listify(args):
if isinstance(arg, str) and find_string in arg:
arg = arg.format(**{find_string.strip('{}'): replace_string})
found = True
newargs += (arg,)
newkwds = {}
if kwargs:
for arg, value in kwargs.items():
if isinstance(value, str) and find_string in value:
value = replace_string
found = True
newkwds[arg] = value
if found is not True:
msg = 'Could not find {}'.format(find_string)
if error:
raise ValueError(msg)
else:
_logme.log(msg, 'warn')
if double:
return None, None
else:
return None
if double:
return [newargs, newkwds]
else:
if newargs:
return newargs
else:
return newkwds
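# Illustrative calls (hypothetical values):
#   replace_argument(('run {file}',), '{file}', 'in.txt')
#   # -> ('run in.txt',)
#   replace_argument({'script': '{file}'}, '{file}', 'in.txt')
#   # -> {'script': 'in.txt'}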
def opt_split(opt, split_on):
"""Split options by chars in split_on, merge all into single list.
Args:
opt (list): A list of strings, can be a single string.
split_on (list): A list of characters to use to split the options.
Returns:
list: A single merged list of split options, uniqueness guaranteed,
order not.
"""
opt = listify(opt)
split_on = listify(split_on)
final_list = []
for o in opt:
final_list += _re.split('[{}]'.format(''.join(split_on)), o)
return list(set(final_list)) # Return unique options only, order lost.
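# Illustrative call; result order is not guaranteed because a set is used:
#   opt_split(['a,b', 'c;d'], (',', ';'))
#   # -> ['a', 'b', 'c', 'd'] in some order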
###############################################################################
# User Input #
###############################################################################
def get_yesno(message, default=None):
"""Get yes/no answer from user.
Args:
message (str): A message to print, an additional space will be added.
default (str): One of {'y', 'n'}, the default if the user gives no
answer. If None, answer forced.
Returns:
bool: True on yes, False on no
"""
if default:
if default.lower().startswith('y'):
tailstr = '[Y/n] '
elif default.lower().startswith('n'):
tailstr = '[y/N] '
else:
raise ValueError('Invalid default')
else:
tailstr = '[y/n] '
message = message + tailstr if message.endswith(' ') \
else message + ' ' + tailstr
ans = get_input(message, 'yesno', default)
if ans.lower().startswith('y'):
return True
elif ans.lower().startswith('n'):
return False
else:
raise ValueError('Invalid response: {}'.format(ans))
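# Example: with default='n' the prompt reads "Continue? [y/N] " and pressing
# Enter returns False.
#
#   get_yesno('Continue?', default='n')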
def get_input(message, valid_answers=None, default=None):
"""Get input from the command line and check answers.
Allows input to work with python 2/3
Args:
message (str): A message to print, an additional space will be
added.
valid_answers (list): A list of answers to accept, if None, ignored.
Case insensitive. There is one special option
here: 'yesno', this allows all case insensitive
variations of y/n/yes/no.
default (str): The default answer.
Returns:
str: The response
"""
if not message.endswith(' '):
message = message + ' '
if valid_answers:
if isinstance(valid_answers, str):
if valid_answers.lower() == 'yesno':
valid_answers = ['yes', 'no', 'y', 'n']
else:
valid_answers = [valid_answers]
if not isinstance(valid_answers, (list, tuple, set, frozenset)):
_logme.log('valid_answers must be a list, is {}'
.format(type(valid_answers)), 'critical')
raise ValueError('Invalid argument')
valid_answers = [i.lower() for i in valid_answers]
while True:
ans = _get_input(message)
if not ans and default:
return default
if ans.lower() in valid_answers:
return ans
else:
_logme.log('Invalid response to input question', 'debug')
_sys.stderr.write('Invalid response: {}\n'.format(ans) +
'Valid responses: {}\n'
.format(valid_answers) +
'Please try again.\n')
else:
return _get_input(message)
###############################################################################
# Imports #
###############################################################################
def syspath_fmt(syspaths):
"""Take a list of paths and return a sys of sys.path.append strings."""
outlist = []
for pth in listify(syspaths):
if 'sys.path' in pth:
outlist.append(pth)
continue
if _os.path.exists(pth):
outlist.append("sys.path.append('{}')".format(
_os.path.abspath(pth)
))
else:
raise OSError('Paths must exist, {} does not.'
.format(pth))
return '\n'.join(outlist)
PROT_IMPT = """\
try:
{}
except ImportError:
pass
"""
def normalize_imports(imports, prot=True):
"""Take a heterogenous list of imports and normalize it.
Args:
imports (list): A list of strings, formatted differently.
prot (bool): Protect imports with try..except blocks
Returns:
list: A list of strings that can be used for imports
"""
out_impts = []
prot_impts = []
path_impts = []
imports = listify(imports)
if not imports:
return []
for imp in imports:
if not isinstance(imp, str):
raise ValueError('All imports must be strings')
if imp.startswith('try:'):
prot_impts.append(imp.rstrip())
elif imp.startswith('import') or imp.startswith('from'):
out_impts.append(imp.rstrip())
elif imp.startswith('sys.path.append')\
or imp.startswith('sys.path.insert'):
path_impts.append(imp.rstrip())
else:
if imp.startswith('@'):
continue
out_impts.append('import {}'.format(imp))
if prot:
for imp in out_impts:
prot_impts.append(PROT_IMPT.format(imp))
out = prot_impts
else:
out = out_impts + prot_impts
# Remove duplicates
out = list(set(out))
# Add PATHS
if path_impts:
out = list(set(path_impts)) + out
return out
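# Illustrative call with prot=False (prot=True would wrap each import in the
# try/except template above); result order is not guaranteed:
#   normalize_imports(['os', 'from sys import path'], prot=False)
#   # -> ['import os', 'from sys import path']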
def get_function_path(function):
"""Return path to module defining a function if it exists."""
mod = _inspect.getmodule(function)
if mod and mod != '__main__':
return _os.path.dirname(_inspect.getabsfile(function))
else:
return None
def update_syspaths(function, kwds=None):
"""Add function path to 'syspaths' in kwds."""
if kwds:
syspaths = listify(kwds['syspaths']) if 'syspaths' in kwds else []
else:
syspaths = []
return [get_function_path(function)] + syspaths
def import_function(function, mode='string'):
"""Return an import string for the function.
Attempts to resolve the parent module also, if the parent module is a file,
ie it isn't __main__, the import string will include a call to
sys.path.append to ensure the module is importable.
If this function isn't defined by a module, returns an empty string.
Args:
mode (str): string/list, return as a unified string or a list.
"""
if not callable(function):
raise ValueError('Function must be callable, {} is not'
.format(function))
if mode not in ['string', 'list']:
raise ValueError("Invalid mode {}, must be 'list' or 'string'"
.format(mode))
if _inspect.ismethod(function):
name = (dict(_inspect.getmembers(function.__self__))['__class__']
.__name__)
else:
name = function.__name__
# Attempt to resolve defining file
parent = _inspect.getmodule(function)
imports = []
if parent and parent.__name__ != '__main__':
path = _os.path.dirname(parent.__file__)
module = parent.__name__
# If module is the child of a package, change the directory up to the
# parent
if '.' in module:
path = _os.path.abspath(
_os.path.join(
path, *['..' for i in range(module.count('.'))]
)
)
imports.append("sys.path.append('{}')".format(path))
imports.append('import {}'.format(module))
imports.append('from {} import *'.format(module))
imports.append('from {} import {}'.format(module, name))
return imports if mode == 'list' else '\n'.join(imports)
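# Sketch of the strings produced for a function defined in a hypothetical
# installed package module 'mypkg.tools':
#
#   import_function(mypkg.tools.helper)
#   # sys.path.append('<directory containing mypkg>')
#   # import mypkg.tools
#   # from mypkg.tools import *
#   # from mypkg.tools import helper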
def get_imports(function, mode='string'):
"""Build a list of potentially useful imports from a function handle.
Gets:
- All modules from globals()
- All modules from the function's globals()
- All functions from the function's globals()
Modes:
string: Return a list of strings formatted as unprotected import calls
prot: Similar to string, but with try..except blocks
list: Return two lists: (import name, module name) for modules
and (import name, function name, module name) for functions
Args:
function (callable): A function handle
mode (str): A string corresponding to one of the above modes
Returns:
str or list
"""
if mode not in ['string', 'prot', 'list']:
raise ValueError('mode must be one of string/prot/list')
rootmod = _inspect.getmodule(function)
imports = []
func_imports = []
# For interactive sessions
members = dict(_inspect.getmembers(function))
locations = [members]
if '__globals__' in members:
locations.append(members['__globals__'])
for location in locations:
for name, item in location.items():
if name.startswith('__'):
continue
# Modules
if _inspect.ismodule(item):
imports.append((name, item.__name__))
# Functions
elif callable(item):
try:
func_imports.append((name, item.__name__, item.__module__))
except AttributeError:
pass
# Import all modules in the root module
    imports += [(k, v.__name__)
                for k, v in _inspect.getmembers(rootmod, _inspect.ismodule)
                if not k.startswith('__')]
# Make unique
imports = sorted(list(set(imports)), key=_sort_imports)
func_imports = sorted(list(set(func_imports)), key=_sort_imports)
_logme.log('Imports: {}'.format(imports), 'debug')
_logme.log('Func imports: {}'.format(func_imports), 'debug')
# Create a sane set of imports
ignore_list = ['os', 'sys', 'dill', 'pickle', '__main__']
filtered_imports = []
filtered_func_imports = []
for iname, name in imports:
if iname in ignore_list:
continue
if name.startswith('@') or iname.startswith('@'):
continue
filtered_imports.append((iname, name))
for iname, name, mod in func_imports:
if iname in ignore_list:
continue
if name.startswith('@') or iname.startswith('@'):
continue
filtered_func_imports.append((iname, name, mod))
if mode == 'list':
return filtered_imports, filtered_func_imports
import_strings = []
for iname, name in filtered_imports:
names = name.split('.')
if names[0] == '__main__':
continue
if iname != name:
if len(names) > 1:
if '.'.join(names[1:]) != iname:
import_strings.append(
'from {} import {} as {}'
.format('.'.join(names[:-1]), names[-1], iname)
)
else:
import_strings.append(
'from {} import {}'
.format(names[0], '.'.join(names[1:]))
)
else:
import_strings.append(
('import {} as {}').format(name, iname)
)
else:
import_strings.append('import {}'.format(name))
# Function imports
for iname, name, mod in filtered_func_imports:
if mod == '__main__':
continue
if iname == name:
import_strings.append('from {} import {}'.format(mod, name))
else:
import_strings.append('from {} import {} as {}'
.format(mod, name, iname)
)
if mode == 'string':
return import_strings
elif mode == 'prot':
return normalize_imports(import_strings, prot=True)
else:
raise ValueError('Mode changed unexpectedly')
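# A hedged sketch of get_imports() behavior (not part of the original
# module). For a function whose defining module does `import numpy as np`,
# get_imports(func, mode='string') yields lines like 'import numpy as np';
# mode='prot' wraps each line in a try/except block, and mode='list' returns
# the raw (alias, module) and (alias, function, module) tuples instead.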
def export_globals(function):
"""Add a function's globals to the current globals."""
rootmod = _inspect.getmodule(function)
globals()[rootmod.__name__] = rootmod
for k, v in _inspect.getmembers(rootmod, _inspect.ismodule):
if not k.startswith('__'):
globals()[k] = v
def get_all_imports(function, kwds, prot=False):
"""Get all imports from a function and from kwds.
Args:
function (callable): A function handle
kwds (dict): A dictionary of keyword arguments
prot (bool): Wrap all import in try statement
Returns:
list: Imports
"""
imports = listify(kwds['imports'] if 'imports' in kwds else None)
imports = normalize_imports(imports, prot=False)
imports += get_imports(function, mode='string')
return normalize_imports(imports, prot=prot)
def export_imports(function, kwds):
"""Get imports from a function and from kwds.
Also sets globals and adds path to module to sys path.
Args:
function (callable): A function handle
kwds (dict): A dictionary of keyword arguments
Returns:
list: imports + sys.path.append for module path
"""
export_globals(function)
return import_function(function, 'list') + get_all_imports(function, kwds)
def _sort_imports(x):
"""Sort a list of tuples and strings, for use with sorted."""
if isinstance(x, tuple):
        if x[1] == '__main__':
            # '' (rather than 0) keeps the key comparable with the str keys
            # on Python 3 while still sorting __main__ first.
            return ''
return x[1]
return x
| MikeDacre/slurmy | fyrd/run.py | Python | mit | 26,828 |
# Status: ported, except for tests.
# Base revision: 64070
#
# Copyright 2001, 2002, 2003 Dave Abrahams
# Copyright 2006 Rene Rivera
# Copyright 2002, 2003, 2004, 2005, 2006 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
import os
import re
import sys
from functools import total_ordering
from b2.util.utility import *
from b2.build import feature
from b2.util import sequence, qualify_jam_action, is_iterable_typed
import b2.util.set
from b2.manager import get_manager
__re_two_ampersands = re.compile ('&&')
__re_comma = re.compile (',')
__re_split_condition = re.compile ('(.*):(<.*)')
__re_split_conditional = re.compile (r'(.+):<(.+)')
__re_colon = re.compile (':')
__re_has_condition = re.compile (r':<')
__re_separate_condition_and_property = re.compile (r'(.*):(<.*)')
_not_applicable_feature='not-applicable-in-this-context'
feature.feature(_not_applicable_feature, [], ['free'])
__abbreviated_paths = False
class PropertyMeta(type):
"""
This class exists to implement the isinstance() and issubclass()
    hooks for the Property class. Since we've introduced the concept of
    a LazyProperty, isinstance(p, Property) will fail when p is a LazyProperty.
    Implementing both __instancecheck__ and __subclasscheck__ will allow
    LazyProperty instances to pass the isinstance() and issubclass() checks for
the Property class.
Additionally, the __call__ method intercepts the call to the Property
constructor to ensure that calling Property with the same arguments
will always return the same Property instance.
"""
_registry = {}
current_id = 1
def __call__(mcs, f, value, condition=None):
"""
This intercepts the call to the Property() constructor.
This exists so that the same arguments will always return the same Property
instance. This allows us to give each instance a unique ID.
"""
from b2.build.feature import Feature
if not isinstance(f, Feature):
f = feature.get(f)
if condition is None:
condition = []
key = (f, value) + tuple(sorted(condition))
if key not in mcs._registry:
instance = super(PropertyMeta, mcs).__call__(f, value, condition)
mcs._registry[key] = instance
return mcs._registry[key]
@staticmethod
def check(obj):
return (hasattr(obj, 'feature') and
hasattr(obj, 'value') and
hasattr(obj, 'condition'))
def __instancecheck__(self, instance):
return self.check(instance)
def __subclasscheck__(self, subclass):
return self.check(subclass)
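# A hedged sketch (editor-added): because PropertyMeta.__call__ memoizes on
# (feature, value, sorted condition), constructing an equal property twice
# yields the identical object, so its integer `id` can stand in for
# structural comparison:
#     p1 = Property(f, 'multi')
#     p2 = Property(f, 'multi')
#     assert p1 is p2 and p1.id == p2.id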
@total_ordering
class Property(object):
__slots__ = ('feature', 'value', 'condition', '_to_raw', '_hash', 'id')
__metaclass__ = PropertyMeta
def __init__(self, f, value, condition=None):
assert(f.free or ':' not in value)
if condition is None:
condition = []
self.feature = f
self.value = value
self.condition = condition
self._hash = hash((self.feature, self.value) + tuple(sorted(self.condition)))
self.id = PropertyMeta.current_id
# increment the id counter.
# this allows us to take a list of Property
# instances and use their unique integer ID
# to create a key for PropertySet caching. This is
# much faster than string comparison.
PropertyMeta.current_id += 1
condition_str = ''
if condition:
condition_str = ",".join(str(p) for p in self.condition) + ':'
self._to_raw = '{}<{}>{}'.format(condition_str, f.name, value)
def to_raw(self):
return self._to_raw
def __str__(self):
return self._to_raw
def __hash__(self):
return self._hash
def __eq__(self, other):
return self._hash == other._hash
def __lt__(self, other):
return (self.feature.name, self.value) < (other.feature.name, other.value)
@total_ordering
class LazyProperty(object):
def __init__(self, feature_name, value, condition=None):
if condition is None:
condition = []
self.__property = Property(
feature.get(_not_applicable_feature), feature_name + value, condition=condition)
self.__name = feature_name
self.__value = value
self.__condition = condition
self.__feature = None
def __getattr__(self, item):
if self.__feature is None:
try:
self.__feature = feature.get(self.__name)
self.__property = Property(self.__feature, self.__value, self.__condition)
except KeyError:
pass
return getattr(self.__property, item)
def __hash__(self):
return hash(self.__property)
def __str__(self):
return self.__property._to_raw
def __eq__(self, other):
return self.__property == other
def __lt__(self, other):
return (self.feature.name, self.value) < (other.feature.name, other.value)
def create_from_string(s, allow_condition=False, allow_missing_value=False):
assert isinstance(s, basestring)
assert isinstance(allow_condition, bool)
assert isinstance(allow_missing_value, bool)
condition = []
if __re_has_condition.search(s):
if not allow_condition:
raise BaseException("Conditional property is not allowed in this context")
m = __re_separate_condition_and_property.match(s)
condition = m.group(1)
s = m.group(2)
# FIXME: break dependency cycle
from b2.manager import get_manager
if condition:
condition = [create_from_string(x) for x in condition.split(',')]
feature_name = get_grist(s)
if not feature_name:
if feature.is_implicit_value(s):
f = feature.implied_feature(s)
value = s
p = Property(f, value, condition=condition)
else:
raise get_manager().errors()("Invalid property '%s' -- unknown feature" % s)
else:
value = get_value(s)
if not value and not allow_missing_value:
get_manager().errors()("Invalid property '%s' -- no value specified" % s)
if feature.valid(feature_name):
p = Property(feature.get(feature_name), value, condition=condition)
else:
# In case feature name is not known, it is wrong to do a hard error.
# Feature sets change depending on the toolset. So e.g.
# <toolset-X:version> is an unknown feature when using toolset Y.
#
# Ideally we would like to ignore this value, but most of
# Boost.Build code expects that we return a valid Property. For this
# reason we use a sentinel <not-applicable-in-this-context> feature.
#
# The underlying cause for this problem is that python port Property
# is more strict than its Jam counterpart and must always reference
# a valid feature.
p = LazyProperty(feature_name, value, condition=condition)
return p
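# Hedged examples of create_from_string() (editor-added; the exact features
# depend on the feature set loaded at runtime):
#     create_from_string('<toolset>gcc')        # explicit feature and value
#     create_from_string('debug')               # implicit value, feature implied
#     create_from_string('<variant>debug:<define>FOO',
#                        allow_condition=True)  # conditional property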
def create_from_strings(string_list, allow_condition=False):
assert is_iterable_typed(string_list, basestring)
return [create_from_string(s, allow_condition) for s in string_list]
def reset ():
""" Clear the module state. This is mainly for testing purposes.
"""
global __results
# A cache of results from as_path
__results = {}
reset ()
def set_abbreviated_paths(on=True):
global __abbreviated_paths
if on == 'off':
on = False
on = bool(on)
__abbreviated_paths = on
def get_abbreviated_paths():
return __abbreviated_paths or '--abbreviated-paths' in sys.argv
def path_order (x, y):
""" Helper for as_path, below. Orders properties with the implicit ones
first, and within the two sections in alphabetical order of feature
name.
"""
if x == y:
return 0
xg = get_grist (x)
yg = get_grist (y)
if yg and not xg:
return -1
elif xg and not yg:
return 1
else:
if not xg:
x = feature.expand_subfeatures([x])
y = feature.expand_subfeatures([y])
if x < y:
return -1
elif x > y:
return 1
else:
return 0
def identify(string):
return string
# Uses Property
def refine (properties, requirements):
""" Refines 'properties' by overriding any non-free properties
for which a different value is specified in 'requirements'.
Conditional requirements are just added without modification.
Returns the resulting list of properties.
"""
assert is_iterable_typed(properties, Property)
assert is_iterable_typed(requirements, Property)
# The result has no duplicates, so we store it in a set
result = set()
# Records all requirements.
required = {}
# All the elements of requirements should be present in the result
# Record them so that we can handle 'properties'.
for r in requirements:
# Don't consider conditional requirements.
if not r.condition:
required[r.feature] = r
for p in properties:
# Skip conditional properties
if p.condition:
result.add(p)
# No processing for free properties
elif p.feature.free:
result.add(p)
else:
if p.feature in required:
result.add(required[p.feature])
else:
result.add(p)
return sequence.unique(list(result) + requirements)
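# Editor-added pointer: the ported Jam tests at the bottom of this file
# illustrate refine(), e.g. refining [<toolset>gcc <rtti>off] with
# [<define>FOO] yields <toolset>gcc <rtti>off <define>FOO (a free property is
# simply added), while refining [<toolset>gcc <optimization>off] with
# [<optimization>on] yields <toolset>gcc <optimization>on (a non-free value
# is overridden).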
def translate_paths (properties, path):
""" Interpret all path properties in 'properties' as relative to 'path'
The property values are assumed to be in system-specific form, and
will be translated into normalized form.
"""
assert is_iterable_typed(properties, Property)
result = []
for p in properties:
if p.feature.path:
values = __re_two_ampersands.split(p.value)
new_value = "&&".join(os.path.normpath(os.path.join(path, v)) for v in values)
if new_value != p.value:
result.append(Property(p.feature, new_value, p.condition))
else:
result.append(p)
else:
result.append (p)
return result
def translate_indirect(properties, context_module):
"""Assumes that all feature values that start with '@' are
names of rules, used in 'context-module'. Such rules can be
either local to the module or global. Qualified local rules
with the name of the module."""
assert is_iterable_typed(properties, Property)
assert isinstance(context_module, basestring)
result = []
for p in properties:
if p.value[0] == '@':
q = qualify_jam_action(p.value[1:], context_module)
get_manager().engine().register_bjam_action(q)
result.append(Property(p.feature, '@' + q, p.condition))
else:
result.append(p)
return result
def validate (properties):
""" Exit with error if any of the properties is not valid.
properties may be a single property or a sequence of properties.
"""
if isinstance(properties, Property):
properties = [properties]
assert is_iterable_typed(properties, Property)
for p in properties:
__validate1(p)
def expand_subfeatures_in_conditions (properties):
assert is_iterable_typed(properties, Property)
result = []
for p in properties:
if not p.condition:
result.append(p)
else:
expanded = []
for c in p.condition:
                # It is common for a condition to include a toolset that
                # was never defined, or to mention subfeatures that were
                # never defined. In that case validation would only produce
                # a spurious error, so don't validate.
expanded.extend(feature.expand_subfeatures ([c], True))
# we need to keep LazyProperties lazy
if isinstance(p, LazyProperty):
value = p.value
feature_name = get_grist(value)
value = value.replace(feature_name, '')
result.append(LazyProperty(feature_name, value, condition=expanded))
else:
result.append(Property(p.feature, p.value, expanded))
return result
# FIXME: this should go
def split_conditional (property):
""" If 'property' is conditional property, returns
condition and the property, e.g
<variant>debug,<toolset>gcc:<inlining>full will become
<variant>debug,<toolset>gcc <inlining>full.
Otherwise, returns empty string.
"""
assert isinstance(property, basestring)
m = __re_split_conditional.match (property)
if m:
return (m.group (1), '<' + m.group (2))
return None
def select (features, properties):
""" Selects properties which correspond to any of the given features.
"""
assert is_iterable_typed(properties, basestring)
# add any missing angle brackets
features = add_grist (features)
return [p for p in properties if get_grist(p) in features]
def validate_property_sets (sets):
if __debug__:
from .property_set import PropertySet
assert is_iterable_typed(sets, PropertySet)
for s in sets:
validate(s.all())
def evaluate_conditionals_in_context (properties, context):
""" Removes all conditional properties which conditions are not met
For those with met conditions, removes the condition. Properties
in conditions are looked up in 'context'
"""
if __debug__:
from .property_set import PropertySet
assert is_iterable_typed(properties, Property)
assert isinstance(context, PropertySet)
base = []
conditional = []
for p in properties:
if p.condition:
conditional.append (p)
else:
base.append (p)
result = base[:]
for p in conditional:
# Evaluate condition
# FIXME: probably inefficient
if all(x in context for x in p.condition):
result.append(Property(p.feature, p.value))
return result
def change (properties, feature, value = None):
""" Returns a modified version of properties with all values of the
given feature replaced by the given value.
If 'value' is None the feature will be removed.
"""
assert is_iterable_typed(properties, basestring)
assert isinstance(feature, basestring)
assert isinstance(value, (basestring, type(None)))
result = []
feature = add_grist (feature)
for p in properties:
if get_grist (p) == feature:
if value:
result.append (replace_grist (value, feature))
else:
result.append (p)
return result
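# A hedged example (editor-added; mirrors the ported Jam test below):
#     change(['<toolset>gcc', '<include>a'], '<toolset>', 'kylix')
#     # -> ['<toolset>kylix', '<include>a']
#     change(['<toolset>gcc', '<include>a'], '<toolset>')
#     # -> ['<include>a']  (value None removes the feature)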
################################################################
# Private functions
def __validate1 (property):
""" Exit with error if property is not valid.
"""
assert isinstance(property, Property)
if not property.feature.free:
feature.validate_value_string (property.feature, property.value)
###################################################################
# Still to port.
# Original lines are prefixed with "# "
#
#
# import utility : ungrist ;
# import sequence : unique ;
# import errors : error ;
# import feature ;
# import regex ;
# import sequence ;
# import set ;
# import path ;
# import assert ;
#
#
# rule validate-property-sets ( property-sets * )
# {
# for local s in $(property-sets)
# {
# validate [ feature.split $(s) ] ;
# }
# }
#
def remove(attributes, properties):
"""Returns a property sets which include all the elements
in 'properties' that do not have attributes listed in 'attributes'."""
if isinstance(attributes, basestring):
attributes = [attributes]
assert is_iterable_typed(attributes, basestring)
assert is_iterable_typed(properties, basestring)
result = []
for e in properties:
attributes_new = feature.attributes(get_grist(e))
has_common_features = 0
for a in attributes_new:
if a in attributes:
has_common_features = 1
break
if not has_common_features:
            # append the whole property string ('+=' on a list would splice
            # in its individual characters)
            result.append(e)
return result
def take(attributes, properties):
"""Returns a property set which include all
properties in 'properties' that have any of 'attributes'."""
assert is_iterable_typed(attributes, basestring)
assert is_iterable_typed(properties, basestring)
result = []
for e in properties:
if b2.util.set.intersection(attributes, feature.attributes(get_grist(e))):
result.append(e)
return result
def translate_dependencies(properties, project_id, location):
assert is_iterable_typed(properties, Property)
assert isinstance(project_id, basestring)
assert isinstance(location, basestring)
result = []
for p in properties:
if not p.feature.dependency:
result.append(p)
else:
v = p.value
m = re.match("(.*)//(.*)", v)
if m:
rooted = m.group(1)
if rooted[0] == '/':
# Either project id or absolute Linux path, do nothing.
pass
else:
rooted = os.path.join(os.getcwd(), location, rooted)
result.append(Property(p.feature, rooted + "//" + m.group(2), p.condition))
elif os.path.isabs(v):
result.append(p)
else:
result.append(Property(p.feature, project_id + "//" + v, p.condition))
return result
class PropertyMap:
""" Class which maintains a property set -> string mapping.
"""
def __init__ (self):
self.__properties = []
self.__values = []
def insert (self, properties, value):
""" Associate value with properties.
"""
assert is_iterable_typed(properties, basestring)
assert isinstance(value, basestring)
self.__properties.append(properties)
self.__values.append(value)
def find (self, properties):
""" Return the value associated with properties
or any subset of it. If more than one
subset has value assigned to it, return the
value for the longest subset, if it's unique.
"""
assert is_iterable_typed(properties, basestring)
return self.find_replace (properties)
def find_replace(self, properties, value=None):
assert is_iterable_typed(properties, basestring)
assert isinstance(value, (basestring, type(None)))
matches = []
match_ranks = []
for i in range(0, len(self.__properties)):
p = self.__properties[i]
if b2.util.set.contains (p, properties):
matches.append (i)
match_ranks.append(len(p))
best = sequence.select_highest_ranked (matches, match_ranks)
if not best:
return None
if len (best) > 1:
raise NoBestMatchingAlternative ()
best = best [0]
original = self.__values[best]
if value:
self.__values[best] = value
return original
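# A hedged usage sketch for PropertyMap (editor-added):
#     pm = PropertyMap()
#     pm.insert(['<toolset>gcc'], 'g++')
#     pm.insert(['<toolset>gcc', '<variant>debug'], 'g++ -g')
#     pm.find(['<toolset>gcc', '<variant>debug'])  # -> 'g++ -g'
# The longest matching subset wins; an ambiguous tie raises
# NoBestMatchingAlternative.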
# local rule __test__ ( )
# {
# import errors : try catch ;
# import feature ;
# import feature : feature subfeature compose ;
#
# # local rules must be explicitly re-imported
# import property : path-order ;
#
# feature.prepare-test property-test-temp ;
#
# feature toolset : gcc : implicit symmetric ;
# subfeature toolset gcc : version : 2.95.2 2.95.3 2.95.4
# 3.0 3.0.1 3.0.2 : optional ;
# feature define : : free ;
# feature runtime-link : dynamic static : symmetric link-incompatible ;
# feature optimization : on off ;
# feature variant : debug release : implicit composite symmetric ;
# feature rtti : on off : link-incompatible ;
#
# compose <variant>debug : <define>_DEBUG <optimization>off ;
# compose <variant>release : <define>NDEBUG <optimization>on ;
#
# import assert ;
# import "class" : new ;
#
# validate <toolset>gcc <toolset>gcc-3.0.1 : $(test-space) ;
#
# assert.result <toolset>gcc <rtti>off <define>FOO
# : refine <toolset>gcc <rtti>off
# : <define>FOO
# : $(test-space)
# ;
#
# assert.result <toolset>gcc <optimization>on
# : refine <toolset>gcc <optimization>off
# : <optimization>on
# : $(test-space)
# ;
#
# assert.result <toolset>gcc <rtti>off
# : refine <toolset>gcc : <rtti>off : $(test-space)
# ;
#
# assert.result <toolset>gcc <rtti>off <rtti>off:<define>FOO
# : refine <toolset>gcc : <rtti>off <rtti>off:<define>FOO
# : $(test-space)
# ;
#
# assert.result <toolset>gcc:<define>foo <toolset>gcc:<define>bar
# : refine <toolset>gcc:<define>foo : <toolset>gcc:<define>bar
# : $(test-space)
# ;
#
# assert.result <define>MY_RELEASE
# : evaluate-conditionals-in-context
# <variant>release,<rtti>off:<define>MY_RELEASE
# : <toolset>gcc <variant>release <rtti>off
#
# ;
#
# try ;
# validate <feature>value : $(test-space) ;
# catch "Invalid property '<feature>value': unknown feature 'feature'." ;
#
# try ;
# validate <rtti>default : $(test-space) ;
# catch \"default\" is not a known value of feature <rtti> ;
#
# validate <define>WHATEVER : $(test-space) ;
#
# try ;
# validate <rtti> : $(test-space) ;
# catch "Invalid property '<rtti>': No value specified for feature 'rtti'." ;
#
# try ;
# validate value : $(test-space) ;
# catch "value" is not a value of an implicit feature ;
#
#
# assert.result <rtti>on
# : remove free implicit : <toolset>gcc <define>foo <rtti>on : $(test-space) ;
#
# assert.result <include>a
# : select include : <include>a <toolset>gcc ;
#
# assert.result <include>a
# : select include bar : <include>a <toolset>gcc ;
#
# assert.result <include>a <toolset>gcc
# : select include <bar> <toolset> : <include>a <toolset>gcc ;
#
# assert.result <toolset>kylix <include>a
# : change <toolset>gcc <include>a : <toolset> kylix ;
#
# # Test ordinary properties
# assert.result
# : split-conditional <toolset>gcc
# ;
#
# # Test properties with ":"
# assert.result
# : split-conditional <define>FOO=A::B
# ;
#
# # Test conditional feature
# assert.result <toolset>gcc,<toolset-gcc:version>3.0 <define>FOO
# : split-conditional <toolset>gcc,<toolset-gcc:version>3.0:<define>FOO
# ;
#
# feature.finish-test property-test-temp ;
# }
#
| stan-dev/math | lib/boost_1.75.0/tools/build/src/build/property.py | Python | bsd-3-clause | 23,372 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import sqlalchemy as sa
from buildbot.test.util import migration
from sqlalchemy.engine import reflection
from twisted.trial import unittest
class Migration(migration.MigrateTestMixin, unittest.TestCase):
def setUp(self):
return self.setUpMigrateTest()
def tearDown(self):
return self.tearDownMigrateTest()
def create_tables_thd(self, conn):
metadata = sa.MetaData()
metadata.bind = conn
self.changes = sa.Table('changes', metadata,
sa.Column('changeid', sa.Integer, primary_key=True),
sa.Column('author', sa.String(256), nullable=False),
sa.Column('comments', sa.String(1024), nullable=False),
sa.Column('is_dir', sa.SmallInteger, nullable=False),
sa.Column('branch', sa.String(256)),
sa.Column('revision', sa.String(256)),
sa.Column('revlink', sa.String(256)),
sa.Column('when_timestamp', sa.Integer, nullable=False),
sa.Column('category', sa.String(256)),
sa.Column('repository', sa.String(length=512), nullable=False,
server_default=''),
sa.Column('project', sa.String(length=512), nullable=False,
server_default=''),
)
self.changes.create(bind=conn)
self.schedulers = sa.Table("schedulers", metadata,
sa.Column('schedulerid', sa.Integer, primary_key=True),
sa.Column('name', sa.String(128), nullable=False),
sa.Column('class_name', sa.String(128), nullable=False),
)
self.schedulers.create(bind=conn)
self.users = sa.Table("users", metadata,
sa.Column("uid", sa.Integer, primary_key=True),
sa.Column("identifier", sa.String(256), nullable=False),
sa.Column("bb_username", sa.String(128)),
sa.Column("bb_password", sa.String(128)),
)
self.users.create(bind=conn)
self.objects = sa.Table("objects", metadata,
sa.Column("id", sa.Integer, primary_key=True),
sa.Column('name', sa.String(128), nullable=False),
sa.Column('class_name', sa.String(128), nullable=False),
)
self.objects.create()
self.object_state = sa.Table("object_state", metadata,
sa.Column("objectid", sa.Integer, sa.ForeignKey('objects.id'),
nullable=False),
sa.Column("name", sa.String(length=256), nullable=False),
sa.Column("value_json", sa.Text, nullable=False),
)
self.object_state.create()
# these indices should already exist everywhere but on sqlite
if conn.dialect.name != 'sqlite':
sa.Index('name_and_class', self.schedulers.c.name,
self.schedulers.c.class_name).create()
sa.Index('changes_branch', self.changes.c.branch).create()
sa.Index('changes_revision', self.changes.c.revision).create()
sa.Index('changes_author', self.changes.c.author).create()
sa.Index('changes_category', self.changes.c.category).create()
sa.Index('changes_when_timestamp',
self.changes.c.when_timestamp).create()
# create this index without the unique attribute
sa.Index('users_identifier', self.users.c.identifier).create()
# tests
def test_migrate(self):
def setup_thd(conn):
self.create_tables_thd(conn)
def verify_thd(conn):
insp = reflection.Inspector.from_engine(conn)
indexes = (insp.get_indexes('changes')
+ insp.get_indexes('schedulers'))
self.assertEqual(
sorted([i['name'] for i in indexes]),
sorted([
'changes_author',
'changes_branch',
'changes_category',
'changes_revision',
'changes_when_timestamp',
'name_and_class',
]))
indexes = insp.get_indexes('users')
for idx in indexes:
if idx['name'] == 'users_identifier':
self.assertTrue(idx['unique'])
break
else:
self.fail("no users_identifier index")
return self.do_test_migration(16, 17, setup_thd, verify_thd)
| zozo123/buildbot | master/buildbot/test/unit/test_db_migrate_versions_017_restore_other_indices.py | Python | gpl-3.0 | 5,782 |
# encoding: utf-8
"""A dict subclass that supports attribute style access.
Authors:
* Fernando Perez (original)
* Brian Granger (refactoring to a dict subclass)
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
__all__ = ['Struct']
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
class Struct(dict):
"""A dict subclass with attribute style access.
    This dict subclass has a few extra features:
* Attribute style access.
* Protection of class members (like keys, items) when using attribute
style access.
* The ability to restrict assignment to only existing keys.
* Intelligent merging.
* Overloaded operators.
"""
_allownew = True
def __init__(self, *args, **kw):
"""Initialize with a dictionary, another Struct, or data.
Parameters
----------
args : dict, Struct
Initialize with one dict or Struct
kw : dict
Initialize with key, value pairs.
Examples
--------
>>> s = Struct(a=10,b=30)
>>> s.a
10
>>> s.b
30
>>> s2 = Struct(s,c=30)
>>> sorted(s2.keys())
['a', 'b', 'c']
"""
object.__setattr__(self, '_allownew', True)
dict.__init__(self, *args, **kw)
def __setitem__(self, key, value):
"""Set an item with check for allownew.
Examples
--------
>>> s = Struct()
>>> s['a'] = 10
>>> s.allow_new_attr(False)
>>> s['a'] = 10
>>> s['a']
10
>>> try:
... s['b'] = 20
... except KeyError:
... print('this is not allowed')
...
this is not allowed
"""
if not self._allownew and key not in self:
raise KeyError(
"can't create new attribute %s when allow_new_attr(False)" % key)
dict.__setitem__(self, key, value)
def __setattr__(self, key, value):
"""Set an attr with protection of class members.
This calls :meth:`self.__setitem__` but convert :exc:`KeyError` to
:exc:`AttributeError`.
Examples
--------
>>> s = Struct()
>>> s.a = 10
>>> s.a
10
>>> try:
... s.get = 10
... except AttributeError:
... print("you can't set a class member")
...
you can't set a class member
"""
        # If key is a str it might be a class member or instance var
if isinstance(key, str):
# I can't simply call hasattr here because it calls getattr, which
# calls self.__getattr__, which returns True for keys in
# self._data. But I only want keys in the class and in
# self.__dict__
if key in self.__dict__ or hasattr(Struct, key):
raise AttributeError(
'attr %s is a protected member of class Struct.' % key
)
try:
self.__setitem__(key, value)
except KeyError as e:
raise AttributeError(e)
def __getattr__(self, key):
"""Get an attr by calling :meth:`dict.__getitem__`.
Like :meth:`__setattr__`, this method converts :exc:`KeyError` to
:exc:`AttributeError`.
Examples
--------
>>> s = Struct(a=10)
>>> s.a
10
>>> type(s.get)
<... 'builtin_function_or_method'>
>>> try:
... s.b
... except AttributeError:
... print("I don't have that key")
...
I don't have that key
"""
try:
result = self[key]
except KeyError:
raise AttributeError(key)
else:
return result
def __iadd__(self, other):
"""s += s2 is a shorthand for s.merge(s2).
Examples
--------
>>> s = Struct(a=10,b=30)
>>> s2 = Struct(a=20,c=40)
>>> s += s2
>>> sorted(s.keys())
['a', 'b', 'c']
"""
self.merge(other)
return self
def __add__(self, other):
"""s + s2 -> New Struct made from s.merge(s2).
Examples
--------
>>> s1 = Struct(a=10,b=30)
>>> s2 = Struct(a=20,c=40)
>>> s = s1 + s2
>>> sorted(s.keys())
['a', 'b', 'c']
"""
sout = self.copy()
sout.merge(other)
return sout
def __sub__(self, other):
"""s1 - s2 -> remove keys in s2 from s1.
Examples
--------
>>> s1 = Struct(a=10,b=30)
>>> s2 = Struct(a=40)
>>> s = s1 - s2
>>> s
{'b': 30}
"""
sout = self.copy()
sout -= other
return sout
def __isub__(self, other):
"""Inplace remove keys from self that are in other.
Examples
--------
>>> s1 = Struct(a=10,b=30)
>>> s2 = Struct(a=40)
>>> s1 -= s2
>>> s1
{'b': 30}
"""
for k in other.keys():
if k in self:
del self[k]
return self
def __dict_invert(self, data):
"""Helper function for merge.
Takes a dictionary whose values are lists and returns a dict with
the elements of each list as keys and the original keys as values.
"""
outdict = {}
for k, lst in data.items():
if isinstance(lst, str):
lst = lst.split()
for entry in lst:
outdict[entry] = k
return outdict
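    # For example (editor-added sketch): {'update': 'a b', 'add': ['c']}
    # inverts to {'a': 'update', 'b': 'update', 'c': 'add'}; string values
    # are first split on whitespace.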
def dict(self):
return self
def copy(self):
"""Return a copy as a Struct.
Examples
--------
>>> s = Struct(a=10,b=30)
>>> s2 = s.copy()
>>> type(s2) is Struct
True
"""
return Struct(dict.copy(self))
def hasattr(self, key):
"""hasattr function available as a method.
Implemented like has_key.
Examples
--------
>>> s = Struct(a=10)
>>> s.hasattr('a')
True
>>> s.hasattr('b')
False
>>> s.hasattr('get')
False
"""
return key in self
def allow_new_attr(self, allow=True):
"""Set whether new attributes can be created in this Struct.
        This can be used to catch typos by verifying that the attribute the
        user tries to change already exists in this Struct.
"""
object.__setattr__(self, '_allownew', allow)
def merge(self, __loc_data__=None, __conflict_solve=None, **kw):
"""Merge two Structs with customizable conflict resolution.
This is similar to :meth:`update`, but much more flexible. First, a
dict is made from data+key=value pairs. When merging this dict with
the Struct S, the optional dictionary 'conflict' is used to decide
what to do.
If conflict is not given, the default behavior is to preserve any keys
with their current value (the opposite of the :meth:`update` method's
behavior).
Parameters
----------
__loc_data : dict, Struct
The data to merge into self
__conflict_solve : dict
The conflict policy dict. The keys are binary functions used to
resolve the conflict and the values are lists of strings naming
the keys the conflict resolution function applies to. Instead of
a list of strings a space separated string can be used, like
'a b c'.
kw : dict
Additional key, value pairs to merge in
Notes
-----
The `__conflict_solve` dict is a dictionary of binary functions which will be used to
solve key conflicts. Here is an example::
__conflict_solve = dict(
func1=['a','b','c'],
func2=['d','e']
)
In this case, the function :func:`func1` will be used to resolve
keys 'a', 'b' and 'c' and the function :func:`func2` will be used for
keys 'd' and 'e'. This could also be written as::
__conflict_solve = dict(func1='a b c',func2='d e')
These functions will be called for each key they apply to with the
form::
func1(self['a'], other['a'])
The return value is used as the final merged value.
As a convenience, merge() provides five (the most commonly needed)
pre-defined policies: preserve, update, add, add_flip and add_s. The
easiest explanation is their implementation::
preserve = lambda old,new: old
update = lambda old,new: new
add = lambda old,new: old + new
add_flip = lambda old,new: new + old # note change of order!
add_s = lambda old,new: old + ' ' + new # only for str!
        You can use those five words (as strings) as keys instead
of defining them as functions, and the merge method will substitute
the appropriate functions for you.
For more complicated conflict resolution policies, you still need to
construct your own functions.
Examples
--------
This show the default policy:
>>> s = Struct(a=10,b=30)
>>> s2 = Struct(a=20,c=40)
>>> s.merge(s2)
>>> sorted(s.items())
[('a', 10), ('b', 30), ('c', 40)]
Now, show how to specify a conflict dict:
>>> s = Struct(a=10,b=30)
>>> s2 = Struct(a=20,b=40)
>>> conflict = {'update':'a','add':'b'}
>>> s.merge(s2,conflict)
>>> sorted(s.items())
[('a', 20), ('b', 70)]
"""
        # Guard against merge() being called with keyword arguments only
        # (dict(None) would raise TypeError).
        data_dict = dict(__loc_data__ or {}, **kw)
# policies for conflict resolution: two argument functions which return
# the value that will go in the new struct
preserve = lambda old, new: old
update = lambda old, new: new
add = lambda old, new: old + new
add_flip = lambda old, new: new + old # note change of order!
add_s = lambda old, new: old + ' ' + new
# default policy is to keep current keys when there's a conflict
conflict_solve = dict.fromkeys(self, preserve)
# the conflict_solve dictionary is given by the user 'inverted': we
# need a name-function mapping, it comes as a function -> names
# dict. Make a local copy (b/c we'll make changes), replace user
# strings for the three builtin policies and invert it.
if __conflict_solve:
inv_conflict_solve_user = __conflict_solve.copy()
for name, func in [('preserve', preserve), ('update', update),
('add', add), ('add_flip', add_flip),
('add_s', add_s)]:
if name in inv_conflict_solve_user.keys():
inv_conflict_solve_user[
func] = inv_conflict_solve_user[name]
del inv_conflict_solve_user[name]
conflict_solve.update(self.__dict_invert(inv_conflict_solve_user))
for key in data_dict:
if key not in self:
self[key] = data_dict[key]
else:
self[key] = conflict_solve[key](self[key], data_dict[key])
| mattvonrocketstein/smash | smashlib/ipy3x/utils/ipstruct.py | Python | mit | 11,894 |
#!/usr/bin/env python
# coding=utf-8
# Author: YAO Matrix (yaoweifeng0301@126.com)
import numpy as np
def precision(predictions, labels):
"""Return the precision based on dense predictions and sparse labels."""
return (100.0 *
np.sum(np.argmax(predictions, 1) == labels) /
predictions.shape[0])
def error_rate(predictions, labels):
"""Return the error rate based on dense predictions and sparse labels."""
return 100.0 - (100.0 *
np.sum(np.argmax(predictions, 1) == labels) /
predictions.shape[0])
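# A hedged usage sketch (editor-added, not part of the original module).
# `predictions` is a dense (N, num_classes) score matrix and `labels` a
# length-N vector of integer class ids; both metrics are percentages.
def _example_metrics():
    preds = np.array([[0.9, 0.1],
                      [0.2, 0.8],
                      [0.7, 0.3]])
    labels = np.array([0, 1, 1])
    print(precision(preds, labels))   # 66.66... (2 of 3 argmaxes match)
    print(error_rate(preds, labels))  # 33.33...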
| yao-matrix/mLearning | use_case/image_cnn/utils/metrics.py | Python | apache-2.0 | 534 |
from pyramid.events import subscriber
from websauna.system.crud.views import TraverseLinkButton
from pyramid.events import BeforeRender
from websauna.system.user.admins import UserAdmin
@subscriber(BeforeRender)
def contribute_admin(event):
"""Add notebook entry to the admin user interface."""
# XXX: dummy way of adding button on context
if not isinstance(event.get('context'), UserAdmin.Resource):
return
view = event.get('view')
if view is None or not hasattr(view, 'resource_buttons'):
return
if "shell" in [i.id for i in event['view'].resource_buttons]:
return
button = TraverseLinkButton(
id="shell",
name="Shell",
view_name="shell",
permission="shell",
tooltip="Open IPython Notebook shell and have this item prepopulated in obj variable.")
event['view'].resource_buttons.append(button)
| enkidulan/websauna.notebook | websauna/notebook/subscribers.py | Python | mit | 894 |
from unittest import mock
from django.core.checks import Error
from django.db import connections, models
from django.test import SimpleTestCase
from django.test.utils import isolate_apps
def dummy_allow_migrate(db, app_label, **hints):
# Prevent checks from being run on the 'other' database, which doesn't have
# its check_field() method mocked in the test.
return db == 'default'
@isolate_apps('invalid_models_tests')
class BackendSpecificChecksTests(SimpleTestCase):
@mock.patch('django.db.models.fields.router.allow_migrate', new=dummy_allow_migrate)
def test_check_field(self):
""" Test if backend specific checks are performed. """
error = Error('an error')
class Model(models.Model):
field = models.IntegerField()
field = Model._meta.get_field('field')
with mock.patch.object(connections['default'].validation, 'check_field', return_value=[error]):
self.assertEqual(field.check(databases={'default'}), [error])
| atul-bhouraskar/django | tests/invalid_models_tests/test_backend_specific.py | Python | bsd-3-clause | 1,010 |
# xyz
# Copyright (C) 2014 xyz developers <admin@localhost.lh> (see AUTHORS)
#
# All rights reserved.
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from django.db.models import Count
from . import models, forms
class ProductAdmin(admin.ModelAdmin):
exclude = ( 'progress', )
list_display = ( 'id', 'name', 'quantity', 'status', 'progressbar', )
list_filter = ( 'status', )
search_fields = ( 'status', )
ordering = ( '-progress', )
def progressbar(self, obj):
progress_text = obj.progress if obj.progress else 0
progress = int(round(obj.progress/5)) if obj.progress else 0
remain_progress = 20-progress
return '[%s%s] %d%%' % (progress*'#', remain_progress*'=', progress_text)
progressbar.short_description = "progressbar"
progressbar.admin_order_field = "progress"
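    # For example (editor-added sketch): progress=45 renders as
    # '[#########===========] 45%' (9 of 20 cells filled).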
def get_queryset(self, request):
qs = super(ProductAdmin, self).get_queryset(request)
return qs
def save_related(self, request, form, formsets, change):
super(ProductAdmin, self).save_related(request, form, formsets, change)
obj = form.instance
obj.save()
admin.site.register(models.Product, ProductAdmin)
| 4geit-module/4i.language.python.django | tests/progressbar/output/myapp/admin.py | Python | gpl-3.0 | 1,234 |
"""Example of NaCl calculation."""
from typing import List
import numpy as np
from phonopy import Phonopy
from phonopy.file_IO import parse_BORN, parse_FORCE_SETS
from phonopy.interface.vasp import read_vasp
# from phonopy.structure.atoms import PhonopyAtoms
def _append_band(bands, q_start, q_end):
band = []
for i in range(51):
band.append(np.array(q_start) + (np.array(q_end) - np.array(q_start)) / 50 * i)
bands.append(band)
# NaCl crystal structure is read from POSCAR.
unitcell = read_vasp("POSCAR-unitcell")
# This can be given via a PhonopyAtoms class as follows:
# unitcell = PhonopyAtoms(symbols=(['Na'] * 4 + ['Cl'] * 4),
# cell=(np.eye(3) * 5.6903014761756712),
# scaled_positions=[[0, 0, 0],
# [0, 0.5, 0.5],
# [0.5, 0, 0.5],
# [0.5, 0.5, 0],
# [0.5, 0.5, 0.5],
# [0.5, 0, 0],
# [0, 0.5, 0],
# [0, 0, 0.5]])
phonon = Phonopy(
unitcell,
[[2, 0, 0], [0, 2, 0], [0, 0, 2]],
primitive_matrix=[[0, 0.5, 0.5], [0.5, 0, 0.5], [0.5, 0.5, 0]],
)
symmetry = phonon.get_symmetry()
print("Space group: %s" % symmetry.get_international_table())
force_sets = parse_FORCE_SETS()
phonon.dataset = force_sets
phonon.produce_force_constants()
primitive = phonon.primitive
# Born effective charges and dielectric constants are read from BORN file.
nac_params = parse_BORN(primitive, filename="BORN")
# Or it can be of course given by hand as follows:
# born = [[[1.08703, 0, 0],
# [0, 1.08703, 0],
# [0, 0, 1.08703]],
# [[-1.08672, 0, 0],
# [0, -1.08672, 0],
# [0, 0, -1.08672]]]
# epsilon = [[2.43533967, 0, 0],
# [0, 2.43533967, 0],
# [0, 0, 2.43533967]]
# factors = 14.400
# nac_params = {'born': born,
# 'factor': factors,
# 'dielectric': epsilon}
phonon.nac_params = nac_params
# BAND = 0.0 0.0 0.0 0.5 0.0 0.0 0.5 0.5 0.0 0.0 0.0 0.0 0.5 0.5 0.5
bands: List[List] = []
_append_band(bands, [0.0, 0.0, 0.0], [0.5, 0.0, 0.0])
_append_band(bands, [0.5, 0.0, 0.0], [0.5, 0.5, 0.0])
_append_band(bands, [0.5, 0.5, 0.0], [0.0, 0.0, 0.0])
_append_band(bands, [0.0, 0.0, 0.0], [0.5, 0.5, 0.5])
phonon.set_band_structure(bands)
band_dict = phonon.get_band_structure_dict()
q_points = band_dict["qpoints"]
distances = band_dict["distances"]
frequencies = band_dict["frequencies"]
eigvecs = band_dict["eigenvectors"]
for q_path, d_path, freq_path in zip(q_points, distances, frequencies):
for q, d, freq in zip(q_path, d_path, freq_path):
print(
("%10.5f %5.2f %5.2f %5.2f " + (" %7.3f" * len(freq)))
% ((d, q[0], q[1], q[2]) + tuple(freq))
)
phonon.plot_band_structure().show()
# Mesh sampling 20x20x20
phonon.run_mesh(mesh=[20, 20, 20])
phonon.run_thermal_properties(t_step=10, t_max=1000, t_min=0)
# DOS
phonon.run_total_dos(sigma=0.1)
dos_dict = phonon.get_total_dos_dict()
for omega, dos in zip(dos_dict["frequency_points"], dos_dict["total_dos"]):
print("%15.7f%15.7f" % (omega, dos))
phonon.plot_total_dos().show()
# Thermal properties
tprop_dict = phonon.get_thermal_properties_dict()
for t, free_energy, entropy, cv in zip(
tprop_dict["temperatures"],
tprop_dict["free_energy"],
tprop_dict["entropy"],
tprop_dict["heat_capacity"],
):
print(("%12.3f " + "%15.7f" * 3) % (t, free_energy, entropy, cv))
phonon.plot_thermal_properties().show()
# PDOS
phonon.run_mesh(mesh=[10, 10, 10], is_mesh_symmetry=False, with_eigenvectors=True)
phonon.run_projected_dos(use_tetrahedron_method=True)
pdos_dict = phonon.get_projected_dos_dict()
omegas = pdos_dict["frequency_points"]
pdos = pdos_dict["projected_dos"]
pdos_indices = [[0], [1]]
phonon.plot_projected_dos(pdos_indices=pdos_indices, legend=pdos_indices).show()
| atztogo/phonopy | example/NaCl/NaCl.py | Python | bsd-3-clause | 4,060 |
#!/usr/bin/env python
# coding: utf-8
import sys
#
# String-handling code
# The name s_ was chosen for brevity and because it does not look like a
# variable name (at least in Python; the C++ stereotype of marking private
# class attribute names this way was rather a nuisance, but never mind)
#
# And yes, I realize this is a non-standard name for public Python code,
# but that is not something I am aiming for here
#
# :TODO: investigate and optimize (it really should be faster) along these lines:
# import codecs
# encoder_func = codecs.lookup("utf_8").encode
# ...
# return encoder_func(s)
def to_utf8(s):
""" unicode => str """
return s.encode("utf_8")
def to_uni(s):
""" str => unicode """
return s.decode("utf_8")
class EvalFormat:
""" locals() не подходит для глобальных переменных и вызова функций """
def init_context(self):
fo = sys._getframe(2)
self.globs = fo.f_globals
self.locs = fo.f_locals
def eval_key(self, key):
return eval(key, self.globs, self.locs)
def __init__(self):
self.init_context()
def __getitem__(self, key):
return self.eval_key(key)
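# A hedged usage sketch (editor-added). Because __getitem__ eval()s the key
# in the caller's frame (sys._getframe(2) at construction time),
# %-formatting can evaluate arbitrary expressions, including calls:
#     name = "world"
#     print("Hello, %(name.upper())s!" % EvalFormat())  # Hello, WORLD!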
def make_stream():
import StringIO
return StringIO.StringIO()
def timedelta2str(tdelta):
days = tdelta.days
res = ""
if days:
if days in [1, -1]:
res = "%s day " % days
else:
res = "% days " % days
d = {}
d["hours"], rem = divmod(tdelta.seconds, 3600)
d["minutes"], d["seconds"] = divmod(rem, 60)
return res + "%(hours)02d:%(minutes)02d:%(seconds)02d" % d
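# Hedged examples (editor-added, assuming `import datetime`):
#     timedelta2str(datetime.timedelta(hours=1, minutes=2, seconds=3))
#     # -> '01:02:03'
#     timedelta2str(datetime.timedelta(days=1, seconds=59))
#     # -> '1 day 00:00:59'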
| BradburyLab/show_tv | show_tv/s_.py | Python | gpl-3.0 | 1,964 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""cond_v2 and gradient.
This is a version of cond that emits a single If op, as well as the gradient
function for If ops produced by cond_v2. This will eventually replace the
current tf.cond implementation once it reaches feature and performance parity.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.eager import backprop_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import func_graph as func_graph_module
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import control_flow_util_v2 as util
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import default_gradient
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import gen_functional_ops
from tensorflow.python.ops import gradients_util
from tensorflow.python.ops import math_ops
from tensorflow.python.util import nest
# NOTE(skyewm): TensorFlow uses protected class methods and fields to signify
# that they aren't part of the official public API. These protected members
# often need to be used by implementation code however. Rather than litter the
# code with pylint comments, we ignore protected access violations for
# readability.
# pylint: disable=protected-access
_COND = 1
_CASE = 2
def cond_v2(pred, true_fn, false_fn, name="cond"):
"""Like tf.cond, except emits a single If op."""
if isinstance(pred, bool):
raise TypeError("pred must not be a Python bool", pred)
if not name:
name = "cond"
with ops.name_scope(name) as scope:
true_name = util.unique_fn_name(scope, "true")
false_name = util.unique_fn_name(scope, "false")
# Automatic control dependencies are added in defuns, but not in v1
# graphs. Propagate that behavior here.
add_control_dependencies = ops.get_default_graph()._add_control_dependencies
pred = ops.convert_to_tensor(pred)
if (tensor_util.is_tensor(pred) and
(pred.shape.dims is None or pred.shape.dims)):
pred = array_ops.squeeze_v2(pred)
true_graph = func_graph_module.func_graph_from_py_func(
true_name,
true_fn, [], {},
func_graph=util.CondBranchFuncGraph(
true_name, collections=ops.get_default_graph()._collections), # pylint: disable=protected-access
add_control_dependencies=add_control_dependencies,
op_return_value=pred)
false_graph = func_graph_module.func_graph_from_py_func(
false_name,
false_fn, [], {},
func_graph=util.CondBranchFuncGraph(
false_name, collections=ops.get_default_graph()._collections), # pylint: disable=protected-access
add_control_dependencies=add_control_dependencies,
op_return_value=pred)
verify_captures(_COND, [true_graph, false_graph])
return _build_cond(
pred,
true_graph,
false_graph,
true_graph.external_captures,
false_graph.external_captures,
building_gradient=False,
name=scope)
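# A hedged usage sketch (editor-added, not part of the original module);
# cond_v2 mirrors tf.cond's calling convention but always emits a single
# If/StatelessIf op:
#     x = constant_op.constant(2.0)
#     y = constant_op.constant(5.0)
#     out = cond_v2(math_ops.less(x, y), lambda: x + y, lambda: x - y)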
@ops.RegisterGradient("StatelessIf")
@ops.RegisterGradient("If")
def _IfGrad(op, *grads): # pylint: disable=invalid-name
"""The gradient of an If op produced by cond_v2."""
# Get the if operator (this logic handles the case where op is a MockOp)
if_op = op.outputs[0].op
true_graph, false_graph = get_func_graphs(if_op)
# Note: op.graph != ops.get_default_graph() when we are computing the gradient
# of a nested cond.
assert true_graph.outer_graph == if_op.graph
assert false_graph.outer_graph == if_op.graph
# Create grad functions that compute the gradient of the true/false forward
# graphs. These functions will capture tensors from the forward pass
# functions.
true_grad_graph = _create_grad_func(
true_graph, grads, util.unique_grad_fn_name(true_graph.name))
false_grad_graph = _create_grad_func(
false_graph, grads, util.unique_grad_fn_name(false_graph.name))
  # Replaces output None grads with zeros if at least one branch has a
  # non-None grad at that index.
_create_zeros_for_none_grads([true_graph, false_graph],
[true_grad_graph, false_grad_graph])
if (true_grad_graph.op_needs_rewrite or false_grad_graph.op_needs_rewrite):
# Modify 'op' to output the intermediates needed by the grad functions. Note
# that all needed intermediates are wrapped in optionals. Each optional
# intermediate output will have a value iff its corresponding branch is
# taken.
# NOTE(skyewm): if there are any active sessions, this modification to `op`
# may make them unrunnable!
if control_flow_util.GraphOrParentsInXlaContext(ops.get_default_graph()):
# XLA does not yet support optionals, so output intermediates directly and
# make them match via FakeParams, which can be converted to zeros in XLA.
# TODO(skyewm,jpienaar): can XLA support optionals?
true_intermediates = true_grad_graph.xla_intermediates
false_intermediates = false_grad_graph.xla_intermediates
extra_true_outputs, extra_false_outputs = _make_intermediates_match_xla(
[true_graph, false_graph], [true_intermediates, false_intermediates])
else:
true_intermediates = true_grad_graph.wrapped_intermediates
false_intermediates = false_grad_graph.wrapped_intermediates
# Make outputs match by adding none optionals.
extra_true_outputs, extra_false_outputs = _make_intermediates_match(
[true_graph, false_graph], [true_intermediates, false_intermediates])
true_graph.outputs.extend(extra_true_outputs)
false_graph.outputs.extend(extra_false_outputs)
# TODO(skyewm): indicate it's an internal bug if this fails.
_check_same_outputs(_COND, [true_graph, false_graph])
true_graph.name += "_rewritten"
false_graph.name += "_rewritten"
if_op._set_func_attr("then_branch", util.create_new_tf_function(true_graph))
if_op._set_func_attr("else_branch",
util.create_new_tf_function(false_graph))
if_op._set_type_list_attr("Tout", true_graph.output_types)
if_op._set_shape_list_attr("output_shapes", true_graph.output_shapes)
if_op._add_outputs(
[t.dtype for t in extra_true_outputs],
[t.shape for t in extra_true_outputs])
# Resolve references to forward graph tensors in grad graphs and ensure
# they are in-scope, i.e., belong to one of outer graphs of the grad graph.
true_grad_inputs = _resolve_grad_inputs(true_graph, true_grad_graph)
false_grad_inputs = _resolve_grad_inputs(false_graph, false_grad_graph)
# This modifies true_grad_graph and false_grad_graph.
_make_output_composite_tensors_match(_COND,
[true_grad_graph, false_grad_graph])
outputs = _build_cond(
if_op.inputs[0],
true_grad_graph,
false_grad_graph,
true_grad_inputs,
false_grad_inputs,
building_gradient=True,
)
# The predicate has no gradient.
return [None] + outputs
def _build_cond(pred,
true_graph,
false_graph,
true_inputs,
false_inputs,
building_gradient,
name=None):
"""Creates an If op from the specified predicate, branch functions and inputs.
Note that this modifies true_graph and false_graph to make the inputs match,
  and to output all intermediate values so they're available for the gradient
computation.
true_graph and false_graph need not have the same input types, but they must
  have the same output types.
Args:
pred: boolean Tensor
true_graph: FuncGraph
false_graph: FuncGraph
true_inputs: a list of Tensors to be passed to true_graph as input.
false_inputs: a list of Tensors to be passed to false_graph as input.
building_gradient: Whether this is a gradient If op.
name: the name for the If op.
Returns:
A list of Tensors which are the outputs of the If op. Does not include added
intermediate outputs.
"""
_make_indexed_slices_indices_types_match(_COND, [true_graph, false_graph])
_check_same_outputs(_COND, [true_graph, false_graph])
# Add inputs to true_graph and false_graph to make them match. Note that
# this modifies true_graph and false_graph.
cond_inputs = _make_inputs_match([true_graph, false_graph],
[true_inputs, false_inputs])
# We do not output intermediates of the gradient If op since this is just
# for backwards compatibility with existing code.
if not building_gradient and util.output_all_intermediates():
# Add all intermediate tensors as function outputs so they're available for
# the gradient computation. Since the outputs of the two functions must
# match, we wrap all the intermediates in optionals. Each intermediate
# output will have a value iff its corresponding branch is taken.
true_intermediates = _get_intermediates(true_graph)
false_intermediates = _get_intermediates(false_graph)
# Wrap intermediates in optionals.
wrapped_true_intermediates = _wrap_intermediates(true_graph,
true_intermediates)
wrapped_false_intermediates = _wrap_intermediates(false_graph,
false_intermediates)
# Make outputs match by adding none optionals.
extra_true_outputs, extra_false_outputs = _make_intermediates_match( # pylint: disable=unbalanced-tuple-unpacking
[true_graph, false_graph],
[wrapped_true_intermediates, wrapped_false_intermediates])
true_graph.outputs.extend(extra_true_outputs)
false_graph.outputs.extend(extra_false_outputs)
_check_same_outputs(_COND, [true_graph, false_graph])
# Create the If op.
with ops.control_dependencies(
list(true_graph.control_captures) + list(false_graph.control_captures)):
true_stateful_ops = [
op for op in true_graph.get_operations() if op._is_stateful
]
false_stateful_ops = [
op for op in false_graph.get_operations() if op._is_stateful
]
if (true_stateful_ops or false_stateful_ops):
op_fn = gen_functional_ops._if
else:
op_fn = gen_functional_ops.stateless_if
tensors = op_fn(
pred,
cond_inputs, [t.dtype for t in true_graph.outputs],
util.create_new_tf_function(true_graph),
util.create_new_tf_function(false_graph),
output_shapes=_get_output_shapes(true_graph.outputs,
false_graph.outputs),
name=name)
if_op, tensors = _get_op_and_outputs(tensors)
# `if_op` is None if this is a `StatelessIf` op with no outputs.
if if_op is not None:
if_op._true_graph = true_graph
if_op._false_graph = false_graph
util.maybe_set_lowering_attr(if_op)
util.maybe_propagate_compile_time_consts_in_xla(if_op)
# Prevent fetching since the variant outputs can't be fetched directly.
if_op.graph.prevent_fetching(if_op)
# Return identities for each output of the If op, rather than the output of
# the If op directly. This makes pruning work if the output of cond() is
# fetched: the lowering pass converts the If outputs into IdentityN outputs,
# which if fetched will cause all ops in the taken branch to be run (since
# it takes all merge ops as input). After lowering, each output identity op
# will end up with only the appropriate merge op as input.
# TODO(b/79984175): this doesn't have to be a tuple once we covert to the
# correct output structure
tensors = [array_ops.identity(t) for t in tensors]
return _pack_sequence_as(true_graph.structured_outputs, tensors)
def get_func_graphs(op):
"""Returns `FuncGraph`s for the input op branches.
Args:
op: The If or Case Operation.
Returns:
A tuple of the `FuncGraph`s of the then_branch and else_branch (all branches
for Case).
"""
def _get_func_graph_for_branch(name_attr_list, cached_attr_name=None):
"""Generates and returns a FuncGraph for the given branch."""
func_graph = None
if cached_attr_name is not None:
func_graph = getattr(op, cached_attr_name, None)
inputs = op.inputs[1:] # First input is pred.
if func_graph is None:
input_shapes = [t.shape for t in inputs]
func_graph = util.get_func_graph(op, input_shapes, name_attr_list.name)
for external_t, internal_t in zip(inputs, func_graph.inputs):
custom_gradient.copy_handle_data(external_t, internal_t)
func_graph.reset_captures(zip(inputs, func_graph.inputs))
# Link the op so that the gradient code can use it.
func_graph._forward_cond = op
return func_graph
if op.type in ["If", "StatelessIf"]:
return (_get_func_graph_for_branch(
op.get_attr("then_branch"), "_true_graph"),
_get_func_graph_for_branch(
op.get_attr("else_branch"), "_false_graph"))
elif op.type == "Case":
# TODO(b/141114088): investigate whether to cache graphs in forward pass
return [_get_func_graph_for_branch(branch_fn)
for branch_fn in op.get_attr("branches")]
else:
raise ValueError("Unsupported op type: {}".format(op.type))
def _grad_fn(func_graph, grads):
"""The gradient function for each conditional branch.
This function builds the gradient graph of the corresponding forward-pass
conditional branch in `func_graph`. This is done by differentiating
func_graph's outputs w.r.t. its inputs.
Args:
func_graph: FuncGraph. The corresponding forward-pass function.
grads: The list of input gradient Tensors.
Returns:
The output gradient Tensors.
"""
# Filter out untrainable function outputs.
# NOTE(skyewm): If we don't do this, the untrainable tensors can sometimes
# cause _GradientsHelper to raise an exception (e.g. the implementation
# doesn't expect 'ys' to contain boolean tensors).
assert len(func_graph.outputs) == len(grads)
ys = []
grad_ys = []
for y, grad_y in zip(func_graph.outputs, grads):
if not backprop_util.IsTrainable(y):
continue
ys.append(y)
grad_ys.append(grad_y)
# Build the gradient graph. Note that this builds the gradient computation of
# func_graph in the current graph, which requires capturing tensors from
# func_graph. The captured func_graph tensors are resolved to external tensors
# in _resolve_grad_inputs.
result = gradients_util._GradientsHelper(
ys, func_graph.inputs, grad_ys=grad_ys,
src_graph=func_graph)
return result
def _create_grad_func(func_graph, grads, name):
"""Returns the FuncGraph representation of _grad_fn."""
return func_graph_module.func_graph_from_py_func(
name,
lambda: _grad_fn(func_graph, grads), [], {},
func_graph=_CondGradFuncGraph(name, func_graph))
def _resolve_grad_inputs(cond_graph, grad_graph):
"""Returns the tensors to pass as inputs to `grad_graph`.
The `grad_graph` may have external references to
1. Its outer graph containing the input gradients. These references are kept
as is.
2. Tensors in the forward pass graph. These tensors may not be "live"
when the gradient is being computed. We replace such references by their
corresponding tensor in `cond_graph.outer_graph`. In the case of nested
control flow or functions, the gradient logic handling
`grad_graph.outer_graph` will make sure the tensor from
`cond_graph.outer_graph` is also correctly captured.
Args:
cond_graph: FuncGraph. The forward-pass function.
grad_graph: FuncGraph. The gradients function.
Returns:
A list of inputs tensors to be passed to grad_graph.
"""
new_inputs = []
for t in grad_graph.external_captures:
# `t` must either be in `grad_graph.outer_graph` or in the forward
# `cond_graph`.
if t.graph != grad_graph.outer_graph:
assert t.graph == cond_graph
# `internal_captures` are not treated as intermediates and hence not added
# to If op outputs. So we get the outer tensor corresponding to those
# from the list of `external_captures`.
for i, output in enumerate(t.graph.outputs):
if output is t:
t = t.graph._forward_cond.outputs[i]
break
else:
for i, output in enumerate(t.graph.internal_captures):
if output is t:
t = t.graph.external_captures[i]
break
else:
raise ValueError("Could not find external tensor capture {tensor} in "
"captures or outputs".format(tensor=t))
# Note: We rely on the capturing logic of the gradient If op graph to
# correctly capture the tensors in `cond_graph.outer_graph`. Both cond_v2
# and while_v2 handle this while building their gradient functions.
assert t.graph == cond_graph.outer_graph
new_inputs.append(t)
return new_inputs
def _get_intermediates(func_graph):
"""Returns intermediate tensors of `func_graph` for gradient computation."""
intermediates = []
for op in func_graph.get_operations():
for t in op.outputs:
if t in func_graph.inputs: continue
if t in func_graph.outputs: continue
if t.dtype is dtypes.resource:
continue
# Accumulating mutexes can cause deadlock.
if op.type == "MutexLock":
continue
intermediates.append(t)
return intermediates
def _make_intermediates_match(branch_graphs, branch_optionals):
"""Returns new optionals lists that have matching signatures.
This is done by mirroring each list in the other using none optionals.
There is no merging of like optionals.
Args:
branch_graphs: `list` of `FuncGraph`.
branch_optionals: `list` of `list`s of optional `Tensor`s from other
branch_graphs
Returns:
A `list` of `list`s of `Tensor`s for each branch_graph. Each list has the
same number of `Tensor`s, all of which will be optionals of the same
shape/type.
"""
new_branch_optionals = []
# Since the intermediates are optionals with dtype variant, we only need
# enough room for the longest list of intermediates.
intermediates_size = max(len(o) for o in branch_optionals)
for i, branch_graph in enumerate(branch_graphs):
other_optionals = _create_none_optionals(
branch_graph, intermediates_size - len(branch_optionals[i]))
new_branch_optionals.append(branch_optionals[i] + other_optionals)
return new_branch_optionals
def _make_intermediates_match_xla(branch_graphs, branch_intermediates):
"""Like _make_intermediates_match but for the XLA case."""
new_branch_intermediates = []
for i, branch_graph in enumerate(branch_graphs):
other_fakeparams = _create_fakeparams(
branch_graph,
sum((bi for bi in branch_intermediates
if bi is not branch_intermediates[i]), []))
num_preceding = sum(len(bi) for bi in branch_intermediates[:i])
new_branch_intermediates.append(other_fakeparams[:num_preceding] +
branch_intermediates[i] +
other_fakeparams[num_preceding:])
return new_branch_intermediates
def _make_inputs_match(branch_graphs, branch_inputs):
"""Modifies branch_graphs so they have the same input signature.
This method reorders and/or adds parameters to each graph in branch_graphs so
they have the same input signature, and updates the 'inputs' and 'captured'
fields of each graph accordingly. It uses the input tensors from the outer
graph to avoid duplicating shared arguments.
Args:
branch_graphs: a `list` of `FuncGraph`
branch_inputs: a `list` of `list`s of `Tensor`s in the outer graph. The
inputs for the corresponding graph in `branch_graphs`.
Returns:
A new list of Tensors from the outer graph that are the new inputs for each
branch_graph. This is a deduped version of `sum(branch_inputs)`.
"""
assert len(branch_graphs) == len(branch_inputs)
added_inputs = set()
new_inputs = []
for branch_in in branch_inputs:
for tensor in branch_in:
tensor_id = ops.tensor_id(tensor)
if tensor_id not in added_inputs:
added_inputs.add(tensor_id)
new_inputs.append(tensor)
for branch_graph, branch_in in zip(branch_graphs, branch_inputs):
input_ids = [ops.tensor_id(t) for t in branch_in]
branch_input_to_param = dict(zip(input_ids, branch_graph.inputs))
input_list = []
for in_t in new_inputs:
param = branch_input_to_param.get(ops.tensor_id(in_t))
if param is None:
param = _create_dummy_input(branch_graph, in_t)
input_list.append(param)
branch_graph.inputs = input_list
# Rewrite the FuncGraphs' state to reflect the new inputs.
branch_graph.reset_captures(zip(new_inputs, branch_graph.inputs))
return new_inputs
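# Illustrative note (not part of the original module): given outer-graph
# tensors a, b, c, branch_inputs = [[a, b], [b, c]] dedups to
# new_inputs = [a, b, c] in first-seen order; each branch graph then gets a
# placeholder (via _create_dummy_input) for any tensor it did not capture.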
def _create_zeros_for_none_grads(forward_graphs, grad_graphs):
"""Creates zeros for None out grads if atleast one branch has non-None grad.
Args:
forward_graphs: List of forward FuncGraphs.
grad_graphs: List of grad FuncGraphs.
"""
assert len(forward_graphs) == len(grad_graphs)
branch_outputs = [g.structured_outputs for g in grad_graphs]
num_outputs_per_branch = [len(outs) for outs in branch_outputs]
assert len(set(num_outputs_per_branch)) == 1, num_outputs_per_branch
for output_idx, branch_outs in enumerate(zip(*branch_outputs)):
if (any(t is None for t in branch_outs) and
any(t is not None for t in branch_outs)):
for branch_index, t in enumerate(branch_outs):
if t is None:
with grad_graphs[branch_index].as_default():
zeros = default_gradient.zeros_like(
forward_graphs[branch_index].inputs[output_idx])
grad_graphs[branch_index].structured_outputs[output_idx] = zeros
for grad_graph in grad_graphs:
grad_graph.outputs = [
t for t in func_graph_module.flatten(grad_graph.structured_outputs)
if t is not None
]
def _make_output_composite_tensors_match(op_type, branch_graphs):
"""Modifies each branch_graph's outputs to have the same output signature.
Currently the only transformation implemented is turning a Tensor into an
equivalent IndexedSlices if the other branch returns an IndexedSlices.
Updates branch_graph.{outputs,structured_outputs} for each branch_graph in
branch_graphs.
Args:
op_type: _COND or _CASE
branch_graphs: `list` of `FuncGraph`
Raises:
TypeError: if a set of outputs cannot be rewritten.
"""
# Note: since this is only used for gradient graphs, we do not expect the
# outputs to be structured (e.g. nested lists), and thus do not need to use
# nest.flatten, etc.
assert branch_graphs
branch_outputs = [g.structured_outputs for g in branch_graphs]
outputs_per_branch = list(len(outs) for outs in branch_outputs)
assert len(set(outputs_per_branch)) == 1, outputs_per_branch
for output_idx, branch_outs in enumerate(zip(*branch_outputs)):
if len(set(type(out) for out in branch_outs)) == 1:
continue
if not any(isinstance(out, ops.IndexedSlices) for out in branch_outs):
continue
for branch_idx, branch_out in enumerate(branch_outs):
if isinstance(branch_out, ops.IndexedSlices):
continue
elif isinstance(branch_out, ops.Tensor):
with branch_graphs[branch_idx].as_default():
branch_outputs[branch_idx][output_idx] = math_ops._as_indexed_slices(
branch_out)
else:
raise TypeError(
"Cannot reconcile {op_name} {output_idx}-th outputs:\n"
" outputs from all branches: {outputs}".format(
op_name="tf.cond" if op_type == _COND else "tf.switch_case",
output_idx=output_idx,
outputs=branch_outs))
for branch_graph, branch_outs in zip(branch_graphs, branch_outputs):
branch_graph.structured_outputs = branch_outs
branch_graph.outputs = [
t for t in func_graph_module.flatten(branch_outs) if t is not None
]
def _make_indexed_slices_indices_types_match(op_type, branch_graphs):
"""Match dtype of IndexedSlices.indices in outputs of branch_graphs."""
assert branch_graphs
# Indices of `IndexedSlices.indices` tensors in `branch_graphs[i].outputs`.
indexed_slice_indices = []
current_index = 0
# Note that this still contains Nones. We leave those in so that error
# messages contain the correct indices. We handle the Nones later when
# updating `current_index`.
branch_outputs_flat_with_composites = [
nest.flatten(branch_graph.structured_outputs, expand_composites=False)
for branch_graph in branch_graphs
]
outs_per_branch = [len(outs) for outs in branch_outputs_flat_with_composites]
assert len(set(outs_per_branch)) == 1, outs_per_branch
# Store indices of IndexedSlices.indices in `indexed_slice_indices`.
for output_idx, branch_outs in enumerate(
zip(*branch_outputs_flat_with_composites)):
if len(set(isinstance(out, ops.IndexedSlices) for out in branch_outs)) != 1:
raise TypeError("Cannot reconcile tf.{op_name} {output_idx}-th outputs:\n"
" branches returned: {outputs}".format(
op_name="cond" if op_type == _COND else "switch_case",
output_idx=output_idx,
outputs=branch_outs))
if isinstance(branch_outs[0], ops.IndexedSlices):
# indices is the second component of the composite tensor.
indexed_slice_indices.append(current_index + 1)
if nest.is_sequence_or_composite(branch_outs[0]):
current_index += len(nest.flatten(branch_outs[0], expand_composites=True))
elif branch_outs[0] is not None:
# `FuncGraph.outputs` does not contain Nones so no need to update the
# counter in that case.
current_index += 1
if not indexed_slice_indices:
return
# `FuncGraph.outputs` is the flattened `FuncGraph.structured_outputs` minus
# the Nones.
if current_index != len(branch_graphs[0].outputs):
raise ValueError("Insufficient elements in branch_graphs[0].outputs.\n"
"Expected: %i\n"
"Actual: %i" %
(current_index, len(branch_graphs[0].outputs)))
# Cast indices with mismatching types to int64.
for index in indexed_slice_indices:
if any(bg.outputs[index].dtype not in (dtypes.int32, dtypes.int64)
for bg in branch_graphs):
raise TypeError("Type of IndexedSlices.indices must be int32 or int64. "
"Found: %s" %
str([bg.outputs[index].dtype for bg in branch_graphs]))
if len(set(bg.outputs[index].dtype for bg in branch_graphs)) != 1:
for branch_graph in branch_graphs:
if branch_graph.outputs[index].dtype == dtypes.int32:
with branch_graph.as_default():
branch_graph.outputs[index] = math_ops.cast(
branch_graph.outputs[index], dtypes.int64)
for branch_graph in branch_graphs:
branch_graph.structured_outputs = _pack_sequence_as(
branch_graph.structured_outputs, branch_graph.outputs)
def _get_op_and_outputs(op_or_outputs):
if isinstance(op_or_outputs, ops.Operation):
return op_or_outputs, []
elif not op_or_outputs: # Empty list.
return None, []
else:
return op_or_outputs[0].op, op_or_outputs
def _pack_sequence_as(structured_outputs, op_outputs):
"""Packs the outputs of the gradient If/Case op.
  The branch functions may contain Nones in the list of `structured_outputs`.
`op_outputs` has those outputs missing. So we need to add those Nones to the
list of `op_outputs` and then pack it in the same structure as
`structured_outputs`.
Args:
structured_outputs: structured_outputs from one of the branch functions.
op_outputs: List of output tensors of the op.
Returns:
`op_outputs` packed like `structured_outputs`.
"""
outputs_with_nones = []
counter = 0
for output in nest.flatten(structured_outputs, expand_composites=True):
if output is None:
outputs_with_nones.append(None)
else:
outputs_with_nones.append(op_outputs[counter])
counter += 1
return func_graph_module.pack_sequence_as(structured_outputs,
outputs_with_nones)
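# Illustrative note (not part of the original module): if a branch returned
# structured_outputs = [t0, None, t1], the If/Case op emits only two tensors,
# so op_outputs = [o0, o1] is re-expanded to [o0, None, o1] before being
# packed back into the original structure.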
def _wrap_intermediates(func_graph, intermediates):
with func_graph.as_default():
return [gen_dataset_ops.optional_from_value([t]) for t in intermediates]
def _create_dummy_input(func_graph, template_tensor):
"""Creates tensors in func_graph to represent template_tensors.
Args:
func_graph: FuncGraph.
template_tensor: a tensor in the outer graph.
Returns:
A tensor in func_graph.
"""
with func_graph.as_default():
return array_ops.placeholder(
template_tensor.dtype, shape=template_tensor.shape)
def _create_none_optionals(func_graph, n):
"""Creates `n` `None` optionals in func_graph.
Args:
func_graph: FuncGraph.
n: `int` the number of `None` optionals to make.
Returns:
A list of tensors in func_graph.
"""
with func_graph.as_default():
return [gen_dataset_ops.optional_none() for _ in range(n)]
def _create_fakeparams(func_graph, template_tensors):
"""Create FakeParams for the XLA case."""
with func_graph.as_default():
return [gen_functional_ops.fake_param(dtype=t.dtype, shape=t.shape)
for t in template_tensors]
def _check_same_outputs(op_type, graphs):
"""Raises an error if `graphs` have different outputs."""
def error(branch_idx, error_detail):
raise TypeError(
"{b0_name} and {bn_name} arguments to {op_name} must have the same "
"number, type, and overall structure of return values.\n"
"\n"
"{b0_name} output: {b0_out}\n"
"{bn_name} output: {bn_out}\n"
"\n"
"Error details:\n"
"{detail}".format(
b0_name="true_fn" if op_type == _COND else "branches[0]",
bn_name=("false_fn" if op_type == _COND else
"branches[{}]".format(branch_idx)),
op_name="tf.cond" if op_type == _COND else "tf.switch_case",
b0_out=graphs[0].structured_outputs,
bn_out=graphs[branch_idx].structured_outputs,
detail=error_detail))
for b in range(1, len(graphs)):
try:
nest.assert_same_structure(
graphs[0].structured_outputs,
graphs[b].structured_outputs,
expand_composites=True)
except (ValueError, TypeError) as e:
error(b, str(e))
op_type_str = "cond" if op_type == _COND else "case"
if len(graphs[0].outputs) != len(graphs[b].outputs):
raise ValueError("Lengths of branch outputs of {op_type} must match.\n"
"len(graphs[0].outputs): {len_0}\n"
"len(graphs[{b}].outputs): {len_b}\n".format(
op_type=op_type_str,
len_0=len(graphs[0].outputs),
b=b,
len_b=len(graphs[b].outputs)))
for b0_out, bn_out in zip(graphs[0].outputs, graphs[b].outputs):
if b0_out.dtype != bn_out.dtype:
error(b, "%s and %s have different types" % (b0_out, bn_out))
def _get_output_shapes(*branch_graph_outputs):
output_shapes = []
for out_by_branch in zip(*branch_graph_outputs):
shape = out_by_branch[0].shape
for other_out in out_by_branch[1:]:
shape = shape.most_specific_compatible_shape(other_out.shape)
output_shapes.append(shape)
return output_shapes
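# Illustrative note (not part of the original module): for branch output
# shapes [2, 3] and [2, None], most_specific_compatible_shape relaxes the
# mismatching dimension, so the reported If/Case output shape is [2, None].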
def verify_captures(op_type, branch_graphs):
"""Verify that a branch's tensor is not accessed in another branch fn."""
# Note: It is technically not possible for lower-branch_index branches to
# capture tensors from higher-branch_index branches, because of the order of
# branch graph construction, but we check all for completeness and to
# guard against potential future changes.
other_branch_graphs = {g: i for i, g in enumerate(branch_graphs)}
for i, branch_graph in enumerate(branch_graphs):
for t in branch_graph.external_captures:
if not isinstance(t, ops.EagerTensor) and t.graph in other_branch_graphs:
branch_names = ["true_fn", "false_fn"] if op_type == _COND else [
"branch {}".format(bi) for bi in range(len(branch_graphs))]
raise ValueError(
"Tensor {tname} in {b0name} is accessed from {b1name}.".format(
tname=t.name,
b0name=branch_names[other_branch_graphs[t.graph]],
b1name=branch_names[i]))
class _CondGradFuncGraph(util.CondBranchFuncGraph):
"""FuncGraph for the gradient function of the branch of an If op.
Handles wrapping and unwrapping intermediate values that are captured by the
gradient computation in optionals.
Attributes:
op_needs_rewrite: True if any intermediates were captured, meaning the
      forward If op needs to be rewritten to output the wrapped intermediates.
"""
def __init__(self, name, forward_graph):
super(_CondGradFuncGraph, self).__init__(
name, collections=ops.get_default_graph()._collections) # pylint: disable=protected-access
self.op_needs_rewrite = False
self._forward_graph = forward_graph
# Maps from forward intermediate tensor -> the unwrapped captured
# intermediate.
self._indirect_captures = {}
# Maps unwrapped intermediate -> optional-wrapped intermediate in the
# forward graph.
self._wrapped_intermediates = collections.OrderedDict()
# Raw intermediates captured from the forward graph. Populated iff we're in
# an XLA context.
self._xla_intermediates = []
# Maps forward intermediate constant valued tensor's id to the constant
# created in this graph for that tensor.
self._captured_constants = {}
@property
def wrapped_intermediates(self):
"""The optional-wrapped intermediates captured from the forward graph."""
return list(self._wrapped_intermediates.values())
@property
def xla_intermediates(self):
"""Raw intermediates captured from the forward graph if XLA is enabled."""
return self._xla_intermediates
def _capture_helper(self, tensor, name):
if (tensor.graph is not self._forward_graph or
any(tensor is t for t in self._forward_graph.inputs) or
any(tensor is t for t in self._forward_graph.outputs)):
return super(_CondGradFuncGraph, self)._capture_helper(tensor, name)
tensor_id = ops.tensor_id(tensor)
# If `tensor` is a graph-building time constant, we create a constant with
# the same value in the backward graph instead of capturing it.
if tensor_id in self._captured_constants:
return self._captured_constants[tensor_id]
elif constant_op.is_constant(tensor):
self._captured_constants[tensor_id] = constant_op.constant(
tensor_util.constant_value(tensor), dtype=tensor.dtype)
return self._captured_constants[tensor_id]
if control_flow_util.GraphOrParentsInXlaContext(ops.get_default_graph()):
# XLA does not yet support optionals, so capture intermediates directly.
# TODO(skyewm,jpienaar): can XLA support optionals?
if all(tensor is not capture for capture in self.external_captures):
self.xla_intermediates.append(tensor)
self.op_needs_rewrite = True
return super(_CondGradFuncGraph, self)._capture_helper(tensor, name)
captured_tensor = self._indirect_captures.get(tensor_id)
if captured_tensor is not None:
return captured_tensor
# 'tensor' is an uncaptured intermediate in the forward graph.
# If it is not a resource, we wrap it in an optional in the forward graph
# and capture the optional normally. We then unwrap the captured optional
# value in the gradient graph to get the raw intermediate value.
    # If it is a resource, we trace the resource up to the input in the forward
# graph and capture that.
if tensor.dtype == dtypes.resource:
# Index of the forward graph input corresponding to the resource tensor.
index = util.resource_input_index(
tensor.name, [t.name for t in self._forward_graph.inputs],
{op.name: op.node_def for op in self._forward_graph.get_operations()},
self._forward_graph._functions)
# This gets mapped to the corresponding If op input in
# `_resolve_grad_inputs`.
captured_tensor = super(_CondGradFuncGraph, self)._capture_helper(
self._forward_graph.inputs[index], name)
else:
if tensor_id not in self._wrapped_intermediates:
# If the gradient has already been computed for this If op, 'tensor' may
# already be wrapped.
for consumer in tensor.consumers():
if (consumer.type == "OptionalFromValue" and
any(consumer.outputs[0] is output
for output in self._forward_graph.outputs)):
optional = consumer.outputs[0]
break
else:
# 'tensor' hasn't been wrapped, do it now.
with self._forward_graph.as_default():
optional = gen_dataset_ops.optional_from_value([tensor])
self.op_needs_rewrite = True
self._wrapped_intermediates[tensor_id] = optional
optional = self._wrapped_intermediates[tensor_id]
captured_optional = super(_CondGradFuncGraph,
self)._capture_helper(optional, name)
captured_tensor = gen_dataset_ops.optional_get_value(
captured_optional, [tensor.dtype], [tensor.shape])[0]
self._indirect_captures[tensor_id] = captured_tensor
return captured_tensor
def indexed_case(branch_index, branch_fns, name="indexed_case"):
"""Like conv_v2, except emits a Case op instead of an If."""
if isinstance(branch_index, int):
raise TypeError("branch_index must not be a Python int", branch_index)
with ops.name_scope(name) as scope:
branch_names = [
util.unique_fn_name(scope, "branch{}".format(b))
for b in range(len(branch_fns))
]
# Automatic control dependencies are added in defuns, but not in v1
# graphs. Propagate that behavior here.
add_control_dependencies = ops.get_default_graph()._add_control_dependencies
branch_index = ops.convert_to_tensor(branch_index, name="branch_index")
branch_graphs = []
for branch_name, branch_fn in zip(branch_names, branch_fns):
branch_graphs.append(
func_graph_module.func_graph_from_py_func(
branch_name,
branch_fn,
[],
{},
func_graph=util.CondBranchFuncGraph(
branch_name,
collections=ops.get_default_graph()._collections), # pylint: disable=protected-access
add_control_dependencies=add_control_dependencies,
op_return_value=branch_index))
verify_captures(_CASE, branch_graphs)
return _build_case(
branch_index,
branch_graphs, [g.external_captures for g in branch_graphs],
name=scope)
@ops.RegisterGradient("Case")
def _CaseGrad(op, *grads): # pylint: disable=invalid-name
"""The gradient of a Case op produced by tf.switch_case."""
# Get the Case operator (this logic handles the case where op is a MockOp)
case_op = op.outputs[0].op
branch_graphs = get_func_graphs(case_op)
assert branch_graphs
# Note: op.graph != ops.get_default_graph() when we are computing the gradient
# of a nested cond.
for branch_graph in branch_graphs:
assert branch_graph.outer_graph == case_op.graph
# Create grad functions that compute the gradient of the branch forward
# graphs. These functions will capture tensors from the forward pass
# functions.
branch_grad_graphs = []
for branch_graph in branch_graphs:
branch_grad_graphs.append(
_create_grad_func(branch_graph, grads,
util.unique_grad_fn_name(branch_graph.name)))
  # Replaces output None grads with zeros if at least one branch has non-None
  # grad at that index.
_create_zeros_for_none_grads(branch_graphs, branch_grad_graphs)
if any(g.op_needs_rewrite for g in branch_grad_graphs):
# Modify 'op' to output the intermediates needed by the grad functions. Note
# that all needed intermediates are wrapped in optionals. Each optional
# intermediate output will have a value iff its corresponding branch is
# taken.
# NOTE(bjp): if there are any active sessions, this modification to `op`
# may make them unrunnable!
if control_flow_util.GraphOrParentsInXlaContext(ops.get_default_graph()):
# XLA does not yet support optionals, so output intermediates directly and
# make them match via FakeParams, which can be converted to zeros in XLA.
# TODO(bjp,jpienaar): can XLA support optionals?
branches_intermediates = [
branch_grad_graph.xla_intermediates
for branch_grad_graph in branch_grad_graphs
]
extra_branch_outputs = _make_intermediates_match_xla(
branch_graphs, branches_intermediates)
else:
branch_intermediates = [
g.wrapped_intermediates for g in branch_grad_graphs
]
# Make outputs match by adding none optionals.
extra_branch_outputs = _make_intermediates_match(branch_graphs,
branch_intermediates)
for branch_graph, extra_outputs in zip(branch_graphs, extra_branch_outputs):
branch_graph.outputs.extend(extra_outputs)
# TODO(bjp): indicate it's an internal bug if this fails.
_check_same_outputs(_CASE, branch_graphs)
for branch_graph in branch_graphs:
branch_graph.name += "_rewritten"
case_op._set_func_list_attr("branches", [
util.create_new_tf_function(branch_graph)
for branch_graph in branch_graphs
])
case_op._set_type_list_attr("Tout", branch_graphs[0].output_types)
case_op._set_shape_list_attr("output_shapes",
branch_graphs[0].output_shapes)
case_op._add_outputs([t.dtype for t in extra_branch_outputs[0]],
[t.shape for t in extra_branch_outputs[0]])
# Resolve references to forward graph tensors in grad graphs and ensure
# they are in-scope, i.e., belong to one of outer graphs of the grad graph.
branches_grad_inputs = [
_resolve_grad_inputs(branch_graph, branch_grad_graph) for branch_graph,
branch_grad_graph in zip(branch_graphs, branch_grad_graphs)
]
# This modifies the graphs in branch_grad_graphs.
_make_output_composite_tensors_match(_CASE, branch_grad_graphs)
outputs = _build_case(case_op.inputs[0], branch_grad_graphs,
branches_grad_inputs, name="gradient")
# The predicate has no gradient.
return [None] + outputs
def _build_case(branch_index, branch_graphs, branch_inputs, name=None):
"""Creates an `Case` op from `branch_index`, branch graphs and inputs.
Note that this modifies `branch_graphs` to make the inputs match, and to
output all intermediates values so they're available for the gradient
computation.
  `branch_graphs` need not have the same input types, but they must
  have the same output types.
Args:
branch_index: integer Tensor
branch_graphs: List of FuncGraph
branch_inputs: List of lists of Tensors to be passed to corresponding
branch_graph as input.
name: the name for the Case op.
Returns:
A list of Tensors which are the outputs of the Case op. Does not include
added intermediate outputs.
"""
_make_indexed_slices_indices_types_match(_CASE, branch_graphs)
_check_same_outputs(_CASE, branch_graphs)
# Add inputs to branch_graphs to make them match. Note that this modifies the
# graphs in `branch_graphs`.
case_inputs = _make_inputs_match(branch_graphs, branch_inputs)
# Create the Case op.
with ops.control_dependencies(
sum((list(bg.control_captures) for bg in branch_graphs), [])):
tensors = gen_functional_ops.case(
branch_index,
case_inputs, [t.dtype for t in branch_graphs[0].outputs],
[util.create_new_tf_function(g) for g in branch_graphs],
output_shapes=_get_output_shapes(*[g.outputs for g in branch_graphs]),
name=name)
case_op, tensors = _get_op_and_outputs(tensors)
if case_op is not None:
util.maybe_set_lowering_attr(case_op)
util.maybe_propagate_compile_time_consts_in_xla(case_op)
# Prevent fetching since the variant outputs can't be fetched directly.
case_op.graph.prevent_fetching(case_op)
# Return identities for each output of the Case op, rather than the output of
# the Case op directly. This makes pruning work if the output of switch_case()
# is fetched: the lowering pass converts the Case outputs into IdentityN
# outputs, which if fetched will cause all ops in the taken branch to be run
# (since it takes all merge ops as input). After lowering, each output
# identity op will end up with only the appropriate merge op as input.
  # TODO(b/79984175): this doesn't have to be a tuple once we convert to the
  # correct output structure
tensors = [array_ops.identity(t) for t in tensors]
return _pack_sequence_as(branch_graphs[0].structured_outputs, tensors)
| jhseu/tensorflow | tensorflow/python/ops/cond_v2.py | Python | apache-2.0 | 46,155 |
"""
tests for magic_gui
"""
import wx
import unittest
import os
from programs import magic_gui
from pmagpy import contribution_builder as cb
import dialogs
from dialogs import grid_frame3 as grid_frame
#import dialogs.pmag_widgets as pmag_widgets
from pmagpy import pmag
from pmagpy import data_model3 as data_model
# set constants
DMODEL = data_model.DataModel()
WD = pmag.get_test_WD()
PROJECT_WD = os.path.join(WD, "data_files", "magic_gui", "3_0")
class TestMainFrame(unittest.TestCase):
def setUp(self):
self.app = wx.App()
self.frame = magic_gui.MainFrame(PROJECT_WD,
name="best frame ever",
dmodel=DMODEL)
self.frame.get_wd_data()
self.pnl = self.frame.GetChildren()[0]
def tearDown(self):
# wx.CallAfter(self.frame.Destroy)
# wx.CallAfter(self.app.Destroy)
for fname in ('locations.txt', 'sites.txt'):
try:
os.remove(os.path.join(PROJECT_WD, fname))
except OSError:
pass
os.chdir(WD)
def test_main_panel_is_created(self):
"""
test for existence of main panel
"""
self.assertTrue(self.pnl.IsEnabled())
self.assertEqual("main panel", str(self.pnl.GetName()))
self.assertEqual("best frame ever", str(self.frame.GetName()))
self.assertEqual(magic_gui.MainFrame, type(self.frame))
def test_data_object_is_created(self):
self.assertEqual(cb.Contribution, type(self.frame.contribution))
self.assertIn('measurements', self.frame.contribution.tables)
self.assertIn('specimens', self.frame.contribution.tables)
self.assertIn('samples', self.frame.contribution.tables)
self.assertEqual('sr01g2', self.frame.contribution.tables['specimens'].df.index[1])
def test_specimen_button(self):
window = self.does_top_window_exist(self.pnl, 'specimens_btn', 'specimens')
self.assertTrue(window, 'specimens grid window was not created')
self.assertIsInstance(window, grid_frame.GridFrame)
self.assertTrue(window.IsEnabled())
        wx.CallAfter(self.assertTrue, window.IsShown())
def test_sample_button(self):
window = self.does_top_window_exist(self.pnl, 'samples_btn', 'samples')
self.assertTrue(window, 'samples grid window was not created')
self.assertIsInstance(window, grid_frame.GridFrame)
self.assertTrue(window.IsEnabled())
        wx.CallAfter(self.assertTrue, window.IsShown())
def test_site_button(self):
window = self.does_top_window_exist(self.pnl, 'sites_btn', 'sites')
self.assertTrue(window, 'sites grid window was not created')
self.assertIsInstance(window, grid_frame.GridFrame)
self.assertTrue(window.IsEnabled())
        wx.CallAfter(self.assertTrue, window.IsShown())
def test_location_button(self):
window = self.does_top_window_exist(self.pnl, 'locations_btn', 'locations')
self.assertTrue(window, 'locations grid window was not created')
self.assertIsInstance(window, grid_frame.GridFrame)
self.assertTrue(window.IsEnabled())
        wx.CallAfter(self.assertTrue, window.IsShown())
def test_age_button(self):
window = self.does_top_window_exist(self.pnl, 'ages_btn', 'ages')
self.assertTrue(window, 'age grid window was not created')
self.assertIsInstance(window, grid_frame.GridFrame)
self.assertTrue(window.IsEnabled())
        wx.CallAfter(self.assertTrue, window.IsShown())
def test_measurement_button(self):
window = self.does_top_window_exist(self.pnl, 'measurements_btn', 'measurements')
self.assertTrue(window, 'measurement grid window was not created')
self.assertIsInstance(window, grid_frame.GridFrame)
self.assertTrue(window.IsEnabled())
self.assertIsInstance(window.grid, dialogs.magic_grid3.HugeMagicGrid)
        wx.CallAfter(self.assertTrue, window.IsShown())
def does_top_window_exist(self, parent, btn_name, window_name):
"""
        produces a click event on the button called btn_name and checks
        whether it creates a top-level window called window_name
"""
btn = None
children = parent.GetChildren()
print(", ".join([child.GetName() for child in children]))
for child in children:
if child.GetName() == btn_name:
btn = child
break
if not btn:
return None
event = wx.CommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, btn.GetId())
btn.GetEventHandler().ProcessEvent(event)
for wind in wx.GetTopLevelWindows():
if wind.GetName() == window_name:
return wind
return None
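# Note (illustrative): posting wx.wxEVT_COMMAND_BUTTON_CLICKED through the
# button's event handler simulates a user click without running an event
# loop, which is why these tests can inspect the resulting top-level
# windows synchronously.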
class TestMagICGUIMenu(unittest.TestCase):
def setUp(self):
self.app = wx.App()
self.frame = magic_gui.MainFrame(PROJECT_WD, name="best frame ever",
dmodel=DMODEL)
self.frame.get_wd_data()
self.pnl = self.frame.GetChildren()[0]
self.contribution = self.frame.contribution
def tearDown(self):
#self.frame.Destroy() # this does not work and causes strange errors
for wind in wx.GetTopLevelWindows():
res = wind.Destroy()
self.app.Destroy()
os.chdir(WD)
def test_that_all_menus_exist(self):
"""
check that all expected menus were created
and that each menu item is enabled
"""
menu_names = ['File', 'Help ']
menus = self.frame.MenuBar.Menus
found_menus = []
for menu, menu_name in menus:
self.assertIsInstance(menu, wx.Menu)
for item in menu.GetMenuItems():
self.assertTrue(item.IsEnabled())
self.assertIn(menu_name, menu_names)
found_menus.append(menu_name)
self.assertEqual(set(menu_names), set(found_menus))
def test_show_mainframe(self):
menus = self.frame.MenuBar.Menus
fmenu, fmenu_name = menus[0]
# once you have the correct menu
show_id = fmenu.FindItem('Show main window')
show_item = fmenu.FindItemById(show_id)
self.frame.Hide()
self.assertFalse(self.frame.IsShown())
event = wx.CommandEvent(wx.EVT_MENU.evtType[0], show_id)
self.frame.GetEventHandler().ProcessEvent(event)
self.assertTrue(self.frame.IsShown())
def test_close_grid(self):
self.frame.grid_frame = grid_frame.GridFrame(self.contribution, PROJECT_WD,
"specimens", "specimens")
self.assertTrue(self.frame.grid_frame.IsShown())
menus = self.frame.MenuBar.Menus
fmenu, fmenu_name = menus[0]
# once you have the correct menu
close_id = fmenu.FindItem('Close current grid')
close_item = fmenu.FindItemById(close_id)
event = wx.CommandEvent(wx.EVT_MENU.evtType[0], close_id)
self.frame.GetEventHandler().ProcessEvent(event)
| lfairchild/PmagPy | pmagpy_tests/test_magic_gui.py | Python | bsd-3-clause | 7,108 |
from calendar import month_name
from django.http import Http404
from django.shortcuts import get_object_or_404
from mezzanine.blog.models import BlogPost, BlogCategory
from mezzanine.blog.feeds import PostsRSS, PostsAtom
from mezzanine.conf import settings
from mezzanine.generic.models import Keyword
from mezzanine.utils.views import render, paginate
from mezzanine.utils.models import get_user_model
User = get_user_model()
def blog_post_list(request, tag=None, year=None, month=None, username=None,
category=None, template="blog/blog_post_list.html"):
"""
Display a list of blog posts that are filtered by tag, year, month,
author or category. Custom templates are checked for using the name
``blog/blog_post_list_XXX.html`` where ``XXX`` is either the
category slug or author's username if given.
"""
settings.use_editable()
templates = []
blog_posts = BlogPost.objects.published(for_user=request.user)
if tag is not None:
tag = get_object_or_404(Keyword, slug=tag)
blog_posts = blog_posts.filter(keywords__in=tag.assignments.all())
if year is not None:
blog_posts = blog_posts.filter(publish_date__year=year)
if month is not None:
blog_posts = blog_posts.filter(publish_date__month=month)
month = month_name[int(month)]
if category is not None:
category = get_object_or_404(BlogCategory, slug=category)
blog_posts = blog_posts.filter(categories=category)
templates.append(u"blog/blog_post_list_%s.html" %
unicode(category.slug))
author = None
if username is not None:
author = get_object_or_404(User, username=username)
blog_posts = blog_posts.filter(user=author)
templates.append(u"blog/blog_post_list_%s.html" % username)
prefetch = ("categories", "keywords__keyword")
blog_posts = blog_posts.select_related("user").prefetch_related(*prefetch)
blog_posts = paginate(blog_posts, request.GET.get("page", 1),
settings.BLOG_POST_PER_PAGE,
settings.MAX_PAGING_LINKS)
context = {"blog_posts": blog_posts, "year": year, "month": month,
"tag": tag, "category": category, "author": author}
templates.append(template)
return render(request, templates, context)
def blog_post_detail(request, slug, year=None, month=None, day=None,
template="blog/blog_post_detail.html"):
""". Custom templates are checked for using the name
``blog/blog_post_detail_XXX.html`` where ``XXX`` is the blog
posts's slug.
"""
blog_posts = BlogPost.objects.published(
for_user=request.user).select_related()
blog_post = get_object_or_404(blog_posts, slug=slug)
context = {"blog_post": blog_post, "editable_obj": blog_post}
templates = [u"blog/blog_post_detail_%s.html" % unicode(slug), template]
return render(request, templates, context)
def blog_post_feed(request, format, **kwargs):
"""
Blog posts feeds - maps format to the correct feed view.
"""
try:
return {"rss": PostsRSS, "atom": PostsAtom}[format](**kwargs)(request)
except KeyError:
raise Http404()
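# Hypothetical wiring (not part of this module): a urlconf entry such as
#   url("^feeds/(?P<format>.*)/$", blog_post_feed, name="blog_post_feed")
# would dispatch "rss" and "atom" to PostsRSS/PostsAtom and 404 anything else.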
| eRestin/Mezz | mezzanine/blog/views.py | Python | bsd-2-clause | 3,269 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_gui.ui'
#
# Created by: PyQt5 UI code generator 5.5.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(660, 370)
MainWindow.setMinimumSize(QtCore.QSize(660, 370))
MainWindow.setMaximumSize(QtCore.QSize(660, 370))
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.takeoffButton = QtWidgets.QPushButton(self.centralwidget)
self.takeoffButton.setGeometry(QtCore.QRect(470, 30, 161, 41))
self.takeoffButton.setObjectName("takeoffButton")
self.altdSlider = QtWidgets.QSlider(self.centralwidget)
self.altdSlider.setGeometry(QtCore.QRect(400, 30, 19, 311))
self.altdSlider.setMaximum(100)
self.altdSlider.setProperty("value", 49)
self.altdSlider.setOrientation(QtCore.Qt.Vertical)
self.altdSlider.setObjectName("altdSlider")
self.stopButton = QtWidgets.QPushButton(self.centralwidget)
self.stopButton.setGeometry(QtCore.QRect(470, 80, 161, 41))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/images/stop.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.stopButton.setIcon(icon)
self.stopButton.setObjectName("stopButton")
self.windowsLabel = QtWidgets.QLabel(self.centralwidget)
self.windowsLabel.setGeometry(QtCore.QRect(540, 190, 71, 21))
self.windowsLabel.setObjectName("windowsLabel")
self.cameraCheck = QtWidgets.QCheckBox(self.centralwidget)
self.cameraCheck.setGeometry(QtCore.QRect(540, 220, 94, 26))
self.cameraCheck.setObjectName("cameraCheck")
self.sensorsCheck = QtWidgets.QCheckBox(self.centralwidget)
self.sensorsCheck.setGeometry(QtCore.QRect(540, 250, 94, 26))
self.sensorsCheck.setObjectName("sensorsCheck")
self.altdLabel = QtWidgets.QLabel(self.centralwidget)
self.altdLabel.setGeometry(QtCore.QRect(390, 340, 51, 21))
self.altdLabel.setObjectName("altdLabel")
self.verticalLayoutWidget = QtWidgets.QWidget(self.centralwidget)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(20, 30, 361, 301))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.tlLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.tlLayout.setObjectName("tlLayout")
self.rotationDial = QtWidgets.QDial(self.centralwidget)
self.rotationDial.setGeometry(QtCore.QRect(440, 220, 50, 64))
self.rotationDial.setMaximum(100)
self.rotationDial.setProperty("value", 49)
self.rotationDial.setObjectName("rotationDial")
self.rotationLabel = QtWidgets.QLabel(self.centralwidget)
self.rotationLabel.setGeometry(QtCore.QRect(440, 280, 65, 21))
self.rotationLabel.setObjectName("rotationLabel")
self.XLabel = QtWidgets.QLabel(self.centralwidget)
self.XLabel.setGeometry(QtCore.QRect(20, 340, 21, 21))
self.XLabel.setObjectName("XLabel")
self.YLabel = QtWidgets.QLabel(self.centralwidget)
self.YLabel.setGeometry(QtCore.QRect(130, 340, 21, 21))
self.YLabel.setObjectName("YLabel")
self.XValue = QtWidgets.QLabel(self.centralwidget)
self.XValue.setGeometry(QtCore.QRect(40, 340, 41, 21))
self.XValue.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.XValue.setObjectName("XValue")
self.YValue = QtWidgets.QLabel(self.centralwidget)
self.YValue.setGeometry(QtCore.QRect(150, 340, 41, 21))
self.YValue.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.YValue.setObjectName("YValue")
self.altdValue = QtWidgets.QLabel(self.centralwidget)
self.altdValue.setGeometry(QtCore.QRect(390, 10, 41, 21))
self.altdValue.setAlignment(QtCore.Qt.AlignCenter)
self.altdValue.setObjectName("altdValue")
self.rotValue = QtWidgets.QLabel(self.centralwidget)
self.rotValue.setGeometry(QtCore.QRect(445, 200, 41, 21))
self.rotValue.setAlignment(QtCore.Qt.AlignCenter)
self.rotValue.setObjectName("rotValue")
self.resetButton = QtWidgets.QPushButton(self.centralwidget)
self.resetButton.setGeometry(QtCore.QRect(470, 130, 161, 41))
self.resetButton.setObjectName("resetButton")
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Uav Viewer"))
self.takeoffButton.setText(_translate("MainWindow", "Take off"))
self.stopButton.setText(_translate("MainWindow", "Stop"))
self.windowsLabel.setText(_translate("MainWindow", "Windows:"))
self.cameraCheck.setText(_translate("MainWindow", "Camera"))
self.sensorsCheck.setText(_translate("MainWindow", "Sensors"))
self.altdLabel.setText(_translate("MainWindow", "Altitude"))
self.rotationLabel.setText(_translate("MainWindow", "Rotation"))
self.XLabel.setText(_translate("MainWindow", "X:"))
self.YLabel.setText(_translate("MainWindow", "Y:"))
self.XValue.setText(_translate("MainWindow", "0"))
self.YValue.setText(_translate("MainWindow", "0"))
self.altdValue.setText(_translate("MainWindow", "0"))
self.rotValue.setText(_translate("MainWindow", "0"))
self.resetButton.setText(_translate("MainWindow", "Reset"))
import resources_rc
| mazafrav/JdeRobot | src/tools/uav_viewer_py/gui/ui_gui.py | Python | gpl-3.0 | 5,879 |
from __future__ import absolute_import, unicode_literals
from glyphsLib.builder.instances import apply_instance_data
__all__ = ["apply_instance_data"]
| googlei18n/glyphsLib | Lib/glyphsLib/interpolation.py | Python | apache-2.0 | 152 |
n = int(input())
result = set(map(int, input().split()))
operations = int(input())
for _ in range(operations):
operation = input().split()[0]
target_set = set(map(int, input().split()))
if operation == 'update':
result.update(target_set)
elif operation == 'intersection_update':
result.intersection_update(target_set)
elif operation == 'difference_update':
result.difference_update(target_set)
elif operation == 'symmetric_difference_update':
result.symmetric_difference_update(target_set)
print(sum(result))
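# Example (illustrative): with result = {1, 2, 3} and a single operation
# "difference_update" applied with target_set = {2, 4}, the set mutates in
# place to {1, 3} and the script prints 4.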
| avenet/hackerrank | python/sets/set_mutations.py | Python | mit | 574 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for stevedore.extension
"""
import pkg_resources
from stevedore import driver
from stevedore import exception
from stevedore import extension
from stevedore.tests import test_extension
from stevedore.tests import utils
class TestCallback(utils.TestCase):
def test_detect_plugins(self):
em = driver.DriverManager('stevedore.test.extension', 't1')
names = sorted(em.names())
self.assertEqual(names, ['t1'])
def test_call(self):
def invoke(ext, *args, **kwds):
return (ext.name, args, kwds)
em = driver.DriverManager('stevedore.test.extension', 't1')
result = em(invoke, 'a', b='C')
self.assertEqual(result, ('t1', ('a',), {'b': 'C'}))
def test_driver_property_not_invoked_on_load(self):
em = driver.DriverManager('stevedore.test.extension', 't1',
invoke_on_load=False)
d = em.driver
self.assertIs(d, test_extension.FauxExtension)
def test_driver_property_invoked_on_load(self):
em = driver.DriverManager('stevedore.test.extension', 't1',
invoke_on_load=True)
d = em.driver
self.assertIsInstance(d, test_extension.FauxExtension)
def test_no_drivers(self):
try:
driver.DriverManager('stevedore.test.extension.none', 't1')
except exception.NoMatches as err:
self.assertIn("No 'stevedore.test.extension.none' driver found",
str(err))
def test_bad_driver(self):
try:
driver.DriverManager('stevedore.test.extension', 'e2')
except ImportError:
pass
else:
self.assertEqual(False, "No error raised")
def test_multiple_drivers(self):
# The idea for this test was contributed by clayg:
# https://gist.github.com/clayg/6311348
extensions = [
extension.Extension(
'backend',
pkg_resources.EntryPoint.parse('backend = pkg1:driver'),
'pkg backend',
None,
),
extension.Extension(
'backend',
pkg_resources.EntryPoint.parse('backend = pkg2:driver'),
'pkg backend',
None,
),
]
try:
dm = driver.DriverManager.make_test_instance(extensions[0])
# Call the initialization code that verifies the extension
dm._init_plugins(extensions)
except exception.MultipleMatches as err:
self.assertIn("Multiple", str(err))
else:
self.fail('Should have had an error')
| ctrlaltdel/neutrinator | vendor/stevedore/tests/test_driver.py | Python | gpl-3.0 | 3,234 |
#!/usr/bin/env python
# Capstone Python bindings, by Nguyen Anh Quynh <aquynh@gmail.com>
from __future__ import print_function
from capstone import *
from capstone.ppc import *
from xprint import to_x, to_hex, to_x_32
PPC_CODE = b"\x43\x20\x0c\x07\x41\x56\xff\x17\x80\x20\x00\x00\x80\x3f\x00\x00\x10\x43\x23\x0e\xd0\x44\x00\x80\x4c\x43\x22\x02\x2d\x03\x00\x80\x7c\x43\x20\x14\x7c\x43\x20\x93\x4f\x20\x00\x21\x4c\xc8\x00\x21\x40\x82\x00\x14"
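# The byte string above is raw big-endian PowerPC machine code; test_class()
# below disassembles it starting at address 0x1000.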
all_tests = (
(CS_ARCH_PPC, CS_MODE_BIG_ENDIAN, PPC_CODE, "PPC-64"),
)
def print_insn_detail(insn):
# print address, mnemonic and operands
print("0x%x:\t%s\t%s" % (insn.address, insn.mnemonic, insn.op_str))
# "data" instruction generated by SKIPDATA option has no detail
if insn.id == 0:
return
if len(insn.operands) > 0:
print("\top_count: %u" % len(insn.operands))
c = 0
for i in insn.operands:
if i.type == PPC_OP_REG:
print("\t\toperands[%u].type: REG = %s" % (c, insn.reg_name(i.reg)))
if i.type == PPC_OP_IMM:
print("\t\toperands[%u].type: IMM = 0x%s" % (c, to_x_32(i.imm)))
if i.type == PPC_OP_MEM:
print("\t\toperands[%u].type: MEM" % c)
if i.mem.base != 0:
print("\t\t\toperands[%u].mem.base: REG = %s" \
% (c, insn.reg_name(i.mem.base)))
if i.mem.disp != 0:
print("\t\t\toperands[%u].mem.disp: 0x%s" \
% (c, to_x_32(i.mem.disp)))
if i.type == PPC_OP_CRX:
print("\t\toperands[%u].type: CRX" % c)
print("\t\t\toperands[%u].crx.scale: = %u" \
% (c, i.crx.scale))
if i.crx.reg != 0:
print("\t\t\toperands[%u].crx.reg: REG = %s" \
% (c, insn.reg_name(i.crx.reg)))
if i.crx.cond != 0:
print("\t\t\toperands[%u].crx.cond: 0x%x" \
% (c, i.crx.cond))
c += 1
if insn.bc:
print("\tBranch code: %u" % insn.bc)
if insn.bh:
print("\tBranch hint: %u" % insn.bh)
if insn.update_cr0:
print("\tUpdate-CR0: True")
# ## Test class Cs
def test_class():
for (arch, mode, code, comment) in all_tests:
print("*" * 16)
print("Platform: %s" % comment)
print("Code: %s" % to_hex(code))
print("Disasm:")
try:
md = Cs(arch, mode)
md.detail = True
for insn in md.disasm(code, 0x1000):
print_insn_detail(insn)
                print()
print("0x%x:\n" % (insn.address + insn.size))
except CsError as e:
print("ERROR: %s" % e)
if __name__ == '__main__':
test_class()
| dhxkgozj/DirEngine | lib/capstone/bindings/python/test_ppc.py | Python | bsd-3-clause | 2,840 |
from library import aura as aura_module
from utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
from ansible.compat.tests.mock import patch, call
# test_parted is a pretty good start.
class TestAura(ModuleTestCase):
def setUp(self):
super(TestAura, self).setUp()
self.module = aura_module
self.mock_run_command = patch('ansible.module_utils.basic.AnsibleModule.run_command')
self.run_command = self.mock_run_command.start()
self.mock_get_bin_path = patch('ansible.module_utils.basic.AnsibleModule.get_bin_path')
self.get_bin_path = self.mock_get_bin_path.start()
self.get_bin_path.return_value = '/usr/bin/aura'
def tearDown(self):
self.mock_run_command.stop()
self.mock_get_bin_path.stop()
# This is what we actually call in our tests, setting our expectations
# of what should happen.
def execute_module(self, failed=False, changed=False):
if failed:
result = self.failed()
self.assertTrue(result['failed'], result)
else:
result = self.changed(changed)
self.assertEqual(result['changed'], changed, result)
return result
def failed(self):
with self.assertRaises(AnsibleFailJson) as exc:
self.module.main()
result = exc.exception.args[0]
self.assertTrue(result['failed'], result)
return result
def changed(self, changed=False):
with self.assertRaises(AnsibleExitJson) as exc:
self.module.main()
result = exc.exception.args[0]
self.assertEqual(result['changed'], changed, result)
return result
def set_side_effect(self, side_effect):
self.run_command.side_effect = lambda *args, **kwargs: side_effect[args[0]]
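    # Illustrative note: run_command is keyed on the exact command string, so
    # each test enumerates the precise aura invocations it expects, mapping
    # each one to the (rc, stdout, stderr) triple the mock should return.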
def test_install_already_present(self):
set_module_args({
'name': 'foo',
'state': 'present',
})
self.set_side_effect({
'/usr/bin/aura --query --info foo': (0, "Name : foo", None)
})
self.execute_module()
def test_install_version_present_is_latest(self):
set_module_args({
'name': 'foo',
'state': 'latest',
})
self.set_side_effect({
'/usr/bin/aura --query --info foo': (0, "Version : 1", None),
'/usr/bin/aura --aursync --info foo': (0, "Version : 1", None),
})
self.execute_module()
def test_install_newer_version_on_aur(self):
set_module_args({
'name': 'foo',
'state': 'latest',
})
self.set_side_effect({
'/usr/bin/aura --query --info foo': (0, "Version : 1", None),
'/usr/bin/aura --aursync --info foo': (0, "Version : 2", None),
'/usr/bin/aura --aursync --builduser=nobody foo --noconfirm': (0, "Determining dependencies...", None)
})
self.execute_module(changed=True)
def test_install_older_version_on_aur(self):
set_module_args({
'name': 'foo',
'state': 'latest',
})
self.set_side_effect({
'/usr/bin/aura --query --info foo': (0, "Version : 2", None),
'/usr/bin/aura --aursync --info foo': (0, "Version : 1", None),
'/usr/bin/aura --aursync --builduser=nobody foo --noconfirm': (0, "Determining dependencies...", None)
})
self.execute_module(changed=True)
| AlexandreCarlton/ansible-aura | tests/test_aura.py | Python | gpl-3.0 | 3,472 |
__version__ = '0.6.3-dev'
# Package level logger
import logging
try:
# Python >= 2.7
from logging import NullHandler
except ImportError:
# Python < 2.7
class NullHandler(logging.Handler):
def emit(self, record):
pass
logger = logging.getLogger("pyoos")
logger.addHandler(NullHandler())
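# Library-logging convention (illustrative): attaching a NullHandler means
# importing pyoos never emits "No handlers could be found" warnings, and
# applications opt in to output themselves, e.g.
#   logging.getLogger("pyoos").addHandler(logging.StreamHandler())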
| emiliom/pyoos | pyoos/__init__.py | Python | lgpl-3.0 | 331 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from . import wp_sync
| ecino/compassion-switzerland | child_sync_wp/tools/__init__.py | Python | agpl-3.0 | 425 |
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2016, Ilya Etingof <ilya@glas.net>
# License: http://pysnmp.sf.net/license.html
#
import sys
import traceback
from pysnmp.smi import error
from pysnmp import debug
__all__ = ['AbstractMibInstrumController', 'MibInstrumController']
class AbstractMibInstrumController:
def readVars(self, vars, acInfo=(None, None)):
raise error.NoSuchInstanceError(idx=0)
def readNextVars(self, vars, acInfo=(None, None)):
raise error.EndOfMibViewError(idx=0)
def writeVars(self, vars, acInfo=(None, None)):
raise error.NoSuchObjectError(idx=0)
class MibInstrumController(AbstractMibInstrumController):
fsmReadVar = {
# ( state, status ) -> newState
('start', 'ok'): 'readTest',
('readTest', 'ok'): 'readGet',
('readGet', 'ok'): 'stop',
('*', 'err'): 'stop'
}
fsmReadNextVar = {
# ( state, status ) -> newState
('start', 'ok'): 'readTestNext',
('readTestNext', 'ok'): 'readGetNext',
('readGetNext', 'ok'): 'stop',
('*', 'err'): 'stop'
}
fsmWriteVar = {
# ( state, status ) -> newState
('start', 'ok'): 'writeTest',
('writeTest', 'ok'): 'writeCommit',
('writeCommit', 'ok'): 'writeCleanup',
('writeCleanup', 'ok'): 'readTest',
# Do read after successful write
('readTest', 'ok'): 'readGet',
('readGet', 'ok'): 'stop',
# Error handling
('writeTest', 'err'): 'writeCleanup',
('writeCommit', 'err'): 'writeUndo',
('writeUndo', 'ok'): 'readTest',
# Ignore read errors (removed columns)
('readTest', 'err'): 'stop',
('readGet', 'err'): 'stop',
('*', 'err'): 'stop'
}
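    # Illustrative walk-through (not part of the original module): a
    # successful SET traverses start -> writeTest -> writeCommit ->
    # writeCleanup -> readTest -> readGet -> stop, while a failed commit
    # detours through writeUndo before the confirming read.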
def __init__(self, mibBuilder):
self.mibBuilder = mibBuilder
self.lastBuildId = -1
self.lastBuildSyms = {}
def getMibBuilder(self):
return self.mibBuilder
# MIB indexing
def __indexMib(self):
# Build a tree from MIB objects found at currently loaded modules
if self.lastBuildId == self.mibBuilder.lastBuildId:
return
(MibScalarInstance, MibScalar, MibTableColumn, MibTableRow,
MibTable) = self.mibBuilder.importSymbols(
'SNMPv2-SMI', 'MibScalarInstance', 'MibScalar',
'MibTableColumn', 'MibTableRow', 'MibTable'
)
mibTree, = self.mibBuilder.importSymbols('SNMPv2-SMI', 'iso')
#
# Management Instrumentation gets organized as follows:
#
# MibTree
# |
# +----MibScalar
# | |
# | +-----MibScalarInstance
# |
# +----MibTable
# |
# +----MibTableRow
# |
# +-------MibTableColumn
# |
# +------MibScalarInstance(s)
#
# Mind you, only Managed Objects get indexed here, various MIB defs and
# constants can't be SNMP managed so we drop them.
#
scalars = {}
instances = {}
tables = {}
rows = {}
cols = {}
# Sort by module name to give user a chance to slip-in
# custom MIB modules (that would be sorted out first)
mibSymbols = list(self.mibBuilder.mibSymbols.items())
mibSymbols.sort(key=lambda x: x[0], reverse=True)
for modName, mibMod in mibSymbols:
for symObj in mibMod.values():
if isinstance(symObj, MibTable):
tables[symObj.name] = symObj
elif isinstance(symObj, MibTableRow):
rows[symObj.name] = symObj
elif isinstance(symObj, MibTableColumn):
cols[symObj.name] = symObj
elif isinstance(symObj, MibScalarInstance):
instances[symObj.name] = symObj
elif isinstance(symObj, MibScalar):
scalars[symObj.name] = symObj
# Detach items from each other
for symName, parentName in self.lastBuildSyms.items():
if parentName in scalars:
scalars[parentName].unregisterSubtrees(symName)
elif parentName in cols:
cols[parentName].unregisterSubtrees(symName)
elif parentName in rows:
rows[parentName].unregisterSubtrees(symName)
else:
mibTree.unregisterSubtrees(symName)
lastBuildSyms = {}
# Attach Managed Objects Instances to Managed Objects
for inst in instances.values():
if inst.typeName in scalars:
scalars[inst.typeName].registerSubtrees(inst)
elif inst.typeName in cols:
cols[inst.typeName].registerSubtrees(inst)
else:
raise error.SmiError(
'Orphan MIB scalar instance %r at %r' % (inst, self)
)
lastBuildSyms[inst.name] = inst.typeName
# Attach Table Columns to Table Rows
for col in cols.values():
rowName = col.name[:-1] # XXX
if rowName in rows:
rows[rowName].registerSubtrees(col)
else:
raise error.SmiError(
'Orphan MIB table column %r at %r' % (col, self)
)
lastBuildSyms[col.name] = rowName
# Attach Table Rows to MIB tree
for row in rows.values():
mibTree.registerSubtrees(row)
lastBuildSyms[row.name] = mibTree.name
# Attach Tables to MIB tree
for table in tables.values():
mibTree.registerSubtrees(table)
lastBuildSyms[table.name] = mibTree.name
# Attach Scalars to MIB tree
for scalar in scalars.values():
mibTree.registerSubtrees(scalar)
lastBuildSyms[scalar.name] = mibTree.name
self.lastBuildSyms = lastBuildSyms
self.lastBuildId = self.mibBuilder.lastBuildId
debug.logger & debug.flagIns and debug.logger('__indexMib: rebuilt')
# MIB instrumentation
def flipFlopFsm(self, fsmTable, inputNameVals, acInfo):
self.__indexMib()
debug.logger & debug.flagIns and debug.logger('flipFlopFsm: inputNameVals %r' % (inputNameVals,))
mibTree, = self.mibBuilder.importSymbols('SNMPv2-SMI', 'iso')
outputNameVals = []
state, status = 'start', 'ok'
origExc = None
while 1:
k = (state, status)
if k in fsmTable:
fsmState = fsmTable[k]
else:
k = ('*', status)
if k in fsmTable:
fsmState = fsmTable[k]
else:
raise error.SmiError(
'Unresolved FSM state %s, %s' % (state, status)
)
debug.logger & debug.flagIns and debug.logger('flipFlopFsm: state %s status %s -> fsmState %s' % (state, status, fsmState))
state = fsmState
status = 'ok'
if state == 'stop':
break
idx = 0
for name, val in inputNameVals:
f = getattr(mibTree, state, None)
if f is None:
raise error.SmiError(
'Unsupported state handler %s at %s' % (state, self)
)
try:
# Convert to tuple to avoid ObjectName instantiation
# on subscription
rval = f(tuple(name), val, idx, acInfo)
except error.SmiError:
exc_t, exc_v, exc_tb = sys.exc_info()
debug.logger & debug.flagIns and debug.logger('flipFlopFsm: fun %s exception %s for %s=%r with traceback: %s' % (f, exc_t, name, val, traceback.format_exception(exc_t, exc_v, exc_tb)))
if origExc is None: # Take the first exception
origExc, origTraceback = exc_v, exc_tb
status = 'err'
break
else:
debug.logger & debug.flagIns and debug.logger('flipFlopFsm: fun %s succeeded for %s=%r' % (f, name, val))
if rval is not None:
outputNameVals.append((rval[0], rval[1]))
idx = idx + 1
if origExc:
if sys.version_info[0] <= 2:
raise origExc
else:
try:
raise origExc.with_traceback(origTraceback)
finally:
# Break cycle between locals and traceback object
# (seems to be irrelevant on Py3 but just in case)
del origTraceback
return outputNameVals
def readVars(self, vars, acInfo=(None, None)):
return self.flipFlopFsm(self.fsmReadVar, vars, acInfo)
def readNextVars(self, vars, acInfo=(None, None)):
return self.flipFlopFsm(self.fsmReadNextVar, vars, acInfo)
def writeVars(self, vars, acInfo=(None, None)):
return self.flipFlopFsm(self.fsmWriteVar, vars, acInfo)
| filippog/pysnmp | pysnmp/smi/instrum.py | Python | bsd-3-clause | 9,238 |
'''
Created on Feb 20, 2017
@author: havrila
'''
import json
from pprint import pprint
from nuage.API_wrappers.nuage_vspk_interface import nuage_vspk_wrapper
def nuage_assign(args):
pprint(args)
if args.OBJECT is None:
print("nuage assign didn't receive a mandatory parameter, exiting ...")
return
if args.OBJECT == 'enterprise-to-gateway-vlan':
if args.entname is None or args.entname == '':
print("You are assigning without an enterprise name. Specify --entname for the object")
return
if args.gateway is None or args.gateway == '':
print("You did not specify a gateway with --gateway")
return
if args.gateway_interface is None or args.gateway_interface == '':
print("You did not specify a gateway interface with --gateway-interface")
return
if args.vlan_id is None or args.vlan_id == '':
print("You did not specify a VLAN ID with --vlan-id")
return
nuage = nuage_vspk_wrapper()
nuage.connect()
nuage.assign_permission_vlan_under_gateway_interface(args.entname[0],args.gateway[0],args.gateway_interface[0],args.vlan_id[0])
if args.OBJECT == 'user-to-group':
print("assigning user-to-group starting")
if args.entname is None or args.entname == '':
print("You are assigning without an enterprise name. Specify a name for the object")
return
if args.username is None or args.username == '':
print("You are assigning without a username. Specify a name for the object")
return
if args.groupname is None or args.groupname == '':
print("You are assigning without a group name. Specify a name for the object")
return
nuage = nuage_vspk_wrapper()
nuage.connect()
return nuage.assign_user_to_group(args.entname[0],args.groupname[0],args.username[0])
if args.OBJECT == 'permission-for-group-to-domain':
print("assigning permission-for-group-to-domain starting ...")
if args.entname is None or args.entname == '':
print("You are assigning without an enterprise name. Specify a name for the object")
return 1
if args.groupname is None or args.groupname == '':
print("You are assigning without a group name. Specify a name for the object")
return 1
if args.domname is None or args.domname == '':
print("You are assigning without a domain name. Specify a name for the object")
return 1
if args.permission is None or args.permission == '':
print("You are assigning without a permission type. Specify a permission type for the assignment")
return 1
nuage = nuage_vspk_wrapper()
nuage.connect()
return nuage.assign_permission_group_to_domain(args.entname[0],args.groupname[0],args.domname[0],args.permission)
if args.OBJECT == 'permission-for-group-to-zone':
print("assigning permission-for-group-to-zone starting ...")
if args.entname is None or args.entname == '':
print("You are assigning without an enterprise name. Specify a name for the object")
return 1
if args.groupname is None or args.groupname == '':
print("You are assigning without a group name. Specify a name for the object")
return 1
if args.domname is None or args.domname == '':
print("You are assigning without a domain name. Specify a name for the object")
return 1
if args.zonename is None or args.zonename == '':
print("You are assigning without a zone name. Specify a name for the object")
return 1
if args.permission is None or args.permission == '':
print("You are assigning without a permission type. Specify a permission type for the assignment")
return 1
nuage = nuage_vspk_wrapper()
nuage.connect()
return nuage.assign_permission_group_to_zone(args.entname[0],args.groupname[0],args.domname[0],args.zonename[0],args.permission)
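# Illustrative invocations (a sketch -- the argparse wiring that populates
# 'args' lives elsewhere in this project, and the command name shown here
# is an assumption, not part of this module):
#   nuage assign user-to-group --entname Acme --groupname ops --username jdoe
#   nuage assign permission-for-group-to-domain --entname Acme --groupname ops --domname prod --permission USE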
| zerxen/Project_FAST | nuage/nuage_assign.py | Python | gpl-2.0 | 4,327 |
"""
The Tornado Framework
By Ali Pesaranghader
University of Ottawa, Ontario, Canada
E-mail: apesaran -at- uottawa -dot- ca / alipsgh -at- gmail -dot- com
"""
from data_structures.attribute_scheme import AttributeScheme
from classifier.__init__ import *
from drift_detection.__init__ import *
from filters.project_creator import Project
from graphic.hex_colors import Color
from streams.readers.arff_reader import ARFFReader
from tasks.prequential_learner_detector_pairs import PrequentialMultiPairs
# 1. Creating a project
project = Project("projects/multi", "sine1")
# 2. Loading an arff file
labels, attributes, stream_records = ARFFReader.read("data_streams/sine1_w_50_n_0.1/sine1_w_50_n_0.1_101.arff")
attributes_scheme = AttributeScheme.get_scheme(attributes)
# 3. Initializing Classifier-Detector Pairs
pairs = [[NaiveBayes(labels, attributes_scheme['nominal']), FHDDM()],
[NaiveBayes(labels, attributes_scheme['nominal']), FHDDMS()],
[NaiveBayes(labels, attributes_scheme['nominal']), CUSUM()],
[NaiveBayes(labels, attributes_scheme['nominal']), PH()],
[NaiveBayes(labels, attributes_scheme['nominal']), DDM()],
[NaiveBayes(labels, attributes_scheme['nominal']), EDDM()],
[NaiveBayes(labels, attributes_scheme['nominal']), ADWINChangeDetector()],
[NaiveBayes(labels, attributes_scheme['nominal']), SeqDrift2ChangeDetector()],
[NaiveBayes(labels, attributes_scheme['nominal']), HDDM_A_test()],
[NaiveBayes(labels, attributes_scheme['nominal']), HDDM_W_test()],
[Perceptron(labels, attributes_scheme['numeric']), FHDDM()],
[Perceptron(labels, attributes_scheme['numeric']), FHDDMS()],
[Perceptron(labels, attributes_scheme['numeric']), CUSUM()],
[Perceptron(labels, attributes_scheme['numeric']), PH()],
[Perceptron(labels, attributes_scheme['numeric']), DDM()],
[Perceptron(labels, attributes_scheme['numeric']), EDDM()],
[Perceptron(labels, attributes_scheme['numeric']), ADWINChangeDetector()],
[Perceptron(labels, attributes_scheme['numeric']), SeqDrift2ChangeDetector()],
[Perceptron(labels, attributes_scheme['numeric']), HDDM_A_test()],
[Perceptron(labels, attributes_scheme['numeric']), HDDM_W_test()],
[HoeffdingTree(labels, attributes_scheme['nominal']), FHDDM()],
[HoeffdingTree(labels, attributes_scheme['nominal']), FHDDMS()],
[HoeffdingTree(labels, attributes_scheme['nominal']), CUSUM()],
[HoeffdingTree(labels, attributes_scheme['nominal']), PH()],
[HoeffdingTree(labels, attributes_scheme['nominal']), DDM()],
[HoeffdingTree(labels, attributes_scheme['nominal']), EDDM()],
[HoeffdingTree(labels, attributes_scheme['nominal']), ADWINChangeDetector()],
[HoeffdingTree(labels, attributes_scheme['nominal']), SeqDrift2ChangeDetector()],
[HoeffdingTree(labels, attributes_scheme['nominal']), HDDM_A_test()],
[HoeffdingTree(labels, attributes_scheme['nominal']), HDDM_W_test()]]
# 4. Creating a color set for plotting results
colors = [Color.Indigo[1], Color.Blue[1], Color.Green[1], Color.Lime[1], Color.Yellow[1],
Color.Amber[1], Color.Orange[1], Color.Red[1], Color.Purple[1], Color.Pink[1],
Color.Indigo[2], Color.Blue[2], Color.Green[2], Color.Lime[2], Color.Yellow[2],
Color.Amber[2], Color.Orange[2], Color.Red[2], Color.Purple[2], Color.Pink[2],
Color.Indigo[3], Color.Blue[3], Color.Green[3], Color.Lime[3], Color.Yellow[3],
Color.Amber[3], Color.Orange[3], Color.Red[3], Color.Purple[3], Color.Pink[3]]
# 5. Defining actual locations of drifts, acceptance delay interval, and vector of weights
actual_drift_points = [20000, 40000, 60000, 80000]
drift_acceptance_interval = 250
w_vec = [1, 1, 1, 1, 1, 1]
# 6. Creating a Prequential Evaluation Process
prequential = PrequentialMultiPairs(pairs, attributes, attributes_scheme,
actual_drift_points, drift_acceptance_interval,
w_vec, project, color_set=colors, legend_param=False)
prequential.run(stream_records, 1)
| alipsgh/tornado | github_prequential_multi_test.py | Python | mit | 4,255 |
"""SCons.Scanner.LaTeX
This module implements the dependency scanner for LaTeX code.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Scanner/LaTeX.py 2014/08/24 12:12:31 garyo"
import os.path
import re
import SCons.Scanner
import SCons.Util
# list of graphics file extensions for TeX and LaTeX
TexGraphics = ['.eps', '.ps']
LatexGraphics = ['.pdf', '.png', '.jpg', '.gif', '.tif']
# Used as a return value of modify_env_var if the variable is not set.
class _Null(object):
pass
_null = _Null
# The user specifies the paths in env[variable], similar to other builders.
# They may be relative and must be converted to absolute, as expected
# by LaTeX and Co. The environment may already have some paths in
# env['ENV'][var]. These paths are honored, but the env[var] paths have
# higher precedence. All changes are un-done on exit.
def modify_env_var(env, var, abspath):
try:
save = env['ENV'][var]
except KeyError:
save = _null
env.PrependENVPath(var, abspath)
try:
if SCons.Util.is_List(env[var]):
env.PrependENVPath(var, [os.path.abspath(str(p)) for p in env[var]])
else:
# Split at os.pathsep to convert into absolute path
env.PrependENVPath(var, [os.path.abspath(p) for p in str(env[var]).split(os.pathsep)])
except KeyError:
pass
# Convert the value into a string explicitly so we can append os.pathsep
# (without the trailing separator LaTeX will not search the system paths
# as well). env.AppendENVPath(var, os.pathsep) does not work here: it
# refuses to append a bare os.pathsep.
if SCons.Util.is_List(env['ENV'][var]):
env['ENV'][var] = os.pathsep.join(env['ENV'][var])
# Append the trailing os.pathsep character here to catch the case with no env[var]
env['ENV'][var] = env['ENV'][var] + os.pathsep
return save
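# Illustrative use of modify_env_var (a sketch; 'env' stands for a real
# SCons construction environment):
#   save = modify_env_var(env, 'TEXINPUTS', '/abs/build/dir')
#   ... run LaTeX with the augmented search path ...
#   if save is _null:
#       del env['ENV']['TEXINPUTS']   # the variable was not set before
#   else:
#       env['ENV']['TEXINPUTS'] = save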
class FindENVPathDirs(object):
"""A class to bind a specific *PATH variable name to a function that
will return all of the *path directories."""
def __init__(self, variable):
self.variable = variable
def __call__(self, env, dir=None, target=None, source=None, argument=None):
import SCons.PathList
try:
path = env['ENV'][self.variable]
except KeyError:
return ()
dir = dir or env.fs._cwd
path = SCons.PathList.PathList(path).subst_path(env, target, source)
return tuple(dir.Rfindalldirs(path))
def LaTeXScanner():
"""Return a prototype Scanner instance for scanning LaTeX source files
when built with latex.
"""
ds = LaTeX(name = "LaTeXScanner",
suffixes = '$LATEXSUFFIXES',
# in the search order, see below in LaTeX class docstring
graphics_extensions = TexGraphics,
recursive = 0)
return ds
def PDFLaTeXScanner():
"""Return a prototype Scanner instance for scanning LaTeX source files
when built with pdflatex.
"""
ds = LaTeX(name = "PDFLaTeXScanner",
suffixes = '$LATEXSUFFIXES',
# in the search order, see below in LaTeX class docstring
graphics_extensions = LatexGraphics,
recursive = 0)
return ds
class LaTeX(SCons.Scanner.Base):
"""Class for scanning LaTeX files for included files.
Unlike most scanners, which use regular expressions that just
return the included file name, this returns a tuple consisting
of the keyword for the inclusion ("include", "includegraphics",
"input", or "bibliography"), and then the file name itself.
Based on a quick look at LaTeX documentation, it seems that we
should append .tex suffix for the "include" keywords, append .tex if
there is no extension for the "input" keyword, and need to add .bib
for the "bibliography" keyword that does not accept extensions by itself.
Finally, if there is no extension for an "includegraphics" keyword
latex will append .ps or .eps to find the file, while pdftex may use .pdf,
.jpg, .tif, .mps, or .png.
The actual subset and search order may be altered by
DeclareGraphicsExtensions command. This complication is ignored.
The default order corresponds to experimentation with teTeX
$ latex --version
pdfeTeX 3.141592-1.21a-2.2 (Web2C 7.5.4)
kpathsea version 3.5.4
The order is:
['.eps', '.ps'] for latex
['.png', '.pdf', '.jpg', '.tif'].
Another difference is that the search path is determined by the type
of the file being searched:
env['TEXINPUTS'] for "input" and "include" keywords
env['TEXINPUTS'] for "includegraphics" keyword
env['TEXINPUTS'] for "lstinputlisting" keyword
env['BIBINPUTS'] for "bibliography" keyword
env['BSTINPUTS'] for "bibliographystyle" keyword
env['INDEXSTYLE'] for "makeindex" keyword, no scanning support needed
just allows user to set it if needed.
FIXME: also look for the class or style in document[class|style]{}
FIXME: also look for the argument of bibliographystyle{}
"""
keyword_paths = {'include': 'TEXINPUTS',
'input': 'TEXINPUTS',
'includegraphics': 'TEXINPUTS',
'bibliography': 'BIBINPUTS',
'bibliographystyle': 'BSTINPUTS',
'addbibresource': 'BIBINPUTS',
'addglobalbib': 'BIBINPUTS',
'addsectionbib': 'BIBINPUTS',
'makeindex': 'INDEXSTYLE',
'usepackage': 'TEXINPUTS',
'lstinputlisting': 'TEXINPUTS'}
env_variables = SCons.Util.unique(list(keyword_paths.values()))
def __init__(self, name, suffixes, graphics_extensions, *args, **kw):
# We have to include \n with the % we exclude from the first part
# part of the regex because the expression is compiled with re.M.
# Without the \n, the ^ could match the beginning of a *previous*
# line followed by one or more newline characters (i.e. blank
# lines), interfering with a match on the next line.
# add option for whitespace before the '[options]' or the '{filename}'
regex = r'^[^%\n]*\\(include|includegraphics(?:\s*\[[^\]]+\])?|lstinputlisting(?:\[[^\]]+\])?|input|bibliography|addbibresource|addglobalbib|addsectionbib|usepackage)\s*{([^}]*)}'
self.cre = re.compile(regex, re.M)
self.comment_re = re.compile(r'^((?:(?:\\%)|[^%\n])*)(.*)$', re.M)
self.graphics_extensions = graphics_extensions
def _scan(node, env, path=(), self=self):
node = node.rfile()
if not node.exists():
return []
return self.scan_recurse(node, path)
class FindMultiPathDirs(object):
"""The stock FindPathDirs function has the wrong granularity:
it is called once per target, while we need the path that depends
on what kind of included files is being searched. This wrapper
hides multiple instances of FindPathDirs, one per the LaTeX path
variable in the environment. When invoked, the function calculates
and returns all the required paths as a dictionary (converted into
a tuple to become hashable). Then the scan function converts it
back and uses a dictionary of tuples rather than a single tuple
of paths.
"""
def __init__(self, dictionary):
self.dictionary = {}
for k,n in dictionary.items():
self.dictionary[k] = ( SCons.Scanner.FindPathDirs(n),
FindENVPathDirs(n) )
def __call__(self, env, dir=None, target=None, source=None,
argument=None):
di = {}
for k,(c,cENV) in self.dictionary.items():
di[k] = ( c(env, dir=None, target=None, source=None,
argument=None) ,
cENV(env, dir=None, target=None, source=None,
argument=None) )
# To prevent "dict is not hashable error"
return tuple(di.items())
class LaTeXScanCheck(object):
"""Skip all but LaTeX source files, i.e., do not scan *.eps,
*.pdf, *.jpg, etc.
"""
def __init__(self, suffixes):
self.suffixes = suffixes
def __call__(self, node, env):
current = not node.has_builder() or node.is_up_to_date()
scannable = node.get_suffix() in env.subst_list(self.suffixes)[0]
# Returning false means that the file is not scanned.
return scannable and current
kw['function'] = _scan
kw['path_function'] = FindMultiPathDirs(LaTeX.keyword_paths)
kw['recursive'] = 0
kw['skeys'] = suffixes
kw['scan_check'] = LaTeXScanCheck(suffixes)
kw['name'] = name
SCons.Scanner.Base.__init__(self, *args, **kw)
def _latex_names(self, include):
filename = include[1]
if include[0] == 'input':
base, ext = os.path.splitext( filename )
if ext == "":
return [filename + '.tex']
if (include[0] == 'include'):
return [filename + '.tex']
if include[0] == 'bibliography':
base, ext = os.path.splitext( filename )
if ext == "":
return [filename + '.bib']
if include[0] == 'usepackage':
base, ext = os.path.splitext( filename )
if ext == "":
return [filename + '.sty']
if include[0] == 'includegraphics':
base, ext = os.path.splitext( filename )
if ext == "":
#return [filename+e for e in self.graphics_extensions + TexGraphics]
# use the line above to find dependencies for the PDF builder
# when only an .eps figure is present. Since it will be found
# if the user tells scons how to make the pdf figure, leave
# it out for now.
return [filename+e for e in self.graphics_extensions]
return [filename]
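# Examples (illustrative): _latex_names(('bibliography', 'refs')) returns
# ['refs.bib'], while _latex_names(('includegraphics', 'fig')) returns one
# candidate per configured graphics extension, e.g. ['fig.eps', 'fig.ps']
# for the latex scanner.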
def sort_key(self, include):
return SCons.Node.FS._my_normcase(str(include))
def find_include(self, include, source_dir, path):
try:
sub_path = path[include[0]]
except (IndexError, KeyError):
sub_path = ()
try_names = self._latex_names(include)
for n in try_names:
# see if we find it using the path in env[var]
i = SCons.Node.FS.find_file(n, (source_dir,) + sub_path[0])
if i:
return i, include
# see if we find it using the path in env['ENV'][var]
i = SCons.Node.FS.find_file(n, (source_dir,) + sub_path[1])
if i:
return i, include
return i, include
def canonical_text(self, text):
"""Standardize an input TeX-file contents.
Currently:
* removes comments, unwrapping comment-wrapped lines.
"""
out = []
line_continues_a_comment = False
for line in text.splitlines():
line,comment = self.comment_re.findall(line)[0]
if line_continues_a_comment:
out[-1] = out[-1] + line.lstrip()
else:
out.append(line)
line_continues_a_comment = len(comment) > 0
return '\n'.join(out).rstrip()+'\n'
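# Example (illustrative): canonical_text("fig. % see below\n1\n") returns
# "fig. 1\n" -- the comment text is dropped and, since '%' also consumes
# the newline in TeX, the following line is unwrapped onto it.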
def scan(self, node):
# Modify the default scan function to allow for the regular
# expression to return a comma separated list of file names
# as can be the case with the bibliography keyword.
# Cache the includes list in node so we only scan it once:
# path_dict = dict(list(path))
# add option for whitespace (\s) before the '['
noopt_cre = re.compile(r'\s*\[.*$')
if node.includes is not None:
includes = node.includes
else:
text = self.canonical_text(node.get_text_contents())
includes = self.cre.findall(text)
# 1. Split comma-separated lines, e.g.
# ('bibliography', 'phys,comp')
# should become two entries
# ('bibliography', 'phys')
# ('bibliography', 'comp')
# 2. Remove the options, e.g., such as
# ('includegraphics[clip,width=0.7\\linewidth]', 'picture.eps')
# should become
# ('includegraphics', 'picture.eps')
split_includes = []
for include in includes:
inc_type = noopt_cre.sub('', include[0])
inc_list = include[1].split(',')
for j in range(len(inc_list)):
split_includes.append( (inc_type, inc_list[j]) )
#
includes = split_includes
node.includes = includes
return includes
def scan_recurse(self, node, path=()):
""" do a recursive scan of the top level target file
This lets us search for included files based on the
directory of the main file just as latex does"""
path_dict = dict(list(path))
queue = []
queue.extend( self.scan(node) )
seen = {}
# This is a hand-coded DSU (decorate-sort-undecorate, or
# Schwartzian transform) pattern. The sort key is the raw name
# of the file as specified on the \include, \input, etc. line.
# TODO: what about the comment in the original Classic scanner:
# """which lets
# us keep the sort order constant regardless of whether the file
# is actually found in a Repository or locally."""
nodes = []
source_dir = node.get_dir()
#for include in includes:
while queue:
include = queue.pop()
try:
if seen[include[1]] == 1:
continue
except KeyError:
seen[include[1]] = 1
#
# Handle multiple filenames in include[1]
#
n, i = self.find_include(include, source_dir, path_dict)
if n is None:
# Do not bother with 'usepackage' warnings, as they most
# likely refer to system-level files
if include[0] != 'usepackage':
SCons.Warnings.warn(SCons.Warnings.DependencyWarning,
"No dependency generated for file: %s (included from: %s) -- file not found" % (i, node))
else:
sortkey = self.sort_key(n)
nodes.append((sortkey, n))
# recurse down
queue.extend( self.scan(n) )
return [pair[1] for pair in sorted(nodes)]
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| engineer0x47/SCONS | engine/SCons/Scanner/LaTeX.py | Python | mit | 16,203 |
"""Application-defined errors."""
import tornado.web
class MinigridHTTPError(tornado.web.HTTPError):
"""Base class for application HTTP errors."""
def __init__(self, reason, status_code, template_name, **template_kwargs):
"""Create an instance of this Error.
Subclasses should include a default status_code and template_name.
"""
self.reason = reason
self.status_code = status_code
self.template_name = template_name
self.log_message = None
self.template_kwargs = template_kwargs
template_kwargs['next_page'] = '/'
class LoginError(MinigridHTTPError):
"""Error during login."""
def __init__(self,
status_code=400,
template_name='index-logged-out.html',
*, reason, **template_kwargs):
"""Create a login error (400 by default)."""
super().__init__(reason, status_code, template_name, **template_kwargs)
class CardReadError(Exception):
"""Error while reading a card."""
class CardWriteError(Exception):
"""Error while writing a card."""
| SEL-Columbia/minigrid-server | minigrid/error.py | Python | gpl-3.0 | 1,108 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'MenuPluginSettings.template'
db.add_column('cmsplugin_menupluginsettings', 'template', self.gf('django.db.models.fields.CharField')(default='cmsplugin_embeddedmenu/layouts/default.html', max_length=256), keep_default=False)
def backwards(self, orm):
# Deleting field 'MenuPluginSettings.template'
db.delete_column('cmsplugin_menupluginsettings', 'template')
models = {
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('site', 'tree_id', 'lft')", 'object_name': 'Page'},
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cmsplugin_embeddedmenu.menupluginsettings': {
'Meta': {'object_name': 'MenuPluginSettings', 'db_table': "'cmsplugin_menupluginsettings'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'depth': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'root': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['cms.Page']"}),
'start_level': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'template': ('django.db.models.fields.CharField', [], {'default': "'cmsplugin_embeddedmenu/layouts/default.html'", 'max_length': '256'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cmsplugin_embeddedmenu']
| airtonix/cmsplugin-embedded-menu | cmsplugin_embeddedmenu/migrations/0002_auto__add_field_menupluginsettings_template.py | Python | bsd-2-clause | 6,755 |
#!/usr/bin/env python2
import sys,os,platform
import publishfunctions
curdir = os.path.dirname(os.path.realpath(__file__))
# ------- Configuration parameters -------------
projectname='gspbox'
if platform.system()=='Darwin':
homefolder='/Users/user'
else:
homefolder='/home/user'
project=homefolder+'/'+projectname+'/'
# -------- Configuration of mat2doc ------------
mat2docpath='~/mat2doc'
try:
from publish_local import *
except:
pass
# -------- Automatic configuration ------------
import conf
outputdir=conf.outputdir
outputdirphp=outputdir+projectname+'-php/'
outputdirmat=outputdir+projectname+'-mat/'
outputdirrelease=outputdir+projectname+'-release/'
outputdirtex=outputdir+projectname+'-tex/'
outputdirhtml=outputdir+projectname+'-html/'
f=file(project+projectname+'_version')
versionstring=f.read()[:-1]
f.close()
# ------- do not edit below this line ----------
f = open(project + 'mat2doc/startup.m', 'w')
f.write('addpath ' + unlocxpath + '\n')
f.write('init_unlocbox;\n\n')
f.write('addpath ' + gsppath + '\n')
f.write('gsp_start;\n')
f.close()
todo=sys.argv
if 'package' in todo:
todo.append('release')
# Optional parameters
if 'fast' in todo:
plot='--no-execplot '
else:
plot='--execplot '
if 'rebuild' in todo:
build='--rebuild '
elif 'cached' in todo:
build='--cached '
else:
build='--auto '
# Publish
for mode in ['mat', 'php', 'html', 'tex']:
if mode in todo:
s = '%s %s/mat2doc.py %s%s %s %s' % ('PYTHONPATH="%s:$PYTHONPATH"' % (curdir,), mat2docpath, plot if mode != 'mat' else '', build if mode != 'mat' else '', project, mode,)
os.system(s)
if 'tex' in todo:
s = 'cp '+ project + 'mat2doc/tex/main/* '+outputdirtex
os.system(s)
s = 'cp '+ project + 'mat2doc/project.bib '+outputdirtex
os.system(s)
s = 'cd '+ outputdirtex +' && make clean && make'
os.system(s)
# Release version
if 'release' in todo or 'package' in todo or 'pushrelease' in todo:
os.system('rm -r '+outputdirrelease)
os.system('cp -r '+outputdirmat+' '+outputdirrelease)
os.system('rm -r '+outputdirrelease+'private')
os.system('rm -r '+outputdirrelease+'oose')
os.system('rm -r '+outputdirrelease+'testing')
# os.system('rm -r '+outputdirrelease+'graph_ml')
os.system('rm -r '+outputdirrelease+'test_gsptoolbox')
os.system('rm -r '+outputdirrelease+'to\ be\ removed')
os.system('rm -r '+outputdirrelease+'.git')
os.system('rm -r '+outputdirrelease+'.gitignore')
# Packaging
if 'package' in todo:
fname=outputdir+projectname+'-'+versionstring
# Create the Unix src package
os.system('rm '+fname+'.tar.gz')
os.system('cd '+outputdir+' && cp -r ' +projectname+'-release '+projectname+' && tar zcvf '+fname+'.tar.gz '+projectname+'/')
# Create the Windows src package
os.system('rm '+fname+'.zip')
publishfunctions.unix2dos(outputdir + projectname +'/')
os.system('cd '+outputdir+' && zip -r '+fname+'.zip '+projectname+'/')
os.system('rm -r '+outputdir+projectname)
if 'copyhtml' in todo:
htmldirgit = homefolder+'/work/git/website/gspbox-html/'
# os.system('cp -r '+outputdirhtml+'include/* '+ htmldirgit+'include/')
# os.system('rm -r '+outputdirhtml+'include ')
os.system('cp -r '+outputdirhtml+'* '+ htmldirgit+'doc/')
print('cp -r '+outputdirhtml+'* '+ htmldirgit+'doc/')
# Send to the server
# if 'pushrelease' in todo:
# s='rsync -r --progress '+outputdirrelease+'* '+outputdircode
# print s
# os.system(s)
# if 'pushdoc' in todo:
# s='rsync -av '+outputdirphp+'* '+outputdirweb+'doc/'
# print s
# os.system(s)
| epfl-lts2/gspbox | mat2doc/publish.py | Python | gpl-3.0 | 3,694 |
#!/usr/bin/env python3
"""
This is a minimal, speed-optimized version of a FASTQ->FASTA conversion script. It lacks a
lot of options other scripts might have, but I got tired of runtimes taking hours with
billion-read FASTQ files.
Example timing information on a 5.4GB input file (with 21.7 million records)**:
- 55.3 seconds with default settings
- 2 minutes, 2 seconds when run with --detect_direction option
- 2 minutes, 8 seconds when run with the --width=60 option
- 3 minutes, 17 seconds when run with both --detect_direction and --width=60 options
** Timing was run on a laptop with an Intel(R) Core(TM) i7-3740QM CPU @ 2.70GHz,
utilizing only one core.
OUTPUT
------
Running with no options other than -i and -o, this script will transform headers like these:
@SN7001163:78:C0YG5ACXX:6:1101:1129:2043 1:N:0:CCTAGGT
@61JCNAAXX100503:5:100:10001:18267/2
to this in the FASTA output:
>SN7001163:78:C0YG5ACXX:6:1101:1129:2043 1:N:0:CCTAGGT
>61JCNAAXX100503:5:100:10001:18267/2
If you have have headers like this:
@SN7001163:78:C0YG5ACXX:6:1101:1129:2043 1:N:0:CCTAGGT
The optional --detect_direction option will see the '1' or '2' after the whitespace and transform
the header to this instead (in FASTA):
>SN7001163:78:C0YG5ACXX:6:1101:1129:2043/1
Note that this can increase the runtime up to 3x (from my testing), since a regex is used.
The FASTQ format dictates that the sequence residues of each entry be all contained within a
single line, while the convention for FASTA format is a fixed-with for all residue lines (usually
60bp) where longer ones are wrapped to the next line. By default this script will simply copy
the residue lines over from the FASTQ to the FASTA file, but you can use the --width option
to specify that the lines in the output FASTA should be wrapped at some width. This comes with
a performance penalty.
Author: Joshua Orvis (jorvis AT gmail)
"""
import argparse
import re
import sys
def main():
parser = argparse.ArgumentParser( description='Convert FASTQ to FASTA format')
## output file to be written
parser.add_argument('-i', '--input_file', type=str, required=True, help='Path to an input file to be read' )
parser.add_argument('-o', '--output_file', type=str, required=False, help='Path to an output file to be created' )
parser.add_argument('-d', '--detect_direction', action='store_true', help='Pass this flag to auto-detect the mate pair direction. See full documentation for more info' )
parser.add_argument('-w', '--width', type=int, required=False, help='Defines the width (or number of bases) on each FASTA sequence line' )
args = parser.parse_args()
if args.output_file is None:
ofh = sys.stdout
else:
ofh = open( args.output_file, 'wt' )
line_count = 0
record_count = 0
last_header = None
for line in open(args.input_file, 'r'):
line_count += 1
if line_count % 4 == 1:
record_count += 1
if args.detect_direction:
m = re.search(r'^@(.+?) (\d)', line)
if m:
last_header = "{0}/{1}\n".format(m.group(1), m.group(2))
else:
raise Exception("ERROR: FASTQ header line found that didn't match expected format: {0}".format(line))
else:
line = line.lstrip('@')
last_header = line
elif line_count % 4 == 2:
if args.width:
ofh.write(">{0}{1}\n".format(last_header, wrapped(line, args.width)))
else:
ofh.write(">{0}{1}".format(last_header, line))
ofh.close()
print("{0} records written to the output FASTA file".format(record_count))
def wrapped(string, every=60):
'''This runs orders of magnitude faster than using the textwrap module.'''
string = string.rstrip()
return '\n'.join(string[i:i+every] for i in range(0, len(string), every))
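# Example (illustrative): wrapped('A' * 130) returns three lines of 60, 60
# and 10 characters joined by newlines.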
if __name__ == '__main__':
main()
| zctea/biocode | fastq/convert_fastq_to_fasta.py | Python | gpl-3.0 | 4,053 |
import unittest
from itertools import islice
from dt174b import dt174b
class TestSettingPacket(unittest.TestCase):
def setUp(self):
self.def_data = {
'year': 2013,
'month': 2,
'day': 5,
'hour': 9,
'min': 5,
'sec': 24,
'rec_int': 10,
'alm_int': 10,
'smpl_int': 1,
'auto': False,
'temp_high': 40.5,
'temp_low': 5.5,
'hum_high': 90.5,
'hum_low': 30.5,
'pressure_high': 1100,
'pressure_low': 700,
'alt': 0,
'samples': 10000
}
self.edge_data = {
'year': 2013,
'month': 2,
'day': 5,
'hour': 9,
'min': 5,
'sec': 24,
'rec_int': None,
'alm_int': None,
'smpl_int': 17*60 + 59,
'auto': True,
'temp_high': 70,
'temp_low': -40,
'hum_high': 100,
'hum_low': 0,
'pressure_high': 1100,
'pressure_low': 700,
'alt': -9999,
'samples': 10
}
def test_default(self):
packet = dt174b.SettingsPacket(**self.def_data)
expected = (
'18 05 09 ' # time
'05 02 07dd ' # date
'ff ff ff ' # unknown
'0a 0a ' # signaling rates
'0001 00 ' # sampling rate, trig source
'0fd2 0226 ' # temperature
'0389 0131 ' # humidity
'0364 f3c4 ' # pressure
'5a ' # unknown
'0000 2710 ' # alt, samples
)
packed_packet = packet.pack().encode('hex')
i = iter(packed_packet)
for ex in expected.split():
self.assertEquals(ex, ''.join(take(len(ex), i)))
def test_edge_vals(self):
packet = dt174b.SettingsPacket(**self.edge_data)
expected = (
'18 05 09 ' # time
'05 02 07dd ' # date
'ff ff ff ' # unknown
'ff ff ' # signaling rates
'0437 01 ' # sampling rate, trig source
'1b58 f060 ' # temp
'03e8 0000 ' # humidity
'0364 f3c4 ' # pressure
'5a ' # unknown
'd8f1 000a ' # alt, samples
)
packed_packet = packet.pack().encode('hex')
i = iter(packed_packet)
for ex in expected.split():
self.assertEquals(ex, ''.join(take(len(ex), i)))
def take(n, iterable):
"Return first n items of the iterable as a list"
return list(islice(iterable, n))
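# Example (illustrative): take(3, 'abcdef') -> ['a', 'b', 'c']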
| jaryn/dt174b | tests/test_dt174b.py | Python | gpl-3.0 | 2,865 |
class JitPyException(Exception):
pass
| fijal/jitpy | jitpy/exc.py | Python | mit | 43 |
#!/usr/bin/env python
import time
import os
from plugins import *
import CustomHandlers
# Light switch
#try:
# CapSwitch.CapSwitch(SerialEnumeration.find("capbutton1"))
#except:
# pass
try:
# Telephony services
rotdial = RotaryDial.RotaryDial(SerialEnumeration.find("rotarydial"))
# Handle telephony requests and connect calls
CustomHandlers.RotaryHandler(rotdial.ser)
except:
pass
# Interface with a light production mechanism
print(" * Connecting lightbulbs...")
test = Lightbulb.connect(1, 71, 151, Lightbulb.GattQueue("44:A6:E5:03:27:F9", "hci0"))
test2 = Lightbulb.connect(2, 89, 119, Lightbulb.GattQueue("44:A6:E5:03:27:DF", "hci1"))
print(" * Lightbulbs connected!")
# Various telemetry and electromagnetic relay control
#srb = SensorRelayBoard.SensorRelayBoard(SerialEnumeration.find("sensorrelay"))
#SensorRelayBoard.Relay(3, 1, srb.ser)
# Allow control over a Highly Terrific but Troublesome Protocol
HTTP_API.HTTP_API()
# Test temporal quantification mechanism
#CustomHandlers.Timer().start()
#test.setStatus(0)
#test2.setStatus(0)
#time.sleep(2)
#test.setParameter("color", 1)
#test.setParameter("temp", 144)
#time.sleep(5)
#test.setStatus(1)
#test2.setStatus(1)
#print test.getParameters()
#b = 100
#while b > 1:
# test.setParameter("brightness", b)
# b = b-1
# time.sleep(0.1)
#test.setParameter("color", 1)
#test.setParameter("brightness", 100)
#TestHandler()
#TestSensor()
while True:
try:
time.sleep(1)
except KeyboardInterrupt:
print('^C received, shutting down')
os._exit(0)
| moosd/HomeAutomation | main.py | Python | gpl-2.0 | 1,574 |
# Run this with:
# python setup.py install --install-lib=.
from distutils.core import setup,Extension
import RDConfig
# force the use of g++ please
from distutils import sysconfig
save_init_posix = sysconfig._init_posix
def my_init_posix():
print 'my_init_posix: changing gcc to g++'
save_init_posix()
g = sysconfig.get_config_vars()
g['CC'] = 'g++'
g['LDSHARED'] = 'g++ -shared'
g['PY_CFLAGS']= g['PY_CFLAGS'].replace('-O3','')
sysconfig._init_posix = my_init_posix
destDir = RDConfig.RDCodeDir
extDir=RDConfig.RDBaseDir+"/External"
# this is how things are done with BPLv2
boostInc = '-isystem%s'%(extDir+"/boost_1_30_0")
incDirs = []
# FIX: there's gotta be a better way of doing this
pyLibDir = '/usr/lib/python2.2/config'
boostLibDir=extDir+"/boost_1_30_0/libs/python/build/bin/libboost_python.so/gcc/debug/runtime-link-dynamic/shared-linkable-true/"
boostLib="boost_python"
libDirs=[boostLibDir,pyLibDir]
libraries=[boostLib,"python2.2"] # have to include g++ here or we get link errors with boost
compileArgs=['-ftemplate-depth-150',
'-DBOOST_PYTHON_DYNAMIC_LIB',
boostInc,
]
setup(name="crossTest",version="1.0",
ext_modules=[Extension("moduleA",["wrapA.cpp"],
include_dirs=incDirs,
library_dirs=libDirs,
libraries=libraries,
extra_compile_args=compileArgs),
Extension("moduleB",["moduleB.cpp"],
include_dirs=incDirs,
library_dirs=libDirs,
libraries=libraries,
extra_compile_args=compileArgs),
Extension("moduleC",["wrapC.cpp"],
include_dirs=incDirs,
library_dirs=libDirs,
libraries=libraries,
extra_compile_args=compileArgs)])
| rdkit/rdkit-orig | Code/Demos/boost/cross_mod_err/setup.py | Python | bsd-3-clause | 1,917 |
# coding: utf-8
import numpy as np
from ppyt import const
from ppyt.indicators import IndicatorBase
class PriceIndicator(IndicatorBase):
"""Price indicator."""
_findkey = '価格' # Unique name used to look up this indicator.
def _build_indicator(self, price_type=const.PRICE_TYPE_CLOSE, **kwds):
"""Builds the indicator data.
Args:
price_type: price type
Returns:
indicator data (numpy array)
"""
return np.array(self.stock.get_prices(price_type), dtype=np.float64)
class MovingAverageIndicator(IndicatorBase):
"""Moving average indicator."""
_findkey = '移動平均線' # Unique name used to look up this indicator.
def _build_indicator(self, price_type=const.PRICE_TYPE_CLOSE, **kwds):
"""Builds the indicator data.
Args:
price_type: price type
Returns:
indicator data (numpy array)
"""
return np.average(self.spanned_data(price_type), axis=1)
class RecentHighPriceIndicator(IndicatorBase):
"""Recent-high indicator."""
_findkey = '直近高値' # Unique name used to look up this indicator.
def _build_indicator(self, price_type=const.PRICE_TYPE_HIGH, **kwds):
"""Builds the indicator data.
Args:
price_type: price type
Returns:
indicator data (numpy array)
"""
return np.max(self.spanned_data(price_type), axis=1)
class RecentLowPriceIndicator(IndicatorBase):
_findkey = '直近安値'
def _build_indicator(self, price_type=const.PRICE_TYPE_LOW, **kwds):
"""Builds the indicator data.
Args:
price_type: price type
Returns:
indicator data (numpy array)
"""
return np.min(self.spanned_data(price_type), axis=1)
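# Illustrative lookup (a sketch: it assumes the surrounding framework
# resolves indicator classes through their _findkey values; 'find_indicator'
# is an assumed helper name, not part of this module):
#   ma_cls = find_indicator('移動平均線')
#   ma = ma_cls(stock=stock, span=25)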
| yusukemurayama/ppytrading | ppyt/indicators/basic_indicators.py | Python | mit | 2,073 |
# Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
import logging
from scap.model.oval_5.defs.windows.TestType import TestType
logger = logging.getLogger(__name__)
class WmiTestElement(TestType):
MODEL_MAP = {
'tag_name': 'wmi_test',
}
| cjaymes/pyscap | src/scap/model/oval_5/defs/windows/WmiTestElement.py | Python | gpl-3.0 | 884 |
from .cli import CLI
def main():
cli = CLI(None)
cli.execute()
if __name__ == '__main__':
main()
| srz-zumix/wandbox-api | wandbox/__main__.py | Python | mit | 113 |
"""Config flow for flo integration."""
from aioflo import async_get_api
from aioflo.errors import RequestError
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import DOMAIN, LOGGER # pylint:disable=unused-import
DATA_SCHEMA = vol.Schema({"username": str, "password": str})
async def validate_input(hass: core.HomeAssistant, data):
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
session = async_get_clientsession(hass)
try:
api = await async_get_api(
data[CONF_USERNAME], data[CONF_PASSWORD], session=session
)
except RequestError as request_error:
LOGGER.error("Error connecting to the Flo API: %s", request_error)
raise CannotConnect from request_error
user_info = await api.user.get_info()
a_location_id = user_info["locations"][0]["id"]
location_info = await api.location.get_info(a_location_id)
return {"title": location_info["nickname"]}
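# Example (illustrative): for an account whose first location is nicknamed
# "Home", validate_input returns {"title": "Home"}.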
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for flo."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
await self.async_set_unique_id(user_input[CONF_USERNAME])
self._abort_if_unique_id_configured()
try:
info = await validate_input(self.hass, user_input)
return self.async_create_entry(title=info["title"], data=user_input)
except CannotConnect:
errors["base"] = "cannot_connect"
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
| tboyce021/home-assistant | homeassistant/components/flo/config_flow.py | Python | apache-2.0 | 2,110 |
import binascii
import datetime
import json
import os
from uuid import uuid4
from flask import Blueprint
from flask import flash, url_for, redirect, request, jsonify, Markup, render_template
from flask.ext.login import current_user
from flask.ext.restplus import abort
from app import db
from app.helpers.auth import AuthManager
from app.helpers.data import DataManager, save_to_db, record_activity, delete_from_db, restore_event
from app.helpers.data_getter import DataGetter
from app.helpers.helpers import fields_not_empty, string_empty, get_count
from app.helpers.helpers import send_event_publish
from app.helpers.helpers import uploaded_file
from app.helpers.invoicing import InvoicingManager
from app.helpers.microservices import AndroidAppCreator, WebAppCreator
from app.helpers.permission_decorators import is_organizer, is_super_admin, can_access
from app.helpers.storage import upload_local, UPLOAD_PATHS
from app.helpers.ticketing import TicketingManager
from app.helpers.wizard.clone import create_event_copy
from app.helpers.wizard.event import get_event_json, save_event_from_json
from app.helpers.wizard.helpers import get_current_timezone
from app.helpers.wizard.sessions_speakers import get_microlocations_json, get_session_types_json, get_tracks_json, \
save_session_speakers
from app.helpers.wizard.sponsors import get_sponsors_json, save_sponsors_from_json
from app.models.call_for_papers import CallForPaper
from app.settings import get_settings
def get_random_hash():
return binascii.b2a_hex(os.urandom(20))
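# Example (illustrative): get_random_hash() returns 40 hexadecimal
# characters, e.g. '3f9a1c...', used as a hard-to-guess call-for-papers hash.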
events = Blueprint('events', __name__, url_prefix='/events')
@events.route('/')
def index_view():
live_events = DataGetter.get_live_events_of_user()
draft_events = DataGetter.get_draft_events_of_user()
past_events = DataGetter.get_past_events_of_user()
all_events = DataGetter.get_all_events_of_user()
imported_events = DataGetter.get_imports_by_user()
all_ticket_stats = {}
for event in all_events:
all_ticket_stats[event.id] = TicketingManager.get_ticket_stats(event)
if not AuthManager.is_verified_user():
flash(Markup('Your account is unverified. '
'Please verify by clicking on the confirmation link that has been emailed to you.'
'<br>Did not get the email? Please <a href="/resend_email/" class="alert-link"> '
'click here to resend the confirmation.</a>'))
return render_template('gentelella/users/events/index.html',
live_events=live_events,
draft_events=draft_events,
past_events=past_events,
all_events=all_events,
imported_events=imported_events,
all_ticket_stats=all_ticket_stats)
@events.route('/create/', defaults={'step': ''})
@events.route('/create/<step>')
def create_view(step):
if step != '':
return redirect(url_for('.create_view', step=''))
hash = get_random_hash()
if CallForPaper.query.filter_by(hash=hash).all():
hash = get_random_hash()
current_timezone = get_current_timezone()
return render_template(
'gentelella/users/events/wizard/wizard.html',
current_date=datetime.datetime.now(),
event_types=DataGetter.get_event_types(),
event_licences=DataGetter.get_event_licences(),
event_topics=DataGetter.get_event_topics(),
event_sub_topics=DataGetter.get_event_subtopics(),
timezones=DataGetter.get_all_timezones(),
cfs_hash=hash,
current_timezone=current_timezone,
payment_countries=DataGetter.get_payment_countries(),
payment_currencies=DataGetter.get_payment_currencies(),
included_settings=get_module_settings())
@events.route('/create/files/image/', methods=['POST'])
def create_image_upload():
if request.method == 'POST':
image = request.form['image']
if image:
image_file = uploaded_file(file_content=image)
image_url = upload_local(
image_file,
UPLOAD_PATHS['temp']['image'].format(uuid=uuid4())
)
return jsonify({'status': 'ok', 'image_url': image_url})
else:
return jsonify({'status': 'no_image'})
@events.route('/<event_id>/')
@can_access
def details_view(event_id):
event = DataGetter.get_event(event_id)
checklist = {"": ""}
if fields_not_empty(event,
['name', 'start_time', 'end_time', 'location_name',
'organizer_name', 'organizer_description']):
checklist["1"] = 'success'
elif fields_not_empty(event, ['name', 'start_time', 'end_time']):
checklist["1"] = 'missing_some'
else:
checklist["1"] = 'missing_main'
call_for_speakers = DataGetter.get_call_for_papers(event_id).first()
if call_for_speakers:
if fields_not_empty(call_for_speakers,
['announcement', 'start_date', 'end_date']):
checklist["4"] = "success"
elif fields_not_empty(call_for_speakers,
['start_date', 'end_date']):
checklist["4"] = "missing_some"
else:
checklist["4"] = 'missing_main'
else:
checklist["4"] = "optional"
sponsors = DataGetter.get_sponsors(event_id).all()
if not sponsors:
checklist["2"] = 'optional'
else:
for sponsor in sponsors:
if fields_not_empty(sponsor, ['name', 'description', 'url', 'level', 'logo']):
checklist["2"] = 'success'
break
else:
checklist["2"] = 'missing_some'
if event.has_session_speakers:
session_types = DataGetter.get_session_types_by_event_id(event_id)
tracks = DataGetter.get_tracks(event_id)
microlocations = DataGetter.get_microlocations(event_id)
if not session_types and not tracks and not microlocations:
checklist["3"] = 'optional'
elif not session_types or not tracks or not microlocations:
checklist["3"] = 'missing_main'
else:
for session_type in session_types:
if fields_not_empty(session_type, ['name', 'length']):
checklist["3"] = 'success'
break
else:
checklist["3"] = 'missing_some'
for microlocation in microlocations:
if fields_not_empty(microlocation, ['name']):
checklist["3"] = 'success'
break
else:
checklist["3"] = 'missing_some'
for track in tracks:
if fields_not_empty(track, ['name', 'color']):
checklist["3"] = 'success'
break
else:
checklist["3"] = 'missing_some'
checklist["5"] = 'success'
else:
checklist["3"] = 'optional'
checklist["4"] = 'optional'
checklist["5"] = 'optional'
if not current_user.can_publish_event() and not AuthManager.is_verified_user():
flash(Markup('To make your event live, please verify your email by '
'clicking on the confirmation link that has been emailed to you.<br>'
'Did not get the email? Please <a href="/resend_email/" class="alert-link"> click here to '
'resend the confirmation.</a>'))
sessions = {'pending': get_count(DataGetter.get_sessions_by_state_and_event_id('pending', event_id)),
'accepted': get_count(DataGetter.get_sessions_by_state_and_event_id('accepted', event_id)),
'rejected': get_count(DataGetter.get_sessions_by_state_and_event_id('rejected', event_id)),
'draft': get_count(DataGetter.get_sessions_by_state_and_event_id('draft', event_id))}
return render_template('gentelella/users/events/details/details.html',
event=event,
checklist=checklist,
sessions=sessions,
settings=get_settings())
@events.route('/<event_id>/edit/', defaults={'step': ''})
@events.route('/<event_id>/edit/<step>/')
@can_access
def edit_view(event_id, step=''):
event = DataGetter.get_event(event_id)
custom_forms = DataGetter.get_custom_form_elements(event_id)
speaker_form = json.loads(custom_forms.speaker_form)
session_form = json.loads(custom_forms.session_form)
call_for_speakers = DataGetter.get_call_for_papers(event_id).first()
preselect = []
required = []
for session_field in session_form:
if session_form[session_field]['include'] == 1:
preselect.append(session_field)
if session_form[session_field]['require'] == 1:
required.append(session_field)
for speaker_field in speaker_form:
if speaker_form[speaker_field]['include'] == 1:
preselect.append(speaker_field)
if speaker_form[speaker_field]['require'] == 1:
required.append(speaker_field)
hash = get_random_hash()
if CallForPaper.query.filter_by(hash=hash).all():
hash = get_random_hash()
current_timezone = get_current_timezone()
seed = {
'event': get_event_json(event),
'sponsors': get_sponsors_json(event_id),
'microlocations': get_microlocations_json(event_id),
'sessionTypes': get_session_types_json(event_id),
'tracks': get_tracks_json(event_id),
'callForSpeakers': call_for_speakers.serialize if call_for_speakers else None
}
return render_template('gentelella/users/events/wizard/wizard.html',
event=event,
step=step,
seed=json.dumps(seed),
required=required,
preselect=preselect,
current_date=datetime.datetime.now(),
event_types=DataGetter.get_event_types(),
event_licences=DataGetter.get_event_licences(),
event_topics=DataGetter.get_event_topics(),
event_sub_topics=DataGetter.get_event_subtopics(),
timezones=DataGetter.get_all_timezones(),
call_for_speakers=call_for_speakers,
cfs_hash=hash,
current_timezone=current_timezone,
payment_countries=DataGetter.get_payment_countries(),
payment_currencies=DataGetter.get_payment_currencies(),
included_settings=get_module_settings(),
session_types=get_session_types_json(event_id),
microlocations=get_microlocations_json(event_id))
@events.route('/<event_id>/trash/')
@can_access
def trash_view(event_id):
if request.method == "GET":
DataManager.trash_event(event_id)
flash("Your event has been deleted.", "danger")
if current_user.is_super_admin:
return redirect(url_for('sadmin_events.index_view'))
return redirect(url_for('.index_view'))
@events.route('/<event_id>/delete/')
@is_super_admin
def delete_view(event_id):
if request.method == "GET":
DataManager.delete_event(event_id)
flash("Your event has been permanently deleted.", "danger")
return redirect(url_for('sadmin_events.index_view'))
@events.route('/<event_id>/restore/')
@is_super_admin
def restore_event_view(event_id):
restore_event(event_id)
flash("Your event has been restored", "success")
return redirect(url_for('sadmin_events.index_view'))
@events.route('/<int:event_id>/publish/')
@can_access
def publish_event(event_id):
event = DataGetter.get_event(event_id)
if string_empty(event.location_name):
flash(
"Your event was saved. To publish your event please review the highlighted fields below.",
"warning")
return redirect(url_for('.edit_view',
event_id=event.id) + "#highlight=location_name")
if not current_user.can_publish_event():
flash("You don't have permission to publish event.")
return redirect(url_for('.details_view', event_id=event_id))
event.state = 'Published'
save_to_db(event, 'Event Published')
organizers = DataGetter.get_user_event_roles_by_role_name(event_id, 'organizer')
speakers = DataGetter.get_user_event_roles_by_role_name(event_id, 'speaker')
link = url_for('.details_view', event_id=event_id, _external=True)
for organizer in organizers:
send_event_publish(organizer.user.email, event.name, link)
for speaker in speakers:
send_event_publish(speaker.user.email, event.name, link)
record_activity('publish_event', event_id=event.id, status='published')
flash("Your event has been published.", "success")
return redirect(url_for('.details_view', event_id=event_id))
@events.route('/<int:event_id>/unpublish/')
@can_access
def unpublish_event(event_id):
event = DataGetter.get_event(event_id)
event.state = 'Draft'
save_to_db(event, 'Event Unpublished')
record_activity('publish_event', event_id=event.id, status='un-published')
flash("Your event has been unpublished.", "warning")
return redirect(url_for('.details_view', event_id=event_id))
@events.route('/<int:event_id>/generate_android_app/', methods=['POST'])
@can_access
def generate_android_app(event_id):
AndroidAppCreator(event_id).create()
return redirect(url_for('.details_view', event_id=event_id))
@events.route('/<int:event_id>/generate_web_app/', methods=['POST'])
@can_access
def generate_web_app(event_id):
WebAppCreator(event_id).create()
return redirect(url_for('.details_view', event_id=event_id))
@events.route('/<int:event_id>/restore/<int:version_id>/')
@can_access
def restore_event_revision(event_id, version_id):
event = DataGetter.get_event(event_id)
version = event.versions[version_id]
version.revert()
db.session.commit()
flash("Your event has been restored.", "success")
return redirect(url_for('.details_view', event_id=event_id))
@events.route('/<int:event_id>/copy/')
@can_access
def copy_event(event_id):
event = create_event_copy(event_id)
return redirect(url_for('.edit_view', event_id=event.id))
@events.route('/<int:event_id>/role-invite/<hash>/', methods=['GET', 'POST'])
def user_role_invite(event_id, hash):
"""Accept User-Role invite for the event.
"""
event = DataGetter.get_event(event_id)
user = current_user
role_invite = DataGetter.get_event_role_invite(event.id, hash,
email=user.email)
if role_invite:
if role_invite.has_expired():
delete_from_db(role_invite, 'Deleted RoleInvite')
flash('Sorry, the invitation link has expired.', 'error')
return redirect(url_for('.details_view', event_id=event.id))
if user.has_role(event.id):
flash('You have already been assigned a Role in the Event.', 'warning')
return redirect(url_for('events.details_view', event_id=event_id))
role = role_invite.role
data = dict()
data['user_email'] = role_invite.email
data['user_role'] = role.name
DataManager.add_role_to_event(data, event.id)
# Delete Role Invite after it has been accepted
delete_from_db(role_invite, 'Deleted RoleInvite')
flash('You have been added as a %s' % role.title_name)
return redirect(url_for('.details_view', event_id=event.id))
else:
abort(404)
@events.route('/<int:event_id>/role-invite/decline/<hash>/', methods=['GET', 'POST'])
def user_role_invite_decline(event_id, hash):
"""Decline User-Role invite for the event.
"""
event = DataGetter.get_event(event_id)
user = current_user
role_invite = DataGetter.get_event_role_invite(event.id, hash,
email=user.email)
if role_invite:
if role_invite.has_expired():
delete_from_db(role_invite, 'Deleted RoleInvite')
flash('Sorry, the invitation link has expired.', 'error')
return redirect(url_for('.details_view', event_id=event.id))
DataManager.decline_role_invite(role_invite)
flash('You have declined the role invite.')
return redirect(url_for('.details_view', event_id=event.id))
else:
abort(404)
@events.route('/<int:event_id>/role-invite/delete/<hash>/', methods=['GET', 'POST'])
@is_organizer
def delete_user_role_invite(event_id, hash):
event = DataGetter.get_event(event_id)
role_invite = DataGetter.get_event_role_invite(event.id, hash)
if role_invite:
delete_from_db(role_invite, 'Deleted RoleInvite')
flash('Invitation link has been successfully deleted.')
return redirect(url_for('.details_view', event_id=event.id))
else:
abort(404)
@events.route('/discount/apply/', methods=['POST'])
def apply_discount_code():
discount_code = request.form['discount_code']
discount_code = InvoicingManager.get_discount_code(discount_code)
if discount_code:
if discount_code.is_active:
if InvoicingManager.get_discount_code_used_count(discount_code.id) >= discount_code.tickets_number:
return jsonify({'status': 'error', 'message': 'Expired discount code'})
return jsonify({'status': 'ok', 'discount_code': discount_code.serialize})
else:
return jsonify({'status': 'error', 'message': 'Expired discount code'})
else:
return jsonify({'status': 'error', 'message': 'Invalid discount code'})
@events.route('/save/<string:what>/', methods=['POST'])
def save_event_from_wizard(what):
data = request.get_json()
if 'event_id' not in data or not data['event_id']:
event_id = None
else:
event_id = data['event_id']
if not current_user.is_staff and not current_user.is_organizer(event_id):
abort(403)
if what == 'event':
return jsonify(save_event_from_json(data, event_id))
elif what == 'sponsors':
return jsonify(save_sponsors_from_json(data))
elif what == 'sessions-tracks-rooms':
return jsonify(save_session_speakers(data))
elif what == 'all':
response = save_event_from_json(data['event'], event_id)
save_sponsors_from_json(data['sponsors'], response['event_id'])
save_session_speakers(data['session_speakers'], response['event_id'])
return jsonify(response)
else:
abort(404)
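# Illustrative shape of the wizard payload handled above (field values other
# than the 'event_id', 'event', 'sponsors' and 'session_speakers' keys are
# hypothetical, not taken from the project's docs). For what == 'all' the
# client is assumed to POST something like:
#
#   {
#       "event_id": 42,
#       "event": {"name": "PyCon Demo", "start_time": "..."},
#       "sponsors": [{"name": "Acme"}],
#       "session_speakers": {"sessions": [], "speakers": []}
#   }
#
# save_event_from_json() is expected to return a dict containing 'event_id',
# which the sponsor and session/speaker savers then reuse.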
def get_module_settings():
included_setting = []
module = DataGetter.get_module()
if module is not None:
if module.ticket_include:
included_setting.append('ticketing')
if module.payment_include:
included_setting.append('payments')
if module.donation_include:
included_setting.append('donations')
return included_setting
| arpitn30/open-event-orga-server | app/views/users/events.py | Python | gpl-3.0 | 19,262 |
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import print_function
import os
import glob
import json
import numpy as np
from PIL import Image
from zipfile import ZipFile
import math
from neon.util.persist import ensure_dirs_exist
from tqdm import tqdm
def convert_annot_to_json(path, im_path, out_path, difficult):
"""
    Converts the KITTI annotations to a json file.
    Uses the reference below for the KITTI dataset:
OO representation of label format used in Kitti dataset.
Description of fields from Kitti dataset dev kit: (link)[]
The label files contain the following information, which can be read and
written using the matlab tools (readLabels.m, writeLabels.m) provided within
this devkit. All values (numerical or strings) are separated via spaces,
each row corresponds to one object. The 15 columns represent:
#Values Name Description
----------------------------------------------------------------------------
1 type Describes the type of object: 'Car', 'Van', 'Truck',
'Pedestrian', 'Person_sitting', 'Cyclist', 'Tram',
'Misc' or 'DontCare'
1 truncated Float from 0 (non-truncated) to 1 (truncated), where
truncated refers to the object leaving image boundaries
1 occluded Integer (0,1,2,3) indicating occlusion state:
0 = fully visible, 1 = partly occluded
2 = largely occluded, 3 = unknown
1 alpha Observation angle of object, ranging [-pi..pi]
4 bbox 2D bounding box of object in the image (0-based index):
contains left, top, right, bottom pixel coordinates
3 dimensions 3D object dimensions: height, width, length (in meters)
3 location 3D object location x,y,z in camera coordinates (in meters)
1 rotation_y Rotation ry around Y-axis in camera coordinates [-pi..pi]
1 score Only for results: Float, indicating confidence in
detection, needed for p/r curves, higher is better.
Arguments:
path (string): path to KITTI annotation file
im_path (string): path to image
out_path (string): path to save the json file
difficult (bool): include difficult objects
"""
with open(path) as f:
labels = f.readlines()
# start empty dictionary
annot = {'object': []}
# load image
im = np.array(Image.open(im_path))
(h, w, c) = im.shape
annot['size'] = {'depth': c, 'height': h, 'width': w}
for label in labels:
vals = label.split()
type = vals[0]
truncated = float(vals[1])
occluded = int(vals[2])
bbox = tuple([float(x) for x in vals[4:8]])
bbox_int = tuple([int(math.floor(x)) for x in bbox])
if type == 'DontCare':
assert truncated == -1
assert occluded == -1
else:
assert occluded in (0, 1, 2, 3)
diff = truncated > 0.5 or occluded == 2
# add object to annotation
obj = {'bndbox': {'xmin': bbox_int[0], 'ymin': bbox_int[1],
'xmax': bbox_int[2], 'ymax': bbox_int[3]},
'difficult': difficult,
'name': type,
'truncated': truncated > 0.5,
'occluded': occluded
}
if not diff or difficult:
annot['object'].append(obj)
# print "Saving to {}".format(out_path)
with open(out_path, 'w') as f:
json.dump(annot, f, indent=4)
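# Worked example (values are illustrative, not taken from the real dataset):
# a KITTI label line such as
#
#   Car 0.00 0 -1.58 587.01 173.33 614.12 200.12 1.65 1.67 3.64 -0.65 1.71 46.70 -1.59
#
# has truncated == 0.00 and occluded == 0, so diff is False and the object is
# written to both the normal and the 'difficult' JSON files as:
#
#   {"bndbox": {"xmin": 587, "ymin": 173, "xmax": 614, "ymax": 200},
#    "difficult": false, "name": "Car", "truncated": false, "occluded": 0}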
def ingest_kitti(input_dir, out_dir, train_percent=90, overwrite=False):
"""
    Ingests the KITTI dataset. Performs the following ops:
    0. Unzips the files into the output directory.
1. Convert annotations to json format
2. Split the training data into train and validation sets
3. Write manifest file
4. Write configuration file
Arguments:
input_dir (string): path to folder with KITTI zip files.
out_dir (string): path to unzip KITTI data
train_percent (float): percent of data to use for training.
overwrite (bool): overwrite existing files
"""
# define paths
data_dir = ensure_dirs_exist(os.path.join(out_dir, 'kitti'))
train_manifest = os.path.join(data_dir, 'train.csv')
val_manifest = os.path.join(data_dir, 'val.csv')
if not overwrite and os.path.exists(train_manifest) and os.path.exists(val_manifest):
print("""Found existing manfiest files, skipping ingest,
Use --overwrite to rerun ingest anyway.""")
return (train_manifest, val_manifest)
# unzip files to output directory
zipfiles = [os.path.join(input_dir, zipfile) for
zipfile in ['data_object_image_2.zip', 'data_object_label_2.zip']]
for file in zipfiles:
with ZipFile(file, 'r') as zf:
print("Extracting {} to {}".format(file, data_dir))
zf.extractall(data_dir)
# get list of images
img_path = os.path.join(data_dir, 'training', 'image_2')
annot_path = os.path.join(data_dir, 'training', 'label_2')
images = [os.path.splitext(os.path.basename(im))[0] for
im in glob.glob(os.path.join(img_path, '*.png'))]
print("Found {} images".format(len(images)))
    assert len(images) > 0, "Did not find any images. Check your input_dir."
# for each image, convert the annotation to json
# create folder names for annotations
annot_save_dir = ensure_dirs_exist(os.path.join(data_dir, 'training', 'label_2-json/'))
annot_save_dir_difficult = ensure_dirs_exist(os.path.join(
data_dir, 'training', 'label_2-json-difficult/'))
print("Writing annotations to: {} and {}".format(annot_save_dir, annot_save_dir_difficult))
for im in tqdm(images):
path = os.path.join(annot_path, im + '.txt')
im_path = os.path.join(img_path, im + '.png')
assert os.path.exists(im_path)
out_path = os.path.join(annot_save_dir, im + '.json')
convert_annot_to_json(path, im_path, out_path, difficult=False)
out_path = os.path.join(annot_save_dir_difficult, im + '.json')
convert_annot_to_json(path, im_path, out_path, difficult=True)
# shuffle files and split into training and validation set.
np.random.seed(0)
np.random.shuffle(images)
train_count = (len(images) * train_percent) // 100
train = images[:train_count]
val = images[train_count:]
# write manifest files
create_manifest(train_manifest, train, annot_save_dir, img_path, data_dir)
create_manifest(val_manifest, val, annot_save_dir_difficult, img_path, data_dir)
# write configuration file
config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'kitti.cfg')
with open(config_path, 'w') as f:
f.write('manifest = [train:{}, val:{}]\n'.format(train_manifest, val_manifest))
f.write('manifest_root = {}\n'.format(data_dir))
f.write('epochs = 14\n')
f.write('height = 375\n')
f.write('width = 1242\n')
f.write('batch_size = 1\n')
print("Wrote config file to: {}".format(config_path))
def create_manifest(manifest_path, index_list, annot_dir, image_dir, root_dir):
records = [('@FILE', 'FILE')]
for tag in index_list:
image = os.path.join(image_dir, tag + '.png')
annot = os.path.join(annot_dir, tag + '.json')
assert os.path.exists(image), 'Path {} not found'.format(image)
assert os.path.exists(annot), 'Path {} not found'.format(annot)
records.append((os.path.relpath(image, root_dir),
os.path.relpath(annot, root_dir)))
print("Writing manifest file to: {}".format(manifest_path))
np.savetxt(manifest_path, records, fmt='%s\t%s')
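# For reference, create_manifest() writes a two-column, tab-separated file
# whose first row is the '@FILE<TAB>FILE' header, e.g. (paths illustrative):
#
#   @FILE                         FILE
#   training/image_2/000000.png   training/label_2-json/000000.json
#   training/image_2/000007.png   training/label_2-json/000007.json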
if __name__ == '__main__':
from configargparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('--input_dir', required=True, help='path to dir with KITTI zip files.')
parser.add_argument('--output_dir', required=True, help='path to unzip data.')
parser.add_argument('--overwrite', action='store_true', help='overwrite files')
    parser.add_argument('--training_pct', type=int, default=90, help='percent of data used for training.')
args = parser.parse_args()
ingest_kitti(args.input_dir, args.output_dir, args.training_pct, overwrite=args.overwrite)
| NervanaSystems/neon | examples/faster-rcnn/ingest_kitti.py | Python | apache-2.0 | 9,191 |
import matplotlib.pyplot as plt
import cosima_cookbook as cc
from tqdm import tqdm_notebook
import IPython.display
def sea_surface_temperature(expts=[], resolution=1):
"""
    Plot a map of SST from the last decade of the run.
"""
if not isinstance(expts, list):
expts = [expts]
# computing
results = []
for expt in tqdm_notebook(expts, leave=False, desc="experiments"):
SST, SSTdiff = cc.diagnostics.sea_surface_temperature(expt, resolution)
result = {"SST": SST, "SSTdiff": SSTdiff, "expt": expt}
results.append(result)
IPython.display.clear_output()
# plotting
for result in results:
SST = result["SST"]
SSTdiff = result["SSTdiff"]
expt = result["expt"]
plt.figure(figsize=(12, 4))
plt.subplot(121)
SST.plot()
plt.title(expt)
plt.subplot(122)
SSTdiff.plot(robust=True)
plt.title(expt)
def sea_surface_salinity(expts=[], resolution=1):
"""
    Plot a map of SSS from the last decade of the run.
"""
if not isinstance(expts, list):
expts = [expts]
# computing
results = []
for expt in tqdm_notebook(expts, leave=False, desc="experiments"):
SSS, SSSdiff = cc.diagnostics.sea_surface_salinity(expt, resolution)
result = {"SSS": SSS, "SSSdiff": SSSdiff, "expt": expt}
results.append(result)
IPython.display.clear_output()
# plotting
for result in results:
SSS = result["SSS"]
SSSdiff = result["SSSdiff"]
expt = result["expt"]
plt.figure(figsize=(12, 4))
plt.subplot(121)
SSS.plot()
plt.title(expt)
plt.subplot(122)
SSSdiff.plot(robust=True)
plt.title(expt)
def mixed_layer_depth(expts=[]):
"""
    Plot a map of MLD from the last decade of the run.
"""
if not isinstance(expts, list):
expts = [expts]
# computing
results = []
for expt in tqdm_notebook(expts, leave=False, desc="experiments"):
MLD = cc.diagnostics.mixed_layer_depth(expt)
result = {"MLD": MLD, "expt": expt}
results.append(result)
IPython.display.clear_output()
# plotting
for result in results:
MLD = result["MLD"]
expt = result["expt"]
plt.figure(figsize=(6, 4))
MLD.plot()
plt.title(expt)
| OceansAus/cosima-cookbook | cosima_cookbook/plots/maps.py | Python | apache-2.0 | 2,361 |
# (C) British Crown Copyright 2011 - 2017, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
import cartopy.io.ogc_clients as ogc
from cartopy.io.ogc_clients import _OWSLIB_AVAILABLE
try:
from owslib.wfs import WebFeatureService
from owslib.wms import WebMapService
from owslib.wmts import ContentMetadata, WebMapTileService
except ImportError:
WebMapService = None
ContentMetadata = None
WebMapTileService = None
import unittest
import cartopy.crs as ccrs
try:
from unittest import mock
except ImportError:
import mock
import numpy as np
RESOLUTION = (30, 30)
@unittest.skipIf(not _OWSLIB_AVAILABLE, 'OWSLib is unavailable.')
class test_WMSRasterSource(unittest.TestCase):
URI = 'http://vmap0.tiles.osgeo.org/wms/vmap0'
layer = 'basic'
layers = ['basic', 'ocean']
projection = ccrs.PlateCarree()
def test_string_service(self):
source = ogc.WMSRasterSource(self.URI, self.layer)
if isinstance(WebMapService, type):
# OWSLib < 0.13.0
self.assertIsInstance(source.service, WebMapService)
else:
# OWSLib >= 0.13.0: WebMapService is a function that creates
# instances of these two classes.
from owslib.map.wms111 import WebMapService_1_1_1
from owslib.map.wms130 import WebMapService_1_3_0
self.assertIsInstance(source.service,
(WebMapService_1_1_1, WebMapService_1_3_0))
self.assertIsInstance(source.layers, list)
self.assertEqual(source.layers, [self.layer])
def test_wms_service_instance(self):
service = WebMapService(self.URI)
source = ogc.WMSRasterSource(service, self.layer)
self.assertIs(source.service, service)
def test_multiple_layers(self):
source = ogc.WMSRasterSource(self.URI, self.layers)
self.assertEqual(source.layers, self.layers)
def test_no_layers(self):
msg = 'One or more layers must be defined.'
with self.assertRaisesRegexp(ValueError, msg):
ogc.WMSRasterSource(self.URI, [])
def test_extra_kwargs_empty(self):
source = ogc.WMSRasterSource(self.URI, self.layer,
getmap_extra_kwargs={})
self.assertEqual(source.getmap_extra_kwargs, {})
def test_extra_kwargs_None(self):
source = ogc.WMSRasterSource(self.URI, self.layer,
getmap_extra_kwargs=None)
self.assertEqual(source.getmap_extra_kwargs, {'transparent': True})
def test_extra_kwargs_non_empty(self):
kwargs = {'another': 'kwarg'}
source = ogc.WMSRasterSource(self.URI, self.layer,
getmap_extra_kwargs=kwargs)
self.assertEqual(source.getmap_extra_kwargs, kwargs)
def test_supported_projection(self):
source = ogc.WMSRasterSource(self.URI, self.layer)
source.validate_projection(self.projection)
def test_unsupported_projection(self):
source = ogc.WMSRasterSource(self.URI, self.layer)
# Patch dict of known Proj->SRS mappings so that it does
# not include any of the available SRSs from the WMS.
with mock.patch.dict('cartopy.io.ogc_clients._CRS_TO_OGC_SRS',
{ccrs.OSNI(): 'EPSG:29901'},
clear=True):
msg = 'not available'
with self.assertRaisesRegexp(ValueError, msg):
source.validate_projection(ccrs.Miller())
def test_fetch_img(self):
source = ogc.WMSRasterSource(self.URI, self.layer)
extent = [-10, 10, 40, 60]
located_image, = source.fetch_raster(self.projection, extent,
RESOLUTION)
img = np.array(located_image.image)
self.assertEqual(img.shape, RESOLUTION + (4,))
# No transparency in this image.
self.assertEqual(img[:, :, 3].min(), 255)
self.assertEqual(extent, located_image.extent)
def test_fetch_img_different_projection(self):
source = ogc.WMSRasterSource(self.URI, self.layer)
extent = [-570000, 5100000, 870000, 3500000]
located_image, = source.fetch_raster(ccrs.Orthographic(), extent,
RESOLUTION)
img = np.array(located_image.image)
self.assertEqual(img.shape, RESOLUTION + (4,))
def test_multi_image_result(self):
source = ogc.WMSRasterSource(self.URI, self.layer)
crs = ccrs.PlateCarree(central_longitude=180)
extent = [-15, 25, 45, 85]
located_images = source.fetch_raster(crs, extent, RESOLUTION)
self.assertEqual(len(located_images), 2)
def test_float_resolution(self):
# The resolution (in pixels) should be cast to ints.
source = ogc.WMSRasterSource(self.URI, self.layer)
extent = [-570000, 5100000, 870000, 3500000]
located_image, = source.fetch_raster(self.projection, extent,
[19.5, 39.1])
img = np.array(located_image.image)
self.assertEqual(img.shape, (40, 20, 4))
@unittest.skipIf(not _OWSLIB_AVAILABLE, 'OWSLib is unavailable.')
class test_WMTSRasterSource(unittest.TestCase):
URI = 'https://map1c.vis.earthdata.nasa.gov/wmts-geo/wmts.cgi'
layer_name = 'VIIRS_CityLights_2012'
projection = ccrs.PlateCarree()
def test_string_service(self):
source = ogc.WMTSRasterSource(self.URI, self.layer_name)
self.assertIsInstance(source.wmts, WebMapTileService)
self.assertIsInstance(source.layer, ContentMetadata)
self.assertEqual(source.layer.name, self.layer_name)
def test_wmts_service_instance(self):
service = WebMapTileService(self.URI)
source = ogc.WMTSRasterSource(service, self.layer_name)
self.assertIs(source.wmts, service)
def test_native_projection(self):
source = ogc.WMTSRasterSource(self.URI, self.layer_name)
source.validate_projection(self.projection)
def test_non_native_projection(self):
source = ogc.WMTSRasterSource(self.URI, self.layer_name)
source.validate_projection(ccrs.Miller())
def test_unsupported_projection(self):
source = ogc.WMTSRasterSource(self.URI, self.layer_name)
with mock.patch('cartopy.io.ogc_clients._URN_TO_CRS', {}):
msg = 'Unable to find tile matrix for projection.'
with self.assertRaisesRegexp(ValueError, msg):
source.validate_projection(ccrs.Miller())
def test_fetch_img(self):
source = ogc.WMTSRasterSource(self.URI, self.layer_name)
extent = [-10, 10, 40, 60]
located_image, = source.fetch_raster(self.projection, extent,
RESOLUTION)
img = np.array(located_image.image)
self.assertEqual(img.shape, (512, 512, 4))
# No transparency in this image.
self.assertEqual(img[:, :, 3].min(), 255)
self.assertEqual((-180.0, 107.99999999999994,
-197.99999999999994, 90.0), located_image.extent)
def test_fetch_img_reprojected(self):
source = ogc.WMTSRasterSource(self.URI, self.layer_name)
extent = [-20, -1, 48, 50]
# NB single result in this case.
located_image, = source.fetch_raster(ccrs.NorthPolarStereo(), extent,
(30, 30))
# Check image array is as expected (more or less).
img = np.array(located_image.image)
self.assertEqual(img.shape, (42, 42, 4))
# When reprojected, extent is exactly what you asked for.
self.assertEqual(located_image.extent, extent)
def test_fetch_img_reprojected_twoparts(self):
source = ogc.WMTSRasterSource(self.URI, self.layer_name)
extent = [-10, 12, 48, 50]
images = source.fetch_raster(ccrs.NorthPolarStereo(), extent, (30, 30))
# Check for 2 results in this case.
self.assertEqual(len(images), 2)
im1, im2 = images
# Check image arrays is as expected (more or less).
self.assertEqual(np.array(im1.image).shape, (42, 42, 4))
self.assertEqual(np.array(im2.image).shape, (42, 42, 4))
# When reprojected, extent is exactly what you asked for.
self.assertEqual(im1.extent, extent)
self.assertEqual(im2.extent, extent)
@unittest.skipIf(not _OWSLIB_AVAILABLE, 'OWSLib is unavailable.')
class test_WFSGeometrySource(unittest.TestCase):
URI = 'https://nsidc.org/cgi-bin/atlas_south?service=WFS'
typename = 'land_excluding_antarctica'
native_projection = ccrs.Stereographic(central_latitude=-90,
true_scale_latitude=-71)
def test_string_service(self):
service = WebFeatureService(self.URI)
source = ogc.WFSGeometrySource(self.URI, self.typename)
self.assertIsInstance(source.service, type(service))
self.assertEqual(source.features, [self.typename])
def test_wfs_service_instance(self):
service = WebFeatureService(self.URI)
source = ogc.WFSGeometrySource(service, self.typename)
self.assertIs(source.service, service)
self.assertEqual(source.features, [self.typename])
def test_default_projection(self):
source = ogc.WFSGeometrySource(self.URI, self.typename)
self.assertEqual(source.default_projection(), self.native_projection)
def test_unsupported_projection(self):
source = ogc.WFSGeometrySource(self.URI, self.typename)
with self.assertRaisesRegexp(ValueError,
'Geometries are only available '
'in projection'):
source.fetch_geometries(ccrs.PlateCarree(), [-180, 180, -90, 90])
def test_fetch_geometries(self):
source = ogc.WFSGeometrySource(self.URI, self.typename)
# Extent covering New Zealand.
extent = (-99012, 1523166, -6740315, -4589165)
geoms = source.fetch_geometries(self.native_projection, extent)
self.assertEqual(len(geoms), 23)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-sv', '--with-doctest'], exit=False)
| zak-k/cartopy | lib/cartopy/tests/io/test_ogc_clients.py | Python | lgpl-3.0 | 10,964 |
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrappers to make getting and using hardware entities simpler."""
from sqlalchemy.inspection import inspect
from sqlalchemy.orm import aliased, contains_eager, joinedload
from sqlalchemy.sql import and_, or_
from aquilon.exceptions_ import ArgumentError, AquilonError
from aquilon.aqdb.model import (HardwareEntity, Model, ReservedName, Network,
AddressAssignment, ARecord, Fqdn, Interface,
VlanInfo, PortGroup, NetworkEnvironment, Host)
from aquilon.aqdb.model.dns_domain import parse_fqdn
from aquilon.aqdb.model.network import get_net_id_from_ip
from aquilon.worker.dbwrappers.dns import convert_reserved_to_arecord
from aquilon.worker.dbwrappers.location import get_location
from aquilon.worker.dbwrappers.interface import (check_ip_restrictions,
assign_address)
from aquilon.worker.dbwrappers.network import get_network_byip
from aquilon.utils import first_of
def search_hardware_entity_query(session, hardware_type=HardwareEntity,
subquery=False,
model=None, vendor=None, machine_type=None,
exact_location=False, ip=None, networkip=None,
network_environment=None,
mac=None, pg=None, serial=None,
interface_name=None, interface_model=None,
interface_vendor=None,
interface_bus_address=None,
used=None,
**kwargs):
q = session.query(hardware_type)
if hardware_type is HardwareEntity:
q = q.with_polymorphic('*')
# The ORM deduplicates the result if we query full objects, but not if we
# query just the label
q = q.distinct()
dblocation = get_location(session, **kwargs)
if dblocation:
if exact_location:
q = q.filter_by(location=dblocation)
else:
childids = dblocation.offspring_ids()
q = q.filter(HardwareEntity.location_id.in_(childids))
if model or vendor or machine_type:
subq = Model.get_matching_query(session, name=model, vendor=vendor,
model_type=machine_type, compel=True)
q = q.filter(HardwareEntity.model_id.in_(subq))
if ip or networkip or mac or pg or interface_name or interface_vendor \
or interface_model or interface_bus_address:
IfaceAlias = aliased(Interface)
q = q.join(IfaceAlias, HardwareEntity.interfaces)
if mac:
q = q.filter_by(mac=mac)
if interface_name:
q = q.filter_by(name=interface_name)
if interface_bus_address:
q = q.filter_by(bus_address=interface_bus_address)
if pg:
filters = [IfaceAlias.port_group_name == pg]
dbvi = VlanInfo.get_by_pg(session, pg, compel=False)
if dbvi:
filters.append(and_(PortGroup.network_tag == dbvi.vlan_id,
PortGroup.usage == dbvi.vlan_type))
else:
usage, network_tag = PortGroup.parse_name(pg)
if network_tag is not None:
filters.append(and_(PortGroup.network_tag == network_tag,
PortGroup.usage == usage))
else:
filters.append(PortGroup.usage == pg)
q = q.outerjoin(PortGroup, aliased=True, from_joinpoint=True)
q = q.filter(or_(*filters))
q = q.reset_joinpoint()
if interface_model or interface_vendor:
subq = Model.get_matching_query(session, name=interface_model,
vendor=interface_vendor,
model_type='nic', compel=True)
q = q.filter(IfaceAlias.model_id.in_(subq))
if ip:
dbnet_env = NetworkEnvironment.get_unique_or_default(session,
network_environment)
q = q.join(AddressAssignment, aliased=True, from_joinpoint=True)
q = q.filter_by(ip=ip)
q = q.join(Network, aliased=True, from_joinpoint=True)
q = q.filter_by(network_environment=dbnet_env)
elif networkip:
dbnet_env = NetworkEnvironment.get_unique_or_default(session,
network_environment)
dbnetwork = get_network_byip(session, networkip, dbnet_env)
PGAlias = aliased(PortGroup)
AAAlias = aliased(AddressAssignment)
q = q.outerjoin(PGAlias, IfaceAlias.port_group)
q = q.outerjoin(AAAlias, IfaceAlias.assignments)
q = q.filter(or_(PGAlias.network == dbnetwork,
AAAlias.network == dbnetwork))
q = q.reset_joinpoint()
if serial:
q = q.filter_by(serial_no=serial)
if used is True:
q = q.join(Host, aliased=True)
q = q.reset_joinpoint()
elif used is False:
q = q.outerjoin(Host, aliased=True)
q = q.filter_by(hardware_entity_id=None)
if not subquery:
# Oracle does not like "ORDER BY" in a sub-select, so we have to
# suppress it if we want to use this query as a subquery
q = q.order_by(HardwareEntity.label)
return q
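# Sketch of a typical call (argument values are hypothetical): fetch all used
# machines of a given vendor within a location subtree. Location keywords such
# as 'building' are consumed by get_location() via **kwargs, and Machine would
# need to be imported from aquilon.aqdb.model.
#
#   q = search_hardware_entity_query(session, hardware_type=Machine,
#                                    vendor='hp', used=True, building='np')
#   for dbmachine in q.all():
#       print(dbmachine.label)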
def update_primary_ip(session, logger, dbhw_ent, ip):
if not dbhw_ent.primary_name:
raise ArgumentError("{0} does not have a primary name."
.format(dbhw_ent))
dbnetwork = get_net_id_from_ip(session, ip)
check_ip_restrictions(dbnetwork, ip)
# The primary address must be unique
q = session.query(AddressAssignment)
q = q.filter_by(network=dbnetwork, ip=ip)
addr = q.first()
if addr:
raise ArgumentError("IP address {0} is already in use by {1:l}."
.format(ip, addr.interface))
# We can't steal the IP address from an existing DNS entry
q = session.query(ARecord)
q = q.filter_by(network=dbnetwork, ip=ip)
q = q.join(ARecord.fqdn)
q = q.filter_by(dns_environment=dbhw_ent.primary_name.fqdn.dns_environment)
existing = q.first()
if existing:
raise ArgumentError("IP address {0!s} is already used by "
"{1:l}." .format(ip, existing))
# Convert ReservedName to ARecord if needed
if isinstance(dbhw_ent.primary_name, ReservedName):
convert_reserved_to_arecord(session, dbhw_ent.primary_name, dbnetwork,
ip)
# When converting a ReservedName to an ARecord, we have to bind the
# primary address to an interface. Try to pick one.
dbinterface = first_of(dbhw_ent.interfaces, lambda x: x.bootable)
if not dbinterface:
dbinterface = first_of(dbhw_ent.interfaces, lambda x:
x.interface_type != "management")
if not dbinterface: # pragma: no cover
raise AquilonError("Cannot update the primary IP address of {0:l} "
"because it does not have any interfaces "
"defined.".format(dbhw_ent))
assign_address(dbinterface, ip, dbnetwork, logger=logger)
else:
dns_rec = dbhw_ent.primary_name
q = session.query(AddressAssignment)
q = q.filter_by(network=dns_rec.network)
q = q.filter_by(ip=dns_rec.ip)
q = q.join(Interface)
q = q.options(contains_eager('interface'),
joinedload('interface.port_group'))
q = q.filter_by(hardware_entity=dbhw_ent)
addrs = q.all()
dns_rec.ip = ip
dns_rec.network = dbnetwork
for addr in addrs:
addr.ip = ip
addr.network = dbnetwork
for iface in set(addr.interface for addr in addrs):
iface.check_pg_consistency(logger=logger)
def rename_hardware(session, dbhw_ent, rename_to):
if "." in rename_to:
if not dbhw_ent.primary_name:
raise ArgumentError("{0} does not have a primary name, renaming "
"using an FQDN is not possible."
.format(dbhw_ent))
old_domain = dbhw_ent.primary_name.fqdn.dns_domain
dns_env = dbhw_ent.primary_name.fqdn.dns_environment
new_label, new_domain = parse_fqdn(session, rename_to)
else:
new_label = rename_to
if dbhw_ent.primary_name:
old_domain = new_domain = dbhw_ent.primary_name.fqdn.dns_domain
dns_env = dbhw_ent.primary_name.fqdn.dns_environment
else:
new_domain = None
dns_env = None
old_domain.lock_row()
if new_domain != old_domain:
new_domain.lock_row()
dbhw_ent.check_label(new_label)
HardwareEntity.get_unique(session, new_label, preclude=True)
old_label = dbhw_ent.label
fqdns = []
for addr in dbhw_ent.all_addresses():
fqdns.extend(dns_rec.fqdn for dns_rec in addr.dns_records)
# This case handles reserved names
if dbhw_ent.primary_name and dbhw_ent.primary_name.fqdn not in fqdns:
fqdns.append(dbhw_ent.primary_name.fqdn)
# Filter out unrelated FQDNs
fqdns = [fqdn for fqdn in fqdns if fqdn.dns_domain == old_domain and
(fqdn.name == old_label or fqdn.name.startswith(old_label + "-"))]
# Update all state in one go, so disable autoflush for now.
with session.no_autoflush:
dbhw_ent.label = new_label
for dbfqdn in fqdns:
new_name = new_label + dbfqdn.name[len(old_label):]
Fqdn.get_unique(session, name=new_name, dns_domain=new_domain,
dns_environment=dns_env, preclude=True)
dbfqdn.dns_domain = new_domain
dbfqdn.name = new_name
def check_only_primary_ip(dbhw_ent):
"""Check and complain if the hardware entity has any other addresses
assignments other than its primary address.
"""
addrs = []
for addr in dbhw_ent.all_addresses():
if addr.ip == dbhw_ent.primary_ip:
continue
addrs.append(str(addr.ip))
if addrs:
raise ArgumentError("{0} still provides the following addresses, "
"delete them first: {1}.".format
(dbhw_ent, ", ".join(sorted(addrs))))
def get_hardware(session, compel=True, **kwargs):
mapper = inspect(HardwareEntity)
dbhw_ent = None
for hw_type, submapper in mapper.polymorphic_map.items():
if hw_type not in kwargs or not kwargs[hw_type]:
continue
if dbhw_ent:
raise ArgumentError("Multiple devices are specified.")
dbhw_ent = submapper.class_.get_unique(session, kwargs[hw_type],
compel=True)
if not dbhw_ent and compel:
raise ArgumentError("Please specify a device.")
return dbhw_ent
| guillaume-philippon/aquilon | lib/aquilon/worker/dbwrappers/hardware_entity.py | Python | apache-2.0 | 11,896 |
import deskbar.interfaces.Action
from gettext import gettext as _
import gtk
class CopyToClipboardAction(deskbar.interfaces.Action):
"""
Copy given text to clipboard
"""
def __init__(self, name, text):
deskbar.interfaces.Action.__init__(self, name)
self._text = text
def get_icon(self):
return "gtk-copy"
def get_verb(self):
return _("Copy <b>%(name)s</b> to clipboard")
def activate(self, text=None):
cb = gtk.clipboard_get()
cb.set_text(self._text)
        cb.store()
| benpicco/mate-deskbar-applet | deskbar/handlers/actions/CopyToClipboardAction.py | Python | gpl-2.0 | 574 |
# -*- coding: utf-8 -*-
import unittest
import datetime
from pyboleto.bank.bancodobrasil import BoletoBB
from .testutils import BoletoTestCase
class TestBancoBrasil(BoletoTestCase):
def setUp(self):
self.dados = []
for i in range(3):
d = BoletoBB(7, 1)
d.carteira = '18'
d.data_documento = datetime.date(2011, 3, 8)
d.data_vencimento = datetime.date(2011, 3, 8)
d.data_processamento = datetime.date(2012, 7, 4)
d.valor_documento = 2952.95
d.agencia = '9999'
d.conta = '99999'
d.convenio = '7777777'
d.nosso_numero = str(87654 + i)
d.numero_documento = str(87654 + i)
self.dados.append(d)
def test_linha_digitavel(self):
self.assertEqual(self.dados[0].linha_digitavel,
'00190.00009 07777.777009 00087.654182 6 49000000295295'
)
def test_codigo_de_barras(self):
self.assertEqual(self.dados[0].barcode,
'00196490000002952950000007777777000008765418'
)
suite = unittest.TestLoader().loadTestsFromTestCase(TestBancoBrasil)
if __name__ == '__main__':
unittest.main()
| opevolution/pyboleto | tests/test_banco_do_brasil.py | Python | bsd-3-clause | 1,202 |
'''
Created on Nov 26, 2014
@author: jiao
'''
import sys
import os
import urllib2
from bs4 import BeautifulSoup
sys.path.append('/home/jiao/QTL')
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'QTL.settings')
from qtl.models import Experiment,Gene,Marker,LOD,Parent,RIL,Metabolite,MParent,MRIL,MLOD,Genotype,ExperimentMarker,ExperimentGene,ExperimentRIL,ExperimentParent,ExperimentMetabolite
from qtl.models import TF_Family,TF_Family_Ref,Protein,Promoter_Binding_Site,TF
def promSeqUpload(f):
i=0
j = 0
    with open('PromoterSeqmissing.txt','w') as fo: # The gene exists in TAIR 9 (ATcisDB), but has not yet been measured in any registered experiment.
with open(f,'r') as fi:
for line in fi:
content = line.split('\t')
gene = content[0].upper()[:-2]
seq = content[1].strip()
if len(gene)==9 and Gene.objects.filter(locus_identifier=gene).exists():
i+=1
print gene
g = Gene.objects.get(locus_identifier = gene)
g.promoter_sequence = seq
g.save()
else:
j+=1
g = Gene()
g.locus_identifier = gene
g.promoter_sequence = seq
g.save()
fo.write(gene+'\n')
print i,j
def cdsSeqUpload(f): # NOT yet Used
i = 0
try:
        with open('CDSSeqmissing.txt','w') as fo: # The gene exists in TAIR 9 (ATcisDB), but has not yet been measured in any registered experiment.
print 'openned output file'
with open(f,'r') as fi:
for line in fi:
i+=1
content = line.split('\t')
gene = content[0].upper()[:-2]
seq = content[1].strip()
if len(gene)==9 and Gene.objects.filter(locus_identifier=gene).exists():
print gene
g = Gene.objects.get(locus_identifier = gene)
g.coding_sequence = seq
g.save()
else:
fo.write(gene+'\n')
except:
        print 'Did not open.'
def promInfoUpload(fi):
try:
i = 0
with open(fi,'r') as f:
for line in f:
i+=1
content = line.strip().split('\t')
gene = content[0].upper()[:-2]
chr = int(content[1])
ori = None
if (content[2]) == '+':
ori = 1
else:
ori = 0
prom_start = int(content[3])
prom_end = int(content[4])
prom_type = content[5]
prom_exam_org = content[6]
if Gene.objects.filter(locus_identifier = gene).exists():
g = Gene.objects.get(locus_identifier = gene)
g.chromosome = chr
g.orientation = ori
g.promoter_start = prom_start
g.promoter_end = prom_end
g.promoter_type = prom_type
g.promoter_exam_org = prom_exam_org
g.save()
else:
print 'NOT FOUND'
print gene
print i
except IOError as e:
print e.args
def getGenePosition(gene):
tair_pre = 'http://arabidopsis.org/servlets/TairObject?type=locus&name='
response = urllib2.urlopen(tair_pre+gene)
soup = BeautifulSoup(response)
all_tables = soup.find('table')
trace_tds = all_tables.find_all('td')
for td in trace_tds:
if 'nuc_sequence' in td:
next_sib = td.findNextSibling('td')
next_next_sib = next_sib.findNextSibling('td')
pos = next_sib.text.encode('ascii','ignore').replace('bp','').replace(' ','').replace('\t','')
ind = pos.index('-')
start_ = pos[:ind].strip()
end_ = pos[ind+1:].strip()
orientation_ = next_next_sib.text.encode('ascii','ignore').replace('bp','').replace(' ','').replace('\t','')
g = Gene.objects.get(locus_identifier = gene)
g.start = start_
g.end = end_
g.orientation = orientation_
g.save()
break
def syncGene(gene):
tair_pre = 'http://arabidopsis.org/servlets/TairObject?type=locus&name='
response = urllib2.urlopen(tair_pre+gene)
soup = BeautifulSoup(response)
all_tables = soup.find('table')
trace_tds = all_tables.find_all('td')
for td in trace_tds:
if 'nuc_sequence' in td:
next_sib = td.findNextSibling('td')
next_next_sib = next_sib.findNextSibling('td')
pos = next_sib.text.encode('ascii','ignore').replace('bp','').replace(' ','').replace('\t','')
ind = pos.index('-')
start_ = pos[:ind].strip()
end_ = pos[ind+1:].strip()
orientation_ = next_next_sib.text.encode('ascii','ignore').replace('bp','').replace(' ','').replace('\t','')
g = Gene()
g.locus_identifier = gene
g.start = start_
g.end = end_
g.orientation = orientation_
g.save()
break
def updateGenePosition():
gene_list = Gene.objects.filter(start__isnull = True)
i = 0
for gene in gene_list:
i+=1
gene_str = gene.locus_identifier.encode('ascii','ignore')
getGenePosition(gene_str)
print i,gene_str
def uploadTF_family(fi):
try:
i = 0
with open(fi,'r') as f:
for line in f:
i+=1
content = line.strip().split('\t')
tff = TF_Family()
tff.tf_family_name = content[0].strip().encode('ascii','ignore')
tff.tf_description = content[1].strip().encode('ascii','ignore')
tff.save()
print i,content[0]
except IOError as e:
print e.args
def uploadTF_family_ref(fi):
'''
TF_Family_Ref table
tf_family_name = models.ForeignKey(TF_Family)
reference = models.CharField(max_length = 200,blank = True)
author = models.CharField(max_length = 200,blank = True)
link = models.URLField(max_length = 200,blank = True)
'''
try:
i = 0
with open(fi,'r') as f:
for line in f:
i+=1
content = line.strip().split('\t')
tff_name = content[0].strip().encode('ascii','ignore')
tff_ref = content[1].strip().encode('ascii','ignore')
tff_author = content[2].strip().replace('"','').decode('ascii','ignore')
tff_link = content[3].strip()
tff = TF_Family_Ref()
tff.tf_family_name = TF_Family.objects.get(tf_family_name = tff_name)
tff.reference = tff_ref
tff.author = tff_author
tff.link = tff_link
tff.save()
print i,content[0]
except IOError as e:
print e.args
def read_fasta(fp):
name,seq = None,[]
for line in fp:
line = line.rstrip()
if line.startswith(">"):
if name: yield (name,''.join(seq))
name,seq = line,[]
else:
seq.append(line)
if name: yield(name,''.join(seq))
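# Minimal usage sketch for the generator above:
#
#   with open('TAIR9_pep_20090619.txt', 'r') as fp:
#       for name, seq in read_fasta(fp):
#           print name, len(seq)
#
# 'name' keeps the leading '>' of the FASTA header line; 'seq' is the
# concatenated sequence with newlines stripped.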
def uploadProtein(TAIR_rel):
'''
Protein(models.Model):
protein_name = models.CharField(max_length = 30,primary_key = True)
locus_identifier = models.ForeignKey(Gene)
protein_sequence = models.TextField(blank = True)
protein_desc = models.TextField(blank = True)
protein_symbol = models.CharField(max_length = 30,blank=True)
'''
try:
with open(TAIR_rel,'r') as fp:
i = 0
for name,seq in read_fasta(fp):
print name
prot_name_list = name.split('|')
prot_name = prot_name_list[0].strip()[1:]
dot_index = prot_name.index('.')
gene = prot_name[:dot_index]
sym_ = prot_name_list[1].split(':')
prot_symbol = sym_[1].strip()
prot_desc = prot_name_list[2].strip()
###TAIR 6 has one extra alias attribute###
#alias = prot_name_list[4].split(':','')
#prot_alias = alias[1].strip()
prot_seq = seq[:-1]
if Gene.objects.filter(locus_identifier = gene).exists():
if not Protein.objects.filter(protein_name = prot_name).exists():
p = Protein()
p.protein_name = prot_name
p.locus_identifier = Gene.objects.get(locus_identifier = gene)
p.protein_sequence = prot_seq
p.protein_desc = prot_desc
if len(prot_symbol):
p.protein_symbol = prot_symbol
p.save()
print i,prot_name,prot_symbol
else:
i+=1
syncGene(gene)
p = Protein()
p.protein_name = prot_name
p.locus_identifier = Gene.objects.get(locus_identifier = gene)
p.protein_sequence = prot_seq
p.protein_desc = prot_desc
if len(prot_symbol):
p.protein_symbol = prot_symbol
p.save()
print 'registered'
print i,prot_name,prot_symbol
except IOError as e:
print e.args
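# Example of the TAIR header layout uploadProtein() assumes (this record is
# illustrative, not copied from the release file):
#
#   >AT1G01010.1 | Symbols: NAC001 | NAC domain containing protein | ...
#
# Splitting on '|' yields the protein name 'AT1G01010.1' (gene 'AT1G01010'
# before the dot), the symbol after 'Symbols:', and the description text.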
def syncProtein(fi):
try:
with open(fi,'r') as f:
i = 0
for line in f:
content = line.split('\t')
prot = content[1].strip()
if Protein.objects.filter(locus_identifier = prot).exists():
pass
else:
print prot
except IOError as e:
print e.args
def missProtein(fi):
try:
with open(fi,'r') as f:
i = 0
for line in f:
prot = line.strip()
if Protein.objects.filter(locus_identifier = prot).exists():
pass
else:
print prot
except IOError as e:
print e.args
def uploadBS(fi):
'''
locus_identifier = models.ForeignKey(Gene)#
binding_site_name = models.CharField(max_length = 30)
binding_site_sequence = models.TextField()
chr = models.IntegerField()
start = models.IntegerField()
end = models.IntegerField()
tf_family_name = models.ForeignKey(TF_Family,blank = True)#
motif = models.CharField(max_length = 20,blank = True)
bs_atcisdb_color = models.CharField(max_length = 10,blank = True)
fi: 'BindingSite.tbl'
'''
try:
with open(fi,'r') as f:
i = 0
for line in f:
i+=1
content = line.split('\t')
tff = content[9].strip()
if not TF_Family.objects.filter(tf_family_name__iexact= tff).exists():
tff_ins = TF_Family()
tff_ins.tf_family_name = tff
tff_ins.save()
bs = Promoter_Binding_Site()
gene_model = content[6].strip()
dot_ind = gene_model.index('.')
bs.locus_identifier = Gene.objects.get(locus_identifier = gene_model[:dot_ind].upper())
bs.binding_site_name = content[1].strip()
bs.binding_site_sequence = content[7].strip()
bs.chr = int(content[2].strip())
bs.start = int(content[4].strip())
bs.end = int(content[5].strip())
bs.tf_family_name = TF_Family.objects.get(tf_family_name__iexact = tff)#7 case insensitive
                if content[10].strip() != 'NA':
bs.motif = content[10].strip()
bs.bs_atcisdb_color = content[8].strip()
bs.save()
print i,content[0].strip()
except IOError as e:
print e.args
def uploadTF(fi):
'''
upload ATtfDB predicted TF
TF
tf_family_name = models.ForeignKey(TF_Family)
locus_name = models.CharField(max_length=30)
gene_name = models.CharField(max_length=20)
description = models.CharField(max_length = 200,blank = True)
motif = models.CharField(max_length = 100,blank = True)
reference = models.CharField(max_length = 200,blank = True)
author = models.CharField(max_length = 200,blank = True)
link = models.URLField(max_length = 200,blank = True)
families_data.tbl
'''
try:
with open(fi,'r') as f:
i = 0
for line in f:
i+=1
content = line.split('\t')
tff = content[0].strip()
locus = content[1].strip().upper()
gene = content[2].strip()
desc = content[3].strip().replace('"','')
if TF_Family.objects.filter(tf_family_name__iexact = tff).exists() and Gene.objects.filter(locus_identifier__iexact=locus).exists():
tf = TF()
tf.tf_family_name = TF_Family.objects.get(tf_family_name__iexact = tff)
tf.locus_name = locus
                    if gene != 'NA':
                        tf.gene_name = gene
                    if desc != 'NA':
                        tf.description = desc
tf.save()
print i,tff,locus
except IOError as e:
print e.args
def uploadTFRef(fi):
'''
TF
bs_name = models.CharField(max_length=100,blank = True)
motif = models.CharField(max_length = 100,blank = True)
reference = models.CharField(max_length = 200,blank = True)
author = models.CharField(max_length = 200,blank = True)
link = models.URLField(max_length = 200,blank = True)
'bindingsite_data.tbl'
'''
try:
with open(fi,'r') as f:
i = 0
for line in f:
i+=1
content = line.split('\t')
bs = content[0].strip()
locus = content[1].strip().upper()
tff = content[2].strip()
mot = content[3].strip()
ref = content[4].strip()
aut = content[5].strip().replace('"','')
lin = content[6].strip()
if TF_Family.objects.filter(tf_family_name__iexact = tff).exists() and Gene.objects.filter(locus_identifier__iexact=locus).exists():
tf_update =TF.objects.get(tf_family_name = tff,locus_name = locus)
tf_update.bs_name = bs
tf_update.motif = mot
tf_update.reference = ref
tf_update.author = aut
tf_update.link = lin
tf_update.save()
print i, bs, locus,tff
except IOError as e:
print e.args
if __name__ == '__main__':
#promSeqUpload('PromoterSeq.tbl')
#cdsSeqUpload('families_seq.tbl')
#promInfoUpload('PromoterInfo.tbl')
#updateGenePosition()
#uploadTF_family('families_id.tbl')
#uploadTF_family_ref('families_ref.tbl')
#uploadProtein('TAIR9_pep_20090619.txt')
#syncProtein('families_data.tbl')
#uploadBS('BindingSite.tbl')
#uploadBS('BindingSiteCut.tbl')
#uploadTF('families_data.tbl')
uploadTFRef('file/bindingsite_data.tbl') | longing247/QTL | qtl/upload.py | Python | apache-2.0 | 16,057 |
VERSION = '0.4'
| SimonSapin/tinycss | tinycss/version.py | Python | bsd-3-clause | 16 |
import subprocess
import unittest
import urllib2
import shutil
import json
import ast
import os
from flask import Flask
from flask.ext.testing import LiveServerTestCase
from lwp.app import app
from lwp.utils import connect_db
token = 'myrandomapites0987'
class TestApi(LiveServerTestCase):
db = None
type_json = {'Content-Type': 'application/json'}
def create_app(self):
shutil.copyfile('lwp.db', '/tmp/db.sql')
self.db = connect_db('/tmp/db.sql')
self.db.execute('insert into api_tokens(description, token) values(?, ?)', ['test', token])
self.db.commit()
app.config['DATABASE'] = '/tmp/db.sql'
return app
def test_00_get_containers(self):
shutil.rmtree('/tmp/lxc/', ignore_errors=True)
request = urllib2.Request(self.get_server_url() + '/api/v1/containers/',
headers={'Private-Token': token})
response = urllib2.urlopen(request)
self.assertEqual(response.code, 200)
#assert isinstance(response.read(), list)
def test_01_put_containers(self):
data = {'name': 'test_vm_sshd', 'template': 'sshd'}
request = urllib2.Request(self.get_server_url() + '/api/v1/containers/', json.dumps(data),
headers={'Private-Token': token, 'Content-Type': 'application/json' })
request.get_method = lambda: 'PUT'
response = urllib2.urlopen(request)
self.assertEqual(response.code, 200)
assert data['name'] in os.listdir('/tmp/lxc')
def test_02_post_containers(self):
data = {'action': 'start'}
request = urllib2.Request(self.get_server_url() + '/api/v1/containers/test_vm_sshd', json.dumps(data),
headers={'Private-Token': token, 'Content-Type': 'application/json'})
request.get_method = lambda: 'POST'
response = urllib2.urlopen(request)
self.assertEqual(response.code, 200)
def test_03_delete_containers(self):
request = urllib2.Request(self.get_server_url() + '/api/v1/containers/test_vm_sshd',
headers={'Private-Token': token})
request.get_method = lambda: 'DELETE'
response = urllib2.urlopen(request)
self.assertEqual(response.code, 200)
def test_04_post_token(self):
data = {'token': 'test'}
request = urllib2.Request(self.get_server_url() + '/api/v1/tokens/', json.dumps(data),
headers={'Private-Token': token, 'Content-Type': 'application/json'})
response = urllib2.urlopen(request)
self.assertEqual(response.code, 200)
def test_05_delete_token(self):
request = urllib2.Request(self.get_server_url() + '/api/v1/tokens/test',
headers={'Private-Token': token})
request.get_method = lambda: 'DELETE'
response = urllib2.urlopen(request)
self.assertEqual(response.code, 200)
if __name__ == '__main__':
unittest.main()
| romses/LXC-Web-Panel | tests/api.py | Python | mit | 2,899 |
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
def set_module_args(args):
args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
basic._ANSIBLE_ARGS = to_bytes(args)
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except:
pass
fixture_data[path] = data
return data
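# Usage sketch (the fixture file name below is hypothetical): test cases call
# load_fixture() to read canned device output from the fixtures directory; the
# content is parsed as JSON when possible and cached across calls.
#
#   output = load_fixture('ios_config_show_running_config.txt')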
class AnsibleExitJson(Exception):
pass
class AnsibleFailJson(Exception):
pass
class TestIosModule(unittest.TestCase):
def execute_module(self, failed=False, changed=False, commands=None,
sort=True, defaults=False):
self.load_fixtures(commands)
if failed:
result = self.failed()
self.assertTrue(result['failed'], result)
else:
result = self.changed(changed)
self.assertEqual(result['changed'], changed, result)
if commands:
if sort:
self.assertEqual(sorted(commands), sorted(result['commands']), result['commands'])
else:
self.assertEqual(commands, result['commands'], result['commands'])
return result
def failed(self):
def fail_json(*args, **kwargs):
kwargs['failed'] = True
raise AnsibleFailJson(kwargs)
with patch.object(basic.AnsibleModule, 'fail_json', fail_json):
with self.assertRaises(AnsibleFailJson) as exc:
self.module.main()
result = exc.exception.args[0]
self.assertTrue(result['failed'], result)
return result
def changed(self, changed=False):
def exit_json(*args, **kwargs):
if 'changed' not in kwargs:
kwargs['changed'] = False
raise AnsibleExitJson(kwargs)
with patch.object(basic.AnsibleModule, 'exit_json', exit_json):
with self.assertRaises(AnsibleExitJson) as exc:
self.module.main()
result = exc.exception.args[0]
self.assertEqual(result['changed'], changed, result)
return result
def load_fixtures(self, commands=None):
pass
| andreaso/ansible | test/units/modules/network/ios/ios_module.py | Python | gpl-3.0 | 3,255 |
"""okc_scraper includes all the functions needed to scrape profiles from
OKCupid"""
import requests
import cPickle as pickle
import time
from BeautifulSoup import BeautifulSoup
def authorize(username, password):
"""Log into OKCupid to scrape profiles"""
user_info = {"username": username, "password": password}
okc = requests.session()
okc.post("https://www.okcupid.com/login", data=user_info)
return okc
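# Usage sketch (credentials are placeholders):
#
#   okc = authorize('myusername', 'mypassword')
#   profiles, soup = getProfiles(okc)
#
# The returned requests session keeps the login cookies for all later calls.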
def getProfiles(okc):
"""Searches for profiles and returns a list of profiles (10)"""
# match_info = {"filter1": "0,63", "filter2": "2,100,18",
# "filter3": "5,2678400", "filter4": "1,1",
# "locid": "1", "custom_search": "0",
# "matchOrderBy": "SPECIAL_BLEND",
# "sa": "1", "sort_type": "0", "update_prefs": "1"}
    soup = BeautifulSoup(okc.post("https://www.okcupid.com/match?filter1=0,63&filter2=2,100,18&filter3=5,2678400&filter4=1,1&locid=0&timekey=1&matchOrderBy=SPECIAL_BLEND&custom_search=0&fromWhoOnline=0&mygender=m").text)
users = soup.findAll("div", {"class": "user_info"})
return (["https://www.okcupid.com" +
user.find("a")["href"].replace("?cf=regular", "")
for user in users], soup)
def getProfile(okc, profile_link):
"""Takes a link to a profile and returns a BeautifulSoup object"""
page = BeautifulSoup(okc.get(profile_link).text)
return (page, page.find("form", {"id": "flag_form"})
.findAll("input")[0]["value"])
def getInfo(profile, profile_id):
"""Take a BeautifulSoup object corresponding to a profile's home page
    and the profile's id and return a dict of the profile's user info
(username, age, gender...)"""
try:
main = profile.find("div", {"id": "basic_info"}).findAll("span")
return {"id_table": {"user_id": profile_id,
"user_name": main[0].text,
"user_age": main[1].text,
"user_gender": main[2].text,
"user_orient": main[3].text,
"user_status": main[4].text,
"user_location": main[5].text}, }
except:
print profile
return {"id_table": {"user_id": profile_id,
"data": "NA"}}
def getEssays(profile, profile_id):
"""Takes a BeautifulSoup object corresponding to a profiles home
page and returns a list of the profile's essays"""
etd = {"user_id": profile_id, }
essay_index = ["self_summary", "my_life", "good_at", "first_thing",
"favorite", "six_things", "lot_time", "typical_Friday",
"most_private"]
main = profile.find("div", {"id": "main_column"})
for i in range(0, 9):
try:
etd[essay_index[i]] = (main.find("div", {"id": "essay_text_"
+ str(i)})
.getText(' '))
except:
etd[essay_index[i]] = ""
return {"essay_table": etd, }
def getLookingFor(profile, profile_id):
"""Takes a BeautifulSoup object corresponding to a profiles home
page and returns a list of the profile's looking for items"""
try:
main = (profile.find("div", {"id": "main_column"})
.find("div", {"id": "what_i_want"}).findAll("li"))
if len(main) == 4:
return {"looking_for_table": {"user_id": profile_id,
"other_user": main[0].text,
"other_age": main[1].text,
"other_location": main[2].text,
"other_type": main[3].text}, }
if len(main) == 5:
return {"looking_for_table": {"user_id": profile_id,
"other_user": main[0].text,
"other_age": main[1].text,
"other_location": main[2].text,
"other_status": main[3].text,
"other_type": main[4].text}, }
except:
print profile
return {"looking_for_table": {"user_id": profile_id,
"data": "NA"}}
def getDetails(profile, profile_id):
"""Takes a BeautifulSoup object corresponding to profiles home
page and returns a list of profile's details"""
try:
main = profile.find("div", {"id": "profile_details"}).findAll("dd")
return {"details_table": {"user_id": profile_id,
"last_online": main[0].text,
"ethnicity": main[1].text,
"height": main[2].text,
"body_type": main[3].text,
"diet": main[4].text,
"smokes": main[5].text,
"drinks": main[6].text,
"religion": main[7].text,
"sign": main[8].text,
"education": main[9].text,
"job": main[10].text,
"income": main[11].text,
"offspring": main[12].text,
"pets": main[13].text,
"speaks": main[14].text}, }
    except (AttributeError, IndexError):
        # The page lacked the expected markup; dump it for debugging.
        print profile
return {"details_table": {"user_id": profile_id,
"data": "NA"}}
def getQuestions(okc, profile_link, profile_id):
"""Take a link to a profile and return a list the questions a user
has answered"""
# Currently this doesn't return anything. All functions need to be
# changed up to work with mysql 07/19/2013 22:50
question_list = []
question_categories = ["Ethics", "Sex", "Religion", "Lifestyle",
"Dating", "Other"]
for category in question_categories:
q = BeautifulSoup(okc.get(profile_link + "/questions?"
+ category).text)
try:
max_page = int(q.find("div", {"class": "pages clearfix"})
.findAll("li")[1].find("a").text)
except IndexError:
max_page = 1
except AttributeError:
return []
for page in range(1, max_page + 1):
q_page = BeautifulSoup(okc.get(profile_link + "/questions?"
+ category + "="
+ str(page)).text)
questions = [q for q in q_page.find("div", {"id": "questions"})
.findAll("div",
{"class":
"question public talk clearfix"})]
for question in questions:
question_id = question["id"]
qtext = question.find("p", {"class": "qtext"}).text
atext = question.find("p",
{"class":
"answer target clearfix"}).text
question_list.append({"question_table":
{"user_id": profile_id,
"question_id": question_id,
"question_text": qtext,
"user_answer": atext,
"question_category": category},
})
return question_list
def pickleDict(dict_, directory):
    """Takes a single-key dictionary to be pickled and a directory, and
    appends the dict to the pickle file named after its key. The pickle
    file must already exist and contain a list."""
    dict_id = dict_.keys()[0]
    tab_i = pickle.load(open(directory + dict_id + ".p", "rb"))
    tab_i.append(dict_)
    pickle.dump(tab_i, open(directory + dict_id + ".p", "wb"))
def main(okc_instance):
"""The main event, takes an okc_instance (logged in) and writes a
profile to the docs"""
profiles, soup = getProfiles(okc_instance)
locations = [l.text.split(";")[1] for l in
soup.findAll("div", {"class": "userinfo"})]
if len([l for l in locations if l == "Chicago, IL"]) > 2:
print "Possible Reset"
for profile in profiles:
prof = getProfile(okc_instance, profile)
pickleDict(getInfo(prof[0], prof[1]), "data/")
pickleDict(getEssays(prof[0], prof[1]), "data/")
pickleDict(getLookingFor(prof[0], prof[1]), "data/")
pickleDict(getDetails(prof[0], prof[1]), "data/")
time.sleep(2)
return prof[1]
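# --- Usage sketch (not part of the original module) ---
# A minimal driver, assuming only the functions above. pickleDict() loads
# before it appends, so each table's pickle file must be seeded with an empty
# list first. The credentials are placeholders.
if __name__ == "__main__":
    import os
    for table in ["id_table", "essay_table", "looking_for_table",
                  "details_table"]:
        path = os.path.join("data", table + ".p")
        if not os.path.exists(path):
            with open(path, "wb") as seed:
                pickle.dump([], seed)  # seed each table with an empty list
    session = authorize("example_user", "example_password")
    for _ in range(10):  # scrape ten batches of matches
        main(session)
        time.sleep(5)    # be polite between search requests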
| lbybee/okc_project | okc_scraper_no_q.py | Python | gpl-2.0 | 8,941 |
"""
script_watcher.py: Reload watched script upon changes.
Copyright (C) 2015 Isaac Weaver
Author: Isaac Weaver <wisaac407@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
bl_info = {
"name": "Script Watcher",
"author": "Isaac Weaver",
"version": (0, 5),
"blender": (2, 75, 0),
"location": "Properties > Scene > Script Watcher",
"description": "Reloads an external script on edits.",
"warning": "Still in beta stage.",
"wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/Scripts/Development/Script_Watcher",
"tracker_url": "https://github.com/wisaac407/blender-script-watcher/issues/new",
"category": "Development",
}
import os, sys
import io
import traceback
import types
import bpy
from bpy.app.handlers import persistent
@persistent
def load_handler(dummy):
try:
if (bpy.context.scene.sw_settings.running and bpy.context.scene.sw_settings.auto_watch_on_startup):
bpy.ops.wm.sw_watch_end('EXEC_DEFAULT')
bpy.ops.wm.sw_watch_start('EXEC_DEFAULT')
else:
bpy.ops.wm.sw_watch_end('EXEC_DEFAULT')
except:
print("Exception on startup check!")
def add_scrollback(ctx, text, text_type):
for line in text:
bpy.ops.console.scrollback_append(ctx, text=line.replace('\t', ' '),
type=text_type)
class SplitIO(io.StringIO):
"""Feed the input stream into another stream."""
PREFIX = '[Script Watcher]: '
_can_prefix = True
def __init__(self, stream):
io.StringIO.__init__(self)
self.stream = stream
def write(self, s):
# Make sure we prefix our string before we do anything else with it.
if self._can_prefix:
s = self.PREFIX + s
        # The next write gets a prefix only if this chunk ended with a newline.
self._can_prefix = s.endswith('\n')
# Make sure to call the super classes write method.
io.StringIO.write(self, s)
# When we are written to, we also write to the secondary stream.
self.stream.write(s)
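# --- Illustration (not part of the add-on) ---
# SplitIO tees everything written to it into the wrapped stream while keeping
# its own prefixed copy; kept as a comment so nothing runs at import time:
#
#   buf = SplitIO(sys.stdout)   # echoes to the real stdout as it buffers
#   buf.write('hello\n')        # stdout sees '[Script Watcher]: hello'
#   buf.seek(0)
#   assert buf.read().startswith(SplitIO.PREFIX)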
# Define the script watching operator.
class WatchScriptOperator(bpy.types.Operator):
"""Watches the script for changes, reloads the script if any changes occur."""
bl_idname = "wm.sw_watch_start"
bl_label = "Watch Script"
_timer = None
_running = False
_times = None
filepath = None
def get_paths(self):
"""Find all the python paths surrounding the given filepath."""
dirname = os.path.dirname(self.filepath)
paths = []
filepaths = []
for root, dirs, files in os.walk(dirname, topdown=True):
if '__init__.py' in files:
paths.append(root)
for f in files:
filepaths.append(os.path.join(root, f))
else:
dirs[:] = [] # No __init__ so we stop walking this dir.
# If we just have one (non __init__) file then return just that file.
return paths, filepaths or [self.filepath]
def get_mod_name(self):
"""Return the module name and the root path of the givin python file path."""
dir, mod = os.path.split(self.filepath)
# Module is a package.
if mod == '__init__.py':
mod = os.path.basename(dir)
dir = os.path.dirname(dir)
# Module is a single file.
else:
mod = os.path.splitext(mod)[0]
return mod, dir
def remove_cached_mods(self):
"""Remove all the script modules from the system cache."""
paths, files = self.get_paths()
for mod_name, mod in list(sys.modules.items()):
            if getattr(mod, '__file__', None) and os.path.dirname(mod.__file__) in paths:
del sys.modules[mod_name]
def _reload_script_module(self):
print('Reloading script:', self.filepath)
self.remove_cached_mods()
        try:
            # Read the source up front so the file handle is always closed.
            with open(self.filepath) as f:
                source = f.read()
            paths, files = self.get_paths()
            # Get the module name and the root module path.
            mod_name, mod_root = self.get_mod_name()
            # Create the module and set up the basic properties.
            mod = types.ModuleType('__main__')
            mod.__file__ = self.filepath
            mod.__path__ = paths
            mod.__package__ = mod_name
            # Add the module to the system module cache.
            sys.modules[mod_name] = mod
            # Finally, execute the module.
            exec(compile(source, self.filepath, 'exec'), mod.__dict__)
        except IOError:
            print('Could not open script file.')
        except:
            sys.stderr.write("There was an error when running the script:\n" + traceback.format_exc())
def reload_script(self, context):
"""Reload this script while printing the output to blenders python console."""
# Setup stdout and stderr.
stdout = SplitIO(sys.stdout)
stderr = SplitIO(sys.stderr)
sys.stdout = stdout
sys.stderr = stderr
# Run the script.
self._reload_script_module()
        # Go back to the beginning so we can read the streams.
stdout.seek(0)
stderr.seek(0)
# Don't use readlines because that leaves trailing new lines.
output = stdout.read().split('\n')
output_err = stderr.read().split('\n')
if self.use_py_console:
# Print the output to the consoles.
for area in context.screen.areas:
if area.type == "CONSOLE":
ctx = context.copy()
ctx.update({"area": area})
# Actually print the output.
if output:
add_scrollback(ctx, output, 'OUTPUT')
if output_err:
add_scrollback(ctx, output_err, 'ERROR')
# Cleanup
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
def modal(self, context, event):
if not context.scene.sw_settings.running:
self.cancel(context)
return {'CANCELLED'}
if context.scene.sw_settings.reload:
context.scene.sw_settings.reload = False
self.reload_script(context)
return {'PASS_THROUGH'}
if event.type == 'TIMER':
for path in self._times:
cur_time = os.stat(path).st_mtime
if cur_time != self._times[path]:
self._times[path] = cur_time
self.reload_script(context)
return {'PASS_THROUGH'}
def execute(self, context):
if context.scene.sw_settings.running:
return {'CANCELLED'}
# Grab the settings and store them as local variables.
self.filepath = bpy.path.abspath(context.scene.sw_settings.filepath)
self.use_py_console = context.scene.sw_settings.use_py_console
        # If it's not a file, doesn't exist, or permission is denied, we don't proceed.
if not os.path.isfile(self.filepath):
self.report({'ERROR'}, 'Unable to open script.')
return {'CANCELLED'}
        # Setup the times dict to keep track of when all the files were last edited.
dirs, files = self.get_paths()
self._times = dict((path, os.stat(path).st_mtime) for path in files) # Where we store the times of all the paths.
self._times[files[0]] = 0 # We set one of the times to 0 so the script will be loaded on startup.
# Setup the event timer.
wm = context.window_manager
self._timer = wm.event_timer_add(0.1, context.window)
wm.modal_handler_add(self)
context.scene.sw_settings.running = True
return {'RUNNING_MODAL'}
def cancel(self, context):
wm = context.window_manager
wm.event_timer_remove(self._timer)
self.remove_cached_mods()
context.scene.sw_settings.running = False
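# --- Illustration (not part of the add-on) ---
# The operator above detects edits by polling os.stat().st_mtime from a modal
# timer. The same pattern outside Blender, as a minimal sketch with a
# hypothetical callback (nothing in the add-on calls this):
def _poll_for_changes(paths, on_change, interval=0.1, max_polls=None):
    """Invoke on_change(path) whenever a watched file's mtime changes."""
    import time
    times = {p: os.stat(p).st_mtime for p in paths}
    polls = 0
    while max_polls is None or polls < max_polls:
        for p, last in times.items():
            mtime = os.stat(p).st_mtime
            if mtime != last:
                times[p] = mtime
                on_change(p)
        time.sleep(interval)
        polls += 1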
class CancelScriptWatcher(bpy.types.Operator):
"""Stop watching the current script."""
bl_idname = "wm.sw_watch_end"
bl_label = "Stop Watching"
def execute(self, context):
# Setting the running flag to false will cause the modal to cancel itself.
context.scene.sw_settings.running = False
return {'FINISHED'}
class ReloadScriptWatcher(bpy.types.Operator):
"""Reload the current script."""
bl_idname = "wm.sw_reload"
bl_label = "Reload Script"
def execute(self, context):
        # Setting the reload flag to true will cause the modal to reload the script.
context.scene.sw_settings.reload = True
return {'FINISHED'}
# Create the UI for the operator. NEEDS FINISHING!!
class ScriptWatcherPanel(bpy.types.Panel):
"""UI for the script watcher."""
bl_label = "Script Watcher"
bl_idname = "SCENE_PT_script_watcher"
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "scene"
def draw(self, context):
layout = self.layout
running = context.scene.sw_settings.running
col = layout.column()
col.prop(context.scene.sw_settings, 'filepath')
col.prop(context.scene.sw_settings, 'use_py_console')
col.prop(context.scene.sw_settings, 'auto_watch_on_startup')
col.operator('wm.sw_watch_start', icon='VISIBLE_IPO_ON')
col.enabled = not running
if running:
row = layout.row(align=True)
row.operator('wm.sw_watch_end', icon='CANCEL')
row.operator('wm.sw_reload', icon='FILE_REFRESH')
class ScriptWatcherSettings(bpy.types.PropertyGroup):
"""All the script watcher settings."""
running = bpy.props.BoolProperty(default=False)
reload = bpy.props.BoolProperty(default=False)
filepath = bpy.props.StringProperty(
name = 'Script',
description = 'Script file to watch for changes.',
subtype = 'FILE_PATH'
)
use_py_console = bpy.props.BoolProperty(
name = 'Use py console',
        description = "Use Blender's built-in Python console for program output (e.g. print statements and error messages)",
default = False
)
auto_watch_on_startup = bpy.props.BoolProperty(
name = 'Watch on startup',
description = 'Watch script automatically on new .blend load',
default = False
)
def register():
bpy.utils.register_class(WatchScriptOperator)
bpy.utils.register_class(ScriptWatcherPanel)
bpy.utils.register_class(CancelScriptWatcher)
bpy.utils.register_class(ReloadScriptWatcher)
bpy.utils.register_class(ScriptWatcherSettings)
bpy.types.Scene.sw_settings = \
bpy.props.PointerProperty(type=ScriptWatcherSettings)
bpy.app.handlers.load_post.append(load_handler)
def unregister():
bpy.utils.unregister_class(WatchScriptOperator)
bpy.utils.unregister_class(ScriptWatcherPanel)
bpy.utils.unregister_class(CancelScriptWatcher)
bpy.utils.unregister_class(ReloadScriptWatcher)
bpy.utils.unregister_class(ScriptWatcherSettings)
bpy.app.handlers.load_post.remove(load_handler)
del bpy.types.Scene.sw_settings
if __name__ == "__main__":
register()
| kilbee/blender-script-watcher | script_watcher.py | Python | gpl-2.0 | 12,299 |
# This file is part of OpenHatch.
# Copyright (C) 2010 Parker Phinney
# Copyright (C) 2010 Jack Grigg
# Copyright (C) 2011 Krzysztof Tarnowski (krzysztof.tarnowski@ymail.com)
# Copyright (C) 2009, 2010, 2011 OpenHatch, Inc.
# Copyright (C) 2011 Jairo E. Lopez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import logging
import mock
import os
import os.path
import subprocess
from StringIO import StringIO
import twill
from twill import commands as tc
import django.test
from django.core.urlresolvers import reverse
from django.core.handlers.wsgi import WSGIHandler
from django.contrib.staticfiles.handlers import StaticFilesHandler
from django.test.client import Client
from django.core.cache import cache
from django.core.management import CommandError
from django.conf import settings
from django.contrib.auth.models import User
from django.utils import unittest
from django.utils.unittest import expectedFailure, skip
import mysite.base.decorators
import mysite.base.management.commands.nagios
import mysite.base.management.commands.remote_command_check
import mysite.base.templatetags.base_extras
import mysite.base.unicode_sanity
import mysite.base.views
import mysite.base.view_helpers
import mysite.profile.management.commands.send_emails
import mysite.profile.views
import mysite.project.views
import mysite.search.models
import mysite.settings
logger = logging.getLogger(__name__)
def make_twill_url(url):
# modify this
return url.replace("http://openhatch.org/",
"http://127.0.0.1:8080/")
def better_make_twill_url(url):
return make_twill_url(url.replace('+', '%2B'))
def twill_goto_view(view_name, kwargs):
url = "http://openhatch.org" + reverse(view_name, kwargs=kwargs)
tc.go(better_make_twill_url(url))
mock_get = mock.Mock()
mock_get.return_value = None
class TwillTests(django.test.TestCase):
""" Basic tests using twill for the base submodule"""
@staticmethod
def _twill_setup():
app = StaticFilesHandler(WSGIHandler())
twill.add_wsgi_intercept("127.0.0.1", 8080, lambda: app)
@staticmethod
def _twill_quiet():
        # suppress normal output of twill. You don't want to
# call this if you want an interactive session
twill.set_output(StringIO())
def setUp(self):
""" Basic setup method needed by other testing classes """
self.real_get = django.core.cache.cache.get
django.core.cache.cache.get = mock_get
self.old_dbe = settings.DEBUG_PROPAGATE_EXCEPTIONS
settings.DEBUG_PROPAGATE_EXCEPTIONS = True
TwillTests._twill_setup()
TwillTests._twill_quiet()
def tearDown(self):
""" Basic teardown method needed by other testing classes """
# If you get an error on one of these lines,
# maybe you didn't run base.TwillTests.setUp?
settings.DEBUG_PROPAGATE_EXCEPTIONS = self.old_dbe
twill.remove_wsgi_intercept('127.0.0.1', 8080)
tc.reset_browser()
django.core.cache.cache.get = self.real_get
def login_with_twill(self):
""" Tests login page for accounts """
login_url = 'http://openhatch.org/account/login/old'
tc.go(make_twill_url(login_url))
# Log in
username = "paulproteus"
password = "paulproteus's unbreakable password"
tc.fv('login', 'username', username)
tc.fv('login', 'password', password)
tc.submit()
def login_with_client(self, username='paulproteus',
password="paulproteus's unbreakable password"):
""" Test login with a specific user """
client = Client()
success = client.login(username=username,
password=password)
self.assertTrue(success)
return client
def login_with_client_as_barry(self):
""" Test login as a specific user """
return self.login_with_client(username='barry', password='parallelism')
class MySQLRegex(TwillTests):
def test_escape(self):
before2after = {
'n': '[n]',
']': ']',
'[n': '[[][n]'
}
for before, after in before2after.items():
self.assertEqual(
mysite.base.view_helpers.mysql_regex_escape(before),
after)
class TestUriDataHelper(TwillTests):
def test(self):
request = mysite.base.view_helpers.ObjectFromDict({
'is_secure': lambda: True,
'META': {'SERVER_PORT': '443',
'SERVER_NAME': 'name'}})
data = ((mysite.base.view_helpers.
get_uri_metadata_for_generating_absolute_links(request)))
self.assertEqual(data, {'uri_scheme': 'https',
'url_prefix': 'name'})
class GeocoderCanGeocode(TwillTests):
def get_geocoding_in_json_for_unicode_string(self):
unicode_str = u'Bark\xe5ker, T\xf8nsberg, Vestfold, Norway'
# Just exercise the geocoder and ensure it doesn't blow up.
return mysite.base.view_helpers.cached_geocoding_in_json(unicode_str)
def test_unicode_string(self):
self.get_geocoding_in_json_for_unicode_string()
class RemoveByteOrderMarker(unittest.TestCase):
def test(self):
sample_bytes = '\xef\xbb\xbf' + 'hi'
as_fd = StringIO(sample_bytes)
self.assertNotEqual('hi', as_fd.read())
as_fd = StringIO(sample_bytes)
cleaned_up_fd = (
mysite.base.unicode_sanity.wrap_file_object_in_utf8_check(as_fd))
result = cleaned_up_fd.read()
self.assertEqual(type(result), str) # not unicode
self.assertEqual(result, 'hi')
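# For illustration, a minimal sketch of the kind of wrapper exercised above:
# drop a UTF-8 byte-order mark from the head of a file object if one is
# present. A generic stand-in, not the actual implementation in
# mysite.base.unicode_sanity.
def _strip_utf8_bom_sketch(fd):
    if fd.read(3) != '\xef\xbb\xbf':
        fd.seek(0)  # no BOM; rewind so nothing is lost
    return fd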
class GeocoderCanCache(django.test.TestCase):
unicode_address = u'Bark\xe5ker, T\xf8nsberg, Vestfold, Norway'
def get_geocoding_in_json_for_unicode_string(self):
# Just exercise the geocoder and ensure it doesn't blow up.
return mysite.base.view_helpers.cached_geocoding_in_json(
self.unicode_address)
mock_geocoder = mock.Mock()
@mock.patch("mysite.base.view_helpers._geocode", mock_geocoder)
def test_unicode_strings_get_cached(self):
# Let's make sure that the first time, this runs with original_json,
# that the cache is empty, and we populate it with original_json.
cache.delete(
mysite.base.view_helpers.address2cache_key_name(
self.unicode_address))
# NOTE This test uses django.tests.TestCase to skip our
# monkey-patching of the cache framework
# When the geocoder's results are being cached properly,
# the base controller named '_geocode' will not run more than once.
original_json = "{'key': 'original value'}"
different_json = (
"{'key': 'if caching works we should never get this value'}")
self.mock_geocoder.return_value = eval(original_json)
self.assertTrue(
'original value' in
self.get_geocoding_in_json_for_unicode_string())
self.mock_geocoder.return_value = eval(different_json)
try:
json = self.get_geocoding_in_json_for_unicode_string()
self.assertTrue('original value' in json)
except AssertionError:
raise AssertionError(
"Geocoded location in json was not cached; it now equals "
+ json)
class TestUnicodifyDecorator(TwillTests):
def test(self):
        utf8_data = u'\xe9'.encode('utf-8')  # 'é' encoded as UTF-8 ('\xc3\xa9')
@mysite.base.decorators.unicodify_strings_when_inputted
def sample_thing(arg):
self.assertEqual(type(arg), unicode)
sample_thing(utf8_data)
class Feed(TwillTests):
fixtures = ['user-paulproteus', 'person-paulproteus']
def test_feed_shows_answers(self):
# Visit the homepage, notice that there are no answers in the context.
def get_answers_from_homepage():
homepage_response = self.client.get('/')
return homepage_response.context[0]['recent_feed_items']
self.assertFalse(get_answers_from_homepage())
# Create a few answers on the project discussion page.
for x in range(4):
mysite.search.models.Answer.create_dummy()
recent_feed_items = (
mysite.search.models.Answer.objects.all().order_by(
'-modified_date'))
# Visit the homepage, assert that the feed item data is on the page,
# ordered by date descending.
actual_answer_pks = [
answer.pk for answer in get_answers_from_homepage()]
expected_answer_pks = [answer.pk for answer in recent_feed_items]
self.assertEqual(actual_answer_pks, expected_answer_pks)
def test_feed_shows_wanna_help(self):
# set things up so there was a wanna help button click
person = mysite.profile.models.Person.objects.get(
user__username='paulproteus')
p_before = mysite.search.models.Project.create_dummy()
client = self.login_with_client()
post_to = reverse(mysite.project.views.wanna_help_do)
response = client.post(post_to, {u'project': unicode(p_before.pk)})
# Now when we GET the home page, we see a Note
# to that effect in the feed
response = client.get('/')
items = response.context[0]['recent_feed_items']
note_we_want_to_see = (
mysite.search.models.WannaHelperNote.objects.get(
person=person, project=p_before))
self.assertTrue(note_we_want_to_see in items)
class CacheMethod(TwillTests):
@mock.patch('django.core.cache.cache')
def test(self, mock_cache):
# Step 0: mock_cache.get() needs to return None
mock_cache.get.return_value = None
# Step 1: Create a method where we can test if it was cached (+ cache
# it)
class SomeClass:
def __init__(self):
self.call_counter = 0
def cache_key_getter_name(self):
return 'doodles'
@mysite.base.decorators.cache_method('cache_key_getter_name')
def some_method(self):
self.call_counter += 1
return str(self.call_counter)
# Step 2: Call it once to fill the cache
sc = SomeClass()
self.assertEqual(sc.some_method(), '1')
# Step 3: See if the cache has it now
mock_cache.set.assert_called_with(
'doodles', '{"value": "1"}', 86400 * 10)
class EnhanceNextWithNewUserMetadata(TwillTests):
def test_easy(self):
sample_input = '/'
wanted = '/?newuser=true'
got = (
mysite.base.templatetags.base_extras.
enhance_next_to_annotate_it_with_newuser_is_true(sample_input))
self.assertEqual(wanted, got)
def test_with_existing_query_string(self):
sample_input = '/?a=b'
wanted = '/?a=b&newuser=true'
got = (
mysite.base.templatetags.base_extras.
enhance_next_to_annotate_it_with_newuser_is_true(sample_input))
self.assertEqual(wanted, got)
def test_with_existing_newuser_equals_true(self):
sample_input = '/?a=b&newuser=true'
wanted = sample_input
got = (mysite.base.templatetags.base_extras.
enhance_next_to_annotate_it_with_newuser_is_true(sample_input))
self.assertEqual(wanted, got)
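# The three cases above pin the behavior down: append newuser=true to the
# query string unless it is already there. A minimal sketch consistent with
# those tests (not the real template tag in mysite.base.templatetags):
def _annotate_next_with_newuser_sketch(next_url):
    if 'newuser=true' in next_url:
        return next_url
    separator = '&' if '?' in next_url else '?'
    return next_url + separator + 'newuser=true'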
class Unsubscribe(TwillTests):
fixtures = ['user-paulproteus', 'person-paulproteus']
def test_verify_unsubscribe_token(self):
"""Generate a valid unsubscribe token. Use it. See that it works. Use
an invalid one. See that it doesn't work."""
dude = mysite.profile.models.Person.objects.get(user__username='paulproteus')
# Generate an invalid token (easiest to do this first)
plausible_but_invalid_token_string = dude.generate_new_unsubscribe_token().string
# Make that token invalid by nuking the UnsubscribeToken table
mysite.profile.models.UnsubscribeToken.objects.all().delete()
# Generate a once-valid but now-expired token
expired_token = dude.generate_new_unsubscribe_token()
just_over_three_months_ago = datetime.datetime.utcnow() - datetime.timedelta(days=91)
expired_token.created_date = just_over_three_months_ago
expired_token.save()
# Generate a valid token
valid_token_string = dude.generate_new_unsubscribe_token().string
owner = mysite.profile.models.UnsubscribeToken.whose_token_string_is_this(valid_token_string)
self.assertEqual(owner, dude)
# This should definitely be false
self.assertNotEqual(valid_token_string, plausible_but_invalid_token_string)
# The invalid token should fail
self.assertFalse(mysite.profile.models.UnsubscribeToken.whose_token_string_is_this(plausible_but_invalid_token_string))
self.assertFalse(mysite.profile.models.UnsubscribeToken.whose_token_string_is_this(expired_token.string))
def test_unsubscribe_view(self):
dude = mysite.profile.models.Person.objects.get(user__username='paulproteus')
# Generate a valid token
valid_token_string = dude.generate_new_unsubscribe_token().string
# Test that the unsubscribe view's context contains the owner
url = reverse(mysite.profile.views.unsubscribe, kwargs={'token_string': valid_token_string})
logger.debug("url %s", url)
response = self.client.get(url)
logger.debug("response %s", response)
self.assertEqual(
mysite.profile.models.Person.objects.get(),
response.context['unsubscribe_this_user'])
def test_unsubscribe_post_handler(self):
def get_dude():
return mysite.profile.models.Person.objects.get(user__username='paulproteus')
dude = get_dude()
self.assertTrue(get_dude().email_me_re_projects)
# Generate a valid token
valid_token_string = dude.generate_new_unsubscribe_token().string
self.client.post(reverse(mysite.profile.views.unsubscribe_do), {'token_string': valid_token_string})
self.assertFalse(get_dude().email_me_re_projects)
def test_submit_form(self):
def get_dude():
return mysite.profile.models.Person.objects.get(user__username='paulproteus')
dude = get_dude()
self.assertTrue(get_dude().email_me_re_projects)
# Generate a valid token
valid_token_string = dude.generate_new_unsubscribe_token().string
self.assertIsNone(twill_goto_view(mysite.profile.views.unsubscribe, kwargs={'token_string': valid_token_string}))
#TODO Figure out why tc.submit() returns a NoneType and fails
#A couple of ideas:
# South migration on MySQL
# submit is broken
# twill should leave the code base for WebTest
self.assertIsNone(tc.submit())
self.assertIsNotNone(get_dude().email_me_re_projects)
class TimestampTests(django.test.TestCase):
def test_bugzilla_urls_get_and_update_timestamp_without_errors(self):
        # List of URLs to test (from Bugzilla trackers)
urls = {
'Miro bitesized':
'http://bugzilla.pculture.org/buglist.cgi?bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field-1-0-0=bug_status&field-1-1-0=product&field-1-2-0=keywords&keywords=bitesized&product=Miro&query_format=advanced&remaction=&type-1-0-0=anyexact&type-1-1-0=anyexact&type-1-2-0=anywords&value-1-0-0=NEW%2CASSIGNED%2CREOPENED&value-1-1-0=Miro&value-1-2-0=bitesized',
'KDE bitesized':
'https://bugs.kde.org/buglist.cgi?query_format=advanced&keywords=junior-jobs&resolution=---',
'KDE documentation':
'https://bugs.kde.org/buglist.cgi?query_format=advanced&product=docs&resolution=---',
'MediaWiki bitesized':
'https://bugzilla.wikimedia.org/buglist.cgi?keywords=easy&query_format=advanced&resolution=LATER&resolution=---',
'MediaWiki documentation':
'https://bugzilla.wikimedia.org/buglist.cgi?query_format=advanced&component=Documentation&resolution=---',
'Gnome bitesized':
'https://bugzilla.gnome.org/buglist.cgi?columnlist=id&keywords=gnome-love&query_format=advanced&resolution=---',
'Mozilla bitesized':
'https://bugzilla.mozilla.org/buglist.cgi?resolution=---;status_whiteboard_type=substring;query_format=advanced;status_whiteboard=[good%20first%20bug]',
'Songbird helpwanted':
'http://bugzilla.songbirdnest.com/buglist.cgi?query_format=advanced&resolution=---&keywords=helpwanted',
'Songbird documentation':
'http://bugzilla.songbirdnest.com/buglist.cgi?query_format=advanced&component=Documentation&resolution=---',
'Apertium':
'http://bugs.apertium.org/cgi-bin/bugzilla/buglist.cgi?query_format=advanced&resolution=---',
'RTEMS':
'https://www.rtems.org/bugzilla/buglist.cgi?query_format=advanced&resolution=---',
'XOrg bitesized':
'https://bugs.freedesktop.org/buglist.cgi?query_format=advanced&keywords=janitor&resolution=---&product=xorg',
'XOrg documentation':
'https://bugs.freedesktop.org/buglist.cgi?query_format=advanced&component=Docs%2Fother&component=Documentation&component=Fonts%2Fdoc&resolution=---&product=xorg',
'Locamotion':
'http://bugs.locamotion.org/buglist.cgi?query_format=advanced&resolution=---',
'Hypertriton':
'https://hypertriton.com/bugzilla/buglist.cgi?query_format=advanced&resolution=---&product=Agar&product=EDAcious&product=FabBSD&product=FreeSG',
'pygame':
'http://pygame.motherhamster.org/bugzilla/buglist.cgi?query_format=advanced&resolution=---'
}
for url_name in urls:
logger.info('Testing %s bugs URL.' % url_name)
url = urls[url_name]
# Check there is no timestamp i.e. get zero o'clock
first_timestamp = (
mysite.base.models.Timestamp.get_timestamp_for_string(url))
self.assertEqual(first_timestamp,
mysite.base.models.Timestamp.ZERO_O_CLOCK)
# Check the timestamp of the URL can be updated
mysite.base.models.Timestamp.update_timestamp_for_string(url)
# Check the new timestamp is after zero o'clock
new_timestamp = (
mysite.base.models.Timestamp.get_timestamp_for_string(url))
self.assertTrue(new_timestamp >
mysite.base.models.Timestamp.ZERO_O_CLOCK)
# Test cases for Nagios integration
class NagiosTests(django.test.TestCase):
# Test for OK Nagios meta data return (0)
def test_nagios_meta_return_ok(self):
data = {}
data['bug_diagnostics'] = {}
my = data['bug_diagnostics']
my['Bugs last polled more than than two days + one hour ago'] = 0
my['Bugs last polled more than three days ago'] = 0
my['Bugs last polled more than three days ago (in percent)'] = 0.0
self.assertEqual(0, mysite.base.views.meta_exit_code(data))
# Test for WARNING Nagios meta data return (1)
def test_nagios_meta_return_warning(self):
data = {}
data['bug_diagnostics'] = {}
my = data['bug_diagnostics']
my['Bugs last polled more than than two days + one hour ago'] = 1
my['Bugs last polled more than three days ago'] = 0
my['Bugs last polled more than three days ago (in percent)'] = 0.0
self.assertEqual(1, mysite.base.views.meta_exit_code(data))
# Test for CRITICAL Nagios meta data return (2)
def test_nagios_meta_return_critical(self):
data = {}
data['bug_diagnostics'] = {}
my = data['bug_diagnostics']
my['Bugs last polled more than than two days + one hour ago'] = 0
my['Bugs last polled more than three days ago'] = 1
my['Bugs last polled more than three days ago (in percent)'] = 0.0
self.assertEqual(2, mysite.base.views.meta_exit_code(data))
# Test for OK Nagios weekly mail return (0)
def test_nagios_weeklymail_return_ok(self):
newtime = datetime.datetime.utcnow() - datetime.timedelta(days=4)
self.assertEqual(0, mysite.base.management.commands.nagios.Command.
send_weekly_exit_code(newtime))
# Test for OK Nagios weekly mail return (0) after send_emails is
# run as a management command
def test_nagios_weeklymail_return_ok_after_send(self):
        # Run the send_emails management command
command = mysite.profile.management.commands.send_emails.Command()
command.handle()
# Now run to see if the function sees things are ok in the
# database
self.assertEqual(0, mysite.base.management.commands.nagios.Command.
send_weekly_exit_code())
# Test for CRITICAL Nagios weekly mail return (2)
def test_nagios_weeklymail_return_critical(self):
newtime = datetime.datetime.utcnow() - datetime.timedelta(days=8)
self.assertEqual(2, mysite.base.management.commands.nagios.Command.
send_weekly_exit_code(newtime))
# Test for CRITICAL Nagios weekly mail return (2) on new database
def test_nagios_weeklymail_return_critical_newdb(self):
self.assertEqual(2, mysite.base.management.commands.nagios.Command.
send_weekly_exit_code())
# Test cases for remote command sanity checking
@skip('Skip these until jwm (or someone else) has a chance to look at them')
class RemoteCommandCheckTests(django.test.TestCase):
@mock.patch.dict('os.environ')
@mock.patch('django.contrib.auth.models.User.objects.get')
@mock.patch('subprocess.check_call')
def test_remote_command_allows_git_reset(self, mock_call, mock_user):
os.environ['SSH_ORIGINAL_COMMAND'] = (
'milestone-a/manage.py missions git_reset someuser')
mock_user.return_value = User(username='someuser')
self.assertEqual(
None,
mysite.base.management.commands.remote_command_check.Command().handle()
)
mock_call.assert_called_once_with([
'milestone-a/manage.py', 'missions', 'git_reset', 'someuser'])
@mock.patch.dict('os.environ')
@mock.patch('django.contrib.auth.models.User.objects.get')
@mock.patch('subprocess.check_call')
def test_remote_command_allows_svn_reset(self, mock_call, mock_user):
os.environ['SSH_ORIGINAL_COMMAND'] = (
'milestone-a/manage.py missions svn_reset someuser')
mock_user.return_value = User(username='someuser')
self.assertEqual(
None,
mysite.base.management.commands.remote_command_check.Command().handle()
)
mock_call.assert_called_once_with([
'milestone-a/manage.py', 'missions', 'svn_reset', 'someuser'])
@mock.patch.dict('os.environ')
@mock.patch('django.contrib.auth.models.User.objects.get')
@mock.patch('subprocess.check_call')
def test_remote_command_blocks_nonexistent_user(self, mock_call, mock_user):
os.environ['SSH_ORIGINAL_COMMAND'] = (
'milestone-a/manage.py missions svn_reset nonexistent')
def invalid_user(*args, **kwargs):
raise User.DoesNotExist
mock_user.side_effect = invalid_user
with self.assertRaises(User.DoesNotExist):
mysite.base.management.commands.remote_command_check.Command().handle()
@mock.patch.dict('os.environ')
@mock.patch('subprocess.check_call')
def test_remote_command_blocks_invalid_command(self, mock_call):
os.environ['SSH_ORIGINAL_COMMAND'] = 'echo pwned'
with self.assertRaises(CommandError):
mysite.base.management.commands.remote_command_check.Command().handle()
self.assertEqual(0, mock_call.call_count)
# Test cases for meta data generation
class MetaDataTests(django.test.TestCase):
def test_meta_data_zero_div(self):
mysite.base.views.meta_data()
def find_git_path():
maybe_git_dir = os.path.abspath(os.getcwd())
while not os.path.exists(os.path.join(maybe_git_dir, '.git')):
maybe_git_dir = os.path.abspath(os.path.join(maybe_git_dir, '..'))
if os.path.exists(os.path.join(maybe_git_dir, '.git')):
return maybe_git_dir
raise ValueError("Could not find git directory path.")
# Test that the git repository has no files that conflict with Windows
class WindowsFilesystemCompatibilityTests(unittest.TestCase):
def test(self):
# Find the base directory
dir_with_git = find_git_path()
# Get a list of files from git
files = subprocess.Popen(
['git', 'ls-files'],
shell=False,
stdout=subprocess.PIPE,
cwd=dir_with_git)
stdout, stderr = files.communicate()
file_set = set(stdout.rstrip().split('\n'))
        # Filter that file set down by constraints that would
        # apply on Windows. To that end:
        # 1. Windows filesystems are case-insensitive, so lowercasing the
        #    names must not merge any two tracked files.
        lowercased = set(
            [x.lower() for x in file_set])
        self.assertEqual(len(file_set), len(lowercased))
        # 2. Filter out any files with '?' in their names, because that is an
        #    invalid character for filenames on Windows.
        files_filtered = set(
            [x for x in file_set
             if ('?' not in x)])
        self.assertEqual(file_set, files_filtered)
class GoogleApiTests(unittest.TestCase):
def test_google_api(self):
""" Test to see if the google api is returning what we expect """
response_file_path = os.path.join(settings.MEDIA_ROOT, 'sample-data',
'google_api', 'sample_response')
with open(response_file_path, 'r') as f:
response = f.read()
# Check that latitude and longitude are returned and status is 'OK'
geocode = mysite.base.view_helpers._geocode(response_data=response)
self.assertNotEqual(geocode, None)
# Test cases for robots generation
class RenderLiveRobotsTest(django.test.TestCase):
def test_robots_with_debug_false(self):
        '''Verify that robots.txt returns robots_for_live_site.txt with
DEBUG set to False
'''
response = self.client.get('/robots.txt')
robots_text = ""
with open('mysite/base/templates/robots_for_live_site.txt', 'rU') as f:
robots_text += f.read()
self.assertEqual(response.content, robots_text)
class RenderDevRobotsTest(django.test.TestCase):
def setUp(self):
self.original_value = settings.DEBUG
settings.DEBUG = True
def test_robots_with_debug_true(self):
'''Verify that robots.txt contains text identical to that seen in
        robots_for_dev_env.txt with DEBUG set to True
'''
response = self.client.get('/robots.txt')
robots_text = ""
with open('mysite/base/templates/robots_for_dev_env.txt', 'rU') as f:
robots_text += f.read()
settings.DEBUG = False
self.assertEqual(response.content, robots_text)
def tearDown(self):
settings.DEBUG = self.original_value
| ehashman/oh-mainline | mysite/base/tests.py | Python | agpl-3.0 | 27,957 |
#encoding:utf-8
subreddit = 'Bertra'
t_channel = '@r_Bertra'
def send_post(submission, r2t):
return r2t.send_simple(submission)
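# A hedged reading of the framework contract, inferred from this module's
# shape rather than stated in it: reddit2telegram discovers channel modules
# like this one and uses the module-level `subreddit` and `t_channel` to pair
# a subreddit with a Telegram channel, calling send_post(submission, r2t) for
# each new submission it mirrors.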
| Fillll/reddit2telegram | reddit2telegram/channels/~inactive/r_bertra/app.py | Python | mit | 135 |
import pickle
import sys
from sklearn.multiclass import OneVsOneClassifier
from sklearn.svm import SVC
sys.path.append('src')
from data import get_featues, get_label
class SVMModel(object):
def __init__(self):
self.clf = OneVsOneClassifier(SVC())
self.name = 'SVM'
def get_params(self):
return self.clf.get_params()
def train(self, dframe):
X = get_featues(dframe)
y = get_label(dframe)
self.clf.fit(X, y)
def predict(self, X):
y_pred = self.clf.predict(X)
return y_pred
def save(self, fname):
with open(fname, 'wb') as ofile:
pickle.dump(self.clf, ofile, pickle.HIGHEST_PROTOCOL)
def load(self, fname):
with open(fname, 'rb') as ifile:
self.clf = pickle.load(ifile)
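# --- Usage sketch (not part of the original module) ---
# Round-trip the wrapper, assuming `dframe` already has whatever schema
# get_featues/get_label expect (the misspelling matches the import above);
# kept as a comment because that schema lives in src/data:
#
#   model = SVMModel()
#   model.train(dframe)                 # fit the one-vs-one SVC
#   preds = model.predict(get_featues(dframe))
#   model.save('svm.pkl')               # pickles only the fitted clf
#   restored = SVMModel(); restored.load('svm.pkl')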
| artofai/overcome-the-chaos | src/models/svm.py | Python | mit | 806 |
# Joshua Mazur
# AI class
import random
from copy import deepcopy
import TTT_game
class AI:
def __init__(self,level):
        # Seed the RNG from the current time for subsequent random calls
random.seed()
# Setting difficulty level
if (level == 0) or (level == 1):
self.difficulty = level # 0 for easy, 1 for medium, 2 for hard
elif (level == 2):
self.difficulty = level
self.hard_method = self.hard_next_move()
else:
errormessage = "Error setting difficulty in AI constructor.\nDifficulty given: " + str(level) + ". Needs to be either 0, 1, or 2."
print(errormessage)
exit()
print("** AI has intialized **")
def next_move(self, currentboard, currentpiece):
# Returns the next move the AI wished to make on the board
# Pre: Current board setup as a 9 character list, and the currentpiece
# Post: Returns AI's coordinate choice
# List of coordinates that are left in play
leftlist = []
for i in range(0,len(currentboard),1):
if currentboard[i] == ".":
leftlist.append(i)
# Finding move
if self.difficulty == 0: # Easy
answer = self.easy_next_move(leftlist)
elif self.difficulty == 1: # Medium
answer = self.medium_next_move(leftlist,currentboard,currentpiece)
else: # hard
self.leftlist = leftlist
self.currentboard = currentboard
self.currentpiece = currentpiece
answer = self.hard_method.next()
# Returning answer
return answer
def easy_next_move(self,leftlist):
# Method for randomly choosing next easy move
# Pre: List of coordinates that are still open
# Post: Returns coordinate randomly chosen
index = random.randrange(0,len(leftlist),1)
return leftlist[index]
def medium_next_move(self,leftlist,currentboard,currentpiece):
        # Method for choosing a random move, unless a winning move (or occasionally a block) is open.
# Pre: The list of coordinates left to choose, and the current board state
# as a nine character list
# Post: Returns the coordinate chosen.
if len(leftlist) <= 5: # Minimum number of moves for a winning move to exist
# Checking if win is possible in next move
winlist = self.win_next(leftlist,currentboard,currentpiece)
if len(winlist) > 0: # More than one winning move
index = random.randrange(0,len(winlist),1)
answer = winlist[index]
print("AI: I can win.")
else: # Currently no winning moves
# Checking if a move is necessary to block
nextpiece = "X"
if currentpiece == "X":
nextpiece = "O"
loselist = self.win_next(leftlist,currentboard,nextpiece)
if len(loselist) > 0:
                # One in four chance it will choose to block the winning move
chance = random.randrange(0,4,1)
if chance == 0:
index = random.randrange(0,len(loselist),1)
answer = loselist[index]
print("AI: Choosing to block.")
else:
# Otherwise choose randomly again
answer = self.easy_next_move(leftlist)
else:
answer = self.easy_next_move(leftlist)
else: # Minimum number of moves not yet reached
answer = self.easy_next_move(leftlist)
return answer
def hard_next_move(self):
# Method for choosing a move based on strategy
        # Pre: Expects self.leftlist, self.currentboard and self.currentpiece to be
        # set before each next() call on this generator.
        # Post: Yields the coordinate chosen.
# Variable setup
favor_list = [0,2,4,6,8] # Favor corners and center
opponentpiece = "X"
if self.currentpiece == "X":
opponentpiece = "O"
# AI went first
if len(self.leftlist) == 9:
            ## First Move (randomly choose from the favor list)
index = random.randrange(0,len(favor_list),1)
answer = favor_list[index]
if answer == 4:
havecenter = True
else:
havecenter = False
yield answer
## Second Move
if havecenter: # Starting with center
# Finding where they moved
for coor in [0,1,2,3,5,6,7,8]:
if self.currentboard[coor] == opponentpiece:
move = coor
if move % 2 == 0: # One of the corners
# Go across from them
if move == 0:
lastmove = 8
elif move == 2:
lastmove = 6
elif move == 6:
lastmove = 2
else:
lastmove = 0
yield lastmove
## Third Move
# Check if block is necessary
answer = self.check_block(self.leftlist,self.currentboard,opponentpiece)
if answer != -1:
yield answer
# If block was not necessary, move in the only other spot they did not
else:
if lastmove == 8:
if self.currentboard[5] == ".":
yield 5
elif self.currentboard[7] == ".":
yield 7
elif lastmove == 6:
if self.currentboard[3] == ".":
yield 3
elif self.currentboard[7] == ".":
yield 7
elif lastmove == 2:
if self.currentboard[5] == ".":
yield 5
elif self.currentboard[1] == ".":
yield 1
elif lastmove == 0:
if self.currentboard[1] == ".":
yield 1
elif self.currentboard[3] == ".":
yield 3
else: # Opponent did not choose corner
# Choose corner across from them
if move == 1:
choicelist = [6,8]
elif move == 5:
choicelist = [0,6]
elif move == 7:
choicelist = [0,2]
else:
choicelist = [2,8]
index = random.randrange(0,2,1)
lastmove = choicelist[index]
yield lastmove
else: # Starting from a corner
# If opponent choose center choose across from them.
if self.currentboard[4] == opponentpiece:
if answer == 0:
lastmove = 8
elif answer == 2:
lastmove = 6
elif answer == 8:
lastmove = 0
elif answer == 6:
lastmove = 2
yield lastmove
else:
# Claim center yourself
yield 4
## Third Move
# Check for win or block
win_block_answer = self.check_block(self.leftlist,self.currentboard,opponentpiece)
if win_block_answer != -1:
yield win_block_answer
else:
if answer == 0:
if self.currentboard[1] == ".":
lastmove = 1
else:
lastmove = 3
elif answer == 2:
if self.currentboard[1] == ".":
lastmove = 1
else:
lastmove = 5
elif answer == 8:
if self.currentboard[5] == ".":
lastmove = 5
else:
lastmove = 7
elif answer == 6:
if self.currentboard[7] == ".":
lastmove = 7
else:
lastmove = 3
yield lastmove
# Other Player went first
else:
## First Move
if self.currentboard[4] == ".": # Claim center if empty
yield 4
havecenter = True
            else: # Otherwise choose randomly among the favored squares still open
favor_left = list(set(favor_list) & set(self.leftlist))
index = random.randrange(0,len(favor_left),1)
lastmove = answer = favor_left[index]
yield answer
havecenter = False
## Second Move
answer = self.check_block(self.leftlist,self.currentboard,opponentpiece)
if answer != -1: # Need to block
yield answer
else: # Did not need to block
if havecenter: # Need to check for tricks
# Corner Trick
answer = self.check_corner_trick(self.leftlist,self.currentboard,opponentpiece)
if answer != -1:
yield answer
else:
# Split Trick
answer = self.check_split_trick(self.currentboard,opponentpiece)
if answer != -1:
yield answer
else:
# L Trick
answer = self.check_L_trick(self.currentboard,opponentpiece)
if answer != -1:
yield answer
else:
print("AI: Choosing randomly.")
yield self.easy_next_move(self.leftlist)
else:
# Checking bluff trick
answer = self.check_bluff_trick(self.currentboard,lastmove,opponentpiece)
if answer != -1:
yield answer
else:
print("AI: Choosing randomly.")
yield self.easy_next_move(self.leftlist)
## Rest
while True:
answer = self.check_win_block(self.leftlist,self.currentboard,self.currentpiece,opponentpiece)
if answer != -1:
yield answer
else:
print("AI: Choosing Randomly.")
yield self.easy_next_move(self.leftlist)
# hard_next_move support methods
def check_bluff_trick(self,currentboard,lastmove,opponentpiece):
# Checking whether opponent is trying to bluff using center
####### Should only be called on the second move for an AI that went second
# Pre: The current board as a nine character list, the coordinate of the AI's last move, and
# the opponent's piece symbol
# Post: Returns coordinate if trick is discovered, -1 otherwise
answer = -1
trick = False
if (lastmove == 0) and (currentboard[4] == opponentpiece) and (currentboard[8] == opponentpiece):
trick = True
choicelist = [2,6]
elif (lastmove == 2) and (currentboard[4] == opponentpiece) and (currentboard[6] == opponentpiece):
trick = True
choicelist = [0,8]
elif (lastmove == 8) and (currentboard[4] == opponentpiece) and (currentboard[0] == opponentpiece):
trick = True
choicelist = [2,6]
elif (lastmove == 6) and (currentboard[4] == opponentpiece) and (currentboard[2] == opponentpiece):
trick = True
choicelist = [0,8]
if trick:
print("AI: Assuming bluff trick.")
index = random.randrange(0,2,1)
answer = choicelist[index]
return answer
def check_L_trick(self,currentboard,opponentpiece):
# Checks to see if opponent is trying L trick
        # Pre: The currentboard as a nine character list, and the opponent's piece symbol
# Post: Returns coordinate needed to block, -1 otherwise
answer = -1
if (currentboard[1] == opponentpiece) and (currentboard[3] == opponentpiece):
if currentboard[0] == ".":
answer = 0
if (currentboard[1] == opponentpiece) and (currentboard[5] == opponentpiece):
if currentboard[2] == ".":
answer = 2
if (currentboard[3] == opponentpiece) and (currentboard[7] == opponentpiece):
if currentboard[6] == ".":
answer = 6
if (currentboard[5] == opponentpiece) and (currentboard[7] == opponentpiece):
if currentboard[8] == ".":
answer = 8
if answer != -1:
print("AI: Assuming L trick.")
return answer
def check_split_trick(self,currentboard,opponentpiece):
# Checks if opponent if trying "split" trick
# Pre: Currentboard board a nine character list, and opponent's piece symbol
# Post: Returns move if opponent is trying split trick, -1 otherwise
answer = -1
if (currentboard[1] == opponentpiece) and (currentboard[8] == opponentpiece):
if currentboard[2] == ".":
answer = 2
if (currentboard[1] == opponentpiece) and (currentboard[6] == opponentpiece):
if currentboard[0] == ".":
answer = 0
if (currentboard[5] == opponentpiece) and (currentboard[6] == opponentpiece):
if currentboard[8] == ".":
answer = 8
if (currentboard[5] == opponentpiece) and (currentboard[0] == opponentpiece):
if currentboard[2] == ".":
answer = 2
if (currentboard[2] == opponentpiece) and (currentboard[7] == opponentpiece):
if currentboard[8] == ".":
answer = 8
if (currentboard[0] == opponentpiece) and (currentboard[7] == opponentpiece):
if currentboard[6] == ".":
answer = 6
if (currentboard[2] == opponentpiece) and (currentboard[3] == opponentpiece):
if currentboard[0] == ".":
answer = 0
if (currentboard[3] == opponentpiece) and (currentboard[8] == opponentpiece):
if currentboard[6] == ".":
answer = 6
if answer != -1:
print("AI: Assuming Split trick")
return answer
def check_corner_trick(self,leftlist,currentboard,opponentpiece):
# Returns coordinate if opponent is trying to use the corner trick
# Pre: currentboard and the opponent's piece symbol
# Post: Returns coordinate if opponent is trying trick, -1 otherwise
answer = -1
trick = False
if (currentboard[0] == opponentpiece) and (currentboard[8] == opponentpiece):
trick = True
elif (currentboard[2] == opponentpiece) and (currentboard[6] == opponentpiece):
trick = True
if trick:
favor_left = list(set([1,3,5,7]) & set(leftlist))
index = random.randrange(0,len(favor_left),1)
answer = favor_left[index]
print("AI: Assuming corner trick.")
return answer
def check_win_block(self,leftlist,currentboard,currentpiece,opponentpiece):
# Finds whether a win is possible in the next move, or if a block will be necessary
        # Pre: The leftlist of available moves, the currentboard as a nine character list, and the current
# player's piece.
# Post: Returns the coordinate for a win or a block if either exists, or -1
# Checking if a win is possible in the next move
winlist = self.win_next(leftlist,currentboard,currentpiece)
        if len(winlist) > 0: # Winning move is available
index = random.randrange(0,len(winlist),1)
answer = winlist[index]
print("AI: I can win.")
else:
answer = self.check_block(leftlist,currentboard,opponentpiece)
return answer
def check_block(self,leftlist,currentboard,opponentpiece):
# Finds whether a block will be necessary on this move
        # Pre: The leftlist of available moves, the currentboard as a nine character list, and the current
# player's piece.
# Post: Returns coordinate that needs blocked, -1 otherwise.
answer = -1
# Checking if a block is needed
loselist = self.win_next(leftlist,currentboard,opponentpiece)
# Need to block
if len(loselist) > 0:
index = random.randrange(0,len(loselist),1)
answer = loselist[index]
print("AI: I need to block.")
return answer
# Common Method
def win_next(self,leftlist,currentboard,currentpiece):
        # Returns the list of coordinates from leftlist that would let currentpiece win on the next move
        # Pre: The list of coordinates left to choose, the currentboard setup as a nine character list, and
        # the current piece to be played.
        # Post: Returns the (possibly empty) list of winning coordinates.
answerlist = []
for coor in leftlist:
if coor == 0:
if (currentboard[1] == currentpiece) and (currentboard[2] == currentpiece):
answerlist.append(coor)
elif (currentboard[3] == currentpiece) and (currentboard[6] == currentpiece):
answerlist.append(coor)
elif (currentboard[4] == currentpiece) and (currentboard[8] == currentpiece):
answerlist.append(coor)
elif coor == 1:
if (currentboard[0] == currentpiece) and (currentboard[2] == currentpiece):
answerlist.append(coor)
elif (currentboard[4] == currentpiece) and (currentboard[7] == currentpiece):
answerlist.append(coor)
elif coor == 2:
if (currentboard[0] == currentpiece) and (currentboard[1] == currentpiece):
answerlist.append(coor)
elif (currentboard[4] == currentpiece) and (currentboard[6] == currentpiece):
answerlist.append(coor)
elif (currentboard[5] == currentpiece) and (currentboard[8] == currentpiece):
answerlist.append(coor)
elif coor == 3:
if (currentboard[0] == currentpiece) and (currentboard[6] == currentpiece):
answerlist.append(coor)
elif (currentboard[4] == currentpiece) and (currentboard[5] == currentpiece):
answerlist.append(coor)
elif coor == 4:
if (currentboard[0] == currentpiece) and (currentboard[8] == currentpiece):
answerlist.append(coor)
elif (currentboard[1] == currentpiece) and (currentboard[7] == currentpiece):
answerlist.append(coor)
elif (currentboard[2] == currentpiece) and (currentboard[6] == currentpiece):
answerlist.append(coor)
elif (currentboard[3] == currentpiece) and (currentboard[5] == currentpiece):
answerlist.append(coor)
elif coor == 5:
if (currentboard[3] == currentpiece) and (currentboard[4] == currentpiece):
answerlist.append(coor)
elif (currentboard[2] == currentpiece) and (currentboard[8] == currentpiece):
answerlist.append(coor)
elif coor == 6:
if (currentboard[0] == currentpiece) and (currentboard[3] == currentpiece):
answerlist.append(coor)
elif (currentboard[2] == currentpiece) and (currentboard[4] == currentpiece):
answerlist.append(coor)
elif (currentboard[7] == currentpiece) and (currentboard[8] == currentpiece):
answerlist.append(coor)
elif coor == 7:
if (currentboard[1] == currentpiece) and (currentboard[4] == currentpiece):
answerlist.append(coor)
elif (currentboard[6] == currentpiece) and (currentboard[8] == currentpiece):
answerlist.append(coor)
elif coor == 8:
if (currentboard[0] == currentpiece) and (currentboard[4] == currentpiece):
answerlist.append(coor)
elif (currentboard[2] == currentpiece) and (currentboard[5] == currentpiece):
answerlist.append(coor)
elif (currentboard[6] == currentpiece) and (currentboard[7] == currentpiece):
answerlist.append(coor)
return answerlist
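# --- Usage sketch (not part of the original module) ---
# The board is a nine character list indexed 0-8, left-to-right and
# top-to-bottom, with "." marking open squares. Runs only when this file is
# executed directly (and assumes TTT_game is importable, as the import at the
# top already requires).
if __name__ == "__main__":
    board = ["X", ".", ".",
             ".", "O", ".",
             ".", ".", "X"]
    ai = AI(1)                       # 0 easy, 1 medium, 2 hard
    move = ai.next_move(board, "O")  # square the AI wants for "O"
    print("AI plays square %d" % move)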
| carpeyoyo/Python-TicTacToe | TTT_AI.py | Python | mit | 21,641 |
#!/usr/bin/env python
"""Client actions related to administrating the client and its configuration."""
import logging
import os
import platform
import socket
import traceback
import cryptography
from cryptography.hazmat.backends import openssl
import pkg_resources
import psutil
import pytsk3
import yara
from grr_response_client import actions
from grr_response_client import communicator
from grr_response_client.client_actions import tempfiles
from grr_response_client.client_actions import timeline
from grr_response_client.unprivileged import sandbox
from grr_response_core import config
from grr_response_core.lib import config_lib
from grr_response_core.lib import queues
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import client_action as rdf_client_action
from grr_response_core.lib.rdfvalues import client_stats as rdf_client_stats
from grr_response_core.lib.rdfvalues import flows as rdf_flows
from grr_response_core.lib.rdfvalues import protodict as rdf_protodict
class Echo(actions.ActionPlugin):
"""Returns a message to the server."""
in_rdfvalue = rdf_client_action.EchoRequest
out_rdfvalues = [rdf_client_action.EchoRequest]
def Run(self, args):
self.SendReply(args)
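# For illustration, a minimal custom action following the same contract as
# Echo: declare the in/out RDF value types and emit results via SendReply.
# This is a sketch, not an action that ships with GRR.
class EchoTwice(actions.ActionPlugin):
  """Returns the same message to the server twice."""
  in_rdfvalue = rdf_client_action.EchoRequest
  out_rdfvalues = [rdf_client_action.EchoRequest]

  def Run(self, args):
    self.SendReply(args)
    self.SendReply(args)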
def GetHostnameFromClient(args):
del args # Unused.
yield rdf_protodict.DataBlob(string=socket.gethostname())
class GetHostname(actions.ActionPlugin):
"""Retrieves the host name of the client."""
out_rdfvalues = [rdf_protodict.DataBlob]
def Run(self, args):
for res in GetHostnameFromClient(args):
self.SendReply(res)
class GetPlatformInfo(actions.ActionPlugin):
"""Retrieves platform information."""
out_rdfvalues = [rdf_client.Uname]
def Run(self, unused_args):
"""Populate platform information into a Uname response."""
self.SendReply(rdf_client.Uname.FromCurrentSystem())
class Kill(actions.ActionPlugin):
"""A client action for terminating (killing) the client.
Used for testing process respawn.
"""
out_rdfvalues = [rdf_flows.GrrMessage]
def Run(self, unused_arg):
"""Run the kill."""
# Send a message back to the service to say that we are about to shutdown.
reply = rdf_flows.GrrStatus(status=rdf_flows.GrrStatus.ReturnedStatus.OK)
# Queue up the response message, jump the queue.
self.SendReply(reply, message_type=rdf_flows.GrrMessage.Type.STATUS)
# Give the http thread some time to send the reply.
self.grr_worker.Sleep(10)
# Die ourselves.
logging.info("Dying on request.")
os._exit(242) # pylint: disable=protected-access
class GetConfiguration(actions.ActionPlugin):
"""Retrieves the running configuration parameters."""
in_rdfvalue = None
out_rdfvalues = [rdf_protodict.Dict]
BLOCKED_PARAMETERS = ["Client.private_key"]
def Run(self, unused_arg):
"""Retrieve the configuration except for the blocked parameters."""
out = self.out_rdfvalues[0]()
for descriptor in config.CONFIG.type_infos:
if descriptor.name in self.BLOCKED_PARAMETERS:
value = "[Redacted]"
else:
try:
value = config.CONFIG.Get(descriptor.name, default=None)
except (config_lib.Error, KeyError, AttributeError, ValueError) as e:
logging.info("Config reading error: %s", e)
continue
if value is not None:
out[descriptor.name] = value
self.SendReply(out)
class GetLibraryVersions(actions.ActionPlugin):
"""Retrieves version information for installed libraries."""
in_rdfvalue = None
out_rdfvalues = [rdf_protodict.Dict]
def GetSSLVersion(self):
return openssl.backend.openssl_version_text()
def GetCryptographyVersion(self):
return cryptography.__version__
def GetPSUtilVersion(self):
return ".".join(map(utils.SmartUnicode, psutil.version_info))
def GetProtoVersion(self):
return pkg_resources.get_distribution("protobuf").version
def GetTSKVersion(self):
return pytsk3.TSK_VERSION_STR
def GetPyTSKVersion(self):
return pytsk3.get_version()
def GetYaraVersion(self):
return yara.YARA_VERSION
library_map = {
"pytsk": GetPyTSKVersion,
"TSK": GetTSKVersion,
"cryptography": GetCryptographyVersion,
"SSL": GetSSLVersion,
"psutil": GetPSUtilVersion,
"yara": GetYaraVersion,
}
error_str = "Unable to determine library version: %s"
def Run(self, unused_arg):
result = self.out_rdfvalues[0]()
for lib, f in self.library_map.items():
try:
result[lib] = f(self)
except Exception: # pylint: disable=broad-except
result[lib] = self.error_str % traceback.format_exc()
self.SendReply(result)
class UpdateConfiguration(actions.ActionPlugin):
"""Updates configuration parameters on the client."""
in_rdfvalue = rdf_protodict.Dict
UPDATABLE_FIELDS = {"Client.foreman_check_frequency",
"Client.server_urls",
"Client.max_post_size",
"Client.max_out_queue",
"Client.poll_min",
"Client.poll_max",
"Client.rss_max"} # pyformat: disable
def _UpdateConfig(self, filtered_arg, config_obj):
for field, value in filtered_arg.items():
config_obj.Set(field, value)
try:
config_obj.Write()
except (IOError, OSError):
pass
def Run(self, arg):
"""Does the actual work."""
try:
if self.grr_worker.client.FleetspeakEnabled():
raise ValueError("Not supported on Fleetspeak enabled clients.")
except AttributeError:
pass
smart_arg = {str(field): value for field, value in arg.items()}
disallowed_fields = [
field for field in smart_arg
if field not in UpdateConfiguration.UPDATABLE_FIELDS
]
if disallowed_fields:
raise ValueError("Received an update request for restricted field(s) %s."
% ",".join(disallowed_fields))
if platform.system() != "Windows":
# Check config validity before really applying the changes. This isn't
# implemented for our Windows clients though, whose configs are stored in
# the registry, as opposed to in the filesystem.
canary_config = config.CONFIG.CopyConfig()
# Prepare a temporary file we'll write changes to.
with tempfiles.CreateGRRTempFile(mode="w+") as temp_fd:
temp_filename = temp_fd.name
# Write canary_config changes to temp_filename.
canary_config.SetWriteBack(temp_filename)
self._UpdateConfig(smart_arg, canary_config)
try:
# Assert temp_filename is usable by loading it.
canary_config.SetWriteBack(temp_filename)
# Wide exception handling passed here from config_lib.py...
except Exception: # pylint: disable=broad-except
logging.warning("Updated config file %s is not usable.", temp_filename)
raise
# If temp_filename works, remove it (if not, it's useful for debugging).
os.unlink(temp_filename)
# The changes seem to work, so push them to the real config.
self._UpdateConfig(smart_arg, config.CONFIG)
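# Illustrative sketch (not part of the original GRR source): the
# canary-then-commit pattern from UpdateConfiguration.Run above, using a
# plain temp file. All names here are hypothetical; `os` and `logging` are
# assumed imported by this module (both are used above).
def _canary_then_commit_example(render_config, commit_config):
  """Stage a proposed config in a temp file; commit only if it re-loads."""
  import tempfile
  fd, temp_path = tempfile.mkstemp(suffix=".cfg")
  with os.fdopen(fd, "w") as temp_fd:
    temp_fd.write(render_config())  # Stage the proposed config (the canary).
  try:
    with open(temp_path) as temp_fd:  # Validation: the canary must re-load.
      temp_fd.read()
  except Exception:
    # On failure, keep the canary on disk for debugging (as Run() does above).
    logging.warning("Canary config file %s is not usable.", temp_path)
    raise
  os.unlink(temp_path)  # The canary checked out; it is no longer needed.
  commit_config()  # Only now push the validated changes to the real config.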
def GetClientInformation() -> rdf_client.ClientInformation:
return rdf_client.ClientInformation(
client_name=config.CONFIG["Client.name"],
client_binary_name=psutil.Process().name(),
client_description=config.CONFIG["Client.description"],
client_version=int(config.CONFIG["Source.version_numeric"]),
build_time=config.CONFIG["Client.build_time"],
labels=config.CONFIG.Get("Client.labels", default=None),
timeline_btime_support=timeline.BTIME_SUPPORT,
sandbox_support=sandbox.IsSandboxInitialized())
class GetClientInfo(actions.ActionPlugin):
"""Obtains information about the GRR client installed."""
out_rdfvalues = [rdf_client.ClientInformation]
def Run(self, unused_args):
self.SendReply(GetClientInformation())
class GetClientStats(actions.ActionPlugin):
"""This retrieves some stats about the GRR process."""
in_rdfvalue = rdf_client_action.GetClientStatsRequest
out_rdfvalues = [rdf_client_stats.ClientStats]
def Run(self, arg):
"""Returns the client stats."""
if arg is None:
arg = rdf_client_action.GetClientStatsRequest()
proc = psutil.Process(os.getpid())
meminfo = proc.memory_info()
boot_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(psutil.boot_time())
create_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(proc.create_time())
response = rdf_client_stats.ClientStats(
RSS_size=meminfo.rss,
VMS_size=meminfo.vms,
memory_percent=proc.memory_percent(),
bytes_received=communicator.GRR_CLIENT_RECEIVED_BYTES.GetValue(),
bytes_sent=communicator.GRR_CLIENT_SENT_BYTES.GetValue(),
create_time=create_time,
boot_time=boot_time)
response.cpu_samples = self.grr_worker.stats_collector.CpuSamplesBetween(
start_time=arg.start_time, end_time=arg.end_time)
response.io_samples = self.grr_worker.stats_collector.IOSamplesBetween(
start_time=arg.start_time, end_time=arg.end_time)
self.Send(response)
def Send(self, response):
self.SendReply(response)
class GetClientStatsAuto(GetClientStats):
"""This class is used to send the reply to a well known flow on the server."""
def Send(self, response):
self.grr_worker.SendReply(
rdf_client_stats.ClientStats.Downsampled(response),
session_id=rdfvalue.SessionID(queue=queues.STATS, flow_name="Stats"),
response_id=0,
request_id=0,
message_type=rdf_flows.GrrMessage.Type.MESSAGE,
require_fastpoll=False)
class SendStartupInfo(actions.ActionPlugin):
in_rdfvalue = None
out_rdfvalues = [rdf_client.StartupInfo]
well_known_session_id = rdfvalue.SessionID(flow_name="Startup")
def _CheckInterrogateTrigger(self) -> bool:
interrogate_trigger_path = config.CONFIG["Client.interrogate_trigger_path"]
if not interrogate_trigger_path:
logging.info(
"Client.interrogate_trigger_path not set, skipping the check.")
return False
if not os.path.exists(interrogate_trigger_path):
logging.info("Interrogate trigger file (%s) does not exist.",
interrogate_trigger_path)
return False
logging.info("Interrogate trigger file exists: %s",
interrogate_trigger_path)
    # First try to remove the file and return True only if the removal
    # is successful. This is to prevent a permission error plus a crash loop
    # from triggering an infinite number of interrogations.
try:
os.remove(interrogate_trigger_path)
except (OSError, IOError) as e:
logging.exception(
"Not triggering interrogate - failed to remove the "
"interrogate trigger file (%s): %s", interrogate_trigger_path, e)
return False
return True
def Run(self, unused_arg, ttl=None):
"""Returns the startup information."""
logging.debug("Sending startup information.")
boot_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(psutil.boot_time())
response = rdf_client.StartupInfo(
boot_time=boot_time,
client_info=GetClientInformation(),
interrogate_requested=self._CheckInterrogateTrigger(),
)
self.grr_worker.SendReply(
response,
session_id=self.well_known_session_id,
response_id=0,
request_id=0,
message_type=rdf_flows.GrrMessage.Type.MESSAGE,
require_fastpoll=False,
ttl=ttl)
| google/grr | grr/client/grr_response_client/client_actions/admin.py | Python | apache-2.0 | 11,600 |
#!/usr/bin/env python
# __BEGIN_LICENSE__
# Copyright (c) 2009-2013, United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The NGT platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# __END_LICENSE__
# Icebridge utility functions
import os, sys, datetime, time, subprocess, logging, re, hashlib, string
import errno, getpass, glob#, psutil
# The path to the ASP python files
basepath = os.path.abspath(sys.path[0])
pythonpath = os.path.abspath(basepath + '/../IceBridge') # for dev ASP
pythonpath = os.path.abspath(basepath + '/../Python') # for dev ASP (overrides the line above)
libexecpath = os.path.abspath(basepath + '/../libexec') # for packaged ASP
sys.path.insert(0, basepath) # prepend to Python path
sys.path.insert(0, pythonpath)
sys.path.insert(0, libexecpath)
#import asp_system_utils, asp_alg_utils, asp_geo_utils, asp_image_utils, asp_file_utils
#asp_system_utils.verify_python_version_is_supported()
def switchWorkDir():
'''A work directory must be set before running a qsub job, and here
we switch to it.'''
workDir = ""
if 'OIB_WORK_DIR' in os.environ:
workDir = os.environ['OIB_WORK_DIR']
if os.path.isdir(workDir):
os.chdir(workDir)
else:
raise Exception("Work directory does not exist: " + workDir)
def getUser():
'''Return the current user name.'''
return getpass.getuser()
def fullPath(script):
'''The full path to a script on the icebridge folder.'''
basepath = os.path.dirname(os.path.realpath(__file__))
return os.path.join(basepath, script)
def outputFolder(site, yyyymmdd):
'''The output folder name.'''
return site + '_' + yyyymmdd
def makeSymLink(oldFile, newFile, verbose=True):
'''Safely create a symlink'''
oldPath = os.path.abspath(oldFile)
try:
asp_system_utils.mkdir_p(os.path.dirname(newFile))
if verbose:
print("ln -s " + oldPath + " " + newFile)
os.symlink(oldPath, newFile)
except OSError, e:
if e.errno == errno.EEXIST:
os.remove(newFile)
os.symlink(oldPath, newFile)
def getSmallestFrame():
'''Return the smallest possible frame number.'''
return 0
def getLargestFrame():
'''Return the largest possible frame number.'''
return 99999999 # 100 million should be enough
def fileExtension(filename):
'''Convenience function to get the file extension.'''
return os.path.splitext(filename)[1]
def hasImageExtension(filename):
'''Return true if the file is a recognized image extension.'''
extension = fileExtension(filename).lower()
validExtensions = ['.tif', '.jpg', '.jpeg', '.ntf']
if extension in validExtensions:
return True
return False
def getRunStatsFile():
return 'runStats.txt'
def getCameraFolder(outputFolder):
return os.path.join(outputFolder, 'camera')
def getImageFolder(outputFolder):
return os.path.join(outputFolder, 'image')
def getJpegFolder(outputFolder):
return os.path.join(outputFolder, 'jpeg')
def getOrthoFolder(outputFolder):
return os.path.join(outputFolder, 'ortho')
def getFireballFolder(outputFolder):
return os.path.join(outputFolder, 'fireball')
def getCorrFireballFolder(outputFolder):
return os.path.join(outputFolder, 'corr_fireball')
def getLidarFolder(outputFolder):
return os.path.join(outputFolder, 'lidar')
def getProcessedFolder(outputFolder):
return os.path.join(outputFolder, 'processed')
def getPairedLidarFolder(lidarFolder):
return os.path.join(lidarFolder, 'paired')
def getNavFolder(outputFolder):
return os.path.join(outputFolder, 'nav')
def getNavCameraFolder(outputFolder):
return os.path.join(outputFolder, 'nav_camera')
def getLabelFolder(outputFolder):
return os.path.join(outputFolder, 'labeled')
def getConvertedLidarIndexFile(lidarFolder):
return os.path.join(lidarFolder, 'converted_lidar_index.csv')
def getPairedIndexFile(pairedFolder):
return os.path.join(pairedFolder, 'paired_lidar_index.csv')
def folderToType(folder):
'''If input is myRun/ortho, return "ortho". Same for "fireball", "lidar", etc.'''
return os.path.basename(folder)
def htmlIndexFile(folder):
'''Return the html index file for this folder (if appropriate)'''
return os.path.join(folder, folderToType(folder) + "_index.html")
def csvIndexFile(folder):
'''Return the clean csv version of the html index file for this folder (if appropriate) '''
return htmlIndexFile(folder) + ".csv"
def getJpegDateTime(filepath):
'''Get the date and time from a raw jpeg file.'''
# TODO: For some files it is probably in the name.
# Use this tool to extract the metadata
cmd = [asp_system_utils.which('gdalinfo'), filepath]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
out, err = p.communicate()
lines = out.split('\n')
for line in lines:
if 'EXIF_DateTimeOriginal' not in line:
continue
parts = line.replace('=',' ').split()
dateString = parts[1].strip().replace(':','')
timeString = parts[2].strip().replace(':','')
return (dateString, timeString)
raise Exception('Failed to read date/time from file: ' + filepath)
def getPixelSize(filepath):
'''Get the pixel size from a GeoTiff'''
cmd = [asp_system_utils.which('gdalinfo'), filepath]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
out, err = p.communicate()
lines = out.split('\n')
for line in lines:
m = re.match("^.*?Pixel\s+Size\s*=\s*\((.*?)\s*,", line)
if m:
return float(m.group(1))
# If nothing works
return -1.0
def jpegToImageFile(jpegFile, orthoFile):
'''Given AN_20121107/jpeg/2012_11_08_17415.JPG and DMS_1381721_17415_20121108_00303910.tif
create AN_20121107/image/DMS_20121108_003039_17415.tif.
This can throw an exception.'''
jpegFolder = os.path.dirname(jpegFile)
imageFolder = getImageFolder(os.path.dirname(jpegFolder))
if not os.path.exists(jpegFolder):
raise Exception("Missing " + jpegFolder)
if not os.path.exists(imageFolder):
raise Exception("Missing " + imageFolder)
if not os.path.exists(jpegFile):
raise Exception("Missing " + jpegFile)
frame = getFrameNumberFromFilename(jpegFile)
# This was the original implementation, but it can give wrong results
# when the jpeg has incorrect time.
#(dateString, timeString) = getJpegDateTime(jpegFile)
[dateString, timeString] = parseTimeStamps(orthoFile)
outputName = formFilePrefix(dateString, timeString, frame) + ".tif"
outputPath = os.path.join(imageFolder, outputName)
return outputPath
def projectionBoundsFile(folder):
return os.path.join(folder, 'projection_bounds.csv')
def readProjectionBounds(indexFile):
    '''Read projection bounds for each ortho image.'''
bounds = {}
    # Nothing to do if the index file is missing.
if not os.path.exists(indexFile):
return bounds
with open(indexFile, 'r') as f:
for line in f:
parts = line.strip().split(',')
for v in range(len(parts)):
parts[v] = parts[v].strip()
if parts[v] != "":
parts[v] = float(parts[v].strip())
if len(parts) != 6:
# Maybe when we wrote it last time we got interrupted.
# Note that the last value is just an empty space.
continue
frame = int(parts[0])
bounds[frame] = (parts[1], parts[2], parts[3], parts[4])
return bounds
def writeProjectionBounds(indexFile, bounds):
'''Write projection bounds for all images.'''
with open(indexFile, 'w') as f:
for frame in sorted(bounds.keys()):
a,b,c,d = bounds[frame]
vals = [frame, a, b, c, d]
for val in vals:
f.write(str(val) + ', ')
f.write('\n')
def readLinesInSet(fileName):
'''Read the lines from a file as elements in a set, while stripping all leading
and trailing spaces.'''
filesSet = set()
if not os.path.exists(fileName):
return filesSet
with open(fileName, 'r') as f:
for line in f:
line = line.strip()
filesSet.add(line)
return filesSet
def logFilePrefix():
return 'icebridge_batch_log'
def validFilesPrefix():
'''This one is used in multiple places.'''
return 'valid_files'
def manager_log_prefix():
return 'pleiades_manager_log'
def validFilesList(folder, startFrame, stopFrame):
    '''File containing the list of fetched files that were validated
    for the given range. Need the range so that when we validate in
parallel, we do not overwrite the same file. Later these validation
files will be merged.'''
prefix = validFilesPrefix() + '_' + str(startFrame) + '_' + str(stopFrame) + '.csv'
return os.path.join(folder, prefix)
def updateValidFilesListFromDisk(filesList, filesSet):
'''Update the current set of valid files with any new info from disk.'''
    # Nothing to do if the list file is missing.
if not os.path.exists(filesList):
return filesSet
print("Reading: " + filesList)
with open(filesList, 'r') as f:
for line in f:
line = line.strip()
filesSet.add(line)
return filesSet
def writeValidFilesList(filesList, filesSet):
'''Write the list of valid files to disk.'''
print("Writing: " + filesList)
with open(filesList, 'w') as f:
for filename in sorted(filesSet):
f.write(filename + '\n')
def readIndexFile(parsedIndexPath, prependFolder = False):
'''Read an index file having frame number, filename, and url it came from.'''
frameDict = {}
urlDict = {}
with open(parsedIndexPath, 'r') as f:
for line in f:
parts = line.strip().split(',')
if len(parts) < 3:
# Odd index file
raise Exception("Invalid index file: " + parsedIndexPath)
frameNumber = int(parts[0])
frameDict[frameNumber] = parts[1].strip()
if prependFolder:
frameDict[frameNumber] = os.path.join(os.path.dirname(parsedIndexPath),
frameDict[frameNumber])
urlDict[frameNumber] = parts[2].strip()
return (frameDict, urlDict)
def writeIndexFile(indexPath, frameDict, urlDict):
'''Write an index file, optionally with urls.'''
with open(indexPath, 'w') as f:
for frame in sorted(frameDict.keys()):
frameName = frameDict[frame]
urlName = ""
if frame in urlDict:
urlName = urlDict[frame]
f.write(str(frame) + ', ' + frameName + ', ' + urlName + '\n')
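# Illustrative sketch (not part of the original file): the index format the
# two functions above agree on is "frame, filename, url" per line. The file
# name and url below are hypothetical.
def _indexRoundTripExample(tmpPath):
    '''Write a one-entry index and read it back.'''
    frames = {123: 'DMS_20121108_003039_00123.tif'}
    urls = {123: 'https://example.invalid/DMS_20121108_003039_00123.tif'}
    writeIndexFile(tmpPath, frames, urls)
    (frameDict, urlDict) = readIndexFile(tmpPath)
    assert frameDict[123] == frames[123] and urlDict[123] == urls[123]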
def isValidImage(filename):
    '''Check that an image file is not corrupted in some way. This check is not exhaustive.'''
if not os.path.exists(filename):
return False
# Must always wipe .aux.xml. Always. Otherwise, if this function is called first time
# it may return False, but if called second time it may return True.
auxFile = filename + '.aux.xml'
if os.path.exists(auxFile):
os.remove(auxFile)
gdalinfoPath = asp_system_utils.which("gdalinfo")
cmd = gdalinfoPath + ' -stats ' + filename
if os.path.exists(auxFile):
os.remove(auxFile)
p = subprocess.Popen(cmd.split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = p.communicate()
if p.returncode != 0:
return False
if error is not None:
output += error
m = re.match("^.*?(Block\s+failed|Premature\s+end)", output,
re.IGNORECASE|re.MULTILINE|re.DOTALL)
if m:
return False
return True
def isDEM(filename):
'''Return true if a file is a recognized DEM.'''
if 'crop' in filename or 'CMAP' in filename: return False # ignore some stray files
return (len(filename) >= 8 and filename[-8:] == '_DEM.tif')
def isLidar(filename):
'''Return true if the file is an input (not converted) lidar format'''
extension = fileExtension(filename)
return (extension == '.qi') or (extension == '.hdf5') or \
(extension == '.h5') or (extension == '.TXT')
def isValidLidarCSV(filename):
    '''Check that a lidar csv file is valid. It must have at least three entries on one line.'''
if not os.path.exists(filename):
return False
with open(filename, "r") as ins:
array = []
for line in ins:
# This will help with lines which only have spaces
line = line.strip()
# Skip empty lines
if len(line) == 0:
continue
# Skip lines starting with spaces followed by #
m = re.match("^\s*\#", line)
if m:
continue
line = string.replace(line, ',', ' ')
line = string.replace(line, '\t', ' ')
vals = line.split(' ')
num = 0
for val in vals:
if len(val) == 0:
continue
num += 1
if num >= 3:
return True
else:
return False
return False
def getLidarCsvFormat(filename):
'''Returns the ASP CSV format string to use for a lidar file'''
extension = fileExtension(filename)
if extension == '.TXT': # LVIS
return '5:lat,4:lon,6:height_above_datum'
return '1:lat,2:lon,3:height_above_datum' # ATM
def getCameraGsdAndBounds(imagePath, cameraPath, logger, referenceDem=None, projString=""):
'''Compute the GSD and bounding box of a single camera.
    Use the DEM if provided, otherwise use the datum.'''
# Run GSD tool
tool = asp_system_utils.which('camera_footprint')
cmd = ('%s --quick --datum wgs84 -t nadirpinhole %s %s' %
(tool, imagePath, cameraPath))
if referenceDem:
cmd += ' --dem-file ' + referenceDem
cmd = cmd.split()
if projString:
        cmd.append('--t_srs')
cmd.append(projString)
logger.info(" ".join(cmd))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
textOutput, err = p.communicate()
logger.info(textOutput)
# Extract the gsd from the output text
m = re.findall(r"Computed mean gsd: (\d+\.*[0-9e\-]*)", textOutput)
    if len(m) != 1: # Could not parse the GSD; give up on this camera.
raise Exception('Unable to compute GSD for file: ' + cameraPath)
gsd = float(m[0])
# Extract the bounding box from the output text
print textOutput
m = re.findall(
r"Origin: \(([0-9e\-\.\+]*), ([0-9e\-\.\+]*)\) width: ([0-9e\-\.\+]*) height: ([0-9e\-\.\+]*)",
textOutput)
    if (len(m) != 1) or (len(m[0]) != 4): # Could not parse the bounds; give up.
        raise Exception('Unable to compute the bounding box for file: ' + cameraPath)
bounds = [float(x) for x in m[0]]
return (gsd, bounds)
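# Illustrative sketch (not part of the original file): how the GSD regex
# above behaves on a sample output line. The sample line is hypothetical.
def _gsdRegexExample():
    '''The regex pulls the mean GSD out of the tool output.'''
    sample = 'Computed mean gsd: 0.35'
    m = re.findall(r"Computed mean gsd: (\d+\.*[0-9e\-]*)", sample)
    assert float(m[0]) == 0.35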
def getGsdFromMapproject(imagePath, cameraPath, logger, lidarDem, referenceDem):
    '''Compute the GSD by querying mapproject.'''
# Try to compute the gsd first from the lidar dem, and if that fails,
# from the reference dem.
gsd = -1
for dem in [lidarDem, referenceDem]:
tmpOutFile = cameraPath + ".tmp.tif"
tool = asp_system_utils.which('mapproject')
cmd = ('%s %s %s %s %s --query-projection' %
(tool, dem, imagePath, cameraPath, tmpOutFile))
cmd = cmd.split()
logger.info(" ".join(cmd))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
textOutput, err = p.communicate()
logger.info(textOutput)
# Extract the gsd from the output text
m = re.findall(r"Output pixel size:\s*(.*?)\n", textOutput)
if len(m) == 1:
gsd = float(m[0])
break
if gsd == -1:
raise Exception('Unable to compute GSD for file: ' + cameraPath)
os.system("rm -f " + tmpOutFile)
return gsd
def getCorrectedFireballDems(outputFolder):
'''Get a dictionary of the corrected fireball DEMs, with path prepended to them.'''
fireballFolder = getFireballFolder(outputFolder)
corrFireballFolder = getCorrFireballFolder(outputFolder)
fireballIndexPath = csvIndexFile(fireballFolder)
if not os.path.exists(fireballIndexPath):
raise Exception("Error: Missing fireball index file: " + fireballIndexPath + ".")
(fireballFrameDict, fireballUrlDict) = \
readIndexFile(fireballIndexPath, prependFolder = True)
orthoFolder = getOrthoFolder(outputFolder)
orthoIndexPath = csvIndexFile(orthoFolder)
if not os.path.exists(orthoIndexPath):
raise Exception("Error: Missing ortho index file: " + orthoIndexPath + ".")
(orthoFrameDict, orthoUrlDict) = \
readIndexFile(orthoIndexPath, prependFolder = True)
correctedFireballFrameDict = {}
for frame in fireballFrameDict.keys():
fireballDem = fireballFrameDict[frame]
# Get the corrected one
corrDem = os.path.join(corrFireballFolder, os.path.basename(fireballDem))
# This is a bugfix. Sometimes fireball DEMs for one flight
        # actually belong to the next flight. This whole sorry story exists
        # because flights sometimes extend beyond midnight, and the
# book-keeping gets confused. This was fixed for orthos. Here, if
# a DEM has a different date than the ortho, ignore it.
if frame not in orthoFrameDict:
continue
[orthoDateString, orthoTimeString] = parseTimeStamps(orthoFrameDict[frame])
[fireballDateString, fireballTimeString] = parseTimeStamps(fireballDem)
if orthoDateString != fireballDateString:
continue
correctedFireballFrameDict[frame] = corrDem
return correctedFireballFrameDict
def getCameraGsdAndBoundsRetry(imagePath, cameraPath, logger, referenceDem, projString=""):
'''As getCameraGsd, but retry with the datum if the DEM fails.'''
try:
# Compute GSD using the DEM
# - Only need the projection string when not using a DEM
results = getCameraGsdAndBounds(imagePath, cameraPath, logger, referenceDem)
except:
# If that failed, try intersecting with the datum.
logger.info('DEM intersection failed, trying with datum...')
results = getCameraGsdAndBounds(imagePath, cameraPath, logger, None, projString)
return results
def getImageCameraPairs(imageFolder, cameraFolder, startFrame, stopFrame, logger):
'''Return a list of paired image/camera files.'''
# TODO: This is not robust. Need to create an index of all images rather than
# reading whatever is in that directory.
# Get a list of all the input files
allImageFiles = getTifs(imageFolder)
allCameraFiles = getByExtension(cameraFolder, '.tsai')
allImageFiles.sort() # Put in order so the frames line up
allCameraFiles.sort()
# Keep only the images and cameras within the given range
imageFiles = []
imageFrames = []
for image in allImageFiles:
frame = getFrameNumberFromFilename(image)
if not ( (frame >= startFrame) and (frame <= stopFrame) ):
continue
imageFiles.append(image)
imageFrames.append(frame)
cameraFiles = []
cameraFrames = []
for camera in allCameraFiles:
frame = getFrameNumberFromFilename(camera)
if not ( (frame >= startFrame) and (frame <= stopFrame) ):
continue
cameraFiles.append(camera)
cameraFrames.append(frame)
# Remove files without a matching pair
goodImages = []
goodCameras = []
for frame in imageFrames:
goodImages.append(frame in cameraFrames)
for frame in cameraFrames:
goodCameras.append(frame in imageFrames)
imageFiles = [p[0] for p in zip(imageFiles, goodImages ) if p[1]]
cameraFiles = [p[0] for p in zip(cameraFiles, goodCameras) if p[1]]
logger.info('Of %d input images in range, using %d with camera files.'
% (len(goodImages), len(imageFiles)))
if len(imageFiles) < 2:
logger.error('Not enough input pairs exist to continue, quitting!')
return []
# Get full paths
imageFiles = [os.path.join(imageFolder, f) for f in imageFiles ]
cameraFiles = [os.path.join(cameraFolder,f) for f in cameraFiles]
numFiles = len(imageFiles)
if (len(cameraFiles) != numFiles):
logger.error('process_icebridge_run.py: counted ' + str(len(imageFiles)) + \
' image files.\n' +
'and ' + str(len(cameraFiles)) + ' camera files.\n'+
'Error: Number of image files and number of camera files must match!')
return []
imageCameraPairs = zip(imageFiles, cameraFiles)
return imageCameraPairs
def batchFolderPrefix():
'''The name of the batch folder starts like this.'''
return "batch_"
def batchFolderName(startFrame, stopFrame, bundleLength):
'''The name of the folder containing processed data for given frames.'''
return ('%s%05d_%05d_%d' % (batchFolderPrefix(), startFrame, stopFrame, bundleLength))
def frameToFile(frame, suffix, processFolder, bundleLength):
'''For a given frame, find the corresponding file in the batch
folder with given suffix.'''
# We count here on the convention for writing batch folders
prefix = ('%s%05d_*_%d' % (batchFolderPrefix(), frame, bundleLength))
batchFolderGlob = os.path.join(processFolder,
prefix + '/*' + suffix)
matches = glob.glob(batchFolderGlob)
if len(matches) == 0:
#print("Error: No matches for: " + batchFolderGlob + ". Will skip this frame.")
return "", ""
if len(matches) > 1:
        # This, I believe, is an artifact of running the entire flight twice,
        # with a different value for --start-frame each time.
        # For now, just take whatever is there; at some point this needs to be sorted out.
print("Warning: Found more than one answer matching glob:" + batchFolderGlob)
print("Values are: " + " ".join(matches))
#return "", ""
return matches[0], os.path.dirname(matches[0])
def findInvalidFrames(validFilesSet, outputFolder, fileType):
'''Out of the files of a given type, find the ones that are not in
the given set of valid images.'''
if fileType == 'ortho':
dataFolder = getOrthoFolder(outputFolder)
elif fileType == 'jpeg':
dataFolder = getJpegFolder(outputFolder)
else:
raise Exception("Unknown file type: " + fileType)
    # To be able to study which files are in which set, make all paths
    # consistently absolute.
localOutputFolder = os.path.basename(outputFolder)
localFileSet = set()
for fileName in validFilesSet:
fileName = os.path.abspath(fileName)
# Correct for wrong path to folder (fragile)
#m = re.match("^.*?" + localOutputFolder + "/(.*?)$", fileName)
#if not m: continue
#fileName = os.path.abspath(os.path.join(outputFolder, m.group(1)))
localFileSet.add(fileName)
indexPath = csvIndexFile(dataFolder)
if not os.path.exists(indexPath):
raise Exception("Missing file: " + indexPath)
(frameDict, urlDict) = readIndexFile(indexPath, prependFolder = True)
badFrameDict = {}
for frame in frameDict.keys():
fileName = os.path.abspath(frameDict[frame])
# Correct for wrong path to folder (fragile)
#m = re.match("^.*?" + localOutputFolder + "/(.*?)$", fileName)
#if not m: continue
#fileName = os.path.abspath(os.path.join(outputFolder, m.group(1)))
if fileName not in localFileSet:
badFrameDict[frame] = fileName
return badFrameDict
def orthoListToRerun(validFilesSet, outputFolder, startFrame, stopFrame):
'''See for which files we need to redo ortho2pinhole.'''
invalidJpegs = findInvalidFrames(validFilesSet, outputFolder, 'jpeg')
invalidOrthos = findInvalidFrames(validFilesSet, outputFolder, 'ortho')
trackedFrames = set()
orthoList = os.path.join(outputFolder, 'orthosToRerun.txt')
with open(orthoList, 'w') as f:
for frame in sorted(invalidJpegs.keys() + invalidOrthos.keys()):
if frame in trackedFrames: continue # we already saw this
if int(frame) < startFrame or int(frame) > stopFrame: continue
trackedFrames.add(frame)
f.write(str(frame) + '\n')
return (orthoList, len(trackedFrames))
def getBatchFolderFromBatchLine(line):
'''Returns something like /path/to/AN_20111012/processed/batch_125_126_2.'''
# Extract just the desired folder name
m = re.match('^.*?\s([^\s]*?' + batchFolderPrefix() +'\d+_\d+_\d+)', line)
if m:
return m.group(1)
return ""
def getFrameRangeFromBatchFolder(folder):
    '''Returns (startFrame, endFrame) for a batch folder.
    This is also used to parse a command in a batch file.'''
# Extract just the desired folder name
m = re.match('^.*?' + batchFolderPrefix() + '([0-9]+)_([0-9]+)', folder)
if not m:
raise Exception('Failed to find batch frames in folder: ' + folder)
return (int(m.group(1)), int(m.group(2)))
def xmlFile(filename):
'''Return the matching xml file path for the input file.'''
if (len(filename) >= 8 and filename[-7:-4] == 'DEM'): # DEM.tif and DEM.tfw
#file_DEM.tif and file_DEM.tfw becomes file.xml
return filename[:-8] + '.xml'
# For other types
return filename + '.xml'
def xmlToImage(filename):
if fileExtension(filename) != '.xml':
raise Exception("Not an XML file: " + filename)
return filename[:-4]
def tfwFile(filename):
'''Return the matching tfw file path for the input file.'''
return filename[:-4] + '.tfw'
def isFloat(value):
'''Return true if the input value can be converted to a float.'''
try:
float(value)
return True
except:
return False
def hasValidChkSum(filename, logger):
    '''Some files have an xml file containing the chksum. If so, verify
its validity. This applies to orthoimages, DEMs, and tfw files.'''
#isTfw = (fileExtension(filename) == '.tfw')
if not os.path.exists(filename):
logger.info("File does not exist: " + filename)
return False
baseFile = os.path.basename(filename)
xml_file = xmlFile(filename)
if not os.path.exists(xml_file):
logger.info("File does not exist: " + xml_file)
return False
expectedChksum = ''
chkSumCount = 0
currFile = ''
with open(xml_file, "r") as xf:
for line in xf:
# There can be multiple files
m = re.match("^.*?\<DistributedFileName\>(.*?)\<", line, re.IGNORECASE)
if m:
currFile = m.group(1)
# Encompass both kinds of checksum
m = re.match("^.*?\<Checksum\>(\w+)(\||\<)", line, re.IGNORECASE)
if m:
chkSumCount += 1
# There can be multiple checksums. The file can give a hint:
if currFile != '':
if currFile == baseFile:
expectedChksum = m.group(1)
else:
# Just pick the first chksum
if chkSumCount == 1:
expectedChksum = m.group(1)
actualChksum = hashlib.md5(open(filename,'rb').read()).hexdigest()
if actualChksum != expectedChksum or actualChksum == '' or expectedChksum == '':
logger.info("Computed chksum: " + str(actualChksum) + " in " + filename)
logger.info("Expected chksum: " + str(expectedChksum) + " in " + filename)
return False
return True
def isValidTfw(filename, logger):
'''This file must have 6 lines of floats and a valid chksum.'''
if fileExtension(filename) != '.tfw':
return False
if not hasValidChkSum(filename, logger):
return False
count = 0
with open(filename, "r") as xf:
for line in xf:
line = line.strip()
if isFloat(line):
count += 1
return (count >= 6)
def parseLatitude(filename):
'''Find the <PointLatitude> value in the given file.'''
if not os.path.exists(filename):
raise Exception("Could not find file: " + filename)
latitude = None
with open(filename, "r") as xf:
for line in xf:
m = re.match("^.*?\<PointLatitude\>(.*?)\<", line, re.IGNORECASE)
if m:
latitude = float(m.group(1))
break
if latitude is None:
raise Exception("Could not parse positive or negative latitude from: " + filename)
return latitude
def getCameraFileName(imageFileName):
'''Get the camera file name we associate with an input image file.'''
return imageFileName.replace('.tif', '.tsai')
# This function works for raw images, camera tsai files, orthoimages,
# DEMs, lvis, atm1, and atm2 files.
def getFrameNumberFromFilename(filename):
# Match 2009_10_16_<several digits>.JPG
m = re.match("^.*?\d+\_\d+\_\d+\_(\d+)\.JPG", filename, re.IGNORECASE)
if m: return int(m.group(1))
# Match DMS_20111012_145559_00156.tif or .tsai (created by our python scripts)
m = re.match("^.*?DMS\_\d+\_\d+\_(\d+)\.(tif|tsai)", filename, re.IGNORECASE)
if m: return int(m.group(1))
# Match DMS_1000109_03939_20091016_23310503_V02.tif (fetched from NSIDC)
m = re.match("^.*?DMS\_\d+\_(\d+)\w+\.tif", filename, re.IGNORECASE)
if m: return int(m.group(1))
# Match IODMS3_20111018_14295436_00347_DEM.tif
m = re.match("^.*?IODMS[a-zA-Z0-9]*?\_\d+\_\d+\_(\d+)\w+DEM\.tif", filename, re.IGNORECASE)
if m: return int(m.group(1))
# Match ILVIS2_AQ2015_0929_R1605_060226.TXT
m = re.match("^.*?ILVIS.*?_(\d+)(.TXT)", filename, re.IGNORECASE)
if m: return int(m.group(1))
# Match ILATM1B_20091016_193033.atm4cT3.qi
# or ILATM1B_20160713_195419.ATM5BT5.h5
m = re.match("^.*?ILATM\w+\_\d+\_(\d+)\.\w+\.(h5|qi)", filename, re.IGNORECASE)
if m: return int(m.group(1))
raise Exception('Could not parse: ' + filename)
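# Illustrative usage (not part of the original file), built from the sample
# names documented in the comments above:
def _frameNumberExamples():
    '''Expected frame numbers for the documented filename styles.'''
    assert getFrameNumberFromFilename('2009_10_16_00123.JPG') == 123
    assert getFrameNumberFromFilename('DMS_20111012_145559_00156.tif') == 156
    assert getFrameNumberFromFilename('ILVIS2_AQ2015_0929_R1605_060226.TXT') == 60226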
def getTifs(folder, prependFolder=False):
'''Get tif files in given directory, ignoring _sub files.
This returns the files without sorting.'''
files = []
for f in os.listdir(folder):
# Skip non-image files and sub-images
ext = os.path.splitext(f)[1]
if (ext != '.tif') or ('_sub' in f) or ('pct.tif' in f) or ('_hillshade_' in f):
continue
if prependFolder:
files.append(os.path.join(folder, f))
else:
files.append(f)
return files
def getJpegs(folder):
# TODO: This function should not be used as it is not robust.
# Rather, look up the index, and read only files listed there.
'''Get jpeg files in given directory. This returns the files
without sorting or the folder name prepended to them.'''
files = []
for f in os.listdir(folder):
ext = os.path.splitext(f)[1]
if ext != '.JPG':
continue
files.append(f)
return files
def getByExtension(folder, ext):
# TODO: This function should not be used as it is not robust.
# Rather, look up the index, and read only files listed there.
'''Get files with given extension. This returns the files without
sorting or the folder name prepended to them.'''
files = []
for f in os.listdir(folder):
curr_ext = os.path.splitext(f)[1]
if ext != curr_ext:
continue
files.append(f)
return files
def getDems(folder):
# TODO: This function should not be used as it is not robust.
# Rather, look up the index, and read only files listed there.
'''Get DEM files. This returns the files without sorting or the
folder name prepended to them.'''
files = []
for f in os.listdir(folder):
if not isDEM(f):
continue
files.append(f)
return files
def getLidar(folder):
# TODO: This function should not be used as it is not robust.
# Rather, look up the index, and read only files listed there.
'''Get LIDAR files. This returns the files without sorting or the
folder name prepended to them.'''
files = []
for f in os.listdir(folder):
if not isLidar(f):
continue
files.append(f)
return files
def getMatchingFrames(inputFiles, candidateFiles):
'''Given a list of input files and candidate files,
returns a list of candidate files having the same
frame numbers as the input files in the same order.
An entry will be 'None' if there is no matching frame.'''
# Init output structure
numFiles = len(inputFiles)
outputList = []
for i in range(0,numFiles):
outputList.append(None)
numMatched = 0
# Loop through all the candidate files
for c in candidateFiles:
candidateFrame = getFrameNumberFromFilename(c)
# Compare them to each of the input files
for i in range(0,numFiles):
if outputList[i]: # Skip matched files
continue
inputFrame = getFrameNumberFromFilename(inputFiles[i])
if inputFrame == candidateFrame: # If the frames match, record the file
outputList[i] = c
numMatched += 1
if numMatched == numFiles: # Quit once all files are matched
return outputList
return outputList
def parseDateTimeStrings(dateString, timeString, useTimeFix, returnMinAndSecOnly):
'''Parse strings in the format 20110323_17433900.'''
    HUNDREDTHS_OF_SECOND_TO_MICROSECOND = 10000
year = int(dateString[0:4])
month = int(dateString[4:6])
day = int(dateString[6:8])
hour = int(timeString[0:2])
minute = int(timeString[2:4])
second = int(timeString[4:6])
if returnMinAndSecOnly:
return (minute, second)
if useTimeFix: # Some files number the minutes and seconds from 1-60!
minute = minute - 1
second = second - 1
usecond = 0
if len(timeString) > 6:
        usecond = int(timeString[6:8]) * HUNDREDTHS_OF_SECOND_TO_MICROSECOND
try:
result = datetime.datetime(year, month, day, hour, minute, second, usecond)
return result
except Exception, e:
raise Exception('Caught exception processing dateString: '
+ dateString +', timeString: ' + timeString
+'\n with values: ' + str((year, month, day, hour, minute, second, usecond))
+'\n' + str(e))
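# Illustrative usage (not part of the original file), using the format from
# the docstring above:
def _parseDateTimeExample():
    '''20110323 / 17433900 parses to 2011-03-23 17:43:39 (fraction 00).'''
    result = parseDateTimeStrings('20110323', '17433900', False, False)
    assert result == datetime.datetime(2011, 3, 23, 17, 43, 39, 0)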
def secondsSinceMidnightToHHMMSS(secondsSinceMidnight):
    '''Convert an integer number of seconds since midnight to an HHMMSS string.'''
hours, remainder = divmod(secondsSinceMidnight, 3600)
minutes, seconds = divmod(remainder, 60)
return ('%02d%02d%02d' % (hours, minutes, seconds))
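# Illustrative usage (not part of the original file):
def _hhmmssExample():
    '''60226 seconds after midnight is 16:43:46.'''
    assert secondsSinceMidnightToHHMMSS(60226) == '164346'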
def formFilePrefix(dateString, timeString, frame):
'''Form a file with given numbers. This is used in more than one place.'''
if len(timeString) > 6:
timeString = timeString[0:6] # dump the fractions of a second
return ('DMS_%s_%s_%05d') % (dateString, timeString, frame)
def parseParts(fileName):
'''This function parses pieces of a file. It is very related
to formFilePrefix(), parseTimeStamps(), and getFrameNumberFromFilename()'''
m = re.match("^(.*?)DMS_(\d+)_(\d+)_(\d+)(\..*?)$", fileName)
if not m:
return ["", "", "", "", ""]
return [m.group(1), m.group(2), m.group(3), m.group(4), m.group(5)]
def parseTimeStamps(fileName):
'''Pull two six or eight digit values from the given file name
as the time and date stamps.'''
# Start by handling ILVIS2_AQ2011_1012_R1203_049752.TXT
# Format is ILVIS1B_LOYYYY_MMDD_RYYMM_nnnnnn.xxx
# where nnnnnn is number of seconds since UTC midnight of the day the data collection started
# per http://nsidc.org/data/ilvis1b/versions/1 and
# https://nsidc.org/data/ilvis1b
m = re.match("^.*?ILVIS\d\_[A-Z][A-Z](\d\d\d\d)\_(\d\d\d\d)\_.*?\_(\d+)\.TXT",
fileName, re.IGNORECASE)
if m:
lidarDateString = m.group(1) + m.group(2)
secondsInDay = int(m.group(3))
lidarTimeString = secondsSinceMidnightToHHMMSS(secondsInDay)
return [lidarDateString, lidarTimeString]
fileName = os.path.basename(fileName)
# Now look at something like: DMS_1100106_11985_20101026_19113275.tif
m = re.match("^DMS\_(\d+)_(\d+)_(\d+)_(\d+)\.tif", fileName, re.IGNORECASE)
if m:
# The format is: a value, frame number, yyyymmdd, hhmmssss
dateString = m.group(3)
timeString = m.group(4)
return [dateString, timeString]
    # This is somewhat fragile older code handling other cases
fileName = fileName.replace('.', '_')
fileName = fileName.replace('-', '_')
parts = fileName.split('_')
imageDateString = ""
imageTimeString = ""
for part in parts:
if len(part) != 6 and len(part) != 8:
continue
if len(part) == 6:
if part < '000000' or part > '999999':
continue
if len(part) == 8:
if part < '00000000' or part > '99999999':
continue
if imageDateString == "" and len(part) == 8:
# The date must always be 8 digits (YYYYMMDD)
imageDateString = part
continue
if imageTimeString == "":
            # The time can be hhmmss or hhmmssff (ff = hundredths of a second)
imageTimeString = part
continue
if imageDateString == "":
return []
if imageTimeString == "":
return []
return [imageDateString, imageTimeString]
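# Illustrative usage (not part of the original file), using the filename
# styles documented inside parseTimeStamps above:
def _timeStampExamples():
    '''Date/time stamps recovered from the documented filename styles.'''
    # ILVIS: the seconds-in-day field 049752 converts to 13:49:12.
    assert parseTimeStamps('ILVIS2_AQ2011_1012_R1203_049752.TXT') == \
           ['20111012', '134912']
    # DMS fetched from NSIDC: yyyymmdd and hhmmssss are taken verbatim.
    assert parseTimeStamps('DMS_1100106_11985_20101026_19113275.tif') == \
           ['20101026', '19113275']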
def lidarFiles(lidarFolder):
'''Find lidar files in given folder that are plain text, so either
.TXT or .csv converted from ATM, etc. Note that the folder name is
not prepended to the file names.'''
# All files in the folder
allFiles = os.listdir(lidarFolder)
# See based on existing files if we are dealing with LVIS
isLVIS = False
for f in allFiles:
m = re.match("^.*?ILVIS.*?\d+\.TXT", f, re.IGNORECASE)
if m:
isLVIS = True
# Get just the files we converted to csv format or plain text LVIS files
lidarFiles = []
for f in allFiles:
extension = os.path.splitext(f)[1]
if 'html.csv' in f:
continue # skip index.html.csv
if (not isLVIS and extension == '.csv') or (isLVIS and extension == '.TXT'):
lidarFiles.append(f)
lidarFiles.sort()
lidarExt = '.csv'
if isLVIS:
lidarExt = '.TXT'
return (lidarFiles, lidarExt, isLVIS)
def alignFileName():
'''The name of a generated aligned DEM.'''
return 'out-align-DEM.tif'
def blendFileName():
'''The name of a generated blended DEM.'''
return 'out-blend-DEM.tif'
def orthoFileName():
'''The name of a generated ortho file.'''
return 'out-ortho.tif'
def footprintFileName():
'''The name of a generated footprint DEM.'''
return 'out-trans-footprint-DEM.tif'
def orthoPreviewFileName():
'''The name of a generated ortho preview file.'''
return 'out-ortho-PREVIEW.jpg'
def getAlignPrefix(outputFolder):
return os.path.join(outputFolder, 'align/out')
def getBundlePrefix(outputFolder):
'''The name of the prefix for the bundle-adjusted cameras.'''
return os.path.join(outputFolder, 'bundle/out')
def alignedBundleStr():
'''The name of the prefix (sans folder) for the generated
bundle-adjusted and pc_aligned camera files.'''
return 'aligned_bundle/out'
def getAlignedBundlePrefix(outputFolder):
'''The name of the prefix for the bundle-adjusted cameras.'''
return os.path.join(outputFolder, alignedBundleStr())
def lidar_pair_prefix():
return 'LIDAR_PAIR_'
def findMatchingLidarFile(imageFile, lidarFolder):
'''Given an image file, find the best lidar file to use for alignment.'''
# Look in the paired lidar folder, not the original lidar folder.
pairedFolder = getPairedLidarFolder(lidarFolder)
pairedLidarFile = getPairedIndexFile(pairedFolder)
if not os.path.exists(pairedLidarFile):
raise Exception("Missing file: " + pairedLidarFile)
(lidarDict, dummyUrlDict) = readIndexFile(pairedLidarFile, prependFolder = True)
lidarFilesIn = sorted(lidarDict.values())
lidarFiles = []
# Verify that all the expected lidar files are there!
for f in lidarFilesIn:
if os.path.exists(f):
lidarFiles.append(f)
else:
print 'WARNING: Expected paired lidar file ' + f + ' does not exist!'
if len(lidarFiles) <= 0:
raise Exception("Empty directory of pairs in " + pairedFolder)
return findMatchingLidarFileFromList(imageFile, lidarFiles)
def findMatchingLidarFileFromList(imageFile, lidarFiles):
'''Find the best matching lidar file from a list.'''
vals = parseTimeStamps(imageFile)
if len(vals) < 2:
raise Exception('Failed to parse the date and time from: ' + imageFile)
useTimeFix = False
returnMinAndSecOnly = False
imageDateTime = parseDateTimeStrings(vals[0], vals[1], useTimeFix, returnMinAndSecOnly)
#print 'INPUT = ' + str(imageDateTime)
# Search for the matching file in the lidar folder.
# - We are looking for the closest lidar time that starts BEFORE the image time.
# - It is possible for an image to span lidar files, we will address that if we need to!
bestTimeDelta = datetime.timedelta.max
bestLidarFile = 'NA'
zeroDelta = datetime.timedelta()
# First see if we need correction for sometimes seconds going from 1 to 60.
minMinSec = 60
maxMinSec = 0
for lidarPath in lidarFiles:
vals = parseTimeStamps(lidarPath)
if len(vals) < 2:
continue # ignore bad files
useTimeFix = False
returnMinAndSecOnly = True
(minute, second) = parseDateTimeStrings(vals[0], vals[1], useTimeFix, returnMinAndSecOnly)
if second < minMinSec: minMinSec = second
if second > maxMinSec: maxMinSec = second
if minute < minMinSec: minMinSec = minute
if minute > maxMinSec: maxMinSec = minute
if minMinSec <= 0 and maxMinSec >= 60:
raise Exception("The minute/second range goes from " + str(minMinSec) +
" to " + str(maxMinSec))
useTimeFix = False
if maxMinSec >= 60:
useTimeFix = True
#print 'Using lidar time fix!'
for lidarPath in lidarFiles:
vals = parseTimeStamps(lidarPath)
if len(vals) < 2:
continue # ignore bad files
try:
returnMinAndSecOnly = False
lidarDateTime = parseDateTimeStrings(vals[0], vals[1], useTimeFix, returnMinAndSecOnly)
#lidarDateTime = lidarDateTime + datetime.timedelta(hours=2, minutes=3, seconds=42) # Manual hack for flights with bad lidar times!
except Exception as e:
raise Exception('Failed to parse datetime for lidar file: ' + lidarPath + '\n' +
'Error is: ' + str(e))
#print 'THIS = ' + str(lidarDateTime)
# Compare time to the image time
timeDelta = abs(imageDateTime - lidarDateTime)
#print 'DELTA = ' + str(timeDelta)
# Select the closest lidar time
# - Since we are using the paired files, the file time is in the middle
# of the (large) file so being close to the middle should make sure the DEM
# is fully covered by LIDAR data.
if timeDelta < bestTimeDelta:
bestLidarFile = lidarPath
bestTimeDelta = timeDelta
# Normal spacing seems to be 6.5 minutes but this must vary by flight.
MAX_DELTA = datetime.timedelta(minutes=15)
if (bestLidarFile == 'NA') or (bestTimeDelta > MAX_DELTA):
errorMessage = 'Failed to find matching lidar file for image ' + imageFile
if bestLidarFile:
errorMessage += '\n--> Nearest lidar file was '+ bestLidarFile +' with delta ' + str(bestTimeDelta)
raise Exception(errorMessage)
#print bestLidarFile
#print bestTimeDelta
return bestLidarFile
def fileNonEmpty(path):
'''Make sure file exists and is non-empty.'''
return os.path.exists(path) and (os.path.getsize(path) > 0)
def fetchFile(url, outputPath):
'''Retrieve one file using curl. Return True on success.'''
# Set up the command
cookiePaths = ' -b ~/.urs_cookies -c ~/.urs_cookies '
curlOpts = ' -n -L '
cmd = 'curl ' + cookiePaths + curlOpts + url + ' > ' + outputPath
# Download the file
print cmd
p = subprocess.Popen(cmd, shell=True)
os.waitpid(p.pid, 0)
return os.path.exists(outputPath)
def partitionArray(arr, wid):
'''Partition one array into sub-arrays, each of length at most wid.'''
out = []
cur = []
start = 0
while (start < len(arr)):
if len(cur) < wid:
cur.append(arr[start])
else:
out.append(cur[:])
cur = [arr[start]]
start += 1
# Append the leftover elements
if len(cur) > 0:
out.append(cur[:])
return out
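# Illustrative usage (not part of the original file):
def _partitionExample():
    '''Arrays are split into chunks of at most the given width.'''
    assert partitionArray([1, 2, 3, 4, 5], 2) == [[1, 2], [3, 4], [5]]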
# It is faster to invoke one curl command for multiple files.
# Do not fetch files that already exist. Note that we expect
# that each file looks like outputFolder/name.<ext>,
# and each url looks like https://.../name.<ext>.
def fetchFilesInBatches(baseCurlCmd, batchSize, dryRun, outputFolder, files, urls, logger):
'''Fetch a list of files in batches using curl.'''
curlCmd = baseCurlCmd
numFiles = len(files)
if numFiles != len(urls):
raise Exception("Expecting as many files as urls.")
currentFileCount = 0
for fileIter in range(numFiles):
if not fileNonEmpty(files[fileIter]):
# Add to the command
curlCmd += ' -O ' + urls[fileIter]
currentFileCount += 1 # Number of files in the current download command
# Download the indicated files when we hit the limit or run out of files
if ( (currentFileCount >= batchSize) or (fileIter == numFiles - 1) ) and \
currentFileCount > 0:
logger.info(curlCmd)
if not dryRun:
logger.info("Saving the data in " + outputFolder)
p = subprocess.Popen(curlCmd, cwd=outputFolder, shell=True)
os.waitpid(p.pid, 0)
# Start command fresh for the next file
currentFileCount = 0
curlCmd = baseCurlCmd
# This block of code is just to get a non-blocking keyboard check!
import signal
class AlarmException(Exception):
pass
def alarmHandler(signum, frame):
raise AlarmException
def nonBlockingRawInput(prompt='', timeout=20):
'''Return a key if pressed or an empty string otherwise.
Waits for timeout, non-blocking.'''
signal.signal(signal.SIGALRM, alarmHandler)
signal.alarm(timeout)
try:
text = raw_input(prompt)
signal.alarm(0)
return text
except AlarmException:
pass # Timeout
signal.signal(signal.SIGALRM, signal.SIG_IGN)
return ''
def waitForTaskCompletionOrKeypress(taskHandles, logger = None, interactive=True, quitKey='q',
sleepTime=20):
'''Block in this function until the user presses a key or all tasks complete.'''
# Wait for all the tasks to complete
notReady = len(taskHandles)
while notReady > 0:
if interactive:
# Wait and see if the user presses a key
msg = 'Waiting on ' + str(notReady) + ' process(es), press '+str(quitKey)+'<Enter> to abort...\n'
keypress = nonBlockingRawInput(prompt=msg, timeout=sleepTime)
if keypress == quitKey:
                logger_print(logger, 'Received quit command!')
break
else:
logger_print(logger, "Waiting on " + str(notReady) + ' incomplete tasks.')
time.sleep(sleepTime)
# # As long as we have this process waiting, keep track of our resource consumption.
# cpuPercentUsage = psutil.cpu_percent()
# memInfo = psutil.virtual_memory()
# memUsed = memInfo[0] - memInfo[1]
# memPercentUsage = float(memUsed) / float(memInfo[0])
# usageMessage = ('CPU percent usage = %f, Memory percent usage = %f'
# % (cpuPercentUsage, memPercentUsage))
# logger_print(logger, usageMessage)
# Otherwise count up the tasks we are still waiting on.
notReady = 0
for task in taskHandles:
if not task.ready():
notReady += 1
return
def stopTaskPool(pool):
'''Stop remaining tasks and kill the pool.'''
PROCESS_POOL_KILL_TIMEOUT = 3
pool.close()
time.sleep(PROCESS_POOL_KILL_TIMEOUT)
pool.terminate()
pool.join()
def setUpLogger(outputFolder, logLevel, logPathPrefix):
'''Set up the root logger so all called files will write to the same output file.'''
# Generate a timestamped log file in the output folder
timestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
logName = logPathPrefix +'_'+ timestamp + '.txt'
logPath = os.path.join(outputFolder, logName)
logger = logging.getLogger() # Call with no argument to configure the root logger.
logger.setLevel(level=logLevel)
logger.propagate = False # This is a unique logger, don't copy messages to parent modules.
    # Make sure we have exactly one stream handler to mirror logging to console.
hasStreamHandler = False
for h in logger.handlers:
if 'StreamHandler' in str(h):
hasStreamHandler = True
if not hasStreamHandler:
logger.addHandler(logging.StreamHandler())
fileHandler = logging.FileHandler(logPath)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fileHandler.setFormatter(formatter)
logger.addHandler(fileHandler)
logger = logging.getLogger(__name__) # We configured root, but continue logging with the normal name.
return logger
def logger_print(logger, msg):
'''Print to logger, if present. This helps keeps all messages in sync.'''
if logger is not None:
logger.info(msg)
else:
print(msg)
# TODO: Rename to isSouth.
def checkSite(site):
'''Verify the site is legal and return True if it is in the southern hemisphere.'''
possibleSites = ['AN', 'GR', 'AL']
if site not in possibleSites:
raise Exception("Site must be either AN, GR, or AL.")
isSouth = (site == 'AN')
return isSouth
def getElevationLimits(site):
'''Return the min and max elevation expected at a given site'''
# Would it work better to compute this on a per-flight or per-DEM basis?
if site == 'AN':
return (-50, 4500)
if site == 'GR':
return (-50, 3500)
if site == 'AL':
return (-50, 3500)
def getEpsgCode(isSouth, asString=True):
'''Return EPSG code for a location. See notes in getProjString.'''
code = 3413
if isSouth:
code = 3031
if asString:
return 'EPSG:' + str(code)
return code
def getProjString(isSouth, addQuotes=False):
'''Return the correct proj string for the pole. Surrounding quotes are optional'''
# EPSG 3413 - WGS 84 / NSIDC Sea Ice Polar Stereographic North
#PROJ_STRING_NORTH = '+proj=stere +lat_0=90 +lat_ts=70 +lon_0=-45 +k=1 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs'
# EPSG 3031 - WGS 84 / Antarctic Polar Stereographic
#PROJ_STRING_SOUTH = '+proj=stere +lat_0=-90 +lat_ts=-71 +lon_0=0 +k=1 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs'
s = getEpsgCode(isSouth, asString=True)
if addQuotes:
return '"'+s+'"'
else:
return s
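# Illustrative usage (not part of the original file):
def _epsgExample():
    '''South maps to EPSG:3031 (Antarctic), north to EPSG:3413 (Arctic).'''
    assert getEpsgCode(True) == 'EPSG:3031'
    assert getEpsgCode(False, asString=False) == 3413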
def getReferenceDemName(site):
'''Returns the DEM name to use for a given location'''
# Note: The AN and GR DEMs match our projection system for those sites.
if site == 'AN':
#return 'krigged_dem_nsidc_ndv0_fill.tif' # Higher resolution
return 'ramp200dem_wgs_v2.tif' # Used to produce the orthos - EPSG 3031
if site == 'GR':
#return 'gimpdem_90m_v1.1.tif' # Higher resolution
return 'NSIDC_Grn1km_wgs84_elev.tif' # Used to produce the orthos - EPSG 3413
if site == 'AL':
# Supposedly these were produced with the SRTM map but that map
# does not seem to actually include Alaska. This may mean the NED
# map (60 meter) was used but this would require tile handling logic
# so for now we will try to use this single 300m DEM.
return 'akdem300m.tif'
# Proj string: '+proj=aea +lat_1=55 +lat_2=65 +lat_0=50 +lon_0=-154 +x_0=0 +y_0=0 +datum=WGS84 +units=m +no_defs '
def readStats(inputPath):
'''Read some info about a run. Do a little parsing.'''
if os.path.exists(inputPath):
with open(inputPath, 'r') as f:
for line in f:
if line == "":
continue
line = line.strip()
vals = line.split(",")
if len(vals) != 3:
continue
# Convert time to a float value representing minutes.
# Pre-pad with zeros for missing fields.
time_arr = ["0", "0", "0"] + vals[2].split(":")
minutes = float(time_arr[-1])/60.0 + float(time_arr[-2]) + 60.0*float(time_arr[-3])
minutes = round(10*minutes)/10
vals[2] = " " + str(minutes)
# Rm too many zeros
vals[1] = " " + str( round(float(vals[1])*10)/10.0 )
line = ",".join(vals)
return line
return "-1, -1, -1"
def readGeodiffOutput(inputPath):
'''Read in the header from a geodiff output csv file.
Returns a dictionary containing 'Max', 'Min', 'Mean', and 'StdDev'. '''
if not os.path.exists(inputPath):
raise Exception('geodiff output file ' + inputPath + ' does not exist!')
# Pack the results into a dictionary
keywords = ['Max', 'Min', 'Mean', 'StdDev']
results = {}
ext = os.path.splitext(inputPath)[1]
if ext == '.csv':
numHeaderLines = 0
with open(inputPath, 'r') as f:
for line in f:
if '#' not in line: # Quit when we go past the comment lines
break
numHeaderLines = numHeaderLines + 1
for word in keywords: # Look for the four values
if word in line:
parts = line.split(':') # Extract the number
if len(parts) != 2:
raise Exception('Error parsing geodiff line:\n' + line)
results[word] = float(parts[1])
break # Go on to the next line in the file
# For CSV files, include a count of the number of points compared.
numLines = asp_file_utils.getFileLineCount(inputPath) - numHeaderLines
results['NumDiffs'] = numLines
else: # Handle .tif files
stats = asp_image_utils.getImageStats(inputPath)[0]
results['Min' ] = stats[0]
results['Max' ] = stats[1]
results['Mean' ] = stats[2]
results['StdDev'] = stats[3]
return results
def isBatchValid(batchFolder):
'''Returns true if the given batch has produced a good output DEM.'''
# The maximum allowed distance between our DEM and the lidar file.
MAX_LIDAR_DEM_DIFF_METERS = 5
try:
diffPath = os.path.join(batchFolder, 'out-diff.csv')
results = readGeodiffOutput(diffPath)
        return (results['Mean'] <= MAX_LIDAR_DEM_DIFF_METERS)
except:
return False
def gsdToDemRes(gsd):
'''The DEM resolution is 4x the GSD.'''
GSD_RESOLUTION_MULTIPLIER = 4.0
return gsd * GSD_RESOLUTION_MULTIPLIER
# For debugging functions
#if __name__ == "__main__":
# print getFrameRangeFromBatchFolder('/home/test/batch_234_1425/')
| NeoGeographyToolkit/Tools | nsidc_upload/labels/icebridge_common.py | Python | apache-2.0 | 57,502 |
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j F Y H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j N Y'
SHORT_DATETIME_FORMAT = 'j N Y H:i:s'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
'%d.%m.%Y', '%d.%m.%y', # Swiss (fr_CH), '25.10.2006', '25.10.06'
'%Y-%m-%d', '%y-%m-%d', # '2006-10-25', '06-10-25'
# '%d %B %Y', '%d %b %Y', # '25 octobre 2006', '25 oct. 2006'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d.%m.%Y %H:%M:%S', # Swiss (fr_CH), '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M', # Swiss (fr_CH), '25.10.2006 14:30'
'%d.%m.%Y', # Swiss (fr_CH), '25.10.2006'
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = u'\xa0' # non-breaking space
NUMBER_GROUPING = 3
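# Illustrative usage sketch (not part of the original file); assumes a
# Django project with this locale active (LANGUAGE_CODE = 'fr', USE_L10N on):
#
# from django.utils import formats
# import datetime
# formats.date_format(datetime.date(2006, 10, 25)) # -> u'25 octobre 2006'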
| ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/lib/django-1.4/django/conf/locale/fr/formats.py | Python | bsd-3-clause | 1,555 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest.test import attr
from tempest.test import skip_because
class SecurityGroupRulesNegativeTestJSON(base.BaseV2ComputeTest):
_interface = 'json'
@classmethod
def setUpClass(cls):
super(SecurityGroupRulesNegativeTestJSON, cls).setUpClass()
cls.client = cls.security_groups_client
@skip_because(bug="1182384",
condition=config.TempestConfig().service_available.neutron)
@attr(type=['negative', 'smoke'])
def test_create_security_group_rule_with_non_existent_id(self):
# Negative test: Creation of Security Group rule should FAIL
        # with a non-existent Parent group id
        # Adding rules to the non-existent Security Group id
parent_group_id = data_utils.rand_int_id(start=999)
ip_protocol = 'tcp'
from_port = 22
to_port = 22
self.assertRaises(exceptions.NotFound,
self.client.create_security_group_rule,
parent_group_id, ip_protocol, from_port, to_port)
    @testtools.skipIf(config.TempestConfig().service_available.neutron,
                      "Neutron does not check the security_group_id")
@attr(type=['negative', 'smoke'])
def test_create_security_group_rule_with_invalid_id(self):
# Negative test: Creation of Security Group rule should FAIL
        # with a Parent group id which is not an integer
        # Adding rules to the non-integer Security Group id
parent_group_id = data_utils.rand_name('non_int_id')
ip_protocol = 'tcp'
from_port = 22
to_port = 22
self.assertRaises(exceptions.BadRequest,
self.client.create_security_group_rule,
parent_group_id, ip_protocol, from_port, to_port)
@attr(type=['negative', 'smoke'])
def test_create_security_group_rule_duplicate(self):
# Negative test: Create Security Group rule duplicate should fail
# Creating a Security Group to add rule to it
s_name = data_utils.rand_name('securitygroup-')
s_description = data_utils.rand_name('description-')
resp, sg = self.client.create_security_group(s_name, s_description)
self.assertEqual(200, resp.status)
# Adding rules to the created Security Group
parent_group_id = sg['id']
ip_protocol = 'tcp'
from_port = 22
to_port = 22
self.addCleanup(self.client.delete_security_group, sg['id'])
resp, rule = \
self.client.create_security_group_rule(parent_group_id,
ip_protocol,
from_port,
to_port)
self.addCleanup(self.client.delete_security_group_rule, rule['id'])
self.assertEqual(200, resp.status)
# Add the same rule to the group should fail
self.assertRaises(exceptions.BadRequest,
self.client.create_security_group_rule,
parent_group_id, ip_protocol, from_port, to_port)
@attr(type=['negative', 'smoke'])
def test_create_security_group_rule_with_invalid_ip_protocol(self):
# Negative test: Creation of Security Group rule should FAIL
# with invalid ip_protocol
# Creating a Security Group to add rule to it
s_name = data_utils.rand_name('securitygroup-')
s_description = data_utils.rand_name('description-')
resp, securitygroup = self.client.create_security_group(s_name,
s_description)
# Adding rules to the created Security Group
parent_group_id = securitygroup['id']
ip_protocol = data_utils.rand_name('999')
from_port = 22
to_port = 22
self.addCleanup(self.client.delete_security_group, securitygroup['id'])
self.assertRaises(exceptions.BadRequest,
self.client.create_security_group_rule,
parent_group_id, ip_protocol, from_port, to_port)
@attr(type=['negative', 'smoke'])
def test_create_security_group_rule_with_invalid_from_port(self):
# Negative test: Creation of Security Group rule should FAIL
# with invalid from_port
# Creating a Security Group to add rule to it
s_name = data_utils.rand_name('securitygroup-')
s_description = data_utils.rand_name('description-')
resp, securitygroup = self.client.create_security_group(s_name,
s_description)
# Adding rules to the created Security Group
parent_group_id = securitygroup['id']
ip_protocol = 'tcp'
from_port = data_utils.rand_int_id(start=65536)
to_port = 22
self.addCleanup(self.client.delete_security_group, securitygroup['id'])
self.assertRaises(exceptions.BadRequest,
self.client.create_security_group_rule,
parent_group_id, ip_protocol, from_port, to_port)
@attr(type=['negative', 'smoke'])
def test_create_security_group_rule_with_invalid_to_port(self):
# Negative test: Creation of Security Group rule should FAIL
# with invalid to_port
# Creating a Security Group to add rule to it
s_name = data_utils.rand_name('securitygroup-')
s_description = data_utils.rand_name('description-')
resp, securitygroup = self.client.create_security_group(s_name,
s_description)
# Adding rules to the created Security Group
parent_group_id = securitygroup['id']
ip_protocol = 'tcp'
from_port = 22
to_port = data_utils.rand_int_id(start=65536)
self.addCleanup(self.client.delete_security_group, securitygroup['id'])
self.assertRaises(exceptions.BadRequest,
self.client.create_security_group_rule,
parent_group_id, ip_protocol, from_port, to_port)
@attr(type=['negative', 'smoke'])
def test_create_security_group_rule_with_invalid_port_range(self):
# Negative test: Creation of Security Group rule should FAIL
# with invalid port range.
# Creating a Security Group to add rule to it.
s_name = data_utils.rand_name('securitygroup-')
s_description = data_utils.rand_name('description-')
resp, securitygroup = self.client.create_security_group(s_name,
s_description)
# Adding a rule to the created Security Group
secgroup_id = securitygroup['id']
ip_protocol = 'tcp'
from_port = 22
to_port = 21
self.addCleanup(self.client.delete_security_group, securitygroup['id'])
self.assertRaises(exceptions.BadRequest,
self.client.create_security_group_rule,
secgroup_id, ip_protocol, from_port, to_port)
@skip_because(bug="1182384",
condition=config.TempestConfig().service_available.neutron)
@attr(type=['negative', 'smoke'])
def test_delete_security_group_rule_with_non_existent_id(self):
        # Negative test: Deletion of Security Group rule should FAIL
        # with a non-existent id
non_existent_rule_id = data_utils.rand_int_id(start=999)
self.assertRaises(exceptions.NotFound,
self.client.delete_security_group_rule,
non_existent_rule_id)
class SecurityGroupRulesNegativeTestXML(SecurityGroupRulesNegativeTestJSON):
_interface = 'xml'
| eltonkevani/tempest_el_env | tempest/api/compute/security_groups/test_security_group_rules_negative.py | Python | apache-2.0 | 8,583 |
# System
import sys
# Flask
from flask import Flask, render_template, request, send_from_directory
# SQLAlchemy
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
# Utils
from utils.countrycode2toname import countrycode2toname
# Define the WSGI application object
app = Flask(__name__)
app.config['APP_NAME'] = 'POSS'
app.config['APP_VERSION'] = '1.2'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# Configurations
config = 'config'
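# A command line flag of the form --config=<module> selects an alternative
# configuration module for app.config.from_object() below.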
for argument in sys.argv:
if argument[0:len('--config=')] == '--config=':
config = str(argument[len('--config='):])
app.config.from_object(config)
if not app.config['DEBUG']:
app.jinja_env.auto_reload = False
# app.jinja_env.add_extension('utils.jinja2htmlcompress.HTMLCompress')
# Define the database object and Base model which
# is imported by modules and controllers
db = SQLAlchemy(app)
migrate = Migrate(app, db)
class Base(db.Model):
__abstract__ = True
id = db.Column(db.Integer, primary_key=True)
date_created = db.Column(db.DateTime,
default=db.func.current_timestamp(),
nullable=False)
date_modified = db.Column(db.DateTime, default=db.func.current_timestamp(),
onupdate=db.func.current_timestamp(),
nullable=False)
def __init__(self):
pass
# Error handling
@app.errorhandler(403)
def e_403(e):
return render_template(
'server_message.html',
window_title='%s %s' % (e.code, e.name),
title='%s' % e.name,
description='%s' % e.description
), e.code
@app.errorhandler(404)
def e_404(e):
return render_template(
'server_message.html',
window_title='%s %s' % (e.code, e.name),
title='%s' % e.name,
description='%s' % e.description
), e.code
@app.errorhandler(500)
def e_500(e):
try:
db.session.rollback()
except Exception:
pass
return render_template(
'server_message.html',
window_title='500 Internal Server Error',
title='Internal Server Error',
description='The server encountered an internal error and was unable to complete your request.'
), 500
@app.after_request
def db_commit(response):
db.session.commit()
return response
# Jinja2 Functions
from app.objects.utils import human_readable_size
app.jinja_env.globals.update(human_readable_size=human_readable_size)
app.jinja_env.filters['countrycode2toname'] = countrycode2toname
# Register functions
from .auth.controllers import app as auth
app.register_blueprint(auth)
from .objects.controllers import app as objects
app.register_blueprint(objects)
from .stats.controllers import app as stats
app.register_blueprint(stats)
@app.route('/robots.txt')
def overview():
return send_from_directory(app.static_folder, request.path[1:])
db.create_all()
# Create initial user if there is no user in db
from app.auth.models import User
if User.query.count() == 0:
import uuid
user = User('admin', 'admin@example.com', str(uuid.uuid4())[0:8])
user.role_admin = True
db.session.add(user)
db.session.commit()
print('''===
Welcome to your new %s installation!
A new admin account has been created.
Login: %s
Password: %s
===''' % (app.config['APP_NAME'], user.email, user.plain_password))
| fnkr/POSS | app/__init__.py | Python | mit | 3,396 |
'''This is not a valid Python module as it contains two
non-standard keywords: repeat and function. However,
by using a custom importer, and the presence of the special
import line below, these non-standard keywords will be converted
into valid Python syntax prior to execution.
'''
from __experimental__ import repeat_keyword, function_keyword # magic! :-)
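# The import above is intercepted by a custom module loader that rewrites
# the "repeat" and "function" keywords into standard Python before this
# file is compiled, as described in the module docstring.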
def normal_syntax():
'''Creates the list [4, 4, 4] by using the normal Python syntax,
with a for loop and a lambda-defined function.
'''
res = []
g = lambda x: x**2
for _ in range(3):
res.append(g(2))
return res
def experimental_syntax():
'''Creates the list [4, 4, 4] by using an experimental syntax
with the keywords "repeat" and "function", otherwise
using the same algorithm as the function called "normal_syntax".
'''
res = []
g = function x: x**2
repeat 3:
res.append(g(2))
return res
if __name__ == '__main__':
if normal_syntax() == experimental_syntax():
print("Success")
else:
print("Failure")
| aroberge/python_experiments | version4/test.py | Python | cc0-1.0 | 1,091 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
GdalUtils.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
from builtins import range
from builtins import object
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import subprocess
import platform
import re
import psycopg2
from osgeo import gdal
from osgeo import ogr
from qgis.core import (QgsApplication,
QgsVectorFileWriter,
QgsProcessingFeedback,
QgsProcessingUtils,
QgsMessageLog,
QgsSettings,
QgsCredentials,
QgsDataSourceUri)
from processing.core.ProcessingConfig import ProcessingConfig
from processing.tools.system import isWindows, isMac
try:
from osgeo import gdal # NOQA
gdalAvailable = True
except ImportError:
gdalAvailable = False
class GdalUtils(object):
GDAL_HELP_PATH = 'GDAL_HELP_PATH'
supportedRasters = None
    supportedOutputRasters = None
    consoleOutput = []  # last captured GDAL console output, set by runGdal()
@staticmethod
def runGdal(commands, feedback=None):
if feedback is None:
feedback = QgsProcessingFeedback()
envval = os.getenv('PATH')
# We need to give some extra hints to get things picked up on OS X
isDarwin = False
try:
isDarwin = platform.system() == 'Darwin'
except IOError: # https://travis-ci.org/m-kuhn/QGIS#L1493-L1526
pass
if isDarwin and os.path.isfile(os.path.join(QgsApplication.prefixPath(), "bin", "gdalinfo")):
# Looks like there's a bundled gdal. Let's use it.
os.environ['PATH'] = "{}{}{}".format(os.path.join(QgsApplication.prefixPath(), "bin"), os.pathsep, envval)
os.environ['DYLD_LIBRARY_PATH'] = os.path.join(QgsApplication.prefixPath(), "lib")
else:
# Other platforms should use default gdal finder codepath
settings = QgsSettings()
path = settings.value('/GdalTools/gdalPath', '')
if not path.lower() in envval.lower().split(os.pathsep):
envval += '{}{}'.format(os.pathsep, path)
os.putenv('PATH', envval)
fused_command = ' '.join([str(c) for c in commands])
QgsMessageLog.logMessage(fused_command, 'Processing', QgsMessageLog.INFO)
feedback.pushInfo('GDAL command:')
feedback.pushCommandInfo(fused_command)
feedback.pushInfo('GDAL command output:')
success = False
retry_count = 0
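        # Retry the command on IOError; after 5 retries the error is re-raised.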
while not success:
loglines = []
loglines.append('GDAL execution console output')
try:
with subprocess.Popen(
fused_command,
shell=True,
stdout=subprocess.PIPE,
stdin=subprocess.DEVNULL,
stderr=subprocess.STDOUT,
universal_newlines=True,
) as proc:
for line in proc.stdout:
feedback.pushConsoleInfo(line)
loglines.append(line)
success = True
except IOError as e:
if retry_count < 5:
retry_count += 1
else:
raise IOError(
str(e) + u'\nTried 5 times without success. Last iteration stopped after reading {} line(s).\nLast line(s):\n{}'.format(
len(loglines), u'\n'.join(loglines[-10:])))
QgsMessageLog.logMessage('\n'.join(loglines), 'Processing', QgsMessageLog.INFO)
GdalUtils.consoleOutput = loglines
@staticmethod
def getConsoleOutput():
return GdalUtils.consoleOutput
@staticmethod
def getSupportedRasters():
if not gdalAvailable:
return {}
if GdalUtils.supportedRasters is not None:
return GdalUtils.supportedRasters
if gdal.GetDriverCount() == 0:
gdal.AllRegister()
GdalUtils.supportedRasters = {}
GdalUtils.supportedOutputRasters = {}
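        # Seed GTiff up front so a default raster format is always available.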
GdalUtils.supportedRasters['GTiff'] = ['tif']
GdalUtils.supportedOutputRasters['GTiff'] = ['tif']
for i in range(gdal.GetDriverCount()):
driver = gdal.GetDriver(i)
if driver is None:
continue
shortName = driver.ShortName
metadata = driver.GetMetadata()
if gdal.DCAP_RASTER not in metadata \
or metadata[gdal.DCAP_RASTER] != 'YES':
continue
if gdal.DMD_EXTENSION in metadata:
extensions = metadata[gdal.DMD_EXTENSION].split('/')
if extensions:
GdalUtils.supportedRasters[shortName] = extensions
# Only creatable rasters can be referenced in output rasters
if ((gdal.DCAP_CREATE in metadata
and metadata[gdal.DCAP_CREATE] == 'YES')
or (gdal.DCAP_CREATECOPY in metadata
and metadata[gdal.DCAP_CREATECOPY] == 'YES')):
GdalUtils.supportedOutputRasters[shortName] = extensions
return GdalUtils.supportedRasters
@staticmethod
def getSupportedOutputRasters():
if not gdalAvailable:
return {}
if GdalUtils.supportedOutputRasters is not None:
return GdalUtils.supportedOutputRasters
else:
GdalUtils.getSupportedRasters()
return GdalUtils.supportedOutputRasters
@staticmethod
def getSupportedRasterExtensions():
allexts = ['tif']
for exts in list(GdalUtils.getSupportedRasters().values()):
for ext in exts:
if ext not in allexts and ext != '':
allexts.append(ext)
return allexts
@staticmethod
def getSupportedOutputRasterExtensions():
allexts = ['tif']
for exts in list(GdalUtils.getSupportedOutputRasters().values()):
for ext in exts:
if ext not in allexts and ext != '':
allexts.append(ext)
return allexts
@staticmethod
def getVectorDriverFromFileName(filename):
ext = os.path.splitext(filename)[1]
if ext == '':
return 'ESRI Shapefile'
formats = QgsVectorFileWriter.supportedFiltersAndFormats()
for format in formats:
if ext in format.filterString:
return format.driverName
return 'ESRI Shapefile'
@staticmethod
def getFormatShortNameFromFilename(filename):
ext = filename[filename.rfind('.') + 1:]
supported = GdalUtils.getSupportedRasters()
for name in list(supported.keys()):
exts = supported[name]
if ext in exts:
return name
return 'GTiff'
@staticmethod
def escapeAndJoin(strList):
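        # Quote any argument containing a space (unless it begins with '-')
        # and escape embedded backslashes/quotes, e.g.
        # ['-of', 'GTiff', 'my file.tif'] -> '-of GTiff "my file.tif"'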
joined = ''
for s in strList:
if not isinstance(s, str):
s = str(s)
if s and s[0] != '-' and ' ' in s:
escaped = '"' + s.replace('\\', '\\\\').replace('"', '\\"') \
+ '"'
else:
escaped = s
if escaped is not None:
joined += escaped + ' '
return joined.strip()
@staticmethod
def version():
return int(gdal.VersionInfo('VERSION_NUM'))
@staticmethod
def readableVersion():
return gdal.VersionInfo('RELEASE_NAME')
@staticmethod
def gdalHelpPath():
helpPath = ProcessingConfig.getSetting(GdalUtils.GDAL_HELP_PATH)
if helpPath is None:
if isWindows():
pass
elif isMac():
pass
else:
searchPaths = ['/usr/share/doc/libgdal-doc/gdal']
for path in searchPaths:
if os.path.exists(path):
helpPath = os.path.abspath(path)
break
return helpPath if helpPath is not None else 'http://www.gdal.org/'
@staticmethod
def ogrConnectionString(uri, context):
"""Generates OGR connection string from layer source
"""
return GdalUtils.ogrConnectionStringAndFormat(uri, context)[0]
@staticmethod
def ogrConnectionStringAndFormat(uri, context):
"""Generates OGR connection string and format string from layer source
Returned values are a tuple of the connection string and format string
"""
ogrstr = None
format = None
layer = QgsProcessingUtils.mapLayerFromString(uri, context, False)
if layer is None:
path, ext = os.path.splitext(uri)
format = QgsVectorFileWriter.driverForExtension(ext)
return '"' + uri + '"', '"' + format + '"'
provider = layer.dataProvider().name()
if provider == 'spatialite':
# dbname='/geodata/osm_ch.sqlite' table="places" (Geometry) sql=
regex = re.compile("dbname='(.+)'")
r = regex.search(str(layer.source()))
ogrstr = r.groups()[0]
format = 'SQLite'
elif provider == 'postgres':
# dbname='ktryjh_iuuqef' host=spacialdb.com port=9999
# user='ktryjh_iuuqef' password='xyqwer' sslmode=disable
# key='gid' estimatedmetadata=true srid=4326 type=MULTIPOLYGON
# table="t4" (geom) sql=
dsUri = QgsDataSourceUri(layer.dataProvider().dataSourceUri())
conninfo = dsUri.connectionInfo()
conn = None
ok = False
while not conn:
try:
conn = psycopg2.connect(dsUri.connectionInfo())
except psycopg2.OperationalError:
(ok, user, passwd) = QgsCredentials.instance().get(conninfo, dsUri.username(), dsUri.password())
if not ok:
break
dsUri.setUsername(user)
dsUri.setPassword(passwd)
if not conn:
raise RuntimeError('Could not connect to PostgreSQL database - check connection info')
if ok:
QgsCredentials.instance().put(conninfo, user, passwd)
ogrstr = "PG:%s" % dsUri.connectionInfo()
format = 'PostgreSQL'
elif provider == "oracle":
# OCI:user/password@host:port/service:table
dsUri = QgsDataSourceUri(layer.dataProvider().dataSourceUri())
ogrstr = "OCI:"
if dsUri.username() != "":
ogrstr += dsUri.username()
if dsUri.password() != "":
ogrstr += "/" + dsUri.password()
delim = "@"
if dsUri.host() != "":
ogrstr += delim + dsUri.host()
delim = ""
if dsUri.port() != "" and dsUri.port() != '1521':
ogrstr += ":" + dsUri.port()
ogrstr += "/"
if dsUri.database() != "":
ogrstr += dsUri.database()
elif dsUri.database() != "":
ogrstr += delim + dsUri.database()
if ogrstr == "OCI:":
raise RuntimeError('Invalid oracle data source - check connection info')
ogrstr += ":"
if dsUri.schema() != "":
ogrstr += dsUri.schema() + "."
ogrstr += dsUri.table()
format = 'OCI'
else:
ogrstr = str(layer.source()).split("|")[0]
path, ext = os.path.splitext(ogrstr)
format = QgsVectorFileWriter.driverForExtension(ext)
return '"' + ogrstr + '"', '"' + format + '"'
@staticmethod
def ogrLayerName(uri):
uri = uri.strip('"')
#if os.path.isfile(uri):
# return os.path.basename(os.path.splitext(uri)[0])
if ' table=' in uri:
# table="schema"."table"
re_table_schema = re.compile(' table="([^"]*)"\\."([^"]*)"')
r = re_table_schema.search(uri)
if r:
return r.groups()[0] + '.' + r.groups()[1]
# table="table"
re_table = re.compile(' table="([^"]*)"')
r = re_table.search(uri)
if r:
return r.groups()[0]
elif 'layername' in uri:
regex = re.compile('(layername=)([^|]*)')
r = regex.search(uri)
return r.groups()[1]
fields = uri.split('|')
basePath = fields[0]
fields = fields[1:]
layerid = 0
for f in fields:
if f.startswith('layername='):
return f.split('=')[1]
if f.startswith('layerid='):
layerid = int(f.split('=')[1])
ds = ogr.Open(basePath)
if not ds:
return None
ly = ds.GetLayer(layerid)
if not ly:
return None
name = ly.GetName()
ds = None
return name
| nirvn/QGIS | python/plugins/processing/algs/gdal/GdalUtils.py | Python | gpl-2.0 | 14,018 |
import os
import jinja2
import webapp2
from src.lib.TaskHandler import TaskHandler
from src.lib.TasksHandler import TasksHandler
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
class MainHandler(webapp2.RequestHandler):
def get(self):
template = JINJA_ENVIRONMENT.get_template('index.html')
self.response.write(template.render())
app = webapp2.WSGIApplication([
('/', MainHandler),
('/task', TaskHandler),
('/tasks', TasksHandler),
], debug=True) | EduardoCopat/TaskList-angular-gae-python | main.py | Python | mit | 602 |
# -*- coding: utf-8 -*-
import babel
from dateutil.relativedelta import relativedelta
import itertools
import json
from odoo import fields, _, models
from odoo.tools import float_round
from odoo.addons.web.controllers.main import clean_action
DEFAULT_MONTH_RANGE = 3
class Project(models.Model):
_inherit = 'project.project'
def _qweb_prepare_qcontext(self, view_id, domain):
values = super()._qweb_prepare_qcontext(view_id, domain)
projects = self.search(domain)
values.update(projects._plan_prepare_values())
values['actions'] = projects._plan_prepare_actions(values)
return values
def _plan_prepare_values(self):
currency = self.env.user.company_id.currency_id
uom_hour = self.env.ref('uom.product_uom_hour')
hour_rounding = uom_hour.rounding
billable_types = ['non_billable', 'non_billable_project', 'billable_time', 'billable_fixed']
values = {
'projects': self,
'currency': currency,
'timesheet_domain': [('project_id', 'in', self.ids)],
'stat_buttons': self._plan_get_stat_button(),
}
#
# Hours, Rates and Profitability
#
dashboard_values = {
'hours': dict.fromkeys(billable_types + ['total'], 0.0),
'rates': dict.fromkeys(billable_types + ['total'], 0.0),
'profit': {
'invoiced': 0.0,
'to_invoice': 0.0,
'cost': 0.0,
'total': 0.0,
}
}
# hours (from timesheet) and rates (by billable type)
dashboard_domain = [('project_id', 'in', self.ids), ('timesheet_invoice_type', '!=', False)] # force billable type
dashboard_data = self.env['account.analytic.line'].read_group(dashboard_domain, ['unit_amount', 'timesheet_invoice_type'], ['timesheet_invoice_type'])
dashboard_total_hours = sum([data['unit_amount'] for data in dashboard_data])
for data in dashboard_data:
billable_type = data['timesheet_invoice_type']
dashboard_values['hours'][billable_type] = float_round(data.get('unit_amount'), precision_rounding=hour_rounding)
dashboard_values['hours']['total'] += float_round(data.get('unit_amount'), precision_rounding=hour_rounding)
# rates
rate = round(data.get('unit_amount') / dashboard_total_hours * 100, 2) if dashboard_total_hours else 0.0
dashboard_values['rates'][billable_type] = rate
dashboard_values['rates']['total'] += rate
# profitability, using profitability SQL report
profit = dict.fromkeys(['invoiced', 'to_invoice', 'cost', 'expense_cost', 'expense_amount_untaxed_invoiced', 'total'], 0.0)
profitability_raw_data = self.env['project.profitability.report'].read_group([('project_id', 'in', self.ids)], ['project_id', 'amount_untaxed_to_invoice', 'amount_untaxed_invoiced', 'timesheet_cost', 'expense_cost', 'expense_amount_untaxed_invoiced'], ['project_id'])
for data in profitability_raw_data:
profit['invoiced'] += data.get('amount_untaxed_invoiced', 0.0)
profit['to_invoice'] += data.get('amount_untaxed_to_invoice', 0.0)
profit['cost'] += data.get('timesheet_cost', 0.0)
profit['expense_cost'] += data.get('expense_cost', 0.0)
profit['expense_amount_untaxed_invoiced'] += data.get('expense_amount_untaxed_invoiced', 0.0)
profit['total'] = sum([profit[item] for item in profit.keys()])
dashboard_values['profit'] = profit
values['dashboard'] = dashboard_values
#
# Time Repartition (per employee per billable types)
#
user_ids = self.env['project.task'].sudo().search_read([('project_id', 'in', self.ids), ('user_id', '!=', False)], ['user_id'])
user_ids = [user_id['user_id'][0] for user_id in user_ids]
employee_ids = self.env['res.users'].sudo().search_read([('id', 'in', user_ids)], ['employee_ids'])
# flatten the list of list
employee_ids = list(itertools.chain.from_iterable([employee_id['employee_ids'] for employee_id in employee_ids]))
employees = self.env['hr.employee'].sudo().browse(employee_ids) | self.env['account.analytic.line'].search([('project_id', 'in', self.ids)]).mapped('employee_id')
repartition_domain = [('project_id', 'in', self.ids), ('employee_id', '!=', False), ('timesheet_invoice_type', '!=', False)] # force billable type
repartition_data = self.env['account.analytic.line'].read_group(repartition_domain, ['employee_id', 'timesheet_invoice_type', 'unit_amount'], ['employee_id', 'timesheet_invoice_type'], lazy=False)
# set repartition per type per employee
repartition_employee = {}
for employee in employees:
repartition_employee[employee.id] = dict(
employee_id=employee.id,
employee_name=employee.name,
non_billable_project=0.0,
non_billable=0.0,
billable_time=0.0,
billable_fixed=0.0,
total=0.0,
)
for data in repartition_data:
employee_id = data['employee_id'][0]
repartition_employee.setdefault(employee_id, dict(
employee_id=data['employee_id'][0],
employee_name=data['employee_id'][1],
non_billable_project=0.0,
non_billable=0.0,
billable_time=0.0,
billable_fixed=0.0,
total=0.0,
))[data['timesheet_invoice_type']] = float_round(data.get('unit_amount', 0.0), precision_rounding=hour_rounding)
repartition_employee[employee_id]['__domain_' + data['timesheet_invoice_type']] = data['__domain']
# compute total
for employee_id, vals in repartition_employee.items():
repartition_employee[employee_id]['total'] = sum([vals[inv_type] for inv_type in billable_types])
hours_per_employee = [repartition_employee[employee_id]['total'] for employee_id in repartition_employee]
values['repartition_employee_max'] = (max(hours_per_employee) if hours_per_employee else 1) or 1
values['repartition_employee'] = repartition_employee
#
# Table grouped by SO / SOL / Employees
#
timesheet_forecast_table_rows = self._table_get_line_values()
if timesheet_forecast_table_rows:
values['timesheet_forecast_table'] = timesheet_forecast_table_rows
return values
    def _table_get_line_values(self):
        """ Return the header and row information of the table """
if not self:
return False
uom_hour = self.env.ref('uom.product_uom_hour')
# build SQL query and fetch raw data
query, query_params = self._table_rows_sql_query()
self.env.cr.execute(query, query_params)
raw_data = self.env.cr.dictfetchall()
rows_employee = self._table_rows_get_employee_lines(raw_data)
default_row_vals = self._table_row_default()
empty_line_ids, empty_order_ids = self._table_get_empty_so_lines()
# extract row labels
sale_line_ids = set()
sale_order_ids = set()
for key_tuple, row in rows_employee.items():
if row[0]['sale_line_id']:
sale_line_ids.add(row[0]['sale_line_id'])
if row[0]['sale_order_id']:
sale_order_ids.add(row[0]['sale_order_id'])
sale_order_lines = self.env['sale.order.line'].sudo().browse(sale_line_ids | empty_line_ids)
map_so_names = {so.id: so.name for so in self.env['sale.order'].sudo().browse(sale_order_ids | empty_order_ids)}
map_sol = {sol.id: sol for sol in sale_order_lines}
map_sol_names = {sol.id: sol.name.split('\n')[0] if sol.name else _('No Sales Order Line') for sol in sale_order_lines}
map_sol_so = {sol.id: sol.order_id.id for sol in sale_order_lines}
        rows_sale_line = {} # (so, sol) -> [INFO, before, M1, M2, M3, Done, Sold, Remaining]
for sale_line_id in empty_line_ids: # add service SO line having no timesheet
sale_line_row_key = (map_sol_so.get(sale_line_id), sale_line_id)
sale_line = map_sol.get(sale_line_id)
is_milestone = sale_line.product_id.invoice_policy == 'delivery' and sale_line.product_id.service_type == 'manual' if sale_line else False
rows_sale_line[sale_line_row_key] = [{'label': map_sol_names.get(sale_line_id, _('No Sales Order Line')), 'res_id': sale_line_id, 'res_model': 'sale.order.line', 'type': 'sale_order_line', 'is_milestone': is_milestone}] + default_row_vals[:]
if not is_milestone:
rows_sale_line[sale_line_row_key][-2] = sale_line.product_uom._compute_quantity(sale_line.product_uom_qty, uom_hour, raise_if_failure=False) if sale_line else 0.0
for row_key, row_employee in rows_employee.items():
sale_line_id = row_key[1]
sale_order_id = row_key[0]
# sale line row
sale_line_row_key = (sale_order_id, sale_line_id)
if sale_line_row_key not in rows_sale_line:
sale_line = map_sol.get(sale_line_id, self.env['sale.order.line'])
is_milestone = sale_line.product_id.invoice_policy == 'delivery' and sale_line.product_id.service_type == 'manual' if sale_line else False
                rows_sale_line[sale_line_row_key] = [{'label': map_sol_names.get(sale_line.id) if sale_line else _('No Sales Order Line'), 'res_id': sale_line_id, 'res_model': 'sale.order.line', 'type': 'sale_order_line', 'is_milestone': is_milestone}] + default_row_vals[:] # INFO, before, M1, M2, M3, Done, Sold, Remaining
if not is_milestone:
rows_sale_line[sale_line_row_key][-2] = sale_line.product_uom._compute_quantity(sale_line.product_uom_qty, uom_hour, raise_if_failure=False) if sale_line else 0.0
for index in range(len(rows_employee[row_key])):
if index != 0:
rows_sale_line[sale_line_row_key][index] += rows_employee[row_key][index]
if not rows_sale_line[sale_line_row_key][0].get('is_milestone'):
rows_sale_line[sale_line_row_key][-1] = rows_sale_line[sale_line_row_key][-2] - rows_sale_line[sale_line_row_key][5]
else:
rows_sale_line[sale_line_row_key][-1] = 0
        rows_sale_order = {} # so -> [INFO, before, M1, M2, M3, Done, Sold, Remaining]
        # dict.fromkeys() with a dict default would share one mutable dict across
        # every key, so the += updates below would leak between sale orders;
        # build an independent dict per key instead.
        rows_sale_order_done_sold = {so_id: dict(sold=0.0, done=0.0) for so_id in set(map_sol_so.values()) | set([None])} # SO id -> {'sold':0.0, 'done': 0.0}
for row_key, row_sale_line in rows_sale_line.items():
sale_order_id = row_key[0]
# sale order row
if sale_order_id not in rows_sale_order:
                rows_sale_order[sale_order_id] = [{'label': map_so_names.get(sale_order_id, _('No Sales Order')), 'res_id': sale_order_id, 'res_model': 'sale.order', 'type': 'sale_order'}] + default_row_vals[:] # INFO, before, M1, M2, M3, Done, Sold, Remaining
for index in range(len(rows_sale_line[row_key])):
if index != 0:
rows_sale_order[sale_order_id][index] += rows_sale_line[row_key][index]
# do not sum the milestone SO line for sold and done (for remaining computation)
if not rows_sale_line[row_key][0].get('is_milestone'):
rows_sale_order_done_sold[sale_order_id]['sold'] += rows_sale_line[row_key][-2]
rows_sale_order_done_sold[sale_order_id]['done'] += rows_sale_line[row_key][5]
# remaining computation of SO row, as Sold - Done (timesheet total)
for sale_order_id, done_sold_vals in rows_sale_order_done_sold.items():
if sale_order_id in rows_sale_order:
rows_sale_order[sale_order_id][-1] = done_sold_vals['sold'] - done_sold_vals['done']
# group rows SO, SOL and their related employee rows.
timesheet_forecast_table_rows = []
for sale_order_id, sale_order_row in rows_sale_order.items():
timesheet_forecast_table_rows.append(sale_order_row)
for sale_line_row_key, sale_line_row in rows_sale_line.items():
if sale_order_id == sale_line_row_key[0]:
timesheet_forecast_table_rows.append(sale_line_row)
for employee_row_key, employee_row in rows_employee.items():
if sale_order_id == employee_row_key[0] and sale_line_row_key[1] == employee_row_key[1]:
timesheet_forecast_table_rows.append(employee_row)
# complete table data
return {
'header': self._table_header(),
'rows': timesheet_forecast_table_rows
}
def _table_header(self):
initial_date = fields.Date.from_string(fields.Date.today())
ts_months = sorted([fields.Date.to_string(initial_date - relativedelta(months=i, day=1)) for i in range(0, DEFAULT_MONTH_RANGE)]) # M1, M2, M3
def _to_short_month_name(date):
month_index = fields.Date.from_string(date).month
return babel.dates.get_month_names('abbreviated', locale=self.env.context.get('lang', 'en_US'))[month_index]
header_names = [_('Name'), _('Before')] + [_to_short_month_name(date) for date in ts_months] + [_('Done'), _('Sold'), _('Remaining')]
result = []
for name in header_names:
result.append({
'label': name,
'tooltip': '',
})
        # add tooltip for remaining
        result[-1]['tooltip'] = _('What is still to deliver based on sold hours and hours already done. Equals sold hours - done hours.')
return result
def _table_row_default(self):
        length = len(self._table_header())
        return [0.0] * (length - 1) # before, M1, M2, M3, Done, Sold, Remaining
def _table_rows_sql_query(self):
initial_date = fields.Date.from_string(fields.Date.today())
ts_months = sorted([fields.Date.to_string(initial_date - relativedelta(months=i, day=1)) for i in range(0, DEFAULT_MONTH_RANGE)]) # M1, M2, M3
# build query
query = """
SELECT
'timesheet' AS type,
date_trunc('month', date)::date AS month_date,
E.id AS employee_id,
S.order_id AS sale_order_id,
A.so_line AS sale_line_id,
SUM(A.unit_amount) AS number_hours
FROM account_analytic_line A
JOIN hr_employee E ON E.id = A.employee_id
LEFT JOIN sale_order_line S ON S.id = A.so_line
WHERE A.project_id IS NOT NULL
AND A.project_id IN %s
AND A.date < %s
GROUP BY date_trunc('month', date)::date, S.order_id, A.so_line, E.id
"""
last_ts_month = fields.Date.to_string(fields.Date.from_string(ts_months[-1]) + relativedelta(months=1))
query_params = (tuple(self.ids), last_ts_month)
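        # The two %s placeholders receive the project ids and an exclusive upper
        # date bound (the first day of the month after the current one).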
return query, query_params
def _table_rows_get_employee_lines(self, data_from_db):
initial_date = fields.Date.today()
ts_months = sorted([initial_date - relativedelta(months=i, day=1) for i in range(0, DEFAULT_MONTH_RANGE)]) # M1, M2, M3
default_row_vals = self._table_row_default()
# extract employee names
employee_ids = set()
for data in data_from_db:
employee_ids.add(data['employee_id'])
map_empl_names = {empl.id: empl.name for empl in self.env['hr.employee'].sudo().browse(employee_ids)}
# extract rows data for employee, sol and so rows
        rows_employee = {} # (so, sol, employee) -> [INFO, before, M1, M2, M3, Done, Sold, Remaining]
for data in data_from_db:
sale_line_id = data['sale_line_id']
sale_order_id = data['sale_order_id']
# employee row
row_key = (data['sale_order_id'], sale_line_id, data['employee_id'])
if row_key not in rows_employee:
meta_vals = {
'label': map_empl_names.get(row_key[2]),
'sale_line_id': sale_line_id,
'sale_order_id': sale_order_id,
'res_id': row_key[2],
'res_model': 'hr.employee',
'type': 'hr_employee'
}
                rows_employee[row_key] = [meta_vals] + default_row_vals[:] # INFO, before, M1, M2, M3, Done, Sold, Remaining
index = False
if data['type'] == 'timesheet':
if data['month_date'] in ts_months:
index = ts_months.index(data['month_date']) + 2
elif data['month_date'] < ts_months[0]:
index = 1
rows_employee[row_key][index] += data['number_hours']
rows_employee[row_key][5] += data['number_hours']
return rows_employee
def _table_get_empty_so_lines(self):
""" get the Sale Order Lines having no timesheet but having generated a task or a project """
so_lines = self.sudo().mapped('sale_line_id.order_id.order_line').filtered(lambda sol: sol.is_service and not sol.is_expense)
return set(so_lines.ids), set(so_lines.mapped('order_id').ids)
# --------------------------------------------------
# Actions: Stat buttons, ...
# --------------------------------------------------
def _plan_prepare_actions(self, values):
actions = []
if len(self) == 1:
if self.env.user.has_group('sales_team.group_sale_salesman'):
if not self.sale_line_id and not self.tasks.mapped('sale_line_id'):
actions.append({
'label': _("Create a Sales Order"),
'type': 'action',
'action_id': 'sale_timesheet.project_project_action_multi_create_sale_order',
'context': json.dumps({'active_id': self.id, 'active_model': 'project.project'}),
})
if self.env.user.has_group('sales_team.group_sale_salesman_all_leads'):
to_invoice_amount = values['dashboard']['profit'].get('to_invoice', False) # plan project only takes services SO line with timesheet into account
sale_orders = self.tasks.mapped('sale_line_id.order_id').filtered(lambda so: so.invoice_status == 'to invoice')
if to_invoice_amount and sale_orders:
if len(sale_orders) == 1:
actions.append({
'label': _("Create Invoice"),
'type': 'action',
'action_id': 'sale.action_view_sale_advance_payment_inv',
'context': json.dumps({'active_ids': sale_orders.ids, 'active_model': 'project.project'}),
})
else:
actions.append({
'label': _("Create Invoice"),
'type': 'action',
'action_id': 'sale_timesheet.project_project_action_multi_create_invoice',
'context': json.dumps({'active_id': self.id, 'active_model': 'project.project'}),
})
return actions
def _plan_get_stat_button(self):
stat_buttons = []
if len(self) == 1:
edit_project = self.env.ref('project.edit_project')
stat_buttons.append({
'name': _('Project'),
'icon': 'fa fa-puzzle-piece',
'action': _to_action_data('project.project', res_id=self.id,
views=[[edit_project.id, 'form']])
})
ts_tree = self.env.ref('hr_timesheet.hr_timesheet_line_tree')
ts_form = self.env.ref('hr_timesheet.hr_timesheet_line_form')
stat_buttons.append({
'name': _('Timesheets'),
'icon': 'fa fa-calendar',
'action': _to_action_data(
'account.analytic.line',
domain=[('project_id', 'in', self.ids)],
views=[(ts_tree.id, 'list'), (ts_form.id, 'form')],
)
})
# if only one project, add it in the context as default value
tasks_domain = [('project_id', 'in', self.ids)]
tasks_context = self.env.context
tasks_projects = self.env['project.task'].sudo().search(tasks_domain).mapped('project_id')
if len(tasks_projects) == 1:
tasks_context = {**tasks_context, 'default_project_id': tasks_projects.id}
stat_buttons.append({
'name': _('Tasks'),
'count': sum(self.mapped('task_count')),
'icon': 'fa fa-tasks',
'action': _to_action_data(
action=self.env.ref('project.action_view_task'),
domain=tasks_domain,
context=tasks_context
)
})
if self.env.user.has_group('sales_team.group_sale_salesman_all_leads'):
sale_orders = self.mapped('sale_line_id.order_id') | self.mapped(
'tasks.sale_order_id')
if sale_orders:
stat_buttons.append({
'name': _('Sales Orders'),
'count': len(sale_orders),
'icon': 'fa fa-dollar',
'action': _to_action_data(
action=self.env.ref('sale.action_orders'),
domain=[('id', 'in', sale_orders.ids)],
context={'create': False, 'edit': False, 'delete': False}
)
})
invoices = sale_orders.mapped('invoice_ids').filtered(lambda inv: inv.type == 'out_invoice')
if invoices:
stat_buttons.append({
'name': _('Invoices'),
'count': len(invoices),
'icon': 'fa fa-pencil-square-o',
'action': _to_action_data(
action=self.env.ref('account.action_invoice_tree1'),
domain=[('id', 'in', invoices.ids), ('type', '=', 'out_invoice')],
context={'create': False, 'delete': False}
)
})
return stat_buttons
def _to_action_data(model=None, *, action=None, views=None, res_id=None, domain=None, context=None):
# pass in either action or (model, views)
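    # Illustrative calls, mirroring the usages in _plan_get_stat_button():
    #   _to_action_data('account.analytic.line', views=[...], domain=[...])
    #   _to_action_data(action=self.env.ref('sale.action_orders'), domain=[...])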
if action:
assert model is None and views is None
act = clean_action(action.read()[0])
model = act['res_model']
views = act['views']
# FIXME: search-view-id, possibly help?
descr = {
'data-model': model,
'data-views': json.dumps(views),
}
if context is not None: # otherwise copy action's?
descr['data-context'] = json.dumps(context)
if res_id:
descr['data-res-id'] = res_id
elif domain:
descr['data-domain'] = json.dumps(domain)
return descr
| t3dev/odoo | addons/sale_timesheet/models/project_overview.py | Python | gpl-3.0 | 23,386 |
# coding: utf8
import logging
import time
import sys
import pprint
import requests
from vilya.libs.store import mc, clear_local_cache, ONE_DAY
MC_KEY_REPOSITORY_RELEASE = 'latest_release:%s'
MC_KEY_UNRELEASE_COMMITS = 'release:%s:%s:%s:commits'
def get_release(repository):
'''get latest successed release info
return: a dict with keys {annotate, changesets, message, first_rev,
last_rev, pre_release_rev, status,
project_source, release_manger, release_time,
url}
'''
if not repository.startswith('http:'):
repository = 'http://code.dapps.douban.com/' + repository
if not repository.endswith('.git'):
repository = repository + '.git'
key = MC_KEY_REPOSITORY_RELEASE % repository
info = mc.get(key)
if info is not None:
return info
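    # Cache miss: fetch fresh release info and memcache it for ONE_DAY.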
try:
info = fetch_release(repository)
mc.set(key, info, ONE_DAY)
return info
    except Exception:
return {}
def fetch_release(repository):
# TODO
return {}
def get_unreleased_commit_num(project):
last_release_info = get_release(project.repository)
from_ref = last_release_info['last_rev'] if last_release_info else None
key = MC_KEY_UNRELEASE_COMMITS % (project.id,
project.default_sha,
from_ref)
num = mc.get(key)
if num is None:
commits = project.repo.get_commits(project.default_branch,
from_ref=from_ref,
max_count=100)
num = len(commits)
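        # Note: max_count=100 means the cached count saturates at 100 commits.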
mc.set(key, num, ONE_DAY)
return num
def expire_outdated_releases():
    '''TODO: clear stale cache entries promptly after new code is deployed
'''
if __name__ == '__main__':
if sys.argv[1:] and sys.argv[1] == 'expire':
expire_outdated_releases()
else:
if sys.argv[1:]:
resp = sys.argv[1]
else:
resp = 'http://code.dapps.douban.com/code.git'
pprint.pprint(get_release(resp))
| xtao/code | vilya/models/release.py | Python | bsd-3-clause | 2,102 |
import os
import subprocess
class TestFunctions:
def setup_method(self):
self.test_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "scripts")
def test_functions(self):
test_file_name = "functions.py"
test_target = os.path.join(self.test_dir, test_file_name)
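        # Differential test: the interpreter under test (src/mjpython.py) must
        # produce byte-identical stdout to CPython for the same script.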
output = subprocess.check_output(["python", test_target])
my_output = subprocess.check_output(["python", "src/mjpython.py", test_target])
assert my_output == output
| mjpatter88/mjpython | test/acc/test_functions.py | Python | mit | 500 |
# -*- coding: utf-8 -*-
"""An output module that saves events to OpenSearch."""
from plaso.output import manager
from plaso.output import shared_opensearch
class OpenSearchOutputModule(shared_opensearch.SharedOpenSearchOutputModule):
"""Output module for OpenSearch."""
NAME = 'opensearch'
DESCRIPTION = 'Saves the events into an OpenSearch database.'
MAPPINGS_FILENAME = 'opensearch.mappings'
def WriteHeader(self):
"""Connects to the OpenSearch server and creates the index."""
self._Connect()
self._CreateIndexIfNotExists(self._index_name, self._mappings)
manager.OutputManager.RegisterOutput(
OpenSearchOutputModule, disabled=shared_opensearch.opensearchpy is None)
| joachimmetz/plaso | plaso/output/opensearch.py | Python | apache-2.0 | 705 |
#!/usr/bin/env python3.5
# coding:utf-8
# Global settings
# Number of items shown per page
MAX_PER_PAGE = 5
# Bank settings
ATM_LOG = '../log/atm_history.log'
ACCOUNT_FILE = '../db/user_record.db'
MAX_BALANCE = 15000.0
DEFAULT_PASSWORD = '123'
MAX_ERROR_COUNT = 3
INTEREST = 5 / 1000
# Bank back-office settings
# Administrator password
ADMIN_USER = 'admin'
ADMIN_PASSWORD = '202cb962ac59075b964b07152d234b70'
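# The digest above is an MD5 hex hash; it corresponds to md5('123'),
# the same value as DEFAULT_PASSWORD.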
# Bank daily-task settings
# Statement (billing) day
BILL_DAY = 22
# Repayment due day
REPAYMENT_DAY = 10
# Shopping mall settings
CUSTOMER_FILE = '../db/userinfo.db'
GOODS_FILE = '../db/list.db'
SHOPPING_LOG = '../log/shopping_history.log'
| smartczm/python-learn | Old-day01-10/s13-day5/day5/homework/src/conf.py | Python | gpl-2.0 | 626 |
import sys
import warnings
import textwrap
msg = textwrap.dedent("""
You are running Setuptools on Python 2, which is no longer
supported and
>>> SETUPTOOLS WILL STOP WORKING <<<
in a subsequent release. Please ensure you are installing
Setuptools using pip 9.x or later or pin to `setuptools<45`
in your environment.
If you have done those things and are still encountering
this message, please comment in
https://github.com/pypa/setuptools/issues/1458
about the steps that led to this unsupported combination.
""")
sys.version_info < (3,) and warnings.warn("*" * 60 + msg + "*" * 60)
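# `and` short-circuits here, so the warning is emitted only on Python 2.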
| Khan/khan-linter | vendor/py3/pkg_resources/py2_warn.py | Python | apache-2.0 | 633 |
from datetime import timedelta
from django.contrib import admin
from django.db.models import Case, Value, When
from django.utils import timezone
from .models import Channel, Post, RssFeed
@admin.register(Channel)
class ChannelAdmin(admin.ModelAdmin):
list_display = ('__str__', 'title', 'username', 'publish_picture', 'linked_title', 'short_link')
change_list_template = "rss/actions.html"
@admin.register(RssFeed)
class RssFeedAdmin(admin.ModelAdmin):
list_display = ('__str__', 'channel', 'link', 'active')
actions = ('activate', 'deactivate', 'toggle_active')
def activate(self, request, queryset):
queryset.update(active=True)
def deactivate(self, request, queryset):
queryset.update(active=False)
def toggle_active(self, request, queryset):
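        # Case/When flips the boolean for every selected row in one UPDATE.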
queryset.update(active=Case(When(active=True, then=Value(False)), default=Value(True)))
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
    list_display = ('title', 'feed', 'link', 'created', 'older_than_five_days')
    ordering = ('-created',)
    def older_than_five_days(self, post: Post):
        five_days_before = timezone.now() - timedelta(days=5)
        return post.created < five_days_before
    older_than_five_days.boolean = True
| vaniakosmos/memes-reposter | apps/rss/admin.py | Python | mit | 1,262 |
import cv2
import numpy as np
roi = cv2.imread( 'mtn1.jpg' )
hsv = cv2.cvtColor( roi, cv2.COLOR_BGR2HSV )
target = cv2.imread( 'mtn2.jpg' )
hsvt = cv2.cvtColor( target, cv2.COLOR_BGR2HSV )
mask = np.zeros( roi.shape[0:2], np.uint8 )
mask[302:1000, 766:1617] = 255
# calculating the object histogram
roihist = cv2.calcHist( [hsv], [0, 1], mask, [180, 256], [0, 180, 0, 256] )
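# The 2D histogram covers hue (0-180) and saturation (0-256); back-projection
# then scores each target pixel by how common its (H, S) pair is in the ROI.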
cv2.imwrite("hist.jpg", roihist)
# normalize the histogram and apply Backprojection
cv2.normalize( roihist, roihist, 0, 255, cv2.NORM_MINMAX )
prob = cv2.calcBackProject( [hsvt], [0, 1], roihist, [0, 180, 0, 256], 1 )
# now convolve with a circular disk
disk = cv2.getStructuringElement( cv2.MORPH_ELLIPSE, ( 5,5 ) )
prob = cv2.filter2D( prob, -1, disk)
cv2.imshow( 'probability', prob )
cv2.imshow("hist", roihist)
# cv2.imwrite( 'probability.jpg', prob )
cv2.waitKey( 0 )
cv2.destroyAllWindows( )
| SSG-DRD-IOT/commercial-iot-security-system | opencv/commercial/Instructions/CV_Fundamentals/histogram/backproj/backproj.py | Python | mit | 886 |
#!/usr/bin/env python
"""this will be the runner file"""
__author__ = 'Geoff Rosen'
__email__ = 'geoff.rosen <at> gmail.com'
__status__ = 'development'
__description__ = '''This program will help to run pplacer \n\
from start to end'''
import argparse, logging, time, sys
from classes import update_refpkg as cur
from classes import config_parser as ccp
from classes import run_pplacer_pipeline as rpp
def main(argv):
logger = make_log('pplacer-runner', 'pplacer-runner.log')
logger.info('Initiated with command: %s' % ' '.join(argv))
parser = argparse.ArgumentParser(description = __description__,\
epilog = 'Note: refpkg, alignment, and config files are required without --upgrade')
parser.add_argument('-u', '--upgrade', type=str, help='upgrade refpkg')
parser.add_argument('-r', '--refpkg', type=str, help = 'previously updated refpkg file path. if not upgraded use -u')
parser.add_argument('-f', '--fasta', type=str, help = 'unaligned fasta file path')
parser.add_argument('-c', '--config', type=str, help = 'configuration file path formatted as command:title argument')
parser.add_argument('-j', '--threads', type=int, help = 'threads to perform parallel options on', default = 4)
args = parser.parse_args(argv[1:])
if args.upgrade != None:
cur.update_refpkg_run(args.upgrade, logger)
elif args.refpkg != None and args.fasta != None:
rpp.run_pplacer_pipeline(args.refpkg, args.fasta, logger, threads = args.threads)
elif args.refpkg == None or args.fasta == None or args.config == None:
parser.error('refpkg, fasta, and config are required')
else:
config_info = ccp.FullConfig(args.config)
print config_info
def make_log(log_name, log_fp):
logger = logging.getLogger(log_name)
logger.setLevel(logging.INFO)
handler = logging.FileHandler(log_fp)
formatter = logging.Formatter('%(levelname)s: %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.info('Log initiated - %s' % time.strftime("%c"))
return logger
if __name__ == '__main__':
main(sys.argv)
| geoffrosen/vaginal-microbiome | pplacerrunner.py | Python | mit | 2,034 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
from spack import *
class Xsdk(Package):
"""Xsdk is a suite of Department of Energy (DOE) packages for numerical
simulation. This is a Spack bundle package that installs the xSDK
packages
"""
homepage = "http://xsdk.info"
# Dummy url since Spack complains if I don't list something, will be
# removed when metapackage is available
url = 'http://ftp.mcs.anl.gov/pub/petsc/externalpackages/xsdk.tar.gz'
version('develop', 'a52dc710c744afa0b71429b8ec9425bc')
version('0.3.0', 'a52dc710c744afa0b71429b8ec9425bc', preferred=True)
version('xsdk-0.2.0', 'a52dc710c744afa0b71429b8ec9425bc')
variant('debug', default=False, description='Compile in debug mode')
variant('cuda', default=False, description='Enable CUDA dependent packages')
depends_on('hypre@2.12.1~internal-superlu', when='@0.3.0')
depends_on('hypre@xsdk-0.2.0~internal-superlu', when='@xsdk-0.2.0')
depends_on('hypre@develop~internal-superlu', when='@develop')
depends_on('mfem@3.3.2+mpi+hypre+superlu-dist+petsc+sundials+examples+miniapps', when='@0.3.0')
depends_on('mfem@develop+mpi+hypre+superlu-dist+petsc+sundials+examples+miniapps', when='@develop')
depends_on('superlu-dist@5.2.2', when='@0.3.0')
depends_on('superlu-dist@xsdk-0.2.0', when='@xsdk-0.2.0')
depends_on('superlu-dist@develop', when='@develop')
depends_on('trilinos@12.12.1+hypre+superlu-dist+metis+hdf5~mumps+boost~suite-sparse~tpetra~ifpack2~zoltan2~amesos2~exodus',
when='@0.3.0')
depends_on('trilinos@xsdk-0.2.0+hypre+superlu-dist+metis+hdf5~mumps+boost~suite-sparse~tpetra~ifpack2~zoltan2~amesos2~exodus',
when='@xsdk-0.2.0')
depends_on('trilinos@12.12.1+hypre+superlu-dist+metis+hdf5~mumps+boost~suite-sparse~tpetra+nox~ifpack2~zoltan2~amesos2~exodus',
when='@develop')
depends_on('petsc@3.8.2+trilinos+mpi+hypre+superlu-dist+metis+hdf5~mumps+double~int64',
when='@0.3.0')
depends_on('petsc@xsdk-0.2.0+trilinos+mpi+hypre+superlu-dist+metis+hdf5~mumps+double~int64',
when='@xsdk-0.2.0')
depends_on('petsc@develop+trilinos+mpi+hypre+superlu-dist+metis+hdf5~mumps+double~int64',
when='@develop')
depends_on('pflotran@xsdk-0.3.0', when='@0.3.0')
depends_on('pflotran@xsdk-0.2.0', when='@xsdk-0.2.0')
depends_on('pflotran@develop', when='@develop')
depends_on('alquimia@xsdk-0.3.0', when='@0.3.0')
depends_on('alquimia@xsdk-0.2.0', when='@xsdk-0.2.0')
depends_on('alquimia@develop', when='@develop')
depends_on('sundials@3.1.0~int64+hypre', when='@0.3.0')
depends_on('sundials@3.1.0~int64+hypre', when='@develop')
depends_on('plasma@17.2:', when='@develop %gcc@6.0:')
depends_on('magma@2.2.0', when='@0.3.0 +cuda')
depends_on('magma@2.2.0', when='@develop +cuda')
# xSDKTrilinos depends on the version of Trilinos built with
# +tpetra which is turned off for faster xSDK
# depends_on('xsdktrilinos@xsdk-0.2.0', when='@xsdk-0.2.0')
# depends_on('xsdktrilinos@develop', when='@develop')
    # How do we propagate the debug flag to all dependent packages?
    # If I just do spack install xsdk+debug will that propagate it down?
# Dummy install for now, will be removed when metapackage is available
def install(self, spec, prefix):
# Prevent the error message
# ==> Error: Install failed for xsdk. Nothing was installed!
# ==> Error: Installation process had nonzero exit code : 256
with open(os.path.join(spec.prefix, 'bundle-package.txt'), 'w') as out:
out.write('This is a bundle\n')
out.close()
| matthiasdiener/spack | var/spack/repos/builtin/packages/xsdk/package.py | Python | lgpl-2.1 | 4,933 |
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# This file is only used if you use `make publish` or
# explicitly specify it as your config file.
import os
import sys
sys.path.append(os.curdir)
from pelicanconf import *
SITEURL = 'https://relopezbriega.github.io'
#RELATIVE_URLS = False
#FEED_ALL_ATOM = 'feeds/all.atom.xml'
#CATEGORY_FEED_ATOM = 'feeds/%s.atom.xml'
DELETE_OUTPUT_DIRECTORY = True
# Following items are often useful when publishing
DISQUS_SITENAME = "relopezbriegagithub"
GOOGLE_ANALYTICS = "UA-29080434-3"
| relopezbriega/mi-python-blog | publishconf.py | Python | gpl-2.0 | 574 |